aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorKevin Hilman <khilman@linaro.org>2013-12-20 11:30:50 -0500
committerKevin Hilman <khilman@linaro.org>2013-12-20 11:30:50 -0500
commitcd15c51d6c2f577f896471a058f33a95f164dba2 (patch)
tree9d42b8ea581ca35bdf75467b8bbbab21e7056ff9 /drivers/gpu
parent5b8314a98888b12159d0b1b14982b8dd2d94e3f5 (diff)
parent130f769e81fc472beb2211320777e26050e3fa15 (diff)
Merge tag 'omap-for-v3.13/display-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into fixes
I accidentally removed some mux code for omap4 that I thought was dead code as omap4 has been booting with device tree only since v3.10. Turns out I also removed some display related mux code, so let's revert that except for the dead code parts. * tag 'omap-for-v3.13/display-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap: (439 commits) Revert "ARM: OMAP2+: Remove legacy mux code for display.c" +Linux 3.13-rc4
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/drm_edid.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c35
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c13
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c60
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c6
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c5
-rw-r--r--drivers/gpu/drm/i915/intel_display.c14
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c34
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c15
-rw-r--r--drivers/gpu/drm/nouveau/Makefile1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/clock.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c445
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c15
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c12
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c28
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon.h8
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c55
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h33
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/cayman4
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/evergreen4
-rw-r--r--drivers/gpu/drm/radeon/si.c11
-rw-r--r--drivers/gpu/drm/tegra/drm.c34
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/fb.c2
-rw-r--r--drivers/gpu/drm/tegra/rgb.c11
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c118
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c5
-rw-r--r--drivers/gpu/host1x/bus.c5
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c4
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c4
53 files changed, 849 insertions, 269 deletions
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fb7cf0e796f6..0a1e4a5f4234 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2674,7 +2674,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
2674 int modes = 0; 2674 int modes = 0;
2675 u8 cea_mode; 2675 u8 cea_mode;
2676 2676
2677 if (video_db == NULL || video_index > video_len) 2677 if (video_db == NULL || video_index >= video_len)
2678 return 0; 2678 return 0;
2679 2679
2680 /* CEA modes are numbered 1..127 */ 2680 /* CEA modes are numbered 1..127 */
@@ -2701,7 +2701,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
2701 if (structure & (1 << 8)) { 2701 if (structure & (1 << 8)) {
2702 newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); 2702 newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
2703 if (newmode) { 2703 if (newmode) {
2704 newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF; 2704 newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
2705 drm_mode_probed_add(connector, newmode); 2705 drm_mode_probed_add(connector, newmode);
2706 modes++; 2706 modes++;
2707 } 2707 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index b676006a95a0..22b8f5eced80 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -173,28 +173,37 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
173static void exynos_drm_preclose(struct drm_device *dev, 173static void exynos_drm_preclose(struct drm_device *dev,
174 struct drm_file *file) 174 struct drm_file *file)
175{ 175{
176 exynos_drm_subdrv_close(dev, file);
177}
178
179static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
180{
176 struct exynos_drm_private *private = dev->dev_private; 181 struct exynos_drm_private *private = dev->dev_private;
177 struct drm_pending_vblank_event *e, *t; 182 struct drm_pending_vblank_event *v, *vt;
183 struct drm_pending_event *e, *et;
178 unsigned long flags; 184 unsigned long flags;
179 185
180 /* release events of current file */ 186 if (!file->driver_priv)
187 return;
188
189 /* Release all events not unhandled by page flip handler. */
181 spin_lock_irqsave(&dev->event_lock, flags); 190 spin_lock_irqsave(&dev->event_lock, flags);
182 list_for_each_entry_safe(e, t, &private->pageflip_event_list, 191 list_for_each_entry_safe(v, vt, &private->pageflip_event_list,
183 base.link) { 192 base.link) {
184 if (e->base.file_priv == file) { 193 if (v->base.file_priv == file) {
185 list_del(&e->base.link); 194 list_del(&v->base.link);
186 e->base.destroy(&e->base); 195 drm_vblank_put(dev, v->pipe);
196 v->base.destroy(&v->base);
187 } 197 }
188 } 198 }
189 spin_unlock_irqrestore(&dev->event_lock, flags);
190 199
191 exynos_drm_subdrv_close(dev, file); 200 /* Release all events handled by page flip handler but not freed. */
192} 201 list_for_each_entry_safe(e, et, &file->event_list, link) {
202 list_del(&e->link);
203 e->destroy(e);
204 }
205 spin_unlock_irqrestore(&dev->event_lock, flags);
193 206
194static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
195{
196 if (!file->driver_priv)
197 return;
198 207
199 kfree(file->driver_priv); 208 kfree(file->driver_priv);
200 file->driver_priv = NULL; 209 file->driver_priv = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 23da72b5eae9..a61878bf5dcd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -31,7 +31,7 @@
31#include "exynos_drm_iommu.h" 31#include "exynos_drm_iommu.h"
32 32
33/* 33/*
34 * FIMD is stand for Fully Interactive Mobile Display and 34 * FIMD stands for Fully Interactive Mobile Display and
35 * as a display controller, it transfers contents drawn on memory 35 * as a display controller, it transfers contents drawn on memory
36 * to a LCD Panel through Display Interfaces such as RGB or 36 * to a LCD Panel through Display Interfaces such as RGB or
37 * CPU Interface. 37 * CPU Interface.
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 989be12cdd6e..2e367a1c6a64 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -534,8 +534,10 @@ static int i915_drm_freeze(struct drm_device *dev)
534 * Disable CRTCs directly since we want to preserve sw state 534 * Disable CRTCs directly since we want to preserve sw state
535 * for _thaw. 535 * for _thaw.
536 */ 536 */
537 mutex_lock(&dev->mode_config.mutex);
537 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 538 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
538 dev_priv->display.crtc_disable(crtc); 539 dev_priv->display.crtc_disable(crtc);
540 mutex_unlock(&dev->mode_config.mutex);
539 541
540 intel_modeset_suspend_hw(dev); 542 intel_modeset_suspend_hw(dev);
541 } 543 }
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 12bbd5eac70d..621c7c67a643 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4442,10 +4442,9 @@ i915_gem_init_hw(struct drm_device *dev)
4442 if (dev_priv->ellc_size) 4442 if (dev_priv->ellc_size)
4443 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); 4443 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4444 4444
4445 if (IS_HSW_GT3(dev)) 4445 if (IS_HASWELL(dev))
4446 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED); 4446 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4447 else 4447 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4448 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
4449 4448
4450 if (HAS_PCH_NOP(dev)) { 4449 if (HAS_PCH_NOP(dev)) {
4451 u32 temp = I915_READ(GEN7_MSG_CTL); 4450 u32 temp = I915_READ(GEN7_MSG_CTL);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7d5752fda5f1..9bb533e0d762 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -125,13 +125,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
125 125
126 ret = i915_gem_object_get_pages(obj); 126 ret = i915_gem_object_get_pages(obj);
127 if (ret) 127 if (ret)
128 goto error; 128 goto err;
129
130 i915_gem_object_pin_pages(obj);
129 131
130 ret = -ENOMEM; 132 ret = -ENOMEM;
131 133
132 pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); 134 pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
133 if (pages == NULL) 135 if (pages == NULL)
134 goto error; 136 goto err_unpin;
135 137
136 i = 0; 138 i = 0;
137 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) 139 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
@@ -141,15 +143,16 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
141 drm_free_large(pages); 143 drm_free_large(pages);
142 144
143 if (!obj->dma_buf_vmapping) 145 if (!obj->dma_buf_vmapping)
144 goto error; 146 goto err_unpin;
145 147
146 obj->vmapping_count = 1; 148 obj->vmapping_count = 1;
147 i915_gem_object_pin_pages(obj);
148out_unlock: 149out_unlock:
149 mutex_unlock(&dev->struct_mutex); 150 mutex_unlock(&dev->struct_mutex);
150 return obj->dma_buf_vmapping; 151 return obj->dma_buf_vmapping;
151 152
152error: 153err_unpin:
154 i915_gem_object_unpin_pages(obj);
155err:
153 mutex_unlock(&dev->struct_mutex); 156 mutex_unlock(&dev->struct_mutex);
154 return ERR_PTR(ret); 157 return ERR_PTR(ret);
155} 158}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 885d595e0e02..b7e787fb4649 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,6 +33,9 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include <linux/dma_remapping.h> 34#include <linux/dma_remapping.h>
35 35
36#define __EXEC_OBJECT_HAS_PIN (1<<31)
37#define __EXEC_OBJECT_HAS_FENCE (1<<30)
38
36struct eb_vmas { 39struct eb_vmas {
37 struct list_head vmas; 40 struct list_head vmas;
38 int and; 41 int and;
@@ -187,7 +190,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
187 } 190 }
188} 191}
189 192
190static void eb_destroy(struct eb_vmas *eb) { 193static void
194i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
195{
196 struct drm_i915_gem_exec_object2 *entry;
197 struct drm_i915_gem_object *obj = vma->obj;
198
199 if (!drm_mm_node_allocated(&vma->node))
200 return;
201
202 entry = vma->exec_entry;
203
204 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
205 i915_gem_object_unpin_fence(obj);
206
207 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
208 i915_gem_object_unpin(obj);
209
210 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
211}
212
213static void eb_destroy(struct eb_vmas *eb)
214{
191 while (!list_empty(&eb->vmas)) { 215 while (!list_empty(&eb->vmas)) {
192 struct i915_vma *vma; 216 struct i915_vma *vma;
193 217
@@ -195,6 +219,7 @@ static void eb_destroy(struct eb_vmas *eb) {
195 struct i915_vma, 219 struct i915_vma,
196 exec_list); 220 exec_list);
197 list_del_init(&vma->exec_list); 221 list_del_init(&vma->exec_list);
222 i915_gem_execbuffer_unreserve_vma(vma);
198 drm_gem_object_unreference(&vma->obj->base); 223 drm_gem_object_unreference(&vma->obj->base);
199 } 224 }
200 kfree(eb); 225 kfree(eb);
@@ -478,9 +503,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
478 return ret; 503 return ret;
479} 504}
480 505
481#define __EXEC_OBJECT_HAS_PIN (1<<31)
482#define __EXEC_OBJECT_HAS_FENCE (1<<30)
483
484static int 506static int
485need_reloc_mappable(struct i915_vma *vma) 507need_reloc_mappable(struct i915_vma *vma)
486{ 508{
@@ -552,26 +574,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
552 return 0; 574 return 0;
553} 575}
554 576
555static void
556i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
557{
558 struct drm_i915_gem_exec_object2 *entry;
559 struct drm_i915_gem_object *obj = vma->obj;
560
561 if (!drm_mm_node_allocated(&vma->node))
562 return;
563
564 entry = vma->exec_entry;
565
566 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
567 i915_gem_object_unpin_fence(obj);
568
569 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
570 i915_gem_object_unpin(obj);
571
572 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
573}
574
575static int 577static int
576i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 578i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
577 struct list_head *vmas, 579 struct list_head *vmas,
@@ -670,13 +672,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
670 goto err; 672 goto err;
671 } 673 }
672 674
673err: /* Decrement pin count for bound objects */ 675err:
674 list_for_each_entry(vma, vmas, exec_list)
675 i915_gem_execbuffer_unreserve_vma(vma);
676
677 if (ret != -ENOSPC || retry++) 676 if (ret != -ENOSPC || retry++)
678 return ret; 677 return ret;
679 678
679 /* Decrement pin count for bound objects */
680 list_for_each_entry(vma, vmas, exec_list)
681 i915_gem_execbuffer_unreserve_vma(vma);
682
680 ret = i915_gem_evict_vm(vm, true); 683 ret = i915_gem_evict_vm(vm, true);
681 if (ret) 684 if (ret)
682 return ret; 685 return ret;
@@ -708,6 +711,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
708 while (!list_empty(&eb->vmas)) { 711 while (!list_empty(&eb->vmas)) {
709 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); 712 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
710 list_del_init(&vma->exec_list); 713 list_del_init(&vma->exec_list);
714 i915_gem_execbuffer_unreserve_vma(vma);
711 drm_gem_object_unreference(&vma->obj->base); 715 drm_gem_object_unreference(&vma->obj->base);
712 } 716 }
713 717
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3620a1b0a73c..38cb8d44a013 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
57#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) 57#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
58#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) 58#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
59#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) 59#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
60#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
60#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) 61#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
62#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
61 63
62#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) 64#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
63#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) 65#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
@@ -185,10 +187,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
185 case I915_CACHE_NONE: 187 case I915_CACHE_NONE:
186 break; 188 break;
187 case I915_CACHE_WT: 189 case I915_CACHE_WT:
188 pte |= HSW_WT_ELLC_LLC_AGE0; 190 pte |= HSW_WT_ELLC_LLC_AGE3;
189 break; 191 break;
190 default: 192 default:
191 pte |= HSW_WB_ELLC_LLC_AGE0; 193 pte |= HSW_WB_ELLC_LLC_AGE3;
192 break; 194 break;
193 } 195 }
194 196
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f9eafb6ed523..ee2742122a02 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -235,6 +235,7 @@
235 */ 235 */
236#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) 236#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
237#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1) 237#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
238#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
238#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ 239#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
239#define MI_FLUSH_DW_STORE_INDEX (1<<21) 240#define MI_FLUSH_DW_STORE_INDEX (1<<21)
240#define MI_INVALIDATE_TLB (1<<18) 241#define MI_INVALIDATE_TLB (1<<18)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 330077bcd0bd..526c8ded16b0 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -173,7 +173,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
173 ddi_translations = ddi_translations_dp; 173 ddi_translations = ddi_translations_dp;
174 break; 174 break;
175 case PORT_D: 175 case PORT_D:
176 if (intel_dpd_is_edp(dev)) 176 if (intel_dp_is_edp(dev, PORT_D))
177 ddi_translations = ddi_translations_edp; 177 ddi_translations = ddi_translations_edp;
178 else 178 else
179 ddi_translations = ddi_translations_dp; 179 ddi_translations = ddi_translations_dp;
@@ -1158,9 +1158,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
1158 if (wait) 1158 if (wait)
1159 intel_wait_ddi_buf_idle(dev_priv, port); 1159 intel_wait_ddi_buf_idle(dev_priv, port);
1160 1160
1161 if (type == INTEL_OUTPUT_EDP) { 1161 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
1162 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1162 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1163 ironlake_edp_panel_vdd_on(intel_dp); 1163 ironlake_edp_panel_vdd_on(intel_dp);
1164 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
1164 ironlake_edp_panel_off(intel_dp); 1165 ironlake_edp_panel_off(intel_dp);
1165 } 1166 }
1166 1167
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7ec8b488bb1d..080f6fd4e839 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5815,7 +5815,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
5815 uint16_t postoff = 0; 5815 uint16_t postoff = 0;
5816 5816
5817 if (intel_crtc->config.limited_color_range) 5817 if (intel_crtc->config.limited_color_range)
5818 postoff = (16 * (1 << 13) / 255) & 0x1fff; 5818 postoff = (16 * (1 << 12) / 255) & 0x1fff;
5819 5819
5820 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); 5820 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
5821 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); 5821 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
@@ -6402,7 +6402,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6402 6402
6403 /* Make sure we're not on PC8 state before disabling PC8, otherwise 6403 /* Make sure we're not on PC8 state before disabling PC8, otherwise
6404 * we'll hang the machine! */ 6404 * we'll hang the machine! */
6405 dev_priv->uncore.funcs.force_wake_get(dev_priv); 6405 gen6_gt_force_wake_get(dev_priv);
6406 6406
6407 if (val & LCPLL_POWER_DOWN_ALLOW) { 6407 if (val & LCPLL_POWER_DOWN_ALLOW) {
6408 val &= ~LCPLL_POWER_DOWN_ALLOW; 6408 val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6436,7 +6436,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6436 DRM_ERROR("Switching back to LCPLL failed\n"); 6436 DRM_ERROR("Switching back to LCPLL failed\n");
6437 } 6437 }
6438 6438
6439 dev_priv->uncore.funcs.force_wake_put(dev_priv); 6439 gen6_gt_force_wake_put(dev_priv);
6440} 6440}
6441 6441
6442void hsw_enable_pc8_work(struct work_struct *__work) 6442void hsw_enable_pc8_work(struct work_struct *__work)
@@ -8354,7 +8354,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
8354 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | 8354 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
8355 DERRMR_PIPEB_PRI_FLIP_DONE | 8355 DERRMR_PIPEB_PRI_FLIP_DONE |
8356 DERRMR_PIPEC_PRI_FLIP_DONE)); 8356 DERRMR_PIPEC_PRI_FLIP_DONE));
8357 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1)); 8357 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
8358 MI_SRM_LRM_GLOBAL_GTT);
8358 intel_ring_emit(ring, DERRMR); 8359 intel_ring_emit(ring, DERRMR);
8359 intel_ring_emit(ring, ring->scratch.gtt_offset + 256); 8360 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
8360 } 8361 }
@@ -10049,7 +10050,7 @@ static void intel_setup_outputs(struct drm_device *dev)
10049 intel_ddi_init(dev, PORT_D); 10050 intel_ddi_init(dev, PORT_D);
10050 } else if (HAS_PCH_SPLIT(dev)) { 10051 } else if (HAS_PCH_SPLIT(dev)) {
10051 int found; 10052 int found;
10052 dpd_is_edp = intel_dpd_is_edp(dev); 10053 dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
10053 10054
10054 if (has_edp_a(dev)) 10055 if (has_edp_a(dev))
10055 intel_dp_init(dev, DP_A, PORT_A); 10056 intel_dp_init(dev, DP_A, PORT_A);
@@ -10086,8 +10087,7 @@ static void intel_setup_outputs(struct drm_device *dev)
10086 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, 10087 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
10087 PORT_C); 10088 PORT_C);
10088 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) 10089 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
10089 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, 10090 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
10090 PORT_C);
10091 } 10091 }
10092 10092
10093 intel_dsi_init(dev); 10093 intel_dsi_init(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 0b2e842fef01..30c627c7b7ba 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3326,11 +3326,19 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
3326} 3326}
3327 3327
3328/* check the VBT to see whether the eDP is on DP-D port */ 3328/* check the VBT to see whether the eDP is on DP-D port */
3329bool intel_dpd_is_edp(struct drm_device *dev) 3329bool intel_dp_is_edp(struct drm_device *dev, enum port port)
3330{ 3330{
3331 struct drm_i915_private *dev_priv = dev->dev_private; 3331 struct drm_i915_private *dev_priv = dev->dev_private;
3332 union child_device_config *p_child; 3332 union child_device_config *p_child;
3333 int i; 3333 int i;
3334 static const short port_mapping[] = {
3335 [PORT_B] = PORT_IDPB,
3336 [PORT_C] = PORT_IDPC,
3337 [PORT_D] = PORT_IDPD,
3338 };
3339
3340 if (port == PORT_A)
3341 return true;
3334 3342
3335 if (!dev_priv->vbt.child_dev_num) 3343 if (!dev_priv->vbt.child_dev_num)
3336 return false; 3344 return false;
@@ -3338,7 +3346,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
3338 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 3346 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3339 p_child = dev_priv->vbt.child_dev + i; 3347 p_child = dev_priv->vbt.child_dev + i;
3340 3348
3341 if (p_child->common.dvo_port == PORT_IDPD && 3349 if (p_child->common.dvo_port == port_mapping[port] &&
3342 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) == 3350 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
3343 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)) 3351 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
3344 return true; 3352 return true;
@@ -3616,26 +3624,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3616 intel_dp->DP = I915_READ(intel_dp->output_reg); 3624 intel_dp->DP = I915_READ(intel_dp->output_reg);
3617 intel_dp->attached_connector = intel_connector; 3625 intel_dp->attached_connector = intel_connector;
3618 3626
3619 type = DRM_MODE_CONNECTOR_DisplayPort; 3627 if (intel_dp_is_edp(dev, port))
3620 /*
3621 * FIXME : We need to initialize built-in panels before external panels.
3622 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
3623 */
3624 switch (port) {
3625 case PORT_A:
3626 type = DRM_MODE_CONNECTOR_eDP; 3628 type = DRM_MODE_CONNECTOR_eDP;
3627 break; 3629 else
3628 case PORT_C: 3630 type = DRM_MODE_CONNECTOR_DisplayPort;
3629 if (IS_VALLEYVIEW(dev))
3630 type = DRM_MODE_CONNECTOR_eDP;
3631 break;
3632 case PORT_D:
3633 if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
3634 type = DRM_MODE_CONNECTOR_eDP;
3635 break;
3636 default: /* silence GCC warning */
3637 break;
3638 }
3639 3631
3640 /* 3632 /*
3641 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but 3633 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1e49aa8f5377..a18e88b3e425 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -708,7 +708,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
708void intel_dp_check_link_status(struct intel_dp *intel_dp); 708void intel_dp_check_link_status(struct intel_dp *intel_dp);
709bool intel_dp_compute_config(struct intel_encoder *encoder, 709bool intel_dp_compute_config(struct intel_encoder *encoder,
710 struct intel_crtc_config *pipe_config); 710 struct intel_crtc_config *pipe_config);
711bool intel_dpd_is_edp(struct drm_device *dev); 711bool intel_dp_is_edp(struct drm_device *dev, enum port port);
712void ironlake_edp_backlight_on(struct intel_dp *intel_dp); 712void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
713void ironlake_edp_backlight_off(struct intel_dp *intel_dp); 713void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
714void ironlake_edp_panel_on(struct intel_dp *intel_dp); 714void ironlake_edp_panel_on(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index caf2ee4e5441..6e0d5e075b15 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1180,7 +1180,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1180 1180
1181 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1181 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1182 clock = adjusted_mode->crtc_clock; 1182 clock = adjusted_mode->crtc_clock;
1183 htotal = adjusted_mode->htotal; 1183 htotal = adjusted_mode->crtc_htotal;
1184 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1184 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1185 pixel_size = crtc->fb->bits_per_pixel / 8; 1185 pixel_size = crtc->fb->bits_per_pixel / 8;
1186 1186
@@ -1267,7 +1267,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
1267 crtc = intel_get_crtc_for_plane(dev, plane); 1267 crtc = intel_get_crtc_for_plane(dev, plane);
1268 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1268 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1269 clock = adjusted_mode->crtc_clock; 1269 clock = adjusted_mode->crtc_clock;
1270 htotal = adjusted_mode->htotal; 1270 htotal = adjusted_mode->crtc_htotal;
1271 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1271 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1272 pixel_size = crtc->fb->bits_per_pixel / 8; 1272 pixel_size = crtc->fb->bits_per_pixel / 8;
1273 1273
@@ -1498,7 +1498,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1498 const struct drm_display_mode *adjusted_mode = 1498 const struct drm_display_mode *adjusted_mode =
1499 &to_intel_crtc(crtc)->config.adjusted_mode; 1499 &to_intel_crtc(crtc)->config.adjusted_mode;
1500 int clock = adjusted_mode->crtc_clock; 1500 int clock = adjusted_mode->crtc_clock;
1501 int htotal = adjusted_mode->htotal; 1501 int htotal = adjusted_mode->crtc_htotal;
1502 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1502 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1503 int pixel_size = crtc->fb->bits_per_pixel / 8; 1503 int pixel_size = crtc->fb->bits_per_pixel / 8;
1504 unsigned long line_time_us; 1504 unsigned long line_time_us;
@@ -1624,7 +1624,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1624 const struct drm_display_mode *adjusted_mode = 1624 const struct drm_display_mode *adjusted_mode =
1625 &to_intel_crtc(enabled)->config.adjusted_mode; 1625 &to_intel_crtc(enabled)->config.adjusted_mode;
1626 int clock = adjusted_mode->crtc_clock; 1626 int clock = adjusted_mode->crtc_clock;
1627 int htotal = adjusted_mode->htotal; 1627 int htotal = adjusted_mode->crtc_htotal;
1628 int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w; 1628 int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
1629 int pixel_size = enabled->fb->bits_per_pixel / 8; 1629 int pixel_size = enabled->fb->bits_per_pixel / 8;
1630 unsigned long line_time_us; 1630 unsigned long line_time_us;
@@ -1776,7 +1776,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1776 crtc = intel_get_crtc_for_plane(dev, plane); 1776 crtc = intel_get_crtc_for_plane(dev, plane);
1777 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1777 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1778 clock = adjusted_mode->crtc_clock; 1778 clock = adjusted_mode->crtc_clock;
1779 htotal = adjusted_mode->htotal; 1779 htotal = adjusted_mode->crtc_htotal;
1780 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1780 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1781 pixel_size = crtc->fb->bits_per_pixel / 8; 1781 pixel_size = crtc->fb->bits_per_pixel / 8;
1782 1782
@@ -2469,8 +2469,9 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2469 /* The WM are computed with base on how long it takes to fill a single 2469 /* The WM are computed with base on how long it takes to fill a single
2470 * row at the given clock rate, multiplied by 8. 2470 * row at the given clock rate, multiplied by 8.
2471 * */ 2471 * */
2472 linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock); 2472 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2473 ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, 2473 mode->crtc_clock);
2474 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2474 intel_ddi_get_cdclk_freq(dev_priv)); 2475 intel_ddi_get_cdclk_freq(dev_priv));
2475 2476
2476 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | 2477 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index edcf801613e6..b3fa1ba191b7 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -59,6 +59,7 @@ nouveau-y += core/subdev/clock/nv40.o
59nouveau-y += core/subdev/clock/nv50.o 59nouveau-y += core/subdev/clock/nv50.o
60nouveau-y += core/subdev/clock/nv84.o 60nouveau-y += core/subdev/clock/nv84.o
61nouveau-y += core/subdev/clock/nva3.o 61nouveau-y += core/subdev/clock/nva3.o
62nouveau-y += core/subdev/clock/nvaa.o
62nouveau-y += core/subdev/clock/nvc0.o 63nouveau-y += core/subdev/clock/nvc0.o
63nouveau-y += core/subdev/clock/nve0.o 64nouveau-y += core/subdev/clock/nve0.o
64nouveau-y += core/subdev/clock/pllnv04.o 65nouveau-y += core/subdev/clock/pllnv04.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index db139827047c..db3fc7be856a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -283,7 +283,7 @@ nv50_identify(struct nouveau_device *device)
283 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 283 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
284 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 284 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
285 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 285 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
286 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; 286 device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
287 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 287 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
288 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 288 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
289 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 289 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
@@ -311,7 +311,7 @@ nv50_identify(struct nouveau_device *device)
311 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 311 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
312 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; 312 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
313 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; 313 device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
314 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; 314 device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
315 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; 315 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
316 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 316 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
317 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; 317 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 5f555788121c..e6352bd5b4ff 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -33,6 +33,7 @@
33#include <engine/dmaobj.h> 33#include <engine/dmaobj.h>
34#include <engine/fifo.h> 34#include <engine/fifo.h>
35 35
36#include "nv04.h"
36#include "nv50.h" 37#include "nv50.h"
37 38
38/******************************************************************************* 39/*******************************************************************************
@@ -460,6 +461,8 @@ nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
460 nv_subdev(priv)->intr = nv04_fifo_intr; 461 nv_subdev(priv)->intr = nv04_fifo_intr;
461 nv_engine(priv)->cclass = &nv50_fifo_cclass; 462 nv_engine(priv)->cclass = &nv50_fifo_cclass;
462 nv_engine(priv)->sclass = nv50_fifo_sclass; 463 nv_engine(priv)->sclass = nv50_fifo_sclass;
464 priv->base.pause = nv04_fifo_pause;
465 priv->base.start = nv04_fifo_start;
463 return 0; 466 return 0;
464} 467}
465 468
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 0908dc834c84..fe0f41e65d9b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -35,6 +35,7 @@
35#include <engine/dmaobj.h> 35#include <engine/dmaobj.h>
36#include <engine/fifo.h> 36#include <engine/fifo.h>
37 37
38#include "nv04.h"
38#include "nv50.h" 39#include "nv50.h"
39 40
40/******************************************************************************* 41/*******************************************************************************
@@ -432,6 +433,8 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
432 nv_subdev(priv)->intr = nv04_fifo_intr; 433 nv_subdev(priv)->intr = nv04_fifo_intr;
433 nv_engine(priv)->cclass = &nv84_fifo_cclass; 434 nv_engine(priv)->cclass = &nv84_fifo_cclass;
434 nv_engine(priv)->sclass = nv84_fifo_sclass; 435 nv_engine(priv)->sclass = nv84_fifo_sclass;
436 priv->base.pause = nv04_fifo_pause;
437 priv->base.start = nv04_fifo_start;
435 return 0; 438 return 0;
436} 439}
437 440
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index b574dd4bb828..5ce686ee729e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -176,7 +176,7 @@ nv50_software_context_ctor(struct nouveau_object *parent,
176 if (ret) 176 if (ret)
177 return ret; 177 return ret;
178 178
179 chan->vblank.nr_event = pdisp->vblank->index_nr; 179 chan->vblank.nr_event = pdisp ? pdisp->vblank->index_nr : 0;
180 chan->vblank.event = kzalloc(chan->vblank.nr_event * 180 chan->vblank.event = kzalloc(chan->vblank.nr_event *
181 sizeof(*chan->vblank.event), GFP_KERNEL); 181 sizeof(*chan->vblank.event), GFP_KERNEL);
182 if (!chan->vblank.event) 182 if (!chan->vblank.event)
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index e2675bc0edba..8f4ced75444a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -14,6 +14,9 @@ enum nv_clk_src {
14 nv_clk_src_hclk, 14 nv_clk_src_hclk,
15 nv_clk_src_hclkm3, 15 nv_clk_src_hclkm3,
16 nv_clk_src_hclkm3d2, 16 nv_clk_src_hclkm3d2,
17 nv_clk_src_hclkm2d3, /* NVAA */
18 nv_clk_src_hclkm4, /* NVAA */
19 nv_clk_src_cclk, /* NVAA */
17 20
18 nv_clk_src_host, 21 nv_clk_src_host,
19 22
@@ -127,6 +130,7 @@ extern struct nouveau_oclass nv04_clock_oclass;
127extern struct nouveau_oclass nv40_clock_oclass; 130extern struct nouveau_oclass nv40_clock_oclass;
128extern struct nouveau_oclass *nv50_clock_oclass; 131extern struct nouveau_oclass *nv50_clock_oclass;
129extern struct nouveau_oclass *nv84_clock_oclass; 132extern struct nouveau_oclass *nv84_clock_oclass;
133extern struct nouveau_oclass *nvaa_clock_oclass;
130extern struct nouveau_oclass nva3_clock_oclass; 134extern struct nouveau_oclass nva3_clock_oclass;
131extern struct nouveau_oclass nvc0_clock_oclass; 135extern struct nouveau_oclass nvc0_clock_oclass;
132extern struct nouveau_oclass nve0_clock_oclass; 136extern struct nouveau_oclass nve0_clock_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index da50c1b12928..30c1f3a4158e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -69,6 +69,11 @@ nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1,
69 return 0; 69 return 0;
70} 70}
71 71
72static struct nouveau_clocks
73nv04_domain[] = {
74 { nv_clk_src_max }
75};
76
72static int 77static int
73nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 78nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
74 struct nouveau_oclass *oclass, void *data, u32 size, 79 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -77,7 +82,7 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
77 struct nv04_clock_priv *priv; 82 struct nv04_clock_priv *priv;
78 int ret; 83 int ret;
79 84
80 ret = nouveau_clock_create(parent, engine, oclass, NULL, &priv); 85 ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, &priv);
81 *pobject = nv_object(priv); 86 *pobject = nv_object(priv);
82 if (ret) 87 if (ret)
83 return ret; 88 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
new file mode 100644
index 000000000000..7a723b4f564d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
@@ -0,0 +1,445 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/fifo.h>
26#include <subdev/bios.h>
27#include <subdev/bios/pll.h>
28#include <subdev/timer.h>
29#include <subdev/clock.h>
30
31#include "pll.h"
32
33struct nvaa_clock_priv {
34 struct nouveau_clock base;
35 enum nv_clk_src csrc, ssrc, vsrc;
36 u32 cctrl, sctrl;
37 u32 ccoef, scoef;
38 u32 cpost, spost;
39 u32 vdiv;
40};
41
42static u32
43read_div(struct nouveau_clock *clk)
44{
45 return nv_rd32(clk, 0x004600);
46}
47
48static u32
49read_pll(struct nouveau_clock *clk, u32 base)
50{
51 u32 ctrl = nv_rd32(clk, base + 0);
52 u32 coef = nv_rd32(clk, base + 4);
53 u32 ref = clk->read(clk, nv_clk_src_href);
54 u32 post_div = 0;
55 u32 clock = 0;
56 int N1, M1;
57
58 switch (base){
59 case 0x4020:
60 post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
61 break;
62 case 0x4028:
63 post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
64 break;
65 default:
66 break;
67 }
68
69 N1 = (coef & 0x0000ff00) >> 8;
70 M1 = (coef & 0x000000ff);
71 if ((ctrl & 0x80000000) && M1) {
72 clock = ref * N1 / M1;
73 clock = clock / post_div;
74 }
75
76 return clock;
77}
78
79static int
80nvaa_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
81{
82 struct nvaa_clock_priv *priv = (void *)clk;
83 u32 mast = nv_rd32(clk, 0x00c054);
84 u32 P = 0;
85
86 switch (src) {
87 case nv_clk_src_crystal:
88 return nv_device(priv)->crystal;
89 case nv_clk_src_href:
90 return 100000; /* PCIE reference clock */
91 case nv_clk_src_hclkm4:
92 return clk->read(clk, nv_clk_src_href) * 4;
93 case nv_clk_src_hclkm2d3:
94 return clk->read(clk, nv_clk_src_href) * 2 / 3;
95 case nv_clk_src_host:
96 switch (mast & 0x000c0000) {
97 case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
98 case 0x00040000: break;
99 case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
100 case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
101 }
102 break;
103 case nv_clk_src_core:
104 P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
105
106 switch (mast & 0x00000003) {
107 case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
108 case 0x00000001: return 0;
109 case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
110 case 0x00000003: return read_pll(clk, 0x004028) >> P;
111 }
112 break;
113 case nv_clk_src_cclk:
114 if ((mast & 0x03000000) != 0x03000000)
115 return clk->read(clk, nv_clk_src_core);
116
117 if ((mast & 0x00000200) == 0x00000000)
118 return clk->read(clk, nv_clk_src_core);
119
120 switch (mast & 0x00000c00) {
121 case 0x00000000: return clk->read(clk, nv_clk_src_href);
122 case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
123 case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
124 default: return 0;
125 }
126 case nv_clk_src_shader:
127 P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
128 switch (mast & 0x00000030) {
129 case 0x00000000:
130 if (mast & 0x00000040)
131 return clk->read(clk, nv_clk_src_href) >> P;
132 return clk->read(clk, nv_clk_src_crystal) >> P;
133 case 0x00000010: break;
134 case 0x00000020: return read_pll(clk, 0x004028) >> P;
135 case 0x00000030: return read_pll(clk, 0x004020) >> P;
136 }
137 break;
138 case nv_clk_src_mem:
139 return 0;
140 break;
141 case nv_clk_src_vdec:
142 P = (read_div(clk) & 0x00000700) >> 8;
143
144 switch (mast & 0x00400000) {
145 case 0x00400000:
146 return clk->read(clk, nv_clk_src_core) >> P;
147 break;
148 default:
149 return 500000 >> P;
150 break;
151 }
152 break;
153 default:
154 break;
155 }
156
157 nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
158 return 0;
159}
160
161static u32
162calc_pll(struct nvaa_clock_priv *priv, u32 reg,
163 u32 clock, int *N, int *M, int *P)
164{
165 struct nouveau_bios *bios = nouveau_bios(priv);
166 struct nvbios_pll pll;
167 struct nouveau_clock *clk = &priv->base;
168 int ret;
169
170 ret = nvbios_pll_parse(bios, reg, &pll);
171 if (ret)
172 return 0;
173
174 pll.vco2.max_freq = 0;
175 pll.refclk = clk->read(clk, nv_clk_src_href);
176 if (!pll.refclk)
177 return 0;
178
179 return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
180}
181
182static inline u32
183calc_P(u32 src, u32 target, int *div)
184{
185 u32 clk0 = src, clk1 = src;
186 for (*div = 0; *div <= 7; (*div)++) {
187 if (clk0 <= target) {
188 clk1 = clk0 << (*div ? 1 : 0);
189 break;
190 }
191 clk0 >>= 1;
192 }
193
194 if (target - clk0 <= clk1 - target)
195 return clk0;
196 (*div)--;
197 return clk1;
198}
199
200static int
201nvaa_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
202{
203 struct nvaa_clock_priv *priv = (void *)clk;
204 const int shader = cstate->domain[nv_clk_src_shader];
205 const int core = cstate->domain[nv_clk_src_core];
206 const int vdec = cstate->domain[nv_clk_src_vdec];
207 u32 out = 0, clock = 0;
208 int N, M, P1, P2 = 0;
209 int divs = 0;
210
211 /* cclk: find suitable source, disable PLL if we can */
212 if (core < clk->read(clk, nv_clk_src_hclkm4))
213 out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);
214
215 /* Calculate clock * 2, so shader clock can use it too */
216 clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
217
218 if (abs(core - out) <=
219 abs(core - (clock >> 1))) {
220 priv->csrc = nv_clk_src_hclkm4;
221 priv->cctrl = divs << 16;
222 } else {
223 /* NVCTRL is actually used _after_ NVPOST, and after what we
224 * call NVPLL. To make matters worse, NVPOST is an integer
225 * divider instead of a right-shift number. */
226 if(P1 > 2) {
227 P2 = P1 - 2;
228 P1 = 2;
229 }
230
231 priv->csrc = nv_clk_src_core;
232 priv->ccoef = (N << 8) | M;
233
234 priv->cctrl = (P2 + 1) << 16;
235 priv->cpost = (1 << P1) << 16;
236 }
237
238 /* sclk: nvpll + divisor, href or spll */
239 out = 0;
240 if (shader == clk->read(clk, nv_clk_src_href)) {
241 priv->ssrc = nv_clk_src_href;
242 } else {
243 clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
244 if (priv->csrc == nv_clk_src_core) {
245 out = calc_P((core << 1), shader, &divs);
246 }
247
248 if (abs(shader - out) <=
249 abs(shader - clock) &&
250 (divs + P2) <= 7) {
251 priv->ssrc = nv_clk_src_core;
252 priv->sctrl = (divs + P2) << 16;
253 } else {
254 priv->ssrc = nv_clk_src_shader;
255 priv->scoef = (N << 8) | M;
256 priv->sctrl = P1 << 16;
257 }
258 }
259
260 /* vclk */
261 out = calc_P(core, vdec, &divs);
262 clock = calc_P(500000, vdec, &P1);
263 if(abs(vdec - out) <=
264 abs(vdec - clock)) {
265 priv->vsrc = nv_clk_src_cclk;
266 priv->vdiv = divs << 16;
267 } else {
268 priv->vsrc = nv_clk_src_vdec;
269 priv->vdiv = P1 << 16;
270 }
271
272 /* Print strategy! */
273 nv_debug(priv, "nvpll: %08x %08x %08x\n",
274 priv->ccoef, priv->cpost, priv->cctrl);
275 nv_debug(priv, " spll: %08x %08x %08x\n",
276 priv->scoef, priv->spost, priv->sctrl);
277 nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
278 if (priv->csrc == nv_clk_src_hclkm4)
279 nv_debug(priv, "core: hrefm4\n");
280 else
281 nv_debug(priv, "core: nvpll\n");
282
283 if (priv->ssrc == nv_clk_src_hclkm4)
284 nv_debug(priv, "shader: hrefm4\n");
285 else if (priv->ssrc == nv_clk_src_core)
286 nv_debug(priv, "shader: nvpll\n");
287 else
288 nv_debug(priv, "shader: spll\n");
289
290 if (priv->vsrc == nv_clk_src_hclkm4)
291 nv_debug(priv, "vdec: 500MHz\n");
292 else
293 nv_debug(priv, "vdec: core\n");
294
295 return 0;
296}
297
298static int
299nvaa_clock_prog(struct nouveau_clock *clk)
300{
301 struct nvaa_clock_priv *priv = (void *)clk;
302 struct nouveau_fifo *pfifo = nouveau_fifo(clk);
303 unsigned long flags;
304 u32 pllmask = 0, mast, ptherm_gate;
305 int ret = -EBUSY;
306
307 /* halt and idle execution engines */
308 ptherm_gate = nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
309 nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
310 /* Wait until the interrupt handler is finished */
311 if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
312 goto resume;
313
314 if (pfifo)
315 pfifo->pause(pfifo, &flags);
316
317 if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010))
318 goto resume;
319 if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f))
320 goto resume;
321
322 /* First switch to safe clocks: href */
323 mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
324 mast &= ~0x00400e73;
325 mast |= 0x03000000;
326
327 switch (priv->csrc) {
328 case nv_clk_src_hclkm4:
329 nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
330 mast |= 0x00000002;
331 break;
332 case nv_clk_src_core:
333 nv_wr32(clk, 0x402c, priv->ccoef);
334 nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
335 nv_wr32(clk, 0x4040, priv->cpost);
336 pllmask |= (0x3 << 8);
337 mast |= 0x00000003;
338 break;
339 default:
340 nv_warn(priv,"Reclocking failed: unknown core clock\n");
341 goto resume;
342 }
343
344 switch (priv->ssrc) {
345 case nv_clk_src_href:
346 nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
347 /* mast |= 0x00000000; */
348 break;
349 case nv_clk_src_core:
350 nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
351 mast |= 0x00000020;
352 break;
353 case nv_clk_src_shader:
354 nv_wr32(clk, 0x4024, priv->scoef);
355 nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
356 nv_wr32(clk, 0x4070, priv->spost);
357 pllmask |= (0x3 << 12);
358 mast |= 0x00000030;
359 break;
360 default:
361 nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
362 goto resume;
363 }
364
365 if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
366 nv_warn(priv,"Reclocking failed: unstable PLLs\n");
367 goto resume;
368 }
369
370 switch (priv->vsrc) {
371 case nv_clk_src_cclk:
372 mast |= 0x00400000;
373 default:
374 nv_wr32(clk, 0x4600, priv->vdiv);
375 }
376
377 nv_wr32(clk, 0xc054, mast);
378 ret = 0;
379
380resume:
381 if (pfifo)
382 pfifo->start(pfifo, &flags);
383
384 nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
385 nv_wr32(clk, 0x020060, ptherm_gate);
386
387 /* Disable some PLLs and dividers when unused */
388 if (priv->csrc != nv_clk_src_core) {
389 nv_wr32(clk, 0x4040, 0x00000000);
390 nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
391 }
392
393 if (priv->ssrc != nv_clk_src_shader) {
394 nv_wr32(clk, 0x4070, 0x00000000);
395 nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
396 }
397
398 return ret;
399}
400
401static void
402nvaa_clock_tidy(struct nouveau_clock *clk)
403{
404}
405
406static struct nouveau_clocks
407nvaa_domains[] = {
408 { nv_clk_src_crystal, 0xff },
409 { nv_clk_src_href , 0xff },
410 { nv_clk_src_core , 0xff, 0, "core", 1000 },
411 { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
412 { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 },
413 { nv_clk_src_max }
414};
415
416static int
417nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
418 struct nouveau_oclass *oclass, void *data, u32 size,
419 struct nouveau_object **pobject)
420{
421 struct nvaa_clock_priv *priv;
422 int ret;
423
424 ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, &priv);
425 *pobject = nv_object(priv);
426 if (ret)
427 return ret;
428
429 priv->base.read = nvaa_clock_read;
430 priv->base.calc = nvaa_clock_calc;
431 priv->base.prog = nvaa_clock_prog;
432 priv->base.tidy = nvaa_clock_tidy;
433 return 0;
434}
435
436struct nouveau_oclass *
437nvaa_clock_oclass = &(struct nouveau_oclass) {
438 .handle = NV_SUBDEV(CLOCK, 0xaa),
439 .ofuncs = &(struct nouveau_ofuncs) {
440 .ctor = nvaa_clock_ctor,
441 .dtor = _nouveau_clock_dtor,
442 .init = _nouveau_clock_init,
443 .fini = _nouveau_clock_fini,
444 },
445};
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 3618ac6b6316..32e7064b819b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -58,8 +58,8 @@ struct nouveau_plane {
58}; 58};
59 59
60static uint32_t formats[] = { 60static uint32_t formats[] = {
61 DRM_FORMAT_NV12,
62 DRM_FORMAT_UYVY, 61 DRM_FORMAT_UYVY,
62 DRM_FORMAT_NV12,
63}; 63};
64 64
65/* Sine can be approximated with 65/* Sine can be approximated with
@@ -99,13 +99,28 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
99 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 99 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
100 struct nouveau_bo *cur = nv_plane->cur; 100 struct nouveau_bo *cur = nv_plane->cur;
101 bool flip = nv_plane->flip; 101 bool flip = nv_plane->flip;
102 int format = ALIGN(src_w * 4, 0x100);
103 int soff = NV_PCRTC0_SIZE * nv_crtc->index; 102 int soff = NV_PCRTC0_SIZE * nv_crtc->index;
104 int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index; 103 int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
105 int ret; 104 int format, ret;
105
106 /* Source parameters given in 16.16 fixed point, ignore fractional. */
107 src_x >>= 16;
108 src_y >>= 16;
109 src_w >>= 16;
110 src_h >>= 16;
111
112 format = ALIGN(src_w * 4, 0x100);
106 113
107 if (format > 0xffff) 114 if (format > 0xffff)
108 return -EINVAL; 115 return -ERANGE;
116
117 if (dev->chipset >= 0x30) {
118 if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
119 return -ERANGE;
120 } else {
121 if (crtc_w < (src_w >> 3) || crtc_h < (src_h >> 3))
122 return -ERANGE;
123 }
109 124
110 ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM); 125 ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
111 if (ret) 126 if (ret)
@@ -113,12 +128,6 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
113 128
114 nv_plane->cur = nv_fb->nvbo; 129 nv_plane->cur = nv_fb->nvbo;
115 130
116 /* Source parameters given in 16.16 fixed point, ignore fractional. */
117 src_x = src_x >> 16;
118 src_y = src_y >> 16;
119 src_w = src_w >> 16;
120 src_h = src_h >> 16;
121
122 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY); 131 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
123 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0); 132 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
124 133
@@ -245,14 +254,25 @@ nv10_overlay_init(struct drm_device *device)
245{ 254{
246 struct nouveau_device *dev = nouveau_dev(device); 255 struct nouveau_device *dev = nouveau_dev(device);
247 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); 256 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
257 int num_formats = ARRAY_SIZE(formats);
248 int ret; 258 int ret;
249 259
250 if (!plane) 260 if (!plane)
251 return; 261 return;
252 262
263 switch (dev->chipset) {
264 case 0x10:
265 case 0x11:
266 case 0x15:
267 case 0x1a:
268 case 0x20:
269 num_formats = 1;
270 break;
271 }
272
253 ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */, 273 ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */,
254 &nv10_plane_funcs, 274 &nv10_plane_funcs,
255 formats, ARRAY_SIZE(formats), false); 275 formats, num_formats, false);
256 if (ret) 276 if (ret)
257 goto err; 277 goto err;
258 278
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7809d92183c4..29c3efdfc7dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -608,6 +608,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
608 fence = nouveau_fence_ref(new_bo->bo.sync_obj); 608 fence = nouveau_fence_ref(new_bo->bo.sync_obj);
609 spin_unlock(&new_bo->bo.bdev->fence_lock); 609 spin_unlock(&new_bo->bo.bdev->fence_lock);
610 ret = nouveau_fence_sync(fence, chan); 610 ret = nouveau_fence_sync(fence, chan);
611 nouveau_fence_unref(&fence);
611 if (ret) 612 if (ret)
612 return ret; 613 return ret;
613 614
@@ -701,7 +702,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
701 702
702 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); 703 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
703 if (s->event) 704 if (s->event)
704 drm_send_vblank_event(dev, -1, s->event); 705 drm_send_vblank_event(dev, s->crtc, s->event);
705 706
706 list_del(&s->head); 707 list_del(&s->head);
707 if (ps) 708 if (ps)
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f8e66c08b11a..4e384a2f99c3 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1265,7 +1265,7 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
1265 uint32_t start, uint32_t size) 1265 uint32_t start, uint32_t size)
1266{ 1266{
1267 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 1267 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
1268 u32 end = max(start + size, (u32)256); 1268 u32 end = min_t(u32, start + size, 256);
1269 u32 i; 1269 u32 i;
1270 1270
1271 for (i = start; i < end; i++) { 1271 for (i = start; i < end; i++) {
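
The gamma change swaps max() for min_t(): with the old max(start + size, 256) the loop bound could never be smaller than 256 and a large start + size would walk past the 256-entry LUT, whereas clamping to 256 keeps the writes in bounds. A minimal standalone sketch of the corrected clamp (not the driver's code):

#include <stdio.h>
#include <stdint.h>

#define LUT_ENTRIES 256u

/* Equivalent of min_t(u32, start + size, 256): never index past the LUT. */
static uint32_t clamp_end(uint32_t start, uint32_t size)
{
        uint32_t end = start + size;

        return end < LUT_ENTRIES ? end : LUT_ENTRIES;
}

int main(void)
{
        printf("start=200, size=100 -> end=%u (the old max() bound gave 300)\n",
               (unsigned)clamp_end(200, 100));
        return 0;
}
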
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index 0652ee0a2098..f685035dbe39 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -44,7 +44,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
44 PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; 44 PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
45 int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); 45 int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
46 unsigned char *base; 46 unsigned char *base;
47 u16 out; 47 u16 out = cpu_to_le16(0);
48 48
49 memset(&args, 0, sizeof(args)); 49 memset(&args, 0, sizeof(args));
50 50
@@ -55,11 +55,14 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
55 DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num); 55 DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
56 return -EINVAL; 56 return -EINVAL;
57 } 57 }
58 args.ucRegIndex = buf[0]; 58 if (buf == NULL)
59 if (num > 1) { 59 args.ucRegIndex = 0;
60 else
61 args.ucRegIndex = buf[0];
62 if (num)
60 num--; 63 num--;
64 if (num)
61 memcpy(&out, &buf[1], num); 65 memcpy(&out, &buf[1], num);
62 }
63 args.lpI2CDataOut = cpu_to_le16(out); 66 args.lpI2CDataOut = cpu_to_le16(out);
64 } else { 67 } else {
65 if (num > ATOM_MAX_HW_I2C_READ) { 68 if (num > ATOM_MAX_HW_I2C_READ) {
@@ -96,14 +99,14 @@ int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
96 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); 99 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
97 struct i2c_msg *p; 100 struct i2c_msg *p;
98 int i, remaining, current_count, buffer_offset, max_bytes, ret; 101 int i, remaining, current_count, buffer_offset, max_bytes, ret;
99 u8 buf = 0, flags; 102 u8 flags;
100 103
101 /* check for bus probe */ 104 /* check for bus probe */
102 p = &msgs[0]; 105 p = &msgs[0];
103 if ((num == 1) && (p->len == 0)) { 106 if ((num == 1) && (p->len == 0)) {
104 ret = radeon_process_i2c_ch(i2c, 107 ret = radeon_process_i2c_ch(i2c,
105 p->addr, HW_I2C_WRITE, 108 p->addr, HW_I2C_WRITE,
106 &buf, 1); 109 NULL, 0);
107 if (ret) 110 if (ret)
108 return ret; 111 return ret;
109 else 112 else
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 009f46e0ce72..de86493cbc44 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,11 +93,13 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
93 struct radeon_device *rdev = encoder->dev->dev_private; 93 struct radeon_device *rdev = encoder->dev->dev_private;
94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
96 u32 offset = dig->afmt->offset; 96 u32 offset;
97 97
98 if (!dig->afmt->pin) 98 if (!dig || !dig->afmt || !dig->afmt->pin)
99 return; 99 return;
100 100
101 offset = dig->afmt->offset;
102
101 WREG32(AFMT_AUDIO_SRC_CONTROL + offset, 103 WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
102 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); 104 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
103} 105}
@@ -112,7 +114,7 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
112 struct radeon_connector *radeon_connector = NULL; 114 struct radeon_connector *radeon_connector = NULL;
113 u32 tmp = 0, offset; 115 u32 tmp = 0, offset;
114 116
115 if (!dig->afmt->pin) 117 if (!dig || !dig->afmt || !dig->afmt->pin)
116 return; 118 return;
117 119
118 offset = dig->afmt->pin->offset; 120 offset = dig->afmt->pin->offset;
@@ -156,7 +158,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
156 u8 *sadb; 158 u8 *sadb;
157 int sad_count; 159 int sad_count;
158 160
159 if (!dig->afmt->pin) 161 if (!dig || !dig->afmt || !dig->afmt->pin)
160 return; 162 return;
161 163
162 offset = dig->afmt->pin->offset; 164 offset = dig->afmt->pin->offset;
@@ -217,7 +219,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
217 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, 219 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
218 }; 220 };
219 221
220 if (!dig->afmt->pin) 222 if (!dig || !dig->afmt || !dig->afmt->pin)
221 return; 223 return;
222 224
223 offset = dig->afmt->pin->offset; 225 offset = dig->afmt->pin->offset;
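
Every dce6_afmt hunk applies the same rule: validate the dig -> afmt -> pin chain before reading anything through it, and only then load the register offset. A small self-contained sketch of that ordering, with stand-in types rather than radeon's:

#include <stdio.h>
#include <stdint.h>

struct pin  { int id; };
struct afmt { uint32_t offset; struct pin *pin; };
struct dig  { struct afmt *afmt; };

static void select_pin(const struct dig *dig)
{
        uint32_t offset;

        /* Validate the whole chain first; only then read through it. */
        if (!dig || !dig->afmt || !dig->afmt->pin)
                return;

        offset = dig->afmt->offset;
        printf("programming pin %d at offset %#x\n",
               dig->afmt->pin->id, (unsigned)offset);
}

int main(void)
{
        struct pin p = { .id = 1 };
        struct afmt a = { .offset = 0x40, .pin = &p };
        struct dig d = { .afmt = &a };

        select_pin(NULL);   /* quietly ignored instead of dereferencing NULL */
        select_pin(&d);
        return 0;
}
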
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index cdc003085a76..49c4d48f54d6 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -785,8 +785,8 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
785 struct ni_ps *ps = ni_get_ps(rps); 785 struct ni_ps *ps = ni_get_ps(rps);
786 struct radeon_clock_and_voltage_limits *max_limits; 786 struct radeon_clock_and_voltage_limits *max_limits;
787 bool disable_mclk_switching; 787 bool disable_mclk_switching;
788 u32 mclk, sclk; 788 u32 mclk;
789 u16 vddc, vddci; 789 u16 vddci;
790 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; 790 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
791 int i; 791 int i;
792 792
@@ -839,24 +839,14 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
839 839
840 /* XXX validate the min clocks required for display */ 840 /* XXX validate the min clocks required for display */
841 841
842 /* adjust low state */
842 if (disable_mclk_switching) { 843 if (disable_mclk_switching) {
843 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; 844 ps->performance_levels[0].mclk =
844 sclk = ps->performance_levels[0].sclk; 845 ps->performance_levels[ps->performance_level_count - 1].mclk;
845 vddc = ps->performance_levels[0].vddc; 846 ps->performance_levels[0].vddci =
846 vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; 847 ps->performance_levels[ps->performance_level_count - 1].vddci;
847 } else {
848 sclk = ps->performance_levels[0].sclk;
849 mclk = ps->performance_levels[0].mclk;
850 vddc = ps->performance_levels[0].vddc;
851 vddci = ps->performance_levels[0].vddci;
852 } 848 }
853 849
854 /* adjusted low state */
855 ps->performance_levels[0].sclk = sclk;
856 ps->performance_levels[0].mclk = mclk;
857 ps->performance_levels[0].vddc = vddc;
858 ps->performance_levels[0].vddci = vddci;
859
860 btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk, 850 btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
861 &ps->performance_levels[0].sclk, 851 &ps->performance_levels[0].sclk,
862 &ps->performance_levels[0].mclk); 852 &ps->performance_levels[0].mclk);
@@ -868,11 +858,15 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
868 ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; 858 ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
869 } 859 }
870 860
861 /* adjust remaining states */
871 if (disable_mclk_switching) { 862 if (disable_mclk_switching) {
872 mclk = ps->performance_levels[0].mclk; 863 mclk = ps->performance_levels[0].mclk;
864 vddci = ps->performance_levels[0].vddci;
873 for (i = 1; i < ps->performance_level_count; i++) { 865 for (i = 1; i < ps->performance_level_count; i++) {
874 if (mclk < ps->performance_levels[i].mclk) 866 if (mclk < ps->performance_levels[i].mclk)
875 mclk = ps->performance_levels[i].mclk; 867 mclk = ps->performance_levels[i].mclk;
868 if (vddci < ps->performance_levels[i].vddci)
869 vddci = ps->performance_levels[i].vddci;
876 } 870 }
877 for (i = 0; i < ps->performance_level_count; i++) { 871 for (i = 0; i < ps->performance_level_count; i++) {
878 ps->performance_levels[i].mclk = mclk; 872 ps->performance_levels[i].mclk = mclk;
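
The ni_dpm rework keeps vddci alongside mclk when memory-clock switching is disabled: the highest mclk and vddci across the performance levels are found first and then applied to every level. A compact standalone sketch of that flattening pass (the numbers and types here are illustrative):

#include <stdio.h>
#include <stdint.h>

struct level { uint32_t mclk; uint16_t vddci; };

/* With memory-clock switching disabled, every level must run at the highest
 * mclk and vddci found in the list so no state undershoots the memory rail. */
static void flatten_mclk(struct level *lv, int count)
{
        uint32_t mclk = lv[0].mclk;
        uint16_t vddci = lv[0].vddci;
        int i;

        for (i = 1; i < count; i++) {
                if (mclk < lv[i].mclk)
                        mclk = lv[i].mclk;
                if (vddci < lv[i].vddci)
                        vddci = lv[i].vddci;
        }
        for (i = 0; i < count; i++) {
                lv[i].mclk = mclk;
                lv[i].vddci = vddci;
        }
}

int main(void)
{
        struct level lv[] = { { 400, 900 }, { 800, 950 }, { 1000, 1000 } };

        flatten_mclk(lv, 3);
        printf("low state now %u MHz @ %u mV\n",
               (unsigned)lv[0].mclk, (unsigned)lv[0].vddci);
        return 0;
}
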
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 4b89262f3f0e..b7d3ecba43e3 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -304,9 +304,9 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
304 WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); 304 WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
305 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ 305 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
306 } 306 }
307 } else if (ASIC_IS_DCE3(rdev)) { 307 } else {
308 /* according to the reg specs, this should DCE3.2 only, but in 308 /* according to the reg specs, this should DCE3.2 only, but in
309 * practice it seems to cover DCE3.0/3.1 as well. 309 * practice it seems to cover DCE2.0/3.0/3.1 as well.
310 */ 310 */
311 if (dig->dig_encoder == 0) { 311 if (dig->dig_encoder == 0) {
312 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); 312 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
@@ -317,10 +317,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
317 WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100); 317 WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
318 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ 318 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
319 } 319 }
320 } else {
321 /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
322 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
323 AUDIO_DTO_MODULE(clock / 10));
324 } 320 }
325} 321}
326 322
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ecf2a3960c07..b1f990d0eaa1 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2710,10 +2710,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
2710 struct radeon_vm *vm, 2710 struct radeon_vm *vm,
2711 struct radeon_fence *fence); 2711 struct radeon_fence *fence);
2712uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); 2712uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
2713int radeon_vm_bo_update_pte(struct radeon_device *rdev, 2713int radeon_vm_bo_update(struct radeon_device *rdev,
2714 struct radeon_vm *vm, 2714 struct radeon_vm *vm,
2715 struct radeon_bo *bo, 2715 struct radeon_bo *bo,
2716 struct ttm_mem_reg *mem); 2716 struct ttm_mem_reg *mem);
2717void radeon_vm_bo_invalidate(struct radeon_device *rdev, 2717void radeon_vm_bo_invalidate(struct radeon_device *rdev,
2718 struct radeon_bo *bo); 2718 struct radeon_bo *bo);
2719struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, 2719struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f79ee184ffd5..5c39bf7c3d88 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2918,7 +2918,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
2918 mpll_param->dll_speed = args.ucDllSpeed; 2918 mpll_param->dll_speed = args.ucDllSpeed;
2919 mpll_param->bwcntl = args.ucBWCntl; 2919 mpll_param->bwcntl = args.ucBWCntl;
2920 mpll_param->vco_mode = 2920 mpll_param->vco_mode =
2921 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK) ? 1 : 0; 2921 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
2922 mpll_param->yclk_sel = 2922 mpll_param->yclk_sel =
2923 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0; 2923 (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
2924 mpll_param->qdr = 2924 mpll_param->qdr =
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index f41594b2eeac..0b366169d64d 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -360,13 +360,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
360 struct radeon_bo *bo; 360 struct radeon_bo *bo;
361 int r; 361 int r;
362 362
363 r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem); 363 r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
364 if (r) { 364 if (r) {
365 return r; 365 return r;
366 } 366 }
367 list_for_each_entry(lobj, &parser->validated, tv.head) { 367 list_for_each_entry(lobj, &parser->validated, tv.head) {
368 bo = lobj->bo; 368 bo = lobj->bo;
369 r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem); 369 r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem);
370 if (r) { 370 if (r) {
371 return r; 371 return r;
372 } 372 }
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 543dcfae7e6f..00e0d449021c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -108,9 +108,10 @@
108 * 1.31- Add support for num Z pipes from GET_PARAM 108 * 1.31- Add support for num Z pipes from GET_PARAM
109 * 1.32- fixes for rv740 setup 109 * 1.32- fixes for rv740 setup
110 * 1.33- Add r6xx/r7xx const buffer support 110 * 1.33- Add r6xx/r7xx const buffer support
111 * 1.34- fix evergreen/cayman GS register
111 */ 112 */
112#define DRIVER_MAJOR 1 113#define DRIVER_MAJOR 1
113#define DRIVER_MINOR 33 114#define DRIVER_MINOR 34
114#define DRIVER_PATCHLEVEL 0 115#define DRIVER_PATCHLEVEL 0
115 116
116long radeon_drm_ioctl(struct file *filp, 117long radeon_drm_ioctl(struct file *filp,
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 3044e504f4ec..96e440061bdb 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -29,6 +29,7 @@
29#include <drm/radeon_drm.h> 29#include <drm/radeon_drm.h>
30#include "radeon.h" 30#include "radeon.h"
31#include "radeon_reg.h" 31#include "radeon_reg.h"
32#include "radeon_trace.h"
32 33
33/* 34/*
34 * GART 35 * GART
@@ -737,6 +738,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
737 for (i = 0; i < 2; ++i) { 738 for (i = 0; i < 2; ++i) {
738 if (choices[i]) { 739 if (choices[i]) {
739 vm->id = choices[i]; 740 vm->id = choices[i];
741 trace_radeon_vm_grab_id(vm->id, ring);
740 return rdev->vm_manager.active[choices[i]]; 742 return rdev->vm_manager.active[choices[i]];
741 } 743 }
742 } 744 }
@@ -1116,7 +1118,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
1116} 1118}
1117 1119
1118/** 1120/**
1119 * radeon_vm_bo_update_pte - map a bo into the vm page table 1121 * radeon_vm_bo_update - map a bo into the vm page table
1120 * 1122 *
1121 * @rdev: radeon_device pointer 1123 * @rdev: radeon_device pointer
1122 * @vm: requested vm 1124 * @vm: requested vm
@@ -1128,10 +1130,10 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
1128 * 1130 *
1129 * Object have to be reserved & global and local mutex must be locked! 1131 * Object have to be reserved & global and local mutex must be locked!
1130 */ 1132 */
1131int radeon_vm_bo_update_pte(struct radeon_device *rdev, 1133int radeon_vm_bo_update(struct radeon_device *rdev,
1132 struct radeon_vm *vm, 1134 struct radeon_vm *vm,
1133 struct radeon_bo *bo, 1135 struct radeon_bo *bo,
1134 struct ttm_mem_reg *mem) 1136 struct ttm_mem_reg *mem)
1135{ 1137{
1136 struct radeon_ib ib; 1138 struct radeon_ib ib;
1137 struct radeon_bo_va *bo_va; 1139 struct radeon_bo_va *bo_va;
@@ -1176,6 +1178,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1176 bo_va->valid = false; 1178 bo_va->valid = false;
1177 } 1179 }
1178 1180
1181 trace_radeon_vm_bo_update(bo_va);
1182
1179 nptes = radeon_bo_ngpu_pages(bo); 1183 nptes = radeon_bo_ngpu_pages(bo);
1180 1184
1181 /* assume two extra pdes in case the mapping overlaps the borders */ 1185 /* assume two extra pdes in case the mapping overlaps the borders */
@@ -1257,7 +1261,7 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
1257 mutex_lock(&rdev->vm_manager.lock); 1261 mutex_lock(&rdev->vm_manager.lock);
1258 mutex_lock(&bo_va->vm->mutex); 1262 mutex_lock(&bo_va->vm->mutex);
1259 if (bo_va->soffset) { 1263 if (bo_va->soffset) {
1260 r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); 1264 r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
1261 } 1265 }
1262 mutex_unlock(&rdev->vm_manager.lock); 1266 mutex_unlock(&rdev->vm_manager.lock);
1263 list_del(&bo_va->vm_list); 1267 list_del(&bo_va->vm_list);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index d1385ccc672c..984097b907ef 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -537,8 +537,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
537 struct device_attribute *attr, 537 struct device_attribute *attr,
538 char *buf) 538 char *buf)
539{ 539{
540 struct drm_device *ddev = dev_get_drvdata(dev); 540 struct radeon_device *rdev = dev_get_drvdata(dev);
541 struct radeon_device *rdev = ddev->dev_private;
542 int temp; 541 int temp;
543 542
544 if (rdev->asic->pm.get_temperature) 543 if (rdev->asic->pm.get_temperature)
@@ -553,8 +552,7 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
553 struct device_attribute *attr, 552 struct device_attribute *attr,
554 char *buf) 553 char *buf)
555{ 554{
556 struct drm_device *ddev = dev_get_drvdata(dev); 555 struct radeon_device *rdev = dev_get_drvdata(dev);
557 struct radeon_device *rdev = ddev->dev_private;
558 int hyst = to_sensor_dev_attr(attr)->index; 556 int hyst = to_sensor_dev_attr(attr)->index;
559 int temp; 557 int temp;
560 558
@@ -566,23 +564,14 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
566 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 564 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
567} 565}
568 566
569static ssize_t radeon_hwmon_show_name(struct device *dev,
570 struct device_attribute *attr,
571 char *buf)
572{
573 return sprintf(buf, "radeon\n");
574}
575
576static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 567static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
577static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0); 568static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
578static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1); 569static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
579static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
580 570
581static struct attribute *hwmon_attributes[] = { 571static struct attribute *hwmon_attributes[] = {
582 &sensor_dev_attr_temp1_input.dev_attr.attr, 572 &sensor_dev_attr_temp1_input.dev_attr.attr,
583 &sensor_dev_attr_temp1_crit.dev_attr.attr, 573 &sensor_dev_attr_temp1_crit.dev_attr.attr,
584 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 574 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
585 &sensor_dev_attr_name.dev_attr.attr,
586 NULL 575 NULL
587}; 576};
588 577
@@ -590,8 +579,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
590 struct attribute *attr, int index) 579 struct attribute *attr, int index)
591{ 580{
592 struct device *dev = container_of(kobj, struct device, kobj); 581 struct device *dev = container_of(kobj, struct device, kobj);
593 struct drm_device *ddev = dev_get_drvdata(dev); 582 struct radeon_device *rdev = dev_get_drvdata(dev);
594 struct radeon_device *rdev = ddev->dev_private;
595 583
596 /* Skip limit attributes if DPM is not enabled */ 584 /* Skip limit attributes if DPM is not enabled */
597 if (rdev->pm.pm_method != PM_METHOD_DPM && 585 if (rdev->pm.pm_method != PM_METHOD_DPM &&
@@ -607,11 +595,15 @@ static const struct attribute_group hwmon_attrgroup = {
607 .is_visible = hwmon_attributes_visible, 595 .is_visible = hwmon_attributes_visible,
608}; 596};
609 597
598static const struct attribute_group *hwmon_groups[] = {
599 &hwmon_attrgroup,
600 NULL
601};
602
610static int radeon_hwmon_init(struct radeon_device *rdev) 603static int radeon_hwmon_init(struct radeon_device *rdev)
611{ 604{
612 int err = 0; 605 int err = 0;
613 606 struct device *hwmon_dev;
614 rdev->pm.int_hwmon_dev = NULL;
615 607
616 switch (rdev->pm.int_thermal_type) { 608 switch (rdev->pm.int_thermal_type) {
617 case THERMAL_TYPE_RV6XX: 609 case THERMAL_TYPE_RV6XX:
@@ -624,20 +616,13 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
624 case THERMAL_TYPE_KV: 616 case THERMAL_TYPE_KV:
625 if (rdev->asic->pm.get_temperature == NULL) 617 if (rdev->asic->pm.get_temperature == NULL)
626 return err; 618 return err;
627 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); 619 hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
628 if (IS_ERR(rdev->pm.int_hwmon_dev)) { 620 "radeon", rdev,
629 err = PTR_ERR(rdev->pm.int_hwmon_dev); 621 hwmon_groups);
622 if (IS_ERR(hwmon_dev)) {
623 err = PTR_ERR(hwmon_dev);
630 dev_err(rdev->dev, 624 dev_err(rdev->dev,
631 "Unable to register hwmon device: %d\n", err); 625 "Unable to register hwmon device: %d\n", err);
632 break;
633 }
634 dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
635 err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
636 &hwmon_attrgroup);
637 if (err) {
638 dev_err(rdev->dev,
639 "Unable to create hwmon sysfs file: %d\n", err);
640 hwmon_device_unregister(rdev->dev);
641 } 626 }
642 break; 627 break;
643 default: 628 default:
@@ -647,14 +632,6 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
647 return err; 632 return err;
648} 633}
649 634
650static void radeon_hwmon_fini(struct radeon_device *rdev)
651{
652 if (rdev->pm.int_hwmon_dev) {
653 sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
654 hwmon_device_unregister(rdev->pm.int_hwmon_dev);
655 }
656}
657
658static void radeon_dpm_thermal_work_handler(struct work_struct *work) 635static void radeon_dpm_thermal_work_handler(struct work_struct *work)
659{ 636{
660 struct radeon_device *rdev = 637 struct radeon_device *rdev =
@@ -1337,8 +1314,6 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
1337 1314
1338 if (rdev->pm.power_state) 1315 if (rdev->pm.power_state)
1339 kfree(rdev->pm.power_state); 1316 kfree(rdev->pm.power_state);
1340
1341 radeon_hwmon_fini(rdev);
1342} 1317}
1343 1318
1344static void radeon_pm_fini_dpm(struct radeon_device *rdev) 1319static void radeon_pm_fini_dpm(struct radeon_device *rdev)
@@ -1358,8 +1333,6 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1358 1333
1359 if (rdev->pm.power_state) 1334 if (rdev->pm.power_state)
1360 kfree(rdev->pm.power_state); 1335 kfree(rdev->pm.power_state);
1361
1362 radeon_hwmon_fini(rdev);
1363} 1336}
1364 1337
1365void radeon_pm_fini(struct radeon_device *rdev) 1338void radeon_pm_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 9f0e18172b6e..0473257d4078 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -47,6 +47,39 @@ TRACE_EVENT(radeon_cs,
47 __entry->fences) 47 __entry->fences)
48); 48);
49 49
50TRACE_EVENT(radeon_vm_grab_id,
51 TP_PROTO(unsigned vmid, int ring),
52 TP_ARGS(vmid, ring),
53 TP_STRUCT__entry(
54 __field(u32, vmid)
55 __field(u32, ring)
56 ),
57
58 TP_fast_assign(
59 __entry->vmid = vmid;
60 __entry->ring = ring;
61 ),
62 TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
63);
64
65TRACE_EVENT(radeon_vm_bo_update,
66 TP_PROTO(struct radeon_bo_va *bo_va),
67 TP_ARGS(bo_va),
68 TP_STRUCT__entry(
69 __field(u64, soffset)
70 __field(u64, eoffset)
71 __field(u32, flags)
72 ),
73
74 TP_fast_assign(
75 __entry->soffset = bo_va->soffset;
76 __entry->eoffset = bo_va->eoffset;
77 __entry->flags = bo_va->flags;
78 ),
79 TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
80 __entry->soffset, __entry->eoffset, __entry->flags)
81);
82
50TRACE_EVENT(radeon_vm_set_page, 83TRACE_EVENT(radeon_vm_set_page,
51 TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, 84 TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
52 uint32_t incr, uint32_t flags), 85 uint32_t incr, uint32_t flags),
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index a072fa8c46b0..d46b58d078aa 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -21,7 +21,7 @@ cayman 0x9400
210x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE 210x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
220x000089B0 VGT_HS_OFFCHIP_PARAM 220x000089B0 VGT_HS_OFFCHIP_PARAM
230x00008A14 PA_CL_ENHANCE 230x00008A14 PA_CL_ENHANCE
240x00008A60 PA_SC_LINE_STIPPLE_VALUE 240x00008A60 PA_SU_LINE_STIPPLE_VALUE
250x00008B10 PA_SC_LINE_STIPPLE_STATE 250x00008B10 PA_SC_LINE_STIPPLE_STATE
260x00008BF0 PA_SC_ENHANCE 260x00008BF0 PA_SC_ENHANCE
270x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 270x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@@ -532,7 +532,7 @@ cayman 0x9400
5320x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET 5320x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
5330x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE 5330x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
5340x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET 5340x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
5350x00028B74 VGT_GS_INSTANCE_CNT 5350x00028B90 VGT_GS_INSTANCE_CNT
5360x00028BD4 PA_SC_CENTROID_PRIORITY_0 5360x00028BD4 PA_SC_CENTROID_PRIORITY_0
5370x00028BD8 PA_SC_CENTROID_PRIORITY_1 5370x00028BD8 PA_SC_CENTROID_PRIORITY_1
5380x00028BDC PA_SC_LINE_CNTL 5380x00028BDC PA_SC_LINE_CNTL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index b912a37689bf..57745c8761c8 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -22,7 +22,7 @@ evergreen 0x9400
220x000089A4 VGT_COMPUTE_START_Z 220x000089A4 VGT_COMPUTE_START_Z
230x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE 230x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
240x00008A14 PA_CL_ENHANCE 240x00008A14 PA_CL_ENHANCE
250x00008A60 PA_SC_LINE_STIPPLE_VALUE 250x00008A60 PA_SU_LINE_STIPPLE_VALUE
260x00008B10 PA_SC_LINE_STIPPLE_STATE 260x00008B10 PA_SC_LINE_STIPPLE_STATE
270x00008BF0 PA_SC_ENHANCE 270x00008BF0 PA_SC_ENHANCE
280x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 280x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@@ -545,7 +545,7 @@ evergreen 0x9400
5450x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET 5450x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
5460x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE 5460x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
5470x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET 5470x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
5480x00028B74 VGT_GS_INSTANCE_CNT 5480x00028B90 VGT_GS_INSTANCE_CNT
5490x00028C00 PA_SC_LINE_CNTL 5490x00028C00 PA_SC_LINE_CNTL
5500x00028C08 PA_SU_VTX_CNTL 5500x00028C08 PA_SU_VTX_CNTL
5510x00028C0C PA_CL_GB_VERT_CLIP_ADJ 5510x00028C0C PA_CL_GB_VERT_CLIP_ADJ
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 6a64ccaa0695..a36736dab5e0 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3882,8 +3882,15 @@ static int si_mc_init(struct radeon_device *rdev)
3882 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 3882 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
3883 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 3883 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
3884 /* size in MB on si */ 3884 /* size in MB on si */
3885 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; 3885 tmp = RREG32(CONFIG_MEMSIZE);
3886 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; 3886 /* some boards may have garbage in the upper 16 bits */
3887 if (tmp & 0xffff0000) {
3888 DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
3889 if (tmp & 0xffff)
3890 tmp &= 0xffff;
3891 }
3892 rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
3893 rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
3887 rdev->mc.visible_vram_size = rdev->mc.aper_size; 3894 rdev->mc.visible_vram_size = rdev->mc.aper_size;
3888 si_vram_gtt_location(rdev, &rdev->mc); 3895 si_vram_gtt_location(rdev, &rdev->mc);
3889 radeon_update_bandwidth_info(rdev); 3896 radeon_update_bandwidth_info(rdev);
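
The si_mc_init hunk treats CONFIG_MEMSIZE as MiB but distrusts the upper 16 bits, keeping only the low half when both halves are populated. A standalone sketch of that sanity check (the register value below is made up):

#include <stdio.h>
#include <stdint.h>

/* CONFIG_MEMSIZE reports VRAM in MiB; some boards leave junk in the top
 * 16 bits, so keep only the low half when both halves are populated. */
static uint64_t vram_bytes(uint32_t config_memsize)
{
        uint32_t mb = config_memsize;

        if (mb & 0xffff0000) {
                printf("probable bad vram size: 0x%08x\n", (unsigned)mb);
                if (mb & 0xffff)
                        mb &= 0xffff;
        }
        return (uint64_t)mb * 1024 * 1024;
}

int main(void)
{
        printf("%llu bytes\n",
               (unsigned long long)vram_bytes(0xdead0800)); /* 2048 MiB */
        return 0;
}
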
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 28e178137718..07eba596d458 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -135,11 +135,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
135 unsigned int num_relocs = args->num_relocs; 135 unsigned int num_relocs = args->num_relocs;
136 unsigned int num_waitchks = args->num_waitchks; 136 unsigned int num_waitchks = args->num_waitchks;
137 struct drm_tegra_cmdbuf __user *cmdbufs = 137 struct drm_tegra_cmdbuf __user *cmdbufs =
138 (void * __user)(uintptr_t)args->cmdbufs; 138 (void __user *)(uintptr_t)args->cmdbufs;
139 struct drm_tegra_reloc __user *relocs = 139 struct drm_tegra_reloc __user *relocs =
140 (void * __user)(uintptr_t)args->relocs; 140 (void __user *)(uintptr_t)args->relocs;
141 struct drm_tegra_waitchk __user *waitchks = 141 struct drm_tegra_waitchk __user *waitchks =
142 (void * __user)(uintptr_t)args->waitchks; 142 (void __user *)(uintptr_t)args->waitchks;
143 struct drm_tegra_syncpt syncpt; 143 struct drm_tegra_syncpt syncpt;
144 struct host1x_job *job; 144 struct host1x_job *job;
145 int err; 145 int err;
@@ -163,9 +163,10 @@ int tegra_drm_submit(struct tegra_drm_context *context,
163 struct drm_tegra_cmdbuf cmdbuf; 163 struct drm_tegra_cmdbuf cmdbuf;
164 struct host1x_bo *bo; 164 struct host1x_bo *bo;
165 165
166 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf)); 166 if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
167 if (err) 167 err = -EFAULT;
168 goto fail; 168 goto fail;
169 }
169 170
170 bo = host1x_bo_lookup(drm, file, cmdbuf.handle); 171 bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
171 if (!bo) { 172 if (!bo) {
@@ -178,10 +179,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
178 cmdbufs++; 179 cmdbufs++;
179 } 180 }
180 181
181 err = copy_from_user(job->relocarray, relocs, 182 if (copy_from_user(job->relocarray, relocs,
182 sizeof(*relocs) * num_relocs); 183 sizeof(*relocs) * num_relocs)) {
183 if (err) 184 err = -EFAULT;
184 goto fail; 185 goto fail;
186 }
185 187
186 while (num_relocs--) { 188 while (num_relocs--) {
187 struct host1x_reloc *reloc = &job->relocarray[num_relocs]; 189 struct host1x_reloc *reloc = &job->relocarray[num_relocs];
@@ -199,15 +201,17 @@ int tegra_drm_submit(struct tegra_drm_context *context,
199 } 201 }
200 } 202 }
201 203
202 err = copy_from_user(job->waitchk, waitchks, 204 if (copy_from_user(job->waitchk, waitchks,
203 sizeof(*waitchks) * num_waitchks); 205 sizeof(*waitchks) * num_waitchks)) {
204 if (err) 206 err = -EFAULT;
205 goto fail; 207 goto fail;
208 }
206 209
207 err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts, 210 if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
208 sizeof(syncpt)); 211 sizeof(syncpt))) {
209 if (err) 212 err = -EFAULT;
210 goto fail; 213 goto fail;
214 }
211 215
212 job->is_addr_reg = context->client->ops->is_addr_reg; 216 job->is_addr_reg = context->client->ops->is_addr_reg;
213 job->syncpt_incrs = syncpt.incrs; 217 job->syncpt_incrs = syncpt.incrs;
@@ -573,7 +577,7 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor)
573} 577}
574#endif 578#endif
575 579
576struct drm_driver tegra_drm_driver = { 580static struct drm_driver tegra_drm_driver = {
577 .driver_features = DRIVER_MODESET | DRIVER_GEM, 581 .driver_features = DRIVER_MODESET | DRIVER_GEM,
578 .load = tegra_drm_load, 582 .load = tegra_drm_load,
579 .unload = tegra_drm_unload, 583 .unload = tegra_drm_unload,
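
The tegra_drm_submit hunks above stop returning the raw copy_from_user() result: that call reports how many bytes were left uncopied, not a negative errno, so any nonzero remainder is now mapped to -EFAULT. The standalone sketch below uses a stand-in copier to show the difference; fake_copy_from_user() is not a real kernel helper.

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Stand-in for copy_from_user(): it returns the number of bytes NOT copied,
 * never a negative errno. This fake one always "fails" on the last byte. */
static unsigned long fake_copy_from_user(void *dst, const void *src,
                                         unsigned long n)
{
        if (n == 0)
                return 0;
        memcpy(dst, src, n - 1);
        return 1;
}

int main(void)
{
        char dst[8], src[8] = "payload";
        int err;

        /* Wrong: forwards a positive byte count as if it were an error code. */
        err = (int)fake_copy_from_user(dst, src, sizeof(src));
        printf("naive return value: %d (not a valid -errno)\n", err);

        /* Right: any nonzero remainder means the user pointer was bad. */
        if (fake_copy_from_user(dst, src, sizeof(src)))
                err = -EFAULT;
        printf("fixed return value: %d\n", err);
        return 0;
}
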
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index fdfe259ed7f8..7da0b923131f 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -116,7 +116,7 @@ host1x_client_to_dc(struct host1x_client *client)
116 116
117static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc) 117static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
118{ 118{
119 return container_of(crtc, struct tegra_dc, base); 119 return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
120} 120}
121 121
122static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value, 122static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
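
to_tegra_dc() now returns NULL for a NULL crtc because container_of() on a NULL pointer would otherwise produce a small bogus non-NULL address. A self-contained sketch of the guarded conversion, with illustrative structures in place of the tegra ones:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct base  { int id; };
struct outer { long tag; struct base base; };

static struct outer *to_outer(struct base *b)
{
        /* container_of(NULL, ...) would yield a small bogus non-NULL address;
         * propagate NULL explicitly instead. */
        return b ? container_of(b, struct outer, base) : NULL;
}

int main(void)
{
        struct outer o = { .tag = 42 };

        printf("guarded NULL  -> %p\n", (void *)to_outer(NULL));
        printf("valid pointer -> tag %ld\n", to_outer(&o.base)->tag);
        return 0;
}
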
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 490f7719e317..a3835e7de184 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -247,7 +247,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
247 info->var.yoffset * fb->pitches[0]; 247 info->var.yoffset * fb->pitches[0];
248 248
249 drm->mode_config.fb_base = (resource_size_t)bo->paddr; 249 drm->mode_config.fb_base = (resource_size_t)bo->paddr;
250 info->screen_base = bo->vaddr + offset; 250 info->screen_base = (void __iomem *)bo->vaddr + offset;
251 info->screen_size = size; 251 info->screen_size = size;
252 info->fix.smem_start = (unsigned long)(bo->paddr + offset); 252 info->fix.smem_start = (unsigned long)(bo->paddr + offset);
253 info->fix.smem_len = size; 253 info->fix.smem_len = size;
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index ba47ca4fb880..3b29018913a5 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -14,6 +14,8 @@
14 14
15struct tegra_rgb { 15struct tegra_rgb {
16 struct tegra_output output; 16 struct tegra_output output;
17 struct tegra_dc *dc;
18
17 struct clk *clk_parent; 19 struct clk *clk_parent;
18 struct clk *clk; 20 struct clk *clk;
19}; 21};
@@ -84,18 +86,18 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
84 86
85static int tegra_output_rgb_enable(struct tegra_output *output) 87static int tegra_output_rgb_enable(struct tegra_output *output)
86{ 88{
87 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); 89 struct tegra_rgb *rgb = to_rgb(output);
88 90
89 tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable)); 91 tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
90 92
91 return 0; 93 return 0;
92} 94}
93 95
94static int tegra_output_rgb_disable(struct tegra_output *output) 96static int tegra_output_rgb_disable(struct tegra_output *output)
95{ 97{
96 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); 98 struct tegra_rgb *rgb = to_rgb(output);
97 99
98 tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable)); 100 tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
99 101
100 return 0; 102 return 0;
101} 103}
@@ -146,6 +148,7 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
146 148
147 rgb->output.dev = dc->dev; 149 rgb->output.dev = dc->dev;
148 rgb->output.of_node = np; 150 rgb->output.of_node = np;
151 rgb->dc = dc;
149 152
150 err = tegra_output_probe(&rgb->output); 153 err = tegra_output_probe(&rgb->output);
151 if (err < 0) 154 if (err < 0)
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 24ffbe990736..8d67b943ac05 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -125,6 +125,12 @@ static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
125 125
126static void udl_gem_put_pages(struct udl_gem_object *obj) 126static void udl_gem_put_pages(struct udl_gem_object *obj)
127{ 127{
128 if (obj->base.import_attach) {
129 drm_free_large(obj->pages);
130 obj->pages = NULL;
131 return;
132 }
133
128 drm_gem_put_pages(&obj->base, obj->pages, false, false); 134 drm_gem_put_pages(&obj->base, obj->pages, false, false);
129 obj->pages = NULL; 135 obj->pages = NULL;
130} 136}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 7776e6f0aef6..0489c6152482 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -150,6 +150,8 @@ struct vmw_ttm_tt {
150 bool mapped; 150 bool mapped;
151}; 151};
152 152
153const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
154
153/** 155/**
154 * Helper functions to advance a struct vmw_piter iterator. 156 * Helper functions to advance a struct vmw_piter iterator.
155 * 157 *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index db85985c7086..20890ad8408b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -615,6 +615,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
615 * TTM buffer object driver - vmwgfx_buffer.c 615 * TTM buffer object driver - vmwgfx_buffer.c
616 */ 616 */
617 617
618extern const size_t vmw_tt_size;
618extern struct ttm_placement vmw_vram_placement; 619extern struct ttm_placement vmw_vram_placement;
619extern struct ttm_placement vmw_vram_ne_placement; 620extern struct ttm_placement vmw_vram_ne_placement;
620extern struct ttm_placement vmw_vram_sys_placement; 621extern struct ttm_placement vmw_vram_sys_placement;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ecb3d867b426..03f1c2038631 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -75,6 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du)
75 vmw_surface_unreference(&du->cursor_surface); 75 vmw_surface_unreference(&du->cursor_surface);
76 if (du->cursor_dmabuf) 76 if (du->cursor_dmabuf)
77 vmw_dmabuf_unreference(&du->cursor_dmabuf); 77 vmw_dmabuf_unreference(&du->cursor_dmabuf);
78 drm_sysfs_connector_remove(&du->connector);
78 drm_crtc_cleanup(&du->crtc); 79 drm_crtc_cleanup(&du->crtc);
79 drm_encoder_cleanup(&du->encoder); 80 drm_encoder_cleanup(&du->encoder);
80 drm_connector_cleanup(&du->connector); 81 drm_connector_cleanup(&du->connector);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 79f7e8e60529..a055a26819c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -260,6 +260,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
260 connector->encoder = NULL; 260 connector->encoder = NULL;
261 encoder->crtc = NULL; 261 encoder->crtc = NULL;
262 crtc->fb = NULL; 262 crtc->fb = NULL;
263 crtc->enabled = false;
263 264
264 vmw_ldu_del_active(dev_priv, ldu); 265 vmw_ldu_del_active(dev_priv, ldu);
265 266
@@ -285,6 +286,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
285 crtc->x = set->x; 286 crtc->x = set->x;
286 crtc->y = set->y; 287 crtc->y = set->y;
287 crtc->mode = *mode; 288 crtc->mode = *mode;
289 crtc->enabled = true;
288 290
289 vmw_ldu_add_active(dev_priv, ldu, vfb); 291 vmw_ldu_add_active(dev_priv, ldu, vfb);
290 292
@@ -369,6 +371,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
369 encoder->possible_crtcs = (1 << unit); 371 encoder->possible_crtcs = (1 << unit);
370 encoder->possible_clones = 0; 372 encoder->possible_clones = 0;
371 373
374 (void) drm_sysfs_connector_add(connector);
375
372 drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); 376 drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
373 377
374 drm_mode_crtc_set_gamma_size(crtc, 256); 378 drm_mode_crtc_set_gamma_size(crtc, 256);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index efe2b74c5eb1..9b5ea2ac7ddf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -352,6 +352,38 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
352/** 352/**
353 * Buffer management. 353 * Buffer management.
354 */ 354 */
355
356/**
357 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
358 *
359 * @dev_priv: Pointer to a struct vmw_private identifying the device.
360 * @size: The requested buffer size.
361 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
362 */
363static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
364 bool user)
365{
366 static size_t struct_size, user_struct_size;
367 size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
368 size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
369
370 if (unlikely(struct_size == 0)) {
371 size_t backend_size = ttm_round_pot(vmw_tt_size);
372
373 struct_size = backend_size +
374 ttm_round_pot(sizeof(struct vmw_dma_buffer));
375 user_struct_size = backend_size +
376 ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
377 }
378
379 if (dev_priv->map_mode == vmw_dma_alloc_coherent)
380 page_array_size +=
381 ttm_round_pot(num_pages * sizeof(dma_addr_t));
382
383 return ((user) ? user_struct_size : struct_size) +
384 page_array_size;
385}
386
355void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) 387void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
356{ 388{
357 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); 389 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
@@ -359,6 +391,13 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
359 kfree(vmw_bo); 391 kfree(vmw_bo);
360} 392}
361 393
394static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
395{
396 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
397
398 ttm_prime_object_kfree(vmw_user_bo, prime);
399}
400
362int vmw_dmabuf_init(struct vmw_private *dev_priv, 401int vmw_dmabuf_init(struct vmw_private *dev_priv,
363 struct vmw_dma_buffer *vmw_bo, 402 struct vmw_dma_buffer *vmw_bo,
364 size_t size, struct ttm_placement *placement, 403 size_t size, struct ttm_placement *placement,
@@ -368,28 +407,23 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
368 struct ttm_bo_device *bdev = &dev_priv->bdev; 407 struct ttm_bo_device *bdev = &dev_priv->bdev;
369 size_t acc_size; 408 size_t acc_size;
370 int ret; 409 int ret;
410 bool user = (bo_free == &vmw_user_dmabuf_destroy);
371 411
372 BUG_ON(!bo_free); 412 BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
373 413
374 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); 414 acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
375 memset(vmw_bo, 0, sizeof(*vmw_bo)); 415 memset(vmw_bo, 0, sizeof(*vmw_bo));
376 416
377 INIT_LIST_HEAD(&vmw_bo->res_list); 417 INIT_LIST_HEAD(&vmw_bo->res_list);
378 418
379 ret = ttm_bo_init(bdev, &vmw_bo->base, size, 419 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
380 ttm_bo_type_device, placement, 420 (user) ? ttm_bo_type_device :
421 ttm_bo_type_kernel, placement,
381 0, interruptible, 422 0, interruptible,
382 NULL, acc_size, NULL, bo_free); 423 NULL, acc_size, NULL, bo_free);
383 return ret; 424 return ret;
384} 425}
385 426
386static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
387{
388 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
389
390 ttm_prime_object_kfree(vmw_user_bo, prime);
391}
392
393static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) 427static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
394{ 428{
395 struct vmw_user_dma_buffer *vmw_user_bo; 429 struct vmw_user_dma_buffer *vmw_user_bo;
@@ -781,54 +815,55 @@ err_ref:
781} 815}
782 816
783 817
818/**
819 * vmw_dumb_create - Create a dumb kms buffer
820 *
821 * @file_priv: Pointer to a struct drm_file identifying the caller.
822 * @dev: Pointer to the drm device.
823 * @args: Pointer to a struct drm_mode_create_dumb structure
824 *
825 * This is a driver callback for the core drm create_dumb functionality.
826 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
827 * that the arguments have a different format.
828 */
784int vmw_dumb_create(struct drm_file *file_priv, 829int vmw_dumb_create(struct drm_file *file_priv,
785 struct drm_device *dev, 830 struct drm_device *dev,
786 struct drm_mode_create_dumb *args) 831 struct drm_mode_create_dumb *args)
787{ 832{
788 struct vmw_private *dev_priv = vmw_priv(dev); 833 struct vmw_private *dev_priv = vmw_priv(dev);
789 struct vmw_master *vmaster = vmw_master(file_priv->master); 834 struct vmw_master *vmaster = vmw_master(file_priv->master);
790 struct vmw_user_dma_buffer *vmw_user_bo; 835 struct vmw_dma_buffer *dma_buf;
791 struct ttm_buffer_object *tmp;
792 int ret; 836 int ret;
793 837
794 args->pitch = args->width * ((args->bpp + 7) / 8); 838 args->pitch = args->width * ((args->bpp + 7) / 8);
795 args->size = args->pitch * args->height; 839 args->size = args->pitch * args->height;
796 840
797 vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
798 if (vmw_user_bo == NULL)
799 return -ENOMEM;
800
801 ret = ttm_read_lock(&vmaster->lock, true); 841 ret = ttm_read_lock(&vmaster->lock, true);
802 if (ret != 0) { 842 if (unlikely(ret != 0))
803 kfree(vmw_user_bo);
804 return ret; 843 return ret;
805 }
806 844
807 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size, 845 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
808 &vmw_vram_sys_placement, true, 846 args->size, false, &args->handle,
809 &vmw_user_dmabuf_destroy); 847 &dma_buf);
810 if (ret != 0)
811 goto out_no_dmabuf;
812
813 tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
814 ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile,
815 args->size,
816 &vmw_user_bo->prime,
817 false,
818 ttm_buffer_type,
819 &vmw_user_dmabuf_release, NULL);
820 if (unlikely(ret != 0)) 848 if (unlikely(ret != 0))
821 goto out_no_base_object; 849 goto out_no_dmabuf;
822
823 args->handle = vmw_user_bo->prime.base.hash.key;
824 850
825out_no_base_object: 851 vmw_dmabuf_unreference(&dma_buf);
826 ttm_bo_unref(&tmp);
827out_no_dmabuf: 852out_no_dmabuf:
828 ttm_read_unlock(&vmaster->lock); 853 ttm_read_unlock(&vmaster->lock);
829 return ret; 854 return ret;
830} 855}
831 856
857/**
858 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
859 *
860 * @file_priv: Pointer to a struct drm_file identifying the caller.
861 * @dev: Pointer to the drm device.
862 * @handle: Handle identifying the dumb buffer.
863 * @offset: The address space offset returned.
864 *
865 * This is a driver callback for the core drm dumb_map_offset functionality.
866 */
832int vmw_dumb_map_offset(struct drm_file *file_priv, 867int vmw_dumb_map_offset(struct drm_file *file_priv,
833 struct drm_device *dev, uint32_t handle, 868 struct drm_device *dev, uint32_t handle,
834 uint64_t *offset) 869 uint64_t *offset)
@@ -846,6 +881,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
846 return 0; 881 return 0;
847} 882}
848 883
884/**
 885 * vmw_dumb_destroy - Destroy a dumb buffer
886 *
887 * @file_priv: Pointer to a struct drm_file identifying the caller.
888 * @dev: Pointer to the drm device.
889 * @handle: Handle identifying the dumb buffer.
890 *
891 * This is a driver callback for the core drm dumb_destroy functionality.
892 */
849int vmw_dumb_destroy(struct drm_file *file_priv, 893int vmw_dumb_destroy(struct drm_file *file_priv,
850 struct drm_device *dev, 894 struct drm_device *dev,
851 uint32_t handle) 895 uint32_t handle)
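
vmw_dmabuf_acc_size() above charges a fixed per-object overhead plus one pointer per page, and one dma_addr_t per page in coherent mode. The sketch below reproduces that arithmetic in plain C; round_pot() is a crude power-of-two stand-in for ttm_round_pot(), and the 600-byte struct size is an arbitrary example.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE     4096u
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(size_t)(PAGE_SIZE - 1))

/* Crude power-of-two stand-in for ttm_round_pot(). */
static size_t round_pot(size_t size)
{
        size_t pot = 16;

        while (pot < size)
                pot <<= 1;
        return pot;
}

static size_t acc_size(size_t size, int coherent, size_t struct_size)
{
        size_t num_pages = PAGE_ALIGN(size) / PAGE_SIZE;
        size_t page_array = round_pot(num_pages * sizeof(void *));

        if (coherent)
                page_array += round_pot(num_pages * sizeof(uint64_t)); /* dma_addr_t */
        return round_pot(struct_size) + page_array;
}

int main(void)
{
        printf("1 MiB coherent buffer accounts for %zu bytes of bookkeeping\n",
               acc_size(1 << 20, 1, 600));
        return 0;
}
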
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 26387c3d5a21..22406c8651ea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -310,6 +310,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
310 crtc->fb = NULL; 310 crtc->fb = NULL;
311 crtc->x = 0; 311 crtc->x = 0;
312 crtc->y = 0; 312 crtc->y = 0;
313 crtc->enabled = false;
313 314
314 vmw_sou_del_active(dev_priv, sou); 315 vmw_sou_del_active(dev_priv, sou);
315 316
@@ -370,6 +371,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
370 crtc->fb = NULL; 371 crtc->fb = NULL;
371 crtc->x = 0; 372 crtc->x = 0;
372 crtc->y = 0; 373 crtc->y = 0;
374 crtc->enabled = false;
373 375
374 return ret; 376 return ret;
375 } 377 }
@@ -382,6 +384,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
382 crtc->fb = fb; 384 crtc->fb = fb;
383 crtc->x = set->x; 385 crtc->x = set->x;
384 crtc->y = set->y; 386 crtc->y = set->y;
387 crtc->enabled = true;
385 388
386 return 0; 389 return 0;
387} 390}
@@ -464,6 +467,8 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
464 encoder->possible_crtcs = (1 << unit); 467 encoder->possible_crtcs = (1 << unit);
465 encoder->possible_clones = 0; 468 encoder->possible_clones = 0;
466 469
470 (void) drm_sysfs_connector_add(connector);
471
467 drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs); 472 drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
468 473
469 drm_mode_crtc_set_gamma_size(crtc, 256); 474 drm_mode_crtc_set_gamma_size(crtc, 256);
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 509383f8be03..6a929591aa73 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -19,6 +19,7 @@
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21 21
22#include "bus.h"
22#include "dev.h" 23#include "dev.h"
23 24
24static DEFINE_MUTEX(clients_lock); 25static DEFINE_MUTEX(clients_lock);
@@ -257,7 +258,7 @@ static int host1x_unregister_client(struct host1x *host1x,
257 return -ENODEV; 258 return -ENODEV;
258} 259}
259 260
260struct bus_type host1x_bus_type = { 261static struct bus_type host1x_bus_type = {
261 .name = "host1x", 262 .name = "host1x",
262}; 263};
263 264
@@ -301,7 +302,7 @@ static int host1x_device_add(struct host1x *host1x,
301 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask; 302 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
302 device->dev.dma_mask = &device->dev.coherent_dma_mask; 303 device->dev.dma_mask = &device->dev.coherent_dma_mask;
303 device->dev.release = host1x_device_release; 304 device->dev.release = host1x_device_release;
304 dev_set_name(&device->dev, driver->name); 305 dev_set_name(&device->dev, "%s", driver->name);
305 device->dev.bus = &host1x_bus_type; 306 device->dev.bus = &host1x_bus_type;
306 device->dev.parent = host1x->dev; 307 device->dev.parent = host1x->dev;
307 308
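
The host1x change passes the driver name through a fixed "%s" format instead of using it as the format itself, so a stray '%' in the name can never be parsed as a conversion specifier. A trivial userspace illustration of the same pattern:

#include <stdio.h>

int main(void)
{
        const char *name = "throttle-75%s"; /* a name with a stray '%' in it */

        /*
         * printf(name) would parse the embedded "%s" as a conversion and
         * read a missing argument; passing it through a fixed format is safe.
         */
        printf("%s\n", name);
        return 0;
}
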
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 37e2a63241a9..6b09b71940c2 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -54,8 +54,8 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
54 u32 *p = (u32 *)((u32)pb->mapped + getptr); 54 u32 *p = (u32 *)((u32)pb->mapped + getptr);
55 *(p++) = HOST1X_OPCODE_NOP; 55 *(p++) = HOST1X_OPCODE_NOP;
56 *(p++) = HOST1X_OPCODE_NOP; 56 *(p++) = HOST1X_OPCODE_NOP;
57 dev_dbg(host1x->dev, "%s: NOP at 0x%x\n", __func__, 57 dev_dbg(host1x->dev, "%s: NOP at %#llx\n", __func__,
58 pb->phys + getptr); 58 (u64)pb->phys + getptr);
59 getptr = (getptr + 8) & (pb->size_bytes - 1); 59 getptr = (getptr + 8) & (pb->size_bytes - 1);
60 } 60 }
61 wmb(); 61 wmb();
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 640c75ca5a8b..f72c873eff81 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -163,8 +163,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
163 continue; 163 continue;
164 } 164 }
165 165
166 host1x_debug_output(o, " GATHER at %08x+%04x, %d words\n", 166 host1x_debug_output(o, " GATHER at %#llx+%04x, %d words\n",
167 g->base, g->offset, g->words); 167 (u64)g->base, g->offset, g->words);
168 168
169 show_gather(o, g->base + g->offset, g->words, cdma, 169 show_gather(o, g->base + g->offset, g->words, cdma,
170 g->base, mapped); 170 g->base, mapped);
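
The host1x cdma and debug hunks cast the physical address to u64 and print it with %#llx, which stays correct on configurations where the address type is wider than 32 bits. A minimal standalone version of that formatting:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t base = 0x1c0000000ull; /* above 4 GiB: does not fit in 32 bits */
        unsigned int offset = 0x40;

        /* Casting to a fixed 64-bit type keeps the "%#llx" format correct no
         * matter how wide the underlying address type happens to be. */
        printf("GATHER at %#llx+%04x, %d words\n",
               (unsigned long long)base, offset, 16);
        return 0;
}
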