author    Daniel Vetter <daniel.vetter@ffwll.ch>  2014-09-30 16:36:57 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>  2014-09-30 16:36:57 -0400
commit    955e36d0b4d3e29c9c8a865d166a42718aed302e (patch)
tree      9913b327cc443e5eb3d399eafb9d460d9261d2f2 /drivers/gpu/drm
parent    ce31d9f4fc05964f6c0dd3a8661dc1a1d843a1e2 (diff)
parent    c83155a6044341d67b85b441ba719f86058f6e2b (diff)
Merge branch 'topic/skl-stage1' into drm-intel-next-queued
SKL stage 1 patches still need polish, so they will likely miss the 3.18 merge
window. We've decided to postpone to 3.19, so let's pull this in to make patch
merging and conflict handling easier.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.h | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/bochs/bochs.h | 2
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 7
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 3
-rw-r--r--  drivers/gpu/drm/drm_info.c | 2
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 39
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 6
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 2
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 62
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 103
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 40
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 90
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 55
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 106
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 16
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 453
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.h | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/gtt.h | 1
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 41
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 156
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 125
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 64
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 206
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 2
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 23
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c | 2
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 2
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c | 3
-rw-r--r--  drivers/gpu/drm/tegra/gem.h | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 20
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 5
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 1
-rw-r--r--  drivers/gpu/drm/via/via_drv.c | 2
76 files changed, 1217 insertions(+), 700 deletions(-)
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
index 00b6cd461a03..b000ea3a829a 100644
--- a/drivers/gpu/drm/armada/armada_gem.h
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -8,6 +8,8 @@
8#ifndef ARMADA_GEM_H 8#ifndef ARMADA_GEM_H
9#define ARMADA_GEM_H 9#define ARMADA_GEM_H
10 10
11#include <drm/drm_gem.h>
12
11/* GEM */ 13/* GEM */
12struct armada_gem_object { 14struct armada_gem_object {
13 struct drm_gem_object obj; 15 struct drm_gem_object obj;
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 7485ff945ca9..86205a28e56b 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -36,6 +36,8 @@
36#include <drm/ttm/ttm_memory.h> 36#include <drm/ttm/ttm_memory.h>
37#include <drm/ttm/ttm_module.h> 37#include <drm/ttm/ttm_module.h>
38 38
39#include <drm/drm_gem.h>
40
39#include <linux/i2c.h> 41#include <linux/i2c.h>
40#include <linux/i2c-algo-bit.h> 42#include <linux/i2c-algo-bit.h>
41 43
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 5098c7dd435c..c65d432f42c4 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -427,7 +427,7 @@ int ast_mmap(struct file *filp, struct vm_area_struct *vma)
427 struct ast_private *ast; 427 struct ast_private *ast;
428 428
429 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 429 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
430 return drm_mmap(filp, vma); 430 return -EINVAL;
431 431
432 file_priv = filp->private_data; 432 file_priv = filp->private_data;
433 ast = file_priv->minor->dev->dev_private; 433 ast = file_priv->minor->dev->dev_private;
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 4f6e7b3a3635..71f2687fc3cc 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -7,6 +7,8 @@
7#include <drm/drm_crtc_helper.h> 7#include <drm/drm_crtc_helper.h>
8#include <drm/drm_fb_helper.h> 8#include <drm/drm_fb_helper.h>
9 9
10#include <drm/drm_gem.h>
11
10#include <ttm/ttm_bo_driver.h> 12#include <ttm/ttm_bo_driver.h>
11#include <ttm/ttm_page_alloc.h> 13#include <ttm/ttm_page_alloc.h>
12 14
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 2af30e7607d7..324f5a09a0a1 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -339,7 +339,7 @@ int bochs_mmap(struct file *filp, struct vm_area_struct *vma)
339 struct bochs_device *bochs; 339 struct bochs_device *bochs;
340 340
341 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 341 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
342 return drm_mmap(filp, vma); 342 return -EINVAL;
343 343
344 file_priv = filp->private_data; 344 file_priv = filp->private_data;
345 bochs = file_priv->minor->dev->dev_private; 345 bochs = file_priv->minor->dev->dev_private;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index dd2cfc9024aa..d44e69daa239 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -21,6 +21,8 @@
21#include <drm/ttm/ttm_memory.h> 21#include <drm/ttm/ttm_memory.h>
22#include <drm/ttm/ttm_module.h> 22#include <drm/ttm/ttm_module.h>
23 23
24#include <drm/drm_gem.h>
25
24#define DRIVER_AUTHOR "Matthew Garrett" 26#define DRIVER_AUTHOR "Matthew Garrett"
25 27
26#define DRIVER_NAME "cirrus" 28#define DRIVER_NAME "cirrus"
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 3e7d758330a9..d3c615f9b183 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -411,7 +411,7 @@ int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
411 struct cirrus_device *cirrus; 411 struct cirrus_device *cirrus;
412 412
413 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 413 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
414 return drm_mmap(filp, vma); 414 return -EINVAL;
415 415
416 file_priv = filp->private_data; 416 file_priv = filp->private_data;
417 cirrus = file_priv->minor->dev->dev_private; 417 cirrus = file_priv->minor->dev->dev_private;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 6a119026a76b..8889f8ec50ab 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -56,22 +56,19 @@ static struct idr drm_minors_idr;
56struct class *drm_class; 56struct class *drm_class;
57static struct dentry *drm_debugfs_root; 57static struct dentry *drm_debugfs_root;
58 58
59int drm_err(const char *func, const char *format, ...) 59void drm_err(const char *func, const char *format, ...)
60{ 60{
61 struct va_format vaf; 61 struct va_format vaf;
62 va_list args; 62 va_list args;
63 int r;
64 63
65 va_start(args, format); 64 va_start(args, format);
66 65
67 vaf.fmt = format; 66 vaf.fmt = format;
68 vaf.va = &args; 67 vaf.va = &args;
69 68
70 r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf); 69 printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
71 70
72 va_end(args); 71 va_end(args);
73
74 return r;
75} 72}
76EXPORT_SYMBOL(drm_err); 73EXPORT_SYMBOL(drm_err);
77 74
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index eb5dd67153e4..cd45e45e2cce 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -38,6 +38,7 @@
38#include <linux/dma-buf.h> 38#include <linux/dma-buf.h>
39#include <drm/drmP.h> 39#include <drm/drmP.h>
40#include <drm/drm_vma_manager.h> 40#include <drm/drm_vma_manager.h>
41#include <drm/drm_gem.h>
41#include "drm_internal.h" 42#include "drm_internal.h"
42 43
43/** @file drm_gem.c 44/** @file drm_gem.c
@@ -888,7 +889,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
888 vma_pages(vma)); 889 vma_pages(vma));
889 if (!node) { 890 if (!node) {
890 mutex_unlock(&dev->struct_mutex); 891 mutex_unlock(&dev->struct_mutex);
891 return drm_mmap(filp, vma); 892 return -EINVAL;
892 } else if (!drm_vma_node_is_allowed(node, filp)) { 893 } else if (!drm_vma_node_is_allowed(node, filp)) {
893 mutex_unlock(&dev->struct_mutex); 894 mutex_unlock(&dev->struct_mutex);
894 return -EACCES; 895 return -EACCES;
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 0780541f7935..51efebd434f3 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -35,6 +35,8 @@
35 35
36#include <linux/seq_file.h> 36#include <linux/seq_file.h>
37#include <drm/drmP.h> 37#include <drm/drmP.h>
38#include <drm/drm_gem.h>
39
38#include "drm_legacy.h" 40#include "drm_legacy.h"
39 41
40/** 42/**
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 7e459bf38c26..7cc0a3516871 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -37,7 +37,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
37 37
38/* drm_vm.c */ 38/* drm_vm.c */
39int drm_vma_info(struct seq_file *m, void *data); 39int drm_vma_info(struct seq_file *m, void *data);
40int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); 40void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
41void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma); 41void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
42 42
43/* drm_prime.c */ 43/* drm_prime.c */
@@ -62,6 +62,8 @@ int drm_gem_name_info(struct seq_file *m, void *data);
62/* drm_irq.c */ 62/* drm_irq.c */
63int drm_control(struct drm_device *dev, void *data, 63int drm_control(struct drm_device *dev, void *data,
64 struct drm_file *file_priv); 64 struct drm_file *file_priv);
65int drm_modeset_ctl(struct drm_device *dev, void *data,
66 struct drm_file *file_priv);
65 67
66/* drm_auth.c */ 68/* drm_auth.c */
67int drm_getmagic(struct drm_device *dev, void *data, 69int drm_getmagic(struct drm_device *dev, void *data,
@@ -93,3 +95,38 @@ int drm_gem_open_ioctl(struct drm_device *dev, void *data,
93 struct drm_file *file_priv); 95 struct drm_file *file_priv);
94void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); 96void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
95void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); 97void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
98
99/* drm_drv.c */
100int drm_setmaster_ioctl(struct drm_device *dev, void *data,
101 struct drm_file *file_priv);
102int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
103 struct drm_file *file_priv);
104struct drm_master *drm_master_create(struct drm_minor *minor);
105
106/* drm_debugfs.c */
107#if defined(CONFIG_DEBUG_FS)
108int drm_debugfs_init(struct drm_minor *minor, int minor_id,
109 struct dentry *root);
110int drm_debugfs_cleanup(struct drm_minor *minor);
111int drm_debugfs_connector_add(struct drm_connector *connector);
112void drm_debugfs_connector_remove(struct drm_connector *connector);
113#else
114static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
115 struct dentry *root)
116{
117 return 0;
118}
119
120static inline int drm_debugfs_cleanup(struct drm_minor *minor)
121{
122 return 0;
123}
124
125static inline int drm_debugfs_connector_add(struct drm_connector *connector)
126{
127 return 0;
128}
129static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
130{
131}
132#endif
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 6aa6a9e95570..eb6dfe52cab2 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -231,6 +231,9 @@ ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
231 break; 231 break;
232 } 232 }
233 233
234 if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
235 msg.flags = MIPI_DSI_MSG_USE_LPM;
236
234 return ops->transfer(dsi->host, &msg); 237 return ops->transfer(dsi->host, &msg);
235} 238}
236EXPORT_SYMBOL(mipi_dsi_dcs_write); 239EXPORT_SYMBOL(mipi_dsi_dcs_write);
@@ -260,6 +263,9 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
260 if (!ops || !ops->transfer) 263 if (!ops || !ops->transfer)
261 return -ENOSYS; 264 return -ENOSYS;
262 265
266 if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
267 msg.flags = MIPI_DSI_MSG_USE_LPM;
268
263 return ops->transfer(dsi->host, &msg); 269 return ops->transfer(dsi->host, &msg);
264} 270}
265EXPORT_SYMBOL(mipi_dsi_dcs_read); 271EXPORT_SYMBOL(mipi_dsi_dcs_read);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 2807a771f505..7826de9da276 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -29,6 +29,8 @@
29#include <linux/export.h> 29#include <linux/export.h>
30#include <linux/dma-buf.h> 30#include <linux/dma-buf.h>
31#include <drm/drmP.h> 31#include <drm/drmP.h>
32#include <drm/drm_gem.h>
33
32#include "drm_internal.h" 34#include "drm_internal.h"
33 35
34/* 36/*
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 06cad0323699..4a2c328959e5 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -57,15 +57,11 @@ static pgprot_t drm_io_prot(struct drm_local_map *map,
57{ 57{
58 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); 58 pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
59 59
60#if defined(__i386__) || defined(__x86_64__) 60#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
61 if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING)) 61 if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
62 tmp = pgprot_noncached(tmp); 62 tmp = pgprot_noncached(tmp);
63 else 63 else
64 tmp = pgprot_writecombine(tmp); 64 tmp = pgprot_writecombine(tmp);
65#elif defined(__powerpc__)
66 pgprot_val(tmp) |= _PAGE_NO_CACHE;
67 if (map->type == _DRM_REGISTERS)
68 pgprot_val(tmp) |= _PAGE_GUARDED;
69#elif defined(__ia64__) 65#elif defined(__ia64__)
70 if (efi_range_is_wc(vma->vm_start, vma->vm_end - 66 if (efi_range_is_wc(vma->vm_start, vma->vm_end -
71 vma->vm_start)) 67 vma->vm_start))
@@ -421,7 +417,6 @@ void drm_vm_open_locked(struct drm_device *dev,
421 list_add(&vma_entry->head, &dev->vmalist); 417 list_add(&vma_entry->head, &dev->vmalist);
422 } 418 }
423} 419}
424EXPORT_SYMBOL_GPL(drm_vm_open_locked);
425 420
426static void drm_vm_open(struct vm_area_struct *vma) 421static void drm_vm_open(struct vm_area_struct *vma)
427{ 422{
@@ -541,7 +536,7 @@ static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
541 * according to the mapping type and remaps the pages. Finally sets the file 536 * according to the mapping type and remaps the pages. Finally sets the file
542 * pointer and calls vm_open(). 537 * pointer and calls vm_open().
543 */ 538 */
544int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) 539static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
545{ 540{
546 struct drm_file *priv = filp->private_data; 541 struct drm_file *priv = filp->private_data;
547 struct drm_device *dev = priv->minor->dev; 542 struct drm_device *dev = priv->minor->dev;
@@ -655,7 +650,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
655 return 0; 650 return 0;
656} 651}
657 652
658int drm_mmap(struct file *filp, struct vm_area_struct *vma) 653int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
659{ 654{
660 struct drm_file *priv = filp->private_data; 655 struct drm_file *priv = filp->private_data;
661 struct drm_device *dev = priv->minor->dev; 656 struct drm_device *dev = priv->minor->dev;
@@ -670,7 +665,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
670 665
671 return ret; 666 return ret;
672} 667}
673EXPORT_SYMBOL(drm_mmap); 668EXPORT_SYMBOL(drm_legacy_mmap);
674 669
675void drm_legacy_vma_flush(struct drm_device *dev) 670void drm_legacy_vma_flush(struct drm_device *dev)
676{ 671{
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 02602a8254c4..cd50ece31601 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -937,6 +937,8 @@ static enum drm_connector_status exynos_dp_detect(
937 937
938static void exynos_dp_connector_destroy(struct drm_connector *connector) 938static void exynos_dp_connector_destroy(struct drm_connector *connector)
939{ 939{
940 drm_connector_unregister(connector);
941 drm_connector_cleanup(connector);
940} 942}
941 943
942static struct drm_connector_funcs exynos_dp_connector_funcs = { 944static struct drm_connector_funcs exynos_dp_connector_funcs = {
@@ -1358,8 +1360,8 @@ static void exynos_dp_unbind(struct device *dev, struct device *master,
1358 1360
1359 exynos_dp_dpms(display, DRM_MODE_DPMS_OFF); 1361 exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
1360 1362
1363 exynos_dp_connector_destroy(&dp->connector);
1361 encoder->funcs->destroy(encoder); 1364 encoder->funcs->destroy(encoder);
1362 drm_connector_cleanup(&dp->connector);
1363} 1365}
1364 1366
1365static const struct component_ops exynos_dp_ops = { 1367static const struct component_ops exynos_dp_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index b68e58f78cd1..8e38e9f8e542 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -32,7 +32,6 @@ enum exynos_crtc_mode {
32 * Exynos specific crtc structure. 32 * Exynos specific crtc structure.
33 * 33 *
34 * @drm_crtc: crtc object. 34 * @drm_crtc: crtc object.
35 * @drm_plane: pointer of private plane object for this crtc
36 * @manager: the manager associated with this crtc 35 * @manager: the manager associated with this crtc
37 * @pipe: a crtc index created at load() with a new crtc object creation 36 * @pipe: a crtc index created at load() with a new crtc object creation
38 * and the crtc object would be set to private->crtc array 37 * and the crtc object would be set to private->crtc array
@@ -46,7 +45,6 @@ enum exynos_crtc_mode {
46 */ 45 */
47struct exynos_drm_crtc { 46struct exynos_drm_crtc {
48 struct drm_crtc drm_crtc; 47 struct drm_crtc drm_crtc;
49 struct drm_plane *plane;
50 struct exynos_drm_manager *manager; 48 struct exynos_drm_manager *manager;
51 unsigned int pipe; 49 unsigned int pipe;
52 unsigned int dpms; 50 unsigned int dpms;
@@ -94,12 +92,12 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
94 92
95 exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 93 exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
96 94
97 exynos_plane_commit(exynos_crtc->plane); 95 exynos_plane_commit(crtc->primary);
98 96
99 if (manager->ops->commit) 97 if (manager->ops->commit)
100 manager->ops->commit(manager); 98 manager->ops->commit(manager);
101 99
102 exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON); 100 exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_ON);
103} 101}
104 102
105static bool 103static bool
@@ -123,10 +121,9 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
123{ 121{
124 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 122 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
125 struct exynos_drm_manager *manager = exynos_crtc->manager; 123 struct exynos_drm_manager *manager = exynos_crtc->manager;
126 struct drm_plane *plane = exynos_crtc->plane; 124 struct drm_framebuffer *fb = crtc->primary->fb;
127 unsigned int crtc_w; 125 unsigned int crtc_w;
128 unsigned int crtc_h; 126 unsigned int crtc_h;
129 int ret;
130 127
131 /* 128 /*
132 * copy the mode data adjusted by mode_fixup() into crtc->mode 129 * copy the mode data adjusted by mode_fixup() into crtc->mode
@@ -134,29 +131,21 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
134 */ 131 */
135 memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode)); 132 memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
136 133
137 crtc_w = crtc->primary->fb->width - x; 134 crtc_w = fb->width - x;
138 crtc_h = crtc->primary->fb->height - y; 135 crtc_h = fb->height - y;
139 136
140 if (manager->ops->mode_set) 137 if (manager->ops->mode_set)
141 manager->ops->mode_set(manager, &crtc->mode); 138 manager->ops->mode_set(manager, &crtc->mode);
142 139
143 ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h, 140 return exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
144 x, y, crtc_w, crtc_h); 141 crtc_w, crtc_h, x, y, crtc_w, crtc_h);
145 if (ret)
146 return ret;
147
148 plane->crtc = crtc;
149 plane->fb = crtc->primary->fb;
150 drm_framebuffer_reference(plane->fb);
151
152 return 0;
153} 142}
154 143
155static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y, 144static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
156 struct drm_framebuffer *old_fb) 145 struct drm_framebuffer *old_fb)
157{ 146{
158 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 147 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
159 struct drm_plane *plane = exynos_crtc->plane; 148 struct drm_framebuffer *fb = crtc->primary->fb;
160 unsigned int crtc_w; 149 unsigned int crtc_w;
161 unsigned int crtc_h; 150 unsigned int crtc_h;
162 int ret; 151 int ret;
@@ -167,11 +156,11 @@ static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
167 return -EPERM; 156 return -EPERM;
168 } 157 }
169 158
170 crtc_w = crtc->primary->fb->width - x; 159 crtc_w = fb->width - x;
171 crtc_h = crtc->primary->fb->height - y; 160 crtc_h = fb->height - y;
172 161
173 ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h, 162 ret = exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
174 x, y, crtc_w, crtc_h); 163 crtc_w, crtc_h, x, y, crtc_w, crtc_h);
175 if (ret) 164 if (ret)
176 return ret; 165 return ret;
177 166
@@ -304,8 +293,7 @@ static int exynos_drm_crtc_set_property(struct drm_crtc *crtc,
304 exynos_drm_crtc_commit(crtc); 293 exynos_drm_crtc_commit(crtc);
305 break; 294 break;
306 case CRTC_MODE_BLANK: 295 case CRTC_MODE_BLANK:
307 exynos_plane_dpms(exynos_crtc->plane, 296 exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_OFF);
308 DRM_MODE_DPMS_OFF);
309 break; 297 break;
310 default: 298 default:
311 break; 299 break;
@@ -351,8 +339,10 @@ static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc)
351int exynos_drm_crtc_create(struct exynos_drm_manager *manager) 339int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
352{ 340{
353 struct exynos_drm_crtc *exynos_crtc; 341 struct exynos_drm_crtc *exynos_crtc;
342 struct drm_plane *plane;
354 struct exynos_drm_private *private = manager->drm_dev->dev_private; 343 struct exynos_drm_private *private = manager->drm_dev->dev_private;
355 struct drm_crtc *crtc; 344 struct drm_crtc *crtc;
345 int ret;
356 346
357 exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL); 347 exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
358 if (!exynos_crtc) 348 if (!exynos_crtc)
@@ -364,11 +354,11 @@ int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
364 exynos_crtc->dpms = DRM_MODE_DPMS_OFF; 354 exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
365 exynos_crtc->manager = manager; 355 exynos_crtc->manager = manager;
366 exynos_crtc->pipe = manager->pipe; 356 exynos_crtc->pipe = manager->pipe;
367 exynos_crtc->plane = exynos_plane_init(manager->drm_dev, 357 plane = exynos_plane_init(manager->drm_dev, 1 << manager->pipe,
368 1 << manager->pipe, true); 358 DRM_PLANE_TYPE_PRIMARY);
369 if (!exynos_crtc->plane) { 359 if (IS_ERR(plane)) {
370 kfree(exynos_crtc); 360 ret = PTR_ERR(plane);
371 return -ENOMEM; 361 goto err_plane;
372 } 362 }
373 363
374 manager->crtc = &exynos_crtc->drm_crtc; 364 manager->crtc = &exynos_crtc->drm_crtc;
@@ -376,12 +366,22 @@ int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
376 366
377 private->crtc[manager->pipe] = crtc; 367 private->crtc[manager->pipe] = crtc;
378 368
379 drm_crtc_init(manager->drm_dev, crtc, &exynos_crtc_funcs); 369 ret = drm_crtc_init_with_planes(manager->drm_dev, crtc, plane, NULL,
370 &exynos_crtc_funcs);
371 if (ret < 0)
372 goto err_crtc;
373
380 drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs); 374 drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
381 375
382 exynos_drm_crtc_attach_mode_property(crtc); 376 exynos_drm_crtc_attach_mode_property(crtc);
383 377
384 return 0; 378 return 0;
379
380err_crtc:
381 plane->funcs->destroy(plane);
382err_plane:
383 kfree(exynos_crtc);
384 return ret;
385} 385}
386 386
387int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe) 387int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index fa08f05e3e34..96c87db388fb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -342,8 +342,12 @@ int exynos_dpi_remove(struct device *dev)
342 struct exynos_dpi *ctx = exynos_dpi_display.ctx; 342 struct exynos_dpi *ctx = exynos_dpi_display.ctx;
343 343
344 exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF); 344 exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF);
345
346 exynos_dpi_connector_destroy(&ctx->connector);
345 encoder->funcs->destroy(encoder); 347 encoder->funcs->destroy(encoder);
346 drm_connector_cleanup(&ctx->connector); 348
349 if (ctx->panel)
350 drm_panel_detach(ctx->panel);
347 351
348 exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR); 352 exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
349 353
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 5aae95cf5b23..443a2069858a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -15,7 +15,6 @@
15#include <drm/drmP.h> 15#include <drm/drmP.h>
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17 17
18#include <linux/anon_inodes.h>
19#include <linux/component.h> 18#include <linux/component.h>
20 19
21#include <drm/exynos_drm.h> 20#include <drm/exynos_drm.h>
@@ -86,8 +85,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
86 struct drm_plane *plane; 85 struct drm_plane *plane;
87 unsigned long possible_crtcs = (1 << MAX_CRTC) - 1; 86 unsigned long possible_crtcs = (1 << MAX_CRTC) - 1;
88 87
89 plane = exynos_plane_init(dev, possible_crtcs, false); 88 plane = exynos_plane_init(dev, possible_crtcs,
90 if (!plane) 89 DRM_PLANE_TYPE_OVERLAY);
90 if (IS_ERR(plane))
91 goto err_mode_config_cleanup; 91 goto err_mode_config_cleanup;
92 } 92 }
93 93
@@ -116,6 +116,23 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
116 /* force connectors detection */ 116 /* force connectors detection */
117 drm_helper_hpd_irq_event(dev); 117 drm_helper_hpd_irq_event(dev);
118 118
119 /*
120 * enable drm irq mode.
121 * - with irq_enabled = true, we can use the vblank feature.
122 *
123 * P.S. note that we wouldn't use drm irq handler but
124 * just specific driver own one instead because
125 * drm framework supports only one irq handler.
126 */
127 dev->irq_enabled = true;
128
129 /*
130 * with vblank_disable_allowed = true, vblank interrupt will be disabled
131 * by drm timer once a current process gives up ownership of
132 * vblank event.(after drm_vblank_put function is called)
133 */
134 dev->vblank_disable_allowed = true;
135
119 return 0; 136 return 0;
120 137
121err_unbind_all: 138err_unbind_all:
@@ -136,23 +153,19 @@ static int exynos_drm_unload(struct drm_device *dev)
136 exynos_drm_device_subdrv_remove(dev); 153 exynos_drm_device_subdrv_remove(dev);
137 154
138 exynos_drm_fbdev_fini(dev); 155 exynos_drm_fbdev_fini(dev);
139 drm_vblank_cleanup(dev);
140 drm_kms_helper_poll_fini(dev); 156 drm_kms_helper_poll_fini(dev);
141 drm_mode_config_cleanup(dev);
142 157
158 component_unbind_all(dev->dev, dev);
159 drm_vblank_cleanup(dev);
160 drm_mode_config_cleanup(dev);
143 drm_release_iommu_mapping(dev); 161 drm_release_iommu_mapping(dev);
144 kfree(dev->dev_private);
145 162
146 component_unbind_all(dev->dev, dev); 163 kfree(dev->dev_private);
147 dev->dev_private = NULL; 164 dev->dev_private = NULL;
148 165
149 return 0; 166 return 0;
150} 167}
151 168
152static const struct file_operations exynos_drm_gem_fops = {
153 .mmap = exynos_drm_gem_mmap_buffer,
154};
155
156static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) 169static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
157{ 170{
158 struct drm_connector *connector; 171 struct drm_connector *connector;
@@ -191,7 +204,6 @@ static int exynos_drm_resume(struct drm_device *dev)
191static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) 204static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
192{ 205{
193 struct drm_exynos_file_private *file_priv; 206 struct drm_exynos_file_private *file_priv;
194 struct file *anon_filp;
195 int ret; 207 int ret;
196 208
197 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); 209 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
@@ -204,21 +216,8 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
204 if (ret) 216 if (ret)
205 goto err_file_priv_free; 217 goto err_file_priv_free;
206 218
207 anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
208 NULL, 0);
209 if (IS_ERR(anon_filp)) {
210 ret = PTR_ERR(anon_filp);
211 goto err_subdrv_close;
212 }
213
214 anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
215 file_priv->anon_filp = anon_filp;
216
217 return ret; 219 return ret;
218 220
219err_subdrv_close:
220 exynos_drm_subdrv_close(dev, file);
221
222err_file_priv_free: 221err_file_priv_free:
223 kfree(file_priv); 222 kfree(file_priv);
224 file->driver_priv = NULL; 223 file->driver_priv = NULL;
@@ -234,7 +233,6 @@ static void exynos_drm_preclose(struct drm_device *dev,
234static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) 233static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
235{ 234{
236 struct exynos_drm_private *private = dev->dev_private; 235 struct exynos_drm_private *private = dev->dev_private;
237 struct drm_exynos_file_private *file_priv;
238 struct drm_pending_vblank_event *v, *vt; 236 struct drm_pending_vblank_event *v, *vt;
239 struct drm_pending_event *e, *et; 237 struct drm_pending_event *e, *et;
240 unsigned long flags; 238 unsigned long flags;
@@ -260,10 +258,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
260 } 258 }
261 spin_unlock_irqrestore(&dev->event_lock, flags); 259 spin_unlock_irqrestore(&dev->event_lock, flags);
262 260
263 file_priv = file->driver_priv;
264 if (file_priv->anon_filp)
265 fput(file_priv->anon_filp);
266
267 kfree(file->driver_priv); 261 kfree(file->driver_priv);
268 file->driver_priv = NULL; 262 file->driver_priv = NULL;
269} 263}
@@ -282,11 +276,6 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
282static const struct drm_ioctl_desc exynos_ioctls[] = { 276static const struct drm_ioctl_desc exynos_ioctls[] = {
283 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl, 277 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
284 DRM_UNLOCKED | DRM_AUTH), 278 DRM_UNLOCKED | DRM_AUTH),
285 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
286 exynos_drm_gem_map_offset_ioctl, DRM_UNLOCKED |
287 DRM_AUTH),
288 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
289 exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
290 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, 279 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
291 exynos_drm_gem_get_ioctl, DRM_UNLOCKED), 280 exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
292 DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, 281 DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
@@ -486,21 +475,20 @@ void exynos_drm_component_del(struct device *dev,
486 mutex_unlock(&drm_component_lock); 475 mutex_unlock(&drm_component_lock);
487} 476}
488 477
489static int compare_of(struct device *dev, void *data) 478static int compare_dev(struct device *dev, void *data)
490{ 479{
491 return dev == (struct device *)data; 480 return dev == (struct device *)data;
492} 481}
493 482
494static int exynos_drm_add_components(struct device *dev, struct master *m) 483static struct component_match *exynos_drm_match_add(struct device *dev)
495{ 484{
485 struct component_match *match = NULL;
496 struct component_dev *cdev; 486 struct component_dev *cdev;
497 unsigned int attach_cnt = 0; 487 unsigned int attach_cnt = 0;
498 488
499 mutex_lock(&drm_component_lock); 489 mutex_lock(&drm_component_lock);
500 490
501 list_for_each_entry(cdev, &drm_component_list, list) { 491 list_for_each_entry(cdev, &drm_component_list, list) {
502 int ret;
503
504 /* 492 /*
505 * Add components to master only in case that crtc and 493 * Add components to master only in case that crtc and
506 * encoder/connector device objects exist. 494 * encoder/connector device objects exist.
@@ -515,16 +503,10 @@ static int exynos_drm_add_components(struct device *dev, struct master *m)
515 /* 503 /*
516 * fimd and dpi modules have same device object so add 504 * fimd and dpi modules have same device object so add
517 * only crtc device object in this case. 505 * only crtc device object in this case.
518 *
519 * TODO. if dpi module follows driver-model driver then
520 * below codes can be removed.
521 */ 506 */
522 if (cdev->crtc_dev == cdev->conn_dev) { 507 if (cdev->crtc_dev == cdev->conn_dev) {
523 ret = component_master_add_child(m, compare_of, 508 component_match_add(dev, &match, compare_dev,
524 cdev->crtc_dev); 509 cdev->crtc_dev);
525 if (ret < 0)
526 return ret;
527
528 goto out_lock; 510 goto out_lock;
529 } 511 }
530 512
@@ -534,11 +516,8 @@ static int exynos_drm_add_components(struct device *dev, struct master *m)
534 * connector/encoder need pipe number of crtc when they 516 * connector/encoder need pipe number of crtc when they
535 * are created. 517 * are created.
536 */ 518 */
537 ret = component_master_add_child(m, compare_of, cdev->crtc_dev); 519 component_match_add(dev, &match, compare_dev, cdev->crtc_dev);
538 ret |= component_master_add_child(m, compare_of, 520 component_match_add(dev, &match, compare_dev, cdev->conn_dev);
539 cdev->conn_dev);
540 if (ret < 0)
541 return ret;
542 521
543out_lock: 522out_lock:
544 mutex_lock(&drm_component_lock); 523 mutex_lock(&drm_component_lock);
@@ -546,7 +525,7 @@ out_lock:
546 525
547 mutex_unlock(&drm_component_lock); 526 mutex_unlock(&drm_component_lock);
548 527
549 return attach_cnt ? 0 : -ENODEV; 528 return attach_cnt ? match : ERR_PTR(-EPROBE_DEFER);
550} 529}
551 530
552static int exynos_drm_bind(struct device *dev) 531static int exynos_drm_bind(struct device *dev)
@@ -560,13 +539,13 @@ static void exynos_drm_unbind(struct device *dev)
560} 539}
561 540
562static const struct component_master_ops exynos_drm_ops = { 541static const struct component_master_ops exynos_drm_ops = {
563 .add_components = exynos_drm_add_components,
564 .bind = exynos_drm_bind, 542 .bind = exynos_drm_bind,
565 .unbind = exynos_drm_unbind, 543 .unbind = exynos_drm_unbind,
566}; 544};
567 545
568static int exynos_drm_platform_probe(struct platform_device *pdev) 546static int exynos_drm_platform_probe(struct platform_device *pdev)
569{ 547{
548 struct component_match *match;
570 int ret; 549 int ret;
571 550
572 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 551 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
@@ -633,13 +612,23 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
633 goto err_unregister_ipp_drv; 612 goto err_unregister_ipp_drv;
634#endif 613#endif
635 614
636 ret = component_master_add(&pdev->dev, &exynos_drm_ops); 615 match = exynos_drm_match_add(&pdev->dev);
616 if (IS_ERR(match)) {
617 ret = PTR_ERR(match);
618 goto err_unregister_resources;
619 }
620
621 ret = component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
622 match);
637 if (ret < 0) 623 if (ret < 0)
638 DRM_DEBUG_KMS("re-tried by last sub driver probed later.\n"); 624 goto err_unregister_resources;
639 625
640 return 0; 626 return ret;
627
628err_unregister_resources:
641 629
642#ifdef CONFIG_DRM_EXYNOS_IPP 630#ifdef CONFIG_DRM_EXYNOS_IPP
631 exynos_platform_device_ipp_unregister();
643err_unregister_ipp_drv: 632err_unregister_ipp_drv:
644 platform_driver_unregister(&ipp_driver); 633 platform_driver_unregister(&ipp_driver);
645err_unregister_gsc_drv: 634err_unregister_gsc_drv:
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 69a6fa397d75..d22e640f59a0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -240,7 +240,6 @@ struct exynos_drm_g2d_private {
240struct drm_exynos_file_private { 240struct drm_exynos_file_private {
241 struct exynos_drm_g2d_private *g2d_priv; 241 struct exynos_drm_g2d_private *g2d_priv;
242 struct device *ipp_dev; 242 struct device *ipp_dev;
243 struct file *anon_filp;
244}; 243};
245 244
246/* 245/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 442aa2d00132..24741d8758e8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -114,6 +114,8 @@
114#define DSIM_SYNC_INFORM (1 << 27) 114#define DSIM_SYNC_INFORM (1 << 27)
115#define DSIM_EOT_DISABLE (1 << 28) 115#define DSIM_EOT_DISABLE (1 << 28)
116#define DSIM_MFLUSH_VS (1 << 29) 116#define DSIM_MFLUSH_VS (1 << 29)
117/* This flag is valid only for exynos3250/3472/4415/5260/5430 */
118#define DSIM_CLKLANE_STOP (1 << 30)
117 119
118/* DSIM_ESCMODE */ 120/* DSIM_ESCMODE */
119#define DSIM_TX_TRIGGER_RST (1 << 4) 121#define DSIM_TX_TRIGGER_RST (1 << 4)
@@ -262,6 +264,7 @@ struct exynos_dsi_driver_data {
262 unsigned int plltmr_reg; 264 unsigned int plltmr_reg;
263 265
264 unsigned int has_freqband:1; 266 unsigned int has_freqband:1;
267 unsigned int has_clklane_stop:1;
265}; 268};
266 269
267struct exynos_dsi { 270struct exynos_dsi {
@@ -301,9 +304,16 @@ struct exynos_dsi {
301#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host) 304#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
302#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector) 305#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
303 306
307static struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
308 .plltmr_reg = 0x50,
309 .has_freqband = 1,
310 .has_clklane_stop = 1,
311};
312
304static struct exynos_dsi_driver_data exynos4_dsi_driver_data = { 313static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
305 .plltmr_reg = 0x50, 314 .plltmr_reg = 0x50,
306 .has_freqband = 1, 315 .has_freqband = 1,
316 .has_clklane_stop = 1,
307}; 317};
308 318
309static struct exynos_dsi_driver_data exynos5_dsi_driver_data = { 319static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
@@ -311,6 +321,8 @@ static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
311}; 321};
312 322
313static struct of_device_id exynos_dsi_of_match[] = { 323static struct of_device_id exynos_dsi_of_match[] = {
324 { .compatible = "samsung,exynos3250-mipi-dsi",
325 .data = &exynos3_dsi_driver_data },
314 { .compatible = "samsung,exynos4210-mipi-dsi", 326 { .compatible = "samsung,exynos4210-mipi-dsi",
315 .data = &exynos4_dsi_driver_data }, 327 .data = &exynos4_dsi_driver_data },
316 { .compatible = "samsung,exynos5410-mipi-dsi", 328 { .compatible = "samsung,exynos5410-mipi-dsi",
@@ -421,7 +433,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
421 if (!fout) { 433 if (!fout) {
422 dev_err(dsi->dev, 434 dev_err(dsi->dev,
423 "failed to find PLL PMS for requested frequency\n"); 435 "failed to find PLL PMS for requested frequency\n");
424 return -EFAULT; 436 return 0;
425 } 437 }
426 dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s); 438 dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
427 439
@@ -453,7 +465,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
453 do { 465 do {
454 if (timeout-- == 0) { 466 if (timeout-- == 0) {
455 dev_err(dsi->dev, "PLL failed to stabilize\n"); 467 dev_err(dsi->dev, "PLL failed to stabilize\n");
456 return -EFAULT; 468 return 0;
457 } 469 }
458 reg = readl(dsi->reg_base + DSIM_STATUS_REG); 470 reg = readl(dsi->reg_base + DSIM_STATUS_REG);
459 } while ((reg & DSIM_PLL_STABLE) == 0); 471 } while ((reg & DSIM_PLL_STABLE) == 0);
@@ -569,6 +581,7 @@ static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
569 581
570static int exynos_dsi_init_link(struct exynos_dsi *dsi) 582static int exynos_dsi_init_link(struct exynos_dsi *dsi)
571{ 583{
584 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
572 int timeout; 585 int timeout;
573 u32 reg; 586 u32 reg;
574 u32 lanes_mask; 587 u32 lanes_mask;
@@ -650,6 +663,20 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
650 reg |= DSIM_LANE_EN(lanes_mask); 663 reg |= DSIM_LANE_EN(lanes_mask);
651 writel(reg, dsi->reg_base + DSIM_CONFIG_REG); 664 writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
652 665
666 /*
 667 * Use non-continuous clock mode if the peripheral wants and
668 * host controller supports
669 *
 670 * In non-continuous clock mode, host controller will turn off
671 * the HS clock between high-speed transmissions to reduce
672 * power consumption.
673 */
674 if (driver_data->has_clklane_stop &&
675 dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
676 reg |= DSIM_CLKLANE_STOP;
677 writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
678 }
679
653 /* Check clock and data lane state are stop state */ 680 /* Check clock and data lane state are stop state */
654 timeout = 100; 681 timeout = 100;
655 do { 682 do {
@@ -1414,6 +1441,9 @@ exynos_dsi_detect(struct drm_connector *connector, bool force)
1414 1441
1415static void exynos_dsi_connector_destroy(struct drm_connector *connector) 1442static void exynos_dsi_connector_destroy(struct drm_connector *connector)
1416{ 1443{
1444 drm_connector_unregister(connector);
1445 drm_connector_cleanup(connector);
1446 connector->dev = NULL;
1417} 1447}
1418 1448
1419static struct drm_connector_funcs exynos_dsi_connector_funcs = { 1449static struct drm_connector_funcs exynos_dsi_connector_funcs = {
@@ -1634,10 +1664,10 @@ static void exynos_dsi_unbind(struct device *dev, struct device *master,
1634 1664
1635 exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF); 1665 exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
1636 1666
1637 mipi_dsi_host_unregister(&dsi->dsi_host); 1667 exynos_dsi_connector_destroy(&dsi->connector);
1638
1639 encoder->funcs->destroy(encoder); 1668 encoder->funcs->destroy(encoder);
1640 drm_connector_cleanup(&dsi->connector); 1669
1670 mipi_dsi_host_unregister(&dsi->dsi_host);
1641} 1671}
1642 1672
1643static const struct component_ops exynos_dsi_component_ops = { 1673static const struct component_ops exynos_dsi_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 65a22cad7b36..d346d1e6eda0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -165,6 +165,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
165 165
166 ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); 166 ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
167 if (ret) { 167 if (ret) {
168 kfree(exynos_fb);
168 DRM_ERROR("failed to initialize framebuffer\n"); 169 DRM_ERROR("failed to initialize framebuffer\n");
169 return ERR_PTR(ret); 170 return ERR_PTR(ret);
170 } 171 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 32e63f60e1d1..e12ea90c6237 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -123,6 +123,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
123 123
124 fbi->screen_base = buffer->kvaddr + offset; 124 fbi->screen_base = buffer->kvaddr + offset;
125 fbi->screen_size = size; 125 fbi->screen_size = size;
126 fbi->fix.smem_len = size;
126 127
127 return 0; 128 return 0;
128} 129}
@@ -353,9 +354,6 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
353 354
354 fbdev = to_exynos_fbdev(private->fb_helper); 355 fbdev = to_exynos_fbdev(private->fb_helper);
355 356
356 if (fbdev->exynos_gem_obj)
357 exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
358
359 exynos_drm_fbdev_destroy(dev, private->fb_helper); 357 exynos_drm_fbdev_destroy(dev, private->fb_helper);
360 kfree(fbdev); 358 kfree(fbdev);
361 private->fb_helper = NULL; 359 private->fb_helper = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index ec7cc9ea50df..68d38eb6774d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -336,9 +336,6 @@ static bool fimc_check_ovf(struct fimc_context *ctx)
336 fimc_set_bits(ctx, EXYNOS_CIWDOFST, 336 fimc_set_bits(ctx, EXYNOS_CIWDOFST,
337 EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB | 337 EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
338 EXYNOS_CIWDOFST_CLROVFICR); 338 EXYNOS_CIWDOFST_CLROVFICR);
339 fimc_clear_bits(ctx, EXYNOS_CIWDOFST,
340 EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
341 EXYNOS_CIWDOFST_CLROVFICR);
342 339
343 dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n", 340 dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n",
344 ctx->id, status); 341 ctx->id, status);
@@ -718,24 +715,24 @@ static int fimc_src_set_addr(struct device *dev,
718 case IPP_BUF_ENQUEUE: 715 case IPP_BUF_ENQUEUE:
719 config = &property->config[EXYNOS_DRM_OPS_SRC]; 716 config = &property->config[EXYNOS_DRM_OPS_SRC];
720 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y], 717 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
721 EXYNOS_CIIYSA(buf_id)); 718 EXYNOS_CIIYSA0);
722 719
723 if (config->fmt == DRM_FORMAT_YVU420) { 720 if (config->fmt == DRM_FORMAT_YVU420) {
724 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], 721 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
725 EXYNOS_CIICBSA(buf_id)); 722 EXYNOS_CIICBSA0);
726 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], 723 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
727 EXYNOS_CIICRSA(buf_id)); 724 EXYNOS_CIICRSA0);
728 } else { 725 } else {
729 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], 726 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
730 EXYNOS_CIICBSA(buf_id)); 727 EXYNOS_CIICBSA0);
731 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], 728 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
732 EXYNOS_CIICRSA(buf_id)); 729 EXYNOS_CIICRSA0);
733 } 730 }
734 break; 731 break;
735 case IPP_BUF_DEQUEUE: 732 case IPP_BUF_DEQUEUE:
736 fimc_write(ctx, 0x0, EXYNOS_CIIYSA(buf_id)); 733 fimc_write(ctx, 0x0, EXYNOS_CIIYSA0);
737 fimc_write(ctx, 0x0, EXYNOS_CIICBSA(buf_id)); 734 fimc_write(ctx, 0x0, EXYNOS_CIICBSA0);
738 fimc_write(ctx, 0x0, EXYNOS_CIICRSA(buf_id)); 735 fimc_write(ctx, 0x0, EXYNOS_CIICRSA0);
739 break; 736 break;
740 default: 737 default:
741 /* bypass */ 738 /* bypass */
@@ -1122,67 +1119,34 @@ static int fimc_dst_set_size(struct device *dev, int swap,
1122 return 0; 1119 return 0;
1123} 1120}
1124 1121
1125static int fimc_dst_get_buf_count(struct fimc_context *ctx) 1122static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
1126{
1127 u32 cfg, buf_num;
1128
1129 cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
1130
1131 buf_num = hweight32(cfg);
1132
1133 DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
1134
1135 return buf_num;
1136}
1137
1138static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
1139 enum drm_exynos_ipp_buf_type buf_type) 1123 enum drm_exynos_ipp_buf_type buf_type)
1140{ 1124{
1141 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1142 bool enable;
1143 u32 cfg;
1144 u32 mask = 0x00000001 << buf_id;
1145 int ret = 0;
1146 unsigned long flags; 1125 unsigned long flags;
1126 u32 buf_num;
1127 u32 cfg;
1147 1128
1148 DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type); 1129 DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
1149 1130
1150 spin_lock_irqsave(&ctx->lock, flags); 1131 spin_lock_irqsave(&ctx->lock, flags);
1151 1132
1152 /* mask register set */
1153 cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ); 1133 cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
1154 1134
1155 switch (buf_type) { 1135 if (buf_type == IPP_BUF_ENQUEUE)
1156 case IPP_BUF_ENQUEUE: 1136 cfg |= (1 << buf_id);
1157 enable = true; 1137 else
1158 break; 1138 cfg &= ~(1 << buf_id);
1159 case IPP_BUF_DEQUEUE:
1160 enable = false;
1161 break;
1162 default:
1163 dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
1164 ret = -EINVAL;
1165 goto err_unlock;
1166 }
1167 1139
1168 /* sequence id */
1169 cfg &= ~mask;
1170 cfg |= (enable << buf_id);
1171 fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ); 1140 fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ);
1172 1141
1173 /* interrupt enable */ 1142 buf_num = hweight32(cfg);
1174 if (buf_type == IPP_BUF_ENQUEUE &&
1175 fimc_dst_get_buf_count(ctx) >= FIMC_BUF_START)
1176 fimc_mask_irq(ctx, true);
1177 1143
1178 /* interrupt disable */ 1144 if (buf_type == IPP_BUF_ENQUEUE && buf_num >= FIMC_BUF_START)
1179 if (buf_type == IPP_BUF_DEQUEUE && 1145 fimc_mask_irq(ctx, true);
1180 fimc_dst_get_buf_count(ctx) <= FIMC_BUF_STOP) 1146 else if (buf_type == IPP_BUF_DEQUEUE && buf_num <= FIMC_BUF_STOP)
1181 fimc_mask_irq(ctx, false); 1147 fimc_mask_irq(ctx, false);
1182 1148
1183err_unlock:
1184 spin_unlock_irqrestore(&ctx->lock, flags); 1149 spin_unlock_irqrestore(&ctx->lock, flags);
1185 return ret;
1186} 1150}
1187 1151
1188static int fimc_dst_set_addr(struct device *dev, 1152static int fimc_dst_set_addr(struct device *dev,
@@ -1240,7 +1204,9 @@ static int fimc_dst_set_addr(struct device *dev,
1240 break; 1204 break;
1241 } 1205 }
1242 1206
1243 return fimc_dst_set_buf_seq(ctx, buf_id, buf_type); 1207 fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
1208
1209 return 0;
1244} 1210}
1245 1211
1246static struct exynos_drm_ipp_ops fimc_dst_ops = { 1212static struct exynos_drm_ipp_ops fimc_dst_ops = {
@@ -1291,14 +1257,11 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
1291 1257
1292 DRM_DEBUG_KMS("buf_id[%d]\n", buf_id); 1258 DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
1293 1259
1294 if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) { 1260 fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
1295 DRM_ERROR("failed to dequeue.\n");
1296 return IRQ_HANDLED;
1297 }
1298 1261
1299 event_work->ippdrv = ippdrv; 1262 event_work->ippdrv = ippdrv;
1300 event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id; 1263 event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
1301 queue_work(ippdrv->event_workq, (struct work_struct *)event_work); 1264 queue_work(ippdrv->event_workq, &event_work->work);
1302 1265
1303 return IRQ_HANDLED; 1266 return IRQ_HANDLED;
1304} 1267}
@@ -1590,11 +1553,8 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1590 1553
1591 fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK); 1554 fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK);
1592 1555
1593 if (cmd == IPP_CMD_M2M) { 1556 if (cmd == IPP_CMD_M2M)
1594 fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
1595
1596 fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID); 1557 fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
1597 }
1598 1558
1599 return 0; 1559 return 0;
1600} 1560}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 5d09e33fef87..085b066a9993 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -104,6 +104,14 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
104 .has_limited_fmt = 1, 104 .has_limited_fmt = 1,
105}; 105};
106 106
107static struct fimd_driver_data exynos3_fimd_driver_data = {
108 .timing_base = 0x20000,
109 .lcdblk_offset = 0x210,
110 .lcdblk_bypass_shift = 1,
111 .has_shadowcon = 1,
112 .has_vidoutcon = 1,
113};
114
107static struct fimd_driver_data exynos4_fimd_driver_data = { 115static struct fimd_driver_data exynos4_fimd_driver_data = {
108 .timing_base = 0x0, 116 .timing_base = 0x0,
109 .lcdblk_offset = 0x210, 117 .lcdblk_offset = 0x210,
@@ -168,6 +176,8 @@ struct fimd_context {
168static const struct of_device_id fimd_driver_dt_match[] = { 176static const struct of_device_id fimd_driver_dt_match[] = {
169 { .compatible = "samsung,s3c6400-fimd", 177 { .compatible = "samsung,s3c6400-fimd",
170 .data = &s3c64xx_fimd_driver_data }, 178 .data = &s3c64xx_fimd_driver_data },
179 { .compatible = "samsung,exynos3250-fimd",
180 .data = &exynos3_fimd_driver_data },
171 { .compatible = "samsung,exynos4210-fimd", 181 { .compatible = "samsung,exynos4210-fimd",
172 .data = &exynos4_fimd_driver_data }, 182 .data = &exynos4_fimd_driver_data },
173 { .compatible = "samsung,exynos5250-fimd", 183 { .compatible = "samsung,exynos5250-fimd",
@@ -204,7 +214,6 @@ static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
204 DRM_DEBUG_KMS("vblank wait timed out.\n"); 214 DRM_DEBUG_KMS("vblank wait timed out.\n");
205} 215}
206 216
207
208static void fimd_clear_channel(struct exynos_drm_manager *mgr) 217static void fimd_clear_channel(struct exynos_drm_manager *mgr)
209{ 218{
210 struct fimd_context *ctx = mgr->ctx; 219 struct fimd_context *ctx = mgr->ctx;
@@ -214,17 +223,31 @@ static void fimd_clear_channel(struct exynos_drm_manager *mgr)
 
 	/* Check if any channel is enabled. */
 	for (win = 0; win < WINDOWS_NR; win++) {
-		u32 val = readl(ctx->regs + SHADOWCON);
-		if (val & SHADOWCON_CHx_ENABLE(win)) {
-			val &= ~SHADOWCON_CHx_ENABLE(win);
-			writel(val, ctx->regs + SHADOWCON);
+		u32 val = readl(ctx->regs + WINCON(win));
+
+		if (val & WINCONx_ENWIN) {
+			/* wincon */
+			val &= ~WINCONx_ENWIN;
+			writel(val, ctx->regs + WINCON(win));
+
+			/* unprotect windows */
+			if (ctx->driver_data->has_shadowcon) {
+				val = readl(ctx->regs + SHADOWCON);
+				val &= ~SHADOWCON_CHx_ENABLE(win);
+				writel(val, ctx->regs + SHADOWCON);
+			}
 			ch_enabled = 1;
 		}
 	}
 
 	/* Wait for vsync, as disable channel takes effect at next vsync */
-	if (ch_enabled)
+	if (ch_enabled) {
+		unsigned int state = ctx->suspended;
+
+		ctx->suspended = 0;
 		fimd_wait_for_vblank(mgr);
+		ctx->suspended = state;
+	}
 }
229 252
230static int fimd_mgr_initialize(struct exynos_drm_manager *mgr, 253static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
@@ -237,23 +260,6 @@ static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
237 mgr->drm_dev = ctx->drm_dev = drm_dev; 260 mgr->drm_dev = ctx->drm_dev = drm_dev;
238 mgr->pipe = ctx->pipe = priv->pipe++; 261 mgr->pipe = ctx->pipe = priv->pipe++;
239 262
240 /*
241 * enable drm irq mode.
242 * - with irq_enabled = true, we can use the vblank feature.
243 *
244 * P.S. note that we wouldn't use drm irq handler but
245 * just specific driver own one instead because
246 * drm framework supports only one irq handler.
247 */
248 drm_dev->irq_enabled = true;
249
250 /*
251 * with vblank_disable_allowed = true, vblank interrupt will be disabled
252 * by drm timer once a current process gives up ownership of
253 * vblank event.(after drm_vblank_put function is called)
254 */
255 drm_dev->vblank_disable_allowed = true;
256
257 /* attach this sub driver to iommu mapping if supported. */ 263 /* attach this sub driver to iommu mapping if supported. */
258 if (is_drm_iommu_supported(ctx->drm_dev)) { 264 if (is_drm_iommu_supported(ctx->drm_dev)) {
259 /* 265 /*
@@ -1051,7 +1057,6 @@ static void fimd_unbind(struct device *dev, struct device *master,
1051{ 1057{
1052 struct exynos_drm_manager *mgr = dev_get_drvdata(dev); 1058 struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
1053 struct fimd_context *ctx = fimd_manager.ctx; 1059 struct fimd_context *ctx = fimd_manager.ctx;
1054 struct drm_crtc *crtc = mgr->crtc;
1055 1060
1056 fimd_dpms(mgr, DRM_MODE_DPMS_OFF); 1061 fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
1057 1062
@@ -1059,8 +1064,6 @@ static void fimd_unbind(struct device *dev, struct device *master,
1059 exynos_dpi_remove(dev); 1064 exynos_dpi_remove(dev);
1060 1065
1061 fimd_mgr_remove(mgr); 1066 fimd_mgr_remove(mgr);
1062
1063 crtc->funcs->destroy(crtc);
1064} 1067}
1065 1068
1066static const struct component_ops fimd_component_ops = { 1069static const struct component_ops fimd_component_ops = {
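The new exynos3250 compatible string added above is resolved to its fimd_driver_data at probe time through the standard OF match-table lookup; a hedged sketch of that pattern (structure, data, and function names here are illustrative, not the driver's exact code):

	#include <linux/of.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	struct my_driver_data {
		unsigned int timing_base;	/* per-SoC register offset */
	};

	static const struct my_driver_data example_data = { .timing_base = 0x20000 };

	static const struct of_device_id my_dt_match[] = {
		{ .compatible = "vendor,example-ip", .data = &example_data },
		{ /* sentinel */ },
	};

	static int my_probe(struct platform_device *pdev)
	{
		const struct of_device_id *match;
		const struct my_driver_data *drv_data;

		match = of_match_device(my_dt_match, &pdev->dev);
		if (!match)
			return -ENODEV;

		drv_data = match->data;	/* SoC-specific parameters */
		return 0;
	}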
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 15db80138382..0d5b9698d384 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -318,40 +318,16 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
 	drm_gem_object_unreference_unlocked(obj);
 }
 
-int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
-				    struct drm_file *file_priv)
-{
-	struct drm_exynos_gem_map_off *args = data;
-
-	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
-			args->handle, (unsigned long)args->offset);
-
-	if (!(dev->driver->driver_features & DRIVER_GEM)) {
-		DRM_ERROR("does not support GEM.\n");
-		return -ENODEV;
-	}
-
-	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
-			&args->offset);
-}
-
-int exynos_drm_gem_mmap_buffer(struct file *filp,
+int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
 				      struct vm_area_struct *vma)
 {
-	struct drm_gem_object *obj = filp->private_data;
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct drm_device *drm_dev = obj->dev;
+	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
 	struct exynos_drm_gem_buf *buffer;
 	unsigned long vm_size;
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
-
-	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_private_data = obj;
-	vma->vm_ops = drm_dev->driver->gem_vm_ops;
-
-	update_vm_cache_attr(exynos_gem_obj, vma);
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_pgoff = 0;
 
 	vm_size = vma->vm_end - vma->vm_start;
 
@@ -373,60 +349,6 @@ int exynos_drm_gem_mmap_buffer(struct file *filp,
373 return ret; 349 return ret;
374 } 350 }
375 351
376 /*
377 * take a reference to this mapping of the object. And this reference
378 * is unreferenced by the corresponding vm_close call.
379 */
380 drm_gem_object_reference(obj);
381
382 drm_vm_open_locked(drm_dev, vma);
383
384 return 0;
385}
386
387int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
388 struct drm_file *file_priv)
389{
390 struct drm_exynos_file_private *exynos_file_priv;
391 struct drm_exynos_gem_mmap *args = data;
392 struct drm_gem_object *obj;
393 struct file *anon_filp;
394 unsigned long addr;
395
396 if (!(dev->driver->driver_features & DRIVER_GEM)) {
397 DRM_ERROR("does not support GEM.\n");
398 return -ENODEV;
399 }
400
401 mutex_lock(&dev->struct_mutex);
402
403 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
404 if (!obj) {
405 DRM_ERROR("failed to lookup gem object.\n");
406 mutex_unlock(&dev->struct_mutex);
407 return -EINVAL;
408 }
409
410 exynos_file_priv = file_priv->driver_priv;
411 anon_filp = exynos_file_priv->anon_filp;
412 anon_filp->private_data = obj;
413
414 addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE,
415 MAP_SHARED, 0);
416
417 drm_gem_object_unreference(obj);
418
419 if (IS_ERR_VALUE(addr)) {
420 mutex_unlock(&dev->struct_mutex);
421 return (int)addr;
422 }
423
424 mutex_unlock(&dev->struct_mutex);
425
426 args->mapped = addr;
427
428 DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
429
430 return 0; 352 return 0;
431} 353}
432 354
@@ -710,16 +632,20 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	exynos_gem_obj = to_exynos_gem_obj(obj);
 
 	ret = check_gem_flags(exynos_gem_obj->flags);
-	if (ret) {
-		drm_gem_vm_close(vma);
-		drm_gem_free_mmap_offset(obj);
-		return ret;
-	}
-
-	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_flags |= VM_MIXEDMAP;
+	if (ret)
+		goto err_close_vm;
 
 	update_vm_cache_attr(exynos_gem_obj, vma);
 
+	ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
+	if (ret)
+		goto err_close_vm;
+
+	return ret;
+
+err_close_vm:
+	drm_gem_vm_close(vma);
+	drm_gem_free_mmap_offset(obj);
+
 	return ret;
 }
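The reworked mmap path above funnels every failure through a single err_close_vm label. A small generic sketch of the same single-unwind-path idiom, with illustrative allocations standing in for the DRM-specific cleanup steps:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	/* Sketch of the cleanup-goto idiom: unwind only what already succeeded. */
	static int example_setup(void **out_a, void **out_b)
	{
		void *a, *b;
		int ret = -ENOMEM;

		a = kzalloc(64, GFP_KERNEL);
		if (!a)
			return ret;

		b = vzalloc(4096);
		if (!b)
			goto err_free_a;

		*out_a = a;
		*out_b = b;
		return 0;

	err_free_a:
		kfree(a);
		return ret;
	}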
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 1592c0ba7de8..ec58fe9c40df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -12,6 +12,8 @@
12#ifndef _EXYNOS_DRM_GEM_H_ 12#ifndef _EXYNOS_DRM_GEM_H_
13#define _EXYNOS_DRM_GEM_H_ 13#define _EXYNOS_DRM_GEM_H_
14 14
15#include <drm/drm_gem.h>
16
15#define to_exynos_gem_obj(x) container_of(x,\ 17#define to_exynos_gem_obj(x) container_of(x,\
16 struct exynos_drm_gem_obj, base) 18 struct exynos_drm_gem_obj, base)
17 19
@@ -111,20 +113,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
111 unsigned int gem_handle, 113 unsigned int gem_handle,
112 struct drm_file *filp); 114 struct drm_file *filp);
113 115
114/* get buffer offset to map to user space. */
115int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
116 struct drm_file *file_priv);
117
118/*
119 * mmap the physically continuous memory that a gem object contains
120 * to user space.
121 */
122int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
123 struct drm_file *file_priv);
124
125int exynos_drm_gem_mmap_buffer(struct file *filp,
126 struct vm_area_struct *vma);
127
128/* map user space allocated by malloc to pages. */ 116/* map user space allocated by malloc to pages. */
129int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data, 117int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
130 struct drm_file *file_priv); 118 struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 9e3ff1672965..c6a013fc321c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1326,8 +1326,7 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
 			buf_id[EXYNOS_DRM_OPS_SRC];
 		event_work->buf_id[EXYNOS_DRM_OPS_DST] =
 			buf_id[EXYNOS_DRM_OPS_DST];
-		queue_work(ippdrv->event_workq,
-				(struct work_struct *)event_work);
+		queue_work(ippdrv->event_workq, &event_work->work);
 	}
 
 	return IRQ_HANDLED;
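The queue_work() change above passes the embedded work_struct member instead of casting the containing structure; a brief sketch of that pattern (struct and function names are illustrative) showing how the handler recovers its container with container_of():

	#include <linux/printk.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	/* Illustrative event descriptor with an embedded work item. */
	struct my_event_work {
		struct work_struct work;
		int buf_id;
	};

	static void my_event_handler(struct work_struct *work)
	{
		/* Recover the containing structure from the embedded member. */
		struct my_event_work *ev =
			container_of(work, struct my_event_work, work);

		pr_info("event for buf_id %d\n", ev->buf_id);
		kfree(ev);
	}

	static void my_queue_event(struct workqueue_struct *wq, int buf_id)
	{
		struct my_event_work *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

		if (!ev)
			return;

		ev->buf_id = buf_id;
		INIT_WORK(&ev->work, my_event_handler);
		queue_work(wq, &ev->work);	/* pass the work member, not a cast */
	}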
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index c411399070d6..00d74b18f7cb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -75,7 +75,6 @@ struct drm_exynos_ipp_mem_node {
75 u32 prop_id; 75 u32 prop_id;
76 u32 buf_id; 76 u32 buf_id;
77 struct drm_exynos_ipp_buf_info buf_info; 77 struct drm_exynos_ipp_buf_info buf_info;
78 struct drm_file *filp;
79}; 78};
80 79
81/* 80/*
@@ -319,44 +318,6 @@ static void ipp_print_property(struct drm_exynos_ipp_property *property,
319 sz->hsize, sz->vsize, config->flip, config->degree); 318 sz->hsize, sz->vsize, config->flip, config->degree);
320} 319}
321 320
322static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
323{
324 struct exynos_drm_ippdrv *ippdrv;
325 struct drm_exynos_ipp_cmd_node *c_node;
326 u32 prop_id = property->prop_id;
327
328 DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
329
330 ippdrv = ipp_find_drv_by_handle(prop_id);
331 if (IS_ERR(ippdrv)) {
332 DRM_ERROR("failed to get ipp driver.\n");
333 return -EINVAL;
334 }
335
336 /*
337 * Find command node using command list in ippdrv.
338 * when we find this command no using prop_id.
339 * return property information set in this command node.
340 */
341 mutex_lock(&ippdrv->cmd_lock);
342 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
343 if ((c_node->property.prop_id == prop_id) &&
344 (c_node->state == IPP_STATE_STOP)) {
345 mutex_unlock(&ippdrv->cmd_lock);
346 DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
347 property->cmd, (int)ippdrv);
348
349 c_node->property = *property;
350 return 0;
351 }
352 }
353 mutex_unlock(&ippdrv->cmd_lock);
354
355 DRM_ERROR("failed to search property.\n");
356
357 return -EINVAL;
358}
359
360static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void) 321static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
361{ 322{
362 struct drm_exynos_ipp_cmd_work *cmd_work; 323 struct drm_exynos_ipp_cmd_work *cmd_work;
@@ -392,6 +353,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
392 struct drm_exynos_ipp_property *property = data; 353 struct drm_exynos_ipp_property *property = data;
393 struct exynos_drm_ippdrv *ippdrv; 354 struct exynos_drm_ippdrv *ippdrv;
394 struct drm_exynos_ipp_cmd_node *c_node; 355 struct drm_exynos_ipp_cmd_node *c_node;
356 u32 prop_id;
395 int ret, i; 357 int ret, i;
396 358
397 if (!ctx) { 359 if (!ctx) {
@@ -404,6 +366,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
404 return -EINVAL; 366 return -EINVAL;
405 } 367 }
406 368
369 prop_id = property->prop_id;
370
407 /* 371 /*
408 * This is log print for user application property. 372 * This is log print for user application property.
409 * user application set various property. 373 * user application set various property.
@@ -412,14 +376,24 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
 		ipp_print_property(property, i);
 
 	/*
-	 * set property ioctl generated new prop_id.
-	 * but in this case already asigned prop_id using old set property.
-	 * e.g PAUSE state. this case supports find current prop_id and use it
-	 * instead of allocation.
+	 * In case prop_id is not zero try to set existing property.
 	 */
-	if (property->prop_id) {
-		DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
-		return ipp_find_and_set_property(property);
+	if (prop_id) {
+		c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id);
+
+		if (!c_node || c_node->filp != file) {
+			DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id);
+			return -EINVAL;
+		}
+
+		if (c_node->state != IPP_STATE_STOP) {
+			DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id);
+			return -EINVAL;
+		}
+
+		c_node->property = *property;
+
+		return 0;
 	}
 
425 /* find ipp driver using ipp id */ 399 /* find ipp driver using ipp id */
@@ -445,9 +419,9 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
445 property->prop_id, property->cmd, (int)ippdrv); 419 property->prop_id, property->cmd, (int)ippdrv);
446 420
447 /* stored property information and ippdrv in private data */ 421 /* stored property information and ippdrv in private data */
448 c_node->dev = dev;
449 c_node->property = *property; 422 c_node->property = *property;
450 c_node->state = IPP_STATE_IDLE; 423 c_node->state = IPP_STATE_IDLE;
424 c_node->filp = file;
451 425
452 c_node->start_work = ipp_create_cmd_work(); 426 c_node->start_work = ipp_create_cmd_work();
453 if (IS_ERR(c_node->start_work)) { 427 if (IS_ERR(c_node->start_work)) {
@@ -499,105 +473,37 @@ err_clear:
 	return ret;
 }
 
-static void ipp_clean_cmd_node(struct ipp_context *ctx,
-		struct drm_exynos_ipp_cmd_node *c_node)
-{
-	/* delete list */
-	list_del(&c_node->list);
-
-	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
-			c_node->property.prop_id);
-
-	/* destroy mutex */
-	mutex_destroy(&c_node->lock);
-	mutex_destroy(&c_node->mem_lock);
-	mutex_destroy(&c_node->event_lock);
-
-	/* free command node */
-	kfree(c_node->start_work);
-	kfree(c_node->stop_work);
-	kfree(c_node->event_work);
-	kfree(c_node);
-}
-
-static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
-{
-	switch (c_node->property.cmd) {
-	case IPP_CMD_WB:
-		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
-	case IPP_CMD_OUTPUT:
-		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
-	case IPP_CMD_M2M:
-	default:
-		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
-			!list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
-	}
-}
-
-static struct drm_exynos_ipp_mem_node
-	*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
-		struct drm_exynos_ipp_queue_buf *qbuf)
-{
-	struct drm_exynos_ipp_mem_node *m_node;
-	struct list_head *head;
-	int count = 0;
-
-	DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
-
-	/* source/destination memory list */
-	head = &c_node->mem_list[qbuf->ops_id];
-
-	/* find memory node from memory list */
-	list_for_each_entry(m_node, head, list) {
-		DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
-
-		/* compare buffer id */
-		if (m_node->buf_id == qbuf->buf_id)
-			return m_node;
-	}
-
-	return NULL;
-}
-
-static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
+static int ipp_put_mem_node(struct drm_device *drm_dev,
 		struct drm_exynos_ipp_cmd_node *c_node,
 		struct drm_exynos_ipp_mem_node *m_node)
 {
-	struct exynos_drm_ipp_ops *ops = NULL;
-	int ret = 0;
+	int i;
 
 	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
 
 	if (!m_node) {
-		DRM_ERROR("invalid queue node.\n");
+		DRM_ERROR("invalid dequeue node.\n");
 		return -EFAULT;
 	}
 
 	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
 
-	/* get operations callback */
-	ops = ippdrv->ops[m_node->ops_id];
-	if (!ops) {
-		DRM_ERROR("not support ops.\n");
-		return -EFAULT;
+	/* put gem buffer */
+	for_each_ipp_planar(i) {
+		unsigned long handle = m_node->buf_info.handles[i];
+		if (handle)
+			exynos_drm_gem_put_dma_addr(drm_dev, handle,
+							c_node->filp);
 	}
 
-	/* set address and enable irq */
-	if (ops->set_addr) {
-		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
-			m_node->buf_id, IPP_BUF_ENQUEUE);
-		if (ret) {
-			DRM_ERROR("failed to set addr.\n");
-			return ret;
-		}
-	}
+	list_del(&m_node->list);
+	kfree(m_node);
 
-	return ret;
+	return 0;
 }
 
 static struct drm_exynos_ipp_mem_node
 	*ipp_get_mem_node(struct drm_device *drm_dev,
-		struct drm_file *file,
 		struct drm_exynos_ipp_cmd_node *c_node,
 		struct drm_exynos_ipp_queue_buf *qbuf)
 {
@@ -615,6 +521,7 @@ static struct drm_exynos_ipp_mem_node
615 m_node->ops_id = qbuf->ops_id; 521 m_node->ops_id = qbuf->ops_id;
616 m_node->prop_id = qbuf->prop_id; 522 m_node->prop_id = qbuf->prop_id;
617 m_node->buf_id = qbuf->buf_id; 523 m_node->buf_id = qbuf->buf_id;
524 INIT_LIST_HEAD(&m_node->list);
618 525
619 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id); 526 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
620 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); 527 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
@@ -627,10 +534,11 @@ static struct drm_exynos_ipp_mem_node
 			dma_addr_t *addr;
 
 			addr = exynos_drm_gem_get_dma_addr(drm_dev,
-					qbuf->handle[i], file);
+					qbuf->handle[i], c_node->filp);
 			if (IS_ERR(addr)) {
 				DRM_ERROR("failed to get addr.\n");
-				goto err_clear;
+				ipp_put_mem_node(drm_dev, c_node, m_node);
+				return ERR_PTR(-EFAULT);
 			}
 
 			buf_info->handles[i] = qbuf->handle[i];
@@ -640,46 +548,30 @@ static struct drm_exynos_ipp_mem_node
 		}
 	}
 
-	m_node->filp = file;
 	mutex_lock(&c_node->mem_lock);
 	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
 	mutex_unlock(&c_node->mem_lock);
 
 	return m_node;
-
-err_clear:
-	kfree(m_node);
-	return ERR_PTR(-EFAULT);
 }
 
-static int ipp_put_mem_node(struct drm_device *drm_dev,
-		struct drm_exynos_ipp_cmd_node *c_node,
-		struct drm_exynos_ipp_mem_node *m_node)
+static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
+		struct drm_exynos_ipp_cmd_node *c_node, int ops)
 {
-	int i;
-
-	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
-
-	if (!m_node) {
-		DRM_ERROR("invalid dequeue node.\n");
-		return -EFAULT;
-	}
-
-	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
+	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+	struct list_head *head = &c_node->mem_list[ops];
 
-	/* put gem buffer */
-	for_each_ipp_planar(i) {
-		unsigned long handle = m_node->buf_info.handles[i];
-		if (handle)
-			exynos_drm_gem_put_dma_addr(drm_dev, handle,
-					m_node->filp);
+	mutex_lock(&c_node->mem_lock);
+
+	list_for_each_entry_safe(m_node, tm_node, head, list) {
+		int ret;
+
+		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+		if (ret)
+			DRM_ERROR("failed to put m_node.\n");
 	}
 
-	/* delete list in queue */
-	list_del(&m_node->list);
-	kfree(m_node);
-
-	return 0;
+	mutex_unlock(&c_node->mem_lock);
 }
684 576
685static void ipp_free_event(struct drm_pending_event *event) 577static void ipp_free_event(struct drm_pending_event *event)
@@ -688,7 +580,6 @@ static void ipp_free_event(struct drm_pending_event *event)
688} 580}
689 581
690static int ipp_get_event(struct drm_device *drm_dev, 582static int ipp_get_event(struct drm_device *drm_dev,
691 struct drm_file *file,
692 struct drm_exynos_ipp_cmd_node *c_node, 583 struct drm_exynos_ipp_cmd_node *c_node,
693 struct drm_exynos_ipp_queue_buf *qbuf) 584 struct drm_exynos_ipp_queue_buf *qbuf)
694{ 585{
@@ -700,7 +591,7 @@ static int ipp_get_event(struct drm_device *drm_dev,
 	e = kzalloc(sizeof(*e), GFP_KERNEL);
 	if (!e) {
 		spin_lock_irqsave(&drm_dev->event_lock, flags);
-		file->event_space += sizeof(e->event);
+		c_node->filp->event_space += sizeof(e->event);
 		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
 		return -ENOMEM;
 	}
@@ -712,7 +603,7 @@ static int ipp_get_event(struct drm_device *drm_dev,
 	e->event.prop_id = qbuf->prop_id;
 	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
 	e->base.event = &e->event.base;
-	e->base.file_priv = file;
+	e->base.file_priv = c_node->filp;
 	e->base.destroy = ipp_free_event;
 	mutex_lock(&c_node->event_lock);
 	list_add_tail(&e->base.link, &c_node->event_list);
@@ -757,6 +648,115 @@ out_unlock:
757 return; 648 return;
758} 649}
759 650
651static void ipp_clean_cmd_node(struct ipp_context *ctx,
652 struct drm_exynos_ipp_cmd_node *c_node)
653{
654 int i;
655
656 /* cancel works */
657 cancel_work_sync(&c_node->start_work->work);
658 cancel_work_sync(&c_node->stop_work->work);
659 cancel_work_sync(&c_node->event_work->work);
660
661 /* put event */
662 ipp_put_event(c_node, NULL);
663
664 for_each_ipp_ops(i)
665 ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i);
666
667 /* delete list */
668 list_del(&c_node->list);
669
670 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
671 c_node->property.prop_id);
672
673 /* destroy mutex */
674 mutex_destroy(&c_node->lock);
675 mutex_destroy(&c_node->mem_lock);
676 mutex_destroy(&c_node->event_lock);
677
678 /* free command node */
679 kfree(c_node->start_work);
680 kfree(c_node->stop_work);
681 kfree(c_node->event_work);
682 kfree(c_node);
683}
684
685static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
686{
687 switch (c_node->property.cmd) {
688 case IPP_CMD_WB:
689 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
690 case IPP_CMD_OUTPUT:
691 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
692 case IPP_CMD_M2M:
693 default:
694 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
695 !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
696 }
697}
698
699static struct drm_exynos_ipp_mem_node
700 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
701 struct drm_exynos_ipp_queue_buf *qbuf)
702{
703 struct drm_exynos_ipp_mem_node *m_node;
704 struct list_head *head;
705 int count = 0;
706
707 DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
708
709 /* source/destination memory list */
710 head = &c_node->mem_list[qbuf->ops_id];
711
712 /* find memory node from memory list */
713 list_for_each_entry(m_node, head, list) {
714 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
715
716 /* compare buffer id */
717 if (m_node->buf_id == qbuf->buf_id)
718 return m_node;
719 }
720
721 return NULL;
722}
723
724static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
725 struct drm_exynos_ipp_cmd_node *c_node,
726 struct drm_exynos_ipp_mem_node *m_node)
727{
728 struct exynos_drm_ipp_ops *ops = NULL;
729 int ret = 0;
730
731 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
732
733 if (!m_node) {
734 DRM_ERROR("invalid queue node.\n");
735 return -EFAULT;
736 }
737
738 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
739
740 /* get operations callback */
741 ops = ippdrv->ops[m_node->ops_id];
742 if (!ops) {
743 DRM_ERROR("not support ops.\n");
744 return -EFAULT;
745 }
746
747 /* set address and enable irq */
748 if (ops->set_addr) {
749 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
750 m_node->buf_id, IPP_BUF_ENQUEUE);
751 if (ret) {
752 DRM_ERROR("failed to set addr.\n");
753 return ret;
754 }
755 }
756
757 return ret;
758}
759
760static void ipp_handle_cmd_work(struct device *dev, 760static void ipp_handle_cmd_work(struct device *dev,
761 struct exynos_drm_ippdrv *ippdrv, 761 struct exynos_drm_ippdrv *ippdrv,
762 struct drm_exynos_ipp_cmd_work *cmd_work, 762 struct drm_exynos_ipp_cmd_work *cmd_work,
@@ -766,7 +766,7 @@ static void ipp_handle_cmd_work(struct device *dev,
 
 	cmd_work->ippdrv = ippdrv;
 	cmd_work->c_node = c_node;
-	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
+	queue_work(ctx->cmd_workq, &cmd_work->work);
 }
771 771
772static int ipp_queue_buf_with_run(struct device *dev, 772static int ipp_queue_buf_with_run(struct device *dev,
@@ -872,7 +872,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
 	/* find command node */
 	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
 		qbuf->prop_id);
-	if (!c_node) {
+	if (!c_node || c_node->filp != file) {
 		DRM_ERROR("failed to get command node.\n");
 		return -ENODEV;
 	}
@@ -881,7 +881,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
 	switch (qbuf->buf_type) {
 	case IPP_BUF_ENQUEUE:
 		/* get memory node */
-		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
+		m_node = ipp_get_mem_node(drm_dev, c_node, qbuf);
 		if (IS_ERR(m_node)) {
 			DRM_ERROR("failed to get m_node.\n");
 			return PTR_ERR(m_node);
@@ -894,7 +894,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
 		 */
 		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
 			/* get event for destination buffer */
-			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
+			ret = ipp_get_event(drm_dev, c_node, qbuf);
 			if (ret) {
 				DRM_ERROR("failed to get event.\n");
 				goto err_clean_node;
@@ -1007,7 +1007,7 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
 
 	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
 		cmd_ctrl->prop_id);
-	if (!c_node) {
+	if (!c_node || c_node->filp != file) {
 		DRM_ERROR("invalid command node list.\n");
 		return -ENODEV;
 	}
@@ -1257,80 +1257,39 @@ static int ipp_stop_property(struct drm_device *drm_dev,
 		struct exynos_drm_ippdrv *ippdrv,
 		struct drm_exynos_ipp_cmd_node *c_node)
 {
-	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
 	struct drm_exynos_ipp_property *property = &c_node->property;
-	struct list_head *head;
-	int ret = 0, i;
+	int i;
 
 	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
 
-	/* put event */
-	ipp_put_event(c_node, NULL);
-
-	mutex_lock(&c_node->mem_lock);
+	/* stop operations */
+	if (ippdrv->stop)
+		ippdrv->stop(ippdrv->dev, property->cmd);
 
 	/* check command */
 	switch (property->cmd) {
 	case IPP_CMD_M2M:
-		for_each_ipp_ops(i) {
-			/* source/destination memory list */
-			head = &c_node->mem_list[i];
-
-			list_for_each_entry_safe(m_node, tm_node,
-				head, list) {
-				ret = ipp_put_mem_node(drm_dev, c_node,
-					m_node);
-				if (ret) {
-					DRM_ERROR("failed to put m_node.\n");
-					goto err_clear;
-				}
-			}
-		}
+		for_each_ipp_ops(i)
+			ipp_clean_mem_nodes(drm_dev, c_node, i);
 		break;
 	case IPP_CMD_WB:
-		/* destination memory list */
-		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
-
-		list_for_each_entry_safe(m_node, tm_node, head, list) {
-			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
-			if (ret) {
-				DRM_ERROR("failed to put m_node.\n");
-				goto err_clear;
-			}
-		}
+		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
 		break;
 	case IPP_CMD_OUTPUT:
-		/* source memory list */
-		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
-
-		list_for_each_entry_safe(m_node, tm_node, head, list) {
-			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
-			if (ret) {
-				DRM_ERROR("failed to put m_node.\n");
-				goto err_clear;
-			}
-		}
+		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
 		break;
 	default:
 		DRM_ERROR("invalid operations.\n");
-		ret = -EINVAL;
-		goto err_clear;
+		return -EINVAL;
 	}
 
-err_clear:
-	mutex_unlock(&c_node->mem_lock);
-
-	/* stop operations */
-	if (ippdrv->stop)
-		ippdrv->stop(ippdrv->dev, property->cmd);
-
-	return ret;
+	return 0;
 }
 
 void ipp_sched_cmd(struct work_struct *work)
 {
 	struct drm_exynos_ipp_cmd_work *cmd_work =
-		(struct drm_exynos_ipp_cmd_work *)work;
+		container_of(work, struct drm_exynos_ipp_cmd_work, work);
 	struct exynos_drm_ippdrv *ippdrv;
 	struct drm_exynos_ipp_cmd_node *c_node;
 	struct drm_exynos_ipp_property *property;
@@ -1543,7 +1502,7 @@ err_event_unlock:
 void ipp_sched_event(struct work_struct *work)
 {
 	struct drm_exynos_ipp_event_work *event_work =
-		(struct drm_exynos_ipp_event_work *)work;
+		container_of(work, struct drm_exynos_ipp_event_work, work);
 	struct exynos_drm_ippdrv *ippdrv;
 	struct drm_exynos_ipp_cmd_node *c_node;
 	int ret;
@@ -1646,11 +1605,11 @@ err:
 
 static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
 {
-	struct exynos_drm_ippdrv *ippdrv;
+	struct exynos_drm_ippdrv *ippdrv, *t;
 	struct ipp_context *ctx = get_ipp_context(dev);
 
 	/* get ipp driver entry */
-	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
 		if (is_drm_iommu_supported(drm_dev))
 			drm_iommu_detach_device(drm_dev, ippdrv->dev);
 
@@ -1677,14 +1636,11 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1677static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev, 1636static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1678 struct drm_file *file) 1637 struct drm_file *file)
1679{ 1638{
1680 struct drm_exynos_file_private *file_priv = file->driver_priv;
1681 struct exynos_drm_ippdrv *ippdrv = NULL; 1639 struct exynos_drm_ippdrv *ippdrv = NULL;
1682 struct ipp_context *ctx = get_ipp_context(dev); 1640 struct ipp_context *ctx = get_ipp_context(dev);
1683 struct drm_exynos_ipp_cmd_node *c_node, *tc_node; 1641 struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1684 int count = 0; 1642 int count = 0;
1685 1643
1686 DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev);
1687
1688 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 1644 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1689 mutex_lock(&ippdrv->cmd_lock); 1645 mutex_lock(&ippdrv->cmd_lock);
1690 list_for_each_entry_safe(c_node, tc_node, 1646 list_for_each_entry_safe(c_node, tc_node,
@@ -1692,7 +1648,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
 			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
 				count++, (int)ippdrv);
 
-			if (c_node->dev == file_priv->ipp_dev) {
+			if (c_node->filp == file) {
 				/*
 				 * userland goto unnormal state. process killed.
 				 * and close the file.
@@ -1808,63 +1764,12 @@ static int ipp_remove(struct platform_device *pdev)
1808 return 0; 1764 return 0;
1809} 1765}
1810 1766
1811static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
1812{
1813 DRM_DEBUG_KMS("enable[%d]\n", enable);
1814
1815 return 0;
1816}
1817
1818#ifdef CONFIG_PM_SLEEP
1819static int ipp_suspend(struct device *dev)
1820{
1821 struct ipp_context *ctx = get_ipp_context(dev);
1822
1823 if (pm_runtime_suspended(dev))
1824 return 0;
1825
1826 return ipp_power_ctrl(ctx, false);
1827}
1828
1829static int ipp_resume(struct device *dev)
1830{
1831 struct ipp_context *ctx = get_ipp_context(dev);
1832
1833 if (!pm_runtime_suspended(dev))
1834 return ipp_power_ctrl(ctx, true);
1835
1836 return 0;
1837}
1838#endif
1839
1840#ifdef CONFIG_PM_RUNTIME
1841static int ipp_runtime_suspend(struct device *dev)
1842{
1843 struct ipp_context *ctx = get_ipp_context(dev);
1844
1845 return ipp_power_ctrl(ctx, false);
1846}
1847
1848static int ipp_runtime_resume(struct device *dev)
1849{
1850 struct ipp_context *ctx = get_ipp_context(dev);
1851
1852 return ipp_power_ctrl(ctx, true);
1853}
1854#endif
1855
1856static const struct dev_pm_ops ipp_pm_ops = {
1857 SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
1858 SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
1859};
1860
1861struct platform_driver ipp_driver = { 1767struct platform_driver ipp_driver = {
1862 .probe = ipp_probe, 1768 .probe = ipp_probe,
1863 .remove = ipp_remove, 1769 .remove = ipp_remove,
1864 .driver = { 1770 .driver = {
1865 .name = "exynos-drm-ipp", 1771 .name = "exynos-drm-ipp",
1866 .owner = THIS_MODULE, 1772 .owner = THIS_MODULE,
1867 .pm = &ipp_pm_ops,
1868 }, 1773 },
1869}; 1774};
1870 1775
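Several hunks above gate command-node lookups on c_node->filp == file, so one DRM client cannot operate on another client's property. A hedged sketch of that lookup-plus-ownership check using the kernel IDR API (everything except the idr_* and mutex calls is illustrative):

	#include <linux/idr.h>
	#include <linux/mutex.h>
	#include <linux/types.h>

	struct drm_file;

	struct my_node {
		struct drm_file *owner;	/* file that created the object */
	};

	static struct my_node *my_find_node(struct idr *idr, struct mutex *lock,
					    u32 id, struct drm_file *file)
	{
		struct my_node *node;

		mutex_lock(lock);
		node = idr_find(idr, id);	/* NULL if the id was never allocated */
		mutex_unlock(lock);

		/* reject ids that exist but belong to a different client */
		if (!node || node->owner != file)
			return NULL;

		return node;
	}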
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 6f48d62aeb30..2a61547a39d0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -48,7 +48,6 @@ struct drm_exynos_ipp_cmd_work {
48/* 48/*
49 * A structure of command node. 49 * A structure of command node.
50 * 50 *
51 * @dev: IPP device.
52 * @list: list head to command queue information. 51 * @list: list head to command queue information.
53 * @event_list: list head of event. 52 * @event_list: list head of event.
54 * @mem_list: list head to source,destination memory queue information. 53 * @mem_list: list head to source,destination memory queue information.
@@ -62,9 +61,9 @@ struct drm_exynos_ipp_cmd_work {
62 * @stop_work: stop command work structure. 61 * @stop_work: stop command work structure.
63 * @event_work: event work structure. 62 * @event_work: event work structure.
64 * @state: state of command node. 63 * @state: state of command node.
64 * @filp: associated file pointer.
65 */ 65 */
66struct drm_exynos_ipp_cmd_node { 66struct drm_exynos_ipp_cmd_node {
67 struct device *dev;
68 struct list_head list; 67 struct list_head list;
69 struct list_head event_list; 68 struct list_head event_list;
70 struct list_head mem_list[EXYNOS_DRM_OPS_MAX]; 69 struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
@@ -78,6 +77,7 @@ struct drm_exynos_ipp_cmd_node {
78 struct drm_exynos_ipp_cmd_work *stop_work; 77 struct drm_exynos_ipp_cmd_work *stop_work;
79 struct drm_exynos_ipp_event_work *event_work; 78 struct drm_exynos_ipp_event_work *event_work;
80 enum drm_exynos_ipp_state state; 79 enum drm_exynos_ipp_state state;
80 struct drm_file *filp;
81}; 81};
82 82
83/* 83/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 8371cbd7631d..c7045a663763 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -139,6 +139,8 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
139 overlay->crtc_x, overlay->crtc_y, 139 overlay->crtc_x, overlay->crtc_y,
140 overlay->crtc_width, overlay->crtc_height); 140 overlay->crtc_width, overlay->crtc_height);
141 141
142 plane->crtc = crtc;
143
142 exynos_drm_crtc_plane_mode_set(crtc, overlay); 144 exynos_drm_crtc_plane_mode_set(crtc, overlay);
143 145
144 return 0; 146 return 0;
@@ -187,8 +189,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
187 if (ret < 0) 189 if (ret < 0)
188 return ret; 190 return ret;
189 191
190 plane->crtc = crtc;
191
192 exynos_plane_commit(plane); 192 exynos_plane_commit(plane);
193 exynos_plane_dpms(plane, DRM_MODE_DPMS_ON); 193 exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
194 194
@@ -254,25 +254,26 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
 }
 
 struct drm_plane *exynos_plane_init(struct drm_device *dev,
-				    unsigned long possible_crtcs, bool priv)
+				    unsigned long possible_crtcs,
+				    enum drm_plane_type type)
 {
 	struct exynos_plane *exynos_plane;
 	int err;
 
 	exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
 	if (!exynos_plane)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
-	err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
-			     &exynos_plane_funcs, formats, ARRAY_SIZE(formats),
-			     priv);
+	err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs,
+				       &exynos_plane_funcs, formats,
+				       ARRAY_SIZE(formats), type);
 	if (err) {
 		DRM_ERROR("failed to initialize plane\n");
 		kfree(exynos_plane);
-		return NULL;
+		return ERR_PTR(err);
 	}
 
-	if (priv)
+	if (type == DRM_PLANE_TYPE_PRIMARY)
 		exynos_plane->overlay.zpos = DEFAULT_ZPOS;
 	else
 		exynos_plane_attach_zpos_property(&exynos_plane->base);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 84d464c90d3d..0d1986b115f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -17,4 +17,5 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
 void exynos_plane_commit(struct drm_plane *plane);
 void exynos_plane_dpms(struct drm_plane *plane, int mode);
 struct drm_plane *exynos_plane_init(struct drm_device *dev,
-				    unsigned long possible_crtcs, bool priv);
+				    unsigned long possible_crtcs,
+				    enum drm_plane_type type);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 55af6b41c1df..b6a37d4f5b13 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -156,8 +156,7 @@ static irqreturn_t rotator_irq_handler(int irq, void *arg)
 		event_work->ippdrv = ippdrv;
 		event_work->buf_id[EXYNOS_DRM_OPS_DST] =
 			rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
-		queue_work(ippdrv->event_workq,
-				(struct work_struct *)event_work);
+		queue_work(ippdrv->event_workq, &event_work->work);
 	} else {
 		DRM_ERROR("the SFR is set illegally\n");
 	}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 9528d81d8004..d565207040a2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -303,23 +303,6 @@ static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
303 mgr->drm_dev = ctx->drm_dev = drm_dev; 303 mgr->drm_dev = ctx->drm_dev = drm_dev;
304 mgr->pipe = ctx->pipe = priv->pipe++; 304 mgr->pipe = ctx->pipe = priv->pipe++;
305 305
306 /*
307 * enable drm irq mode.
308 * - with irq_enabled = 1, we can use the vblank feature.
309 *
310 * P.S. note that we wouldn't use drm irq handler but
311 * just specific driver own one instead because
312 * drm framework supports only one irq handler.
313 */
314 drm_dev->irq_enabled = 1;
315
316 /*
317 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
318 * by drm timer once a current process gives up ownership of
319 * vblank event.(after drm_vblank_put function is called)
320 */
321 drm_dev->vblank_disable_allowed = 1;
322
323 return 0; 306 return 0;
324} 307}
325 308
@@ -648,7 +631,6 @@ static int vidi_remove(struct platform_device *pdev)
648 struct exynos_drm_manager *mgr = platform_get_drvdata(pdev); 631 struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
649 struct vidi_context *ctx = mgr->ctx; 632 struct vidi_context *ctx = mgr->ctx;
650 struct drm_encoder *encoder = ctx->encoder; 633 struct drm_encoder *encoder = ctx->encoder;
651 struct drm_crtc *crtc = mgr->crtc;
652 634
653 if (ctx->raw_edid != (struct edid *)fake_edid_info) { 635 if (ctx->raw_edid != (struct edid *)fake_edid_info) {
654 kfree(ctx->raw_edid); 636 kfree(ctx->raw_edid);
@@ -657,7 +639,6 @@ static int vidi_remove(struct platform_device *pdev)
657 return -EINVAL; 639 return -EINVAL;
658 } 640 }
659 641
660 crtc->funcs->destroy(crtc);
661 encoder->funcs->destroy(encoder); 642 encoder->funcs->destroy(encoder);
662 drm_connector_cleanup(&ctx->connector); 643 drm_connector_cleanup(&ctx->connector);
663 644
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 562966db2aa1..7910fb37d9bb 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1040,6 +1040,8 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
1040 1040
1041static void hdmi_connector_destroy(struct drm_connector *connector) 1041static void hdmi_connector_destroy(struct drm_connector *connector)
1042{ 1042{
1043 drm_connector_unregister(connector);
1044 drm_connector_cleanup(connector);
1043} 1045}
1044 1046
1045static struct drm_connector_funcs hdmi_connector_funcs = { 1047static struct drm_connector_funcs hdmi_connector_funcs = {
@@ -2314,8 +2316,8 @@ static void hdmi_unbind(struct device *dev, struct device *master, void *data)
2314 struct drm_encoder *encoder = display->encoder; 2316 struct drm_encoder *encoder = display->encoder;
2315 struct hdmi_context *hdata = display->ctx; 2317 struct hdmi_context *hdata = display->ctx;
2316 2318
2319 hdmi_connector_destroy(&hdata->connector);
2317 encoder->funcs->destroy(encoder); 2320 encoder->funcs->destroy(encoder);
2318 drm_connector_cleanup(&hdata->connector);
2319} 2321}
2320 2322
2321static const struct component_ops hdmi_component_ops = { 2323static const struct component_ops hdmi_component_ops = {
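The hdmi_connector_destroy() change above pairs drm_connector_unregister() with drm_connector_cleanup() so teardown fully reverses connector registration. A minimal sketch of a .destroy callback following that order (for a connector embedded in a larger driver structure, no separate kfree is needed):

	#include <drm/drm_crtc.h>

	/* Sketch of a connector .destroy callback: unregister, then clean up. */
	static void example_connector_destroy(struct drm_connector *connector)
	{
		drm_connector_unregister(connector);	/* remove the userspace node */
		drm_connector_cleanup(connector);	/* release DRM core bookkeeping */
	}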
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e8b4ec84b312..a41c84ee3a2d 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1302,15 +1302,12 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
1302static void mixer_unbind(struct device *dev, struct device *master, void *data) 1302static void mixer_unbind(struct device *dev, struct device *master, void *data)
1303{ 1303{
1304 struct exynos_drm_manager *mgr = dev_get_drvdata(dev); 1304 struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
1305 struct drm_crtc *crtc = mgr->crtc;
1306 1305
1307 dev_info(dev, "remove successful\n"); 1306 dev_info(dev, "remove successful\n");
1308 1307
1309 mixer_mgr_remove(mgr); 1308 mixer_mgr_remove(mgr);
1310 1309
1311 pm_runtime_disable(dev); 1310 pm_runtime_disable(dev);
1312
1313 crtc->funcs->destroy(crtc);
1314} 1311}
1315 1312
1316static const struct component_ops mixer_component_ops = { 1313static const struct component_ops mixer_component_ops = {
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
index f5860a739bd8..cdbb350c9d5d 100644
--- a/drivers/gpu/drm/gma500/gtt.h
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -21,6 +21,7 @@
21#define _PSB_GTT_H_ 21#define _PSB_GTT_H_
22 22
23#include <drm/drmP.h> 23#include <drm/drmP.h>
24#include <drm/drm_gem.h>
24 25
25/* This wants cleaning up with respect to the psb_dev and un-needed stuff */ 26/* This wants cleaning up with respect to the psb_dev and un-needed stuff */
26struct psb_gtt { 27struct psb_gtt {
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 6cb08a1c6b62..44f4a131c8dd 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -47,7 +47,7 @@ static const struct file_operations i810_driver_fops = {
 	.open = drm_open,
 	.release = drm_release,
 	.unlocked_ioctl = drm_ioctl,
-	.mmap = drm_mmap,
+	.mmap = drm_legacy_mmap,
 	.poll = drm_poll,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = drm_compat_ioctl,
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0ba5c7145240..14c88c22281c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1985,7 +1985,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 		   I915_READ(MAD_DIMM_C2));
 	seq_printf(m, "TILECTL = 0x%08x\n",
 		   I915_READ(TILECTL));
-	if (IS_GEN8(dev))
+	if (INTEL_INFO(dev)->gen >= 8)
 		seq_printf(m, "GAMTARBMODE = 0x%08x\n",
 			   I915_READ(GAMTARBMODE));
 	else
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0bc1583114e7..1c035c49577e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1534,7 +1534,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 
 	info = (struct intel_device_info *)&dev_priv->info;
 
-	if (IS_VALLEYVIEW(dev))
+	if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
 		for_each_pipe(dev_priv, pipe)
 			info->num_sprites[pipe] = 2;
 	else
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8ce1b13ad97e..6948877c881c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -356,6 +356,19 @@ static const struct intel_device_info intel_cherryview_info = {
356 CURSOR_OFFSETS, 356 CURSOR_OFFSETS,
357}; 357};
358 358
359static const struct intel_device_info intel_skylake_info = {
360 .is_preliminary = 1,
361 .is_skylake = 1,
362 .gen = 9, .num_pipes = 3,
363 .need_gfx_hws = 1, .has_hotplug = 1,
364 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
365 .has_llc = 1,
366 .has_ddi = 1,
367 .has_fbc = 1,
368 GEN_DEFAULT_PIPEOFFSETS,
369 IVB_CURSOR_OFFSETS,
370};
371
359/* 372/*
360 * Make sure any device matches here are from most specific to most 373 * Make sure any device matches here are from most specific to most
361 * general. For example, since the Quanta match is based on the subsystem 374 * general. For example, since the Quanta match is based on the subsystem
@@ -392,7 +405,8 @@ static const struct intel_device_info intel_cherryview_info = {
 	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
 	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
 	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
-	INTEL_CHV_IDS(&intel_cherryview_info)
+	INTEL_CHV_IDS(&intel_cherryview_info), \
+	INTEL_SKL_IDS(&intel_skylake_info)
 
 static const struct pci_device_id pciidlist[] = {		/* aka */
 	INTEL_PCI_IDS,
@@ -461,6 +475,16 @@ void intel_detect_pch(struct drm_device *dev)
461 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 475 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
462 WARN_ON(!IS_HASWELL(dev)); 476 WARN_ON(!IS_HASWELL(dev));
463 WARN_ON(!IS_ULT(dev)); 477 WARN_ON(!IS_ULT(dev));
478 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
479 dev_priv->pch_type = PCH_SPT;
480 DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
481 WARN_ON(!IS_SKYLAKE(dev));
482 WARN_ON(IS_ULT(dev));
483 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
484 dev_priv->pch_type = PCH_SPT;
485 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
486 WARN_ON(!IS_SKYLAKE(dev));
487 WARN_ON(!IS_ULT(dev));
464 } else 488 } else
465 continue; 489 continue;
466 490
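The SunrisePoint entries above extend PCH detection, which classifies the platform by the PCI device id of the Intel ISA bridge. A hedged sketch of that walk-and-classify pattern (the mask value and id constants here are illustrative, not the driver's definitions):

	#include <linux/pci.h>

	#define EXAMPLE_PCH_ID_MASK	0xff00	/* compare only the id family */
	#define EXAMPLE_SPT_ID		0xA100
	#define EXAMPLE_SPT_LP_ID	0x9D00

	/* Illustrative scan: classify the first matching Intel ISA bridge. */
	static int example_detect_pch(void)
	{
		struct pci_dev *pch = NULL;

		while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
			unsigned short id;

			if (pch->vendor != PCI_VENDOR_ID_INTEL)
				continue;

			id = pch->device & EXAMPLE_PCH_ID_MASK;
			if (id == EXAMPLE_SPT_ID || id == EXAMPLE_SPT_LP_ID) {
				pci_dev_put(pch);
				return 0;	/* SunrisePoint-class PCH found */
			}
		}

		return -ENODEV;
	}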
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8f05258ff49b..4cd2aa347f37 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -43,6 +43,7 @@
43#include <linux/i2c-algo-bit.h> 43#include <linux/i2c-algo-bit.h>
44#include <drm/intel-gtt.h> 44#include <drm/intel-gtt.h>
45#include <drm/drm_legacy.h> /* for struct drm_dma_handle */ 45#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
46#include <drm/drm_gem.h>
46#include <linux/backlight.h> 47#include <linux/backlight.h>
47#include <linux/hashtable.h> 48#include <linux/hashtable.h>
48#include <linux/intel-iommu.h> 49#include <linux/intel-iommu.h>
@@ -75,6 +76,14 @@ enum transcoder {
75}; 76};
76#define transcoder_name(t) ((t) + 'A') 77#define transcoder_name(t) ((t) + 'A')
77 78
79/*
80 * This is the maximum (across all platforms) number of planes (primary +
81 * sprites) that can be active at the same time on one pipe.
82 *
83 * This value doesn't count the cursor plane.
84 */
85#define I915_MAX_PLANES 3
86
78enum plane { 87enum plane {
79 PLANE_A = 0, 88 PLANE_A = 0,
80 PLANE_B, 89 PLANE_B,
@@ -550,6 +559,7 @@ struct intel_uncore {
550 func(is_ivybridge) sep \ 559 func(is_ivybridge) sep \
551 func(is_valleyview) sep \ 560 func(is_valleyview) sep \
552 func(is_haswell) sep \ 561 func(is_haswell) sep \
562 func(is_skylake) sep \
553 func(is_preliminary) sep \ 563 func(is_preliminary) sep \
554 func(has_fbc) sep \ 564 func(has_fbc) sep \
555 func(has_pipe_cxsr) sep \ 565 func(has_pipe_cxsr) sep \
@@ -715,6 +725,7 @@ enum intel_pch {
715 PCH_IBX, /* Ibexpeak PCH */ 725 PCH_IBX, /* Ibexpeak PCH */
716 PCH_CPT, /* Cougarpoint PCH */ 726 PCH_CPT, /* Cougarpoint PCH */
717 PCH_LPT, /* Lynxpoint PCH */ 727 PCH_LPT, /* Lynxpoint PCH */
728 PCH_SPT, /* Sunrisepoint PCH */
718 PCH_NOP, 729 PCH_NOP,
719}; 730};
720 731
@@ -2104,6 +2115,7 @@ struct drm_i915_cmd_table {
2104#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2115#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
2105#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2116#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
2106#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2117#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
2118#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
2107#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 2119#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
2108#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 2120#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
2109 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) 2121 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
@@ -2136,6 +2148,7 @@ struct drm_i915_cmd_table {
2136#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 2148#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
2137#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 2149#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
2138#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) 2150#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
2151#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9)
2139 2152
2140#define RENDER_RING (1<<RCS) 2153#define RENDER_RING (1<<RCS)
2141#define BSD_RING (1<<VCS) 2154#define BSD_RING (1<<VCS)
@@ -2199,8 +2212,11 @@ struct drm_i915_cmd_table {
2199#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 2212#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
2200#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 2213#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
2201#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2214#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
2215#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
2216#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
2202 2217
2203#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type) 2218#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
2219#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
2204#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 2220#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
2205#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 2221#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
2206#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 2222#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 55a2ebb510bf..e05e0063a3b1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3166,6 +3166,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
3166 obj->stride, obj->tiling_mode); 3166 obj->stride, obj->tiling_mode);
3167 3167
3168 switch (INTEL_INFO(dev)->gen) { 3168 switch (INTEL_INFO(dev)->gen) {
3169 case 9:
3169 case 8: 3170 case 8:
3170 case 7: 3171 case 7:
3171 case 6: 3172 case 6:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 90c9bf6e71b7..273dad964e1b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1100,7 +1100,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 
 	if (INTEL_INFO(dev)->gen < 8)
 		return gen6_ppgtt_init(ppgtt);
-	else if (IS_GEN8(dev))
+	else if (IS_GEN8(dev) || IS_GEN9(dev))
 		return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
 	else
 		BUG();
@@ -1853,6 +1853,18 @@ static size_t chv_get_stolen_size(u16 gmch_ctrl)
1853 return (gmch_ctrl - 0x17 + 9) << 22; 1853 return (gmch_ctrl - 0x17 + 9) << 22;
1854} 1854}
1855 1855
1856static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
1857{
1858 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
1859 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
1860
1861 if (gen9_gmch_ctl < 0xf0)
1862 return gen9_gmch_ctl << 25; /* 32 MB units */
1863 else
1864 /* 4MB increments starting at 0xf0 for 4MB */
1865 return (gen9_gmch_ctl - 0xf0 + 1) << 22;
1866}
1867
1856static int ggtt_probe_common(struct drm_device *dev, 1868static int ggtt_probe_common(struct drm_device *dev,
1857 size_t gtt_size) 1869 size_t gtt_size)
1858{ 1870{
@@ -1949,7 +1961,10 @@ static int gen8_gmch_probe(struct drm_device *dev,
1949 1961
1950 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 1962 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1951 1963
1952 if (IS_CHERRYVIEW(dev)) { 1964 if (INTEL_INFO(dev)->gen >= 9) {
1965 *stolen = gen9_get_stolen_size(snb_gmch_ctl);
1966 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
1967 } else if (IS_CHERRYVIEW(dev)) {
1953 *stolen = chv_get_stolen_size(snb_gmch_ctl); 1968 *stolen = chv_get_stolen_size(snb_gmch_ctl);
1954 gtt_size = chv_get_total_gtt_size(snb_gmch_ctl); 1969 gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
1955 } else { 1970 } else {
@@ -2121,6 +2136,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
2121 vma->obj = obj; 2136 vma->obj = obj;
2122 2137
2123 switch (INTEL_INFO(vm->dev)->gen) { 2138 switch (INTEL_INFO(vm->dev)->gen) {
2139 case 9:
2124 case 8: 2140 case 8:
2125 case 7: 2141 case 7:
2126 case 6: 2142 case 6:
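
To make the new stolen-size decode concrete, here is a standalone sketch of gen9_get_stolen_size() with a few worked values. Only the arithmetic is taken from the hunk above; the BDW_GMCH_GMS_SHIFT/MASK numbers are assumed from the existing Broadwell defines.

#include <stdio.h>
#include <stddef.h>

#define BDW_GMCH_GMS_SHIFT 8    /* assumed field position, as on Broadwell */
#define BDW_GMCH_GMS_MASK  0xff

/* Same arithmetic as the gen9_get_stolen_size() hunk above. */
static size_t gen9_stolen_size(unsigned short gmch_ctl)
{
	gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	gmch_ctl &= BDW_GMCH_GMS_MASK;

	if (gmch_ctl < 0xf0)
		return (size_t)gmch_ctl << 25;          /* 32MB units */
	return (size_t)(gmch_ctl - 0xf0 + 1) << 22;     /* 4MB units from 0xf0 up */
}

int main(void)
{
	/* field value 0x02 -> 64MB, 0xf0 -> 4MB, 0xf7 -> 32MB */
	printf("%zu MB\n", gen9_stolen_size(0x02 << BDW_GMCH_GMS_SHIFT) >> 20);
	printf("%zu MB\n", gen9_stolen_size(0xf0 << BDW_GMCH_GMS_SHIFT) >> 20);
	printf("%zu MB\n", gen9_stolen_size(0xf7 << BDW_GMCH_GMS_SHIFT) >> 20);
	return 0;
}
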
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 386e45dbeff1..e664599de6e7 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -765,6 +765,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
765 765
766 /* Fences */ 766 /* Fences */
767 switch (INTEL_INFO(dev)->gen) { 767 switch (INTEL_INFO(dev)->gen) {
768 case 9:
768 case 8: 769 case 8:
769 case 7: 770 case 7:
770 case 6: 771 case 6:
@@ -923,6 +924,7 @@ static void i915_record_ring_state(struct drm_device *dev,
923 ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring)); 924 ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
924 925
925 switch (INTEL_INFO(dev)->gen) { 926 switch (INTEL_INFO(dev)->gen) {
927 case 9:
926 case 8: 928 case 8:
927 for (i = 0; i < 4; i++) { 929 for (i = 0; i < 4; i++) {
928 ering->vm_info.pdp[i] = 930 ering->vm_info.pdp[i] =
@@ -1387,6 +1389,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
1387 WARN_ONCE(1, "Unsupported platform\n"); 1389 WARN_ONCE(1, "Unsupported platform\n");
1388 case 7: 1390 case 7:
1389 case 8: 1391 case 8:
1392 case 9:
1390 instdone[0] = I915_READ(GEN7_INSTDONE_1); 1393 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1391 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 1394 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1392 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 1395 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a08cdc62f841..080981b56a4e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -502,7 +502,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
502 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 502 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
503 else if (IS_GEN7(dev)) 503 else if (IS_GEN7(dev))
504 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old); 504 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
505 else if (IS_GEN8(dev)) 505 else if (IS_GEN8(dev) || IS_GEN9(dev))
506 broadwell_set_fifo_underrun_reporting(dev, pipe, enable); 506 broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
507 507
508 return old; 508 return old;
@@ -2584,7 +2584,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2584 } 2584 }
2585 2585
2586 for_each_pipe(dev_priv, pipe) { 2586 for_each_pipe(dev_priv, pipe) {
2587 uint32_t pipe_iir; 2587 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2588 2588
2589 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2589 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2590 continue; 2590 continue;
@@ -2593,11 +2593,17 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2593 if (pipe_iir) { 2593 if (pipe_iir) {
2594 ret = IRQ_HANDLED; 2594 ret = IRQ_HANDLED;
2595 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2595 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2596
2596 if (pipe_iir & GEN8_PIPE_VBLANK && 2597 if (pipe_iir & GEN8_PIPE_VBLANK &&
2597 intel_pipe_handle_vblank(dev, pipe)) 2598 intel_pipe_handle_vblank(dev, pipe))
2598 intel_check_page_flip(dev, pipe); 2599 intel_check_page_flip(dev, pipe);
2599 2600
2600 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { 2601 if (IS_GEN9(dev))
2602 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2603 else
2604 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2605
2606 if (flip_done) {
2601 intel_prepare_page_flip(dev, pipe); 2607 intel_prepare_page_flip(dev, pipe);
2602 intel_finish_page_flip_plane(dev, pipe); 2608 intel_finish_page_flip_plane(dev, pipe);
2603 } 2609 }
@@ -2612,11 +2618,16 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2612 pipe_name(pipe)); 2618 pipe_name(pipe));
2613 } 2619 }
2614 2620
2615 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { 2621
2622 if (IS_GEN9(dev))
2623 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2624 else
2625 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2626
2627 if (fault_errors)
2616 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 2628 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2617 pipe_name(pipe), 2629 pipe_name(pipe),
2618 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2630 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2619 }
2620 } else 2631 } else
2621 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2632 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2622 } 2633 }
@@ -3796,12 +3807,20 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3796 3807
3797static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3808static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3798{ 3809{
3799 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE | 3810 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3800 GEN8_PIPE_CDCLK_CRC_DONE | 3811 uint32_t de_pipe_enables;
3801 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3802 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3803 GEN8_PIPE_FIFO_UNDERRUN;
3804 int pipe; 3812 int pipe;
3813
3814 if (IS_GEN9(dev_priv))
3815 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3816 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3817 else
3818 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3819 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3820
3821 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3822 GEN8_PIPE_FIFO_UNDERRUN;
3823
3805 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3824 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3806 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3825 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3807 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3826 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
@@ -4699,7 +4718,7 @@ void intel_irq_init(struct drm_device *dev)
4699 dev->driver->enable_vblank = valleyview_enable_vblank; 4718 dev->driver->enable_vblank = valleyview_enable_vblank;
4700 dev->driver->disable_vblank = valleyview_disable_vblank; 4719 dev->driver->disable_vblank = valleyview_disable_vblank;
4701 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4720 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4702 } else if (IS_GEN8(dev)) { 4721 } else if (INTEL_INFO(dev)->gen >= 8) {
4703 dev->driver->irq_handler = gen8_irq_handler; 4722 dev->driver->irq_handler = gen8_irq_handler;
4704 dev->driver->irq_preinstall = gen8_irq_reset; 4723 dev->driver->irq_preinstall = gen8_irq_reset;
4705 dev->driver->irq_postinstall = gen8_irq_postinstall; 4724 dev->driver->irq_postinstall = gen8_irq_postinstall;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 124ea60c1386..c62f3eb3911d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,8 +26,8 @@
26#define _I915_REG_H_ 26#define _I915_REG_H_
27 27
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) 28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
29#define _PLANE(plane, a, b) _PIPE(plane, a, b)
29#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) 30#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
30
31#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) 31#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
32#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \ 32#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
33 (pipe) == PIPE_B ? (b) : (c)) 33 (pipe) == PIPE_B ? (b) : (c))
@@ -3642,6 +3642,7 @@ enum punit_power_well {
3642#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) 3642#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
3643#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) 3643#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
3644#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 3644#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
3645#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
3645 3646
3646/* 3647/*
3647 * Computing GMCH M and N values for the Display Port link 3648 * Computing GMCH M and N values for the Display Port link
@@ -4507,6 +4508,143 @@ enum punit_power_well {
4507#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA) 4508#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
4508#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC) 4509#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
4509 4510
4511/* Skylake plane registers */
4512
4513#define _PLANE_CTL_1_A 0x70180
4514#define _PLANE_CTL_2_A 0x70280
4515#define _PLANE_CTL_3_A 0x70380
4516#define PLANE_CTL_ENABLE (1 << 31)
4517#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30)
4518#define PLANE_CTL_FORMAT_MASK (0xf << 24)
4519#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
4520#define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
4521#define PLANE_CTL_FORMAT_XRGB_2101010 ( 2 << 24)
4522#define PLANE_CTL_FORMAT_XRGB_8888 ( 4 << 24)
4523#define PLANE_CTL_FORMAT_XRGB_16161616F ( 6 << 24)
4524#define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
4525#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
4526#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
4527#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23)
4528#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
4529#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
4530#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21)
4531#define PLANE_CTL_ORDER_BGRX (0 << 20)
4532#define PLANE_CTL_ORDER_RGBX (1 << 20)
4533#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
4534#define PLANE_CTL_YUV422_YUYV ( 0 << 16)
4535#define PLANE_CTL_YUV422_UYVY ( 1 << 16)
4536#define PLANE_CTL_YUV422_YVYU ( 2 << 16)
4537#define PLANE_CTL_YUV422_VYUY ( 3 << 16)
4538#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
4539#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
4540#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13)
4541#define PLANE_CTL_TILED_MASK (0x7 << 10)
4542#define PLANE_CTL_TILED_LINEAR ( 0 << 10)
4543#define PLANE_CTL_TILED_X ( 1 << 10)
4544#define PLANE_CTL_TILED_Y ( 4 << 10)
4545#define PLANE_CTL_TILED_YF ( 5 << 10)
4546#define PLANE_CTL_ALPHA_MASK (0x3 << 4)
4547#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
4548#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
4549#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
4550#define _PLANE_STRIDE_1_A 0x70188
4551#define _PLANE_STRIDE_2_A 0x70288
4552#define _PLANE_STRIDE_3_A 0x70388
4553#define _PLANE_POS_1_A 0x7018c
4554#define _PLANE_POS_2_A 0x7028c
4555#define _PLANE_POS_3_A 0x7038c
4556#define _PLANE_SIZE_1_A 0x70190
4557#define _PLANE_SIZE_2_A 0x70290
4558#define _PLANE_SIZE_3_A 0x70390
4559#define _PLANE_SURF_1_A 0x7019c
4560#define _PLANE_SURF_2_A 0x7029c
4561#define _PLANE_SURF_3_A 0x7039c
4562#define _PLANE_OFFSET_1_A 0x701a4
4563#define _PLANE_OFFSET_2_A 0x702a4
4564#define _PLANE_OFFSET_3_A 0x703a4
4565#define _PLANE_KEYVAL_1_A 0x70194
4566#define _PLANE_KEYVAL_2_A 0x70294
4567#define _PLANE_KEYMSK_1_A 0x70198
4568#define _PLANE_KEYMSK_2_A 0x70298
4569#define _PLANE_KEYMAX_1_A 0x701a0
4570#define _PLANE_KEYMAX_2_A 0x702a0
4571
4572#define _PLANE_CTL_1_B 0x71180
4573#define _PLANE_CTL_2_B 0x71280
4574#define _PLANE_CTL_3_B 0x71380
4575#define _PLANE_CTL_1(pipe) _PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
4576#define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
4577#define _PLANE_CTL_3(pipe) _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B)
4578#define PLANE_CTL(pipe, plane) \
4579 _PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
4580
4581#define _PLANE_STRIDE_1_B 0x71188
4582#define _PLANE_STRIDE_2_B 0x71288
4583#define _PLANE_STRIDE_3_B 0x71388
4584#define _PLANE_STRIDE_1(pipe) \
4585 _PIPE(pipe, _PLANE_STRIDE_1_A, _PLANE_STRIDE_1_B)
4586#define _PLANE_STRIDE_2(pipe) \
4587 _PIPE(pipe, _PLANE_STRIDE_2_A, _PLANE_STRIDE_2_B)
4588#define _PLANE_STRIDE_3(pipe) \
4589 _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
4590#define PLANE_STRIDE(pipe, plane) \
4591 _PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
4592
4593#define _PLANE_POS_1_B 0x7118c
4594#define _PLANE_POS_2_B 0x7128c
4595#define _PLANE_POS_3_B 0x7138c
4596#define _PLANE_POS_1(pipe) _PIPE(pipe, _PLANE_POS_1_A, _PLANE_POS_1_B)
4597#define _PLANE_POS_2(pipe) _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B)
4598#define _PLANE_POS_3(pipe) _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B)
4599#define PLANE_POS(pipe, plane) \
4600 _PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
4601
4602#define _PLANE_SIZE_1_B 0x71190
4603#define _PLANE_SIZE_2_B 0x71290
4604#define _PLANE_SIZE_3_B 0x71390
4605#define _PLANE_SIZE_1(pipe) _PIPE(pipe, _PLANE_SIZE_1_A, _PLANE_SIZE_1_B)
4606#define _PLANE_SIZE_2(pipe) _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B)
4607#define _PLANE_SIZE_3(pipe) _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B)
4608#define PLANE_SIZE(pipe, plane) \
4609 _PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
4610
4611#define _PLANE_SURF_1_B 0x7119c
4612#define _PLANE_SURF_2_B 0x7129c
4613#define _PLANE_SURF_3_B 0x7139c
4614#define _PLANE_SURF_1(pipe) _PIPE(pipe, _PLANE_SURF_1_A, _PLANE_SURF_1_B)
4615#define _PLANE_SURF_2(pipe) _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B)
4616#define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
4617#define PLANE_SURF(pipe, plane) \
4618 _PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
4619
4620#define _PLANE_OFFSET_1_B 0x711a4
4621#define _PLANE_OFFSET_2_B 0x712a4
4622#define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B)
4623#define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B)
4624#define PLANE_OFFSET(pipe, plane) \
4625 _PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
4626
4627#define _PLANE_KEYVAL_1_B 0x71194
4628#define _PLANE_KEYVAL_2_B 0x71294
4629#define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B)
4630#define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B)
4631#define PLANE_KEYVAL(pipe, plane) \
4632 _PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
4633
4634#define _PLANE_KEYMSK_1_B 0x71198
4635#define _PLANE_KEYMSK_2_B 0x71298
4636#define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B)
4637#define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B)
4638#define PLANE_KEYMSK(pipe, plane) \
4639 _PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
4640
4641#define _PLANE_KEYMAX_1_B 0x711a0
4642#define _PLANE_KEYMAX_2_B 0x712a0
4643#define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B)
4644#define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B)
4645#define PLANE_KEYMAX(pipe, plane) \
4646 _PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
4647
4510/* VBIOS regs */ 4648/* VBIOS regs */
4511#define VGACNTRL 0x71400 4649#define VGACNTRL 0x71400
4512# define VGA_DISP_DISABLE (1 << 31) 4650# define VGA_DISP_DISABLE (1 << 31)
@@ -4743,10 +4881,23 @@ enum punit_power_well {
4743#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2) 4881#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
4744#define GEN8_PIPE_VSYNC (1 << 1) 4882#define GEN8_PIPE_VSYNC (1 << 1)
4745#define GEN8_PIPE_VBLANK (1 << 0) 4883#define GEN8_PIPE_VBLANK (1 << 0)
4884#define GEN9_PIPE_CURSOR_FAULT (1 << 11)
4885#define GEN9_PIPE_PLANE3_FAULT (1 << 9)
4886#define GEN9_PIPE_PLANE2_FAULT (1 << 8)
4887#define GEN9_PIPE_PLANE1_FAULT (1 << 7)
4888#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5)
4889#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4)
4890#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3)
4891#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + p))
4746#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \ 4892#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
4747 (GEN8_PIPE_CURSOR_FAULT | \ 4893 (GEN8_PIPE_CURSOR_FAULT | \
4748 GEN8_PIPE_SPRITE_FAULT | \ 4894 GEN8_PIPE_SPRITE_FAULT | \
4749 GEN8_PIPE_PRIMARY_FAULT) 4895 GEN8_PIPE_PRIMARY_FAULT)
4896#define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
4897 (GEN9_PIPE_CURSOR_FAULT | \
4898 GEN9_PIPE_PLANE3_FAULT | \
4899 GEN9_PIPE_PLANE2_FAULT | \
4900 GEN9_PIPE_PLANE1_FAULT)
4750 4901
4751#define GEN8_DE_PORT_ISR 0x44440 4902#define GEN8_DE_PORT_ISR 0x44440
4752#define GEN8_DE_PORT_IMR 0x44444 4903#define GEN8_DE_PORT_IMR 0x44444
@@ -5753,6 +5904,9 @@ enum punit_power_well {
5753#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10) 5904#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
5754#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) 5905#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
5755 5906
5907#define GEN9_HALF_SLICE_CHICKEN5 0xe188
5908#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
5909
5756#define GEN8_ROW_CHICKEN 0xe4f0 5910#define GEN8_ROW_CHICKEN 0xe4f0
5757#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8) 5911#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
5758#define STALL_DOP_GATING_DISABLE (1<<5) 5912#define STALL_DOP_GATING_DISABLE (1<<5)
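
A quick sanity check of the new _PLANE() helper and the Skylake plane register layout above: both _PIPE() and _PLANE() extrapolate linearly from the first two instances, so the remaining plane and pipe addresses fall out automatically. The sketch below reuses the offsets from the defines above and assumes PIPE_A/PIPE_B map to 0/1 as in the driver's enum pipe.

#include <assert.h>
#include <stdio.h>

#define _PIPE(pipe, a, b)   ((a) + (pipe) * ((b) - (a)))
#define _PLANE(plane, a, b) _PIPE(plane, a, b)

#define _PLANE_CTL_1_A 0x70180
#define _PLANE_CTL_2_A 0x70280
#define _PLANE_CTL_1_B 0x71180
#define _PLANE_CTL_2_B 0x71280

#define _PLANE_CTL_1(pipe) _PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
#define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
#define PLANE_CTL(pipe, plane) \
	_PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))

int main(void)
{
	/* pipe A, plane index 0: what skylake_update_primary_plane() programs */
	assert(PLANE_CTL(0, 0) == 0x70180);
	/* pipe B, plane index 1: 0x71180 + 1 * (0x71280 - 0x71180) */
	assert(PLANE_CTL(1, 1) == 0x71280);
	/* plane index 2 extrapolates to _PLANE_CTL_3_A even without a define */
	assert(PLANE_CTL(0, 2) == 0x70380);

	printf("PLANE_CTL(PIPE_B, 1) = 0x%x\n", (unsigned)PLANE_CTL(1, 1));
	return 0;
}
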
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 295827811433..c9f4b3c43614 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -127,6 +127,32 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
127 { 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */ 127 { 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */
128}; 128};
129 129
130static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
131 { 0x00000018, 0x000000a0 },
132 { 0x00004014, 0x00000098 },
133 { 0x00006012, 0x00000088 },
134 { 0x00008010, 0x00000080 },
135 { 0x00000018, 0x00000098 },
136 { 0x00004014, 0x00000088 },
137 { 0x00006012, 0x00000080 },
138 { 0x00000018, 0x00000088 },
139 { 0x00004014, 0x00000080 },
140};
141
142static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
143 /* Idx NT mV T mV dB */
144 { 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */
145 { 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */
146 { 0x00006012, 0x00000088 }, /* 2: 400 800 6 */
147 { 0x00000018, 0x0000003c }, /* 3: 450 450 0 */
148 { 0x00000018, 0x00000098 }, /* 4: 600 600 0 */
149 { 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */
150 { 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */
151 { 0x00000018, 0x00000088 }, /* 7: 800 800 0 */
152 { 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */
153 { 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */
154};
155
130enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) 156enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
131{ 157{
132 struct drm_encoder *encoder = &intel_encoder->base; 158 struct drm_encoder *encoder = &intel_encoder->base;
@@ -169,7 +195,14 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
169 const struct ddi_buf_trans *ddi_translations_hdmi; 195 const struct ddi_buf_trans *ddi_translations_hdmi;
170 const struct ddi_buf_trans *ddi_translations; 196 const struct ddi_buf_trans *ddi_translations;
171 197
172 if (IS_BROADWELL(dev)) { 198 if (IS_SKYLAKE(dev)) {
199 ddi_translations_fdi = NULL;
200 ddi_translations_dp = skl_ddi_translations_dp;
201 ddi_translations_edp = skl_ddi_translations_dp;
202 ddi_translations_hdmi = skl_ddi_translations_hdmi;
203 n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
204 hdmi_800mV_0dB = 7;
205 } else if (IS_BROADWELL(dev)) {
173 ddi_translations_fdi = bdw_ddi_translations_fdi; 206 ddi_translations_fdi = bdw_ddi_translations_fdi;
174 ddi_translations_dp = bdw_ddi_translations_dp; 207 ddi_translations_dp = bdw_ddi_translations_dp;
175 ddi_translations_edp = bdw_ddi_translations_edp; 208 ddi_translations_edp = bdw_ddi_translations_edp;
@@ -208,7 +241,10 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
208 ddi_translations = ddi_translations_dp; 241 ddi_translations = ddi_translations_dp;
209 break; 242 break;
210 case PORT_E: 243 case PORT_E:
211 ddi_translations = ddi_translations_fdi; 244 if (ddi_translations_fdi)
245 ddi_translations = ddi_translations_fdi;
246 else
247 ddi_translations = ddi_translations_dp;
212 break; 248 break;
213 default: 249 default:
214 BUG(); 250 BUG();
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 858011d22482..b8488a8c1e9f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1279,7 +1279,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1279 int reg, sprite; 1279 int reg, sprite;
1280 u32 val; 1280 u32 val;
1281 1281
1282 if (IS_VALLEYVIEW(dev)) { 1282 if (INTEL_INFO(dev)->gen >= 9) {
1283 for_each_sprite(pipe, sprite) {
1284 val = I915_READ(PLANE_CTL(pipe, sprite));
1285 WARN(val & PLANE_CTL_ENABLE,
1286 "plane %d assertion failure, should be off on pipe %c but is still active\n",
1287 sprite, pipe_name(pipe));
1288 }
1289 } else if (IS_VALLEYVIEW(dev)) {
1283 for_each_sprite(pipe, sprite) { 1290 for_each_sprite(pipe, sprite) {
1284 reg = SPCNTR(pipe, sprite); 1291 reg = SPCNTR(pipe, sprite);
1285 val = I915_READ(reg); 1292 val = I915_READ(reg);
@@ -2180,7 +2187,9 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
2180 2187
2181 switch (obj->tiling_mode) { 2188 switch (obj->tiling_mode) {
2182 case I915_TILING_NONE: 2189 case I915_TILING_NONE:
2183 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 2190 if (INTEL_INFO(dev)->gen >= 9)
2191 alignment = 256 * 1024;
2192 else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2184 alignment = 128 * 1024; 2193 alignment = 128 * 1024;
2185 else if (INTEL_INFO(dev)->gen >= 4) 2194 else if (INTEL_INFO(dev)->gen >= 4)
2186 alignment = 4 * 1024; 2195 alignment = 4 * 1024;
@@ -2188,8 +2197,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
2188 alignment = 64 * 1024; 2197 alignment = 64 * 1024;
2189 break; 2198 break;
2190 case I915_TILING_X: 2199 case I915_TILING_X:
2191 /* pin() will align the object as required by fence */ 2200 if (INTEL_INFO(dev)->gen >= 9)
2192 alignment = 0; 2201 alignment = 256 * 1024;
2202 else {
2203 /* pin() will align the object as required by fence */
2204 alignment = 0;
2205 }
2193 break; 2206 break;
2194 case I915_TILING_Y: 2207 case I915_TILING_Y:
2195 WARN(1, "Y tiled bo slipped through, driver bug!\n"); 2208 WARN(1, "Y tiled bo slipped through, driver bug!\n");
@@ -2619,6 +2632,90 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2619 POSTING_READ(reg); 2632 POSTING_READ(reg);
2620} 2633}
2621 2634
2635static void skylake_update_primary_plane(struct drm_crtc *crtc,
2636 struct drm_framebuffer *fb,
2637 int x, int y)
2638{
2639 struct drm_device *dev = crtc->dev;
2640 struct drm_i915_private *dev_priv = dev->dev_private;
2641 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2642 struct intel_framebuffer *intel_fb;
2643 struct drm_i915_gem_object *obj;
2644 int pipe = intel_crtc->pipe;
2645 u32 plane_ctl, stride;
2646
2647 if (!intel_crtc->primary_enabled) {
2648 I915_WRITE(PLANE_CTL(pipe, 0), 0);
2649 I915_WRITE(PLANE_SURF(pipe, 0), 0);
2650 POSTING_READ(PLANE_CTL(pipe, 0));
2651 return;
2652 }
2653
2654 plane_ctl = PLANE_CTL_ENABLE |
2655 PLANE_CTL_PIPE_GAMMA_ENABLE |
2656 PLANE_CTL_PIPE_CSC_ENABLE;
2657
2658 switch (fb->pixel_format) {
2659 case DRM_FORMAT_RGB565:
2660 plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
2661 break;
2662 case DRM_FORMAT_XRGB8888:
2663 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2664 break;
2665 case DRM_FORMAT_XBGR8888:
2666 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2667 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2668 break;
2669 case DRM_FORMAT_XRGB2101010:
2670 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2671 break;
2672 case DRM_FORMAT_XBGR2101010:
2673 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2674 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2675 break;
2676 default:
2677 BUG();
2678 }
2679
2680 intel_fb = to_intel_framebuffer(fb);
2681 obj = intel_fb->obj;
2682
2683 /*
2684 * The stride is expressed either as a multiple of 64-byte chunks for
2685 * linear buffers or as a number of tiles for tiled buffers.
2686 */
2687 switch (obj->tiling_mode) {
2688 case I915_TILING_NONE:
2689 stride = fb->pitches[0] >> 6;
2690 break;
2691 case I915_TILING_X:
2692 plane_ctl |= PLANE_CTL_TILED_X;
2693 stride = fb->pitches[0] >> 9;
2694 break;
2695 default:
2696 BUG();
2697 }
2698
2699 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
2700
2701 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
2702
2703 DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
2704 i915_gem_obj_ggtt_offset(obj),
2705 x, y, fb->width, fb->height,
2706 fb->pitches[0]);
2707
2708 I915_WRITE(PLANE_POS(pipe, 0), 0);
2709 I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
2710 I915_WRITE(PLANE_SIZE(pipe, 0),
2711 (intel_crtc->config.pipe_src_h - 1) << 16 |
2712 (intel_crtc->config.pipe_src_w - 1));
2713 I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
2714 I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
2715
2716 POSTING_READ(PLANE_SURF(pipe, 0));
2717}
2718
2622/* Assume fb object is pinned & idle & fenced and just update base pointers */ 2719/* Assume fb object is pinned & idle & fenced and just update base pointers */
2623static int 2720static int
2624intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, 2721intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -6983,7 +7080,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
6983 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); 7080 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
6984 POSTING_READ(GAMMA_MODE(intel_crtc->pipe)); 7081 POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
6985 7082
6986 if (IS_BROADWELL(dev)) { 7083 if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
6987 val = 0; 7084 val = 0;
6988 7085
6989 switch (intel_crtc->config.pipe_bpp) { 7086 switch (intel_crtc->config.pipe_bpp) {
@@ -7785,7 +7882,8 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
7785 * DDI E. So just check whether this pipe is wired to DDI E and whether 7882 * DDI E. So just check whether this pipe is wired to DDI E and whether
7786 * the PCH transcoder is on. 7883 * the PCH transcoder is on.
7787 */ 7884 */
7788 if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 7885 if (INTEL_INFO(dev)->gen < 9 &&
7886 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
7789 pipe_config->has_pch_encoder = true; 7887 pipe_config->has_pch_encoder = true;
7790 7888
7791 tmp = I915_READ(FDI_RX_CTL(PIPE_A)); 7889 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
@@ -12066,6 +12164,9 @@ static bool intel_crt_present(struct drm_device *dev)
12066{ 12164{
12067 struct drm_i915_private *dev_priv = dev->dev_private; 12165 struct drm_i915_private *dev_priv = dev->dev_private;
12068 12166
12167 if (INTEL_INFO(dev)->gen >= 9)
12168 return false;
12169
12069 if (IS_ULT(dev)) 12170 if (IS_ULT(dev))
12070 return false; 12171 return false;
12071 12172
@@ -12409,8 +12510,12 @@ static void intel_init_display(struct drm_device *dev)
12409 dev_priv->display.crtc_enable = haswell_crtc_enable; 12510 dev_priv->display.crtc_enable = haswell_crtc_enable;
12410 dev_priv->display.crtc_disable = haswell_crtc_disable; 12511 dev_priv->display.crtc_disable = haswell_crtc_disable;
12411 dev_priv->display.off = ironlake_crtc_off; 12512 dev_priv->display.off = ironlake_crtc_off;
12412 dev_priv->display.update_primary_plane = 12513 if (INTEL_INFO(dev)->gen >= 9)
12413 ironlake_update_primary_plane; 12514 dev_priv->display.update_primary_plane =
12515 skylake_update_primary_plane;
12516 else
12517 dev_priv->display.update_primary_plane =
12518 ironlake_update_primary_plane;
12414 } else if (HAS_PCH_SPLIT(dev)) { 12519 } else if (HAS_PCH_SPLIT(dev)) {
12415 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 12520 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
12416 dev_priv->display.get_plane_config = ironlake_get_plane_config; 12521 dev_priv->display.get_plane_config = ironlake_get_plane_config;
@@ -12494,6 +12599,10 @@ static void intel_init_display(struct drm_device *dev)
12494 dev_priv->display.modeset_global_resources = 12599 dev_priv->display.modeset_global_resources =
12495 valleyview_modeset_global_resources; 12600 valleyview_modeset_global_resources;
12496 dev_priv->display.write_eld = ironlake_write_eld; 12601 dev_priv->display.write_eld = ironlake_write_eld;
12602 } else if (INTEL_INFO(dev)->gen >= 9) {
12603 dev_priv->display.write_eld = haswell_write_eld;
12604 dev_priv->display.modeset_global_resources =
12605 haswell_modeset_global_resources;
12497 } 12606 }
12498 12607
12499 /* Default just returns -ENODEV to indicate unsupported */ 12608 /* Default just returns -ENODEV to indicate unsupported */
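
As a small illustration of the stride programming in skylake_update_primary_plane() above: PLANE_STRIDE takes the pitch in 64-byte units for linear framebuffers and in tile widths (512 bytes for X-tiling) for tiled ones. The 1920x1080, 32bpp framebuffer below is an arbitrary example, not something from this diff.

#include <stdio.h>

enum tiling { TILING_NONE, TILING_X };

/* Mirrors the PLANE_STRIDE value computed in skylake_update_primary_plane(). */
static unsigned int skl_plane_stride(unsigned int pitch_bytes, enum tiling tiling)
{
	switch (tiling) {
	case TILING_NONE:
		return pitch_bytes >> 6;  /* pitch in 64-byte chunks */
	case TILING_X:
		return pitch_bytes >> 9;  /* pitch in X tiles (512 bytes wide) */
	}
	return 0;
}

int main(void)
{
	unsigned int pitch = 1920 * 4;  /* 7680 bytes per line at 32bpp */

	printf("linear stride:  %u\n", skl_plane_stride(pitch, TILING_NONE)); /* 120 */
	printf("X-tiled stride: %u\n", skl_plane_stride(pitch, TILING_X));    /* 15 */
	return 0;
}
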
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 342d624f8312..799918f7822c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -661,6 +661,16 @@ static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
661 return index ? 0 : 100; 661 return index ? 0 : 100;
662} 662}
663 663
664static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
665{
666 /*
667 * SKL doesn't need us to program the AUX clock divider (Hardware will
668 * derive the clock from CDCLK automatically). We still implement the
669 * get_aux_clock_divider vfunc to plug-in into the existing code.
670 */
671 return index ? 0 : 1;
672}
673
664static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp, 674static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
665 bool has_aux_irq, 675 bool has_aux_irq,
666 int send_bytes, 676 int send_bytes,
@@ -691,6 +701,21 @@ static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
691 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); 701 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
692} 702}
693 703
704static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
705 bool has_aux_irq,
706 int send_bytes,
707 uint32_t unused)
708{
709 return DP_AUX_CH_CTL_SEND_BUSY |
710 DP_AUX_CH_CTL_DONE |
711 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
712 DP_AUX_CH_CTL_TIME_OUT_ERROR |
713 DP_AUX_CH_CTL_TIME_OUT_1600us |
714 DP_AUX_CH_CTL_RECEIVE_ERROR |
715 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
716 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
717}
718
694static int 719static int
695intel_dp_aux_ch(struct intel_dp *intel_dp, 720intel_dp_aux_ch(struct intel_dp *intel_dp,
696 uint8_t *send, int send_bytes, 721 uint8_t *send, int send_bytes,
@@ -925,7 +950,16 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
925 BUG(); 950 BUG();
926 } 951 }
927 952
928 if (!HAS_DDI(dev)) 953 /*
954 * The AUX_CTL register is usually DP_CTL + 0x10.
955 *
956 * On Haswell and Broadwell though:
957 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
958 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
959 *
960 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
961 */
962 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
929 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; 963 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
930 964
931 intel_dp->aux.name = name; 965 intel_dp->aux.name = name;
@@ -2842,7 +2876,9 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
2842 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2876 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2843 enum port port = dp_to_dig_port(intel_dp)->port; 2877 enum port port = dp_to_dig_port(intel_dp)->port;
2844 2878
2845 if (IS_VALLEYVIEW(dev)) 2879 if (INTEL_INFO(dev)->gen >= 9)
2880 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2881 else if (IS_VALLEYVIEW(dev))
2846 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 2882 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2847 else if (IS_GEN7(dev) && port == PORT_A) 2883 else if (IS_GEN7(dev) && port == PORT_A)
2848 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 2884 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -2858,7 +2894,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2858 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2894 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2859 enum port port = dp_to_dig_port(intel_dp)->port; 2895 enum port port = dp_to_dig_port(intel_dp)->port;
2860 2896
2861 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 2897 if (INTEL_INFO(dev)->gen >= 9) {
2898 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2899 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2900 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2901 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2902 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2903 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2904 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2905 default:
2906 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2907 }
2908 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2862 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 2909 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2863 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 2910 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2864 return DP_TRAIN_PRE_EMPH_LEVEL_3; 2911 return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3340,7 +3387,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3340 uint32_t signal_levels, mask; 3387 uint32_t signal_levels, mask;
3341 uint8_t train_set = intel_dp->train_set[0]; 3388 uint8_t train_set = intel_dp->train_set[0];
3342 3389
3343 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 3390 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
3344 signal_levels = intel_hsw_signal_levels(train_set); 3391 signal_levels = intel_hsw_signal_levels(train_set);
3345 mask = DDI_BUF_EMP_MASK; 3392 mask = DDI_BUF_EMP_MASK;
3346 } else if (IS_CHERRYVIEW(dev)) { 3393 } else if (IS_CHERRYVIEW(dev)) {
@@ -5078,7 +5125,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5078 intel_dp->pps_pipe = INVALID_PIPE; 5125 intel_dp->pps_pipe = INVALID_PIPE;
5079 5126
5080 /* intel_dp vfuncs */ 5127 /* intel_dp vfuncs */
5081 if (IS_VALLEYVIEW(dev)) 5128 if (INTEL_INFO(dev)->gen >= 9)
5129 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5130 else if (IS_VALLEYVIEW(dev))
5082 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider; 5131 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5083 else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 5132 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5084 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; 5133 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
@@ -5087,7 +5136,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5087 else 5136 else
5088 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider; 5137 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5089 5138
5090 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl; 5139 if (INTEL_INFO(dev)->gen >= 9)
5140 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5141 else
5142 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5091 5143
5092 /* Preserve the current hw state. */ 5144 /* Preserve the current hw state. */
5093 intel_dp->DP = I915_READ(intel_dp->output_reg); 5145 intel_dp->DP = I915_READ(intel_dp->output_reg);
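
The gen >= 9 branch added to intel_dp_pre_emphasis_max() encodes a simple trade-off: the higher the voltage swing already requested, the less pre-emphasis may be stacked on top during link training. Below is a standalone sketch of that mapping, with plain integers standing in for the DP_TRAIN_* level defines from drm_dp_helper.h.

#include <stdio.h>

/* Plain integers stand in for DP_TRAIN_VOLTAGE_SWING/PRE_EMPH level defines. */
static int skl_pre_emphasis_max(int voltage_swing_level)
{
	switch (voltage_swing_level) {
	case 0:  return 3;  /* lowest swing leaves the most pre-emphasis headroom */
	case 1:  return 2;
	case 2:  return 1;
	default: return 0;  /* maximum swing: no pre-emphasis on top */
	}
}

int main(void)
{
	int swing;

	for (swing = 0; swing <= 3; swing++)
		printf("swing level %d -> max pre-emphasis level %d\n",
		       swing, skl_pre_emphasis_max(swing));
	return 0;
}
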
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 617126786819..072e69f4080e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -743,6 +743,14 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
743 return container_of(intel_hdmi, struct intel_digital_port, hdmi); 743 return container_of(intel_hdmi, struct intel_digital_port, hdmi);
744} 744}
745 745
746/*
747 * Returns the number of planes for this pipe, i.e. the number of sprites + 1
748 * (for the primary plane). The cursor plane is not counted.
749 */
750static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
751{
752 return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
753}
746 754
747/* i915_irq.c */ 755/* i915_irq.c */
748bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 756bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
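
A tiny illustration of the counting convention in the new intel_num_planes() helper: sprites plus the primary, cursor excluded. The per-pipe sprite count below is a made-up example; the driver reads the real value from its device info table.

#include <stdio.h>

/* Same convention as intel_num_planes(): sprites + 1 primary, no cursor. */
static unsigned int num_planes(unsigned int num_sprites_on_pipe)
{
	return num_sprites_on_pipe + 1;
}

int main(void)
{
	unsigned int sprites = 2;  /* made-up per-pipe sprite count */
	unsigned int plane;

	for (plane = 0; plane < num_planes(sprites); plane++)
		printf("pipe plane %u\n", plane);  /* prints planes 0..2 */
	return 0;
}
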
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index f17ada3742de..543e0f17ee62 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1311,7 +1311,7 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
1311{ 1311{
1312 struct drm_i915_private *dev_priv = dev->dev_private; 1312 struct drm_i915_private *dev_priv = dev->dev_private;
1313 1313
1314 if (IS_BROADWELL(dev)) { 1314 if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
1315 dev_priv->display.setup_backlight = bdw_setup_backlight; 1315 dev_priv->display.setup_backlight = bdw_setup_backlight;
1316 dev_priv->display.enable_backlight = bdw_enable_backlight; 1316 dev_priv->display.enable_backlight = bdw_enable_backlight;
1317 dev_priv->display.disable_backlight = pch_disable_backlight; 1317 dev_priv->display.disable_backlight = pch_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 011892d5356e..043c5a8eae20 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -66,6 +66,30 @@
66 * i915.i915_enable_fbc parameter 66 * i915.i915_enable_fbc parameter
67 */ 67 */
68 68
69static void gen9_init_clock_gating(struct drm_device *dev)
70{
71 struct drm_i915_private *dev_priv = dev->dev_private;
72
73 /*
74 * WaDisableSDEUnitClockGating:skl
75 * This seems to be a pre-production w/a.
76 */
77 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
78 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
79
80 /*
81 * WaDisableDgMirrorFixInHalfSliceChicken5:skl
82 * This is a pre-production w/a.
83 */
84 I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
85 I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
86 ~GEN9_DG_MIRROR_FIX_ENABLE);
87
88 /* Wa4x4STCOptimizationDisable:skl */
89 I915_WRITE(CACHE_MODE_1,
90 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
91}
92
69static void i8xx_disable_fbc(struct drm_device *dev) 93static void i8xx_disable_fbc(struct drm_device *dev)
70{ 94{
71 struct drm_i915_private *dev_priv = dev->dev_private; 95 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6298,7 +6322,7 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
6298 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); 6322 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
6299 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 6323 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
6300 6324
6301 if (IS_BROADWELL(dev)) 6325 if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
6302 gen8_irq_power_well_post_enable(dev_priv); 6326 gen8_irq_power_well_post_enable(dev_priv);
6303} 6327}
6304 6328
@@ -7408,7 +7432,9 @@ void intel_init_pm(struct drm_device *dev)
7408 i915_ironlake_get_mem_freq(dev); 7432 i915_ironlake_get_mem_freq(dev);
7409 7433
7410 /* For FIFO watermark updates */ 7434 /* For FIFO watermark updates */
7411 if (HAS_PCH_SPLIT(dev)) { 7435 if (IS_GEN9(dev)) {
7436 dev_priv->display.init_clock_gating = gen9_init_clock_gating;
7437 } else if (HAS_PCH_SPLIT(dev)) {
7412 ilk_setup_wm_latency(dev); 7438 ilk_setup_wm_latency(dev);
7413 7439
7414 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] && 7440 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
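
gen9_init_clock_gating() above mixes two register-update idioms: plain read-modify-write for GEN8_UCGCTL6 and GEN9_HALF_SLICE_CHICKEN5, and a masked write for CACHE_MODE_1, where _MASKED_BIT_ENABLE() places a write-enable mask in the upper 16 bits per the driver's usual masked-register convention. Below is a toy model of the masked-write behaviour; the example bit value is arbitrary, not the real GEN8_4x4_STC_OPTIMIZATION_DISABLE.

#include <stdio.h>

#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))  /* mask in high half, value in low half */

/* Toy masked register: high 16 bits of the write select which low bits change. */
static unsigned int masked_reg_write(unsigned int old, unsigned int val)
{
	unsigned int mask = val >> 16;

	return (old & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int cache_mode_1 = 0x0000;
	unsigned int example_bit = 1 << 2;  /* arbitrary bit, not the real define */

	cache_mode_1 = masked_reg_write(cache_mode_1, _MASKED_BIT_ENABLE(example_bit));
	printf("CACHE_MODE_1 = 0x%04x\n", cache_mode_1);  /* 0x0004, other bits untouched */
	return 0;
}
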
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c21aaad55982..cc50bf65d35a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -827,7 +827,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
827 * 827 *
828 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv 828 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
829 */ 829 */
830 if (INTEL_INFO(dev)->gen >= 6) 830 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
831 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 831 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
832 832
833 /* Required for the hardware to program scanline values for waiting */ 833 /* Required for the hardware to program scanline values for waiting */
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 78044bbed8c9..750b634d45ec 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -139,6 +139,184 @@ static void intel_update_primary_plane(struct intel_crtc *crtc)
139} 139}
140 140
141static void 141static void
142skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
143 struct drm_framebuffer *fb,
144 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
145 unsigned int crtc_w, unsigned int crtc_h,
146 uint32_t x, uint32_t y,
147 uint32_t src_w, uint32_t src_h)
148{
149 struct drm_device *dev = drm_plane->dev;
150 struct drm_i915_private *dev_priv = dev->dev_private;
151 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
152 const int pipe = intel_plane->pipe;
153 const int plane = intel_plane->plane + 1;
154 u32 plane_ctl, stride;
155 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
156
157 plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
158
159 /* Mask out pixel format bits in case we change it */
160 plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
161 plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
162 plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
163 plane_ctl &= ~PLANE_CTL_TILED_MASK;
164 plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
165
166 /* Trickle feed has to be enabled */
167 plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
168
169 switch (fb->pixel_format) {
170 case DRM_FORMAT_RGB565:
171 plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
172 break;
173 case DRM_FORMAT_XBGR8888:
174 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
175 break;
176 case DRM_FORMAT_XRGB8888:
177 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
178 break;
179 /*
180 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
181 * to be already pre-multiplied. We need to add a knob (or a different
182 * DRM_FORMAT) for user-space to configure that.
183 */
184 case DRM_FORMAT_ABGR8888:
185 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
186 PLANE_CTL_ORDER_RGBX |
187 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
188 break;
189 case DRM_FORMAT_ARGB8888:
190 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
191 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
192 break;
193 case DRM_FORMAT_YUYV:
194 plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
195 break;
196 case DRM_FORMAT_YVYU:
197 plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
198 break;
199 case DRM_FORMAT_UYVY:
200 plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
201 break;
202 case DRM_FORMAT_VYUY:
203 plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
204 break;
205 default:
206 BUG();
207 }
208
209 switch (obj->tiling_mode) {
210 case I915_TILING_NONE:
211 stride = fb->pitches[0] >> 6;
212 break;
213 case I915_TILING_X:
214 plane_ctl |= PLANE_CTL_TILED_X;
215 stride = fb->pitches[0] >> 9;
216 break;
217 default:
218 BUG();
219 }
220
221 plane_ctl |= PLANE_CTL_ENABLE;
222 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
223
224 intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
225 pixel_size, true,
226 src_w != crtc_w || src_h != crtc_h);
227
228 /* Sizes are 0 based */
229 src_w--;
230 src_h--;
231 crtc_w--;
232 crtc_h--;
233
234 I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
235 I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
236 I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
237 I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
238 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
239 I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
240 POSTING_READ(PLANE_SURF(pipe, plane));
241}
242
243static void
244skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
245{
246 struct drm_device *dev = drm_plane->dev;
247 struct drm_i915_private *dev_priv = dev->dev_private;
248 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
249 const int pipe = intel_plane->pipe;
250 const int plane = intel_plane->plane + 1;
251
252 I915_WRITE(PLANE_CTL(pipe, plane),
253 I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
254
255 /* Activate double buffered register update */
256 I915_WRITE(PLANE_CTL(pipe, plane), 0);
257 POSTING_READ(PLANE_CTL(pipe, plane));
258
259 intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
260}
261
262static int
263skl_update_colorkey(struct drm_plane *drm_plane,
264 struct drm_intel_sprite_colorkey *key)
265{
266 struct drm_device *dev = drm_plane->dev;
267 struct drm_i915_private *dev_priv = dev->dev_private;
268 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
269 const int pipe = intel_plane->pipe;
270 const int plane = intel_plane->plane;
271 u32 plane_ctl;
272
273 I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
274 I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
275 I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
276
277 plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
278 plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
279 if (key->flags & I915_SET_COLORKEY_DESTINATION)
280 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
281 else if (key->flags & I915_SET_COLORKEY_SOURCE)
282 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
283 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
284
285 POSTING_READ(PLANE_CTL(pipe, plane));
286
287 return 0;
288}
289
290static void
291skl_get_colorkey(struct drm_plane *drm_plane,
292 struct drm_intel_sprite_colorkey *key)
293{
294 struct drm_device *dev = drm_plane->dev;
295 struct drm_i915_private *dev_priv = dev->dev_private;
296 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
297 const int pipe = intel_plane->pipe;
298 const int plane = intel_plane->plane;
299 u32 plane_ctl;
300
301 key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
302 key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
303 key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
304
305 plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
306
307 switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
308 case PLANE_CTL_KEY_ENABLE_DESTINATION:
309 key->flags = I915_SET_COLORKEY_DESTINATION;
310 break;
311 case PLANE_CTL_KEY_ENABLE_SOURCE:
312 key->flags = I915_SET_COLORKEY_SOURCE;
313 break;
314 default:
315 key->flags = I915_SET_COLORKEY_NONE;
316 }
317}
318
319static void
142vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, 320vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
143 struct drm_framebuffer *fb, 321 struct drm_framebuffer *fb,
144 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 322 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
@@ -1358,6 +1536,18 @@ static uint32_t vlv_plane_formats[] = {
1358 DRM_FORMAT_VYUY, 1536 DRM_FORMAT_VYUY,
1359}; 1537};
1360 1538
1539static uint32_t skl_plane_formats[] = {
1540 DRM_FORMAT_RGB565,
1541 DRM_FORMAT_ABGR8888,
1542 DRM_FORMAT_ARGB8888,
1543 DRM_FORMAT_XBGR8888,
1544 DRM_FORMAT_XRGB8888,
1545 DRM_FORMAT_YUYV,
1546 DRM_FORMAT_YVYU,
1547 DRM_FORMAT_UYVY,
1548 DRM_FORMAT_VYUY,
1549};
1550
1361int 1551int
1362intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) 1552intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1363{ 1553{
@@ -1421,7 +1611,21 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1421 num_plane_formats = ARRAY_SIZE(snb_plane_formats); 1611 num_plane_formats = ARRAY_SIZE(snb_plane_formats);
1422 } 1612 }
1423 break; 1613 break;
1424 1614 case 9:
1615 /*
1616 * FIXME: Skylake planes can be scaled (with some restrictions),
1617 * but this is for another time.
1618 */
1619 intel_plane->can_scale = false;
1620 intel_plane->max_downscale = 1;
1621 intel_plane->update_plane = skl_update_plane;
1622 intel_plane->disable_plane = skl_disable_plane;
1623 intel_plane->update_colorkey = skl_update_colorkey;
1624 intel_plane->get_colorkey = skl_get_colorkey;
1625
1626 plane_formats = skl_plane_formats;
1627 num_plane_formats = ARRAY_SIZE(skl_plane_formats);
1628 break;
1425 default: 1629 default:
1426 kfree(intel_plane); 1630 kfree(intel_plane);
1427 return -ENODEV; 1631 return -ENODEV;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 0e99852222e1..0b0f4f85c4f2 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -965,7 +965,7 @@ static const struct register_whitelist {
965 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */ 965 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
966 uint32_t gen_bitmask; 966 uint32_t gen_bitmask;
967} whitelist[] = { 967} whitelist[] = {
968 { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) }, 968 { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
969}; 969};
970 970
971int i915_reg_read_ioctl(struct drm_device *dev, 971int i915_reg_read_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index cb5c71f4b28e..5e2f131a6a72 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -48,7 +48,7 @@ static const struct file_operations mga_driver_fops = {
48 .open = drm_open, 48 .open = drm_open,
49 .release = drm_release, 49 .release = drm_release,
50 .unlocked_ioctl = drm_ioctl, 50 .unlocked_ioctl = drm_ioctl,
51 .mmap = drm_mmap, 51 .mmap = drm_legacy_mmap,
52 .poll = drm_poll, 52 .poll = drm_poll,
53#ifdef CONFIG_COMPAT 53#ifdef CONFIG_COMPAT
54 .compat_ioctl = mga_compat_ioctl, 54 .compat_ioctl = mga_compat_ioctl,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index c03e347f3ffd..e9eea1d4e7c3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -22,6 +22,8 @@
22#include <drm/ttm/ttm_memory.h> 22#include <drm/ttm/ttm_memory.h>
23#include <drm/ttm/ttm_module.h> 23#include <drm/ttm/ttm_module.h>
24 24
25#include <drm/drm_gem.h>
26
25#include <linux/i2c.h> 27#include <linux/i2c.h>
26#include <linux/i2c-algo-bit.h> 28#include <linux/i2c-algo-bit.h>
27 29
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index be883ef5a1d3..8ac70626df6c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -428,7 +428,7 @@ int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
428 struct mga_device *mdev; 428 struct mga_device *mdev;
429 429
430 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 430 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
431 return drm_mmap(filp, vma); 431 return -EINVAL;
432 432
433 file_priv = filp->private_data; 433 file_priv = filp->private_data;
434 mdev = file_priv->minor->dev->dev_private; 434 mdev = file_priv->minor->dev->dev_private;
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 8a2c5fd0893e..afaafd42dee7 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -51,6 +51,7 @@ static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
51#include <drm/drm_crtc_helper.h> 51#include <drm/drm_crtc_helper.h>
52#include <drm/drm_fb_helper.h> 52#include <drm/drm_fb_helper.h>
53#include <drm/msm_drm.h> 53#include <drm/msm_drm.h>
54#include <drm/drm_gem.h>
54 55
55struct msm_kms; 56struct msm_kms;
56struct msm_gpu; 57struct msm_gpu;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index ae95b2d43b36..f238def41a92 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -1,6 +1,8 @@
1#ifndef __NOUVEAU_BO_H__ 1#ifndef __NOUVEAU_BO_H__
2#define __NOUVEAU_BO_H__ 2#define __NOUVEAU_BO_H__
3 3
4#include <drm/drm_gem.h>
5
4struct nouveau_channel; 6struct nouveau_channel;
5struct nouveau_fence; 7struct nouveau_fence;
6struct nouveau_vma; 8struct nouveau_vma;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index e81d086577ce..753a6def61e7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -281,7 +281,7 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
281 struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev); 281 struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
282 282
283 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 283 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
284 return drm_mmap(filp, vma); 284 return -EINVAL;
285 285
286 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); 286 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
287} 287}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 84d73a61b34b..60e47b33c801 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -26,6 +26,7 @@
26#include <drm/drmP.h> 26#include <drm/drmP.h>
27#include <drm/drm_crtc_helper.h> 27#include <drm/drm_crtc_helper.h>
28#include <drm/omap_drm.h> 28#include <drm/omap_drm.h>
29#include <drm/drm_gem.h>
29#include <linux/platform_data/omap_drm.h> 30#include <linux/platform_data/omap_drm.h>
30 31
31 32
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index d75c0a9f674f..ff0772728eb0 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -43,6 +43,8 @@
43#include <ttm/ttm_placement.h> 43#include <ttm/ttm_placement.h>
44#include <ttm/ttm_module.h> 44#include <ttm/ttm_module.h>
45 45
46#include <drm/drm_gem.h>
47
46/* just for ttm_validate_buffer */ 48/* just for ttm_validate_buffer */
47#include <ttm/ttm_execbuf_util.h> 49#include <ttm/ttm_execbuf_util.h>
48 50
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index abe945a04fd4..0cbc4c987164 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -127,7 +127,7 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
127 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { 127 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
128 pr_info("%s: vma->vm_pgoff (%ld) < DRM_FILE_PAGE_OFFSET\n", 128 pr_info("%s: vma->vm_pgoff (%ld) < DRM_FILE_PAGE_OFFSET\n",
129 __func__, vma->vm_pgoff); 129 __func__, vma->vm_pgoff);
130 return drm_mmap(filp, vma); 130 return -EINVAL;
131 } 131 }
132 132
133 file_priv = filp->private_data; 133 file_priv = filp->private_data;
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 4a59370eb580..c57b4de63caf 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -46,7 +46,7 @@ static const struct file_operations r128_driver_fops = {
46 .open = drm_open, 46 .open = drm_open,
47 .release = drm_release, 47 .release = drm_release,
48 .unlocked_ioctl = drm_ioctl, 48 .unlocked_ioctl = drm_ioctl,
49 .mmap = drm_mmap, 49 .mmap = drm_legacy_mmap,
50 .poll = drm_poll, 50 .poll = drm_poll,
51#ifdef CONFIG_COMPAT 51#ifdef CONFIG_COMPAT
52 .compat_ioctl = r128_compat_ioctl, 52 .compat_ioctl = r128_compat_ioctl,
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 82b0e11ade89..ef91ebb7c671 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -74,6 +74,8 @@
74#include <ttm/ttm_module.h> 74#include <ttm/ttm_module.h>
75#include <ttm/ttm_execbuf_util.h> 75#include <ttm/ttm_execbuf_util.h>
76 76
77#include <drm/drm_gem.h>
78
77#include "radeon_family.h" 79#include "radeon_family.h"
78#include "radeon_mode.h" 80#include "radeon_mode.h"
79#include "radeon_reg.h" 81#include "radeon_reg.h"
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ec7e963d9bf7..de108427a197 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -38,6 +38,8 @@
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/pm_runtime.h> 39#include <linux/pm_runtime.h>
40#include <linux/vga_switcheroo.h> 40#include <linux/vga_switcheroo.h>
41#include <drm/drm_gem.h>
42
41#include "drm_crtc_helper.h" 43#include "drm_crtc_helper.h"
42/* 44/*
43 * KMS wrapper. 45 * KMS wrapper.
@@ -308,7 +310,7 @@ static const struct file_operations radeon_driver_old_fops = {
 	.open = drm_open,
 	.release = drm_release,
 	.unlocked_ioctl = drm_ioctl,
-	.mmap = drm_mmap,
+	.mmap = drm_legacy_mmap,
 	.poll = drm_poll,
 	.read = drm_read,
 #ifdef CONFIG_COMPAT
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index eca2ce60d440..738a2f248b36 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -675,10 +675,17 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
 	return &gtt->ttm.ttm;
 }
 
+static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
+{
+	if (!ttm || ttm->func != &radeon_backend_func)
+		return NULL;
+	return (struct radeon_ttm_tt *)ttm;
+}
+
 static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 {
+	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
 	struct radeon_device *rdev;
-	struct radeon_ttm_tt *gtt = (void *)ttm;
 	unsigned i;
 	int r;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -686,7 +693,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 	if (ttm->state != tt_unpopulated)
 		return 0;
 
-	if (gtt->userptr) {
+	if (gtt && gtt->userptr) {
 		ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL);
 		if (!ttm->sg)
 			return -ENOMEM;
@@ -741,11 +748,11 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
 	struct radeon_device *rdev;
-	struct radeon_ttm_tt *gtt = (void *)ttm;
+	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
 	unsigned i;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
-	if (gtt->userptr) {
+	if (gtt && gtt->userptr) {
 		kfree(ttm->sg);
 		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
 		return;
@@ -782,7 +789,7 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
 int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
 			      uint32_t flags)
 {
-	struct radeon_ttm_tt *gtt = (void *)ttm;
+	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
 
 	if (gtt == NULL)
 		return -EINVAL;
@@ -795,7 +802,7 @@ int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
 
 bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
 {
-	struct radeon_ttm_tt *gtt = (void *)ttm;
+	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
 
 	if (gtt == NULL)
 		return false;
@@ -805,7 +812,7 @@ bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
 
 bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
 {
-	struct radeon_ttm_tt *gtt = (void *)ttm;
+	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
 
 	if (gtt == NULL)
 		return false;
@@ -956,7 +963,7 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
 	int r;
 
 	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
-		return drm_mmap(filp, vma);
+		return -EINVAL;
 	}
 
 	file_priv = filp->private_data;
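The new radeon_ttm_tt_to_gtt() helper replaces the bare (void *) cast with a checked downcast: a ttm_tt that was not created by radeon's own backend (for instance an AGP-backed one, which uses a different ttm_backend_func) is not a struct radeon_ttm_tt, so the helper returns NULL and the userptr paths test gtt before dereferencing it. A sketch of the pattern in isolation, with example_* placeholders standing in for the radeon names and the backend assumed to be defined elsewhere in the driver:

	struct example_ttm_tt {
		struct ttm_dma_tt	ttm;	/* struct ttm_tt is its first member */
		uint64_t		userptr;
	};

	static struct ttm_backend_func example_backend_func;	/* driver's own backend */

	static struct example_ttm_tt *example_ttm_tt_to_gtt(struct ttm_tt *ttm)
	{
		if (ttm == NULL || ttm->func != &example_backend_func)
			return NULL;		/* foreign backend: not our type */
		return (struct example_ttm_tt *)ttm;
	}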
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 1b09d2182037..21aed1febeb4 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -40,7 +40,7 @@ static const struct file_operations savage_driver_fops = {
 	.open = drm_open,
 	.release = drm_release,
 	.unlocked_ioctl = drm_ioctl,
-	.mmap = drm_mmap,
+	.mmap = drm_legacy_mmap,
 	.poll = drm_poll,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = drm_compat_ioctl,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 54858e6fedaf..79bce76cb8f7 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -70,7 +70,7 @@ static const struct file_operations sis_driver_fops = {
 	.open = drm_open,
 	.release = drm_release,
 	.unlocked_ioctl = drm_ioctl,
-	.mmap = drm_mmap,
+	.mmap = drm_legacy_mmap,
 	.poll = drm_poll,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = drm_compat_ioctl,
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index df533ff999a4..fab5ebcb0fef 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -36,6 +36,7 @@
 #include "tdfx_drv.h"
 
 #include <drm/drm_pciids.h>
+#include <drm/drm_legacy.h>
 
 static struct pci_device_id pciidlist[] = {
 	tdfx_PCI_IDS
@@ -46,7 +47,7 @@ static const struct file_operations tdfx_driver_fops = {
 	.open = drm_open,
 	.release = drm_release,
 	.unlocked_ioctl = drm_ioctl,
-	.mmap = drm_mmap,
+	.mmap = drm_legacy_mmap,
 	.poll = drm_poll,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = drm_compat_ioctl,
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 43a25c853357..6538b56780c2 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -15,6 +15,7 @@
 
 #include <drm/drm.h>
 #include <drm/drmP.h>
+#include <drm/drm_gem.h>
 
 #define TEGRA_BO_BOTTOM_UP (1 << 0)
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 824af90cbe31..882cccdad272 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -480,28 +480,24 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 
 pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 {
+	/* Cached mappings need no adjustment */
+	if (caching_flags & TTM_PL_FLAG_CACHED)
+		return tmp;
+
 #if defined(__i386__) || defined(__x86_64__)
 	if (caching_flags & TTM_PL_FLAG_WC)
 		tmp = pgprot_writecombine(tmp);
 	else if (boot_cpu_data.x86 > 3)
 		tmp = pgprot_noncached(tmp);
-
-#elif defined(__powerpc__)
-	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
-		pgprot_val(tmp) |= _PAGE_NO_CACHE;
-		if (caching_flags & TTM_PL_FLAG_UNCACHED)
-			pgprot_val(tmp) |= _PAGE_GUARDED;
-	}
 #endif
-#if defined(__ia64__) || defined(__arm__)
+#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__)
 	if (caching_flags & TTM_PL_FLAG_WC)
 		tmp = pgprot_writecombine(tmp);
 	else
 		tmp = pgprot_noncached(tmp);
 #endif
 #if defined(__sparc__) || defined(__mips__)
-	if (!(caching_flags & TTM_PL_FLAG_CACHED))
-		tmp = pgprot_noncached(tmp);
+	tmp = pgprot_noncached(tmp);
 #endif
 	return tmp;
 }
@@ -560,9 +556,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 	 * We need to use vmap to get the desired page protection
 	 * or to make the buffer object look contiguous.
 	 */
-	prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
-		PAGE_KERNEL :
-		ttm_io_prot(mem->placement, PAGE_KERNEL);
+	prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
 	map->bo_kmap_type = ttm_bo_map_vmap;
 	map->virtual = vmap(ttm->pages + start_page, num_pages,
 			    0, prot);
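Folding the TTM_PL_FLAG_CACHED check into ttm_io_prot() itself (early return of the unmodified pgprot) is what lets the callers shrink: ttm_bo_kmap_ttm() above and ttm_bo_vm_fault() below no longer special-case cached placements. A one-line sketch of the resulting caller pattern, assuming a populated ttm_buffer_object *bo:

	/* cached placements come back unchanged, everything else gets WC/UC */
	pgprot_t prot = ttm_io_prot(bo->mem.placement, PAGE_KERNEL);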
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index d05437f219e9..8fb7213277cc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -197,9 +197,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 						cvma.vm_page_prot);
 	} else {
 		ttm = bo->ttm;
-		if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
-			cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
-							cvma.vm_page_prot);
+		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+						cvma.vm_page_prot);
 
 		/* Allocate all page at once, most common usage */
 		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 51e10ee77f39..c7490a2489a7 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -15,6 +15,7 @@
 #define UDL_DRV_H
 
 #include <linux/usb.h>
+#include <drm/drm_gem.h>
 
 #define DRIVER_NAME "udl"
 #define DRIVER_DESC "DisplayLink"
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index c16ffa63ded6..ed8aa8ff861a 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -62,7 +62,7 @@ static const struct file_operations via_driver_fops = {
 	.open = drm_open,
 	.release = drm_release,
 	.unlocked_ioctl = drm_ioctl,
-	.mmap = drm_mmap,
+	.mmap = drm_legacy_mmap,
 	.poll = drm_poll,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = drm_compat_ioctl,