-rw-r--r--  drivers/gpu/drm/drm_atomic.c                | 106
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h         |   1
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c           |   7
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 101
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.h |   6
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 244
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.h |   8
-rw-r--r--  drivers/gpu/drm/sti/sti_crtc.c              |  46
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.c               |  87
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.h               |   6
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.c               |  85
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c              |  38
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.h              |  17
-rw-r--r--  drivers/gpu/drm/sti/sti_hqvdp.c             |   6
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.h             |   2
-rw-r--r--  drivers/gpu/drm/sti/sti_vtg.c               |  58
-rw-r--r--  include/uapi/linux/Kbuild                   |   1
17 files changed, 508 insertions, 311 deletions
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c97588a28216..11f102e7ddfd 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -2046,6 +2046,112 @@ static void complete_crtc_signaling(struct drm_device *dev,
2046 kfree(fence_state); 2046 kfree(fence_state);
2047} 2047}
2048 2048
2049int drm_atomic_remove_fb(struct drm_framebuffer *fb)
2050{
2051 struct drm_modeset_acquire_ctx ctx;
2052 struct drm_device *dev = fb->dev;
2053 struct drm_atomic_state *state;
2054 struct drm_plane *plane;
2055 struct drm_connector *conn;
2056 struct drm_connector_state *conn_state;
2057 int i, ret = 0;
2058 unsigned plane_mask, disable_crtcs = false;
2059
2060 state = drm_atomic_state_alloc(dev);
2061 if (!state)
2062 return -ENOMEM;
2063
2064 drm_modeset_acquire_init(&ctx, 0);
2065 state->acquire_ctx = &ctx;
2066
2067retry:
2068 plane_mask = 0;
2069 ret = drm_modeset_lock_all_ctx(dev, &ctx);
2070 if (ret)
2071 goto unlock;
2072
2073 drm_for_each_plane(plane, dev) {
2074 struct drm_plane_state *plane_state;
2075
2076 if (plane->state->fb != fb)
2077 continue;
2078
2079 plane_state = drm_atomic_get_plane_state(state, plane);
2080 if (IS_ERR(plane_state)) {
2081 ret = PTR_ERR(plane_state);
2082 goto unlock;
2083 }
2084
2085 /*
2086 * Some drivers do not support keeping crtc active with the
2087 * primary plane disabled. If we fail to commit with -EINVAL
2088 * then we will try to perform the same commit but with all
2089 * crtc's disabled for primary planes as well.
2090 */
2091 if (disable_crtcs && plane_state->crtc->primary == plane) {
2092 struct drm_crtc_state *crtc_state;
2093
2094 crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
2095
2096 ret = drm_atomic_add_affected_connectors(state, plane_state->crtc);
2097 if (ret)
2098 goto unlock;
2099
2100 crtc_state->active = false;
2101 ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
2102 if (ret)
2103 goto unlock;
2104 }
2105
2106 drm_atomic_set_fb_for_plane(plane_state, NULL);
2107 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
2108 if (ret)
2109 goto unlock;
2110
2111 plane_mask |= BIT(drm_plane_index(plane));
2112
2113 plane->old_fb = plane->fb;
2114 }
2115
2116 /* This list is only not empty when disable_crtcs is set. */
2117 for_each_connector_in_state(state, conn, conn_state, i) {
2118 ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
2119
2120 if (ret)
2121 goto unlock;
2122 }
2123
2124 if (plane_mask)
2125 ret = drm_atomic_commit(state);
2126
2127unlock:
2128 if (plane_mask)
2129 drm_atomic_clean_old_fb(dev, plane_mask, ret);
2130
2131 if (ret == -EDEADLK) {
2132 drm_modeset_backoff(&ctx);
2133 goto retry;
2134 }
2135
2136 drm_atomic_state_put(state);
2137
2138 if (ret == -EINVAL && !disable_crtcs) {
2139 disable_crtcs = true;
2140
2141 state = drm_atomic_state_alloc(dev);
2142 if (state) {
2143 state->acquire_ctx = &ctx;
2144 goto retry;
2145 }
2146 ret = -ENOMEM;
2147 }
2148
2149 drm_modeset_drop_locks(&ctx);
2150 drm_modeset_acquire_fini(&ctx);
2151
2152 return ret;
2153}
2154
2049int drm_mode_atomic_ioctl(struct drm_device *dev, 2155int drm_mode_atomic_ioctl(struct drm_device *dev,
2050 void *data, struct drm_file *file_priv) 2156 void *data, struct drm_file *file_priv)
2051{ 2157{
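
Note: the drm_atomic_remove_fb() helper added above is built around the standard DRM modeset acquire-context retry idiom: take every modeset lock through a drm_modeset_acquire_ctx and, whenever a lock attempt returns -EDEADLK, back off and retry the whole sequence. A minimal sketch of that idiom follows; do_update() is a hypothetical placeholder for the driver-specific work and is not part of this patch.

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret)
		goto out;

	ret = do_update(dev, &ctx);	/* hypothetical: whatever needs the locks */
out:
	if (ret == -EDEADLK) {
		/* drop contended locks and restart with the same ctx */
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
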
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 955c5690bf64..e0678f8a51cf 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -183,6 +183,7 @@ int drm_atomic_get_property(struct drm_mode_object *obj,
183 struct drm_property *property, uint64_t *val); 183 struct drm_property *property, uint64_t *val);
184int drm_mode_atomic_ioctl(struct drm_device *dev, 184int drm_mode_atomic_ioctl(struct drm_device *dev,
185 void *data, struct drm_file *file_priv); 185 void *data, struct drm_file *file_priv);
186int drm_atomic_remove_fb(struct drm_framebuffer *fb);
186 187
187 188
188/* drm_plane.c */ 189/* drm_plane.c */
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 28a0108a1ab8..c0e593a7f9b4 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -773,6 +773,12 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
773 * in this manner. 773 * in this manner.
774 */ 774 */
775 if (drm_framebuffer_read_refcount(fb) > 1) { 775 if (drm_framebuffer_read_refcount(fb) > 1) {
776 if (drm_drv_uses_atomic_modeset(dev)) {
777 int ret = drm_atomic_remove_fb(fb);
778 WARN(ret, "atomic remove_fb failed with %i\n", ret);
779 goto out;
780 }
781
776 drm_modeset_lock_all(dev); 782 drm_modeset_lock_all(dev);
777 /* remove from any CRTC */ 783 /* remove from any CRTC */
778 drm_for_each_crtc(crtc, dev) { 784 drm_for_each_crtc(crtc, dev) {
@@ -790,6 +796,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
790 drm_modeset_unlock_all(dev); 796 drm_modeset_unlock_all(dev);
791 } 797 }
792 798
799out:
793 drm_framebuffer_unreference(fb); 800 drm_framebuffer_unreference(fb);
794} 801}
795EXPORT_SYMBOL(drm_framebuffer_remove); 802EXPORT_SYMBOL(drm_framebuffer_remove);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index c30d649cb147..b360e6251836 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -14,19 +14,19 @@
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 */ 15 */
16 16
17#include <asm/dma-iommu.h>
18
19#include <drm/drmP.h> 17#include <drm/drmP.h>
20#include <drm/drm_crtc_helper.h> 18#include <drm/drm_crtc_helper.h>
21#include <drm/drm_fb_helper.h> 19#include <drm/drm_fb_helper.h>
22#include <drm/drm_gem_cma_helper.h> 20#include <drm/drm_gem_cma_helper.h>
23#include <drm/drm_of.h> 21#include <drm/drm_of.h>
24#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
23#include <linux/dma-iommu.h>
25#include <linux/pm_runtime.h> 24#include <linux/pm_runtime.h>
26#include <linux/module.h> 25#include <linux/module.h>
27#include <linux/of_graph.h> 26#include <linux/of_graph.h>
28#include <linux/component.h> 27#include <linux/component.h>
29#include <linux/console.h> 28#include <linux/console.h>
29#include <linux/iommu.h>
30 30
31#include "rockchip_drm_drv.h" 31#include "rockchip_drm_drv.h"
32#include "rockchip_drm_fb.h" 32#include "rockchip_drm_fb.h"
@@ -50,28 +50,31 @@ static struct drm_driver rockchip_drm_driver;
50int rockchip_drm_dma_attach_device(struct drm_device *drm_dev, 50int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
51 struct device *dev) 51 struct device *dev)
52{ 52{
53 struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping; 53 struct rockchip_drm_private *private = drm_dev->dev_private;
54 int ret; 54 int ret;
55 55
56 if (!is_support_iommu) 56 if (!is_support_iommu)
57 return 0; 57 return 0;
58 58
59 ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); 59 ret = iommu_attach_device(private->domain, dev);
60 if (ret) 60 if (ret) {
61 dev_err(dev, "Failed to attach iommu device\n");
61 return ret; 62 return ret;
63 }
62 64
63 dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); 65 return 0;
64
65 return arm_iommu_attach_device(dev, mapping);
66} 66}
67 67
68void rockchip_drm_dma_detach_device(struct drm_device *drm_dev, 68void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
69 struct device *dev) 69 struct device *dev)
70{ 70{
71 struct rockchip_drm_private *private = drm_dev->dev_private;
72 struct iommu_domain *domain = private->domain;
73
71 if (!is_support_iommu) 74 if (!is_support_iommu)
72 return; 75 return;
73 76
74 arm_iommu_detach_device(dev); 77 iommu_detach_device(domain, dev);
75} 78}
76 79
77int rockchip_register_crtc_funcs(struct drm_crtc *crtc, 80int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
@@ -123,11 +126,46 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev,
123 priv->crtc_funcs[pipe]->disable_vblank(crtc); 126 priv->crtc_funcs[pipe]->disable_vblank(crtc);
124} 127}
125 128
129static int rockchip_drm_init_iommu(struct drm_device *drm_dev)
130{
131 struct rockchip_drm_private *private = drm_dev->dev_private;
132 struct iommu_domain_geometry *geometry;
133 u64 start, end;
134
135 if (!is_support_iommu)
136 return 0;
137
138 private->domain = iommu_domain_alloc(&platform_bus_type);
139 if (!private->domain)
140 return -ENOMEM;
141
142 geometry = &private->domain->geometry;
143 start = geometry->aperture_start;
144 end = geometry->aperture_end;
145
146 DRM_DEBUG("IOMMU context initialized (aperture: %#llx-%#llx)\n",
147 start, end);
148 drm_mm_init(&private->mm, start, end - start + 1);
149 mutex_init(&private->mm_lock);
150
151 return 0;
152}
153
154static void rockchip_iommu_cleanup(struct drm_device *drm_dev)
155{
156 struct rockchip_drm_private *private = drm_dev->dev_private;
157
158 if (!is_support_iommu)
159 return;
160
161 drm_mm_takedown(&private->mm);
162 iommu_domain_free(private->domain);
163}
164
126static int rockchip_drm_bind(struct device *dev) 165static int rockchip_drm_bind(struct device *dev)
127{ 166{
128 struct drm_device *drm_dev; 167 struct drm_device *drm_dev;
129 struct rockchip_drm_private *private; 168 struct rockchip_drm_private *private;
130 struct dma_iommu_mapping *mapping = NULL;
131 int ret; 169 int ret;
132 170
133 drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev); 171 drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
@@ -151,38 +189,14 @@ static int rockchip_drm_bind(struct device *dev)
151 189
152 rockchip_drm_mode_config_init(drm_dev); 190 rockchip_drm_mode_config_init(drm_dev);
153 191
154 dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), 192 ret = rockchip_drm_init_iommu(drm_dev);
155 GFP_KERNEL); 193 if (ret)
156 if (!dev->dma_parms) {
157 ret = -ENOMEM;
158 goto err_config_cleanup; 194 goto err_config_cleanup;
159 }
160
161 if (is_support_iommu) {
162 /* TODO(djkurtz): fetch the mapping start/size from somewhere */
163 mapping = arm_iommu_create_mapping(&platform_bus_type,
164 0x00000000,
165 SZ_2G);
166 if (IS_ERR(mapping)) {
167 ret = PTR_ERR(mapping);
168 goto err_config_cleanup;
169 }
170
171 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
172 if (ret)
173 goto err_release_mapping;
174
175 dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
176
177 ret = arm_iommu_attach_device(dev, mapping);
178 if (ret)
179 goto err_release_mapping;
180 }
181 195
182 /* Try to bind all sub drivers. */ 196 /* Try to bind all sub drivers. */
183 ret = component_bind_all(dev, drm_dev); 197 ret = component_bind_all(dev, drm_dev);
184 if (ret) 198 if (ret)
185 goto err_detach_device; 199 goto err_iommu_cleanup;
186 200
187 /* init kms poll for handling hpd */ 201 /* init kms poll for handling hpd */
188 drm_kms_helper_poll_init(drm_dev); 202 drm_kms_helper_poll_init(drm_dev);
@@ -207,8 +221,6 @@ static int rockchip_drm_bind(struct device *dev)
207 if (ret) 221 if (ret)
208 goto err_fbdev_fini; 222 goto err_fbdev_fini;
209 223
210 if (is_support_iommu)
211 arm_iommu_release_mapping(mapping);
212 return 0; 224 return 0;
213err_fbdev_fini: 225err_fbdev_fini:
214 rockchip_drm_fbdev_fini(drm_dev); 226 rockchip_drm_fbdev_fini(drm_dev);
@@ -217,12 +229,8 @@ err_vblank_cleanup:
217err_kms_helper_poll_fini: 229err_kms_helper_poll_fini:
218 drm_kms_helper_poll_fini(drm_dev); 230 drm_kms_helper_poll_fini(drm_dev);
219 component_unbind_all(dev, drm_dev); 231 component_unbind_all(dev, drm_dev);
220err_detach_device: 232err_iommu_cleanup:
221 if (is_support_iommu) 233 rockchip_iommu_cleanup(drm_dev);
222 arm_iommu_detach_device(dev);
223err_release_mapping:
224 if (is_support_iommu)
225 arm_iommu_release_mapping(mapping);
226err_config_cleanup: 234err_config_cleanup:
227 drm_mode_config_cleanup(drm_dev); 235 drm_mode_config_cleanup(drm_dev);
228 drm_dev->dev_private = NULL; 236 drm_dev->dev_private = NULL;
@@ -239,8 +247,7 @@ static void rockchip_drm_unbind(struct device *dev)
239 drm_vblank_cleanup(drm_dev); 247 drm_vblank_cleanup(drm_dev);
240 drm_kms_helper_poll_fini(drm_dev); 248 drm_kms_helper_poll_fini(drm_dev);
241 component_unbind_all(dev, drm_dev); 249 component_unbind_all(dev, drm_dev);
242 if (is_support_iommu) 250 rockchip_iommu_cleanup(drm_dev);
243 arm_iommu_detach_device(dev);
244 drm_mode_config_cleanup(drm_dev); 251 drm_mode_config_cleanup(drm_dev);
245 drm_dev->dev_private = NULL; 252 drm_dev->dev_private = NULL;
246 drm_dev_unregister(drm_dev); 253 drm_dev_unregister(drm_dev);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index fb6226cf84b7..adc39302bec5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -30,6 +30,7 @@
30 30
31struct drm_device; 31struct drm_device;
32struct drm_connector; 32struct drm_connector;
33struct iommu_domain;
33 34
34/* 35/*
35 * Rockchip drm private crtc funcs. 36 * Rockchip drm private crtc funcs.
@@ -60,7 +61,10 @@ struct rockchip_drm_private {
60 struct drm_gem_object *fbdev_bo; 61 struct drm_gem_object *fbdev_bo;
61 const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC]; 62 const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
62 struct drm_atomic_state *state; 63 struct drm_atomic_state *state;
63 64 struct iommu_domain *domain;
65 /* protect drm_mm on multi-threads */
66 struct mutex mm_lock;
67 struct drm_mm mm;
64 struct list_head psr_list; 68 struct list_head psr_list;
65 spinlock_t psr_list_lock; 69 spinlock_t psr_list_lock;
66}; 70};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index b70f9423379c..df9e57064f19 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -16,11 +16,146 @@
16#include <drm/drmP.h> 16#include <drm/drmP.h>
17#include <drm/drm_gem.h> 17#include <drm/drm_gem.h>
18#include <drm/drm_vma_manager.h> 18#include <drm/drm_vma_manager.h>
19#include <linux/iommu.h>
19 20
20#include "rockchip_drm_drv.h" 21#include "rockchip_drm_drv.h"
21#include "rockchip_drm_gem.h" 22#include "rockchip_drm_gem.h"
22 23
23static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, 24static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
25{
26 struct drm_device *drm = rk_obj->base.dev;
27 struct rockchip_drm_private *private = drm->dev_private;
28 int prot = IOMMU_READ | IOMMU_WRITE;
29 ssize_t ret;
30
31 mutex_lock(&private->mm_lock);
32
33 ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
34 rk_obj->base.size, PAGE_SIZE,
35 0, 0);
36
37 mutex_unlock(&private->mm_lock);
38 if (ret < 0) {
39 DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
40 return ret;
41 }
42
43 rk_obj->dma_addr = rk_obj->mm.start;
44
45 ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
46 rk_obj->sgt->nents, prot);
47 if (ret < rk_obj->base.size) {
48 DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
49 ret, rk_obj->base.size);
50 ret = -ENOMEM;
51 goto err_remove_node;
52 }
53
54 rk_obj->size = ret;
55
56 return 0;
57
58err_remove_node:
59 drm_mm_remove_node(&rk_obj->mm);
60
61 return ret;
62}
63
64static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
65{
66 struct drm_device *drm = rk_obj->base.dev;
67 struct rockchip_drm_private *private = drm->dev_private;
68
69 iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);
70
71 mutex_lock(&private->mm_lock);
72
73 drm_mm_remove_node(&rk_obj->mm);
74
75 mutex_unlock(&private->mm_lock);
76
77 return 0;
78}
79
80static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
81{
82 struct drm_device *drm = rk_obj->base.dev;
83 int ret, i;
84 struct scatterlist *s;
85
86 rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
87 if (IS_ERR(rk_obj->pages))
88 return PTR_ERR(rk_obj->pages);
89
90 rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
91
92 rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
93 if (IS_ERR(rk_obj->sgt)) {
94 ret = PTR_ERR(rk_obj->sgt);
95 goto err_put_pages;
96 }
97
98 /*
99 * Fake up the SG table so that dma_sync_sg_for_device() can be used
100 * to flush the pages associated with it.
101 *
102 * TODO: Replace this by drm_clflush_sg() once it can be implemented
103 * without relying on symbols that are not exported.
104 */
105 for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
106 sg_dma_address(s) = sg_phys(s);
107
108 dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
109 DMA_TO_DEVICE);
110
111 return 0;
112
113err_put_pages:
114 drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
115 return ret;
116}
117
118static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
119{
120 sg_free_table(rk_obj->sgt);
121 kfree(rk_obj->sgt);
122 drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
123}
124
125static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
126 bool alloc_kmap)
127{
128 int ret;
129
130 ret = rockchip_gem_get_pages(rk_obj);
131 if (ret < 0)
132 return ret;
133
134 ret = rockchip_gem_iommu_map(rk_obj);
135 if (ret < 0)
136 goto err_free;
137
138 if (alloc_kmap) {
139 rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
140 pgprot_writecombine(PAGE_KERNEL));
141 if (!rk_obj->kvaddr) {
142 DRM_ERROR("failed to vmap() buffer\n");
143 ret = -ENOMEM;
144 goto err_unmap;
145 }
146 }
147
148 return 0;
149
150err_unmap:
151 rockchip_gem_iommu_unmap(rk_obj);
152err_free:
153 rockchip_gem_put_pages(rk_obj);
154
155 return ret;
156}
157
158static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
24 bool alloc_kmap) 159 bool alloc_kmap)
25{ 160{
26 struct drm_gem_object *obj = &rk_obj->base; 161 struct drm_gem_object *obj = &rk_obj->base;
@@ -42,7 +177,27 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
42 return 0; 177 return 0;
43} 178}
44 179
45static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) 180static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
181 bool alloc_kmap)
182{
183 struct drm_gem_object *obj = &rk_obj->base;
184 struct drm_device *drm = obj->dev;
185 struct rockchip_drm_private *private = drm->dev_private;
186
187 if (private->domain)
188 return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
189 else
190 return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
191}
192
193static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
194{
195 vunmap(rk_obj->kvaddr);
196 rockchip_gem_iommu_unmap(rk_obj);
197 rockchip_gem_put_pages(rk_obj);
198}
199
200static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
46{ 201{
47 struct drm_gem_object *obj = &rk_obj->base; 202 struct drm_gem_object *obj = &rk_obj->base;
48 struct drm_device *drm = obj->dev; 203 struct drm_device *drm = obj->dev;
@@ -51,23 +206,68 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
51 rk_obj->dma_attrs); 206 rk_obj->dma_attrs);
52} 207}
53 208
54static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, 209static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
55 struct vm_area_struct *vma) 210{
211 if (rk_obj->pages)
212 rockchip_gem_free_iommu(rk_obj);
213 else
214 rockchip_gem_free_dma(rk_obj);
215}
56 216
217static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
218 struct vm_area_struct *vma)
57{ 219{
220 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
221 unsigned int i, count = obj->size >> PAGE_SHIFT;
222 unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
223 unsigned long uaddr = vma->vm_start;
224 unsigned long offset = vma->vm_pgoff;
225 unsigned long end = user_count + offset;
58 int ret; 226 int ret;
227
228 if (user_count == 0)
229 return -ENXIO;
230 if (end > count)
231 return -ENXIO;
232
233 for (i = offset; i < end; i++) {
234 ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
235 if (ret)
236 return ret;
237 uaddr += PAGE_SIZE;
238 }
239
240 return 0;
241}
242
243static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
244 struct vm_area_struct *vma)
245{
59 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); 246 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
60 struct drm_device *drm = obj->dev; 247 struct drm_device *drm = obj->dev;
61 248
249 return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
250 obj->size, rk_obj->dma_attrs);
251}
252
253static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
254 struct vm_area_struct *vma)
255{
256 int ret;
257 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
258
62 /* 259 /*
63 * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear 260 * We allocated a struct page table for rk_obj, so clear
64 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). 261 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
65 */ 262 */
66 vma->vm_flags &= ~VM_PFNMAP; 263 vma->vm_flags &= ~VM_PFNMAP;
67 vma->vm_pgoff = 0; 264 vma->vm_pgoff = 0;
68 265
69 ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, 266 if (rk_obj->pages)
70 obj->size, rk_obj->dma_attrs); 267 ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
268 else
269 ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
270
71 if (ret) 271 if (ret)
72 drm_gem_vm_close(vma); 272 drm_gem_vm_close(vma);
73 273
@@ -101,6 +301,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
101 return rockchip_drm_gem_object_mmap(obj, vma); 301 return rockchip_drm_gem_object_mmap(obj, vma);
102} 302}
103 303
304static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
305{
306 drm_gem_object_release(&rk_obj->base);
307 kfree(rk_obj);
308}
309
104struct rockchip_gem_object * 310struct rockchip_gem_object *
105 rockchip_gem_create_object(struct drm_device *drm, unsigned int size, 311 rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
106 bool alloc_kmap) 312 bool alloc_kmap)
@@ -117,7 +323,7 @@ struct rockchip_gem_object *
117 323
118 obj = &rk_obj->base; 324 obj = &rk_obj->base;
119 325
120 drm_gem_private_object_init(drm, obj, size); 326 drm_gem_object_init(drm, obj, size);
121 327
122 ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap); 328 ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
123 if (ret) 329 if (ret)
@@ -126,7 +332,7 @@ struct rockchip_gem_object *
126 return rk_obj; 332 return rk_obj;
127 333
128err_free_rk_obj: 334err_free_rk_obj:
129 kfree(rk_obj); 335 rockchip_gem_release_object(rk_obj);
130 return ERR_PTR(ret); 336 return ERR_PTR(ret);
131} 337}
132 338
@@ -138,13 +344,11 @@ void rockchip_gem_free_object(struct drm_gem_object *obj)
138{ 344{
139 struct rockchip_gem_object *rk_obj; 345 struct rockchip_gem_object *rk_obj;
140 346
141 drm_gem_free_mmap_offset(obj);
142
143 rk_obj = to_rockchip_obj(obj); 347 rk_obj = to_rockchip_obj(obj);
144 348
145 rockchip_gem_free_buf(rk_obj); 349 rockchip_gem_free_buf(rk_obj);
146 350
147 kfree(rk_obj); 351 rockchip_gem_release_object(rk_obj);
148} 352}
149 353
150/* 354/*
@@ -253,6 +457,9 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
253 struct sg_table *sgt; 457 struct sg_table *sgt;
254 int ret; 458 int ret;
255 459
460 if (rk_obj->pages)
461 return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
462
256 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 463 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
257 if (!sgt) 464 if (!sgt)
258 return ERR_PTR(-ENOMEM); 465 return ERR_PTR(-ENOMEM);
@@ -273,6 +480,10 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
273{ 480{
274 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); 481 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
275 482
483 if (rk_obj->pages)
484 return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
485 pgprot_writecombine(PAGE_KERNEL));
486
276 if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) 487 if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
277 return NULL; 488 return NULL;
278 489
@@ -281,5 +492,12 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
281 492
282void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 493void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
283{ 494{
284 /* Nothing to do */ 495 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
496
497 if (rk_obj->pages) {
498 vunmap(vaddr);
499 return;
500 }
501
502 /* Nothing to do if allocated by DMA mapping API. */
285} 503}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
index 18b3488db4ec..3f6ea4d18a5c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -23,7 +23,15 @@ struct rockchip_gem_object {
23 23
24 void *kvaddr; 24 void *kvaddr;
25 dma_addr_t dma_addr; 25 dma_addr_t dma_addr;
26 /* Used when IOMMU is disabled */
26 unsigned long dma_attrs; 27 unsigned long dma_attrs;
28
29 /* Used when IOMMU is enabled */
30 struct drm_mm_node mm;
31 unsigned long num_pages;
32 struct page **pages;
33 struct sg_table *sgt;
34 size_t size;
27}; 35};
28 36
29struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj); 37struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
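
Note: the rockchip GEM rework above manages the display IOMMU address space by hand: a drm_mm allocator (guarded by mm_lock) hands out IOVA ranges, and iommu_map_sg()/iommu_unmap() populate and tear down mappings in the shared domain. A condensed sketch of that map/unmap pairing, mirroring the calls used in rockchip_gem_iommu_map()/rockchip_gem_iommu_unmap() above (error handling trimmed; priv, obj and sgt abbreviate the structures from the patch):

	/* reserve an IOVA range for the buffer, then map its pages into it */
	mutex_lock(&priv->mm_lock);
	ret = drm_mm_insert_node_generic(&priv->mm, &obj->mm,
					 obj->base.size, PAGE_SIZE, 0, 0);
	mutex_unlock(&priv->mm_lock);

	mapped = iommu_map_sg(priv->domain, obj->mm.start, sgt->sgl,
			      sgt->nents, IOMMU_READ | IOMMU_WRITE);

	/* teardown is the exact reverse */
	iommu_unmap(priv->domain, obj->mm.start, mapped);
	mutex_lock(&priv->mm_lock);
	drm_mm_remove_node(&obj->mm);
	mutex_unlock(&priv->mm_lock);
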
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index e992bed98dcb..d45a4335df5d 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -134,21 +134,6 @@ sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
134 sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode); 134 sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
135} 135}
136 136
137static void sti_crtc_atomic_begin(struct drm_crtc *crtc,
138 struct drm_crtc_state *old_crtc_state)
139{
140 struct sti_mixer *mixer = to_sti_mixer(crtc);
141
142 if (crtc->state->event) {
143 crtc->state->event->pipe = drm_crtc_index(crtc);
144
145 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
146
147 mixer->pending_event = crtc->state->event;
148 crtc->state->event = NULL;
149 }
150}
151
152static void sti_crtc_atomic_flush(struct drm_crtc *crtc, 137static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
153 struct drm_crtc_state *old_crtc_state) 138 struct drm_crtc_state *old_crtc_state)
154{ 139{
@@ -156,6 +141,8 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
156 struct sti_mixer *mixer = to_sti_mixer(crtc); 141 struct sti_mixer *mixer = to_sti_mixer(crtc);
157 struct sti_compositor *compo = dev_get_drvdata(mixer->dev); 142 struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
158 struct drm_plane *p; 143 struct drm_plane *p;
144 struct drm_pending_vblank_event *event;
145 unsigned long flags;
159 146
160 DRM_DEBUG_DRIVER("\n"); 147 DRM_DEBUG_DRIVER("\n");
161 148
@@ -220,13 +207,24 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
220 break; 207 break;
221 } 208 }
222 } 209 }
210
211 event = crtc->state->event;
212 if (event) {
213 crtc->state->event = NULL;
214
215 spin_lock_irqsave(&crtc->dev->event_lock, flags);
216 if (drm_crtc_vblank_get(crtc) == 0)
217 drm_crtc_arm_vblank_event(crtc, event);
218 else
219 drm_crtc_send_vblank_event(crtc, event);
220 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
221 }
223} 222}
224 223
225static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { 224static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
226 .enable = sti_crtc_enable, 225 .enable = sti_crtc_enable,
227 .disable = sti_crtc_disabling, 226 .disable = sti_crtc_disabling,
228 .mode_set_nofb = sti_crtc_mode_set_nofb, 227 .mode_set_nofb = sti_crtc_mode_set_nofb,
229 .atomic_begin = sti_crtc_atomic_begin,
230 .atomic_flush = sti_crtc_atomic_flush, 228 .atomic_flush = sti_crtc_atomic_flush,
231}; 229};
232 230
@@ -250,7 +248,6 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
250 struct sti_compositor *compo; 248 struct sti_compositor *compo;
251 struct drm_crtc *crtc = data; 249 struct drm_crtc *crtc = data;
252 struct sti_mixer *mixer; 250 struct sti_mixer *mixer;
253 unsigned long flags;
254 struct sti_private *priv; 251 struct sti_private *priv;
255 unsigned int pipe; 252 unsigned int pipe;
256 253
@@ -267,14 +264,6 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
267 264
268 drm_crtc_handle_vblank(crtc); 265 drm_crtc_handle_vblank(crtc);
269 266
270 spin_lock_irqsave(&crtc->dev->event_lock, flags);
271 if (mixer->pending_event) {
272 drm_crtc_send_vblank_event(crtc, mixer->pending_event);
273 drm_crtc_vblank_put(crtc);
274 mixer->pending_event = NULL;
275 }
276 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
277
278 if (mixer->status == STI_MIXER_DISABLING) { 267 if (mixer->status == STI_MIXER_DISABLING) {
279 struct drm_plane *p; 268 struct drm_plane *p;
280 269
@@ -317,19 +306,12 @@ void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
317 struct sti_private *priv = drm_dev->dev_private; 306 struct sti_private *priv = drm_dev->dev_private;
318 struct sti_compositor *compo = priv->compo; 307 struct sti_compositor *compo = priv->compo;
319 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe]; 308 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
320 struct drm_crtc *crtc = &compo->mixer[pipe]->drm_crtc;
321 struct sti_vtg *vtg = compo->vtg[pipe]; 309 struct sti_vtg *vtg = compo->vtg[pipe];
322 310
323 DRM_DEBUG_DRIVER("\n"); 311 DRM_DEBUG_DRIVER("\n");
324 312
325 if (sti_vtg_unregister_client(vtg, vtg_vblank_nb)) 313 if (sti_vtg_unregister_client(vtg, vtg_vblank_nb))
326 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); 314 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
327
328 /* free the resources of the pending requests */
329 if (compo->mixer[pipe]->pending_event) {
330 drm_crtc_vblank_put(crtc);
331 compo->mixer[pipe]->pending_event = NULL;
332 }
333} 315}
334 316
335static int sti_crtc_late_register(struct drm_crtc *crtc) 317static int sti_crtc_late_register(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 788feed208d7..e6c1646b9c53 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -58,7 +58,9 @@ static int sti_drm_fps_set(void *data, u64 val)
58 list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) { 58 list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
59 struct sti_plane *plane = to_sti_plane(p); 59 struct sti_plane *plane = to_sti_plane(p);
60 60
61 memset(&plane->fps_info, 0, sizeof(plane->fps_info));
61 plane->fps_info.output = (val >> i) & 1; 62 plane->fps_info.output = (val >> i) & 1;
63
62 i++; 64 i++;
63 } 65 }
64 66
@@ -115,50 +117,13 @@ err:
115 return ret; 117 return ret;
116} 118}
117 119
118static void sti_atomic_schedule(struct sti_private *private, 120static void sti_drm_dbg_cleanup(struct drm_minor *minor)
119 struct drm_atomic_state *state)
120{
121 private->commit.state = state;
122 schedule_work(&private->commit.work);
123}
124
125static void sti_atomic_complete(struct sti_private *private,
126 struct drm_atomic_state *state)
127{
128 struct drm_device *drm = private->drm_dev;
129
130 /*
131 * Everything below can be run asynchronously without the need to grab
132 * any modeset locks at all under one condition: It must be guaranteed
133 * that the asynchronous work has either been cancelled (if the driver
134 * supports it, which at least requires that the framebuffers get
135 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
136 * before the new state gets committed on the software side with
137 * drm_atomic_helper_swap_state().
138 *
139 * This scheme allows new atomic state updates to be prepared and
140 * checked in parallel to the asynchronous completion of the previous
141 * update. Which is important since compositors need to figure out the
142 * composition of the next frame right after having submitted the
143 * current layout.
144 */
145
146 drm_atomic_helper_commit_modeset_disables(drm, state);
147 drm_atomic_helper_commit_planes(drm, state, 0);
148 drm_atomic_helper_commit_modeset_enables(drm, state);
149
150 drm_atomic_helper_wait_for_vblanks(drm, state);
151
152 drm_atomic_helper_cleanup_planes(drm, state);
153 drm_atomic_state_put(state);
154}
155
156static void sti_atomic_work(struct work_struct *work)
157{ 121{
158 struct sti_private *private = container_of(work, 122 drm_debugfs_remove_files(sti_drm_dbg_list,
159 struct sti_private, commit.work); 123 ARRAY_SIZE(sti_drm_dbg_list), minor);
160 124
161 sti_atomic_complete(private, private->commit.state); 125 drm_debugfs_remove_files((struct drm_info_list *)&sti_drm_fps_fops,
126 1, minor);
162} 127}
163 128
164static int sti_atomic_check(struct drm_device *dev, 129static int sti_atomic_check(struct drm_device *dev,
@@ -181,38 +146,6 @@ static int sti_atomic_check(struct drm_device *dev,
181 return ret; 146 return ret;
182} 147}
183 148
184static int sti_atomic_commit(struct drm_device *drm,
185 struct drm_atomic_state *state, bool nonblock)
186{
187 struct sti_private *private = drm->dev_private;
188 int err;
189
190 err = drm_atomic_helper_prepare_planes(drm, state);
191 if (err)
192 return err;
193
194 /* serialize outstanding nonblocking commits */
195 mutex_lock(&private->commit.lock);
196 flush_work(&private->commit.work);
197
198 /*
199 * This is the point of no return - everything below never fails except
200 * when the hw goes bonghits. Which means we can commit the new state on
201 * the software side now.
202 */
203
204 drm_atomic_helper_swap_state(state, true);
205
206 drm_atomic_state_get(state);
207 if (nonblock)
208 sti_atomic_schedule(private, state);
209 else
210 sti_atomic_complete(private, state);
211
212 mutex_unlock(&private->commit.lock);
213 return 0;
214}
215
216static void sti_output_poll_changed(struct drm_device *ddev) 149static void sti_output_poll_changed(struct drm_device *ddev)
217{ 150{
218 struct sti_private *private = ddev->dev_private; 151 struct sti_private *private = ddev->dev_private;
@@ -224,7 +157,7 @@ static const struct drm_mode_config_funcs sti_mode_config_funcs = {
224 .fb_create = drm_fb_cma_create, 157 .fb_create = drm_fb_cma_create,
225 .output_poll_changed = sti_output_poll_changed, 158 .output_poll_changed = sti_output_poll_changed,
226 .atomic_check = sti_atomic_check, 159 .atomic_check = sti_atomic_check,
227 .atomic_commit = sti_atomic_commit, 160 .atomic_commit = drm_atomic_helper_commit,
228}; 161};
229 162
230static void sti_mode_config_init(struct drm_device *dev) 163static void sti_mode_config_init(struct drm_device *dev)
@@ -304,9 +237,6 @@ static int sti_init(struct drm_device *ddev)
304 dev_set_drvdata(ddev->dev, ddev); 237 dev_set_drvdata(ddev->dev, ddev);
305 private->drm_dev = ddev; 238 private->drm_dev = ddev;
306 239
307 mutex_init(&private->commit.lock);
308 INIT_WORK(&private->commit.work, sti_atomic_work);
309
310 drm_mode_config_init(ddev); 240 drm_mode_config_init(ddev);
311 241
312 sti_mode_config_init(ddev); 242 sti_mode_config_init(ddev);
@@ -327,6 +257,7 @@ static void sti_cleanup(struct drm_device *ddev)
327 257
328 drm_kms_helper_poll_fini(ddev); 258 drm_kms_helper_poll_fini(ddev);
329 drm_vblank_cleanup(ddev); 259 drm_vblank_cleanup(ddev);
260 component_unbind_all(ddev->dev, ddev);
330 kfree(private); 261 kfree(private);
331 ddev->dev_private = NULL; 262 ddev->dev_private = NULL;
332} 263}
diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h
index 4c75845cc9ab..6502ed2d3351 100644
--- a/drivers/gpu/drm/sti/sti_drv.h
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -25,12 +25,6 @@ struct sti_private {
25 struct drm_property *plane_zorder_property; 25 struct drm_property *plane_zorder_property;
26 struct drm_device *drm_dev; 26 struct drm_device *drm_dev;
27 struct drm_fbdev_cma *fbdev; 27 struct drm_fbdev_cma *fbdev;
28
29 struct {
30 struct drm_atomic_state *state;
31 struct work_struct work;
32 struct mutex lock;
33 } commit;
34}; 28};
35 29
36extern struct platform_driver sti_tvout_driver; 30extern struct platform_driver sti_tvout_driver;
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 877d053d86f4..86279f5022c2 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -610,7 +610,6 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
610 struct sti_plane *plane = to_sti_plane(drm_plane); 610 struct sti_plane *plane = to_sti_plane(drm_plane);
611 struct sti_gdp *gdp = to_sti_gdp(plane); 611 struct sti_gdp *gdp = to_sti_gdp(plane);
612 struct drm_crtc *crtc = state->crtc; 612 struct drm_crtc *crtc = state->crtc;
613 struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
614 struct drm_framebuffer *fb = state->fb; 613 struct drm_framebuffer *fb = state->fb;
615 struct drm_crtc_state *crtc_state; 614 struct drm_crtc_state *crtc_state;
616 struct sti_mixer *mixer; 615 struct sti_mixer *mixer;
@@ -648,45 +647,30 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
648 return -EINVAL; 647 return -EINVAL;
649 } 648 }
650 649
651 if (!gdp->vtg) { 650 /* Set gdp clock */
652 /* Register gdp callback */ 651 if (mode->clock && gdp->clk_pix) {
653 gdp->vtg = compo->vtg[mixer->id]; 652 struct clk *clkp;
654 if (sti_vtg_register_client(gdp->vtg, 653 int rate = mode->clock * 1000;
655 &gdp->vtg_field_nb, crtc)) { 654 int res;
656 DRM_ERROR("Cannot register VTG notifier\n"); 655
656 /*
657 * According to the mixer used, the gdp pixel clock
658 * should have a different parent clock.
659 */
660 if (mixer->id == STI_MIXER_MAIN)
661 clkp = gdp->clk_main_parent;
662 else
663 clkp = gdp->clk_aux_parent;
664
665 if (clkp)
666 clk_set_parent(gdp->clk_pix, clkp);
667
668 res = clk_set_rate(gdp->clk_pix, rate);
669 if (res < 0) {
670 DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
671 rate);
657 return -EINVAL; 672 return -EINVAL;
658 } 673 }
659
660 /* Set and enable gdp clock */
661 if (gdp->clk_pix) {
662 struct clk *clkp;
663 int rate = mode->clock * 1000;
664 int res;
665
666 /*
667 * According to the mixer used, the gdp pixel clock
668 * should have a different parent clock.
669 */
670 if (mixer->id == STI_MIXER_MAIN)
671 clkp = gdp->clk_main_parent;
672 else
673 clkp = gdp->clk_aux_parent;
674
675 if (clkp)
676 clk_set_parent(gdp->clk_pix, clkp);
677
678 res = clk_set_rate(gdp->clk_pix, rate);
679 if (res < 0) {
680 DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
681 rate);
682 return -EINVAL;
683 }
684
685 if (clk_prepare_enable(gdp->clk_pix)) {
686 DRM_ERROR("Failed to prepare/enable gdp\n");
687 return -EINVAL;
688 }
689 }
690 } 674 }
691 675
692 DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", 676 DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
@@ -724,6 +708,31 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
724 if (!crtc || !fb) 708 if (!crtc || !fb)
725 return; 709 return;
726 710
711 if ((oldstate->fb == state->fb) &&
712 (oldstate->crtc_x == state->crtc_x) &&
713 (oldstate->crtc_y == state->crtc_y) &&
714 (oldstate->crtc_w == state->crtc_w) &&
715 (oldstate->crtc_h == state->crtc_h) &&
716 (oldstate->src_x == state->src_x) &&
717 (oldstate->src_y == state->src_y) &&
718 (oldstate->src_w == state->src_w) &&
719 (oldstate->src_h == state->src_h)) {
720 /* No change since last update, do not post cmd */
721 DRM_DEBUG_DRIVER("No change, not posting cmd\n");
722 plane->status = STI_PLANE_UPDATED;
723 return;
724 }
725
726 if (!gdp->vtg) {
727 struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
728 struct sti_mixer *mixer = to_sti_mixer(crtc);
729
730 /* Register gdp callback */
731 gdp->vtg = compo->vtg[mixer->id];
732 sti_vtg_register_client(gdp->vtg, &gdp->vtg_field_nb, crtc);
733 clk_prepare_enable(gdp->clk_pix);
734 }
735
727 mode = &crtc->mode; 736 mode = &crtc->mode;
728 dst_x = state->crtc_x; 737 dst_x = state->crtc_x;
729 dst_y = state->crtc_y; 738 dst_y = state->crtc_y;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index c9151849d604..ce2dcba679d5 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -95,7 +95,6 @@
95#define HDMI_CFG_HDCP_EN BIT(2) 95#define HDMI_CFG_HDCP_EN BIT(2)
96#define HDMI_CFG_ESS_NOT_OESS BIT(3) 96#define HDMI_CFG_ESS_NOT_OESS BIT(3)
97#define HDMI_CFG_H_SYNC_POL_NEG BIT(4) 97#define HDMI_CFG_H_SYNC_POL_NEG BIT(4)
98#define HDMI_CFG_SINK_TERM_DET_EN BIT(5)
99#define HDMI_CFG_V_SYNC_POL_NEG BIT(6) 98#define HDMI_CFG_V_SYNC_POL_NEG BIT(6)
100#define HDMI_CFG_422_EN BIT(8) 99#define HDMI_CFG_422_EN BIT(8)
101#define HDMI_CFG_FIFO_OVERRUN_CLR BIT(12) 100#define HDMI_CFG_FIFO_OVERRUN_CLR BIT(12)
@@ -159,7 +158,6 @@ struct sti_hdmi_connector {
159 struct drm_encoder *encoder; 158 struct drm_encoder *encoder;
160 struct sti_hdmi *hdmi; 159 struct sti_hdmi *hdmi;
161 struct drm_property *colorspace_property; 160 struct drm_property *colorspace_property;
162 struct drm_property *hdmi_mode_property;
163}; 161};
164 162
165#define to_sti_hdmi_connector(x) \ 163#define to_sti_hdmi_connector(x) \
@@ -266,12 +264,9 @@ static void hdmi_config(struct sti_hdmi *hdmi)
266 264
267 /* Select encryption type and the framing mode */ 265 /* Select encryption type and the framing mode */
268 conf |= HDMI_CFG_ESS_NOT_OESS; 266 conf |= HDMI_CFG_ESS_NOT_OESS;
269 if (hdmi->hdmi_mode == HDMI_MODE_HDMI) 267 if (hdmi->hdmi_monitor)
270 conf |= HDMI_CFG_HDMI_NOT_DVI; 268 conf |= HDMI_CFG_HDMI_NOT_DVI;
271 269
272 /* Enable sink term detection */
273 conf |= HDMI_CFG_SINK_TERM_DET_EN;
274
275 /* Set Hsync polarity */ 270 /* Set Hsync polarity */
276 if (hdmi->mode.flags & DRM_MODE_FLAG_NHSYNC) { 271 if (hdmi->mode.flags & DRM_MODE_FLAG_NHSYNC) {
277 DRM_DEBUG_DRIVER("H Sync Negative\n"); 272 DRM_DEBUG_DRIVER("H Sync Negative\n");
@@ -607,9 +602,6 @@ static void hdmi_dbg_cfg(struct seq_file *s, int val)
607 tmp = val & HDMI_CFG_ESS_NOT_OESS; 602 tmp = val & HDMI_CFG_ESS_NOT_OESS;
608 DBGFS_PRINT_STR("HDCP mode:", tmp ? "ESS enable" : "OESS enable"); 603 DBGFS_PRINT_STR("HDCP mode:", tmp ? "ESS enable" : "OESS enable");
609 seq_puts(s, "\t\t\t\t\t"); 604 seq_puts(s, "\t\t\t\t\t");
610 tmp = val & HDMI_CFG_SINK_TERM_DET_EN;
611 DBGFS_PRINT_STR("Sink term detection:", tmp ? "enable" : "disable");
612 seq_puts(s, "\t\t\t\t\t");
613 tmp = val & HDMI_CFG_H_SYNC_POL_NEG; 605 tmp = val & HDMI_CFG_H_SYNC_POL_NEG;
614 DBGFS_PRINT_STR("Hsync polarity:", tmp ? "inverted" : "normal"); 606 DBGFS_PRINT_STR("Hsync polarity:", tmp ? "inverted" : "normal");
615 seq_puts(s, "\t\t\t\t\t"); 607 seq_puts(s, "\t\t\t\t\t");
@@ -977,6 +969,11 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
977 if (!edid) 969 if (!edid)
978 goto fail; 970 goto fail;
979 971
972 hdmi->hdmi_monitor = drm_detect_hdmi_monitor(edid);
973 DRM_DEBUG_KMS("%s : %dx%d cm\n",
974 (hdmi->hdmi_monitor ? "hdmi monitor" : "dvi monitor"),
975 edid->width_cm, edid->height_cm);
976
980 count = drm_add_edid_modes(connector, edid); 977 count = drm_add_edid_modes(connector, edid);
981 drm_mode_connector_update_edid_property(connector, edid); 978 drm_mode_connector_update_edid_property(connector, edid);
982 drm_edid_to_eld(connector, edid); 979 drm_edid_to_eld(connector, edid);
@@ -1060,19 +1057,6 @@ static void sti_hdmi_connector_init_property(struct drm_device *drm_dev,
1060 } 1057 }
1061 hdmi_connector->colorspace_property = prop; 1058 hdmi_connector->colorspace_property = prop;
1062 drm_object_attach_property(&connector->base, prop, hdmi->colorspace); 1059 drm_object_attach_property(&connector->base, prop, hdmi->colorspace);
1063
1064 /* hdmi_mode property */
1065 hdmi->hdmi_mode = DEFAULT_HDMI_MODE;
1066 prop = drm_property_create_enum(drm_dev, 0, "hdmi_mode",
1067 hdmi_mode_names,
1068 ARRAY_SIZE(hdmi_mode_names));
1069 if (!prop) {
1070 DRM_ERROR("fails to create colorspace property\n");
1071 return;
1072 }
1073 hdmi_connector->hdmi_mode_property = prop;
1074 drm_object_attach_property(&connector->base, prop, hdmi->hdmi_mode);
1075
1076} 1060}
1077 1061
1078static int 1062static int
@@ -1090,11 +1074,6 @@ sti_hdmi_connector_set_property(struct drm_connector *connector,
1090 return 0; 1074 return 0;
1091 } 1075 }
1092 1076
1093 if (property == hdmi_connector->hdmi_mode_property) {
1094 hdmi->hdmi_mode = val;
1095 return 0;
1096 }
1097
1098 DRM_ERROR("failed to set hdmi connector property\n"); 1077 DRM_ERROR("failed to set hdmi connector property\n");
1099 return -EINVAL; 1078 return -EINVAL;
1100} 1079}
@@ -1114,11 +1093,6 @@ sti_hdmi_connector_get_property(struct drm_connector *connector,
1114 return 0; 1093 return 0;
1115 } 1094 }
1116 1095
1117 if (property == hdmi_connector->hdmi_mode_property) {
1118 *val = hdmi->hdmi_mode;
1119 return 0;
1120 }
1121
1122 DRM_ERROR("failed to get hdmi connector property\n"); 1096 DRM_ERROR("failed to get hdmi connector property\n");
1123 return -EINVAL; 1097 return -EINVAL;
1124} 1098}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index 119bc3582ac7..407012350f1a 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -30,19 +30,6 @@ struct hdmi_audio_params {
30 struct hdmi_audio_infoframe cea; 30 struct hdmi_audio_infoframe cea;
31}; 31};
32 32
33/* values for the framing mode property */
34enum sti_hdmi_modes {
35 HDMI_MODE_HDMI,
36 HDMI_MODE_DVI,
37};
38
39static const struct drm_prop_enum_list hdmi_mode_names[] = {
40 { HDMI_MODE_HDMI, "hdmi" },
41 { HDMI_MODE_DVI, "dvi" },
42};
43
44#define DEFAULT_HDMI_MODE HDMI_MODE_HDMI
45
46static const struct drm_prop_enum_list colorspace_mode_names[] = { 33static const struct drm_prop_enum_list colorspace_mode_names[] = {
47 { HDMI_COLORSPACE_RGB, "rgb" }, 34 { HDMI_COLORSPACE_RGB, "rgb" },
48 { HDMI_COLORSPACE_YUV422, "yuv422" }, 35 { HDMI_COLORSPACE_YUV422, "yuv422" },
@@ -73,7 +60,7 @@ static const struct drm_prop_enum_list colorspace_mode_names[] = {
73 * @reset: reset control of the hdmi phy 60 * @reset: reset control of the hdmi phy
74 * @ddc_adapt: i2c ddc adapter 61 * @ddc_adapt: i2c ddc adapter
75 * @colorspace: current colorspace selected 62 * @colorspace: current colorspace selected
76 * @hdmi_mode: select framing for HDMI or DVI 63 * @hdmi_monitor: true if HDMI monitor detected else DVI monitor assumed
77 * @audio_pdev: ASoC hdmi-codec platform device 64 * @audio_pdev: ASoC hdmi-codec platform device
78 * @audio: hdmi audio parameters. 65 * @audio: hdmi audio parameters.
79 * @drm_connector: hdmi connector 66 * @drm_connector: hdmi connector
@@ -98,7 +85,7 @@ struct sti_hdmi {
98 struct reset_control *reset; 85 struct reset_control *reset;
99 struct i2c_adapter *ddc_adapt; 86 struct i2c_adapter *ddc_adapt;
100 enum hdmi_colorspace colorspace; 87 enum hdmi_colorspace colorspace;
101 enum sti_hdmi_modes hdmi_mode; 88 bool hdmi_monitor;
102 struct platform_device *audio_pdev; 89 struct platform_device *audio_pdev;
103 struct hdmi_audio_params audio; 90 struct hdmi_audio_params audio;
104 struct drm_connector *drm_connector; 91 struct drm_connector *drm_connector;
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 4376fd8a8e52..66f843148ef7 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1037,9 +1037,9 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
1037 src_w = state->src_w >> 16; 1037 src_w = state->src_w >> 16;
1038 src_h = state->src_h >> 16; 1038 src_h = state->src_h >> 16;
1039 1039
1040 if (!sti_hqvdp_check_hw_scaling(hqvdp, mode, 1040 if (mode->clock && !sti_hqvdp_check_hw_scaling(hqvdp, mode,
1041 src_w, src_h, 1041 src_w, src_h,
1042 dst_w, dst_h)) { 1042 dst_w, dst_h)) {
1043 DRM_ERROR("Scaling beyond HW capabilities\n"); 1043 DRM_ERROR("Scaling beyond HW capabilities\n");
1044 return -EINVAL; 1044 return -EINVAL;
1045 } 1045 }
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index 830a3c42d886..e64a00e61049 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -28,7 +28,6 @@ enum sti_mixer_status {
28 * @regs: mixer registers 28 * @regs: mixer registers
29 * @id: id of the mixer 29 * @id: id of the mixer
30 * @drm_crtc: crtc object link to the mixer 30 * @drm_crtc: crtc object link to the mixer
31 * @pending_event: set if a flip event is pending on crtc
32 * @status: to know the status of the mixer 31 * @status: to know the status of the mixer
33 */ 32 */
34struct sti_mixer { 33struct sti_mixer {
@@ -36,7 +35,6 @@ struct sti_mixer {
36 void __iomem *regs; 35 void __iomem *regs;
37 int id; 36 int id;
38 struct drm_crtc drm_crtc; 37 struct drm_crtc drm_crtc;
39 struct drm_pending_vblank_event *pending_event;
40 enum sti_mixer_status status; 38 enum sti_mixer_status status;
41}; 39};
42 40
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index c3d9c8ae14af..943bce56692e 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -17,7 +17,6 @@
17#include "sti_vtg.h" 17#include "sti_vtg.h"
18 18
19#define VTG_MODE_MASTER 0 19#define VTG_MODE_MASTER 0
20#define VTG_MODE_SLAVE_BY_EXT0 1
21 20
22/* registers offset */ 21/* registers offset */
23#define VTG_MODE 0x0000 22#define VTG_MODE 0x0000
@@ -132,7 +131,6 @@ struct sti_vtg_sync_params {
132 * @irq_status: store the IRQ status value 131 * @irq_status: store the IRQ status value
133 * @notifier_list: notifier callback 132 * @notifier_list: notifier callback
134 * @crtc: the CRTC for vblank event 133 * @crtc: the CRTC for vblank event
135 * @slave: slave vtg
136 * @link: List node to link the structure in lookup list 134 * @link: List node to link the structure in lookup list
137 */ 135 */
138struct sti_vtg { 136struct sti_vtg {
@@ -144,7 +142,6 @@ struct sti_vtg {
144 u32 irq_status; 142 u32 irq_status;
145 struct raw_notifier_head notifier_list; 143 struct raw_notifier_head notifier_list;
146 struct drm_crtc *crtc; 144 struct drm_crtc *crtc;
147 struct sti_vtg *slave;
148 struct list_head link; 145 struct list_head link;
149}; 146};
150 147
@@ -166,10 +163,6 @@ struct sti_vtg *of_vtg_find(struct device_node *np)
166 163
167static void vtg_reset(struct sti_vtg *vtg) 164static void vtg_reset(struct sti_vtg *vtg)
168{ 165{
169 /* reset slave and then master */
170 if (vtg->slave)
171 vtg_reset(vtg->slave);
172
173 writel(1, vtg->regs + VTG_DRST_AUTOC); 166 writel(1, vtg->regs + VTG_DRST_AUTOC);
174} 167}
175 168
@@ -259,10 +252,6 @@ static void vtg_set_mode(struct sti_vtg *vtg,
259{ 252{
260 unsigned int i; 253 unsigned int i;
261 254
262 if (vtg->slave)
263 vtg_set_mode(vtg->slave, VTG_MODE_SLAVE_BY_EXT0,
264 vtg->sync_params, mode);
265
266 /* Set the number of clock cycles per line */ 255 /* Set the number of clock cycles per line */
267 writel(mode->htotal, vtg->regs + VTG_CLKLN); 256 writel(mode->htotal, vtg->regs + VTG_CLKLN);
268 257
@@ -318,11 +307,7 @@ void sti_vtg_set_config(struct sti_vtg *vtg,
318 307
319 vtg_reset(vtg); 308 vtg_reset(vtg);
320 309
321 /* enable irq for the vtg vblank synchro */ 310 vtg_enable_irq(vtg);
322 if (vtg->slave)
323 vtg_enable_irq(vtg->slave);
324 else
325 vtg_enable_irq(vtg);
326} 311}
327 312
328/** 313/**
@@ -365,18 +350,12 @@ u32 sti_vtg_get_pixel_number(struct drm_display_mode mode, int x)
365int sti_vtg_register_client(struct sti_vtg *vtg, struct notifier_block *nb, 350int sti_vtg_register_client(struct sti_vtg *vtg, struct notifier_block *nb,
366 struct drm_crtc *crtc) 351 struct drm_crtc *crtc)
367{ 352{
368 if (vtg->slave)
369 return sti_vtg_register_client(vtg->slave, nb, crtc);
370
371 vtg->crtc = crtc; 353 vtg->crtc = crtc;
372 return raw_notifier_chain_register(&vtg->notifier_list, nb); 354 return raw_notifier_chain_register(&vtg->notifier_list, nb);
373} 355}
374 356
375int sti_vtg_unregister_client(struct sti_vtg *vtg, struct notifier_block *nb) 357int sti_vtg_unregister_client(struct sti_vtg *vtg, struct notifier_block *nb)
376{ 358{
377 if (vtg->slave)
378 return sti_vtg_unregister_client(vtg->slave, nb);
379
380 return raw_notifier_chain_unregister(&vtg->notifier_list, nb); 359 return raw_notifier_chain_unregister(&vtg->notifier_list, nb);
381} 360}
382 361
@@ -434,29 +413,20 @@ static int vtg_probe(struct platform_device *pdev)
434 return -ENOMEM; 413 return -ENOMEM;
435 } 414 }
436 415
437 np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0); 416 vtg->irq = platform_get_irq(pdev, 0);
438 if (np) { 417 if (vtg->irq < 0) {
439 vtg->slave = of_vtg_find(np); 418 DRM_ERROR("Failed to get VTG interrupt\n");
440 of_node_put(np); 419 return vtg->irq;
420 }
441 421
442 if (!vtg->slave) 422 RAW_INIT_NOTIFIER_HEAD(&vtg->notifier_list);
443 return -EPROBE_DEFER; 423
444 } else { 424 ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq,
445 vtg->irq = platform_get_irq(pdev, 0); 425 vtg_irq_thread, IRQF_ONESHOT,
446 if (vtg->irq < 0) { 426 dev_name(dev), vtg);
447 DRM_ERROR("Failed to get VTG interrupt\n"); 427 if (ret < 0) {
448 return vtg->irq; 428 DRM_ERROR("Failed to register VTG interrupt\n");
449 } 429 return ret;
450
451 RAW_INIT_NOTIFIER_HEAD(&vtg->notifier_list);
452
453 ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq,
454 vtg_irq_thread, IRQF_ONESHOT,
455 dev_name(dev), vtg);
456 if (ret < 0) {
457 DRM_ERROR("Failed to register VTG interrupt\n");
458 return ret;
459 }
460 } 430 }
461 431
462 vtg_register(vtg); 432 vtg_register(vtg);
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index f330ba4547cf..900129c8f6cf 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -109,6 +109,7 @@ header-y += dlm_netlink.h
109header-y += dlm_plock.h 109header-y += dlm_plock.h
110header-y += dm-ioctl.h 110header-y += dm-ioctl.h
111header-y += dm-log-userspace.h 111header-y += dm-log-userspace.h
112header-y += dma-buf.h
112header-y += dn.h 113header-y += dn.h
113header-y += dqblk_xfs.h 114header-y += dqblk_xfs.h
114header-y += edd.h 115header-y += edd.h