author	Linus Torvalds <torvalds@linux-foundation.org>	2014-09-12 11:27:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-09-12 11:27:40 -0400
commit	850ebc0c0c50e24126461eec3ca4d07308560058 (patch)
tree	6a314f4e77861d0dd4bf9dec8c5da18fca30aa1e
parent	c73f6fdf2fc534e47b2a1ebfe00e57d585ef5b57 (diff)
parent	83502a5d34386f7c6973bc70e1c423f55f5a2e3a (diff)
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "AST, i915, radeon and msm fixes, all over the place.  All fixing
  build issues, regressions, oopses or failure to detect cards"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/ast: AST2000 cannot be detected correctly
  drm/ast: open key before detect chips
  drm/msm: don't crash if no msm.vram param
  drm/msm/hdmi: fix build break on non-CCF platforms
  drm/msm: Change nested function to static function
  drm/radeon/dpm: set the thermal type properly for special configs
  drm/radeon: reduce memory footprint for debugging
  drm/radeon: add connector quirk for fujitsu board
  drm/radeon: fix semaphore value init
  drm/radeon: only use me/pfp sync on evergreen+
  drm/i915: Wait for vblank before enabling the TV encoder
  drm/i915: Evict CS TLBs between batches
  drm/i915: Fix irq enable tracking in driver load
  drm/i915: Fix EIO/wedged handling in gem fault handler
  drm/i915: Prevent recursive deadlock on releasing a busy userptr
-rw-r--r--	drivers/gpu/drm/ast/ast_main.c	3
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c	9
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	10
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	11
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_userptr.c	409
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h	12
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	66
-rw-r--r--	drivers/gpu/drm/i915/intel_tv.c	4
-rw-r--r--	drivers/gpu/drm/msm/hdmi/hdmi.c	46
-rw-r--r--	drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c	15
-rw-r--r--	drivers/gpu/drm/msm/msm_drv.c	2
-rw-r--r--	drivers/gpu/drm/radeon/atombios_dp.c	7
-rw-r--r--	drivers/gpu/drm/radeon/r600.c	4
-rw-r--r--	drivers/gpu/drm/radeon/radeon_atombios.c	33
-rw-r--r--	drivers/gpu/drm/radeon/radeon_semaphore.c	2
15 files changed, 371 insertions(+), 262 deletions(-)
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index a2cc6be97983..b792194e0d9c 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -67,6 +67,7 @@ static int ast_detect_chip(struct drm_device *dev)
 {
 	struct ast_private *ast = dev->dev_private;
 	uint32_t data, jreg;
+	ast_open_key(ast);
 
 	if (dev->pdev->device == PCI_CHIP_AST1180) {
 		ast->chip = AST1100;
@@ -104,7 +105,7 @@ static int ast_detect_chip(struct drm_device *dev)
 		}
 		ast->vga2_clone = false;
 	} else {
-		ast->chip = 2000;
+		ast->chip = AST2000;
 		DRM_INFO("AST 2000 detected\n");
 	}
 }
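The second hunk is a plain C type bug: ast->chip holds enumerators, and the literal 2000 is not the enumerator AST2000, so every later `ast->chip == AST2000` comparison failed and the card went undetected. A minimal sketch of the pitfall, with hypothetical enumerator values (the driver's real enum differs):

	enum ast_chip { AST2000, AST2100, AST2200, AST2300, AST1100 };	/* hypothetical ordering */

	struct ast_priv { enum ast_chip chip; };

	static int is_ast2000(const struct ast_priv *p)
	{
		/* With AST2000 == 0, a raw `p->chip = 2000;` makes this
		 * comparison always false and the chip goes undetected. */
		return p->chip == AST2000;
	}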
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2e7f03ad5ee2..9933c26017ed 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1336,12 +1336,17 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	intel_power_domains_init_hw(dev_priv);
 
+	/*
+	 * We enable some interrupt sources in our postinstall hooks, so mark
+	 * interrupts as enabled _before_ actually enabling them to avoid
+	 * special cases in our ordering checks.
+	 */
+	dev_priv->pm._irqs_disabled = false;
+
 	ret = drm_irq_install(dev, dev->pdev->irq);
 	if (ret)
 		goto cleanup_gem_stolen;
 
-	dev_priv->pm._irqs_disabled = false;
-
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
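The reordering works because drm_irq_install() invokes the driver's irq_postinstall hook, which already unmasks some interrupt sources; the `_irqs_disabled` bookkeeping therefore has to be cleared before the install call, or the driver's own ordering checks fire. A reduced sketch of the invariant, with made-up helper names:

	static bool irqs_marked_enabled;	/* stand-in for !pm._irqs_disabled */

	static void irq_postinstall_hook(void)
	{
		WARN_ON(!irqs_marked_enabled);	/* the kind of check that tripped */
		/* ... unmask interrupt sources ... */
	}

	static int load_modeset_init(void)
	{
		irqs_marked_enabled = true;		  /* mark first ... */
		return install_irq(irq_postinstall_hook); /* ... then install */
	}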
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7a830eac5ba3..3524306d8cfb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -184,6 +184,7 @@ enum hpd_pin {
 	if ((1 << (domain)) & (mask))
 
 struct drm_i915_private;
+struct i915_mm_struct;
 struct i915_mmu_object;
 
 enum intel_dpll_id {
@@ -1506,9 +1507,8 @@ struct drm_i915_private {
 	struct i915_gtt gtt; /* VM representing the global address space */
 
 	struct i915_gem_mm mm;
-#if defined(CONFIG_MMU_NOTIFIER)
-	DECLARE_HASHTABLE(mmu_notifiers, 7);
-#endif
+	DECLARE_HASHTABLE(mm_structs, 7);
+	struct mutex mm_lock;
 
 	/* Kernel Modesetting */
 
@@ -1814,8 +1814,8 @@ struct drm_i915_gem_object {
 		unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15
 
-		struct mm_struct *mm;
-		struct i915_mmu_object *mn;
+		struct i915_mm_struct *mm;
+		struct i915_mmu_object *mmu_object;
 		struct work_struct *work;
 	} userptr;
 };
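DECLARE_HASHTABLE(mm_structs, 7) is the stock <linux/hashtable.h> idiom: a fixed 2^7-bucket table, keyed here by the mm_struct pointer cast to unsigned long and guarded by the new mm_lock mutex instead of struct_mutex. A self-contained sketch of that lookup/insert pattern with generic names:

	#include <linux/hashtable.h>

	struct entry {
		unsigned long key;
		struct hlist_node node;
	};

	static DEFINE_HASHTABLE(table, 7);	/* 2^7 buckets */

	static struct entry *table_lookup(unsigned long key)
	{
		struct entry *e;

		hash_for_each_possible(table, e, node, key)
			if (e->key == key)	/* buckets can collide: recheck */
				return e;
		return NULL;
	}

	static void table_insert(struct entry *e)
	{
		hash_add(table, &e->node, e->key);
	}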
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ba7f5c6bb50d..ad55b06a3cb1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1590,10 +1590,13 @@ unlock:
 out:
 	switch (ret) {
 	case -EIO:
-		/* If this -EIO is due to a gpu hang, give the reset code a
-		 * chance to clean up the mess. Otherwise return the proper
-		 * SIGBUS. */
-		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+		/*
+		 * We eat errors when the gpu is terminally wedged to avoid
+		 * userspace unduly crashing (gl has no provisions for mmaps to
+		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
+		 * and so needs to be reported.
+		 */
+		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
 			ret = VM_FAULT_SIGBUS;
 			break;
 		}
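The sense of the test is the entire fix: before, a non-wedged -EIO fell through and was swallowed while the wedged case raised SIGBUS, exactly backwards. Boiled down to a standalone predicate (a sketch, not the driver's full switch):

	static int fault_status_for_eio(bool terminally_wedged)
	{
		/* Only a terminally wedged GPU may eat -EIO, because GL has no
		 * way to recover from a failed mmap; any other -EIO (e.g. a
		 * swap-in failure) is not ours and must raise SIGBUS. */
		return terminally_wedged ? VM_FAULT_NOPAGE : VM_FAULT_SIGBUS;
	}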
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index fe69fc837d9e..d38413997379 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -32,6 +32,15 @@
 #include <linux/mempolicy.h>
 #include <linux/swap.h>
 
+struct i915_mm_struct {
+	struct mm_struct *mm;
+	struct drm_device *dev;
+	struct i915_mmu_notifier *mn;
+	struct hlist_node node;
+	struct kref kref;
+	struct work_struct work;
+};
+
 #if defined(CONFIG_MMU_NOTIFIER)
 #include <linux/interval_tree.h>
 
@@ -41,16 +50,12 @@ struct i915_mmu_notifier {
 	struct mmu_notifier mn;
 	struct rb_root objects;
 	struct list_head linear;
-	struct drm_device *dev;
-	struct mm_struct *mm;
-	struct work_struct work;
-	unsigned long count;
 	unsigned long serial;
 	bool has_linear;
 };
 
 struct i915_mmu_object {
-	struct i915_mmu_notifier *mmu;
+	struct i915_mmu_notifier *mn;
 	struct interval_tree_node it;
 	struct list_head link;
 	struct drm_i915_gem_object *obj;
@@ -96,18 +101,18 @@ static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
 				      unsigned long start,
 				      unsigned long end)
 {
-	struct i915_mmu_object *mmu;
+	struct i915_mmu_object *mo;
 	unsigned long serial;
 
 restart:
 	serial = mn->serial;
-	list_for_each_entry(mmu, &mn->linear, link) {
+	list_for_each_entry(mo, &mn->linear, link) {
 		struct drm_i915_gem_object *obj;
 
-		if (mmu->it.last < start || mmu->it.start > end)
+		if (mo->it.last < start || mo->it.start > end)
 			continue;
 
-		obj = mmu->obj;
+		obj = mo->obj;
 		drm_gem_object_reference(&obj->base);
 		spin_unlock(&mn->lock);
 
@@ -160,130 +165,47 @@ static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
 };
 
 static struct i915_mmu_notifier *
-__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_mmu_notifier *mmu;
-
-	/* Protected by dev->struct_mutex */
-	hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
-		if (mmu->mm == mm)
-			return mmu;
-
-	return NULL;
-}
-
-static struct i915_mmu_notifier *
-i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+i915_mmu_notifier_create(struct mm_struct *mm)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_mmu_notifier *mmu;
+	struct i915_mmu_notifier *mn;
 	int ret;
 
-	lockdep_assert_held(&dev->struct_mutex);
-
-	mmu = __i915_mmu_notifier_lookup(dev, mm);
-	if (mmu)
-		return mmu;
-
-	mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
-	if (mmu == NULL)
+	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
+	if (mn == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	spin_lock_init(&mmu->lock);
-	mmu->dev = dev;
-	mmu->mn.ops = &i915_gem_userptr_notifier;
-	mmu->mm = mm;
-	mmu->objects = RB_ROOT;
-	mmu->count = 0;
-	mmu->serial = 1;
-	INIT_LIST_HEAD(&mmu->linear);
-	mmu->has_linear = false;
+	spin_lock_init(&mn->lock);
+	mn->mn.ops = &i915_gem_userptr_notifier;
+	mn->objects = RB_ROOT;
+	mn->serial = 1;
+	INIT_LIST_HEAD(&mn->linear);
+	mn->has_linear = false;
 
 	/* Protected by mmap_sem (write-lock) */
-	ret = __mmu_notifier_register(&mmu->mn, mm);
+	ret = __mmu_notifier_register(&mn->mn, mm);
 	if (ret) {
-		kfree(mmu);
+		kfree(mn);
 		return ERR_PTR(ret);
 	}
 
-	/* Protected by dev->struct_mutex */
-	hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
-	return mmu;
-}
-
-static void
-__i915_mmu_notifier_destroy_worker(struct work_struct *work)
-{
-	struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
-	mmu_notifier_unregister(&mmu->mn, mmu->mm);
-	kfree(mmu);
-}
-
-static void
-__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
-{
-	lockdep_assert_held(&mmu->dev->struct_mutex);
-
-	/* Protected by dev->struct_mutex */
-	hash_del(&mmu->node);
-
-	/* Our lock ordering is: mmap_sem, mmu_notifier_scru, struct_mutex.
-	 * We enter the function holding struct_mutex, therefore we need
-	 * to drop our mutex prior to calling mmu_notifier_unregister in
-	 * order to prevent lock inversion (and system-wide deadlock)
-	 * between the mmap_sem and struct-mutex. Hence we defer the
-	 * unregistration to a workqueue where we hold no locks.
-	 */
-	INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
-	schedule_work(&mmu->work);
-}
-
-static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
-{
-	if (++mmu->serial == 0)
-		mmu->serial = 1;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
-{
-	struct i915_mmu_object *mn;
-
-	list_for_each_entry(mn, &mmu->linear, link)
-		if (mn->is_linear)
-			return true;
-
-	return false;
+	return mn;
 }
 
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
-		      struct i915_mmu_object *mn)
+static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
 {
-	lockdep_assert_held(&mmu->dev->struct_mutex);
-
-	spin_lock(&mmu->lock);
-	list_del(&mn->link);
-	if (mn->is_linear)
-		mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
-	else
-		interval_tree_remove(&mn->it, &mmu->objects);
-	__i915_mmu_notifier_update_serial(mmu);
-	spin_unlock(&mmu->lock);
-
-	/* Protected against _add() by dev->struct_mutex */
-	if (--mmu->count == 0)
-		__i915_mmu_notifier_destroy(mmu);
+	if (++mn->serial == 0)
+		mn->serial = 1;
 }
 
 static int
-i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
-		      struct i915_mmu_object *mn)
+i915_mmu_notifier_add(struct drm_device *dev,
+		      struct i915_mmu_notifier *mn,
+		      struct i915_mmu_object *mo)
 {
 	struct interval_tree_node *it;
 	int ret;
 
-	ret = i915_mutex_lock_interruptible(mmu->dev);
+	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
@@ -291,11 +213,11 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
 	 * remove the objects from the interval tree) before we do
 	 * the check for overlapping objects.
 	 */
-	i915_gem_retire_requests(mmu->dev);
+	i915_gem_retire_requests(dev);
 
-	spin_lock(&mmu->lock);
-	it = interval_tree_iter_first(&mmu->objects,
-				      mn->it.start, mn->it.last);
+	spin_lock(&mn->lock);
+	it = interval_tree_iter_first(&mn->objects,
+				      mo->it.start, mo->it.last);
 	if (it) {
 		struct drm_i915_gem_object *obj;
 
@@ -312,86 +234,122 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
 
 		obj = container_of(it, struct i915_mmu_object, it)->obj;
 		if (!obj->userptr.workers)
-			mmu->has_linear = mn->is_linear = true;
+			mn->has_linear = mo->is_linear = true;
 		else
 			ret = -EAGAIN;
 	} else
-		interval_tree_insert(&mn->it, &mmu->objects);
+		interval_tree_insert(&mo->it, &mn->objects);
 
 	if (ret == 0) {
-		list_add(&mn->link, &mmu->linear);
-		__i915_mmu_notifier_update_serial(mmu);
+		list_add(&mo->link, &mn->linear);
+		__i915_mmu_notifier_update_serial(mn);
 	}
-	spin_unlock(&mmu->lock);
-	mutex_unlock(&mmu->dev->struct_mutex);
+	spin_unlock(&mn->lock);
+	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
 }
 
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
+{
+	struct i915_mmu_object *mo;
+
+	list_for_each_entry(mo, &mn->linear, link)
+		if (mo->is_linear)
+			return true;
+
+	return false;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
+		      struct i915_mmu_object *mo)
+{
+	spin_lock(&mn->lock);
+	list_del(&mo->link);
+	if (mo->is_linear)
+		mn->has_linear = i915_mmu_notifier_has_linear(mn);
+	else
+		interval_tree_remove(&mo->it, &mn->objects);
+	__i915_mmu_notifier_update_serial(mn);
+	spin_unlock(&mn->lock);
+}
+
 static void
 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
-	struct i915_mmu_object *mn;
+	struct i915_mmu_object *mo;
 
-	mn = obj->userptr.mn;
-	if (mn == NULL)
+	mo = obj->userptr.mmu_object;
+	if (mo == NULL)
 		return;
 
-	i915_mmu_notifier_del(mn->mmu, mn);
-	obj->userptr.mn = NULL;
+	i915_mmu_notifier_del(mo->mn, mo);
+	kfree(mo);
+
+	obj->userptr.mmu_object = NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_find(struct i915_mm_struct *mm)
+{
+	if (mm->mn == NULL) {
+		down_write(&mm->mm->mmap_sem);
+		mutex_lock(&to_i915(mm->dev)->mm_lock);
+		if (mm->mn == NULL)
+			mm->mn = i915_mmu_notifier_create(mm->mm);
+		mutex_unlock(&to_i915(mm->dev)->mm_lock);
+		up_write(&mm->mm->mmap_sem);
+	}
+	return mm->mn;
 }
 
 static int
 i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 				    unsigned flags)
 {
-	struct i915_mmu_notifier *mmu;
-	struct i915_mmu_object *mn;
+	struct i915_mmu_notifier *mn;
+	struct i915_mmu_object *mo;
 	int ret;
 
 	if (flags & I915_USERPTR_UNSYNCHRONIZED)
 		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
 
-	down_write(&obj->userptr.mm->mmap_sem);
-	ret = i915_mutex_lock_interruptible(obj->base.dev);
-	if (ret == 0) {
-		mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
-		if (!IS_ERR(mmu))
-			mmu->count++; /* preemptive add to act as a refcount */
-		else
-			ret = PTR_ERR(mmu);
-		mutex_unlock(&obj->base.dev->struct_mutex);
-	}
-	up_write(&obj->userptr.mm->mmap_sem);
-	if (ret)
-		return ret;
+	if (WARN_ON(obj->userptr.mm == NULL))
+		return -EINVAL;
 
-	mn = kzalloc(sizeof(*mn), GFP_KERNEL);
-	if (mn == NULL) {
-		ret = -ENOMEM;
-		goto destroy_mmu;
-	}
+	mn = i915_mmu_notifier_find(obj->userptr.mm);
+	if (IS_ERR(mn))
+		return PTR_ERR(mn);
 
-	mn->mmu = mmu;
-	mn->it.start = obj->userptr.ptr;
-	mn->it.last = mn->it.start + obj->base.size - 1;
-	mn->obj = obj;
+	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
+	if (mo == NULL)
+		return -ENOMEM;
 
-	ret = i915_mmu_notifier_add(mmu, mn);
-	if (ret)
-		goto free_mn;
+	mo->mn = mn;
+	mo->it.start = obj->userptr.ptr;
+	mo->it.last = mo->it.start + obj->base.size - 1;
+	mo->obj = obj;
 
-	obj->userptr.mn = mn;
+	ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
+	if (ret) {
+		kfree(mo);
+		return ret;
+	}
+
+	obj->userptr.mmu_object = mo;
 	return 0;
+}
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+		       struct mm_struct *mm)
+{
+	if (mn == NULL)
+		return;
 
-free_mn:
+	mmu_notifier_unregister(&mn->mn, mm);
 	kfree(mn);
-destroy_mmu:
-	mutex_lock(&obj->base.dev->struct_mutex);
-	if (--mmu->count == 0)
-		__i915_mmu_notifier_destroy(mmu);
-	mutex_unlock(&obj->base.dev->struct_mutex);
-	return ret;
 }
 
 #else
@@ -413,15 +371,114 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 
 	return 0;
 }
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+		       struct mm_struct *mm)
+{
+}
+
 #endif
 
+static struct i915_mm_struct *
+__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+{
+	struct i915_mm_struct *mm;
+
+	/* Protected by dev_priv->mm_lock */
+	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
+		if (mm->mm == real)
+			return mm;
+
+	return NULL;
+}
+
+static int
+i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct i915_mm_struct *mm;
+	int ret = 0;
+
+	/* During release of the GEM object we hold the struct_mutex. This
+	 * precludes us from calling mmput() at that time as that may be
+	 * the last reference and so call exit_mmap(). exit_mmap() will
+	 * attempt to reap the vma, and if we were holding a GTT mmap
+	 * would then call drm_gem_vm_close() and attempt to reacquire
+	 * the struct mutex. So in order to avoid that recursion, we have
+	 * to defer releasing the mm reference until after we drop the
+	 * struct_mutex, i.e. we need to schedule a worker to do the clean
+	 * up.
+	 */
+	mutex_lock(&dev_priv->mm_lock);
+	mm = __i915_mm_struct_find(dev_priv, current->mm);
+	if (mm == NULL) {
+		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+		if (mm == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		kref_init(&mm->kref);
+		mm->dev = obj->base.dev;
+
+		mm->mm = current->mm;
+		atomic_inc(&current->mm->mm_count);
+
+		mm->mn = NULL;
+
+		/* Protected by dev_priv->mm_lock */
+		hash_add(dev_priv->mm_structs,
+			 &mm->node, (unsigned long)mm->mm);
+	} else
+		kref_get(&mm->kref);
+
+	obj->userptr.mm = mm;
+out:
+	mutex_unlock(&dev_priv->mm_lock);
+	return ret;
+}
+
+static void
+__i915_mm_struct_free__worker(struct work_struct *work)
+{
+	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
+	i915_mmu_notifier_free(mm->mn, mm->mm);
+	mmdrop(mm->mm);
+	kfree(mm);
+}
+
+static void
+__i915_mm_struct_free(struct kref *kref)
+{
+	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
+
+	/* Protected by dev_priv->mm_lock */
+	hash_del(&mm->node);
+	mutex_unlock(&to_i915(mm->dev)->mm_lock);
+
+	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
+	schedule_work(&mm->work);
+}
+
+static void
+i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
+{
+	if (obj->userptr.mm == NULL)
+		return;
+
+	kref_put_mutex(&obj->userptr.mm->kref,
+		       __i915_mm_struct_free,
+		       &to_i915(obj->base.dev)->mm_lock);
+	obj->userptr.mm = NULL;
+}
+
 struct get_pages_work {
 	struct work_struct work;
 	struct drm_i915_gem_object *obj;
 	struct task_struct *task;
 };
 
-
 #if IS_ENABLED(CONFIG_SWIOTLB)
 #define swiotlb_active() swiotlb_nr_tbl()
 #else
@@ -479,7 +536,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	if (pvec == NULL)
 		pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
 	if (pvec != NULL) {
-		struct mm_struct *mm = obj->userptr.mm;
+		struct mm_struct *mm = obj->userptr.mm->mm;
 
 		down_read(&mm->mmap_sem);
 		while (pinned < num_pages) {
@@ -545,7 +602,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 
 	pvec = NULL;
 	pinned = 0;
-	if (obj->userptr.mm == current->mm) {
+	if (obj->userptr.mm->mm == current->mm) {
 		pvec = kmalloc(num_pages*sizeof(struct page *),
 			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 		if (pvec == NULL) {
@@ -651,17 +708,13 @@ static void
 i915_gem_userptr_release(struct drm_i915_gem_object *obj)
 {
 	i915_gem_userptr_release__mmu_notifier(obj);
-
-	if (obj->userptr.mm) {
-		mmput(obj->userptr.mm);
-		obj->userptr.mm = NULL;
-	}
+	i915_gem_userptr_release__mm_struct(obj);
 }
 
 static int
 i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 {
-	if (obj->userptr.mn)
+	if (obj->userptr.mmu_object)
 		return 0;
 
 	return i915_gem_userptr_init__mmu_notifier(obj, 0);
@@ -736,7 +789,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 		return -ENODEV;
 	}
 
-	/* Allocate the new object */
 	obj = i915_gem_object_alloc(dev);
 	if (obj == NULL)
 		return -ENOMEM;
@@ -754,8 +806,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 	 * at binding. This means that we need to hook into the mmu_notifier
 	 * in order to detect if the mmu is destroyed.
 	 */
-	ret = -ENOMEM;
-	if ((obj->userptr.mm = get_task_mm(current)))
+	ret = i915_gem_userptr_init__mm_struct(obj);
+	if (ret == 0)
 		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
 	if (ret == 0)
 		ret = drm_gem_handle_create(file, &obj->base, &handle);
@@ -772,9 +824,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 int
 i915_gem_init_userptr(struct drm_device *dev)
 {
-#if defined(CONFIG_MMU_NOTIFIER)
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	hash_init(dev_priv->mmu_notifiers);
-#endif
+	mutex_init(&dev_priv->mm_lock);
+	hash_init(dev_priv->mm_structs);
 	return 0;
 }
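Most of the deadlock avoidance above hangs on kref_put_mutex(): it takes the mutex only when the refcount actually drops to zero, calls the release function with that mutex held, and the release function is responsible for unlocking, which is why __i915_mm_struct_free() does mutex_unlock() itself and then defers the heavyweight teardown (mmu_notifier_unregister(), mmdrop()) to a worker that holds no locks. A condensed sketch of the pattern with generic names:

	struct obj {
		struct kref kref;
		struct hlist_node node;
		struct work_struct work;
	};

	static DEFINE_MUTEX(table_lock);

	static void obj_free_worker(struct work_struct *work)
	{
		struct obj *o = container_of(work, struct obj, work);
		kfree(o);			/* heavy teardown runs lock-free */
	}

	static void obj_release(struct kref *kref)
	{
		struct obj *o = container_of(kref, struct obj, kref);

		hash_del(&o->node);		/* still under table_lock */
		mutex_unlock(&table_lock);	/* release callback must unlock */

		INIT_WORK(&o->work, obj_free_worker);
		schedule_work(&o->work);	/* defer to avoid lock recursion */
	}

	static void obj_put(struct obj *o)
	{
		kref_put_mutex(&o->kref, obj_release, &table_lock);
	}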
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e4d7607da2c4..f29b44c86a2f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -334,16 +334,20 @@
 #define GFX_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
 #define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
 #define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
-#define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|4)
+
+#define COLOR_BLT_CMD			(2<<29 | 0x40<<22 | (5-2))
+#define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|4)
 #define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
 #define XY_MONO_SRC_COPY_IMM_BLT	((2<<29)|(0x71<<22)|5)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB	(1<<20)
+#define BLT_WRITE_A			(2<<20)
+#define BLT_WRITE_RGB			(1<<20)
+#define BLT_WRITE_RGBA			(BLT_WRITE_RGB | BLT_WRITE_A)
 #define BLT_DEPTH_8			(0<<24)
 #define BLT_DEPTH_16_565		(1<<24)
 #define BLT_DEPTH_16_1555		(2<<24)
 #define BLT_DEPTH_32			(3<<24)
-#define BLT_ROP_GXCOPY			(0xcc<<16)
+#define BLT_ROP_SRC_COPY		(0xcc<<16)
+#define BLT_ROP_COLOR_COPY		(0xf0<<16)
 #define XY_SRC_COPY_BLT_SRC_TILED	(1<<15) /* 965+ only */
 #define XY_SRC_COPY_BLT_DST_TILED	(1<<11) /* 965+ only */
 #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
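The new define follows the usual i915 2D command encoding that the neighbouring defines already use: bits 31:29 carry the client (2 for the blitter), bits 28:22 the opcode, and the low bits the command length as total DWORDs minus two. Worked through for COLOR_BLT_CMD (a reading of the encoding, not authoritative documentation):

	/* COLOR_BLT_CMD = (2<<29) | (0x40<<22) | (5-2)
	 *   client 2     -> 0x40000000   (2D blitter)
	 *   opcode 0x40  -> 0x10000000   (COLOR_BLT)
	 *   length (5-2) -> 3, i.e. a 5-DWORD packet minus 2 header DWORDs
	 * total          =  0x50000003
	 */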
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 16371a444426..2d068edd1adc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1363,54 +1363,66 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
 
 /* Just userspace ABI convention to limit the wa batch bo to a resonable size */
 #define I830_BATCH_LIMIT (256*1024)
+#define I830_TLB_ENTRIES (2)
+#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
 				u64 offset, u32 len,
 				unsigned flags)
 {
+	u32 cs_offset = ring->scratch.gtt_offset;
 	int ret;
 
-	if (flags & I915_DISPATCH_PINNED) {
-		ret = intel_ring_begin(ring, 4);
-		if (ret)
-			return ret;
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
 
-		intel_ring_emit(ring, MI_BATCH_BUFFER);
-		intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-		intel_ring_emit(ring, offset + len - 8);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
-	} else {
-		u32 cs_offset = ring->scratch.gtt_offset;
+	/* Evict the invalid PTE TLBs */
+	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+	intel_ring_emit(ring, cs_offset);
+	intel_ring_emit(ring, 0xdeadbeef);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
+	if ((flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
 
-		ret = intel_ring_begin(ring, 9+3);
+		ret = intel_ring_begin(ring, 6 + 2);
 		if (ret)
 			return ret;
-		/* Blit the batch (which has now all relocs applied) to the stable batch
-		 * scratch bo area (so that the CS never stumbles over its tlb
-		 * invalidation bug) ... */
-		intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
-				XY_SRC_COPY_BLT_WRITE_ALPHA |
-				XY_SRC_COPY_BLT_WRITE_RGB);
-		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+
+		/* Blit the batch (which has now all relocs applied) to the
+		 * stable batch scratch bo area (so that the CS never
+		 * stumbles over its tlb invalidation bug) ...
+		 */
+		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 1024);
 		intel_ring_emit(ring, cs_offset);
-		intel_ring_emit(ring, 0);
 		intel_ring_emit(ring, 4096);
 		intel_ring_emit(ring, offset);
+
 		intel_ring_emit(ring, MI_FLUSH);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 
 		/* ... and execute it. */
-		intel_ring_emit(ring, MI_BATCH_BUFFER);
-		intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-		intel_ring_emit(ring, cs_offset + len - 8);
-		intel_ring_advance(ring);
+		offset = cs_offset;
 	}
 
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_BATCH_BUFFER);
+	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+	intel_ring_emit(ring, offset + len - 8);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
 	return 0;
 }
 
@@ -2200,7 +2212,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 
 	/* Workaround batchbuffer to combat CS tlb bug. */
 	if (HAS_BROKEN_CS_TLB(dev)) {
-		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+		obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
 		if (obj == NULL) {
 			DRM_ERROR("Failed to allocate batch bo\n");
 			return -ENOMEM;
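Taken together, the rewritten i830 dispatch always performs three steps: a dummy colour blit over I830_TLB_ENTRIES pages of the scratch bo purely to make the command streamer reload its stale PTE TLBs, then (for non-pinned batches) a source-copy blit of the batch into that stable scratch area, and finally MI_BATCH_BUFFER at whichever offset is left. As control flow only, with stand-in emit helpers rather than the driver's functions:

	static int i830_dispatch_sketch(u64 offset, u32 len, unsigned flags)
	{
		u32 cs_offset = scratch_gtt_offset();		/* assumed helper */

		emit_tlb_evicting_color_blt(cs_offset);		/* 1: evict stale TLBs */

		if ((flags & I915_DISPATCH_PINNED) == 0) {
			if (len > I830_BATCH_LIMIT)
				return -ENOSPC;
			emit_copy_batch_to_scratch(offset, cs_offset, len); /* 2 */
			offset = cs_offset;			/* run the stable copy */
		}

		return emit_mi_batch_buffer(offset, len, flags);	/* 3 */
	}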
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c69d3ce1b3d6..c14341ca3ef9 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	/* Prevents vblank waits from timing out in intel_tv_detect_type() */
+	intel_wait_for_vblank(encoder->base.dev,
+			      to_intel_crtc(encoder->base.crtc)->pipe);
+
 	I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
 }
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index a125a7e32742..c6c9b02e0ada 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -258,28 +258,30 @@ static void set_hdmi_pdev(struct drm_device *dev,
 	priv->hdmi_pdev = pdev;
 }
 
+#ifdef CONFIG_OF
+static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
+{
+	int gpio = of_get_named_gpio(of_node, name, 0);
+	if (gpio < 0) {
+		char name2[32];
+		snprintf(name2, sizeof(name2), "%s-gpio", name);
+		gpio = of_get_named_gpio(of_node, name2, 0);
+		if (gpio < 0) {
+			dev_err(dev, "failed to get gpio: %s (%d)\n",
+				name, gpio);
+			gpio = -1;
+		}
+	}
+	return gpio;
+}
+#endif
+
 static int hdmi_bind(struct device *dev, struct device *master, void *data)
 {
 	static struct hdmi_platform_config config = {};
 #ifdef CONFIG_OF
 	struct device_node *of_node = dev->of_node;
 
-	int get_gpio(const char *name)
-	{
-		int gpio = of_get_named_gpio(of_node, name, 0);
-		if (gpio < 0) {
-			char name2[32];
-			snprintf(name2, sizeof(name2), "%s-gpio", name);
-			gpio = of_get_named_gpio(of_node, name2, 0);
-			if (gpio < 0) {
-				dev_err(dev, "failed to get gpio: %s (%d)\n",
-						name, gpio);
-				gpio = -1;
-			}
-		}
-		return gpio;
-	}
-
 	if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) {
 		static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
 		static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
@@ -312,12 +314,12 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 	}
 
 	config.mmio_name = "core_physical";
-	config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk");
-	config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
-	config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd");
-	config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en");
-	config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel");
-	config.mux_lpm_gpio = get_gpio("qcom,hdmi-tx-mux-lpm");
+	config.ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
+	config.ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
+	config.hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
+	config.mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
+	config.mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
+	config.mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
 
 #else
 	static const char *hpd_clk_names[] = {
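The hdmi_bind() change is mechanical but fixes a real portability problem: the original get_gpio() was a GCC nested function (a block-scope definition capturing dev and of_node), a GNU C extension that clang rejects and that can force an executable stack for trampolines. Hoisting it to a static function and passing the captured variables explicitly is the standard cure; the call sites simply grow two arguments:

	/* before: GNU C nested function, captures dev/of_node implicitly */
	config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd");
	/* after: plain ISO C, the capture is made explicit */
	config.hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");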
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index 902d7685d441..f408b69486a8 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -15,19 +15,25 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#ifdef CONFIG_COMMON_CLK
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#endif
 
 #include "hdmi.h"
 
 struct hdmi_phy_8960 {
 	struct hdmi_phy base;
 	struct hdmi *hdmi;
+#ifdef CONFIG_COMMON_CLK
 	struct clk_hw pll_hw;
 	struct clk *pll;
 	unsigned long pixclk;
+#endif
 };
 #define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
+
+#ifdef CONFIG_COMMON_CLK
 #define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw)
 
 /*
@@ -374,7 +380,7 @@ static struct clk_init_data pll_init = {
 	.parent_names = hdmi_pll_parents,
 	.num_parents = ARRAY_SIZE(hdmi_pll_parents),
 };
-
+#endif
 
 /*
  * HDMI Phy:
@@ -480,12 +486,15 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
 {
 	struct hdmi_phy_8960 *phy_8960;
 	struct hdmi_phy *phy = NULL;
-	int ret, i;
+	int ret;
+#ifdef CONFIG_COMMON_CLK
+	int i;
 
 	/* sanity check: */
 	for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
 		if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
 			return ERR_PTR(-EINVAL);
+#endif
 
 	phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
 	if (!phy_8960) {
@@ -499,6 +508,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
 
 	phy_8960->hdmi = hdmi;
 
+#ifdef CONFIG_COMMON_CLK
 	phy_8960->pll_hw.init = &pll_init;
 	phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
 	if (IS_ERR(phy_8960->pll)) {
@@ -506,6 +516,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
 		phy_8960->pll = NULL;
 		goto fail;
 	}
+#endif
 
 	return phy;
 
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 26ee80db17af..fcf95680413d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -52,7 +52,7 @@ module_param(reglog, bool, 0600);
 #define reglog 0
 #endif
 
-static char *vram;
+static char *vram = "16m";
 MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU");
 module_param(vram, charp, 0);
 
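The "16m" default only makes sense because the string is run through a suffix-aware size parser later in the driver; in the kernel that job is conventionally done by memparse(), which understands the k/m/g suffixes. A sketch of how such a charp parameter is typically consumed (assumed to mirror, not quote, the msm code):

	#include <linux/kernel.h>	/* memparse() */

	static char *vram = "16m";

	static unsigned long long vram_size_bytes(void)
	{
		/* "16m" -> 16 * 1024 * 1024; a bare number is taken as bytes */
		return memparse(vram, NULL);
	}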
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index b1e11f8434e2..ac14b67621d3 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -405,16 +405,13 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
 	u8 msg[DP_DPCD_SIZE];
 	int ret;
 
-	char dpcd_hex_dump[DP_DPCD_SIZE * 3];
-
 	ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
 			       DP_DPCD_SIZE);
 	if (ret > 0) {
 		memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
 
-		hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
-				   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
-		DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+		DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+			      dig_connector->dpcd);
 
 		radeon_dp_probe_oui(radeon_connector);
 
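The radeon debug fix leans on the kernel's %*ph printk extension, which hex-dumps a small buffer (up to 64 bytes) straight from the format string, so both the on-stack dump buffer and the hex_dump_to_buffer() call go away. In general use, with generic names:

	u8 dpcd[15];
	/* prints e.g. "DPCD: 11 0a 84 01 ..." */
	DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dpcd), dpcd);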
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index e616eb5f6e7a..3cfb50056f7a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2769,8 +2769,8 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, lower_32_bits(addr));
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 
-	/* PFP_SYNC_ME packet only exists on 7xx+ */
-	if (emit_wait && (rdev->family >= CHIP_RV770)) {
+	/* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
+	if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
 		/* Prevent the PFP from running ahead of the semaphore wait */
 		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
 		radeon_ring_write(ring, 0x0);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 92b2d8dd4735..e74c7e387dde 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -447,6 +447,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 		}
 	}
 
+	/* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
+	if ((dev->pdev->device == 0x9805) &&
+	    (dev->pdev->subsystem_vendor == 0x1734) &&
+	    (dev->pdev->subsystem_device == 0x11bd)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+			return false;
+	}
 
 	return true;
 }
@@ -2281,19 +2288,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
 			 (controller->ucFanParameters &
 			  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 		rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
-	} else if ((controller->ucType ==
-		    ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
-		   (controller->ucType ==
-		    ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
-		   (controller->ucType ==
-		    ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
-		DRM_INFO("Special thermal controller config\n");
+	} else if (controller->ucType ==
+		   ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
+		DRM_INFO("External GPIO thermal controller %s fan control\n",
+			 (controller->ucFanParameters &
+			  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+		rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
+	} else if (controller->ucType ==
+		   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
+		DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+			 (controller->ucFanParameters &
+			  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+		rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
+	} else if (controller->ucType ==
+		   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+		DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+			 (controller->ucFanParameters &
+			  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+		rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
 	} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
 		DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
 			 pp_lib_thermal_controller_names[controller->ucType],
 			 controller->ucI2cAddress >> 1,
 			 (controller->ucFanParameters &
 			  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+		rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
 		i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
 		rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
 		if (rdev->pm.i2c_bus) {
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 56d9fd66d8ae..abd6753a570a 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -34,7 +34,7 @@
 int radeon_semaphore_create(struct radeon_device *rdev,
 			    struct radeon_semaphore **semaphore)
 {
-	uint32_t *cpu_addr;
+	uint64_t *cpu_addr;
 	int i, r;
 
 	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);