author     Dave Airlie <airlied@redhat.com>    2014-09-11 06:17:10 -0400
committer  Dave Airlie <airlied@redhat.com>    2014-09-11 06:17:10 -0400
commit     3afdd8a0e26dfa8c4c67bef56d89fe53ed425f3d
tree       d9810ad592b03b0d8751491908e9969cec6790e5
parent     69e672fded796818a7e3216eb968e4e01a5555b4
parent     7a98948f3b536ca9a077e84966ddc0e9f53726df
Merge tag 'drm-intel-fixes-2014-09-10' of git://anongit.freedesktop.org/drm-intel into drm-fixes
More fixes for 3.17, almost all Cc: stable material.
* tag 'drm-intel-fixes-2014-09-10' of git://anongit.freedesktop.org/drm-intel:
drm/i915: Wait for vblank before enabling the TV encoder
drm/i915: Evict CS TLBs between batches
drm/i915: Fix irq enable tracking in driver load
drm/i915: Fix EIO/wedged handling in gem fault handler
drm/i915: Prevent recursive deadlock on releasing a busy userptr
 drivers/gpu/drm/i915/i915_dma.c         |   9
 drivers/gpu/drm/i915/i915_drv.h         |  10
 drivers/gpu/drm/i915/i915_gem.c         |  11
 drivers/gpu/drm/i915/i915_gem_userptr.c | 409
 drivers/gpu/drm/i915/i915_reg.h         |  12
 drivers/gpu/drm/i915/intel_ringbuffer.c |  66
 drivers/gpu/drm/i915/intel_tv.c         |   4
 7 files changed, 300 insertions(+), 221 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2e7f03ad5ee2..9933c26017ed 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1336,12 +1336,17 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
         intel_power_domains_init_hw(dev_priv);
 
+        /*
+         * We enable some interrupt sources in our postinstall hooks, so mark
+         * interrupts as enabled _before_ actually enabling them to avoid
+         * special cases in our ordering checks.
+         */
+        dev_priv->pm._irqs_disabled = false;
+
         ret = drm_irq_install(dev, dev->pdev->irq);
         if (ret)
                 goto cleanup_gem_stolen;
 
-        dev_priv->pm._irqs_disabled = false;
-
         /* Important: The output setup functions called by modeset_init need
          * working irqs for e.g. gmbus and dp aux transfers. */
         intel_modeset_init(dev);
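The hunk above works because dev_priv->pm._irqs_disabled must already read false by the time drm_irq_install() runs the driver's postinstall hook, which unmasks interrupt sources and trips the ordering checks otherwise. A minimal userspace sketch of that ordering constraint (all names here are illustrative, not the driver's actual API):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool irqs_disabled = true;

/* Stands in for an i915 postinstall hook that unmasks interrupt
 * sources and asserts the bookkeeping flag already says "enabled". */
static void postinstall_hook(void)
{
        assert(!irqs_disabled);         /* fires if the flag flips too late */
        printf("interrupt sources unmasked\n");
}

/* Stands in for drm_irq_install(), which invokes the hook internally. */
static int install_irqs(void)
{
        postinstall_hook();
        return 0;
}

int main(void)
{
        irqs_disabled = false;  /* must happen before install, as in the patch */
        return install_irqs();
}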
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7a830eac5ba3..3524306d8cfb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -184,6 +184,7 @@ enum hpd_pin {
         if ((1 << (domain)) & (mask))
 
 struct drm_i915_private;
+struct i915_mm_struct;
 struct i915_mmu_object;
 
 enum intel_dpll_id {
@@ -1506,9 +1507,8 @@ struct drm_i915_private {
         struct i915_gtt gtt; /* VM representing the global address space */
 
         struct i915_gem_mm mm;
-#if defined(CONFIG_MMU_NOTIFIER)
-        DECLARE_HASHTABLE(mmu_notifiers, 7);
-#endif
+        DECLARE_HASHTABLE(mm_structs, 7);
+        struct mutex mm_lock;
 
         /* Kernel Modesetting */
 
@@ -1814,8 +1814,8 @@ struct drm_i915_gem_object {
                 unsigned workers :4;
 #define I915_GEM_USERPTR_MAX_WORKERS 15
 
-                struct mm_struct *mm;
-                struct i915_mmu_object *mn;
+                struct i915_mm_struct *mm;
+                struct i915_mmu_object *mmu_object;
                 struct work_struct *work;
         } userptr;
 };
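The new mm_structs field is a 128-bucket kernel hashtable keyed by the mm_struct pointer and guarded by the new mm_lock. Condensed from the lookup code later in this patch, the access pattern is (a sketch, not a complete translation unit; the caller must hold the lock):

#include <linux/hashtable.h>

/* Mirrors __i915_mm_struct_find() in i915_gem_userptr.c below. */
DECLARE_HASHTABLE(mm_structs, 7);       /* 2^7 = 128 buckets */

static struct i915_mm_struct *
find_mm(struct mm_struct *real)
{
        struct i915_mm_struct *mm;

        /* hash on the pointer value, then walk only that bucket */
        hash_for_each_possible(mm_structs, mm, node, (unsigned long)real)
                if (mm->mm == real)
                        return mm;

        return NULL;
}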
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ba7f5c6bb50d..ad55b06a3cb1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1590,10 +1590,13 @@ unlock:
 out:
         switch (ret) {
         case -EIO:
-                /* If this -EIO is due to a gpu hang, give the reset code a
-                 * chance to clean up the mess. Otherwise return the proper
-                 * SIGBUS. */
-                if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+                /*
+                 * We eat errors when the gpu is terminally wedged to avoid
+                 * userspace unduly crashing (gl has no provisions for mmaps to
+                 * fail). But any other -EIO isn't ours (e.g. swap in failure)
+                 * and so needs to be reported.
+                 */
+                if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
                         ret = VM_FAULT_SIGBUS;
                         break;
                 }
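Note that the sense of the wedged test flips together with the comment: only a non-wedged -EIO is now reported as SIGBUS, while a terminally wedged GPU swallows the error. A small standalone model of the new branch (the enum is illustrative; the real handler falls through to the -EAGAIN/NOPAGE handling when wedged):

#include <stdbool.h>
#include <stdio.h>

enum fault_result { FAULT_SWALLOWED, FAULT_SIGBUS };

/* Models the -EIO arm of the fixed fault handler: a non-wedged -EIO
 * (e.g. a swap-in failure) is not the driver's error and is reported. */
static enum fault_result handle_eio(bool terminally_wedged)
{
        if (!terminally_wedged)
                return FAULT_SIGBUS;    /* the error isn't ours; report it */
        return FAULT_SWALLOWED;         /* eat the error, GL can't cope */
}

int main(void)
{
        printf("wedged:     %d\n", handle_eio(true));   /* swallowed */
        printf("not wedged: %d\n", handle_eio(false));  /* SIGBUS */
        return 0;
}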
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index fe69fc837d9e..d38413997379 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -32,6 +32,15 @@
 #include <linux/mempolicy.h>
 #include <linux/swap.h>
 
+struct i915_mm_struct {
+        struct mm_struct *mm;
+        struct drm_device *dev;
+        struct i915_mmu_notifier *mn;
+        struct hlist_node node;
+        struct kref kref;
+        struct work_struct work;
+};
+
 #if defined(CONFIG_MMU_NOTIFIER)
 #include <linux/interval_tree.h>
 
@@ -41,16 +50,12 @@ struct i915_mmu_notifier {
         struct mmu_notifier mn;
         struct rb_root objects;
         struct list_head linear;
-        struct drm_device *dev;
-        struct mm_struct *mm;
-        struct work_struct work;
-        unsigned long count;
         unsigned long serial;
         bool has_linear;
 };
 
 struct i915_mmu_object {
-        struct i915_mmu_notifier *mmu;
+        struct i915_mmu_notifier *mn;
         struct interval_tree_node it;
         struct list_head link;
         struct drm_i915_gem_object *obj;
@@ -96,18 +101,18 @@ static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
                                       unsigned long start,
                                       unsigned long end)
 {
-        struct i915_mmu_object *mmu;
+        struct i915_mmu_object *mo;
         unsigned long serial;
 
 restart:
         serial = mn->serial;
-        list_for_each_entry(mmu, &mn->linear, link) {
+        list_for_each_entry(mo, &mn->linear, link) {
                 struct drm_i915_gem_object *obj;
 
-                if (mmu->it.last < start || mmu->it.start > end)
+                if (mo->it.last < start || mo->it.start > end)
                         continue;
 
-                obj = mmu->obj;
+                obj = mo->obj;
                 drm_gem_object_reference(&obj->base);
                 spin_unlock(&mn->lock);
 
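Because invalidate_range__linear() drops mn->lock around each object cancellation, the linear list can mutate mid-walk; the serial counter, bumped on every add/del, forces a restart when that happens. A hedged userspace model of that generation-counter loop (a pthread mutex standing in for the spinlock; the real code rechecks mn->serial after relocking, just as here):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long serial = 1;        /* bumped on every list mutation */
static int items[8], nitems;

/* Walk the list, dropping the lock per item; restart the walk from
 * scratch if the generation counter moved while the lock was dropped. */
static void walk(void)
{
        unsigned long seen;
        int i;

        pthread_mutex_lock(&lock);
restart:
        seen = serial;
        for (i = 0; i < nitems; i++) {
                int item = items[i];

                pthread_mutex_unlock(&lock);
                printf("processing %d outside the lock\n", item);
                pthread_mutex_lock(&lock);

                if (serial != seen)     /* list may have changed; rewalk */
                        goto restart;
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        nitems = 3; items[0] = 10; items[1] = 20; items[2] = 30;
        walk();
        return 0;
}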
@@ -160,130 +165,47 @@ static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
 };
 
 static struct i915_mmu_notifier *
-__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
-{
-        struct drm_i915_private *dev_priv = to_i915(dev);
-        struct i915_mmu_notifier *mmu;
-
-        /* Protected by dev->struct_mutex */
-        hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
-                if (mmu->mm == mm)
-                        return mmu;
-
-        return NULL;
-}
-
-static struct i915_mmu_notifier *
-i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+i915_mmu_notifier_create(struct mm_struct *mm)
 {
-        struct drm_i915_private *dev_priv = to_i915(dev);
-        struct i915_mmu_notifier *mmu;
+        struct i915_mmu_notifier *mn;
         int ret;
 
-        lockdep_assert_held(&dev->struct_mutex);
-
-        mmu = __i915_mmu_notifier_lookup(dev, mm);
-        if (mmu)
-                return mmu;
-
-        mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
-        if (mmu == NULL)
+        mn = kmalloc(sizeof(*mn), GFP_KERNEL);
+        if (mn == NULL)
                 return ERR_PTR(-ENOMEM);
 
-        spin_lock_init(&mmu->lock);
-        mmu->dev = dev;
-        mmu->mn.ops = &i915_gem_userptr_notifier;
-        mmu->mm = mm;
-        mmu->objects = RB_ROOT;
-        mmu->count = 0;
-        mmu->serial = 1;
-        INIT_LIST_HEAD(&mmu->linear);
-        mmu->has_linear = false;
-
-        /* Protected by mmap_sem (write-lock) */
-        ret = __mmu_notifier_register(&mmu->mn, mm);
+        spin_lock_init(&mn->lock);
+        mn->mn.ops = &i915_gem_userptr_notifier;
+        mn->objects = RB_ROOT;
+        mn->serial = 1;
+        INIT_LIST_HEAD(&mn->linear);
+        mn->has_linear = false;
+
+        /* Protected by mmap_sem (write-lock) */
+        ret = __mmu_notifier_register(&mn->mn, mm);
         if (ret) {
-                kfree(mmu);
+                kfree(mn);
                 return ERR_PTR(ret);
         }
 
-        /* Protected by dev->struct_mutex */
-        hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
-        return mmu;
+        return mn;
 }
 
-static void
-__i915_mmu_notifier_destroy_worker(struct work_struct *work)
+static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
 {
-        struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
-        mmu_notifier_unregister(&mmu->mn, mmu->mm);
-        kfree(mmu);
-}
-
-static void
-__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
-{
-        lockdep_assert_held(&mmu->dev->struct_mutex);
-
-        /* Protected by dev->struct_mutex */
-        hash_del(&mmu->node);
-
-        /* Our lock ordering is: mmap_sem, mmu_notifier_scru, struct_mutex.
-         * We enter the function holding struct_mutex, therefore we need
-         * to drop our mutex prior to calling mmu_notifier_unregister in
-         * order to prevent lock inversion (and system-wide deadlock)
-         * between the mmap_sem and struct-mutex. Hence we defer the
-         * unregistration to a workqueue where we hold no locks.
-         */
-        INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
-        schedule_work(&mmu->work);
-}
-
-static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
-{
-        if (++mmu->serial == 0)
-                mmu->serial = 1;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
-{
-        struct i915_mmu_object *mn;
-
-        list_for_each_entry(mn, &mmu->linear, link)
-                if (mn->is_linear)
-                        return true;
-
-        return false;
-}
-
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
-                      struct i915_mmu_object *mn)
-{
-        lockdep_assert_held(&mmu->dev->struct_mutex);
-
-        spin_lock(&mmu->lock);
-        list_del(&mn->link);
-        if (mn->is_linear)
-                mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
-        else
-                interval_tree_remove(&mn->it, &mmu->objects);
-        __i915_mmu_notifier_update_serial(mmu);
-        spin_unlock(&mmu->lock);
-
-        /* Protected against _add() by dev->struct_mutex */
-        if (--mmu->count == 0)
-                __i915_mmu_notifier_destroy(mmu);
+        if (++mn->serial == 0)
+                mn->serial = 1;
 }
 
 static int
-i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
-                      struct i915_mmu_object *mn)
+i915_mmu_notifier_add(struct drm_device *dev,
+                      struct i915_mmu_notifier *mn,
+                      struct i915_mmu_object *mo)
 {
         struct interval_tree_node *it;
         int ret;
 
-        ret = i915_mutex_lock_interruptible(mmu->dev);
+        ret = i915_mutex_lock_interruptible(dev);
         if (ret)
                 return ret;
 
@@ -291,11 +213,11 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
          * remove the objects from the interval tree) before we do
          * the check for overlapping objects.
          */
-        i915_gem_retire_requests(mmu->dev);
+        i915_gem_retire_requests(dev);
 
-        spin_lock(&mmu->lock);
-        it = interval_tree_iter_first(&mmu->objects,
-                                      mn->it.start, mn->it.last);
+        spin_lock(&mn->lock);
+        it = interval_tree_iter_first(&mn->objects,
+                                      mo->it.start, mo->it.last);
         if (it) {
                 struct drm_i915_gem_object *obj;
 
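interval_tree_iter_first() returns a node whose closed interval [start, last] overlaps the queried range, which is how an overlapping userptr object is detected above. The predicate reduces to two comparisons; a standalone version for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Closed-interval overlap test, the same predicate the interval tree
 * (and the linear fallback earlier in this file) relies on. */
static bool ranges_overlap(unsigned long start1, unsigned long last1,
                           unsigned long start2, unsigned long last2)
{
        return !(last1 < start2 || start1 > last2);
}

int main(void)
{
        /* an object at [0x1000, 0x1fff] vs a new one at [0x1800, 0x27ff] */
        printf("%d\n", ranges_overlap(0x1000, 0x1fff, 0x1800, 0x27ff)); /* 1 */
        printf("%d\n", ranges_overlap(0x1000, 0x1fff, 0x2000, 0x2fff)); /* 0 */
        return 0;
}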
@@ -312,86 +234,122 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
 
                 obj = container_of(it, struct i915_mmu_object, it)->obj;
                 if (!obj->userptr.workers)
-                        mmu->has_linear = mn->is_linear = true;
+                        mn->has_linear = mo->is_linear = true;
                 else
                         ret = -EAGAIN;
         } else
-                interval_tree_insert(&mn->it, &mmu->objects);
+                interval_tree_insert(&mo->it, &mn->objects);
 
         if (ret == 0) {
-                list_add(&mn->link, &mmu->linear);
-                __i915_mmu_notifier_update_serial(mmu);
+                list_add(&mo->link, &mn->linear);
+                __i915_mmu_notifier_update_serial(mn);
         }
-        spin_unlock(&mmu->lock);
-        mutex_unlock(&mmu->dev->struct_mutex);
+        spin_unlock(&mn->lock);
+        mutex_unlock(&dev->struct_mutex);
 
         return ret;
 }
 
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
+{
+        struct i915_mmu_object *mo;
+
+        list_for_each_entry(mo, &mn->linear, link)
+                if (mo->is_linear)
+                        return true;
+
+        return false;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
+                      struct i915_mmu_object *mo)
+{
+        spin_lock(&mn->lock);
+        list_del(&mo->link);
+        if (mo->is_linear)
+                mn->has_linear = i915_mmu_notifier_has_linear(mn);
+        else
+                interval_tree_remove(&mo->it, &mn->objects);
+        __i915_mmu_notifier_update_serial(mn);
+        spin_unlock(&mn->lock);
+}
+
 static void
 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
-        struct i915_mmu_object *mn;
+        struct i915_mmu_object *mo;
 
-        mn = obj->userptr.mn;
-        if (mn == NULL)
+        mo = obj->userptr.mmu_object;
+        if (mo == NULL)
                 return;
 
-        i915_mmu_notifier_del(mn->mmu, mn);
-        obj->userptr.mn = NULL;
+        i915_mmu_notifier_del(mo->mn, mo);
+        kfree(mo);
+
+        obj->userptr.mmu_object = NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_find(struct i915_mm_struct *mm)
+{
+        if (mm->mn == NULL) {
+                down_write(&mm->mm->mmap_sem);
+                mutex_lock(&to_i915(mm->dev)->mm_lock);
+                if (mm->mn == NULL)
+                        mm->mn = i915_mmu_notifier_create(mm->mm);
+                mutex_unlock(&to_i915(mm->dev)->mm_lock);
+                up_write(&mm->mm->mmap_sem);
+        }
+        return mm->mn;
 }
 
 static int
 i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                     unsigned flags)
 {
-        struct i915_mmu_notifier *mmu;
-        struct i915_mmu_object *mn;
+        struct i915_mmu_notifier *mn;
+        struct i915_mmu_object *mo;
         int ret;
 
         if (flags & I915_USERPTR_UNSYNCHRONIZED)
                 return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
 
-        down_write(&obj->userptr.mm->mmap_sem);
-        ret = i915_mutex_lock_interruptible(obj->base.dev);
-        if (ret == 0) {
-                mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
-                if (!IS_ERR(mmu))
-                        mmu->count++; /* preemptive add to act as a refcount */
-                else
-                        ret = PTR_ERR(mmu);
-                mutex_unlock(&obj->base.dev->struct_mutex);
-        }
-        up_write(&obj->userptr.mm->mmap_sem);
-        if (ret)
-                return ret;
+        if (WARN_ON(obj->userptr.mm == NULL))
+                return -EINVAL;
 
-        mn = kzalloc(sizeof(*mn), GFP_KERNEL);
-        if (mn == NULL) {
-                ret = -ENOMEM;
-                goto destroy_mmu;
-        }
+        mn = i915_mmu_notifier_find(obj->userptr.mm);
+        if (IS_ERR(mn))
+                return PTR_ERR(mn);
 
-        mn->mmu = mmu;
-        mn->it.start = obj->userptr.ptr;
-        mn->it.last = mn->it.start + obj->base.size - 1;
-        mn->obj = obj;
+        mo = kzalloc(sizeof(*mo), GFP_KERNEL);
+        if (mo == NULL)
+                return -ENOMEM;
 
-        ret = i915_mmu_notifier_add(mmu, mn);
-        if (ret)
-                goto free_mn;
+        mo->mn = mn;
+        mo->it.start = obj->userptr.ptr;
+        mo->it.last = mo->it.start + obj->base.size - 1;
+        mo->obj = obj;
 
-        obj->userptr.mn = mn;
+        ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
+        if (ret) {
+                kfree(mo);
+                return ret;
+        }
+
+        obj->userptr.mmu_object = mo;
         return 0;
+}
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+                       struct mm_struct *mm)
+{
+        if (mn == NULL)
+                return;
 
-free_mn:
+        mmu_notifier_unregister(&mn->mn, mm);
         kfree(mn);
-destroy_mmu:
-        mutex_lock(&obj->base.dev->struct_mutex);
-        if (--mmu->count == 0)
-                __i915_mmu_notifier_destroy(mmu);
-        mutex_unlock(&obj->base.dev->struct_mutex);
-        return ret;
 }
 
 #else
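i915_mmu_notifier_find(), added in the hunk above, is double-checked creation: an optimistic unlocked test of mm->mn, then a re-check under mm_lock (with mmap_sem write-held so __mmu_notifier_register() is safe). A hedged pthread model of the same shape; note that portable threaded C would need an atomic pointer for the unlocked read, which this sketch omits for brevity:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *notifier;   /* stands in for mm->mn */

/* Create-on-first-use with a re-check under the lock, mirroring
 * i915_mmu_notifier_find(); the second NULL test is the one that counts. */
static int *find_or_create(void)
{
        if (notifier == NULL) {
                pthread_mutex_lock(&lock);
                if (notifier == NULL) { /* someone may have beaten us here */
                        notifier = malloc(sizeof(*notifier));
                        if (notifier)
                                *notifier = 42;
                }
                pthread_mutex_unlock(&lock);
        }
        return notifier;
}

int main(void)
{
        int *n = find_or_create();
        printf("%d\n", n ? *n : -1);
        free(notifier);
        return 0;
}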
@@ -413,15 +371,114 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 
         return 0;
 }
+
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+                       struct mm_struct *mm)
+{
+}
+
 #endif
 
+static struct i915_mm_struct *
+__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+{
+        struct i915_mm_struct *mm;
+
+        /* Protected by dev_priv->mm_lock */
+        hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
+                if (mm->mm == real)
+                        return mm;
+
+        return NULL;
+}
+
+static int
+i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
+{
+        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+        struct i915_mm_struct *mm;
+        int ret = 0;
+
+        /* During release of the GEM object we hold the struct_mutex. This
+         * precludes us from calling mmput() at that time as that may be
+         * the last reference and so call exit_mmap(). exit_mmap() will
+         * attempt to reap the vma, and if we were holding a GTT mmap
+         * would then call drm_gem_vm_close() and attempt to reacquire
+         * the struct mutex. So in order to avoid that recursion, we have
+         * to defer releasing the mm reference until after we drop the
+         * struct_mutex, i.e. we need to schedule a worker to do the clean
+         * up.
+         */
+        mutex_lock(&dev_priv->mm_lock);
+        mm = __i915_mm_struct_find(dev_priv, current->mm);
+        if (mm == NULL) {
+                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+                if (mm == NULL) {
+                        ret = -ENOMEM;
+                        goto out;
+                }
+
+                kref_init(&mm->kref);
+                mm->dev = obj->base.dev;
+
+                mm->mm = current->mm;
+                atomic_inc(&current->mm->mm_count);
+
+                mm->mn = NULL;
+
+                /* Protected by dev_priv->mm_lock */
+                hash_add(dev_priv->mm_structs,
+                         &mm->node, (unsigned long)mm->mm);
+        } else
+                kref_get(&mm->kref);
+
+        obj->userptr.mm = mm;
+out:
+        mutex_unlock(&dev_priv->mm_lock);
+        return ret;
+}
+
+static void
+__i915_mm_struct_free__worker(struct work_struct *work)
+{
+        struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
+        i915_mmu_notifier_free(mm->mn, mm->mm);
+        mmdrop(mm->mm);
+        kfree(mm);
+}
+
+static void
+__i915_mm_struct_free(struct kref *kref)
+{
+        struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
+
+        /* Protected by dev_priv->mm_lock */
+        hash_del(&mm->node);
+        mutex_unlock(&to_i915(mm->dev)->mm_lock);
+
+        INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
+        schedule_work(&mm->work);
+}
+
+static void
+i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
+{
+        if (obj->userptr.mm == NULL)
+                return;
+
+        kref_put_mutex(&obj->userptr.mm->kref,
+                       __i915_mm_struct_free,
+                       &to_i915(obj->base.dev)->mm_lock);
+        obj->userptr.mm = NULL;
+}
+
 struct get_pages_work {
         struct work_struct work;
         struct drm_i915_gem_object *obj;
         struct task_struct *task;
 };
 
-
 #if IS_ENABLED(CONFIG_SWIOTLB)
 #define swiotlb_active() swiotlb_nr_tbl()
 #else
@@ -479,7 +536,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
         if (pvec == NULL)
                 pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
         if (pvec != NULL) {
-                struct mm_struct *mm = obj->userptr.mm;
+                struct mm_struct *mm = obj->userptr.mm->mm;
 
                 down_read(&mm->mmap_sem);
                 while (pinned < num_pages) {
@@ -545,7 +602,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 
         pvec = NULL;
         pinned = 0;
-        if (obj->userptr.mm == current->mm) {
+        if (obj->userptr.mm->mm == current->mm) {
                 pvec = kmalloc(num_pages*sizeof(struct page *),
                                GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
                 if (pvec == NULL) {
@@ -651,17 +708,13 @@ static void
 i915_gem_userptr_release(struct drm_i915_gem_object *obj)
 {
         i915_gem_userptr_release__mmu_notifier(obj);
-
-        if (obj->userptr.mm) {
-                mmput(obj->userptr.mm);
-                obj->userptr.mm = NULL;
-        }
+        i915_gem_userptr_release__mm_struct(obj);
 }
 
 static int
 i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 {
-        if (obj->userptr.mn)
+        if (obj->userptr.mmu_object)
                 return 0;
 
         return i915_gem_userptr_init__mmu_notifier(obj, 0);
@@ -736,7 +789,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
                 return -ENODEV;
         }
 
-        /* Allocate the new object */
         obj = i915_gem_object_alloc(dev);
         if (obj == NULL)
                 return -ENOMEM;
@@ -754,8 +806,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
          * at binding. This means that we need to hook into the mmu_notifier
          * in order to detect if the mmu is destroyed.
          */
-        ret = -ENOMEM;
-        if ((obj->userptr.mm = get_task_mm(current)))
+        ret = i915_gem_userptr_init__mm_struct(obj);
+        if (ret == 0)
                 ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
         if (ret == 0)
                 ret = drm_gem_handle_create(file, &obj->base, &handle);
@@ -772,9 +824,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 int
 i915_gem_init_userptr(struct drm_device *dev)
 {
-#if defined(CONFIG_MMU_NOTIFIER)
         struct drm_i915_private *dev_priv = to_i915(dev);
-        hash_init(dev_priv->mmu_notifiers);
-#endif
+        mutex_init(&dev_priv->mm_lock);
+        hash_init(dev_priv->mm_structs);
         return 0;
 }
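The release path carries the actual deadlock fix: kref_put_mutex() takes mm_lock only on the final put, __i915_mm_struct_free() unhashes the entry and drops that mutex itself, and the mmdrop() happens later in a worker with no locks held. A userspace model of the kref_put_mutex() contract (toy refcount; the GCC __sync builtin stands in for the kernel atomics, and the real kref_put_mutex() re-checks the count after taking the lock to close the race with a concurrent get):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct entry {
        int refcount;   /* a toy kref */
};

/* Called with table_lock held; must release it, as __i915_mm_struct_free()
 * does, so the caller never unlocks after the object is gone. */
static void entry_release(struct entry *e)
{
        printf("unhashed under the lock\n");
        pthread_mutex_unlock(&table_lock);
        free(e);        /* the kernel defers the heavy teardown to a worker */
}

/* Models the kref_put_mutex() contract: the mutex is only taken for the
 * final put, and the release callback inherits (and drops) it. */
static void entry_put(struct entry *e)
{
        if (__sync_sub_and_fetch(&e->refcount, 1) == 0) {
                pthread_mutex_lock(&table_lock);
                entry_release(e);
        }
}

int main(void)
{
        struct entry *e = calloc(1, sizeof(*e));

        e->refcount = 1;
        entry_put(e);
        return 0;
}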
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e4d7607da2c4..f29b44c86a2f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -334,16 +334,20 @@
 #define GFX_OP_DESTBUFFER_INFO   ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
 #define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
 #define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
-#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
+
+#define COLOR_BLT_CMD                   (2<<29 | 0x40<<22 | (5-2))
+#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
 #define XY_SRC_COPY_BLT_CMD             ((2<<29)|(0x53<<22)|6)
 #define XY_MONO_SRC_COPY_IMM_BLT        ((2<<29)|(0x71<<22)|5)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA     (1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB       (1<<20)
+#define   BLT_WRITE_A                   (2<<20)
+#define   BLT_WRITE_RGB                 (1<<20)
+#define   BLT_WRITE_RGBA                (BLT_WRITE_RGB | BLT_WRITE_A)
 #define BLT_DEPTH_8                     (0<<24)
 #define BLT_DEPTH_16_565                (1<<24)
 #define BLT_DEPTH_16_1555               (2<<24)
 #define BLT_DEPTH_32                    (3<<24)
-#define BLT_ROP_GXCOPY                  (0xcc<<16)
+#define BLT_ROP_SRC_COPY                (0xcc<<16)
+#define BLT_ROP_COLOR_COPY              (0xf0<<16)
 #define XY_SRC_COPY_BLT_SRC_TILED       (1<<15) /* 965+ only */
 #define XY_SRC_COPY_BLT_DST_TILED       (1<<11) /* 965+ only */
 #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
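These blitter command words pack a client id (bits 31:29), an opcode (bits 28:22), per-channel write enables (bits 21:20) and a DWord length (total minus two, hence the (5-2) in COLOR_BLT_CMD). A small decoder built only from the defines above; the field boundaries are read off those definitions, not from hardware documentation:

#include <stdio.h>

#define COLOR_BLT_CMD           (2<<29 | 0x40<<22 | (5-2))
#define SRC_COPY_BLT_CMD        ((2<<29)|(0x43<<22)|4)
#define BLT_WRITE_A             (2<<20)
#define BLT_WRITE_RGB           (1<<20)
#define BLT_WRITE_RGBA          (BLT_WRITE_RGB | BLT_WRITE_A)

/* Prints the fields implied by the definitions above. */
static void decode(const char *name, unsigned int cmd)
{
        printf("%-18s client=%u opcode=0x%02x write=0x%x len=%u\n",
               name,
               cmd >> 29,               /* 2 = 2D/blitter client */
               (cmd >> 22) & 0x7f,      /* 0x40 color blt, 0x43 src copy */
               (cmd >> 20) & 0x3,       /* RGB/alpha write enables */
               cmd & 0xff);             /* DWord length (total minus two) */
}

int main(void)
{
        decode("COLOR_BLT_CMD", COLOR_BLT_CMD | BLT_WRITE_RGBA);
        decode("SRC_COPY_BLT_CMD", SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
        return 0;
}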
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 16371a444426..2d068edd1adc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1363,54 +1363,66 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
 
 /* Just userspace ABI convention to limit the wa batch bo to a resonable size */
 #define I830_BATCH_LIMIT (256*1024)
+#define I830_TLB_ENTRIES (2)
+#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
                          u64 offset, u32 len,
                          unsigned flags)
 {
+        u32 cs_offset = ring->scratch.gtt_offset;
         int ret;
 
-        if (flags & I915_DISPATCH_PINNED) {
-                ret = intel_ring_begin(ring, 4);
-                if (ret)
-                        return ret;
+        ret = intel_ring_begin(ring, 6);
+        if (ret)
+                return ret;
 
-                intel_ring_emit(ring, MI_BATCH_BUFFER);
-                intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-                intel_ring_emit(ring, offset + len - 8);
-                intel_ring_emit(ring, MI_NOOP);
-                intel_ring_advance(ring);
-        } else {
-                u32 cs_offset = ring->scratch.gtt_offset;
+        /* Evict the invalid PTE TLBs */
+        intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+        intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+        intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+        intel_ring_emit(ring, cs_offset);
+        intel_ring_emit(ring, 0xdeadbeef);
+        intel_ring_emit(ring, MI_NOOP);
+        intel_ring_advance(ring);
 
+        if ((flags & I915_DISPATCH_PINNED) == 0) {
                 if (len > I830_BATCH_LIMIT)
                         return -ENOSPC;
 
-                ret = intel_ring_begin(ring, 9+3);
+                ret = intel_ring_begin(ring, 6 + 2);
                 if (ret)
                         return ret;
-                /* Blit the batch (which has now all relocs applied) to the stable batch
-                 * scratch bo area (so that the CS never stumbles over its tlb
-                 * invalidation bug) ... */
-                intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
-                                XY_SRC_COPY_BLT_WRITE_ALPHA |
-                                XY_SRC_COPY_BLT_WRITE_RGB);
-                intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
-                intel_ring_emit(ring, 0);
-                intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+
+                /* Blit the batch (which has now all relocs applied) to the
+                 * stable batch scratch bo area (so that the CS never
+                 * stumbles over its tlb invalidation bug) ...
+                 */
+                intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+                intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+                intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 1024);
                 intel_ring_emit(ring, cs_offset);
-                intel_ring_emit(ring, 0);
                 intel_ring_emit(ring, 4096);
                 intel_ring_emit(ring, offset);
+
                 intel_ring_emit(ring, MI_FLUSH);
+                intel_ring_emit(ring, MI_NOOP);
+                intel_ring_advance(ring);
 
                 /* ... and execute it. */
-                intel_ring_emit(ring, MI_BATCH_BUFFER);
-                intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-                intel_ring_emit(ring, cs_offset + len - 8);
-                intel_ring_advance(ring);
+                offset = cs_offset;
         }
 
+        ret = intel_ring_begin(ring, 4);
+        if (ret)
+                return ret;
+
+        intel_ring_emit(ring, MI_BATCH_BUFFER);
+        intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+        intel_ring_emit(ring, offset + len - 8);
+        intel_ring_emit(ring, MI_NOOP);
+        intel_ring_advance(ring);
+
         return 0;
 }
 
@@ -2200,7 +2212,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 
         /* Workaround batchbuffer to combat CS tlb bug. */
         if (HAS_BROKEN_CS_TLB(dev)) {
-                obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+                obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
                 if (obj == NULL) {
                         DRM_ERROR("Failed to allocate batch bo\n");
                         return -ENOMEM;
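The sizes in the workaround are plain arithmetic: the scratch bo must cover max(2 TLB pages, the 256 KiB batch limit), and the copy is issued as DIV_ROUND_UP(len, 4096) rows of one page each. A quick standalone check of those numbers (userspace stand-ins for the kernel's max() and DIV_ROUND_UP()):

#include <stdio.h>

#define MAX(a, b)               ((a) > (b) ? (a) : (b))
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

#define I830_BATCH_LIMIT        (256*1024)
#define I830_TLB_ENTRIES        (2)
#define I830_WA_SIZE            MAX(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)

int main(void)
{
        unsigned int len = 100000;      /* an arbitrary batch length */

        /* 262144: the batch limit dominates the two TLB pages */
        printf("wa bo size = %u\n", I830_WA_SIZE);
        /* 25 rows of 4096 bytes cover a 100000-byte batch */
        printf("blit rows  = %u\n", DIV_ROUND_UP(len, 4096));
        return 0;
}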
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c69d3ce1b3d6..c14341ca3ef9 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
         struct drm_device *dev = encoder->base.dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
 
+        /* Prevents vblank waits from timing out in intel_tv_detect_type() */
+        intel_wait_for_vblank(encoder->base.dev,
+                              to_intel_crtc(encoder->base.crtc)->pipe);
+
         I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
 }
 