about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorThomas Hellstrom <thellstrom@vmware.com>2017-08-24 02:06:28 -0400
committerSinclair Yeh <syeh@vmware.com>2017-08-28 11:40:43 -0400
commitef369904aaf717e0390b483efd47daba9ba8ddf2 (patch)
treec0a5fa8947963555e0caa1e30b2dde91f25cac07
parente300173f06160d65d383d54fdd48027ecd052af3 (diff)
drm/vmwgfx: Move irq bottom half processing to threads
This gets rid of the irq bottom half tasklets and instead performs the work needed in process context. We also convert irq-disabling spinlocks to ordinary spinlocks. This should decrease system latency for other system components, like sound for example but has the potential to increase latency for processes that wait on the GPU. Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com> Reviewed-by: Sinclair Yeh <syeh@vmware.com>
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c58
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c57
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c68
4 files changed, 112 insertions(+), 91 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 86178796de6c..2e5718509c23 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -85,7 +85,6 @@ struct vmw_cmdbuf_context {
85 * Internal protection. 85 * Internal protection.
86 * @dheaders: Pool of DMA memory for device command buffer headers with trailing 86 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
87 * space for inline data. Internal protection. 87 * space for inline data. Internal protection.
88 * @tasklet: Tasklet struct for irq processing. Immutable.
89 * @alloc_queue: Wait queue for processes waiting to allocate command buffer 88 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
90 * space. 89 * space.
91 * @idle_queue: Wait queue for processes waiting for command buffer idle. 90 * @idle_queue: Wait queue for processes waiting for command buffer idle.
@@ -117,7 +116,6 @@ struct vmw_cmdbuf_man {
117 spinlock_t lock; 116 spinlock_t lock;
118 struct dma_pool *headers; 117 struct dma_pool *headers;
119 struct dma_pool *dheaders; 118 struct dma_pool *dheaders;
120 struct tasklet_struct tasklet;
121 wait_queue_head_t alloc_queue; 119 wait_queue_head_t alloc_queue;
122 wait_queue_head_t idle_queue; 120 wait_queue_head_t idle_queue;
123 bool irq_on; 121 bool irq_on;
@@ -278,9 +276,9 @@ void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
278 vmw_cmdbuf_header_inline_free(header); 276 vmw_cmdbuf_header_inline_free(header);
279 return; 277 return;
280 } 278 }
281 spin_lock_bh(&man->lock); 279 spin_lock(&man->lock);
282 __vmw_cmdbuf_header_free(header); 280 __vmw_cmdbuf_header_free(header);
283 spin_unlock_bh(&man->lock); 281 spin_unlock(&man->lock);
284} 282}
285 283
286 284
@@ -468,20 +466,17 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
468} 466}
469 467
470/** 468/**
471 * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt 469 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
472 * handler implemented as a tasklet. 470 * handler implemented as a threaded irq task.
473 * 471 *
474 * @data: Tasklet closure. A pointer to the command buffer manager cast to 472 * @man: Pointer to the command buffer manager.
475 * an unsigned long.
476 * 473 *
477 * The bottom half (tasklet) of the interrupt handler simply calls into the 474 * The bottom half of the interrupt handler simply calls into the
478 * command buffer processor to free finished buffers and submit any 475 * command buffer processor to free finished buffers and submit any
479 * queued buffers to hardware. 476 * queued buffers to hardware.
480 */ 477 */
481static void vmw_cmdbuf_man_tasklet(unsigned long data) 478void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
482{ 479{
483 struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
484
485 spin_lock(&man->lock); 480 spin_lock(&man->lock);
486 vmw_cmdbuf_man_process(man); 481 vmw_cmdbuf_man_process(man);
487 spin_unlock(&man->lock); 482 spin_unlock(&man->lock);
@@ -504,7 +499,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
504 uint32_t dummy; 499 uint32_t dummy;
505 bool restart = false; 500 bool restart = false;
506 501
507 spin_lock_bh(&man->lock); 502 spin_lock(&man->lock);
508 list_for_each_entry_safe(entry, next, &man->error, list) { 503 list_for_each_entry_safe(entry, next, &man->error, list) {
509 restart = true; 504 restart = true;
510 DRM_ERROR("Command buffer error.\n"); 505 DRM_ERROR("Command buffer error.\n");
@@ -513,7 +508,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
513 __vmw_cmdbuf_header_free(entry); 508 __vmw_cmdbuf_header_free(entry);
514 wake_up_all(&man->idle_queue); 509 wake_up_all(&man->idle_queue);
515 } 510 }
516 spin_unlock_bh(&man->lock); 511 spin_unlock(&man->lock);
517 512
518 if (restart && vmw_cmdbuf_startstop(man, true)) 513 if (restart && vmw_cmdbuf_startstop(man, true))
519 DRM_ERROR("Failed restarting command buffer context 0.\n"); 514 DRM_ERROR("Failed restarting command buffer context 0.\n");
@@ -536,7 +531,7 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
536 bool idle = false; 531 bool idle = false;
537 int i; 532 int i;
538 533
539 spin_lock_bh(&man->lock); 534 spin_lock(&man->lock);
540 vmw_cmdbuf_man_process(man); 535 vmw_cmdbuf_man_process(man);
541 for_each_cmdbuf_ctx(man, i, ctx) { 536 for_each_cmdbuf_ctx(man, i, ctx) {
542 if (!list_empty(&ctx->submitted) || 537 if (!list_empty(&ctx->submitted) ||
@@ -548,7 +543,7 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
548 idle = list_empty(&man->error); 543 idle = list_empty(&man->error);
549 544
550out_unlock: 545out_unlock:
551 spin_unlock_bh(&man->lock); 546 spin_unlock(&man->lock);
552 547
553 return idle; 548 return idle;
554} 549}
@@ -571,7 +566,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
571 if (!cur) 566 if (!cur)
572 return; 567 return;
573 568
574 spin_lock_bh(&man->lock); 569 spin_lock(&man->lock);
575 if (man->cur_pos == 0) { 570 if (man->cur_pos == 0) {
576 __vmw_cmdbuf_header_free(cur); 571 __vmw_cmdbuf_header_free(cur);
577 goto out_unlock; 572 goto out_unlock;
@@ -580,7 +575,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
580 man->cur->cb_header->length = man->cur_pos; 575 man->cur->cb_header->length = man->cur_pos;
581 vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0); 576 vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
582out_unlock: 577out_unlock:
583 spin_unlock_bh(&man->lock); 578 spin_unlock(&man->lock);
584 man->cur = NULL; 579 man->cur = NULL;
585 man->cur_pos = 0; 580 man->cur_pos = 0;
586} 581}
@@ -673,14 +668,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
673 return true; 668 return true;
674 669
675 memset(info->node, 0, sizeof(*info->node)); 670 memset(info->node, 0, sizeof(*info->node));
676 spin_lock_bh(&man->lock); 671 spin_lock(&man->lock);
677 ret = drm_mm_insert_node(&man->mm, info->node, info->page_size); 672 ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
678 if (ret) { 673 if (ret) {
679 vmw_cmdbuf_man_process(man); 674 vmw_cmdbuf_man_process(man);
680 ret = drm_mm_insert_node(&man->mm, info->node, info->page_size); 675 ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
681 } 676 }
682 677
683 spin_unlock_bh(&man->lock); 678 spin_unlock(&man->lock);
684 info->done = !ret; 679 info->done = !ret;
685 680
686 return info->done; 681 return info->done;
@@ -801,9 +796,9 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
801 return 0; 796 return 0;
802 797
803out_no_cb_header: 798out_no_cb_header:
804 spin_lock_bh(&man->lock); 799 spin_lock(&man->lock);
805 drm_mm_remove_node(&header->node); 800 drm_mm_remove_node(&header->node);
806 spin_unlock_bh(&man->lock); 801 spin_unlock(&man->lock);
807 802
808 return ret; 803 return ret;
809} 804}
@@ -1023,18 +1018,6 @@ void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1023 vmw_cmdbuf_cur_unlock(man); 1018 vmw_cmdbuf_cur_unlock(man);
1024} 1019}
1025 1020
1026/**
1027 * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
1028 *
1029 * @man: The command buffer manager.
1030 */
1031void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
1032{
1033 if (!man)
1034 return;
1035
1036 tasklet_schedule(&man->tasklet);
1037}
1038 1021
1039/** 1022/**
1040 * vmw_cmdbuf_send_device_command - Send a command through the device context. 1023 * vmw_cmdbuf_send_device_command - Send a command through the device context.
@@ -1059,9 +1042,9 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
1059 memcpy(cmd, command, size); 1042 memcpy(cmd, command, size);
1060 header->cb_header->length = size; 1043 header->cb_header->length = size;
1061 header->cb_context = SVGA_CB_CONTEXT_DEVICE; 1044 header->cb_context = SVGA_CB_CONTEXT_DEVICE;
1062 spin_lock_bh(&man->lock); 1045 spin_lock(&man->lock);
1063 status = vmw_cmdbuf_header_submit(header); 1046 status = vmw_cmdbuf_header_submit(header);
1064 spin_unlock_bh(&man->lock); 1047 spin_unlock(&man->lock);
1065 vmw_cmdbuf_header_free(header); 1048 vmw_cmdbuf_header_free(header);
1066 1049
1067 if (status != SVGA_CB_STATUS_COMPLETED) { 1050 if (status != SVGA_CB_STATUS_COMPLETED) {
@@ -1226,8 +1209,6 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
1226 spin_lock_init(&man->lock); 1209 spin_lock_init(&man->lock);
1227 mutex_init(&man->cur_mutex); 1210 mutex_init(&man->cur_mutex);
1228 mutex_init(&man->space_mutex); 1211 mutex_init(&man->space_mutex);
1229 tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
1230 (unsigned long) man);
1231 man->default_size = VMW_CMDBUF_INLINE_SIZE; 1212 man->default_size = VMW_CMDBUF_INLINE_SIZE;
1232 init_waitqueue_head(&man->alloc_queue); 1213 init_waitqueue_head(&man->alloc_queue);
1233 init_waitqueue_head(&man->idle_queue); 1214 init_waitqueue_head(&man->idle_queue);
@@ -1297,7 +1278,6 @@ void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
1297 1278
1298 vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR, 1279 vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
1299 &man->dev_priv->error_waiters); 1280 &man->dev_priv->error_waiters);
1300 tasklet_kill(&man->tasklet);
1301 (void) cancel_work_sync(&man->work); 1281 (void) cancel_work_sync(&man->work);
1302 dma_pool_destroy(man->dheaders); 1282 dma_pool_destroy(man->dheaders);
1303 dma_pool_destroy(man->headers); 1283 dma_pool_destroy(man->headers);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5e7e8a3df7c3..b74cee295ede 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -352,6 +352,12 @@ struct vmw_otable_batch {
352 struct ttm_buffer_object *otable_bo; 352 struct ttm_buffer_object *otable_bo;
353}; 353};
354 354
355enum {
356 VMW_IRQTHREAD_FENCE,
357 VMW_IRQTHREAD_CMDBUF,
358 VMW_IRQTHREAD_MAX
359};
360
355struct vmw_private { 361struct vmw_private {
356 struct ttm_bo_device bdev; 362 struct ttm_bo_device bdev;
357 struct ttm_bo_global_ref bo_global_ref; 363 struct ttm_bo_global_ref bo_global_ref;
@@ -530,6 +536,7 @@ struct vmw_private {
530 struct vmw_otable_batch otable_batch; 536 struct vmw_otable_batch otable_batch;
531 537
532 struct vmw_cmdbuf_man *cman; 538 struct vmw_cmdbuf_man *cman;
539 DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
533}; 540};
534 541
535static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) 542static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -562,24 +569,21 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
562static inline void vmw_write(struct vmw_private *dev_priv, 569static inline void vmw_write(struct vmw_private *dev_priv,
563 unsigned int offset, uint32_t value) 570 unsigned int offset, uint32_t value)
564{ 571{
565 unsigned long irq_flags; 572 spin_lock(&dev_priv->hw_lock);
566
567 spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
568 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); 573 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
569 outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); 574 outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
570 spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); 575 spin_unlock(&dev_priv->hw_lock);
571} 576}
572 577
573static inline uint32_t vmw_read(struct vmw_private *dev_priv, 578static inline uint32_t vmw_read(struct vmw_private *dev_priv,
574 unsigned int offset) 579 unsigned int offset)
575{ 580{
576 unsigned long irq_flags;
577 u32 val; 581 u32 val;
578 582
579 spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); 583 spin_lock(&dev_priv->hw_lock);
580 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); 584 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
581 val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); 585 val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
582 spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); 586 spin_unlock(&dev_priv->hw_lock);
583 587
584 return val; 588 return val;
585} 589}
@@ -1149,13 +1153,13 @@ extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
1149extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size, 1153extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1150 struct vmw_cmdbuf_header *header, 1154 struct vmw_cmdbuf_header *header,
1151 bool flush); 1155 bool flush);
1152extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
1153extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man, 1156extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
1154 size_t size, bool interruptible, 1157 size_t size, bool interruptible,
1155 struct vmw_cmdbuf_header **p_header); 1158 struct vmw_cmdbuf_header **p_header);
1156extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header); 1159extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
1157extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man, 1160extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
1158 bool interruptible); 1161 bool interruptible);
1162extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
1159 1163
1160 1164
1161/** 1165/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index b8bc5bc7de7e..c812570ff159 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -114,12 +114,11 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
114 container_of(f, struct vmw_fence_obj, base); 114 container_of(f, struct vmw_fence_obj, base);
115 115
116 struct vmw_fence_manager *fman = fman_from_fence(fence); 116 struct vmw_fence_manager *fman = fman_from_fence(fence);
117 unsigned long irq_flags;
118 117
119 spin_lock_irqsave(&fman->lock, irq_flags); 118 spin_lock(&fman->lock);
120 list_del_init(&fence->head); 119 list_del_init(&fence->head);
121 --fman->num_fence_objects; 120 --fman->num_fence_objects;
122 spin_unlock_irqrestore(&fman->lock, irq_flags); 121 spin_unlock(&fman->lock);
123 fence->destroy(fence); 122 fence->destroy(fence);
124} 123}
125 124
@@ -252,10 +251,10 @@ static void vmw_fence_work_func(struct work_struct *work)
252 INIT_LIST_HEAD(&list); 251 INIT_LIST_HEAD(&list);
253 mutex_lock(&fman->goal_irq_mutex); 252 mutex_lock(&fman->goal_irq_mutex);
254 253
255 spin_lock_irq(&fman->lock); 254 spin_lock(&fman->lock);
256 list_splice_init(&fman->cleanup_list, &list); 255 list_splice_init(&fman->cleanup_list, &list);
257 seqno_valid = fman->seqno_valid; 256 seqno_valid = fman->seqno_valid;
258 spin_unlock_irq(&fman->lock); 257 spin_unlock(&fman->lock);
259 258
260 if (!seqno_valid && fman->goal_irq_on) { 259 if (!seqno_valid && fman->goal_irq_on) {
261 fman->goal_irq_on = false; 260 fman->goal_irq_on = false;
@@ -305,15 +304,14 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
305 304
306void vmw_fence_manager_takedown(struct vmw_fence_manager *fman) 305void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
307{ 306{
308 unsigned long irq_flags;
309 bool lists_empty; 307 bool lists_empty;
310 308
311 (void) cancel_work_sync(&fman->work); 309 (void) cancel_work_sync(&fman->work);
312 310
313 spin_lock_irqsave(&fman->lock, irq_flags); 311 spin_lock(&fman->lock);
314 lists_empty = list_empty(&fman->fence_list) && 312 lists_empty = list_empty(&fman->fence_list) &&
315 list_empty(&fman->cleanup_list); 313 list_empty(&fman->cleanup_list);
316 spin_unlock_irqrestore(&fman->lock, irq_flags); 314 spin_unlock(&fman->lock);
317 315
318 BUG_ON(!lists_empty); 316 BUG_ON(!lists_empty);
319 kfree(fman); 317 kfree(fman);
@@ -323,7 +321,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
323 struct vmw_fence_obj *fence, u32 seqno, 321 struct vmw_fence_obj *fence, u32 seqno,
324 void (*destroy) (struct vmw_fence_obj *fence)) 322 void (*destroy) (struct vmw_fence_obj *fence))
325{ 323{
326 unsigned long irq_flags;
327 int ret = 0; 324 int ret = 0;
328 325
329 dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock, 326 dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
@@ -331,7 +328,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
331 INIT_LIST_HEAD(&fence->seq_passed_actions); 328 INIT_LIST_HEAD(&fence->seq_passed_actions);
332 fence->destroy = destroy; 329 fence->destroy = destroy;
333 330
334 spin_lock_irqsave(&fman->lock, irq_flags); 331 spin_lock(&fman->lock);
335 if (unlikely(fman->fifo_down)) { 332 if (unlikely(fman->fifo_down)) {
336 ret = -EBUSY; 333 ret = -EBUSY;
337 goto out_unlock; 334 goto out_unlock;
@@ -340,7 +337,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
340 ++fman->num_fence_objects; 337 ++fman->num_fence_objects;
341 338
342out_unlock: 339out_unlock:
343 spin_unlock_irqrestore(&fman->lock, irq_flags); 340 spin_unlock(&fman->lock);
344 return ret; 341 return ret;
345 342
346} 343}
@@ -489,11 +486,9 @@ rerun:
489 486
490void vmw_fences_update(struct vmw_fence_manager *fman) 487void vmw_fences_update(struct vmw_fence_manager *fman)
491{ 488{
492 unsigned long irq_flags; 489 spin_lock(&fman->lock);
493
494 spin_lock_irqsave(&fman->lock, irq_flags);
495 __vmw_fences_update(fman); 490 __vmw_fences_update(fman);
496 spin_unlock_irqrestore(&fman->lock, irq_flags); 491 spin_unlock(&fman->lock);
497} 492}
498 493
499bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence) 494bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
@@ -663,14 +658,14 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
663 * restart when we've released the fman->lock. 658 * restart when we've released the fman->lock.
664 */ 659 */
665 660
666 spin_lock_irq(&fman->lock); 661 spin_lock(&fman->lock);
667 fman->fifo_down = true; 662 fman->fifo_down = true;
668 while (!list_empty(&fman->fence_list)) { 663 while (!list_empty(&fman->fence_list)) {
669 struct vmw_fence_obj *fence = 664 struct vmw_fence_obj *fence =
670 list_entry(fman->fence_list.prev, struct vmw_fence_obj, 665 list_entry(fman->fence_list.prev, struct vmw_fence_obj,
671 head); 666 head);
672 dma_fence_get(&fence->base); 667 dma_fence_get(&fence->base);
673 spin_unlock_irq(&fman->lock); 668 spin_unlock(&fman->lock);
674 669
675 ret = vmw_fence_obj_wait(fence, false, false, 670 ret = vmw_fence_obj_wait(fence, false, false,
676 VMW_FENCE_WAIT_TIMEOUT); 671 VMW_FENCE_WAIT_TIMEOUT);
@@ -686,18 +681,16 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
686 681
687 BUG_ON(!list_empty(&fence->head)); 682 BUG_ON(!list_empty(&fence->head));
688 dma_fence_put(&fence->base); 683 dma_fence_put(&fence->base);
689 spin_lock_irq(&fman->lock); 684 spin_lock(&fman->lock);
690 } 685 }
691 spin_unlock_irq(&fman->lock); 686 spin_unlock(&fman->lock);
692} 687}
693 688
694void vmw_fence_fifo_up(struct vmw_fence_manager *fman) 689void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
695{ 690{
696 unsigned long irq_flags; 691 spin_lock(&fman->lock);
697
698 spin_lock_irqsave(&fman->lock, irq_flags);
699 fman->fifo_down = false; 692 fman->fifo_down = false;
700 spin_unlock_irqrestore(&fman->lock, irq_flags); 693 spin_unlock(&fman->lock);
701} 694}
702 695
703 696
@@ -812,9 +805,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
812 arg->signaled = vmw_fence_obj_signaled(fence); 805 arg->signaled = vmw_fence_obj_signaled(fence);
813 806
814 arg->signaled_flags = arg->flags; 807 arg->signaled_flags = arg->flags;
815 spin_lock_irq(&fman->lock); 808 spin_lock(&fman->lock);
816 arg->passed_seqno = dev_priv->last_read_seqno; 809 arg->passed_seqno = dev_priv->last_read_seqno;
817 spin_unlock_irq(&fman->lock); 810 spin_unlock(&fman->lock);
818 811
819 ttm_base_object_unref(&base); 812 ttm_base_object_unref(&base);
820 813
@@ -841,8 +834,7 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
841 * 834 *
842 * This function is called when the seqno of the fence where @action is 835 * This function is called when the seqno of the fence where @action is
843 * attached has passed. It queues the event on the submitter's event list. 836 * attached has passed. It queues the event on the submitter's event list.
844 * This function is always called from atomic context, and may be called 837 * This function is always called from atomic context.
845 * from irq context.
846 */ 838 */
847static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action) 839static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
848{ 840{
@@ -851,13 +843,13 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
851 struct drm_device *dev = eaction->dev; 843 struct drm_device *dev = eaction->dev;
852 struct drm_pending_event *event = eaction->event; 844 struct drm_pending_event *event = eaction->event;
853 struct drm_file *file_priv; 845 struct drm_file *file_priv;
854 unsigned long irq_flags; 846
855 847
856 if (unlikely(event == NULL)) 848 if (unlikely(event == NULL))
857 return; 849 return;
858 850
859 file_priv = event->file_priv; 851 file_priv = event->file_priv;
860 spin_lock_irqsave(&dev->event_lock, irq_flags); 852 spin_lock_irq(&dev->event_lock);
861 853
862 if (likely(eaction->tv_sec != NULL)) { 854 if (likely(eaction->tv_sec != NULL)) {
863 struct timeval tv; 855 struct timeval tv;
@@ -869,7 +861,7 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
869 861
870 drm_send_event_locked(dev, eaction->event); 862 drm_send_event_locked(dev, eaction->event);
871 eaction->event = NULL; 863 eaction->event = NULL;
872 spin_unlock_irqrestore(&dev->event_lock, irq_flags); 864 spin_unlock_irq(&dev->event_lock);
873} 865}
874 866
875/** 867/**
@@ -904,11 +896,10 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
904 struct vmw_fence_action *action) 896 struct vmw_fence_action *action)
905{ 897{
906 struct vmw_fence_manager *fman = fman_from_fence(fence); 898 struct vmw_fence_manager *fman = fman_from_fence(fence);
907 unsigned long irq_flags;
908 bool run_update = false; 899 bool run_update = false;
909 900
910 mutex_lock(&fman->goal_irq_mutex); 901 mutex_lock(&fman->goal_irq_mutex);
911 spin_lock_irqsave(&fman->lock, irq_flags); 902 spin_lock(&fman->lock);
912 903
913 fman->pending_actions[action->type]++; 904 fman->pending_actions[action->type]++;
914 if (dma_fence_is_signaled_locked(&fence->base)) { 905 if (dma_fence_is_signaled_locked(&fence->base)) {
@@ -927,7 +918,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
927 run_update = vmw_fence_goal_check_locked(fence); 918 run_update = vmw_fence_goal_check_locked(fence);
928 } 919 }
929 920
930 spin_unlock_irqrestore(&fman->lock, irq_flags); 921 spin_unlock(&fman->lock);
931 922
932 if (run_update) { 923 if (run_update) {
933 if (!fman->goal_irq_on) { 924 if (!fman->goal_irq_on) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 9b519c4b4ec2..b9239ba067c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -30,11 +30,56 @@
30 30
31#define VMW_FENCE_WRAP (1 << 24) 31#define VMW_FENCE_WRAP (1 << 24)
32 32
33/**
34 * vmw_thread_fn - Deferred (process context) irq handler
35 *
36 * @irq: irq number
37 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
38 *
39 * This function implements the deferred part of irq processing.
40 * The function is guaranteed to run at least once after the
41 * vmw_irq_handler has returned with IRQ_WAKE_THREAD.
42 *
43 */
44static irqreturn_t vmw_thread_fn(int irq, void *arg)
45{
46 struct drm_device *dev = (struct drm_device *)arg;
47 struct vmw_private *dev_priv = vmw_priv(dev);
48 irqreturn_t ret = IRQ_NONE;
49
50 if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
51 dev_priv->irqthread_pending)) {
52 vmw_fences_update(dev_priv->fman);
53 wake_up_all(&dev_priv->fence_queue);
54 ret = IRQ_HANDLED;
55 }
56
57 if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
58 dev_priv->irqthread_pending)) {
59 vmw_cmdbuf_irqthread(dev_priv->cman);
60 ret = IRQ_HANDLED;
61 }
62
63 return ret;
64}
65
66/**
67 * vmw_irq_handler irq handler
68 *
69 * @irq: irq number
70 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
71 *
72 * This function implements the quick part of irq processing.
73 * The function performs fast actions like clearing the device interrupt
74 * flags and also reasonably quick actions like waking processes waiting for
75 * FIFO space. Other IRQ actions are deferred to the IRQ thread.
76 */
33static irqreturn_t vmw_irq_handler(int irq, void *arg) 77static irqreturn_t vmw_irq_handler(int irq, void *arg)
34{ 78{
35 struct drm_device *dev = (struct drm_device *)arg; 79 struct drm_device *dev = (struct drm_device *)arg;
36 struct vmw_private *dev_priv = vmw_priv(dev); 80 struct vmw_private *dev_priv = vmw_priv(dev);
37 uint32_t status, masked_status; 81 uint32_t status, masked_status;
82 irqreturn_t ret = IRQ_HANDLED;
38 83
39 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 84 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
40 masked_status = status & READ_ONCE(dev_priv->irq_mask); 85 masked_status = status & READ_ONCE(dev_priv->irq_mask);
@@ -45,20 +90,21 @@ static irqreturn_t vmw_irq_handler(int irq, void *arg)
45 if (!status) 90 if (!status)
46 return IRQ_NONE; 91 return IRQ_NONE;
47 92
48 if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
49 SVGA_IRQFLAG_FENCE_GOAL)) {
50 vmw_fences_update(dev_priv->fman);
51 wake_up_all(&dev_priv->fence_queue);
52 }
53
54 if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS) 93 if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
55 wake_up_all(&dev_priv->fifo_queue); 94 wake_up_all(&dev_priv->fifo_queue);
56 95
57 if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER | 96 if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
58 SVGA_IRQFLAG_ERROR)) 97 SVGA_IRQFLAG_FENCE_GOAL)) &&
59 vmw_cmdbuf_tasklet_schedule(dev_priv->cman); 98 !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
99 ret = IRQ_WAKE_THREAD;
100
101 if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
102 SVGA_IRQFLAG_ERROR)) &&
103 !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
104 dev_priv->irqthread_pending))
105 ret = IRQ_WAKE_THREAD;
60 106
61 return IRQ_HANDLED; 107 return ret;
62} 108}
63 109
64static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) 110static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
@@ -326,7 +372,7 @@ int vmw_irq_install(struct drm_device *dev, int irq)
326 372
327 vmw_irq_preinstall(dev); 373 vmw_irq_preinstall(dev);
328 374
329 ret = request_threaded_irq(irq, vmw_irq_handler, NULL, 375 ret = request_threaded_irq(irq, vmw_irq_handler, vmw_thread_fn,
330 IRQF_SHARED, VMWGFX_DRIVER_NAME, dev); 376 IRQF_SHARED, VMWGFX_DRIVER_NAME, dev);
331 if (ret < 0) 377 if (ret < 0)
332 return ret; 378 return ret;