aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBoris BREZILLON <boris.brezillon@free-electrons.com>2014-11-14 13:30:29 -0500
committerDave Airlie <airlied@redhat.com>2014-11-14 18:25:35 -0500
commit8bd4ae202813ac04f35dacf43263e1cf96743292 (patch)
tree3271272039fa98b8482120dd559f197b00cdf774
parent7fd36c0bae07d8c7fa9668ea6ba28dbcb4f9955b (diff)
drm: rework flip-work helpers to avoid calling func when the FIFO is full
Make use of lists instead of kfifo in order to dynamically allocate a task entry when someone requires some delayed work, thus preventing drm_flip_work_queue from directly calling func instead of queuing this call. This allows drm_flip_work_queue to be safely called even within irq handlers. Add new helper functions to allocate a flip work task and queue it when needed. This prevents allocating data within irq context (which might impact the time spent in the irq handler). Signed-off-by: Boris BREZILLON <boris.brezillon@free-electrons.com> Reviewed-by: Rob Clark <robdclark@gmail.com> Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r--drivers/gpu/drm/drm_flip_work.c97
-rw-r--r--include/drm/drm_flip_work.h31
2 files changed, 96 insertions, 32 deletions
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index f9c7fa3d0012..6f4ae5b655d3 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -25,6 +25,44 @@
25#include "drm_flip_work.h" 25#include "drm_flip_work.h"
26 26
27/** 27/**
28 * drm_flip_work_allocate_task - allocate a flip-work task
29 * @data: data associated to the task
30 * @flags: allocator flags
31 *
32 * Allocate a drm_flip_task object and attach private data to it.
33 */
34struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
35{
36 struct drm_flip_task *task;
37
38 task = kzalloc(sizeof(*task), flags);
39 if (task)
40 task->data = data;
41
42 return task;
43}
44EXPORT_SYMBOL(drm_flip_work_allocate_task);
45
46/**
47 * drm_flip_work_queue_task - queue a specific task
48 * @work: the flip-work
49 * @task: the task to handle
50 *
51 * Queues task, that will later be run (passed back to drm_flip_func_t
52 * func) on a work queue after drm_flip_work_commit() is called.
53 */
54void drm_flip_work_queue_task(struct drm_flip_work *work,
55 struct drm_flip_task *task)
56{
57 unsigned long flags;
58
59 spin_lock_irqsave(&work->lock, flags);
60 list_add_tail(&task->node, &work->queued);
61 spin_unlock_irqrestore(&work->lock, flags);
62}
63EXPORT_SYMBOL(drm_flip_work_queue_task);
64
65/**
28 * drm_flip_work_queue - queue work 66 * drm_flip_work_queue - queue work
29 * @work: the flip-work 67 * @work: the flip-work
30 * @val: the value to queue 68 * @val: the value to queue
@@ -34,10 +72,14 @@
34 */ 72 */
35void drm_flip_work_queue(struct drm_flip_work *work, void *val) 73void drm_flip_work_queue(struct drm_flip_work *work, void *val)
36{ 74{
37 if (kfifo_put(&work->fifo, val)) { 75 struct drm_flip_task *task;
38 atomic_inc(&work->pending); 76
77 task = drm_flip_work_allocate_task(val,
78 drm_can_sleep() ? GFP_KERNEL : GFP_ATOMIC);
79 if (task) {
80 drm_flip_work_queue_task(work, task);
39 } else { 81 } else {
40 DRM_ERROR("%s fifo full!\n", work->name); 82 DRM_ERROR("%s could not allocate task!\n", work->name);
41 work->func(work, val); 83 work->func(work, val);
42 } 84 }
43} 85}
@@ -56,9 +98,12 @@ EXPORT_SYMBOL(drm_flip_work_queue);
56void drm_flip_work_commit(struct drm_flip_work *work, 98void drm_flip_work_commit(struct drm_flip_work *work,
57 struct workqueue_struct *wq) 99 struct workqueue_struct *wq)
58{ 100{
59 uint32_t pending = atomic_read(&work->pending); 101 unsigned long flags;
60 atomic_add(pending, &work->count); 102
61 atomic_sub(pending, &work->pending); 103 spin_lock_irqsave(&work->lock, flags);
104 list_splice_tail(&work->queued, &work->commited);
105 INIT_LIST_HEAD(&work->queued);
106 spin_unlock_irqrestore(&work->lock, flags);
62 queue_work(wq, &work->worker); 107 queue_work(wq, &work->worker);
63} 108}
64EXPORT_SYMBOL(drm_flip_work_commit); 109EXPORT_SYMBOL(drm_flip_work_commit);
@@ -66,14 +111,26 @@ EXPORT_SYMBOL(drm_flip_work_commit);
66static void flip_worker(struct work_struct *w) 111static void flip_worker(struct work_struct *w)
67{ 112{
68 struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker); 113 struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
69 uint32_t count = atomic_read(&work->count); 114 struct list_head tasks;
70 void *val = NULL; 115 unsigned long flags;
71 116
72 atomic_sub(count, &work->count); 117 while (1) {
118 struct drm_flip_task *task, *tmp;
73 119
74 while(count--) 120 INIT_LIST_HEAD(&tasks);
75 if (!WARN_ON(!kfifo_get(&work->fifo, &val))) 121 spin_lock_irqsave(&work->lock, flags);
76 work->func(work, val); 122 list_splice_tail(&work->commited, &tasks);
123 INIT_LIST_HEAD(&work->commited);
124 spin_unlock_irqrestore(&work->lock, flags);
125
126 if (list_empty(&tasks))
127 break;
128
129 list_for_each_entry_safe(task, tmp, &tasks, node) {
130 work->func(work, task->data);
131 kfree(task);
132 }
133 }
77} 134}
78 135
79/** 136/**
@@ -91,19 +148,12 @@ static void flip_worker(struct work_struct *w)
91int drm_flip_work_init(struct drm_flip_work *work, int size, 148int drm_flip_work_init(struct drm_flip_work *work, int size,
92 const char *name, drm_flip_func_t func) 149 const char *name, drm_flip_func_t func)
93{ 150{
94 int ret;
95
96 work->name = name; 151 work->name = name;
97 atomic_set(&work->count, 0); 152 INIT_LIST_HEAD(&work->queued);
98 atomic_set(&work->pending, 0); 153 INIT_LIST_HEAD(&work->commited);
154 spin_lock_init(&work->lock);
99 work->func = func; 155 work->func = func;
100 156
101 ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
102 if (ret) {
103 DRM_ERROR("could not allocate %s fifo\n", name);
104 return ret;
105 }
106
107 INIT_WORK(&work->worker, flip_worker); 157 INIT_WORK(&work->worker, flip_worker);
108 158
109 return 0; 159 return 0;
@@ -118,7 +168,6 @@ EXPORT_SYMBOL(drm_flip_work_init);
118 */ 168 */
119void drm_flip_work_cleanup(struct drm_flip_work *work) 169void drm_flip_work_cleanup(struct drm_flip_work *work)
120{ 170{
121 WARN_ON(!kfifo_is_empty(&work->fifo)); 171 WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));
122 kfifo_free(&work->fifo);
123} 172}
124EXPORT_SYMBOL(drm_flip_work_cleanup); 173EXPORT_SYMBOL(drm_flip_work_cleanup);
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
index 9eed34dcd6af..3fcb4c44c9e0 100644
--- a/include/drm/drm_flip_work.h
+++ b/include/drm/drm_flip_work.h
@@ -25,6 +25,7 @@
25#define DRM_FLIP_WORK_H 25#define DRM_FLIP_WORK_H
26 26
27#include <linux/kfifo.h> 27#include <linux/kfifo.h>
28#include <linux/spinlock.h>
28#include <linux/workqueue.h> 29#include <linux/workqueue.h>
29 30
30/** 31/**
@@ -32,9 +33,9 @@
32 * 33 *
33 * Util to queue up work to run from work-queue context after flip/vblank. 34 * Util to queue up work to run from work-queue context after flip/vblank.
34 * Typically this can be used to defer unref of framebuffer's, cursor 35 * Typically this can be used to defer unref of framebuffer's, cursor
35 * bo's, etc until after vblank. The APIs are all safe (and lockless) 36 * bo's, etc until after vblank. The APIs are all thread-safe.
36 * for up to one producer and once consumer at a time. The single-consumer 37 * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called
37 * aspect is ensured by committing the queued work to a single work-queue. 38 * in atomic context.
38 */ 39 */
39 40
40struct drm_flip_work; 41struct drm_flip_work;
@@ -51,22 +52,36 @@ struct drm_flip_work;
51typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val); 52typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
52 53
53/** 54/**
55 * struct drm_flip_task - flip work task
56 * @node: list entry element
57 * @data: data to pass to work->func
58 */
59struct drm_flip_task {
60 struct list_head node;
61 void *data;
62};
63
64/**
54 * struct drm_flip_work - flip work queue 65 * struct drm_flip_work - flip work queue
55 * @name: debug name 66 * @name: debug name
56 * @pending: number of queued but not committed items
57 * @count: number of committed items
58 * @func: callback fxn called for each committed item 67 * @func: callback fxn called for each committed item
59 * @worker: worker which calls @func 68 * @worker: worker which calls @func
60 * @fifo: queue of committed items 69 * @queued: queued tasks
70 * @commited: commited tasks
71 * @lock: lock to access queued and commited lists
61 */ 72 */
62struct drm_flip_work { 73struct drm_flip_work {
63 const char *name; 74 const char *name;
64 atomic_t pending, count;
65 drm_flip_func_t func; 75 drm_flip_func_t func;
66 struct work_struct worker; 76 struct work_struct worker;
67 DECLARE_KFIFO_PTR(fifo, void *); 77 struct list_head queued;
78 struct list_head commited;
79 spinlock_t lock;
68}; 80};
69 81
82struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags);
83void drm_flip_work_queue_task(struct drm_flip_work *work,
84 struct drm_flip_task *task);
70void drm_flip_work_queue(struct drm_flip_work *work, void *val); 85void drm_flip_work_queue(struct drm_flip_work *work, void *val);
71void drm_flip_work_commit(struct drm_flip_work *work, 86void drm_flip_work_commit(struct drm_flip_work *work,
72 struct workqueue_struct *wq); 87 struct workqueue_struct *wq);