about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/drm_flip_work.c
diff options
context:
space:
mode:
authorBoris BREZILLON <boris.brezillon@free-electrons.com>2014-11-14 13:30:29 -0500
committerDave Airlie <airlied@redhat.com>2014-11-14 18:25:35 -0500
commit8bd4ae202813ac04f35dacf43263e1cf96743292 (patch)
tree3271272039fa98b8482120dd559f197b00cdf774 /drivers/gpu/drm/drm_flip_work.c
parent7fd36c0bae07d8c7fa9668ea6ba28dbcb4f9955b (diff)
drm: rework flip-work helpers to avoid calling func when the FIFO is full
Make use of lists instead of kfifo in order to dynamically allocate a task entry when someone requires some delayed work, thus preventing drm_flip_work_queue from directly calling func instead of queuing this call. This allows drm_flip_work_queue to be safely called even within irq handlers. Add new helper functions to allocate a flip work task and queue it when needed. This prevents allocating data within irq context (which might impact the time spent in the irq handler). Signed-off-by: Boris BREZILLON <boris.brezillon@free-electrons.com> Reviewed-by: Rob Clark <robdclark@gmail.com> Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/drm_flip_work.c')
-rw-r--r--drivers/gpu/drm/drm_flip_work.c97
1 file changed, 73 insertions, 24 deletions
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index f9c7fa3d0012..6f4ae5b655d3 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -25,6 +25,44 @@
25#include "drm_flip_work.h" 25#include "drm_flip_work.h"
26 26
27/** 27/**
28 * drm_flip_work_allocate_task - allocate a flip-work task
29 * @data: data associated to the task
30 * @flags: allocator flags
31 *
32 * Allocate a drm_flip_task object and attach private data to it.
33 */
34struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
35{
36 struct drm_flip_task *task;
37
38 task = kzalloc(sizeof(*task), flags);
39 if (task)
40 task->data = data;
41
42 return task;
43}
44EXPORT_SYMBOL(drm_flip_work_allocate_task);
45
46/**
47 * drm_flip_work_queue_task - queue a specific task
48 * @work: the flip-work
49 * @task: the task to handle
50 *
51 * Queues task, that will later be run (passed back to drm_flip_func_t
52 * func) on a work queue after drm_flip_work_commit() is called.
53 */
54void drm_flip_work_queue_task(struct drm_flip_work *work,
55 struct drm_flip_task *task)
56{
57 unsigned long flags;
58
59 spin_lock_irqsave(&work->lock, flags);
60 list_add_tail(&task->node, &work->queued);
61 spin_unlock_irqrestore(&work->lock, flags);
62}
63EXPORT_SYMBOL(drm_flip_work_queue_task);
64
65/**
28 * drm_flip_work_queue - queue work 66 * drm_flip_work_queue - queue work
29 * @work: the flip-work 67 * @work: the flip-work
30 * @val: the value to queue 68 * @val: the value to queue
@@ -34,10 +72,14 @@
34 */ 72 */
35void drm_flip_work_queue(struct drm_flip_work *work, void *val) 73void drm_flip_work_queue(struct drm_flip_work *work, void *val)
36{ 74{
37 if (kfifo_put(&work->fifo, val)) { 75 struct drm_flip_task *task;
38 atomic_inc(&work->pending); 76
77 task = drm_flip_work_allocate_task(val,
78 drm_can_sleep() ? GFP_KERNEL : GFP_ATOMIC);
79 if (task) {
80 drm_flip_work_queue_task(work, task);
39 } else { 81 } else {
40 DRM_ERROR("%s fifo full!\n", work->name); 82 DRM_ERROR("%s could not allocate task!\n", work->name);
41 work->func(work, val); 83 work->func(work, val);
42 } 84 }
43} 85}
@@ -56,9 +98,12 @@ EXPORT_SYMBOL(drm_flip_work_queue);
56void drm_flip_work_commit(struct drm_flip_work *work, 98void drm_flip_work_commit(struct drm_flip_work *work,
57 struct workqueue_struct *wq) 99 struct workqueue_struct *wq)
58{ 100{
59 uint32_t pending = atomic_read(&work->pending); 101 unsigned long flags;
60 atomic_add(pending, &work->count); 102
61 atomic_sub(pending, &work->pending); 103 spin_lock_irqsave(&work->lock, flags);
104 list_splice_tail(&work->queued, &work->commited);
105 INIT_LIST_HEAD(&work->queued);
106 spin_unlock_irqrestore(&work->lock, flags);
62 queue_work(wq, &work->worker); 107 queue_work(wq, &work->worker);
63} 108}
64EXPORT_SYMBOL(drm_flip_work_commit); 109EXPORT_SYMBOL(drm_flip_work_commit);
@@ -66,14 +111,26 @@ EXPORT_SYMBOL(drm_flip_work_commit);
66static void flip_worker(struct work_struct *w) 111static void flip_worker(struct work_struct *w)
67{ 112{
68 struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker); 113 struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
69 uint32_t count = atomic_read(&work->count); 114 struct list_head tasks;
70 void *val = NULL; 115 unsigned long flags;
71 116
72 atomic_sub(count, &work->count); 117 while (1) {
118 struct drm_flip_task *task, *tmp;
73 119
74 while(count--) 120 INIT_LIST_HEAD(&tasks);
75 if (!WARN_ON(!kfifo_get(&work->fifo, &val))) 121 spin_lock_irqsave(&work->lock, flags);
76 work->func(work, val); 122 list_splice_tail(&work->commited, &tasks);
123 INIT_LIST_HEAD(&work->commited);
124 spin_unlock_irqrestore(&work->lock, flags);
125
126 if (list_empty(&tasks))
127 break;
128
129 list_for_each_entry_safe(task, tmp, &tasks, node) {
130 work->func(work, task->data);
131 kfree(task);
132 }
133 }
77} 134}
78 135
79/** 136/**
@@ -91,19 +148,12 @@ static void flip_worker(struct work_struct *w)
91int drm_flip_work_init(struct drm_flip_work *work, int size, 148int drm_flip_work_init(struct drm_flip_work *work, int size,
92 const char *name, drm_flip_func_t func) 149 const char *name, drm_flip_func_t func)
93{ 150{
94 int ret;
95
96 work->name = name; 151 work->name = name;
97 atomic_set(&work->count, 0); 152 INIT_LIST_HEAD(&work->queued);
98 atomic_set(&work->pending, 0); 153 INIT_LIST_HEAD(&work->commited);
154 spin_lock_init(&work->lock);
99 work->func = func; 155 work->func = func;
100 156
101 ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
102 if (ret) {
103 DRM_ERROR("could not allocate %s fifo\n", name);
104 return ret;
105 }
106
107 INIT_WORK(&work->worker, flip_worker); 157 INIT_WORK(&work->worker, flip_worker);
108 158
109 return 0; 159 return 0;
@@ -118,7 +168,6 @@ EXPORT_SYMBOL(drm_flip_work_init);
118 */ 168 */
119void drm_flip_work_cleanup(struct drm_flip_work *work) 169void drm_flip_work_cleanup(struct drm_flip_work *work)
120{ 170{
121 WARN_ON(!kfifo_is_empty(&work->fifo)); 171 WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));
122 kfifo_free(&work->fifo);
123} 172}
124EXPORT_SYMBOL(drm_flip_work_cleanup); 173EXPORT_SYMBOL(drm_flip_work_cleanup);