author     Maxim Levitsky <maximlevitsky@gmail.com>    2010-07-31 10:59:17 -0400
committer  Mauro Carvalho Chehab <mchehab@redhat.com>  2010-08-08 22:42:59 -0400
commit     0d2cb1de8e81ffc06df67853be5ead3556d3a6b5 (patch)
tree       b2fbf6a996da521592fe654c8bd1e9272992b260 /drivers/media
parent     45a568fa6f6bf8e5b9c32e52292f297e8473a985 (diff)
V4L/DVB: IR: replace workqueue with kthread
It is perfectly possible to have ir_raw_event_work running concurrently on two CPUs, so we must protect against that situation. This stems from the fact that if the hardware sends short packets of samples, we might end up queueing the work item more times than necessary. Such a job isn't well suited for a workqueue, so use a kernel thread instead.

Signed-off-by: Maxim Levitsky <maximlevitsky@gmail.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
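For readers unfamiliar with the idiom, here is a minimal, self-contained sketch (not part of this patch) of the kthread wake/sleep pattern the patch adopts, built from the same primitives it uses: kthread_run(), wake_up_process(), kthread_should_stop() and kthread_stop(). The names demo_task, demo_thread and demo_poke are invented for illustration. The key property is that waking an already-runnable thread is a no-op, so redundant notifications never cause concurrent execution the way re-queueing a work item could.

/* Illustrative only -- not from the patch. */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *demo_task;

static int demo_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* ... drain a fifo / decode queued samples here ... */

		/* Sleep until the next wake_up_process(). Re-checking
		 * kthread_should_stop() after setting the task state
		 * avoids sleeping through a concurrent kthread_stop(). */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop())
			schedule();
		__set_current_state(TASK_RUNNING);
	}
	return 0;
}

/* Producer side: safe to call any number of times, even from short
 * bursts of interrupts; a wakeup aimed at an already-running thread
 * simply does nothing. */
static void demo_poke(void)
{
	wake_up_process(demo_task);
}

static int __init demo_init(void)
{
	demo_task = kthread_run(demo_thread, NULL, "demo");
	if (IS_ERR(demo_task))
		return PTR_ERR(demo_task);
	demo_poke();		/* example notification */
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);	/* wakes the thread and waits for it to exit */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The re-check of kthread_should_stop() after set_current_state() in the sketch is a defensive variant of the loop in the patch below; it closes the window where a stop request could arrive between the top-of-loop check and going to sleep.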
Diffstat (limited to 'drivers/media')
-rw-r--r--  drivers/media/IR/ir-core-priv.h  |  2
-rw-r--r--  drivers/media/IR/ir-raw-event.c  | 42
2 files changed, 32 insertions(+), 12 deletions(-)
diff --git a/drivers/media/IR/ir-core-priv.h b/drivers/media/IR/ir-core-priv.h
index dc26e2beeef..84c7a9a5cad 100644
--- a/drivers/media/IR/ir-core-priv.h
+++ b/drivers/media/IR/ir-core-priv.h
@@ -32,7 +32,7 @@ struct ir_raw_handler {
 
 struct ir_raw_event_ctrl {
 	struct list_head		list;		/* to keep track of raw clients */
-	struct work_struct		rx_work;	/* for the rx decoding workqueue */
+	struct task_struct		*thread;
 	struct kfifo			kfifo;		/* fifo for the pulse/space durations */
 	ktime_t				last_event;	/* when last event occurred */
 	enum raw_event_type		last_type;	/* last event type */
diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c
index 9d5c029cfc0..d0c18db4c0d 100644
--- a/drivers/media/IR/ir-raw-event.c
+++ b/drivers/media/IR/ir-raw-event.c
@@ -12,9 +12,10 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/workqueue.h>
+#include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <linux/sched.h>
+#include <linux/freezer.h>
 #include "ir-core-priv.h"
 
 /* Define the max number of pulse/space transitions to buffer */
@@ -33,20 +34,30 @@ static u64 available_protocols;
 static struct work_struct wq_load;
 #endif
 
-static void ir_raw_event_work(struct work_struct *work)
+static int ir_raw_event_thread(void *data)
 {
 	struct ir_raw_event ev;
 	struct ir_raw_handler *handler;
-	struct ir_raw_event_ctrl *raw =
-		container_of(work, struct ir_raw_event_ctrl, rx_work);
+	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
+
+	while (!kthread_should_stop()) {
+		try_to_freeze();
 
-	while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev)) {
 		mutex_lock(&ir_raw_handler_lock);
-		list_for_each_entry(handler, &ir_raw_handler_list, list)
-			handler->decode(raw->input_dev, ev);
+
+		while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev)) {
+			list_for_each_entry(handler, &ir_raw_handler_list, list)
+				handler->decode(raw->input_dev, ev);
+			raw->prev_ev = ev;
+		}
+
 		mutex_unlock(&ir_raw_handler_lock);
-		raw->prev_ev = ev;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
 	}
+
+	return 0;
 }
 
 /**
@@ -141,7 +152,7 @@ void ir_raw_event_handle(struct input_dev *input_dev)
 	if (!ir->raw)
 		return;
 
-	schedule_work(&ir->raw->rx_work);
+	wake_up_process(ir->raw->thread);
 }
 EXPORT_SYMBOL_GPL(ir_raw_event_handle);
 
@@ -170,7 +181,7 @@ int ir_raw_event_register(struct input_dev *input_dev)
 		return -ENOMEM;
 
 	ir->raw->input_dev = input_dev;
-	INIT_WORK(&ir->raw->rx_work, ir_raw_event_work);
+
 	ir->raw->enabled_protocols = ~0;
 	rc = kfifo_alloc(&ir->raw->kfifo, sizeof(s64) * MAX_IR_EVENT_SIZE,
 			 GFP_KERNEL);
@@ -180,6 +191,15 @@ int ir_raw_event_register(struct input_dev *input_dev)
 		return rc;
 	}
 
+	ir->raw->thread = kthread_run(ir_raw_event_thread, ir->raw,
+			"rc%u", (unsigned int)ir->devno);
+
+	if (IS_ERR(ir->raw->thread)) {
+		kfree(ir->raw);
+		ir->raw = NULL;
+		return PTR_ERR(ir->raw->thread);
+	}
+
 	mutex_lock(&ir_raw_handler_lock);
 	list_add_tail(&ir->raw->list, &ir_raw_client_list);
 	list_for_each_entry(handler, &ir_raw_handler_list, list)
@@ -198,7 +218,7 @@ void ir_raw_event_unregister(struct input_dev *input_dev)
 	if (!ir->raw)
 		return;
 
-	cancel_work_sync(&ir->raw->rx_work);
+	kthread_stop(ir->raw->thread);
 
 	mutex_lock(&ir_raw_handler_lock);
 	list_del(&ir->raw->list);