diff options
Diffstat (limited to 'drivers/media/IR/ir-raw-event.c')
-rw-r--r-- | drivers/media/IR/ir-raw-event.c | 159 |
1 files changed, 133 insertions, 26 deletions
diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c index 6f192ef31db1..43094e7eccfa 100644 --- a/drivers/media/IR/ir-raw-event.c +++ b/drivers/media/IR/ir-raw-event.c | |||
@@ -12,9 +12,10 @@ | |||
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/workqueue.h> | 15 | #include <linux/kthread.h> |
16 | #include <linux/spinlock.h> | 16 | #include <linux/mutex.h> |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | #include <linux/freezer.h> | ||
18 | #include "ir-core-priv.h" | 19 | #include "ir-core-priv.h" |
19 | 20 | ||
20 | /* Define the max number of pulse/space transitions to buffer */ | 21 | /* Define the max number of pulse/space transitions to buffer */ |
@@ -24,7 +25,7 @@ | |||
24 | static LIST_HEAD(ir_raw_client_list); | 25 | static LIST_HEAD(ir_raw_client_list); |
25 | 26 | ||
26 | /* Used to handle IR raw handler extensions */ | 27 | /* Used to handle IR raw handler extensions */ |
27 | static DEFINE_SPINLOCK(ir_raw_handler_lock); | 28 | static DEFINE_MUTEX(ir_raw_handler_lock); |
28 | static LIST_HEAD(ir_raw_handler_list); | 29 | static LIST_HEAD(ir_raw_handler_list); |
29 | static u64 available_protocols; | 30 | static u64 available_protocols; |
30 | 31 | ||
@@ -33,20 +34,30 @@ static u64 available_protocols; | |||
33 | static struct work_struct wq_load; | 34 | static struct work_struct wq_load; |
34 | #endif | 35 | #endif |
35 | 36 | ||
36 | static void ir_raw_event_work(struct work_struct *work) | 37 | static int ir_raw_event_thread(void *data) |
37 | { | 38 | { |
38 | struct ir_raw_event ev; | 39 | struct ir_raw_event ev; |
39 | struct ir_raw_handler *handler; | 40 | struct ir_raw_handler *handler; |
40 | struct ir_raw_event_ctrl *raw = | 41 | struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data; |
41 | container_of(work, struct ir_raw_event_ctrl, rx_work); | 42 | |
42 | 43 | while (!kthread_should_stop()) { | |
43 | while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev)) { | 44 | try_to_freeze(); |
44 | spin_lock(&ir_raw_handler_lock); | 45 | |
45 | list_for_each_entry(handler, &ir_raw_handler_list, list) | 46 | mutex_lock(&ir_raw_handler_lock); |
46 | handler->decode(raw->input_dev, ev); | 47 | |
47 | spin_unlock(&ir_raw_handler_lock); | 48 | while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev)) { |
48 | raw->prev_ev = ev; | 49 | list_for_each_entry(handler, &ir_raw_handler_list, list) |
50 | handler->decode(raw->input_dev, ev); | ||
51 | raw->prev_ev = ev; | ||
52 | } | ||
53 | |||
54 | mutex_unlock(&ir_raw_handler_lock); | ||
55 | |||
56 | set_current_state(TASK_INTERRUPTIBLE); | ||
57 | schedule(); | ||
49 | } | 58 | } |
59 | |||
60 | return 0; | ||
50 | } | 61 | } |
51 | 62 | ||
52 | /** | 63 | /** |
@@ -66,6 +77,9 @@ int ir_raw_event_store(struct input_dev *input_dev, struct ir_raw_event *ev) | |||
66 | if (!ir->raw) | 77 | if (!ir->raw) |
67 | return -EINVAL; | 78 | return -EINVAL; |
68 | 79 | ||
80 | IR_dprintk(2, "sample: (%05dus %s)\n", ||
81 | TO_US(ev->duration), TO_STR(ev->pulse)); | ||
82 | |||
69 | if (kfifo_in(&ir->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev)) | 83 | if (kfifo_in(&ir->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev)) |
70 | return -ENOMEM; | 84 | return -ENOMEM; |
71 | 85 | ||
@@ -126,6 +140,90 @@ int ir_raw_event_store_edge(struct input_dev *input_dev, enum raw_event_type typ | |||
126 | EXPORT_SYMBOL_GPL(ir_raw_event_store_edge); | 140 | EXPORT_SYMBOL_GPL(ir_raw_event_store_edge); |
127 | 141 | ||
128 | /** | 142 | /** |
143 | * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing | ||
144 | * @input_dev: the struct input_dev device descriptor | ||
145 | * @type: the type of the event that has occurred | ||
146 | * | ||
147 | * This routine (which may be called from an interrupt context) works | ||
148 | * in a similar manner to ir_raw_event_store_edge. ||
149 | * This routine is intended for devices with limited internal buffer ||
150 | * It automerges samples of the same type, and handles timeouts ||
151 | */ | ||
152 | int ir_raw_event_store_with_filter(struct input_dev *input_dev, | ||
153 | struct ir_raw_event *ev) | ||
154 | { | ||
155 | struct ir_input_dev *ir = input_get_drvdata(input_dev); | ||
156 | struct ir_raw_event_ctrl *raw = ir->raw; | ||
157 | |||
158 | if (!raw || !ir->props) | ||
159 | return -EINVAL; | ||
160 | |||
161 | /* Ignore spaces in idle mode */ | ||
162 | if (ir->idle && !ev->pulse) | ||
163 | return 0; | ||
164 | else if (ir->idle) | ||
165 | ir_raw_event_set_idle(input_dev, 0); | ||
166 | |||
167 | if (!raw->this_ev.duration) { | ||
168 | raw->this_ev = *ev; | ||
169 | } else if (ev->pulse == raw->this_ev.pulse) { | ||
170 | raw->this_ev.duration += ev->duration; | ||
171 | } else { | ||
172 | ir_raw_event_store(input_dev, &raw->this_ev); | ||
173 | raw->this_ev = *ev; | ||
174 | } | ||
175 | |||
176 | /* Enter idle mode if necessary */ ||
177 | if (!ev->pulse && ir->props->timeout && | ||
178 | raw->this_ev.duration >= ir->props->timeout) | ||
179 | ir_raw_event_set_idle(input_dev, 1); | ||
180 | return 0; | ||
181 | } | ||
182 | EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter); | ||
183 | |||
184 | void ir_raw_event_set_idle(struct input_dev *input_dev, int idle) | ||
185 | { | ||
186 | struct ir_input_dev *ir = input_get_drvdata(input_dev); | ||
187 | struct ir_raw_event_ctrl *raw = ir->raw; | ||
188 | ktime_t now; | ||
189 | u64 delta; | ||
190 | |||
191 | if (!ir->props) | ||
192 | return; | ||
193 | |||
194 | if (!ir->raw) | ||
195 | goto out; | ||
196 | |||
197 | if (idle) { | ||
198 | IR_dprintk(2, "enter idle mode\n"); | ||
199 | raw->last_event = ktime_get(); | ||
200 | } else { | ||
201 | IR_dprintk(2, "exit idle mode\n"); | ||
202 | |||
203 | now = ktime_get(); | ||
204 | delta = ktime_to_ns(ktime_sub(now, ir->raw->last_event)); | ||
205 | |||
206 | WARN_ON(raw->this_ev.pulse); | ||
207 | |||
208 | raw->this_ev.duration = | ||
209 | min(raw->this_ev.duration + delta, | ||
210 | (u64)IR_MAX_DURATION); | ||
211 | |||
212 | ir_raw_event_store(input_dev, &raw->this_ev); | ||
213 | |||
214 | if (raw->this_ev.duration == IR_MAX_DURATION) | ||
215 | ir_raw_event_reset(input_dev); | ||
216 | |||
217 | raw->this_ev.duration = 0; | ||
218 | } | ||
219 | out: | ||
220 | if (ir->props->s_idle) | ||
221 | ir->props->s_idle(ir->props->priv, idle); | ||
222 | ir->idle = idle; | ||
223 | } | ||
224 | EXPORT_SYMBOL_GPL(ir_raw_event_set_idle); | ||
225 | |||
226 | /** | ||
129 | * ir_raw_event_handle() - schedules the decoding of stored ir data | 227 | * ir_raw_event_handle() - schedules the decoding of stored ir data |
130 | * @input_dev: the struct input_dev device descriptor | 228 | * @input_dev: the struct input_dev device descriptor |
131 | * | 229 | * |
@@ -138,7 +236,7 @@ void ir_raw_event_handle(struct input_dev *input_dev) | |||
138 | if (!ir->raw) | 236 | if (!ir->raw) |
139 | return; | 237 | return; |
140 | 238 | ||
141 | schedule_work(&ir->raw->rx_work); | 239 | wake_up_process(ir->raw->thread); |
142 | } | 240 | } |
143 | EXPORT_SYMBOL_GPL(ir_raw_event_handle); | 241 | EXPORT_SYMBOL_GPL(ir_raw_event_handle); |
144 | 242 | ||
@@ -147,9 +245,9 @@ u64 | |||
147 | ir_raw_get_allowed_protocols() | 245 | ir_raw_get_allowed_protocols() |
148 | { | 246 | { |
149 | u64 protocols; | 247 | u64 protocols; |
150 | spin_lock(&ir_raw_handler_lock); | 248 | mutex_lock(&ir_raw_handler_lock); |
151 | protocols = available_protocols; | 249 | protocols = available_protocols; |
152 | spin_unlock(&ir_raw_handler_lock); | 250 | mutex_unlock(&ir_raw_handler_lock); |
153 | return protocols; | 251 | return protocols; |
154 | } | 252 | } |
155 | 253 | ||
@@ -167,7 +265,7 @@ int ir_raw_event_register(struct input_dev *input_dev) | |||
167 | return -ENOMEM; | 265 | return -ENOMEM; |
168 | 266 | ||
169 | ir->raw->input_dev = input_dev; | 267 | ir->raw->input_dev = input_dev; |
170 | INIT_WORK(&ir->raw->rx_work, ir_raw_event_work); | 268 | |
171 | ir->raw->enabled_protocols = ~0; | 269 | ir->raw->enabled_protocols = ~0; |
172 | rc = kfifo_alloc(&ir->raw->kfifo, sizeof(s64) * MAX_IR_EVENT_SIZE, | 270 | rc = kfifo_alloc(&ir->raw->kfifo, sizeof(s64) * MAX_IR_EVENT_SIZE, |
173 | GFP_KERNEL); | 271 | GFP_KERNEL); |
@@ -177,12 +275,21 @@ int ir_raw_event_register(struct input_dev *input_dev) | |||
177 | return rc; | 275 | return rc; |
178 | } | 276 | } |
179 | 277 | ||
180 | spin_lock(&ir_raw_handler_lock); | 278 | ir->raw->thread = kthread_run(ir_raw_event_thread, ir->raw, |
279 | "rc%u", (unsigned int)ir->devno); | ||
280 | |||
281 | if (IS_ERR(ir->raw->thread)) { | ||
282 | int ret = PTR_ERR(ir->raw->thread); | ||
283 | kfree(ir->raw); ir->raw = NULL; | ||
284 | return ret; | ||
285 | } | ||
286 | |||
287 | mutex_lock(&ir_raw_handler_lock); | ||
181 | list_add_tail(&ir->raw->list, &ir_raw_client_list); | 288 | list_add_tail(&ir->raw->list, &ir_raw_client_list); |
182 | list_for_each_entry(handler, &ir_raw_handler_list, list) | 289 | list_for_each_entry(handler, &ir_raw_handler_list, list) |
183 | if (handler->raw_register) | 290 | if (handler->raw_register) |
184 | handler->raw_register(ir->raw->input_dev); | 291 | handler->raw_register(ir->raw->input_dev); |
185 | spin_unlock(&ir_raw_handler_lock); | 292 | mutex_unlock(&ir_raw_handler_lock); |
186 | 293 | ||
187 | return 0; | 294 | return 0; |
188 | } | 295 | } |
@@ -195,14 +302,14 @@ void ir_raw_event_unregister(struct input_dev *input_dev) | |||
195 | if (!ir->raw) | 302 | if (!ir->raw) |
196 | return; | 303 | return; |
197 | 304 | ||
198 | cancel_work_sync(&ir->raw->rx_work); | 305 | kthread_stop(ir->raw->thread); |
199 | 306 | ||
200 | spin_lock(&ir_raw_handler_lock); | 307 | mutex_lock(&ir_raw_handler_lock); |
201 | list_del(&ir->raw->list); | 308 | list_del(&ir->raw->list); |
202 | list_for_each_entry(handler, &ir_raw_handler_list, list) | 309 | list_for_each_entry(handler, &ir_raw_handler_list, list) |
203 | if (handler->raw_unregister) | 310 | if (handler->raw_unregister) |
204 | handler->raw_unregister(ir->raw->input_dev); | 311 | handler->raw_unregister(ir->raw->input_dev); |
205 | spin_unlock(&ir_raw_handler_lock); | 312 | mutex_unlock(&ir_raw_handler_lock); |
206 | 313 | ||
207 | kfifo_free(&ir->raw->kfifo); | 314 | kfifo_free(&ir->raw->kfifo); |
208 | kfree(ir->raw); | 315 | kfree(ir->raw); |
@@ -217,13 +324,13 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler) | |||
217 | { | 324 | { |
218 | struct ir_raw_event_ctrl *raw; | 325 | struct ir_raw_event_ctrl *raw; |
219 | 326 | ||
220 | spin_lock(&ir_raw_handler_lock); | 327 | mutex_lock(&ir_raw_handler_lock); |
221 | list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list); | 328 | list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list); |
222 | if (ir_raw_handler->raw_register) | 329 | if (ir_raw_handler->raw_register) |
223 | list_for_each_entry(raw, &ir_raw_client_list, list) | 330 | list_for_each_entry(raw, &ir_raw_client_list, list) |
224 | ir_raw_handler->raw_register(raw->input_dev); | 331 | ir_raw_handler->raw_register(raw->input_dev); |
225 | available_protocols |= ir_raw_handler->protocols; | 332 | available_protocols |= ir_raw_handler->protocols; |
226 | spin_unlock(&ir_raw_handler_lock); | 333 | mutex_unlock(&ir_raw_handler_lock); |
227 | 334 | ||
228 | return 0; | 335 | return 0; |
229 | } | 336 | } |
@@ -233,13 +340,13 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler) | |||
233 | { | 340 | { |
234 | struct ir_raw_event_ctrl *raw; | 341 | struct ir_raw_event_ctrl *raw; |
235 | 342 | ||
236 | spin_lock(&ir_raw_handler_lock); | 343 | mutex_lock(&ir_raw_handler_lock); |
237 | list_del(&ir_raw_handler->list); | 344 | list_del(&ir_raw_handler->list); |
238 | if (ir_raw_handler->raw_unregister) | 345 | if (ir_raw_handler->raw_unregister) |
239 | list_for_each_entry(raw, &ir_raw_client_list, list) | 346 | list_for_each_entry(raw, &ir_raw_client_list, list) |
240 | ir_raw_handler->raw_unregister(raw->input_dev); | 347 | ir_raw_handler->raw_unregister(raw->input_dev); |
241 | available_protocols &= ~ir_raw_handler->protocols; | 348 | available_protocols &= ~ir_raw_handler->protocols; |
242 | spin_unlock(&ir_raw_handler_lock); | 349 | mutex_unlock(&ir_raw_handler_lock); |
243 | } | 350 | } |
244 | EXPORT_SYMBOL(ir_raw_handler_unregister); | 351 | EXPORT_SYMBOL(ir_raw_handler_unregister); |
245 | 352 | ||