Diffstat (limited to 'drivers/media/rc/rc-raw.c')
 -rw-r--r--  drivers/media/rc/rc-raw.c | 191
 1 file changed, 90 insertions(+), 101 deletions(-)
diff --git a/drivers/media/rc/rc-raw.c b/drivers/media/rc/rc-raw.c
index d6c556e3f0d8..ab9b1e4071c0 100644
--- a/drivers/media/rc/rc-raw.c
+++ b/drivers/media/rc/rc-raw.c
@@ -64,7 +64,7 @@ static int ir_raw_event_thread(void *data)
 
                 mutex_lock(&ir_raw_handler_lock);
                 list_for_each_entry(handler, &ir_raw_handler_list, list)
-                        handler->decode(raw->input_dev, ev);
+                        handler->decode(raw->dev, ev);
                 raw->prev_ev = ev;
                 mutex_unlock(&ir_raw_handler_lock);
         }
@@ -74,7 +74,7 @@ static int ir_raw_event_thread(void *data)
 
 /**
  * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
- * @input_dev: the struct input_dev device descriptor
+ * @dev: the struct rc_dev device descriptor
  * @ev: the struct ir_raw_event descriptor of the pulse/space
  *
  * This routine (which may be called from an interrupt context) stores a
@@ -82,17 +82,15 @@ static int ir_raw_event_thread(void *data)
  * signalled as positive values and spaces as negative values. A zero value
  * will reset the decoding state machines.
  */
-int ir_raw_event_store(struct input_dev *input_dev, struct ir_raw_event *ev)
+int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
 {
-        struct ir_input_dev *ir = input_get_drvdata(input_dev);
-
-        if (!ir->raw)
+        if (!dev->raw)
                 return -EINVAL;
 
         IR_dprintk(2, "sample: (%05dus %s)\n",
                    TO_US(ev->duration), TO_STR(ev->pulse));
 
-        if (kfifo_in(&ir->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
+        if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
                 return -ENOMEM;
 
         return 0;
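After this hunk, drivers that measure durations themselves hand each sample to rc-core through the rc_dev handle rather than the input_dev. A minimal sketch of how a receiver driver might report one measured pulse against the converted API; the function name foo_ir_report_pulse and the duration_ns parameter are illustrative, not part of this commit:

static void foo_ir_report_pulse(struct rc_dev *rdev, u32 duration_ns)
{
        struct ir_raw_event ev = {};    /* zero-initialized sample */

        ev.pulse = true;                /* false would describe a space */
        ev.duration = duration_ns;      /* durations are kept in nanoseconds */

        /* queue the sample; fails with -EINVAL if dev->raw was never set up */
        ir_raw_event_store(rdev, &ev);
        ir_raw_event_handle(rdev);      /* wake the decoding thread */
}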
@@ -101,7 +99,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_store);
 
 /**
  * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
- * @input_dev: the struct input_dev device descriptor
+ * @dev: the struct rc_dev device descriptor
  * @type: the type of the event that has occurred
  *
  * This routine (which may be called from an interrupt context) is used to
@@ -110,50 +108,49 @@ EXPORT_SYMBOL_GPL(ir_raw_event_store);
  * hardware which does not provide durations directly but only interrupts
  * (or similar events) on state change.
  */
-int ir_raw_event_store_edge(struct input_dev *input_dev, enum raw_event_type type)
+int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
 {
-        struct ir_input_dev *ir = input_get_drvdata(input_dev);
         ktime_t now;
         s64 delta; /* ns */
         struct ir_raw_event ev;
         int rc = 0;
 
-        if (!ir->raw)
+        if (!dev->raw)
                 return -EINVAL;
 
         now = ktime_get();
-        delta = ktime_to_ns(ktime_sub(now, ir->raw->last_event));
+        delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
 
         /* Check for a long duration since last event or if we're
          * being called for the first time, note that delta can't
          * possibly be negative.
          */
         ev.duration = 0;
-        if (delta > IR_MAX_DURATION || !ir->raw->last_type)
+        if (delta > IR_MAX_DURATION || !dev->raw->last_type)
                 type |= IR_START_EVENT;
         else
                 ev.duration = delta;
 
         if (type & IR_START_EVENT)
-                ir_raw_event_reset(input_dev);
-        else if (ir->raw->last_type & IR_SPACE) {
+                ir_raw_event_reset(dev);
+        else if (dev->raw->last_type & IR_SPACE) {
                 ev.pulse = false;
-                rc = ir_raw_event_store(input_dev, &ev);
-        } else if (ir->raw->last_type & IR_PULSE) {
+                rc = ir_raw_event_store(dev, &ev);
+        } else if (dev->raw->last_type & IR_PULSE) {
                 ev.pulse = true;
-                rc = ir_raw_event_store(input_dev, &ev);
+                rc = ir_raw_event_store(dev, &ev);
         } else
                 return 0;
 
-        ir->raw->last_event = now;
-        ir->raw->last_type = type;
+        dev->raw->last_event = now;
+        dev->raw->last_type = type;
         return rc;
 }
 EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
 
 /**
  * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
- * @input_dev: the struct input_dev device descriptor
+ * @dev: the struct rc_dev device descriptor
  * @type: the type of the event that has occurred
  *
  * This routine (which may be called from an interrupt context) works
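The edge-based entry point converted just above suits receivers that only interrupt on line transitions: rc-core timestamps each call with ktime_get() and turns the gap since the previous edge into a pulse or space of that length. A hedged sketch of an interrupt handler built on it; foo_ir_isr and foo_ir_read_line are invented for illustration:

static irqreturn_t foo_ir_isr(int irq, void *data)
{
        struct rc_dev *rdev = data;
        bool mark = foo_ir_read_line(); /* hypothetical: true while carrier is seen */

        /* the type describes the period starting at this edge; rc-core reports
         * the period that just ended, using the type given when it started */
        ir_raw_event_store_edge(rdev, mark ? IR_PULSE : IR_SPACE);
        ir_raw_event_handle(rdev);

        return IRQ_HANDLED;
}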
@@ -161,84 +158,76 @@ EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
  * This routine is intended for devices with limited internal buffer
  * It automerges samples of same type, and handles timeouts
  */
-int ir_raw_event_store_with_filter(struct input_dev *input_dev,
-                                   struct ir_raw_event *ev)
+int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
 {
-        struct ir_input_dev *ir = input_get_drvdata(input_dev);
-        struct ir_raw_event_ctrl *raw = ir->raw;
-
-        if (!raw || !ir->props)
+        if (!dev->raw)
                 return -EINVAL;
 
         /* Ignore spaces in idle mode */
-        if (ir->idle && !ev->pulse)
+        if (dev->idle && !ev->pulse)
                 return 0;
-        else if (ir->idle)
-                ir_raw_event_set_idle(input_dev, false);
+        else if (dev->idle)
+                ir_raw_event_set_idle(dev, false);
 
-        if (!raw->this_ev.duration) {
-                raw->this_ev = *ev;
-        } else if (ev->pulse == raw->this_ev.pulse) {
-                raw->this_ev.duration += ev->duration;
-        } else {
-                ir_raw_event_store(input_dev, &raw->this_ev);
-                raw->this_ev = *ev;
+        if (!dev->raw->this_ev.duration)
+                dev->raw->this_ev = *ev;
+        else if (ev->pulse == dev->raw->this_ev.pulse)
+                dev->raw->this_ev.duration += ev->duration;
+        else {
+                ir_raw_event_store(dev, &dev->raw->this_ev);
+                dev->raw->this_ev = *ev;
         }
 
         /* Enter idle mode if nessesary */
-        if (!ev->pulse && ir->props->timeout &&
-            raw->this_ev.duration >= ir->props->timeout) {
-                ir_raw_event_set_idle(input_dev, true);
-        }
+        if (!ev->pulse && dev->timeout &&
+            dev->raw->this_ev.duration >= dev->timeout)
+                ir_raw_event_set_idle(dev, true);
+
         return 0;
 }
 EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
 
 /**
- * ir_raw_event_set_idle() - hint the ir core if device is receiving
- * IR data or not
- * @input_dev: the struct input_dev device descriptor
- * @idle: the hint value
+ * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
+ * @dev: the struct rc_dev device descriptor
+ * @idle: whether the device is idle or not
  */
-void ir_raw_event_set_idle(struct input_dev *input_dev, bool idle)
+void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
 {
-        struct ir_input_dev *ir = input_get_drvdata(input_dev);
-        struct ir_raw_event_ctrl *raw = ir->raw;
-
-        if (!ir->props || !ir->raw)
+        if (!dev->raw)
                 return;
 
         IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");
 
         if (idle) {
-                raw->this_ev.timeout = true;
-                ir_raw_event_store(input_dev, &raw->this_ev);
-                init_ir_raw_event(&raw->this_ev);
+                dev->raw->this_ev.timeout = true;
+                ir_raw_event_store(dev, &dev->raw->this_ev);
+                init_ir_raw_event(&dev->raw->this_ev);
         }
 
-        if (ir->props->s_idle)
-                ir->props->s_idle(ir->props->priv, idle);
-        ir->idle = idle;
+        if (dev->s_idle)
+                dev->s_idle(dev, idle);
+
+        dev->idle = idle;
 }
 EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
 
 /**
  * ir_raw_event_handle() - schedules the decoding of stored ir data
- * @input_dev: the struct input_dev device descriptor
+ * @dev: the struct rc_dev device descriptor
  *
- * This routine will signal the workqueue to start decoding stored ir data.
+ * This routine will tell rc-core to start decoding stored ir data.
  */
-void ir_raw_event_handle(struct input_dev *input_dev)
+void ir_raw_event_handle(struct rc_dev *dev)
 {
-        struct ir_input_dev *ir = input_get_drvdata(input_dev);
         unsigned long flags;
 
-        if (!ir->raw)
+        if (!dev->raw)
                 return;
 
-        spin_lock_irqsave(&ir->raw->lock, flags);
-        wake_up_process(ir->raw->thread);
-        spin_unlock_irqrestore(&ir->raw->lock, flags);
+        spin_lock_irqsave(&dev->raw->lock, flags);
+        wake_up_process(dev->raw->thread);
+        spin_unlock_irqrestore(&dev->raw->lock, flags);
 }
 EXPORT_SYMBOL_GPL(ir_raw_event_handle);
 
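For hardware that instead drains an internal sample buffer, the filtered store shown above merges consecutive samples of the same polarity and, once the accumulated space reaches dev->timeout, drops into idle via ir_raw_event_set_idle(). A rough sketch of a buffer drain loop; the foo_buf layout and bit encoding are invented:

static void foo_ir_drain(struct rc_dev *rdev, const u32 *foo_buf, unsigned int n)
{
        struct ir_raw_event ev = {};
        unsigned int i;

        for (i = 0; i < n; i++) {
                ev.pulse = !!(foo_buf[i] & 0x80000000); /* invented: top bit = mark */
                ev.duration = foo_buf[i] & 0x7fffffff;  /* invented: low bits = ns */

                /* automerges equal-polarity samples, ignores spaces while idle,
                 * and enters idle mode once dev->timeout worth of space piles up */
                ir_raw_event_store_with_filter(rdev, &ev);
        }

        ir_raw_event_handle(rdev);      /* schedule decoding of what was queued */
}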
@@ -256,69 +245,69 @@ ir_raw_get_allowed_protocols()
 /*
  * Used to (un)register raw event clients
  */
-int ir_raw_event_register(struct input_dev *input_dev)
+int ir_raw_event_register(struct rc_dev *dev)
 {
-        struct ir_input_dev *ir = input_get_drvdata(input_dev);
         int rc;
         struct ir_raw_handler *handler;
 
-        ir->raw = kzalloc(sizeof(*ir->raw), GFP_KERNEL);
-        if (!ir->raw)
-                return -ENOMEM;
+        if (!dev)
+                return -EINVAL;
 
-        ir->raw->input_dev = input_dev;
+        dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
+        if (!dev->raw)
+                return -ENOMEM;
 
-        ir->raw->enabled_protocols = ~0;
-        rc = kfifo_alloc(&ir->raw->kfifo, sizeof(s64) * MAX_IR_EVENT_SIZE,
+        dev->raw->dev = dev;
+        dev->raw->enabled_protocols = ~0;
+        rc = kfifo_alloc(&dev->raw->kfifo,
+                         sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
                          GFP_KERNEL);
-        if (rc < 0) {
-                kfree(ir->raw);
-                ir->raw = NULL;
-                return rc;
-        }
+        if (rc < 0)
+                goto out;
 
-        spin_lock_init(&ir->raw->lock);
-        ir->raw->thread = kthread_run(ir_raw_event_thread, ir->raw,
-                        "rc%u", (unsigned int)ir->devno);
+        spin_lock_init(&dev->raw->lock);
+        dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
+                                       "rc%ld", dev->devno);
 
-        if (IS_ERR(ir->raw->thread)) {
-                int ret = PTR_ERR(ir->raw->thread);
-
-                kfree(ir->raw);
-                ir->raw = NULL;
-                return ret;
+        if (IS_ERR(dev->raw->thread)) {
+                rc = PTR_ERR(dev->raw->thread);
+                goto out;
         }
 
         mutex_lock(&ir_raw_handler_lock);
-        list_add_tail(&ir->raw->list, &ir_raw_client_list);
+        list_add_tail(&dev->raw->list, &ir_raw_client_list);
         list_for_each_entry(handler, &ir_raw_handler_list, list)
                 if (handler->raw_register)
-                        handler->raw_register(ir->raw->input_dev);
+                        handler->raw_register(dev);
         mutex_unlock(&ir_raw_handler_lock);
 
         return 0;
+
+out:
+        kfree(dev->raw);
+        dev->raw = NULL;
+        return rc;
 }
 
-void ir_raw_event_unregister(struct input_dev *input_dev)
+void ir_raw_event_unregister(struct rc_dev *dev)
 {
-        struct ir_input_dev *ir = input_get_drvdata(input_dev);
         struct ir_raw_handler *handler;
 
-        if (!ir->raw)
+        if (!dev || !dev->raw)
                 return;
 
-        kthread_stop(ir->raw->thread);
+        kthread_stop(dev->raw->thread);
 
         mutex_lock(&ir_raw_handler_lock);
-        list_del(&ir->raw->list);
+        list_del(&dev->raw->list);
         list_for_each_entry(handler, &ir_raw_handler_list, list)
                 if (handler->raw_unregister)
-                        handler->raw_unregister(ir->raw->input_dev);
+                        handler->raw_unregister(dev);
         mutex_unlock(&ir_raw_handler_lock);
 
-        kfifo_free(&ir->raw->kfifo);
-        kfree(ir->raw);
-        ir->raw = NULL;
+        kfifo_free(&dev->raw->kfifo);
+        kfree(dev->raw);
+        dev->raw = NULL;
 }
 
 /*
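ir_raw_event_register()/ir_raw_event_unregister() are not called by receiver drivers directly; in the rc_dev model they are invoked from rc-core when a device whose driver_type is RC_DRIVER_IR_RAW is (un)registered. A hedged sketch of the driver-side setup that leads here; foo_ir_probe, foo_device and the timeout value are illustrative, and required fields such as the input name and keymap are omitted:

static void foo_ir_s_idle(struct rc_dev *rdev, bool enable)
{
        /* hypothetical: program the receiver's hardware idle/timeout mode */
}

static int foo_ir_probe(struct foo_device *fdev)
{
        struct rc_dev *rdev;
        int ret;

        rdev = rc_allocate_device();
        if (!rdev)
                return -ENOMEM;

        rdev->driver_type = RC_DRIVER_IR_RAW;   /* raw samples, decoded in software */
        rdev->priv = fdev;
        rdev->timeout = 100 * 1000 * 1000;      /* assumed 100 ms idle threshold, in ns */
        rdev->s_idle = foo_ir_s_idle;           /* optional; see ir_raw_event_set_idle() */

        /* for RC_DRIVER_IR_RAW devices this path ends in ir_raw_event_register() */
        ret = rc_register_device(rdev);
        if (ret)
                rc_free_device(rdev);
        return ret;
}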
@@ -333,7 +322,7 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
         list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
         if (ir_raw_handler->raw_register)
                 list_for_each_entry(raw, &ir_raw_client_list, list)
-                        ir_raw_handler->raw_register(raw->input_dev);
+                        ir_raw_handler->raw_register(raw->dev);
         available_protocols |= ir_raw_handler->protocols;
         mutex_unlock(&ir_raw_handler_lock);
 
@@ -349,7 +338,7 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
         list_del(&ir_raw_handler->list);
         if (ir_raw_handler->raw_unregister)
                 list_for_each_entry(raw, &ir_raw_client_list, list)
-                        ir_raw_handler->raw_unregister(raw->input_dev);
+                        ir_raw_handler->raw_unregister(raw->dev);
         available_protocols &= ~ir_raw_handler->protocols;
         mutex_unlock(&ir_raw_handler_lock);
 }