author      Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer   Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit      1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree        0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net/irda/sir_kthread.c

Linux-2.6.12-rc2 (tag: v2.6.12-rc2)

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/net/irda/sir_kthread.c')
-rw-r--r--    drivers/net/irda/sir_kthread.c    502
1 file changed, 502 insertions(+), 0 deletions(-)
diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c
new file mode 100644
index 000000000000..18cea1099530
--- /dev/null
+++ b/drivers/net/irda/sir_kthread.c
@@ -0,0 +1,502 @@
/*********************************************************************
 *
 *      sir_kthread.c:          dedicated thread to process scheduled
 *                              sir device setup requests
 *
 *      Copyright (c) 2002 Martin Diehl
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License as
 *      published by the Free Software Foundation; either version 2 of
 *      the License, or (at your option) any later version.
 *
 ********************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>
#include <linux/delay.h>

#include <net/irda/irda.h>

#include "sir-dev.h"

/**************************************************************************
 *
 * kIrDAd kernel thread and config state machine
 *
 */

struct irda_request_queue {
        struct list_head request_list;
        spinlock_t lock;
        task_t *thread;
        struct completion exit;
        wait_queue_head_t kick, done;
        atomic_t num_pending;
};

static struct irda_request_queue irda_rq_queue;

static int irda_queue_request(struct irda_request *rq)
{
        int ret = 0;
        unsigned long flags;

        if (!test_and_set_bit(0, &rq->pending)) {
                spin_lock_irqsave(&irda_rq_queue.lock, flags);
                list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
                wake_up(&irda_rq_queue.kick);
                atomic_inc(&irda_rq_queue.num_pending);
                spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
                ret = 1;
        }
        return ret;
}
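
/* Note: the rq->pending bit guarantees that a given request is enqueued at
 * most once.  A second irda_queue_request() call while the request is still
 * pending returns 0, which callers treat as "state machine busy".
 */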

static void irda_request_timer(unsigned long data)
{
        struct irda_request *rq = (struct irda_request *)data;
        unsigned long flags;

        spin_lock_irqsave(&irda_rq_queue.lock, flags);
        list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
        wake_up(&irda_rq_queue.kick);
        spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
}

static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &rq->timer;

        if (!test_and_set_bit(0, &rq->pending)) {
                timer->expires = jiffies + delay;
                timer->function = irda_request_timer;
                timer->data = (unsigned long)rq;
                atomic_inc(&irda_rq_queue.num_pending);
                add_timer(timer);
                ret = 1;
        }
        return ret;
}
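
/* Note: num_pending is accounted here, when the delayed request is armed,
 * rather than in irda_request_timer().  When the timer fires, the handler
 * only has to move the request onto the list and kick the thread.
 */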

static void run_irda_queue(void)
{
        unsigned long flags;
        struct list_head *entry, *tmp;
        struct irda_request *rq;

        spin_lock_irqsave(&irda_rq_queue.lock, flags);
        list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) {
                rq = list_entry(entry, struct irda_request, lh_request);
                list_del_init(entry);
                spin_unlock_irqrestore(&irda_rq_queue.lock, flags);

                clear_bit(0, &rq->pending);
                rq->func(rq->data);

                if (atomic_dec_and_test(&irda_rq_queue.num_pending))
                        wake_up(&irda_rq_queue.done);

                spin_lock_irqsave(&irda_rq_queue.lock, flags);
        }
        spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
}

static int irda_thread(void *startup)
{
        DECLARE_WAITQUEUE(wait, current);

        daemonize("kIrDAd");

        irda_rq_queue.thread = current;

        complete((struct completion *)startup);

        while (irda_rq_queue.thread != NULL) {

                /* We use TASK_INTERRUPTIBLE, rather than
                 * TASK_UNINTERRUPTIBLE.  Andrew Morton made this
                 * change; he told me that it is safe, because "signal
                 * blocking is now handled in daemonize()", and he added
                 * that the problem is that "uninterruptible sleep
                 * contributes to load average", making users worry.
                 * Jean II */
                set_task_state(current, TASK_INTERRUPTIBLE);
                add_wait_queue(&irda_rq_queue.kick, &wait);
                if (list_empty(&irda_rq_queue.request_list))
                        schedule();
                else
                        __set_task_state(current, TASK_RUNNING);
                remove_wait_queue(&irda_rq_queue.kick, &wait);

                /* make swsusp happy with our thread */
                if (current->flags & PF_FREEZE)
                        refrigerator(PF_FREEZE);

                run_irda_queue();
        }

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35)
        reparent_to_init();
#endif
        complete_and_exit(&irda_rq_queue.exit, 0);
        /* never reached */
        return 0;
}
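
/* Thread lifecycle: irda_thread() loops until irda_thread_join() clears
 * irda_rq_queue.thread and wakes the kick waitqueue; the thread then drops
 * out of the loop and signals irda_rq_queue.exit via complete_and_exit().
 */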


static void flush_irda_queue(void)
{
        if (atomic_read(&irda_rq_queue.num_pending)) {

                DECLARE_WAITQUEUE(wait, current);

                if (!list_empty(&irda_rq_queue.request_list))
                        run_irda_queue();

                set_task_state(current, TASK_UNINTERRUPTIBLE);
                add_wait_queue(&irda_rq_queue.done, &wait);
                if (atomic_read(&irda_rq_queue.num_pending))
                        schedule();
                else
                        __set_task_state(current, TASK_RUNNING);
                remove_wait_queue(&irda_rq_queue.done, &wait);
        }
}

/* substate handler of the config-fsm to handle the cases where we want
 * to wait for transmit completion before changing the port configuration
 */

static int irda_tx_complete_fsm(struct sir_dev *dev)
{
        struct sir_fsm *fsm = &dev->fsm;
        unsigned next_state, delay;
        unsigned bytes_left;

        do {
                next_state = fsm->substate;     /* default: stay in current substate */
                delay = 0;

                switch(fsm->substate) {

                case SIRDEV_STATE_WAIT_XMIT:
                        if (dev->drv->chars_in_buffer)
                                bytes_left = dev->drv->chars_in_buffer(dev);
                        else
                                bytes_left = 0;
                        if (!bytes_left) {
                                next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
                                break;
                        }

                        if (dev->speed > 115200)
                                delay = (bytes_left*8*10000) / (dev->speed/100);
                        else if (dev->speed > 0)
                                delay = (bytes_left*10*10000) / (dev->speed/100);
                        else
                                delay = 0;
                        /* expected delay (usec) until remaining bytes are sent */
                        if (delay < 100) {
                                udelay(delay);
                                delay = 0;
                                break;
                        }
                        /* sleep some longer delay (msec) */
                        delay = (delay+999) / 1000;
                        break;

                case SIRDEV_STATE_WAIT_UNTIL_SENT:
                        /* block until the underlying hardware buffers are empty */
                        if (dev->drv->wait_until_sent)
                                dev->drv->wait_until_sent(dev);
                        next_state = SIRDEV_STATE_TX_DONE;
                        break;

                case SIRDEV_STATE_TX_DONE:
                        return 0;

                default:
                        IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
                        return -EINVAL;
                }
                fsm->substate = next_state;
        } while (delay == 0);
        return delay;
}
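
/* Worked example of the delay estimate above (illustrative numbers): with
 * 14 bytes left in the buffer at 115200 baud, the 10-bits-per-char branch
 * gives 14*10*10000 / (115200/100) = 1400000/1152 ~= 1215 usec; that is
 * >= 100, so the handler asks to sleep (1215+999)/1000 = 2 msec instead of
 * busy-waiting in udelay().
 */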

/*
 * Function irda_config_fsm
 *
 * State machine to handle the configuration of the device (and attached dongle, if any).
 * This handler is scheduled for execution in kIrDAd context, so we can sleep.
 * However, kIrDAd is shared by all sir_dev devices, so we'd better not sleep there too
 * long.  Instead, for longer delays we start a timer to reschedule us later.
 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
 * Both must be unlocked/restarted on completion - but only on final exit.
 */

static void irda_config_fsm(void *data)
{
        struct sir_dev *dev = data;
        struct sir_fsm *fsm = &dev->fsm;
        int next_state;
        int ret = -1;
        unsigned delay;

        IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);

        do {
                IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
                           __FUNCTION__, fsm->state, fsm->substate);

                next_state = fsm->state;
                delay = 0;

                switch(fsm->state) {

                case SIRDEV_STATE_DONGLE_OPEN:
                        if (dev->dongle_drv != NULL) {
                                ret = sirdev_put_dongle(dev);
                                if (ret) {
                                        fsm->result = -EINVAL;
                                        next_state = SIRDEV_STATE_ERROR;
                                        break;
                                }
                        }

                        /* Initialize dongle */
                        ret = sirdev_get_dongle(dev, fsm->param);
                        if (ret) {
                                fsm->result = ret;
                                next_state = SIRDEV_STATE_ERROR;
                                break;
                        }

                        /* Dongles are powered through the modem control lines which
                         * were just set during open.  Before resetting, let's wait for
                         * the power to stabilize.  This is what some dongle drivers did
                         * in open before, while others didn't - should be safe anyway.
                         */

                        delay = 50;
                        fsm->substate = SIRDEV_STATE_DONGLE_RESET;
                        next_state = SIRDEV_STATE_DONGLE_RESET;

                        fsm->param = 9600;

                        break;

                case SIRDEV_STATE_DONGLE_CLOSE:
                        /* shouldn't we just treat this as success? */
                        if (dev->dongle_drv == NULL) {
                                fsm->result = -EINVAL;
                                next_state = SIRDEV_STATE_ERROR;
                                break;
                        }

                        ret = sirdev_put_dongle(dev);
                        if (ret) {
                                fsm->result = ret;
                                next_state = SIRDEV_STATE_ERROR;
                                break;
                        }
                        next_state = SIRDEV_STATE_DONE;
                        break;

                case SIRDEV_STATE_SET_DTR_RTS:
                        ret = sirdev_set_dtr_rts(dev,
                                (fsm->param&0x02) ? TRUE : FALSE,
                                (fsm->param&0x01) ? TRUE : FALSE);
                        next_state = SIRDEV_STATE_DONE;
                        break;

                case SIRDEV_STATE_SET_SPEED:
                        fsm->substate = SIRDEV_STATE_WAIT_XMIT;
                        next_state = SIRDEV_STATE_DONGLE_CHECK;
                        break;

                case SIRDEV_STATE_DONGLE_CHECK:
                        ret = irda_tx_complete_fsm(dev);
                        if (ret < 0) {
                                fsm->result = ret;
                                next_state = SIRDEV_STATE_ERROR;
                                break;
                        }
                        if ((delay=ret) != 0)
                                break;

                        if (dev->dongle_drv) {
                                fsm->substate = SIRDEV_STATE_DONGLE_RESET;
                                next_state = SIRDEV_STATE_DONGLE_RESET;
                        }
                        else {
                                dev->speed = fsm->param;
                                next_state = SIRDEV_STATE_PORT_SPEED;
                        }
                        break;

                case SIRDEV_STATE_DONGLE_RESET:
                        if (dev->dongle_drv->reset) {
                                ret = dev->dongle_drv->reset(dev);
                                if (ret < 0) {
                                        fsm->result = ret;
                                        next_state = SIRDEV_STATE_ERROR;
                                        break;
                                }
                        }
                        else
                                ret = 0;
                        if ((delay=ret) == 0) {
                                /* set serial port according to dongle default speed */
                                if (dev->drv->set_speed)
                                        dev->drv->set_speed(dev, dev->speed);
                                fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
                                next_state = SIRDEV_STATE_DONGLE_SPEED;
                        }
                        break;

                case SIRDEV_STATE_DONGLE_SPEED:
                        if (dev->dongle_drv->set_speed) {
                                ret = dev->dongle_drv->set_speed(dev, fsm->param);
                                if (ret < 0) {
                                        fsm->result = ret;
                                        next_state = SIRDEV_STATE_ERROR;
                                        break;
                                }
                        }
                        else
                                ret = 0;
                        if ((delay=ret) == 0)
                                next_state = SIRDEV_STATE_PORT_SPEED;
                        break;

                case SIRDEV_STATE_PORT_SPEED:
                        /* Finally we are ready to change the serial port speed */
                        if (dev->drv->set_speed)
                                dev->drv->set_speed(dev, dev->speed);
                        dev->new_speed = 0;
                        next_state = SIRDEV_STATE_DONE;
                        break;

                case SIRDEV_STATE_DONE:
                        /* Signal network layer so it can send more frames */
                        netif_wake_queue(dev->netdev);
                        next_state = SIRDEV_STATE_COMPLETE;
                        break;

                default:
                        IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
                        fsm->result = -EINVAL;
                        /* fall thru */

                case SIRDEV_STATE_ERROR:
                        IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);

#if 0   /* don't enable this before we have netdev->tx_timeout to recover */
                        netif_stop_queue(dev->netdev);
#else
                        netif_wake_queue(dev->netdev);
#endif
                        /* fall thru */

                case SIRDEV_STATE_COMPLETE:
                        /* config change finished, so we are not busy any longer */
                        sirdev_enable_rx(dev);
                        up(&fsm->sem);
                        return;
                }
                fsm->state = next_state;
        } while(!delay);

        irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay));
}
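
/* For reference, a speed change request walks the states roughly as:
 * SET_SPEED -> DONGLE_CHECK (runs the WAIT_XMIT/WAIT_UNTIL_SENT substates
 * until the tx buffer is drained) -> DONGLE_RESET -> DONGLE_SPEED (only if
 * a dongle is attached) -> PORT_SPEED -> DONE -> COMPLETE, at which point
 * rx is re-enabled and fsm->sem is released.
 */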

/* schedule some device configuration task for execution by kIrDAd
 * on behalf of the above state machine.
 * can be called from process or interrupt/tasklet context.
 */

int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
{
        struct sir_fsm *fsm = &dev->fsm;
        int xmit_was_down;

        IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);

        if (down_trylock(&fsm->sem)) {
                if (in_interrupt() || in_atomic() || irqs_disabled()) {
                        IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
                        return -EWOULDBLOCK;
                } else
                        down(&fsm->sem);
        }

        if (fsm->state == SIRDEV_STATE_DEAD) {
                /* race with sirdev_close should never happen */
                IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__);
                up(&fsm->sem);
                return -ESTALE;         /* or better EPIPE? */
        }

        xmit_was_down = netif_queue_stopped(dev->netdev);
        netif_stop_queue(dev->netdev);
        atomic_set(&dev->enable_rx, 0);

        fsm->state = initial_state;
        fsm->param = param;
        fsm->result = 0;

        INIT_LIST_HEAD(&fsm->rq.lh_request);
        fsm->rq.pending = 0;
        fsm->rq.func = irda_config_fsm;
        fsm->rq.data = dev;

        if (!irda_queue_request(&fsm->rq)) {    /* returns 0 on error! */
                atomic_set(&dev->enable_rx, 1);
                if (!xmit_was_down)
                        netif_wake_queue(dev->netdev);
                up(&fsm->sem);
                return -EAGAIN;
        }
        return 0;
}
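
/* Usage sketch (hypothetical caller, values purely for illustration): a SIR
 * driver wanting to switch the line to 57600 baud might do
 *
 *      err = sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, 57600);
 *
 * A return value of 0 only means the request was queued; the actual
 * reconfiguration then runs asynchronously in kIrDAd context.
 */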

int __init irda_thread_create(void)
{
        struct completion startup;
        int pid;

        spin_lock_init(&irda_rq_queue.lock);
        irda_rq_queue.thread = NULL;
        INIT_LIST_HEAD(&irda_rq_queue.request_list);
        init_waitqueue_head(&irda_rq_queue.kick);
        init_waitqueue_head(&irda_rq_queue.done);
        atomic_set(&irda_rq_queue.num_pending, 0);

        init_completion(&startup);
        pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES);
        if (pid <= 0)
                return -EAGAIN;
        else
                wait_for_completion(&startup);

        return 0;
}

void __exit irda_thread_join(void)
{
        if (irda_rq_queue.thread) {
                flush_irda_queue();
                init_completion(&irda_rq_queue.exit);
                irda_rq_queue.thread = NULL;
                wake_up(&irda_rq_queue.kick);
                wait_for_completion(&irda_rq_queue.exit);
        }
}
