| author | Linus Torvalds <torvalds@g5.osdl.org> | 2006-05-11 18:35:54 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-05-11 18:35:54 -0400 |
| commit | 6572b2064a54f1ed29fcbf6d16dfc5de71dfe495 (patch) | |
| tree | 5ab97c864956028045da27ca489d6447b2350c48 /drivers/net/irda/sir_kthread.c | |
| parent | f7a014af2d76a96e5af51b64f954328b700fa62f (diff) | |
| parent | 210525d65d33d17eb6bea6c965ce442d60d9aa8d (diff) | |
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
[NET_SCHED]: HFSC: fix thinko in hfsc_adjust_levels()
[IPV6]: skb leakage in inet6_csk_xmit
[BRIDGE]: Do sysfs registration inside rtnl.
[NET]: Do sysfs registration as part of register_netdevice.
[TG3]: Fix possible NULL deref in tg3_run_loopback().
[NET] linkwatch: Handle jiffies wrap-around
[IRDA]: Switching to a workqueue for the SIR work
[IRDA]: smsc-ircc: Minimal hotplug support.
[IRDA]: Removing unused EXPORT_SYMBOLs
[IRDA]: New maintainer.
[NET]: Make netdev_chain a raw notifier.
[IPV4]: ip_options_fragment() has no effect on fragmentation
[NET]: Add missing operstates documentation.
Diffstat (limited to 'drivers/net/irda/sir_kthread.c')
| -rw-r--r-- | drivers/net/irda/sir_kthread.c | 508 |
1 files changed, 0 insertions, 508 deletions
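
One of the changes pulled in here, "[IRDA]: Switching to a workqueue for the SIR work", removes this file entirely: the private kIrDAd thread and its hand-rolled request list give way to the kernel's shared workqueue infrastructure. The sketch below only illustrates that pattern; it uses today's two-argument INIT_DELAYED_WORK() API and made-up names (sir_fsm_sketch, sirdev_config_work), not the actual replacement code.

```c
#include <linux/workqueue.h>

/* Illustrative stand-in for struct sir_fsm; not the real sir-dev.h type. */
struct sir_fsm_sketch {
	struct delayed_work work;	/* replaces struct irda_request + its timer */
	/* state, substate, param, result would live here as before */
};

static void sirdev_config_work(struct work_struct *w)
{
	struct sir_fsm_sketch *fsm =
		container_of(to_delayed_work(w), struct sir_fsm_sketch, work);

	/* Run one pass of the config state machine, as irda_config_fsm()
	 * below does; when it asks for a pause, requeue the work item
	 * instead of arming a private timer. */
	schedule_delayed_work(&fsm->work, msecs_to_jiffies(50));
}

/* Setup replaces irda_thread_create(); queueing a request replaces
 * irda_queue_request()/irda_queue_delayed_request(). */
static void sir_fsm_sketch_start(struct sir_fsm_sketch *fsm)
{
	INIT_DELAYED_WORK(&fsm->work, sirdev_config_work);
	schedule_delayed_work(&fsm->work, 0);
}
```

With a work item, teardown reduces to cancel_delayed_work_sync(), which is roughly what the explicit flush/join machinery at the bottom of this file exists to emulate.
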
diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c
deleted file mode 100644
index e3904d6bfecd..000000000000
--- a/drivers/net/irda/sir_kthread.c
+++ /dev/null
@@ -1,508 +0,0 @@
/*********************************************************************
 *
 * sir_kthread.c:	dedicated thread to process scheduled
 *			sir device setup requests
 *
 * Copyright (c) 2002 Martin Diehl
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 ********************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>
#include <linux/delay.h>

#include <net/irda/irda.h>

#include "sir-dev.h"

/**************************************************************************
 *
 * kIrDAd kernel thread and config state machine
 *
 */

struct irda_request_queue {
	struct list_head request_list;
	spinlock_t lock;
	task_t *thread;
	struct completion exit;
	wait_queue_head_t kick, done;
	atomic_t num_pending;
};

static struct irda_request_queue irda_rq_queue;

static int irda_queue_request(struct irda_request *rq)
{
	int ret = 0;
	unsigned long flags;

	if (!test_and_set_bit(0, &rq->pending)) {
		spin_lock_irqsave(&irda_rq_queue.lock, flags);
		list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
		wake_up(&irda_rq_queue.kick);
		atomic_inc(&irda_rq_queue.num_pending);
		spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
		ret = 1;
	}
	return ret;
}

static void irda_request_timer(unsigned long data)
{
	struct irda_request *rq = (struct irda_request *)data;
	unsigned long flags;

	spin_lock_irqsave(&irda_rq_queue.lock, flags);
	list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
	wake_up(&irda_rq_queue.kick);
	spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
}

static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &rq->timer;

	if (!test_and_set_bit(0, &rq->pending)) {
		timer->expires = jiffies + delay;
		timer->function = irda_request_timer;
		timer->data = (unsigned long)rq;
		atomic_inc(&irda_rq_queue.num_pending);
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
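
/*
 * num_pending is incremented here, when the delayed request is armed;
 * irda_request_timer() above only moves the request onto the list and
 * kicks kIrDAd once the delay has expired.
 */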
86 | |||
87 | static void run_irda_queue(void) | ||
88 | { | ||
89 | unsigned long flags; | ||
90 | struct list_head *entry, *tmp; | ||
91 | struct irda_request *rq; | ||
92 | |||
93 | spin_lock_irqsave(&irda_rq_queue.lock, flags); | ||
94 | list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) { | ||
95 | rq = list_entry(entry, struct irda_request, lh_request); | ||
96 | list_del_init(entry); | ||
97 | spin_unlock_irqrestore(&irda_rq_queue.lock, flags); | ||
98 | |||
99 | clear_bit(0, &rq->pending); | ||
100 | rq->func(rq->data); | ||
101 | |||
102 | if (atomic_dec_and_test(&irda_rq_queue.num_pending)) | ||
103 | wake_up(&irda_rq_queue.done); | ||
104 | |||
105 | spin_lock_irqsave(&irda_rq_queue.lock, flags); | ||
106 | } | ||
107 | spin_unlock_irqrestore(&irda_rq_queue.lock, flags); | ||
108 | } | ||
109 | |||
110 | static int irda_thread(void *startup) | ||
111 | { | ||
112 | DECLARE_WAITQUEUE(wait, current); | ||
113 | |||
114 | daemonize("kIrDAd"); | ||
115 | |||
116 | irda_rq_queue.thread = current; | ||
117 | |||
118 | complete((struct completion *)startup); | ||
119 | |||
120 | while (irda_rq_queue.thread != NULL) { | ||
121 | |||
		/* We use TASK_INTERRUPTIBLE rather than
		 * TASK_UNINTERRUPTIBLE. Andrew Morton made this
		 * change; he told me that it is safe, because "signal
		 * blocking is now handled in daemonize()", and added
		 * that the problem is that "uninterruptible sleep
		 * contributes to load average", making users worry.
		 * Jean II */
		set_task_state(current, TASK_INTERRUPTIBLE);
		add_wait_queue(&irda_rq_queue.kick, &wait);
		if (list_empty(&irda_rq_queue.request_list))
			schedule();
		else
			__set_task_state(current, TASK_RUNNING);
		remove_wait_queue(&irda_rq_queue.kick, &wait);

		/* make swsusp happy with our thread */
		try_to_freeze();

		run_irda_queue();
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35)
	reparent_to_init();
#endif
	complete_and_exit(&irda_rq_queue.exit, 0);
	/* never reached */
	return 0;
}


static void flush_irda_queue(void)
{
	if (atomic_read(&irda_rq_queue.num_pending)) {

		DECLARE_WAITQUEUE(wait, current);

		if (!list_empty(&irda_rq_queue.request_list))
			run_irda_queue();

		set_task_state(current, TASK_UNINTERRUPTIBLE);
		add_wait_queue(&irda_rq_queue.done, &wait);
		if (atomic_read(&irda_rq_queue.num_pending))
			schedule();
		else
			__set_task_state(current, TASK_RUNNING);
		remove_wait_queue(&irda_rq_queue.done, &wait);
	}
}

/* substate handler of the config-fsm to handle the cases where we want
 * to wait for transmit completion before changing the port configuration
 */

static int irda_tx_complete_fsm(struct sir_dev *dev)
{
	struct sir_fsm *fsm = &dev->fsm;
	unsigned next_state, delay;
	unsigned bytes_left;

	do {
		next_state = fsm->substate;	/* default: stay in current substate */
		delay = 0;

		switch(fsm->substate) {

		case SIRDEV_STATE_WAIT_XMIT:
			if (dev->drv->chars_in_buffer)
				bytes_left = dev->drv->chars_in_buffer(dev);
			else
				bytes_left = 0;
			if (!bytes_left) {
				next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
				break;
			}

			if (dev->speed > 115200)
				delay = (bytes_left*8*10000) / (dev->speed/100);
			else if (dev->speed > 0)
				delay = (bytes_left*10*10000) / (dev->speed/100);
			else
				delay = 0;
			/* expected delay (usec) until remaining bytes are sent */
			if (delay < 100) {
				udelay(delay);
				delay = 0;
				break;
			}
			/* sleep some longer delay (msec) */
			delay = (delay+999) / 1000;
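			/* Worked example (illustrative): 10 bytes pending at
			 * 9600 baud gives 10*10*10000/(9600/100) = 10416 usec,
			 * which rounds up here to an 11 ms requeue delay. */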
			break;

		case SIRDEV_STATE_WAIT_UNTIL_SENT:
			/* block until the underlying hardware buffers are empty */
			if (dev->drv->wait_until_sent)
				dev->drv->wait_until_sent(dev);
			next_state = SIRDEV_STATE_TX_DONE;
			break;

		case SIRDEV_STATE_TX_DONE:
			return 0;

		default:
			IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
			return -EINVAL;
		}
		fsm->substate = next_state;
	} while (delay == 0);
	return delay;
}

/*
 * Function irda_config_fsm
 *
 * State machine to handle the configuration of the device (and attached dongle, if any).
 * This handler is scheduled for execution in kIrDAd context, so we can sleep.
 * However, kIrDAd is shared by all sir_dev instances, so we'd better not sleep
 * there for too long. Instead, for longer delays we start a timer to reschedule
 * us later.
 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
 * Both must be unlocked/restarted on completion - but only on final exit.
 */
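
/*
 * Typical sequence (descriptive, based on the states below): a driver
 * schedules SIRDEV_STATE_SET_SPEED; this handler then walks
 * SET_SPEED -> DONGLE_CHECK -> (DONGLE_RESET -> DONGLE_SPEED) ->
 * PORT_SPEED -> DONE -> COMPLETE, rescheduling itself through
 * irda_queue_delayed_request() whenever a non-zero delay is requested.
 */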
242 | |||
243 | static void irda_config_fsm(void *data) | ||
244 | { | ||
245 | struct sir_dev *dev = data; | ||
246 | struct sir_fsm *fsm = &dev->fsm; | ||
247 | int next_state; | ||
248 | int ret = -1; | ||
249 | unsigned delay; | ||
250 | |||
251 | IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies); | ||
252 | |||
253 | do { | ||
254 | IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n", | ||
255 | __FUNCTION__, fsm->state, fsm->substate); | ||
256 | |||
257 | next_state = fsm->state; | ||
258 | delay = 0; | ||
259 | |||
260 | switch(fsm->state) { | ||
261 | |||
262 | case SIRDEV_STATE_DONGLE_OPEN: | ||
263 | if (dev->dongle_drv != NULL) { | ||
264 | ret = sirdev_put_dongle(dev); | ||
265 | if (ret) { | ||
266 | fsm->result = -EINVAL; | ||
267 | next_state = SIRDEV_STATE_ERROR; | ||
268 | break; | ||
269 | } | ||
270 | } | ||
271 | |||
272 | /* Initialize dongle */ | ||
273 | ret = sirdev_get_dongle(dev, fsm->param); | ||
274 | if (ret) { | ||
275 | fsm->result = ret; | ||
276 | next_state = SIRDEV_STATE_ERROR; | ||
277 | break; | ||
278 | } | ||
279 | |||
280 | /* Dongles are powered through the modem control lines which | ||
281 | * were just set during open. Before resetting, let's wait for | ||
282 | * the power to stabilize. This is what some dongle drivers did | ||
283 | * in open before, while others didn't - should be safe anyway. | ||
284 | */ | ||
285 | |||
286 | delay = 50; | ||
287 | fsm->substate = SIRDEV_STATE_DONGLE_RESET; | ||
288 | next_state = SIRDEV_STATE_DONGLE_RESET; | ||
289 | |||
290 | fsm->param = 9600; | ||
291 | |||
292 | break; | ||
293 | |||
294 | case SIRDEV_STATE_DONGLE_CLOSE: | ||
			/* shouldn't we just treat this as success? */
			if (dev->dongle_drv == NULL) {
				fsm->result = -EINVAL;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			ret = sirdev_put_dongle(dev);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_DTR_RTS:
			ret = sirdev_set_dtr_rts(dev,
				(fsm->param&0x02) ? TRUE : FALSE,
				(fsm->param&0x01) ? TRUE : FALSE);
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_SPEED:
			fsm->substate = SIRDEV_STATE_WAIT_XMIT;
			next_state = SIRDEV_STATE_DONGLE_CHECK;
			break;

		case SIRDEV_STATE_DONGLE_CHECK:
			ret = irda_tx_complete_fsm(dev);
			if (ret < 0) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			if ((delay=ret) != 0)
				break;

			if (dev->dongle_drv) {
				fsm->substate = SIRDEV_STATE_DONGLE_RESET;
				next_state = SIRDEV_STATE_DONGLE_RESET;
			}
			else {
				dev->speed = fsm->param;
				next_state = SIRDEV_STATE_PORT_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_RESET:
			if (dev->dongle_drv->reset) {
				ret = dev->dongle_drv->reset(dev);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;
			if ((delay=ret) == 0) {
				/* set serial port according to dongle default speed */
				if (dev->drv->set_speed)
					dev->drv->set_speed(dev, dev->speed);
				fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
				next_state = SIRDEV_STATE_DONGLE_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_SPEED:
			if (dev->dongle_drv->set_speed) {
				ret = dev->dongle_drv->set_speed(dev, fsm->param);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;
			if ((delay=ret) == 0)
				next_state = SIRDEV_STATE_PORT_SPEED;
			break;

		case SIRDEV_STATE_PORT_SPEED:
			/* Finally we are ready to change the serial port speed */
			if (dev->drv->set_speed)
				dev->drv->set_speed(dev, dev->speed);
			dev->new_speed = 0;
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_DONE:
			/* Signal network layer so it can send more frames */
			netif_wake_queue(dev->netdev);
			next_state = SIRDEV_STATE_COMPLETE;
			break;

		default:
			IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
			fsm->result = -EINVAL;
			/* fall thru */

		case SIRDEV_STATE_ERROR:
			IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);

#if 0	/* don't enable this before we have netdev->tx_timeout to recover */
			netif_stop_queue(dev->netdev);
#else
			netif_wake_queue(dev->netdev);
#endif
			/* fall thru */

		case SIRDEV_STATE_COMPLETE:
			/* config change finished, so we are not busy any longer */
			sirdev_enable_rx(dev);
			up(&fsm->sem);
			return;
		}
		fsm->state = next_state;
	} while(!delay);

	irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay));
}

/* schedule some device configuration task for execution by kIrDAd
 * on behalf of the above state machine.
 * can be called from process or interrupt/tasklet context.
 */
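
/*
 * Illustrative use (not part of this file): a SIR driver that wants to
 * change the line speed would call something like
 *
 *	sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, 115200);
 *
 * and let kIrDAd drive the state machine above on its behalf.
 */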
423 | |||
424 | int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param) | ||
425 | { | ||
426 | struct sir_fsm *fsm = &dev->fsm; | ||
427 | int xmit_was_down; | ||
428 | |||
429 | IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param); | ||
430 | |||
431 | if (down_trylock(&fsm->sem)) { | ||
432 | if (in_interrupt() || in_atomic() || irqs_disabled()) { | ||
433 | IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__); | ||
434 | return -EWOULDBLOCK; | ||
435 | } else | ||
436 | down(&fsm->sem); | ||
437 | } | ||
438 | |||
439 | if (fsm->state == SIRDEV_STATE_DEAD) { | ||
440 | /* race with sirdev_close should never happen */ | ||
441 | IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__); | ||
442 | up(&fsm->sem); | ||
443 | return -ESTALE; /* or better EPIPE? */ | ||
444 | } | ||
445 | |||
446 | xmit_was_down = netif_queue_stopped(dev->netdev); | ||
447 | netif_stop_queue(dev->netdev); | ||
448 | atomic_set(&dev->enable_rx, 0); | ||
449 | |||
450 | fsm->state = initial_state; | ||
451 | fsm->param = param; | ||
452 | fsm->result = 0; | ||
453 | |||
454 | INIT_LIST_HEAD(&fsm->rq.lh_request); | ||
455 | fsm->rq.pending = 0; | ||
456 | fsm->rq.func = irda_config_fsm; | ||
457 | fsm->rq.data = dev; | ||
458 | |||
459 | if (!irda_queue_request(&fsm->rq)) { /* returns 0 on error! */ | ||
460 | atomic_set(&dev->enable_rx, 1); | ||
461 | if (!xmit_was_down) | ||
462 | netif_wake_queue(dev->netdev); | ||
463 | up(&fsm->sem); | ||
464 | return -EAGAIN; | ||
465 | } | ||
466 | return 0; | ||
467 | } | ||
468 | |||
469 | static int __init irda_thread_create(void) | ||
470 | { | ||
471 | struct completion startup; | ||
472 | int pid; | ||
473 | |||
474 | spin_lock_init(&irda_rq_queue.lock); | ||
475 | irda_rq_queue.thread = NULL; | ||
476 | INIT_LIST_HEAD(&irda_rq_queue.request_list); | ||
477 | init_waitqueue_head(&irda_rq_queue.kick); | ||
478 | init_waitqueue_head(&irda_rq_queue.done); | ||
479 | atomic_set(&irda_rq_queue.num_pending, 0); | ||
480 | |||
481 | init_completion(&startup); | ||
482 | pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES); | ||
483 | if (pid <= 0) | ||
484 | return -EAGAIN; | ||
485 | else | ||
486 | wait_for_completion(&startup); | ||
487 | |||
488 | return 0; | ||
489 | } | ||
490 | |||
static void __exit irda_thread_join(void)
{
	if (irda_rq_queue.thread) {
		flush_irda_queue();
		init_completion(&irda_rq_queue.exit);
		irda_rq_queue.thread = NULL;
		wake_up(&irda_rq_queue.kick);
		wait_for_completion(&irda_rq_queue.exit);
	}
}

module_init(irda_thread_create);
module_exit(irda_thread_join);

MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
MODULE_DESCRIPTION("IrDA SIR core");
MODULE_LICENSE("GPL");