author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /net/rxrpc/krxiod.c

Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'net/rxrpc/krxiod.c')
-rw-r--r--  net/rxrpc/krxiod.c  261
1 file changed, 261 insertions, 0 deletions
diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c
new file mode 100644
index 000000000000..2b537f425a17
--- /dev/null
+++ b/net/rxrpc/krxiod.c
@@ -0,0 +1,261 @@
/* krxiod.c: Rx I/O daemon
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <rxrpc/krxiod.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
#include <rxrpc/call.h>
#include "internal.h"

static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxiod_sleepq);
static DECLARE_COMPLETION(rxrpc_krxiod_dead);

static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0);

static LIST_HEAD(rxrpc_krxiod_transportq);
static DEFINE_SPINLOCK(rxrpc_krxiod_transportq_lock);

static LIST_HEAD(rxrpc_krxiod_callq);
static DEFINE_SPINLOCK(rxrpc_krxiod_callq_lock);

static volatile int rxrpc_krxiod_die;

/*****************************************************************************/
/*
 * Rx I/O daemon
 */
static int rxrpc_krxiod(void *arg)
{
	DECLARE_WAITQUEUE(krxiod,current);

	printk("Started krxiod %d\n",current->pid);

	daemonize("krxiod");

	/* loop around waiting for work to do */
	do {
		/* wait for work or to be told to exit */
		_debug("### Begin Wait");
		if (!atomic_read(&rxrpc_krxiod_qcount)) {
			set_current_state(TASK_INTERRUPTIBLE);

			add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);

			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (atomic_read(&rxrpc_krxiod_qcount) ||
				    rxrpc_krxiod_die ||
				    signal_pending(current))
					break;

				schedule();
			}

			remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
			set_current_state(TASK_RUNNING);
		}
		_debug("### End Wait");

		/* do work if been given some to do */
		_debug("### Begin Work");

		/* see if there's a transport in need of attention */
		if (!list_empty(&rxrpc_krxiod_transportq)) {
			struct rxrpc_transport *trans = NULL;

			spin_lock_irq(&rxrpc_krxiod_transportq_lock);

			if (!list_empty(&rxrpc_krxiod_transportq)) {
				trans = list_entry(
					rxrpc_krxiod_transportq.next,
					struct rxrpc_transport,
					krxiodq_link);

				list_del_init(&trans->krxiodq_link);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&trans->usage)>0)
					rxrpc_get_transport(trans);
				else
					trans = NULL;
			}

			spin_unlock_irq(&rxrpc_krxiod_transportq_lock);

			if (trans) {
				rxrpc_trans_receive_packet(trans);
				rxrpc_put_transport(trans);
			}
		}

		/* see if there's a call in need of attention */
		if (!list_empty(&rxrpc_krxiod_callq)) {
			struct rxrpc_call *call = NULL;

			spin_lock_irq(&rxrpc_krxiod_callq_lock);

			if (!list_empty(&rxrpc_krxiod_callq)) {
				call = list_entry(rxrpc_krxiod_callq.next,
						  struct rxrpc_call,
						  rcv_krxiodq_lk);
				list_del_init(&call->rcv_krxiodq_lk);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&call->usage) > 0) {
					_debug("@@@ KRXIOD"
					       " Begin Attend Call %p", call);
					rxrpc_get_call(call);
				}
				else {
					call = NULL;
				}
			}

			spin_unlock_irq(&rxrpc_krxiod_callq_lock);

			if (call) {
				rxrpc_call_do_stuff(call);
				rxrpc_put_call(call);
				_debug("@@@ KRXIOD End Attend Call %p", call);
			}
		}

		_debug("### End Work");

		try_to_freeze(PF_FREEZE);

		/* discard pending signals */
		rxrpc_discard_my_signals();

	} while (!rxrpc_krxiod_die);

	/* and that's all */
	complete_and_exit(&rxrpc_krxiod_dead, 0);

} /* end rxrpc_krxiod() */

/*****************************************************************************/
/*
 * start up a krxiod daemon
 */
int __init rxrpc_krxiod_init(void)
{
	return kernel_thread(rxrpc_krxiod, NULL, 0);

} /* end rxrpc_krxiod_init() */

/*****************************************************************************/
/*
 * kill the krxiod daemon and wait for it to complete
 */
void rxrpc_krxiod_kill(void)
{
	rxrpc_krxiod_die = 1;
	wake_up_all(&rxrpc_krxiod_sleepq);
	wait_for_completion(&rxrpc_krxiod_dead);

} /* end rxrpc_krxiod_kill() */

/*****************************************************************************/
/*
 * queue a transport for attention by krxiod
 */
void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans)
{
	unsigned long flags;

	_enter("");

	if (list_empty(&trans->krxiodq_link)) {
		spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);

		if (list_empty(&trans->krxiodq_link)) {
			if (atomic_read(&trans->usage) > 0) {
				list_add_tail(&trans->krxiodq_link,
					      &rxrpc_krxiod_transportq);
				atomic_inc(&rxrpc_krxiod_qcount);
			}
		}

		spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
		wake_up_all(&rxrpc_krxiod_sleepq);
	}

	_leave("");

} /* end rxrpc_krxiod_queue_transport() */

/*****************************************************************************/
/*
 * dequeue a transport from krxiod's attention queue
 */
void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans)
{
	unsigned long flags;

	_enter("");

	spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
	if (!list_empty(&trans->krxiodq_link)) {
		list_del_init(&trans->krxiodq_link);
		atomic_dec(&rxrpc_krxiod_qcount);
	}
	spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);

	_leave("");

} /* end rxrpc_krxiod_dequeue_transport() */

/*****************************************************************************/
/*
 * queue a call for attention by krxiod
 */
void rxrpc_krxiod_queue_call(struct rxrpc_call *call)
{
	unsigned long flags;

	if (list_empty(&call->rcv_krxiodq_lk)) {
		spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
		if (atomic_read(&call->usage) > 0) {
			list_add_tail(&call->rcv_krxiodq_lk,
				      &rxrpc_krxiod_callq);
			atomic_inc(&rxrpc_krxiod_qcount);
		}
		spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
	}
	wake_up_all(&rxrpc_krxiod_sleepq);

} /* end rxrpc_krxiod_queue_call() */

/*****************************************************************************/
/*
 * dequeue a call from krxiod's attention queue
 */
void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call)
{
	unsigned long flags;

	spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
	if (!list_empty(&call->rcv_krxiodq_lk)) {
		list_del_init(&call->rcv_krxiodq_lk);
		atomic_dec(&rxrpc_krxiod_qcount);
	}
	spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);

} /* end rxrpc_krxiod_dequeue_call() */
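
Note on the pattern: the daemon added by this patch is a plain producer/consumer loop. Producers (rxrpc_krxiod_queue_transport() and rxrpc_krxiod_queue_call()) add an item to a spinlock-protected list, bump rxrpc_krxiod_qcount and wake the sleep queue; the daemon sleeps in TASK_INTERRUPTIBLE until the count is non-zero or rxrpc_krxiod_die is set, then takes a reference on each item before attending to it. The user-space sketch below reproduces the shape of that loop with POSIX threads, using a mutex and condition variable in place of the spinlock and wait queue; all names in it (work_item, queue_work, krxiod_loop) are hypothetical and are not part of the kernel or rxrpc API.

/* Hypothetical user-space analogue of the krxiod work loop (not kernel code). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work_item {
	struct work_item *next;
	int payload;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_wake = PTHREAD_COND_INITIALIZER; /* role of rxrpc_krxiod_sleepq */
static struct work_item *queue_head, *queue_tail;
static int qcount;	/* role of rxrpc_krxiod_qcount */
static int die;		/* role of rxrpc_krxiod_die */

/* producer: roughly what rxrpc_krxiod_queue_transport()/_queue_call() do */
static void queue_work(int payload)
{
	struct work_item *item = malloc(sizeof(*item));

	item->payload = payload;
	item->next = NULL;

	pthread_mutex_lock(&queue_lock);
	if (queue_tail)
		queue_tail->next = item;
	else
		queue_head = item;
	queue_tail = item;
	qcount++;
	pthread_mutex_unlock(&queue_lock);

	pthread_cond_broadcast(&queue_wake);	/* like wake_up_all() */
}

/* consumer: the shape of rxrpc_krxiod() - sleep until work or shutdown, then drain */
static void *krxiod_loop(void *arg)
{
	(void) arg;
	for (;;) {
		struct work_item *item = NULL;
		int must_die;

		pthread_mutex_lock(&queue_lock);
		while (!qcount && !die)
			pthread_cond_wait(&queue_wake, &queue_lock);

		if (queue_head) {
			item = queue_head;
			queue_head = item->next;
			if (!queue_head)
				queue_tail = NULL;
			qcount--;
		}
		must_die = die;
		pthread_mutex_unlock(&queue_lock);

		if (item) {
			printf("attending to item %d\n", item->payload);
			free(item);
		} else if (must_die) {
			return NULL;	/* like complete_and_exit() */
		}
	}
}

int main(void)
{
	pthread_t tid;
	int i;

	pthread_create(&tid, NULL, krxiod_loop, NULL);	/* like rxrpc_krxiod_init() */

	for (i = 0; i < 5; i++)
		queue_work(i);

	/* shutdown: like rxrpc_krxiod_kill() */
	pthread_mutex_lock(&queue_lock);
	die = 1;
	pthread_mutex_unlock(&queue_lock);
	pthread_cond_broadcast(&queue_wake);

	pthread_join(tid, NULL);	/* like wait_for_completion() */
	return 0;
}

The kernel version additionally checks the item's reference count (trans->usage or call->usage) before taking a reference, since an object may already be dying while still linked on the queue; the sketch omits that detail for brevity.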