Diffstat (limited to 'fs/afs/cmservice.c')
-rw-r--r--  fs/afs/cmservice.c | 926
1 file changed, 374 insertions(+), 552 deletions(-)
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 3d097fddcb7a..6685f4cbccb3 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -1,4 +1,4 @@
-/* cmservice.c: AFS Cache Manager Service
+/* AFS Cache Manager Service
  *
  * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -12,641 +12,463 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/completion.h> 15#include <linux/ip.h>
16#include "server.h"
17#include "cell.h"
18#include "transport.h"
19#include <rxrpc/rxrpc.h>
20#include <rxrpc/transport.h>
21#include <rxrpc/connection.h>
22#include <rxrpc/call.h>
23#include "cmservice.h"
24#include "internal.h" 16#include "internal.h"
17#include "afs_cm.h"
25 18
26static unsigned afscm_usage; /* AFS cache manager usage count */ 19struct workqueue_struct *afs_cm_workqueue;
27static struct rw_semaphore afscm_sem; /* AFS cache manager start/stop semaphore */
28
29static int afscm_new_call(struct rxrpc_call *call);
30static void afscm_attention(struct rxrpc_call *call);
31static void afscm_error(struct rxrpc_call *call);
32static void afscm_aemap(struct rxrpc_call *call);
33
34static void _SRXAFSCM_CallBack(struct rxrpc_call *call);
35static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call);
36static void _SRXAFSCM_Probe(struct rxrpc_call *call);
37
38typedef void (*_SRXAFSCM_xxxx_t)(struct rxrpc_call *call);
39
40static const struct rxrpc_operation AFSCM_ops[] = {
41 {
42 .id = 204,
43 .asize = RXRPC_APP_MARK_EOF,
44 .name = "CallBack",
45 .user = _SRXAFSCM_CallBack,
46 },
47 {
48 .id = 205,
49 .asize = RXRPC_APP_MARK_EOF,
50 .name = "InitCallBackState",
51 .user = _SRXAFSCM_InitCallBackState,
52 },
53 {
54 .id = 206,
55 .asize = RXRPC_APP_MARK_EOF,
56 .name = "Probe",
57 .user = _SRXAFSCM_Probe,
58 },
59#if 0
60 {
61 .id = 207,
62 .asize = RXRPC_APP_MARK_EOF,
63 .name = "GetLock",
64 .user = _SRXAFSCM_GetLock,
65 },
66 {
67 .id = 208,
68 .asize = RXRPC_APP_MARK_EOF,
69 .name = "GetCE",
70 .user = _SRXAFSCM_GetCE,
71 },
72 {
73 .id = 209,
74 .asize = RXRPC_APP_MARK_EOF,
75 .name = "GetXStatsVersion",
76 .user = _SRXAFSCM_GetXStatsVersion,
77 },
78 {
79 .id = 210,
80 .asize = RXRPC_APP_MARK_EOF,
81 .name = "GetXStats",
82 .user = _SRXAFSCM_GetXStats,
83 }
84#endif
85};
86 20
87static struct rxrpc_service AFSCM_service = { 21static int afs_deliver_cb_init_call_back_state(struct afs_call *,
88 .name = "AFS/CM", 22 struct sk_buff *, bool);
89 .owner = THIS_MODULE, 23static int afs_deliver_cb_init_call_back_state3(struct afs_call *,
90 .link = LIST_HEAD_INIT(AFSCM_service.link), 24 struct sk_buff *, bool);
91 .new_call = afscm_new_call, 25static int afs_deliver_cb_probe(struct afs_call *, struct sk_buff *, bool);
92 .service_id = 1, 26static int afs_deliver_cb_callback(struct afs_call *, struct sk_buff *, bool);
93 .attn_func = afscm_attention, 27static int afs_deliver_cb_get_capabilities(struct afs_call *, struct sk_buff *,
94 .error_func = afscm_error, 28 bool);
95 .aemap_func = afscm_aemap, 29static void afs_cm_destructor(struct afs_call *);
96 .ops_begin = &AFSCM_ops[0],
97 .ops_end = &AFSCM_ops[ARRAY_SIZE(AFSCM_ops)],
98};
99 30
100static DECLARE_COMPLETION(kafscmd_alive);
101static DECLARE_COMPLETION(kafscmd_dead);
102static DECLARE_WAIT_QUEUE_HEAD(kafscmd_sleepq);
103static LIST_HEAD(kafscmd_attention_list);
104static LIST_HEAD(afscm_calls);
105static DEFINE_SPINLOCK(afscm_calls_lock);
106static DEFINE_SPINLOCK(kafscmd_attention_lock);
107static int kafscmd_die;
108
109/*****************************************************************************/
110/* 31/*
111 * AFS Cache Manager kernel thread 32 * CB.CallBack operation type
112 */ 33 */
113static int kafscmd(void *arg) 34static const struct afs_call_type afs_SRXCBCallBack = {
114{ 35 .name = "CB.CallBack",
115 DECLARE_WAITQUEUE(myself, current); 36 .deliver = afs_deliver_cb_callback,
116 37 .abort_to_error = afs_abort_to_error,
117 struct rxrpc_call *call; 38 .destructor = afs_cm_destructor,
118 _SRXAFSCM_xxxx_t func; 39};
119 int die;
120
121 printk(KERN_INFO "kAFS: Started kafscmd %d\n", current->pid);
122
123 daemonize("kafscmd");
124
125 complete(&kafscmd_alive);
126
127 /* loop around looking for things to attend to */
128 do {
129 if (list_empty(&kafscmd_attention_list)) {
130 set_current_state(TASK_INTERRUPTIBLE);
131 add_wait_queue(&kafscmd_sleepq, &myself);
132
133 for (;;) {
134 set_current_state(TASK_INTERRUPTIBLE);
135 if (!list_empty(&kafscmd_attention_list) ||
136 signal_pending(current) ||
137 kafscmd_die)
138 break;
139
140 schedule();
141 }
142
143 remove_wait_queue(&kafscmd_sleepq, &myself);
144 set_current_state(TASK_RUNNING);
145 }
146
147 die = kafscmd_die;
148
149 /* dequeue the next call requiring attention */
150 call = NULL;
151 spin_lock(&kafscmd_attention_lock);
152
153 if (!list_empty(&kafscmd_attention_list)) {
154 call = list_entry(kafscmd_attention_list.next,
155 struct rxrpc_call,
156 app_attn_link);
157 list_del_init(&call->app_attn_link);
158 die = 0;
159 }
160
161 spin_unlock(&kafscmd_attention_lock);
162
163 if (call) {
164 /* act upon it */
165 _debug("@@@ Begin Attend Call %p", call);
166
167 func = call->app_user;
168 if (func)
169 func(call);
170
171 rxrpc_put_call(call);
172
173 _debug("@@@ End Attend Call %p", call);
174 }
175
176 } while(!die);
177
178 /* and that's all */
179 complete_and_exit(&kafscmd_dead, 0);
180
181} /* end kafscmd() */
182 40
183/*****************************************************************************/
184/* 41/*
185 * handle a call coming in to the cache manager 42 * CB.InitCallBackState operation type
186 * - if I want to keep the call, I must increment its usage count
187 * - the return value will be negated and passed back in an abort packet if
188 * non-zero
189 * - serialised by virtue of there only being one krxiod
190 */ 43 */
191static int afscm_new_call(struct rxrpc_call *call) 44static const struct afs_call_type afs_SRXCBInitCallBackState = {
192{ 45 .name = "CB.InitCallBackState",
193 _enter("%p{cid=%u u=%d}", 46 .deliver = afs_deliver_cb_init_call_back_state,
194 call, ntohl(call->call_id), atomic_read(&call->usage)); 47 .abort_to_error = afs_abort_to_error,
195 48 .destructor = afs_cm_destructor,
196 rxrpc_get_call(call); 49};
197
198 /* add to my current call list */
199 spin_lock(&afscm_calls_lock);
200 list_add(&call->app_link,&afscm_calls);
201 spin_unlock(&afscm_calls_lock);
202
203 _leave(" = 0");
204 return 0;
205
206} /* end afscm_new_call() */
207 50
208/*****************************************************************************/
209/* 51/*
210 * queue on the kafscmd queue for attention 52 * CB.InitCallBackState3 operation type
211 */ 53 */
212static void afscm_attention(struct rxrpc_call *call) 54static const struct afs_call_type afs_SRXCBInitCallBackState3 = {
213{ 55 .name = "CB.InitCallBackState3",
214 _enter("%p{cid=%u u=%d}", 56 .deliver = afs_deliver_cb_init_call_back_state3,
215 call, ntohl(call->call_id), atomic_read(&call->usage)); 57 .abort_to_error = afs_abort_to_error,
216 58 .destructor = afs_cm_destructor,
217 spin_lock(&kafscmd_attention_lock); 59};
218
219 if (list_empty(&call->app_attn_link)) {
220 list_add_tail(&call->app_attn_link, &kafscmd_attention_list);
221 rxrpc_get_call(call);
222 }
223
224 spin_unlock(&kafscmd_attention_lock);
225
226 wake_up(&kafscmd_sleepq);
227
228 _leave(" {u=%d}", atomic_read(&call->usage));
229} /* end afscm_attention() */
230 60
231/*****************************************************************************/
232/* 61/*
233 * handle my call being aborted 62 * CB.Probe operation type
234 * - clean up, dequeue and put my ref to the call
235 */ 63 */
236static void afscm_error(struct rxrpc_call *call) 64static const struct afs_call_type afs_SRXCBProbe = {
237{ 65 .name = "CB.Probe",
238 int removed; 66 .deliver = afs_deliver_cb_probe,
239 67 .abort_to_error = afs_abort_to_error,
240 _enter("%p{est=%s ac=%u er=%d}", 68 .destructor = afs_cm_destructor,
241 call, 69};
242 rxrpc_call_error_states[call->app_err_state],
243 call->app_abort_code,
244 call->app_errno);
245
246 spin_lock(&kafscmd_attention_lock);
247
248 if (list_empty(&call->app_attn_link)) {
249 list_add_tail(&call->app_attn_link, &kafscmd_attention_list);
250 rxrpc_get_call(call);
251 }
252
253 spin_unlock(&kafscmd_attention_lock);
254
255 removed = 0;
256 spin_lock(&afscm_calls_lock);
257 if (!list_empty(&call->app_link)) {
258 list_del_init(&call->app_link);
259 removed = 1;
260 }
261 spin_unlock(&afscm_calls_lock);
262
263 if (removed)
264 rxrpc_put_call(call);
265
266 wake_up(&kafscmd_sleepq);
267 70
268 _leave(""); 71/*
269} /* end afscm_error() */ 72 * CB.GetCapabilities operation type
73 */
74static const struct afs_call_type afs_SRXCBGetCapabilites = {
75 .name = "CB.GetCapabilities",
76 .deliver = afs_deliver_cb_get_capabilities,
77 .abort_to_error = afs_abort_to_error,
78 .destructor = afs_cm_destructor,
79};
270 80
271/*****************************************************************************/
272/* 81/*
273 * map afs abort codes to/from Linux error codes 82 * route an incoming cache manager call
274 * - called with call->lock held 83 * - return T if supported, F if not
275 */ 84 */
276static void afscm_aemap(struct rxrpc_call *call) 85bool afs_cm_incoming_call(struct afs_call *call)
277{ 86{
278 switch (call->app_err_state) { 87 u32 operation_id = ntohl(call->operation_ID);
279 case RXRPC_ESTATE_LOCAL_ABORT: 88
280 call->app_abort_code = -call->app_errno; 89 _enter("{CB.OP %u}", operation_id);
281 break; 90
282 case RXRPC_ESTATE_PEER_ABORT: 91 switch (operation_id) {
283 call->app_errno = -ECONNABORTED; 92 case CBCallBack:
284 break; 93 call->type = &afs_SRXCBCallBack;
94 return true;
95 case CBInitCallBackState:
96 call->type = &afs_SRXCBInitCallBackState;
97 return true;
98 case CBInitCallBackState3:
99 call->type = &afs_SRXCBInitCallBackState3;
100 return true;
101 case CBProbe:
102 call->type = &afs_SRXCBProbe;
103 return true;
104 case CBGetCapabilities:
105 call->type = &afs_SRXCBGetCapabilites;
106 return true;
285 default: 107 default:
286 break; 108 return false;
287 } 109 }
288} /* end afscm_aemap() */ 110}
289 111
290/*****************************************************************************/
291/* 112/*
292 * start the cache manager service if not already started 113 * clean up a cache manager call
293 */ 114 */
294int afscm_start(void) 115static void afs_cm_destructor(struct afs_call *call)
295{ 116{
296 int ret; 117 _enter("");
297
298 down_write(&afscm_sem);
299 if (!afscm_usage) {
300 ret = kernel_thread(kafscmd, NULL, 0);
301 if (ret < 0)
302 goto out;
303
304 wait_for_completion(&kafscmd_alive);
305
306 ret = rxrpc_add_service(afs_transport, &AFSCM_service);
307 if (ret < 0)
308 goto kill;
309
310 afs_kafstimod_add_timer(&afs_mntpt_expiry_timer,
311 afs_mntpt_expiry_timeout * HZ);
312 }
313
314 afscm_usage++;
315 up_write(&afscm_sem);
316
317 return 0;
318
319 kill:
320 kafscmd_die = 1;
321 wake_up(&kafscmd_sleepq);
322 wait_for_completion(&kafscmd_dead);
323
324 out:
325 up_write(&afscm_sem);
326 return ret;
327 118
328} /* end afscm_start() */ 119 afs_put_server(call->server);
120 call->server = NULL;
121 kfree(call->buffer);
122 call->buffer = NULL;
123}
329 124
330/*****************************************************************************/
331/* 125/*
332 * stop the cache manager service 126 * allow the fileserver to see if the cache manager is still alive
333 */ 127 */
334void afscm_stop(void) 128static void SRXAFSCB_CallBack(struct work_struct *work)
335{ 129{
336 struct rxrpc_call *call; 130 struct afs_call *call = container_of(work, struct afs_call, work);
337 131
338 down_write(&afscm_sem); 132 _enter("");
339 133
340 BUG_ON(afscm_usage == 0); 134 /* be sure to send the reply *before* attempting to spam the AFS server
341 afscm_usage--; 135 * with FSFetchStatus requests on the vnodes with broken callbacks lest
136 * the AFS server get into a vicious cycle of trying to break further
137 * callbacks because it hadn't received completion of the CBCallBack op
138 * yet */
139 afs_send_empty_reply(call);
342 140
343 if (afscm_usage == 0) { 141 afs_break_callbacks(call->server, call->count, call->request);
344 /* don't want more incoming calls */ 142 _leave("");
345 rxrpc_del_service(afs_transport, &AFSCM_service); 143}
346
347 /* abort any calls I've still got open (the afscm_error() will
348 * dequeue them) */
349 spin_lock(&afscm_calls_lock);
350 while (!list_empty(&afscm_calls)) {
351 call = list_entry(afscm_calls.next,
352 struct rxrpc_call,
353 app_link);
354 144
355 list_del_init(&call->app_link); 145/*
356 rxrpc_get_call(call); 146 * deliver request data to a CB.CallBack call
357 spin_unlock(&afscm_calls_lock); 147 */
148static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
149 bool last)
150{
151 struct afs_callback *cb;
152 struct afs_server *server;
153 struct in_addr addr;
154 __be32 *bp;
155 u32 tmp;
156 int ret, loop;
157
158 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
159
160 switch (call->unmarshall) {
161 case 0:
162 call->offset = 0;
163 call->unmarshall++;
164
165 /* extract the FID array and its count in two steps */
166 case 1:
167 _debug("extract FID count");
168 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
169 switch (ret) {
170 case 0: break;
171 case -EAGAIN: return 0;
172 default: return ret;
173 }
358 174
359 rxrpc_call_abort(call, -ESRCH); /* abort, dequeue and 175 call->count = ntohl(call->tmp);
360 * put */ 176 _debug("FID count: %u", call->count);
177 if (call->count > AFSCBMAX)
178 return -EBADMSG;
179
180 call->buffer = kmalloc(call->count * 3 * 4, GFP_KERNEL);
181 if (!call->buffer)
182 return -ENOMEM;
183 call->offset = 0;
184 call->unmarshall++;
185
186 case 2:
187 _debug("extract FID array");
188 ret = afs_extract_data(call, skb, last, call->buffer,
189 call->count * 3 * 4);
190 switch (ret) {
191 case 0: break;
192 case -EAGAIN: return 0;
193 default: return ret;
194 }
361 195
362 _debug("nuking active call %08x.%d", 196 _debug("unmarshall FID array");
363 ntohl(call->conn->conn_id), 197 call->request = kcalloc(call->count,
364 ntohl(call->call_id)); 198 sizeof(struct afs_callback),
365 rxrpc_put_call(call); 199 GFP_KERNEL);
366 rxrpc_put_call(call); 200 if (!call->request)
201 return -ENOMEM;
202
203 cb = call->request;
204 bp = call->buffer;
205 for (loop = call->count; loop > 0; loop--, cb++) {
206 cb->fid.vid = ntohl(*bp++);
207 cb->fid.vnode = ntohl(*bp++);
208 cb->fid.unique = ntohl(*bp++);
209 cb->type = AFSCM_CB_UNTYPED;
210 }
367 211
368 spin_lock(&afscm_calls_lock); 212 call->offset = 0;
213 call->unmarshall++;
214
215 /* extract the callback array and its count in two steps */
216 case 3:
217 _debug("extract CB count");
218 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
219 switch (ret) {
220 case 0: break;
221 case -EAGAIN: return 0;
222 default: return ret;
369 } 223 }
370 spin_unlock(&afscm_calls_lock);
371 224
372 /* get rid of my daemon */ 225 tmp = ntohl(call->tmp);
373 kafscmd_die = 1; 226 _debug("CB count: %u", tmp);
374 wake_up(&kafscmd_sleepq); 227 if (tmp != call->count && tmp != 0)
375 wait_for_completion(&kafscmd_dead); 228 return -EBADMSG;
229 call->offset = 0;
230 call->unmarshall++;
231 if (tmp == 0)
232 goto empty_cb_array;
233
234 case 4:
235 _debug("extract CB array");
236 ret = afs_extract_data(call, skb, last, call->request,
237 call->count * 3 * 4);
238 switch (ret) {
239 case 0: break;
240 case -EAGAIN: return 0;
241 default: return ret;
242 }
376 243
377 /* dispose of any calls waiting for attention */ 244 _debug("unmarshall CB array");
378 spin_lock(&kafscmd_attention_lock); 245 cb = call->request;
379 while (!list_empty(&kafscmd_attention_list)) { 246 bp = call->buffer;
380 call = list_entry(kafscmd_attention_list.next, 247 for (loop = call->count; loop > 0; loop--, cb++) {
381 struct rxrpc_call, 248 cb->version = ntohl(*bp++);
382 app_attn_link); 249 cb->expiry = ntohl(*bp++);
250 cb->type = ntohl(*bp++);
251 }
383 252
384 list_del_init(&call->app_attn_link); 253 empty_cb_array:
385 spin_unlock(&kafscmd_attention_lock); 254 call->offset = 0;
255 call->unmarshall++;
386 256
387 rxrpc_put_call(call); 257 case 5:
258 _debug("trailer");
259 if (skb->len != 0)
260 return -EBADMSG;
261 break;
262 }
388 263
389 spin_lock(&kafscmd_attention_lock); 264 if (!last)
390 } 265 return 0;
391 spin_unlock(&kafscmd_attention_lock);
392 266
393 afs_kafstimod_del_timer(&afs_mntpt_expiry_timer); 267 call->state = AFS_CALL_REPLYING;
394 }
395 268
396 up_write(&afscm_sem); 269 /* we'll need the file server record as that tells us which set of
270 * vnodes to operate upon */
271 memcpy(&addr, &ip_hdr(skb)->saddr, 4);
272 server = afs_find_server(&addr);
273 if (!server)
274 return -ENOTCONN;
275 call->server = server;
397 276
398} /* end afscm_stop() */ 277 INIT_WORK(&call->work, SRXAFSCB_CallBack);
278 schedule_work(&call->work);
279 return 0;
280}
399 281
400/*****************************************************************************/
401/* 282/*
402 * handle the fileserver breaking a set of callbacks 283 * allow the fileserver to request callback state (re-)initialisation
403 */ 284 */
404static void _SRXAFSCM_CallBack(struct rxrpc_call *call) 285static void SRXAFSCB_InitCallBackState(struct work_struct *work)
405{ 286{
406 struct afs_server *server; 287 struct afs_call *call = container_of(work, struct afs_call, work);
407 size_t count, qty, tmp;
408 int ret = 0, removed;
409
410 _enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]);
411
412 server = afs_server_get_from_peer(call->conn->peer);
413
414 switch (call->app_call_state) {
415 /* we've received the last packet
416 * - drain all the data from the call and send the reply
417 */
418 case RXRPC_CSTATE_SRVR_GOT_ARGS:
419 ret = -EBADMSG;
420 qty = call->app_ready_qty;
421 if (qty < 8 || qty > 50 * (6 * 4) + 8)
422 break;
423
424 {
425 struct afs_callback *cb, *pcb;
426 int loop;
427 __be32 *fp, *bp;
428
429 fp = rxrpc_call_alloc_scratch(call, qty);
430
431 /* drag the entire argument block out to the scratch
432 * space */
433 ret = rxrpc_call_read_data(call, fp, qty, 0);
434 if (ret < 0)
435 break;
436
437 /* and unmarshall the parameter block */
438 ret = -EBADMSG;
439 count = ntohl(*fp++);
440 if (count>AFSCBMAX ||
441 (count * (3 * 4) + 8 != qty &&
442 count * (6 * 4) + 8 != qty))
443 break;
444
445 bp = fp + count*3;
446 tmp = ntohl(*bp++);
447 if (tmp > 0 && tmp != count)
448 break;
449 if (tmp == 0)
450 bp = NULL;
451
452 pcb = cb = rxrpc_call_alloc_scratch_s(
453 call, struct afs_callback);
454
455 for (loop = count - 1; loop >= 0; loop--) {
456 pcb->fid.vid = ntohl(*fp++);
457 pcb->fid.vnode = ntohl(*fp++);
458 pcb->fid.unique = ntohl(*fp++);
459 if (bp) {
460 pcb->version = ntohl(*bp++);
461 pcb->expiry = ntohl(*bp++);
462 pcb->type = ntohl(*bp++);
463 }
464 else {
465 pcb->version = 0;
466 pcb->expiry = 0;
467 pcb->type = AFSCM_CB_UNTYPED;
468 }
469 pcb++;
470 }
471
472 /* invoke the actual service routine */
473 ret = SRXAFSCM_CallBack(server, count, cb);
474 if (ret < 0)
475 break;
476 }
477 288
478 /* send the reply */ 289 _enter("{%p}", call->server);
479 ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET,
480 GFP_KERNEL, 0, &count);
481 if (ret < 0)
482 break;
483 break;
484
485 /* operation complete */
486 case RXRPC_CSTATE_COMPLETE:
487 call->app_user = NULL;
488 removed = 0;
489 spin_lock(&afscm_calls_lock);
490 if (!list_empty(&call->app_link)) {
491 list_del_init(&call->app_link);
492 removed = 1;
493 }
494 spin_unlock(&afscm_calls_lock);
495 290
496 if (removed) 291 afs_init_callback_state(call->server);
497 rxrpc_put_call(call); 292 afs_send_empty_reply(call);
498 break; 293 _leave("");
294}
499 295
500 /* operation terminated on error */ 296/*
501 case RXRPC_CSTATE_ERROR: 297 * deliver request data to a CB.InitCallBackState call
502 call->app_user = NULL; 298 */
503 break; 299static int afs_deliver_cb_init_call_back_state(struct afs_call *call,
300 struct sk_buff *skb,
301 bool last)
302{
303 struct afs_server *server;
304 struct in_addr addr;
504 305
505 default: 306 _enter(",{%u},%d", skb->len, last);
506 break;
507 }
508 307
509 if (ret < 0) 308 if (skb->len > 0)
510 rxrpc_call_abort(call, ret); 309 return -EBADMSG;
310 if (!last)
311 return 0;
511 312
512 afs_put_server(server); 313 /* no unmarshalling required */
314 call->state = AFS_CALL_REPLYING;
513 315
514 _leave(" = %d", ret); 316 /* we'll need the file server record as that tells us which set of
317 * vnodes to operate upon */
318 memcpy(&addr, &ip_hdr(skb)->saddr, 4);
319 server = afs_find_server(&addr);
320 if (!server)
321 return -ENOTCONN;
322 call->server = server;
515 323
516} /* end _SRXAFSCM_CallBack() */ 324 INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);
325 schedule_work(&call->work);
326 return 0;
327}
517 328
518/*****************************************************************************/
519/* 329/*
520 * handle the fileserver asking us to initialise our callback state 330 * deliver request data to a CB.InitCallBackState3 call
521 */ 331 */
522static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call) 332static int afs_deliver_cb_init_call_back_state3(struct afs_call *call,
333 struct sk_buff *skb,
334 bool last)
523{ 335{
524 struct afs_server *server; 336 struct afs_server *server;
525 size_t count; 337 struct in_addr addr;
526 int ret = 0, removed;
527 338
528 _enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]); 339 _enter(",{%u},%d", skb->len, last);
529 340
530 server = afs_server_get_from_peer(call->conn->peer); 341 if (!last)
342 return 0;
531 343
532 switch (call->app_call_state) { 344 /* no unmarshalling required */
533 /* we've received the last packet - drain all the data from the 345 call->state = AFS_CALL_REPLYING;
534 * call */
535 case RXRPC_CSTATE_SRVR_GOT_ARGS:
536 /* shouldn't be any args */
537 ret = -EBADMSG;
538 break;
539
540 /* send the reply when asked for it */
541 case RXRPC_CSTATE_SRVR_SND_REPLY:
542 /* invoke the actual service routine */
543 ret = SRXAFSCM_InitCallBackState(server);
544 if (ret < 0)
545 break;
546
547 ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET,
548 GFP_KERNEL, 0, &count);
549 if (ret < 0)
550 break;
551 break;
552 346
553 /* operation complete */ 347 /* we'll need the file server record as that tells us which set of
554 case RXRPC_CSTATE_COMPLETE: 348 * vnodes to operate upon */
555 call->app_user = NULL; 349 memcpy(&addr, &ip_hdr(skb)->saddr, 4);
556 removed = 0; 350 server = afs_find_server(&addr);
557 spin_lock(&afscm_calls_lock); 351 if (!server)
558 if (!list_empty(&call->app_link)) { 352 return -ENOTCONN;
559 list_del_init(&call->app_link); 353 call->server = server;
560 removed = 1;
561 }
562 spin_unlock(&afscm_calls_lock);
563 354
564 if (removed) 355 INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);
565 rxrpc_put_call(call); 356 schedule_work(&call->work);
566 break; 357 return 0;
567 358}
568 /* operation terminated on error */
569 case RXRPC_CSTATE_ERROR:
570 call->app_user = NULL;
571 break;
572
573 default:
574 break;
575 }
576
577 if (ret < 0)
578 rxrpc_call_abort(call, ret);
579
580 afs_put_server(server);
581 359
582 _leave(" = %d", ret); 360/*
361 * allow the fileserver to see if the cache manager is still alive
362 */
363static void SRXAFSCB_Probe(struct work_struct *work)
364{
365 struct afs_call *call = container_of(work, struct afs_call, work);
583 366
584} /* end _SRXAFSCM_InitCallBackState() */ 367 _enter("");
368 afs_send_empty_reply(call);
369 _leave("");
370}
585 371
586/*****************************************************************************/
587/* 372/*
588 * handle a probe from a fileserver 373 * deliver request data to a CB.Probe call
589 */ 374 */
590static void _SRXAFSCM_Probe(struct rxrpc_call *call) 375static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb,
376 bool last)
591{ 377{
592 struct afs_server *server; 378 _enter(",{%u},%d", skb->len, last);
593 size_t count;
594 int ret = 0, removed;
595
596 _enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]);
597 379
598 server = afs_server_get_from_peer(call->conn->peer); 380 if (skb->len > 0)
381 return -EBADMSG;
382 if (!last)
383 return 0;
599 384
600 switch (call->app_call_state) { 385 /* no unmarshalling required */
601 /* we've received the last packet - drain all the data from the 386 call->state = AFS_CALL_REPLYING;
602 * call */
603 case RXRPC_CSTATE_SRVR_GOT_ARGS:
604 /* shouldn't be any args */
605 ret = -EBADMSG;
606 break;
607 387
608 /* send the reply when asked for it */ 388 INIT_WORK(&call->work, SRXAFSCB_Probe);
609 case RXRPC_CSTATE_SRVR_SND_REPLY: 389 schedule_work(&call->work);
610 /* invoke the actual service routine */ 390 return 0;
611 ret = SRXAFSCM_Probe(server); 391}
612 if (ret < 0)
613 break;
614
615 ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET,
616 GFP_KERNEL, 0, &count);
617 if (ret < 0)
618 break;
619 break;
620 392
621 /* operation complete */ 393/*
622 case RXRPC_CSTATE_COMPLETE: 394 * allow the fileserver to ask about the cache manager's capabilities
623 call->app_user = NULL; 395 */
624 removed = 0; 396static void SRXAFSCB_GetCapabilities(struct work_struct *work)
625 spin_lock(&afscm_calls_lock); 397{
626 if (!list_empty(&call->app_link)) { 398 struct afs_interface *ifs;
627 list_del_init(&call->app_link); 399 struct afs_call *call = container_of(work, struct afs_call, work);
628 removed = 1; 400 int loop, nifs;
401
402 struct {
403 struct /* InterfaceAddr */ {
404 __be32 nifs;
405 __be32 uuid[11];
406 __be32 ifaddr[32];
407 __be32 netmask[32];
408 __be32 mtu[32];
409 } ia;
410 struct /* Capabilities */ {
411 __be32 capcount;
412 __be32 caps[1];
413 } cap;
414 } reply;
415
416 _enter("");
417
418 nifs = 0;
419 ifs = kcalloc(32, sizeof(*ifs), GFP_KERNEL);
420 if (ifs) {
421 nifs = afs_get_ipv4_interfaces(ifs, 32, false);
422 if (nifs < 0) {
423 kfree(ifs);
424 ifs = NULL;
425 nifs = 0;
629 } 426 }
630 spin_unlock(&afscm_calls_lock); 427 }
631 428
632 if (removed) 429 memset(&reply, 0, sizeof(reply));
633 rxrpc_put_call(call); 430 reply.ia.nifs = htonl(nifs);
634 break; 431
432 reply.ia.uuid[0] = htonl(afs_uuid.time_low);
433 reply.ia.uuid[1] = htonl(afs_uuid.time_mid);
434 reply.ia.uuid[2] = htonl(afs_uuid.time_hi_and_version);
435 reply.ia.uuid[3] = htonl((s8) afs_uuid.clock_seq_hi_and_reserved);
436 reply.ia.uuid[4] = htonl((s8) afs_uuid.clock_seq_low);
437 for (loop = 0; loop < 6; loop++)
438 reply.ia.uuid[loop + 5] = htonl((s8) afs_uuid.node[loop]);
439
440 if (ifs) {
441 for (loop = 0; loop < nifs; loop++) {
442 reply.ia.ifaddr[loop] = ifs[loop].address.s_addr;
443 reply.ia.netmask[loop] = ifs[loop].netmask.s_addr;
444 reply.ia.mtu[loop] = htonl(ifs[loop].mtu);
445 }
446 }
635 447
636 /* operation terminated on error */ 448 reply.cap.capcount = htonl(1);
637 case RXRPC_CSTATE_ERROR: 449 reply.cap.caps[0] = htonl(AFS_CAP_ERROR_TRANSLATION);
638 call->app_user = NULL; 450 afs_send_simple_reply(call, &reply, sizeof(reply));
639 break;
640 451
641 default: 452 _leave("");
642 break; 453}
643 }
644 454
645 if (ret < 0) 455/*
646 rxrpc_call_abort(call, ret); 456 * deliver request data to a CB.GetCapabilities call
457 */
458static int afs_deliver_cb_get_capabilities(struct afs_call *call,
459 struct sk_buff *skb, bool last)
460{
461 _enter(",{%u},%d", skb->len, last);
647 462
648 afs_put_server(server); 463 if (skb->len > 0)
464 return -EBADMSG;
465 if (!last)
466 return 0;
649 467
650 _leave(" = %d", ret); 468 /* no unmarshalling required */
469 call->state = AFS_CALL_REPLYING;
651 470
652} /* end _SRXAFSCM_Probe() */ 471 INIT_WORK(&call->work, SRXAFSCB_GetCapabilities);
472 schedule_work(&call->work);
473 return 0;
474}
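
The structural change in this patch is easiest to see in miniature: the old code registered an rxrpc service with a fixed table of operation handlers driven by a dedicated kafscmd thread, while the new code routes each incoming cache-manager call to a struct afs_call_type (name, deliver routine, destructor) chosen by a switch on the RPC operation ID, and defers the actual reply work to a work item (the SRXAFSCB_* functions). The stand-alone, user-space sketch below illustrates only that dispatch pattern; the names cm_call_type, route_incoming_call and the handler functions are invented for the example, not part of the kernel API (the real code uses struct afs_call_type, afs_cm_incoming_call() and the CBCallBack/CBInitCallBackState/CBProbe constants from afs_cm.h).

/* Illustrative sketch of the call-routing pattern used by the new
 * cmservice.c: pick an operation-type table entry by RPC operation ID,
 * then run its deliver routine.  All names here are hypothetical. */
#include <stdio.h>

enum { OP_CALLBACK = 204, OP_INIT_CB_STATE = 205, OP_PROBE = 206 };

struct cm_call_type {
	const char *name;
	int (*deliver)(void);	/* in the kernel: parse the request and queue the reply work */
};

static int deliver_callback(void)      { puts("CB.CallBack");          return 0; }
static int deliver_init_cb_state(void) { puts("CB.InitCallBackState"); return 0; }
static int deliver_probe(void)         { puts("CB.Probe");             return 0; }

static const struct cm_call_type cm_callback      = { "CB.CallBack",          deliver_callback };
static const struct cm_call_type cm_init_cb_state = { "CB.InitCallBackState", deliver_init_cb_state };
static const struct cm_call_type cm_probe         = { "CB.Probe",             deliver_probe };

/* Return the handler for a supported operation, or NULL for an
 * unsupported one (the kernel version returns bool and stores the
 * chosen type in the afs_call instead). */
static const struct cm_call_type *route_incoming_call(unsigned int op)
{
	switch (op) {
	case OP_CALLBACK:      return &cm_callback;
	case OP_INIT_CB_STATE: return &cm_init_cb_state;
	case OP_PROBE:         return &cm_probe;
	default:               return NULL;	/* caller aborts the call */
	}
}

int main(void)
{
	const struct cm_call_type *type = route_incoming_call(OP_PROBE);

	if (type)
		type->deliver();
	return 0;
}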