path: root/fs/afs/server.c
author	David Howells <dhowells@redhat.com>	2007-04-26 18:55:03 -0400
committer	David S. Miller <davem@davemloft.net>	2007-04-26 18:55:03 -0400
commit	08e0e7c82eeadec6f4871a386b86bf0f0fbcb4eb (patch)
tree	1c4f7e91e20e56ff2ec755e988a6ee828b1a21c0 /fs/afs/server.c
parent	651350d10f93bed7003c9a66e24cf25e0f8eed3d (diff)
[AF_RXRPC]: Make the in-kernel AFS filesystem use AF_RXRPC.
Make the in-kernel AFS filesystem use AF_RXRPC instead of the old RxRPC code.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
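For orientation, the following is a minimal, hypothetical sketch (not part of the patch) of how a caller would use the reworked server-record API introduced in the diff below: afs_lookup_server() returns a referenced record or an ERR_PTR value, and afs_put_server() drops the reference, after which the delayed reaper destroys records that stay unused for afs_server_timeout seconds. The function example_use_server() and its calling context are invented purely for illustration and assume the usual kernel headers.

/* Hypothetical illustration only: look up (or create) the record for a
 * fileserver in 'cell', use it, then drop the reference.  Built from the
 * functions added by this patch. */
static int example_use_server(struct afs_cell *cell, const struct in_addr *addr)
{
	struct afs_server *server;

	server = afs_lookup_server(cell, addr);
	if (IS_ERR(server))
		return PTR_ERR(server);	/* -ENOMEM, or -EEXIST if seen in two cells */

	/* ... issue fileserver RPCs against 'server' here ... */

	afs_put_server(server);	/* unused records are reaped later by afs_reap_server() */
	return 0;
}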
Diffstat (limited to 'fs/afs/server.c')
-rw-r--r--	fs/afs/server.c	624
1 file changed, 230 insertions(+), 394 deletions(-)
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 44b0ce53e913..bde6125c2f22 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -1,6 +1,6 @@
 /* AFS server record management
  *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -11,127 +11,205 @@
 
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <rxrpc/peer.h>
-#include <rxrpc/connection.h>
-#include "volume.h"
-#include "cell.h"
-#include "server.h"
-#include "transport.h"
-#include "vlclient.h"
-#include "kafstimod.h"
 #include "internal.h"
 
-DEFINE_SPINLOCK(afs_server_peer_lock);
+unsigned afs_server_timeout = 10;	/* server timeout in seconds */
 
-#define FS_SERVICE_ID		1	/* AFS Volume Location Service ID */
-#define VL_SERVICE_ID		52	/* AFS Volume Location Service ID */
+static void afs_reap_server(struct work_struct *);
 
-static void __afs_server_timeout(struct afs_timer *timer)
-{
-	struct afs_server *server =
-		list_entry(timer, struct afs_server, timeout);
+/* tree of all the servers, indexed by IP address */
+static struct rb_root afs_servers = RB_ROOT;
+static DEFINE_RWLOCK(afs_servers_lock);
+
+/* LRU list of all the servers not currently in use */
+static LIST_HEAD(afs_server_graveyard);
+static DEFINE_SPINLOCK(afs_server_graveyard_lock);
+static DECLARE_DELAYED_WORK(afs_server_reaper, afs_reap_server);
+
+/*
+ * install a server record in the master tree
+ */
+static int afs_install_server(struct afs_server *server)
+{
+	struct afs_server *xserver;
+	struct rb_node **pp, *p;
+	int ret;
+
+	_enter("%p", server);
 
-	_debug("SERVER TIMEOUT [%p{u=%d}]",
-	       server, atomic_read(&server->usage));
+	write_lock(&afs_servers_lock);
+
+	ret = -EEXIST;
+	pp = &afs_servers.rb_node;
+	p = NULL;
+	while (*pp) {
+		p = *pp;
+		_debug("- consider %p", p);
+		xserver = rb_entry(p, struct afs_server, master_rb);
+		if (server->addr.s_addr < xserver->addr.s_addr)
+			pp = &(*pp)->rb_left;
+		else if (server->addr.s_addr > xserver->addr.s_addr)
+			pp = &(*pp)->rb_right;
+		else
+			goto error;
+	}
 
-	afs_server_do_timeout(server);
-}
+	rb_link_node(&server->master_rb, p, pp);
+	rb_insert_color(&server->master_rb, &afs_servers);
+	ret = 0;
 
-static const struct afs_timer_ops afs_server_timer_ops = {
-	.timed_out	= __afs_server_timeout,
-};
+error:
+	write_unlock(&afs_servers_lock);
+	return ret;
+}
 
 /*
- * lookup a server record in a cell
- * - TODO: search the cell's server list
+ * allocate a new server record
  */
-int afs_server_lookup(struct afs_cell *cell, const struct in_addr *addr,
-		      struct afs_server **_server)
+static struct afs_server *afs_alloc_server(struct afs_cell *cell,
+					   const struct in_addr *addr)
 {
-	struct afs_server *server, *active, *zombie;
-	int loop;
+	struct afs_server *server;
 
-	_enter("%p,%08x,", cell, ntohl(addr->s_addr));
+	_enter("");
 
-	/* allocate and initialise a server record */
 	server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
-	if (!server) {
-		_leave(" = -ENOMEM");
-		return -ENOMEM;
+	if (server) {
+		atomic_set(&server->usage, 1);
+		server->cell = cell;
+
+		INIT_LIST_HEAD(&server->link);
+		INIT_LIST_HEAD(&server->grave);
+		init_rwsem(&server->sem);
+		spin_lock_init(&server->fs_lock);
+		server->fs_vnodes = RB_ROOT;
+		server->cb_promises = RB_ROOT;
+		spin_lock_init(&server->cb_lock);
+		init_waitqueue_head(&server->cb_break_waitq);
+		INIT_DELAYED_WORK(&server->cb_break_work,
+				  afs_dispatch_give_up_callbacks);
+
+		memcpy(&server->addr, addr, sizeof(struct in_addr));
+		server->addr.s_addr = addr->s_addr;
 	}
 
-	atomic_set(&server->usage, 1);
-
-	INIT_LIST_HEAD(&server->link);
-	init_rwsem(&server->sem);
-	INIT_LIST_HEAD(&server->fs_callq);
-	spin_lock_init(&server->fs_lock);
-	INIT_LIST_HEAD(&server->cb_promises);
-	spin_lock_init(&server->cb_lock);
-
-	for (loop = 0; loop < AFS_SERVER_CONN_LIST_SIZE; loop++)
-		server->fs_conn_cnt[loop] = 4;
+	_leave(" = %p{%d}", server, atomic_read(&server->usage));
+	return server;
+}
 
-	memcpy(&server->addr, addr, sizeof(struct in_addr));
-	server->addr.s_addr = addr->s_addr;
+/*
+ * get an FS-server record for a cell
+ */
+struct afs_server *afs_lookup_server(struct afs_cell *cell,
+				     const struct in_addr *addr)
+{
+	struct afs_server *server, *candidate;
 
-	afs_timer_init(&server->timeout, &afs_server_timer_ops);
+	_enter("%p,"NIPQUAD_FMT, cell, NIPQUAD(addr->s_addr));
 
-	/* add to the cell */
-	write_lock(&cell->sv_lock);
+	/* quick scan of the list to see if we already have the server */
+	read_lock(&cell->servers_lock);
 
-	/* check the active list */
-	list_for_each_entry(active, &cell->sv_list, link) {
-		if (active->addr.s_addr == addr->s_addr)
-			goto use_active_server;
+	list_for_each_entry(server, &cell->servers, link) {
+		if (server->addr.s_addr == addr->s_addr)
+			goto found_server_quickly;
 	}
+	read_unlock(&cell->servers_lock);
 
-	/* check the inactive list */
-	spin_lock(&cell->sv_gylock);
-	list_for_each_entry(zombie, &cell->sv_graveyard, link) {
-		if (zombie->addr.s_addr == addr->s_addr)
-			goto resurrect_server;
+	candidate = afs_alloc_server(cell, addr);
+	if (!candidate) {
+		_leave(" = -ENOMEM");
+		return ERR_PTR(-ENOMEM);
 	}
-	spin_unlock(&cell->sv_gylock);
 
-	afs_get_cell(cell);
-	server->cell = cell;
-	list_add_tail(&server->link, &cell->sv_list);
+	write_lock(&cell->servers_lock);
 
-	write_unlock(&cell->sv_lock);
+	/* check the cell's server list again */
+	list_for_each_entry(server, &cell->servers, link) {
+		if (server->addr.s_addr == addr->s_addr)
+			goto found_server;
+	}
+
+	_debug("new");
+	server = candidate;
+	if (afs_install_server(server) < 0)
+		goto server_in_two_cells;
 
-	*_server = server;
-	_leave(" = 0 (%p)", server);
-	return 0;
+	afs_get_cell(cell);
+	list_add_tail(&server->link, &cell->servers);
+
+	write_unlock(&cell->servers_lock);
+	_leave(" = %p{%d}", server, atomic_read(&server->usage));
+	return server;
+
+	/* found a matching server quickly */
+found_server_quickly:
+	_debug("found quickly");
+	afs_get_server(server);
+	read_unlock(&cell->servers_lock);
+no_longer_unused:
+	if (!list_empty(&server->grave)) {
+		spin_lock(&afs_server_graveyard_lock);
+		list_del_init(&server->grave);
+		spin_unlock(&afs_server_graveyard_lock);
+	}
+	_leave(" = %p{%d}", server, atomic_read(&server->usage));
+	return server;
+
+	/* found a matching server on the second pass */
+found_server:
+	_debug("found");
+	afs_get_server(server);
+	write_unlock(&cell->servers_lock);
+	kfree(candidate);
+	goto no_longer_unused;
+
+	/* found a server that seems to be in two cells */
+server_in_two_cells:
+	write_unlock(&cell->servers_lock);
+	kfree(candidate);
+	printk(KERN_NOTICE "kAFS:"
+	       " Server "NIPQUAD_FMT" appears to be in two cells\n",
+	       NIPQUAD(*addr));
+	_leave(" = -EEXIST");
+	return ERR_PTR(-EEXIST);
+}
 
-	/* found a matching active server */
-use_active_server:
-	_debug("active server");
-	afs_get_server(active);
-	write_unlock(&cell->sv_lock);
+/*
+ * look up a server by its IP address
+ */
+struct afs_server *afs_find_server(const struct in_addr *_addr)
+{
+	struct afs_server *server = NULL;
+	struct rb_node *p;
+	struct in_addr addr = *_addr;
 
-	kfree(server);
+	_enter(NIPQUAD_FMT, NIPQUAD(addr.s_addr));
 
-	*_server = active;
-	_leave(" = 0 (%p)", active);
-	return 0;
+	read_lock(&afs_servers_lock);
 
-	/* found a matching server in the graveyard, so resurrect it and
-	 * dispose of the new record */
-resurrect_server:
-	_debug("resurrecting server");
+	p = afs_servers.rb_node;
+	while (p) {
+		server = rb_entry(p, struct afs_server, master_rb);
 
-	list_move_tail(&zombie->link, &cell->sv_list);
-	afs_get_server(zombie);
-	afs_kafstimod_del_timer(&zombie->timeout);
-	spin_unlock(&cell->sv_gylock);
-	write_unlock(&cell->sv_lock);
+		_debug("- consider %p", p);
 
-	kfree(server);
+		if (addr.s_addr < server->addr.s_addr) {
+			p = p->rb_left;
+		} else if (addr.s_addr > server->addr.s_addr) {
+			p = p->rb_right;
+		} else {
+			afs_get_server(server);
+			goto found;
+		}
+	}
 
-	*_server = zombie;
-	_leave(" = 0 (%p)", zombie);
-	return 0;
+	server = NULL;
+found:
+	read_unlock(&afs_servers_lock);
+	ASSERTIFCMP(server, server->addr.s_addr, ==, addr.s_addr);
+	_leave(" = %p", server);
+	return server;
 }
 
 /*
@@ -140,347 +218,105 @@ resurrect_server:
  */
 void afs_put_server(struct afs_server *server)
 {
-	struct afs_cell *cell;
-
 	if (!server)
 		return;
 
-	_enter("%p", server);
-
-	cell = server->cell;
+	_enter("%p{%d}", server, atomic_read(&server->usage));
 
-	/* sanity check */
-	BUG_ON(atomic_read(&server->usage) <= 0);
-
-	/* to prevent a race, the decrement and the dequeue must be effectively
-	 * atomic */
-	write_lock(&cell->sv_lock);
+	ASSERTCMP(atomic_read(&server->usage), >, 0);
 
 	if (likely(!atomic_dec_and_test(&server->usage))) {
-		write_unlock(&cell->sv_lock);
 		_leave("");
 		return;
 	}
 
-	spin_lock(&cell->sv_gylock);
-	list_move_tail(&server->link, &cell->sv_graveyard);
-
-	/* time out in 10 secs */
-	afs_kafstimod_add_timer(&server->timeout, 10 * HZ);
-
-	spin_unlock(&cell->sv_gylock);
-	write_unlock(&cell->sv_lock);
+	afs_flush_callback_breaks(server);
 
-	_leave(" [killed]");
+	spin_lock(&afs_server_graveyard_lock);
+	if (atomic_read(&server->usage) == 0) {
+		list_move_tail(&server->grave, &afs_server_graveyard);
+		server->time_of_death = get_seconds();
+		schedule_delayed_work(&afs_server_reaper,
+				      afs_server_timeout * HZ);
+	}
+	spin_unlock(&afs_server_graveyard_lock);
+	_leave(" [dead]");
 }
 
 /*
- * timeout server record
- * - removes from the cell's graveyard if the usage count is zero
+ * destroy a dead server
  */
-void afs_server_do_timeout(struct afs_server *server)
+static void afs_destroy_server(struct afs_server *server)
 {
-	struct rxrpc_peer *peer;
-	struct afs_cell *cell;
-	int loop;
-
 	_enter("%p", server);
 
-	cell = server->cell;
-
-	BUG_ON(atomic_read(&server->usage) < 0);
-
-	/* remove from graveyard if still dead */
-	spin_lock(&cell->vl_gylock);
-	if (atomic_read(&server->usage) == 0)
-		list_del_init(&server->link);
-	else
-		server = NULL;
-	spin_unlock(&cell->vl_gylock);
-
-	if (!server) {
-		_leave("");
-		return; /* resurrected */
-	}
-
-	/* we can now destroy it properly */
-	afs_put_cell(cell);
-
-	/* uncross-point the structs under a global lock */
-	spin_lock(&afs_server_peer_lock);
-	peer = server->peer;
-	if (peer) {
-		server->peer = NULL;
-		peer->user = NULL;
-	}
-	spin_unlock(&afs_server_peer_lock);
-
-	/* finish cleaning up the server */
-	for (loop = AFS_SERVER_CONN_LIST_SIZE - 1; loop >= 0; loop--)
-		if (server->fs_conn[loop])
-			rxrpc_put_connection(server->fs_conn[loop]);
-
-	if (server->vlserver)
-		rxrpc_put_connection(server->vlserver);
+	ASSERTCMP(server->fs_vnodes.rb_node, ==, NULL);
+	ASSERTCMP(server->cb_promises.rb_node, ==, NULL);
+	ASSERTCMP(server->cb_break_head, ==, server->cb_break_tail);
+	ASSERTCMP(atomic_read(&server->cb_break_n), ==, 0);
 
+	afs_put_cell(server->cell);
 	kfree(server);
-
-	_leave(" [destroyed]");
 }
 
 /*
- * get a callslot on a connection to the fileserver on the specified server
+ * reap dead server records
  */
-int afs_server_request_callslot(struct afs_server *server,
-				struct afs_server_callslot *callslot)
+static void afs_reap_server(struct work_struct *work)
 {
-	struct afs_server_callslot *pcallslot;
-	struct rxrpc_connection *conn;
-	int nconn, ret;
-
-	_enter("%p,",server);
-
-	INIT_LIST_HEAD(&callslot->link);
-	callslot->task = current;
-	callslot->conn = NULL;
-	callslot->nconn = -1;
-	callslot->ready = 0;
+	LIST_HEAD(corpses);
+	struct afs_server *server;
+	unsigned long delay, expiry;
+	time_t now;
+
+	now = get_seconds();
+	spin_lock(&afs_server_graveyard_lock);
+
+	while (!list_empty(&afs_server_graveyard)) {
+		server = list_entry(afs_server_graveyard.next,
+				    struct afs_server, grave);
 
-	ret = 0;
-	conn = NULL;
-
-	/* get hold of a callslot first */
-	spin_lock(&server->fs_lock);
-
-	/* resurrect the server if it's death timeout has expired */
-	if (server->fs_state) {
-		if (time_before(jiffies, server->fs_dead_jif)) {
-			ret = server->fs_state;
-			spin_unlock(&server->fs_lock);
-			_leave(" = %d [still dead]", ret);
-			return ret;
+		/* the queue is ordered most dead first */
+		expiry = server->time_of_death + afs_server_timeout;
+		if (expiry > now) {
+			delay = (expiry - now) * HZ;
+			if (!schedule_delayed_work(&afs_server_reaper, delay)) {
+				cancel_delayed_work(&afs_server_reaper);
+				schedule_delayed_work(&afs_server_reaper,
+						      delay);
+			}
+			break;
 		}
 
-		server->fs_state = 0;
-	}
-
-	/* try and find a connection that has spare callslots */
-	for (nconn = 0; nconn < AFS_SERVER_CONN_LIST_SIZE; nconn++) {
-		if (server->fs_conn_cnt[nconn] > 0) {
-			server->fs_conn_cnt[nconn]--;
-			spin_unlock(&server->fs_lock);
-			callslot->nconn = nconn;
-			goto obtained_slot;
+		write_lock(&server->cell->servers_lock);
+		write_lock(&afs_servers_lock);
+		if (atomic_read(&server->usage) > 0) {
+			list_del_init(&server->grave);
+		} else {
+			list_move_tail(&server->grave, &corpses);
+			list_del_init(&server->link);
+			rb_erase(&server->master_rb, &afs_servers);
 		}
+		write_unlock(&afs_servers_lock);
+		write_unlock(&server->cell->servers_lock);
 	}
 
-	/* none were available - wait interruptibly for one to become
-	 * available */
-	set_current_state(TASK_INTERRUPTIBLE);
-	list_add_tail(&callslot->link, &server->fs_callq);
-	spin_unlock(&server->fs_lock);
-
-	while (!callslot->ready && !signal_pending(current)) {
-		schedule();
-		set_current_state(TASK_INTERRUPTIBLE);
-	}
-
-	set_current_state(TASK_RUNNING);
-
-	/* even if we were interrupted we may still be queued */
-	if (!callslot->ready) {
-		spin_lock(&server->fs_lock);
-		list_del_init(&callslot->link);
-		spin_unlock(&server->fs_lock);
-	}
-
-	nconn = callslot->nconn;
-
-	/* if interrupted, we must release any slot we also got before
-	 * returning an error */
-	if (signal_pending(current)) {
-		ret = -EINTR;
-		goto error_release;
-	}
-
-	/* if we were woken up with an error, then pass that error back to the
-	 * called */
-	if (nconn < 0) {
-		_leave(" = %d", callslot->errno);
-		return callslot->errno;
-	}
-
-	/* were we given a connection directly? */
-	if (callslot->conn) {
-		/* yes - use it */
-		_leave(" = 0 (nc=%d)", nconn);
-		return 0;
-	}
+	spin_unlock(&afs_server_graveyard_lock);
 
-	/* got a callslot, but no connection */
-obtained_slot:
-
-	/* need to get hold of the RxRPC connection */
-	down_write(&server->sem);
-
-	/* quick check to see if there's an outstanding error */
-	ret = server->fs_state;
-	if (ret)
-		goto error_release_upw;
-
-	if (server->fs_conn[nconn]) {
-		/* reuse an existing connection */
-		rxrpc_get_connection(server->fs_conn[nconn]);
-		callslot->conn = server->fs_conn[nconn];
-	} else {
-		/* create a new connection */
-		ret = rxrpc_create_connection(afs_transport,
-					      htons(7000),
-					      server->addr.s_addr,
-					      FS_SERVICE_ID,
-					      NULL,
-					      &server->fs_conn[nconn]);
-
-		if (ret < 0)
-			goto error_release_upw;
-
-		callslot->conn = server->fs_conn[0];
-		rxrpc_get_connection(callslot->conn);
+	/* now reap the corpses we've extracted */
+	while (!list_empty(&corpses)) {
+		server = list_entry(corpses.next, struct afs_server, grave);
+		list_del(&server->grave);
+		afs_destroy_server(server);
 	}
-
-	up_write(&server->sem);
-
-	_leave(" = 0");
-	return 0;
-
-	/* handle an error occurring */
-error_release_upw:
-	up_write(&server->sem);
-
-error_release:
-	/* either release the callslot or pass it along to another deserving
-	 * task */
-	spin_lock(&server->fs_lock);
-
-	if (nconn < 0) {
-		/* no callslot allocated */
-	} else if (list_empty(&server->fs_callq)) {
-		/* no one waiting */
-		server->fs_conn_cnt[nconn]++;
-		spin_unlock(&server->fs_lock);
-	} else {
-		/* someone's waiting - dequeue them and wake them up */
-		pcallslot = list_entry(server->fs_callq.next,
-				       struct afs_server_callslot, link);
-		list_del_init(&pcallslot->link);
-
-		pcallslot->errno = server->fs_state;
-		if (!pcallslot->errno) {
-			/* pass them out callslot details */
-			callslot->conn = xchg(&pcallslot->conn,
-					      callslot->conn);
-			pcallslot->nconn = nconn;
-			callslot->nconn = nconn = -1;
-		}
-		pcallslot->ready = 1;
-		wake_up_process(pcallslot->task);
-		spin_unlock(&server->fs_lock);
-	}
-
-	rxrpc_put_connection(callslot->conn);
-	callslot->conn = NULL;
-
-	_leave(" = %d", ret);
-	return ret;
 }
 
 /*
- * release a callslot back to the server
- * - transfers the RxRPC connection to the next pending callslot if possible
+ * discard all the server records for rmmod
  */
-void afs_server_release_callslot(struct afs_server *server,
-				 struct afs_server_callslot *callslot)
+void __exit afs_purge_servers(void)
 {
-	struct afs_server_callslot *pcallslot;
-
-	_enter("{ad=%08x,cnt=%u},{%d}",
-	       ntohl(server->addr.s_addr),
-	       server->fs_conn_cnt[callslot->nconn],
-	       callslot->nconn);
-
-	BUG_ON(callslot->nconn < 0);
-
-	spin_lock(&server->fs_lock);
-
-	if (list_empty(&server->fs_callq)) {
-		/* no one waiting */
-		server->fs_conn_cnt[callslot->nconn]++;
-		spin_unlock(&server->fs_lock);
-	} else {
-		/* someone's waiting - dequeue them and wake them up */
-		pcallslot = list_entry(server->fs_callq.next,
-				       struct afs_server_callslot, link);
-		list_del_init(&pcallslot->link);
-
-		pcallslot->errno = server->fs_state;
-		if (!pcallslot->errno) {
-			/* pass them out callslot details */
-			callslot->conn = xchg(&pcallslot->conn, callslot->conn);
-			pcallslot->nconn = callslot->nconn;
-			callslot->nconn = -1;
-		}
-
-		pcallslot->ready = 1;
-		wake_up_process(pcallslot->task);
-		spin_unlock(&server->fs_lock);
-	}
-
-	rxrpc_put_connection(callslot->conn);
-
-	_leave("");
-}
-
-/*
- * get a handle to a connection to the vlserver (volume location) on the
- * specified server
- */
-int afs_server_get_vlconn(struct afs_server *server,
-			  struct rxrpc_connection **_conn)
-{
-	struct rxrpc_connection *conn;
-	int ret;
-
-	_enter("%p,", server);
-
-	ret = 0;
-	conn = NULL;
-	down_read(&server->sem);
-
-	if (server->vlserver) {
-		/* reuse an existing connection */
-		rxrpc_get_connection(server->vlserver);
-		conn = server->vlserver;
-		up_read(&server->sem);
-	} else {
-		/* create a new connection */
-		up_read(&server->sem);
-		down_write(&server->sem);
-		if (!server->vlserver) {
-			ret = rxrpc_create_connection(afs_transport,
-						      htons(7003),
-						      server->addr.s_addr,
-						      VL_SERVICE_ID,
-						      NULL,
-						      &server->vlserver);
-		}
-		if (ret == 0) {
-			rxrpc_get_connection(server->vlserver);
-			conn = server->vlserver;
-		}
-		up_write(&server->sem);
-	}
-
-	*_conn = conn;
-	_leave(" = %d", ret);
-	return ret;
+	afs_server_timeout = 0;
+	cancel_delayed_work(&afs_server_reaper);
+	schedule_delayed_work(&afs_server_reaper, 0);
 }