Diffstat (limited to 'fs/afs')
36 files changed, 10055 insertions, 0 deletions
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
new file mode 100644
index 000000000000..4029c9da4b86
--- /dev/null
+++ b/fs/afs/Makefile
@@ -0,0 +1,28 @@
1 | # | ||
2 | # Makefile for Red Hat Linux AFS client. | ||
3 | # | ||
4 | |||
5 | #CFLAGS += -finstrument-functions | ||
6 | |||
7 | kafs-objs := \ | ||
8 | callback.o \ | ||
9 | cell.o \ | ||
10 | cmservice.o \ | ||
11 | dir.o \ | ||
12 | file.o \ | ||
13 | fsclient.o \ | ||
14 | inode.o \ | ||
15 | kafsasyncd.o \ | ||
16 | kafstimod.o \ | ||
17 | main.o \ | ||
18 | misc.o \ | ||
19 | mntpt.o \ | ||
20 | proc.o \ | ||
21 | server.o \ | ||
22 | super.o \ | ||
23 | vlclient.o \ | ||
24 | vlocation.o \ | ||
25 | vnode.o \ | ||
26 | volume.o | ||
27 | |||
28 | obj-$(CONFIG_AFS_FS) := kafs.o | ||
diff --git a/fs/afs/cache.h b/fs/afs/cache.h
new file mode 100644
index 000000000000..9eb7722b34d5
--- /dev/null
+++ b/fs/afs/cache.h
@@ -0,0 +1,27 @@
1 | /* cache.h: AFS local cache management interface | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_AFS_CACHE_H | ||
13 | #define _LINUX_AFS_CACHE_H | ||
14 | |||
15 | #undef AFS_CACHING_SUPPORT | ||
16 | |||
17 | #include <linux/mm.h> | ||
18 | #ifdef AFS_CACHING_SUPPORT | ||
19 | #include <linux/cachefs.h> | ||
20 | #endif | ||
21 | #include "types.h" | ||
22 | |||
23 | #ifdef __KERNEL__ | ||
24 | |||
25 | #endif /* __KERNEL__ */ | ||
26 | |||
27 | #endif /* _LINUX_AFS_CACHE_H */ | ||
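Note (not part of the patch): cache.h above deliberately compiles to an empty interface for now, because AFS_CACHING_SUPPORT is forced off with #undef before it is tested, so the cachefs include and anything guarded by it drop out of the build. A minimal sketch of that optional-feature guard, with invented names:

#undef TOY_CACHING_SUPPORT

#ifdef TOY_CACHING_SUPPORT
#include "toy_cachefs.h"	/* hypothetical header; never reached */
#endif

int main(void)
{
#ifdef TOY_CACHING_SUPPORT
	return 1;	/* compiled out while the macro stays undefined */
#else
	return 0;	/* the branch actually built today */
#endif
}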
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
new file mode 100644
index 000000000000..2fd62f89ae01
--- /dev/null
+++ b/fs/afs/callback.c
@@ -0,0 +1,168 @@
1 | /* | ||
2 | * Copyright (c) 2002 Red Hat, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software may be freely redistributed under the terms of the | ||
5 | * GNU General Public License. | ||
6 | * | ||
7 | * You should have received a copy of the GNU General Public License | ||
8 | * along with this program; if not, write to the Free Software | ||
9 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
10 | * | ||
11 | * Authors: David Woodhouse <dwmw2@cambridge.redhat.com> | ||
12 | * David Howells <dhowells@redhat.com> | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/init.h> | ||
19 | #include "server.h" | ||
20 | #include "vnode.h" | ||
21 | #include "internal.h" | ||
22 | |||
23 | /*****************************************************************************/ | ||
24 | /* | ||
25 | * allow the fileserver to request callback state (re-)initialisation | ||
26 | */ | ||
27 | int SRXAFSCM_InitCallBackState(struct afs_server *server) | ||
28 | { | ||
29 | struct list_head callbacks; | ||
30 | |||
31 | _enter("%p", server); | ||
32 | |||
33 | INIT_LIST_HEAD(&callbacks); | ||
34 | |||
35 | /* transfer the callback list from the server to a temp holding area */ | ||
36 | spin_lock(&server->cb_lock); | ||
37 | |||
38 | list_add(&callbacks, &server->cb_promises); | ||
39 | list_del_init(&server->cb_promises); | ||
40 | |||
41 | /* munch our way through the list, grabbing the inode, dropping all the | ||
42 | * locks and regetting them in the right order | ||
43 | */ | ||
44 | while (!list_empty(&callbacks)) { | ||
45 | struct afs_vnode *vnode; | ||
46 | struct inode *inode; | ||
47 | |||
48 | vnode = list_entry(callbacks.next, struct afs_vnode, cb_link); | ||
49 | list_del_init(&vnode->cb_link); | ||
50 | |||
51 | /* try and grab the inode - may fail */ | ||
52 | inode = igrab(AFS_VNODE_TO_I(vnode)); | ||
53 | if (inode) { | ||
54 | int release = 0; | ||
55 | |||
56 | spin_unlock(&server->cb_lock); | ||
57 | spin_lock(&vnode->lock); | ||
58 | |||
59 | if (vnode->cb_server == server) { | ||
60 | vnode->cb_server = NULL; | ||
61 | afs_kafstimod_del_timer(&vnode->cb_timeout); | ||
62 | spin_lock(&afs_cb_hash_lock); | ||
63 | list_del_init(&vnode->cb_hash_link); | ||
64 | spin_unlock(&afs_cb_hash_lock); | ||
65 | release = 1; | ||
66 | } | ||
67 | |||
68 | spin_unlock(&vnode->lock); | ||
69 | |||
70 | iput(inode); | ||
71 | afs_put_server(server); | ||
72 | |||
73 | spin_lock(&server->cb_lock); | ||
74 | } | ||
75 | } | ||
76 | |||
77 | spin_unlock(&server->cb_lock); | ||
78 | |||
79 | _leave(" = 0"); | ||
80 | return 0; | ||
81 | } /* end SRXAFSCM_InitCallBackState() */ | ||
82 | |||
83 | /*****************************************************************************/ | ||
84 | /* | ||
85 | * allow the fileserver to break callback promises | ||
86 | */ | ||
87 | int SRXAFSCM_CallBack(struct afs_server *server, size_t count, | ||
88 | struct afs_callback callbacks[]) | ||
89 | { | ||
90 | _enter("%p,%u,", server, count); | ||
91 | |||
92 | for (; count > 0; callbacks++, count--) { | ||
93 | struct afs_vnode *vnode = NULL; | ||
94 | struct inode *inode = NULL; | ||
95 | int valid = 0; | ||
96 | |||
97 | _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }", | ||
98 | callbacks->fid.vid, | ||
99 | callbacks->fid.vnode, | ||
100 | callbacks->fid.unique, | ||
101 | callbacks->version, | ||
102 | callbacks->expiry, | ||
103 | callbacks->type | ||
104 | ); | ||
105 | |||
106 | /* find the inode for this fid */ | ||
107 | spin_lock(&afs_cb_hash_lock); | ||
108 | |||
109 | list_for_each_entry(vnode, | ||
110 | &afs_cb_hash(server, &callbacks->fid), | ||
111 | cb_hash_link) { | ||
112 | if (memcmp(&vnode->fid, &callbacks->fid, | ||
113 | sizeof(struct afs_fid)) != 0) | ||
114 | continue; | ||
115 | |||
116 | /* right vnode, but is it same server? */ | ||
117 | if (vnode->cb_server != server) | ||
118 | break; /* no */ | ||
119 | |||
120 | /* try and nail the inode down */ | ||
121 | inode = igrab(AFS_VNODE_TO_I(vnode)); | ||
122 | break; | ||
123 | } | ||
124 | |||
125 | spin_unlock(&afs_cb_hash_lock); | ||
126 | |||
127 | if (inode) { | ||
128 | /* we've found the record for this vnode */ | ||
129 | spin_lock(&vnode->lock); | ||
130 | if (vnode->cb_server == server) { | ||
131 | /* the callback _is_ on the calling server */ | ||
132 | vnode->cb_server = NULL; | ||
133 | valid = 1; | ||
134 | |||
135 | afs_kafstimod_del_timer(&vnode->cb_timeout); | ||
136 | vnode->flags |= AFS_VNODE_CHANGED; | ||
137 | |||
138 | spin_lock(&server->cb_lock); | ||
139 | list_del_init(&vnode->cb_link); | ||
140 | spin_unlock(&server->cb_lock); | ||
141 | |||
142 | spin_lock(&afs_cb_hash_lock); | ||
143 | list_del_init(&vnode->cb_hash_link); | ||
144 | spin_unlock(&afs_cb_hash_lock); | ||
145 | } | ||
146 | spin_unlock(&vnode->lock); | ||
147 | |||
148 | if (valid) { | ||
149 | invalidate_remote_inode(inode); | ||
150 | afs_put_server(server); | ||
151 | } | ||
152 | iput(inode); | ||
153 | } | ||
154 | } | ||
155 | |||
156 | _leave(" = 0"); | ||
157 | return 0; | ||
158 | } /* end SRXAFSCM_CallBack() */ | ||
159 | |||
160 | /*****************************************************************************/ | ||
161 | /* | ||
162 | * allow the fileserver to see if the cache manager is still alive | ||
163 | */ | ||
164 | int SRXAFSCM_Probe(struct afs_server *server) | ||
165 | { | ||
166 | _debug("SRXAFSCM_Probe(%p)\n", server); | ||
167 | return 0; | ||
168 | } /* end SRXAFSCM_Probe() */ | ||
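Aside (not part of the commit): callback.c dereferences afs_fid and afs_callback fields (fid.vid, fid.vnode, fid.unique, version, expiry, type) whose real definitions live in types.h elsewhere in this patch. The sketch below only shows the shape those accesses imply, using invented names; the actual layout in types.h may differ.

#include <stdint.h>
#include <string.h>

struct afs_fid_sketch {
	uint32_t vid;		/* volume ID */
	uint32_t vnode;		/* vnode number within the volume */
	uint32_t unique;	/* uniquifier (vnode reuse counter) */
};

struct afs_callback_sketch {
	struct afs_fid_sketch fid;	/* file the promise covers */
	uint32_t version;		/* callback version */
	uint32_t expiry;		/* promise expiry time */
	uint32_t type;			/* exclusive / shared / dropped */
};

/* SRXAFSCM_CallBack() matches vnodes by comparing the whole fid, like this */
static int fid_matches(const struct afs_fid_sketch *a,
		       const struct afs_fid_sketch *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

int main(void)
{
	struct afs_callback_sketch cb = { { 0x20000001, 2, 1 }, 1, 3600, 1 };
	struct afs_fid_sketch want = { 0x20000001, 2, 1 };

	return !fid_matches(&cb.fid, &want);
}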
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
new file mode 100644
index 000000000000..009a9ae88d61
--- /dev/null
+++ b/fs/afs/cell.c
@@ -0,0 +1,569 @@
1 | /* cell.c: AFS cell and server record management | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <rxrpc/peer.h> | ||
16 | #include <rxrpc/connection.h> | ||
17 | #include "volume.h" | ||
18 | #include "cell.h" | ||
19 | #include "server.h" | ||
20 | #include "transport.h" | ||
21 | #include "vlclient.h" | ||
22 | #include "kafstimod.h" | ||
23 | #include "super.h" | ||
24 | #include "internal.h" | ||
25 | |||
26 | DECLARE_RWSEM(afs_proc_cells_sem); | ||
27 | LIST_HEAD(afs_proc_cells); | ||
28 | |||
29 | static struct list_head afs_cells = LIST_HEAD_INIT(afs_cells); | ||
30 | static DEFINE_RWLOCK(afs_cells_lock); | ||
31 | static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */ | ||
32 | static struct afs_cell *afs_cell_root; | ||
33 | |||
34 | #ifdef AFS_CACHING_SUPPORT | ||
35 | static cachefs_match_val_t afs_cell_cache_match(void *target, | ||
36 | const void *entry); | ||
37 | static void afs_cell_cache_update(void *source, void *entry); | ||
38 | |||
39 | struct cachefs_index_def afs_cache_cell_index_def = { | ||
40 | .name = "cell_ix", | ||
41 | .data_size = sizeof(struct afs_cache_cell), | ||
42 | .keys[0] = { CACHEFS_INDEX_KEYS_ASCIIZ, 64 }, | ||
43 | .match = afs_cell_cache_match, | ||
44 | .update = afs_cell_cache_update, | ||
45 | }; | ||
46 | #endif | ||
47 | |||
48 | /*****************************************************************************/ | ||
49 | /* | ||
50 | * create a cell record | ||
51 | * - "name" is the name of the cell | ||
52 | * - "vllist" is a colon separated list of IP addresses in "a.b.c.d" format | ||
53 | */ | ||
54 | int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell) | ||
55 | { | ||
56 | struct afs_cell *cell; | ||
57 | char *next; | ||
58 | int ret; | ||
59 | |||
60 | _enter("%s", name); | ||
61 | |||
62 | BUG_ON(!name); /* TODO: want to look up "this cell" in the cache */ | ||
63 | |||
64 | /* allocate and initialise a cell record */ | ||
65 | cell = kmalloc(sizeof(struct afs_cell) + strlen(name) + 1, GFP_KERNEL); | ||
66 | if (!cell) { | ||
67 | _leave(" = -ENOMEM"); | ||
68 | return -ENOMEM; | ||
69 | } | ||
70 | |||
71 | down_write(&afs_cells_sem); | ||
72 | |||
73 | memset(cell, 0, sizeof(struct afs_cell)); | ||
74 | atomic_set(&cell->usage, 0); | ||
75 | |||
76 | INIT_LIST_HEAD(&cell->link); | ||
77 | |||
78 | rwlock_init(&cell->sv_lock); | ||
79 | INIT_LIST_HEAD(&cell->sv_list); | ||
80 | INIT_LIST_HEAD(&cell->sv_graveyard); | ||
81 | spin_lock_init(&cell->sv_gylock); | ||
82 | |||
83 | init_rwsem(&cell->vl_sem); | ||
84 | INIT_LIST_HEAD(&cell->vl_list); | ||
85 | INIT_LIST_HEAD(&cell->vl_graveyard); | ||
86 | spin_lock_init(&cell->vl_gylock); | ||
87 | |||
88 | strcpy(cell->name,name); | ||
89 | |||
90 | /* fill in the VL server list from the rest of the string */ | ||
91 | ret = -EINVAL; | ||
92 | do { | ||
93 | unsigned a, b, c, d; | ||
94 | |||
95 | next = strchr(vllist, ':'); | ||
96 | if (next) | ||
97 | *next++ = 0; | ||
98 | |||
99 | if (sscanf(vllist, "%u.%u.%u.%u", &a, &b, &c, &d) != 4) | ||
100 | goto badaddr; | ||
101 | |||
102 | if (a > 255 || b > 255 || c > 255 || d > 255) | ||
103 | goto badaddr; | ||
104 | |||
105 | cell->vl_addrs[cell->vl_naddrs++].s_addr = | ||
106 | htonl((a << 24) | (b << 16) | (c << 8) | d); | ||
107 | |||
108 | if (cell->vl_naddrs >= AFS_CELL_MAX_ADDRS) | ||
109 | break; | ||
110 | |||
111 | } while(vllist = next, vllist); | ||
112 | |||
113 | /* add a proc dir for this cell */ | ||
114 | ret = afs_proc_cell_setup(cell); | ||
115 | if (ret < 0) | ||
116 | goto error; | ||
117 | |||
118 | #ifdef AFS_CACHING_SUPPORT | ||
119 | /* put it up for caching */ | ||
120 | cachefs_acquire_cookie(afs_cache_netfs.primary_index, | ||
121 | &afs_vlocation_cache_index_def, | ||
122 | cell, | ||
123 | &cell->cache); | ||
124 | #endif | ||
125 | |||
126 | /* add to the cell lists */ | ||
127 | write_lock(&afs_cells_lock); | ||
128 | list_add_tail(&cell->link, &afs_cells); | ||
129 | write_unlock(&afs_cells_lock); | ||
130 | |||
131 | down_write(&afs_proc_cells_sem); | ||
132 | list_add_tail(&cell->proc_link, &afs_proc_cells); | ||
133 | up_write(&afs_proc_cells_sem); | ||
134 | |||
135 | *_cell = cell; | ||
136 | up_write(&afs_cells_sem); | ||
137 | |||
138 | _leave(" = 0 (%p)", cell); | ||
139 | return 0; | ||
140 | |||
141 | badaddr: | ||
142 | printk(KERN_ERR "kAFS: bad VL server IP address: '%s'\n", vllist); | ||
143 | error: | ||
144 | up_write(&afs_cells_sem); | ||
145 | kfree(cell); | ||
146 | _leave(" = %d", ret); | ||
147 | return ret; | ||
148 | } /* end afs_cell_create() */ | ||
149 | |||
150 | /*****************************************************************************/ | ||
151 | /* | ||
152 | * initialise the cell database from module parameters | ||
153 | */ | ||
154 | int afs_cell_init(char *rootcell) | ||
155 | { | ||
156 | struct afs_cell *old_root, *new_root; | ||
157 | char *cp; | ||
158 | int ret; | ||
159 | |||
160 | _enter(""); | ||
161 | |||
162 | if (!rootcell) { | ||
163 | /* module is loaded with no parameters, or built statically. | ||
164 | * - in the future we might initialize cell DB here. | ||
165 | */ | ||
166 | _leave(" = 0 (but no root)"); | ||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | cp = strchr(rootcell, ':'); | ||
171 | if (!cp) { | ||
172 | printk(KERN_ERR "kAFS: no VL server IP addresses specified\n"); | ||
173 | _leave(" = %d (no colon)", -EINVAL); | ||
174 | return -EINVAL; | ||
175 | } | ||
176 | |||
177 | /* allocate a cell record for the root cell */ | ||
178 | *cp++ = 0; | ||
179 | ret = afs_cell_create(rootcell, cp, &new_root); | ||
180 | if (ret < 0) { | ||
181 | _leave(" = %d", ret); | ||
182 | return ret; | ||
183 | } | ||
184 | |||
185 | /* as afs_put_cell() takes locks by itself, we have to do | ||
186 | * a little gymnastics to be race-free. | ||
187 | */ | ||
188 | afs_get_cell(new_root); | ||
189 | |||
190 | write_lock(&afs_cells_lock); | ||
191 | while (afs_cell_root) { | ||
192 | old_root = afs_cell_root; | ||
193 | afs_cell_root = NULL; | ||
194 | write_unlock(&afs_cells_lock); | ||
195 | afs_put_cell(old_root); | ||
196 | write_lock(&afs_cells_lock); | ||
197 | } | ||
198 | afs_cell_root = new_root; | ||
199 | write_unlock(&afs_cells_lock); | ||
200 | |||
201 | _leave(" = %d", ret); | ||
202 | return ret; | ||
203 | |||
204 | } /* end afs_cell_init() */ | ||
205 | |||
206 | /*****************************************************************************/ | ||
207 | /* | ||
208 | * lookup a cell record | ||
209 | */ | ||
210 | int afs_cell_lookup(const char *name, unsigned namesz, struct afs_cell **_cell) | ||
211 | { | ||
212 | struct afs_cell *cell; | ||
213 | int ret; | ||
214 | |||
215 | _enter("\"%*.*s\",", namesz, namesz, name ? name : ""); | ||
216 | |||
217 | *_cell = NULL; | ||
218 | |||
219 | if (name) { | ||
220 | /* if the cell was named, look for it in the cell record list */ | ||
221 | ret = -ENOENT; | ||
222 | cell = NULL; | ||
223 | read_lock(&afs_cells_lock); | ||
224 | |||
225 | list_for_each_entry(cell, &afs_cells, link) { | ||
226 | if (strncmp(cell->name, name, namesz) == 0) { | ||
227 | afs_get_cell(cell); | ||
228 | goto found; | ||
229 | } | ||
230 | } | ||
231 | cell = NULL; | ||
232 | found: | ||
233 | |||
234 | read_unlock(&afs_cells_lock); | ||
235 | |||
236 | if (cell) | ||
237 | ret = 0; | ||
238 | } | ||
239 | else { | ||
240 | read_lock(&afs_cells_lock); | ||
241 | |||
242 | cell = afs_cell_root; | ||
243 | if (!cell) { | ||
244 | /* this should not happen unless user tries to mount | ||
245 | * when root cell is not set. Return an impossibly | ||
246 | * bizarre errno to alert the user. Things like | ||
247 | * ENOENT might be "more appropriate" but they happen | ||
248 | * for other reasons. | ||
249 | */ | ||
250 | ret = -EDESTADDRREQ; | ||
251 | } | ||
252 | else { | ||
253 | afs_get_cell(cell); | ||
254 | ret = 0; | ||
255 | } | ||
256 | |||
257 | read_unlock(&afs_cells_lock); | ||
258 | } | ||
259 | |||
260 | *_cell = cell; | ||
261 | _leave(" = %d (%p)", ret, cell); | ||
262 | return ret; | ||
263 | |||
264 | } /* end afs_cell_lookup() */ | ||
265 | |||
266 | /*****************************************************************************/ | ||
267 | /* | ||
268 | * try and get a cell record | ||
269 | */ | ||
270 | struct afs_cell *afs_get_cell_maybe(struct afs_cell **_cell) | ||
271 | { | ||
272 | struct afs_cell *cell; | ||
273 | |||
274 | write_lock(&afs_cells_lock); | ||
275 | |||
276 | cell = *_cell; | ||
277 | if (cell && !list_empty(&cell->link)) | ||
278 | afs_get_cell(cell); | ||
279 | else | ||
280 | cell = NULL; | ||
281 | |||
282 | write_unlock(&afs_cells_lock); | ||
283 | |||
284 | return cell; | ||
285 | } /* end afs_get_cell_maybe() */ | ||
286 | |||
287 | /*****************************************************************************/ | ||
288 | /* | ||
289 | * destroy a cell record | ||
290 | */ | ||
291 | void afs_put_cell(struct afs_cell *cell) | ||
292 | { | ||
293 | if (!cell) | ||
294 | return; | ||
295 | |||
296 | _enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name); | ||
297 | |||
298 | /* sanity check */ | ||
299 | BUG_ON(atomic_read(&cell->usage) <= 0); | ||
300 | |||
301 | /* to prevent a race, the decrement and the dequeue must be effectively | ||
302 | * atomic */ | ||
303 | write_lock(&afs_cells_lock); | ||
304 | |||
305 | if (likely(!atomic_dec_and_test(&cell->usage))) { | ||
306 | write_unlock(&afs_cells_lock); | ||
307 | _leave(""); | ||
308 | return; | ||
309 | } | ||
310 | |||
311 | write_unlock(&afs_cells_lock); | ||
312 | |||
313 | BUG_ON(!list_empty(&cell->sv_list)); | ||
314 | BUG_ON(!list_empty(&cell->sv_graveyard)); | ||
315 | BUG_ON(!list_empty(&cell->vl_list)); | ||
316 | BUG_ON(!list_empty(&cell->vl_graveyard)); | ||
317 | |||
318 | _leave(" [unused]"); | ||
319 | } /* end afs_put_cell() */ | ||
320 | |||
321 | /*****************************************************************************/ | ||
322 | /* | ||
323 | * destroy a cell record | ||
324 | */ | ||
325 | static void afs_cell_destroy(struct afs_cell *cell) | ||
326 | { | ||
327 | _enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name); | ||
328 | |||
329 | /* to prevent a race, the decrement and the dequeue must be effectively | ||
330 | * atomic */ | ||
331 | write_lock(&afs_cells_lock); | ||
332 | |||
333 | /* sanity check */ | ||
334 | BUG_ON(atomic_read(&cell->usage) != 0); | ||
335 | |||
336 | list_del_init(&cell->link); | ||
337 | |||
338 | write_unlock(&afs_cells_lock); | ||
339 | |||
340 | down_write(&afs_cells_sem); | ||
341 | |||
342 | afs_proc_cell_remove(cell); | ||
343 | |||
344 | down_write(&afs_proc_cells_sem); | ||
345 | list_del_init(&cell->proc_link); | ||
346 | up_write(&afs_proc_cells_sem); | ||
347 | |||
348 | #ifdef AFS_CACHING_SUPPORT | ||
349 | cachefs_relinquish_cookie(cell->cache, 0); | ||
350 | #endif | ||
351 | |||
352 | up_write(&afs_cells_sem); | ||
353 | |||
354 | BUG_ON(!list_empty(&cell->sv_list)); | ||
355 | BUG_ON(!list_empty(&cell->sv_graveyard)); | ||
356 | BUG_ON(!list_empty(&cell->vl_list)); | ||
357 | BUG_ON(!list_empty(&cell->vl_graveyard)); | ||
358 | |||
359 | /* finish cleaning up the cell */ | ||
360 | kfree(cell); | ||
361 | |||
362 | _leave(" [destroyed]"); | ||
363 | } /* end afs_cell_destroy() */ | ||
364 | |||
365 | /*****************************************************************************/ | ||
366 | /* | ||
367 | * lookup the server record corresponding to an Rx RPC peer | ||
368 | */ | ||
369 | int afs_server_find_by_peer(const struct rxrpc_peer *peer, | ||
370 | struct afs_server **_server) | ||
371 | { | ||
372 | struct afs_server *server; | ||
373 | struct afs_cell *cell; | ||
374 | |||
375 | _enter("%p{a=%08x},", peer, ntohl(peer->addr.s_addr)); | ||
376 | |||
377 | /* search the cell list */ | ||
378 | read_lock(&afs_cells_lock); | ||
379 | |||
380 | list_for_each_entry(cell, &afs_cells, link) { | ||
381 | |||
382 | _debug("? cell %s",cell->name); | ||
383 | |||
384 | write_lock(&cell->sv_lock); | ||
385 | |||
386 | /* check the active list */ | ||
387 | list_for_each_entry(server, &cell->sv_list, link) { | ||
388 | _debug("?? server %08x", ntohl(server->addr.s_addr)); | ||
389 | |||
390 | if (memcmp(&server->addr, &peer->addr, | ||
391 | sizeof(struct in_addr)) == 0) | ||
392 | goto found_server; | ||
393 | } | ||
394 | |||
395 | /* check the inactive list */ | ||
396 | spin_lock(&cell->sv_gylock); | ||
397 | list_for_each_entry(server, &cell->sv_graveyard, link) { | ||
398 | _debug("?? dead server %08x", | ||
399 | ntohl(server->addr.s_addr)); | ||
400 | |||
401 | if (memcmp(&server->addr, &peer->addr, | ||
402 | sizeof(struct in_addr)) == 0) | ||
403 | goto found_dead_server; | ||
404 | } | ||
405 | spin_unlock(&cell->sv_gylock); | ||
406 | |||
407 | write_unlock(&cell->sv_lock); | ||
408 | } | ||
409 | read_unlock(&afs_cells_lock); | ||
410 | |||
411 | _leave(" = -ENOENT"); | ||
412 | return -ENOENT; | ||
413 | |||
414 | /* we found it in the graveyard - resurrect it */ | ||
415 | found_dead_server: | ||
416 | list_del(&server->link); | ||
417 | list_add_tail(&server->link, &cell->sv_list); | ||
418 | afs_get_server(server); | ||
419 | afs_kafstimod_del_timer(&server->timeout); | ||
420 | spin_unlock(&cell->sv_gylock); | ||
421 | goto success; | ||
422 | |||
423 | /* we found it - increment its ref count and return it */ | ||
424 | found_server: | ||
425 | afs_get_server(server); | ||
426 | |||
427 | success: | ||
428 | write_unlock(&cell->sv_lock); | ||
429 | read_unlock(&afs_cells_lock); | ||
430 | |||
431 | *_server = server; | ||
432 | _leave(" = 0 (s=%p c=%p)", server, cell); | ||
433 | return 0; | ||
434 | |||
435 | } /* end afs_server_find_by_peer() */ | ||
436 | |||
437 | /*****************************************************************************/ | ||
438 | /* | ||
439 | * purge in-memory cell database on module unload or afs_init() failure | ||
440 | * - the timeout daemon is stopped before calling this | ||
441 | */ | ||
442 | void afs_cell_purge(void) | ||
443 | { | ||
444 | struct afs_vlocation *vlocation; | ||
445 | struct afs_cell *cell; | ||
446 | |||
447 | _enter(""); | ||
448 | |||
449 | afs_put_cell(afs_cell_root); | ||
450 | |||
451 | while (!list_empty(&afs_cells)) { | ||
452 | cell = NULL; | ||
453 | |||
454 | /* remove the next cell from the front of the list */ | ||
455 | write_lock(&afs_cells_lock); | ||
456 | |||
457 | if (!list_empty(&afs_cells)) { | ||
458 | cell = list_entry(afs_cells.next, | ||
459 | struct afs_cell, link); | ||
460 | list_del_init(&cell->link); | ||
461 | } | ||
462 | |||
463 | write_unlock(&afs_cells_lock); | ||
464 | |||
465 | if (cell) { | ||
466 | _debug("PURGING CELL %s (%d)", | ||
467 | cell->name, atomic_read(&cell->usage)); | ||
468 | |||
469 | BUG_ON(!list_empty(&cell->sv_list)); | ||
470 | BUG_ON(!list_empty(&cell->vl_list)); | ||
471 | |||
472 | /* purge the cell's VL graveyard list */ | ||
473 | _debug(" - clearing VL graveyard"); | ||
474 | |||
475 | spin_lock(&cell->vl_gylock); | ||
476 | |||
477 | while (!list_empty(&cell->vl_graveyard)) { | ||
478 | vlocation = list_entry(cell->vl_graveyard.next, | ||
479 | struct afs_vlocation, | ||
480 | link); | ||
481 | list_del_init(&vlocation->link); | ||
482 | |||
483 | afs_kafstimod_del_timer(&vlocation->timeout); | ||
484 | |||
485 | spin_unlock(&cell->vl_gylock); | ||
486 | |||
487 | afs_vlocation_do_timeout(vlocation); | ||
488 | /* TODO: race if move to use krxtimod instead | ||
489 | * of kafstimod */ | ||
490 | |||
491 | spin_lock(&cell->vl_gylock); | ||
492 | } | ||
493 | |||
494 | spin_unlock(&cell->vl_gylock); | ||
495 | |||
496 | /* purge the cell's server graveyard list */ | ||
497 | _debug(" - clearing server graveyard"); | ||
498 | |||
499 | spin_lock(&cell->sv_gylock); | ||
500 | |||
501 | while (!list_empty(&cell->sv_graveyard)) { | ||
502 | struct afs_server *server; | ||
503 | |||
504 | server = list_entry(cell->sv_graveyard.next, | ||
505 | struct afs_server, link); | ||
506 | list_del_init(&server->link); | ||
507 | |||
508 | afs_kafstimod_del_timer(&server->timeout); | ||
509 | |||
510 | spin_unlock(&cell->sv_gylock); | ||
511 | |||
512 | afs_server_do_timeout(server); | ||
513 | |||
514 | spin_lock(&cell->sv_gylock); | ||
515 | } | ||
516 | |||
517 | spin_unlock(&cell->sv_gylock); | ||
518 | |||
519 | /* now the cell should be left with no references */ | ||
520 | afs_cell_destroy(cell); | ||
521 | } | ||
522 | } | ||
523 | |||
524 | _leave(""); | ||
525 | } /* end afs_cell_purge() */ | ||
526 | |||
527 | /*****************************************************************************/ | ||
528 | /* | ||
529 | * match a cell record obtained from the cache | ||
530 | */ | ||
531 | #ifdef AFS_CACHING_SUPPORT | ||
532 | static cachefs_match_val_t afs_cell_cache_match(void *target, | ||
533 | const void *entry) | ||
534 | { | ||
535 | const struct afs_cache_cell *ccell = entry; | ||
536 | struct afs_cell *cell = target; | ||
537 | |||
538 | _enter("{%s},{%s}", ccell->name, cell->name); | ||
539 | |||
540 | if (strncmp(ccell->name, cell->name, sizeof(ccell->name)) == 0) { | ||
541 | _leave(" = SUCCESS"); | ||
542 | return CACHEFS_MATCH_SUCCESS; | ||
543 | } | ||
544 | |||
545 | _leave(" = FAILED"); | ||
546 | return CACHEFS_MATCH_FAILED; | ||
547 | } /* end afs_cell_cache_match() */ | ||
548 | #endif | ||
549 | |||
550 | /*****************************************************************************/ | ||
551 | /* | ||
552 | * update a cell record in the cache | ||
553 | */ | ||
554 | #ifdef AFS_CACHING_SUPPORT | ||
555 | static void afs_cell_cache_update(void *source, void *entry) | ||
556 | { | ||
557 | struct afs_cache_cell *ccell = entry; | ||
558 | struct afs_cell *cell = source; | ||
559 | |||
560 | _enter("%p,%p", source, entry); | ||
561 | |||
562 | strncpy(ccell->name, cell->name, sizeof(ccell->name)); | ||
563 | |||
564 | memcpy(ccell->vl_servers, | ||
565 | cell->vl_addrs, | ||
566 | min(sizeof(ccell->vl_servers), sizeof(cell->vl_addrs))); | ||
567 | |||
568 | } /* end afs_cell_cache_update() */ | ||
569 | #endif | ||
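Aside (not part of the commit): afs_cell_init() splits the rootcell string at its first colon into a cell name and a colon-separated list of dotted-quad VL server addresses, which afs_cell_create() then parses with sscanf(). A small userspace sketch of that same format, for illustration only:

#include <stdio.h>
#include <string.h>

/* Mirror of the parsing above: "<cell>:<a.b.c.d>[:<a.b.c.d>...]" */
static int parse_rootcell(char *spec)
{
	char *vllist, *next;
	unsigned a, b, c, d;

	vllist = strchr(spec, ':');
	if (!vllist)
		return -1;		/* no VL server addresses given */
	*vllist++ = '\0';

	printf("cell '%s'\n", spec);

	do {
		next = strchr(vllist, ':');
		if (next)
			*next++ = '\0';

		if (sscanf(vllist, "%u.%u.%u.%u", &a, &b, &c, &d) != 4 ||
		    a > 255 || b > 255 || c > 255 || d > 255)
			return -1;	/* bad VL server IP address */

		printf("  VL server %u.%u.%u.%u\n", a, b, c, d);
	} while ((vllist = next));

	return 0;
}

int main(void)
{
	char spec[] = "example.org:172.16.18.91:172.16.18.92";

	return parse_rootcell(spec) < 0;
}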
diff --git a/fs/afs/cell.h b/fs/afs/cell.h
new file mode 100644
index 000000000000..48349108fb00
--- /dev/null
+++ b/fs/afs/cell.h
@@ -0,0 +1,78 @@
1 | /* cell.h: AFS cell record | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_AFS_CELL_H | ||
13 | #define _LINUX_AFS_CELL_H | ||
14 | |||
15 | #include "types.h" | ||
16 | #include "cache.h" | ||
17 | |||
18 | #define AFS_CELL_MAX_ADDRS 15 | ||
19 | |||
20 | extern volatile int afs_cells_being_purged; /* T when cells are being purged by rmmod */ | ||
21 | |||
22 | /*****************************************************************************/ | ||
23 | /* | ||
24 | * entry in the cached cell catalogue | ||
25 | */ | ||
26 | struct afs_cache_cell | ||
27 | { | ||
28 | char name[64]; /* cell name (padded with NULs) */ | ||
29 | struct in_addr vl_servers[15]; /* cached cell VL servers */ | ||
30 | }; | ||
31 | |||
32 | /*****************************************************************************/ | ||
33 | /* | ||
34 | * AFS cell record | ||
35 | */ | ||
36 | struct afs_cell | ||
37 | { | ||
38 | atomic_t usage; | ||
39 | struct list_head link; /* main cell list link */ | ||
40 | struct list_head proc_link; /* /proc cell list link */ | ||
41 | struct proc_dir_entry *proc_dir; /* /proc dir for this cell */ | ||
42 | #ifdef AFS_CACHING_SUPPORT | ||
43 | struct cachefs_cookie *cache; /* caching cookie */ | ||
44 | #endif | ||
45 | |||
46 | /* server record management */ | ||
47 | rwlock_t sv_lock; /* active server list lock */ | ||
48 | struct list_head sv_list; /* active server list */ | ||
49 | struct list_head sv_graveyard; /* inactive server list */ | ||
50 | spinlock_t sv_gylock; /* inactive server list lock */ | ||
51 | |||
52 | /* volume location record management */ | ||
53 | struct rw_semaphore vl_sem; /* volume management serialisation semaphore */ | ||
54 | struct list_head vl_list; /* cell's active VL record list */ | ||
55 | struct list_head vl_graveyard; /* cell's inactive VL record list */ | ||
56 | spinlock_t vl_gylock; /* graveyard lock */ | ||
57 | unsigned short vl_naddrs; /* number of VL servers in addr list */ | ||
58 | unsigned short vl_curr_svix; /* current server index */ | ||
59 | struct in_addr vl_addrs[AFS_CELL_MAX_ADDRS]; /* cell VL server addresses */ | ||
60 | |||
61 | char name[0]; /* cell name - must go last */ | ||
62 | }; | ||
63 | |||
64 | extern int afs_cell_init(char *rootcell); | ||
65 | |||
66 | extern int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell); | ||
67 | |||
68 | extern int afs_cell_lookup(const char *name, unsigned nmsize, struct afs_cell **_cell); | ||
69 | |||
70 | #define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0) | ||
71 | |||
72 | extern struct afs_cell *afs_get_cell_maybe(struct afs_cell **_cell); | ||
73 | |||
74 | extern void afs_put_cell(struct afs_cell *cell); | ||
75 | |||
76 | extern void afs_cell_purge(void); | ||
77 | |||
78 | #endif /* _LINUX_AFS_CELL_H */ | ||
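Aside (not part of the commit): every lookup or create in cell.h hands back a counted reference; afs_get_cell() is a bare atomic increment and afs_put_cell() only retires the record on the final put. A toy userspace model of that usage-count idiom follows; the locking and graveyard lists used by the real code are omitted.

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct toy_cell {
	atomic_int usage;
	char name[64];
};

static void toy_get_cell(struct toy_cell *cell)
{
	atomic_fetch_add(&cell->usage, 1);
}

static void toy_put_cell(struct toy_cell *cell)
{
	/* only the 1 -> 0 transition may free/retire the record */
	if (atomic_fetch_sub(&cell->usage, 1) == 1)
		free(cell);
}

int main(void)
{
	struct toy_cell *cell = calloc(1, sizeof(*cell));

	atomic_init(&cell->usage, 1);
	strcpy(cell->name, "example.org");

	toy_get_cell(cell);	/* e.g. a lookup taking an extra ref */
	toy_put_cell(cell);	/* that user is done with it */
	toy_put_cell(cell);	/* original ref dropped: record freed */
	return 0;
}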
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
new file mode 100644
index 000000000000..0a57fd7c726f
--- /dev/null
+++ b/fs/afs/cmservice.c
@@ -0,0 +1,652 @@
1 | /* cmservice.c: AFS Cache Manager Service | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/completion.h> | ||
16 | #include "server.h" | ||
17 | #include "cell.h" | ||
18 | #include "transport.h" | ||
19 | #include <rxrpc/rxrpc.h> | ||
20 | #include <rxrpc/transport.h> | ||
21 | #include <rxrpc/connection.h> | ||
22 | #include <rxrpc/call.h> | ||
23 | #include "cmservice.h" | ||
24 | #include "internal.h" | ||
25 | |||
26 | static unsigned afscm_usage; /* AFS cache manager usage count */ | ||
27 | static struct rw_semaphore afscm_sem; /* AFS cache manager start/stop semaphore */ | ||
28 | |||
29 | static int afscm_new_call(struct rxrpc_call *call); | ||
30 | static void afscm_attention(struct rxrpc_call *call); | ||
31 | static void afscm_error(struct rxrpc_call *call); | ||
32 | static void afscm_aemap(struct rxrpc_call *call); | ||
33 | |||
34 | static void _SRXAFSCM_CallBack(struct rxrpc_call *call); | ||
35 | static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call); | ||
36 | static void _SRXAFSCM_Probe(struct rxrpc_call *call); | ||
37 | |||
38 | typedef void (*_SRXAFSCM_xxxx_t)(struct rxrpc_call *call); | ||
39 | |||
40 | static const struct rxrpc_operation AFSCM_ops[] = { | ||
41 | { | ||
42 | .id = 204, | ||
43 | .asize = RXRPC_APP_MARK_EOF, | ||
44 | .name = "CallBack", | ||
45 | .user = _SRXAFSCM_CallBack, | ||
46 | }, | ||
47 | { | ||
48 | .id = 205, | ||
49 | .asize = RXRPC_APP_MARK_EOF, | ||
50 | .name = "InitCallBackState", | ||
51 | .user = _SRXAFSCM_InitCallBackState, | ||
52 | }, | ||
53 | { | ||
54 | .id = 206, | ||
55 | .asize = RXRPC_APP_MARK_EOF, | ||
56 | .name = "Probe", | ||
57 | .user = _SRXAFSCM_Probe, | ||
58 | }, | ||
59 | #if 0 | ||
60 | { | ||
61 | .id = 207, | ||
62 | .asize = RXRPC_APP_MARK_EOF, | ||
63 | .name = "GetLock", | ||
64 | .user = _SRXAFSCM_GetLock, | ||
65 | }, | ||
66 | { | ||
67 | .id = 208, | ||
68 | .asize = RXRPC_APP_MARK_EOF, | ||
69 | .name = "GetCE", | ||
70 | .user = _SRXAFSCM_GetCE, | ||
71 | }, | ||
72 | { | ||
73 | .id = 209, | ||
74 | .asize = RXRPC_APP_MARK_EOF, | ||
75 | .name = "GetXStatsVersion", | ||
76 | .user = _SRXAFSCM_GetXStatsVersion, | ||
77 | }, | ||
78 | { | ||
79 | .id = 210, | ||
80 | .asize = RXRPC_APP_MARK_EOF, | ||
81 | .name = "GetXStats", | ||
82 | .user = _SRXAFSCM_GetXStats, | ||
83 | } | ||
84 | #endif | ||
85 | }; | ||
86 | |||
87 | static struct rxrpc_service AFSCM_service = { | ||
88 | .name = "AFS/CM", | ||
89 | .owner = THIS_MODULE, | ||
90 | .link = LIST_HEAD_INIT(AFSCM_service.link), | ||
91 | .new_call = afscm_new_call, | ||
92 | .service_id = 1, | ||
93 | .attn_func = afscm_attention, | ||
94 | .error_func = afscm_error, | ||
95 | .aemap_func = afscm_aemap, | ||
96 | .ops_begin = &AFSCM_ops[0], | ||
97 | .ops_end = &AFSCM_ops[sizeof(AFSCM_ops) / sizeof(AFSCM_ops[0])], | ||
98 | }; | ||
99 | |||
100 | static DECLARE_COMPLETION(kafscmd_alive); | ||
101 | static DECLARE_COMPLETION(kafscmd_dead); | ||
102 | static DECLARE_WAIT_QUEUE_HEAD(kafscmd_sleepq); | ||
103 | static LIST_HEAD(kafscmd_attention_list); | ||
104 | static LIST_HEAD(afscm_calls); | ||
105 | static DEFINE_SPINLOCK(afscm_calls_lock); | ||
106 | static DEFINE_SPINLOCK(kafscmd_attention_lock); | ||
107 | static int kafscmd_die; | ||
108 | |||
109 | /*****************************************************************************/ | ||
110 | /* | ||
111 | * AFS Cache Manager kernel thread | ||
112 | */ | ||
113 | static int kafscmd(void *arg) | ||
114 | { | ||
115 | DECLARE_WAITQUEUE(myself, current); | ||
116 | |||
117 | struct rxrpc_call *call; | ||
118 | _SRXAFSCM_xxxx_t func; | ||
119 | int die; | ||
120 | |||
121 | printk("kAFS: Started kafscmd %d\n", current->pid); | ||
122 | |||
123 | daemonize("kafscmd"); | ||
124 | |||
125 | complete(&kafscmd_alive); | ||
126 | |||
127 | /* loop around looking for things to attend to */ | ||
128 | do { | ||
129 | if (list_empty(&kafscmd_attention_list)) { | ||
130 | set_current_state(TASK_INTERRUPTIBLE); | ||
131 | add_wait_queue(&kafscmd_sleepq, &myself); | ||
132 | |||
133 | for (;;) { | ||
134 | set_current_state(TASK_INTERRUPTIBLE); | ||
135 | if (!list_empty(&kafscmd_attention_list) || | ||
136 | signal_pending(current) || | ||
137 | kafscmd_die) | ||
138 | break; | ||
139 | |||
140 | schedule(); | ||
141 | } | ||
142 | |||
143 | remove_wait_queue(&kafscmd_sleepq, &myself); | ||
144 | set_current_state(TASK_RUNNING); | ||
145 | } | ||
146 | |||
147 | die = kafscmd_die; | ||
148 | |||
149 | /* dequeue the next call requiring attention */ | ||
150 | call = NULL; | ||
151 | spin_lock(&kafscmd_attention_lock); | ||
152 | |||
153 | if (!list_empty(&kafscmd_attention_list)) { | ||
154 | call = list_entry(kafscmd_attention_list.next, | ||
155 | struct rxrpc_call, | ||
156 | app_attn_link); | ||
157 | list_del_init(&call->app_attn_link); | ||
158 | die = 0; | ||
159 | } | ||
160 | |||
161 | spin_unlock(&kafscmd_attention_lock); | ||
162 | |||
163 | if (call) { | ||
164 | /* act upon it */ | ||
165 | _debug("@@@ Begin Attend Call %p", call); | ||
166 | |||
167 | func = call->app_user; | ||
168 | if (func) | ||
169 | func(call); | ||
170 | |||
171 | rxrpc_put_call(call); | ||
172 | |||
173 | _debug("@@@ End Attend Call %p", call); | ||
174 | } | ||
175 | |||
176 | } while(!die); | ||
177 | |||
178 | /* and that's all */ | ||
179 | complete_and_exit(&kafscmd_dead, 0); | ||
180 | |||
181 | } /* end kafscmd() */ | ||
182 | |||
183 | /*****************************************************************************/ | ||
184 | /* | ||
185 | * handle a call coming in to the cache manager | ||
186 | * - if I want to keep the call, I must increment its usage count | ||
187 | * - the return value will be negated and passed back in an abort packet if | ||
188 | * non-zero | ||
189 | * - serialised by virtue of there only being one krxiod | ||
190 | */ | ||
191 | static int afscm_new_call(struct rxrpc_call *call) | ||
192 | { | ||
193 | _enter("%p{cid=%u u=%d}", | ||
194 | call, ntohl(call->call_id), atomic_read(&call->usage)); | ||
195 | |||
196 | rxrpc_get_call(call); | ||
197 | |||
198 | /* add to my current call list */ | ||
199 | spin_lock(&afscm_calls_lock); | ||
200 | list_add(&call->app_link,&afscm_calls); | ||
201 | spin_unlock(&afscm_calls_lock); | ||
202 | |||
203 | _leave(" = 0"); | ||
204 | return 0; | ||
205 | |||
206 | } /* end afscm_new_call() */ | ||
207 | |||
208 | /*****************************************************************************/ | ||
209 | /* | ||
210 | * queue on the kafscmd queue for attention | ||
211 | */ | ||
212 | static void afscm_attention(struct rxrpc_call *call) | ||
213 | { | ||
214 | _enter("%p{cid=%u u=%d}", | ||
215 | call, ntohl(call->call_id), atomic_read(&call->usage)); | ||
216 | |||
217 | spin_lock(&kafscmd_attention_lock); | ||
218 | |||
219 | if (list_empty(&call->app_attn_link)) { | ||
220 | list_add_tail(&call->app_attn_link, &kafscmd_attention_list); | ||
221 | rxrpc_get_call(call); | ||
222 | } | ||
223 | |||
224 | spin_unlock(&kafscmd_attention_lock); | ||
225 | |||
226 | wake_up(&kafscmd_sleepq); | ||
227 | |||
228 | _leave(" {u=%d}", atomic_read(&call->usage)); | ||
229 | } /* end afscm_attention() */ | ||
230 | |||
231 | /*****************************************************************************/ | ||
232 | /* | ||
233 | * handle my call being aborted | ||
234 | * - clean up, dequeue and put my ref to the call | ||
235 | */ | ||
236 | static void afscm_error(struct rxrpc_call *call) | ||
237 | { | ||
238 | int removed; | ||
239 | |||
240 | _enter("%p{est=%s ac=%u er=%d}", | ||
241 | call, | ||
242 | rxrpc_call_error_states[call->app_err_state], | ||
243 | call->app_abort_code, | ||
244 | call->app_errno); | ||
245 | |||
246 | spin_lock(&kafscmd_attention_lock); | ||
247 | |||
248 | if (list_empty(&call->app_attn_link)) { | ||
249 | list_add_tail(&call->app_attn_link, &kafscmd_attention_list); | ||
250 | rxrpc_get_call(call); | ||
251 | } | ||
252 | |||
253 | spin_unlock(&kafscmd_attention_lock); | ||
254 | |||
255 | removed = 0; | ||
256 | spin_lock(&afscm_calls_lock); | ||
257 | if (!list_empty(&call->app_link)) { | ||
258 | list_del_init(&call->app_link); | ||
259 | removed = 1; | ||
260 | } | ||
261 | spin_unlock(&afscm_calls_lock); | ||
262 | |||
263 | if (removed) | ||
264 | rxrpc_put_call(call); | ||
265 | |||
266 | wake_up(&kafscmd_sleepq); | ||
267 | |||
268 | _leave(""); | ||
269 | } /* end afscm_error() */ | ||
270 | |||
271 | /*****************************************************************************/ | ||
272 | /* | ||
273 | * map afs abort codes to/from Linux error codes | ||
274 | * - called with call->lock held | ||
275 | */ | ||
276 | static void afscm_aemap(struct rxrpc_call *call) | ||
277 | { | ||
278 | switch (call->app_err_state) { | ||
279 | case RXRPC_ESTATE_LOCAL_ABORT: | ||
280 | call->app_abort_code = -call->app_errno; | ||
281 | break; | ||
282 | case RXRPC_ESTATE_PEER_ABORT: | ||
283 | call->app_errno = -ECONNABORTED; | ||
284 | break; | ||
285 | default: | ||
286 | break; | ||
287 | } | ||
288 | } /* end afscm_aemap() */ | ||
289 | |||
290 | /*****************************************************************************/ | ||
291 | /* | ||
292 | * start the cache manager service if not already started | ||
293 | */ | ||
294 | int afscm_start(void) | ||
295 | { | ||
296 | int ret; | ||
297 | |||
298 | down_write(&afscm_sem); | ||
299 | if (!afscm_usage) { | ||
300 | ret = kernel_thread(kafscmd, NULL, 0); | ||
301 | if (ret < 0) | ||
302 | goto out; | ||
303 | |||
304 | wait_for_completion(&kafscmd_alive); | ||
305 | |||
306 | ret = rxrpc_add_service(afs_transport, &AFSCM_service); | ||
307 | if (ret < 0) | ||
308 | goto kill; | ||
309 | |||
310 | afs_kafstimod_add_timer(&afs_mntpt_expiry_timer, | ||
311 | afs_mntpt_expiry_timeout * HZ); | ||
312 | } | ||
313 | |||
314 | afscm_usage++; | ||
315 | up_write(&afscm_sem); | ||
316 | |||
317 | return 0; | ||
318 | |||
319 | kill: | ||
320 | kafscmd_die = 1; | ||
321 | wake_up(&kafscmd_sleepq); | ||
322 | wait_for_completion(&kafscmd_dead); | ||
323 | |||
324 | out: | ||
325 | up_write(&afscm_sem); | ||
326 | return ret; | ||
327 | |||
328 | } /* end afscm_start() */ | ||
329 | |||
330 | /*****************************************************************************/ | ||
331 | /* | ||
332 | * stop the cache manager service | ||
333 | */ | ||
334 | void afscm_stop(void) | ||
335 | { | ||
336 | struct rxrpc_call *call; | ||
337 | |||
338 | down_write(&afscm_sem); | ||
339 | |||
340 | BUG_ON(afscm_usage == 0); | ||
341 | afscm_usage--; | ||
342 | |||
343 | if (afscm_usage == 0) { | ||
344 | /* don't want more incoming calls */ | ||
345 | rxrpc_del_service(afs_transport, &AFSCM_service); | ||
346 | |||
347 | /* abort any calls I've still got open (the afscm_error() will | ||
348 | * dequeue them) */ | ||
349 | spin_lock(&afscm_calls_lock); | ||
350 | while (!list_empty(&afscm_calls)) { | ||
351 | call = list_entry(afscm_calls.next, | ||
352 | struct rxrpc_call, | ||
353 | app_link); | ||
354 | |||
355 | list_del_init(&call->app_link); | ||
356 | rxrpc_get_call(call); | ||
357 | spin_unlock(&afscm_calls_lock); | ||
358 | |||
359 | rxrpc_call_abort(call, -ESRCH); /* abort, dequeue and | ||
360 | * put */ | ||
361 | |||
362 | _debug("nuking active call %08x.%d", | ||
363 | ntohl(call->conn->conn_id), | ||
364 | ntohl(call->call_id)); | ||
365 | rxrpc_put_call(call); | ||
366 | rxrpc_put_call(call); | ||
367 | |||
368 | spin_lock(&afscm_calls_lock); | ||
369 | } | ||
370 | spin_unlock(&afscm_calls_lock); | ||
371 | |||
372 | /* get rid of my daemon */ | ||
373 | kafscmd_die = 1; | ||
374 | wake_up(&kafscmd_sleepq); | ||
375 | wait_for_completion(&kafscmd_dead); | ||
376 | |||
377 | /* dispose of any calls waiting for attention */ | ||
378 | spin_lock(&kafscmd_attention_lock); | ||
379 | while (!list_empty(&kafscmd_attention_list)) { | ||
380 | call = list_entry(kafscmd_attention_list.next, | ||
381 | struct rxrpc_call, | ||
382 | app_attn_link); | ||
383 | |||
384 | list_del_init(&call->app_attn_link); | ||
385 | spin_unlock(&kafscmd_attention_lock); | ||
386 | |||
387 | rxrpc_put_call(call); | ||
388 | |||
389 | spin_lock(&kafscmd_attention_lock); | ||
390 | } | ||
391 | spin_unlock(&kafscmd_attention_lock); | ||
392 | |||
393 | afs_kafstimod_del_timer(&afs_mntpt_expiry_timer); | ||
394 | } | ||
395 | |||
396 | up_write(&afscm_sem); | ||
397 | |||
398 | } /* end afscm_stop() */ | ||
399 | |||
400 | /*****************************************************************************/ | ||
401 | /* | ||
402 | * handle the fileserver breaking a set of callbacks | ||
403 | */ | ||
404 | static void _SRXAFSCM_CallBack(struct rxrpc_call *call) | ||
405 | { | ||
406 | struct afs_server *server; | ||
407 | size_t count, qty, tmp; | ||
408 | int ret = 0, removed; | ||
409 | |||
410 | _enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]); | ||
411 | |||
412 | server = afs_server_get_from_peer(call->conn->peer); | ||
413 | |||
414 | switch (call->app_call_state) { | ||
415 | /* we've received the last packet | ||
416 | * - drain all the data from the call and send the reply | ||
417 | */ | ||
418 | case RXRPC_CSTATE_SRVR_GOT_ARGS: | ||
419 | ret = -EBADMSG; | ||
420 | qty = call->app_ready_qty; | ||
421 | if (qty < 8 || qty > 50 * (6 * 4) + 8) | ||
422 | break; | ||
423 | |||
424 | { | ||
425 | struct afs_callback *cb, *pcb; | ||
426 | int loop; | ||
427 | __be32 *fp, *bp; | ||
428 | |||
429 | fp = rxrpc_call_alloc_scratch(call, qty); | ||
430 | |||
431 | /* drag the entire argument block out to the scratch | ||
432 | * space */ | ||
433 | ret = rxrpc_call_read_data(call, fp, qty, 0); | ||
434 | if (ret < 0) | ||
435 | break; | ||
436 | |||
437 | /* and unmarshall the parameter block */ | ||
438 | ret = -EBADMSG; | ||
439 | count = ntohl(*fp++); | ||
440 | if (count>AFSCBMAX || | ||
441 | (count * (3 * 4) + 8 != qty && | ||
442 | count * (6 * 4) + 8 != qty)) | ||
443 | break; | ||
444 | |||
445 | bp = fp + count*3; | ||
446 | tmp = ntohl(*bp++); | ||
447 | if (tmp > 0 && tmp != count) | ||
448 | break; | ||
449 | if (tmp == 0) | ||
450 | bp = NULL; | ||
451 | |||
452 | pcb = cb = rxrpc_call_alloc_scratch_s( | ||
453 | call, struct afs_callback); | ||
454 | |||
455 | for (loop = count - 1; loop >= 0; loop--) { | ||
456 | pcb->fid.vid = ntohl(*fp++); | ||
457 | pcb->fid.vnode = ntohl(*fp++); | ||
458 | pcb->fid.unique = ntohl(*fp++); | ||
459 | if (bp) { | ||
460 | pcb->version = ntohl(*bp++); | ||
461 | pcb->expiry = ntohl(*bp++); | ||
462 | pcb->type = ntohl(*bp++); | ||
463 | } | ||
464 | else { | ||
465 | pcb->version = 0; | ||
466 | pcb->expiry = 0; | ||
467 | pcb->type = AFSCM_CB_UNTYPED; | ||
468 | } | ||
469 | pcb++; | ||
470 | } | ||
471 | |||
472 | /* invoke the actual service routine */ | ||
473 | ret = SRXAFSCM_CallBack(server, count, cb); | ||
474 | if (ret < 0) | ||
475 | break; | ||
476 | } | ||
477 | |||
478 | /* send the reply */ | ||
479 | ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET, | ||
480 | GFP_KERNEL, 0, &count); | ||
481 | if (ret < 0) | ||
482 | break; | ||
483 | break; | ||
484 | |||
485 | /* operation complete */ | ||
486 | case RXRPC_CSTATE_COMPLETE: | ||
487 | call->app_user = NULL; | ||
488 | removed = 0; | ||
489 | spin_lock(&afscm_calls_lock); | ||
490 | if (!list_empty(&call->app_link)) { | ||
491 | list_del_init(&call->app_link); | ||
492 | removed = 1; | ||
493 | } | ||
494 | spin_unlock(&afscm_calls_lock); | ||
495 | |||
496 | if (removed) | ||
497 | rxrpc_put_call(call); | ||
498 | break; | ||
499 | |||
500 | /* operation terminated on error */ | ||
501 | case RXRPC_CSTATE_ERROR: | ||
502 | call->app_user = NULL; | ||
503 | break; | ||
504 | |||
505 | default: | ||
506 | break; | ||
507 | } | ||
508 | |||
509 | if (ret < 0) | ||
510 | rxrpc_call_abort(call, ret); | ||
511 | |||
512 | afs_put_server(server); | ||
513 | |||
514 | _leave(" = %d", ret); | ||
515 | |||
516 | } /* end _SRXAFSCM_CallBack() */ | ||
517 | |||
518 | /*****************************************************************************/ | ||
519 | /* | ||
520 | * handle the fileserver asking us to initialise our callback state | ||
521 | */ | ||
522 | static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call) | ||
523 | { | ||
524 | struct afs_server *server; | ||
525 | size_t count; | ||
526 | int ret = 0, removed; | ||
527 | |||
528 | _enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]); | ||
529 | |||
530 | server = afs_server_get_from_peer(call->conn->peer); | ||
531 | |||
532 | switch (call->app_call_state) { | ||
533 | /* we've received the last packet - drain all the data from the | ||
534 | * call */ | ||
535 | case RXRPC_CSTATE_SRVR_GOT_ARGS: | ||
536 | /* shouldn't be any args */ | ||
537 | ret = -EBADMSG; | ||
538 | break; | ||
539 | |||
540 | /* send the reply when asked for it */ | ||
541 | case RXRPC_CSTATE_SRVR_SND_REPLY: | ||
542 | /* invoke the actual service routine */ | ||
543 | ret = SRXAFSCM_InitCallBackState(server); | ||
544 | if (ret < 0) | ||
545 | break; | ||
546 | |||
547 | ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET, | ||
548 | GFP_KERNEL, 0, &count); | ||
549 | if (ret < 0) | ||
550 | break; | ||
551 | break; | ||
552 | |||
553 | /* operation complete */ | ||
554 | case RXRPC_CSTATE_COMPLETE: | ||
555 | call->app_user = NULL; | ||
556 | removed = 0; | ||
557 | spin_lock(&afscm_calls_lock); | ||
558 | if (!list_empty(&call->app_link)) { | ||
559 | list_del_init(&call->app_link); | ||
560 | removed = 1; | ||
561 | } | ||
562 | spin_unlock(&afscm_calls_lock); | ||
563 | |||
564 | if (removed) | ||
565 | rxrpc_put_call(call); | ||
566 | break; | ||
567 | |||
568 | /* operation terminated on error */ | ||
569 | case RXRPC_CSTATE_ERROR: | ||
570 | call->app_user = NULL; | ||
571 | break; | ||
572 | |||
573 | default: | ||
574 | break; | ||
575 | } | ||
576 | |||
577 | if (ret < 0) | ||
578 | rxrpc_call_abort(call, ret); | ||
579 | |||
580 | afs_put_server(server); | ||
581 | |||
582 | _leave(" = %d", ret); | ||
583 | |||
584 | } /* end _SRXAFSCM_InitCallBackState() */ | ||
585 | |||
586 | /*****************************************************************************/ | ||
587 | /* | ||
588 | * handle a probe from a fileserver | ||
589 | */ | ||
590 | static void _SRXAFSCM_Probe(struct rxrpc_call *call) | ||
591 | { | ||
592 | struct afs_server *server; | ||
593 | size_t count; | ||
594 | int ret = 0, removed; | ||
595 | |||
596 | _enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]); | ||
597 | |||
598 | server = afs_server_get_from_peer(call->conn->peer); | ||
599 | |||
600 | switch (call->app_call_state) { | ||
601 | /* we've received the last packet - drain all the data from the | ||
602 | * call */ | ||
603 | case RXRPC_CSTATE_SRVR_GOT_ARGS: | ||
604 | /* shouldn't be any args */ | ||
605 | ret = -EBADMSG; | ||
606 | break; | ||
607 | |||
608 | /* send the reply when asked for it */ | ||
609 | case RXRPC_CSTATE_SRVR_SND_REPLY: | ||
610 | /* invoke the actual service routine */ | ||
611 | ret = SRXAFSCM_Probe(server); | ||
612 | if (ret < 0) | ||
613 | break; | ||
614 | |||
615 | ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET, | ||
616 | GFP_KERNEL, 0, &count); | ||
617 | if (ret < 0) | ||
618 | break; | ||
619 | break; | ||
620 | |||
621 | /* operation complete */ | ||
622 | case RXRPC_CSTATE_COMPLETE: | ||
623 | call->app_user = NULL; | ||
624 | removed = 0; | ||
625 | spin_lock(&afscm_calls_lock); | ||
626 | if (!list_empty(&call->app_link)) { | ||
627 | list_del_init(&call->app_link); | ||
628 | removed = 1; | ||
629 | } | ||
630 | spin_unlock(&afscm_calls_lock); | ||
631 | |||
632 | if (removed) | ||
633 | rxrpc_put_call(call); | ||
634 | break; | ||
635 | |||
636 | /* operation terminated on error */ | ||
637 | case RXRPC_CSTATE_ERROR: | ||
638 | call->app_user = NULL; | ||
639 | break; | ||
640 | |||
641 | default: | ||
642 | break; | ||
643 | } | ||
644 | |||
645 | if (ret < 0) | ||
646 | rxrpc_call_abort(call, ret); | ||
647 | |||
648 | afs_put_server(server); | ||
649 | |||
650 | _leave(" = %d", ret); | ||
651 | |||
652 | } /* end _SRXAFSCM_Probe() */ | ||
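Aside (not part of the commit): _SRXAFSCM_CallBack() above drains the whole argument block into scratch space and unmarshals it by hand: a 32-bit count, then count FIDs of three big-endian words each, then a second count (zero or equal to the first) followed by that many {version, expiry, type} triples. The compilable sketch below mirrors that layout check and decode in userspace, with invented type names; it is an illustration of the wire format, not the kernel routine itself.

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>	/* ntohl()/htonl() */

struct cb_entry {
	uint32_t vid, vnode, unique;
	uint32_t version, expiry, type;
};

static int unmarshal_callbacks(const uint32_t *buf, size_t qty_bytes,
			       struct cb_entry *out, size_t max)
{
	const uint32_t *fp = buf, *bp;
	uint32_t count, cbcount;
	size_t n;

	if (qty_bytes < 8)
		return -1;
	count = ntohl(*fp++);
	if (count > max ||
	    (count * 12 + 8 != qty_bytes && count * 24 + 8 != qty_bytes))
		return -1;

	bp = fp + count * 3;		/* second count follows the FID array */
	cbcount = ntohl(*bp++);
	if (cbcount != 0 && cbcount != count)
		return -1;

	for (n = 0; n < count; n++) {
		out[n].vid = ntohl(*fp++);
		out[n].vnode = ntohl(*fp++);
		out[n].unique = ntohl(*fp++);
		if (cbcount) {
			out[n].version = ntohl(*bp++);
			out[n].expiry = ntohl(*bp++);
			out[n].type = ntohl(*bp++);
		} else {
			/* the real code marks these AFSCM_CB_UNTYPED */
			out[n].version = out[n].expiry = out[n].type = 0;
		}
	}
	return (int)count;
}

int main(void)
{
	uint32_t buf[8];
	struct cb_entry out[4];

	/* one FID {vid=1, vnode=2, unique=3}, no callback triples */
	buf[0] = htonl(1);
	buf[1] = htonl(1); buf[2] = htonl(2); buf[3] = htonl(3);
	buf[4] = htonl(0);
	return unmarshal_callbacks(buf, 5 * 4, out, 4) == 1 ? 0 : 1;
}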
diff --git a/fs/afs/cmservice.h b/fs/afs/cmservice.h
new file mode 100644
index 000000000000..af8d4d689cb2
--- /dev/null
+++ b/fs/afs/cmservice.h
@@ -0,0 +1,29 @@
1 | /* cmservice.h: AFS Cache Manager Service declarations | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_AFS_CMSERVICE_H | ||
13 | #define _LINUX_AFS_CMSERVICE_H | ||
14 | |||
15 | #include <rxrpc/transport.h> | ||
16 | #include "types.h" | ||
17 | |||
18 | /* cache manager start/stop */ | ||
19 | extern int afscm_start(void); | ||
20 | extern void afscm_stop(void); | ||
21 | |||
22 | /* cache manager server functions */ | ||
23 | extern int SRXAFSCM_InitCallBackState(struct afs_server *server); | ||
24 | extern int SRXAFSCM_CallBack(struct afs_server *server, | ||
25 | size_t count, | ||
26 | struct afs_callback callbacks[]); | ||
27 | extern int SRXAFSCM_Probe(struct afs_server *server); | ||
28 | |||
29 | #endif /* _LINUX_AFS_CMSERVICE_H */ | ||
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
new file mode 100644
index 000000000000..6682d6d7f294
--- /dev/null
+++ b/fs/afs/dir.c
@@ -0,0 +1,666 @@
1 | /* dir.c: AFS filesystem directory handling | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/pagemap.h> | ||
19 | #include <linux/smp_lock.h> | ||
20 | #include "vnode.h" | ||
21 | #include "volume.h" | ||
22 | #include <rxrpc/call.h> | ||
23 | #include "super.h" | ||
24 | #include "internal.h" | ||
25 | |||
26 | static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry, | ||
27 | struct nameidata *nd); | ||
28 | static int afs_dir_open(struct inode *inode, struct file *file); | ||
29 | static int afs_dir_readdir(struct file *file, void *dirent, filldir_t filldir); | ||
30 | static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd); | ||
31 | static int afs_d_delete(struct dentry *dentry); | ||
32 | static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen, | ||
33 | loff_t fpos, ino_t ino, unsigned dtype); | ||
34 | |||
35 | struct file_operations afs_dir_file_operations = { | ||
36 | .open = afs_dir_open, | ||
37 | .readdir = afs_dir_readdir, | ||
38 | }; | ||
39 | |||
40 | struct inode_operations afs_dir_inode_operations = { | ||
41 | .lookup = afs_dir_lookup, | ||
42 | .getattr = afs_inode_getattr, | ||
43 | #if 0 /* TODO */ | ||
44 | .create = afs_dir_create, | ||
45 | .link = afs_dir_link, | ||
46 | .unlink = afs_dir_unlink, | ||
47 | .symlink = afs_dir_symlink, | ||
48 | .mkdir = afs_dir_mkdir, | ||
49 | .rmdir = afs_dir_rmdir, | ||
50 | .mknod = afs_dir_mknod, | ||
51 | .rename = afs_dir_rename, | ||
52 | #endif | ||
53 | }; | ||
54 | |||
55 | static struct dentry_operations afs_fs_dentry_operations = { | ||
56 | .d_revalidate = afs_d_revalidate, | ||
57 | .d_delete = afs_d_delete, | ||
58 | }; | ||
59 | |||
60 | #define AFS_DIR_HASHTBL_SIZE 128 | ||
61 | #define AFS_DIR_DIRENT_SIZE 32 | ||
62 | #define AFS_DIRENT_PER_BLOCK 64 | ||
63 | |||
64 | union afs_dirent { | ||
65 | struct { | ||
66 | uint8_t valid; | ||
67 | uint8_t unused[1]; | ||
68 | __be16 hash_next; | ||
69 | __be32 vnode; | ||
70 | __be32 unique; | ||
71 | uint8_t name[16]; | ||
72 | uint8_t overflow[4]; /* if any char of the name (inc | ||
73 | * NUL) reaches here, consume | ||
74 | * the next dirent too */ | ||
75 | } u; | ||
76 | uint8_t extended_name[32]; | ||
77 | }; | ||
78 | |||
79 | /* AFS directory page header (one at the beginning of every 2048-byte chunk) */ | ||
80 | struct afs_dir_pagehdr { | ||
81 | __be16 npages; | ||
82 | __be16 magic; | ||
83 | #define AFS_DIR_MAGIC htons(1234) | ||
84 | uint8_t nentries; | ||
85 | uint8_t bitmap[8]; | ||
86 | uint8_t pad[19]; | ||
87 | }; | ||
88 | |||
89 | /* directory block layout */ | ||
90 | union afs_dir_block { | ||
91 | |||
92 | struct afs_dir_pagehdr pagehdr; | ||
93 | |||
94 | struct { | ||
95 | struct afs_dir_pagehdr pagehdr; | ||
96 | uint8_t alloc_ctrs[128]; | ||
97 | /* dir hash table */ | ||
98 | uint16_t hashtable[AFS_DIR_HASHTBL_SIZE]; | ||
99 | } hdr; | ||
100 | |||
101 | union afs_dirent dirents[AFS_DIRENT_PER_BLOCK]; | ||
102 | }; | ||
103 | |||
104 | /* layout on a linux VM page */ | ||
105 | struct afs_dir_page { | ||
106 | union afs_dir_block blocks[PAGE_SIZE / sizeof(union afs_dir_block)]; | ||
107 | }; | ||
108 | |||
109 | struct afs_dir_lookup_cookie { | ||
110 | struct afs_fid fid; | ||
111 | const char *name; | ||
112 | size_t nlen; | ||
113 | int found; | ||
114 | }; | ||
115 | |||
116 | /*****************************************************************************/ | ||
117 | /* | ||
118 | * check that a directory page is valid | ||
119 | */ | ||
120 | static inline void afs_dir_check_page(struct inode *dir, struct page *page) | ||
121 | { | ||
122 | struct afs_dir_page *dbuf; | ||
123 | loff_t latter; | ||
124 | int tmp, qty; | ||
125 | |||
126 | #if 0 | ||
127 | /* check the page count */ | ||
128 | qty = desc.size / sizeof(dbuf->blocks[0]); | ||
129 | if (qty == 0) | ||
130 | goto error; | ||
131 | |||
132 | if (page->index == 0 && qty != ntohs(dbuf->blocks[0].pagehdr.npages)) { | ||
133 | printk("kAFS: %s(%lu): wrong number of dir blocks %d!=%hu\n", | ||
134 | __FUNCTION__, dir->i_ino, qty, ntohs(dbuf->blocks[0].pagehdr.npages)); | ||
135 | goto error; | ||
136 | } | ||
137 | #endif | ||
138 | |||
139 | /* determine how many magic numbers there should be in this page */ | ||
140 | latter = dir->i_size - (page->index << PAGE_CACHE_SHIFT); | ||
141 | if (latter >= PAGE_SIZE) | ||
142 | qty = PAGE_SIZE; | ||
143 | else | ||
144 | qty = latter; | ||
145 | qty /= sizeof(union afs_dir_block); | ||
146 | |||
147 | /* check them */ | ||
148 | dbuf = page_address(page); | ||
149 | for (tmp = 0; tmp < qty; tmp++) { | ||
150 | if (dbuf->blocks[tmp].pagehdr.magic != AFS_DIR_MAGIC) { | ||
151 | printk("kAFS: %s(%lu): bad magic %d/%d is %04hx\n", | ||
152 | __FUNCTION__, dir->i_ino, tmp, qty, | ||
153 | ntohs(dbuf->blocks[tmp].pagehdr.magic)); | ||
154 | goto error; | ||
155 | } | ||
156 | } | ||
157 | |||
158 | SetPageChecked(page); | ||
159 | return; | ||
160 | |||
161 | error: | ||
162 | SetPageChecked(page); | ||
163 | SetPageError(page); | ||
164 | |||
165 | } /* end afs_dir_check_page() */ | ||
166 | |||
167 | /*****************************************************************************/ | ||
168 | /* | ||
169 | * release a directory page previously obtained with afs_dir_get_page() | ||
170 | */ | ||
171 | static inline void afs_dir_put_page(struct page *page) | ||
172 | { | ||
173 | kunmap(page); | ||
174 | page_cache_release(page); | ||
175 | |||
176 | } /* end afs_dir_put_page() */ | ||
177 | |||
178 | /*****************************************************************************/ | ||
179 | /* | ||
180 | * get a page into the pagecache | ||
181 | */ | ||
182 | static struct page *afs_dir_get_page(struct inode *dir, unsigned long index) | ||
183 | { | ||
184 | struct page *page; | ||
185 | |||
186 | _enter("{%lu},%lu", dir->i_ino, index); | ||
187 | |||
188 | page = read_cache_page(dir->i_mapping, index, | ||
189 | (filler_t *) dir->i_mapping->a_ops->readpage, | ||
190 | NULL); | ||
191 | if (!IS_ERR(page)) { | ||
192 | wait_on_page_locked(page); | ||
193 | kmap(page); | ||
194 | if (!PageUptodate(page)) | ||
195 | goto fail; | ||
196 | if (!PageChecked(page)) | ||
197 | afs_dir_check_page(dir, page); | ||
198 | if (PageError(page)) | ||
199 | goto fail; | ||
200 | } | ||
201 | return page; | ||
202 | |||
203 | fail: | ||
204 | afs_dir_put_page(page); | ||
205 | return ERR_PTR(-EIO); | ||
206 | } /* end afs_dir_get_page() */ | ||
207 | |||
208 | /*****************************************************************************/ | ||
209 | /* | ||
210 | * open an AFS directory file | ||
211 | */ | ||
212 | static int afs_dir_open(struct inode *inode, struct file *file) | ||
213 | { | ||
214 | _enter("{%lu}", inode->i_ino); | ||
215 | |||
216 | BUG_ON(sizeof(union afs_dir_block) != 2048); | ||
217 | BUG_ON(sizeof(union afs_dirent) != 32); | ||
218 | |||
219 | if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED) | ||
220 | return -ENOENT; | ||
221 | |||
222 | _leave(" = 0"); | ||
223 | return 0; | ||
224 | |||
225 | } /* end afs_dir_open() */ | ||
226 | |||
227 | /*****************************************************************************/ | ||
228 | /* | ||
229 | * deal with one block in an AFS directory | ||
230 | */ | ||
231 | static int afs_dir_iterate_block(unsigned *fpos, | ||
232 | union afs_dir_block *block, | ||
233 | unsigned blkoff, | ||
234 | void *cookie, | ||
235 | filldir_t filldir) | ||
236 | { | ||
237 | union afs_dirent *dire; | ||
238 | unsigned offset, next, curr; | ||
239 | size_t nlen; | ||
240 | int tmp, ret; | ||
241 | |||
242 | _enter("%u,%x,%p,,", *fpos, blkoff, block); | ||
243 | |||
244 | curr = (*fpos - blkoff) / sizeof(union afs_dirent); | ||
245 | |||
246 | /* walk through the block, an entry at a time */ | ||
247 | for (offset = AFS_DIRENT_PER_BLOCK - block->pagehdr.nentries; | ||
248 | offset < AFS_DIRENT_PER_BLOCK; | ||
249 | offset = next | ||
250 | ) { | ||
251 | next = offset + 1; | ||
252 | |||
253 | /* skip entries marked unused in the bitmap */ | ||
254 | if (!(block->pagehdr.bitmap[offset / 8] & | ||
255 | (1 << (offset % 8)))) { | ||
256 | _debug("ENT[%Zu.%u]: unused\n", | ||
257 | blkoff / sizeof(union afs_dir_block), offset); | ||
258 | if (offset >= curr) | ||
259 | *fpos = blkoff + | ||
260 | next * sizeof(union afs_dirent); | ||
261 | continue; | ||
262 | } | ||
263 | |||
264 | /* got a valid entry */ | ||
265 | dire = &block->dirents[offset]; | ||
266 | nlen = strnlen(dire->u.name, | ||
267 | sizeof(*block) - | ||
268 | offset * sizeof(union afs_dirent)); | ||
269 | |||
270 | _debug("ENT[%Zu.%u]: %s %Zu \"%s\"\n", | ||
271 | blkoff / sizeof(union afs_dir_block), offset, | ||
272 | (offset < curr ? "skip" : "fill"), | ||
273 | nlen, dire->u.name); | ||
274 | |||
275 | /* work out where the next possible entry is */ | ||
276 | for (tmp = nlen; tmp > 15; tmp -= sizeof(union afs_dirent)) { | ||
277 | if (next >= AFS_DIRENT_PER_BLOCK) { | ||
278 | _debug("ENT[%Zu.%u]:" | ||
279 | " %u travelled beyond end dir block" | ||
280 | " (len %u/%Zu)\n", | ||
281 | blkoff / sizeof(union afs_dir_block), | ||
282 | offset, next, tmp, nlen); | ||
283 | return -EIO; | ||
284 | } | ||
285 | if (!(block->pagehdr.bitmap[next / 8] & | ||
286 | (1 << (next % 8)))) { | ||
287 | _debug("ENT[%Zu.%u]:" | ||
288 | " %u unmarked extension (len %u/%Zu)\n", | ||
289 | blkoff / sizeof(union afs_dir_block), | ||
290 | offset, next, tmp, nlen); | ||
291 | return -EIO; | ||
292 | } | ||
293 | |||
294 | _debug("ENT[%Zu.%u]: ext %u/%Zu\n", | ||
295 | blkoff / sizeof(union afs_dir_block), | ||
296 | next, tmp, nlen); | ||
297 | next++; | ||
298 | } | ||
299 | |||
300 | /* skip if starts before the current position */ | ||
301 | if (offset < curr) | ||
302 | continue; | ||
303 | |||
304 | /* found the next entry */ | ||
305 | ret = filldir(cookie, | ||
306 | dire->u.name, | ||
307 | nlen, | ||
308 | blkoff + offset * sizeof(union afs_dirent), | ||
309 | ntohl(dire->u.vnode), | ||
310 | filldir == afs_dir_lookup_filldir ? | ||
311 | ntohl(dire->u.unique) : DT_UNKNOWN); | ||
312 | if (ret < 0) { | ||
313 | _leave(" = 0 [full]"); | ||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | *fpos = blkoff + next * sizeof(union afs_dirent); | ||
318 | } | ||
319 | |||
320 | _leave(" = 1 [more]"); | ||
321 | return 1; | ||
322 | } /* end afs_dir_iterate_block() */ | ||
323 | |||
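Names longer than 15 characters spill out of the 16-byte name field and consume the following 32-byte slots, which is what the "tmp > 15" continuation loop above walks over. The helper below is a hypothetical illustration of that slot count, not a function from this patch.

/* Hypothetical helper: how many 32-byte dirent slots a name of nlen
 * characters occupies, mirroring the continuation loop above. */
static unsigned afs_dirent_slots(size_t nlen)
{
	unsigned slots = 1;			/* first slot: up to 15 chars + NUL */

	if (nlen > 15)
		slots += (nlen - 15 + 31) / 32;	/* one more per further 32 bytes */
	return slots;				/* e.g. nlen 15 -> 1, 16 -> 2, 48 -> 3 */
}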
324 | /*****************************************************************************/ | ||
325 | /* | ||
326 | * iterate through an AFS directory a block at a time, passing entries to filldir | ||
327 | */ | ||
328 | static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie, | ||
329 | filldir_t filldir) | ||
330 | { | ||
331 | union afs_dir_block *dblock; | ||
332 | struct afs_dir_page *dbuf; | ||
333 | struct page *page; | ||
334 | unsigned blkoff, limit; | ||
335 | int ret; | ||
336 | |||
337 | _enter("{%lu},%u,,", dir->i_ino, *fpos); | ||
338 | |||
339 | if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) { | ||
340 | _leave(" = -ESTALE"); | ||
341 | return -ESTALE; | ||
342 | } | ||
343 | |||
344 | /* round the file position up to the next entry boundary */ | ||
345 | *fpos += sizeof(union afs_dirent) - 1; | ||
346 | *fpos &= ~(sizeof(union afs_dirent) - 1); | ||
347 | |||
348 | /* walk through the blocks in sequence */ | ||
349 | ret = 0; | ||
350 | while (*fpos < dir->i_size) { | ||
351 | blkoff = *fpos & ~(sizeof(union afs_dir_block) - 1); | ||
352 | |||
353 | /* fetch the appropriate page from the directory */ | ||
354 | page = afs_dir_get_page(dir, blkoff / PAGE_SIZE); | ||
355 | if (IS_ERR(page)) { | ||
356 | ret = PTR_ERR(page); | ||
357 | break; | ||
358 | } | ||
359 | |||
360 | limit = blkoff & ~(PAGE_SIZE - 1); | ||
361 | |||
362 | dbuf = page_address(page); | ||
363 | |||
364 | /* deal with the individual blocks stashed on this page */ | ||
365 | do { | ||
366 | dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) / | ||
367 | sizeof(union afs_dir_block)]; | ||
368 | ret = afs_dir_iterate_block(fpos, dblock, blkoff, | ||
369 | cookie, filldir); | ||
370 | if (ret != 1) { | ||
371 | afs_dir_put_page(page); | ||
372 | goto out; | ||
373 | } | ||
374 | |||
375 | blkoff += sizeof(union afs_dir_block); | ||
376 | |||
377 | } while (*fpos < dir->i_size && blkoff < limit); | ||
378 | |||
379 | afs_dir_put_page(page); | ||
380 | ret = 0; | ||
381 | } | ||
382 | |||
383 | out: | ||
384 | _leave(" = %d", ret); | ||
385 | return ret; | ||
386 | } /* end afs_dir_iterate() */ | ||
387 | |||
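afs_dir_iterate() first rounds the file position up to the next 32-byte dirent boundary using the usual power-of-two trick. A small worked example (illustrative, not from the patch):

/* Round fpos up to the next dirent boundary, as done at the top of
 * afs_dir_iterate(): add (size - 1), then mask the low bits off. */
unsigned round_to_dirent(unsigned fpos)
{
	return (fpos + 31) & ~31u;	/* 0 -> 0, 1 -> 32, 33 -> 64, 64 -> 64 */
}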
388 | /*****************************************************************************/ | ||
389 | /* | ||
390 | * read an AFS directory | ||
391 | */ | ||
392 | static int afs_dir_readdir(struct file *file, void *cookie, filldir_t filldir) | ||
393 | { | ||
394 | unsigned fpos; | ||
395 | int ret; | ||
396 | |||
397 | _enter("{%Ld,{%lu}}", file->f_pos, file->f_dentry->d_inode->i_ino); | ||
398 | |||
399 | fpos = file->f_pos; | ||
400 | ret = afs_dir_iterate(file->f_dentry->d_inode, &fpos, cookie, filldir); | ||
401 | file->f_pos = fpos; | ||
402 | |||
403 | _leave(" = %d", ret); | ||
404 | return ret; | ||
405 | } /* end afs_dir_readdir() */ | ||
406 | |||
407 | /*****************************************************************************/ | ||
408 | /* | ||
409 | * search the directory for a name | ||
410 | * - if afs_dir_iterate_block() spots this function, it'll pass the FID | ||
411 | * uniquifier through dtype | ||
412 | */ | ||
413 | static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen, | ||
414 | loff_t fpos, ino_t ino, unsigned dtype) | ||
415 | { | ||
416 | struct afs_dir_lookup_cookie *cookie = _cookie; | ||
417 | |||
418 | _enter("{%s,%Zu},%s,%u,,%lu,%u", | ||
419 | cookie->name, cookie->nlen, name, nlen, ino, dtype); | ||
420 | |||
421 | if (cookie->nlen != nlen || memcmp(cookie->name, name, nlen) != 0) { | ||
422 | _leave(" = 0 [no]"); | ||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | cookie->fid.vnode = ino; | ||
427 | cookie->fid.unique = dtype; | ||
428 | cookie->found = 1; | ||
429 | |||
430 | _leave(" = -1 [found]"); | ||
431 | return -1; | ||
432 | } /* end afs_dir_lookup_filldir() */ | ||
433 | |||
434 | /*****************************************************************************/ | ||
435 | /* | ||
436 | * look up an entry in a directory | ||
437 | */ | ||
438 | static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry, | ||
439 | struct nameidata *nd) | ||
440 | { | ||
441 | struct afs_dir_lookup_cookie cookie; | ||
442 | struct afs_super_info *as; | ||
443 | struct afs_vnode *vnode; | ||
444 | struct inode *inode; | ||
445 | unsigned fpos; | ||
446 | int ret; | ||
447 | |||
448 | _enter("{%lu},%p{%s}", dir->i_ino, dentry, dentry->d_name.name); | ||
449 | |||
450 | /* insanity checks first */ | ||
451 | BUG_ON(sizeof(union afs_dir_block) != 2048); | ||
452 | BUG_ON(sizeof(union afs_dirent) != 32); | ||
453 | |||
454 | if (dentry->d_name.len > 255) { | ||
455 | _leave(" = -ENAMETOOLONG"); | ||
456 | return ERR_PTR(-ENAMETOOLONG); | ||
457 | } | ||
458 | |||
459 | vnode = AFS_FS_I(dir); | ||
460 | if (vnode->flags & AFS_VNODE_DELETED) { | ||
461 | _leave(" = -ESTALE"); | ||
462 | return ERR_PTR(-ESTALE); | ||
463 | } | ||
464 | |||
465 | as = dir->i_sb->s_fs_info; | ||
466 | |||
467 | /* search the directory */ | ||
468 | cookie.name = dentry->d_name.name; | ||
469 | cookie.nlen = dentry->d_name.len; | ||
470 | cookie.fid.vid = as->volume->vid; | ||
471 | cookie.found = 0; | ||
472 | |||
473 | fpos = 0; | ||
474 | ret = afs_dir_iterate(dir, &fpos, &cookie, afs_dir_lookup_filldir); | ||
475 | if (ret < 0) { | ||
476 | _leave(" = %d", ret); | ||
477 | return ERR_PTR(ret); | ||
478 | } | ||
479 | |||
480 | ret = -ENOENT; | ||
481 | if (!cookie.found) { | ||
482 | _leave(" = %d", ret); | ||
483 | return ERR_PTR(ret); | ||
484 | } | ||
485 | |||
486 | /* instantiate the dentry */ | ||
487 | ret = afs_iget(dir->i_sb, &cookie.fid, &inode); | ||
488 | if (ret < 0) { | ||
489 | _leave(" = %d", ret); | ||
490 | return ERR_PTR(ret); | ||
491 | } | ||
492 | |||
493 | dentry->d_op = &afs_fs_dentry_operations; | ||
494 | dentry->d_fsdata = (void *) (unsigned long) vnode->status.version; | ||
495 | |||
496 | d_add(dentry, inode); | ||
497 | _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%lu }", | ||
498 | cookie.fid.vnode, | ||
499 | cookie.fid.unique, | ||
500 | dentry->d_inode->i_ino, | ||
501 | dentry->d_inode->i_version); | ||
502 | |||
503 | return NULL; | ||
504 | } /* end afs_dir_lookup() */ | ||
505 | |||
506 | /*****************************************************************************/ | ||
507 | /* | ||
508 | * check that a dentry lookup hit has found a valid entry | ||
509 | * - NOTE! the hit can be a negative hit too, so we can't assume we have an | ||
510 | * inode | ||
511 | * (derived from nfs_lookup_revalidate) | ||
512 | */ | ||
513 | static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd) | ||
514 | { | ||
515 | struct afs_dir_lookup_cookie cookie; | ||
516 | struct dentry *parent; | ||
517 | struct inode *inode, *dir; | ||
518 | unsigned fpos; | ||
519 | int ret; | ||
520 | |||
521 | _enter("{sb=%p n=%s},", dentry->d_sb, dentry->d_name.name); | ||
522 | |||
523 | /* lock down the parent dentry so we can peer at it */ | ||
524 | parent = dget_parent(dentry); | ||
525 | |||
526 | dir = parent->d_inode; | ||
527 | inode = dentry->d_inode; | ||
528 | |||
529 | /* handle a negative dentry */ | ||
530 | if (!inode) | ||
531 | goto out_bad; | ||
532 | |||
533 | /* handle a bad inode */ | ||
534 | if (is_bad_inode(inode)) { | ||
535 | printk("kAFS: afs_d_revalidate: %s/%s has bad inode\n", | ||
536 | dentry->d_parent->d_name.name, dentry->d_name.name); | ||
537 | goto out_bad; | ||
538 | } | ||
539 | |||
540 | /* force a full lookup if the parent directory has changed since the | ||
541 | * server was last consulted | ||
542 | * - otherwise this inode must still exist, even if the inode details | ||
543 | * themselves have changed | ||
544 | */ | ||
545 | if (AFS_FS_I(dir)->flags & AFS_VNODE_CHANGED) | ||
546 | afs_vnode_fetch_status(AFS_FS_I(dir)); | ||
547 | |||
548 | if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) { | ||
549 | _debug("%s: parent dir deleted", dentry->d_name.name); | ||
550 | goto out_bad; | ||
551 | } | ||
552 | |||
553 | if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED) { | ||
554 | _debug("%s: file already deleted", dentry->d_name.name); | ||
555 | goto out_bad; | ||
556 | } | ||
557 | |||
558 | if ((unsigned long) dentry->d_fsdata != | ||
559 | (unsigned long) AFS_FS_I(dir)->status.version) { | ||
560 | _debug("%s: parent changed %lu -> %u", | ||
561 | dentry->d_name.name, | ||
562 | (unsigned long) dentry->d_fsdata, | ||
563 | (unsigned) AFS_FS_I(dir)->status.version); | ||
564 | |||
565 | /* search the directory for this vnode */ | ||
566 | cookie.name = dentry->d_name.name; | ||
567 | cookie.nlen = dentry->d_name.len; | ||
568 | cookie.fid.vid = AFS_FS_I(inode)->volume->vid; | ||
569 | cookie.found = 0; | ||
570 | |||
571 | fpos = 0; | ||
572 | ret = afs_dir_iterate(dir, &fpos, &cookie, | ||
573 | afs_dir_lookup_filldir); | ||
574 | if (ret < 0) { | ||
575 | _debug("failed to iterate dir %s: %d", | ||
576 | parent->d_name.name, ret); | ||
577 | goto out_bad; | ||
578 | } | ||
579 | |||
580 | if (!cookie.found) { | ||
581 | _debug("%s: dirent not found", dentry->d_name.name); | ||
582 | goto not_found; | ||
583 | } | ||
584 | |||
585 | /* if the vnode ID has changed, then the dirent points to a | ||
586 | * different file */ | ||
587 | if (cookie.fid.vnode != AFS_FS_I(inode)->fid.vnode) { | ||
588 | _debug("%s: dirent changed", dentry->d_name.name); | ||
589 | goto not_found; | ||
590 | } | ||
591 | |||
592 | /* if the vnode ID uniquifier has changed, then the file has | ||
593 | * been deleted */ | ||
594 | if (cookie.fid.unique != AFS_FS_I(inode)->fid.unique) { | ||
595 | _debug("%s: file deleted (uq %u -> %u I:%lu)", | ||
596 | dentry->d_name.name, | ||
597 | cookie.fid.unique, | ||
598 | AFS_FS_I(inode)->fid.unique, | ||
599 | inode->i_version); | ||
600 | spin_lock(&AFS_FS_I(inode)->lock); | ||
601 | AFS_FS_I(inode)->flags |= AFS_VNODE_DELETED; | ||
602 | spin_unlock(&AFS_FS_I(inode)->lock); | ||
603 | invalidate_remote_inode(inode); | ||
604 | goto out_bad; | ||
605 | } | ||
606 | |||
607 | dentry->d_fsdata = | ||
608 | (void *) (unsigned long) AFS_FS_I(dir)->status.version; | ||
609 | } | ||
610 | |||
611 | out_valid: | ||
612 | dput(parent); | ||
613 | _leave(" = 1 [valid]"); | ||
614 | return 1; | ||
615 | |||
616 | /* the dirent, if it exists, now points to a different vnode */ | ||
617 | not_found: | ||
618 | spin_lock(&dentry->d_lock); | ||
619 | dentry->d_flags |= DCACHE_NFSFS_RENAMED; | ||
620 | spin_unlock(&dentry->d_lock); | ||
621 | |||
622 | out_bad: | ||
623 | if (inode) { | ||
624 | /* don't unhash if we have submounts */ | ||
625 | if (have_submounts(dentry)) | ||
626 | goto out_valid; | ||
627 | } | ||
628 | |||
629 | shrink_dcache_parent(dentry); | ||
630 | |||
631 | _debug("dropping dentry %s/%s", | ||
632 | dentry->d_parent->d_name.name, dentry->d_name.name); | ||
633 | d_drop(dentry); | ||
634 | |||
635 | dput(parent); | ||
636 | |||
637 | _leave(" = 0 [bad]"); | ||
638 | return 0; | ||
639 | } /* end afs_d_revalidate() */ | ||
640 | |||
641 | /*****************************************************************************/ | ||
642 | /* | ||
643 | * allow the VFS to enquire as to whether a dentry should be unhashed (mustn't | ||
644 | * sleep) | ||
645 | * - called from dput() when d_count is going to 0. | ||
646 | * - return 1 to request dentry be unhashed, 0 otherwise | ||
647 | */ | ||
648 | static int afs_d_delete(struct dentry *dentry) | ||
649 | { | ||
650 | _enter("%s", dentry->d_name.name); | ||
651 | |||
652 | if (dentry->d_flags & DCACHE_NFSFS_RENAMED) | ||
653 | goto zap; | ||
654 | |||
655 | if (dentry->d_inode) { | ||
656 | if (AFS_FS_I(dentry->d_inode)->flags & AFS_VNODE_DELETED) | ||
657 | goto zap; | ||
658 | } | ||
659 | |||
660 | _leave(" = 0 [keep]"); | ||
661 | return 0; | ||
662 | |||
663 | zap: | ||
664 | _leave(" = 1 [zap]"); | ||
665 | return 1; | ||
666 | } /* end afs_d_delete() */ | ||
diff --git a/fs/afs/errors.h b/fs/afs/errors.h new file mode 100644 index 000000000000..574d94ac8d05 --- /dev/null +++ b/fs/afs/errors.h | |||
@@ -0,0 +1,34 @@ | |||
1 | /* errors.h: AFS abort/error codes | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_AFS_ERRORS_H | ||
13 | #define _LINUX_AFS_ERRORS_H | ||
14 | |||
15 | #include "types.h" | ||
16 | |||
17 | /* file server abort codes */ | ||
18 | typedef enum { | ||
19 | VSALVAGE = 101, /* volume needs salvaging */ | ||
20 | VNOVNODE = 102, /* no such file/dir (vnode) */ | ||
21 | VNOVOL = 103, /* no such volume or volume unavailable */ | ||
22 | VVOLEXISTS = 104, /* volume name already exists */ | ||
23 | VNOSERVICE = 105, /* volume not currently in service */ | ||
24 | VOFFLINE = 106, /* volume is currently offline (more info available [VVL-spec]) */ | ||
25 | VONLINE = 107, /* volume is already online */ | ||
26 | VDISKFULL = 108, /* disk partition is full */ | ||
27 | VOVERQUOTA = 109, /* volume's maximum quota exceeded */ | ||
28 | VBUSY = 110, /* volume is temporarily unavailable */ | ||
29 | VMOVED = 111, /* volume moved to new server - ask this FS where */ | ||
30 | } afs_rxfs_abort_t; | ||
31 | |||
32 | extern int afs_abort_to_error(int abortcode); | ||
33 | |||
34 | #endif /* _LINUX_AFS_ERRORS_H */ | ||
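errors.h only declares afs_abort_to_error(); the translation table itself is implemented elsewhere in the client. Below is a minimal, hypothetical sketch of what such a mapping might look like; the errno values chosen here are assumptions for illustration, not necessarily the ones this patch uses.

/* Hypothetical abort-code translator (assumes errors.h and <linux/errno.h>
 * are in scope); the real table may differ. */
int example_abort_to_error(int abortcode)
{
	switch (abortcode) {
	case VNOVNODE:		return -ENOENT;		/* vnode gone */
	case VVOLEXISTS:	return -EEXIST;
	case VDISKFULL:		return -ENOSPC;
	case VOVERQUOTA:	return -EDQUOT;
	case VBUSY:		return -EBUSY;
	default:		return -EIO;		/* anything else: generic I/O error */
	}
}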
diff --git a/fs/afs/file.c b/fs/afs/file.c new file mode 100644 index 000000000000..6b6bb7c8abf6 --- /dev/null +++ b/fs/afs/file.c | |||
@@ -0,0 +1,305 @@ | |||
1 | /* file.c: AFS filesystem file handling | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/pagemap.h> | ||
19 | #include <linux/buffer_head.h> | ||
20 | #include "volume.h" | ||
21 | #include "vnode.h" | ||
22 | #include <rxrpc/call.h> | ||
23 | #include "internal.h" | ||
24 | |||
25 | #if 0 | ||
26 | static int afs_file_open(struct inode *inode, struct file *file); | ||
27 | static int afs_file_release(struct inode *inode, struct file *file); | ||
28 | #endif | ||
29 | |||
30 | static int afs_file_readpage(struct file *file, struct page *page); | ||
31 | static int afs_file_invalidatepage(struct page *page, unsigned long offset); | ||
32 | static int afs_file_releasepage(struct page *page, int gfp_flags); | ||
33 | |||
34 | static ssize_t afs_file_write(struct file *file, const char __user *buf, | ||
35 | size_t size, loff_t *off); | ||
36 | |||
37 | struct inode_operations afs_file_inode_operations = { | ||
38 | .getattr = afs_inode_getattr, | ||
39 | }; | ||
40 | |||
41 | struct file_operations afs_file_file_operations = { | ||
42 | .read = generic_file_read, | ||
43 | .write = afs_file_write, | ||
44 | .mmap = generic_file_mmap, | ||
45 | #if 0 | ||
46 | .open = afs_file_open, | ||
47 | .release = afs_file_release, | ||
48 | .fsync = afs_file_fsync, | ||
49 | #endif | ||
50 | }; | ||
51 | |||
52 | struct address_space_operations afs_fs_aops = { | ||
53 | .readpage = afs_file_readpage, | ||
54 | .sync_page = block_sync_page, | ||
55 | .set_page_dirty = __set_page_dirty_nobuffers, | ||
56 | .releasepage = afs_file_releasepage, | ||
57 | .invalidatepage = afs_file_invalidatepage, | ||
58 | }; | ||
59 | |||
60 | /*****************************************************************************/ | ||
61 | /* | ||
62 | * AFS file write | ||
63 | */ | ||
64 | static ssize_t afs_file_write(struct file *file, const char __user *buf, | ||
65 | size_t size, loff_t *off) | ||
66 | { | ||
67 | struct afs_vnode *vnode; | ||
68 | |||
69 | vnode = AFS_FS_I(file->f_dentry->d_inode); | ||
70 | if (vnode->flags & AFS_VNODE_DELETED) | ||
71 | return -ESTALE; | ||
72 | |||
73 | return -EIO; | ||
74 | } /* end afs_file_write() */ | ||
75 | |||
76 | /*****************************************************************************/ | ||
77 | /* | ||
78 | * deal with notification that a page was read from the cache | ||
79 | */ | ||
80 | #ifdef AFS_CACHING_SUPPORT | ||
81 | static void afs_file_readpage_read_complete(void *cookie_data, | ||
82 | struct page *page, | ||
83 | void *data, | ||
84 | int error) | ||
85 | { | ||
86 | _enter("%p,%p,%p,%d", cookie_data, page, data, error); | ||
87 | |||
88 | if (error) | ||
89 | SetPageError(page); | ||
90 | else | ||
91 | SetPageUptodate(page); | ||
92 | unlock_page(page); | ||
93 | |||
94 | } /* end afs_file_readpage_read_complete() */ | ||
95 | #endif | ||
96 | |||
97 | /*****************************************************************************/ | ||
98 | /* | ||
99 | * deal with notification that a page was written to the cache | ||
100 | */ | ||
101 | #ifdef AFS_CACHING_SUPPORT | ||
102 | static void afs_file_readpage_write_complete(void *cookie_data, | ||
103 | struct page *page, | ||
104 | void *data, | ||
105 | int error) | ||
106 | { | ||
107 | _enter("%p,%p,%p,%d", cookie_data, page, data, error); | ||
108 | |||
109 | unlock_page(page); | ||
110 | |||
111 | } /* end afs_file_readpage_write_complete() */ | ||
112 | #endif | ||
113 | |||
114 | /*****************************************************************************/ | ||
115 | /* | ||
116 | * AFS read page from file (or symlink) | ||
117 | */ | ||
118 | static int afs_file_readpage(struct file *file, struct page *page) | ||
119 | { | ||
120 | struct afs_rxfs_fetch_descriptor desc; | ||
121 | #ifdef AFS_CACHING_SUPPORT | ||
122 | struct cachefs_page *pageio; | ||
123 | #endif | ||
124 | struct afs_vnode *vnode; | ||
125 | struct inode *inode; | ||
126 | int ret; | ||
127 | |||
128 | inode = page->mapping->host; | ||
129 | |||
130 | _enter("{%lu},{%lu}", inode->i_ino, page->index); | ||
131 | |||
132 | vnode = AFS_FS_I(inode); | ||
133 | |||
134 | if (!PageLocked(page)) | ||
135 | PAGE_BUG(page); | ||
136 | |||
137 | ret = -ESTALE; | ||
138 | if (vnode->flags & AFS_VNODE_DELETED) | ||
139 | goto error; | ||
140 | |||
141 | #ifdef AFS_CACHING_SUPPORT | ||
142 | ret = cachefs_page_get_private(page, &pageio, GFP_NOIO); | ||
143 | if (ret < 0) | ||
144 | goto error; | ||
145 | |||
146 | /* is it cached? */ | ||
147 | ret = cachefs_read_or_alloc_page(vnode->cache, | ||
148 | page, | ||
149 | afs_file_readpage_read_complete, | ||
150 | NULL, | ||
151 | GFP_KERNEL); | ||
152 | #else | ||
153 | ret = -ENOBUFS; | ||
154 | #endif | ||
155 | |||
156 | switch (ret) { | ||
157 | /* read BIO submitted and wb-journal entry found */ | ||
158 | case 1: | ||
159 | BUG(); // TODO - handle wb-journal match | ||
160 | |||
161 | /* read BIO submitted (page in cache) */ | ||
162 | case 0: | ||
163 | break; | ||
164 | |||
165 | /* no page available in cache */ | ||
166 | case -ENOBUFS: | ||
167 | case -ENODATA: | ||
168 | default: | ||
169 | desc.fid = vnode->fid; | ||
170 | desc.offset = page->index << PAGE_CACHE_SHIFT; | ||
171 | desc.size = min((size_t) (inode->i_size - desc.offset), | ||
172 | (size_t) PAGE_SIZE); | ||
173 | desc.buffer = kmap(page); | ||
174 | |||
175 | clear_page(desc.buffer); | ||
176 | |||
177 | /* read the contents of the file from the server into the | ||
178 | * page */ | ||
179 | ret = afs_vnode_fetch_data(vnode, &desc); | ||
180 | kunmap(page); | ||
181 | if (ret < 0) { | ||
182 | if (ret == -ENOENT) { | ||
183 | _debug("got NOENT from server" | ||
184 | " - marking file deleted and stale"); | ||
185 | vnode->flags |= AFS_VNODE_DELETED; | ||
186 | ret = -ESTALE; | ||
187 | } | ||
188 | |||
189 | #ifdef AFS_CACHING_SUPPORT | ||
190 | cachefs_uncache_page(vnode->cache, page); | ||
191 | #endif | ||
192 | goto error; | ||
193 | } | ||
194 | |||
195 | SetPageUptodate(page); | ||
196 | |||
197 | #ifdef AFS_CACHING_SUPPORT | ||
198 | if (cachefs_write_page(vnode->cache, | ||
199 | page, | ||
200 | afs_file_readpage_write_complete, | ||
201 | NULL, | ||
202 | GFP_KERNEL) != 0 | ||
203 | ) { | ||
204 | cachefs_uncache_page(vnode->cache, page); | ||
205 | unlock_page(page); | ||
206 | } | ||
207 | #else | ||
208 | unlock_page(page); | ||
209 | #endif | ||
210 | } | ||
211 | |||
212 | _leave(" = 0"); | ||
213 | return 0; | ||
214 | |||
215 | error: | ||
216 | SetPageError(page); | ||
217 | unlock_page(page); | ||
218 | |||
219 | _leave(" = %d", ret); | ||
220 | return ret; | ||
221 | |||
222 | } /* end afs_file_readpage() */ | ||
223 | |||
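The readpage path above sizes each fetch to the remainder of the file or a full page, whichever is smaller. A standalone arithmetic example (userspace illustration assuming 4096-byte pages; not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long i_size = 10000, index = 2, pgsize = 4096;
	unsigned long offset = index * pgsize;			/* 8192 */
	unsigned long remain = i_size - offset;
	unsigned long size = remain < pgsize ? remain : pgsize;	/* 1808 */

	printf("fetch %lu bytes at offset %lu\n", size, offset);
	return 0;
}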
224 | /*****************************************************************************/ | ||
225 | /* | ||
226 | * get a page cookie for the specified page | ||
227 | */ | ||
228 | #ifdef AFS_CACHING_SUPPORT | ||
229 | int afs_cache_get_page_cookie(struct page *page, | ||
230 | struct cachefs_page **_page_cookie) | ||
231 | { | ||
232 | int ret; | ||
233 | |||
234 | _enter(""); | ||
235 | ret = cachefs_page_get_private(page, _page_cookie, GFP_NOIO); | ||
236 | |||
237 | _leave(" = %d", ret); | ||
238 | return ret; | ||
239 | } /* end afs_cache_get_page_cookie() */ | ||
240 | #endif | ||
241 | |||
242 | /*****************************************************************************/ | ||
243 | /* | ||
244 | * invalidate part or all of a page | ||
245 | */ | ||
246 | static int afs_file_invalidatepage(struct page *page, unsigned long offset) | ||
247 | { | ||
248 | int ret = 1; | ||
249 | |||
250 | _enter("{%lu},%lu", page->index, offset); | ||
251 | |||
252 | BUG_ON(!PageLocked(page)); | ||
253 | |||
254 | if (PagePrivate(page)) { | ||
255 | #ifdef AFS_CACHING_SUPPORT | ||
256 | struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); | ||
257 | cachefs_uncache_page(vnode->cache, page); | ||
258 | #endif | ||
259 | |||
260 | /* We release buffers only if the entire page is being | ||
261 | * invalidated. | ||
262 | * The get_block cached value has been unconditionally | ||
263 | * invalidated, so real IO is not possible anymore. | ||
264 | */ | ||
265 | if (offset == 0) { | ||
266 | BUG_ON(!PageLocked(page)); | ||
267 | |||
268 | ret = 0; | ||
269 | if (!PageWriteback(page)) | ||
270 | ret = page->mapping->a_ops->releasepage(page, | ||
271 | 0); | ||
272 | } | ||
273 | } | ||
274 | |||
275 | _leave(" = %d", ret); | ||
276 | return ret; | ||
277 | } /* end afs_file_invalidatepage() */ | ||
278 | |||
279 | /*****************************************************************************/ | ||
280 | /* | ||
281 | * release a page and clean up its private data | ||
282 | */ | ||
283 | static int afs_file_releasepage(struct page *page, int gfp_flags) | ||
284 | { | ||
285 | struct cachefs_page *pageio; | ||
286 | |||
287 | _enter("{%lu},%x", page->index, gfp_flags); | ||
288 | |||
289 | if (PagePrivate(page)) { | ||
290 | #ifdef AFS_CACHING_SUPPORT | ||
291 | struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); | ||
292 | cachefs_uncache_page(vnode->cache, page); | ||
293 | #endif | ||
294 | |||
295 | pageio = (struct cachefs_page *) page->private; | ||
296 | page->private = 0; | ||
297 | ClearPagePrivate(page); | ||
298 | |||
299 | if (pageio) | ||
300 | kfree(pageio); | ||
301 | } | ||
302 | |||
303 | _leave(" = 0"); | ||
304 | return 0; | ||
305 | } /* end afs_file_releasepage() */ | ||
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c new file mode 100644 index 000000000000..61bc371532ab --- /dev/null +++ b/fs/afs/fsclient.c | |||
@@ -0,0 +1,837 @@ | |||
1 | /* fsclient.c: AFS File Server client stubs | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <rxrpc/rxrpc.h> | ||
15 | #include <rxrpc/transport.h> | ||
16 | #include <rxrpc/connection.h> | ||
17 | #include <rxrpc/call.h> | ||
18 | #include "fsclient.h" | ||
19 | #include "cmservice.h" | ||
20 | #include "vnode.h" | ||
21 | #include "server.h" | ||
22 | #include "errors.h" | ||
23 | #include "internal.h" | ||
24 | |||
25 | #define FSFETCHSTATUS 132 /* AFS Fetch file status */ | ||
26 | #define FSFETCHDATA 130 /* AFS Fetch file data */ | ||
27 | #define FSGIVEUPCALLBACKS 147 /* AFS Discard callback promises */ | ||
28 | #define FSGETVOLUMEINFO 148 /* AFS Get root volume information */ | ||
29 | #define FSGETROOTVOLUME 151 /* AFS Get root volume name */ | ||
30 | #define FSLOOKUP 161 /* AFS lookup file in directory */ | ||
31 | |||
32 | /*****************************************************************************/ | ||
33 | /* | ||
34 | * map afs abort codes to/from Linux error codes | ||
35 | * - called with call->lock held | ||
36 | */ | ||
37 | static void afs_rxfs_aemap(struct rxrpc_call *call) | ||
38 | { | ||
39 | switch (call->app_err_state) { | ||
40 | case RXRPC_ESTATE_LOCAL_ABORT: | ||
41 | call->app_abort_code = -call->app_errno; | ||
42 | break; | ||
43 | case RXRPC_ESTATE_PEER_ABORT: | ||
44 | call->app_errno = afs_abort_to_error(call->app_abort_code); | ||
45 | break; | ||
46 | default: | ||
47 | break; | ||
48 | } | ||
49 | } /* end afs_rxfs_aemap() */ | ||
50 | |||
51 | /*****************************************************************************/ | ||
52 | /* | ||
53 | * get the root volume name from a fileserver | ||
54 | * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2 | ||
55 | */ | ||
56 | #if 0 | ||
57 | int afs_rxfs_get_root_volume(struct afs_server *server, | ||
58 | char *buf, size_t *buflen) | ||
59 | { | ||
60 | struct rxrpc_connection *conn; | ||
61 | struct rxrpc_call *call; | ||
62 | struct kvec piov[2]; | ||
63 | size_t sent; | ||
64 | int ret; | ||
65 | u32 param[1]; | ||
66 | |||
67 | DECLARE_WAITQUEUE(myself, current); | ||
68 | |||
69 | kenter("%p,%p,%u", server, buf, *buflen); | ||
70 | |||
71 | /* get hold of the fileserver connection */ | ||
72 | ret = afs_server_get_fsconn(server, &conn); | ||
73 | if (ret < 0) | ||
74 | goto out; | ||
75 | |||
76 | /* create a call through that connection */ | ||
77 | ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call); | ||
78 | if (ret < 0) { | ||
79 | printk("kAFS: Unable to create call: %d\n", ret); | ||
80 | goto out_put_conn; | ||
81 | } | ||
82 | call->app_opcode = FSGETROOTVOLUME; | ||
83 | |||
84 | /* we want to get event notifications from the call */ | ||
85 | add_wait_queue(&call->waitq, &myself); | ||
86 | |||
87 | /* marshall the parameters */ | ||
88 | param[0] = htonl(FSGETROOTVOLUME); | ||
89 | |||
90 | piov[0].iov_len = sizeof(param); | ||
91 | piov[0].iov_base = param; | ||
92 | |||
93 | /* send the parameters to the server */ | ||
94 | ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
95 | 0, &sent); | ||
96 | if (ret < 0) | ||
97 | goto abort; | ||
98 | |||
99 | /* wait for the reply to completely arrive */ | ||
100 | for (;;) { | ||
101 | set_current_state(TASK_INTERRUPTIBLE); | ||
102 | if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY || | ||
103 | signal_pending(current)) | ||
104 | break; | ||
105 | schedule(); | ||
106 | } | ||
107 | set_current_state(TASK_RUNNING); | ||
108 | |||
109 | ret = -EINTR; | ||
110 | if (signal_pending(current)) | ||
111 | goto abort; | ||
112 | |||
113 | switch (call->app_call_state) { | ||
114 | case RXRPC_CSTATE_ERROR: | ||
115 | ret = call->app_errno; | ||
116 | kdebug("Got Error: %d", ret); | ||
117 | goto out_unwait; | ||
118 | |||
119 | case RXRPC_CSTATE_CLNT_GOT_REPLY: | ||
120 | /* read the reply */ | ||
121 | kdebug("Got Reply: qty=%d", call->app_ready_qty); | ||
122 | |||
123 | ret = -EBADMSG; | ||
124 | if (call->app_ready_qty <= 4) | ||
125 | goto abort; | ||
126 | |||
127 | ret = rxrpc_call_read_data(call, NULL, call->app_ready_qty, 0); | ||
128 | if (ret < 0) | ||
129 | goto abort; | ||
130 | |||
131 | #if 0 | ||
132 | /* unmarshall the reply */ | ||
133 | bp = buffer; | ||
134 | for (loop = 0; loop < 65; loop++) | ||
135 | entry->name[loop] = ntohl(*bp++); | ||
136 | entry->name[64] = 0; | ||
137 | |||
138 | entry->type = ntohl(*bp++); | ||
139 | entry->num_servers = ntohl(*bp++); | ||
140 | |||
141 | for (loop = 0; loop < 8; loop++) | ||
142 | entry->servers[loop].addr.s_addr = *bp++; | ||
143 | |||
144 | for (loop = 0; loop < 8; loop++) | ||
145 | entry->servers[loop].partition = ntohl(*bp++); | ||
146 | |||
147 | for (loop = 0; loop < 8; loop++) | ||
148 | entry->servers[loop].flags = ntohl(*bp++); | ||
149 | |||
150 | for (loop = 0; loop < 3; loop++) | ||
151 | entry->volume_ids[loop] = ntohl(*bp++); | ||
152 | |||
153 | entry->clone_id = ntohl(*bp++); | ||
154 | entry->flags = ntohl(*bp); | ||
155 | #endif | ||
156 | |||
157 | /* success */ | ||
158 | ret = 0; | ||
159 | goto out_unwait; | ||
160 | |||
161 | default: | ||
162 | BUG(); | ||
163 | } | ||
164 | |||
165 | abort: | ||
166 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
167 | rxrpc_call_abort(call, ret); | ||
168 | schedule(); | ||
169 | out_unwait: | ||
170 | set_current_state(TASK_RUNNING); | ||
171 | remove_wait_queue(&call->waitq, &myself); | ||
172 | rxrpc_put_call(call); | ||
173 | out_put_conn: | ||
174 | afs_server_release_fsconn(server, conn); | ||
175 | out: | ||
176 | kleave(""); | ||
177 | return ret; | ||
178 | } /* end afs_rxfs_get_root_volume() */ | ||
179 | #endif | ||
180 | |||
181 | /*****************************************************************************/ | ||
182 | /* | ||
183 | * get information about a volume | ||
184 | */ | ||
185 | #if 0 | ||
186 | int afs_rxfs_get_volume_info(struct afs_server *server, | ||
187 | const char *name, | ||
188 | struct afs_volume_info *vinfo) | ||
189 | { | ||
190 | struct rxrpc_connection *conn; | ||
191 | struct rxrpc_call *call; | ||
192 | struct kvec piov[3]; | ||
193 | size_t sent; | ||
194 | int ret; | ||
195 | u32 param[2], *bp, zero; | ||
196 | |||
197 | DECLARE_WAITQUEUE(myself, current); | ||
198 | |||
199 | _enter("%p,%s,%p", server, name, vinfo); | ||
200 | |||
201 | /* get hold of the fileserver connection */ | ||
202 | ret = afs_server_get_fsconn(server, &conn); | ||
203 | if (ret < 0) | ||
204 | goto out; | ||
205 | |||
206 | /* create a call through that connection */ | ||
207 | ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call); | ||
208 | if (ret < 0) { | ||
209 | printk("kAFS: Unable to create call: %d\n", ret); | ||
210 | goto out_put_conn; | ||
211 | } | ||
212 | call->app_opcode = FSGETVOLUMEINFO; | ||
213 | |||
214 | /* we want to get event notifications from the call */ | ||
215 | add_wait_queue(&call->waitq, &myself); | ||
216 | |||
217 | /* marshall the parameters */ | ||
218 | piov[1].iov_len = strlen(name); | ||
219 | piov[1].iov_base = (char *) name; | ||
220 | |||
221 | zero = 0; | ||
222 | piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3; | ||
223 | piov[2].iov_base = &zero; | ||
224 | |||
225 | param[0] = htonl(FSGETVOLUMEINFO); | ||
226 | param[1] = htonl(piov[1].iov_len); | ||
227 | |||
228 | piov[0].iov_len = sizeof(param); | ||
229 | piov[0].iov_base = param; | ||
230 | |||
231 | /* send the parameters to the server */ | ||
232 | ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
233 | 0, &sent); | ||
234 | if (ret < 0) | ||
235 | goto abort; | ||
236 | |||
237 | /* wait for the reply to completely arrive */ | ||
238 | bp = rxrpc_call_alloc_scratch(call, 64); | ||
239 | |||
240 | ret = rxrpc_call_read_data(call, bp, 64, | ||
241 | RXRPC_CALL_READ_BLOCK | | ||
242 | RXRPC_CALL_READ_ALL); | ||
243 | if (ret < 0) { | ||
244 | if (ret == -ECONNABORTED) { | ||
245 | ret = call->app_errno; | ||
246 | goto out_unwait; | ||
247 | } | ||
248 | goto abort; | ||
249 | } | ||
250 | |||
251 | /* unmarshall the reply */ | ||
252 | vinfo->vid = ntohl(*bp++); | ||
253 | vinfo->type = ntohl(*bp++); | ||
254 | |||
255 | vinfo->type_vids[0] = ntohl(*bp++); | ||
256 | vinfo->type_vids[1] = ntohl(*bp++); | ||
257 | vinfo->type_vids[2] = ntohl(*bp++); | ||
258 | vinfo->type_vids[3] = ntohl(*bp++); | ||
259 | vinfo->type_vids[4] = ntohl(*bp++); | ||
260 | |||
261 | vinfo->nservers = ntohl(*bp++); | ||
262 | vinfo->servers[0].addr.s_addr = *bp++; | ||
263 | vinfo->servers[1].addr.s_addr = *bp++; | ||
264 | vinfo->servers[2].addr.s_addr = *bp++; | ||
265 | vinfo->servers[3].addr.s_addr = *bp++; | ||
266 | vinfo->servers[4].addr.s_addr = *bp++; | ||
267 | vinfo->servers[5].addr.s_addr = *bp++; | ||
268 | vinfo->servers[6].addr.s_addr = *bp++; | ||
269 | vinfo->servers[7].addr.s_addr = *bp++; | ||
270 | |||
271 | ret = -EBADMSG; | ||
272 | if (vinfo->nservers > 8) | ||
273 | goto abort; | ||
274 | |||
275 | /* success */ | ||
276 | ret = 0; | ||
277 | |||
278 | out_unwait: | ||
279 | set_current_state(TASK_RUNNING); | ||
280 | remove_wait_queue(&call->waitq, &myself); | ||
281 | rxrpc_put_call(call); | ||
282 | out_put_conn: | ||
283 | afs_server_release_fsconn(server, conn); | ||
284 | out: | ||
285 | _leave(""); | ||
286 | return ret; | ||
287 | |||
288 | abort: | ||
289 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
290 | rxrpc_call_abort(call, ret); | ||
291 | schedule(); | ||
292 | goto out_unwait; | ||
293 | |||
294 | } /* end afs_rxfs_get_volume_info() */ | ||
295 | #endif | ||
296 | |||
297 | /*****************************************************************************/ | ||
298 | /* | ||
299 | * fetch the status information for a file | ||
300 | */ | ||
301 | int afs_rxfs_fetch_file_status(struct afs_server *server, | ||
302 | struct afs_vnode *vnode, | ||
303 | struct afs_volsync *volsync) | ||
304 | { | ||
305 | struct afs_server_callslot callslot; | ||
306 | struct rxrpc_call *call; | ||
307 | struct kvec piov[1]; | ||
308 | size_t sent; | ||
309 | int ret; | ||
310 | __be32 *bp; | ||
311 | |||
312 | DECLARE_WAITQUEUE(myself, current); | ||
313 | |||
314 | _enter("%p,{%u,%u,%u}", | ||
315 | server, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); | ||
316 | |||
317 | /* get hold of the fileserver connection */ | ||
318 | ret = afs_server_request_callslot(server, &callslot); | ||
319 | if (ret < 0) | ||
320 | goto out; | ||
321 | |||
322 | /* create a call through that connection */ | ||
323 | ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap, | ||
324 | &call); | ||
325 | if (ret < 0) { | ||
326 | printk("kAFS: Unable to create call: %d\n", ret); | ||
327 | goto out_put_conn; | ||
328 | } | ||
329 | call->app_opcode = FSFETCHSTATUS; | ||
330 | |||
331 | /* we want to get event notifications from the call */ | ||
332 | add_wait_queue(&call->waitq, &myself); | ||
333 | |||
334 | /* marshall the parameters */ | ||
335 | bp = rxrpc_call_alloc_scratch(call, 16); | ||
336 | bp[0] = htonl(FSFETCHSTATUS); | ||
337 | bp[1] = htonl(vnode->fid.vid); | ||
338 | bp[2] = htonl(vnode->fid.vnode); | ||
339 | bp[3] = htonl(vnode->fid.unique); | ||
340 | |||
341 | piov[0].iov_len = 16; | ||
342 | piov[0].iov_base = bp; | ||
343 | |||
344 | /* send the parameters to the server */ | ||
345 | ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
346 | 0, &sent); | ||
347 | if (ret < 0) | ||
348 | goto abort; | ||
349 | |||
350 | /* wait for the reply to completely arrive */ | ||
351 | bp = rxrpc_call_alloc_scratch(call, 120); | ||
352 | |||
353 | ret = rxrpc_call_read_data(call, bp, 120, | ||
354 | RXRPC_CALL_READ_BLOCK | | ||
355 | RXRPC_CALL_READ_ALL); | ||
356 | if (ret < 0) { | ||
357 | if (ret == -ECONNABORTED) { | ||
358 | ret = call->app_errno; | ||
359 | goto out_unwait; | ||
360 | } | ||
361 | goto abort; | ||
362 | } | ||
363 | |||
364 | /* unmarshall the reply */ | ||
365 | vnode->status.if_version = ntohl(*bp++); | ||
366 | vnode->status.type = ntohl(*bp++); | ||
367 | vnode->status.nlink = ntohl(*bp++); | ||
368 | vnode->status.size = ntohl(*bp++); | ||
369 | vnode->status.version = ntohl(*bp++); | ||
370 | vnode->status.author = ntohl(*bp++); | ||
371 | vnode->status.owner = ntohl(*bp++); | ||
372 | vnode->status.caller_access = ntohl(*bp++); | ||
373 | vnode->status.anon_access = ntohl(*bp++); | ||
374 | vnode->status.mode = ntohl(*bp++); | ||
375 | vnode->status.parent.vid = vnode->fid.vid; | ||
376 | vnode->status.parent.vnode = ntohl(*bp++); | ||
377 | vnode->status.parent.unique = ntohl(*bp++); | ||
378 | bp++; /* seg size */ | ||
379 | vnode->status.mtime_client = ntohl(*bp++); | ||
380 | vnode->status.mtime_server = ntohl(*bp++); | ||
381 | bp++; /* group */ | ||
382 | bp++; /* sync counter */ | ||
383 | vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32; | ||
384 | bp++; /* spare2 */ | ||
385 | bp++; /* spare3 */ | ||
386 | bp++; /* spare4 */ | ||
387 | |||
388 | vnode->cb_version = ntohl(*bp++); | ||
389 | vnode->cb_expiry = ntohl(*bp++); | ||
390 | vnode->cb_type = ntohl(*bp++); | ||
391 | |||
392 | if (volsync) { | ||
393 | volsync->creation = ntohl(*bp++); | ||
394 | bp++; /* spare2 */ | ||
395 | bp++; /* spare3 */ | ||
396 | bp++; /* spare4 */ | ||
397 | bp++; /* spare5 */ | ||
398 | bp++; /* spare6 */ | ||
399 | } | ||
400 | |||
401 | /* success */ | ||
402 | ret = 0; | ||
403 | |||
404 | out_unwait: | ||
405 | set_current_state(TASK_RUNNING); | ||
406 | remove_wait_queue(&call->waitq, &myself); | ||
407 | rxrpc_put_call(call); | ||
408 | out_put_conn: | ||
409 | afs_server_release_callslot(server, &callslot); | ||
410 | out: | ||
411 | _leave(""); | ||
412 | return ret; | ||
413 | |||
414 | abort: | ||
415 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
416 | rxrpc_call_abort(call, ret); | ||
417 | schedule(); | ||
418 | goto out_unwait; | ||
419 | } /* end afs_rxfs_fetch_file_status() */ | ||
420 | |||
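The FSFETCHSTATUS request marshalled above is simply four big-endian 32-bit words: the opcode followed by the three parts of the file ID. A userspace sketch of the same wire layout (illustrative only; the constant 132 matches the FSFETCHSTATUS define above):

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>

/* Build the 16-byte FSFETCHSTATUS request body. */
size_t marshal_fetch_status(uint32_t buf[4], uint32_t vid, uint32_t vnode,
			    uint32_t unique)
{
	buf[0] = htonl(132);	/* FSFETCHSTATUS opcode */
	buf[1] = htonl(vid);
	buf[2] = htonl(vnode);
	buf[3] = htonl(unique);
	return 4 * sizeof(uint32_t);
}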
421 | /*****************************************************************************/ | ||
422 | /* | ||
423 | * fetch the contents of a file or directory | ||
424 | */ | ||
425 | int afs_rxfs_fetch_file_data(struct afs_server *server, | ||
426 | struct afs_vnode *vnode, | ||
427 | struct afs_rxfs_fetch_descriptor *desc, | ||
428 | struct afs_volsync *volsync) | ||
429 | { | ||
430 | struct afs_server_callslot callslot; | ||
431 | struct rxrpc_call *call; | ||
432 | struct kvec piov[1]; | ||
433 | size_t sent; | ||
434 | int ret; | ||
435 | __be32 *bp; | ||
436 | |||
437 | DECLARE_WAITQUEUE(myself, current); | ||
438 | |||
439 | _enter("%p,{fid={%u,%u,%u},sz=%Zu,of=%lu}", | ||
440 | server, | ||
441 | desc->fid.vid, | ||
442 | desc->fid.vnode, | ||
443 | desc->fid.unique, | ||
444 | desc->size, | ||
445 | desc->offset); | ||
446 | |||
447 | /* get hold of the fileserver connection */ | ||
448 | ret = afs_server_request_callslot(server, &callslot); | ||
449 | if (ret < 0) | ||
450 | goto out; | ||
451 | |||
452 | /* create a call through that connection */ | ||
453 | ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap, &call); | ||
454 | if (ret < 0) { | ||
455 | printk("kAFS: Unable to create call: %d\n", ret); | ||
456 | goto out_put_conn; | ||
457 | } | ||
458 | call->app_opcode = FSFETCHDATA; | ||
459 | |||
460 | /* we want to get event notifications from the call */ | ||
461 | add_wait_queue(&call->waitq, &myself); | ||
462 | |||
463 | /* marshall the parameters */ | ||
464 | bp = rxrpc_call_alloc_scratch(call, 24); | ||
465 | bp[0] = htonl(FSFETCHDATA); | ||
466 | bp[1] = htonl(desc->fid.vid); | ||
467 | bp[2] = htonl(desc->fid.vnode); | ||
468 | bp[3] = htonl(desc->fid.unique); | ||
469 | bp[4] = htonl(desc->offset); | ||
470 | bp[5] = htonl(desc->size); | ||
471 | |||
472 | piov[0].iov_len = 24; | ||
473 | piov[0].iov_base = bp; | ||
474 | |||
475 | /* send the parameters to the server */ | ||
476 | ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
477 | 0, &sent); | ||
478 | if (ret < 0) | ||
479 | goto abort; | ||
480 | |||
481 | /* wait for the data count to arrive */ | ||
482 | ret = rxrpc_call_read_data(call, bp, 4, RXRPC_CALL_READ_BLOCK); | ||
483 | if (ret < 0) | ||
484 | goto read_failed; | ||
485 | |||
486 | desc->actual = ntohl(bp[0]); | ||
487 | if (desc->actual != desc->size) { | ||
488 | ret = -EBADMSG; | ||
489 | goto abort; | ||
490 | } | ||
491 | |||
492 | /* call the app to read the actual data */ | ||
493 | rxrpc_call_reset_scratch(call); | ||
494 | |||
495 | ret = rxrpc_call_read_data(call, desc->buffer, desc->actual, | ||
496 | RXRPC_CALL_READ_BLOCK); | ||
497 | if (ret < 0) | ||
498 | goto read_failed; | ||
499 | |||
500 | /* wait for the rest of the reply to completely arrive */ | ||
501 | rxrpc_call_reset_scratch(call); | ||
502 | bp = rxrpc_call_alloc_scratch(call, 120); | ||
503 | |||
504 | ret = rxrpc_call_read_data(call, bp, 120, | ||
505 | RXRPC_CALL_READ_BLOCK | | ||
506 | RXRPC_CALL_READ_ALL); | ||
507 | if (ret < 0) | ||
508 | goto read_failed; | ||
509 | |||
510 | /* unmarshall the reply */ | ||
511 | vnode->status.if_version = ntohl(*bp++); | ||
512 | vnode->status.type = ntohl(*bp++); | ||
513 | vnode->status.nlink = ntohl(*bp++); | ||
514 | vnode->status.size = ntohl(*bp++); | ||
515 | vnode->status.version = ntohl(*bp++); | ||
516 | vnode->status.author = ntohl(*bp++); | ||
517 | vnode->status.owner = ntohl(*bp++); | ||
518 | vnode->status.caller_access = ntohl(*bp++); | ||
519 | vnode->status.anon_access = ntohl(*bp++); | ||
520 | vnode->status.mode = ntohl(*bp++); | ||
521 | vnode->status.parent.vid = desc->fid.vid; | ||
522 | vnode->status.parent.vnode = ntohl(*bp++); | ||
523 | vnode->status.parent.unique = ntohl(*bp++); | ||
524 | bp++; /* seg size */ | ||
525 | vnode->status.mtime_client = ntohl(*bp++); | ||
526 | vnode->status.mtime_server = ntohl(*bp++); | ||
527 | bp++; /* group */ | ||
528 | bp++; /* sync counter */ | ||
529 | vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32; | ||
530 | bp++; /* spare2 */ | ||
531 | bp++; /* spare3 */ | ||
532 | bp++; /* spare4 */ | ||
533 | |||
534 | vnode->cb_version = ntohl(*bp++); | ||
535 | vnode->cb_expiry = ntohl(*bp++); | ||
536 | vnode->cb_type = ntohl(*bp++); | ||
537 | |||
538 | if (volsync) { | ||
539 | volsync->creation = ntohl(*bp++); | ||
540 | bp++; /* spare2 */ | ||
541 | bp++; /* spare3 */ | ||
542 | bp++; /* spare4 */ | ||
543 | bp++; /* spare5 */ | ||
544 | bp++; /* spare6 */ | ||
545 | } | ||
546 | |||
547 | /* success */ | ||
548 | ret = 0; | ||
549 | |||
550 | out_unwait: | ||
551 | set_current_state(TASK_RUNNING); | ||
552 | remove_wait_queue(&call->waitq, &myself); | ||
553 | rxrpc_put_call(call); | ||
554 | out_put_conn: | ||
555 | afs_server_release_callslot(server, &callslot); | ||
556 | out: | ||
557 | _leave(" = %d", ret); | ||
558 | return ret; | ||
559 | |||
560 | read_failed: | ||
561 | if (ret == -ECONNABORTED) { | ||
562 | ret = call->app_errno; | ||
563 | goto out_unwait; | ||
564 | } | ||
565 | |||
566 | abort: | ||
567 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
568 | rxrpc_call_abort(call, ret); | ||
569 | schedule(); | ||
570 | goto out_unwait; | ||
571 | |||
572 | } /* end afs_rxfs_fetch_file_data() */ | ||
573 | |||
574 | /*****************************************************************************/ | ||
575 | /* | ||
576 | * ask the AFS fileserver to discard a callback request on a file | ||
577 | */ | ||
578 | int afs_rxfs_give_up_callback(struct afs_server *server, | ||
579 | struct afs_vnode *vnode) | ||
580 | { | ||
581 | struct afs_server_callslot callslot; | ||
582 | struct rxrpc_call *call; | ||
583 | struct kvec piov[1]; | ||
584 | size_t sent; | ||
585 | int ret; | ||
586 | __be32 *bp; | ||
587 | |||
588 | DECLARE_WAITQUEUE(myself, current); | ||
589 | |||
590 | _enter("%p,{%u,%u,%u}", | ||
591 | server, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); | ||
592 | |||
593 | /* get hold of the fileserver connection */ | ||
594 | ret = afs_server_request_callslot(server, &callslot); | ||
595 | if (ret < 0) | ||
596 | goto out; | ||
597 | |||
598 | /* create a call through that connection */ | ||
599 | ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap, &call); | ||
600 | if (ret < 0) { | ||
601 | printk("kAFS: Unable to create call: %d\n", ret); | ||
602 | goto out_put_conn; | ||
603 | } | ||
604 | call->app_opcode = FSGIVEUPCALLBACKS; | ||
605 | |||
606 | /* we want to get event notifications from the call */ | ||
607 | add_wait_queue(&call->waitq, &myself); | ||
608 | |||
609 | /* marshall the parameters */ | ||
610 | bp = rxrpc_call_alloc_scratch(call, (1 + 4 + 4) * 4); | ||
611 | |||
612 | piov[0].iov_len = (1 + 4 + 4) * 4; | ||
613 | piov[0].iov_base = bp; | ||
614 | |||
615 | *bp++ = htonl(FSGIVEUPCALLBACKS); | ||
616 | *bp++ = htonl(1); | ||
617 | *bp++ = htonl(vnode->fid.vid); | ||
618 | *bp++ = htonl(vnode->fid.vnode); | ||
619 | *bp++ = htonl(vnode->fid.unique); | ||
620 | *bp++ = htonl(1); | ||
621 | *bp++ = htonl(vnode->cb_version); | ||
622 | *bp++ = htonl(vnode->cb_expiry); | ||
623 | *bp++ = htonl(vnode->cb_type); | ||
624 | |||
625 | /* send the parameters to the server */ | ||
626 | ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
627 | 0, &sent); | ||
628 | if (ret < 0) | ||
629 | goto abort; | ||
630 | |||
631 | /* wait for the reply to completely arrive */ | ||
632 | for (;;) { | ||
633 | set_current_state(TASK_INTERRUPTIBLE); | ||
634 | if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY || | ||
635 | signal_pending(current)) | ||
636 | break; | ||
637 | schedule(); | ||
638 | } | ||
639 | set_current_state(TASK_RUNNING); | ||
640 | |||
641 | ret = -EINTR; | ||
642 | if (signal_pending(current)) | ||
643 | goto abort; | ||
644 | |||
645 | switch (call->app_call_state) { | ||
646 | case RXRPC_CSTATE_ERROR: | ||
647 | ret = call->app_errno; | ||
648 | goto out_unwait; | ||
649 | |||
650 | case RXRPC_CSTATE_CLNT_GOT_REPLY: | ||
651 | ret = 0; | ||
652 | goto out_unwait; | ||
653 | |||
654 | default: | ||
655 | BUG(); | ||
656 | } | ||
657 | |||
658 | out_unwait: | ||
659 | set_current_state(TASK_RUNNING); | ||
660 | remove_wait_queue(&call->waitq, &myself); | ||
661 | rxrpc_put_call(call); | ||
662 | out_put_conn: | ||
663 | afs_server_release_callslot(server, &callslot); | ||
664 | out: | ||
665 | _leave(""); | ||
666 | return ret; | ||
667 | |||
668 | abort: | ||
669 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
670 | rxrpc_call_abort(call, ret); | ||
671 | schedule(); | ||
672 | goto out_unwait; | ||
673 | } /* end afs_rxfs_give_up_callback() */ | ||
674 | |||
675 | /*****************************************************************************/ | ||
676 | /* | ||
677 | * look a filename up in a directory | ||
678 | * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2 | ||
679 | */ | ||
680 | #if 0 | ||
681 | int afs_rxfs_lookup(struct afs_server *server, | ||
682 | struct afs_vnode *dir, | ||
683 | const char *filename, | ||
684 | struct afs_vnode *vnode, | ||
685 | struct afs_volsync *volsync) | ||
686 | { | ||
687 | struct rxrpc_connection *conn; | ||
688 | struct rxrpc_call *call; | ||
689 | struct kvec piov[3]; | ||
690 | size_t sent; | ||
691 | int ret; | ||
692 | u32 *bp, zero; | ||
693 | |||
694 | DECLARE_WAITQUEUE(myself, current); | ||
695 | |||
696 | kenter("%p,{%u,%u,%u},%s", | ||
697 | server, fid->vid, fid->vnode, fid->unique, filename); | ||
698 | |||
699 | /* get hold of the fileserver connection */ | ||
700 | ret = afs_server_get_fsconn(server, &conn); | ||
701 | if (ret < 0) | ||
702 | goto out; | ||
703 | |||
704 | /* create a call through that connection */ | ||
705 | ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call); | ||
706 | if (ret < 0) { | ||
707 | printk("kAFS: Unable to create call: %d\n", ret); | ||
708 | goto out_put_conn; | ||
709 | } | ||
710 | call->app_opcode = FSLOOKUP; | ||
711 | |||
712 | /* we want to get event notifications from the call */ | ||
713 | add_wait_queue(&call->waitq, &myself); | ||
714 | |||
715 | /* marshall the parameters */ | ||
716 | bp = rxrpc_call_alloc_scratch(call, 20); | ||
717 | |||
718 | zero = 0; | ||
719 | |||
720 | piov[0].iov_len = 20; | ||
721 | piov[0].iov_base = bp; | ||
722 | piov[1].iov_len = strlen(filename); | ||
723 | piov[1].iov_base = (char *) filename; | ||
724 | piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3; | ||
725 | piov[2].iov_base = &zero; | ||
726 | |||
727 | *bp++ = htonl(FSLOOKUP); | ||
728 | *bp++ = htonl(dirfid->vid); | ||
729 | *bp++ = htonl(dirfid->vnode); | ||
730 | *bp++ = htonl(dirfid->unique); | ||
731 | *bp++ = htonl(piov[1].iov_len); | ||
732 | |||
733 | /* send the parameters to the server */ | ||
734 | ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
735 | 0, &sent); | ||
736 | if (ret < 0) | ||
737 | goto abort; | ||
738 | |||
739 | /* wait for the reply to completely arrive */ | ||
740 | bp = rxrpc_call_alloc_scratch(call, 220); | ||
741 | |||
742 | ret = rxrpc_call_read_data(call, bp, 220, | ||
743 | RXRPC_CALL_READ_BLOCK | | ||
744 | RXRPC_CALL_READ_ALL); | ||
745 | if (ret < 0) { | ||
746 | if (ret == -ECONNABORTED) { | ||
747 | ret = call->app_errno; | ||
748 | goto out_unwait; | ||
749 | } | ||
750 | goto abort; | ||
751 | } | ||
752 | |||
753 | /* unmarshall the reply */ | ||
754 | fid->vid = ntohl(*bp++); | ||
755 | fid->vnode = ntohl(*bp++); | ||
756 | fid->unique = ntohl(*bp++); | ||
757 | |||
758 | vnode->status.if_version = ntohl(*bp++); | ||
759 | vnode->status.type = ntohl(*bp++); | ||
760 | vnode->status.nlink = ntohl(*bp++); | ||
761 | vnode->status.size = ntohl(*bp++); | ||
762 | vnode->status.version = ntohl(*bp++); | ||
763 | vnode->status.author = ntohl(*bp++); | ||
764 | vnode->status.owner = ntohl(*bp++); | ||
765 | vnode->status.caller_access = ntohl(*bp++); | ||
766 | vnode->status.anon_access = ntohl(*bp++); | ||
767 | vnode->status.mode = ntohl(*bp++); | ||
768 | vnode->status.parent.vid = dirfid->vid; | ||
769 | vnode->status.parent.vnode = ntohl(*bp++); | ||
770 | vnode->status.parent.unique = ntohl(*bp++); | ||
771 | bp++; /* seg size */ | ||
772 | vnode->status.mtime_client = ntohl(*bp++); | ||
773 | vnode->status.mtime_server = ntohl(*bp++); | ||
774 | bp++; /* group */ | ||
775 | bp++; /* sync counter */ | ||
776 | vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32; | ||
777 | bp++; /* spare2 */ | ||
778 | bp++; /* spare3 */ | ||
779 | bp++; /* spare4 */ | ||
780 | |||
781 | dir->status.if_version = ntohl(*bp++); | ||
782 | dir->status.type = ntohl(*bp++); | ||
783 | dir->status.nlink = ntohl(*bp++); | ||
784 | dir->status.size = ntohl(*bp++); | ||
785 | dir->status.version = ntohl(*bp++); | ||
786 | dir->status.author = ntohl(*bp++); | ||
787 | dir->status.owner = ntohl(*bp++); | ||
788 | dir->status.caller_access = ntohl(*bp++); | ||
789 | dir->status.anon_access = ntohl(*bp++); | ||
790 | dir->status.mode = ntohl(*bp++); | ||
791 | dir->status.parent.vid = dir->fid.vid; | ||
792 | dir->status.parent.vnode = ntohl(*bp++); | ||
793 | dir->status.parent.unique = ntohl(*bp++); | ||
794 | bp++; /* seg size */ | ||
795 | dir->status.mtime_client = ntohl(*bp++); | ||
796 | dir->status.mtime_server = ntohl(*bp++); | ||
797 | bp++; /* group */ | ||
798 | bp++; /* sync counter */ | ||
799 | dir->status.version |= ((unsigned long long) ntohl(*bp++)) << 32; | ||
800 | bp++; /* spare2 */ | ||
801 | bp++; /* spare3 */ | ||
802 | bp++; /* spare4 */ | ||
803 | |||
804 | /* the callback promise covers the vnode just looked up */ | ||
805 | vnode->cb_version = ntohl(*bp++); | ||
806 | vnode->cb_expiry = ntohl(*bp++); | ||
807 | vnode->cb_type = ntohl(*bp++); | ||
808 | |||
809 | if (volsync) { | ||
810 | volsync->creation = ntohl(*bp++); | ||
811 | bp++; /* spare2 */ | ||
812 | bp++; /* spare3 */ | ||
813 | bp++; /* spare4 */ | ||
814 | bp++; /* spare5 */ | ||
815 | bp++; /* spare6 */ | ||
816 | } | ||
817 | |||
818 | /* success */ | ||
819 | ret = 0; | ||
820 | |||
821 | out_unwait: | ||
822 | set_current_state(TASK_RUNNING); | ||
823 | remove_wait_queue(&call->waitq, &myself); | ||
824 | rxrpc_put_call(call); | ||
825 | out_put_conn: | ||
826 | afs_server_release_fsconn(server, conn); | ||
827 | out: | ||
828 | kleave(""); | ||
829 | return ret; | ||
830 | |||
831 | abort: | ||
832 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
833 | rxrpc_call_abort(call, ret); | ||
834 | schedule(); | ||
835 | goto out_unwait; | ||
836 | } /* end afs_rxfs_lookup() */ | ||
837 | #endif | ||
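The request marshalling in the disabled lookup above is plain Rx/XDR: a fixed 20-byte header (the opcode, the three words of the directory fid, and the name length), the filename itself, and then zero to three zero bytes so the name ends on a 4-byte boundary. A minimal stand-alone sketch of that padding rule, separate from the driver and purely for illustration:

#include <stdio.h>
#include <string.h>

/* number of zero pad bytes needed to round a name out to a 4-byte
 * boundary; mirrors piov[2].iov_len in afs_rxfs_lookup() above */
static size_t xdr_pad(size_t len)
{
	return (4 - (len & 3)) & 3;
}

int main(void)
{
	const char *names[] = { "a", "ab", "abc", "abcd", "abcde" };
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		size_t len = strlen(names[i]);
		printf("%-6s len=%zu pad=%zu total=%zu\n",
		       names[i], len, xdr_pad(len), len + xdr_pad(len));
	}
	return 0;
}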
diff --git a/fs/afs/fsclient.h b/fs/afs/fsclient.h new file mode 100644 index 000000000000..8ba3e749ee3c --- /dev/null +++ b/fs/afs/fsclient.h | |||
@@ -0,0 +1,54 @@ | |||
1 | /* fsclient.h: AFS File Server client stub declarations | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_AFS_FSCLIENT_H | ||
13 | #define _LINUX_AFS_FSCLIENT_H | ||
14 | |||
15 | #include "server.h" | ||
16 | |||
17 | extern int afs_rxfs_get_volume_info(struct afs_server *server, | ||
18 | const char *name, | ||
19 | struct afs_volume_info *vinfo); | ||
20 | |||
21 | extern int afs_rxfs_fetch_file_status(struct afs_server *server, | ||
22 | struct afs_vnode *vnode, | ||
23 | struct afs_volsync *volsync); | ||
24 | |||
25 | struct afs_rxfs_fetch_descriptor { | ||
26 | struct afs_fid fid; /* file ID to fetch */ | ||
27 | size_t size; /* total number of bytes to fetch */ | ||
28 | off_t offset; /* offset in file to start from */ | ||
29 | void *buffer; /* read buffer */ | ||
30 | size_t actual; /* actual size sent back by server */ | ||
31 | }; | ||
32 | |||
33 | extern int afs_rxfs_fetch_file_data(struct afs_server *server, | ||
34 | struct afs_vnode *vnode, | ||
35 | struct afs_rxfs_fetch_descriptor *desc, | ||
36 | struct afs_volsync *volsync); | ||
37 | |||
38 | extern int afs_rxfs_give_up_callback(struct afs_server *server, | ||
39 | struct afs_vnode *vnode); | ||
40 | |||
41 | /* this doesn't appear to work in OpenAFS server */ | ||
42 | extern int afs_rxfs_lookup(struct afs_server *server, | ||
43 | struct afs_vnode *dir, | ||
44 | const char *filename, | ||
45 | struct afs_vnode *vnode, | ||
46 | struct afs_volsync *volsync); | ||
47 | |||
48 | /* this is apparently mis-implemented in OpenAFS server */ | ||
49 | extern int afs_rxfs_get_root_volume(struct afs_server *server, | ||
50 | char *buf, | ||
51 | size_t *buflen); | ||
52 | |||
53 | |||
54 | #endif /* _LINUX_AFS_FSCLIENT_H */ | ||
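struct afs_rxfs_fetch_descriptor is an in/out parameter block: the caller fills in fid, offset, size and the buffer to read into, and afs_rxfs_fetch_file_data() reports in actual how much the server really returned, which may be short at end of file. A small user-space sketch of that calling convention, with simplified copies of the structures and a stubbed-out fetch standing in for the real RPC (the stub and its 10-byte "file" are invented for illustration):

#include <stdio.h>
#include <string.h>

/* simplified copies of the kernel structures, for illustration only */
struct afs_fid {
	unsigned vid, vnode, unique;
};

struct afs_rxfs_fetch_descriptor {
	struct afs_fid fid;	/* file ID to fetch */
	size_t size;		/* total number of bytes to fetch */
	long offset;		/* offset in file to start from */
	void *buffer;		/* read buffer */
	size_t actual;		/* actual size sent back by server */
};

/* stand-in for afs_rxfs_fetch_file_data(): pretends the file is 10 bytes */
static int fake_fetch(struct afs_rxfs_fetch_descriptor *desc)
{
	static const char data[] = "0123456789";
	size_t filesize = 10, n = 0;

	if ((size_t) desc->offset < filesize)
		n = filesize - desc->offset;
	if (n > desc->size)
		n = desc->size;
	memcpy(desc->buffer, data + desc->offset, n);
	desc->actual = n;
	return 0;
}

int main(void)
{
	char buf[16];
	struct afs_rxfs_fetch_descriptor desc = {
		.fid	= { .vid = 1, .vnode = 2, .unique = 3 },
		.size	= sizeof(buf),
		.offset	= 6,
		.buffer	= buf,
	};

	fake_fetch(&desc);
	printf("asked for %zu bytes, got %zu back\n", desc.size, desc.actual);
	return 0;
}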
diff --git a/fs/afs/inode.c b/fs/afs/inode.c new file mode 100644 index 000000000000..c476fde33fbc --- /dev/null +++ b/fs/afs/inode.c | |||
@@ -0,0 +1,287 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2002 Red Hat, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software may be freely redistributed under the terms of the | ||
5 | * GNU General Public License. | ||
6 | * | ||
7 | * You should have received a copy of the GNU General Public License | ||
8 | * along with this program; if not, write to the Free Software | ||
9 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
10 | * | ||
11 | * Authors: David Woodhouse <dwmw2@cambridge.redhat.com> | ||
12 | * David Howells <dhowells@redhat.com> | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/fs.h> | ||
22 | #include <linux/pagemap.h> | ||
23 | #include "volume.h" | ||
24 | #include "vnode.h" | ||
25 | #include "super.h" | ||
26 | #include "internal.h" | ||
27 | |||
28 | struct afs_iget_data { | ||
29 | struct afs_fid fid; | ||
30 | struct afs_volume *volume; /* volume on which resides */ | ||
31 | }; | ||
32 | |||
33 | /*****************************************************************************/ | ||
34 | /* | ||
35 | * map the AFS file status to the inode member variables | ||
36 | */ | ||
37 | static int afs_inode_map_status(struct afs_vnode *vnode) | ||
38 | { | ||
39 | struct inode *inode = AFS_VNODE_TO_I(vnode); | ||
40 | |||
41 | _debug("FS: ft=%d lk=%d sz=%Zu ver=%Lu mod=%hu", | ||
42 | vnode->status.type, | ||
43 | vnode->status.nlink, | ||
44 | vnode->status.size, | ||
45 | vnode->status.version, | ||
46 | vnode->status.mode); | ||
47 | |||
48 | switch (vnode->status.type) { | ||
49 | case AFS_FTYPE_FILE: | ||
50 | inode->i_mode = S_IFREG | vnode->status.mode; | ||
51 | inode->i_op = &afs_file_inode_operations; | ||
52 | inode->i_fop = &afs_file_file_operations; | ||
53 | break; | ||
54 | case AFS_FTYPE_DIR: | ||
55 | inode->i_mode = S_IFDIR | vnode->status.mode; | ||
56 | inode->i_op = &afs_dir_inode_operations; | ||
57 | inode->i_fop = &afs_dir_file_operations; | ||
58 | break; | ||
59 | case AFS_FTYPE_SYMLINK: | ||
60 | inode->i_mode = S_IFLNK | vnode->status.mode; | ||
61 | inode->i_op = &page_symlink_inode_operations; | ||
62 | break; | ||
63 | default: | ||
64 | printk("kAFS: AFS vnode with undefined type\n"); | ||
65 | return -EBADMSG; | ||
66 | } | ||
67 | |||
68 | inode->i_nlink = vnode->status.nlink; | ||
69 | inode->i_uid = vnode->status.owner; | ||
70 | inode->i_gid = 0; | ||
71 | inode->i_size = vnode->status.size; | ||
72 | inode->i_ctime.tv_sec = vnode->status.mtime_server; | ||
73 | inode->i_ctime.tv_nsec = 0; | ||
74 | inode->i_atime = inode->i_mtime = inode->i_ctime; | ||
75 | inode->i_blksize = PAGE_CACHE_SIZE; | ||
76 | inode->i_blocks = 0; | ||
77 | inode->i_version = vnode->fid.unique; | ||
78 | inode->i_mapping->a_ops = &afs_fs_aops; | ||
79 | |||
80 | /* check to see whether a symbolic link is really a mountpoint */ | ||
81 | if (vnode->status.type == AFS_FTYPE_SYMLINK) { | ||
82 | afs_mntpt_check_symlink(vnode); | ||
83 | |||
84 | if (vnode->flags & AFS_VNODE_MOUNTPOINT) { | ||
85 | inode->i_mode = S_IFDIR | vnode->status.mode; | ||
86 | inode->i_op = &afs_mntpt_inode_operations; | ||
87 | inode->i_fop = &afs_mntpt_file_operations; | ||
88 | } | ||
89 | } | ||
90 | |||
91 | return 0; | ||
92 | } /* end afs_inode_map_status() */ | ||
93 | |||
94 | /*****************************************************************************/ | ||
95 | /* | ||
96 | * attempt to fetch the status of an inode, coalescing multiple simultaneous | ||
97 | * fetches | ||
98 | */ | ||
99 | static int afs_inode_fetch_status(struct inode *inode) | ||
100 | { | ||
101 | struct afs_vnode *vnode; | ||
102 | int ret; | ||
103 | |||
104 | vnode = AFS_FS_I(inode); | ||
105 | |||
106 | ret = afs_vnode_fetch_status(vnode); | ||
107 | |||
108 | if (ret == 0) | ||
109 | ret = afs_inode_map_status(vnode); | ||
110 | |||
111 | return ret; | ||
112 | |||
113 | } /* end afs_inode_fetch_status() */ | ||
114 | |||
115 | /*****************************************************************************/ | ||
116 | /* | ||
117 | * iget5() comparator | ||
118 | */ | ||
119 | static int afs_iget5_test(struct inode *inode, void *opaque) | ||
120 | { | ||
121 | struct afs_iget_data *data = opaque; | ||
122 | |||
123 | return inode->i_ino == data->fid.vnode && | ||
124 | inode->i_version == data->fid.unique; | ||
125 | } /* end afs_iget5_test() */ | ||
126 | |||
127 | /*****************************************************************************/ | ||
128 | /* | ||
129 | * iget5() inode initialiser | ||
130 | */ | ||
131 | static int afs_iget5_set(struct inode *inode, void *opaque) | ||
132 | { | ||
133 | struct afs_iget_data *data = opaque; | ||
134 | struct afs_vnode *vnode = AFS_FS_I(inode); | ||
135 | |||
136 | inode->i_ino = data->fid.vnode; | ||
137 | inode->i_version = data->fid.unique; | ||
138 | vnode->fid = data->fid; | ||
139 | vnode->volume = data->volume; | ||
140 | |||
141 | return 0; | ||
142 | } /* end afs_iget5_set() */ | ||
143 | |||
144 | /*****************************************************************************/ | ||
145 | /* | ||
146 | * inode retrieval | ||
147 | */ | ||
148 | inline int afs_iget(struct super_block *sb, struct afs_fid *fid, | ||
149 | struct inode **_inode) | ||
150 | { | ||
151 | struct afs_iget_data data = { .fid = *fid }; | ||
152 | struct afs_super_info *as; | ||
153 | struct afs_vnode *vnode; | ||
154 | struct inode *inode; | ||
155 | int ret; | ||
156 | |||
157 | _enter(",{%u,%u,%u},,", fid->vid, fid->vnode, fid->unique); | ||
158 | |||
159 | as = sb->s_fs_info; | ||
160 | data.volume = as->volume; | ||
161 | |||
162 | inode = iget5_locked(sb, fid->vnode, afs_iget5_test, afs_iget5_set, | ||
163 | &data); | ||
164 | if (!inode) { | ||
165 | _leave(" = -ENOMEM"); | ||
166 | return -ENOMEM; | ||
167 | } | ||
168 | |||
169 | vnode = AFS_FS_I(inode); | ||
170 | |||
171 | /* deal with an existing inode */ | ||
172 | if (!(inode->i_state & I_NEW)) { | ||
173 | ret = afs_vnode_fetch_status(vnode); | ||
174 | if (ret == 0) | ||
175 | *_inode = inode; | ||
176 | else | ||
177 | iput(inode); | ||
178 | _leave(" = %d", ret); | ||
179 | return ret; | ||
180 | } | ||
181 | |||
182 | #ifdef AFS_CACHING_SUPPORT | ||
183 | /* set up caching before reading the status, as fetch-status reads the | ||
184 | * first page of symlinks to see if they're really mntpts */ | ||
185 | cachefs_acquire_cookie(vnode->volume->cache, | ||
186 | NULL, | ||
187 | vnode, | ||
188 | &vnode->cache); | ||
189 | #endif | ||
190 | |||
191 | /* okay... it's a new inode */ | ||
192 | inode->i_flags |= S_NOATIME; | ||
193 | vnode->flags |= AFS_VNODE_CHANGED; | ||
194 | ret = afs_inode_fetch_status(inode); | ||
195 | if (ret < 0) | ||
196 | goto bad_inode; | ||
197 | |||
198 | /* success */ | ||
199 | unlock_new_inode(inode); | ||
200 | |||
201 | *_inode = inode; | ||
202 | _leave(" = 0 [CB { v=%u x=%lu t=%u }]", | ||
203 | vnode->cb_version, | ||
204 | vnode->cb_timeout.timo_jif, | ||
205 | vnode->cb_type); | ||
206 | return 0; | ||
207 | |||
208 | /* failure */ | ||
209 | bad_inode: | ||
210 | make_bad_inode(inode); | ||
211 | unlock_new_inode(inode); | ||
212 | iput(inode); | ||
213 | |||
214 | _leave(" = %d [bad]", ret); | ||
215 | return ret; | ||
216 | } /* end afs_iget() */ | ||
217 | |||
218 | /*****************************************************************************/ | ||
219 | /* | ||
220 | * read the attributes of an inode | ||
221 | */ | ||
222 | int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, | ||
223 | struct kstat *stat) | ||
224 | { | ||
225 | struct afs_vnode *vnode; | ||
226 | struct inode *inode; | ||
227 | int ret; | ||
228 | |||
229 | inode = dentry->d_inode; | ||
230 | |||
231 | _enter("{ ino=%lu v=%lu }", inode->i_ino, inode->i_version); | ||
232 | |||
233 | vnode = AFS_FS_I(inode); | ||
234 | |||
235 | ret = afs_inode_fetch_status(inode); | ||
236 | if (ret == -ENOENT) { | ||
237 | _leave(" = %d [%d %p]", | ||
238 | ret, atomic_read(&dentry->d_count), dentry->d_inode); | ||
239 | return ret; | ||
240 | } | ||
241 | else if (ret < 0) { | ||
242 | make_bad_inode(inode); | ||
243 | _leave(" = %d", ret); | ||
244 | return ret; | ||
245 | } | ||
246 | |||
247 | /* transfer attributes from the inode structure to the stat | ||
248 | * structure */ | ||
249 | generic_fillattr(inode, stat); | ||
250 | |||
251 | _leave(" = 0 CB { v=%u x=%u t=%u }", | ||
252 | vnode->cb_version, | ||
253 | vnode->cb_expiry, | ||
254 | vnode->cb_type); | ||
255 | |||
256 | return 0; | ||
257 | } /* end afs_inode_getattr() */ | ||
258 | |||
259 | /*****************************************************************************/ | ||
260 | /* | ||
261 | * clear an AFS inode | ||
262 | */ | ||
263 | void afs_clear_inode(struct inode *inode) | ||
264 | { | ||
265 | struct afs_vnode *vnode; | ||
266 | |||
267 | vnode = AFS_FS_I(inode); | ||
268 | |||
269 | _enter("ino=%lu { vn=%08x v=%u x=%u t=%u }", | ||
270 | inode->i_ino, | ||
271 | vnode->fid.vnode, | ||
272 | vnode->cb_version, | ||
273 | vnode->cb_expiry, | ||
274 | vnode->cb_type | ||
275 | ); | ||
276 | |||
277 | BUG_ON(inode->i_ino != vnode->fid.vnode); | ||
278 | |||
279 | afs_vnode_give_up_callback(vnode); | ||
280 | |||
281 | #ifdef AFS_CACHING_SUPPORT | ||
282 | cachefs_relinquish_cookie(vnode->cache, 0); | ||
283 | vnode->cache = NULL; | ||
284 | #endif | ||
285 | |||
286 | _leave(""); | ||
287 | } /* end afs_clear_inode() */ | ||
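afs_iget() keys the inode cache on the AFS vnode number (kept in i_ino) and tells apart reused vnode numbers by the fid's uniquifier, which it stores in i_version; that is what the afs_iget5_test()/afs_iget5_set() pair above implements. A stand-alone sketch of the matching predicate; the fid values are made up for illustration:

#include <stdio.h>

struct afs_fid {
	unsigned vid, vnode, unique;
};

/* mirrors afs_iget5_test(): an inode matches a fid if both the vnode
 * number (kept in i_ino) and the uniquifier (kept in i_version) match */
static int inode_matches(unsigned long i_ino, unsigned long i_version,
			 const struct afs_fid *fid)
{
	return i_ino == fid->vnode && i_version == fid->unique;
}

int main(void)
{
	struct afs_fid live  = { .vid = 7, .vnode = 100, .unique = 4 };
	struct afs_fid stale = { .vid = 7, .vnode = 100, .unique = 3 };

	/* an inode that was instantiated from 'live' */
	unsigned long i_ino = live.vnode, i_version = live.unique;

	printf("live fid matches:  %d\n", inode_matches(i_ino, i_version, &live));
	printf("stale fid matches: %d\n", inode_matches(i_ino, i_version, &stale));
	return 0;
}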
diff --git a/fs/afs/internal.h b/fs/afs/internal.h new file mode 100644 index 000000000000..f09860b45c1a --- /dev/null +++ b/fs/afs/internal.h | |||
@@ -0,0 +1,140 @@ | |||
1 | /* internal.h: internal AFS stuff | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef AFS_INTERNAL_H | ||
13 | #define AFS_INTERNAL_H | ||
14 | |||
15 | #include <linux/compiler.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/pagemap.h> | ||
19 | |||
20 | /* | ||
21 | * debug tracing | ||
22 | */ | ||
23 | #define kenter(FMT, a...) printk("==> %s("FMT")\n",__FUNCTION__ , ## a) | ||
24 | #define kleave(FMT, a...) printk("<== %s()"FMT"\n",__FUNCTION__ , ## a) | ||
25 | #define kdebug(FMT, a...) printk(FMT"\n" , ## a) | ||
26 | #define kproto(FMT, a...) printk("### "FMT"\n" , ## a) | ||
27 | #define knet(FMT, a...) printk(FMT"\n" , ## a) | ||
28 | |||
29 | #ifdef __KDEBUG | ||
30 | #define _enter(FMT, a...) kenter(FMT , ## a) | ||
31 | #define _leave(FMT, a...) kleave(FMT , ## a) | ||
32 | #define _debug(FMT, a...) kdebug(FMT , ## a) | ||
33 | #define _proto(FMT, a...) kproto(FMT , ## a) | ||
34 | #define _net(FMT, a...) knet(FMT , ## a) | ||
35 | #else | ||
36 | #define _enter(FMT, a...) do { } while(0) | ||
37 | #define _leave(FMT, a...) do { } while(0) | ||
38 | #define _debug(FMT, a...) do { } while(0) | ||
39 | #define _proto(FMT, a...) do { } while(0) | ||
40 | #define _net(FMT, a...) do { } while(0) | ||
41 | #endif | ||
42 | |||
43 | static inline void afs_discard_my_signals(void) | ||
44 | { | ||
45 | while (signal_pending(current)) { | ||
46 | siginfo_t sinfo; | ||
47 | |||
48 | spin_lock_irq(&current->sighand->siglock); | ||
49 | dequeue_signal(current, &current->blocked, &sinfo); | ||
50 | spin_unlock_irq(&current->sighand->siglock); | ||
51 | } | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * cell.c | ||
56 | */ | ||
57 | extern struct rw_semaphore afs_proc_cells_sem; | ||
58 | extern struct list_head afs_proc_cells; | ||
59 | #ifdef AFS_CACHING_SUPPORT | ||
60 | extern struct cachefs_index_def afs_cache_cell_index_def; | ||
61 | #endif | ||
62 | |||
63 | /* | ||
64 | * dir.c | ||
65 | */ | ||
66 | extern struct inode_operations afs_dir_inode_operations; | ||
67 | extern struct file_operations afs_dir_file_operations; | ||
68 | |||
69 | /* | ||
70 | * file.c | ||
71 | */ | ||
72 | extern struct address_space_operations afs_fs_aops; | ||
73 | extern struct inode_operations afs_file_inode_operations; | ||
74 | extern struct file_operations afs_file_file_operations; | ||
75 | |||
76 | #ifdef AFS_CACHING_SUPPORT | ||
77 | extern int afs_cache_get_page_cookie(struct page *page, | ||
78 | struct cachefs_page **_page_cookie); | ||
79 | #endif | ||
80 | |||
81 | /* | ||
82 | * inode.c | ||
83 | */ | ||
84 | extern int afs_iget(struct super_block *sb, struct afs_fid *fid, | ||
85 | struct inode **_inode); | ||
86 | extern int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, | ||
87 | struct kstat *stat); | ||
88 | extern void afs_clear_inode(struct inode *inode); | ||
89 | |||
90 | /* | ||
91 | * key_afs.c | ||
92 | */ | ||
93 | #ifdef CONFIG_KEYS | ||
94 | extern int afs_key_register(void); | ||
95 | extern void afs_key_unregister(void); | ||
96 | #endif | ||
97 | |||
98 | /* | ||
99 | * main.c | ||
100 | */ | ||
101 | #ifdef AFS_CACHING_SUPPORT | ||
102 | extern struct cachefs_netfs afs_cache_netfs; | ||
103 | #endif | ||
104 | |||
105 | /* | ||
106 | * mntpt.c | ||
107 | */ | ||
108 | extern struct inode_operations afs_mntpt_inode_operations; | ||
109 | extern struct file_operations afs_mntpt_file_operations; | ||
110 | extern struct afs_timer afs_mntpt_expiry_timer; | ||
111 | extern struct afs_timer_ops afs_mntpt_expiry_timer_ops; | ||
112 | extern unsigned long afs_mntpt_expiry_timeout; | ||
113 | |||
114 | extern int afs_mntpt_check_symlink(struct afs_vnode *vnode); | ||
115 | |||
116 | /* | ||
117 | * super.c | ||
118 | */ | ||
119 | extern int afs_fs_init(void); | ||
120 | extern void afs_fs_exit(void); | ||
121 | |||
122 | #define AFS_CB_HASH_COUNT (PAGE_SIZE / sizeof(struct list_head)) | ||
123 | |||
124 | extern struct list_head afs_cb_hash_tbl[]; | ||
125 | extern spinlock_t afs_cb_hash_lock; | ||
126 | |||
127 | #define afs_cb_hash(SRV,FID) \ | ||
128 | afs_cb_hash_tbl[((unsigned long)(SRV) + \ | ||
129 | (FID)->vid + (FID)->vnode + (FID)->unique) % \ | ||
130 | AFS_CB_HASH_COUNT] | ||
131 | |||
132 | /* | ||
133 | * proc.c | ||
134 | */ | ||
135 | extern int afs_proc_init(void); | ||
136 | extern void afs_proc_cleanup(void); | ||
137 | extern int afs_proc_cell_setup(struct afs_cell *cell); | ||
138 | extern void afs_proc_cell_remove(struct afs_cell *cell); | ||
139 | |||
140 | #endif /* AFS_INTERNAL_H */ | ||
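afs_cb_hash() picks a callback hash bucket by summing the server pointer with the three fid components and reducing modulo AFS_CB_HASH_COUNT, which is sized so that the table fills exactly one page. A stand-alone sketch of the same calculation; the PAGE_SIZE and list_head sizes below are assumed values for illustration, not taken from any particular build:

#include <stdio.h>

#define PAGE_SIZE	4096	/* assumed value */
#define LIST_HEAD_SIZE	16	/* assumed sizeof(struct list_head) on 64-bit */
#define AFS_CB_HASH_COUNT (PAGE_SIZE / LIST_HEAD_SIZE)

struct afs_fid {
	unsigned vid, vnode, unique;
};

/* mirrors afs_cb_hash(): fold the server pointer and the three fid
 * components together, then reduce modulo the table size */
static unsigned long cb_hash(const void *server, const struct afs_fid *fid)
{
	return ((unsigned long) server + fid->vid + fid->vnode + fid->unique)
		% AFS_CB_HASH_COUNT;
}

int main(void)
{
	int server;	/* any object's address will do as a stand-in */
	struct afs_fid fid = { .vid = 0x20000001, .vnode = 77, .unique = 5 };

	printf("bucket %lu of %d\n", cb_hash(&server, &fid), AFS_CB_HASH_COUNT);
	return 0;
}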
diff --git a/fs/afs/kafsasyncd.c b/fs/afs/kafsasyncd.c new file mode 100644 index 000000000000..6fc88ae8ad94 --- /dev/null +++ b/fs/afs/kafsasyncd.c | |||
@@ -0,0 +1,257 @@ | |||
1 | /* kafsasyncd.c: AFS asynchronous operation daemon | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * | ||
12 | * The AFS async daemon is used to do the following: | ||
13 | * - probe "dead" servers to see whether they've come back to life yet. | ||
14 | * - probe "live" servers that we haven't talked to for a while to see whether they | ||
15 | *   are better candidates for serving than the ones currently in use | ||
16 | * - poll volume location servers to keep the volume location lists up to date | ||
17 | */ | ||
18 | |||
19 | #include <linux/module.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/completion.h> | ||
23 | #include "cell.h" | ||
24 | #include "server.h" | ||
25 | #include "volume.h" | ||
26 | #include "kafsasyncd.h" | ||
27 | #include "kafstimod.h" | ||
28 | #include <rxrpc/call.h> | ||
29 | #include <asm/errno.h> | ||
30 | #include "internal.h" | ||
31 | |||
32 | static DECLARE_COMPLETION(kafsasyncd_alive); | ||
33 | static DECLARE_COMPLETION(kafsasyncd_dead); | ||
34 | static DECLARE_WAIT_QUEUE_HEAD(kafsasyncd_sleepq); | ||
35 | static struct task_struct *kafsasyncd_task; | ||
36 | static int kafsasyncd_die; | ||
37 | |||
38 | static int kafsasyncd(void *arg); | ||
39 | |||
40 | static LIST_HEAD(kafsasyncd_async_attnq); | ||
41 | static LIST_HEAD(kafsasyncd_async_busyq); | ||
42 | static DEFINE_SPINLOCK(kafsasyncd_async_lock); | ||
43 | |||
44 | static void kafsasyncd_null_call_attn_func(struct rxrpc_call *call) | ||
45 | { | ||
46 | } | ||
47 | |||
48 | static void kafsasyncd_null_call_error_func(struct rxrpc_call *call) | ||
49 | { | ||
50 | } | ||
51 | |||
52 | /*****************************************************************************/ | ||
53 | /* | ||
54 | * start the async daemon | ||
55 | */ | ||
56 | int afs_kafsasyncd_start(void) | ||
57 | { | ||
58 | int ret; | ||
59 | |||
60 | ret = kernel_thread(kafsasyncd, NULL, 0); | ||
61 | if (ret < 0) | ||
62 | return ret; | ||
63 | |||
64 | wait_for_completion(&kafsasyncd_alive); | ||
65 | |||
66 | return ret; | ||
67 | } /* end afs_kafsasyncd_start() */ | ||
68 | |||
69 | /*****************************************************************************/ | ||
70 | /* | ||
71 | * stop the async daemon | ||
72 | */ | ||
73 | void afs_kafsasyncd_stop(void) | ||
74 | { | ||
75 | /* get rid of my daemon */ | ||
76 | kafsasyncd_die = 1; | ||
77 | wake_up(&kafsasyncd_sleepq); | ||
78 | wait_for_completion(&kafsasyncd_dead); | ||
79 | |||
80 | } /* end afs_kafsasyncd_stop() */ | ||
81 | |||
82 | /*****************************************************************************/ | ||
83 | /* | ||
84 | * probing daemon | ||
85 | */ | ||
86 | static int kafsasyncd(void *arg) | ||
87 | { | ||
88 | struct afs_async_op *op; | ||
89 | int die; | ||
90 | |||
91 | DECLARE_WAITQUEUE(myself, current); | ||
92 | |||
93 | kafsasyncd_task = current; | ||
94 | |||
95 | printk("kAFS: Started kafsasyncd %d\n", current->pid); | ||
96 | |||
97 | daemonize("kafsasyncd"); | ||
98 | |||
99 | complete(&kafsasyncd_alive); | ||
100 | |||
101 | /* loop around looking for things to attend to */ | ||
102 | do { | ||
103 | set_current_state(TASK_INTERRUPTIBLE); | ||
104 | add_wait_queue(&kafsasyncd_sleepq, &myself); | ||
105 | |||
106 | for (;;) { | ||
107 | if (!list_empty(&kafsasyncd_async_attnq) || | ||
108 | signal_pending(current) || | ||
109 | kafsasyncd_die) | ||
110 | break; | ||
111 | |||
112 | schedule(); | ||
113 | set_current_state(TASK_INTERRUPTIBLE); | ||
114 | } | ||
115 | |||
116 | remove_wait_queue(&kafsasyncd_sleepq, &myself); | ||
117 | set_current_state(TASK_RUNNING); | ||
118 | |||
119 | try_to_freeze(PF_FREEZE); | ||
120 | |||
121 | /* discard pending signals */ | ||
122 | afs_discard_my_signals(); | ||
123 | |||
124 | die = kafsasyncd_die; | ||
125 | |||
126 | /* deal with the next asynchronous operation requiring | ||
127 | * attention */ | ||
128 | if (!list_empty(&kafsasyncd_async_attnq)) { | ||
129 | struct afs_async_op *op; | ||
130 | |||
131 | _debug("@@@ Begin Asynchronous Operation"); | ||
132 | |||
133 | op = NULL; | ||
134 | spin_lock(&kafsasyncd_async_lock); | ||
135 | |||
136 | if (!list_empty(&kafsasyncd_async_attnq)) { | ||
137 | op = list_entry(kafsasyncd_async_attnq.next, | ||
138 | struct afs_async_op, link); | ||
139 | list_del(&op->link); | ||
140 | list_add_tail(&op->link, | ||
141 | &kafsasyncd_async_busyq); | ||
142 | } | ||
143 | |||
144 | spin_unlock(&kafsasyncd_async_lock); | ||
145 | |||
146 | _debug("@@@ Operation %p {%p}\n", | ||
147 | op, op ? op->ops : NULL); | ||
148 | |||
149 | if (op) | ||
150 | op->ops->attend(op); | ||
151 | |||
152 | _debug("@@@ End Asynchronous Operation"); | ||
153 | } | ||
154 | |||
155 | } while(!die); | ||
156 | |||
157 | /* need to kill all outstanding asynchronous operations before | ||
158 | * exiting */ | ||
159 | kafsasyncd_task = NULL; | ||
160 | spin_lock(&kafsasyncd_async_lock); | ||
161 | |||
162 | /* fold the busy and attention queues together */ | ||
163 | list_splice_init(&kafsasyncd_async_busyq, | ||
164 | &kafsasyncd_async_attnq); | ||
165 | |||
166 | /* dequeue kafsasyncd from the wait queues of all the outstanding calls */ | ||
167 | list_for_each_entry(op, &kafsasyncd_async_attnq, link) { | ||
168 | op->call->app_attn_func = kafsasyncd_null_call_attn_func; | ||
169 | op->call->app_error_func = kafsasyncd_null_call_error_func; | ||
170 | remove_wait_queue(&op->call->waitq, &op->waiter); | ||
171 | } | ||
172 | |||
173 | spin_unlock(&kafsasyncd_async_lock); | ||
174 | |||
175 | /* abort all the operations */ | ||
176 | while (!list_empty(&kafsasyncd_async_attnq)) { | ||
177 | op = list_entry(kafsasyncd_async_attnq.next, struct afs_async_op, link); | ||
178 | list_del_init(&op->link); | ||
179 | |||
180 | rxrpc_call_abort(op->call, -EIO); | ||
181 | rxrpc_put_call(op->call); | ||
182 | op->call = NULL; | ||
183 | |||
184 | op->ops->discard(op); | ||
185 | } | ||
186 | |||
187 | /* and that's all */ | ||
188 | _leave(""); | ||
189 | complete_and_exit(&kafsasyncd_dead, 0); | ||
190 | |||
191 | } /* end kafsasyncd() */ | ||
192 | |||
193 | /*****************************************************************************/ | ||
194 | /* | ||
195 | * begin an operation | ||
196 | * - place operation on busy queue | ||
197 | */ | ||
198 | void afs_kafsasyncd_begin_op(struct afs_async_op *op) | ||
199 | { | ||
200 | _enter(""); | ||
201 | |||
202 | spin_lock(&kafsasyncd_async_lock); | ||
203 | |||
204 | init_waitqueue_entry(&op->waiter, kafsasyncd_task); | ||
205 | add_wait_queue(&op->call->waitq, &op->waiter); | ||
206 | |||
207 | list_del(&op->link); | ||
208 | list_add_tail(&op->link, &kafsasyncd_async_busyq); | ||
209 | |||
210 | spin_unlock(&kafsasyncd_async_lock); | ||
211 | |||
212 | _leave(""); | ||
213 | } /* end afs_kafsasyncd_begin_op() */ | ||
214 | |||
215 | /*****************************************************************************/ | ||
216 | /* | ||
217 | * request attention for an operation | ||
218 | * - move to attention queue | ||
219 | */ | ||
220 | void afs_kafsasyncd_attend_op(struct afs_async_op *op) | ||
221 | { | ||
222 | _enter(""); | ||
223 | |||
224 | spin_lock(&kafsasyncd_async_lock); | ||
225 | |||
226 | list_del(&op->link); | ||
227 | list_add_tail(&op->link, &kafsasyncd_async_attnq); | ||
228 | |||
229 | spin_unlock(&kafsasyncd_async_lock); | ||
230 | |||
231 | wake_up(&kafsasyncd_sleepq); | ||
232 | |||
233 | _leave(""); | ||
234 | } /* end afs_kafsasyncd_attend_op() */ | ||
235 | |||
236 | /*****************************************************************************/ | ||
237 | /* | ||
238 | * terminate an operation | ||
239 | * - remove from either queue | ||
240 | */ | ||
241 | void afs_kafsasyncd_terminate_op(struct afs_async_op *op) | ||
242 | { | ||
243 | _enter(""); | ||
244 | |||
245 | spin_lock(&kafsasyncd_async_lock); | ||
246 | |||
247 | if (!list_empty(&op->link)) { | ||
248 | list_del_init(&op->link); | ||
249 | remove_wait_queue(&op->call->waitq, &op->waiter); | ||
250 | } | ||
251 | |||
252 | spin_unlock(&kafsasyncd_async_lock); | ||
253 | |||
254 | wake_up(&kafsasyncd_sleepq); | ||
255 | |||
256 | _leave(""); | ||
257 | } /* end afs_kafsasyncd_terminate_op() */ | ||
diff --git a/fs/afs/kafsasyncd.h b/fs/afs/kafsasyncd.h new file mode 100644 index 000000000000..791803f9a6fb --- /dev/null +++ b/fs/afs/kafsasyncd.h | |||
@@ -0,0 +1,52 @@ | |||
1 | /* kafsasyncd.h: AFS asynchronous operation daemon | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_AFS_KAFSASYNCD_H | ||
13 | #define _LINUX_AFS_KAFSASYNCD_H | ||
14 | |||
15 | #include "types.h" | ||
16 | |||
17 | struct afs_async_op; | ||
18 | |||
19 | struct afs_async_op_ops { | ||
20 | void (*attend)(struct afs_async_op *op); | ||
21 | void (*discard)(struct afs_async_op *op); | ||
22 | }; | ||
23 | |||
24 | /*****************************************************************************/ | ||
25 | /* | ||
26 | * asynchronous operation record | ||
27 | */ | ||
28 | struct afs_async_op | ||
29 | { | ||
30 | struct list_head link; | ||
31 | struct afs_server *server; /* server being contacted */ | ||
32 | struct rxrpc_call *call; /* RxRPC call performing op */ | ||
33 | wait_queue_t waiter; /* wait queue for kafsasyncd */ | ||
34 | const struct afs_async_op_ops *ops; /* operations */ | ||
35 | }; | ||
36 | |||
37 | static inline void afs_async_op_init(struct afs_async_op *op, | ||
38 | const struct afs_async_op_ops *ops) | ||
39 | { | ||
40 | INIT_LIST_HEAD(&op->link); | ||
41 | op->call = NULL; | ||
42 | op->ops = ops; | ||
43 | } | ||
44 | |||
45 | extern int afs_kafsasyncd_start(void); | ||
46 | extern void afs_kafsasyncd_stop(void); | ||
47 | |||
48 | extern void afs_kafsasyncd_begin_op(struct afs_async_op *op); | ||
49 | extern void afs_kafsasyncd_attend_op(struct afs_async_op *op); | ||
50 | extern void afs_kafsasyncd_terminate_op(struct afs_async_op *op); | ||
51 | |||
52 | #endif /* _LINUX_AFS_KAFSASYNCD_H */ | ||
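Each afs_async_op carries an ops table, and kafsasyncd only ever reaches back into the owning code through it: attend() when the underlying call wants attention, discard() when the daemon is tearing the operation down. A stand-alone sketch of that ops-table pattern, with dummy operations invented purely for illustration:

#include <stdio.h>

struct demo_op;

struct demo_op_ops {
	void (*attend)(struct demo_op *op);
	void (*discard)(struct demo_op *op);
};

struct demo_op {
	const struct demo_op_ops *ops;
	const char *name;
};

static void demo_attend(struct demo_op *op)
{
	printf("attend %s\n", op->name);
}

static void demo_discard(struct demo_op *op)
{
	printf("discard %s\n", op->name);
}

static const struct demo_op_ops demo_ops = {
	.attend		= demo_attend,
	.discard	= demo_discard,
};

int main(void)
{
	struct demo_op op = { .ops = &demo_ops, .name = "volume-probe" };

	op.ops->attend(&op);	/* as the daemon does for a queued op */
	op.ops->discard(&op);	/* as the daemon does on shutdown */
	return 0;
}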
diff --git a/fs/afs/kafstimod.c b/fs/afs/kafstimod.c new file mode 100644 index 000000000000..86e710dd057e --- /dev/null +++ b/fs/afs/kafstimod.c | |||
@@ -0,0 +1,204 @@ | |||
1 | /* kafstimod.c: AFS timeout daemon | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/completion.h> | ||
16 | #include "cell.h" | ||
17 | #include "volume.h" | ||
18 | #include "kafstimod.h" | ||
19 | #include <asm/errno.h> | ||
20 | #include "internal.h" | ||
21 | |||
22 | static DECLARE_COMPLETION(kafstimod_alive); | ||
23 | static DECLARE_COMPLETION(kafstimod_dead); | ||
24 | static DECLARE_WAIT_QUEUE_HEAD(kafstimod_sleepq); | ||
25 | static int kafstimod_die; | ||
26 | |||
27 | static LIST_HEAD(kafstimod_list); | ||
28 | static DEFINE_SPINLOCK(kafstimod_lock); | ||
29 | |||
30 | static int kafstimod(void *arg); | ||
31 | |||
32 | /*****************************************************************************/ | ||
33 | /* | ||
34 | * start the timeout daemon | ||
35 | */ | ||
36 | int afs_kafstimod_start(void) | ||
37 | { | ||
38 | int ret; | ||
39 | |||
40 | ret = kernel_thread(kafstimod, NULL, 0); | ||
41 | if (ret < 0) | ||
42 | return ret; | ||
43 | |||
44 | wait_for_completion(&kafstimod_alive); | ||
45 | |||
46 | return ret; | ||
47 | } /* end afs_kafstimod_start() */ | ||
48 | |||
49 | /*****************************************************************************/ | ||
50 | /* | ||
51 | * stop the timeout daemon | ||
52 | */ | ||
53 | void afs_kafstimod_stop(void) | ||
54 | { | ||
55 | /* get rid of my daemon */ | ||
56 | kafstimod_die = 1; | ||
57 | wake_up(&kafstimod_sleepq); | ||
58 | wait_for_completion(&kafstimod_dead); | ||
59 | |||
60 | } /* end afs_kafstimod_stop() */ | ||
61 | |||
62 | /*****************************************************************************/ | ||
63 | /* | ||
64 | * timeout processing daemon | ||
65 | */ | ||
66 | static int kafstimod(void *arg) | ||
67 | { | ||
68 | struct afs_timer *timer; | ||
69 | |||
70 | DECLARE_WAITQUEUE(myself, current); | ||
71 | |||
72 | printk("kAFS: Started kafstimod %d\n", current->pid); | ||
73 | |||
74 | daemonize("kafstimod"); | ||
75 | |||
76 | complete(&kafstimod_alive); | ||
77 | |||
78 | /* loop around looking for things to attend to */ | ||
79 | loop: | ||
80 | set_current_state(TASK_INTERRUPTIBLE); | ||
81 | add_wait_queue(&kafstimod_sleepq, &myself); | ||
82 | |||
83 | for (;;) { | ||
84 | unsigned long jif; | ||
85 | signed long timeout; | ||
86 | |||
87 | /* deal with the daemon being asked to die */ | ||
88 | if (kafstimod_die) { | ||
89 | remove_wait_queue(&kafstimod_sleepq, &myself); | ||
90 | _leave(""); | ||
91 | complete_and_exit(&kafstimod_dead, 0); | ||
92 | } | ||
93 | |||
94 | try_to_freeze(PF_FREEZE); | ||
95 | |||
96 | /* discard pending signals */ | ||
97 | afs_discard_my_signals(); | ||
98 | |||
99 | /* work out the time to elapse before the next event */ | ||
100 | spin_lock(&kafstimod_lock); | ||
101 | if (list_empty(&kafstimod_list)) { | ||
102 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
103 | } | ||
104 | else { | ||
105 | timer = list_entry(kafstimod_list.next, | ||
106 | struct afs_timer, link); | ||
107 | timeout = timer->timo_jif; | ||
108 | jif = jiffies; | ||
109 | |||
110 | if (time_before_eq((unsigned long) timeout, jif)) | ||
111 | goto immediate; | ||
112 | |||
113 | else { | ||
114 | timeout = (long) timeout - (long) jiffies; | ||
115 | } | ||
116 | } | ||
117 | spin_unlock(&kafstimod_lock); | ||
118 | |||
119 | schedule_timeout(timeout); | ||
120 | |||
121 | set_current_state(TASK_INTERRUPTIBLE); | ||
122 | } | ||
123 | |||
124 | /* the thing on the front of the queue needs processing | ||
125 | * - we come here with the lock held and timer pointing to the expired | ||
126 | * entry | ||
127 | */ | ||
128 | immediate: | ||
129 | remove_wait_queue(&kafstimod_sleepq, &myself); | ||
130 | set_current_state(TASK_RUNNING); | ||
131 | |||
132 | _debug("@@@ Begin Timeout of %p", timer); | ||
133 | |||
134 | /* dequeue the timer */ | ||
135 | list_del_init(&timer->link); | ||
136 | spin_unlock(&kafstimod_lock); | ||
137 | |||
138 | /* call the timeout function */ | ||
139 | timer->ops->timed_out(timer); | ||
140 | |||
141 | _debug("@@@ End Timeout"); | ||
142 | goto loop; | ||
143 | |||
144 | } /* end kafstimod() */ | ||
145 | |||
146 | /*****************************************************************************/ | ||
147 | /* | ||
148 | * (re-)queue a timer | ||
149 | */ | ||
150 | void afs_kafstimod_add_timer(struct afs_timer *timer, unsigned long timeout) | ||
151 | { | ||
152 | struct afs_timer *ptimer; | ||
153 | struct list_head *_p; | ||
154 | |||
155 | _enter("%p,%lu", timer, timeout); | ||
156 | |||
157 | spin_lock(&kafstimod_lock); | ||
158 | |||
159 | list_del(&timer->link); | ||
160 | |||
161 | /* the timer was deferred or reset - put it back in the queue at the | ||
162 | * right place */ | ||
163 | timer->timo_jif = jiffies + timeout; | ||
164 | |||
165 | list_for_each(_p, &kafstimod_list) { | ||
166 | ptimer = list_entry(_p, struct afs_timer, link); | ||
167 | if (time_before(timer->timo_jif, ptimer->timo_jif)) | ||
168 | break; | ||
169 | } | ||
170 | |||
171 | list_add_tail(&timer->link, _p); /* insert before stopping point */ | ||
172 | |||
173 | spin_unlock(&kafstimod_lock); | ||
174 | |||
175 | wake_up(&kafstimod_sleepq); | ||
176 | |||
177 | _leave(""); | ||
178 | } /* end afs_kafstimod_add_timer() */ | ||
179 | |||
180 | /*****************************************************************************/ | ||
181 | /* | ||
182 | * dequeue a timer | ||
183 | * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued | ||
184 | */ | ||
185 | int afs_kafstimod_del_timer(struct afs_timer *timer) | ||
186 | { | ||
187 | int ret = 0; | ||
188 | |||
189 | _enter("%p", timer); | ||
190 | |||
191 | spin_lock(&kafstimod_lock); | ||
192 | |||
193 | if (list_empty(&timer->link)) | ||
194 | ret = -ENOENT; | ||
195 | else | ||
196 | list_del_init(&timer->link); | ||
197 | |||
198 | spin_unlock(&kafstimod_lock); | ||
199 | |||
200 | wake_up(&kafstimod_sleepq); | ||
201 | |||
202 | _leave(" = %d", ret); | ||
203 | return ret; | ||
204 | } /* end afs_kafstimod_del_timer() */ | ||
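afs_kafstimod_add_timer() keeps the queue sorted by expiry, inserting each timer just before the first entry that expires later, so the daemon only ever has to look at the head. The ordering uses time_before(), which stays correct across jiffies wrap-around because it compares the signed difference rather than the raw counter values. A stand-alone sketch of that comparison; time_before_demo() is a user-space stand-in for the kernel macro:

#include <stdio.h>

/* user-space stand-in for the kernel's time_before(): true if a falls
 * before b, even when the tick counter has wrapped in between */
static int time_before_demo(unsigned long a, unsigned long b)
{
	return (long) (a - b) < 0;
}

int main(void)
{
	unsigned long near_wrap = (unsigned long) -10;	/* 10 ticks before wrap */
	unsigned long after_wrap = 5;			/* 5 ticks after wrap */

	printf("%d\n", time_before_demo(100, 200));		  /* 1 */
	printf("%d\n", time_before_demo(near_wrap, after_wrap)); /* 1, despite wrap */
	printf("%d\n", time_before_demo(after_wrap, near_wrap)); /* 0 */
	return 0;
}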
diff --git a/fs/afs/kafstimod.h b/fs/afs/kafstimod.h new file mode 100644 index 000000000000..e312f1a61a7f --- /dev/null +++ b/fs/afs/kafstimod.h | |||
@@ -0,0 +1,49 @@ | |||
1 | /* kafstimod.h: AFS timeout daemon | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_AFS_KAFSTIMOD_H | ||
13 | #define _LINUX_AFS_KAFSTIMOD_H | ||
14 | |||
15 | #include "types.h" | ||
16 | |||
17 | struct afs_timer; | ||
18 | |||
19 | struct afs_timer_ops { | ||
20 | /* called when the front of the timer queue has timed out */ | ||
21 | void (*timed_out)(struct afs_timer *timer); | ||
22 | }; | ||
23 | |||
24 | /*****************************************************************************/ | ||
25 | /* | ||
26 | * AFS timer/timeout record | ||
27 | */ | ||
28 | struct afs_timer | ||
29 | { | ||
30 | struct list_head link; /* link in timer queue */ | ||
31 | unsigned long timo_jif; /* timeout time */ | ||
32 | const struct afs_timer_ops *ops; /* timeout expiry function */ | ||
33 | }; | ||
34 | |||
35 | static inline void afs_timer_init(struct afs_timer *timer, | ||
36 | const struct afs_timer_ops *ops) | ||
37 | { | ||
38 | INIT_LIST_HEAD(&timer->link); | ||
39 | timer->ops = ops; | ||
40 | } | ||
41 | |||
42 | extern int afs_kafstimod_start(void); | ||
43 | extern void afs_kafstimod_stop(void); | ||
44 | |||
45 | extern void afs_kafstimod_add_timer(struct afs_timer *timer, | ||
46 | unsigned long timeout); | ||
47 | extern int afs_kafstimod_del_timer(struct afs_timer *timer); | ||
48 | |||
49 | #endif /* _LINUX_AFS_KAFSTIMOD_H */ | ||
diff --git a/fs/afs/main.c b/fs/afs/main.c new file mode 100644 index 000000000000..913c689bdb35 --- /dev/null +++ b/fs/afs/main.c | |||
@@ -0,0 +1,286 @@ | |||
1 | /* main.c: AFS client file system | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/moduleparam.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/completion.h> | ||
17 | #include <rxrpc/rxrpc.h> | ||
18 | #include <rxrpc/transport.h> | ||
19 | #include <rxrpc/call.h> | ||
20 | #include <rxrpc/peer.h> | ||
21 | #include "cache.h" | ||
22 | #include "cell.h" | ||
23 | #include "server.h" | ||
24 | #include "fsclient.h" | ||
25 | #include "cmservice.h" | ||
26 | #include "kafstimod.h" | ||
27 | #include "kafsasyncd.h" | ||
28 | #include "internal.h" | ||
29 | |||
30 | struct rxrpc_transport *afs_transport; | ||
31 | |||
32 | static int afs_adding_peer(struct rxrpc_peer *peer); | ||
33 | static void afs_discarding_peer(struct rxrpc_peer *peer); | ||
34 | |||
35 | |||
36 | MODULE_DESCRIPTION("AFS Client File System"); | ||
37 | MODULE_AUTHOR("Red Hat, Inc."); | ||
38 | MODULE_LICENSE("GPL"); | ||
39 | |||
40 | static char *rootcell; | ||
41 | |||
42 | module_param(rootcell, charp, 0); | ||
43 | MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list"); | ||
44 | |||
45 | |||
46 | static struct rxrpc_peer_ops afs_peer_ops = { | ||
47 | .adding = afs_adding_peer, | ||
48 | .discarding = afs_discarding_peer, | ||
49 | }; | ||
50 | |||
51 | struct list_head afs_cb_hash_tbl[AFS_CB_HASH_COUNT]; | ||
52 | DEFINE_SPINLOCK(afs_cb_hash_lock); | ||
53 | |||
54 | #ifdef AFS_CACHING_SUPPORT | ||
55 | static struct cachefs_netfs_operations afs_cache_ops = { | ||
56 | .get_page_cookie = afs_cache_get_page_cookie, | ||
57 | }; | ||
58 | |||
59 | struct cachefs_netfs afs_cache_netfs = { | ||
60 | .name = "afs", | ||
61 | .version = 0, | ||
62 | .ops = &afs_cache_ops, | ||
63 | }; | ||
64 | #endif | ||
65 | |||
66 | /*****************************************************************************/ | ||
67 | /* | ||
68 | * initialise the AFS client FS module | ||
69 | */ | ||
70 | static int __init afs_init(void) | ||
71 | { | ||
72 | int loop, ret; | ||
73 | |||
74 | printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n"); | ||
75 | |||
76 | /* initialise the callback hash table */ | ||
77 | spin_lock_init(&afs_cb_hash_lock); | ||
78 | for (loop = AFS_CB_HASH_COUNT - 1; loop >= 0; loop--) | ||
79 | INIT_LIST_HEAD(&afs_cb_hash_tbl[loop]); | ||
80 | |||
81 | /* register the /proc stuff */ | ||
82 | ret = afs_proc_init(); | ||
83 | if (ret < 0) | ||
84 | return ret; | ||
85 | |||
86 | #ifdef AFS_CACHING_SUPPORT | ||
87 | /* we want to be able to cache */ | ||
88 | ret = cachefs_register_netfs(&afs_cache_netfs, | ||
89 | &afs_cache_cell_index_def); | ||
90 | if (ret < 0) | ||
91 | goto error; | ||
92 | #endif | ||
93 | |||
94 | #ifdef CONFIG_KEYS_TURNED_OFF | ||
95 | ret = afs_key_register(); | ||
96 | if (ret < 0) | ||
97 | goto error_cache; | ||
98 | #endif | ||
99 | |||
100 | /* initialise the cell DB */ | ||
101 | ret = afs_cell_init(rootcell); | ||
102 | if (ret < 0) | ||
103 | goto error_keys; | ||
104 | |||
105 | /* start the timeout daemon */ | ||
106 | ret = afs_kafstimod_start(); | ||
107 | if (ret < 0) | ||
108 | goto error_keys; | ||
109 | |||
110 | /* start the async operation daemon */ | ||
111 | ret = afs_kafsasyncd_start(); | ||
112 | if (ret < 0) | ||
113 | goto error_kafstimod; | ||
114 | |||
115 | /* create the RxRPC transport */ | ||
116 | ret = rxrpc_create_transport(7001, &afs_transport); | ||
117 | if (ret < 0) | ||
118 | goto error_kafsasyncd; | ||
119 | |||
120 | afs_transport->peer_ops = &afs_peer_ops; | ||
121 | |||
122 | /* register the filesystems */ | ||
123 | ret = afs_fs_init(); | ||
124 | if (ret < 0) | ||
125 | goto error_transport; | ||
126 | |||
127 | return ret; | ||
128 | |||
129 | error_transport: | ||
130 | rxrpc_put_transport(afs_transport); | ||
131 | error_kafsasyncd: | ||
132 | afs_kafsasyncd_stop(); | ||
133 | error_kafstimod: | ||
134 | afs_kafstimod_stop(); | ||
135 | error_keys: | ||
136 | #ifdef CONFIG_KEYS_TURNED_OFF | ||
137 | afs_key_unregister(); | ||
138 | error_cache: | ||
139 | #endif | ||
140 | #ifdef AFS_CACHING_SUPPORT | ||
141 | cachefs_unregister_netfs(&afs_cache_netfs); | ||
142 | error: | ||
143 | #endif | ||
144 | afs_cell_purge(); | ||
145 | afs_proc_cleanup(); | ||
146 | printk(KERN_ERR "kAFS: failed to register: %d\n", ret); | ||
147 | return ret; | ||
148 | } /* end afs_init() */ | ||
149 | |||
150 | /* XXX late_initcall is kludgy, but the only alternative seems to be to create | ||
151 | * a transport upon the first mount, which is worse. Or is it? | ||
152 | */ | ||
153 | late_initcall(afs_init); /* must be called after net/ to create socket */ | ||
154 | /*****************************************************************************/ | ||
155 | /* | ||
156 | * clean up on module removal | ||
157 | */ | ||
158 | static void __exit afs_exit(void) | ||
159 | { | ||
160 | printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n"); | ||
161 | |||
162 | afs_fs_exit(); | ||
163 | rxrpc_put_transport(afs_transport); | ||
164 | afs_kafstimod_stop(); | ||
165 | afs_kafsasyncd_stop(); | ||
166 | afs_cell_purge(); | ||
167 | #ifdef CONFIG_KEYS_TURNED_OFF | ||
168 | afs_key_unregister(); | ||
169 | #endif | ||
170 | #ifdef AFS_CACHING_SUPPORT | ||
171 | cachefs_unregister_netfs(&afs_cache_netfs); | ||
172 | #endif | ||
173 | afs_proc_cleanup(); | ||
174 | |||
175 | } /* end afs_exit() */ | ||
176 | |||
177 | module_exit(afs_exit); | ||
178 | |||
179 | /*****************************************************************************/ | ||
180 | /* | ||
181 | * notification that new peer record is being added | ||
182 | * - called from krxsecd | ||
183 | * - return an error to induce an abort | ||
184 | * - mustn't sleep (caller holds an rwlock) | ||
185 | */ | ||
186 | static int afs_adding_peer(struct rxrpc_peer *peer) | ||
187 | { | ||
188 | struct afs_server *server; | ||
189 | int ret; | ||
190 | |||
191 | _debug("kAFS: Adding new peer %08x\n", ntohl(peer->addr.s_addr)); | ||
192 | |||
193 | /* determine which server the peer resides in (if any) */ | ||
194 | ret = afs_server_find_by_peer(peer, &server); | ||
195 | if (ret < 0) | ||
196 | return ret; /* none that we recognise, so abort */ | ||
197 | |||
198 | _debug("Server %p{u=%d}\n", server, atomic_read(&server->usage)); | ||
199 | |||
200 | _debug("Cell %p{u=%d}\n", | ||
201 | server->cell, atomic_read(&server->cell->usage)); | ||
202 | |||
203 | /* cross-point the structs under a global lock */ | ||
204 | spin_lock(&afs_server_peer_lock); | ||
205 | peer->user = server; | ||
206 | server->peer = peer; | ||
207 | spin_unlock(&afs_server_peer_lock); | ||
208 | |||
209 | afs_put_server(server); | ||
210 | |||
211 | return 0; | ||
212 | } /* end afs_adding_peer() */ | ||
213 | |||
214 | /*****************************************************************************/ | ||
215 | /* | ||
216 | * notification that a peer record is being discarded | ||
217 | * - called from krxiod or krxsecd | ||
218 | */ | ||
219 | static void afs_discarding_peer(struct rxrpc_peer *peer) | ||
220 | { | ||
221 | struct afs_server *server; | ||
222 | |||
223 | _enter("%p",peer); | ||
224 | |||
225 | _debug("Discarding peer %08x (rtt=%lu.%lumS)\n", | ||
226 | ntohl(peer->addr.s_addr), | ||
227 | (long) (peer->rtt / 1000), | ||
228 | (long) (peer->rtt % 1000)); | ||
229 | |||
230 | /* uncross-point the structs under a global lock */ | ||
231 | spin_lock(&afs_server_peer_lock); | ||
232 | server = peer->user; | ||
233 | if (server) { | ||
234 | peer->user = NULL; | ||
235 | server->peer = NULL; | ||
236 | } | ||
237 | spin_unlock(&afs_server_peer_lock); | ||
238 | |||
239 | _leave(""); | ||
240 | |||
241 | } /* end afs_discarding_peer() */ | ||
242 | |||
243 | /*****************************************************************************/ | ||
244 | /* | ||
245 | * clear the dead space between task_struct and kernel stack | ||
246 | * - called by supplying -finstrument-functions to gcc | ||
247 | */ | ||
248 | #if 0 | ||
249 | void __cyg_profile_func_enter (void *this_fn, void *call_site) | ||
250 | __attribute__((no_instrument_function)); | ||
251 | |||
252 | void __cyg_profile_func_enter (void *this_fn, void *call_site) | ||
253 | { | ||
254 | asm volatile(" movl %%esp,%%edi \n" | ||
255 | " andl %0,%%edi \n" | ||
256 | " addl %1,%%edi \n" | ||
257 | " movl %%esp,%%ecx \n" | ||
258 | " subl %%edi,%%ecx \n" | ||
259 | " shrl $2,%%ecx \n" | ||
260 | " movl $0xedededed,%%eax \n" | ||
261 | " rep stosl \n" | ||
262 | : | ||
263 | : "i"(~(THREAD_SIZE - 1)), "i"(sizeof(struct thread_info)) | ||
264 | : "eax", "ecx", "edi", "memory", "cc" | ||
265 | ); | ||
266 | } | ||
267 | |||
268 | void __cyg_profile_func_exit(void *this_fn, void *call_site) | ||
269 | __attribute__((no_instrument_function)); | ||
270 | |||
271 | void __cyg_profile_func_exit(void *this_fn, void *call_site) | ||
272 | { | ||
273 | asm volatile(" movl %%esp,%%edi \n" | ||
274 | " andl %0,%%edi \n" | ||
275 | " addl %1,%%edi \n" | ||
276 | " movl %%esp,%%ecx \n" | ||
277 | " subl %%edi,%%ecx \n" | ||
278 | " shrl $2,%%ecx \n" | ||
279 | " movl $0xdadadada,%%eax \n" | ||
280 | " rep stosl \n" | ||
281 | : | ||
282 | : "i"(~(THREAD_SIZE - 1)), "i"(sizeof(struct thread_info)) | ||
283 | : "eax", "ecx", "edi", "memory", "cc" | ||
284 | ); | ||
285 | } | ||
286 | #endif | ||
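afs_init() follows the usual kernel pattern of a goto ladder: each failure jumps to a label that unwinds, in reverse order, exactly the steps that had already succeeded. A stripped-down stand-alone sketch of the same shape; the step names are invented and stand for nothing in particular:

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("init %s\n", name);
	return fail ? -1 : 0;
}

static int demo_init(int fail_at_c)
{
	int ret;

	ret = step("a", 0);
	if (ret < 0)
		goto error;

	ret = step("b", 0);
	if (ret < 0)
		goto error_a;

	ret = step("c", fail_at_c);
	if (ret < 0)
		goto error_b;

	return 0;

error_b:
	printf("undo b\n");
error_a:
	printf("undo a\n");
error:
	printf("failed: %d\n", ret);
	return ret;
}

int main(void)
{
	return demo_init(1) ? 1 : 0;
}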
diff --git a/fs/afs/misc.c b/fs/afs/misc.c new file mode 100644 index 000000000000..e4fce66d76e0 --- /dev/null +++ b/fs/afs/misc.c | |||
@@ -0,0 +1,39 @@ | |||
1 | /* misc.c: miscellaneous bits | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include "errors.h" | ||
16 | #include "internal.h" | ||
17 | |||
18 | /*****************************************************************************/ | ||
19 | /* | ||
20 | * convert an AFS abort code to a Linux error number | ||
21 | */ | ||
22 | int afs_abort_to_error(int abortcode) | ||
23 | { | ||
24 | switch (abortcode) { | ||
25 | case VSALVAGE: return -EIO; | ||
26 | case VNOVNODE: return -ENOENT; | ||
27 | case VNOVOL: return -ENXIO; | ||
28 | case VVOLEXISTS: return -EEXIST; | ||
29 | case VNOSERVICE: return -EIO; | ||
30 | case VOFFLINE: return -ENOENT; | ||
31 | case VONLINE: return -EEXIST; | ||
32 | case VDISKFULL: return -ENOSPC; | ||
33 | case VOVERQUOTA: return -EDQUOT; | ||
34 | case VBUSY: return -EBUSY; | ||
35 | case VMOVED: return -ENXIO; | ||
36 | default: return -EIO; | ||
37 | } | ||
38 | |||
39 | } /* end afs_abort_to_error() */ | ||
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c new file mode 100644 index 000000000000..bfc28abe1cb1 --- /dev/null +++ b/fs/afs/mntpt.c | |||
@@ -0,0 +1,287 @@ | |||
1 | /* mntpt.c: mountpoint management | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/pagemap.h> | ||
19 | #include <linux/mount.h> | ||
20 | #include <linux/namei.h> | ||
21 | #include <linux/namespace.h> | ||
22 | #include "super.h" | ||
23 | #include "cell.h" | ||
24 | #include "volume.h" | ||
25 | #include "vnode.h" | ||
26 | #include "internal.h" | ||
27 | |||
28 | |||
29 | static struct dentry *afs_mntpt_lookup(struct inode *dir, | ||
30 | struct dentry *dentry, | ||
31 | struct nameidata *nd); | ||
32 | static int afs_mntpt_open(struct inode *inode, struct file *file); | ||
33 | static int afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd); | ||
34 | |||
35 | struct file_operations afs_mntpt_file_operations = { | ||
36 | .open = afs_mntpt_open, | ||
37 | }; | ||
38 | |||
39 | struct inode_operations afs_mntpt_inode_operations = { | ||
40 | .lookup = afs_mntpt_lookup, | ||
41 | .follow_link = afs_mntpt_follow_link, | ||
42 | .readlink = page_readlink, | ||
43 | .getattr = afs_inode_getattr, | ||
44 | }; | ||
45 | |||
46 | static LIST_HEAD(afs_vfsmounts); | ||
47 | |||
48 | static void afs_mntpt_expiry_timed_out(struct afs_timer *timer); | ||
49 | |||
50 | struct afs_timer_ops afs_mntpt_expiry_timer_ops = { | ||
51 | .timed_out = afs_mntpt_expiry_timed_out, | ||
52 | }; | ||
53 | |||
54 | struct afs_timer afs_mntpt_expiry_timer; | ||
55 | |||
56 | unsigned long afs_mntpt_expiry_timeout = 20; | ||
57 | |||
58 | /*****************************************************************************/ | ||
59 | /* | ||
60 | * check a symbolic link to see whether it actually encodes a mountpoint | ||
61 | * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately | ||
62 | */ | ||
63 | int afs_mntpt_check_symlink(struct afs_vnode *vnode) | ||
64 | { | ||
65 | struct page *page; | ||
66 | filler_t *filler; | ||
67 | size_t size; | ||
68 | char *buf; | ||
69 | int ret; | ||
70 | |||
71 | _enter("{%u,%u}", vnode->fid.vnode, vnode->fid.unique); | ||
72 | |||
73 | /* read the contents of the symlink into the pagecache */ | ||
74 | filler = (filler_t *) AFS_VNODE_TO_I(vnode)->i_mapping->a_ops->readpage; | ||
75 | |||
76 | page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, | ||
77 | filler, NULL); | ||
78 | if (IS_ERR(page)) { | ||
79 | ret = PTR_ERR(page); | ||
80 | goto out; | ||
81 | } | ||
82 | |||
83 | ret = -EIO; | ||
84 | wait_on_page_locked(page); | ||
85 | buf = kmap(page); | ||
86 | if (!PageUptodate(page)) | ||
87 | goto out_free; | ||
88 | if (PageError(page)) | ||
89 | goto out_free; | ||
90 | |||
91 | /* examine the symlink's contents */ | ||
92 | size = vnode->status.size; | ||
93 | _debug("symlink to %*.*s", size, (int) size, buf); | ||
94 | |||
95 | if (size > 2 && | ||
96 | (buf[0] == '%' || buf[0] == '#') && | ||
97 | buf[size - 1] == '.' | ||
98 | ) { | ||
99 | _debug("symlink is a mountpoint"); | ||
100 | spin_lock(&vnode->lock); | ||
101 | vnode->flags |= AFS_VNODE_MOUNTPOINT; | ||
102 | spin_unlock(&vnode->lock); | ||
103 | } | ||
104 | |||
105 | ret = 0; | ||
106 | |||
107 | out_free: | ||
108 | kunmap(page); | ||
109 | page_cache_release(page); | ||
110 | out: | ||
111 | _leave(" = %d", ret); | ||
112 | return ret; | ||
113 | |||
114 | } /* end afs_mntpt_check_symlink() */ | ||
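The test above treats a symlink as a mount point when its body is more than two characters long, starts with '%' or '#' (the AFS cell/volume prefix characters) and ends with a '.'. A stand-alone sketch of the same test; the sample strings are invented for illustration:

#include <stdio.h>
#include <string.h>

/* mirrors the test in afs_mntpt_check_symlink() above */
static int looks_like_mntpt(const char *buf, size_t size)
{
	return size > 2 &&
	       (buf[0] == '%' || buf[0] == '#') &&
	       buf[size - 1] == '.';
}

int main(void)
{
	const char *samples[] = {
		"#example.org:root.cell.",	/* mount point */
		"%example.org:root.cell.",	/* mount point, read-write path */
		"some-ordinary-target",		/* plain symlink */
	};
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%-26s -> %d\n", samples[i],
		       looks_like_mntpt(samples[i], strlen(samples[i])));
	return 0;
}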
115 | |||
116 | /*****************************************************************************/ | ||
117 | /* | ||
118 | * no valid lookup procedure on this sort of dir | ||
119 | */ | ||
120 | static struct dentry *afs_mntpt_lookup(struct inode *dir, | ||
121 | struct dentry *dentry, | ||
122 | struct nameidata *nd) | ||
123 | { | ||
124 | kenter("%p,%p{%p{%s},%s}", | ||
125 | dir, | ||
126 | dentry, | ||
127 | dentry->d_parent, | ||
128 | dentry->d_parent ? | ||
129 | dentry->d_parent->d_name.name : (const unsigned char *) "", | ||
130 | dentry->d_name.name); | ||
131 | |||
132 | return ERR_PTR(-EREMOTE); | ||
133 | } /* end afs_mntpt_lookup() */ | ||
134 | |||
135 | /*****************************************************************************/ | ||
136 | /* | ||
137 | * no valid open procedure on this sort of dir | ||
138 | */ | ||
139 | static int afs_mntpt_open(struct inode *inode, struct file *file) | ||
140 | { | ||
141 | kenter("%p,%p{%p{%s},%s}", | ||
142 | inode, file, | ||
143 | file->f_dentry->d_parent, | ||
144 | file->f_dentry->d_parent ? | ||
145 | file->f_dentry->d_parent->d_name.name : | ||
146 | (const unsigned char *) "", | ||
147 | file->f_dentry->d_name.name); | ||
148 | |||
149 | return -EREMOTE; | ||
150 | } /* end afs_mntpt_open() */ | ||
151 | |||
152 | /*****************************************************************************/ | ||
153 | /* | ||
154 | * create a vfsmount to be automounted | ||
155 | */ | ||
156 | static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) | ||
157 | { | ||
158 | struct afs_super_info *super; | ||
159 | struct vfsmount *mnt; | ||
160 | struct page *page = NULL; | ||
161 | size_t size; | ||
162 | char *buf, *devname = NULL, *options = NULL; | ||
163 | filler_t *filler; | ||
164 | int ret; | ||
165 | |||
166 | kenter("{%s}", mntpt->d_name.name); | ||
167 | |||
168 | BUG_ON(!mntpt->d_inode); | ||
169 | |||
170 | ret = -EINVAL; | ||
171 | size = mntpt->d_inode->i_size; | ||
172 | if (size > PAGE_SIZE - 1) | ||
173 | goto error; | ||
174 | |||
175 | ret = -ENOMEM; | ||
176 | devname = (char *) get_zeroed_page(GFP_KERNEL); | ||
177 | if (!devname) | ||
178 | goto error; | ||
179 | |||
180 | options = (char *) get_zeroed_page(GFP_KERNEL); | ||
181 | if (!options) | ||
182 | goto error; | ||
183 | |||
184 | /* read the contents of the AFS special symlink */ | ||
185 | filler = (filler_t *)mntpt->d_inode->i_mapping->a_ops->readpage; | ||
186 | |||
187 | page = read_cache_page(mntpt->d_inode->i_mapping, 0, filler, NULL); | ||
188 | if (IS_ERR(page)) { | ||
189 | ret = PTR_ERR(page); | ||
190 | goto error; | ||
191 | } | ||
192 | |||
193 | ret = -EIO; | ||
194 | wait_on_page_locked(page); | ||
195 | if (!PageUptodate(page) || PageError(page)) | ||
196 | goto error; | ||
197 | |||
198 | buf = kmap(page); | ||
199 | memcpy(devname, buf, size); | ||
200 | kunmap(page); | ||
201 | page_cache_release(page); | ||
202 | page = NULL; | ||
203 | |||
204 | /* work out what options we want */ | ||
205 | super = AFS_FS_S(mntpt->d_sb); | ||
206 | memcpy(options, "cell=", 5); | ||
207 | strcpy(options + 5, super->volume->cell->name); | ||
208 | if (super->volume->type == AFSVL_RWVOL) | ||
209 | strcat(options, ",rwpath"); | ||
210 | |||
211 | /* try and do the mount */ | ||
212 | kdebug("--- attempting mount %s -o %s ---", devname, options); | ||
213 | mnt = do_kern_mount("afs", 0, devname, options); | ||
214 | kdebug("--- mount result %p ---", mnt); | ||
215 | |||
216 | free_page((unsigned long) devname); | ||
217 | free_page((unsigned long) options); | ||
218 | kleave(" = %p", mnt); | ||
219 | return mnt; | ||
220 | |||
221 | error: | ||
222 | if (page) | ||
223 | page_cache_release(page); | ||
224 | if (devname) | ||
225 | free_page((unsigned long) devname); | ||
226 | if (options) | ||
227 | free_page((unsigned long) options); | ||
228 | kleave(" = %d", ret); | ||
229 | return ERR_PTR(ret); | ||
230 | } /* end afs_mntpt_do_automount() */ | ||
231 | |||
232 | /*****************************************************************************/ | ||
233 | /* | ||
234 | * follow a link from a mountpoint directory, thus causing it to be mounted | ||
235 | */ | ||
236 | static int afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd) | ||
237 | { | ||
238 | struct vfsmount *newmnt; | ||
239 | struct dentry *old_dentry; | ||
240 | int err; | ||
241 | |||
242 | kenter("%p{%s},{%s:%p{%s}}", | ||
243 | dentry, | ||
244 | dentry->d_name.name, | ||
245 | nd->mnt->mnt_devname, | ||
246 | nd->dentry, | ||
247 | nd->dentry->d_name.name); | ||
248 | |||
249 | newmnt = afs_mntpt_do_automount(dentry); | ||
250 | if (IS_ERR(newmnt)) { | ||
251 | path_release(nd); | ||
252 | return PTR_ERR(newmnt); | ||
253 | } | ||
254 | |||
255 | old_dentry = nd->dentry; | ||
256 | nd->dentry = dentry; | ||
257 | err = do_add_mount(newmnt, nd, 0, &afs_vfsmounts); | ||
258 | nd->dentry = old_dentry; | ||
259 | |||
260 | path_release(nd); | ||
261 | |||
262 | if (!err) { | ||
263 | mntget(newmnt); | ||
264 | nd->mnt = newmnt; | ||
265 | dget(newmnt->mnt_root); | ||
266 | nd->dentry = newmnt->mnt_root; | ||
267 | } | ||
268 | |||
269 | kleave(" = %d", err); | ||
270 | return err; | ||
271 | } /* end afs_mntpt_follow_link() */ | ||
272 | |||
273 | /*****************************************************************************/ | ||
274 | /* | ||
275 | * handle mountpoint expiry timer going off | ||
276 | */ | ||
277 | static void afs_mntpt_expiry_timed_out(struct afs_timer *timer) | ||
278 | { | ||
279 | kenter(""); | ||
280 | |||
281 | mark_mounts_for_expiry(&afs_vfsmounts); | ||
282 | |||
283 | afs_kafstimod_add_timer(&afs_mntpt_expiry_timer, | ||
284 | afs_mntpt_expiry_timeout * HZ); | ||
285 | |||
286 | kleave(""); | ||
287 | } /* end afs_mntpt_expiry_timed_out() */ | ||
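The mountpoint test above is easy to miss among the pagecache plumbing: afs_mntpt_check_symlink() treats a symlink as a mountpoint when its body is longer than two bytes, starts with '%' or '#' (by AFS convention '#' marks a normal mount point and '%' a read/write one) and ends with '.'; afs_mntpt_do_automount() then passes that body to do_kern_mount() as the device name with "cell=<cell>[,rwpath]" options. As a sketch only, the same test restated as a stand-alone helper (the helper name is invented, not part of the patch):

	/* Stand-alone restatement of the mountpoint test; illustration only. */
	#include <stddef.h>

	static int looks_like_afs_mountpoint(const char *body, size_t size)
	{
		if (size <= 2)
			return 0;
		if (body[0] != '%' && body[0] != '#')
			return 0;
		return body[size - 1] == '.';
	}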
diff --git a/fs/afs/mount.h b/fs/afs/mount.h new file mode 100644 index 000000000000..9d2f46ec549f --- /dev/null +++ b/fs/afs/mount.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* mount.h: mount parameters | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_AFS_MOUNT_H | ||
13 | #define _LINUX_AFS_MOUNT_H | ||
14 | |||
15 | struct afs_mountdata { | ||
16 | const char *volume; /* name of volume */ | ||
17 | const char *cell; /* name of cell containing volume */ | ||
18 | const char *cache; /* name of cache block device */ | ||
19 | size_t nservers; /* number of server addresses listed */ | ||
20 | uint32_t servers[10]; /* IP addresses of servers in this cell */ | ||
21 | }; | ||
22 | |||
23 | #endif /* _LINUX_AFS_MOUNT_H */ | ||
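struct afs_mountdata is not referenced elsewhere in this patch, so purely to show the intended shape of the structure, a hypothetical initialiser might look like the sketch below; every value is invented, and the byte order of the server words is an assumption.

	/* Hypothetical example only; none of these values come from the patch,
	 * and it assumes the struct afs_mountdata definition above is visible. */
	static const struct afs_mountdata example_mntdata = {
		.volume   = "root.cell",
		.cell     = "example.org",
		.cache    = "/dev/hda5",                /* cache block device, if any */
		.nservers = 2,
		.servers  = { 0xc0a80101, 0xc0a80102 }, /* 192.168.1.1, .2 (host order assumed) */
	};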
diff --git a/fs/afs/proc.c b/fs/afs/proc.c new file mode 100644 index 000000000000..9c81b8f7eef0 --- /dev/null +++ b/fs/afs/proc.c | |||
@@ -0,0 +1,857 @@ | |||
1 | /* proc.c: /proc interface for AFS | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/proc_fs.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include "cell.h" | ||
18 | #include "volume.h" | ||
19 | #include <asm/uaccess.h> | ||
20 | #include "internal.h" | ||
21 | |||
22 | static struct proc_dir_entry *proc_afs; | ||
23 | |||
24 | |||
25 | static int afs_proc_cells_open(struct inode *inode, struct file *file); | ||
26 | static void *afs_proc_cells_start(struct seq_file *p, loff_t *pos); | ||
27 | static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos); | ||
28 | static void afs_proc_cells_stop(struct seq_file *p, void *v); | ||
29 | static int afs_proc_cells_show(struct seq_file *m, void *v); | ||
30 | static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf, | ||
31 | size_t size, loff_t *_pos); | ||
32 | |||
33 | static struct seq_operations afs_proc_cells_ops = { | ||
34 | .start = afs_proc_cells_start, | ||
35 | .next = afs_proc_cells_next, | ||
36 | .stop = afs_proc_cells_stop, | ||
37 | .show = afs_proc_cells_show, | ||
38 | }; | ||
39 | |||
40 | static struct file_operations afs_proc_cells_fops = { | ||
41 | .open = afs_proc_cells_open, | ||
42 | .read = seq_read, | ||
43 | .write = afs_proc_cells_write, | ||
44 | .llseek = seq_lseek, | ||
45 | .release = seq_release, | ||
46 | }; | ||
47 | |||
48 | static int afs_proc_rootcell_open(struct inode *inode, struct file *file); | ||
49 | static int afs_proc_rootcell_release(struct inode *inode, struct file *file); | ||
50 | static ssize_t afs_proc_rootcell_read(struct file *file, char __user *buf, | ||
51 | size_t size, loff_t *_pos); | ||
52 | static ssize_t afs_proc_rootcell_write(struct file *file, | ||
53 | const char __user *buf, | ||
54 | size_t size, loff_t *_pos); | ||
55 | |||
56 | static struct file_operations afs_proc_rootcell_fops = { | ||
57 | .open = afs_proc_rootcell_open, | ||
58 | .read = afs_proc_rootcell_read, | ||
59 | .write = afs_proc_rootcell_write, | ||
60 | .llseek = no_llseek, | ||
61 | .release = afs_proc_rootcell_release | ||
62 | }; | ||
63 | |||
64 | static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file); | ||
65 | static int afs_proc_cell_volumes_release(struct inode *inode, | ||
66 | struct file *file); | ||
67 | static void *afs_proc_cell_volumes_start(struct seq_file *p, loff_t *pos); | ||
68 | static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v, | ||
69 | loff_t *pos); | ||
70 | static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v); | ||
71 | static int afs_proc_cell_volumes_show(struct seq_file *m, void *v); | ||
72 | |||
73 | static struct seq_operations afs_proc_cell_volumes_ops = { | ||
74 | .start = afs_proc_cell_volumes_start, | ||
75 | .next = afs_proc_cell_volumes_next, | ||
76 | .stop = afs_proc_cell_volumes_stop, | ||
77 | .show = afs_proc_cell_volumes_show, | ||
78 | }; | ||
79 | |||
80 | static struct file_operations afs_proc_cell_volumes_fops = { | ||
81 | .open = afs_proc_cell_volumes_open, | ||
82 | .read = seq_read, | ||
83 | .llseek = seq_lseek, | ||
84 | .release = afs_proc_cell_volumes_release, | ||
85 | }; | ||
86 | |||
87 | static int afs_proc_cell_vlservers_open(struct inode *inode, | ||
88 | struct file *file); | ||
89 | static int afs_proc_cell_vlservers_release(struct inode *inode, | ||
90 | struct file *file); | ||
91 | static void *afs_proc_cell_vlservers_start(struct seq_file *p, loff_t *pos); | ||
92 | static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v, | ||
93 | loff_t *pos); | ||
94 | static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v); | ||
95 | static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v); | ||
96 | |||
97 | static struct seq_operations afs_proc_cell_vlservers_ops = { | ||
98 | .start = afs_proc_cell_vlservers_start, | ||
99 | .next = afs_proc_cell_vlservers_next, | ||
100 | .stop = afs_proc_cell_vlservers_stop, | ||
101 | .show = afs_proc_cell_vlservers_show, | ||
102 | }; | ||
103 | |||
104 | static struct file_operations afs_proc_cell_vlservers_fops = { | ||
105 | .open = afs_proc_cell_vlservers_open, | ||
106 | .read = seq_read, | ||
107 | .llseek = seq_lseek, | ||
108 | .release = afs_proc_cell_vlservers_release, | ||
109 | }; | ||
110 | |||
111 | static int afs_proc_cell_servers_open(struct inode *inode, struct file *file); | ||
112 | static int afs_proc_cell_servers_release(struct inode *inode, | ||
113 | struct file *file); | ||
114 | static void *afs_proc_cell_servers_start(struct seq_file *p, loff_t *pos); | ||
115 | static void *afs_proc_cell_servers_next(struct seq_file *p, void *v, | ||
116 | loff_t *pos); | ||
117 | static void afs_proc_cell_servers_stop(struct seq_file *p, void *v); | ||
118 | static int afs_proc_cell_servers_show(struct seq_file *m, void *v); | ||
119 | |||
120 | static struct seq_operations afs_proc_cell_servers_ops = { | ||
121 | .start = afs_proc_cell_servers_start, | ||
122 | .next = afs_proc_cell_servers_next, | ||
123 | .stop = afs_proc_cell_servers_stop, | ||
124 | .show = afs_proc_cell_servers_show, | ||
125 | }; | ||
126 | |||
127 | static struct file_operations afs_proc_cell_servers_fops = { | ||
128 | .open = afs_proc_cell_servers_open, | ||
129 | .read = seq_read, | ||
130 | .llseek = seq_lseek, | ||
131 | .release = afs_proc_cell_servers_release, | ||
132 | }; | ||
133 | |||
134 | /*****************************************************************************/ | ||
135 | /* | ||
136 | * initialise the /proc/fs/afs/ directory | ||
137 | */ | ||
138 | int afs_proc_init(void) | ||
139 | { | ||
140 | struct proc_dir_entry *p; | ||
141 | |||
142 | _enter(""); | ||
143 | |||
144 | proc_afs = proc_mkdir("fs/afs", NULL); | ||
145 | if (!proc_afs) | ||
146 | goto error; | ||
147 | proc_afs->owner = THIS_MODULE; | ||
148 | |||
149 | p = create_proc_entry("cells", 0, proc_afs); | ||
150 | if (!p) | ||
151 | goto error_proc; | ||
152 | p->proc_fops = &afs_proc_cells_fops; | ||
153 | p->owner = THIS_MODULE; | ||
154 | |||
155 | p = create_proc_entry("rootcell", 0, proc_afs); | ||
156 | if (!p) | ||
157 | goto error_cells; | ||
158 | p->proc_fops = &afs_proc_rootcell_fops; | ||
159 | p->owner = THIS_MODULE; | ||
160 | |||
161 | _leave(" = 0"); | ||
162 | return 0; | ||
163 | |||
164 | error_cells: | ||
165 | remove_proc_entry("cells", proc_afs); | ||
166 | error_proc: | ||
167 | remove_proc_entry("fs/afs", NULL); | ||
168 | error: | ||
169 | _leave(" = -ENOMEM"); | ||
170 | return -ENOMEM; | ||
171 | |||
172 | } /* end afs_proc_init() */ | ||
173 | |||
174 | /*****************************************************************************/ | ||
175 | /* | ||
176 | * clean up the /proc/fs/afs/ directory | ||
177 | */ | ||
178 | void afs_proc_cleanup(void) | ||
179 | { | ||
180 | remove_proc_entry("cells", proc_afs); | ||
181 | |||
182 | remove_proc_entry("fs/afs", NULL); | ||
183 | |||
184 | } /* end afs_proc_cleanup() */ | ||
185 | |||
186 | /*****************************************************************************/ | ||
187 | /* | ||
188 | * open "/proc/fs/afs/cells" which provides a summary of extant cells | ||
189 | */ | ||
190 | static int afs_proc_cells_open(struct inode *inode, struct file *file) | ||
191 | { | ||
192 | struct seq_file *m; | ||
193 | int ret; | ||
194 | |||
195 | ret = seq_open(file, &afs_proc_cells_ops); | ||
196 | if (ret < 0) | ||
197 | return ret; | ||
198 | |||
199 | m = file->private_data; | ||
200 | m->private = PDE(inode)->data; | ||
201 | |||
202 | return 0; | ||
203 | } /* end afs_proc_cells_open() */ | ||
204 | |||
205 | /*****************************************************************************/ | ||
206 | /* | ||
207 | * set up the iterator to start reading from the cells list and return the | ||
208 | * first item | ||
209 | */ | ||
210 | static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos) | ||
211 | { | ||
212 | struct list_head *_p; | ||
213 | loff_t pos = *_pos; | ||
214 | |||
215 | /* lock the list against modification */ | ||
216 | down_read(&afs_proc_cells_sem); | ||
217 | |||
218 | /* allow for the header line */ | ||
219 | if (!pos) | ||
220 | return (void *) 1; | ||
221 | pos--; | ||
222 | |||
223 | /* find the n'th element in the list */ | ||
224 | list_for_each(_p, &afs_proc_cells) | ||
225 | if (!pos--) | ||
226 | break; | ||
227 | |||
228 | return _p != &afs_proc_cells ? _p : NULL; | ||
229 | } /* end afs_proc_cells_start() */ | ||
230 | |||
231 | /*****************************************************************************/ | ||
232 | /* | ||
233 | * move to next cell in cells list | ||
234 | */ | ||
235 | static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos) | ||
236 | { | ||
237 | struct list_head *_p; | ||
238 | |||
239 | (*pos)++; | ||
240 | |||
241 | _p = v; | ||
242 | _p = v == (void *) 1 ? afs_proc_cells.next : _p->next; | ||
243 | |||
244 | return _p != &afs_proc_cells ? _p : NULL; | ||
245 | } /* end afs_proc_cells_next() */ | ||
246 | |||
247 | /*****************************************************************************/ | ||
248 | /* | ||
249 | * clean up after reading from the cells list | ||
250 | */ | ||
251 | static void afs_proc_cells_stop(struct seq_file *p, void *v) | ||
252 | { | ||
253 | up_read(&afs_proc_cells_sem); | ||
254 | |||
255 | } /* end afs_proc_cells_stop() */ | ||
256 | |||
257 | /*****************************************************************************/ | ||
258 | /* | ||
259 | * display a header line followed by a load of cell lines | ||
260 | */ | ||
261 | static int afs_proc_cells_show(struct seq_file *m, void *v) | ||
262 | { | ||
263 | struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link); | ||
264 | |||
265 | /* display header on line 1 */ | ||
266 | if (v == (void *) 1) { | ||
267 | seq_puts(m, "USE NAME\n"); | ||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | /* display one cell per line on subsequent lines */ | ||
272 | seq_printf(m, "%3d %s\n", atomic_read(&cell->usage), cell->name); | ||
273 | |||
274 | return 0; | ||
275 | } /* end afs_proc_cells_show() */ | ||
276 | |||
277 | /*****************************************************************************/ | ||
278 | /* | ||
279 | * handle writes to /proc/fs/afs/cells | ||
280 | * - to add cells: echo "add <cellname> <IP>[:<IP>][:<IP>]" | ||
281 | */ | ||
282 | static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf, | ||
283 | size_t size, loff_t *_pos) | ||
284 | { | ||
285 | char *kbuf, *name, *args; | ||
286 | int ret; | ||
287 | |||
288 | /* start by dragging the command into memory */ | ||
289 | if (size <= 1 || size >= PAGE_SIZE) | ||
290 | return -EINVAL; | ||
291 | |||
292 | kbuf = kmalloc(size + 1, GFP_KERNEL); | ||
293 | if (!kbuf) | ||
294 | return -ENOMEM; | ||
295 | |||
296 | ret = -EFAULT; | ||
297 | if (copy_from_user(kbuf, buf, size) != 0) | ||
298 | goto done; | ||
299 | kbuf[size] = 0; | ||
300 | |||
301 | /* trim to first NL */ | ||
302 | name = memchr(kbuf, '\n', size); | ||
303 | if (name) | ||
304 | *name = 0; | ||
305 | |||
306 | /* split into command, name and argslist */ | ||
307 | name = strchr(kbuf, ' '); | ||
308 | if (!name) | ||
309 | goto inval; | ||
310 | do { | ||
311 | *name++ = 0; | ||
312 | } while(*name == ' '); | ||
313 | if (!*name) | ||
314 | goto inval; | ||
315 | |||
316 | args = strchr(name, ' '); | ||
317 | if (!args) | ||
318 | goto inval; | ||
319 | do { | ||
320 | *args++ = 0; | ||
321 | } while(*args == ' '); | ||
322 | if (!*args) | ||
323 | goto inval; | ||
324 | |||
325 | /* determine command to perform */ | ||
326 | _debug("cmd=%s name=%s args=%s", kbuf, name, args); | ||
327 | |||
328 | if (strcmp(kbuf, "add") == 0) { | ||
329 | struct afs_cell *cell; | ||
330 | ret = afs_cell_create(name, args, &cell); | ||
331 | if (ret < 0) | ||
332 | goto done; | ||
333 | |||
334 | printk(KERN_INFO "kAFS: Added new cell '%s'\n", name); | ||
335 | } | ||
336 | else { | ||
337 | goto inval; | ||
338 | } | ||
339 | |||
340 | ret = size; | ||
341 | |||
342 | done: | ||
343 | kfree(kbuf); | ||
344 | _leave(" = %d", ret); | ||
345 | return ret; | ||
346 | |||
347 | inval: | ||
348 | ret = -EINVAL; | ||
349 | printk(KERN_ERR "kAFS: Invalid command on /proc/fs/afs/cells file\n"); | ||
350 | goto done; | ||
351 | } /* end afs_proc_cells_write() */ | ||
352 | |||
353 | /*****************************************************************************/ | ||
354 | /* | ||
355 | * Stubs for /proc/fs/afs/rootcell | ||
356 | */ | ||
357 | static int afs_proc_rootcell_open(struct inode *inode, struct file *file) | ||
358 | { | ||
359 | return 0; | ||
360 | } | ||
361 | |||
362 | static int afs_proc_rootcell_release(struct inode *inode, struct file *file) | ||
363 | { | ||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | static ssize_t afs_proc_rootcell_read(struct file *file, char __user *buf, | ||
368 | size_t size, loff_t *_pos) | ||
369 | { | ||
370 | return 0; | ||
371 | } | ||
372 | |||
373 | /*****************************************************************************/ | ||
374 | /* | ||
375 | * handle writes to /proc/fs/afs/rootcell | ||
376 | * - to initialize rootcell: echo "cell.name:192.168.231.14" | ||
377 | */ | ||
378 | static ssize_t afs_proc_rootcell_write(struct file *file, | ||
379 | const char __user *buf, | ||
380 | size_t size, loff_t *_pos) | ||
381 | { | ||
382 | char *kbuf, *s; | ||
383 | int ret; | ||
384 | |||
385 | /* start by dragging the command into memory */ | ||
386 | if (size <= 1 || size >= PAGE_SIZE) | ||
387 | return -EINVAL; | ||
388 | |||
389 | ret = -ENOMEM; | ||
390 | kbuf = kmalloc(size + 1, GFP_KERNEL); | ||
391 | if (!kbuf) | ||
392 | goto nomem; | ||
393 | |||
394 | ret = -EFAULT; | ||
395 | if (copy_from_user(kbuf, buf, size) != 0) | ||
396 | goto infault; | ||
397 | kbuf[size] = 0; | ||
398 | |||
399 | /* trim to first NL */ | ||
400 | s = memchr(kbuf, '\n', size); | ||
401 | if (s) | ||
402 | *s = 0; | ||
403 | |||
404 | /* determine command to perform */ | ||
405 | _debug("rootcell=%s", kbuf); | ||
406 | |||
407 | ret = afs_cell_init(kbuf); | ||
408 | if (ret >= 0) | ||
409 | ret = size; /* consume everything, always */ | ||
410 | |||
411 | infault: | ||
412 | kfree(kbuf); | ||
413 | nomem: | ||
414 | _leave(" = %d", ret); | ||
415 | return ret; | ||
416 | } /* end afs_proc_rootcell_write() */ | ||
417 | |||
418 | /*****************************************************************************/ | ||
419 | /* | ||
420 | * initialise /proc/fs/afs/<cell>/ | ||
421 | */ | ||
422 | int afs_proc_cell_setup(struct afs_cell *cell) | ||
423 | { | ||
424 | struct proc_dir_entry *p; | ||
425 | |||
426 | _enter("%p{%s}", cell, cell->name); | ||
427 | |||
428 | cell->proc_dir = proc_mkdir(cell->name, proc_afs); | ||
429 | if (!cell->proc_dir) | ||
430 | return -ENOMEM; | ||
431 | |||
432 | p = create_proc_entry("servers", 0, cell->proc_dir); | ||
433 | if (!p) | ||
434 | goto error_proc; | ||
435 | p->proc_fops = &afs_proc_cell_servers_fops; | ||
436 | p->owner = THIS_MODULE; | ||
437 | p->data = cell; | ||
438 | |||
439 | p = create_proc_entry("vlservers", 0, cell->proc_dir); | ||
440 | if (!p) | ||
441 | goto error_servers; | ||
442 | p->proc_fops = &afs_proc_cell_vlservers_fops; | ||
443 | p->owner = THIS_MODULE; | ||
444 | p->data = cell; | ||
445 | |||
446 | p = create_proc_entry("volumes", 0, cell->proc_dir); | ||
447 | if (!p) | ||
448 | goto error_vlservers; | ||
449 | p->proc_fops = &afs_proc_cell_volumes_fops; | ||
450 | p->owner = THIS_MODULE; | ||
451 | p->data = cell; | ||
452 | |||
453 | _leave(" = 0"); | ||
454 | return 0; | ||
455 | |||
456 | error_vlservers: | ||
457 | remove_proc_entry("vlservers", cell->proc_dir); | ||
458 | error_servers: | ||
459 | remove_proc_entry("servers", cell->proc_dir); | ||
460 | error_proc: | ||
461 | remove_proc_entry(cell->name, proc_afs); | ||
462 | _leave(" = -ENOMEM"); | ||
463 | return -ENOMEM; | ||
464 | } /* end afs_proc_cell_setup() */ | ||
465 | |||
466 | /*****************************************************************************/ | ||
467 | /* | ||
468 | * remove /proc/fs/afs/<cell>/ | ||
469 | */ | ||
470 | void afs_proc_cell_remove(struct afs_cell *cell) | ||
471 | { | ||
472 | _enter(""); | ||
473 | |||
474 | remove_proc_entry("volumes", cell->proc_dir); | ||
475 | remove_proc_entry("vlservers", cell->proc_dir); | ||
476 | remove_proc_entry("servers", cell->proc_dir); | ||
477 | remove_proc_entry(cell->name, proc_afs); | ||
478 | |||
479 | _leave(""); | ||
480 | } /* end afs_proc_cell_remove() */ | ||
481 | |||
482 | /*****************************************************************************/ | ||
483 | /* | ||
484 | * open "/proc/fs/afs/<cell>/volumes" which provides a summary of extant volumes | ||
485 | */ | ||
486 | static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file) | ||
487 | { | ||
488 | struct afs_cell *cell; | ||
489 | struct seq_file *m; | ||
490 | int ret; | ||
491 | |||
492 | cell = afs_get_cell_maybe((struct afs_cell **) &PDE(inode)->data); | ||
493 | if (!cell) | ||
494 | return -ENOENT; | ||
495 | |||
496 | ret = seq_open(file, &afs_proc_cell_volumes_ops); | ||
497 | if (ret < 0) | ||
498 | return ret; | ||
499 | |||
500 | m = file->private_data; | ||
501 | m->private = cell; | ||
502 | |||
503 | return 0; | ||
504 | } /* end afs_proc_cell_volumes_open() */ | ||
505 | |||
506 | /*****************************************************************************/ | ||
507 | /* | ||
508 | * close the file and release the ref to the cell | ||
509 | */ | ||
510 | static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file) | ||
511 | { | ||
512 | struct afs_cell *cell = PDE(inode)->data; | ||
513 | int ret; | ||
514 | |||
515 | ret = seq_release(inode, file); | ||
516 | |||
517 | afs_put_cell(cell); | ||
518 | |||
519 | return ret; | ||
520 | } /* end afs_proc_cell_volumes_release() */ | ||
521 | |||
522 | /*****************************************************************************/ | ||
523 | /* | ||
524 | * set up the iterator to start reading from the cell's volume location | ||
525 | * list and return the first item | ||
526 | */ | ||
527 | static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos) | ||
528 | { | ||
529 | struct list_head *_p; | ||
530 | struct afs_cell *cell = m->private; | ||
531 | loff_t pos = *_pos; | ||
532 | |||
533 | _enter("cell=%p pos=%Ld", cell, *_pos); | ||
534 | |||
535 | /* lock the list against modification */ | ||
536 | down_read(&cell->vl_sem); | ||
537 | |||
538 | /* allow for the header line */ | ||
539 | if (!pos) | ||
540 | return (void *) 1; | ||
541 | pos--; | ||
542 | |||
543 | /* find the n'th element in the list */ | ||
544 | list_for_each(_p, &cell->vl_list) | ||
545 | if (!pos--) | ||
546 | break; | ||
547 | |||
548 | return _p != &cell->vl_list ? _p : NULL; | ||
549 | } /* end afs_proc_cell_volumes_start() */ | ||
550 | |||
551 | /*****************************************************************************/ | ||
552 | /* | ||
553 | * move to the next volume in the cell's volume location list | ||
554 | */ | ||
555 | static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v, | ||
556 | loff_t *_pos) | ||
557 | { | ||
558 | struct list_head *_p; | ||
559 | struct afs_cell *cell = p->private; | ||
560 | |||
561 | _enter("cell=%p pos=%Ld", cell, *_pos); | ||
562 | |||
563 | (*_pos)++; | ||
564 | |||
565 | _p = v; | ||
566 | _p = v == (void *) 1 ? cell->vl_list.next : _p->next; | ||
567 | |||
568 | return _p != &cell->vl_list ? _p : NULL; | ||
569 | } /* end afs_proc_cell_volumes_next() */ | ||
570 | |||
571 | /*****************************************************************************/ | ||
572 | /* | ||
573 | * clean up after reading from the volume location list | ||
574 | */ | ||
575 | static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v) | ||
576 | { | ||
577 | struct afs_cell *cell = p->private; | ||
578 | |||
579 | up_read(&cell->vl_sem); | ||
580 | |||
581 | } /* end afs_proc_cell_volumes_stop() */ | ||
582 | |||
583 | /*****************************************************************************/ | ||
584 | /* | ||
585 | * display a header line followed by a load of volume lines | ||
586 | */ | ||
587 | static int afs_proc_cell_volumes_show(struct seq_file *m, void *v) | ||
588 | { | ||
589 | struct afs_vlocation *vlocation = | ||
590 | list_entry(v, struct afs_vlocation, link); | ||
591 | |||
592 | /* display header on line 1 */ | ||
593 | if (v == (void *) 1) { | ||
594 | seq_puts(m, "USE VLID[0] VLID[1] VLID[2] NAME\n"); | ||
595 | return 0; | ||
596 | } | ||
597 | |||
598 | /* display one volume per line on subsequent lines */ | ||
599 | seq_printf(m, "%3d %08x %08x %08x %s\n", | ||
600 | atomic_read(&vlocation->usage), | ||
601 | vlocation->vldb.vid[0], | ||
602 | vlocation->vldb.vid[1], | ||
603 | vlocation->vldb.vid[2], | ||
604 | vlocation->vldb.name | ||
605 | ); | ||
606 | |||
607 | return 0; | ||
608 | } /* end afs_proc_cell_volumes_show() */ | ||
609 | |||
610 | /*****************************************************************************/ | ||
611 | /* | ||
612 | * open "/proc/fs/afs/<cell>/vlservers" which provides a list of volume | ||
613 | * location servers | ||
614 | */ | ||
615 | static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file) | ||
616 | { | ||
617 | struct afs_cell *cell; | ||
618 | struct seq_file *m; | ||
619 | int ret; | ||
620 | |||
621 | cell = afs_get_cell_maybe((struct afs_cell **) &PDE(inode)->data); | ||
622 | if (!cell) | ||
623 | return -ENOENT; | ||
624 | |||
625 | ret = seq_open(file, &afs_proc_cell_vlservers_ops); | ||
626 | if (ret < 0) | ||
627 | return ret; | ||
628 | |||
629 | m = file->private_data; | ||
630 | m->private = cell; | ||
631 | |||
632 | return 0; | ||
633 | } /* end afs_proc_cell_vlservers_open() */ | ||
634 | |||
635 | /*****************************************************************************/ | ||
636 | /* | ||
637 | * close the file and release the ref to the cell | ||
638 | */ | ||
639 | static int afs_proc_cell_vlservers_release(struct inode *inode, | ||
640 | struct file *file) | ||
641 | { | ||
642 | struct afs_cell *cell = PDE(inode)->data; | ||
643 | int ret; | ||
644 | |||
645 | ret = seq_release(inode, file); | ||
646 | |||
647 | afs_put_cell(cell); | ||
648 | |||
649 | return ret; | ||
650 | } /* end afs_proc_cell_vlservers_release() */ | ||
651 | |||
652 | /*****************************************************************************/ | ||
653 | /* | ||
654 | * set up the iterator to start reading from the cell's VL server address | ||
655 | * list and return the first item | ||
656 | */ | ||
657 | static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos) | ||
658 | { | ||
659 | struct afs_cell *cell = m->private; | ||
660 | loff_t pos = *_pos; | ||
661 | |||
662 | _enter("cell=%p pos=%Ld", cell, *_pos); | ||
663 | |||
664 | /* lock the list against modification */ | ||
665 | down_read(&cell->vl_sem); | ||
666 | |||
667 | /* allow for the header line */ | ||
668 | if (!pos) | ||
669 | return (void *) 1; | ||
670 | pos--; | ||
671 | |||
672 | if (pos >= cell->vl_naddrs) | ||
673 | return NULL; | ||
674 | |||
675 | return &cell->vl_addrs[pos]; | ||
676 | } /* end afs_proc_cell_vlservers_start() */ | ||
677 | |||
678 | /*****************************************************************************/ | ||
679 | /* | ||
680 | * move to the next VL server address in the cell's list | ||
681 | */ | ||
682 | static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v, | ||
683 | loff_t *_pos) | ||
684 | { | ||
685 | struct afs_cell *cell = p->private; | ||
686 | loff_t pos; | ||
687 | |||
688 | _enter("cell=%p{nad=%u} pos=%Ld", cell, cell->vl_naddrs, *_pos); | ||
689 | |||
690 | pos = *_pos; | ||
691 | (*_pos)++; | ||
692 | if (pos >= cell->vl_naddrs) | ||
693 | return NULL; | ||
694 | |||
695 | return &cell->vl_addrs[pos]; | ||
696 | } /* end afs_proc_cell_vlservers_next() */ | ||
697 | |||
698 | /*****************************************************************************/ | ||
699 | /* | ||
700 | * clean up after reading from the VL server address list | ||
701 | */ | ||
702 | static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v) | ||
703 | { | ||
704 | struct afs_cell *cell = p->private; | ||
705 | |||
706 | up_read(&cell->vl_sem); | ||
707 | |||
708 | } /* end afs_proc_cell_vlservers_stop() */ | ||
709 | |||
710 | /*****************************************************************************/ | ||
711 | /* | ||
712 | * display a header line followed by a load of VL server address lines | ||
713 | */ | ||
714 | static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v) | ||
715 | { | ||
716 | struct in_addr *addr = v; | ||
717 | |||
718 | /* display header on line 1 */ | ||
719 | if (v == (struct in_addr *) 1) { | ||
720 | seq_puts(m, "ADDRESS\n"); | ||
721 | return 0; | ||
722 | } | ||
723 | |||
724 | /* display one address per line on subsequent lines */ | ||
725 | seq_printf(m, "%u.%u.%u.%u\n", NIPQUAD(addr->s_addr)); | ||
726 | |||
727 | return 0; | ||
728 | } /* end afs_proc_cell_vlservers_show() */ | ||
729 | |||
730 | /*****************************************************************************/ | ||
731 | /* | ||
732 | * open "/proc/fs/afs/<cell>/servers" which provides a summary of active | ||
733 | * servers | ||
734 | */ | ||
735 | static int afs_proc_cell_servers_open(struct inode *inode, struct file *file) | ||
736 | { | ||
737 | struct afs_cell *cell; | ||
738 | struct seq_file *m; | ||
739 | int ret; | ||
740 | |||
741 | cell = afs_get_cell_maybe((struct afs_cell **) &PDE(inode)->data); | ||
742 | if (!cell) | ||
743 | return -ENOENT; | ||
744 | |||
745 | ret = seq_open(file, &afs_proc_cell_servers_ops); | ||
746 | if (ret < 0) | ||
747 | return ret; | ||
748 | |||
749 | m = file->private_data; | ||
750 | m->private = cell; | ||
751 | |||
752 | return 0; | ||
753 | } /* end afs_proc_cell_servers_open() */ | ||
754 | |||
755 | /*****************************************************************************/ | ||
756 | /* | ||
757 | * close the file and release the ref to the cell | ||
758 | */ | ||
759 | static int afs_proc_cell_servers_release(struct inode *inode, | ||
760 | struct file *file) | ||
761 | { | ||
762 | struct afs_cell *cell = PDE(inode)->data; | ||
763 | int ret; | ||
764 | |||
765 | ret = seq_release(inode, file); | ||
766 | |||
767 | afs_put_cell(cell); | ||
768 | |||
769 | return ret; | ||
770 | } /* end afs_proc_cell_servers_release() */ | ||
771 | |||
772 | /*****************************************************************************/ | ||
773 | /* | ||
774 | * set up the iterator to start reading from the cell's server list and | ||
775 | * return the first item | ||
776 | */ | ||
777 | static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos) | ||
778 | { | ||
779 | struct list_head *_p; | ||
780 | struct afs_cell *cell = m->private; | ||
781 | loff_t pos = *_pos; | ||
782 | |||
783 | _enter("cell=%p pos=%Ld", cell, *_pos); | ||
784 | |||
785 | /* lock the list against modification */ | ||
786 | read_lock(&cell->sv_lock); | ||
787 | |||
788 | /* allow for the header line */ | ||
789 | if (!pos) | ||
790 | return (void *) 1; | ||
791 | pos--; | ||
792 | |||
793 | /* find the n'th element in the list */ | ||
794 | list_for_each(_p, &cell->sv_list) | ||
795 | if (!pos--) | ||
796 | break; | ||
797 | |||
798 | return _p != &cell->sv_list ? _p : NULL; | ||
799 | } /* end afs_proc_cell_servers_start() */ | ||
800 | |||
801 | /*****************************************************************************/ | ||
802 | /* | ||
803 | * move to the next server in the cell's server list | ||
804 | */ | ||
805 | static void *afs_proc_cell_servers_next(struct seq_file *p, void *v, | ||
806 | loff_t *_pos) | ||
807 | { | ||
808 | struct list_head *_p; | ||
809 | struct afs_cell *cell = p->private; | ||
810 | |||
811 | _enter("cell=%p pos=%Ld", cell, *_pos); | ||
812 | |||
813 | (*_pos)++; | ||
814 | |||
815 | _p = v; | ||
816 | _p = v == (void *) 1 ? cell->sv_list.next : _p->next; | ||
817 | |||
818 | return _p != &cell->sv_list ? _p : NULL; | ||
819 | } /* end afs_proc_cell_servers_next() */ | ||
820 | |||
821 | /*****************************************************************************/ | ||
822 | /* | ||
823 | * clean up after reading from the server list | ||
824 | */ | ||
825 | static void afs_proc_cell_servers_stop(struct seq_file *p, void *v) | ||
826 | { | ||
827 | struct afs_cell *cell = p->private; | ||
828 | |||
829 | read_unlock(&cell->sv_lock); | ||
830 | |||
831 | } /* end afs_proc_cell_servers_stop() */ | ||
832 | |||
833 | /*****************************************************************************/ | ||
834 | /* | ||
835 | * display a header line followed by a load of server lines | ||
836 | */ | ||
837 | static int afs_proc_cell_servers_show(struct seq_file *m, void *v) | ||
838 | { | ||
839 | struct afs_server *server = list_entry(v, struct afs_server, link); | ||
840 | char ipaddr[20]; | ||
841 | |||
842 | /* display header on line 1 */ | ||
843 | if (v == (void *) 1) { | ||
844 | seq_puts(m, "USE ADDR STATE\n"); | ||
845 | return 0; | ||
846 | } | ||
847 | |||
848 | /* display one server per line on subsequent lines */ | ||
849 | sprintf(ipaddr, "%u.%u.%u.%u", NIPQUAD(server->addr)); | ||
850 | seq_printf(m, "%3d %-15.15s %5d\n", | ||
851 | atomic_read(&server->usage), | ||
852 | ipaddr, | ||
853 | server->fs_state | ||
854 | ); | ||
855 | |||
856 | return 0; | ||
857 | } /* end afs_proc_cell_servers_show() */ | ||
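For anyone exercising the write handlers above, the interface can be driven from user space along these lines. This is a sketch, not part of the patch; the cell name and address are placeholders, and the strings follow the formats documented in the comments of afs_proc_cells_write() and afs_proc_rootcell_write().

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/* Add a cell via /proc/fs/afs/cells, then nominate it as the root cell;
	 * returns 0 on success, -1 on error. */
	static int kafs_configure_example(void)
	{
		static const char add[] = "add example.org 192.168.231.14\n";
		static const char root[] = "example.org:192.168.231.14\n";
		int fd;

		fd = open("/proc/fs/afs/cells", O_WRONLY);
		if (fd < 0 || write(fd, add, strlen(add)) < 0)
			goto fail;
		close(fd);

		fd = open("/proc/fs/afs/rootcell", O_WRONLY);
		if (fd < 0 || write(fd, root, strlen(root)) < 0)
			goto fail;
		return close(fd);

	fail:
		if (fd >= 0)
			close(fd);
		return -1;
	}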
diff --git a/fs/afs/server.c b/fs/afs/server.c new file mode 100644 index 000000000000..62b093aa41c6 --- /dev/null +++ b/fs/afs/server.c | |||
@@ -0,0 +1,502 @@ | |||
1 | /* server.c: AFS server record management | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <rxrpc/peer.h> | ||
15 | #include <rxrpc/connection.h> | ||
16 | #include "volume.h" | ||
17 | #include "cell.h" | ||
18 | #include "server.h" | ||
19 | #include "transport.h" | ||
20 | #include "vlclient.h" | ||
21 | #include "kafstimod.h" | ||
22 | #include "internal.h" | ||
23 | |||
24 | DEFINE_SPINLOCK(afs_server_peer_lock); | ||
25 | |||
26 | #define FS_SERVICE_ID 1 /* AFS File Service ID */ | ||
27 | #define VL_SERVICE_ID 52 /* AFS Volume Location Service ID */ | ||
28 | |||
29 | static void __afs_server_timeout(struct afs_timer *timer) | ||
30 | { | ||
31 | struct afs_server *server = | ||
32 | list_entry(timer, struct afs_server, timeout); | ||
33 | |||
34 | _debug("SERVER TIMEOUT [%p{u=%d}]", | ||
35 | server, atomic_read(&server->usage)); | ||
36 | |||
37 | afs_server_do_timeout(server); | ||
38 | } | ||
39 | |||
40 | static const struct afs_timer_ops afs_server_timer_ops = { | ||
41 | .timed_out = __afs_server_timeout, | ||
42 | }; | ||
43 | |||
44 | /*****************************************************************************/ | ||
45 | /* | ||
46 | * lookup a server record in a cell | ||
47 | * - TODO: search the cell's server list | ||
48 | */ | ||
49 | int afs_server_lookup(struct afs_cell *cell, const struct in_addr *addr, | ||
50 | struct afs_server **_server) | ||
51 | { | ||
52 | struct afs_server *server, *active, *zombie; | ||
53 | int loop; | ||
54 | |||
55 | _enter("%p,%08x,", cell, ntohl(addr->s_addr)); | ||
56 | |||
57 | /* allocate and initialise a server record */ | ||
58 | server = kmalloc(sizeof(struct afs_server), GFP_KERNEL); | ||
59 | if (!server) { | ||
60 | _leave(" = -ENOMEM"); | ||
61 | return -ENOMEM; | ||
62 | } | ||
63 | |||
64 | memset(server, 0, sizeof(struct afs_server)); | ||
65 | atomic_set(&server->usage, 1); | ||
66 | |||
67 | INIT_LIST_HEAD(&server->link); | ||
68 | init_rwsem(&server->sem); | ||
69 | INIT_LIST_HEAD(&server->fs_callq); | ||
70 | spin_lock_init(&server->fs_lock); | ||
71 | INIT_LIST_HEAD(&server->cb_promises); | ||
72 | spin_lock_init(&server->cb_lock); | ||
73 | |||
74 | for (loop = 0; loop < AFS_SERVER_CONN_LIST_SIZE; loop++) | ||
75 | server->fs_conn_cnt[loop] = 4; | ||
76 | |||
77 | memcpy(&server->addr, addr, sizeof(struct in_addr)); | ||
78 | server->addr.s_addr = addr->s_addr; | ||
79 | |||
80 | afs_timer_init(&server->timeout, &afs_server_timer_ops); | ||
81 | |||
82 | /* add to the cell */ | ||
83 | write_lock(&cell->sv_lock); | ||
84 | |||
85 | /* check the active list */ | ||
86 | list_for_each_entry(active, &cell->sv_list, link) { | ||
87 | if (active->addr.s_addr == addr->s_addr) | ||
88 | goto use_active_server; | ||
89 | } | ||
90 | |||
91 | /* check the inactive list */ | ||
92 | spin_lock(&cell->sv_gylock); | ||
93 | list_for_each_entry(zombie, &cell->sv_graveyard, link) { | ||
94 | if (zombie->addr.s_addr == addr->s_addr) | ||
95 | goto resurrect_server; | ||
96 | } | ||
97 | spin_unlock(&cell->sv_gylock); | ||
98 | |||
99 | afs_get_cell(cell); | ||
100 | server->cell = cell; | ||
101 | list_add_tail(&server->link, &cell->sv_list); | ||
102 | |||
103 | write_unlock(&cell->sv_lock); | ||
104 | |||
105 | *_server = server; | ||
106 | _leave(" = 0 (%p)", server); | ||
107 | return 0; | ||
108 | |||
109 | /* found a matching active server */ | ||
110 | use_active_server: | ||
111 | _debug("active server"); | ||
112 | afs_get_server(active); | ||
113 | write_unlock(&cell->sv_lock); | ||
114 | |||
115 | kfree(server); | ||
116 | |||
117 | *_server = active; | ||
118 | _leave(" = 0 (%p)", active); | ||
119 | return 0; | ||
120 | |||
121 | /* found a matching server in the graveyard, so resurrect it and | ||
122 | * dispose of the new record */ | ||
123 | resurrect_server: | ||
124 | _debug("resurrecting server"); | ||
125 | |||
126 | list_del(&zombie->link); | ||
127 | list_add_tail(&zombie->link, &cell->sv_list); | ||
128 | afs_get_server(zombie); | ||
129 | afs_kafstimod_del_timer(&zombie->timeout); | ||
130 | spin_unlock(&cell->sv_gylock); | ||
131 | write_unlock(&cell->sv_lock); | ||
132 | |||
133 | kfree(server); | ||
134 | |||
135 | *_server = zombie; | ||
136 | _leave(" = 0 (%p)", zombie); | ||
137 | return 0; | ||
138 | |||
139 | } /* end afs_server_lookup() */ | ||
140 | |||
141 | /*****************************************************************************/ | ||
142 | /* | ||
143 | * destroy a server record | ||
144 | * - removes from the cell list | ||
145 | */ | ||
146 | void afs_put_server(struct afs_server *server) | ||
147 | { | ||
148 | struct afs_cell *cell; | ||
149 | |||
150 | if (!server) | ||
151 | return; | ||
152 | |||
153 | _enter("%p", server); | ||
154 | |||
155 | cell = server->cell; | ||
156 | |||
157 | /* sanity check */ | ||
158 | BUG_ON(atomic_read(&server->usage) <= 0); | ||
159 | |||
160 | /* to prevent a race, the decrement and the dequeue must be effectively | ||
161 | * atomic */ | ||
162 | write_lock(&cell->sv_lock); | ||
163 | |||
164 | if (likely(!atomic_dec_and_test(&server->usage))) { | ||
165 | write_unlock(&cell->sv_lock); | ||
166 | _leave(""); | ||
167 | return; | ||
168 | } | ||
169 | |||
170 | spin_lock(&cell->sv_gylock); | ||
171 | list_del(&server->link); | ||
172 | list_add_tail(&server->link, &cell->sv_graveyard); | ||
173 | |||
174 | /* time out in 10 secs */ | ||
175 | afs_kafstimod_add_timer(&server->timeout, 10 * HZ); | ||
176 | |||
177 | spin_unlock(&cell->sv_gylock); | ||
178 | write_unlock(&cell->sv_lock); | ||
179 | |||
180 | _leave(" [killed]"); | ||
181 | } /* end afs_put_server() */ | ||
182 | |||
183 | /*****************************************************************************/ | ||
184 | /* | ||
185 | * timeout server record | ||
186 | * - removes from the cell's graveyard if the usage count is zero | ||
187 | */ | ||
188 | void afs_server_do_timeout(struct afs_server *server) | ||
189 | { | ||
190 | struct rxrpc_peer *peer; | ||
191 | struct afs_cell *cell; | ||
192 | int loop; | ||
193 | |||
194 | _enter("%p", server); | ||
195 | |||
196 | cell = server->cell; | ||
197 | |||
198 | BUG_ON(atomic_read(&server->usage) < 0); | ||
199 | |||
200 | /* remove from graveyard if still dead */ | ||
201 | spin_lock(&cell->sv_gylock); | ||
202 | if (atomic_read(&server->usage) == 0) | ||
203 | list_del_init(&server->link); | ||
204 | else | ||
205 | server = NULL; | ||
206 | spin_unlock(&cell->sv_gylock); | ||
207 | |||
208 | if (!server) { | ||
209 | _leave(""); | ||
210 | return; /* resurrected */ | ||
211 | } | ||
212 | |||
213 | /* we can now destroy it properly */ | ||
214 | afs_put_cell(cell); | ||
215 | |||
216 | /* uncross-point the structs under a global lock */ | ||
217 | spin_lock(&afs_server_peer_lock); | ||
218 | peer = server->peer; | ||
219 | if (peer) { | ||
220 | server->peer = NULL; | ||
221 | peer->user = NULL; | ||
222 | } | ||
223 | spin_unlock(&afs_server_peer_lock); | ||
224 | |||
225 | /* finish cleaning up the server */ | ||
226 | for (loop = AFS_SERVER_CONN_LIST_SIZE - 1; loop >= 0; loop--) | ||
227 | if (server->fs_conn[loop]) | ||
228 | rxrpc_put_connection(server->fs_conn[loop]); | ||
229 | |||
230 | if (server->vlserver) | ||
231 | rxrpc_put_connection(server->vlserver); | ||
232 | |||
233 | kfree(server); | ||
234 | |||
235 | _leave(" [destroyed]"); | ||
236 | } /* end afs_server_do_timeout() */ | ||
237 | |||
238 | /*****************************************************************************/ | ||
239 | /* | ||
240 | * get a callslot on a connection to the fileserver on the specified server | ||
241 | */ | ||
242 | int afs_server_request_callslot(struct afs_server *server, | ||
243 | struct afs_server_callslot *callslot) | ||
244 | { | ||
245 | struct afs_server_callslot *pcallslot; | ||
246 | struct rxrpc_connection *conn; | ||
247 | int nconn, ret; | ||
248 | |||
249 | _enter("%p,",server); | ||
250 | |||
251 | INIT_LIST_HEAD(&callslot->link); | ||
252 | callslot->task = current; | ||
253 | callslot->conn = NULL; | ||
254 | callslot->nconn = -1; | ||
255 | callslot->ready = 0; | ||
256 | |||
257 | ret = 0; | ||
258 | conn = NULL; | ||
259 | |||
260 | /* get hold of a callslot first */ | ||
261 | spin_lock(&server->fs_lock); | ||
262 | |||
263 | /* resurrect the server if its death timeout has expired */ | ||
264 | if (server->fs_state) { | ||
265 | if (time_before(jiffies, server->fs_dead_jif)) { | ||
266 | ret = server->fs_state; | ||
267 | spin_unlock(&server->fs_lock); | ||
268 | _leave(" = %d [still dead]", ret); | ||
269 | return ret; | ||
270 | } | ||
271 | |||
272 | server->fs_state = 0; | ||
273 | } | ||
274 | |||
275 | /* try and find a connection that has spare callslots */ | ||
276 | for (nconn = 0; nconn < AFS_SERVER_CONN_LIST_SIZE; nconn++) { | ||
277 | if (server->fs_conn_cnt[nconn] > 0) { | ||
278 | server->fs_conn_cnt[nconn]--; | ||
279 | spin_unlock(&server->fs_lock); | ||
280 | callslot->nconn = nconn; | ||
281 | goto obtained_slot; | ||
282 | } | ||
283 | } | ||
284 | |||
285 | /* none were available - wait interruptibly for one to become | ||
286 | * available */ | ||
287 | set_current_state(TASK_INTERRUPTIBLE); | ||
288 | list_add_tail(&callslot->link, &server->fs_callq); | ||
289 | spin_unlock(&server->fs_lock); | ||
290 | |||
291 | while (!callslot->ready && !signal_pending(current)) { | ||
292 | schedule(); | ||
293 | set_current_state(TASK_INTERRUPTIBLE); | ||
294 | } | ||
295 | |||
296 | set_current_state(TASK_RUNNING); | ||
297 | |||
298 | /* even if we were interrupted we may still be queued */ | ||
299 | if (!callslot->ready) { | ||
300 | spin_lock(&server->fs_lock); | ||
301 | list_del_init(&callslot->link); | ||
302 | spin_unlock(&server->fs_lock); | ||
303 | } | ||
304 | |||
305 | nconn = callslot->nconn; | ||
306 | |||
307 | /* if we were interrupted, we must release any callslot we were granted | ||
308 | * in the meantime before returning an error */ | ||
309 | if (signal_pending(current)) { | ||
310 | ret = -EINTR; | ||
311 | goto error_release; | ||
312 | } | ||
313 | |||
314 | /* if we were woken up with an error, then pass that error back to the | ||
315 | * caller */ | ||
316 | if (nconn < 0) { | ||
317 | _leave(" = %d", callslot->errno); | ||
318 | return callslot->errno; | ||
319 | } | ||
320 | |||
321 | /* were we given a connection directly? */ | ||
322 | if (callslot->conn) { | ||
323 | /* yes - use it */ | ||
324 | _leave(" = 0 (nc=%d)", nconn); | ||
325 | return 0; | ||
326 | } | ||
327 | |||
328 | /* got a callslot, but no connection */ | ||
329 | obtained_slot: | ||
330 | |||
331 | /* need to get hold of the RxRPC connection */ | ||
332 | down_write(&server->sem); | ||
333 | |||
334 | /* quick check to see if there's an outstanding error */ | ||
335 | ret = server->fs_state; | ||
336 | if (ret) | ||
337 | goto error_release_upw; | ||
338 | |||
339 | if (server->fs_conn[nconn]) { | ||
340 | /* reuse an existing connection */ | ||
341 | rxrpc_get_connection(server->fs_conn[nconn]); | ||
342 | callslot->conn = server->fs_conn[nconn]; | ||
343 | } | ||
344 | else { | ||
345 | /* create a new connection */ | ||
346 | ret = rxrpc_create_connection(afs_transport, | ||
347 | htons(7000), | ||
348 | server->addr.s_addr, | ||
349 | FS_SERVICE_ID, | ||
350 | NULL, | ||
351 | &server->fs_conn[nconn]); | ||
352 | |||
353 | if (ret < 0) | ||
354 | goto error_release_upw; | ||
355 | |||
356 | callslot->conn = server->fs_conn[nconn]; | ||
357 | rxrpc_get_connection(callslot->conn); | ||
358 | } | ||
359 | |||
360 | up_write(&server->sem); | ||
361 | |||
362 | _leave(" = 0"); | ||
363 | return 0; | ||
364 | |||
365 | /* handle an error occurring */ | ||
366 | error_release_upw: | ||
367 | up_write(&server->sem); | ||
368 | |||
369 | error_release: | ||
370 | /* either release the callslot or pass it along to another deserving | ||
371 | * task */ | ||
372 | spin_lock(&server->fs_lock); | ||
373 | |||
374 | if (nconn < 0) { | ||
375 | /* no callslot allocated */ | ||
376 | } | ||
377 | else if (list_empty(&server->fs_callq)) { | ||
378 | /* no one waiting */ | ||
379 | server->fs_conn_cnt[nconn]++; | ||
380 | spin_unlock(&server->fs_lock); | ||
381 | } | ||
382 | else { | ||
383 | /* someone's waiting - dequeue them and wake them up */ | ||
384 | pcallslot = list_entry(server->fs_callq.next, | ||
385 | struct afs_server_callslot, link); | ||
386 | list_del_init(&pcallslot->link); | ||
387 | |||
388 | pcallslot->errno = server->fs_state; | ||
389 | if (!pcallslot->errno) { | ||
390 | /* pass our callslot details on to them */ | ||
391 | callslot->conn = xchg(&pcallslot->conn, | ||
392 | callslot->conn); | ||
393 | pcallslot->nconn = nconn; | ||
394 | callslot->nconn = nconn = -1; | ||
395 | } | ||
396 | pcallslot->ready = 1; | ||
397 | wake_up_process(pcallslot->task); | ||
398 | spin_unlock(&server->fs_lock); | ||
399 | } | ||
400 | |||
401 | rxrpc_put_connection(callslot->conn); | ||
402 | callslot->conn = NULL; | ||
403 | |||
404 | _leave(" = %d", ret); | ||
405 | return ret; | ||
406 | |||
407 | } /* end afs_server_request_callslot() */ | ||
408 | |||
409 | /*****************************************************************************/ | ||
410 | /* | ||
411 | * release a callslot back to the server | ||
412 | * - transfers the RxRPC connection to the next pending callslot if possible | ||
413 | */ | ||
414 | void afs_server_release_callslot(struct afs_server *server, | ||
415 | struct afs_server_callslot *callslot) | ||
416 | { | ||
417 | struct afs_server_callslot *pcallslot; | ||
418 | |||
419 | _enter("{ad=%08x,cnt=%u},{%d}", | ||
420 | ntohl(server->addr.s_addr), | ||
421 | server->fs_conn_cnt[callslot->nconn], | ||
422 | callslot->nconn); | ||
423 | |||
424 | BUG_ON(callslot->nconn < 0); | ||
425 | |||
426 | spin_lock(&server->fs_lock); | ||
427 | |||
428 | if (list_empty(&server->fs_callq)) { | ||
429 | /* no one waiting */ | ||
430 | server->fs_conn_cnt[callslot->nconn]++; | ||
431 | spin_unlock(&server->fs_lock); | ||
432 | } | ||
433 | else { | ||
434 | /* someone's waiting - dequeue them and wake them up */ | ||
435 | pcallslot = list_entry(server->fs_callq.next, | ||
436 | struct afs_server_callslot, link); | ||
437 | list_del_init(&pcallslot->link); | ||
438 | |||
439 | pcallslot->errno = server->fs_state; | ||
440 | if (!pcallslot->errno) { | ||
441 | /* pass our callslot details on to them */ | ||
442 | callslot->conn = xchg(&pcallslot->conn, callslot->conn); | ||
443 | pcallslot->nconn = callslot->nconn; | ||
444 | callslot->nconn = -1; | ||
445 | } | ||
446 | |||
447 | pcallslot->ready = 1; | ||
448 | wake_up_process(pcallslot->task); | ||
449 | spin_unlock(&server->fs_lock); | ||
450 | } | ||
451 | |||
452 | rxrpc_put_connection(callslot->conn); | ||
453 | |||
454 | _leave(""); | ||
455 | } /* end afs_server_release_callslot() */ | ||
456 | |||
457 | /*****************************************************************************/ | ||
458 | /* | ||
459 | * get a handle to a connection to the vlserver (volume location) on the | ||
460 | * specified server | ||
461 | */ | ||
462 | int afs_server_get_vlconn(struct afs_server *server, | ||
463 | struct rxrpc_connection **_conn) | ||
464 | { | ||
465 | struct rxrpc_connection *conn; | ||
466 | int ret; | ||
467 | |||
468 | _enter("%p,", server); | ||
469 | |||
470 | ret = 0; | ||
471 | conn = NULL; | ||
472 | down_read(&server->sem); | ||
473 | |||
474 | if (server->vlserver) { | ||
475 | /* reuse an existing connection */ | ||
476 | rxrpc_get_connection(server->vlserver); | ||
477 | conn = server->vlserver; | ||
478 | up_read(&server->sem); | ||
479 | } | ||
480 | else { | ||
481 | /* create a new connection */ | ||
482 | up_read(&server->sem); | ||
483 | down_write(&server->sem); | ||
484 | if (!server->vlserver) { | ||
485 | ret = rxrpc_create_connection(afs_transport, | ||
486 | htons(7003), | ||
487 | server->addr.s_addr, | ||
488 | VL_SERVICE_ID, | ||
489 | NULL, | ||
490 | &server->vlserver); | ||
491 | } | ||
492 | if (ret == 0) { | ||
493 | rxrpc_get_connection(server->vlserver); | ||
494 | conn = server->vlserver; | ||
495 | } | ||
496 | up_write(&server->sem); | ||
497 | } | ||
498 | |||
499 | *_conn = conn; | ||
500 | _leave(" = %d", ret); | ||
501 | return ret; | ||
502 | } /* end afs_server_get_vlconn() */ | ||
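The callslot machinery above is easiest to read from the caller's side: a filesystem call requests a slot (possibly sleeping on fs_callq), makes its RPC over the RxRPC connection it was handed, and releases the slot so the connection can be passed straight to the next waiter. A sketch of that pattern follows; the surrounding function is invented and only the two callslot calls come from the patch.

	/* Illustrative caller only. */
	static int afs_example_fs_call(struct afs_server *server)
	{
		struct afs_server_callslot callslot;
		int ret;

		ret = afs_server_request_callslot(server, &callslot);
		if (ret < 0)
			return ret;	/* interrupted, or the server is marked dead */

		/* ... issue the fileserver RPC over callslot.conn here ... */

		afs_server_release_callslot(server, &callslot);
		return 0;
	}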
diff --git a/fs/afs/server.h b/fs/afs/server.h new file mode 100644 index 000000000000..c3d24115578f --- /dev/null +++ b/fs/afs/server.h | |||
@@ -0,0 +1,102 @@ | |||
1 | /* server.h: AFS server record | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_AFS_SERVER_H | ||
13 | #define _LINUX_AFS_SERVER_H | ||
14 | |||
15 | #include "types.h" | ||
16 | #include "kafstimod.h" | ||
17 | #include <rxrpc/peer.h> | ||
18 | #include <linux/rwsem.h> | ||
19 | |||
20 | extern spinlock_t afs_server_peer_lock; | ||
21 | |||
22 | /*****************************************************************************/ | ||
23 | /* | ||
24 | * AFS server record | ||
25 | */ | ||
26 | struct afs_server | ||
27 | { | ||
28 | atomic_t usage; | ||
29 | struct afs_cell *cell; /* cell in which server resides */ | ||
30 | struct list_head link; /* link in cell's server list */ | ||
31 | struct rw_semaphore sem; /* access lock */ | ||
32 | struct afs_timer timeout; /* graveyard timeout */ | ||
33 | struct in_addr addr; /* server address */ | ||
34 | struct rxrpc_peer *peer; /* peer record for this server */ | ||
35 | struct rxrpc_connection *vlserver; /* connection to the volume location service */ | ||
36 | |||
37 | /* file service access */ | ||
38 | #define AFS_SERVER_CONN_LIST_SIZE 2 | ||
39 | struct rxrpc_connection *fs_conn[AFS_SERVER_CONN_LIST_SIZE]; /* FS connections */ | ||
40 | unsigned fs_conn_cnt[AFS_SERVER_CONN_LIST_SIZE]; /* per conn call count */ | ||
41 | struct list_head fs_callq; /* queue of processes waiting to make a call */ | ||
42 | spinlock_t fs_lock; /* access lock */ | ||
43 | int fs_state; /* 0 or reason FS currently marked dead (-errno) */ | ||
44 | unsigned fs_rtt; /* FS round trip time */ | ||
45 | unsigned long fs_act_jif; /* time at which last activity occurred */ | ||
46 | unsigned long fs_dead_jif; /* time at which no longer to be considered dead */ | ||
47 | |||
48 | /* callback promise management */ | ||
49 | struct list_head cb_promises; /* as yet unbroken promises from this server */ | ||
50 | spinlock_t cb_lock; /* access lock */ | ||
51 | }; | ||
52 | |||
53 | extern int afs_server_lookup(struct afs_cell *cell, | ||
54 | const struct in_addr *addr, | ||
55 | struct afs_server **_server); | ||
56 | |||
57 | #define afs_get_server(S) do { atomic_inc(&(S)->usage); } while(0) | ||
58 | |||
59 | extern void afs_put_server(struct afs_server *server); | ||
60 | extern void afs_server_do_timeout(struct afs_server *server); | ||
61 | |||
62 | extern int afs_server_find_by_peer(const struct rxrpc_peer *peer, | ||
63 | struct afs_server **_server); | ||
64 | |||
65 | extern int afs_server_get_vlconn(struct afs_server *server, | ||
66 | struct rxrpc_connection **_conn); | ||
67 | |||
68 | static inline | ||
69 | struct afs_server *afs_server_get_from_peer(struct rxrpc_peer *peer) | ||
70 | { | ||
71 | struct afs_server *server; | ||
72 | |||
73 | spin_lock(&afs_server_peer_lock); | ||
74 | server = peer->user; | ||
75 | if (server) | ||
76 | afs_get_server(server); | ||
77 | spin_unlock(&afs_server_peer_lock); | ||
78 | |||
79 | return server; | ||
80 | } | ||
81 | |||
82 | /*****************************************************************************/ | ||
83 | /* | ||
84 | * AFS server callslot grant record | ||
85 | */ | ||
86 | struct afs_server_callslot | ||
87 | { | ||
88 | struct list_head link; /* link in server's list */ | ||
89 | struct task_struct *task; /* process waiting to make call */ | ||
90 | struct rxrpc_connection *conn; /* connection to use (or NULL on error) */ | ||
91 | short nconn; /* connection slot number (-1 on error) */ | ||
92 | char ready; /* T when ready */ | ||
93 | int errno; /* error number if nconn==-1 */ | ||
94 | }; | ||
95 | |||
96 | extern int afs_server_request_callslot(struct afs_server *server, | ||
97 | struct afs_server_callslot *callslot); | ||
98 | |||
99 | extern void afs_server_release_callslot(struct afs_server *server, | ||
100 | struct afs_server_callslot *callslot); | ||
101 | |||
102 | #endif /* _LINUX_AFS_SERVER_H */ | ||
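Taking server.c and server.h together, the intended lifecycle is: afs_server_lookup() returns a record holding a usage reference (reusing an active or graveyarded record where one exists), afs_put_server() drops the reference and parks a now-unused record in the cell's graveyard for ten seconds, and afs_server_do_timeout() finally frees it. A sketch of the lookup/use/put sequence, with an invented caller:

	/* Illustrative caller only; afs_server_lookup() and afs_put_server()
	 * are the interfaces declared above. */
	static int afs_example_use_server(struct afs_cell *cell,
					  const struct in_addr *addr)
	{
		struct afs_server *server;
		int ret;

		ret = afs_server_lookup(cell, addr, &server);
		if (ret < 0)
			return ret;

		/* ... talk to the fileserver via the server record ... */

		afs_put_server(server);	/* may park the record in the graveyard */
		return 0;
	}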
diff --git a/fs/afs/super.c b/fs/afs/super.c new file mode 100644 index 000000000000..d6fa8e5999df --- /dev/null +++ b/fs/afs/super.c | |||
@@ -0,0 +1,441 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2002 Red Hat, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software may be freely redistributed under the terms of the | ||
5 | * GNU General Public License. | ||
6 | * | ||
7 | * You should have received a copy of the GNU General Public License | ||
8 | * along with this program; if not, write to the Free Software | ||
9 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
10 | * | ||
11 | * Authors: David Howells <dhowells@redhat.com> | ||
12 | * David Woodhouse <dwmw2@cambridge.redhat.com> | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/pagemap.h> | ||
22 | #include "vnode.h" | ||
23 | #include "volume.h" | ||
24 | #include "cell.h" | ||
25 | #include "cmservice.h" | ||
26 | #include "fsclient.h" | ||
27 | #include "super.h" | ||
28 | #include "internal.h" | ||
29 | |||
30 | #define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */ | ||
31 | |||
32 | struct afs_mount_params { | ||
33 | int rwpath; | ||
34 | struct afs_cell *default_cell; | ||
35 | struct afs_volume *volume; | ||
36 | }; | ||
37 | |||
38 | static void afs_i_init_once(void *foo, kmem_cache_t *cachep, | ||
39 | unsigned long flags); | ||
40 | |||
41 | static struct super_block *afs_get_sb(struct file_system_type *fs_type, | ||
42 | int flags, const char *dev_name, | ||
43 | void *data); | ||
44 | |||
45 | static struct inode *afs_alloc_inode(struct super_block *sb); | ||
46 | |||
47 | static void afs_put_super(struct super_block *sb); | ||
48 | |||
49 | static void afs_destroy_inode(struct inode *inode); | ||
50 | |||
51 | static struct file_system_type afs_fs_type = { | ||
52 | .owner = THIS_MODULE, | ||
53 | .name = "afs", | ||
54 | .get_sb = afs_get_sb, | ||
55 | .kill_sb = kill_anon_super, | ||
56 | .fs_flags = FS_BINARY_MOUNTDATA, | ||
57 | }; | ||
58 | |||
59 | static struct super_operations afs_super_ops = { | ||
60 | .statfs = simple_statfs, | ||
61 | .alloc_inode = afs_alloc_inode, | ||
62 | .drop_inode = generic_delete_inode, | ||
63 | .destroy_inode = afs_destroy_inode, | ||
64 | .clear_inode = afs_clear_inode, | ||
65 | .put_super = afs_put_super, | ||
66 | }; | ||
67 | |||
68 | static kmem_cache_t *afs_inode_cachep; | ||
69 | static atomic_t afs_count_active_inodes; | ||
70 | |||
71 | /*****************************************************************************/ | ||
72 | /* | ||
73 | * initialise the filesystem | ||
74 | */ | ||
75 | int __init afs_fs_init(void) | ||
76 | { | ||
77 | int ret; | ||
78 | |||
79 | _enter(""); | ||
80 | |||
81 | afs_timer_init(&afs_mntpt_expiry_timer, &afs_mntpt_expiry_timer_ops); | ||
82 | |||
83 | /* create ourselves an inode cache */ | ||
84 | atomic_set(&afs_count_active_inodes, 0); | ||
85 | |||
86 | ret = -ENOMEM; | ||
87 | afs_inode_cachep = kmem_cache_create("afs_inode_cache", | ||
88 | sizeof(struct afs_vnode), | ||
89 | 0, | ||
90 | SLAB_HWCACHE_ALIGN, | ||
91 | afs_i_init_once, | ||
92 | NULL); | ||
93 | if (!afs_inode_cachep) { | ||
94 | printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n"); | ||
95 | return ret; | ||
96 | } | ||
97 | |||
98 | /* now export our filesystem to lesser mortals */ | ||
99 | ret = register_filesystem(&afs_fs_type); | ||
100 | if (ret < 0) { | ||
101 | kmem_cache_destroy(afs_inode_cachep); | ||
102 | kleave(" = %d", ret); | ||
103 | return ret; | ||
104 | } | ||
105 | |||
106 | kleave(" = 0"); | ||
107 | return 0; | ||
108 | } /* end afs_fs_init() */ | ||
109 | |||
110 | /*****************************************************************************/ | ||
111 | /* | ||
112 | * clean up the filesystem | ||
113 | */ | ||
114 | void __exit afs_fs_exit(void) | ||
115 | { | ||
116 | unregister_filesystem(&afs_fs_type); | ||
117 | |||
118 | if (atomic_read(&afs_count_active_inodes) != 0) { | ||
119 | printk("kAFS: %d active inode objects still present\n", | ||
120 | atomic_read(&afs_count_active_inodes)); | ||
121 | BUG(); | ||
122 | } | ||
123 | |||
124 | kmem_cache_destroy(afs_inode_cachep); | ||
125 | |||
126 | } /* end afs_fs_exit() */ | ||
127 | |||
128 | /*****************************************************************************/ | ||
129 | /* | ||
130 | * check that an argument has a value | ||
131 | */ | ||
132 | static int want_arg(char **_value, const char *option) | ||
133 | { | ||
134 | if (!_value || !*_value || !**_value) { | ||
135 | printk(KERN_NOTICE "kAFS: %s: argument missing\n", option); | ||
136 | return 0; | ||
137 | } | ||
138 | return 1; | ||
139 | } /* end want_arg() */ | ||
140 | |||
141 | /*****************************************************************************/ | ||
142 | /* | ||
143 | * check that there's no subsequent value | ||
144 | */ | ||
145 | static int want_no_value(char *const *_value, const char *option) | ||
146 | { | ||
147 | if (*_value && **_value) { | ||
148 | printk(KERN_NOTICE "kAFS: %s: Invalid argument: %s\n", | ||
149 | option, *_value); | ||
150 | return 0; | ||
151 | } | ||
152 | return 1; | ||
153 | } /* end want_no_value() */ | ||
154 | |||
155 | /*****************************************************************************/ | ||
156 | /* | ||
157 | * parse the mount options | ||
158 | * - this function has been shamelessly adapted from the ext3 fs which | ||
159 | * shamelessly adapted it from the msdos fs | ||
160 | */ | ||
161 | static int afs_super_parse_options(struct afs_mount_params *params, | ||
162 | char *options, | ||
163 | const char **devname) | ||
164 | { | ||
165 | char *key, *value; | ||
166 | int ret; | ||
167 | |||
168 | _enter("%s", options); | ||
169 | |||
170 | options[PAGE_SIZE - 1] = 0; | ||
171 | |||
172 | ret = 0; | ||
173 | while ((key = strsep(&options, ",")) != 0) | ||
174 | { | ||
175 | value = strchr(key, '='); | ||
176 | if (value) | ||
177 | *value++ = 0; | ||
178 | |||
179 | printk("kAFS: KEY: %s, VAL:%s\n", key, value ?: "-"); | ||
180 | |||
181 | if (strcmp(key, "rwpath") == 0) { | ||
182 | if (!want_no_value(&value, "rwpath")) | ||
183 | return -EINVAL; | ||
184 | params->rwpath = 1; | ||
185 | continue; | ||
186 | } | ||
187 | else if (strcmp(key, "vol") == 0) { | ||
188 | if (!want_arg(&value, "vol")) | ||
189 | return -EINVAL; | ||
190 | *devname = value; | ||
191 | continue; | ||
192 | } | ||
193 | else if (strcmp(key, "cell") == 0) { | ||
194 | if (!want_arg(&value, "cell")) | ||
195 | return -EINVAL; | ||
196 | afs_put_cell(params->default_cell); | ||
197 | ret = afs_cell_lookup(value, | ||
198 | strlen(value), | ||
199 | 					      &params->default_cell); | ||
200 | if (ret < 0) | ||
201 | return -EINVAL; | ||
202 | continue; | ||
203 | } | ||
204 | |||
205 | printk("kAFS: Unknown mount option: '%s'\n", key); | ||
206 | ret = -EINVAL; | ||
207 | goto error; | ||
208 | } | ||
209 | |||
210 | ret = 0; | ||
211 | |||
212 | error: | ||
213 | _leave(" = %d", ret); | ||
214 | return ret; | ||
215 | } /* end afs_super_parse_options() */ | ||
216 | |||
217 | /*****************************************************************************/ | ||
218 | /* | ||
219 | * check a superblock to see if it's the one we're looking for | ||
220 | */ | ||
221 | static int afs_test_super(struct super_block *sb, void *data) | ||
222 | { | ||
223 | struct afs_mount_params *params = data; | ||
224 | struct afs_super_info *as = sb->s_fs_info; | ||
225 | |||
226 | return as->volume == params->volume; | ||
227 | } /* end afs_test_super() */ | ||
228 | |||
229 | /*****************************************************************************/ | ||
230 | /* | ||
231 | * fill in the superblock | ||
232 | */ | ||
233 | static int afs_fill_super(struct super_block *sb, void *data, int silent) | ||
234 | { | ||
235 | struct afs_mount_params *params = data; | ||
236 | struct afs_super_info *as = NULL; | ||
237 | struct afs_fid fid; | ||
238 | struct dentry *root = NULL; | ||
239 | struct inode *inode = NULL; | ||
240 | int ret; | ||
241 | |||
242 | kenter(""); | ||
243 | |||
244 | /* allocate a superblock info record */ | ||
245 | as = kmalloc(sizeof(struct afs_super_info), GFP_KERNEL); | ||
246 | if (!as) { | ||
247 | _leave(" = -ENOMEM"); | ||
248 | return -ENOMEM; | ||
249 | } | ||
250 | |||
251 | memset(as, 0, sizeof(struct afs_super_info)); | ||
252 | |||
253 | afs_get_volume(params->volume); | ||
254 | as->volume = params->volume; | ||
255 | |||
256 | /* fill in the superblock */ | ||
257 | sb->s_blocksize = PAGE_CACHE_SIZE; | ||
258 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; | ||
259 | sb->s_magic = AFS_FS_MAGIC; | ||
260 | sb->s_op = &afs_super_ops; | ||
261 | sb->s_fs_info = as; | ||
262 | |||
263 | /* allocate the root inode and dentry */ | ||
264 | fid.vid = as->volume->vid; | ||
265 | fid.vnode = 1; | ||
266 | fid.unique = 1; | ||
267 | ret = afs_iget(sb, &fid, &inode); | ||
268 | if (ret < 0) | ||
269 | goto error; | ||
270 | |||
271 | ret = -ENOMEM; | ||
272 | root = d_alloc_root(inode); | ||
273 | if (!root) | ||
274 | goto error; | ||
275 | |||
276 | sb->s_root = root; | ||
277 | |||
278 | kleave(" = 0"); | ||
279 | return 0; | ||
280 | |||
281 | error: | ||
282 | iput(inode); | ||
283 | afs_put_volume(as->volume); | ||
284 | kfree(as); | ||
285 | |||
286 | sb->s_fs_info = NULL; | ||
287 | |||
288 | kleave(" = %d", ret); | ||
289 | return ret; | ||
290 | } /* end afs_fill_super() */ | ||
291 | |||
292 | /*****************************************************************************/ | ||
293 | /* | ||
294 | * get an AFS superblock | ||
295 | * - TODO: don't use get_sb_nodev(), but rather call sget() directly | ||
296 | */ | ||
297 | static struct super_block *afs_get_sb(struct file_system_type *fs_type, | ||
298 | int flags, | ||
299 | const char *dev_name, | ||
300 | void *options) | ||
301 | { | ||
302 | struct afs_mount_params params; | ||
303 | struct super_block *sb; | ||
304 | int ret; | ||
305 | |||
306 | _enter(",,%s,%p", dev_name, options); | ||
307 | |||
308 | 	memset(&params, 0, sizeof(params)); | ||
309 | |||
310 | /* start the cache manager */ | ||
311 | ret = afscm_start(); | ||
312 | if (ret < 0) { | ||
313 | _leave(" = %d", ret); | ||
314 | return ERR_PTR(ret); | ||
315 | } | ||
316 | |||
317 | /* parse the options */ | ||
318 | if (options) { | ||
319 | 		ret = afs_super_parse_options(&params, options, &dev_name); | ||
320 | if (ret < 0) | ||
321 | goto error; | ||
322 | if (!dev_name) { | ||
323 | printk("kAFS: no volume name specified\n"); | ||
324 | ret = -EINVAL; | ||
325 | goto error; | ||
326 | } | ||
327 | } | ||
328 | |||
329 | /* parse the device name */ | ||
330 | ret = afs_volume_lookup(dev_name, | ||
331 | params.default_cell, | ||
332 | params.rwpath, | ||
333 | 				&params.volume); | ||
334 | if (ret < 0) | ||
335 | goto error; | ||
336 | |||
337 | /* allocate a deviceless superblock */ | ||
338 | 	sb = sget(fs_type, afs_test_super, set_anon_super, &params); | ||
339 | if (IS_ERR(sb)) | ||
340 | goto error; | ||
341 | |||
342 | sb->s_flags = flags; | ||
343 | |||
344 | ret = afs_fill_super(sb, ¶ms, flags & MS_VERBOSE ? 1 : 0); | ||
345 | if (ret < 0) { | ||
346 | up_write(&sb->s_umount); | ||
347 | deactivate_super(sb); | ||
348 | goto error; | ||
349 | } | ||
350 | sb->s_flags |= MS_ACTIVE; | ||
351 | |||
352 | afs_put_volume(params.volume); | ||
353 | afs_put_cell(params.default_cell); | ||
354 | _leave(" = %p", sb); | ||
355 | return sb; | ||
356 | |||
357 | error: | ||
358 | afs_put_volume(params.volume); | ||
359 | afs_put_cell(params.default_cell); | ||
360 | afscm_stop(); | ||
361 | _leave(" = %d", ret); | ||
362 | return ERR_PTR(ret); | ||
363 | } /* end afs_get_sb() */ | ||
364 | |||
365 | /*****************************************************************************/ | ||
366 | /* | ||
367 | * finish the unmounting process on the superblock | ||
368 | */ | ||
369 | static void afs_put_super(struct super_block *sb) | ||
370 | { | ||
371 | struct afs_super_info *as = sb->s_fs_info; | ||
372 | |||
373 | _enter(""); | ||
374 | |||
375 | afs_put_volume(as->volume); | ||
376 | afscm_stop(); | ||
377 | |||
378 | _leave(""); | ||
379 | } /* end afs_put_super() */ | ||
380 | |||
381 | /*****************************************************************************/ | ||
382 | /* | ||
383 | * initialise an inode cache slab element prior to any use | ||
384 | */ | ||
385 | static void afs_i_init_once(void *_vnode, kmem_cache_t *cachep, | ||
386 | unsigned long flags) | ||
387 | { | ||
388 | struct afs_vnode *vnode = (struct afs_vnode *) _vnode; | ||
389 | |||
390 | if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == | ||
391 | SLAB_CTOR_CONSTRUCTOR) { | ||
392 | memset(vnode, 0, sizeof(*vnode)); | ||
393 | inode_init_once(&vnode->vfs_inode); | ||
394 | init_waitqueue_head(&vnode->update_waitq); | ||
395 | spin_lock_init(&vnode->lock); | ||
396 | INIT_LIST_HEAD(&vnode->cb_link); | ||
397 | INIT_LIST_HEAD(&vnode->cb_hash_link); | ||
398 | afs_timer_init(&vnode->cb_timeout, | ||
399 | &afs_vnode_cb_timed_out_ops); | ||
400 | } | ||
401 | |||
402 | } /* end afs_i_init_once() */ | ||
403 | |||
404 | /*****************************************************************************/ | ||
405 | /* | ||
406 | * allocate an AFS inode struct from our slab cache | ||
407 | */ | ||
408 | static struct inode *afs_alloc_inode(struct super_block *sb) | ||
409 | { | ||
410 | struct afs_vnode *vnode; | ||
411 | |||
412 | vnode = (struct afs_vnode *) | ||
413 | kmem_cache_alloc(afs_inode_cachep, SLAB_KERNEL); | ||
414 | if (!vnode) | ||
415 | return NULL; | ||
416 | |||
417 | atomic_inc(&afs_count_active_inodes); | ||
418 | |||
419 | memset(&vnode->fid, 0, sizeof(vnode->fid)); | ||
420 | memset(&vnode->status, 0, sizeof(vnode->status)); | ||
421 | |||
422 | vnode->volume = NULL; | ||
423 | vnode->update_cnt = 0; | ||
424 | vnode->flags = 0; | ||
425 | |||
426 | return &vnode->vfs_inode; | ||
427 | } /* end afs_alloc_inode() */ | ||
428 | |||
429 | /*****************************************************************************/ | ||
430 | /* | ||
431 | * destroy an AFS inode struct | ||
432 | */ | ||
433 | static void afs_destroy_inode(struct inode *inode) | ||
434 | { | ||
435 | _enter("{%lu}", inode->i_ino); | ||
436 | |||
437 | kmem_cache_free(afs_inode_cachep, AFS_FS_I(inode)); | ||
438 | |||
439 | atomic_dec(&afs_count_active_inodes); | ||
440 | |||
441 | } /* end afs_destroy_inode() */ | ||
diff --git a/fs/afs/super.h b/fs/afs/super.h new file mode 100644 index 000000000000..ac11362f4e95 --- /dev/null +++ b/fs/afs/super.h | |||
@@ -0,0 +1,43 @@ | |||
1 | /* super.h: AFS filesystem internal private data | ||
2 | * | ||
3 | * Copyright (c) 2002 Red Hat, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software may be freely redistributed under the terms of the | ||
6 | * GNU General Public License. | ||
7 | * | ||
8 | * You should have received a copy of the GNU General Public License | ||
9 | * along with this program; if not, write to the Free Software | ||
10 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
11 | * | ||
12 | * Authors: David Woodhouse <dwmw2@cambridge.redhat.com> | ||
13 | * David Howells <dhowells@redhat.com> | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #ifndef _LINUX_AFS_SUPER_H | ||
18 | #define _LINUX_AFS_SUPER_H | ||
19 | |||
20 | #include <linux/fs.h> | ||
21 | #include "server.h" | ||
22 | |||
23 | #ifdef __KERNEL__ | ||
24 | |||
25 | /*****************************************************************************/ | ||
26 | /* | ||
27 | * AFS superblock private data | ||
28 | * - there's one superblock per volume | ||
29 | */ | ||
30 | struct afs_super_info | ||
31 | { | ||
32 | struct afs_volume *volume; /* volume record */ | ||
33 | char rwparent; /* T if parent is R/W AFS volume */ | ||
34 | }; | ||
35 | |||
36 | static inline struct afs_super_info *AFS_FS_S(struct super_block *sb) | ||
37 | { | ||
38 | return sb->s_fs_info; | ||
39 | } | ||
40 | |||
41 | #endif /* __KERNEL__ */ | ||
42 | |||
43 | #endif /* _LINUX_AFS_SUPER_H */ | ||
diff --git a/fs/afs/transport.h b/fs/afs/transport.h new file mode 100644 index 000000000000..7013ae6ccc8c --- /dev/null +++ b/fs/afs/transport.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* transport.h: AFS transport management | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_AFS_TRANSPORT_H | ||
13 | #define _LINUX_AFS_TRANSPORT_H | ||
14 | |||
15 | #include "types.h" | ||
16 | #include <rxrpc/transport.h> | ||
17 | |||
18 | /* the cache manager transport endpoint */ | ||
19 | extern struct rxrpc_transport *afs_transport; | ||
20 | |||
21 | #endif /* _LINUX_AFS_TRANSPORT_H */ | ||
diff --git a/fs/afs/types.h b/fs/afs/types.h new file mode 100644 index 000000000000..b1a2367c7587 --- /dev/null +++ b/fs/afs/types.h | |||
@@ -0,0 +1,125 @@ | |||
1 | /* types.h: AFS types | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_AFS_TYPES_H | ||
13 | #define _LINUX_AFS_TYPES_H | ||
14 | |||
15 | #ifdef __KERNEL__ | ||
16 | #include <rxrpc/types.h> | ||
17 | #endif /* __KERNEL__ */ | ||
18 | |||
19 | typedef unsigned afs_volid_t; | ||
20 | typedef unsigned afs_vnodeid_t; | ||
21 | typedef unsigned long long afs_dataversion_t; | ||
22 | |||
23 | typedef enum { | ||
24 | AFSVL_RWVOL, /* read/write volume */ | ||
25 | AFSVL_ROVOL, /* read-only volume */ | ||
26 | AFSVL_BACKVOL, /* backup volume */ | ||
27 | } __attribute__((packed)) afs_voltype_t; | ||
28 | |||
29 | typedef enum { | ||
30 | AFS_FTYPE_INVALID = 0, | ||
31 | AFS_FTYPE_FILE = 1, | ||
32 | AFS_FTYPE_DIR = 2, | ||
33 | AFS_FTYPE_SYMLINK = 3, | ||
34 | } afs_file_type_t; | ||
35 | |||
36 | #ifdef __KERNEL__ | ||
37 | |||
38 | struct afs_cell; | ||
39 | struct afs_vnode; | ||
40 | |||
41 | /*****************************************************************************/ | ||
42 | /* | ||
43 | * AFS file identifier | ||
44 | */ | ||
45 | struct afs_fid | ||
46 | { | ||
47 | afs_volid_t vid; /* volume ID */ | ||
48 | afs_vnodeid_t vnode; /* file index within volume */ | ||
49 | unsigned unique; /* unique ID number (file index version) */ | ||
50 | }; | ||
51 | |||
52 | /*****************************************************************************/ | ||
53 | /* | ||
54 | * AFS callback notification | ||
55 | */ | ||
56 | typedef enum { | ||
57 | AFSCM_CB_UNTYPED = 0, /* no type set on CB break */ | ||
58 | AFSCM_CB_EXCLUSIVE = 1, /* CB exclusive to CM [not implemented] */ | ||
59 | AFSCM_CB_SHARED = 2, /* CB shared by other CM's */ | ||
60 | AFSCM_CB_DROPPED = 3, /* CB promise cancelled by file server */ | ||
61 | } afs_callback_type_t; | ||
62 | |||
63 | struct afs_callback | ||
64 | { | ||
65 | struct afs_server *server; /* server that made the promise */ | ||
66 | struct afs_fid fid; /* file identifier */ | ||
67 | unsigned version; /* callback version */ | ||
68 | unsigned expiry; /* time at which expires */ | ||
69 | afs_callback_type_t type; /* type of callback */ | ||
70 | }; | ||
71 | |||
72 | #define AFSCBMAX 50 | ||
73 | |||
74 | /*****************************************************************************/ | ||
75 | /* | ||
76 | * AFS volume information | ||
77 | */ | ||
78 | struct afs_volume_info | ||
79 | { | ||
80 | afs_volid_t vid; /* volume ID */ | ||
81 | afs_voltype_t type; /* type of this volume */ | ||
82 | 	afs_volid_t		type_vids[5];	/* volume IDs for possible types for this vol */ | ||
83 | |||
84 | /* list of fileservers serving this volume */ | ||
85 | size_t nservers; /* number of entries used in servers[] */ | ||
86 | struct { | ||
87 | struct in_addr addr; /* fileserver address */ | ||
88 | } servers[8]; | ||
89 | }; | ||
90 | |||
91 | /*****************************************************************************/ | ||
92 | /* | ||
93 | * AFS file status information | ||
94 | */ | ||
95 | struct afs_file_status | ||
96 | { | ||
97 | unsigned if_version; /* interface version */ | ||
98 | #define AFS_FSTATUS_VERSION 1 | ||
99 | |||
100 | afs_file_type_t type; /* file type */ | ||
101 | unsigned nlink; /* link count */ | ||
102 | size_t size; /* file size */ | ||
103 | afs_dataversion_t version; /* current data version */ | ||
104 | unsigned author; /* author ID */ | ||
105 | unsigned owner; /* owner ID */ | ||
106 | unsigned caller_access; /* access rights for authenticated caller */ | ||
107 | unsigned anon_access; /* access rights for unauthenticated caller */ | ||
108 | umode_t mode; /* UNIX mode */ | ||
109 | struct afs_fid parent; /* parent file ID */ | ||
110 | time_t mtime_client; /* last time client changed data */ | ||
111 | time_t mtime_server; /* last time server changed data */ | ||
112 | }; | ||
113 | |||
114 | /*****************************************************************************/ | ||
115 | /* | ||
116 | * AFS volume synchronisation information | ||
117 | */ | ||
118 | struct afs_volsync | ||
119 | { | ||
120 | time_t creation; /* volume creation time */ | ||
121 | }; | ||
122 | |||
123 | #endif /* __KERNEL__ */ | ||
124 | |||
125 | #endif /* _LINUX_AFS_TYPES_H */ | ||
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c new file mode 100644 index 000000000000..7b0e3192ee39 --- /dev/null +++ b/fs/afs/vlclient.c | |||
@@ -0,0 +1,695 @@ | |||
1 | /* vlclient.c: AFS Volume Location Service client | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <rxrpc/rxrpc.h> | ||
15 | #include <rxrpc/transport.h> | ||
16 | #include <rxrpc/connection.h> | ||
17 | #include <rxrpc/call.h> | ||
18 | #include "server.h" | ||
19 | #include "volume.h" | ||
20 | #include "vlclient.h" | ||
21 | #include "kafsasyncd.h" | ||
22 | #include "kafstimod.h" | ||
23 | #include "errors.h" | ||
24 | #include "internal.h" | ||
25 | |||
26 | #define VLGETENTRYBYID 503 /* AFS Get Cache Entry By ID operation ID */ | ||
27 | #define VLGETENTRYBYNAME 504 /* AFS Get Cache Entry By Name operation ID */ | ||
28 | #define VLPROBE 514 /* AFS Probe Volume Location Service operation ID */ | ||
29 | |||
30 | static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call); | ||
31 | static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call); | ||
32 | |||
33 | /*****************************************************************************/ | ||
34 | /* | ||
35 | * map afs VL abort codes to/from Linux error codes | ||
36 | * - called with call->lock held | ||
37 | */ | ||
38 | static void afs_rxvl_aemap(struct rxrpc_call *call) | ||
39 | { | ||
40 | int err; | ||
41 | |||
42 | _enter("{%u,%u,%d}", | ||
43 | call->app_err_state, call->app_abort_code, call->app_errno); | ||
44 | |||
45 | switch (call->app_err_state) { | ||
46 | case RXRPC_ESTATE_LOCAL_ABORT: | ||
47 | call->app_abort_code = -call->app_errno; | ||
48 | return; | ||
49 | |||
50 | case RXRPC_ESTATE_PEER_ABORT: | ||
51 | switch (call->app_abort_code) { | ||
52 | case AFSVL_IDEXIST: err = -EEXIST; break; | ||
53 | case AFSVL_IO: err = -EREMOTEIO; break; | ||
54 | case AFSVL_NAMEEXIST: err = -EEXIST; break; | ||
55 | case AFSVL_CREATEFAIL: err = -EREMOTEIO; break; | ||
56 | case AFSVL_NOENT: err = -ENOMEDIUM; break; | ||
57 | case AFSVL_EMPTY: err = -ENOMEDIUM; break; | ||
58 | case AFSVL_ENTDELETED: err = -ENOMEDIUM; break; | ||
59 | case AFSVL_BADNAME: err = -EINVAL; break; | ||
60 | case AFSVL_BADINDEX: err = -EINVAL; break; | ||
61 | case AFSVL_BADVOLTYPE: err = -EINVAL; break; | ||
62 | case AFSVL_BADSERVER: err = -EINVAL; break; | ||
63 | case AFSVL_BADPARTITION: err = -EINVAL; break; | ||
64 | case AFSVL_REPSFULL: err = -EFBIG; break; | ||
65 | case AFSVL_NOREPSERVER: err = -ENOENT; break; | ||
66 | case AFSVL_DUPREPSERVER: err = -EEXIST; break; | ||
67 | case AFSVL_RWNOTFOUND: err = -ENOENT; break; | ||
68 | case AFSVL_BADREFCOUNT: err = -EINVAL; break; | ||
69 | case AFSVL_SIZEEXCEEDED: err = -EINVAL; break; | ||
70 | case AFSVL_BADENTRY: err = -EINVAL; break; | ||
71 | case AFSVL_BADVOLIDBUMP: err = -EINVAL; break; | ||
72 | case AFSVL_IDALREADYHASHED: err = -EINVAL; break; | ||
73 | case AFSVL_ENTRYLOCKED: err = -EBUSY; break; | ||
74 | case AFSVL_BADVOLOPER: err = -EBADRQC; break; | ||
75 | case AFSVL_BADRELLOCKTYPE: err = -EINVAL; break; | ||
76 | case AFSVL_RERELEASE: err = -EREMOTEIO; break; | ||
77 | case AFSVL_BADSERVERFLAG: err = -EINVAL; break; | ||
78 | case AFSVL_PERM: err = -EACCES; break; | ||
79 | case AFSVL_NOMEM: err = -EREMOTEIO; break; | ||
80 | default: | ||
81 | err = afs_abort_to_error(call->app_abort_code); | ||
82 | break; | ||
83 | } | ||
84 | call->app_errno = err; | ||
85 | return; | ||
86 | |||
87 | default: | ||
88 | return; | ||
89 | } | ||
90 | } /* end afs_rxvl_aemap() */ | ||
91 | |||
92 | #if 0 | ||
93 | /*****************************************************************************/ | ||
94 | /* | ||
95 | * probe a volume location server to see if it is still alive -- unused | ||
96 | */ | ||
97 | static int afs_rxvl_probe(struct afs_server *server, int alloc_flags) | ||
98 | { | ||
99 | struct rxrpc_connection *conn; | ||
100 | struct rxrpc_call *call; | ||
101 | struct kvec piov[1]; | ||
102 | size_t sent; | ||
103 | int ret; | ||
104 | __be32 param[1]; | ||
105 | |||
106 | DECLARE_WAITQUEUE(myself, current); | ||
107 | |||
108 | /* get hold of the vlserver connection */ | ||
109 | ret = afs_server_get_vlconn(server, &conn); | ||
110 | if (ret < 0) | ||
111 | goto out; | ||
112 | |||
113 | /* create a call through that connection */ | ||
114 | ret = rxrpc_create_call(conn, NULL, NULL, afs_rxvl_aemap, &call); | ||
115 | if (ret < 0) { | ||
116 | printk("kAFS: Unable to create call: %d\n", ret); | ||
117 | goto out_put_conn; | ||
118 | } | ||
119 | call->app_opcode = VLPROBE; | ||
120 | |||
121 | /* we want to get event notifications from the call */ | ||
122 | add_wait_queue(&call->waitq, &myself); | ||
123 | |||
124 | /* marshall the parameters */ | ||
125 | param[0] = htonl(VLPROBE); | ||
126 | piov[0].iov_len = sizeof(param); | ||
127 | piov[0].iov_base = param; | ||
128 | |||
129 | /* send the parameters to the server */ | ||
130 | ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, | ||
131 | alloc_flags, 0, &sent); | ||
132 | if (ret < 0) | ||
133 | goto abort; | ||
134 | |||
135 | /* wait for the reply to completely arrive */ | ||
136 | for (;;) { | ||
137 | set_current_state(TASK_INTERRUPTIBLE); | ||
138 | if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY || | ||
139 | signal_pending(current)) | ||
140 | break; | ||
141 | schedule(); | ||
142 | } | ||
143 | set_current_state(TASK_RUNNING); | ||
144 | |||
145 | ret = -EINTR; | ||
146 | if (signal_pending(current)) | ||
147 | goto abort; | ||
148 | |||
149 | switch (call->app_call_state) { | ||
150 | case RXRPC_CSTATE_ERROR: | ||
151 | ret = call->app_errno; | ||
152 | goto out_unwait; | ||
153 | |||
154 | case RXRPC_CSTATE_CLNT_GOT_REPLY: | ||
155 | ret = 0; | ||
156 | goto out_unwait; | ||
157 | |||
158 | default: | ||
159 | BUG(); | ||
160 | } | ||
161 | |||
162 | abort: | ||
163 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
164 | rxrpc_call_abort(call, ret); | ||
165 | schedule(); | ||
166 | |||
167 | out_unwait: | ||
168 | set_current_state(TASK_RUNNING); | ||
169 | remove_wait_queue(&call->waitq, &myself); | ||
170 | rxrpc_put_call(call); | ||
171 | out_put_conn: | ||
172 | rxrpc_put_connection(conn); | ||
173 | out: | ||
174 | return ret; | ||
175 | |||
176 | } /* end afs_rxvl_probe() */ | ||
177 | #endif | ||
178 | |||
179 | /*****************************************************************************/ | ||
180 | /* | ||
181 | * look up a volume location database entry by name | ||
182 | */ | ||
183 | int afs_rxvl_get_entry_by_name(struct afs_server *server, | ||
184 | const char *volname, | ||
185 | unsigned volnamesz, | ||
186 | struct afs_cache_vlocation *entry) | ||
187 | { | ||
188 | DECLARE_WAITQUEUE(myself, current); | ||
189 | |||
190 | struct rxrpc_connection *conn; | ||
191 | struct rxrpc_call *call; | ||
192 | struct kvec piov[3]; | ||
193 | unsigned tmp; | ||
194 | size_t sent; | ||
195 | int ret, loop; | ||
196 | __be32 *bp, param[2], zero; | ||
197 | |||
198 | _enter(",%*.*s,%u,", volnamesz, volnamesz, volname, volnamesz); | ||
199 | |||
200 | memset(entry, 0, sizeof(*entry)); | ||
201 | |||
202 | /* get hold of the vlserver connection */ | ||
203 | ret = afs_server_get_vlconn(server, &conn); | ||
204 | if (ret < 0) | ||
205 | goto out; | ||
206 | |||
207 | /* create a call through that connection */ | ||
208 | ret = rxrpc_create_call(conn, NULL, NULL, afs_rxvl_aemap, &call); | ||
209 | if (ret < 0) { | ||
210 | printk("kAFS: Unable to create call: %d\n", ret); | ||
211 | goto out_put_conn; | ||
212 | } | ||
213 | call->app_opcode = VLGETENTRYBYNAME; | ||
214 | |||
215 | /* we want to get event notifications from the call */ | ||
216 | add_wait_queue(&call->waitq, &myself); | ||
217 | |||
218 | /* marshall the parameters */ | ||
219 | piov[1].iov_len = volnamesz; | ||
220 | piov[1].iov_base = (char *) volname; | ||
221 | |||
222 | zero = 0; | ||
223 | piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3; | ||
224 | piov[2].iov_base = &zero; | ||
225 | |||
226 | param[0] = htonl(VLGETENTRYBYNAME); | ||
227 | param[1] = htonl(piov[1].iov_len); | ||
228 | |||
229 | piov[0].iov_len = sizeof(param); | ||
230 | piov[0].iov_base = param; | ||
231 | |||
232 | /* send the parameters to the server */ | ||
233 | ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
234 | 0, &sent); | ||
235 | if (ret < 0) | ||
236 | goto abort; | ||
237 | |||
238 | /* wait for the reply to completely arrive */ | ||
239 | bp = rxrpc_call_alloc_scratch(call, 384); | ||
240 | |||
241 | ret = rxrpc_call_read_data(call, bp, 384, | ||
242 | RXRPC_CALL_READ_BLOCK | | ||
243 | RXRPC_CALL_READ_ALL); | ||
244 | if (ret < 0) { | ||
245 | if (ret == -ECONNABORTED) { | ||
246 | ret = call->app_errno; | ||
247 | goto out_unwait; | ||
248 | } | ||
249 | goto abort; | ||
250 | } | ||
251 | |||
252 | /* unmarshall the reply */ | ||
253 | for (loop = 0; loop < 64; loop++) | ||
254 | entry->name[loop] = ntohl(*bp++); | ||
255 | bp++; /* final NUL */ | ||
256 | |||
257 | bp++; /* type */ | ||
258 | entry->nservers = ntohl(*bp++); | ||
259 | |||
260 | for (loop = 0; loop < 8; loop++) | ||
261 | entry->servers[loop].s_addr = *bp++; | ||
262 | |||
263 | bp += 8; /* partition IDs */ | ||
264 | |||
265 | for (loop = 0; loop < 8; loop++) { | ||
266 | tmp = ntohl(*bp++); | ||
267 | if (tmp & AFS_VLSF_RWVOL) | ||
268 | entry->srvtmask[loop] |= AFS_VOL_VTM_RW; | ||
269 | if (tmp & AFS_VLSF_ROVOL) | ||
270 | entry->srvtmask[loop] |= AFS_VOL_VTM_RO; | ||
271 | if (tmp & AFS_VLSF_BACKVOL) | ||
272 | entry->srvtmask[loop] |= AFS_VOL_VTM_BAK; | ||
273 | } | ||
274 | |||
275 | entry->vid[0] = ntohl(*bp++); | ||
276 | entry->vid[1] = ntohl(*bp++); | ||
277 | entry->vid[2] = ntohl(*bp++); | ||
278 | |||
279 | bp++; /* clone ID */ | ||
280 | |||
281 | tmp = ntohl(*bp++); /* flags */ | ||
282 | if (tmp & AFS_VLF_RWEXISTS) | ||
283 | entry->vidmask |= AFS_VOL_VTM_RW; | ||
284 | if (tmp & AFS_VLF_ROEXISTS) | ||
285 | entry->vidmask |= AFS_VOL_VTM_RO; | ||
286 | if (tmp & AFS_VLF_BACKEXISTS) | ||
287 | entry->vidmask |= AFS_VOL_VTM_BAK; | ||
288 | |||
289 | ret = -ENOMEDIUM; | ||
290 | if (!entry->vidmask) | ||
291 | goto abort; | ||
292 | |||
293 | /* success */ | ||
294 | entry->rtime = get_seconds(); | ||
295 | ret = 0; | ||
296 | |||
297 | out_unwait: | ||
298 | set_current_state(TASK_RUNNING); | ||
299 | remove_wait_queue(&call->waitq, &myself); | ||
300 | rxrpc_put_call(call); | ||
301 | out_put_conn: | ||
302 | rxrpc_put_connection(conn); | ||
303 | out: | ||
304 | _leave(" = %d", ret); | ||
305 | return ret; | ||
306 | |||
307 | abort: | ||
308 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
309 | rxrpc_call_abort(call, ret); | ||
310 | schedule(); | ||
311 | goto out_unwait; | ||
312 | } /* end afs_rxvl_get_entry_by_name() */ | ||
313 | |||
314 | /*****************************************************************************/ | ||
315 | /* | ||
316 | * look up a volume location database entry by ID | ||
317 | */ | ||
318 | int afs_rxvl_get_entry_by_id(struct afs_server *server, | ||
319 | afs_volid_t volid, | ||
320 | afs_voltype_t voltype, | ||
321 | struct afs_cache_vlocation *entry) | ||
322 | { | ||
323 | DECLARE_WAITQUEUE(myself, current); | ||
324 | |||
325 | struct rxrpc_connection *conn; | ||
326 | struct rxrpc_call *call; | ||
327 | struct kvec piov[1]; | ||
328 | unsigned tmp; | ||
329 | size_t sent; | ||
330 | int ret, loop; | ||
331 | __be32 *bp, param[3]; | ||
332 | |||
333 | _enter(",%x,%d,", volid, voltype); | ||
334 | |||
335 | memset(entry, 0, sizeof(*entry)); | ||
336 | |||
337 | /* get hold of the vlserver connection */ | ||
338 | ret = afs_server_get_vlconn(server, &conn); | ||
339 | if (ret < 0) | ||
340 | goto out; | ||
341 | |||
342 | /* create a call through that connection */ | ||
343 | ret = rxrpc_create_call(conn, NULL, NULL, afs_rxvl_aemap, &call); | ||
344 | if (ret < 0) { | ||
345 | printk("kAFS: Unable to create call: %d\n", ret); | ||
346 | goto out_put_conn; | ||
347 | } | ||
348 | call->app_opcode = VLGETENTRYBYID; | ||
349 | |||
350 | /* we want to get event notifications from the call */ | ||
351 | add_wait_queue(&call->waitq, &myself); | ||
352 | |||
353 | /* marshall the parameters */ | ||
354 | param[0] = htonl(VLGETENTRYBYID); | ||
355 | param[1] = htonl(volid); | ||
356 | param[2] = htonl(voltype); | ||
357 | |||
358 | piov[0].iov_len = sizeof(param); | ||
359 | piov[0].iov_base = param; | ||
360 | |||
361 | /* send the parameters to the server */ | ||
362 | ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
363 | 0, &sent); | ||
364 | if (ret < 0) | ||
365 | goto abort; | ||
366 | |||
367 | /* wait for the reply to completely arrive */ | ||
368 | bp = rxrpc_call_alloc_scratch(call, 384); | ||
369 | |||
370 | ret = rxrpc_call_read_data(call, bp, 384, | ||
371 | RXRPC_CALL_READ_BLOCK | | ||
372 | RXRPC_CALL_READ_ALL); | ||
373 | if (ret < 0) { | ||
374 | if (ret == -ECONNABORTED) { | ||
375 | ret = call->app_errno; | ||
376 | goto out_unwait; | ||
377 | } | ||
378 | goto abort; | ||
379 | } | ||
380 | |||
381 | /* unmarshall the reply */ | ||
382 | for (loop = 0; loop < 64; loop++) | ||
383 | entry->name[loop] = ntohl(*bp++); | ||
384 | bp++; /* final NUL */ | ||
385 | |||
386 | bp++; /* type */ | ||
387 | entry->nservers = ntohl(*bp++); | ||
388 | |||
389 | for (loop = 0; loop < 8; loop++) | ||
390 | entry->servers[loop].s_addr = *bp++; | ||
391 | |||
392 | bp += 8; /* partition IDs */ | ||
393 | |||
394 | for (loop = 0; loop < 8; loop++) { | ||
395 | tmp = ntohl(*bp++); | ||
396 | if (tmp & AFS_VLSF_RWVOL) | ||
397 | entry->srvtmask[loop] |= AFS_VOL_VTM_RW; | ||
398 | if (tmp & AFS_VLSF_ROVOL) | ||
399 | entry->srvtmask[loop] |= AFS_VOL_VTM_RO; | ||
400 | if (tmp & AFS_VLSF_BACKVOL) | ||
401 | entry->srvtmask[loop] |= AFS_VOL_VTM_BAK; | ||
402 | } | ||
403 | |||
404 | entry->vid[0] = ntohl(*bp++); | ||
405 | entry->vid[1] = ntohl(*bp++); | ||
406 | entry->vid[2] = ntohl(*bp++); | ||
407 | |||
408 | bp++; /* clone ID */ | ||
409 | |||
410 | tmp = ntohl(*bp++); /* flags */ | ||
411 | if (tmp & AFS_VLF_RWEXISTS) | ||
412 | entry->vidmask |= AFS_VOL_VTM_RW; | ||
413 | if (tmp & AFS_VLF_ROEXISTS) | ||
414 | entry->vidmask |= AFS_VOL_VTM_RO; | ||
415 | if (tmp & AFS_VLF_BACKEXISTS) | ||
416 | entry->vidmask |= AFS_VOL_VTM_BAK; | ||
417 | |||
418 | ret = -ENOMEDIUM; | ||
419 | if (!entry->vidmask) | ||
420 | goto abort; | ||
421 | |||
422 | #if 0 /* TODO: remove */ | ||
423 | entry->nservers = 3; | ||
424 | entry->servers[0].s_addr = htonl(0xac101249); | ||
425 | entry->servers[1].s_addr = htonl(0xac101243); | ||
426 | entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/); | ||
427 | |||
428 | entry->srvtmask[0] = AFS_VOL_VTM_RO; | ||
429 | entry->srvtmask[1] = AFS_VOL_VTM_RO; | ||
430 | entry->srvtmask[2] = AFS_VOL_VTM_RO | AFS_VOL_VTM_RW; | ||
431 | #endif | ||
432 | |||
433 | /* success */ | ||
434 | entry->rtime = get_seconds(); | ||
435 | ret = 0; | ||
436 | |||
437 | out_unwait: | ||
438 | set_current_state(TASK_RUNNING); | ||
439 | remove_wait_queue(&call->waitq, &myself); | ||
440 | rxrpc_put_call(call); | ||
441 | out_put_conn: | ||
442 | rxrpc_put_connection(conn); | ||
443 | out: | ||
444 | _leave(" = %d", ret); | ||
445 | return ret; | ||
446 | |||
447 | abort: | ||
448 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
449 | rxrpc_call_abort(call, ret); | ||
450 | schedule(); | ||
451 | goto out_unwait; | ||
452 | } /* end afs_rxvl_get_entry_by_id() */ | ||
453 | |||
454 | /*****************************************************************************/ | ||
455 | /* | ||
456 | * look up a volume location database entry by ID asynchronously | ||
457 | */ | ||
458 | int afs_rxvl_get_entry_by_id_async(struct afs_async_op *op, | ||
459 | afs_volid_t volid, | ||
460 | afs_voltype_t voltype) | ||
461 | { | ||
462 | struct rxrpc_connection *conn; | ||
463 | struct rxrpc_call *call; | ||
464 | struct kvec piov[1]; | ||
465 | size_t sent; | ||
466 | int ret; | ||
467 | __be32 param[3]; | ||
468 | |||
469 | _enter(",%x,%d,", volid, voltype); | ||
470 | |||
471 | /* get hold of the vlserver connection */ | ||
472 | ret = afs_server_get_vlconn(op->server, &conn); | ||
473 | if (ret < 0) { | ||
474 | _leave(" = %d", ret); | ||
475 | return ret; | ||
476 | } | ||
477 | |||
478 | /* create a call through that connection */ | ||
479 | ret = rxrpc_create_call(conn, | ||
480 | afs_rxvl_get_entry_by_id_attn, | ||
481 | afs_rxvl_get_entry_by_id_error, | ||
482 | afs_rxvl_aemap, | ||
483 | &op->call); | ||
484 | rxrpc_put_connection(conn); | ||
485 | |||
486 | if (ret < 0) { | ||
487 | printk("kAFS: Unable to create call: %d\n", ret); | ||
488 | _leave(" = %d", ret); | ||
489 | return ret; | ||
490 | } | ||
491 | |||
492 | op->call->app_opcode = VLGETENTRYBYID; | ||
493 | op->call->app_user = op; | ||
494 | |||
495 | call = op->call; | ||
496 | rxrpc_get_call(call); | ||
497 | |||
498 | /* send event notifications from the call to kafsasyncd */ | ||
499 | afs_kafsasyncd_begin_op(op); | ||
500 | |||
501 | /* marshall the parameters */ | ||
502 | param[0] = htonl(VLGETENTRYBYID); | ||
503 | param[1] = htonl(volid); | ||
504 | param[2] = htonl(voltype); | ||
505 | |||
506 | piov[0].iov_len = sizeof(param); | ||
507 | piov[0].iov_base = param; | ||
508 | |||
509 | /* allocate result read buffer in scratch space */ | ||
510 | call->app_scr_ptr = rxrpc_call_alloc_scratch(op->call, 384); | ||
511 | |||
512 | /* send the parameters to the server */ | ||
513 | ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
514 | 0, &sent); | ||
515 | if (ret < 0) { | ||
516 | rxrpc_call_abort(call, ret); /* handle from kafsasyncd */ | ||
517 | ret = 0; | ||
518 | goto out; | ||
519 | } | ||
520 | |||
521 | /* wait for the reply to completely arrive */ | ||
522 | ret = rxrpc_call_read_data(call, call->app_scr_ptr, 384, 0); | ||
523 | switch (ret) { | ||
524 | case 0: | ||
525 | case -EAGAIN: | ||
526 | case -ECONNABORTED: | ||
527 | ret = 0; | ||
528 | break; /* all handled by kafsasyncd */ | ||
529 | |||
530 | default: | ||
531 | rxrpc_call_abort(call, ret); /* make kafsasyncd handle it */ | ||
532 | ret = 0; | ||
533 | break; | ||
534 | } | ||
535 | |||
536 | out: | ||
537 | rxrpc_put_call(call); | ||
538 | _leave(" = %d", ret); | ||
539 | return ret; | ||
540 | |||
541 | } /* end afs_rxvl_get_entry_by_id_async() */ | ||
542 | |||
543 | /*****************************************************************************/ | ||
544 | /* | ||
545 | * attend to the asynchronous get VLDB entry by ID | ||
546 | */ | ||
547 | int afs_rxvl_get_entry_by_id_async2(struct afs_async_op *op, | ||
548 | struct afs_cache_vlocation *entry) | ||
549 | { | ||
550 | __be32 *bp; | ||
551 | __u32 tmp; | ||
552 | int loop, ret; | ||
553 | |||
554 | _enter("{op=%p cst=%u}", op, op->call->app_call_state); | ||
555 | |||
556 | memset(entry, 0, sizeof(*entry)); | ||
557 | |||
558 | if (op->call->app_call_state == RXRPC_CSTATE_COMPLETE) { | ||
559 | /* operation finished */ | ||
560 | afs_kafsasyncd_terminate_op(op); | ||
561 | |||
562 | bp = op->call->app_scr_ptr; | ||
563 | |||
564 | /* unmarshall the reply */ | ||
565 | for (loop = 0; loop < 64; loop++) | ||
566 | entry->name[loop] = ntohl(*bp++); | ||
567 | bp++; /* final NUL */ | ||
568 | |||
569 | bp++; /* type */ | ||
570 | entry->nservers = ntohl(*bp++); | ||
571 | |||
572 | for (loop = 0; loop < 8; loop++) | ||
573 | entry->servers[loop].s_addr = *bp++; | ||
574 | |||
575 | bp += 8; /* partition IDs */ | ||
576 | |||
577 | for (loop = 0; loop < 8; loop++) { | ||
578 | tmp = ntohl(*bp++); | ||
579 | if (tmp & AFS_VLSF_RWVOL) | ||
580 | entry->srvtmask[loop] |= AFS_VOL_VTM_RW; | ||
581 | if (tmp & AFS_VLSF_ROVOL) | ||
582 | entry->srvtmask[loop] |= AFS_VOL_VTM_RO; | ||
583 | if (tmp & AFS_VLSF_BACKVOL) | ||
584 | entry->srvtmask[loop] |= AFS_VOL_VTM_BAK; | ||
585 | } | ||
586 | |||
587 | entry->vid[0] = ntohl(*bp++); | ||
588 | entry->vid[1] = ntohl(*bp++); | ||
589 | entry->vid[2] = ntohl(*bp++); | ||
590 | |||
591 | bp++; /* clone ID */ | ||
592 | |||
593 | tmp = ntohl(*bp++); /* flags */ | ||
594 | if (tmp & AFS_VLF_RWEXISTS) | ||
595 | entry->vidmask |= AFS_VOL_VTM_RW; | ||
596 | if (tmp & AFS_VLF_ROEXISTS) | ||
597 | entry->vidmask |= AFS_VOL_VTM_RO; | ||
598 | if (tmp & AFS_VLF_BACKEXISTS) | ||
599 | entry->vidmask |= AFS_VOL_VTM_BAK; | ||
600 | |||
601 | ret = -ENOMEDIUM; | ||
602 | if (!entry->vidmask) { | ||
603 | rxrpc_call_abort(op->call, ret); | ||
604 | goto done; | ||
605 | } | ||
606 | |||
607 | #if 0 /* TODO: remove */ | ||
608 | entry->nservers = 3; | ||
609 | entry->servers[0].s_addr = htonl(0xac101249); | ||
610 | entry->servers[1].s_addr = htonl(0xac101243); | ||
611 | entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/); | ||
612 | |||
613 | entry->srvtmask[0] = AFS_VOL_VTM_RO; | ||
614 | entry->srvtmask[1] = AFS_VOL_VTM_RO; | ||
615 | entry->srvtmask[2] = AFS_VOL_VTM_RO | AFS_VOL_VTM_RW; | ||
616 | #endif | ||
617 | |||
618 | /* success */ | ||
619 | entry->rtime = get_seconds(); | ||
620 | ret = 0; | ||
621 | goto done; | ||
622 | } | ||
623 | |||
624 | if (op->call->app_call_state == RXRPC_CSTATE_ERROR) { | ||
625 | /* operation error */ | ||
626 | ret = op->call->app_errno; | ||
627 | goto done; | ||
628 | } | ||
629 | |||
630 | _leave(" = -EAGAIN"); | ||
631 | return -EAGAIN; | ||
632 | |||
633 | done: | ||
634 | rxrpc_put_call(op->call); | ||
635 | op->call = NULL; | ||
636 | _leave(" = %d", ret); | ||
637 | return ret; | ||
638 | } /* end afs_rxvl_get_entry_by_id_async2() */ | ||
639 | |||
640 | /*****************************************************************************/ | ||
641 | /* | ||
642 | * handle attention events on an async get-entry-by-ID op | ||
643 | * - called from krxiod | ||
644 | */ | ||
645 | static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call) | ||
646 | { | ||
647 | struct afs_async_op *op = call->app_user; | ||
648 | |||
649 | _enter("{op=%p cst=%u}", op, call->app_call_state); | ||
650 | |||
651 | switch (call->app_call_state) { | ||
652 | case RXRPC_CSTATE_COMPLETE: | ||
653 | afs_kafsasyncd_attend_op(op); | ||
654 | break; | ||
655 | case RXRPC_CSTATE_CLNT_RCV_REPLY: | ||
656 | if (call->app_async_read) | ||
657 | break; | ||
658 | case RXRPC_CSTATE_CLNT_GOT_REPLY: | ||
659 | if (call->app_read_count == 0) | ||
660 | break; | ||
661 | printk("kAFS: Reply bigger than expected" | ||
662 | " {cst=%u asyn=%d mark=%Zu rdy=%Zu pr=%u%s}", | ||
663 | call->app_call_state, | ||
664 | call->app_async_read, | ||
665 | call->app_mark, | ||
666 | call->app_ready_qty, | ||
667 | call->pkt_rcv_count, | ||
668 | call->app_last_rcv ? " last" : ""); | ||
669 | |||
670 | rxrpc_call_abort(call, -EBADMSG); | ||
671 | break; | ||
672 | default: | ||
673 | BUG(); | ||
674 | } | ||
675 | |||
676 | _leave(""); | ||
677 | |||
678 | } /* end afs_rxvl_get_entry_by_id_attn() */ | ||
679 | |||
680 | /*****************************************************************************/ | ||
681 | /* | ||
682 | * handle error events on an async get-entry-by-ID op | ||
683 | * - called from krxiod | ||
684 | */ | ||
685 | static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call) | ||
686 | { | ||
687 | struct afs_async_op *op = call->app_user; | ||
688 | |||
689 | _enter("{op=%p cst=%u}", op, call->app_call_state); | ||
690 | |||
691 | afs_kafsasyncd_attend_op(op); | ||
692 | |||
693 | _leave(""); | ||
694 | |||
695 | } /* end afs_rxvl_get_entry_by_id_error() */ | ||
diff --git a/fs/afs/vlclient.h b/fs/afs/vlclient.h new file mode 100644 index 000000000000..e3d601179c46 --- /dev/null +++ b/fs/afs/vlclient.h | |||
@@ -0,0 +1,93 @@ | |||
1 | /* vlclient.h: Volume Location Service client interface | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_AFS_VLCLIENT_H | ||
13 | #define _LINUX_AFS_VLCLIENT_H | ||
14 | |||
15 | #include "types.h" | ||
16 | |||
17 | enum AFSVL_Errors { | ||
18 | AFSVL_IDEXIST = 363520, /* Volume Id entry exists in vl database */ | ||
19 | AFSVL_IO = 363521, /* I/O related error */ | ||
20 | AFSVL_NAMEEXIST = 363522, /* Volume name entry exists in vl database */ | ||
21 | AFSVL_CREATEFAIL = 363523, /* Internal creation failure */ | ||
22 | AFSVL_NOENT = 363524, /* No such entry */ | ||
23 | AFSVL_EMPTY = 363525, /* Vl database is empty */ | ||
24 | AFSVL_ENTDELETED = 363526, /* Entry is deleted (soft delete) */ | ||
25 | AFSVL_BADNAME = 363527, /* Volume name is illegal */ | ||
26 | AFSVL_BADINDEX = 363528, /* Index is out of range */ | ||
27 | AFSVL_BADVOLTYPE = 363529, /* Bad volume type */ | ||
28 | AFSVL_BADSERVER = 363530, /* Illegal server number (out of range) */ | ||
29 | AFSVL_BADPARTITION = 363531, /* Bad partition number */ | ||
30 | AFSVL_REPSFULL = 363532, /* Run out of space for Replication sites */ | ||
31 | AFSVL_NOREPSERVER = 363533, /* No such Replication server site exists */ | ||
32 | AFSVL_DUPREPSERVER = 363534, /* Replication site already exists */ | ||
33 | AFSVL_RWNOTFOUND = 363535, /* Parent R/W entry not found */ | ||
34 | AFSVL_BADREFCOUNT = 363536, /* Illegal Reference Count number */ | ||
35 | AFSVL_SIZEEXCEEDED = 363537, /* Vl size for attributes exceeded */ | ||
36 | AFSVL_BADENTRY = 363538, /* Bad incoming vl entry */ | ||
37 | AFSVL_BADVOLIDBUMP = 363539, /* Illegal max volid increment */ | ||
38 | AFSVL_IDALREADYHASHED = 363540, /* RO/BACK id already hashed */ | ||
39 | AFSVL_ENTRYLOCKED = 363541, /* Vl entry is already locked */ | ||
40 | AFSVL_BADVOLOPER = 363542, /* Bad volume operation code */ | ||
41 | AFSVL_BADRELLOCKTYPE = 363543, /* Bad release lock type */ | ||
42 | AFSVL_RERELEASE = 363544, /* Status report: last release was aborted */ | ||
43 | 	AFSVL_BADSERVERFLAG	= 363545,	/* Invalid replication site server flag */ | ||
44 | AFSVL_PERM = 363546, /* No permission access */ | ||
45 | AFSVL_NOMEM = 363547, /* malloc/realloc failed to alloc enough memory */ | ||
46 | }; | ||
47 | |||
48 | /* maps to "struct vldbentry" in vvl-spec.pdf */ | ||
49 | struct afs_vldbentry { | ||
50 | char name[65]; /* name of volume (including NUL char) */ | ||
51 | afs_voltype_t type; /* volume type */ | ||
52 | unsigned num_servers; /* num servers that hold instances of this vol */ | ||
53 | unsigned clone_id; /* cloning ID */ | ||
54 | |||
55 | unsigned flags; | ||
56 | #define AFS_VLF_RWEXISTS 0x1000 /* R/W volume exists */ | ||
57 | #define AFS_VLF_ROEXISTS 0x2000 /* R/O volume exists */ | ||
58 | #define AFS_VLF_BACKEXISTS 0x4000 /* backup volume exists */ | ||
59 | |||
60 | afs_volid_t volume_ids[3]; /* volume IDs */ | ||
61 | |||
62 | struct { | ||
63 | struct in_addr addr; /* server address */ | ||
64 | unsigned partition; /* partition ID on this server */ | ||
65 | unsigned flags; /* server specific flags */ | ||
66 | #define AFS_VLSF_NEWREPSITE 0x0001 /* unused */ | ||
67 | #define AFS_VLSF_ROVOL 0x0002 /* this server holds a R/O instance of the volume */ | ||
68 | #define AFS_VLSF_RWVOL 0x0004 /* this server holds a R/W instance of the volume */ | ||
69 | #define AFS_VLSF_BACKVOL 0x0008 /* this server holds a backup instance of the volume */ | ||
70 | } servers[8]; | ||
71 | |||
72 | }; | ||
73 | |||
74 | /* look up a volume location database entry by name */ | ||
75 | extern int afs_rxvl_get_entry_by_name(struct afs_server *server, | ||
76 | const char *volname, | ||
77 | unsigned volnamesz, | ||
78 | struct afs_cache_vlocation *entry); | ||
79 | |||
80 | /* look up a volume location database entry by ID */ | ||
81 | extern int afs_rxvl_get_entry_by_id(struct afs_server *server, | ||
82 | afs_volid_t volid, | ||
83 | afs_voltype_t voltype, | ||
84 | struct afs_cache_vlocation *entry); | ||
85 | |||
86 | extern int afs_rxvl_get_entry_by_id_async(struct afs_async_op *op, | ||
87 | afs_volid_t volid, | ||
88 | afs_voltype_t voltype); | ||
89 | |||
90 | extern int afs_rxvl_get_entry_by_id_async2(struct afs_async_op *op, | ||
91 | struct afs_cache_vlocation *entry); | ||
92 | |||
93 | #endif /* _LINUX_AFS_VLCLIENT_H */ | ||
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c new file mode 100644 index 000000000000..eced20618ecc --- /dev/null +++ b/fs/afs/vlocation.c | |||
@@ -0,0 +1,954 @@ | |||
1 | /* vlocation.c: volume location management | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/fs.h> | ||
17 | #include <linux/pagemap.h> | ||
18 | #include "volume.h" | ||
19 | #include "cell.h" | ||
20 | #include "cmservice.h" | ||
21 | #include "fsclient.h" | ||
22 | #include "vlclient.h" | ||
23 | #include "kafstimod.h" | ||
24 | #include <rxrpc/connection.h> | ||
25 | #include "internal.h" | ||
26 | |||
27 | #define AFS_VLDB_TIMEOUT HZ*1000 | ||
28 | |||
29 | static void afs_vlocation_update_timer(struct afs_timer *timer); | ||
30 | static void afs_vlocation_update_attend(struct afs_async_op *op); | ||
31 | static void afs_vlocation_update_discard(struct afs_async_op *op); | ||
32 | static void __afs_put_vlocation(struct afs_vlocation *vlocation); | ||
33 | |||
34 | static void __afs_vlocation_timeout(struct afs_timer *timer) | ||
35 | { | ||
36 | struct afs_vlocation *vlocation = | ||
37 | list_entry(timer, struct afs_vlocation, timeout); | ||
38 | |||
39 | _debug("VL TIMEOUT [%s{u=%d}]", | ||
40 | vlocation->vldb.name, atomic_read(&vlocation->usage)); | ||
41 | |||
42 | afs_vlocation_do_timeout(vlocation); | ||
43 | } | ||
44 | |||
45 | static const struct afs_timer_ops afs_vlocation_timer_ops = { | ||
46 | .timed_out = __afs_vlocation_timeout, | ||
47 | }; | ||
48 | |||
49 | static const struct afs_timer_ops afs_vlocation_update_timer_ops = { | ||
50 | .timed_out = afs_vlocation_update_timer, | ||
51 | }; | ||
52 | |||
53 | static const struct afs_async_op_ops afs_vlocation_update_op_ops = { | ||
54 | .attend = afs_vlocation_update_attend, | ||
55 | .discard = afs_vlocation_update_discard, | ||
56 | }; | ||
57 | |||
58 | static LIST_HEAD(afs_vlocation_update_pendq); /* queue of VLs awaiting update */ | ||
59 | static struct afs_vlocation *afs_vlocation_update; /* VL currently being updated */ | ||
60 | static DEFINE_SPINLOCK(afs_vlocation_update_lock); /* lock guarding update queue */ | ||
61 | |||
62 | #ifdef AFS_CACHING_SUPPORT | ||
63 | static cachefs_match_val_t afs_vlocation_cache_match(void *target, | ||
64 | const void *entry); | ||
65 | static void afs_vlocation_cache_update(void *source, void *entry); | ||
66 | |||
67 | struct cachefs_index_def afs_vlocation_cache_index_def = { | ||
68 | .name = "vldb", | ||
69 | .data_size = sizeof(struct afs_cache_vlocation), | ||
70 | .keys[0] = { CACHEFS_INDEX_KEYS_ASCIIZ, 64 }, | ||
71 | .match = afs_vlocation_cache_match, | ||
72 | .update = afs_vlocation_cache_update, | ||
73 | }; | ||
74 | #endif | ||
75 | |||
76 | /*****************************************************************************/ | ||
77 | /* | ||
78 | * iterate through the VL servers in a cell until one of them admits knowing | ||
79 | * about the volume in question | ||
80 | * - caller must have cell->vl_sem write-locked | ||
81 | */ | ||
82 | static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vlocation, | ||
83 | const char *name, | ||
84 | unsigned namesz, | ||
85 | struct afs_cache_vlocation *vldb) | ||
86 | { | ||
87 | struct afs_server *server = NULL; | ||
88 | struct afs_cell *cell = vlocation->cell; | ||
89 | int count, ret; | ||
90 | |||
91 | _enter("%s,%*.*s,%u", cell->name, namesz, namesz, name, namesz); | ||
92 | |||
93 | ret = -ENOMEDIUM; | ||
94 | for (count = cell->vl_naddrs; count > 0; count--) { | ||
95 | _debug("CellServ[%hu]: %08x", | ||
96 | cell->vl_curr_svix, | ||
97 | cell->vl_addrs[cell->vl_curr_svix].s_addr); | ||
98 | |||
99 | /* try and create a server */ | ||
100 | ret = afs_server_lookup(cell, | ||
101 | &cell->vl_addrs[cell->vl_curr_svix], | ||
102 | &server); | ||
103 | switch (ret) { | ||
104 | case 0: | ||
105 | break; | ||
106 | case -ENOMEM: | ||
107 | case -ENONET: | ||
108 | goto out; | ||
109 | default: | ||
110 | goto rotate; | ||
111 | } | ||
112 | |||
113 | /* attempt to access the VL server */ | ||
114 | ret = afs_rxvl_get_entry_by_name(server, name, namesz, vldb); | ||
115 | switch (ret) { | ||
116 | case 0: | ||
117 | afs_put_server(server); | ||
118 | goto out; | ||
119 | case -ENOMEM: | ||
120 | case -ENONET: | ||
121 | case -ENETUNREACH: | ||
122 | case -EHOSTUNREACH: | ||
123 | case -ECONNREFUSED: | ||
124 | down_write(&server->sem); | ||
125 | if (server->vlserver) { | ||
126 | rxrpc_put_connection(server->vlserver); | ||
127 | server->vlserver = NULL; | ||
128 | } | ||
129 | up_write(&server->sem); | ||
130 | afs_put_server(server); | ||
131 | if (ret == -ENOMEM || ret == -ENONET) | ||
132 | goto out; | ||
133 | goto rotate; | ||
134 | case -ENOMEDIUM: | ||
135 | afs_put_server(server); | ||
136 | goto out; | ||
137 | default: | ||
138 | afs_put_server(server); | ||
139 | ret = -ENOMEDIUM; | ||
140 | goto rotate; | ||
141 | } | ||
142 | |||
143 | /* rotate the server records upon lookup failure */ | ||
144 | rotate: | ||
145 | cell->vl_curr_svix++; | ||
146 | cell->vl_curr_svix %= cell->vl_naddrs; | ||
147 | } | ||
148 | |||
149 | out: | ||
150 | _leave(" = %d", ret); | ||
151 | return ret; | ||
152 | |||
153 | } /* end afs_vlocation_access_vl_by_name() */ | ||
154 | |||
155 | /*****************************************************************************/ | ||
156 | /* | ||
157 | * iterate through the VL servers in a cell until one of them admits knowing | ||
158 | * about the volume in question | ||
159 | * - caller must have cell->vl_sem write-locked | ||
160 | */ | ||
161 | static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vlocation, | ||
162 | afs_volid_t volid, | ||
163 | afs_voltype_t voltype, | ||
164 | struct afs_cache_vlocation *vldb) | ||
165 | { | ||
166 | struct afs_server *server = NULL; | ||
167 | struct afs_cell *cell = vlocation->cell; | ||
168 | int count, ret; | ||
169 | |||
170 | _enter("%s,%x,%d,", cell->name, volid, voltype); | ||
171 | |||
172 | ret = -ENOMEDIUM; | ||
173 | for (count = cell->vl_naddrs; count > 0; count--) { | ||
174 | _debug("CellServ[%hu]: %08x", | ||
175 | cell->vl_curr_svix, | ||
176 | cell->vl_addrs[cell->vl_curr_svix].s_addr); | ||
177 | |||
178 | /* try and create a server */ | ||
179 | ret = afs_server_lookup(cell, | ||
180 | &cell->vl_addrs[cell->vl_curr_svix], | ||
181 | &server); | ||
182 | switch (ret) { | ||
183 | case 0: | ||
184 | break; | ||
185 | case -ENOMEM: | ||
186 | case -ENONET: | ||
187 | goto out; | ||
188 | default: | ||
189 | goto rotate; | ||
190 | } | ||
191 | |||
192 | /* attempt to access the VL server */ | ||
193 | ret = afs_rxvl_get_entry_by_id(server, volid, voltype, vldb); | ||
194 | switch (ret) { | ||
195 | case 0: | ||
196 | afs_put_server(server); | ||
197 | goto out; | ||
198 | case -ENOMEM: | ||
199 | case -ENONET: | ||
200 | case -ENETUNREACH: | ||
201 | case -EHOSTUNREACH: | ||
202 | case -ECONNREFUSED: | ||
203 | down_write(&server->sem); | ||
204 | if (server->vlserver) { | ||
205 | rxrpc_put_connection(server->vlserver); | ||
206 | server->vlserver = NULL; | ||
207 | } | ||
208 | up_write(&server->sem); | ||
209 | afs_put_server(server); | ||
210 | if (ret == -ENOMEM || ret == -ENONET) | ||
211 | goto out; | ||
212 | goto rotate; | ||
213 | case -ENOMEDIUM: | ||
214 | afs_put_server(server); | ||
215 | goto out; | ||
216 | default: | ||
217 | afs_put_server(server); | ||
218 | ret = -ENOMEDIUM; | ||
219 | goto rotate; | ||
220 | } | ||
221 | |||
222 | /* rotate the server records upon lookup failure */ | ||
223 | rotate: | ||
224 | cell->vl_curr_svix++; | ||
225 | cell->vl_curr_svix %= cell->vl_naddrs; | ||
226 | } | ||
227 | |||
228 | out: | ||
229 | _leave(" = %d", ret); | ||
230 | return ret; | ||
231 | |||
232 | } /* end afs_vlocation_access_vl_by_id() */ | ||
233 | |||
234 | /*****************************************************************************/ | ||
235 | /* | ||
236 | * lookup volume location | ||
237 | * - caller must have cell->vol_sem write-locked | ||
238 | * - iterate through the VL servers in a cell until one of them admits knowing | ||
239 | * about the volume in question | ||
240 | * - look up in the local cache if it can't be found on the VL server | ||
241 | * - insert/update in the local cache if we did get a VL response | ||
242 | */ | ||
243 | int afs_vlocation_lookup(struct afs_cell *cell, | ||
244 | const char *name, | ||
245 | unsigned namesz, | ||
246 | struct afs_vlocation **_vlocation) | ||
247 | { | ||
248 | struct afs_cache_vlocation vldb; | ||
249 | struct afs_vlocation *vlocation; | ||
250 | afs_voltype_t voltype; | ||
251 | afs_volid_t vid; | ||
252 | int active = 0, ret; | ||
253 | |||
254 | _enter("{%s},%*.*s,%u,", cell->name, namesz, namesz, name, namesz); | ||
255 | |||
256 | if (namesz > sizeof(vlocation->vldb.name)) { | ||
257 | _leave(" = -ENAMETOOLONG"); | ||
258 | return -ENAMETOOLONG; | ||
259 | } | ||
260 | |||
261 | /* search the cell's active list first */ | ||
262 | list_for_each_entry(vlocation, &cell->vl_list, link) { | ||
263 | if (namesz < sizeof(vlocation->vldb.name) && | ||
264 | vlocation->vldb.name[namesz] != '\0') | ||
265 | continue; | ||
266 | |||
267 | if (memcmp(vlocation->vldb.name, name, namesz) == 0) | ||
268 | goto found_in_memory; | ||
269 | } | ||
270 | |||
271 | /* search the cell's graveyard list second */ | ||
272 | spin_lock(&cell->vl_gylock); | ||
273 | list_for_each_entry(vlocation, &cell->vl_graveyard, link) { | ||
274 | if (namesz < sizeof(vlocation->vldb.name) && | ||
275 | vlocation->vldb.name[namesz] != '\0') | ||
276 | continue; | ||
277 | |||
278 | if (memcmp(vlocation->vldb.name, name, namesz) == 0) | ||
279 | goto found_in_graveyard; | ||
280 | } | ||
281 | spin_unlock(&cell->vl_gylock); | ||
282 | |||
283 | /* not in the cell's in-memory lists - create a new record */ | ||
284 | vlocation = kmalloc(sizeof(struct afs_vlocation), GFP_KERNEL); | ||
285 | if (!vlocation) | ||
286 | return -ENOMEM; | ||
287 | |||
288 | memset(vlocation, 0, sizeof(struct afs_vlocation)); | ||
289 | atomic_set(&vlocation->usage, 1); | ||
290 | INIT_LIST_HEAD(&vlocation->link); | ||
291 | rwlock_init(&vlocation->lock); | ||
292 | memcpy(vlocation->vldb.name, name, namesz); | ||
293 | |||
294 | afs_timer_init(&vlocation->timeout, &afs_vlocation_timer_ops); | ||
295 | afs_timer_init(&vlocation->upd_timer, &afs_vlocation_update_timer_ops); | ||
296 | afs_async_op_init(&vlocation->upd_op, &afs_vlocation_update_op_ops); | ||
297 | |||
298 | afs_get_cell(cell); | ||
299 | vlocation->cell = cell; | ||
300 | |||
301 | list_add_tail(&vlocation->link, &cell->vl_list); | ||
302 | |||
303 | #ifdef AFS_CACHING_SUPPORT | ||
304 | /* we want to store it in the cache, plus it might already be | ||
305 | * encached */ | ||
306 | cachefs_acquire_cookie(cell->cache, | ||
307 | &afs_volume_cache_index_def, | ||
308 | vlocation, | ||
309 | &vlocation->cache); | ||
310 | |||
311 | if (vlocation->valid) | ||
312 | goto found_in_cache; | ||
313 | #endif | ||
314 | |||
315 | /* try to look up an unknown volume in the cell VL databases by name */ | ||
316 | ret = afs_vlocation_access_vl_by_name(vlocation, name, namesz, &vldb); | ||
317 | if (ret < 0) { | ||
318 | printk("kAFS: failed to locate '%*.*s' in cell '%s'\n", | ||
319 | namesz, namesz, name, cell->name); | ||
320 | goto error; | ||
321 | } | ||
322 | |||
323 | goto found_on_vlserver; | ||
324 | |||
325 | found_in_graveyard: | ||
326 | /* found in the graveyard - resurrect */ | ||
327 | _debug("found in graveyard"); | ||
328 | atomic_inc(&vlocation->usage); | ||
329 | list_del(&vlocation->link); | ||
330 | list_add_tail(&vlocation->link, &cell->vl_list); | ||
331 | spin_unlock(&cell->vl_gylock); | ||
332 | |||
333 | afs_kafstimod_del_timer(&vlocation->timeout); | ||
334 | goto active; | ||
335 | |||
336 | found_in_memory: | ||
337 | /* found in memory - check to see if it's active */ | ||
338 | _debug("found in memory"); | ||
339 | atomic_inc(&vlocation->usage); | ||
340 | |||
341 | active: | ||
342 | active = 1; | ||
343 | |||
344 | #ifdef AFS_CACHING_SUPPORT | ||
345 | found_in_cache: | ||
346 | #endif | ||
347 | /* try to look up a cached volume in the cell VL databases by ID */ | ||
348 | _debug("found in cache"); | ||
349 | |||
350 | _debug("Locally Cached: %s %02x { %08x(%x) %08x(%x) %08x(%x) }", | ||
351 | vlocation->vldb.name, | ||
352 | vlocation->vldb.vidmask, | ||
353 | ntohl(vlocation->vldb.servers[0].s_addr), | ||
354 | vlocation->vldb.srvtmask[0], | ||
355 | ntohl(vlocation->vldb.servers[1].s_addr), | ||
356 | vlocation->vldb.srvtmask[1], | ||
357 | ntohl(vlocation->vldb.servers[2].s_addr), | ||
358 | vlocation->vldb.srvtmask[2] | ||
359 | ); | ||
360 | |||
361 | _debug("Vids: %08x %08x %08x", | ||
362 | vlocation->vldb.vid[0], | ||
363 | vlocation->vldb.vid[1], | ||
364 | vlocation->vldb.vid[2]); | ||
365 | |||
366 | if (vlocation->vldb.vidmask & AFS_VOL_VTM_RW) { | ||
367 | vid = vlocation->vldb.vid[0]; | ||
368 | voltype = AFSVL_RWVOL; | ||
369 | } | ||
370 | else if (vlocation->vldb.vidmask & AFS_VOL_VTM_RO) { | ||
371 | vid = vlocation->vldb.vid[1]; | ||
372 | voltype = AFSVL_ROVOL; | ||
373 | } | ||
374 | else if (vlocation->vldb.vidmask & AFS_VOL_VTM_BAK) { | ||
375 | vid = vlocation->vldb.vid[2]; | ||
376 | voltype = AFSVL_BACKVOL; | ||
377 | } | ||
378 | else { | ||
379 | BUG(); | ||
380 | vid = 0; | ||
381 | voltype = 0; | ||
382 | } | ||
383 | |||
384 | ret = afs_vlocation_access_vl_by_id(vlocation, vid, voltype, &vldb); | ||
385 | switch (ret) { | ||
386 | /* net error */ | ||
387 | default: | ||
388 | printk("kAFS: failed to volume '%*.*s' (%x) up in '%s': %d\n", | ||
389 | namesz, namesz, name, vid, cell->name, ret); | ||
390 | goto error; | ||
391 | |||
392 | /* pulled from local cache into memory */ | ||
393 | case 0: | ||
394 | goto found_on_vlserver; | ||
395 | |||
396 | /* uh oh... looks like the volume got deleted */ | ||
397 | case -ENOMEDIUM: | ||
398 | printk("kAFS: volume '%*.*s' (%x) does not exist '%s'\n", | ||
399 | namesz, namesz, name, vid, cell->name); | ||
400 | |||
401 | /* TODO: make existing record unavailable */ | ||
402 | goto error; | ||
403 | } | ||
404 | |||
405 | found_on_vlserver: | ||
406 | _debug("Done VL Lookup: %*.*s %02x { %08x(%x) %08x(%x) %08x(%x) }", | ||
407 | namesz, namesz, name, | ||
408 | vldb.vidmask, | ||
409 | ntohl(vldb.servers[0].s_addr), vldb.srvtmask[0], | ||
410 | ntohl(vldb.servers[1].s_addr), vldb.srvtmask[1], | ||
411 | ntohl(vldb.servers[2].s_addr), vldb.srvtmask[2] | ||
412 | ); | ||
413 | |||
414 | _debug("Vids: %08x %08x %08x", vldb.vid[0], vldb.vid[1], vldb.vid[2]); | ||
415 | |||
416 | if ((namesz < sizeof(vlocation->vldb.name) && | ||
417 | vlocation->vldb.name[namesz] != '\0') || | ||
418 | memcmp(vldb.name, name, namesz) != 0) | ||
419 | printk("kAFS: name of volume '%*.*s' changed to '%s' on server\n", | ||
420 | namesz, namesz, name, vldb.name); | ||
421 | |||
422 | memcpy(&vlocation->vldb, &vldb, sizeof(vlocation->vldb)); | ||
423 | |||
424 | afs_kafstimod_add_timer(&vlocation->upd_timer, 10 * HZ); | ||
425 | |||
426 | #ifdef AFS_CACHING_SUPPORT | ||
427 | /* update volume entry in local cache */ | ||
428 | cachefs_update_cookie(vlocation->cache); | ||
429 | #endif | ||
430 | |||
431 | *_vlocation = vlocation; | ||
432 | _leave(" = 0 (%p)",vlocation); | ||
433 | return 0; | ||
434 | |||
435 | error: | ||
436 | if (vlocation) { | ||
437 | if (active) { | ||
438 | __afs_put_vlocation(vlocation); | ||
439 | } | ||
440 | else { | ||
441 | list_del(&vlocation->link); | ||
442 | #ifdef AFS_CACHING_SUPPORT | ||
443 | cachefs_relinquish_cookie(vlocation->cache, 0); | ||
444 | #endif | ||
445 | afs_put_cell(vlocation->cell); | ||
446 | kfree(vlocation); | ||
447 | } | ||
448 | } | ||
449 | |||
450 | _leave(" = %d", ret); | ||
451 | return ret; | ||
452 | } /* end afs_vlocation_lookup() */ | ||
453 | |||
454 | /*****************************************************************************/ | ||
455 | /* | ||
456 | * finish using a volume location record | ||
457 | * - caller must have cell->vol_sem write-locked | ||
458 | */ | ||
459 | static void __afs_put_vlocation(struct afs_vlocation *vlocation) | ||
460 | { | ||
461 | struct afs_cell *cell; | ||
462 | |||
463 | if (!vlocation) | ||
464 | return; | ||
465 | |||
466 | _enter("%s", vlocation->vldb.name); | ||
467 | |||
468 | cell = vlocation->cell; | ||
469 | |||
470 | /* sanity check */ | ||
471 | BUG_ON(atomic_read(&vlocation->usage) <= 0); | ||
472 | |||
473 | spin_lock(&cell->vl_gylock); | ||
474 | if (likely(!atomic_dec_and_test(&vlocation->usage))) { | ||
475 | spin_unlock(&cell->vl_gylock); | ||
476 | _leave(""); | ||
477 | return; | ||
478 | } | ||
479 | |||
480 | /* move to graveyard queue */ | ||
481 | list_del(&vlocation->link); | ||
482 | list_add_tail(&vlocation->link, &cell->vl_graveyard); | ||
483 | |||
484 | /* remove from pending timeout queue (refcounted if actually being | ||
485 | * updated) */ | ||
486 | list_del_init(&vlocation->upd_op.link); | ||
487 | |||
488 | /* time out in 10 secs */ | ||
489 | afs_kafstimod_del_timer(&vlocation->upd_timer); | ||
490 | afs_kafstimod_add_timer(&vlocation->timeout, 10 * HZ); | ||
491 | |||
492 | spin_unlock(&cell->vl_gylock); | ||
493 | |||
494 | _leave(" [killed]"); | ||
495 | } /* end __afs_put_vlocation() */ | ||
496 | |||
497 | /*****************************************************************************/ | ||
498 | /* | ||
499 | * finish using a volume location record | ||
500 | */ | ||
501 | void afs_put_vlocation(struct afs_vlocation *vlocation) | ||
502 | { | ||
503 | if (vlocation) { | ||
504 | struct afs_cell *cell = vlocation->cell; | ||
505 | |||
506 | down_write(&cell->vl_sem); | ||
507 | __afs_put_vlocation(vlocation); | ||
508 | up_write(&cell->vl_sem); | ||
509 | } | ||
510 | } /* end afs_put_vlocation() */ | ||
511 | |||
512 | /*****************************************************************************/ | ||
513 | /* | ||
514 | * timeout vlocation record | ||
515 | * - removes from the cell's graveyard if the usage count is zero | ||
516 | */ | ||
517 | void afs_vlocation_do_timeout(struct afs_vlocation *vlocation) | ||
518 | { | ||
519 | struct afs_cell *cell; | ||
520 | |||
521 | _enter("%s", vlocation->vldb.name); | ||
522 | |||
523 | cell = vlocation->cell; | ||
524 | |||
525 | BUG_ON(atomic_read(&vlocation->usage) < 0); | ||
526 | |||
527 | /* remove from graveyard if still dead */ | ||
528 | spin_lock(&cell->vl_gylock); | ||
529 | if (atomic_read(&vlocation->usage) == 0) | ||
530 | list_del_init(&vlocation->link); | ||
531 | else | ||
532 | vlocation = NULL; | ||
533 | spin_unlock(&cell->vl_gylock); | ||
534 | |||
535 | if (!vlocation) { | ||
536 | _leave(""); | ||
537 | return; /* resurrected */ | ||
538 | } | ||
539 | |||
540 | /* we can now destroy it properly */ | ||
541 | #ifdef AFS_CACHING_SUPPORT | ||
542 | cachefs_relinquish_cookie(vlocation->cache, 0); | ||
543 | #endif | ||
544 | afs_put_cell(cell); | ||
545 | |||
546 | kfree(vlocation); | ||
547 | |||
548 | _leave(" [destroyed]"); | ||
549 | } /* end afs_vlocation_do_timeout() */ | ||
550 | |||
551 | /*****************************************************************************/ | ||
552 | /* | ||
553 | * send an update operation to the currently selected server | ||
554 | */ | ||
555 | static int afs_vlocation_update_begin(struct afs_vlocation *vlocation) | ||
556 | { | ||
557 | afs_voltype_t voltype; | ||
558 | afs_volid_t vid; | ||
559 | int ret; | ||
560 | |||
561 | _enter("%s{ufs=%u ucs=%u}", | ||
562 | vlocation->vldb.name, | ||
563 | vlocation->upd_first_svix, | ||
564 | vlocation->upd_curr_svix); | ||
565 | |||
566 | /* try to look up a cached volume in the cell VL databases by ID */ | ||
567 | if (vlocation->vldb.vidmask & AFS_VOL_VTM_RW) { | ||
568 | vid = vlocation->vldb.vid[0]; | ||
569 | voltype = AFSVL_RWVOL; | ||
570 | } | ||
571 | else if (vlocation->vldb.vidmask & AFS_VOL_VTM_RO) { | ||
572 | vid = vlocation->vldb.vid[1]; | ||
573 | voltype = AFSVL_ROVOL; | ||
574 | } | ||
575 | else if (vlocation->vldb.vidmask & AFS_VOL_VTM_BAK) { | ||
576 | vid = vlocation->vldb.vid[2]; | ||
577 | voltype = AFSVL_BACKVOL; | ||
578 | } | ||
579 | else { | ||
580 | BUG(); | ||
581 | vid = 0; | ||
582 | voltype = 0; | ||
583 | } | ||
584 | |||
585 | /* contact the chosen server */ | ||
586 | ret = afs_server_lookup( | ||
587 | vlocation->cell, | ||
588 | &vlocation->cell->vl_addrs[vlocation->upd_curr_svix], | ||
589 | &vlocation->upd_op.server); | ||
590 | |||
591 | switch (ret) { | ||
592 | case 0: | ||
593 | break; | ||
594 | case -ENOMEM: | ||
595 | case -ENONET: | ||
596 | default: | ||
597 | _leave(" = %d", ret); | ||
598 | return ret; | ||
599 | } | ||
600 | |||
601 | /* initiate the update operation */ | ||
602 | ret = afs_rxvl_get_entry_by_id_async(&vlocation->upd_op, vid, voltype); | ||
603 | if (ret < 0) { | ||
604 | _leave(" = %d", ret); | ||
605 | return ret; | ||
606 | } | ||
607 | |||
608 | _leave(" = %d", ret); | ||
609 | return ret; | ||
610 | } /* end afs_vlocation_update_begin() */ | ||
611 | |||
612 | /*****************************************************************************/ | ||
613 | /* | ||
614 | * abandon updating a VL record | ||
615 | * - does not restart the update timer | ||
616 | */ | ||
617 | static void afs_vlocation_update_abandon(struct afs_vlocation *vlocation, | ||
618 | afs_vlocation_upd_t state, | ||
619 | int ret) | ||
620 | { | ||
621 | _enter("%s,%u", vlocation->vldb.name, state); | ||
622 | |||
623 | if (ret < 0) | ||
624 | printk("kAFS: Abandoning VL update '%s': %d\n", | ||
625 | vlocation->vldb.name, ret); | ||
626 | |||
627 | /* discard the server record */ | ||
628 | afs_put_server(vlocation->upd_op.server); | ||
629 | vlocation->upd_op.server = NULL; | ||
630 | |||
631 | spin_lock(&afs_vlocation_update_lock); | ||
632 | afs_vlocation_update = NULL; | ||
633 | vlocation->upd_state = state; | ||
634 | |||
635 | /* TODO: start updating next VL record on pending list */ | ||
636 | |||
637 | spin_unlock(&afs_vlocation_update_lock); | ||
638 | |||
639 | _leave(""); | ||
640 | } /* end afs_vlocation_update_abandon() */ | ||
641 | |||
642 | /*****************************************************************************/ | ||
643 | /* | ||
644 | * handle periodic update timeouts and busy retry timeouts | ||
645 | * - called from kafstimod | ||
646 | */ | ||
647 | static void afs_vlocation_update_timer(struct afs_timer *timer) | ||
648 | { | ||
649 | struct afs_vlocation *vlocation = | ||
650 | list_entry(timer, struct afs_vlocation, upd_timer); | ||
651 | int ret; | ||
652 | |||
653 | _enter("%s", vlocation->vldb.name); | ||
654 | |||
655 | /* only update if not in the graveyard (also defends against a concurrent put) */ | ||
656 | spin_lock(&vlocation->cell->vl_gylock); | ||
657 | |||
658 | if (!atomic_read(&vlocation->usage)) | ||
659 | goto out_unlock1; | ||
660 | |||
661 | spin_lock(&afs_vlocation_update_lock); | ||
662 | |||
663 | /* if we were woken up due to EBUSY sleep then restart immediately if | ||
664 | * possible or else jump to front of pending queue */ | ||
665 | if (vlocation->upd_state == AFS_VLUPD_BUSYSLEEP) { | ||
666 | if (afs_vlocation_update) { | ||
667 | list_add(&vlocation->upd_op.link, | ||
668 | &afs_vlocation_update_pendq); | ||
669 | } | ||
670 | else { | ||
671 | afs_get_vlocation(vlocation); | ||
672 | afs_vlocation_update = vlocation; | ||
673 | vlocation->upd_state = AFS_VLUPD_INPROGRESS; | ||
674 | } | ||
675 | goto out_unlock2; | ||
676 | } | ||
677 | |||
678 | /* put on pending queue if there's already another update in progress */ | ||
679 | if (afs_vlocation_update) { | ||
680 | vlocation->upd_state = AFS_VLUPD_PENDING; | ||
681 | list_add_tail(&vlocation->upd_op.link, | ||
682 | &afs_vlocation_update_pendq); | ||
683 | goto out_unlock2; | ||
684 | } | ||
685 | |||
686 | /* hold a ref on it while actually updating */ | ||
687 | afs_get_vlocation(vlocation); | ||
688 | afs_vlocation_update = vlocation; | ||
689 | vlocation->upd_state = AFS_VLUPD_INPROGRESS; | ||
690 | |||
691 | spin_unlock(&afs_vlocation_update_lock); | ||
692 | spin_unlock(&vlocation->cell->vl_gylock); | ||
693 | |||
694 | /* okay... we can start the update */ | ||
695 | _debug("BEGIN VL UPDATE [%s]", vlocation->vldb.name); | ||
696 | vlocation->upd_first_svix = vlocation->cell->vl_curr_svix; | ||
697 | vlocation->upd_curr_svix = vlocation->upd_first_svix; | ||
698 | vlocation->upd_rej_cnt = 0; | ||
699 | vlocation->upd_busy_cnt = 0; | ||
700 | |||
701 | ret = afs_vlocation_update_begin(vlocation); | ||
702 | if (ret < 0) { | ||
703 | afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, ret); | ||
704 | afs_kafstimod_add_timer(&vlocation->upd_timer, | ||
705 | AFS_VLDB_TIMEOUT); | ||
706 | afs_put_vlocation(vlocation); | ||
707 | } | ||
708 | |||
709 | _leave(""); | ||
710 | return; | ||
711 | |||
712 | out_unlock2: | ||
713 | spin_unlock(&afs_vlocation_update_lock); | ||
714 | out_unlock1: | ||
715 | spin_unlock(&vlocation->cell->vl_gylock); | ||
716 | _leave(""); | ||
717 | return; | ||
718 | |||
719 | } /* end afs_vlocation_update_timer() */ | ||
720 | |||
721 | /*****************************************************************************/ | ||
722 | /* | ||
723 | * attend to an update operation upon which an event happened | ||
724 | * - called in kafsasyncd context | ||
725 | */ | ||
726 | static void afs_vlocation_update_attend(struct afs_async_op *op) | ||
727 | { | ||
728 | struct afs_cache_vlocation vldb; | ||
729 | struct afs_vlocation *vlocation = | ||
730 | list_entry(op, struct afs_vlocation, upd_op); | ||
731 | unsigned tmp; | ||
732 | int ret; | ||
733 | |||
734 | _enter("%s", vlocation->vldb.name); | ||
735 | |||
736 | ret = afs_rxvl_get_entry_by_id_async2(op, &vldb); | ||
737 | switch (ret) { | ||
738 | case -EAGAIN: | ||
739 | _leave(" [unfinished]"); | ||
740 | return; | ||
741 | |||
742 | case 0: | ||
743 | _debug("END VL UPDATE: %d\n", ret); | ||
744 | vlocation->valid = 1; | ||
745 | |||
746 | _debug("Done VL Lookup: %02x { %08x(%x) %08x(%x) %08x(%x) }", | ||
747 | vldb.vidmask, | ||
748 | ntohl(vldb.servers[0].s_addr), vldb.srvtmask[0], | ||
749 | ntohl(vldb.servers[1].s_addr), vldb.srvtmask[1], | ||
750 | ntohl(vldb.servers[2].s_addr), vldb.srvtmask[2] | ||
751 | ); | ||
752 | |||
753 | _debug("Vids: %08x %08x %08x", | ||
754 | vldb.vid[0], vldb.vid[1], vldb.vid[2]); | ||
755 | |||
756 | afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, 0); | ||
757 | |||
758 | down_write(&vlocation->cell->vl_sem); | ||
759 | |||
760 | /* actually update the cache */ | ||
761 | if (strncmp(vldb.name, vlocation->vldb.name, | ||
762 | sizeof(vlocation->vldb.name)) != 0) | ||
763 | printk("kAFS: name of volume '%s'" | ||
764 | " changed to '%s' on server\n", | ||
765 | vlocation->vldb.name, vldb.name); | ||
766 | |||
767 | memcpy(&vlocation->vldb, &vldb, sizeof(vlocation->vldb)); | ||
768 | |||
769 | #if 0 | ||
770 | /* TODO update volume entry in local cache */ | ||
771 | #endif | ||
772 | |||
773 | up_write(&vlocation->cell->vl_sem); | ||
774 | |||
775 | if (ret < 0) | ||
776 | printk("kAFS: failed to update local cache: %d\n", ret); | ||
777 | |||
778 | afs_kafstimod_add_timer(&vlocation->upd_timer, | ||
779 | AFS_VLDB_TIMEOUT); | ||
780 | afs_put_vlocation(vlocation); | ||
781 | _leave(" [found]"); | ||
782 | return; | ||
783 | |||
784 | case -ENOMEDIUM: | ||
785 | vlocation->upd_rej_cnt++; | ||
786 | goto try_next; | ||
787 | |||
788 | /* the server is locked - retry in a very short while */ | ||
789 | case -EBUSY: | ||
790 | vlocation->upd_busy_cnt++; | ||
791 | if (vlocation->upd_busy_cnt > 3) | ||
792 | goto try_next; /* too many retries */ | ||
793 | |||
794 | afs_vlocation_update_abandon(vlocation, | ||
795 | AFS_VLUPD_BUSYSLEEP, 0); | ||
796 | afs_kafstimod_add_timer(&vlocation->upd_timer, HZ / 2); | ||
797 | afs_put_vlocation(vlocation); | ||
798 | _leave(" [busy]"); | ||
799 | return; | ||
800 | |||
801 | case -ENETUNREACH: | ||
802 | case -EHOSTUNREACH: | ||
803 | case -ECONNREFUSED: | ||
804 | case -EREMOTEIO: | ||
805 | /* record bad vlserver info in the cell too | ||
806 | * - TODO: use down_write_trylock() if available | ||
807 | */ | ||
808 | if (vlocation->upd_curr_svix == vlocation->cell->vl_curr_svix) | ||
809 | vlocation->cell->vl_curr_svix = | ||
810 | vlocation->cell->vl_curr_svix % | ||
811 | vlocation->cell->vl_naddrs; | ||
812 | |||
813 | case -EBADRQC: | ||
814 | case -EINVAL: | ||
815 | case -EACCES: | ||
816 | case -EBADMSG: | ||
817 | goto try_next; | ||
818 | |||
819 | default: | ||
820 | goto abandon; | ||
821 | } | ||
822 | |||
823 | /* try contacting the next server */ | ||
824 | try_next: | ||
825 | vlocation->upd_busy_cnt = 0; | ||
826 | |||
827 | /* discard the server record */ | ||
828 | afs_put_server(vlocation->upd_op.server); | ||
829 | vlocation->upd_op.server = NULL; | ||
830 | |||
831 | tmp = vlocation->cell->vl_naddrs; | ||
832 | if (tmp == 0) | ||
833 | goto abandon; | ||
834 | |||
835 | vlocation->upd_curr_svix++; | ||
836 | if (vlocation->upd_curr_svix >= tmp) | ||
837 | vlocation->upd_curr_svix = 0; | ||
838 | if (vlocation->upd_first_svix >= tmp) | ||
839 | vlocation->upd_first_svix = tmp - 1; | ||
840 | |||
841 | /* move to the next server */ | ||
842 | if (vlocation->upd_curr_svix != vlocation->upd_first_svix) { | ||
843 | afs_vlocation_update_begin(vlocation); | ||
844 | _leave(" [next]"); | ||
845 | return; | ||
846 | } | ||
847 | |||
848 | /* run out of servers to try - was the volume rejected? */ | ||
849 | if (vlocation->upd_rej_cnt > 0) { | ||
850 | printk("kAFS: Active volume no longer valid '%s'\n", | ||
851 | vlocation->vldb.name); | ||
852 | vlocation->valid = 0; | ||
853 | afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, 0); | ||
854 | afs_kafstimod_add_timer(&vlocation->upd_timer, | ||
855 | AFS_VLDB_TIMEOUT); | ||
856 | afs_put_vlocation(vlocation); | ||
857 | _leave(" [invalidated]"); | ||
858 | return; | ||
859 | } | ||
860 | |||
861 | /* abandon the update */ | ||
862 | abandon: | ||
863 | afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, ret); | ||
864 | afs_kafstimod_add_timer(&vlocation->upd_timer, HZ * 10); | ||
865 | afs_put_vlocation(vlocation); | ||
866 | _leave(" [abandoned]"); | ||
867 | |||
868 | } /* end afs_vlocation_update_attend() */ | ||
869 | |||
870 | /*****************************************************************************/ | ||
871 | /* | ||
872 | * deal with an update operation being discarded | ||
873 | * - called in kafsasyncd context when it's dying due to rmmod | ||
874 | * - the call has already been aborted and put()'d | ||
875 | */ | ||
876 | static void afs_vlocation_update_discard(struct afs_async_op *op) | ||
877 | { | ||
878 | struct afs_vlocation *vlocation = | ||
879 | list_entry(op, struct afs_vlocation, upd_op); | ||
880 | |||
881 | _enter("%s", vlocation->vldb.name); | ||
882 | |||
883 | afs_put_server(op->server); | ||
884 | op->server = NULL; | ||
885 | |||
886 | afs_put_vlocation(vlocation); | ||
887 | |||
888 | _leave(""); | ||
889 | } /* end afs_vlocation_update_discard() */ | ||
890 | |||
891 | /*****************************************************************************/ | ||
892 | /* | ||
893 | * match a VLDB record stored in the cache | ||
894 | * - may also load target from entry | ||
895 | */ | ||
896 | #ifdef AFS_CACHING_SUPPORT | ||
897 | static cachefs_match_val_t afs_vlocation_cache_match(void *target, | ||
898 | const void *entry) | ||
899 | { | ||
900 | const struct afs_cache_vlocation *vldb = entry; | ||
901 | struct afs_vlocation *vlocation = target; | ||
902 | |||
903 | _enter("{%s},{%s}", vlocation->vldb.name, vldb->name); | ||
904 | |||
905 | if (strncmp(vlocation->vldb.name, vldb->name, sizeof(vldb->name)) == 0 | ||
906 | ) { | ||
907 | if (!vlocation->valid || | ||
908 | vlocation->vldb.rtime == vldb->rtime | ||
909 | ) { | ||
910 | vlocation->vldb = *vldb; | ||
911 | vlocation->valid = 1; | ||
912 | _leave(" = SUCCESS [c->m]"); | ||
913 | return CACHEFS_MATCH_SUCCESS; | ||
914 | } | ||
915 | /* need to update cache if cached info differs */ | ||
916 | else if (memcmp(&vlocation->vldb, vldb, sizeof(*vldb)) != 0) { | ||
917 | /* delete if VIDs for this name differ */ | ||
918 | if (memcmp(&vlocation->vldb.vid, | ||
919 | &vldb->vid, | ||
920 | sizeof(vldb->vid)) != 0) { | ||
921 | _leave(" = DELETE"); | ||
922 | return CACHEFS_MATCH_SUCCESS_DELETE; | ||
923 | } | ||
924 | |||
925 | _leave(" = UPDATE"); | ||
926 | return CACHEFS_MATCH_SUCCESS_UPDATE; | ||
927 | } | ||
928 | else { | ||
929 | _leave(" = SUCCESS"); | ||
930 | return CACHEFS_MATCH_SUCCESS; | ||
931 | } | ||
932 | } | ||
933 | |||
934 | _leave(" = FAILED"); | ||
935 | return CACHEFS_MATCH_FAILED; | ||
936 | } /* end afs_vlocation_cache_match() */ | ||
937 | #endif | ||
938 | |||
939 | /*****************************************************************************/ | ||
940 | /* | ||
941 | * update a VLDB record stored in the cache | ||
942 | */ | ||
943 | #ifdef AFS_CACHING_SUPPORT | ||
944 | static void afs_vlocation_cache_update(void *source, void *entry) | ||
945 | { | ||
946 | struct afs_cache_vlocation *vldb = entry; | ||
947 | struct afs_vlocation *vlocation = source; | ||
948 | |||
949 | _enter(""); | ||
950 | |||
951 | *vldb = vlocation->vldb; | ||
952 | |||
953 | } /* end afs_vlocation_cache_update() */ | ||
954 | #endif | ||
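
Both afs_vlocation_access_vl_by_name() and afs_vlocation_access_vl_by_id() above walk the cell's VL server addresses starting at vl_curr_svix and rotate that index on failure, so the next lookup resumes at whichever server last answered. What follows is a minimal stand-alone userspace sketch of that rotation pattern; the fake_cell layout and the query_vl_server() stub are invented for illustration and are not part of the kernel code.

	/* Stand-alone model of the VL-server rotation loop: try each address in
	 * turn starting from the cell's current index, and advance the index
	 * modulo the address count whenever a server fails to answer. */
	#include <stdio.h>
	#include <string.h>

	struct fake_cell {
		unsigned	vl_naddrs;	/* number of VL server addresses */
		unsigned	vl_curr_svix;	/* index of the server to try first */
		const char	*vl_addrs[3];	/* addresses (strings in this sketch) */
	};

	/* pretend that only "c.vlserver" knows about the volume */
	static int query_vl_server(const char *addr, const char *volname)
	{
		printf("querying %s for '%s'\n", addr, volname);
		return strcmp(addr, "c.vlserver") == 0 ? 0 : -1;
	}

	static int lookup_with_rotation(struct fake_cell *cell, const char *volname)
	{
		int count, ret = -1;

		for (count = cell->vl_naddrs; count > 0; count--) {
			ret = query_vl_server(cell->vl_addrs[cell->vl_curr_svix],
					      volname);
			if (ret == 0)
				break;	/* leave vl_curr_svix at the good server */

			/* rotate the server index upon lookup failure */
			cell->vl_curr_svix++;
			cell->vl_curr_svix %= cell->vl_naddrs;
		}
		return ret;
	}

	int main(void)
	{
		struct fake_cell cell = {
			.vl_naddrs	= 3,
			.vl_curr_svix	= 0,
			.vl_addrs	= { "a.vlserver", "b.vlserver", "c.vlserver" },
		};

		return lookup_with_rotation(&cell, "root.cell") ? 1 : 0;
	}
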
diff --git a/fs/afs/vnode.c b/fs/afs/vnode.c new file mode 100644 index 000000000000..9867fef3261d --- /dev/null +++ b/fs/afs/vnode.c | |||
@@ -0,0 +1,395 @@ | |||
1 | /* vnode.c: AFS vnode management | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/fs.h> | ||
17 | #include <linux/pagemap.h> | ||
18 | #include "volume.h" | ||
19 | #include "cell.h" | ||
20 | #include "cmservice.h" | ||
21 | #include "fsclient.h" | ||
22 | #include "vlclient.h" | ||
23 | #include "vnode.h" | ||
24 | #include "internal.h" | ||
25 | |||
26 | static void afs_vnode_cb_timed_out(struct afs_timer *timer); | ||
27 | |||
28 | struct afs_timer_ops afs_vnode_cb_timed_out_ops = { | ||
29 | .timed_out = afs_vnode_cb_timed_out, | ||
30 | }; | ||
31 | |||
32 | #ifdef AFS_CACHING_SUPPORT | ||
33 | static cachefs_match_val_t afs_vnode_cache_match(void *target, | ||
34 | const void *entry); | ||
35 | static void afs_vnode_cache_update(void *source, void *entry); | ||
36 | |||
37 | struct cachefs_index_def afs_vnode_cache_index_def = { | ||
38 | .name = "vnode", | ||
39 | .data_size = sizeof(struct afs_cache_vnode), | ||
40 | .keys[0] = { CACHEFS_INDEX_KEYS_BIN, 4 }, | ||
41 | .match = afs_vnode_cache_match, | ||
42 | .update = afs_vnode_cache_update, | ||
43 | }; | ||
44 | #endif | ||
45 | |||
46 | /*****************************************************************************/ | ||
47 | /* | ||
48 | * handle a callback timing out | ||
49 | * TODO: retain a ref to vnode struct for an outstanding callback timeout | ||
50 | */ | ||
51 | static void afs_vnode_cb_timed_out(struct afs_timer *timer) | ||
52 | { | ||
53 | struct afs_server *oldserver; | ||
54 | struct afs_vnode *vnode; | ||
55 | |||
56 | vnode = list_entry(timer, struct afs_vnode, cb_timeout); | ||
57 | |||
58 | _enter("%p", vnode); | ||
59 | |||
60 | /* set the changed flag in the vnode and release the server */ | ||
61 | spin_lock(&vnode->lock); | ||
62 | |||
63 | oldserver = xchg(&vnode->cb_server, NULL); | ||
64 | if (oldserver) { | ||
65 | vnode->flags |= AFS_VNODE_CHANGED; | ||
66 | |||
67 | spin_lock(&afs_cb_hash_lock); | ||
68 | list_del_init(&vnode->cb_hash_link); | ||
69 | spin_unlock(&afs_cb_hash_lock); | ||
70 | |||
71 | spin_lock(&oldserver->cb_lock); | ||
72 | list_del_init(&vnode->cb_link); | ||
73 | spin_unlock(&oldserver->cb_lock); | ||
74 | } | ||
75 | |||
76 | spin_unlock(&vnode->lock); | ||
77 | |||
78 | afs_put_server(oldserver); | ||
79 | |||
80 | _leave(""); | ||
81 | } /* end afs_vnode_cb_timed_out() */ | ||
82 | |||
83 | /*****************************************************************************/ | ||
84 | /* | ||
85 | * finish off updating the recorded status of a file | ||
86 | * - starts callback expiry timer | ||
87 | * - adds to server's callback list | ||
88 | */ | ||
89 | static void afs_vnode_finalise_status_update(struct afs_vnode *vnode, | ||
90 | struct afs_server *server, | ||
91 | int ret) | ||
92 | { | ||
93 | struct afs_server *oldserver = NULL; | ||
94 | |||
95 | _enter("%p,%p,%d", vnode, server, ret); | ||
96 | |||
97 | spin_lock(&vnode->lock); | ||
98 | |||
99 | vnode->flags &= ~AFS_VNODE_CHANGED; | ||
100 | |||
101 | if (ret == 0) { | ||
102 | /* adjust the callback timeout appropriately */ | ||
103 | afs_kafstimod_add_timer(&vnode->cb_timeout, | ||
104 | vnode->cb_expiry * HZ); | ||
105 | |||
106 | spin_lock(&afs_cb_hash_lock); | ||
107 | list_del(&vnode->cb_hash_link); | ||
108 | list_add_tail(&vnode->cb_hash_link, | ||
109 | &afs_cb_hash(server, &vnode->fid)); | ||
110 | spin_unlock(&afs_cb_hash_lock); | ||
111 | |||
112 | /* swap ref to old callback server with that for new callback | ||
113 | * server */ | ||
114 | oldserver = xchg(&vnode->cb_server, server); | ||
115 | if (oldserver != server) { | ||
116 | if (oldserver) { | ||
117 | spin_lock(&oldserver->cb_lock); | ||
118 | list_del_init(&vnode->cb_link); | ||
119 | spin_unlock(&oldserver->cb_lock); | ||
120 | } | ||
121 | |||
122 | afs_get_server(server); | ||
123 | spin_lock(&server->cb_lock); | ||
124 | list_add_tail(&vnode->cb_link, &server->cb_promises); | ||
125 | spin_unlock(&server->cb_lock); | ||
126 | } | ||
127 | else { | ||
128 | /* same server */ | ||
129 | oldserver = NULL; | ||
130 | } | ||
131 | } | ||
132 | else if (ret == -ENOENT) { | ||
133 | /* the file was deleted - clear the callback timeout */ | ||
134 | oldserver = xchg(&vnode->cb_server, NULL); | ||
135 | afs_kafstimod_del_timer(&vnode->cb_timeout); | ||
136 | |||
137 | _debug("got NOENT from server - marking file deleted"); | ||
138 | vnode->flags |= AFS_VNODE_DELETED; | ||
139 | } | ||
140 | |||
141 | vnode->update_cnt--; | ||
142 | |||
143 | spin_unlock(&vnode->lock); | ||
144 | |||
145 | wake_up_all(&vnode->update_waitq); | ||
146 | |||
147 | afs_put_server(oldserver); | ||
148 | |||
149 | _leave(""); | ||
150 | |||
151 | } /* end afs_vnode_finalise_status_update() */ | ||
152 | |||
153 | /*****************************************************************************/ | ||
154 | /* | ||
155 | * fetch file status from the volume | ||
156 | * - don't issue a fetch if: | ||
157 | * - the changed bit is not set and there's a valid callback | ||
158 | * - there are any outstanding ops that will fetch the status | ||
159 | * - TODO implement local caching | ||
160 | */ | ||
161 | int afs_vnode_fetch_status(struct afs_vnode *vnode) | ||
162 | { | ||
163 | struct afs_server *server; | ||
164 | int ret; | ||
165 | |||
166 | DECLARE_WAITQUEUE(myself, current); | ||
167 | |||
168 | _enter("%s,{%u,%u,%u}", | ||
169 | vnode->volume->vlocation->vldb.name, | ||
170 | vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); | ||
171 | |||
172 | if (!(vnode->flags & AFS_VNODE_CHANGED) && vnode->cb_server) { | ||
173 | _leave(" [unchanged]"); | ||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | if (vnode->flags & AFS_VNODE_DELETED) { | ||
178 | _leave(" [deleted]"); | ||
179 | return -ENOENT; | ||
180 | } | ||
181 | |||
182 | spin_lock(&vnode->lock); | ||
183 | |||
184 | if (!(vnode->flags & AFS_VNODE_CHANGED)) { | ||
185 | spin_unlock(&vnode->lock); | ||
186 | _leave(" [unchanged]"); | ||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | if (vnode->update_cnt > 0) { | ||
191 | /* someone else started a fetch */ | ||
192 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
193 | add_wait_queue(&vnode->update_waitq, &myself); | ||
194 | |||
195 | /* wait for the status to be updated */ | ||
196 | for (;;) { | ||
197 | if (!(vnode->flags & AFS_VNODE_CHANGED)) | ||
198 | break; | ||
199 | if (vnode->flags & AFS_VNODE_DELETED) | ||
200 | break; | ||
201 | |||
202 | /* the status got updated and invalidated again before | ||
203 | * we ever saw it */ | ||
204 | if (vnode->update_cnt == 0) { | ||
205 | remove_wait_queue(&vnode->update_waitq, | ||
206 | &myself); | ||
207 | set_current_state(TASK_RUNNING); | ||
208 | goto get_anyway; | ||
209 | } | ||
210 | |||
211 | spin_unlock(&vnode->lock); | ||
212 | |||
213 | schedule(); | ||
214 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
215 | |||
216 | spin_lock(&vnode->lock); | ||
217 | } | ||
218 | |||
219 | remove_wait_queue(&vnode->update_waitq, &myself); | ||
220 | spin_unlock(&vnode->lock); | ||
221 | set_current_state(TASK_RUNNING); | ||
222 | |||
223 | return vnode->flags & AFS_VNODE_DELETED ? -ENOENT : 0; | ||
224 | } | ||
225 | |||
226 | get_anyway: | ||
227 | /* okay... we're going to have to initiate the op */ | ||
228 | vnode->update_cnt++; | ||
229 | |||
230 | spin_unlock(&vnode->lock); | ||
231 | |||
232 | /* merge AFS status fetches and clear outstanding callback on this | ||
233 | * vnode */ | ||
234 | do { | ||
235 | /* pick a server to query */ | ||
236 | ret = afs_volume_pick_fileserver(vnode->volume, &server); | ||
237 | if (ret < 0) | ||
238 | return ret; | ||
239 | |||
240 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | ||
241 | |||
242 | ret = afs_rxfs_fetch_file_status(server, vnode, NULL); | ||
243 | |||
244 | } while (!afs_volume_release_fileserver(vnode->volume, server, ret)); | ||
245 | |||
246 | /* adjust the flags */ | ||
247 | afs_vnode_finalise_status_update(vnode, server, ret); | ||
248 | |||
249 | _leave(" = %d", ret); | ||
250 | return ret; | ||
251 | } /* end afs_vnode_fetch_status() */ | ||
252 | |||
253 | /*****************************************************************************/ | ||
254 | /* | ||
255 | * fetch file data from the volume | ||
256 | * - TODO implement caching and server failover | ||
257 | */ | ||
258 | int afs_vnode_fetch_data(struct afs_vnode *vnode, | ||
259 | struct afs_rxfs_fetch_descriptor *desc) | ||
260 | { | ||
261 | struct afs_server *server; | ||
262 | int ret; | ||
263 | |||
264 | _enter("%s,{%u,%u,%u}", | ||
265 | vnode->volume->vlocation->vldb.name, | ||
266 | vnode->fid.vid, | ||
267 | vnode->fid.vnode, | ||
268 | vnode->fid.unique); | ||
269 | |||
270 | /* this op will fetch the status */ | ||
271 | spin_lock(&vnode->lock); | ||
272 | vnode->update_cnt++; | ||
273 | spin_unlock(&vnode->lock); | ||
274 | |||
275 | /* merge in AFS status fetches and clear outstanding callback on this | ||
276 | * vnode */ | ||
277 | do { | ||
278 | /* pick a server to query */ | ||
279 | ret = afs_volume_pick_fileserver(vnode->volume, &server); | ||
280 | if (ret < 0) | ||
281 | return ret; | ||
282 | |||
283 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | ||
284 | |||
285 | ret = afs_rxfs_fetch_file_data(server, vnode, desc, NULL); | ||
286 | |||
287 | } while (!afs_volume_release_fileserver(vnode->volume, server, ret)); | ||
288 | |||
289 | /* adjust the flags */ | ||
290 | afs_vnode_finalise_status_update(vnode, server, ret); | ||
291 | |||
292 | _leave(" = %d", ret); | ||
293 | return ret; | ||
294 | |||
295 | } /* end afs_vnode_fetch_data() */ | ||
296 | |||
297 | /*****************************************************************************/ | ||
298 | /* | ||
299 | * break any outstanding callback on a vnode | ||
300 | * - only relevant to the server that issued it | ||
301 | */ | ||
302 | int afs_vnode_give_up_callback(struct afs_vnode *vnode) | ||
303 | { | ||
304 | struct afs_server *server; | ||
305 | int ret; | ||
306 | |||
307 | _enter("%s,{%u,%u,%u}", | ||
308 | vnode->volume->vlocation->vldb.name, | ||
309 | vnode->fid.vid, | ||
310 | vnode->fid.vnode, | ||
311 | vnode->fid.unique); | ||
312 | |||
313 | spin_lock(&afs_cb_hash_lock); | ||
314 | list_del_init(&vnode->cb_hash_link); | ||
315 | spin_unlock(&afs_cb_hash_lock); | ||
316 | |||
317 | /* set the changed flag in the vnode and release the server */ | ||
318 | spin_lock(&vnode->lock); | ||
319 | |||
320 | afs_kafstimod_del_timer(&vnode->cb_timeout); | ||
321 | |||
322 | server = xchg(&vnode->cb_server, NULL); | ||
323 | if (server) { | ||
324 | vnode->flags |= AFS_VNODE_CHANGED; | ||
325 | |||
326 | spin_lock(&server->cb_lock); | ||
327 | list_del_init(&vnode->cb_link); | ||
328 | spin_unlock(&server->cb_lock); | ||
329 | } | ||
330 | |||
331 | spin_unlock(&vnode->lock); | ||
332 | |||
333 | ret = 0; | ||
334 | if (server) { | ||
335 | ret = afs_rxfs_give_up_callback(server, vnode); | ||
336 | afs_put_server(server); | ||
337 | } | ||
338 | |||
339 | _leave(" = %d", ret); | ||
340 | return ret; | ||
341 | } /* end afs_vnode_give_up_callback() */ | ||
342 | |||
343 | /*****************************************************************************/ | ||
344 | /* | ||
345 | * match a vnode record stored in the cache | ||
346 | */ | ||
347 | #ifdef AFS_CACHING_SUPPORT | ||
348 | static cachefs_match_val_t afs_vnode_cache_match(void *target, | ||
349 | const void *entry) | ||
350 | { | ||
351 | const struct afs_cache_vnode *cvnode = entry; | ||
352 | struct afs_vnode *vnode = target; | ||
353 | |||
354 | _enter("{%x,%x,%Lx},{%x,%x,%Lx}", | ||
355 | vnode->fid.vnode, | ||
356 | vnode->fid.unique, | ||
357 | vnode->status.version, | ||
358 | cvnode->vnode_id, | ||
359 | cvnode->vnode_unique, | ||
360 | cvnode->data_version); | ||
361 | |||
362 | if (vnode->fid.vnode != cvnode->vnode_id) { | ||
363 | _leave(" = FAILED"); | ||
364 | return CACHEFS_MATCH_FAILED; | ||
365 | } | ||
366 | |||
367 | if (vnode->fid.unique != cvnode->vnode_unique || | ||
368 | vnode->status.version != cvnode->data_version) { | ||
369 | _leave(" = DELETE"); | ||
370 | return CACHEFS_MATCH_SUCCESS_DELETE; | ||
371 | } | ||
372 | |||
373 | _leave(" = SUCCESS"); | ||
374 | return CACHEFS_MATCH_SUCCESS; | ||
375 | } /* end afs_vnode_cache_match() */ | ||
376 | #endif | ||
377 | |||
378 | /*****************************************************************************/ | ||
379 | /* | ||
380 | * update a vnode record stored in the cache | ||
381 | */ | ||
382 | #ifdef AFS_CACHING_SUPPORT | ||
383 | static void afs_vnode_cache_update(void *source, void *entry) | ||
384 | { | ||
385 | struct afs_cache_vnode *cvnode = entry; | ||
386 | struct afs_vnode *vnode = source; | ||
387 | |||
388 | _enter(""); | ||
389 | |||
390 | cvnode->vnode_id = vnode->fid.vnode; | ||
391 | cvnode->vnode_unique = vnode->fid.unique; | ||
392 | cvnode->data_version = vnode->status.version; | ||
393 | |||
394 | } /* end afs_vnode_cache_update() */ | ||
395 | #endif | ||
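
afs_vnode_cb_timed_out() and afs_vnode_finalise_status_update() above both use xchg() to swap the vnode's cb_server pointer and then release the reference on whichever server was displaced. Below is a minimal userspace sketch of that exchange-then-put pattern, using C11 atomics in place of the kernel's xchg() and a bare counter in place of afs_put_server(); the struct layout and names are illustrative only.

	/* Userspace model of the callback-server swap: atomically install the new
	 * server pointer, then drop the reference on whatever was there before. */
	#include <stdatomic.h>
	#include <stdio.h>

	struct server {
		const char	*name;
		int		refcount;
	};

	static void put_server(struct server *server)
	{
		if (server)
			server->refcount--;	/* the kernel would free it at zero */
	}

	int main(void)
	{
		struct server a = { "fs-a", 1 };
		struct server b = { "fs-b", 1 };
		_Atomic(struct server *) cb_server = &a;

		/* hand the callback promise to server b and recover the old holder */
		struct server *old = atomic_exchange(&cb_server, &b);
		if (old != &b)
			put_server(old);	/* release the superseded server */

		struct server *now = atomic_load(&cb_server);
		printf("old=%s now=%s\n", old ? old->name : "none", now->name);
		return 0;
	}
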
diff --git a/fs/afs/vnode.h b/fs/afs/vnode.h new file mode 100644 index 000000000000..b86a97102e8b --- /dev/null +++ b/fs/afs/vnode.h | |||
@@ -0,0 +1,94 @@ | |||
1 | /* vnode.h: AFS vnode record | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_AFS_VNODE_H | ||
13 | #define _LINUX_AFS_VNODE_H | ||
14 | |||
15 | #include <linux/fs.h> | ||
16 | #include "server.h" | ||
17 | #include "kafstimod.h" | ||
18 | #include "cache.h" | ||
19 | |||
20 | #ifdef __KERNEL__ | ||
21 | |||
22 | struct afs_rxfs_fetch_descriptor; | ||
23 | |||
24 | /*****************************************************************************/ | ||
25 | /* | ||
26 | * vnode catalogue entry | ||
27 | */ | ||
28 | struct afs_cache_vnode | ||
29 | { | ||
30 | afs_vnodeid_t vnode_id; /* vnode ID */ | ||
31 | unsigned vnode_unique; /* vnode ID uniquifier */ | ||
32 | afs_dataversion_t data_version; /* data version */ | ||
33 | }; | ||
34 | |||
35 | #ifdef AFS_CACHING_SUPPORT | ||
36 | extern struct cachefs_index_def afs_vnode_cache_index_def; | ||
37 | #endif | ||
38 | |||
39 | /*****************************************************************************/ | ||
40 | /* | ||
41 | * AFS inode private data | ||
42 | */ | ||
43 | struct afs_vnode | ||
44 | { | ||
45 | struct inode vfs_inode; /* the VFS's inode record */ | ||
46 | |||
47 | struct afs_volume *volume; /* volume on which vnode resides */ | ||
48 | struct afs_fid fid; /* the file identifier for this inode */ | ||
49 | struct afs_file_status status; /* AFS status info for this file */ | ||
50 | #ifdef AFS_CACHING_SUPPORT | ||
51 | struct cachefs_cookie *cache; /* caching cookie */ | ||
52 | #endif | ||
53 | |||
54 | wait_queue_head_t update_waitq; /* status fetch waitqueue */ | ||
55 | unsigned update_cnt; /* number of outstanding ops that will update the | ||
56 | * status */ | ||
57 | spinlock_t lock; /* waitqueue/flags lock */ | ||
58 | unsigned flags; | ||
59 | #define AFS_VNODE_CHANGED 0x00000001 /* set if vnode reported changed by callback */ | ||
60 | #define AFS_VNODE_DELETED 0x00000002 /* set if vnode deleted on server */ | ||
61 | #define AFS_VNODE_MOUNTPOINT 0x00000004 /* set if vnode is a mountpoint symlink */ | ||
62 | |||
63 | /* outstanding callback notification on this file */ | ||
64 | struct afs_server *cb_server; /* server that made the current promise */ | ||
65 | struct list_head cb_link; /* link in server's promises list */ | ||
66 | struct list_head cb_hash_link; /* link in master callback hash */ | ||
67 | struct afs_timer cb_timeout; /* timeout on promise */ | ||
68 | unsigned cb_version; /* callback version */ | ||
69 | unsigned cb_expiry; /* callback expiry time */ | ||
70 | afs_callback_type_t cb_type; /* type of callback */ | ||
71 | }; | ||
72 | |||
73 | static inline struct afs_vnode *AFS_FS_I(struct inode *inode) | ||
74 | { | ||
75 | return container_of(inode, struct afs_vnode, vfs_inode); | ||
76 | } | ||
77 | |||
78 | static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode) | ||
79 | { | ||
80 | return &vnode->vfs_inode; | ||
81 | } | ||
82 | |||
83 | extern int afs_vnode_fetch_status(struct afs_vnode *vnode); | ||
84 | |||
85 | extern int afs_vnode_fetch_data(struct afs_vnode *vnode, | ||
86 | struct afs_rxfs_fetch_descriptor *desc); | ||
87 | |||
88 | extern int afs_vnode_give_up_callback(struct afs_vnode *vnode); | ||
89 | |||
90 | extern struct afs_timer_ops afs_vnode_cb_timed_out_ops; | ||
91 | |||
92 | #endif /* __KERNEL__ */ | ||
93 | |||
94 | #endif /* _LINUX_AFS_VNODE_H */ | ||
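
vnode.h embeds the VFS inode directly inside struct afs_vnode and uses AFS_FS_I()/container_of() to move between the two. The sketch below illustrates that embedding pattern with simplified stand-in types; the kernel's container_of() comes from its own headers, so a userspace equivalent is defined here purely for the example.

	/* Stand-alone illustration of the embedded-inode pattern: given a pointer
	 * to the inode member, container_of() recovers the enclosing afs_vnode. */
	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct inode {				/* stand-in for the VFS inode */
		unsigned long	i_ino;
	};

	struct afs_vnode {			/* stand-in for the AFS private data */
		unsigned	vnode_id;
		struct inode	vfs_inode;	/* embedded VFS inode */
	};

	static struct afs_vnode *AFS_FS_I(struct inode *inode)
	{
		return container_of(inode, struct afs_vnode, vfs_inode);
	}

	int main(void)
	{
		struct afs_vnode vnode = {
			.vnode_id	= 42,
			.vfs_inode	= { .i_ino = 7 },
		};
		struct inode *inode = &vnode.vfs_inode;

		/* from the VFS inode alone, get back to the containing afs_vnode */
		printf("ino=%lu vnode_id=%u\n",
		       inode->i_ino, AFS_FS_I(inode)->vnode_id);
		return 0;
	}
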
diff --git a/fs/afs/volume.c b/fs/afs/volume.c new file mode 100644 index 000000000000..0ff4b86476e3 --- /dev/null +++ b/fs/afs/volume.c | |||
@@ -0,0 +1,520 @@ | |||
1 | /* volume.c: AFS volume management | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/fs.h> | ||
17 | #include <linux/pagemap.h> | ||
18 | #include "volume.h" | ||
19 | #include "vnode.h" | ||
20 | #include "cell.h" | ||
21 | #include "cache.h" | ||
22 | #include "cmservice.h" | ||
23 | #include "fsclient.h" | ||
24 | #include "vlclient.h" | ||
25 | #include "internal.h" | ||
26 | |||
27 | #ifdef __KDEBUG | ||
28 | static const char *afs_voltypes[] = { "R/W", "R/O", "BAK" }; | ||
29 | #endif | ||
30 | |||
31 | #ifdef AFS_CACHING_SUPPORT | ||
32 | static cachefs_match_val_t afs_volume_cache_match(void *target, | ||
33 | const void *entry); | ||
34 | static void afs_volume_cache_update(void *source, void *entry); | ||
35 | |||
36 | struct cachefs_index_def afs_volume_cache_index_def = { | ||
37 | .name = "volume", | ||
38 | .data_size = sizeof(struct afs_cache_vhash), | ||
39 | .keys[0] = { CACHEFS_INDEX_KEYS_BIN, 1 }, | ||
40 | .keys[1] = { CACHEFS_INDEX_KEYS_BIN, 1 }, | ||
41 | .match = afs_volume_cache_match, | ||
42 | .update = afs_volume_cache_update, | ||
43 | }; | ||
44 | #endif | ||
45 | |||
46 | /*****************************************************************************/ | ||
47 | /* | ||
48 | * lookup a volume by name | ||
49 | * - this can be one of the following: | ||
50 | * "%[cell:]volume[.]" R/W volume | ||
51 | * "#[cell:]volume[.]" R/O or R/W volume (rwparent=0), | ||
52 | * or R/W (rwparent=1) volume | ||
53 | * "%[cell:]volume.readonly" R/O volume | ||
54 | * "#[cell:]volume.readonly" R/O volume | ||
55 | * "%[cell:]volume.backup" Backup volume | ||
56 | * "#[cell:]volume.backup" Backup volume | ||
57 | * | ||
58 | * The cell name is optional, and defaults to the current cell. | ||
59 | * | ||
60 | * See "The Rules of Mount Point Traversal" in Chapter 5 of the AFS SysAdmin | ||
61 | * Guide | ||
62 | * - Rule 1: Explicit type suffix forces access of that type or nothing | ||
63 | * (no suffix, then use Rule 2 & 3) | ||
64 | * - Rule 2: If parent volume is R/O, then mount R/O volume by preference, R/W | ||
65 | * if not available | ||
66 | * - Rule 3: If parent volume is R/W, then only mount R/W volume unless | ||
67 | * explicitly told otherwise | ||
68 | */ | ||
69 | int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath, | ||
70 | struct afs_volume **_volume) | ||
71 | { | ||
72 | struct afs_vlocation *vlocation = NULL; | ||
73 | struct afs_volume *volume = NULL; | ||
74 | afs_voltype_t type; | ||
75 | const char *cellname, *volname, *suffix; | ||
76 | char srvtmask; | ||
77 | int force, ret, loop, cellnamesz, volnamesz; | ||
78 | |||
79 | _enter("%s,,%d,", name, rwpath); | ||
80 | |||
81 | if (!name || (name[0] != '%' && name[0] != '#') || !name[1]) { | ||
82 | printk("kAFS: unparsable volume name\n"); | ||
83 | return -EINVAL; | ||
84 | } | ||
85 | |||
86 | /* determine the type of volume we're looking for */ | ||
87 | force = 0; | ||
88 | type = AFSVL_ROVOL; | ||
89 | |||
90 | if (rwpath || name[0] == '%') { | ||
91 | type = AFSVL_RWVOL; | ||
92 | force = 1; | ||
93 | } | ||
94 | |||
95 | suffix = strrchr(name, '.'); | ||
96 | if (suffix) { | ||
97 | if (strcmp(suffix, ".readonly") == 0) { | ||
98 | type = AFSVL_ROVOL; | ||
99 | force = 1; | ||
100 | } | ||
101 | else if (strcmp(suffix, ".backup") == 0) { | ||
102 | type = AFSVL_BACKVOL; | ||
103 | force = 1; | ||
104 | } | ||
105 | else if (suffix[1] == 0) { | ||
106 | } | ||
107 | else { | ||
108 | suffix = NULL; | ||
109 | } | ||
110 | } | ||
111 | |||
112 | /* split the cell and volume names */ | ||
113 | name++; | ||
114 | volname = strchr(name, ':'); | ||
115 | if (volname) { | ||
116 | cellname = name; | ||
117 | cellnamesz = volname - name; | ||
118 | volname++; | ||
119 | } | ||
120 | else { | ||
121 | volname = name; | ||
122 | cellname = NULL; | ||
123 | cellnamesz = 0; | ||
124 | } | ||
125 | |||
126 | volnamesz = suffix ? suffix - volname : strlen(volname); | ||
127 | |||
128 | _debug("CELL:%*.*s [%p] VOLUME:%*.*s SUFFIX:%s TYPE:%d%s", | ||
129 | cellnamesz, cellnamesz, cellname ?: "", cell, | ||
130 | volnamesz, volnamesz, volname, suffix ?: "-", | ||
131 | type, | ||
132 | force ? " FORCE" : ""); | ||
133 | |||
134 | /* lookup the cell record */ | ||
135 | if (cellname || !cell) { | ||
136 | ret = afs_cell_lookup(cellname, cellnamesz, &cell); | ||
137 | if (ret < 0) { | ||
138 | printk("kAFS: unable to look up cell '%s'\n", | ||
139 | cellname ?: ""); | ||
140 | goto error; | ||
141 | } | ||
142 | } | ||
143 | else { | ||
144 | afs_get_cell(cell); | ||
145 | } | ||
146 | |||
147 | /* lookup the volume location record */ | ||
148 | ret = afs_vlocation_lookup(cell, volname, volnamesz, &vlocation); | ||
149 | if (ret < 0) | ||
150 | goto error; | ||
151 | |||
152 | /* make the final decision on the type we want */ | ||
153 | ret = -ENOMEDIUM; | ||
154 | if (force && !(vlocation->vldb.vidmask & (1 << type))) | ||
155 | goto error; | ||
156 | |||
157 | srvtmask = 0; | ||
158 | for (loop = 0; loop < vlocation->vldb.nservers; loop++) | ||
159 | srvtmask |= vlocation->vldb.srvtmask[loop]; | ||
160 | |||
161 | if (force) { | ||
162 | if (!(srvtmask & (1 << type))) | ||
163 | goto error; | ||
164 | } | ||
165 | else if (srvtmask & AFS_VOL_VTM_RO) { | ||
166 | type = AFSVL_ROVOL; | ||
167 | } | ||
168 | else if (srvtmask & AFS_VOL_VTM_RW) { | ||
169 | type = AFSVL_RWVOL; | ||
170 | } | ||
171 | else { | ||
172 | goto error; | ||
173 | } | ||
174 | |||
175 | down_write(&cell->vl_sem); | ||
176 | |||
177 | /* is the volume already active? */ | ||
178 | if (vlocation->vols[type]) { | ||
179 | /* yes - re-use it */ | ||
180 | volume = vlocation->vols[type]; | ||
181 | afs_get_volume(volume); | ||
182 | goto success; | ||
183 | } | ||
184 | |||
185 | /* create a new volume record */ | ||
186 | _debug("creating new volume record"); | ||
187 | |||
188 | ret = -ENOMEM; | ||
189 | volume = kmalloc(sizeof(struct afs_volume), GFP_KERNEL); | ||
190 | if (!volume) | ||
191 | goto error_up; | ||
192 | |||
193 | memset(volume, 0, sizeof(struct afs_volume)); | ||
194 | atomic_set(&volume->usage, 1); | ||
195 | volume->type = type; | ||
196 | volume->type_force = force; | ||
197 | volume->cell = cell; | ||
198 | volume->vid = vlocation->vldb.vid[type]; | ||
199 | |||
200 | init_rwsem(&volume->server_sem); | ||
201 | |||
202 | /* look up all the applicable server records */ | ||
203 | for (loop = 0; loop < 8; loop++) { | ||
204 | if (vlocation->vldb.srvtmask[loop] & (1 << volume->type)) { | ||
205 | ret = afs_server_lookup( | ||
206 | volume->cell, | ||
207 | &vlocation->vldb.servers[loop], | ||
208 | &volume->servers[volume->nservers]); | ||
209 | if (ret < 0) | ||
210 | goto error_discard; | ||
211 | |||
212 | volume->nservers++; | ||
213 | } | ||
214 | } | ||
215 | |||
216 | /* attach the cache and volume location */ | ||
217 | #ifdef AFS_CACHING_SUPPORT | ||
218 | cachefs_acquire_cookie(vlocation->cache, | ||
219 | &afs_vnode_cache_index_def, | ||
220 | volume, | ||
221 | &volume->cache); | ||
222 | #endif | ||
223 | |||
224 | afs_get_vlocation(vlocation); | ||
225 | volume->vlocation = vlocation; | ||
226 | |||
227 | vlocation->vols[type] = volume; | ||
228 | |||
229 | success: | ||
230 | _debug("kAFS selected %s volume %08x", | ||
231 | afs_voltypes[volume->type], volume->vid); | ||
232 | *_volume = volume; | ||
233 | ret = 0; | ||
234 | |||
235 | /* clean up */ | ||
236 | error_up: | ||
237 | up_write(&cell->vl_sem); | ||
238 | error: | ||
239 | afs_put_vlocation(vlocation); | ||
240 | afs_put_cell(cell); | ||
241 | |||
242 | _leave(" = %d (%p)", ret, volume); | ||
243 | return ret; | ||
244 | |||
245 | error_discard: | ||
246 | up_write(&cell->vl_sem); | ||
247 | |||
248 | for (loop = volume->nservers - 1; loop >= 0; loop--) | ||
249 | afs_put_server(volume->servers[loop]); | ||
250 | |||
251 | kfree(volume); | ||
252 | goto error; | ||
253 | } /* end afs_volume_lookup() */ | ||
254 | |||
255 | /*****************************************************************************/ | ||
256 | /* | ||
257 | * destroy a volume record | ||
258 | */ | ||
259 | void afs_put_volume(struct afs_volume *volume) | ||
260 | { | ||
261 | struct afs_vlocation *vlocation; | ||
262 | int loop; | ||
263 | |||
264 | if (!volume) | ||
265 | return; | ||
266 | |||
267 | _enter("%p", volume); | ||
268 | |||
269 | vlocation = volume->vlocation; | ||
270 | |||
271 | /* sanity check */ | ||
272 | BUG_ON(atomic_read(&volume->usage) <= 0); | ||
273 | |||
274 | /* to prevent a race, the decrement and the dequeue must be effectively | ||
275 | * atomic */ | ||
276 | down_write(&vlocation->cell->vl_sem); | ||
277 | |||
278 | if (likely(!atomic_dec_and_test(&volume->usage))) { | ||
279 | up_write(&vlocation->cell->vl_sem); | ||
280 | _leave(""); | ||
281 | return; | ||
282 | } | ||
283 | |||
284 | vlocation->vols[volume->type] = NULL; | ||
285 | |||
286 | up_write(&vlocation->cell->vl_sem); | ||
287 | |||
288 | /* finish cleaning up the volume */ | ||
289 | #ifdef AFS_CACHING_SUPPORT | ||
290 | cachefs_relinquish_cookie(volume->cache, 0); | ||
291 | #endif | ||
292 | afs_put_vlocation(vlocation); | ||
293 | |||
294 | for (loop = volume->nservers - 1; loop >= 0; loop--) | ||
295 | afs_put_server(volume->servers[loop]); | ||
296 | |||
297 | kfree(volume); | ||
298 | |||
299 | _leave(" [destroyed]"); | ||
300 | } /* end afs_put_volume() */ | ||
301 | |||
302 | /*****************************************************************************/ | ||
303 | /* | ||
304 | * pick a server to use to try accessing this volume | ||
305 | * - returns with an elevated usage count on the server chosen | ||
306 | */ | ||
307 | int afs_volume_pick_fileserver(struct afs_volume *volume, | ||
308 | struct afs_server **_server) | ||
309 | { | ||
310 | struct afs_server *server; | ||
311 | int ret, state, loop; | ||
312 | |||
313 | _enter("%s", volume->vlocation->vldb.name); | ||
314 | |||
315 | down_read(&volume->server_sem); | ||
316 | |||
317 | /* handle the no-server case */ | ||
318 | if (volume->nservers == 0) { | ||
319 | ret = volume->rjservers ? -ENOMEDIUM : -ESTALE; | ||
320 | up_read(&volume->server_sem); | ||
321 | _leave(" = %d [no servers]", ret); | ||
322 | return ret; | ||
323 | } | ||
324 | |||
325 | /* basically, just search the list for the first live server and use | ||
326 | * that */ | ||
327 | ret = 0; | ||
328 | for (loop = 0; loop < volume->nservers; loop++) { | ||
329 | server = volume->servers[loop]; | ||
330 | state = server->fs_state; | ||
331 | |||
332 | switch (state) { | ||
333 | /* found an apparently healthy server */ | ||
334 | case 0: | ||
335 | afs_get_server(server); | ||
336 | up_read(&volume->server_sem); | ||
337 | *_server = server; | ||
338 | _leave(" = 0 (picked %08x)", | ||
339 | ntohl(server->addr.s_addr)); | ||
340 | return 0; | ||
341 | |||
342 | case -ENETUNREACH: | ||
343 | if (ret == 0) | ||
344 | ret = state; | ||
345 | break; | ||
346 | |||
347 | case -EHOSTUNREACH: | ||
348 | if (ret == 0 || | ||
349 | ret == -ENETUNREACH) | ||
350 | ret = state; | ||
351 | break; | ||
352 | |||
353 | case -ECONNREFUSED: | ||
354 | if (ret == 0 || | ||
355 | ret == -ENETUNREACH || | ||
356 | ret == -EHOSTUNREACH) | ||
357 | ret = state; | ||
358 | break; | ||
359 | |||
360 | default: | ||
361 | case -EREMOTEIO: | ||
362 | if (ret == 0 || | ||
363 | ret == -ENETUNREACH || | ||
364 | ret == -EHOSTUNREACH || | ||
365 | ret == -ECONNREFUSED) | ||
366 | ret = state; | ||
367 | break; | ||
368 | } | ||
369 | } | ||
370 | |||
371 | /* no available servers | ||
372 | * - TODO: handle the no active servers case better | ||
373 | */ | ||
374 | up_read(&volume->server_sem); | ||
375 | _leave(" = %d", ret); | ||
376 | return ret; | ||
377 | } /* end afs_volume_pick_fileserver() */ | ||
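The switch above ranks the failure codes so that the value eventually returned is the most specific error seen on any server: -ENETUNREACH is overridden by -EHOSTUNREACH, which is overridden by -ECONNREFUSED, which is overridden by -EREMOTEIO or any other state (the default label is deliberately grouped with -EREMOTEIO so unanticipated states get that lowest-precedence treatment). A hedged restatement of the same precedence rule; err_rank() and err_merge() are illustrative helpers, not functions in this code:

/* Illustrative restatement of the error precedence used above: a more
 * specific failure overrides a vaguer one already recorded in 'ret'. */
#include <errno.h>

static int err_rank(int err)
{
	switch (err) {
	case 0:			return 0;	/* nothing recorded yet */
	case -ENETUNREACH:	return 1;	/* vaguest: network unreachable */
	case -EHOSTUNREACH:	return 2;
	case -ECONNREFUSED:	return 3;
	default:		return 4;	/* -EREMOTEIO and anything else */
	}
}

/* keep whichever error says more about what actually went wrong */
static int err_merge(int so_far, int latest)
{
	return err_rank(latest) > err_rank(so_far) ? latest : so_far;
}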
378 | |||
379 | /*****************************************************************************/ | ||
380 | /* | ||
381 | * release a server after use | ||
382 | * - releases the ref on the server struct that was acquired by picking | ||
383 | * - records result of using a particular server to access a volume | ||
384 | * - returns 0 if the caller should try another server, 1 if the result should stand (success or final error) | ||
385 | */ | ||
386 | int afs_volume_release_fileserver(struct afs_volume *volume, | ||
387 | struct afs_server *server, | ||
388 | int result) | ||
389 | { | ||
390 | unsigned loop; | ||
391 | |||
392 | _enter("%s,%08x,%d", | ||
393 | volume->vlocation->vldb.name, ntohl(server->addr.s_addr), | ||
394 | result); | ||
395 | |||
396 | switch (result) { | ||
397 | /* success */ | ||
398 | case 0: | ||
399 | server->fs_act_jif = jiffies; | ||
400 | break; | ||
401 | |||
402 | /* the fileserver denied all knowledge of the volume */ | ||
403 | case -ENOMEDIUM: | ||
404 | server->fs_act_jif = jiffies; | ||
405 | down_write(&volume->server_sem); | ||
406 | |||
407 | /* first, find where the server is in the active list (if it | ||
408 | * is still there) */ | ||
409 | for (loop = 0; loop < volume->nservers; loop++) | ||
410 | if (volume->servers[loop] == server) | ||
411 | goto present; | ||
412 | |||
413 | /* no longer there - may have been discarded by another op */ | ||
414 | goto try_next_server_upw; | ||
415 | |||
416 | present: | ||
417 | volume->nservers--; | ||
418 | memmove(&volume->servers[loop], | ||
419 | &volume->servers[loop + 1], | ||
420 | sizeof(volume->servers[loop]) * | ||
421 | (volume->nservers - loop)); | ||
422 | volume->servers[volume->nservers] = NULL; | ||
423 | afs_put_server(server); | ||
424 | volume->rjservers++; | ||
425 | |||
426 | if (volume->nservers > 0) | ||
427 | /* another server might acknowledge its existence */ | ||
428 | goto try_next_server_upw; | ||
429 | |||
430 | /* handle the case where all the fileservers have rejected the | ||
431 | * volume | ||
432 | * - TODO: try asking the fileservers for volume information | ||
433 | * - TODO: contact the VL server again to see if the volume is | ||
434 | * no longer registered | ||
435 | */ | ||
436 | up_write(&volume->server_sem); | ||
437 | afs_put_server(server); | ||
438 | _leave(" [completely rejected]"); | ||
439 | return 1; | ||
440 | |||
441 | /* problem reaching the server */ | ||
442 | case -ENETUNREACH: | ||
443 | case -EHOSTUNREACH: | ||
444 | case -ECONNREFUSED: | ||
445 | case -ETIMEDOUT: | ||
446 | case -EREMOTEIO: | ||
447 | /* mark the server as dead | ||
448 | * TODO: vary dead timeout depending on error | ||
449 | */ | ||
450 | spin_lock(&server->fs_lock); | ||
451 | if (!server->fs_state) { | ||
452 | server->fs_dead_jif = jiffies + HZ * 10; | ||
453 | server->fs_state = result; | ||
454 | printk("kAFS: SERVER DEAD state=%d\n", result); | ||
455 | } | ||
456 | spin_unlock(&server->fs_lock); | ||
457 | goto try_next_server; | ||
458 | |||
459 | /* miscellaneous error */ | ||
460 | default: | ||
461 | server->fs_act_jif = jiffies; | ||
462 | case -ENOMEM: | ||
463 | case -ENONET: | ||
464 | break; | ||
465 | } | ||
466 | |||
467 | /* tell the caller to accept the result */ | ||
468 | afs_put_server(server); | ||
469 | _leave(""); | ||
470 | return 1; | ||
471 | |||
472 | /* tell the caller to loop around and try the next server */ | ||
473 | try_next_server_upw: | ||
474 | up_write(&volume->server_sem); | ||
475 | try_next_server: | ||
476 | afs_put_server(server); | ||
477 | _leave(" [try next server]"); | ||
478 | return 0; | ||
479 | |||
480 | } /* end afs_volume_release_fileserver() */ | ||
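Taken together, afs_volume_pick_fileserver() and afs_volume_release_fileserver() define a retry protocol for callers: pick a server (which takes a ref), attempt the operation, then hand the result back to release, retrying while release returns 0 and accepting the result when it returns 1. (Note also that in the miscellaneous-error case the default label records fs_act_jif and then falls through to the break shared with -ENOMEM/-ENONET, which deliberately skip the timestamp update.) A hedged sketch of that calling convention, where do_operation() is a hypothetical stand-in for the real fileserver RPC:

#include "volume.h"

/* hypothetical stand-in for a real fileserver RPC; not a function
 * defined anywhere in this code */
static int do_operation(struct afs_volume *volume, struct afs_server *server);

static int afs_volume_do_op(struct afs_volume *volume)
{
	struct afs_server *server;
	int ret;

	do {
		/* pick returns with a ref held on the chosen server */
		ret = afs_volume_pick_fileserver(volume, &server);
		if (ret < 0)
			return ret;	/* no usable servers at all */

		ret = do_operation(volume, server);

		/* release drops the ref and returns 0 if another server
		 * is worth trying, or 1 if ret should be accepted */
	} while (afs_volume_release_fileserver(volume, server, ret) == 0);

	return ret;
}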
481 | |||
482 | /*****************************************************************************/ | ||
483 | /* | ||
484 | * match a volume hash record stored in the cache | ||
485 | */ | ||
486 | #ifdef AFS_CACHING_SUPPORT | ||
487 | static cachefs_match_val_t afs_volume_cache_match(void *target, | ||
488 | const void *entry) | ||
489 | { | ||
490 | const struct afs_cache_vhash *vhash = entry; | ||
491 | struct afs_volume *volume = target; | ||
492 | |||
493 | _enter("{%u},{%u}", volume->type, vhash->vtype); | ||
494 | |||
495 | if (volume->type == vhash->vtype) { | ||
496 | _leave(" = SUCCESS"); | ||
497 | return CACHEFS_MATCH_SUCCESS; | ||
498 | } | ||
499 | |||
500 | _leave(" = FAILED"); | ||
501 | return CACHEFS_MATCH_FAILED; | ||
502 | } /* end afs_volume_cache_match() */ | ||
503 | #endif | ||
504 | |||
505 | /*****************************************************************************/ | ||
506 | /* | ||
507 | * update a volume hash record stored in the cache | ||
508 | */ | ||
509 | #ifdef AFS_CACHING_SUPPORT | ||
510 | static void afs_volume_cache_update(void *source, void *entry) | ||
511 | { | ||
512 | struct afs_cache_vhash *vhash = entry; | ||
513 | struct afs_volume *volume = source; | ||
514 | |||
515 | _enter(""); | ||
516 | |||
517 | vhash->vtype = volume->type; | ||
518 | |||
519 | } /* end afs_volume_cache_update() */ | ||
520 | #endif | ||
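The two #ifdef'd callbacks above form the match/update pair that the optional cachefs index support would use: match() decides whether a stored hash record corresponds to a given volume (here, purely by volume type), and update() (re)writes the record's contents. A rough user-space analogue of how such a callback pair is typically driven, with illustrative names only and no claim to mirror the cachefs API itself:

/* Illustrative analogue (not the cachefs API): a generic index walks its
 * stored entries, asks the owner's match() whether an entry describes the
 * object, and uses update() to write a fresh entry when none matches. */
#include <stdbool.h>
#include <stddef.h>

struct index_ops {
	bool (*match)(void *target, const void *entry);
	void (*update)(void *source, void *entry);
};

/* returns the slot of an existing match, or fills and returns a new slot */
static unsigned index_lookup_or_add(void *object, void *entries, size_t esize,
				    unsigned nr_used,
				    const struct index_ops *ops)
{
	unsigned i;

	for (i = 0; i < nr_used; i++)
		if (ops->match(object, (char *)entries + i * esize))
			return i;		/* existing record found */

	ops->update(object, (char *)entries + nr_used * esize);
	return nr_used;				/* new record appended */
}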
diff --git a/fs/afs/volume.h b/fs/afs/volume.h new file mode 100644 index 000000000000..1e691889c4c9 --- /dev/null +++ b/fs/afs/volume.h | |||
@@ -0,0 +1,142 @@ | |||
1 | /* volume.h: AFS volume management | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_AFS_VOLUME_H | ||
13 | #define _LINUX_AFS_VOLUME_H | ||
14 | |||
15 | #include "types.h" | ||
16 | #include "fsclient.h" | ||
17 | #include "kafstimod.h" | ||
18 | #include "kafsasyncd.h" | ||
19 | #include "cache.h" | ||
20 | |||
21 | #define __packed __attribute__((packed)) | ||
22 | |||
23 | typedef enum { | ||
24 | AFS_VLUPD_SLEEP, /* sleeping waiting for update timer to fire */ | ||
25 | AFS_VLUPD_PENDING, /* on pending queue */ | ||
26 | AFS_VLUPD_INPROGRESS, /* op in progress */ | ||
27 | AFS_VLUPD_BUSYSLEEP, /* sleeping because server returned EBUSY */ | ||
28 | |||
29 | } __attribute__((packed)) afs_vlocation_upd_t; | ||
30 | |||
31 | /*****************************************************************************/ | ||
32 | /* | ||
33 | * entry in the cached volume location catalogue | ||
34 | */ | ||
35 | struct afs_cache_vlocation | ||
36 | { | ||
37 | uint8_t name[64]; /* volume name (lowercase, padded with NULs) */ | ||
38 | uint8_t nservers; /* number of entries used in servers[] */ | ||
39 | uint8_t vidmask; /* voltype mask for vid[] */ | ||
40 | uint8_t srvtmask[8]; /* voltype masks for servers[] */ | ||
41 | #define AFS_VOL_VTM_RW 0x01 /* R/W version of the volume is available (on this server) */ | ||
42 | #define AFS_VOL_VTM_RO 0x02 /* R/O version of the volume is available (on this server) */ | ||
43 | #define AFS_VOL_VTM_BAK 0x04 /* backup version of the volume is available (on this server) */ | ||
44 | |||
45 | afs_volid_t vid[3]; /* volume IDs for R/W, R/O and Bak volumes */ | ||
46 | struct in_addr servers[8]; /* fileserver addresses */ | ||
47 | time_t rtime; /* last retrieval time */ | ||
48 | }; | ||
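vidmask and srvtmask[] are small bitmasks over the three volume variants, using the AFS_VOL_VTM_* flags defined just above: bit 0 is R/W, bit 1 is R/O, bit 2 is backup, matching the indexing of vid[]. A hedged decoder sketch, assuming the structure definition above is visible via volume.h (dump_vldb() and vtm_name are illustrative, not part of this code):

/* Illustrative decoder for the bitmask fields above: vidmask says which
 * of vid[0..2] are valid, and srvtmask[i] says which variants server i
 * holds.  Assumes struct afs_cache_vlocation from volume.h is in scope. */
#include <stdio.h>
#include "volume.h"

static const char *vtm_name[3] = { "R/W", "R/O", "backup" };

static void dump_vldb(const struct afs_cache_vlocation *vldb)
{
	unsigned s, t;

	for (t = 0; t < 3; t++)
		if (vldb->vidmask & (1 << t))
			printf("%s volume ID %u\n",
			       vtm_name[t], (unsigned) vldb->vid[t]);

	for (s = 0; s < vldb->nservers; s++)
		for (t = 0; t < 3; t++)
			if (vldb->srvtmask[s] & (1 << t))
				printf("server %u holds the %s variant\n",
				       s, vtm_name[t]);
}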
49 | |||
50 | #ifdef AFS_CACHING_SUPPORT | ||
51 | extern struct cachefs_index_def afs_vlocation_cache_index_def; | ||
52 | #endif | ||
53 | |||
54 | /*****************************************************************************/ | ||
55 | /* | ||
56 | * volume -> vnode hash table entry | ||
57 | */ | ||
58 | struct afs_cache_vhash | ||
59 | { | ||
60 | afs_voltype_t vtype; /* which volume variation */ | ||
61 | uint8_t hash_bucket; /* which hash bucket this represents */ | ||
62 | } __attribute__((packed)); | ||
63 | |||
64 | #ifdef AFS_CACHING_SUPPORT | ||
65 | extern struct cachefs_index_def afs_volume_cache_index_def; | ||
66 | #endif | ||
67 | |||
68 | /*****************************************************************************/ | ||
69 | /* | ||
70 | * AFS volume location record | ||
71 | */ | ||
72 | struct afs_vlocation | ||
73 | { | ||
74 | atomic_t usage; | ||
75 | struct list_head link; /* link in cell volume location list */ | ||
76 | struct afs_timer timeout; /* decaching timer */ | ||
77 | struct afs_cell *cell; /* cell to which volume belongs */ | ||
78 | #ifdef AFS_CACHING_SUPPORT | ||
79 | struct cachefs_cookie *cache; /* caching cookie */ | ||
80 | #endif | ||
81 | struct afs_cache_vlocation vldb; /* volume information DB record */ | ||
82 | struct afs_volume *vols[3]; /* volume access records (indexed by type) */ | ||
83 | rwlock_t lock; /* access lock */ | ||
84 | unsigned long read_jif; /* time at which last read from vlserver */ | ||
85 | struct afs_timer upd_timer; /* update timer */ | ||
86 | struct afs_async_op upd_op; /* update operation */ | ||
87 | afs_vlocation_upd_t upd_state; /* update state */ | ||
88 | unsigned short upd_first_svix; /* first server index during update */ | ||
89 | unsigned short upd_curr_svix; /* current server index during update */ | ||
90 | unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */ | ||
91 | unsigned short upd_busy_cnt; /* EBUSY count during update */ | ||
92 | unsigned short valid; /* T if valid */ | ||
93 | }; | ||
94 | |||
95 | extern int afs_vlocation_lookup(struct afs_cell *cell, | ||
96 | const char *name, | ||
97 | unsigned namesz, | ||
98 | struct afs_vlocation **_vlocation); | ||
99 | |||
100 | #define afs_get_vlocation(V) do { atomic_inc(&(V)->usage); } while(0) | ||
101 | |||
102 | extern void afs_put_vlocation(struct afs_vlocation *vlocation); | ||
103 | extern void afs_vlocation_do_timeout(struct afs_vlocation *vlocation); | ||
104 | |||
105 | /*****************************************************************************/ | ||
106 | /* | ||
107 | * AFS volume access record | ||
108 | */ | ||
109 | struct afs_volume | ||
110 | { | ||
111 | atomic_t usage; | ||
112 | struct afs_cell *cell; /* cell to which this volume belongs (unrefd ptr) */ | ||
113 | struct afs_vlocation *vlocation; /* volume location */ | ||
114 | #ifdef AFS_CACHING_SUPPORT | ||
115 | struct cachefs_cookie *cache; /* caching cookie */ | ||
116 | #endif | ||
117 | afs_volid_t vid; /* volume ID */ | ||
118 | afs_voltype_t __packed type; /* type of volume */ | ||
119 | char type_force; /* force volume type (suppress R/O -> R/W) */ | ||
120 | unsigned short nservers; /* number of server slots filled */ | ||
121 | unsigned short rjservers; /* number of servers discarded due to -ENOMEDIUM */ | ||
122 | struct afs_server *servers[8]; /* servers on which volume resides (ordered) */ | ||
123 | struct rw_semaphore server_sem; /* lock for accessing current server */ | ||
124 | }; | ||
125 | |||
126 | extern int afs_volume_lookup(const char *name, | ||
127 | struct afs_cell *cell, | ||
128 | int rwpath, | ||
129 | struct afs_volume **_volume); | ||
130 | |||
131 | #define afs_get_volume(V) do { atomic_inc(&(V)->usage); } while(0) | ||
132 | |||
133 | extern void afs_put_volume(struct afs_volume *volume); | ||
134 | |||
135 | extern int afs_volume_pick_fileserver(struct afs_volume *volume, | ||
136 | struct afs_server **_server); | ||
137 | |||
138 | extern int afs_volume_release_fileserver(struct afs_volume *volume, | ||
139 | struct afs_server *server, | ||
140 | int result); | ||
141 | |||
142 | #endif /* _LINUX_AFS_VOLUME_H */ | ||
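One small idiom worth noting in this header: afs_get_vlocation() and afs_get_volume() wrap their bodies in do { ... } while(0) so that each macro expands to exactly one statement and composes with an unbraced if/else the way a function call would. A self-contained illustration of why the wrapper matters (the GET_REF_* macros and struct obj are illustrative only):

/* Illustrative user-space demo of the do { } while (0) macro idiom used
 * by afs_get_volume()/afs_get_vlocation(). */
#define GET_REF_BAD(p)	{ (p)->usage++; }
#define GET_REF_GOOD(p)	do { (p)->usage++; } while (0)

struct obj { int usage; };

static void take_ref_if(struct obj *o, int cond)
{
	if (cond)
		GET_REF_GOOD(o);	/* one statement; the ';' belongs to it */
	else
		o->usage = 0;		/* with GET_REF_BAD the ';' after the
					 * block would terminate the 'if' and
					 * leave this 'else' dangling */
}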