author      David Howells <dhowells@redhat.com>    2007-04-26 18:55:03 -0400
committer   David S. Miller <davem@davemloft.net>  2007-04-26 18:55:03 -0400
commit      08e0e7c82eeadec6f4871a386b86bf0f0fbcb4eb (patch)
tree        1c4f7e91e20e56ff2ec755e988a6ee828b1a21c0
parent      651350d10f93bed7003c9a66e24cf25e0f8eed3d (diff)
[AF_RXRPC]: Make the in-kernel AFS filesystem use AF_RXRPC.
Make the in-kernel AFS filesystem use AF_RXRPC instead of the old RxRPC code.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
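
As context for the per-file changes below: all RxRPC traffic now goes through a single AF_RXRPC kernel socket opened by the new fs/afs/rxrpc.c (its contents are not reproduced in this excerpt). The sketch below shows roughly what that setup involves, using the AF_RXRPC kernel socket interface introduced by the parent commit; the function and variable names are illustrative, not necessarily the ones used in the file.

#include <linux/net.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/rxrpc.h>	/* struct sockaddr_rxrpc */
#include <net/sock.h>
#include "afs_cm.h"		/* AFS_CM_PORT, CM_SERVICE (added below) */

static struct socket *example_afs_socket;	/* illustrative name */

static int example_open_afs_socket(void)
{
	struct sockaddr_rxrpc srx;
	int ret;

	/* one datagram-style rxrpc socket carries all of the client's calls */
	ret = sock_create_kern(AF_RXRPC, SOCK_DGRAM, PF_INET,
			       &example_afs_socket);
	if (ret < 0)
		return ret;

	/* binding the cache-manager service ID also makes this a server
	 * socket, so fileservers can call back in to break callbacks */
	memset(&srx, 0, sizeof(srx));
	srx.srx_family			= AF_RXRPC;
	srx.srx_service			= CM_SERVICE;
	srx.transport_type		= SOCK_DGRAM;
	srx.transport_len		= sizeof(srx.transport.sin);
	srx.transport.sin.sin_family	= AF_INET;
	srx.transport.sin.sin_port	= htons(AFS_CM_PORT);

	ret = kernel_bind(example_afs_socket, (struct sockaddr *) &srx,
			  sizeof(srx));
	if (ret < 0)
		sock_release(example_afs_socket);
	return ret;
}
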
-rw-r--r--  fs/Kconfig | 13
-rw-r--r--  fs/afs/Makefile | 3
-rw-r--r--  fs/afs/afs.h (renamed from fs/afs/types.h) | 22
-rw-r--r--  fs/afs/afs_cm.h | 28
-rw-r--r--  fs/afs/afs_fs.h (renamed from fs/afs/errors.h) | 31
-rw-r--r--  fs/afs/afs_vl.h (renamed from fs/afs/vlclient.h) | 41
-rw-r--r--  fs/afs/cache.c | 256
-rw-r--r--  fs/afs/callback.c | 469
-rw-r--r--  fs/afs/cell.c | 344
-rw-r--r--  fs/afs/cell.h | 70
-rw-r--r--  fs/afs/cmservice.c | 781
-rw-r--r--  fs/afs/cmservice.h | 28
-rw-r--r--  fs/afs/dir.c | 286
-rw-r--r--  fs/afs/file.c | 39
-rw-r--r--  fs/afs/fsclient.c | 1042
-rw-r--r--  fs/afs/fsclient.h | 54
-rw-r--r--  fs/afs/inode.c | 107
-rw-r--r--  fs/afs/internal.h | 599
-rw-r--r--  fs/afs/kafsasyncd.c | 247
-rw-r--r--  fs/afs/kafsasyncd.h | 50
-rw-r--r--  fs/afs/kafstimod.c | 194
-rw-r--r--  fs/afs/kafstimod.h | 45
-rw-r--r--  fs/afs/main.c | 135
-rw-r--r--  fs/afs/misc.c | 11
-rw-r--r--  fs/afs/mntpt.c | 106
-rw-r--r--  fs/afs/mount.h | 23
-rw-r--r--  fs/afs/proc.c | 73
-rw-r--r--  fs/afs/rxrpc.c | 666
-rw-r--r--  fs/afs/server.c | 624
-rw-r--r--  fs/afs/server.h | 97
-rw-r--r--  fs/afs/super.c | 106
-rw-r--r--  fs/afs/super.h | 39
-rw-r--r--  fs/afs/transport.h | 21
-rw-r--r--  fs/afs/vlclient.c | 709
-rw-r--r--  fs/afs/vlocation.c | 1153
-rw-r--r--  fs/afs/vnode.c | 388
-rw-r--r--  fs/afs/vnode.h | 84
-rw-r--r--  fs/afs/volume.c | 141
-rw-r--r--  fs/afs/volume.h | 126
39 files changed, 4011 insertions, 5240 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index 3c4886b849f..075c9997ddc 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -2019,7 +2019,7 @@ config CODA_FS_OLD_API | |||
2019 | config AFS_FS | 2019 | config AFS_FS |
2020 | tristate "Andrew File System support (AFS) (EXPERIMENTAL)" | 2020 | tristate "Andrew File System support (AFS) (EXPERIMENTAL)" |
2021 | depends on INET && EXPERIMENTAL | 2021 | depends on INET && EXPERIMENTAL |
2022 | select RXRPC | 2022 | select AF_RXRPC |
2023 | help | 2023 | help |
2024 | If you say Y here, you will get an experimental Andrew File System | 2024 | If you say Y here, you will get an experimental Andrew File System |
2025 | driver. It currently only supports unsecured read-only AFS access. | 2025 | driver. It currently only supports unsecured read-only AFS access. |
@@ -2028,6 +2028,17 @@ config AFS_FS | |||
2028 | 2028 | ||
2029 | If unsure, say N. | 2029 | If unsure, say N. |
2030 | 2030 | ||
2031 | config AFS_DEBUG | ||
2032 | bool "AFS dynamic debugging" | ||
2033 | depends on AFS_FS | ||
2034 | help | ||
2035 | Say Y here to make runtime controllable debugging messages appear. | ||
2036 | |||
2037 | See <file:Documentation/filesystems/afs.txt> for more information. | ||
2038 | |||
2039 | If unsure, say N. | ||
2040 | |||
2041 | |||
2031 | config RXRPC | 2042 | config RXRPC |
2032 | tristate | 2043 | tristate |
2033 | 2044 | ||
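
The new AFS_DEBUG option only compiles the debugging statements in; whether they actually print is decided at run time. Roughly, the pattern is the one sketched below. The real macro and parameter names live in fs/afs/internal.h and fs/afs/main.c, which are not part of this excerpt, so everything here is illustrative.

#include <linux/kernel.h>
#include <linux/module.h>

/* debugging mask, changeable at run time via
 * /sys/module/kafs/parameters/debug (illustrative name and layout) */
static unsigned example_afs_debug;
module_param_named(debug, example_afs_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "AFS debugging mask");

#define EXAMPLE_DEBUG_GENERAL	0x01	/* hypothetical class bit */

#define _debug(fmt, ...)						\
do {									\
	if (example_afs_debug & EXAMPLE_DEBUG_GENERAL)			\
		printk(KERN_DEBUG "AFS: " fmt "\n", ##__VA_ARGS__);	\
} while (0)
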
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
index 8e719737967..66bdc219ccd 100644
--- a/fs/afs/Makefile
+++ b/fs/afs/Makefile
@@ -10,12 +10,11 @@ kafs-objs := \ | |||
10 | file.o \ | 10 | file.o \ |
11 | fsclient.o \ | 11 | fsclient.o \ |
12 | inode.o \ | 12 | inode.o \ |
13 | kafsasyncd.o \ | ||
14 | kafstimod.o \ | ||
15 | main.o \ | 13 | main.o \ |
16 | misc.o \ | 14 | misc.o \ |
17 | mntpt.o \ | 15 | mntpt.o \ |
18 | proc.o \ | 16 | proc.o \ |
17 | rxrpc.o \ | ||
19 | server.o \ | 18 | server.o \ |
20 | super.o \ | 19 | super.o \ |
21 | vlclient.o \ | 20 | vlclient.o \ |
diff --git a/fs/afs/types.h b/fs/afs/afs.h
index db2b5dc9ff4..b9d2d2ceaf4 100644
--- a/fs/afs/types.h
+++ b/fs/afs/afs.h
@@ -1,6 +1,6 @@ | |||
1 | /* AFS types | 1 | /* AFS common types |
2 | * | 2 | * |
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -9,10 +9,10 @@ | |||
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #ifndef AFS_TYPES_H | 12 | #ifndef AFS_H |
13 | #define AFS_TYPES_H | 13 | #define AFS_H |
14 | 14 | ||
15 | #include <rxrpc/types.h> | 15 | #include <linux/in.h> |
16 | 16 | ||
17 | typedef unsigned afs_volid_t; | 17 | typedef unsigned afs_volid_t; |
18 | typedef unsigned afs_vnodeid_t; | 18 | typedef unsigned afs_vnodeid_t; |
@@ -31,9 +31,6 @@ typedef enum { | |||
31 | AFS_FTYPE_SYMLINK = 3, | 31 | AFS_FTYPE_SYMLINK = 3, |
32 | } afs_file_type_t; | 32 | } afs_file_type_t; |
33 | 33 | ||
34 | struct afs_cell; | ||
35 | struct afs_vnode; | ||
36 | |||
37 | /* | 34 | /* |
38 | * AFS file identifier | 35 | * AFS file identifier |
39 | */ | 36 | */ |
@@ -54,14 +51,13 @@ typedef enum { | |||
54 | } afs_callback_type_t; | 51 | } afs_callback_type_t; |
55 | 52 | ||
56 | struct afs_callback { | 53 | struct afs_callback { |
57 | struct afs_server *server; /* server that made the promise */ | ||
58 | struct afs_fid fid; /* file identifier */ | 54 | struct afs_fid fid; /* file identifier */ |
59 | unsigned version; /* callback version */ | 55 | unsigned version; /* callback version */ |
60 | unsigned expiry; /* time at which expires */ | 56 | unsigned expiry; /* time at which expires */ |
61 | afs_callback_type_t type; /* type of callback */ | 57 | afs_callback_type_t type; /* type of callback */ |
62 | }; | 58 | }; |
63 | 59 | ||
64 | #define AFSCBMAX 50 | 60 | #define AFSCBMAX 50 /* maximum callbacks transferred per bulk op */ |
65 | 61 | ||
66 | /* | 62 | /* |
67 | * AFS volume information | 63 | * AFS volume information |
@@ -70,7 +66,7 @@ struct afs_volume_info { | |||
70 | afs_volid_t vid; /* volume ID */ | 66 | afs_volid_t vid; /* volume ID */ |
71 | afs_voltype_t type; /* type of this volume */ | 67 | afs_voltype_t type; /* type of this volume */ |
72 | afs_volid_t type_vids[5]; /* volume ID's for possible types for this vol */ | 68 | afs_volid_t type_vids[5]; /* volume ID's for possible types for this vol */ |
73 | 69 | ||
74 | /* list of fileservers serving this volume */ | 70 | /* list of fileservers serving this volume */ |
75 | size_t nservers; /* number of entries used in servers[] */ | 71 | size_t nservers; /* number of entries used in servers[] */ |
76 | struct { | 72 | struct { |
@@ -88,7 +84,7 @@ struct afs_file_status { | |||
88 | afs_file_type_t type; /* file type */ | 84 | afs_file_type_t type; /* file type */ |
89 | unsigned nlink; /* link count */ | 85 | unsigned nlink; /* link count */ |
90 | size_t size; /* file size */ | 86 | size_t size; /* file size */ |
91 | afs_dataversion_t version; /* current data version */ | 87 | afs_dataversion_t data_version; /* current data version */ |
92 | unsigned author; /* author ID */ | 88 | unsigned author; /* author ID */ |
93 | unsigned owner; /* owner ID */ | 89 | unsigned owner; /* owner ID */ |
94 | unsigned caller_access; /* access rights for authenticated caller */ | 90 | unsigned caller_access; /* access rights for authenticated caller */ |
@@ -106,4 +102,4 @@ struct afs_volsync { | |||
106 | time_t creation; /* volume creation time */ | 102 | time_t creation; /* volume creation time */ |
107 | }; | 103 | }; |
108 | 104 | ||
109 | #endif /* AFS_TYPES_H */ | 105 | #endif /* AFS_H */ |
diff --git a/fs/afs/afs_cm.h b/fs/afs/afs_cm.h
new file mode 100644
index 00000000000..7c8e3d43c8e
--- /dev/null
+++ b/fs/afs/afs_cm.h
@@ -0,0 +1,28 @@ | |||
1 | /* AFS Cache Manager definitions | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef AFS_CM_H | ||
13 | #define AFS_CM_H | ||
14 | |||
15 | #define AFS_CM_PORT 7001 /* AFS file server port */ | ||
16 | #define CM_SERVICE 1 /* AFS File Service ID */ | ||
17 | |||
18 | enum AFS_CM_Operations { | ||
19 | CBCallBack = 204, /* break callback promises */ | ||
20 | CBInitCallBackState = 205, /* initialise callback state */ | ||
21 | CBProbe = 206, /* probe client */ | ||
22 | CBGetLock = 207, /* get contents of CM lock table */ | ||
23 | CBGetCE = 208, /* get cache file description */ | ||
24 | CBGetXStatsVersion = 209, /* get version of extended statistics */ | ||
25 | CBGetXStats = 210, /* get contents of extended statistics data */ | ||
26 | }; | ||
27 | |||
28 | #endif /* AFS_FS_H */ | ||
diff --git a/fs/afs/errors.h b/fs/afs/afs_fs.h
index bcc0a3309e7..fd385954f21 100644
--- a/fs/afs/errors.h
+++ b/fs/afs/afs_fs.h
@@ -1,6 +1,6 @@ | |||
1 | /* AFS abort/error codes | 1 | /* AFS File Service definitions |
2 | * | 2 | * |
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -9,15 +9,22 @@ | |||
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #ifndef AFS_ERRORS_H | 12 | #ifndef AFS_FS_H |
13 | #define AFS_ERRORS_H | 13 | #define AFS_FS_H |
14 | 14 | ||
15 | #include "types.h" | 15 | #define AFS_FS_PORT 7000 /* AFS file server port */ |
16 | #define FS_SERVICE 1 /* AFS File Service ID */ | ||
16 | 17 | ||
17 | /* | 18 | enum AFS_FS_Operations { |
18 | * file server abort codes | 19 | FSFETCHSTATUS = 132, /* AFS Fetch file status */ |
19 | */ | 20 | FSFETCHDATA = 130, /* AFS Fetch file data */ |
20 | typedef enum { | 21 | FSGIVEUPCALLBACKS = 147, /* AFS Discard callback promises */ |
22 | FSGETVOLUMEINFO = 148, /* AFS Get root volume information */ | ||
23 | FSGETROOTVOLUME = 151, /* AFS Get root volume name */ | ||
24 | FSLOOKUP = 161 /* AFS lookup file in directory */ | ||
25 | }; | ||
26 | |||
27 | enum AFS_FS_Errors { | ||
21 | VSALVAGE = 101, /* volume needs salvaging */ | 28 | VSALVAGE = 101, /* volume needs salvaging */ |
22 | VNOVNODE = 102, /* no such file/dir (vnode) */ | 29 | VNOVNODE = 102, /* no such file/dir (vnode) */ |
23 | VNOVOL = 103, /* no such volume or volume unavailable */ | 30 | VNOVOL = 103, /* no such volume or volume unavailable */ |
@@ -29,8 +36,6 @@ typedef enum { | |||
29 | VOVERQUOTA = 109, /* volume's maximum quota exceeded */ | 36 | VOVERQUOTA = 109, /* volume's maximum quota exceeded */ |
30 | VBUSY = 110, /* volume is temporarily unavailable */ | 37 | VBUSY = 110, /* volume is temporarily unavailable */ |
31 | VMOVED = 111, /* volume moved to new server - ask this FS where */ | 38 | VMOVED = 111, /* volume moved to new server - ask this FS where */ |
32 | } afs_rxfs_abort_t; | 39 | }; |
33 | |||
34 | extern int afs_abort_to_error(int); | ||
35 | 40 | ||
36 | #endif /* AFS_ERRORS_H */ | 41 | #endif /* AFS_FS_H */ |
diff --git a/fs/afs/vlclient.h b/fs/afs/afs_vl.h
index 11dc10fe300..8bbefe009ed 100644
--- a/fs/afs/vlclient.h
+++ b/fs/afs/afs_vl.h
@@ -1,6 +1,6 @@ | |||
1 | /* Volume Location Service client interface | 1 | /* AFS Volume Location Service client interface |
2 | * | 2 | * |
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -9,10 +9,19 @@ | |||
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #ifndef AFS_VLCLIENT_H | 12 | #ifndef AFS_VL_H |
13 | #define AFS_VLCLIENT_H | 13 | #define AFS_VL_H |
14 | 14 | ||
15 | #include "types.h" | 15 | #include "afs.h" |
16 | |||
17 | #define AFS_VL_PORT 7003 /* volume location service port */ | ||
18 | #define VL_SERVICE 52 /* RxRPC service ID for the Volume Location service */ | ||
19 | |||
20 | enum AFSVL_Operations { | ||
21 | VLGETENTRYBYID = 503, /* AFS Get Cache Entry By ID operation ID */ | ||
22 | VLGETENTRYBYNAME = 504, /* AFS Get Cache Entry By Name operation ID */ | ||
23 | VLPROBE = 514, /* AFS Probe Volume Location Service operation ID */ | ||
24 | }; | ||
16 | 25 | ||
17 | enum AFSVL_Errors { | 26 | enum AFSVL_Errors { |
18 | AFSVL_IDEXIST = 363520, /* Volume Id entry exists in vl database */ | 27 | AFSVL_IDEXIST = 363520, /* Volume Id entry exists in vl database */ |
@@ -40,14 +49,16 @@ enum AFSVL_Errors { | |||
40 | AFSVL_BADVOLOPER = 363542, /* Bad volume operation code */ | 49 | AFSVL_BADVOLOPER = 363542, /* Bad volume operation code */ |
41 | AFSVL_BADRELLOCKTYPE = 363543, /* Bad release lock type */ | 50 | AFSVL_BADRELLOCKTYPE = 363543, /* Bad release lock type */ |
42 | AFSVL_RERELEASE = 363544, /* Status report: last release was aborted */ | 51 | AFSVL_RERELEASE = 363544, /* Status report: last release was aborted */ |
43 | AFSVL_BADSERVERFLAG = 363545, /* Invalid replication site server °ag */ | 52 | AFSVL_BADSERVERFLAG = 363545, /* Invalid replication site server °ag */ |
44 | AFSVL_PERM = 363546, /* No permission access */ | 53 | AFSVL_PERM = 363546, /* No permission access */ |
45 | AFSVL_NOMEM = 363547, /* malloc/realloc failed to alloc enough memory */ | 54 | AFSVL_NOMEM = 363547, /* malloc/realloc failed to alloc enough memory */ |
46 | }; | 55 | }; |
47 | 56 | ||
48 | /* maps to "struct vldbentry" in vvl-spec.pdf */ | 57 | /* |
58 | * maps to "struct vldbentry" in vvl-spec.pdf | ||
59 | */ | ||
49 | struct afs_vldbentry { | 60 | struct afs_vldbentry { |
50 | char name[65]; /* name of volume (including NUL char) */ | 61 | char name[65]; /* name of volume (with NUL char) */ |
51 | afs_voltype_t type; /* volume type */ | 62 | afs_voltype_t type; /* volume type */ |
52 | unsigned num_servers; /* num servers that hold instances of this vol */ | 63 | unsigned num_servers; /* num servers that hold instances of this vol */ |
53 | unsigned clone_id; /* cloning ID */ | 64 | unsigned clone_id; /* cloning ID */ |
@@ -70,16 +81,4 @@ struct afs_vldbentry { | |||
70 | } servers[8]; | 81 | } servers[8]; |
71 | }; | 82 | }; |
72 | 83 | ||
73 | extern int afs_rxvl_get_entry_by_name(struct afs_server *, const char *, | 84 | #endif /* AFS_VL_H */ |
74 | unsigned, struct afs_cache_vlocation *); | ||
75 | extern int afs_rxvl_get_entry_by_id(struct afs_server *, afs_volid_t, | ||
76 | afs_voltype_t, | ||
77 | struct afs_cache_vlocation *); | ||
78 | |||
79 | extern int afs_rxvl_get_entry_by_id_async(struct afs_async_op *, | ||
80 | afs_volid_t, afs_voltype_t); | ||
81 | |||
82 | extern int afs_rxvl_get_entry_by_id_async2(struct afs_async_op *, | ||
83 | struct afs_cache_vlocation *); | ||
84 | |||
85 | #endif /* AFS_VLCLIENT_H */ | ||
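
AFS_VL_PORT and VL_SERVICE above are what a client needs in order to address the Volume Location service over AF_RXRPC. A hedged sketch of filling in a call's target address with them follows; how fs/afs/vlclient.c actually marshals and dispatches the call is not shown in this excerpt, and the helper name is hypothetical.

#include <linux/types.h>
#include <linux/in.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/rxrpc.h>	/* struct sockaddr_rxrpc */
#include "afs_vl.h"		/* AFS_VL_PORT, VL_SERVICE */

static void example_fill_vl_address(struct sockaddr_rxrpc *srx, __be32 vl_ip)
{
	memset(srx, 0, sizeof(*srx));
	srx->srx_family			= AF_RXRPC;
	srx->srx_service		= VL_SERVICE;		/* 52 */
	srx->transport_type		= SOCK_DGRAM;
	srx->transport_len		= sizeof(srx->transport.sin);
	srx->transport.sin.sin_family	= AF_INET;
	srx->transport.sin.sin_port	= htons(AFS_VL_PORT);	/* 7003 */
	srx->transport.sin.sin_addr.s_addr = vl_ip;
}
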
diff --git a/fs/afs/cache.c b/fs/afs/cache.c
new file mode 100644
index 00000000000..de0d7de69ed
--- /dev/null
+++ b/fs/afs/cache.c
@@ -0,0 +1,256 @@ | |||
1 | /* AFS caching stuff | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifdef AFS_CACHING_SUPPORT | ||
13 | static cachefs_match_val_t afs_cell_cache_match(void *target, | ||
14 | const void *entry); | ||
15 | static void afs_cell_cache_update(void *source, void *entry); | ||
16 | |||
17 | struct cachefs_index_def afs_cache_cell_index_def = { | ||
18 | .name = "cell_ix", | ||
19 | .data_size = sizeof(struct afs_cache_cell), | ||
20 | .keys[0] = { CACHEFS_INDEX_KEYS_ASCIIZ, 64 }, | ||
21 | .match = afs_cell_cache_match, | ||
22 | .update = afs_cell_cache_update, | ||
23 | }; | ||
24 | #endif | ||
25 | |||
26 | /* | ||
27 | * match a cell record obtained from the cache | ||
28 | */ | ||
29 | #ifdef AFS_CACHING_SUPPORT | ||
30 | static cachefs_match_val_t afs_cell_cache_match(void *target, | ||
31 | const void *entry) | ||
32 | { | ||
33 | const struct afs_cache_cell *ccell = entry; | ||
34 | struct afs_cell *cell = target; | ||
35 | |||
36 | _enter("{%s},{%s}", ccell->name, cell->name); | ||
37 | |||
38 | if (strncmp(ccell->name, cell->name, sizeof(ccell->name)) == 0) { | ||
39 | _leave(" = SUCCESS"); | ||
40 | return CACHEFS_MATCH_SUCCESS; | ||
41 | } | ||
42 | |||
43 | _leave(" = FAILED"); | ||
44 | return CACHEFS_MATCH_FAILED; | ||
45 | } | ||
46 | #endif | ||
47 | |||
48 | /* | ||
49 | * update a cell record in the cache | ||
50 | */ | ||
51 | #ifdef AFS_CACHING_SUPPORT | ||
52 | static void afs_cell_cache_update(void *source, void *entry) | ||
53 | { | ||
54 | struct afs_cache_cell *ccell = entry; | ||
55 | struct afs_cell *cell = source; | ||
56 | |||
57 | _enter("%p,%p", source, entry); | ||
58 | |||
59 | strncpy(ccell->name, cell->name, sizeof(ccell->name)); | ||
60 | |||
61 | memcpy(ccell->vl_servers, | ||
62 | cell->vl_addrs, | ||
63 | min(sizeof(ccell->vl_servers), sizeof(cell->vl_addrs))); | ||
64 | |||
65 | } | ||
66 | #endif | ||
67 | |||
68 | #ifdef AFS_CACHING_SUPPORT | ||
69 | static cachefs_match_val_t afs_vlocation_cache_match(void *target, | ||
70 | const void *entry); | ||
71 | static void afs_vlocation_cache_update(void *source, void *entry); | ||
72 | |||
73 | struct cachefs_index_def afs_vlocation_cache_index_def = { | ||
74 | .name = "vldb", | ||
75 | .data_size = sizeof(struct afs_cache_vlocation), | ||
76 | .keys[0] = { CACHEFS_INDEX_KEYS_ASCIIZ, 64 }, | ||
77 | .match = afs_vlocation_cache_match, | ||
78 | .update = afs_vlocation_cache_update, | ||
79 | }; | ||
80 | #endif | ||
81 | |||
82 | /* | ||
83 | * match a VLDB record stored in the cache | ||
84 | * - may also load target from entry | ||
85 | */ | ||
86 | #ifdef AFS_CACHING_SUPPORT | ||
87 | static cachefs_match_val_t afs_vlocation_cache_match(void *target, | ||
88 | const void *entry) | ||
89 | { | ||
90 | const struct afs_cache_vlocation *vldb = entry; | ||
91 | struct afs_vlocation *vlocation = target; | ||
92 | |||
93 | _enter("{%s},{%s}", vlocation->vldb.name, vldb->name); | ||
94 | |||
95 | if (strncmp(vlocation->vldb.name, vldb->name, sizeof(vldb->name)) == 0 | ||
96 | ) { | ||
97 | if (!vlocation->valid || | ||
98 | vlocation->vldb.rtime == vldb->rtime | ||
99 | ) { | ||
100 | vlocation->vldb = *vldb; | ||
101 | vlocation->valid = 1; | ||
102 | _leave(" = SUCCESS [c->m]"); | ||
103 | return CACHEFS_MATCH_SUCCESS; | ||
104 | } else if (memcmp(&vlocation->vldb, vldb, sizeof(*vldb)) != 0) { | ||
105 | /* delete if VIDs for this name differ */ | ||
106 | if (memcmp(&vlocation->vldb.vid, | ||
107 | &vldb->vid, | ||
108 | sizeof(vldb->vid)) != 0) { | ||
109 | _leave(" = DELETE"); | ||
110 | return CACHEFS_MATCH_SUCCESS_DELETE; | ||
111 | } | ||
112 | |||
113 | _leave(" = UPDATE"); | ||
114 | return CACHEFS_MATCH_SUCCESS_UPDATE; | ||
115 | } else { | ||
116 | _leave(" = SUCCESS"); | ||
117 | return CACHEFS_MATCH_SUCCESS; | ||
118 | } | ||
119 | } | ||
120 | |||
121 | _leave(" = FAILED"); | ||
122 | return CACHEFS_MATCH_FAILED; | ||
123 | } | ||
124 | #endif | ||
125 | |||
126 | /* | ||
127 | * update a VLDB record stored in the cache | ||
128 | */ | ||
129 | #ifdef AFS_CACHING_SUPPORT | ||
130 | static void afs_vlocation_cache_update(void *source, void *entry) | ||
131 | { | ||
132 | struct afs_cache_vlocation *vldb = entry; | ||
133 | struct afs_vlocation *vlocation = source; | ||
134 | |||
135 | _enter(""); | ||
136 | |||
137 | *vldb = vlocation->vldb; | ||
138 | } | ||
139 | #endif | ||
140 | |||
141 | #ifdef AFS_CACHING_SUPPORT | ||
142 | static cachefs_match_val_t afs_volume_cache_match(void *target, | ||
143 | const void *entry); | ||
144 | static void afs_volume_cache_update(void *source, void *entry); | ||
145 | |||
146 | struct cachefs_index_def afs_volume_cache_index_def = { | ||
147 | .name = "volume", | ||
148 | .data_size = sizeof(struct afs_cache_vhash), | ||
149 | .keys[0] = { CACHEFS_INDEX_KEYS_BIN, 1 }, | ||
150 | .keys[1] = { CACHEFS_INDEX_KEYS_BIN, 1 }, | ||
151 | .match = afs_volume_cache_match, | ||
152 | .update = afs_volume_cache_update, | ||
153 | }; | ||
154 | #endif | ||
155 | |||
156 | /* | ||
157 | * match a volume hash record stored in the cache | ||
158 | */ | ||
159 | #ifdef AFS_CACHING_SUPPORT | ||
160 | static cachefs_match_val_t afs_volume_cache_match(void *target, | ||
161 | const void *entry) | ||
162 | { | ||
163 | const struct afs_cache_vhash *vhash = entry; | ||
164 | struct afs_volume *volume = target; | ||
165 | |||
166 | _enter("{%u},{%u}", volume->type, vhash->vtype); | ||
167 | |||
168 | if (volume->type == vhash->vtype) { | ||
169 | _leave(" = SUCCESS"); | ||
170 | return CACHEFS_MATCH_SUCCESS; | ||
171 | } | ||
172 | |||
173 | _leave(" = FAILED"); | ||
174 | return CACHEFS_MATCH_FAILED; | ||
175 | } | ||
176 | #endif | ||
177 | |||
178 | /* | ||
179 | * update a volume hash record stored in the cache | ||
180 | */ | ||
181 | #ifdef AFS_CACHING_SUPPORT | ||
182 | static void afs_volume_cache_update(void *source, void *entry) | ||
183 | { | ||
184 | struct afs_cache_vhash *vhash = entry; | ||
185 | struct afs_volume *volume = source; | ||
186 | |||
187 | _enter(""); | ||
188 | |||
189 | vhash->vtype = volume->type; | ||
190 | } | ||
191 | #endif | ||
192 | |||
193 | #ifdef AFS_CACHING_SUPPORT | ||
194 | static cachefs_match_val_t afs_vnode_cache_match(void *target, | ||
195 | const void *entry); | ||
196 | static void afs_vnode_cache_update(void *source, void *entry); | ||
197 | |||
198 | struct cachefs_index_def afs_vnode_cache_index_def = { | ||
199 | .name = "vnode", | ||
200 | .data_size = sizeof(struct afs_cache_vnode), | ||
201 | .keys[0] = { CACHEFS_INDEX_KEYS_BIN, 4 }, | ||
202 | .match = afs_vnode_cache_match, | ||
203 | .update = afs_vnode_cache_update, | ||
204 | }; | ||
205 | #endif | ||
206 | |||
207 | /* | ||
208 | * match a vnode record stored in the cache | ||
209 | */ | ||
210 | #ifdef AFS_CACHING_SUPPORT | ||
211 | static cachefs_match_val_t afs_vnode_cache_match(void *target, | ||
212 | const void *entry) | ||
213 | { | ||
214 | const struct afs_cache_vnode *cvnode = entry; | ||
215 | struct afs_vnode *vnode = target; | ||
216 | |||
217 | _enter("{%x,%x,%Lx},{%x,%x,%Lx}", | ||
218 | vnode->fid.vnode, | ||
219 | vnode->fid.unique, | ||
220 | vnode->status.version, | ||
221 | cvnode->vnode_id, | ||
222 | cvnode->vnode_unique, | ||
223 | cvnode->data_version); | ||
224 | |||
225 | if (vnode->fid.vnode != cvnode->vnode_id) { | ||
226 | _leave(" = FAILED"); | ||
227 | return CACHEFS_MATCH_FAILED; | ||
228 | } | ||
229 | |||
230 | if (vnode->fid.unique != cvnode->vnode_unique || | ||
231 | vnode->status.version != cvnode->data_version) { | ||
232 | _leave(" = DELETE"); | ||
233 | return CACHEFS_MATCH_SUCCESS_DELETE; | ||
234 | } | ||
235 | |||
236 | _leave(" = SUCCESS"); | ||
237 | return CACHEFS_MATCH_SUCCESS; | ||
238 | } | ||
239 | #endif | ||
240 | |||
241 | /* | ||
242 | * update a vnode record stored in the cache | ||
243 | */ | ||
244 | #ifdef AFS_CACHING_SUPPORT | ||
245 | static void afs_vnode_cache_update(void *source, void *entry) | ||
246 | { | ||
247 | struct afs_cache_vnode *cvnode = entry; | ||
248 | struct afs_vnode *vnode = source; | ||
249 | |||
250 | _enter(""); | ||
251 | |||
252 | cvnode->vnode_id = vnode->fid.vnode; | ||
253 | cvnode->vnode_unique = vnode->fid.unique; | ||
254 | cvnode->data_version = vnode->status.version; | ||
255 | } | ||
256 | #endif | ||
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 26a48fea42f..61121554714 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2002 Red Hat, Inc. All rights reserved. | 2 | * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * This software may be freely redistributed under the terms of the | 4 | * This software may be freely redistributed under the terms of the |
5 | * GNU General Public License. | 5 | * GNU General Public License. |
@@ -16,83 +16,182 @@ | |||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include "server.h" | 19 | #include <linux/circ_buf.h> |
20 | #include "vnode.h" | ||
21 | #include "internal.h" | 20 | #include "internal.h" |
22 | #include "cmservice.h" | 21 | |
22 | unsigned afs_vnode_update_timeout = 10; | ||
23 | |||
24 | #define afs_breakring_space(server) \ | ||
25 | CIRC_SPACE((server)->cb_break_head, (server)->cb_break_tail, \ | ||
26 | ARRAY_SIZE((server)->cb_break)) | ||
27 | |||
28 | //static void afs_callback_updater(struct work_struct *); | ||
29 | |||
30 | static struct workqueue_struct *afs_callback_update_worker; | ||
23 | 31 | ||
24 | /* | 32 | /* |
25 | * allow the fileserver to request callback state (re-)initialisation | 33 | * allow the fileserver to request callback state (re-)initialisation |
26 | */ | 34 | */ |
27 | int SRXAFSCM_InitCallBackState(struct afs_server *server) | 35 | void afs_init_callback_state(struct afs_server *server) |
28 | { | 36 | { |
29 | struct list_head callbacks; | 37 | struct afs_vnode *vnode; |
30 | 38 | ||
31 | _enter("%p", server); | 39 | _enter("{%p}", server); |
32 | 40 | ||
33 | INIT_LIST_HEAD(&callbacks); | ||
34 | |||
35 | /* transfer the callback list from the server to a temp holding area */ | ||
36 | spin_lock(&server->cb_lock); | 41 | spin_lock(&server->cb_lock); |
37 | 42 | ||
38 | list_add(&callbacks, &server->cb_promises); | 43 | /* kill all the promises on record from this server */ |
39 | list_del_init(&server->cb_promises); | 44 | while (!RB_EMPTY_ROOT(&server->cb_promises)) { |
45 | vnode = rb_entry(server->cb_promises.rb_node, | ||
46 | struct afs_vnode, cb_promise); | ||
47 | printk("\nUNPROMISE on %p\n", vnode); | ||
48 | rb_erase(&vnode->cb_promise, &server->cb_promises); | ||
49 | vnode->cb_promised = false; | ||
50 | } | ||
40 | 51 | ||
41 | /* munch our way through the list, grabbing the inode, dropping all the | 52 | spin_unlock(&server->cb_lock); |
42 | * locks and regetting them in the right order | 53 | _leave(""); |
43 | */ | 54 | } |
44 | while (!list_empty(&callbacks)) { | ||
45 | struct afs_vnode *vnode; | ||
46 | struct inode *inode; | ||
47 | 55 | ||
48 | vnode = list_entry(callbacks.next, struct afs_vnode, cb_link); | 56 | /* |
49 | list_del_init(&vnode->cb_link); | 57 | * handle the data invalidation side of a callback being broken |
58 | */ | ||
59 | void afs_broken_callback_work(struct work_struct *work) | ||
60 | { | ||
61 | struct afs_vnode *vnode = | ||
62 | container_of(work, struct afs_vnode, cb_broken_work); | ||
50 | 63 | ||
51 | /* try and grab the inode - may fail */ | 64 | _enter(""); |
52 | inode = igrab(AFS_VNODE_TO_I(vnode)); | ||
53 | if (inode) { | ||
54 | int release = 0; | ||
55 | 65 | ||
56 | spin_unlock(&server->cb_lock); | 66 | if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) |
57 | spin_lock(&vnode->lock); | 67 | return; |
58 | 68 | ||
59 | if (vnode->cb_server == server) { | 69 | /* we're only interested in dealing with a broken callback on *this* |
60 | vnode->cb_server = NULL; | 70 | * vnode and only if no-one else has dealt with it yet */ |
61 | afs_kafstimod_del_timer(&vnode->cb_timeout); | 71 | if (!mutex_trylock(&vnode->cb_broken_lock)) |
62 | spin_lock(&afs_cb_hash_lock); | 72 | return; /* someone else is dealing with it */ |
63 | list_del_init(&vnode->cb_hash_link); | ||
64 | spin_unlock(&afs_cb_hash_lock); | ||
65 | release = 1; | ||
66 | } | ||
67 | 73 | ||
68 | spin_unlock(&vnode->lock); | 74 | if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) { |
75 | if (afs_vnode_fetch_status(vnode) < 0) | ||
76 | goto out; | ||
69 | 77 | ||
70 | iput(inode); | 78 | if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) |
71 | afs_put_server(server); | 79 | goto out; |
72 | 80 | ||
73 | spin_lock(&server->cb_lock); | 81 | /* if the vnode's data version number changed then its contents |
82 | * are different */ | ||
83 | if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) { | ||
84 | _debug("zap data"); | ||
85 | invalidate_remote_inode(&vnode->vfs_inode); | ||
74 | } | 86 | } |
75 | } | 87 | } |
76 | 88 | ||
77 | spin_unlock(&server->cb_lock); | 89 | out: |
90 | mutex_unlock(&vnode->cb_broken_lock); | ||
78 | 91 | ||
79 | _leave(" = 0"); | 92 | /* avoid the potential race whereby the mutex_trylock() in this |
80 | return 0; | 93 | * function happens again between the clear_bit() and the |
94 | * mutex_unlock() */ | ||
95 | if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) { | ||
96 | _debug("requeue"); | ||
97 | queue_work(afs_callback_update_worker, &vnode->cb_broken_work); | ||
98 | } | ||
99 | _leave(""); | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * actually break a callback | ||
104 | */ | ||
105 | static void afs_break_callback(struct afs_server *server, | ||
106 | struct afs_vnode *vnode) | ||
107 | { | ||
108 | _enter(""); | ||
109 | |||
110 | set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags); | ||
111 | |||
112 | if (vnode->cb_promised) { | ||
113 | spin_lock(&vnode->lock); | ||
114 | |||
115 | _debug("break callback"); | ||
116 | |||
117 | spin_lock(&server->cb_lock); | ||
118 | if (vnode->cb_promised) { | ||
119 | rb_erase(&vnode->cb_promise, &server->cb_promises); | ||
120 | vnode->cb_promised = false; | ||
121 | } | ||
122 | spin_unlock(&server->cb_lock); | ||
123 | |||
124 | queue_work(afs_callback_update_worker, &vnode->cb_broken_work); | ||
125 | spin_unlock(&vnode->lock); | ||
126 | } | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * allow the fileserver to explicitly break one callback | ||
131 | * - happens when | ||
132 | * - the backing file is changed | ||
133 | * - a lock is released | ||
134 | */ | ||
135 | static void afs_break_one_callback(struct afs_server *server, | ||
136 | struct afs_fid *fid) | ||
137 | { | ||
138 | struct afs_vnode *vnode; | ||
139 | struct rb_node *p; | ||
140 | |||
141 | _debug("find"); | ||
142 | spin_lock(&server->fs_lock); | ||
143 | p = server->fs_vnodes.rb_node; | ||
144 | while (p) { | ||
145 | vnode = rb_entry(p, struct afs_vnode, server_rb); | ||
146 | if (fid->vid < vnode->fid.vid) | ||
147 | p = p->rb_left; | ||
148 | else if (fid->vid > vnode->fid.vid) | ||
149 | p = p->rb_right; | ||
150 | else if (fid->vnode < vnode->fid.vnode) | ||
151 | p = p->rb_left; | ||
152 | else if (fid->vnode > vnode->fid.vnode) | ||
153 | p = p->rb_right; | ||
154 | else if (fid->unique < vnode->fid.unique) | ||
155 | p = p->rb_left; | ||
156 | else if (fid->unique > vnode->fid.unique) | ||
157 | p = p->rb_right; | ||
158 | else | ||
159 | goto found; | ||
160 | } | ||
161 | |||
162 | /* not found so we just ignore it (it may have moved to another | ||
163 | * server) */ | ||
164 | not_available: | ||
165 | _debug("not avail"); | ||
166 | spin_unlock(&server->fs_lock); | ||
167 | _leave(""); | ||
168 | return; | ||
169 | |||
170 | found: | ||
171 | _debug("found"); | ||
172 | ASSERTCMP(server, ==, vnode->server); | ||
173 | |||
174 | if (!igrab(AFS_VNODE_TO_I(vnode))) | ||
175 | goto not_available; | ||
176 | spin_unlock(&server->fs_lock); | ||
177 | |||
178 | afs_break_callback(server, vnode); | ||
179 | iput(&vnode->vfs_inode); | ||
180 | _leave(""); | ||
81 | } | 181 | } |
82 | 182 | ||
83 | /* | 183 | /* |
84 | * allow the fileserver to break callback promises | 184 | * allow the fileserver to break callback promises |
85 | */ | 185 | */ |
86 | int SRXAFSCM_CallBack(struct afs_server *server, size_t count, | 186 | void afs_break_callbacks(struct afs_server *server, size_t count, |
87 | struct afs_callback callbacks[]) | 187 | struct afs_callback callbacks[]) |
88 | { | 188 | { |
89 | _enter("%p,%u,", server, count); | 189 | _enter("%p,%zu,", server, count); |
90 | 190 | ||
91 | for (; count > 0; callbacks++, count--) { | 191 | ASSERT(server != NULL); |
92 | struct afs_vnode *vnode = NULL; | 192 | ASSERTCMP(count, <=, AFSCBMAX); |
93 | struct inode *inode = NULL; | ||
94 | int valid = 0; | ||
95 | 193 | ||
194 | for (; count > 0; callbacks++, count--) { | ||
96 | _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }", | 195 | _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }", |
97 | callbacks->fid.vid, | 196 | callbacks->fid.vid, |
98 | callbacks->fid.vnode, | 197 | callbacks->fid.vnode, |
@@ -101,66 +200,244 @@ int SRXAFSCM_CallBack(struct afs_server *server, size_t count, | |||
101 | callbacks->expiry, | 200 | callbacks->expiry, |
102 | callbacks->type | 201 | callbacks->type |
103 | ); | 202 | ); |
203 | afs_break_one_callback(server, &callbacks->fid); | ||
204 | } | ||
205 | |||
206 | _leave(""); | ||
207 | return; | ||
208 | } | ||
104 | 209 | ||
105 | /* find the inode for this fid */ | 210 | /* |
106 | spin_lock(&afs_cb_hash_lock); | 211 | * record the callback for breaking |
212 | * - the caller must hold server->cb_lock | ||
213 | */ | ||
214 | static void afs_do_give_up_callback(struct afs_server *server, | ||
215 | struct afs_vnode *vnode) | ||
216 | { | ||
217 | struct afs_callback *cb; | ||
107 | 218 | ||
108 | list_for_each_entry(vnode, | 219 | _enter("%p,%p", server, vnode); |
109 | &afs_cb_hash(server, &callbacks->fid), | ||
110 | cb_hash_link) { | ||
111 | if (memcmp(&vnode->fid, &callbacks->fid, | ||
112 | sizeof(struct afs_fid)) != 0) | ||
113 | continue; | ||
114 | 220 | ||
115 | /* right vnode, but is it same server? */ | 221 | cb = &server->cb_break[server->cb_break_head]; |
116 | if (vnode->cb_server != server) | 222 | cb->fid = vnode->fid; |
117 | break; /* no */ | 223 | cb->version = vnode->cb_version; |
224 | cb->expiry = vnode->cb_expiry; | ||
225 | cb->type = vnode->cb_type; | ||
226 | smp_wmb(); | ||
227 | server->cb_break_head = | ||
228 | (server->cb_break_head + 1) & | ||
229 | (ARRAY_SIZE(server->cb_break) - 1); | ||
118 | 230 | ||
119 | /* try and nail the inode down */ | 231 | /* defer the breaking of callbacks to try and collect as many as |
120 | inode = igrab(AFS_VNODE_TO_I(vnode)); | 232 | * possible to ship in one operation */ |
121 | break; | 233 | switch (atomic_inc_return(&server->cb_break_n)) { |
234 | case 1 ... AFSCBMAX - 1: | ||
235 | queue_delayed_work(afs_callback_update_worker, | ||
236 | &server->cb_break_work, HZ * 2); | ||
237 | break; | ||
238 | case AFSCBMAX: | ||
239 | afs_flush_callback_breaks(server); | ||
240 | break; | ||
241 | default: | ||
242 | break; | ||
243 | } | ||
244 | |||
245 | ASSERT(server->cb_promises.rb_node != NULL); | ||
246 | rb_erase(&vnode->cb_promise, &server->cb_promises); | ||
247 | vnode->cb_promised = false; | ||
248 | _leave(""); | ||
249 | } | ||
250 | |||
251 | /* | ||
252 | * give up the callback registered for a vnode on the file server when the | ||
253 | * inode is being cleared | ||
254 | */ | ||
255 | void afs_give_up_callback(struct afs_vnode *vnode) | ||
256 | { | ||
257 | struct afs_server *server = vnode->server; | ||
258 | |||
259 | DECLARE_WAITQUEUE(myself, current); | ||
260 | |||
261 | _enter("%d", vnode->cb_promised); | ||
262 | |||
263 | _debug("GIVE UP INODE %p", &vnode->vfs_inode); | ||
264 | |||
265 | if (!vnode->cb_promised) { | ||
266 | _leave(" [not promised]"); | ||
267 | return; | ||
268 | } | ||
269 | |||
270 | ASSERT(server != NULL); | ||
271 | |||
272 | spin_lock(&server->cb_lock); | ||
273 | if (vnode->cb_promised && afs_breakring_space(server) == 0) { | ||
274 | add_wait_queue(&server->cb_break_waitq, &myself); | ||
275 | for (;;) { | ||
276 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
277 | if (!vnode->cb_promised || | ||
278 | afs_breakring_space(server) != 0) | ||
279 | break; | ||
280 | spin_unlock(&server->cb_lock); | ||
281 | schedule(); | ||
282 | spin_lock(&server->cb_lock); | ||
122 | } | 283 | } |
284 | remove_wait_queue(&server->cb_break_waitq, &myself); | ||
285 | __set_current_state(TASK_RUNNING); | ||
286 | } | ||
287 | |||
288 | /* of course, it's always possible for the server to break this vnode's | ||
289 | * callback first... */ | ||
290 | if (vnode->cb_promised) | ||
291 | afs_do_give_up_callback(server, vnode); | ||
292 | |||
293 | spin_unlock(&server->cb_lock); | ||
294 | _leave(""); | ||
295 | } | ||
296 | |||
297 | /* | ||
298 | * dispatch a deferred give up callbacks operation | ||
299 | */ | ||
300 | void afs_dispatch_give_up_callbacks(struct work_struct *work) | ||
301 | { | ||
302 | struct afs_server *server = | ||
303 | container_of(work, struct afs_server, cb_break_work.work); | ||
304 | |||
305 | _enter(""); | ||
306 | |||
307 | /* tell the fileserver to discard the callback promises it has | ||
308 | * - in the event of ENOMEM or some other error, we just forget that we | ||
309 | * had callbacks entirely, and the server will call us later to break | ||
310 | * them | ||
311 | */ | ||
312 | afs_fs_give_up_callbacks(server, &afs_async_call); | ||
313 | } | ||
314 | |||
315 | /* | ||
316 | * flush the outstanding callback breaks on a server | ||
317 | */ | ||
318 | void afs_flush_callback_breaks(struct afs_server *server) | ||
319 | { | ||
320 | cancel_delayed_work(&server->cb_break_work); | ||
321 | queue_delayed_work(afs_callback_update_worker, | ||
322 | &server->cb_break_work, 0); | ||
323 | } | ||
324 | |||
325 | #if 0 | ||
326 | /* | ||
327 | * update a bunch of callbacks | ||
328 | */ | ||
329 | static void afs_callback_updater(struct work_struct *work) | ||
330 | { | ||
331 | struct afs_server *server; | ||
332 | struct afs_vnode *vnode, *xvnode; | ||
333 | time_t now; | ||
334 | long timeout; | ||
335 | int ret; | ||
336 | |||
337 | server = container_of(work, struct afs_server, updater); | ||
338 | |||
339 | _enter(""); | ||
123 | 340 | ||
124 | spin_unlock(&afs_cb_hash_lock); | 341 | now = get_seconds(); |
125 | 342 | ||
126 | if (inode) { | 343 | /* find the first vnode to update */ |
127 | /* we've found the record for this vnode */ | 344 | spin_lock(&server->cb_lock); |
128 | spin_lock(&vnode->lock); | 345 | for (;;) { |
129 | if (vnode->cb_server == server) { | 346 | if (RB_EMPTY_ROOT(&server->cb_promises)) { |
130 | /* the callback _is_ on the calling server */ | 347 | spin_unlock(&server->cb_lock); |
131 | vnode->cb_server = NULL; | 348 | _leave(" [nothing]"); |
132 | valid = 1; | 349 | return; |
133 | |||
134 | afs_kafstimod_del_timer(&vnode->cb_timeout); | ||
135 | vnode->flags |= AFS_VNODE_CHANGED; | ||
136 | |||
137 | spin_lock(&server->cb_lock); | ||
138 | list_del_init(&vnode->cb_link); | ||
139 | spin_unlock(&server->cb_lock); | ||
140 | |||
141 | spin_lock(&afs_cb_hash_lock); | ||
142 | list_del_init(&vnode->cb_hash_link); | ||
143 | spin_unlock(&afs_cb_hash_lock); | ||
144 | } | ||
145 | spin_unlock(&vnode->lock); | ||
146 | |||
147 | if (valid) { | ||
148 | invalidate_remote_inode(inode); | ||
149 | afs_put_server(server); | ||
150 | } | ||
151 | iput(inode); | ||
152 | } | 350 | } |
351 | |||
352 | vnode = rb_entry(rb_first(&server->cb_promises), | ||
353 | struct afs_vnode, cb_promise); | ||
354 | if (atomic_read(&vnode->usage) > 0) | ||
355 | break; | ||
356 | rb_erase(&vnode->cb_promise, &server->cb_promises); | ||
357 | vnode->cb_promised = false; | ||
358 | } | ||
359 | |||
360 | timeout = vnode->update_at - now; | ||
361 | if (timeout > 0) { | ||
362 | queue_delayed_work(afs_vnode_update_worker, | ||
363 | &afs_vnode_update, timeout * HZ); | ||
364 | spin_unlock(&server->cb_lock); | ||
365 | _leave(" [nothing]"); | ||
366 | return; | ||
367 | } | ||
368 | |||
369 | list_del_init(&vnode->update); | ||
370 | atomic_inc(&vnode->usage); | ||
371 | spin_unlock(&server->cb_lock); | ||
372 | |||
373 | /* we can now perform the update */ | ||
374 | _debug("update %s", vnode->vldb.name); | ||
375 | vnode->state = AFS_VL_UPDATING; | ||
376 | vnode->upd_rej_cnt = 0; | ||
377 | vnode->upd_busy_cnt = 0; | ||
378 | |||
379 | ret = afs_vnode_update_record(vl, &vldb); | ||
380 | switch (ret) { | ||
381 | case 0: | ||
382 | afs_vnode_apply_update(vl, &vldb); | ||
383 | vnode->state = AFS_VL_UPDATING; | ||
384 | break; | ||
385 | case -ENOMEDIUM: | ||
386 | vnode->state = AFS_VL_VOLUME_DELETED; | ||
387 | break; | ||
388 | default: | ||
389 | vnode->state = AFS_VL_UNCERTAIN; | ||
390 | break; | ||
391 | } | ||
392 | |||
393 | /* and then reschedule */ | ||
394 | _debug("reschedule"); | ||
395 | vnode->update_at = get_seconds() + afs_vnode_update_timeout; | ||
396 | |||
397 | spin_lock(&server->cb_lock); | ||
398 | |||
399 | if (!list_empty(&server->cb_promises)) { | ||
400 | /* next update in 10 minutes, but wait at least 1 second more | ||
401 | * than the newest record already queued so that we don't spam | ||
402 | * the VL server suddenly with lots of requests | ||
403 | */ | ||
404 | xvnode = list_entry(server->cb_promises.prev, | ||
405 | struct afs_vnode, update); | ||
406 | if (vnode->update_at <= xvnode->update_at) | ||
407 | vnode->update_at = xvnode->update_at + 1; | ||
408 | xvnode = list_entry(server->cb_promises.next, | ||
409 | struct afs_vnode, update); | ||
410 | timeout = xvnode->update_at - now; | ||
411 | if (timeout < 0) | ||
412 | timeout = 0; | ||
413 | } else { | ||
414 | timeout = afs_vnode_update_timeout; | ||
153 | } | 415 | } |
154 | 416 | ||
155 | _leave(" = 0"); | 417 | list_add_tail(&vnode->update, &server->cb_promises); |
156 | return 0; | 418 | |
419 | _debug("timeout %ld", timeout); | ||
420 | queue_delayed_work(afs_vnode_update_worker, | ||
421 | &afs_vnode_update, timeout * HZ); | ||
422 | spin_unlock(&server->cb_lock); | ||
423 | afs_put_vnode(vl); | ||
424 | } | ||
425 | #endif | ||
426 | |||
427 | /* | ||
428 | * initialise the callback update process | ||
429 | */ | ||
430 | int __init afs_callback_update_init(void) | ||
431 | { | ||
432 | afs_callback_update_worker = | ||
433 | create_singlethread_workqueue("kafs_callbackd"); | ||
434 | return afs_callback_update_worker ? 0 : -ENOMEM; | ||
157 | } | 435 | } |
158 | 436 | ||
159 | /* | 437 | /* |
160 | * allow the fileserver to see if the cache manager is still alive | 438 | * shut down the callback update process |
161 | */ | 439 | */ |
162 | int SRXAFSCM_Probe(struct afs_server *server) | 440 | void __exit afs_callback_update_kill(void) |
163 | { | 441 | { |
164 | _debug("SRXAFSCM_Probe(%p)\n", server); | 442 | destroy_workqueue(afs_callback_update_worker); |
165 | return 0; | ||
166 | } | 443 | } |
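
The afs_do_give_up_callback() code above queues each relinquished callback into the power-of-two ring server->cb_break[], checks free room with CIRC_SPACE(), and only ships a batch once AFSCBMAX entries have accumulated or the delayed work fires. A self-contained sketch of that producer pattern follows; the type and field names are illustrative rather than the real ones, and struct afs_callback is the type defined in fs/afs/afs.h above.

#include <linux/circ_buf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include "afs.h"		/* struct afs_callback */

#define EXAMPLE_RING_SIZE 64	/* must be a power of two for the CIRC_* macros */

struct example_break_ring {
	struct afs_callback	slots[EXAMPLE_RING_SIZE];
	unsigned		head;	/* advanced only by the producer */
	unsigned		tail;	/* advanced only by the consumer */
};

/* returns false when the ring is full, in which case the caller must flush
 * or sleep, much as afs_give_up_callback() waits on cb_break_waitq */
static bool example_ring_push(struct example_break_ring *ring,
			      const struct afs_callback *cb)
{
	if (CIRC_SPACE(ring->head, ring->tail, EXAMPLE_RING_SIZE) == 0)
		return false;

	ring->slots[ring->head] = *cb;
	smp_wmb();	/* publish the slot contents before moving head */
	ring->head = (ring->head + 1) & (EXAMPLE_RING_SIZE - 1);
	return true;
}

Keeping the ring size a power of two lets head and tail wrap with a simple mask, which is what the CIRC_* helpers in <linux/circ_buf.h> assume.
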
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 28ed84ec8ff..733c60246ab 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -11,15 +11,6 @@ | |||
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <rxrpc/peer.h> | ||
15 | #include <rxrpc/connection.h> | ||
16 | #include "volume.h" | ||
17 | #include "cell.h" | ||
18 | #include "server.h" | ||
19 | #include "transport.h" | ||
20 | #include "vlclient.h" | ||
21 | #include "kafstimod.h" | ||
22 | #include "super.h" | ||
23 | #include "internal.h" | 14 | #include "internal.h" |
24 | 15 | ||
25 | DECLARE_RWSEM(afs_proc_cells_sem); | 16 | DECLARE_RWSEM(afs_proc_cells_sem); |
@@ -28,34 +19,21 @@ LIST_HEAD(afs_proc_cells); | |||
28 | static struct list_head afs_cells = LIST_HEAD_INIT(afs_cells); | 19 | static struct list_head afs_cells = LIST_HEAD_INIT(afs_cells); |
29 | static DEFINE_RWLOCK(afs_cells_lock); | 20 | static DEFINE_RWLOCK(afs_cells_lock); |
30 | static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */ | 21 | static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */ |
22 | static DECLARE_WAIT_QUEUE_HEAD(afs_cells_freeable_wq); | ||
31 | static struct afs_cell *afs_cell_root; | 23 | static struct afs_cell *afs_cell_root; |
32 | 24 | ||
33 | #ifdef AFS_CACHING_SUPPORT | ||
34 | static cachefs_match_val_t afs_cell_cache_match(void *target, | ||
35 | const void *entry); | ||
36 | static void afs_cell_cache_update(void *source, void *entry); | ||
37 | |||
38 | struct cachefs_index_def afs_cache_cell_index_def = { | ||
39 | .name = "cell_ix", | ||
40 | .data_size = sizeof(struct afs_cache_cell), | ||
41 | .keys[0] = { CACHEFS_INDEX_KEYS_ASCIIZ, 64 }, | ||
42 | .match = afs_cell_cache_match, | ||
43 | .update = afs_cell_cache_update, | ||
44 | }; | ||
45 | #endif | ||
46 | |||
47 | /* | 25 | /* |
48 | * create a cell record | 26 | * create a cell record |
49 | * - "name" is the name of the cell | 27 | * - "name" is the name of the cell |
50 | * - "vllist" is a colon separated list of IP addresses in "a.b.c.d" format | 28 | * - "vllist" is a colon separated list of IP addresses in "a.b.c.d" format |
51 | */ | 29 | */ |
52 | int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell) | 30 | struct afs_cell *afs_cell_create(const char *name, char *vllist) |
53 | { | 31 | { |
54 | struct afs_cell *cell; | 32 | struct afs_cell *cell; |
55 | char *next; | 33 | char *next; |
56 | int ret; | 34 | int ret; |
57 | 35 | ||
58 | _enter("%s", name); | 36 | _enter("%s,%s", name, vllist); |
59 | 37 | ||
60 | BUG_ON(!name); /* TODO: want to look up "this cell" in the cache */ | 38 | BUG_ON(!name); /* TODO: want to look up "this cell" in the cache */ |
61 | 39 | ||
@@ -63,27 +41,24 @@ int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell) | |||
63 | cell = kmalloc(sizeof(struct afs_cell) + strlen(name) + 1, GFP_KERNEL); | 41 | cell = kmalloc(sizeof(struct afs_cell) + strlen(name) + 1, GFP_KERNEL); |
64 | if (!cell) { | 42 | if (!cell) { |
65 | _leave(" = -ENOMEM"); | 43 | _leave(" = -ENOMEM"); |
66 | return -ENOMEM; | 44 | return ERR_PTR(-ENOMEM); |
67 | } | 45 | } |
68 | 46 | ||
69 | down_write(&afs_cells_sem); | 47 | down_write(&afs_cells_sem); |
70 | 48 | ||
71 | memset(cell, 0, sizeof(struct afs_cell)); | 49 | memset(cell, 0, sizeof(struct afs_cell)); |
72 | atomic_set(&cell->usage, 0); | 50 | atomic_set(&cell->usage, 1); |
73 | 51 | ||
74 | INIT_LIST_HEAD(&cell->link); | 52 | INIT_LIST_HEAD(&cell->link); |
75 | 53 | ||
76 | rwlock_init(&cell->sv_lock); | 54 | rwlock_init(&cell->servers_lock); |
77 | INIT_LIST_HEAD(&cell->sv_list); | 55 | INIT_LIST_HEAD(&cell->servers); |
78 | INIT_LIST_HEAD(&cell->sv_graveyard); | ||
79 | spin_lock_init(&cell->sv_gylock); | ||
80 | 56 | ||
81 | init_rwsem(&cell->vl_sem); | 57 | init_rwsem(&cell->vl_sem); |
82 | INIT_LIST_HEAD(&cell->vl_list); | 58 | INIT_LIST_HEAD(&cell->vl_list); |
83 | INIT_LIST_HEAD(&cell->vl_graveyard); | 59 | spin_lock_init(&cell->vl_lock); |
84 | spin_lock_init(&cell->vl_gylock); | ||
85 | 60 | ||
86 | strcpy(cell->name,name); | 61 | strcpy(cell->name, name); |
87 | 62 | ||
88 | /* fill in the VL server list from the rest of the string */ | 63 | /* fill in the VL server list from the rest of the string */ |
89 | ret = -EINVAL; | 64 | ret = -EINVAL; |
@@ -106,9 +81,9 @@ int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell) | |||
106 | if (cell->vl_naddrs >= AFS_CELL_MAX_ADDRS) | 81 | if (cell->vl_naddrs >= AFS_CELL_MAX_ADDRS) |
107 | break; | 82 | break; |
108 | 83 | ||
109 | } while(vllist = next, vllist); | 84 | } while ((vllist = next)); |
110 | 85 | ||
111 | /* add a proc dir for this cell */ | 86 | /* add a proc directory for this cell */ |
112 | ret = afs_proc_cell_setup(cell); | 87 | ret = afs_proc_cell_setup(cell); |
113 | if (ret < 0) | 88 | if (ret < 0) |
114 | goto error; | 89 | goto error; |
@@ -129,30 +104,29 @@ int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell) | |||
129 | down_write(&afs_proc_cells_sem); | 104 | down_write(&afs_proc_cells_sem); |
130 | list_add_tail(&cell->proc_link, &afs_proc_cells); | 105 | list_add_tail(&cell->proc_link, &afs_proc_cells); |
131 | up_write(&afs_proc_cells_sem); | 106 | up_write(&afs_proc_cells_sem); |
132 | |||
133 | *_cell = cell; | ||
134 | up_write(&afs_cells_sem); | 107 | up_write(&afs_cells_sem); |
135 | 108 | ||
136 | _leave(" = 0 (%p)", cell); | 109 | _leave(" = %p", cell); |
137 | return 0; | 110 | return cell; |
138 | 111 | ||
139 | badaddr: | 112 | badaddr: |
140 | printk(KERN_ERR "kAFS: bad VL server IP address: '%s'\n", vllist); | 113 | printk(KERN_ERR "kAFS: bad VL server IP address\n"); |
141 | error: | 114 | error: |
142 | up_write(&afs_cells_sem); | 115 | up_write(&afs_cells_sem); |
143 | kfree(cell); | 116 | kfree(cell); |
144 | _leave(" = %d", ret); | 117 | _leave(" = %d", ret); |
145 | return ret; | 118 | return ERR_PTR(ret); |
146 | } | 119 | } |
147 | 120 | ||
148 | /* | 121 | /* |
149 | * initialise the cell database from module parameters | 122 | * set the root cell information |
123 | * - can be called with a module parameter string | ||
124 | * - can be called from a write to /proc/fs/afs/rootcell | ||
150 | */ | 125 | */ |
151 | int afs_cell_init(char *rootcell) | 126 | int afs_cell_init(char *rootcell) |
152 | { | 127 | { |
153 | struct afs_cell *old_root, *new_root; | 128 | struct afs_cell *old_root, *new_root; |
154 | char *cp; | 129 | char *cp; |
155 | int ret; | ||
156 | 130 | ||
157 | _enter(""); | 131 | _enter(""); |
158 | 132 | ||
@@ -160,79 +134,60 @@ int afs_cell_init(char *rootcell) | |||
160 | /* module is loaded with no parameters, or built statically. | 134 | /* module is loaded with no parameters, or built statically. |
161 | * - in the future we might initialize cell DB here. | 135 | * - in the future we might initialize cell DB here. |
162 | */ | 136 | */ |
163 | _leave(" = 0 (but no root)"); | 137 | _leave(" = 0 [no root]"); |
164 | return 0; | 138 | return 0; |
165 | } | 139 | } |
166 | 140 | ||
167 | cp = strchr(rootcell, ':'); | 141 | cp = strchr(rootcell, ':'); |
168 | if (!cp) { | 142 | if (!cp) { |
169 | printk(KERN_ERR "kAFS: no VL server IP addresses specified\n"); | 143 | printk(KERN_ERR "kAFS: no VL server IP addresses specified\n"); |
170 | _leave(" = %d (no colon)", -EINVAL); | 144 | _leave(" = -EINVAL"); |
171 | return -EINVAL; | 145 | return -EINVAL; |
172 | } | 146 | } |
173 | 147 | ||
174 | /* allocate a cell record for the root cell */ | 148 | /* allocate a cell record for the root cell */ |
175 | *cp++ = 0; | 149 | *cp++ = 0; |
176 | ret = afs_cell_create(rootcell, cp, &new_root); | 150 | new_root = afs_cell_create(rootcell, cp); |
177 | if (ret < 0) { | 151 | if (IS_ERR(new_root)) { |
178 | _leave(" = %d", ret); | 152 | _leave(" = %ld", PTR_ERR(new_root)); |
179 | return ret; | 153 | return PTR_ERR(new_root); |
180 | } | 154 | } |
181 | 155 | ||
182 | /* as afs_put_cell() takes locks by itself, we have to do | 156 | /* install the new cell */ |
183 | * a little gymnastics to be race-free. | ||
184 | */ | ||
185 | afs_get_cell(new_root); | ||
186 | |||
187 | write_lock(&afs_cells_lock); | 157 | write_lock(&afs_cells_lock); |
188 | while (afs_cell_root) { | 158 | old_root = afs_cell_root; |
189 | old_root = afs_cell_root; | ||
190 | afs_cell_root = NULL; | ||
191 | write_unlock(&afs_cells_lock); | ||
192 | afs_put_cell(old_root); | ||
193 | write_lock(&afs_cells_lock); | ||
194 | } | ||
195 | afs_cell_root = new_root; | 159 | afs_cell_root = new_root; |
196 | write_unlock(&afs_cells_lock); | 160 | write_unlock(&afs_cells_lock); |
161 | afs_put_cell(old_root); | ||
197 | 162 | ||
198 | _leave(" = %d", ret); | 163 | _leave(" = 0"); |
199 | return ret; | 164 | return 0; |
200 | } | 165 | } |
201 | 166 | ||
202 | /* | 167 | /* |
203 | * lookup a cell record | 168 | * lookup a cell record |
204 | */ | 169 | */ |
205 | int afs_cell_lookup(const char *name, unsigned namesz, struct afs_cell **_cell) | 170 | struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz) |
206 | { | 171 | { |
207 | struct afs_cell *cell; | 172 | struct afs_cell *cell; |
208 | int ret; | ||
209 | 173 | ||
210 | _enter("\"%*.*s\",", namesz, namesz, name ? name : ""); | 174 | _enter("\"%*.*s\",", namesz, namesz, name ? name : ""); |
211 | 175 | ||
212 | *_cell = NULL; | 176 | down_read(&afs_cells_sem); |
177 | read_lock(&afs_cells_lock); | ||
213 | 178 | ||
214 | if (name) { | 179 | if (name) { |
215 | /* if the cell was named, look for it in the cell record list */ | 180 | /* if the cell was named, look for it in the cell record list */ |
216 | ret = -ENOENT; | ||
217 | cell = NULL; | ||
218 | read_lock(&afs_cells_lock); | ||
219 | |||
220 | list_for_each_entry(cell, &afs_cells, link) { | 181 | list_for_each_entry(cell, &afs_cells, link) { |
221 | if (strncmp(cell->name, name, namesz) == 0) { | 182 | if (strncmp(cell->name, name, namesz) == 0) { |
222 | afs_get_cell(cell); | 183 | afs_get_cell(cell); |
223 | goto found; | 184 | goto found; |
224 | } | 185 | } |
225 | } | 186 | } |
226 | cell = NULL; | 187 | cell = ERR_PTR(-ENOENT); |
227 | found: | 188 | found: |
228 | 189 | ; | |
229 | read_unlock(&afs_cells_lock); | ||
230 | |||
231 | if (cell) | ||
232 | ret = 0; | ||
233 | } else { | 190 | } else { |
234 | read_lock(&afs_cells_lock); | ||
235 | |||
236 | cell = afs_cell_root; | 191 | cell = afs_cell_root; |
237 | if (!cell) { | 192 | if (!cell) { |
238 | /* this should not happen unless user tries to mount | 193 | /* this should not happen unless user tries to mount |
@@ -241,37 +196,32 @@ int afs_cell_lookup(const char *name, unsigned namesz, struct afs_cell **_cell) | |||
241 | * ENOENT might be "more appropriate" but they happen | 196 | * ENOENT might be "more appropriate" but they happen |
242 | * for other reasons. | 197 | * for other reasons. |
243 | */ | 198 | */ |
244 | ret = -EDESTADDRREQ; | 199 | cell = ERR_PTR(-EDESTADDRREQ); |
245 | } else { | 200 | } else { |
246 | afs_get_cell(cell); | 201 | afs_get_cell(cell); |
247 | ret = 0; | ||
248 | } | 202 | } |
249 | 203 | ||
250 | read_unlock(&afs_cells_lock); | ||
251 | } | 204 | } |
252 | 205 | ||
253 | *_cell = cell; | 206 | read_unlock(&afs_cells_lock); |
254 | _leave(" = %d (%p)", ret, cell); | 207 | up_read(&afs_cells_sem); |
255 | return ret; | 208 | _leave(" = %p", cell); |
209 | return cell; | ||
256 | } | 210 | } |
257 | 211 | ||
258 | /* | 212 | /* |
259 | * try and get a cell record | 213 | * try and get a cell record |
260 | */ | 214 | */ |
261 | struct afs_cell *afs_get_cell_maybe(struct afs_cell **_cell) | 215 | struct afs_cell *afs_get_cell_maybe(struct afs_cell *cell) |
262 | { | 216 | { |
263 | struct afs_cell *cell; | ||
264 | |||
265 | write_lock(&afs_cells_lock); | 217 | write_lock(&afs_cells_lock); |
266 | 218 | ||
267 | cell = *_cell; | ||
268 | if (cell && !list_empty(&cell->link)) | 219 | if (cell && !list_empty(&cell->link)) |
269 | afs_get_cell(cell); | 220 | afs_get_cell(cell); |
270 | else | 221 | else |
271 | cell = NULL; | 222 | cell = NULL; |
272 | 223 | ||
273 | write_unlock(&afs_cells_lock); | 224 | write_unlock(&afs_cells_lock); |
274 | |||
275 | return cell; | 225 | return cell; |
276 | } | 226 | } |
277 | 227 | ||
@@ -285,8 +235,7 @@ void afs_put_cell(struct afs_cell *cell) | |||
285 | 235 | ||
286 | _enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name); | 236 | _enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name); |
287 | 237 | ||
288 | /* sanity check */ | 238 | ASSERTCMP(atomic_read(&cell->usage), >, 0); |
289 | BUG_ON(atomic_read(&cell->usage) <= 0); | ||
290 | 239 | ||
291 | /* to prevent a race, the decrement and the dequeue must be effectively | 240 | /* to prevent a race, the decrement and the dequeue must be effectively |
292 | * atomic */ | 241 | * atomic */ |
@@ -298,35 +247,49 @@ void afs_put_cell(struct afs_cell *cell) | |||
298 | return; | 247 | return; |
299 | } | 248 | } |
300 | 249 | ||
250 | ASSERT(list_empty(&cell->servers)); | ||
251 | ASSERT(list_empty(&cell->vl_list)); | ||
252 | |||
301 | write_unlock(&afs_cells_lock); | 253 | write_unlock(&afs_cells_lock); |
302 | 254 | ||
303 | BUG_ON(!list_empty(&cell->sv_list)); | 255 | wake_up(&afs_cells_freeable_wq); |
304 | BUG_ON(!list_empty(&cell->sv_graveyard)); | ||
305 | BUG_ON(!list_empty(&cell->vl_list)); | ||
306 | BUG_ON(!list_empty(&cell->vl_graveyard)); | ||
307 | 256 | ||
308 | _leave(" [unused]"); | 257 | _leave(" [unused]"); |
309 | } | 258 | } |
310 | 259 | ||
311 | /* | 260 | /* |
312 | * destroy a cell record | 261 | * destroy a cell record |
262 | * - must be called with the afs_cells_sem write-locked | ||
263 | * - cell->link should have been broken by the caller | ||
313 | */ | 264 | */ |
314 | static void afs_cell_destroy(struct afs_cell *cell) | 265 | static void afs_cell_destroy(struct afs_cell *cell) |
315 | { | 266 | { |
316 | _enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name); | 267 | _enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name); |
317 | 268 | ||
318 | /* to prevent a race, the decrement and the dequeue must be effectively | 269 | ASSERTCMP(atomic_read(&cell->usage), >=, 0); |
319 | * atomic */ | 270 | ASSERT(list_empty(&cell->link)); |
320 | write_lock(&afs_cells_lock); | ||
321 | 271 | ||
322 | /* sanity check */ | 272 | /* wait for everyone to stop using the cell */ |
323 | BUG_ON(atomic_read(&cell->usage) != 0); | 273 | if (atomic_read(&cell->usage) > 0) { |
274 | DECLARE_WAITQUEUE(myself, current); | ||
324 | 275 | ||
325 | list_del_init(&cell->link); | 276 | _debug("wait for cell %s", cell->name); |
277 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
278 | add_wait_queue(&afs_cells_freeable_wq, &myself); | ||
326 | 279 | ||
327 | write_unlock(&afs_cells_lock); | 280 | while (atomic_read(&cell->usage) > 0) { |
281 | schedule(); | ||
282 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
283 | } | ||
328 | 284 | ||
329 | down_write(&afs_cells_sem); | 285 | remove_wait_queue(&afs_cells_freeable_wq, &myself); |
286 | set_current_state(TASK_RUNNING); | ||
287 | } | ||
288 | |||
289 | _debug("cell dead"); | ||
290 | ASSERTCMP(atomic_read(&cell->usage), ==, 0); | ||
291 | ASSERT(list_empty(&cell->servers)); | ||
292 | ASSERT(list_empty(&cell->vl_list)); | ||
330 | 293 | ||
331 | afs_proc_cell_remove(cell); | 294 | afs_proc_cell_remove(cell); |
332 | 295 | ||
@@ -338,101 +301,25 @@ static void afs_cell_destroy(struct afs_cell *cell) | |||
338 | cachefs_relinquish_cookie(cell->cache, 0); | 301 | cachefs_relinquish_cookie(cell->cache, 0); |
339 | #endif | 302 | #endif |
340 | 303 | ||
341 | up_write(&afs_cells_sem); | ||
342 | |||
343 | BUG_ON(!list_empty(&cell->sv_list)); | ||
344 | BUG_ON(!list_empty(&cell->sv_graveyard)); | ||
345 | BUG_ON(!list_empty(&cell->vl_list)); | ||
346 | BUG_ON(!list_empty(&cell->vl_graveyard)); | ||
347 | |||
348 | /* finish cleaning up the cell */ | ||
349 | kfree(cell); | 304 | kfree(cell); |
350 | 305 | ||
351 | _leave(" [destroyed]"); | 306 | _leave(" [destroyed]"); |
352 | } | 307 | } |
353 | 308 | ||
354 | /* | 309 | /* |
355 | * lookup the server record corresponding to an Rx RPC peer | ||
356 | */ | ||
357 | int afs_server_find_by_peer(const struct rxrpc_peer *peer, | ||
358 | struct afs_server **_server) | ||
359 | { | ||
360 | struct afs_server *server; | ||
361 | struct afs_cell *cell; | ||
362 | |||
363 | _enter("%p{a=%08x},", peer, ntohl(peer->addr.s_addr)); | ||
364 | |||
365 | /* search the cell list */ | ||
366 | read_lock(&afs_cells_lock); | ||
367 | |||
368 | list_for_each_entry(cell, &afs_cells, link) { | ||
369 | |||
370 | _debug("? cell %s",cell->name); | ||
371 | |||
372 | write_lock(&cell->sv_lock); | ||
373 | |||
374 | /* check the active list */ | ||
375 | list_for_each_entry(server, &cell->sv_list, link) { | ||
376 | _debug("?? server %08x", ntohl(server->addr.s_addr)); | ||
377 | |||
378 | if (memcmp(&server->addr, &peer->addr, | ||
379 | sizeof(struct in_addr)) == 0) | ||
380 | goto found_server; | ||
381 | } | ||
382 | |||
383 | /* check the inactive list */ | ||
384 | spin_lock(&cell->sv_gylock); | ||
385 | list_for_each_entry(server, &cell->sv_graveyard, link) { | ||
386 | _debug("?? dead server %08x", | ||
387 | ntohl(server->addr.s_addr)); | ||
388 | |||
389 | if (memcmp(&server->addr, &peer->addr, | ||
390 | sizeof(struct in_addr)) == 0) | ||
391 | goto found_dead_server; | ||
392 | } | ||
393 | spin_unlock(&cell->sv_gylock); | ||
394 | |||
395 | write_unlock(&cell->sv_lock); | ||
396 | } | ||
397 | read_unlock(&afs_cells_lock); | ||
398 | |||
399 | _leave(" = -ENOENT"); | ||
400 | return -ENOENT; | ||
401 | |||
402 | /* we found it in the graveyard - resurrect it */ | ||
403 | found_dead_server: | ||
404 | list_move_tail(&server->link, &cell->sv_list); | ||
405 | afs_get_server(server); | ||
406 | afs_kafstimod_del_timer(&server->timeout); | ||
407 | spin_unlock(&cell->sv_gylock); | ||
408 | goto success; | ||
409 | |||
410 | /* we found it - increment its ref count and return it */ | ||
411 | found_server: | ||
412 | afs_get_server(server); | ||
413 | |||
414 | success: | ||
415 | write_unlock(&cell->sv_lock); | ||
416 | read_unlock(&afs_cells_lock); | ||
417 | |||
418 | *_server = server; | ||
419 | _leave(" = 0 (s=%p c=%p)", server, cell); | ||
420 | return 0; | ||
421 | } | ||
422 | |||
423 | /* | ||
424 | * purge in-memory cell database on module unload or afs_init() failure | 310 | * purge in-memory cell database on module unload or afs_init() failure |
425 | * - the timeout daemon is stopped before calling this | 311 | * - the timeout daemon is stopped before calling this |
426 | */ | 312 | */ |
427 | void afs_cell_purge(void) | 313 | void afs_cell_purge(void) |
428 | { | 314 | { |
429 | struct afs_vlocation *vlocation; | ||
430 | struct afs_cell *cell; | 315 | struct afs_cell *cell; |
431 | 316 | ||
432 | _enter(""); | 317 | _enter(""); |
433 | 318 | ||
434 | afs_put_cell(afs_cell_root); | 319 | afs_put_cell(afs_cell_root); |
435 | 320 | ||
321 | down_write(&afs_cells_sem); | ||
322 | |||
436 | while (!list_empty(&afs_cells)) { | 323 | while (!list_empty(&afs_cells)) { |
437 | cell = NULL; | 324 | cell = NULL; |
438 | 325 | ||
@@ -451,102 +338,11 @@ void afs_cell_purge(void) | |||
451 | _debug("PURGING CELL %s (%d)", | 338 | _debug("PURGING CELL %s (%d)", |
452 | cell->name, atomic_read(&cell->usage)); | 339 | cell->name, atomic_read(&cell->usage)); |
453 | 340 | ||
454 | BUG_ON(!list_empty(&cell->sv_list)); | ||
455 | BUG_ON(!list_empty(&cell->vl_list)); | ||
456 | |||
457 | /* purge the cell's VL graveyard list */ | ||
458 | _debug(" - clearing VL graveyard"); | ||
459 | |||
460 | spin_lock(&cell->vl_gylock); | ||
461 | |||
462 | while (!list_empty(&cell->vl_graveyard)) { | ||
463 | vlocation = list_entry(cell->vl_graveyard.next, | ||
464 | struct afs_vlocation, | ||
465 | link); | ||
466 | list_del_init(&vlocation->link); | ||
467 | |||
468 | afs_kafstimod_del_timer(&vlocation->timeout); | ||
469 | |||
470 | spin_unlock(&cell->vl_gylock); | ||
471 | |||
472 | afs_vlocation_do_timeout(vlocation); | ||
473 | /* TODO: race if move to use krxtimod instead | ||
474 | * of kafstimod */ | ||
475 | |||
476 | spin_lock(&cell->vl_gylock); | ||
477 | } | ||
478 | |||
479 | spin_unlock(&cell->vl_gylock); | ||
480 | |||
481 | /* purge the cell's server graveyard list */ | ||
482 | _debug(" - clearing server graveyard"); | ||
483 | |||
484 | spin_lock(&cell->sv_gylock); | ||
485 | |||
486 | while (!list_empty(&cell->sv_graveyard)) { | ||
487 | struct afs_server *server; | ||
488 | |||
489 | server = list_entry(cell->sv_graveyard.next, | ||
490 | struct afs_server, link); | ||
491 | list_del_init(&server->link); | ||
492 | |||
493 | afs_kafstimod_del_timer(&server->timeout); | ||
494 | |||
495 | spin_unlock(&cell->sv_gylock); | ||
496 | |||
497 | afs_server_do_timeout(server); | ||
498 | |||
499 | spin_lock(&cell->sv_gylock); | ||
500 | } | ||
501 | |||
502 | spin_unlock(&cell->sv_gylock); | ||
503 | |||
504 | /* now the cell should be left with no references */ | 341 | /* now the cell should be left with no references */ |
505 | afs_cell_destroy(cell); | 342 | afs_cell_destroy(cell); |
506 | } | 343 | } |
507 | } | 344 | } |
508 | 345 | ||
346 | up_write(&afs_cells_sem); | ||
509 | _leave(""); | 347 | _leave(""); |
510 | } | 348 | } |
511 | |||
512 | /* | ||
513 | * match a cell record obtained from the cache | ||
514 | */ | ||
515 | #ifdef AFS_CACHING_SUPPORT | ||
516 | static cachefs_match_val_t afs_cell_cache_match(void *target, | ||
517 | const void *entry) | ||
518 | { | ||
519 | const struct afs_cache_cell *ccell = entry; | ||
520 | struct afs_cell *cell = target; | ||
521 | |||
522 | _enter("{%s},{%s}", ccell->name, cell->name); | ||
523 | |||
524 | if (strncmp(ccell->name, cell->name, sizeof(ccell->name)) == 0) { | ||
525 | _leave(" = SUCCESS"); | ||
526 | return CACHEFS_MATCH_SUCCESS; | ||
527 | } | ||
528 | |||
529 | _leave(" = FAILED"); | ||
530 | return CACHEFS_MATCH_FAILED; | ||
531 | } | ||
532 | #endif | ||
533 | |||
534 | /* | ||
535 | * update a cell record in the cache | ||
536 | */ | ||
537 | #ifdef AFS_CACHING_SUPPORT | ||
538 | static void afs_cell_cache_update(void *source, void *entry) | ||
539 | { | ||
540 | struct afs_cache_cell *ccell = entry; | ||
541 | struct afs_cell *cell = source; | ||
542 | |||
543 | _enter("%p,%p", source, entry); | ||
544 | |||
545 | strncpy(ccell->name, cell->name, sizeof(ccell->name)); | ||
546 | |||
547 | memcpy(ccell->vl_servers, | ||
548 | cell->vl_addrs, | ||
549 | min(sizeof(ccell->vl_servers), sizeof(cell->vl_addrs))); | ||
550 | |||
551 | } | ||
552 | #endif | ||
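Note that the hunks above also change the calling convention for the cell-lookup path: instead of returning an int status and passing the cell back through a **_cell parameter, afs_cell_lookup() now hands back the cell pointer itself, using the kernel's ERR_PTR()/IS_ERR() convention for the -ENOENT and -EDESTADDRREQ failures. A minimal caller sketch, assuming the new prototype is struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz) (the prototype itself is not visible in these hunks):

	struct afs_cell *cell;

	cell = afs_cell_lookup(name, namesz);	/* was: afs_cell_lookup(name, namesz, &cell) */
	if (IS_ERR(cell))
		return PTR_ERR(cell);		/* -ENOENT, -EDESTADDRREQ, ... */

	/* ... use the cell; the lookup took a reference ... */
	afs_put_cell(cell);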
diff --git a/fs/afs/cell.h b/fs/afs/cell.h
deleted file mode 100644
index c135b00c6c7..00000000000
--- a/fs/afs/cell.h
+++ /dev/null
@@ -1,70 +0,0 @@ | |||
1 | /* AFS cell record | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef AFS_CELL_H | ||
13 | #define AFS_CELL_H | ||
14 | |||
15 | #include "types.h" | ||
16 | #include "cache.h" | ||
17 | |||
18 | #define AFS_CELL_MAX_ADDRS 15 | ||
19 | |||
20 | extern volatile int afs_cells_being_purged; /* T when cells are being purged by rmmod */ | ||
21 | |||
22 | /* | ||
23 | * entry in the cached cell catalogue | ||
24 | */ | ||
25 | struct afs_cache_cell { | ||
26 | char name[64]; /* cell name (padded with NULs) */ | ||
27 | struct in_addr vl_servers[15]; /* cached cell VL servers */ | ||
28 | }; | ||
29 | |||
30 | /* | ||
31 | * AFS cell record | ||
32 | */ | ||
33 | struct afs_cell { | ||
34 | atomic_t usage; | ||
35 | struct list_head link; /* main cell list link */ | ||
36 | struct list_head proc_link; /* /proc cell list link */ | ||
37 | struct proc_dir_entry *proc_dir; /* /proc dir for this cell */ | ||
38 | #ifdef AFS_CACHING_SUPPORT | ||
39 | struct cachefs_cookie *cache; /* caching cookie */ | ||
40 | #endif | ||
41 | |||
42 | /* server record management */ | ||
43 | rwlock_t sv_lock; /* active server list lock */ | ||
44 | struct list_head sv_list; /* active server list */ | ||
45 | struct list_head sv_graveyard; /* inactive server list */ | ||
46 | spinlock_t sv_gylock; /* inactive server list lock */ | ||
47 | |||
48 | /* volume location record management */ | ||
49 | struct rw_semaphore vl_sem; /* volume management serialisation semaphore */ | ||
50 | struct list_head vl_list; /* cell's active VL record list */ | ||
51 | struct list_head vl_graveyard; /* cell's inactive VL record list */ | ||
52 | spinlock_t vl_gylock; /* graveyard lock */ | ||
53 | unsigned short vl_naddrs; /* number of VL servers in addr list */ | ||
54 | unsigned short vl_curr_svix; /* current server index */ | ||
55 | struct in_addr vl_addrs[AFS_CELL_MAX_ADDRS]; /* cell VL server addresses */ | ||
56 | |||
57 | char name[0]; /* cell name - must go last */ | ||
58 | }; | ||
59 | |||
60 | extern int afs_cell_init(char *); | ||
61 | extern int afs_cell_create(const char *, char *, struct afs_cell **); | ||
62 | extern int afs_cell_lookup(const char *, unsigned, struct afs_cell **); | ||
63 | |||
64 | #define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0) | ||
65 | |||
66 | extern struct afs_cell *afs_get_cell_maybe(struct afs_cell **); | ||
67 | extern void afs_put_cell(struct afs_cell *); | ||
68 | extern void afs_cell_purge(void); | ||
69 | |||
70 | #endif /* AFS_CELL_H */ | ||
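With fs/afs/cell.h deleted, the cell structure and these prototypes move into fs/afs/internal.h, which is not shown in this section. Going by the new definitions in cell.c above, the replacement declarations would look roughly like the sketch below; afs_cell_create() is assumed to follow the same pointer-returning convention, since its new prototype does not appear in these hunks:

	/* approximate internal.h declarations after this patch (sketch only) */
	#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while (0)

	extern int afs_cell_init(char *);
	extern struct afs_cell *afs_cell_create(const char *, char *);	/* assumed */
	extern struct afs_cell *afs_cell_lookup(const char *, unsigned);
	extern struct afs_cell *afs_get_cell_maybe(struct afs_cell *);
	extern void afs_put_cell(struct afs_cell *);
	extern void afs_cell_purge(void);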
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 3f4585765cb..c7141175391 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -12,623 +12,316 @@ | |||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/completion.h> | 15 | #include <linux/ip.h> |
16 | #include "server.h" | ||
17 | #include "cell.h" | ||
18 | #include "transport.h" | ||
19 | #include <rxrpc/rxrpc.h> | ||
20 | #include <rxrpc/transport.h> | ||
21 | #include <rxrpc/connection.h> | ||
22 | #include <rxrpc/call.h> | ||
23 | #include "cmservice.h" | ||
24 | #include "internal.h" | 16 | #include "internal.h" |
17 | #include "afs_cm.h" | ||
25 | 18 | ||
26 | static unsigned afscm_usage; /* AFS cache manager usage count */ | 19 | struct workqueue_struct *afs_cm_workqueue; |
27 | static struct rw_semaphore afscm_sem; /* AFS cache manager start/stop semaphore */ | ||
28 | |||
29 | static int afscm_new_call(struct rxrpc_call *call); | ||
30 | static void afscm_attention(struct rxrpc_call *call); | ||
31 | static void afscm_error(struct rxrpc_call *call); | ||
32 | static void afscm_aemap(struct rxrpc_call *call); | ||
33 | |||
34 | static void _SRXAFSCM_CallBack(struct rxrpc_call *call); | ||
35 | static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call); | ||
36 | static void _SRXAFSCM_Probe(struct rxrpc_call *call); | ||
37 | |||
38 | typedef void (*_SRXAFSCM_xxxx_t)(struct rxrpc_call *call); | ||
39 | |||
40 | static const struct rxrpc_operation AFSCM_ops[] = { | ||
41 | { | ||
42 | .id = 204, | ||
43 | .asize = RXRPC_APP_MARK_EOF, | ||
44 | .name = "CallBack", | ||
45 | .user = _SRXAFSCM_CallBack, | ||
46 | }, | ||
47 | { | ||
48 | .id = 205, | ||
49 | .asize = RXRPC_APP_MARK_EOF, | ||
50 | .name = "InitCallBackState", | ||
51 | .user = _SRXAFSCM_InitCallBackState, | ||
52 | }, | ||
53 | { | ||
54 | .id = 206, | ||
55 | .asize = RXRPC_APP_MARK_EOF, | ||
56 | .name = "Probe", | ||
57 | .user = _SRXAFSCM_Probe, | ||
58 | }, | ||
59 | #if 0 | ||
60 | { | ||
61 | .id = 207, | ||
62 | .asize = RXRPC_APP_MARK_EOF, | ||
63 | .name = "GetLock", | ||
64 | .user = _SRXAFSCM_GetLock, | ||
65 | }, | ||
66 | { | ||
67 | .id = 208, | ||
68 | .asize = RXRPC_APP_MARK_EOF, | ||
69 | .name = "GetCE", | ||
70 | .user = _SRXAFSCM_GetCE, | ||
71 | }, | ||
72 | { | ||
73 | .id = 209, | ||
74 | .asize = RXRPC_APP_MARK_EOF, | ||
75 | .name = "GetXStatsVersion", | ||
76 | .user = _SRXAFSCM_GetXStatsVersion, | ||
77 | }, | ||
78 | { | ||
79 | .id = 210, | ||
80 | .asize = RXRPC_APP_MARK_EOF, | ||
81 | .name = "GetXStats", | ||
82 | .user = _SRXAFSCM_GetXStats, | ||
83 | } | ||
84 | #endif | ||
85 | }; | ||
86 | 20 | ||
87 | static struct rxrpc_service AFSCM_service = { | 21 | static int afs_deliver_cb_init_call_back_state(struct afs_call *, |
88 | .name = "AFS/CM", | 22 | struct sk_buff *, bool); |
89 | .owner = THIS_MODULE, | 23 | static int afs_deliver_cb_probe(struct afs_call *, struct sk_buff *, bool); |
90 | .link = LIST_HEAD_INIT(AFSCM_service.link), | 24 | static int afs_deliver_cb_callback(struct afs_call *, struct sk_buff *, bool); |
91 | .new_call = afscm_new_call, | 25 | static void afs_cm_destructor(struct afs_call *); |
92 | .service_id = 1, | ||
93 | .attn_func = afscm_attention, | ||
94 | .error_func = afscm_error, | ||
95 | .aemap_func = afscm_aemap, | ||
96 | .ops_begin = &AFSCM_ops[0], | ||
97 | .ops_end = &AFSCM_ops[ARRAY_SIZE(AFSCM_ops)], | ||
98 | }; | ||
99 | |||
100 | static DECLARE_COMPLETION(kafscmd_alive); | ||
101 | static DECLARE_COMPLETION(kafscmd_dead); | ||
102 | static DECLARE_WAIT_QUEUE_HEAD(kafscmd_sleepq); | ||
103 | static LIST_HEAD(kafscmd_attention_list); | ||
104 | static LIST_HEAD(afscm_calls); | ||
105 | static DEFINE_SPINLOCK(afscm_calls_lock); | ||
106 | static DEFINE_SPINLOCK(kafscmd_attention_lock); | ||
107 | static int kafscmd_die; | ||
108 | 26 | ||
109 | /* | 27 | /* |
110 | * AFS Cache Manager kernel thread | 28 | * CB.CallBack operation type |
111 | */ | 29 | */ |
112 | static int kafscmd(void *arg) | 30 | static const struct afs_call_type afs_SRXCBCallBack = { |
113 | { | 31 | .deliver = afs_deliver_cb_callback, |
114 | DECLARE_WAITQUEUE(myself, current); | 32 | .abort_to_error = afs_abort_to_error, |
115 | 33 | .destructor = afs_cm_destructor, | |
116 | struct rxrpc_call *call; | 34 | }; |
117 | _SRXAFSCM_xxxx_t func; | ||
118 | int die; | ||
119 | |||
120 | printk(KERN_INFO "kAFS: Started kafscmd %d\n", current->pid); | ||
121 | |||
122 | daemonize("kafscmd"); | ||
123 | |||
124 | complete(&kafscmd_alive); | ||
125 | |||
126 | /* loop around looking for things to attend to */ | ||
127 | do { | ||
128 | if (list_empty(&kafscmd_attention_list)) { | ||
129 | set_current_state(TASK_INTERRUPTIBLE); | ||
130 | add_wait_queue(&kafscmd_sleepq, &myself); | ||
131 | |||
132 | for (;;) { | ||
133 | set_current_state(TASK_INTERRUPTIBLE); | ||
134 | if (!list_empty(&kafscmd_attention_list) || | ||
135 | signal_pending(current) || | ||
136 | kafscmd_die) | ||
137 | break; | ||
138 | |||
139 | schedule(); | ||
140 | } | ||
141 | |||
142 | remove_wait_queue(&kafscmd_sleepq, &myself); | ||
143 | set_current_state(TASK_RUNNING); | ||
144 | } | ||
145 | |||
146 | die = kafscmd_die; | ||
147 | |||
148 | /* dequeue the next call requiring attention */ | ||
149 | call = NULL; | ||
150 | spin_lock(&kafscmd_attention_lock); | ||
151 | |||
152 | if (!list_empty(&kafscmd_attention_list)) { | ||
153 | call = list_entry(kafscmd_attention_list.next, | ||
154 | struct rxrpc_call, | ||
155 | app_attn_link); | ||
156 | list_del_init(&call->app_attn_link); | ||
157 | die = 0; | ||
158 | } | ||
159 | |||
160 | spin_unlock(&kafscmd_attention_lock); | ||
161 | |||
162 | if (call) { | ||
163 | /* act upon it */ | ||
164 | _debug("@@@ Begin Attend Call %p", call); | ||
165 | |||
166 | func = call->app_user; | ||
167 | if (func) | ||
168 | func(call); | ||
169 | |||
170 | rxrpc_put_call(call); | ||
171 | |||
172 | _debug("@@@ End Attend Call %p", call); | ||
173 | } | ||
174 | |||
175 | } while(!die); | ||
176 | |||
177 | /* and that's all */ | ||
178 | complete_and_exit(&kafscmd_dead, 0); | ||
179 | } | ||
180 | 35 | ||
181 | /* | 36 | /* |
182 | * handle a call coming in to the cache manager | 37 | * CB.InitCallBackState operation type |
183 | * - if I want to keep the call, I must increment its usage count | ||
184 | * - the return value will be negated and passed back in an abort packet if | ||
185 | * non-zero | ||
186 | * - serialised by virtue of there only being one krxiod | ||
187 | */ | 38 | */ |
188 | static int afscm_new_call(struct rxrpc_call *call) | 39 | static const struct afs_call_type afs_SRXCBInitCallBackState = { |
189 | { | 40 | .deliver = afs_deliver_cb_init_call_back_state, |
190 | _enter("%p{cid=%u u=%d}", | 41 | .abort_to_error = afs_abort_to_error, |
191 | call, ntohl(call->call_id), atomic_read(&call->usage)); | 42 | .destructor = afs_cm_destructor, |
192 | 43 | }; | |
193 | rxrpc_get_call(call); | ||
194 | |||
195 | /* add to my current call list */ | ||
196 | spin_lock(&afscm_calls_lock); | ||
197 | list_add(&call->app_link,&afscm_calls); | ||
198 | spin_unlock(&afscm_calls_lock); | ||
199 | |||
200 | _leave(" = 0"); | ||
201 | return 0; | ||
202 | } | ||
203 | 44 | ||
204 | /* | 45 | /* |
205 | * queue on the kafscmd queue for attention | 46 | * CB.Probe operation type |
206 | */ | 47 | */ |
207 | static void afscm_attention(struct rxrpc_call *call) | 48 | static const struct afs_call_type afs_SRXCBProbe = { |
208 | { | 49 | .deliver = afs_deliver_cb_probe, |
209 | _enter("%p{cid=%u u=%d}", | 50 | .abort_to_error = afs_abort_to_error, |
210 | call, ntohl(call->call_id), atomic_read(&call->usage)); | 51 | .destructor = afs_cm_destructor, |
211 | 52 | }; | |
212 | spin_lock(&kafscmd_attention_lock); | ||
213 | |||
214 | if (list_empty(&call->app_attn_link)) { | ||
215 | list_add_tail(&call->app_attn_link, &kafscmd_attention_list); | ||
216 | rxrpc_get_call(call); | ||
217 | } | ||
218 | |||
219 | spin_unlock(&kafscmd_attention_lock); | ||
220 | |||
221 | wake_up(&kafscmd_sleepq); | ||
222 | |||
223 | _leave(" {u=%d}", atomic_read(&call->usage)); | ||
224 | } | ||
225 | 53 | ||
226 | /* | 54 | /* |
227 | * handle my call being aborted | 55 | * route an incoming cache manager call |
228 | * - clean up, dequeue and put my ref to the call | 56 | * - return T if supported, F if not |
229 | */ | 57 | */ |
230 | static void afscm_error(struct rxrpc_call *call) | 58 | bool afs_cm_incoming_call(struct afs_call *call) |
231 | { | 59 | { |
232 | int removed; | 60 | u32 operation_id = ntohl(call->operation_ID); |
233 | 61 | ||
234 | _enter("%p{est=%s ac=%u er=%d}", | 62 | _enter("{CB.OP %u}", operation_id); |
235 | call, | 63 | |
236 | rxrpc_call_error_states[call->app_err_state], | 64 | switch (operation_id) { |
237 | call->app_abort_code, | 65 | case CBCallBack: |
238 | call->app_errno); | 66 | call->type = &afs_SRXCBCallBack; |
239 | 67 | return true; | |
240 | spin_lock(&kafscmd_attention_lock); | 68 | case CBInitCallBackState: |
241 | 69 | call->type = &afs_SRXCBInitCallBackState; | |
242 | if (list_empty(&call->app_attn_link)) { | 70 | return true; |
243 | list_add_tail(&call->app_attn_link, &kafscmd_attention_list); | 71 | case CBProbe: |
244 | rxrpc_get_call(call); | 72 | call->type = &afs_SRXCBProbe; |
245 | } | 73 | return true; |
246 | 74 | default: | |
247 | spin_unlock(&kafscmd_attention_lock); | 75 | return false; |
248 | |||
249 | removed = 0; | ||
250 | spin_lock(&afscm_calls_lock); | ||
251 | if (!list_empty(&call->app_link)) { | ||
252 | list_del_init(&call->app_link); | ||
253 | removed = 1; | ||
254 | } | 76 | } |
255 | spin_unlock(&afscm_calls_lock); | ||
256 | |||
257 | if (removed) | ||
258 | rxrpc_put_call(call); | ||
259 | |||
260 | wake_up(&kafscmd_sleepq); | ||
261 | |||
262 | _leave(""); | ||
263 | } | 77 | } |
264 | 78 | ||
265 | /* | 79 | /* |
266 | * map afs abort codes to/from Linux error codes | 80 | * clean up a cache manager call |
267 | * - called with call->lock held | ||
268 | */ | 81 | */ |
269 | static void afscm_aemap(struct rxrpc_call *call) | 82 | static void afs_cm_destructor(struct afs_call *call) |
270 | { | 83 | { |
271 | switch (call->app_err_state) { | 84 | _enter(""); |
272 | case RXRPC_ESTATE_LOCAL_ABORT: | 85 | |
273 | call->app_abort_code = -call->app_errno; | 86 | afs_put_server(call->server); |
274 | break; | 87 | call->server = NULL; |
275 | case RXRPC_ESTATE_PEER_ABORT: | 88 | kfree(call->buffer); |
276 | call->app_errno = -ECONNABORTED; | 89 | call->buffer = NULL; |
277 | break; | ||
278 | default: | ||
279 | break; | ||
280 | } | ||
281 | } | 90 | } |
282 | 91 | ||
283 | /* | 92 | /* |
284 | * start the cache manager service if not already started | 93 | * allow the fileserver to see if the cache manager is still alive |
285 | */ | 94 | */ |
286 | int afscm_start(void) | 95 | static void SRXAFSCB_CallBack(struct work_struct *work) |
287 | { | 96 | { |
288 | int ret; | 97 | struct afs_call *call = container_of(work, struct afs_call, work); |
289 | |||
290 | down_write(&afscm_sem); | ||
291 | if (!afscm_usage) { | ||
292 | ret = kernel_thread(kafscmd, NULL, 0); | ||
293 | if (ret < 0) | ||
294 | goto out; | ||
295 | |||
296 | wait_for_completion(&kafscmd_alive); | ||
297 | |||
298 | ret = rxrpc_add_service(afs_transport, &AFSCM_service); | ||
299 | if (ret < 0) | ||
300 | goto kill; | ||
301 | 98 | ||
302 | afs_kafstimod_add_timer(&afs_mntpt_expiry_timer, | 99 | _enter(""); |
303 | afs_mntpt_expiry_timeout * HZ); | ||
304 | } | ||
305 | |||
306 | afscm_usage++; | ||
307 | up_write(&afscm_sem); | ||
308 | |||
309 | return 0; | ||
310 | 100 | ||
311 | kill: | 101 | /* be sure to send the reply *before* attempting to spam the AFS server |
312 | kafscmd_die = 1; | 102 | * with FSFetchStatus requests on the vnodes with broken callbacks lest |
313 | wake_up(&kafscmd_sleepq); | 103 | * the AFS server get into a vicious cycle of trying to break further |
314 | wait_for_completion(&kafscmd_dead); | 104 | * callbacks because it hadn't received completion of the CBCallBack op |
105 | * yet */ | ||
106 | afs_send_empty_reply(call); | ||
315 | 107 | ||
316 | out: | 108 | afs_break_callbacks(call->server, call->count, call->request); |
317 | up_write(&afscm_sem); | 109 | _leave(""); |
318 | return ret; | ||
319 | } | 110 | } |
320 | 111 | ||
321 | /* | 112 | /* |
322 | * stop the cache manager service | 113 | * deliver request data to a CB.CallBack call |
323 | */ | 114 | */ |
324 | void afscm_stop(void) | 115 | static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, |
116 | bool last) | ||
325 | { | 117 | { |
326 | struct rxrpc_call *call; | 118 | struct afs_callback *cb; |
327 | 119 | struct afs_server *server; | |
328 | down_write(&afscm_sem); | 120 | struct in_addr addr; |
329 | 121 | __be32 *bp; | |
330 | BUG_ON(afscm_usage == 0); | 122 | u32 tmp; |
331 | afscm_usage--; | 123 | int ret, loop; |
332 | 124 | ||
333 | if (afscm_usage == 0) { | 125 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); |
334 | /* don't want more incoming calls */ | 126 | |
335 | rxrpc_del_service(afs_transport, &AFSCM_service); | 127 | switch (call->unmarshall) { |
336 | 128 | case 0: | |
337 | /* abort any calls I've still got open (the afscm_error() will | 129 | call->offset = 0; |
338 | * dequeue them) */ | 130 | call->unmarshall++; |
339 | spin_lock(&afscm_calls_lock); | 131 | |
340 | while (!list_empty(&afscm_calls)) { | 132 | /* extract the FID array and its count in two steps */ |
341 | call = list_entry(afscm_calls.next, | 133 | case 1: |
342 | struct rxrpc_call, | 134 | _debug("extract FID count"); |
343 | app_link); | 135 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); |
344 | 136 | switch (ret) { | |
345 | list_del_init(&call->app_link); | 137 | case 0: break; |
346 | rxrpc_get_call(call); | 138 | case -EAGAIN: return 0; |
347 | spin_unlock(&afscm_calls_lock); | 139 | default: return ret; |
348 | |||
349 | rxrpc_call_abort(call, -ESRCH); /* abort, dequeue and | ||
350 | * put */ | ||
351 | |||
352 | _debug("nuking active call %08x.%d", | ||
353 | ntohl(call->conn->conn_id), | ||
354 | ntohl(call->call_id)); | ||
355 | rxrpc_put_call(call); | ||
356 | rxrpc_put_call(call); | ||
357 | |||
358 | spin_lock(&afscm_calls_lock); | ||
359 | } | 140 | } |
360 | spin_unlock(&afscm_calls_lock); | ||
361 | |||
362 | /* get rid of my daemon */ | ||
363 | kafscmd_die = 1; | ||
364 | wake_up(&kafscmd_sleepq); | ||
365 | wait_for_completion(&kafscmd_dead); | ||
366 | |||
367 | /* dispose of any calls waiting for attention */ | ||
368 | spin_lock(&kafscmd_attention_lock); | ||
369 | while (!list_empty(&kafscmd_attention_list)) { | ||
370 | call = list_entry(kafscmd_attention_list.next, | ||
371 | struct rxrpc_call, | ||
372 | app_attn_link); | ||
373 | |||
374 | list_del_init(&call->app_attn_link); | ||
375 | spin_unlock(&kafscmd_attention_lock); | ||
376 | 141 | ||
377 | rxrpc_put_call(call); | 142 | call->count = ntohl(call->tmp); |
378 | 143 | _debug("FID count: %u", call->count); | |
379 | spin_lock(&kafscmd_attention_lock); | 144 | if (call->count > AFSCBMAX) |
145 | return -EBADMSG; | ||
146 | |||
147 | call->buffer = kmalloc(call->count * 3 * 4, GFP_KERNEL); | ||
148 | if (!call->buffer) | ||
149 | return -ENOMEM; | ||
150 | call->offset = 0; | ||
151 | call->unmarshall++; | ||
152 | |||
153 | case 2: | ||
154 | _debug("extract FID array"); | ||
155 | ret = afs_extract_data(call, skb, last, call->buffer, | ||
156 | call->count * 3 * 4); | ||
157 | switch (ret) { | ||
158 | case 0: break; | ||
159 | case -EAGAIN: return 0; | ||
160 | default: return ret; | ||
380 | } | 161 | } |
381 | spin_unlock(&kafscmd_attention_lock); | ||
382 | |||
383 | afs_kafstimod_del_timer(&afs_mntpt_expiry_timer); | ||
384 | } | ||
385 | |||
386 | up_write(&afscm_sem); | ||
387 | } | ||
388 | 162 | ||
389 | /* | 163 | _debug("unmarshall FID array"); |
390 | * handle the fileserver breaking a set of callbacks | 164 | call->request = kcalloc(call->count, |
391 | */ | 165 | sizeof(struct afs_callback), |
392 | static void _SRXAFSCM_CallBack(struct rxrpc_call *call) | 166 | GFP_KERNEL); |
393 | { | 167 | if (!call->request) |
394 | struct afs_server *server; | 168 | return -ENOMEM; |
395 | size_t count, qty, tmp; | 169 | |
396 | int ret = 0, removed; | 170 | cb = call->request; |
397 | 171 | bp = call->buffer; | |
398 | _enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]); | 172 | for (loop = call->count; loop > 0; loop--, cb++) { |
399 | 173 | cb->fid.vid = ntohl(*bp++); | |
400 | server = afs_server_get_from_peer(call->conn->peer); | 174 | cb->fid.vnode = ntohl(*bp++); |
401 | 175 | cb->fid.unique = ntohl(*bp++); | |
402 | switch (call->app_call_state) { | 176 | cb->type = AFSCM_CB_UNTYPED; |
403 | /* we've received the last packet | ||
404 | * - drain all the data from the call and send the reply | ||
405 | */ | ||
406 | case RXRPC_CSTATE_SRVR_GOT_ARGS: | ||
407 | ret = -EBADMSG; | ||
408 | qty = call->app_ready_qty; | ||
409 | if (qty < 8 || qty > 50 * (6 * 4) + 8) | ||
410 | break; | ||
411 | |||
412 | { | ||
413 | struct afs_callback *cb, *pcb; | ||
414 | int loop; | ||
415 | __be32 *fp, *bp; | ||
416 | |||
417 | fp = rxrpc_call_alloc_scratch(call, qty); | ||
418 | |||
419 | /* drag the entire argument block out to the scratch | ||
420 | * space */ | ||
421 | ret = rxrpc_call_read_data(call, fp, qty, 0); | ||
422 | if (ret < 0) | ||
423 | break; | ||
424 | |||
425 | /* and unmarshall the parameter block */ | ||
426 | ret = -EBADMSG; | ||
427 | count = ntohl(*fp++); | ||
428 | if (count>AFSCBMAX || | ||
429 | (count * (3 * 4) + 8 != qty && | ||
430 | count * (6 * 4) + 8 != qty)) | ||
431 | break; | ||
432 | |||
433 | bp = fp + count*3; | ||
434 | tmp = ntohl(*bp++); | ||
435 | if (tmp > 0 && tmp != count) | ||
436 | break; | ||
437 | if (tmp == 0) | ||
438 | bp = NULL; | ||
439 | |||
440 | pcb = cb = rxrpc_call_alloc_scratch_s( | ||
441 | call, struct afs_callback); | ||
442 | |||
443 | for (loop = count - 1; loop >= 0; loop--) { | ||
444 | pcb->fid.vid = ntohl(*fp++); | ||
445 | pcb->fid.vnode = ntohl(*fp++); | ||
446 | pcb->fid.unique = ntohl(*fp++); | ||
447 | if (bp) { | ||
448 | pcb->version = ntohl(*bp++); | ||
449 | pcb->expiry = ntohl(*bp++); | ||
450 | pcb->type = ntohl(*bp++); | ||
451 | } else { | ||
452 | pcb->version = 0; | ||
453 | pcb->expiry = 0; | ||
454 | pcb->type = AFSCM_CB_UNTYPED; | ||
455 | } | ||
456 | pcb++; | ||
457 | } | ||
458 | |||
459 | /* invoke the actual service routine */ | ||
460 | ret = SRXAFSCM_CallBack(server, count, cb); | ||
461 | if (ret < 0) | ||
462 | break; | ||
463 | } | 177 | } |
464 | 178 | ||
465 | /* send the reply */ | 179 | call->offset = 0; |
466 | ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET, | 180 | call->unmarshall++; |
467 | GFP_KERNEL, 0, &count); | 181 | |
468 | if (ret < 0) | 182 | /* extract the callback array and its count in two steps */ |
469 | break; | 183 | case 3: |
470 | break; | 184 | _debug("extract CB count"); |
185 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); | ||
186 | switch (ret) { | ||
187 | case 0: break; | ||
188 | case -EAGAIN: return 0; | ||
189 | default: return ret; | ||
190 | } | ||
471 | 191 | ||
472 | /* operation complete */ | 192 | tmp = ntohl(call->tmp); |
473 | case RXRPC_CSTATE_COMPLETE: | 193 | _debug("CB count: %u", tmp); |
474 | call->app_user = NULL; | 194 | if (tmp != call->count && tmp != 0) |
475 | removed = 0; | 195 | return -EBADMSG; |
476 | spin_lock(&afscm_calls_lock); | 196 | call->offset = 0; |
477 | if (!list_empty(&call->app_link)) { | 197 | call->unmarshall++; |
478 | list_del_init(&call->app_link); | 198 | if (tmp == 0) |
479 | removed = 1; | 199 | goto empty_cb_array; |
200 | |||
201 | case 4: | ||
202 | _debug("extract CB array"); | ||
203 | ret = afs_extract_data(call, skb, last, call->request, | ||
204 | call->count * 3 * 4); | ||
205 | switch (ret) { | ||
206 | case 0: break; | ||
207 | case -EAGAIN: return 0; | ||
208 | default: return ret; | ||
480 | } | 209 | } |
481 | spin_unlock(&afscm_calls_lock); | ||
482 | 210 | ||
483 | if (removed) | 211 | _debug("unmarshall CB array"); |
484 | rxrpc_put_call(call); | 212 | cb = call->request; |
485 | break; | 213 | bp = call->buffer; |
214 | for (loop = call->count; loop > 0; loop--, cb++) { | ||
215 | cb->version = ntohl(*bp++); | ||
216 | cb->expiry = ntohl(*bp++); | ||
217 | cb->type = ntohl(*bp++); | ||
218 | } | ||
486 | 219 | ||
487 | /* operation terminated on error */ | 220 | empty_cb_array: |
488 | case RXRPC_CSTATE_ERROR: | 221 | call->offset = 0; |
489 | call->app_user = NULL; | 222 | call->unmarshall++; |
490 | break; | ||
491 | 223 | ||
492 | default: | 224 | case 5: |
225 | _debug("trailer"); | ||
226 | if (skb->len != 0) | ||
227 | return -EBADMSG; | ||
493 | break; | 228 | break; |
494 | } | 229 | } |
495 | 230 | ||
496 | if (ret < 0) | 231 | if (!last) |
497 | rxrpc_call_abort(call, ret); | 232 | return 0; |
498 | 233 | ||
499 | afs_put_server(server); | 234 | call->state = AFS_CALL_REPLYING; |
500 | 235 | ||
501 | _leave(" = %d", ret); | 236 | /* we'll need the file server record as that tells us which set of |
237 | * vnodes to operate upon */ | ||
238 | memcpy(&addr, &ip_hdr(skb)->saddr, 4); | ||
239 | server = afs_find_server(&addr); | ||
240 | if (!server) | ||
241 | return -ENOTCONN; | ||
242 | call->server = server; | ||
243 | |||
244 | INIT_WORK(&call->work, SRXAFSCB_CallBack); | ||
245 | schedule_work(&call->work); | ||
246 | return 0; | ||
502 | } | 247 | } |
503 | 248 | ||
504 | /* | 249 | /* |
505 | * handle the fileserver asking us to initialise our callback state | 250 | * allow the fileserver to request callback state (re-)initialisation |
506 | */ | 251 | */ |
507 | static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call) | 252 | static void SRXAFSCB_InitCallBackState(struct work_struct *work) |
508 | { | 253 | { |
509 | struct afs_server *server; | 254 | struct afs_call *call = container_of(work, struct afs_call, work); |
510 | size_t count; | ||
511 | int ret = 0, removed; | ||
512 | |||
513 | _enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]); | ||
514 | |||
515 | server = afs_server_get_from_peer(call->conn->peer); | ||
516 | |||
517 | switch (call->app_call_state) { | ||
518 | /* we've received the last packet - drain all the data from the | ||
519 | * call */ | ||
520 | case RXRPC_CSTATE_SRVR_GOT_ARGS: | ||
521 | /* shouldn't be any args */ | ||
522 | ret = -EBADMSG; | ||
523 | break; | ||
524 | 255 | ||
525 | /* send the reply when asked for it */ | 256 | _enter("{%p}", call->server); |
526 | case RXRPC_CSTATE_SRVR_SND_REPLY: | ||
527 | /* invoke the actual service routine */ | ||
528 | ret = SRXAFSCM_InitCallBackState(server); | ||
529 | if (ret < 0) | ||
530 | break; | ||
531 | |||
532 | ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET, | ||
533 | GFP_KERNEL, 0, &count); | ||
534 | if (ret < 0) | ||
535 | break; | ||
536 | break; | ||
537 | 257 | ||
538 | /* operation complete */ | 258 | afs_init_callback_state(call->server); |
539 | case RXRPC_CSTATE_COMPLETE: | 259 | afs_send_empty_reply(call); |
540 | call->app_user = NULL; | 260 | _leave(""); |
541 | removed = 0; | ||
542 | spin_lock(&afscm_calls_lock); | ||
543 | if (!list_empty(&call->app_link)) { | ||
544 | list_del_init(&call->app_link); | ||
545 | removed = 1; | ||
546 | } | ||
547 | spin_unlock(&afscm_calls_lock); | ||
548 | |||
549 | if (removed) | ||
550 | rxrpc_put_call(call); | ||
551 | break; | ||
552 | |||
553 | /* operation terminated on error */ | ||
554 | case RXRPC_CSTATE_ERROR: | ||
555 | call->app_user = NULL; | ||
556 | break; | ||
557 | |||
558 | default: | ||
559 | break; | ||
560 | } | ||
561 | |||
562 | if (ret < 0) | ||
563 | rxrpc_call_abort(call, ret); | ||
564 | |||
565 | afs_put_server(server); | ||
566 | |||
567 | _leave(" = %d", ret); | ||
568 | } | 261 | } |
569 | 262 | ||
570 | /* | 263 | /* |
571 | * handle a probe from a fileserver | 264 | * deliver request data to a CB.InitCallBackState call |
572 | */ | 265 | */ |
573 | static void _SRXAFSCM_Probe(struct rxrpc_call *call) | 266 | static int afs_deliver_cb_init_call_back_state(struct afs_call *call, |
267 | struct sk_buff *skb, | ||
268 | bool last) | ||
574 | { | 269 | { |
575 | struct afs_server *server; | 270 | struct afs_server *server; |
576 | size_t count; | 271 | struct in_addr addr; |
577 | int ret = 0, removed; | ||
578 | 272 | ||
579 | _enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]); | 273 | _enter(",{%u},%d", skb->len, last); |
580 | 274 | ||
581 | server = afs_server_get_from_peer(call->conn->peer); | 275 | if (skb->len > 0) |
276 | return -EBADMSG; | ||
277 | if (!last) | ||
278 | return 0; | ||
582 | 279 | ||
583 | switch (call->app_call_state) { | 280 | /* no unmarshalling required */ |
584 | /* we've received the last packet - drain all the data from the | 281 | call->state = AFS_CALL_REPLYING; |
585 | * call */ | ||
586 | case RXRPC_CSTATE_SRVR_GOT_ARGS: | ||
587 | /* shouldn't be any args */ | ||
588 | ret = -EBADMSG; | ||
589 | break; | ||
590 | 282 | ||
591 | /* send the reply when asked for it */ | 283 | /* we'll need the file server record as that tells us which set of |
592 | case RXRPC_CSTATE_SRVR_SND_REPLY: | 284 | * vnodes to operate upon */ |
593 | /* invoke the actual service routine */ | 285 | memcpy(&addr, &ip_hdr(skb)->saddr, 4); |
594 | ret = SRXAFSCM_Probe(server); | 286 | server = afs_find_server(&addr); |
595 | if (ret < 0) | 287 | if (!server) |
596 | break; | 288 | return -ENOTCONN; |
597 | 289 | call->server = server; | |
598 | ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET, | ||
599 | GFP_KERNEL, 0, &count); | ||
600 | if (ret < 0) | ||
601 | break; | ||
602 | break; | ||
603 | 290 | ||
604 | /* operation complete */ | 291 | INIT_WORK(&call->work, SRXAFSCB_InitCallBackState); |
605 | case RXRPC_CSTATE_COMPLETE: | 292 | schedule_work(&call->work); |
606 | call->app_user = NULL; | 293 | return 0; |
607 | removed = 0; | 294 | } |
608 | spin_lock(&afscm_calls_lock); | ||
609 | if (!list_empty(&call->app_link)) { | ||
610 | list_del_init(&call->app_link); | ||
611 | removed = 1; | ||
612 | } | ||
613 | spin_unlock(&afscm_calls_lock); | ||
614 | 295 | ||
615 | if (removed) | 296 | /* |
616 | rxrpc_put_call(call); | 297 | * allow the fileserver to see if the cache manager is still alive |
617 | break; | 298 | */ |
299 | static void SRXAFSCB_Probe(struct work_struct *work) | ||
300 | { | ||
301 | struct afs_call *call = container_of(work, struct afs_call, work); | ||
618 | 302 | ||
619 | /* operation terminated on error */ | 303 | _enter(""); |
620 | case RXRPC_CSTATE_ERROR: | 304 | afs_send_empty_reply(call); |
621 | call->app_user = NULL; | 305 | _leave(""); |
622 | break; | 306 | } |
623 | 307 | ||
624 | default: | 308 | /* |
625 | break; | 309 | * deliver request data to a CB.Probe call |
626 | } | 310 | */ |
311 | static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb, | ||
312 | bool last) | ||
313 | { | ||
314 | _enter(",{%u},%d", skb->len, last); | ||
627 | 315 | ||
628 | if (ret < 0) | 316 | if (skb->len > 0) |
629 | rxrpc_call_abort(call, ret); | 317 | return -EBADMSG; |
318 | if (!last) | ||
319 | return 0; | ||
630 | 320 | ||
631 | afs_put_server(server); | 321 | /* no unmarshalling required */ |
322 | call->state = AFS_CALL_REPLYING; | ||
632 | 323 | ||
633 | _leave(" = %d", ret); | 324 | INIT_WORK(&call->work, SRXAFSCB_Probe); |
325 | schedule_work(&call->work); | ||
326 | return 0; | ||
634 | } | 327 | } |
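The rewritten cache-manager service drops the kafscmd kernel thread and the rxrpc_operation table in favour of one small afs_call_type per CB operation: afs_cm_incoming_call() selects the type, the type's ->deliver routine unmarshalls request data incrementally as packets arrive, and the actual work plus the reply run from a work item via afs_send_empty_reply(). The incremental unmarshalling idiom used by afs_deliver_cb_callback() above reduces to the following simplified sketch (a hypothetical single-argument operation; names are reused from this patch and error handling is trimmed):

	/* sketch only: unmarshall one 32-bit argument, then reply from a
	 * work item; relies on the afs_call and afs_extract_data
	 * declarations this patch adds to internal.h */
	static int afs_deliver_example(struct afs_call *call,
				       struct sk_buff *skb, bool last)
	{
		int ret;

		switch (call->unmarshall) {
		case 0:
			call->offset = 0;
			call->unmarshall++;
			/* fall through */
		case 1:
			/* afs_extract_data() returns -EAGAIN until the
			 * requested number of bytes has been received */
			ret = afs_extract_data(call, skb, last, &call->tmp, 4);
			if (ret == -EAGAIN)
				return 0;
			if (ret < 0)
				return ret;
			call->count = ntohl(call->tmp);
			call->offset = 0;
			call->unmarshall++;
			/* fall through */
		case 2:
			/* no further arguments expected */
			if (skb->len != 0)
				return -EBADMSG;
			break;
		}

		if (!last)
			return 0;

		/* everything received: queue the reply work item */
		call->state = AFS_CALL_REPLYING;
		INIT_WORK(&call->work, SRXAFSCB_Probe);
		schedule_work(&call->work);
		return 0;
	}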
diff --git a/fs/afs/cmservice.h b/fs/afs/cmservice.h
deleted file mode 100644
index 66e10c15bd1..00000000000
--- a/fs/afs/cmservice.h
+++ /dev/null
@@ -1,28 +0,0 @@ | |||
1 | /* AFS Cache Manager Service declarations | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef AFS_CMSERVICE_H | ||
13 | #define AFS_CMSERVICE_H | ||
14 | |||
15 | #include <rxrpc/transport.h> | ||
16 | #include "types.h" | ||
17 | |||
18 | /* cache manager start/stop */ | ||
19 | extern int afscm_start(void); | ||
20 | extern void afscm_stop(void); | ||
21 | |||
22 | /* cache manager server functions */ | ||
23 | extern int SRXAFSCM_InitCallBackState(struct afs_server *); | ||
24 | extern int SRXAFSCM_CallBack(struct afs_server *, size_t, | ||
25 | struct afs_callback[]); | ||
26 | extern int SRXAFSCM_Probe(struct afs_server *); | ||
27 | |||
28 | #endif /* AFS_CMSERVICE_H */ | ||
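Nothing outside cmservice.c needs the SRXAFSCM_* entry points any more: the work-item handlers are static and the only interface the new cache manager exposes is afs_cm_incoming_call(). The Rx operation IDs that used to sit in the AFSCM_ops table (204-206) and the service ID (1) presumably move to the new fs/afs/afs_cm.h that cmservice.c now includes; that header is not shown in this section, but reconstructed from the old table it would contain roughly:

	/* assumed contents of fs/afs/afs_cm.h, reconstructed from the old
	 * AFSCM_ops table and .service_id above (sketch, not the actual file) */
	#define CM_SERVICE		1	/* AFS cache-manager Rx service ID */

	enum AFS_CM_Operations {
		CBCallBack		= 204,	/* break callback promises */
		CBInitCallBackState	= 205,	/* initialise callback state */
		CBProbe			= 206,	/* probe the cache manager */
	};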
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 2f6d9237646..d7697f6f3b7 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -15,11 +15,6 @@ | |||
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
17 | #include <linux/pagemap.h> | 17 | #include <linux/pagemap.h> |
18 | #include <linux/smp_lock.h> | ||
19 | #include "vnode.h" | ||
20 | #include "volume.h" | ||
21 | #include <rxrpc/call.h> | ||
22 | #include "super.h" | ||
23 | #include "internal.h" | 18 | #include "internal.h" |
24 | 19 | ||
25 | static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry, | 20 | static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry, |
@@ -127,9 +122,10 @@ static inline void afs_dir_check_page(struct inode *dir, struct page *page) | |||
127 | if (qty == 0) | 122 | if (qty == 0) |
128 | goto error; | 123 | goto error; |
129 | 124 | ||
130 | if (page->index==0 && qty!=ntohs(dbuf->blocks[0].pagehdr.npages)) { | 125 | if (page->index == 0 && qty != ntohs(dbuf->blocks[0].pagehdr.npages)) { |
131 | printk("kAFS: %s(%lu): wrong number of dir blocks %d!=%hu\n", | 126 | printk("kAFS: %s(%lu): wrong number of dir blocks %d!=%hu\n", |
132 | __FUNCTION__,dir->i_ino,qty,ntohs(dbuf->blocks[0].pagehdr.npages)); | 127 | __FUNCTION__, dir->i_ino, qty, |
128 | ntohs(dbuf->blocks[0].pagehdr.npages)); | ||
133 | goto error; | 129 | goto error; |
134 | } | 130 | } |
135 | #endif | 131 | #endif |
@@ -194,6 +190,7 @@ static struct page *afs_dir_get_page(struct inode *dir, unsigned long index) | |||
194 | 190 | ||
195 | fail: | 191 | fail: |
196 | afs_dir_put_page(page); | 192 | afs_dir_put_page(page); |
193 | _leave(" = -EIO"); | ||
197 | return ERR_PTR(-EIO); | 194 | return ERR_PTR(-EIO); |
198 | } | 195 | } |
199 | 196 | ||
@@ -207,7 +204,7 @@ static int afs_dir_open(struct inode *inode, struct file *file) | |||
207 | BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048); | 204 | BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048); |
208 | BUILD_BUG_ON(sizeof(union afs_dirent) != 32); | 205 | BUILD_BUG_ON(sizeof(union afs_dirent) != 32); |
209 | 206 | ||
210 | if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED) | 207 | if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(inode)->flags)) |
211 | return -ENOENT; | 208 | return -ENOENT; |
212 | 209 | ||
213 | _leave(" = 0"); | 210 | _leave(" = 0"); |
@@ -242,7 +239,7 @@ static int afs_dir_iterate_block(unsigned *fpos, | |||
242 | /* skip entries marked unused in the bitmap */ | 239 | /* skip entries marked unused in the bitmap */ |
243 | if (!(block->pagehdr.bitmap[offset / 8] & | 240 | if (!(block->pagehdr.bitmap[offset / 8] & |
244 | (1 << (offset % 8)))) { | 241 | (1 << (offset % 8)))) { |
245 | _debug("ENT[%Zu.%u]: unused\n", | 242 | _debug("ENT[%Zu.%u]: unused", |
246 | blkoff / sizeof(union afs_dir_block), offset); | 243 | blkoff / sizeof(union afs_dir_block), offset); |
247 | if (offset >= curr) | 244 | if (offset >= curr) |
248 | *fpos = blkoff + | 245 | *fpos = blkoff + |
@@ -256,7 +253,7 @@ static int afs_dir_iterate_block(unsigned *fpos, | |||
256 | sizeof(*block) - | 253 | sizeof(*block) - |
257 | offset * sizeof(union afs_dirent)); | 254 | offset * sizeof(union afs_dirent)); |
258 | 255 | ||
259 | _debug("ENT[%Zu.%u]: %s %Zu \"%s\"\n", | 256 | _debug("ENT[%Zu.%u]: %s %Zu \"%s\"", |
260 | blkoff / sizeof(union afs_dir_block), offset, | 257 | blkoff / sizeof(union afs_dir_block), offset, |
261 | (offset < curr ? "skip" : "fill"), | 258 | (offset < curr ? "skip" : "fill"), |
262 | nlen, dire->u.name); | 259 | nlen, dire->u.name); |
@@ -266,7 +263,7 @@ static int afs_dir_iterate_block(unsigned *fpos, | |||
266 | if (next >= AFS_DIRENT_PER_BLOCK) { | 263 | if (next >= AFS_DIRENT_PER_BLOCK) { |
267 | _debug("ENT[%Zu.%u]:" | 264 | _debug("ENT[%Zu.%u]:" |
268 | " %u travelled beyond end dir block" | 265 | " %u travelled beyond end dir block" |
269 | " (len %u/%Zu)\n", | 266 | " (len %u/%Zu)", |
270 | blkoff / sizeof(union afs_dir_block), | 267 | blkoff / sizeof(union afs_dir_block), |
271 | offset, next, tmp, nlen); | 268 | offset, next, tmp, nlen); |
272 | return -EIO; | 269 | return -EIO; |
@@ -274,13 +271,13 @@ static int afs_dir_iterate_block(unsigned *fpos, | |||
274 | if (!(block->pagehdr.bitmap[next / 8] & | 271 | if (!(block->pagehdr.bitmap[next / 8] & |
275 | (1 << (next % 8)))) { | 272 | (1 << (next % 8)))) { |
276 | _debug("ENT[%Zu.%u]:" | 273 | _debug("ENT[%Zu.%u]:" |
277 | " %u unmarked extension (len %u/%Zu)\n", | 274 | " %u unmarked extension (len %u/%Zu)", |
278 | blkoff / sizeof(union afs_dir_block), | 275 | blkoff / sizeof(union afs_dir_block), |
279 | offset, next, tmp, nlen); | 276 | offset, next, tmp, nlen); |
280 | return -EIO; | 277 | return -EIO; |
281 | } | 278 | } |
282 | 279 | ||
283 | _debug("ENT[%Zu.%u]: ext %u/%Zu\n", | 280 | _debug("ENT[%Zu.%u]: ext %u/%Zu", |
284 | blkoff / sizeof(union afs_dir_block), | 281 | blkoff / sizeof(union afs_dir_block), |
285 | next, tmp, nlen); | 282 | next, tmp, nlen); |
286 | next++; | 283 | next++; |
@@ -311,12 +308,12 @@ static int afs_dir_iterate_block(unsigned *fpos, | |||
311 | } | 308 | } |
312 | 309 | ||
313 | /* | 310 | /* |
314 | * read an AFS directory | 311 | * iterate through the data blob that lists the contents of an AFS directory |
315 | */ | 312 | */ |
316 | static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie, | 313 | static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie, |
317 | filldir_t filldir) | 314 | filldir_t filldir) |
318 | { | 315 | { |
319 | union afs_dir_block *dblock; | 316 | union afs_dir_block *dblock; |
320 | struct afs_dir_page *dbuf; | 317 | struct afs_dir_page *dbuf; |
321 | struct page *page; | 318 | struct page *page; |
322 | unsigned blkoff, limit; | 319 | unsigned blkoff, limit; |
@@ -324,7 +321,7 @@ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie, | |||
324 | 321 | ||
325 | _enter("{%lu},%u,,", dir->i_ino, *fpos); | 322 | _enter("{%lu},%u,,", dir->i_ino, *fpos); |
326 | 323 | ||
327 | if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) { | 324 | if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) { |
328 | _leave(" = -ESTALE"); | 325 | _leave(" = -ESTALE"); |
329 | return -ESTALE; | 326 | return -ESTALE; |
330 | } | 327 | } |
@@ -381,10 +378,12 @@ static int afs_dir_readdir(struct file *file, void *cookie, filldir_t filldir) | |||
381 | unsigned fpos; | 378 | unsigned fpos; |
382 | int ret; | 379 | int ret; |
383 | 380 | ||
384 | _enter("{%Ld,{%lu}}", file->f_pos, file->f_path.dentry->d_inode->i_ino); | 381 | _enter("{%Ld,{%lu}}", |
382 | file->f_pos, file->f_path.dentry->d_inode->i_ino); | ||
385 | 383 | ||
386 | fpos = file->f_pos; | 384 | fpos = file->f_pos; |
387 | ret = afs_dir_iterate(file->f_path.dentry->d_inode, &fpos, cookie, filldir); | 385 | ret = afs_dir_iterate(file->f_path.dentry->d_inode, &fpos, |
386 | cookie, filldir); | ||
388 | file->f_pos = fpos; | 387 | file->f_pos = fpos; |
389 | 388 | ||
390 | _leave(" = %d", ret); | 389 | _leave(" = %d", ret); |
@@ -401,9 +400,13 @@ static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen, | |||
401 | { | 400 | { |
402 | struct afs_dir_lookup_cookie *cookie = _cookie; | 401 | struct afs_dir_lookup_cookie *cookie = _cookie; |
403 | 402 | ||
404 | _enter("{%s,%Zu},%s,%u,,%lu,%u", | 403 | _enter("{%s,%Zu},%s,%u,,%llu,%u", |
405 | cookie->name, cookie->nlen, name, nlen, ino, dtype); | 404 | cookie->name, cookie->nlen, name, nlen, ino, dtype); |
406 | 405 | ||
406 | /* insanity checks first */ | ||
407 | BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048); | ||
408 | BUILD_BUG_ON(sizeof(union afs_dirent) != 32); | ||
409 | |||
407 | if (cookie->nlen != nlen || memcmp(cookie->name, name, nlen) != 0) { | 410 | if (cookie->nlen != nlen || memcmp(cookie->name, name, nlen) != 0) { |
408 | _leave(" = 0 [no]"); | 411 | _leave(" = 0 [no]"); |
409 | return 0; | 412 | return 0; |
@@ -418,34 +421,17 @@ static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen, | |||
418 | } | 421 | } |
419 | 422 | ||
420 | /* | 423 | /* |
421 | * look up an entry in a directory | 424 | * do a lookup in a directory |
422 | */ | 425 | */ |
423 | static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry, | 426 | static int afs_do_lookup(struct inode *dir, struct dentry *dentry, |
424 | struct nameidata *nd) | 427 | struct afs_fid *fid) |
425 | { | 428 | { |
426 | struct afs_dir_lookup_cookie cookie; | 429 | struct afs_dir_lookup_cookie cookie; |
427 | struct afs_super_info *as; | 430 | struct afs_super_info *as; |
428 | struct afs_vnode *vnode; | ||
429 | struct inode *inode; | ||
430 | unsigned fpos; | 431 | unsigned fpos; |
431 | int ret; | 432 | int ret; |
432 | 433 | ||
433 | _enter("{%lu},%p{%s}", dir->i_ino, dentry, dentry->d_name.name); | 434 | _enter("{%lu},%p{%s},", dir->i_ino, dentry, dentry->d_name.name); |
434 | |||
435 | /* insanity checks first */ | ||
436 | BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048); | ||
437 | BUILD_BUG_ON(sizeof(union afs_dirent) != 32); | ||
438 | |||
439 | if (dentry->d_name.len > 255) { | ||
440 | _leave(" = -ENAMETOOLONG"); | ||
441 | return ERR_PTR(-ENAMETOOLONG); | ||
442 | } | ||
443 | |||
444 | vnode = AFS_FS_I(dir); | ||
445 | if (vnode->flags & AFS_VNODE_DELETED) { | ||
446 | _leave(" = -ESTALE"); | ||
447 | return ERR_PTR(-ESTALE); | ||
448 | } | ||
449 | 435 | ||
450 | as = dir->i_sb->s_fs_info; | 436 | as = dir->i_sb->s_fs_info; |
451 | 437 | ||
@@ -458,30 +444,64 @@ static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry, | |||
458 | fpos = 0; | 444 | fpos = 0; |
459 | ret = afs_dir_iterate(dir, &fpos, &cookie, afs_dir_lookup_filldir); | 445 | ret = afs_dir_iterate(dir, &fpos, &cookie, afs_dir_lookup_filldir); |
460 | if (ret < 0) { | 446 | if (ret < 0) { |
461 | _leave(" = %d", ret); | 447 | _leave(" = %d [iter]", ret); |
462 | return ERR_PTR(ret); | 448 | return ret; |
463 | } | 449 | } |
464 | 450 | ||
465 | ret = -ENOENT; | 451 | ret = -ENOENT; |
466 | if (!cookie.found) { | 452 | if (!cookie.found) { |
467 | _leave(" = %d", ret); | 453 | _leave(" = -ENOENT [not found]"); |
468 | return ERR_PTR(ret); | 454 | return -ENOENT; |
469 | } | 455 | } |
470 | 456 | ||
471 | /* instantiate the dentry */ | 457 | *fid = cookie.fid; |
472 | ret = afs_iget(dir->i_sb, &cookie.fid, &inode); | 458 | _leave(" = 0 { vn=%u u=%u }", fid->vnode, fid->unique); |
459 | return 0; | ||
460 | } | ||
461 | |||
462 | /* | ||
463 | * look up an entry in a directory | ||
464 | */ | ||
465 | static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry, | ||
466 | struct nameidata *nd) | ||
467 | { | ||
468 | struct afs_vnode *vnode; | ||
469 | struct afs_fid fid; | ||
470 | struct inode *inode; | ||
471 | int ret; | ||
472 | |||
473 | _enter("{%lu},%p{%s}", dir->i_ino, dentry, dentry->d_name.name); | ||
474 | |||
475 | if (dentry->d_name.len > 255) { | ||
476 | _leave(" = -ENAMETOOLONG"); | ||
477 | return ERR_PTR(-ENAMETOOLONG); | ||
478 | } | ||
479 | |||
480 | vnode = AFS_FS_I(dir); | ||
481 | if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { | ||
482 | _leave(" = -ESTALE"); | ||
483 | return ERR_PTR(-ESTALE); | ||
484 | } | ||
485 | |||
486 | ret = afs_do_lookup(dir, dentry, &fid); | ||
473 | if (ret < 0) { | 487 | if (ret < 0) { |
474 | _leave(" = %d", ret); | 488 | _leave(" = %d [do]", ret); |
475 | return ERR_PTR(ret); | 489 | return ERR_PTR(ret); |
476 | } | 490 | } |
477 | 491 | ||
492 | /* instantiate the dentry */ | ||
493 | inode = afs_iget(dir->i_sb, &fid); | ||
494 | if (IS_ERR(inode)) { | ||
495 | _leave(" = %ld", PTR_ERR(inode)); | ||
496 | return ERR_PTR(PTR_ERR(inode)); | ||
497 | } | ||
498 | |||
478 | dentry->d_op = &afs_fs_dentry_operations; | 499 | dentry->d_op = &afs_fs_dentry_operations; |
479 | dentry->d_fsdata = (void *) (unsigned long) vnode->status.version; | ||
480 | 500 | ||
481 | d_add(dentry, inode); | 501 | d_add(dentry, inode); |
482 | _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%lu }", | 502 | _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%lu }", |
483 | cookie.fid.vnode, | 503 | fid.vnode, |
484 | cookie.fid.unique, | 504 | fid.unique, |
485 | dentry->d_inode->i_ino, | 505 | dentry->d_inode->i_ino, |
486 | dentry->d_inode->i_version); | 506 | dentry->d_inode->i_version); |
487 | 507 | ||
@@ -489,23 +509,65 @@ static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry, | |||
489 | } | 509 | } |
490 | 510 | ||
491 | /* | 511 | /* |
512 | * propagate changed and modified flags on a directory to all the children of | ||
513 | * that directory as they may indicate that the ACL on the dir has changed, | ||
514 | * potentially rendering the child inaccessible or that a file has been deleted | ||
515 | * or renamed | ||
516 | */ | ||
517 | static void afs_propagate_dir_changes(struct dentry *dir) | ||
518 | { | ||
519 | struct dentry *child; | ||
520 | bool c, m; | ||
521 | |||
522 | c = test_bit(AFS_VNODE_CHANGED, &AFS_FS_I(dir->d_inode)->flags); | ||
523 | m = test_bit(AFS_VNODE_MODIFIED, &AFS_FS_I(dir->d_inode)->flags); | ||
524 | |||
525 | _enter("{%d,%d}", c, m); | ||
526 | |||
527 | spin_lock(&dir->d_lock); | ||
528 | |||
529 | list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) { | ||
530 | if (child->d_inode) { | ||
531 | struct afs_vnode *vnode; | ||
532 | |||
533 | _debug("tag %s", child->d_name.name); | ||
534 | vnode = AFS_FS_I(child->d_inode); | ||
535 | if (c) | ||
536 | set_bit(AFS_VNODE_DIR_CHANGED, &vnode->flags); | ||
537 | if (m) | ||
538 | set_bit(AFS_VNODE_DIR_MODIFIED, &vnode->flags); | ||
539 | } | ||
540 | } | ||
541 | |||
542 | spin_unlock(&dir->d_lock); | ||
543 | } | ||
544 | |||
545 | /* | ||
492 | * check that a dentry lookup hit has found a valid entry | 546 | * check that a dentry lookup hit has found a valid entry |
493 | * - NOTE! the hit can be a negative hit too, so we can't assume we have an | 547 | * - NOTE! the hit can be a negative hit too, so we can't assume we have an |
494 | * inode | 548 | * inode |
495 | * (derived from nfs_lookup_revalidate) | 549 | * - there are several things we need to check |
550 | * - parent dir data changes (rm, rmdir, rename, mkdir, create, link, | ||
551 | * symlink) | ||
552 | * - parent dir metadata changed (security changes) | ||
553 | * - dentry data changed (write, truncate) | ||
554 | * - dentry metadata changed (security changes) | ||
496 | */ | 555 | */ |
497 | static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd) | 556 | static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd) |
498 | { | 557 | { |
499 | struct afs_dir_lookup_cookie cookie; | 558 | struct afs_vnode *vnode; |
559 | struct afs_fid fid; | ||
500 | struct dentry *parent; | 560 | struct dentry *parent; |
501 | struct inode *inode, *dir; | 561 | struct inode *inode, *dir; |
502 | unsigned fpos; | ||
503 | int ret; | 562 | int ret; |
504 | 563 | ||
505 | _enter("{sb=%p n=%s},", dentry->d_sb, dentry->d_name.name); | 564 | vnode = AFS_FS_I(dentry->d_inode); |
565 | |||
566 | _enter("{sb=%p n=%s fl=%lx},", | ||
567 | dentry->d_sb, dentry->d_name.name, vnode->flags); | ||
506 | 568 | ||
507 | /* lock down the parent dentry so we can peer at it */ | 569 | /* lock down the parent dentry so we can peer at it */ |
508 | parent = dget_parent(dentry->d_parent); | 570 | parent = dget_parent(dentry); |
509 | 571 | ||
510 | dir = parent->d_inode; | 572 | dir = parent->d_inode; |
511 | inode = dentry->d_inode; | 573 | inode = dentry->d_inode; |
@@ -517,81 +579,92 @@ static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
517 | /* handle a bad inode */ | 579 | /* handle a bad inode */ |
518 | if (is_bad_inode(inode)) { | 580 | if (is_bad_inode(inode)) { |
519 | printk("kAFS: afs_d_revalidate: %s/%s has bad inode\n", | 581 | printk("kAFS: afs_d_revalidate: %s/%s has bad inode\n", |
520 | dentry->d_parent->d_name.name, dentry->d_name.name); | 582 | parent->d_name.name, dentry->d_name.name); |
521 | goto out_bad; | 583 | goto out_bad; |
522 | } | 584 | } |
523 | 585 | ||
524 | /* force a full look up if the parent directory changed since last the | 586 | /* check that this dirent still exists if the directory's contents were |
525 | * server was consulted | 587 | * modified */ |
526 | * - otherwise this inode must still exist, even if the inode details | 588 | if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) { |
527 | * themselves have changed | ||
528 | */ | ||
529 | if (AFS_FS_I(dir)->flags & AFS_VNODE_CHANGED) | ||
530 | afs_vnode_fetch_status(AFS_FS_I(dir)); | ||
531 | |||
532 | if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) { | ||
533 | _debug("%s: parent dir deleted", dentry->d_name.name); | 589 | _debug("%s: parent dir deleted", dentry->d_name.name); |
534 | goto out_bad; | 590 | goto out_bad; |
535 | } | 591 | } |
536 | 592 | ||
537 | if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED) { | 593 | if (test_and_clear_bit(AFS_VNODE_DIR_MODIFIED, &vnode->flags)) { |
538 | _debug("%s: file already deleted", dentry->d_name.name); | 594 | /* rm/rmdir/rename may have occurred */ |
539 | goto out_bad; | 595 | _debug("dir modified"); |
540 | } | ||
541 | |||
542 | if ((unsigned long) dentry->d_fsdata != | ||
543 | (unsigned long) AFS_FS_I(dir)->status.version) { | ||
544 | _debug("%s: parent changed %lu -> %u", | ||
545 | dentry->d_name.name, | ||
546 | (unsigned long) dentry->d_fsdata, | ||
547 | (unsigned) AFS_FS_I(dir)->status.version); | ||
548 | 596 | ||
549 | /* search the directory for this vnode */ | 597 | /* search the directory for this vnode */ |
550 | cookie.name = dentry->d_name.name; | 598 | ret = afs_do_lookup(dir, dentry, &fid); |
551 | cookie.nlen = dentry->d_name.len; | 599 | if (ret == -ENOENT) { |
552 | cookie.fid.vid = AFS_FS_I(inode)->volume->vid; | 600 | _debug("%s: dirent not found", dentry->d_name.name); |
553 | cookie.found = 0; | 601 | goto not_found; |
554 | 602 | } | |
555 | fpos = 0; | ||
556 | ret = afs_dir_iterate(dir, &fpos, &cookie, | ||
557 | afs_dir_lookup_filldir); | ||
558 | if (ret < 0) { | 603 | if (ret < 0) { |
559 | _debug("failed to iterate dir %s: %d", | 604 | _debug("failed to iterate dir %s: %d", |
560 | parent->d_name.name, ret); | 605 | parent->d_name.name, ret); |
561 | goto out_bad; | 606 | goto out_bad; |
562 | } | 607 | } |
563 | 608 | ||
564 | if (!cookie.found) { | ||
565 | _debug("%s: dirent not found", dentry->d_name.name); | ||
566 | goto not_found; | ||
567 | } | ||
568 | |||
569 | /* if the vnode ID has changed, then the dirent points to a | 609 | /* if the vnode ID has changed, then the dirent points to a |
570 | * different file */ | 610 | * different file */ |
571 | if (cookie.fid.vnode != AFS_FS_I(inode)->fid.vnode) { | 611 | if (fid.vnode != vnode->fid.vnode) { |
572 | _debug("%s: dirent changed", dentry->d_name.name); | 612 | _debug("%s: dirent changed [%u != %u]", |
613 | dentry->d_name.name, fid.vnode, | ||
614 | vnode->fid.vnode); | ||
573 | goto not_found; | 615 | goto not_found; |
574 | } | 616 | } |
575 | 617 | ||
576 | /* if the vnode ID uniqifier has changed, then the file has | 618 | /* if the vnode ID uniqifier has changed, then the file has |
577 | * been deleted */ | 619 | * been deleted */ |
578 | if (cookie.fid.unique != AFS_FS_I(inode)->fid.unique) { | 620 | if (fid.unique != vnode->fid.unique) { |
579 | _debug("%s: file deleted (uq %u -> %u I:%lu)", | 621 | _debug("%s: file deleted (uq %u -> %u I:%lu)", |
580 | dentry->d_name.name, | 622 | dentry->d_name.name, fid.unique, |
581 | cookie.fid.unique, | 623 | vnode->fid.unique, inode->i_version); |
582 | AFS_FS_I(inode)->fid.unique, | 624 | spin_lock(&vnode->lock); |
583 | inode->i_version); | 625 | set_bit(AFS_VNODE_DELETED, &vnode->flags); |
584 | spin_lock(&AFS_FS_I(inode)->lock); | 626 | spin_unlock(&vnode->lock); |
585 | AFS_FS_I(inode)->flags |= AFS_VNODE_DELETED; | ||
586 | spin_unlock(&AFS_FS_I(inode)->lock); | ||
587 | invalidate_remote_inode(inode); | 627 | invalidate_remote_inode(inode); |
588 | goto out_bad; | 628 | goto out_bad; |
589 | } | 629 | } |
630 | } | ||
631 | |||
632 | /* if the directory's metadata were changed then the security may be | ||
633 | * different and we may no longer have access */ | ||
634 | mutex_lock(&vnode->cb_broken_lock); | ||
590 | 635 | ||
591 | dentry->d_fsdata = | 636 | if (test_and_clear_bit(AFS_VNODE_DIR_CHANGED, &vnode->flags) || |
592 | (void *) (unsigned long) AFS_FS_I(dir)->status.version; | 637 | test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) { |
638 | _debug("%s: changed", dentry->d_name.name); | ||
639 | set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags); | ||
640 | if (afs_vnode_fetch_status(vnode) < 0) { | ||
641 | mutex_unlock(&vnode->cb_broken_lock); | ||
642 | goto out_bad; | ||
643 | } | ||
593 | } | 644 | } |
594 | 645 | ||
646 | if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { | ||
647 | _debug("%s: file already deleted", dentry->d_name.name); | ||
648 | mutex_unlock(&vnode->cb_broken_lock); | ||
649 | goto out_bad; | ||
650 | } | ||
651 | |||
652 | /* if the vnode's data version number changed then its contents are | ||
653 | * different */ | ||
654 | if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) { | ||
655 | _debug("zap data"); | ||
656 | invalidate_remote_inode(inode); | ||
657 | } | ||
658 | |||
659 | if (S_ISDIR(inode->i_mode) && | ||
660 | (test_bit(AFS_VNODE_CHANGED, &vnode->flags) || | ||
661 | test_bit(AFS_VNODE_MODIFIED, &vnode->flags))) | ||
662 | afs_propagate_dir_changes(dentry); | ||
663 | |||
664 | clear_bit(AFS_VNODE_CHANGED, &vnode->flags); | ||
665 | clear_bit(AFS_VNODE_MODIFIED, &vnode->flags); | ||
666 | mutex_unlock(&vnode->cb_broken_lock); | ||
667 | |||
595 | out_valid: | 668 | out_valid: |
596 | dput(parent); | 669 | dput(parent); |
597 | _leave(" = 1 [valid]"); | 670 | _leave(" = 1 [valid]"); |
@@ -610,12 +683,10 @@ out_bad: | |||
610 | goto out_valid; | 683 | goto out_valid; |
611 | } | 684 | } |
612 | 685 | ||
613 | shrink_dcache_parent(dentry); | ||
614 | |||
615 | _debug("dropping dentry %s/%s", | 686 | _debug("dropping dentry %s/%s", |
616 | dentry->d_parent->d_name.name, dentry->d_name.name); | 687 | parent->d_name.name, dentry->d_name.name); |
688 | shrink_dcache_parent(dentry); | ||
617 | d_drop(dentry); | 689 | d_drop(dentry); |
618 | |||
619 | dput(parent); | 690 | dput(parent); |
620 | 691 | ||
621 | _leave(" = 0 [bad]"); | 692 | _leave(" = 0 [bad]"); |
@@ -635,10 +706,9 @@ static int afs_d_delete(struct dentry *dentry) | |||
635 | if (dentry->d_flags & DCACHE_NFSFS_RENAMED) | 706 | if (dentry->d_flags & DCACHE_NFSFS_RENAMED) |
636 | goto zap; | 707 | goto zap; |
637 | 708 | ||
638 | if (dentry->d_inode) { | 709 | if (dentry->d_inode && |
639 | if (AFS_FS_I(dentry->d_inode)->flags & AFS_VNODE_DELETED) | 710 | test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dentry->d_inode)->flags)) |
640 | goto zap; | 711 | goto zap; |
641 | } | ||
642 | 712 | ||
643 | _leave(" = 0 [keep]"); | 713 | _leave(" = 0 [keep]"); |
644 | return 0; | 714 | return 0; |
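The revalidation and deletion hooks in this file are reached through the dentry operations table that the lookup path installs on each dentry (dentry->d_op = &afs_fs_dentry_operations, shown earlier in this hunk). A minimal sketch of how such a table is wired in the VFS of this era follows; the member list shown is illustrative only, and the real afs_fs_dentry_operations in fs/afs/dir.c may carry additional hooks such as d_release:

/* Sketch only: how the VFS reaches afs_d_revalidate()/afs_d_delete(). */
static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd);
static int afs_d_delete(struct dentry *dentry);

static struct dentry_operations afs_dentry_ops_sketch = {
	.d_revalidate	= afs_d_revalidate,	/* run on every cached-lookup hit */
	.d_delete	= afs_d_delete,		/* asked whether to discard on final dput */
};

/* in lookup: dentry->d_op = &afs_dentry_ops_sketch;  (as the patch does above) */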
diff --git a/fs/afs/file.c b/fs/afs/file.c index 01df30d256b..6990327e75d 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* file.c: AFS filesystem file handling | 1 | /* AFS filesystem file handling |
2 | * | 2 | * |
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -15,9 +15,6 @@ | |||
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
17 | #include <linux/pagemap.h> | 17 | #include <linux/pagemap.h> |
18 | #include "volume.h" | ||
19 | #include "vnode.h" | ||
20 | #include <rxrpc/call.h> | ||
21 | #include "internal.h" | 18 | #include "internal.h" |
22 | 19 | ||
23 | #if 0 | 20 | #if 0 |
@@ -80,12 +77,10 @@ static void afs_file_readpage_write_complete(void *cookie_data, | |||
80 | */ | 77 | */ |
81 | static int afs_file_readpage(struct file *file, struct page *page) | 78 | static int afs_file_readpage(struct file *file, struct page *page) |
82 | { | 79 | { |
83 | struct afs_rxfs_fetch_descriptor desc; | ||
84 | #ifdef AFS_CACHING_SUPPORT | ||
85 | struct cachefs_page *pageio; | ||
86 | #endif | ||
87 | struct afs_vnode *vnode; | 80 | struct afs_vnode *vnode; |
88 | struct inode *inode; | 81 | struct inode *inode; |
82 | size_t len; | ||
83 | off_t offset; | ||
89 | int ret; | 84 | int ret; |
90 | 85 | ||
91 | inode = page->mapping->host; | 86 | inode = page->mapping->host; |
@@ -97,14 +92,10 @@ static int afs_file_readpage(struct file *file, struct page *page) | |||
97 | BUG_ON(!PageLocked(page)); | 92 | BUG_ON(!PageLocked(page)); |
98 | 93 | ||
99 | ret = -ESTALE; | 94 | ret = -ESTALE; |
100 | if (vnode->flags & AFS_VNODE_DELETED) | 95 | if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) |
101 | goto error; | 96 | goto error; |
102 | 97 | ||
103 | #ifdef AFS_CACHING_SUPPORT | 98 | #ifdef AFS_CACHING_SUPPORT |
104 | ret = cachefs_page_get_private(page, &pageio, GFP_NOIO); | ||
105 | if (ret < 0) | ||
106 | goto error; | ||
107 | |||
108 | /* is it cached? */ | 99 | /* is it cached? */ |
109 | ret = cachefs_read_or_alloc_page(vnode->cache, | 100 | ret = cachefs_read_or_alloc_page(vnode->cache, |
110 | page, | 101 | page, |
@@ -128,26 +119,19 @@ static int afs_file_readpage(struct file *file, struct page *page) | |||
128 | case -ENOBUFS: | 119 | case -ENOBUFS: |
129 | case -ENODATA: | 120 | case -ENODATA: |
130 | default: | 121 | default: |
131 | desc.fid = vnode->fid; | 122 | offset = page->index << PAGE_CACHE_SHIFT; |
132 | desc.offset = page->index << PAGE_CACHE_SHIFT; | 123 | len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE); |
133 | desc.size = min((size_t) (inode->i_size - desc.offset), | ||
134 | (size_t) PAGE_SIZE); | ||
135 | desc.buffer = kmap(page); | ||
136 | |||
137 | clear_page(desc.buffer); | ||
138 | 124 | ||
139 | /* read the contents of the file from the server into the | 125 | /* read the contents of the file from the server into the |
140 | * page */ | 126 | * page */ |
141 | ret = afs_vnode_fetch_data(vnode, &desc); | 127 | ret = afs_vnode_fetch_data(vnode, offset, len, page); |
142 | kunmap(page); | ||
143 | if (ret < 0) { | 128 | if (ret < 0) { |
144 | if (ret==-ENOENT) { | 129 | if (ret == -ENOENT) { |
145 | _debug("got NOENT from server" | 130 | _debug("got NOENT from server" |
146 | " - marking file deleted and stale"); | 131 | " - marking file deleted and stale"); |
147 | vnode->flags |= AFS_VNODE_DELETED; | 132 | set_bit(AFS_VNODE_DELETED, &vnode->flags); |
148 | ret = -ESTALE; | 133 | ret = -ESTALE; |
149 | } | 134 | } |
150 | |||
151 | #ifdef AFS_CACHING_SUPPORT | 135 | #ifdef AFS_CACHING_SUPPORT |
152 | cachefs_uncache_page(vnode->cache, page); | 136 | cachefs_uncache_page(vnode->cache, page); |
153 | #endif | 137 | #endif |
@@ -174,10 +158,9 @@ static int afs_file_readpage(struct file *file, struct page *page) | |||
174 | _leave(" = 0"); | 158 | _leave(" = 0"); |
175 | return 0; | 159 | return 0; |
176 | 160 | ||
177 | error: | 161 | error: |
178 | SetPageError(page); | 162 | SetPageError(page); |
179 | unlock_page(page); | 163 | unlock_page(page); |
180 | |||
181 | _leave(" = %d", ret); | 164 | _leave(" = %d", ret); |
182 | return ret; | 165 | return ret; |
183 | } | 166 | } |
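The rewritten readpage computes the byte range to fetch straight from the page index instead of filling in a fetch descriptor. A standalone, user-space illustration of that arithmetic, with 4096 standing in for PAGE_CACHE_SIZE and a hypothetical 10000-byte file:

#include <stdio.h>

int main(void)
{
	unsigned long i_size = 10000;		/* hypothetical file size */
	unsigned long index = 2;		/* page index being read */
	unsigned long offset = index << 12;	/* 2 << 12 = 8192 */
	unsigned long len = i_size - offset;	/* bytes left in the file */

	if (len > 4096)				/* clamp to one page, as min_t() does */
		len = 4096;
	printf("fetch %lu bytes at offset %lu\n", len, offset);	/* 1808 @ 8192 */
	return 0;
}

Only the final, partial page of the file ends up with len below 4096; every earlier page fetches a full page worth of data.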
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index f1c3a186842..167ca615c2e 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* fsclient.c: AFS File Server client stubs | 1 | /* AFS File Server client stubs |
2 | * | 2 | * |
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -11,818 +11,396 @@ | |||
11 | 11 | ||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <rxrpc/rxrpc.h> | 14 | #include <linux/circ_buf.h> |
15 | #include <rxrpc/transport.h> | ||
16 | #include <rxrpc/connection.h> | ||
17 | #include <rxrpc/call.h> | ||
18 | #include "fsclient.h" | ||
19 | #include "cmservice.h" | ||
20 | #include "vnode.h" | ||
21 | #include "server.h" | ||
22 | #include "errors.h" | ||
23 | #include "internal.h" | 15 | #include "internal.h" |
24 | 16 | #include "afs_fs.h" | |
25 | #define FSFETCHSTATUS 132 /* AFS Fetch file status */ | ||
26 | #define FSFETCHDATA 130 /* AFS Fetch file data */ | ||
27 | #define FSGIVEUPCALLBACKS 147 /* AFS Discard callback promises */ | ||
28 | #define FSGETVOLUMEINFO 148 /* AFS Get root volume information */ | ||
29 | #define FSGETROOTVOLUME 151 /* AFS Get root volume name */ | ||
30 | #define FSLOOKUP 161 /* AFS lookup file in directory */ | ||
31 | 17 | ||
32 | /* | 18 | /* |
33 | * map afs abort codes to/from Linux error codes | 19 | * decode an AFSFetchStatus block |
34 | * - called with call->lock held | ||
35 | */ | 20 | */ |
36 | static void afs_rxfs_aemap(struct rxrpc_call *call) | 21 | static void xdr_decode_AFSFetchStatus(const __be32 **_bp, |
22 | struct afs_vnode *vnode) | ||
37 | { | 23 | { |
38 | switch (call->app_err_state) { | 24 | const __be32 *bp = *_bp; |
39 | case RXRPC_ESTATE_LOCAL_ABORT: | 25 | umode_t mode; |
40 | call->app_abort_code = -call->app_errno; | 26 | u64 data_version; |
41 | break; | 27 | u32 changed = 0; /* becomes non-zero if ctime-type changes seen */ |
42 | case RXRPC_ESTATE_PEER_ABORT: | 28 | |
43 | call->app_errno = afs_abort_to_error(call->app_abort_code); | 29 | #define EXTRACT(DST) \ |
44 | break; | 30 | do { \ |
45 | default: | 31 | u32 x = ntohl(*bp++); \ |
46 | break; | 32 | changed |= DST - x; \ |
33 | DST = x; \ | ||
34 | } while (0) | ||
35 | |||
36 | vnode->status.if_version = ntohl(*bp++); | ||
37 | EXTRACT(vnode->status.type); | ||
38 | vnode->status.nlink = ntohl(*bp++); | ||
39 | EXTRACT(vnode->status.size); | ||
40 | data_version = ntohl(*bp++); | ||
41 | EXTRACT(vnode->status.author); | ||
42 | EXTRACT(vnode->status.owner); | ||
43 | EXTRACT(vnode->status.caller_access); /* call ticket dependent */ | ||
44 | EXTRACT(vnode->status.anon_access); | ||
45 | EXTRACT(vnode->status.mode); | ||
46 | vnode->status.parent.vid = vnode->fid.vid; | ||
47 | EXTRACT(vnode->status.parent.vnode); | ||
48 | EXTRACT(vnode->status.parent.unique); | ||
49 | bp++; /* seg size */ | ||
50 | vnode->status.mtime_client = ntohl(*bp++); | ||
51 | vnode->status.mtime_server = ntohl(*bp++); | ||
52 | bp++; /* group */ | ||
53 | bp++; /* sync counter */ | ||
54 | data_version |= (u64) ntohl(*bp++) << 32; | ||
55 | bp++; /* spare2 */ | ||
56 | bp++; /* spare3 */ | ||
57 | bp++; /* spare4 */ | ||
58 | *_bp = bp; | ||
59 | |||
60 | if (changed) { | ||
61 | _debug("vnode changed"); | ||
62 | set_bit(AFS_VNODE_CHANGED, &vnode->flags); | ||
63 | vnode->vfs_inode.i_uid = vnode->status.owner; | ||
64 | vnode->vfs_inode.i_size = vnode->status.size; | ||
65 | vnode->vfs_inode.i_version = vnode->fid.unique; | ||
66 | |||
67 | vnode->status.mode &= S_IALLUGO; | ||
68 | mode = vnode->vfs_inode.i_mode; | ||
69 | mode &= ~S_IALLUGO; | ||
70 | mode |= vnode->status.mode; | ||
71 | vnode->vfs_inode.i_mode = mode; | ||
72 | } | ||
73 | |||
74 | _debug("vnode time %lx, %lx", | ||
75 | vnode->status.mtime_client, vnode->status.mtime_server); | ||
76 | vnode->vfs_inode.i_ctime.tv_sec = vnode->status.mtime_server; | ||
77 | vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime; | ||
78 | vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime; | ||
79 | |||
80 | if (vnode->status.data_version != data_version) { | ||
81 | _debug("vnode modified %llx", data_version); | ||
82 | vnode->status.data_version = data_version; | ||
83 | set_bit(AFS_VNODE_MODIFIED, &vnode->flags); | ||
84 | set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags); | ||
47 | } | 85 | } |
48 | } | 86 | } |
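The EXTRACT() helper above both stores each decoded field and accumulates, in "changed", whether any field differed from the value already cached in the vnode, so one test after decoding decides whether AFS_VNODE_CHANGED needs setting. A minimal user-space rendering of the same idiom (names here are illustrative, not kernel API); unsigned subtraction leaves a non-zero residue exactly when old and new values differ:

#include <stdio.h>

#define EXTRACT(dst)				\
	do {					\
		unsigned int x = *wire++;	\
		changed |= (dst) - x;		\
		(dst) = x;			\
	} while (0)

struct cached { unsigned int owner, mode, size; };

int main(void)
{
	unsigned int wire_buf[] = { 1000, 0644, 4096 };	/* pretend reply fields */
	const unsigned int *wire = wire_buf;
	struct cached c = { .owner = 1000, .mode = 0600, .size = 4096 };
	unsigned int changed = 0;

	EXTRACT(c.owner);
	EXTRACT(c.mode);
	EXTRACT(c.size);

	printf("changed=%d\n", changed != 0);	/* 1: mode went 0600 -> 0644 */
	return 0;
}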
49 | 87 | ||
50 | /* | 88 | /* |
51 | * get the root volume name from a fileserver | 89 | * decode an AFSCallBack block |
52 | * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2 | ||
53 | */ | 90 | */ |
54 | #if 0 | 91 | static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode) |
55 | int afs_rxfs_get_root_volume(struct afs_server *server, | ||
56 | char *buf, size_t *buflen) | ||
57 | { | 92 | { |
58 | struct rxrpc_connection *conn; | 93 | const __be32 *bp = *_bp; |
59 | struct rxrpc_call *call; | ||
60 | struct kvec piov[2]; | ||
61 | size_t sent; | ||
62 | int ret; | ||
63 | u32 param[1]; | ||
64 | |||
65 | DECLARE_WAITQUEUE(myself, current); | ||
66 | |||
67 | kenter("%p,%p,%u",server, buf, *buflen); | ||
68 | |||
69 | /* get hold of the fileserver connection */ | ||
70 | ret = afs_server_get_fsconn(server, &conn); | ||
71 | if (ret < 0) | ||
72 | goto out; | ||
73 | |||
74 | /* create a call through that connection */ | ||
75 | ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call); | ||
76 | if (ret < 0) { | ||
77 | printk("kAFS: Unable to create call: %d\n", ret); | ||
78 | goto out_put_conn; | ||
79 | } | ||
80 | call->app_opcode = FSGETROOTVOLUME; | ||
81 | |||
82 | /* we want to get event notifications from the call */ | ||
83 | add_wait_queue(&call->waitq, &myself); | ||
84 | |||
85 | /* marshall the parameters */ | ||
86 | param[0] = htonl(FSGETROOTVOLUME); | ||
87 | |||
88 | piov[0].iov_len = sizeof(param); | ||
89 | piov[0].iov_base = param; | ||
90 | |||
91 | /* send the parameters to the server */ | ||
92 | ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
93 | 0, &sent); | ||
94 | if (ret < 0) | ||
95 | goto abort; | ||
96 | |||
97 | /* wait for the reply to completely arrive */ | ||
98 | for (;;) { | ||
99 | set_current_state(TASK_INTERRUPTIBLE); | ||
100 | if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY || | ||
101 | signal_pending(current)) | ||
102 | break; | ||
103 | schedule(); | ||
104 | } | ||
105 | set_current_state(TASK_RUNNING); | ||
106 | |||
107 | ret = -EINTR; | ||
108 | if (signal_pending(current)) | ||
109 | goto abort; | ||
110 | |||
111 | switch (call->app_call_state) { | ||
112 | case RXRPC_CSTATE_ERROR: | ||
113 | ret = call->app_errno; | ||
114 | kdebug("Got Error: %d", ret); | ||
115 | goto out_unwait; | ||
116 | |||
117 | case RXRPC_CSTATE_CLNT_GOT_REPLY: | ||
118 | /* read the reply */ | ||
119 | kdebug("Got Reply: qty=%d", call->app_ready_qty); | ||
120 | |||
121 | ret = -EBADMSG; | ||
122 | if (call->app_ready_qty <= 4) | ||
123 | goto abort; | ||
124 | |||
125 | ret = rxrpc_call_read_data(call, NULL, call->app_ready_qty, 0); | ||
126 | if (ret < 0) | ||
127 | goto abort; | ||
128 | |||
129 | #if 0 | ||
130 | /* unmarshall the reply */ | ||
131 | bp = buffer; | ||
132 | for (loop = 0; loop < 65; loop++) | ||
133 | entry->name[loop] = ntohl(*bp++); | ||
134 | entry->name[64] = 0; | ||
135 | |||
136 | entry->type = ntohl(*bp++); | ||
137 | entry->num_servers = ntohl(*bp++); | ||
138 | |||
139 | for (loop = 0; loop < 8; loop++) | ||
140 | entry->servers[loop].addr.s_addr = *bp++; | ||
141 | |||
142 | for (loop = 0; loop < 8; loop++) | ||
143 | entry->servers[loop].partition = ntohl(*bp++); | ||
144 | |||
145 | for (loop = 0; loop < 8; loop++) | ||
146 | entry->servers[loop].flags = ntohl(*bp++); | ||
147 | |||
148 | for (loop = 0; loop < 3; loop++) | ||
149 | entry->volume_ids[loop] = ntohl(*bp++); | ||
150 | |||
151 | entry->clone_id = ntohl(*bp++); | ||
152 | entry->flags = ntohl(*bp); | ||
153 | #endif | ||
154 | 94 | ||
155 | /* success */ | 95 | vnode->cb_version = ntohl(*bp++); |
156 | ret = 0; | 96 | vnode->cb_expiry = ntohl(*bp++); |
157 | goto out_unwait; | 97 | vnode->cb_type = ntohl(*bp++); |
158 | 98 | vnode->cb_expires = vnode->cb_expiry + get_seconds(); | |
159 | default: | 99 | *_bp = bp; |
160 | BUG(); | ||
161 | } | ||
162 | |||
163 | abort: | ||
164 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
165 | rxrpc_call_abort(call, ret); | ||
166 | schedule(); | ||
167 | out_unwait: | ||
168 | set_current_state(TASK_RUNNING); | ||
169 | remove_wait_queue(&call->waitq, &myself); | ||
170 | rxrpc_put_call(call); | ||
171 | out_put_conn: | ||
172 | afs_server_release_fsconn(server, conn); | ||
173 | out: | ||
174 | kleave(""); | ||
175 | return ret; | ||
176 | } | 100 | } |
177 | #endif | ||
178 | 101 | ||
179 | /* | 102 | /* |
180 | * get information about a volume | 103 | * decode an AFSVolSync block |
181 | */ | 104 | */ |
182 | #if 0 | 105 | static void xdr_decode_AFSVolSync(const __be32 **_bp, |
183 | int afs_rxfs_get_volume_info(struct afs_server *server, | 106 | struct afs_volsync *volsync) |
184 | const char *name, | ||
185 | struct afs_volume_info *vinfo) | ||
186 | { | 107 | { |
187 | struct rxrpc_connection *conn; | 108 | const __be32 *bp = *_bp; |
188 | struct rxrpc_call *call; | ||
189 | struct kvec piov[3]; | ||
190 | size_t sent; | ||
191 | int ret; | ||
192 | u32 param[2], *bp, zero; | ||
193 | 109 | ||
194 | DECLARE_WAITQUEUE(myself, current); | 110 | volsync->creation = ntohl(*bp++); |
111 | bp++; /* spare2 */ | ||
112 | bp++; /* spare3 */ | ||
113 | bp++; /* spare4 */ | ||
114 | bp++; /* spare5 */ | ||
115 | bp++; /* spare6 */ | ||
116 | *_bp = bp; | ||
117 | } | ||
195 | 118 | ||
196 | _enter("%p,%s,%p", server, name, vinfo); | 119 | /* |
120 | * deliver reply data to an FS.FetchStatus | ||
121 | */ | ||
122 | static int afs_deliver_fs_fetch_status(struct afs_call *call, | ||
123 | struct sk_buff *skb, bool last) | ||
124 | { | ||
125 | const __be32 *bp; | ||
197 | 126 | ||
198 | /* get hold of the fileserver connection */ | 127 | _enter(",,%u", last); |
199 | ret = afs_server_get_fsconn(server, &conn); | ||
200 | if (ret < 0) | ||
201 | goto out; | ||
202 | 128 | ||
203 | /* create a call through that connection */ | 129 | afs_transfer_reply(call, skb); |
204 | ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call); | 130 | if (!last) |
205 | if (ret < 0) { | 131 | return 0; |
206 | printk("kAFS: Unable to create call: %d\n", ret); | ||
207 | goto out_put_conn; | ||
208 | } | ||
209 | call->app_opcode = FSGETVOLUMEINFO; | ||
210 | 132 | ||
211 | /* we want to get event notifications from the call */ | 133 | if (call->reply_size != call->reply_max) |
212 | add_wait_queue(&call->waitq, &myself); | 134 | return -EBADMSG; |
213 | 135 | ||
214 | /* marshall the parameters */ | 136 | /* unmarshall the reply once we've received all of it */ |
215 | piov[1].iov_len = strlen(name); | 137 | bp = call->buffer; |
216 | piov[1].iov_base = (char *) name; | 138 | xdr_decode_AFSFetchStatus(&bp, call->reply); |
217 | 139 | xdr_decode_AFSCallBack(&bp, call->reply); | |
218 | zero = 0; | 140 | if (call->reply2) |
219 | piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3; | 141 | xdr_decode_AFSVolSync(&bp, call->reply2); |
220 | piov[2].iov_base = &zero; | ||
221 | |||
222 | param[0] = htonl(FSGETVOLUMEINFO); | ||
223 | param[1] = htonl(piov[1].iov_len); | ||
224 | |||
225 | piov[0].iov_len = sizeof(param); | ||
226 | piov[0].iov_base = param; | ||
227 | |||
228 | /* send the parameters to the server */ | ||
229 | ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
230 | 0, &sent); | ||
231 | if (ret < 0) | ||
232 | goto abort; | ||
233 | |||
234 | /* wait for the reply to completely arrive */ | ||
235 | bp = rxrpc_call_alloc_scratch(call, 64); | ||
236 | |||
237 | ret = rxrpc_call_read_data(call, bp, 64, | ||
238 | RXRPC_CALL_READ_BLOCK | | ||
239 | RXRPC_CALL_READ_ALL); | ||
240 | if (ret < 0) { | ||
241 | if (ret == -ECONNABORTED) { | ||
242 | ret = call->app_errno; | ||
243 | goto out_unwait; | ||
244 | } | ||
245 | goto abort; | ||
246 | } | ||
247 | 142 | ||
248 | /* unmarshall the reply */ | 143 | _leave(" = 0 [done]"); |
249 | vinfo->vid = ntohl(*bp++); | 144 | return 0; |
250 | vinfo->type = ntohl(*bp++); | ||
251 | |||
252 | vinfo->type_vids[0] = ntohl(*bp++); | ||
253 | vinfo->type_vids[1] = ntohl(*bp++); | ||
254 | vinfo->type_vids[2] = ntohl(*bp++); | ||
255 | vinfo->type_vids[3] = ntohl(*bp++); | ||
256 | vinfo->type_vids[4] = ntohl(*bp++); | ||
257 | |||
258 | vinfo->nservers = ntohl(*bp++); | ||
259 | vinfo->servers[0].addr.s_addr = *bp++; | ||
260 | vinfo->servers[1].addr.s_addr = *bp++; | ||
261 | vinfo->servers[2].addr.s_addr = *bp++; | ||
262 | vinfo->servers[3].addr.s_addr = *bp++; | ||
263 | vinfo->servers[4].addr.s_addr = *bp++; | ||
264 | vinfo->servers[5].addr.s_addr = *bp++; | ||
265 | vinfo->servers[6].addr.s_addr = *bp++; | ||
266 | vinfo->servers[7].addr.s_addr = *bp++; | ||
267 | |||
268 | ret = -EBADMSG; | ||
269 | if (vinfo->nservers > 8) | ||
270 | goto abort; | ||
271 | |||
272 | /* success */ | ||
273 | ret = 0; | ||
274 | |||
275 | out_unwait: | ||
276 | set_current_state(TASK_RUNNING); | ||
277 | remove_wait_queue(&call->waitq, &myself); | ||
278 | rxrpc_put_call(call); | ||
279 | out_put_conn: | ||
280 | afs_server_release_fsconn(server, conn); | ||
281 | out: | ||
282 | _leave(""); | ||
283 | return ret; | ||
284 | |||
285 | abort: | ||
286 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
287 | rxrpc_call_abort(call, ret); | ||
288 | schedule(); | ||
289 | goto out_unwait; | ||
290 | } | 145 | } |
291 | #endif | 146 | |
147 | /* | ||
148 | * FS.FetchStatus operation type | ||
149 | */ | ||
150 | static const struct afs_call_type afs_RXFSFetchStatus = { | ||
151 | .deliver = afs_deliver_fs_fetch_status, | ||
152 | .abort_to_error = afs_abort_to_error, | ||
153 | .destructor = afs_flat_call_destructor, | ||
154 | }; | ||
292 | 155 | ||
293 | /* | 156 | /* |
294 | * fetch the status information for a file | 157 | * fetch the status information for a file |
295 | */ | 158 | */ |
296 | int afs_rxfs_fetch_file_status(struct afs_server *server, | 159 | int afs_fs_fetch_file_status(struct afs_server *server, |
297 | struct afs_vnode *vnode, | 160 | struct afs_vnode *vnode, |
298 | struct afs_volsync *volsync) | 161 | struct afs_volsync *volsync, |
162 | const struct afs_wait_mode *wait_mode) | ||
299 | { | 163 | { |
300 | struct afs_server_callslot callslot; | 164 | struct afs_call *call; |
301 | struct rxrpc_call *call; | ||
302 | struct kvec piov[1]; | ||
303 | size_t sent; | ||
304 | int ret; | ||
305 | __be32 *bp; | 165 | __be32 *bp; |
306 | 166 | ||
307 | DECLARE_WAITQUEUE(myself, current); | 167 | _enter(""); |
308 | 168 | ||
309 | _enter("%p,{%u,%u,%u}", | 169 | call = afs_alloc_flat_call(&afs_RXFSFetchStatus, 16, 120); |
310 | server, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); | 170 | if (!call) |
171 | return -ENOMEM; | ||
311 | 172 | ||
312 | /* get hold of the fileserver connection */ | 173 | call->reply = vnode; |
313 | ret = afs_server_request_callslot(server, &callslot); | 174 | call->reply2 = volsync; |
314 | if (ret < 0) | 175 | call->service_id = FS_SERVICE; |
315 | goto out; | 176 | call->port = htons(AFS_FS_PORT); |
316 | |||
317 | /* create a call through that connection */ | ||
318 | ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap, | ||
319 | &call); | ||
320 | if (ret < 0) { | ||
321 | printk("kAFS: Unable to create call: %d\n", ret); | ||
322 | goto out_put_conn; | ||
323 | } | ||
324 | call->app_opcode = FSFETCHSTATUS; | ||
325 | |||
326 | /* we want to get event notifications from the call */ | ||
327 | add_wait_queue(&call->waitq, &myself); | ||
328 | 177 | ||
329 | /* marshall the parameters */ | 178 | /* marshall the parameters */ |
330 | bp = rxrpc_call_alloc_scratch(call, 16); | 179 | bp = call->request; |
331 | bp[0] = htonl(FSFETCHSTATUS); | 180 | bp[0] = htonl(FSFETCHSTATUS); |
332 | bp[1] = htonl(vnode->fid.vid); | 181 | bp[1] = htonl(vnode->fid.vid); |
333 | bp[2] = htonl(vnode->fid.vnode); | 182 | bp[2] = htonl(vnode->fid.vnode); |
334 | bp[3] = htonl(vnode->fid.unique); | 183 | bp[3] = htonl(vnode->fid.unique); |
335 | 184 | ||
336 | piov[0].iov_len = 16; | 185 | return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); |
337 | piov[0].iov_base = bp; | ||
338 | |||
339 | /* send the parameters to the server */ | ||
340 | ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
341 | 0, &sent); | ||
342 | if (ret < 0) | ||
343 | goto abort; | ||
344 | |||
345 | /* wait for the reply to completely arrive */ | ||
346 | bp = rxrpc_call_alloc_scratch(call, 120); | ||
347 | |||
348 | ret = rxrpc_call_read_data(call, bp, 120, | ||
349 | RXRPC_CALL_READ_BLOCK | | ||
350 | RXRPC_CALL_READ_ALL); | ||
351 | if (ret < 0) { | ||
352 | if (ret == -ECONNABORTED) { | ||
353 | ret = call->app_errno; | ||
354 | goto out_unwait; | ||
355 | } | ||
356 | goto abort; | ||
357 | } | ||
358 | |||
359 | /* unmarshall the reply */ | ||
360 | vnode->status.if_version = ntohl(*bp++); | ||
361 | vnode->status.type = ntohl(*bp++); | ||
362 | vnode->status.nlink = ntohl(*bp++); | ||
363 | vnode->status.size = ntohl(*bp++); | ||
364 | vnode->status.version = ntohl(*bp++); | ||
365 | vnode->status.author = ntohl(*bp++); | ||
366 | vnode->status.owner = ntohl(*bp++); | ||
367 | vnode->status.caller_access = ntohl(*bp++); | ||
368 | vnode->status.anon_access = ntohl(*bp++); | ||
369 | vnode->status.mode = ntohl(*bp++); | ||
370 | vnode->status.parent.vid = vnode->fid.vid; | ||
371 | vnode->status.parent.vnode = ntohl(*bp++); | ||
372 | vnode->status.parent.unique = ntohl(*bp++); | ||
373 | bp++; /* seg size */ | ||
374 | vnode->status.mtime_client = ntohl(*bp++); | ||
375 | vnode->status.mtime_server = ntohl(*bp++); | ||
376 | bp++; /* group */ | ||
377 | bp++; /* sync counter */ | ||
378 | vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32; | ||
379 | bp++; /* spare2 */ | ||
380 | bp++; /* spare3 */ | ||
381 | bp++; /* spare4 */ | ||
382 | |||
383 | vnode->cb_version = ntohl(*bp++); | ||
384 | vnode->cb_expiry = ntohl(*bp++); | ||
385 | vnode->cb_type = ntohl(*bp++); | ||
386 | |||
387 | if (volsync) { | ||
388 | volsync->creation = ntohl(*bp++); | ||
389 | bp++; /* spare2 */ | ||
390 | bp++; /* spare3 */ | ||
391 | bp++; /* spare4 */ | ||
392 | bp++; /* spare5 */ | ||
393 | bp++; /* spare6 */ | ||
394 | } | ||
395 | |||
396 | /* success */ | ||
397 | ret = 0; | ||
398 | |||
399 | out_unwait: | ||
400 | set_current_state(TASK_RUNNING); | ||
401 | remove_wait_queue(&call->waitq, &myself); | ||
402 | rxrpc_put_call(call); | ||
403 | out_put_conn: | ||
404 | afs_server_release_callslot(server, &callslot); | ||
405 | out: | ||
406 | _leave(""); | ||
407 | return ret; | ||
408 | |||
409 | abort: | ||
410 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
411 | rxrpc_call_abort(call, ret); | ||
412 | schedule(); | ||
413 | goto out_unwait; | ||
414 | } | 186 | } |
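Each operation now builds an afs_call, points it at the reply buffers and hands it to afs_make_call() with a wait mode, rather than blocking on rxrpc primitives inline. A hedged sketch of a call site for the synchronous case; afs_sync_call is the blocking wait mode declared elsewhere in this patch series, and the helper name below is made up purely for illustration:

/* Sketch only: synchronously refresh a vnode's status from its server. */
static int afs_example_refresh_status(struct afs_server *server,
				      struct afs_vnode *vnode)
{
	int ret;

	ret = afs_fs_fetch_file_status(server, vnode, NULL /* no volsync */,
				       &afs_sync_call);
	if (ret < 0)
		_debug("FS.FetchStatus failed: %d", ret);
	return ret;
}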
415 | 187 | ||
416 | /* | 188 | /* |
417 | * fetch the contents of a file or directory | 189 | * deliver reply data to an FS.FetchData |
418 | */ | 190 | */ |
419 | int afs_rxfs_fetch_file_data(struct afs_server *server, | 191 | static int afs_deliver_fs_fetch_data(struct afs_call *call, |
420 | struct afs_vnode *vnode, | 192 | struct sk_buff *skb, bool last) |
421 | struct afs_rxfs_fetch_descriptor *desc, | ||
422 | struct afs_volsync *volsync) | ||
423 | { | 193 | { |
424 | struct afs_server_callslot callslot; | 194 | const __be32 *bp; |
425 | struct rxrpc_call *call; | 195 | struct page *page; |
426 | struct kvec piov[1]; | 196 | void *buffer; |
427 | size_t sent; | ||
428 | int ret; | 197 | int ret; |
429 | __be32 *bp; | ||
430 | 198 | ||
431 | DECLARE_WAITQUEUE(myself, current); | 199 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); |
432 | 200 | ||
433 | _enter("%p,{fid={%u,%u,%u},sz=%Zu,of=%lu}", | 201 | switch (call->unmarshall) { |
434 | server, | 202 | case 0: |
435 | desc->fid.vid, | 203 | call->offset = 0; |
436 | desc->fid.vnode, | 204 | call->unmarshall++; |
437 | desc->fid.unique, | 205 | |
438 | desc->size, | 206 | /* extract the returned data length */ |
439 | desc->offset); | 207 | case 1: |
440 | 208 | _debug("extract data length"); | |
441 | /* get hold of the fileserver connection */ | 209 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); |
442 | ret = afs_server_request_callslot(server, &callslot); | 210 | switch (ret) { |
443 | if (ret < 0) | 211 | case 0: break; |
444 | goto out; | 212 | case -EAGAIN: return 0; |
445 | 213 | default: return ret; | |
446 | /* create a call through that connection */ | 214 | } |
447 | ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap, &call); | ||
448 | if (ret < 0) { | ||
449 | printk("kAFS: Unable to create call: %d\n", ret); | ||
450 | goto out_put_conn; | ||
451 | } | ||
452 | call->app_opcode = FSFETCHDATA; | ||
453 | 215 | ||
454 | /* we want to get event notifications from the call */ | 216 | call->count = ntohl(call->tmp); |
455 | add_wait_queue(&call->waitq, &myself); | 217 | _debug("DATA length: %u", call->count); |
218 | if (call->count > PAGE_SIZE) | ||
219 | return -EBADMSG; | ||
220 | call->offset = 0; | ||
221 | call->unmarshall++; | ||
222 | |||
223 | if (call->count < PAGE_SIZE) { | ||
224 | buffer = kmap_atomic(call->reply3, KM_USER0); | ||
225 | memset(buffer + PAGE_SIZE - call->count, 0, | ||
226 | call->count); | ||
227 | kunmap_atomic(buffer, KM_USER0); | ||
228 | } | ||
456 | 229 | ||
457 | /* marshall the parameters */ | 230 | /* extract the returned data */ |
458 | bp = rxrpc_call_alloc_scratch(call, 24); | 231 | case 2: |
459 | bp[0] = htonl(FSFETCHDATA); | 232 | _debug("extract data"); |
460 | bp[1] = htonl(desc->fid.vid); | 233 | page = call->reply3; |
461 | bp[2] = htonl(desc->fid.vnode); | 234 | buffer = kmap_atomic(page, KM_USER0); |
462 | bp[3] = htonl(desc->fid.unique); | 235 | ret = afs_extract_data(call, skb, last, buffer, call->count); |
463 | bp[4] = htonl(desc->offset); | 236 | kunmap_atomic(buffer, KM_USER0); |
464 | bp[5] = htonl(desc->size); | 237 | switch (ret) { |
465 | 238 | case 0: break; | |
466 | piov[0].iov_len = 24; | 239 | case -EAGAIN: return 0; |
467 | piov[0].iov_base = bp; | 240 | default: return ret; |
468 | 241 | } | |
469 | /* send the parameters to the server */ | ||
470 | ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
471 | 0, &sent); | ||
472 | if (ret < 0) | ||
473 | goto abort; | ||
474 | |||
475 | /* wait for the data count to arrive */ | ||
476 | ret = rxrpc_call_read_data(call, bp, 4, RXRPC_CALL_READ_BLOCK); | ||
477 | if (ret < 0) | ||
478 | goto read_failed; | ||
479 | |||
480 | desc->actual = ntohl(bp[0]); | ||
481 | if (desc->actual != desc->size) { | ||
482 | ret = -EBADMSG; | ||
483 | goto abort; | ||
484 | } | ||
485 | 242 | ||
486 | /* call the app to read the actual data */ | 243 | call->offset = 0; |
487 | rxrpc_call_reset_scratch(call); | 244 | call->unmarshall++; |
488 | |||
489 | ret = rxrpc_call_read_data(call, desc->buffer, desc->actual, | ||
490 | RXRPC_CALL_READ_BLOCK); | ||
491 | if (ret < 0) | ||
492 | goto read_failed; | ||
493 | |||
494 | /* wait for the rest of the reply to completely arrive */ | ||
495 | rxrpc_call_reset_scratch(call); | ||
496 | bp = rxrpc_call_alloc_scratch(call, 120); | ||
497 | |||
498 | ret = rxrpc_call_read_data(call, bp, 120, | ||
499 | RXRPC_CALL_READ_BLOCK | | ||
500 | RXRPC_CALL_READ_ALL); | ||
501 | if (ret < 0) | ||
502 | goto read_failed; | ||
503 | |||
504 | /* unmarshall the reply */ | ||
505 | vnode->status.if_version = ntohl(*bp++); | ||
506 | vnode->status.type = ntohl(*bp++); | ||
507 | vnode->status.nlink = ntohl(*bp++); | ||
508 | vnode->status.size = ntohl(*bp++); | ||
509 | vnode->status.version = ntohl(*bp++); | ||
510 | vnode->status.author = ntohl(*bp++); | ||
511 | vnode->status.owner = ntohl(*bp++); | ||
512 | vnode->status.caller_access = ntohl(*bp++); | ||
513 | vnode->status.anon_access = ntohl(*bp++); | ||
514 | vnode->status.mode = ntohl(*bp++); | ||
515 | vnode->status.parent.vid = desc->fid.vid; | ||
516 | vnode->status.parent.vnode = ntohl(*bp++); | ||
517 | vnode->status.parent.unique = ntohl(*bp++); | ||
518 | bp++; /* seg size */ | ||
519 | vnode->status.mtime_client = ntohl(*bp++); | ||
520 | vnode->status.mtime_server = ntohl(*bp++); | ||
521 | bp++; /* group */ | ||
522 | bp++; /* sync counter */ | ||
523 | vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32; | ||
524 | bp++; /* spare2 */ | ||
525 | bp++; /* spare3 */ | ||
526 | bp++; /* spare4 */ | ||
527 | 245 | ||
528 | vnode->cb_version = ntohl(*bp++); | 246 | /* extract the metadata */ |
529 | vnode->cb_expiry = ntohl(*bp++); | 247 | case 3: |
530 | vnode->cb_type = ntohl(*bp++); | 248 | ret = afs_extract_data(call, skb, last, call->buffer, 120); |
531 | 249 | switch (ret) { | |
532 | if (volsync) { | 250 | case 0: break; |
533 | volsync->creation = ntohl(*bp++); | 251 | case -EAGAIN: return 0; |
534 | bp++; /* spare2 */ | 252 | default: return ret; |
535 | bp++; /* spare3 */ | 253 | } |
536 | bp++; /* spare4 */ | 254 | |
537 | bp++; /* spare5 */ | 255 | bp = call->buffer; |
538 | bp++; /* spare6 */ | 256 | xdr_decode_AFSFetchStatus(&bp, call->reply); |
539 | } | 257 | xdr_decode_AFSCallBack(&bp, call->reply); |
258 | if (call->reply2) | ||
259 | xdr_decode_AFSVolSync(&bp, call->reply2); | ||
260 | |||
261 | call->offset = 0; | ||
262 | call->unmarshall++; | ||
540 | 263 | ||
541 | /* success */ | 264 | case 4: |
542 | ret = 0; | 265 | _debug("trailer"); |
543 | 266 | if (skb->len != 0) | |
544 | out_unwait: | 267 | return -EBADMSG; |
545 | set_current_state(TASK_RUNNING); | 268 | break; |
546 | remove_wait_queue(&call->waitq,&myself); | ||
547 | rxrpc_put_call(call); | ||
548 | out_put_conn: | ||
549 | afs_server_release_callslot(server, &callslot); | ||
550 | out: | ||
551 | _leave(" = %d", ret); | ||
552 | return ret; | ||
553 | |||
554 | read_failed: | ||
555 | if (ret == -ECONNABORTED) { | ||
556 | ret = call->app_errno; | ||
557 | goto out_unwait; | ||
558 | } | 269 | } |
559 | 270 | ||
560 | abort: | 271 | if (!last) |
561 | set_current_state(TASK_UNINTERRUPTIBLE); | 272 | return 0; |
562 | rxrpc_call_abort(call, ret); | 273 | |
563 | schedule(); | 274 | _leave(" = 0 [done]"); |
564 | goto out_unwait; | 275 | return 0; |
565 | } | 276 | } |
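The FetchData delivery routine above is an incremental state machine: call->unmarshall records how far decoding has progressed, afs_extract_data() asks for another packet by returning -EAGAIN, and each case deliberately falls through to the next once its piece is complete. A small user-space sketch of the same pattern, assuming nothing from the kernel API:

#include <stdio.h>

/* Toy incremental parser: phase 0 reads a 4-byte little-endian length,
 * phase 1 reads that many payload bytes; feed() is called once per packet. */
struct parser {
	int phase;
	unsigned int have, len;
	unsigned char hdr[4], payload[64];
};

/* 1 = record complete, 0 = more data needed (the -EAGAIN case), -1 = bad */
static int feed(struct parser *p, const unsigned char *data, unsigned int n)
{
	switch (p->phase) {
	case 0:	/* accumulate the length word */
		while (p->have < 4 && n) {
			p->hdr[p->have++] = *data++;
			n--;
		}
		if (p->have < 4)
			return 0;
		p->len = p->hdr[0] | p->hdr[1] << 8 |
			 p->hdr[2] << 16 | (unsigned int)p->hdr[3] << 24;
		if (p->len > sizeof(p->payload))
			return -1;		/* -EBADMSG in the kernel code */
		p->have = 0;
		p->phase = 1;
		/* fall through, exactly as the cases above do */
	case 1:	/* accumulate the payload itself */
		while (p->have < p->len && n) {
			p->payload[p->have++] = *data++;
			n--;
		}
		if (p->have < p->len)
			return 0;
		return 1;
	}
	return -1;
}

int main(void)
{
	struct parser p = { 0 };
	unsigned char pkt1[] = { 5, 0, 0, 0, 'h', 'e' };	/* length + partial data */
	unsigned char pkt2[] = { 'l', 'l', 'o' };		/* remainder */

	printf("%d\n", feed(&p, pkt1, sizeof(pkt1)));	/* 0: more needed */
	printf("%d\n", feed(&p, pkt2, sizeof(pkt2)));	/* 1: complete */
	return 0;
}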
566 | 277 | ||
567 | /* | 278 | /* |
568 | * ask the AFS fileserver to discard a callback request on a file | 279 | * FS.FetchData operation type |
569 | */ | 280 | */ |
570 | int afs_rxfs_give_up_callback(struct afs_server *server, | 281 | static const struct afs_call_type afs_RXFSFetchData = { |
571 | struct afs_vnode *vnode) | 282 | .deliver = afs_deliver_fs_fetch_data, |
283 | .abort_to_error = afs_abort_to_error, | ||
284 | .destructor = afs_flat_call_destructor, | ||
285 | }; | ||
286 | |||
287 | /* | ||
288 | * fetch data from a file | ||
289 | */ | ||
290 | int afs_fs_fetch_data(struct afs_server *server, | ||
291 | struct afs_vnode *vnode, | ||
292 | off_t offset, size_t length, | ||
293 | struct page *buffer, | ||
294 | struct afs_volsync *volsync, | ||
295 | const struct afs_wait_mode *wait_mode) | ||
572 | { | 296 | { |
573 | struct afs_server_callslot callslot; | 297 | struct afs_call *call; |
574 | struct rxrpc_call *call; | ||
575 | struct kvec piov[1]; | ||
576 | size_t sent; | ||
577 | int ret; | ||
578 | __be32 *bp; | 298 | __be32 *bp; |
579 | 299 | ||
580 | DECLARE_WAITQUEUE(myself, current); | 300 | _enter(""); |
581 | 301 | ||
582 | _enter("%p,{%u,%u,%u}", | 302 | call = afs_alloc_flat_call(&afs_RXFSFetchData, 24, 120); |
583 | server, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); | 303 | if (!call) |
304 | return -ENOMEM; | ||
584 | 305 | ||
585 | /* get hold of the fileserver connection */ | 306 | call->reply = vnode; |
586 | ret = afs_server_request_callslot(server, &callslot); | 307 | call->reply2 = volsync; |
587 | if (ret < 0) | 308 | call->reply3 = buffer; |
588 | goto out; | 309 | call->service_id = FS_SERVICE; |
589 | 310 | call->port = htons(AFS_FS_PORT); | |
590 | /* create a call through that connection */ | ||
591 | ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap, &call); | ||
592 | if (ret < 0) { | ||
593 | printk("kAFS: Unable to create call: %d\n", ret); | ||
594 | goto out_put_conn; | ||
595 | } | ||
596 | call->app_opcode = FSGIVEUPCALLBACKS; | ||
597 | |||
598 | /* we want to get event notifications from the call */ | ||
599 | add_wait_queue(&call->waitq, &myself); | ||
600 | 311 | ||
601 | /* marshall the parameters */ | 312 | /* marshall the parameters */ |
602 | bp = rxrpc_call_alloc_scratch(call, (1 + 4 + 4) * 4); | 313 | bp = call->request; |
603 | 314 | bp[0] = htonl(FSFETCHDATA); | |
604 | piov[0].iov_len = (1 + 4 + 4) * 4; | 315 | bp[1] = htonl(vnode->fid.vid); |
605 | piov[0].iov_base = bp; | 316 | bp[2] = htonl(vnode->fid.vnode); |
606 | 317 | bp[3] = htonl(vnode->fid.unique); | |
607 | *bp++ = htonl(FSGIVEUPCALLBACKS); | 318 | bp[4] = htonl(offset); |
608 | *bp++ = htonl(1); | 319 | bp[5] = htonl(length); |
609 | *bp++ = htonl(vnode->fid.vid); | ||
610 | *bp++ = htonl(vnode->fid.vnode); | ||
611 | *bp++ = htonl(vnode->fid.unique); | ||
612 | *bp++ = htonl(1); | ||
613 | *bp++ = htonl(vnode->cb_version); | ||
614 | *bp++ = htonl(vnode->cb_expiry); | ||
615 | *bp++ = htonl(vnode->cb_type); | ||
616 | |||
617 | /* send the parameters to the server */ | ||
618 | ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
619 | 0, &sent); | ||
620 | if (ret < 0) | ||
621 | goto abort; | ||
622 | |||
623 | /* wait for the reply to completely arrive */ | ||
624 | for (;;) { | ||
625 | set_current_state(TASK_INTERRUPTIBLE); | ||
626 | if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY || | ||
627 | signal_pending(current)) | ||
628 | break; | ||
629 | schedule(); | ||
630 | } | ||
631 | set_current_state(TASK_RUNNING); | ||
632 | |||
633 | ret = -EINTR; | ||
634 | if (signal_pending(current)) | ||
635 | goto abort; | ||
636 | |||
637 | switch (call->app_call_state) { | ||
638 | case RXRPC_CSTATE_ERROR: | ||
639 | ret = call->app_errno; | ||
640 | goto out_unwait; | ||
641 | 320 | ||
642 | case RXRPC_CSTATE_CLNT_GOT_REPLY: | 321 | return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); |
643 | ret = 0; | 322 | } |
644 | goto out_unwait; | ||
645 | 323 | ||
646 | default: | 324 | /* |
647 | BUG(); | 325 | * deliver reply data to an FS.GiveUpCallBacks |
648 | } | 326 | */ |
327 | static int afs_deliver_fs_give_up_callbacks(struct afs_call *call, | ||
328 | struct sk_buff *skb, bool last) | ||
329 | { | ||
330 | _enter(",{%u},%d", skb->len, last); | ||
649 | 331 | ||
650 | out_unwait: | 332 | if (skb->len > 0) |
651 | set_current_state(TASK_RUNNING); | 333 | return -EBADMSG; /* shouldn't be any reply data */ |
652 | remove_wait_queue(&call->waitq, &myself); | 334 | return 0; |
653 | rxrpc_put_call(call); | ||
654 | out_put_conn: | ||
655 | afs_server_release_callslot(server, &callslot); | ||
656 | out: | ||
657 | _leave(""); | ||
658 | return ret; | ||
659 | |||
660 | abort: | ||
661 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
662 | rxrpc_call_abort(call, ret); | ||
663 | schedule(); | ||
664 | goto out_unwait; | ||
665 | } | 335 | } |
666 | 336 | ||
667 | /* | 337 | /* |
668 | * look a filename up in a directory | 338 | * FS.GiveUpCallBacks operation type |
669 | * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2 | 339 | */ |
340 | static const struct afs_call_type afs_RXFSGiveUpCallBacks = { | ||
341 | .deliver = afs_deliver_fs_give_up_callbacks, | ||
342 | .abort_to_error = afs_abort_to_error, | ||
343 | .destructor = afs_flat_call_destructor, | ||
344 | }; | ||
345 | |||
346 | /* | ||
347 | * give up a set of callbacks | ||
348 | * - the callbacks are held in the server->cb_break ring | ||
670 | */ | 349 | */ |
671 | #if 0 | 350 | int afs_fs_give_up_callbacks(struct afs_server *server, |
672 | int afs_rxfs_lookup(struct afs_server *server, | 351 | const struct afs_wait_mode *wait_mode) |
673 | struct afs_vnode *dir, | ||
674 | const char *filename, | ||
675 | struct afs_vnode *vnode, | ||
676 | struct afs_volsync *volsync) | ||
677 | { | 352 | { |
678 | struct rxrpc_connection *conn; | 353 | struct afs_call *call; |
679 | struct rxrpc_call *call; | 354 | size_t ncallbacks; |
680 | struct kvec piov[3]; | 355 | __be32 *bp, *tp; |
681 | size_t sent; | 356 | int loop; |
682 | int ret; | ||
683 | u32 *bp, zero; | ||
684 | 357 | ||
685 | DECLARE_WAITQUEUE(myself, current); | 358 | ncallbacks = CIRC_CNT(server->cb_break_head, server->cb_break_tail, |
359 | ARRAY_SIZE(server->cb_break)); | ||
686 | 360 | ||
687 | kenter("%p,{%u,%u,%u},%s", | 361 | _enter("{%zu},", ncallbacks); |
688 | server, fid->vid, fid->vnode, fid->unique, filename); | ||
689 | 362 | ||
690 | /* get hold of the fileserver connection */ | 363 | if (ncallbacks == 0) |
691 | ret = afs_server_get_fsconn(server, &conn); | 364 | return 0; |
692 | if (ret < 0) | 365 | if (ncallbacks > AFSCBMAX) |
693 | goto out; | 366 | ncallbacks = AFSCBMAX; |
694 | 367 | ||
695 | /* create a call through that connection */ | 368 | _debug("break %zu callbacks", ncallbacks); |
696 | ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call); | ||
697 | if (ret < 0) { | ||
698 | printk("kAFS: Unable to create call: %d\n", ret); | ||
699 | goto out_put_conn; | ||
700 | } | ||
701 | call->app_opcode = FSLOOKUP; | ||
702 | 369 | ||
703 | /* we want to get event notifications from the call */ | 370 | call = afs_alloc_flat_call(&afs_RXFSGiveUpCallBacks, |
704 | add_wait_queue(&call->waitq,&myself); | 371 | 12 + ncallbacks * 6 * 4, 0); |
372 | if (!call) | ||
373 | return -ENOMEM; | ||
374 | |||
375 | call->service_id = FS_SERVICE; | ||
376 | call->port = htons(AFS_FS_PORT); | ||
705 | 377 | ||
706 | /* marshall the parameters */ | 378 | /* marshall the parameters */ |
707 | bp = rxrpc_call_alloc_scratch(call, 20); | 379 | bp = call->request; |
708 | 380 | tp = bp + 2 + ncallbacks * 3; | |
709 | zero = 0; | 381 | *bp++ = htonl(FSGIVEUPCALLBACKS); |
710 | 382 | *bp++ = htonl(ncallbacks); | |
711 | piov[0].iov_len = 20; | 383 | *tp++ = htonl(ncallbacks); |
712 | piov[0].iov_base = bp; | 384 | |
713 | piov[1].iov_len = strlen(filename); | 385 | atomic_sub(ncallbacks, &server->cb_break_n); |
714 | piov[1].iov_base = (char *) filename; | 386 | for (loop = ncallbacks; loop > 0; loop--) { |
715 | piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3; | 387 | struct afs_callback *cb = |
716 | piov[2].iov_base = &zero; | 388 | &server->cb_break[server->cb_break_tail]; |
717 | 389 | ||
718 | *bp++ = htonl(FSLOOKUP); | 390 | *bp++ = htonl(cb->fid.vid); |
719 | *bp++ = htonl(dirfid->vid); | 391 | *bp++ = htonl(cb->fid.vnode); |
720 | *bp++ = htonl(dirfid->vnode); | 392 | *bp++ = htonl(cb->fid.unique); |
721 | *bp++ = htonl(dirfid->unique); | 393 | *tp++ = htonl(cb->version); |
722 | *bp++ = htonl(piov[1].iov_len); | 394 | *tp++ = htonl(cb->expiry); |
723 | 395 | *tp++ = htonl(cb->type); | |
724 | /* send the parameters to the server */ | 396 | smp_mb(); |
725 | ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS, | 397 | server->cb_break_tail = |
726 | 0, &sent); | 398 | (server->cb_break_tail + 1) & |
727 | if (ret < 0) | 399 | (ARRAY_SIZE(server->cb_break) - 1); |
728 | goto abort; | ||
729 | |||
730 | /* wait for the reply to completely arrive */ | ||
731 | bp = rxrpc_call_alloc_scratch(call, 220); | ||
732 | |||
733 | ret = rxrpc_call_read_data(call, bp, 220, | ||
734 | RXRPC_CALL_READ_BLOCK | | ||
735 | RXRPC_CALL_READ_ALL); | ||
736 | if (ret < 0) { | ||
737 | if (ret == -ECONNABORTED) { | ||
738 | ret = call->app_errno; | ||
739 | goto out_unwait; | ||
740 | } | ||
741 | goto abort; | ||
742 | } | 400 | } |
743 | 401 | ||
744 | /* unmarshall the reply */ | 402 | ASSERT(ncallbacks > 0); |
745 | fid->vid = ntohl(*bp++); | 403 | wake_up_nr(&server->cb_break_waitq, ncallbacks); |
746 | fid->vnode = ntohl(*bp++); | ||
747 | fid->unique = ntohl(*bp++); | ||
748 | |||
749 | vnode->status.if_version = ntohl(*bp++); | ||
750 | vnode->status.type = ntohl(*bp++); | ||
751 | vnode->status.nlink = ntohl(*bp++); | ||
752 | vnode->status.size = ntohl(*bp++); | ||
753 | vnode->status.version = ntohl(*bp++); | ||
754 | vnode->status.author = ntohl(*bp++); | ||
755 | vnode->status.owner = ntohl(*bp++); | ||
756 | vnode->status.caller_access = ntohl(*bp++); | ||
757 | vnode->status.anon_access = ntohl(*bp++); | ||
758 | vnode->status.mode = ntohl(*bp++); | ||
759 | vnode->status.parent.vid = dirfid->vid; | ||
760 | vnode->status.parent.vnode = ntohl(*bp++); | ||
761 | vnode->status.parent.unique = ntohl(*bp++); | ||
762 | bp++; /* seg size */ | ||
763 | vnode->status.mtime_client = ntohl(*bp++); | ||
764 | vnode->status.mtime_server = ntohl(*bp++); | ||
765 | bp++; /* group */ | ||
766 | bp++; /* sync counter */ | ||
767 | vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32; | ||
768 | bp++; /* spare2 */ | ||
769 | bp++; /* spare3 */ | ||
770 | bp++; /* spare4 */ | ||
771 | |||
772 | dir->status.if_version = ntohl(*bp++); | ||
773 | dir->status.type = ntohl(*bp++); | ||
774 | dir->status.nlink = ntohl(*bp++); | ||
775 | dir->status.size = ntohl(*bp++); | ||
776 | dir->status.version = ntohl(*bp++); | ||
777 | dir->status.author = ntohl(*bp++); | ||
778 | dir->status.owner = ntohl(*bp++); | ||
779 | dir->status.caller_access = ntohl(*bp++); | ||
780 | dir->status.anon_access = ntohl(*bp++); | ||
781 | dir->status.mode = ntohl(*bp++); | ||
782 | dir->status.parent.vid = dirfid->vid; | ||
783 | dir->status.parent.vnode = ntohl(*bp++); | ||
784 | dir->status.parent.unique = ntohl(*bp++); | ||
785 | bp++; /* seg size */ | ||
786 | dir->status.mtime_client = ntohl(*bp++); | ||
787 | dir->status.mtime_server = ntohl(*bp++); | ||
788 | bp++; /* group */ | ||
789 | bp++; /* sync counter */ | ||
790 | dir->status.version |= ((unsigned long long) ntohl(*bp++)) << 32; | ||
791 | bp++; /* spare2 */ | ||
792 | bp++; /* spare3 */ | ||
793 | bp++; /* spare4 */ | ||
794 | |||
795 | callback->fid = *fid; | ||
796 | callback->version = ntohl(*bp++); | ||
797 | callback->expiry = ntohl(*bp++); | ||
798 | callback->type = ntohl(*bp++); | ||
799 | |||
800 | if (volsync) { | ||
801 | volsync->creation = ntohl(*bp++); | ||
802 | bp++; /* spare2 */ | ||
803 | bp++; /* spare3 */ | ||
804 | bp++; /* spare4 */ | ||
805 | bp++; /* spare5 */ | ||
806 | bp++; /* spare6 */ | ||
807 | } | ||
808 | 404 | ||
809 | /* success */ | 405 | return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); |
810 | ret = 0; | ||
811 | |||
812 | out_unwait: | ||
813 | set_current_state(TASK_RUNNING); | ||
814 | remove_wait_queue(&call->waitq, &myself); | ||
815 | rxrpc_put_call(call); | ||
816 | out_put_conn: | ||
817 | afs_server_release_fsconn(server, conn); | ||
818 | out: | ||
819 | kleave(""); | ||
820 | return ret; | ||
821 | |||
822 | abort: | ||
823 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
824 | rxrpc_call_abort(call, ret); | ||
825 | schedule(); | ||
826 | goto out_unwait; | ||
827 | } | 406 | } |
828 | #endif | ||
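afs_fs_give_up_callbacks() drains the per-server cb_break ring, whose occupancy it measures with CIRC_CNT() from <linux/circ_buf.h>; for a ring whose size is a power of two, that macro is just a masked subtraction of the producer and consumer indices, which stays correct across index wrap-around. A quick standalone illustration (the ring size of 64 here is only an example, not necessarily the size of server->cb_break):

#include <stdio.h>

/* CIRC_CNT() as in <linux/circ_buf.h>: occupied slots in a power-of-two ring. */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

int main(void)
{
	unsigned int size = 64;			/* example ring size (power of two) */
	unsigned int tail = 60, head = 3;	/* producer has wrapped past the end */

	printf("%u callbacks queued\n", CIRC_CNT(head, tail, size));	/* 7 */
	return 0;
}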
diff --git a/fs/afs/fsclient.h b/fs/afs/fsclient.h deleted file mode 100644 index e2b0b7bcd09..00000000000 --- a/fs/afs/fsclient.h +++ /dev/null | |||
@@ -1,54 +0,0 @@ | |||
1 | /* AFS File Server client stub declarations | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef AFS_FSCLIENT_H | ||
13 | #define AFS_FSCLIENT_H | ||
14 | |||
15 | #include "server.h" | ||
16 | |||
17 | extern int afs_rxfs_get_volume_info(struct afs_server *, | ||
18 | const char *, | ||
19 | struct afs_volume_info *); | ||
20 | |||
21 | extern int afs_rxfs_fetch_file_status(struct afs_server *, | ||
22 | struct afs_vnode *, | ||
23 | struct afs_volsync *); | ||
24 | |||
25 | struct afs_rxfs_fetch_descriptor { | ||
26 | struct afs_fid fid; /* file ID to fetch */ | ||
27 | size_t size; /* total number of bytes to fetch */ | ||
28 | off_t offset; /* offset in file to start from */ | ||
29 | void *buffer; /* read buffer */ | ||
30 | size_t actual; /* actual size sent back by server */ | ||
31 | }; | ||
32 | |||
33 | extern int afs_rxfs_fetch_file_data(struct afs_server *, | ||
34 | struct afs_vnode *, | ||
35 | struct afs_rxfs_fetch_descriptor *, | ||
36 | struct afs_volsync *); | ||
37 | |||
38 | extern int afs_rxfs_give_up_callback(struct afs_server *, | ||
39 | struct afs_vnode *); | ||
40 | |||
41 | /* this doesn't appear to work in OpenAFS server */ | ||
42 | extern int afs_rxfs_lookup(struct afs_server *, | ||
43 | struct afs_vnode *, | ||
44 | const char *, | ||
45 | struct afs_vnode *, | ||
46 | struct afs_volsync *); | ||
47 | |||
48 | /* this is apparently mis-implemented in OpenAFS server */ | ||
49 | extern int afs_rxfs_get_root_volume(struct afs_server *, | ||
50 | char *, | ||
51 | size_t *); | ||
52 | |||
53 | |||
54 | #endif /* AFS_FSCLIENT_H */ | ||
diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 900c8bb1c3b..18863315211 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c | |||
@@ -19,9 +19,6 @@ | |||
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
21 | #include <linux/pagemap.h> | 21 | #include <linux/pagemap.h> |
22 | #include "volume.h" | ||
23 | #include "vnode.h" | ||
24 | #include "super.h" | ||
25 | #include "internal.h" | 22 | #include "internal.h" |
26 | 23 | ||
27 | struct afs_iget_data { | 24 | struct afs_iget_data { |
@@ -40,7 +37,7 @@ static int afs_inode_map_status(struct afs_vnode *vnode) | |||
40 | vnode->status.type, | 37 | vnode->status.type, |
41 | vnode->status.nlink, | 38 | vnode->status.nlink, |
42 | vnode->status.size, | 39 | vnode->status.size, |
43 | vnode->status.version, | 40 | vnode->status.data_version, |
44 | vnode->status.mode); | 41 | vnode->status.mode); |
45 | 42 | ||
46 | switch (vnode->status.type) { | 43 | switch (vnode->status.type) { |
@@ -78,7 +75,7 @@ static int afs_inode_map_status(struct afs_vnode *vnode) | |||
78 | if (vnode->status.type == AFS_FTYPE_SYMLINK) { | 75 | if (vnode->status.type == AFS_FTYPE_SYMLINK) { |
79 | afs_mntpt_check_symlink(vnode); | 76 | afs_mntpt_check_symlink(vnode); |
80 | 77 | ||
81 | if (vnode->flags & AFS_VNODE_MOUNTPOINT) { | 78 | if (test_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags)) { |
82 | inode->i_mode = S_IFDIR | vnode->status.mode; | 79 | inode->i_mode = S_IFDIR | vnode->status.mode; |
83 | inode->i_op = &afs_mntpt_inode_operations; | 80 | inode->i_op = &afs_mntpt_inode_operations; |
84 | inode->i_fop = &afs_mntpt_file_operations; | 81 | inode->i_fop = &afs_mntpt_file_operations; |
@@ -89,25 +86,6 @@ static int afs_inode_map_status(struct afs_vnode *vnode) | |||
89 | } | 86 | } |
90 | 87 | ||
91 | /* | 88 | /* |
92 | * attempt to fetch the status of an inode, coelescing multiple simultaneous | ||
93 | * fetches | ||
94 | */ | ||
95 | static int afs_inode_fetch_status(struct inode *inode) | ||
96 | { | ||
97 | struct afs_vnode *vnode; | ||
98 | int ret; | ||
99 | |||
100 | vnode = AFS_FS_I(inode); | ||
101 | |||
102 | ret = afs_vnode_fetch_status(vnode); | ||
103 | |||
104 | if (ret == 0) | ||
105 | ret = afs_inode_map_status(vnode); | ||
106 | |||
107 | return ret; | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * iget5() comparator | 89 | * iget5() comparator |
112 | */ | 90 | */ |
113 | static int afs_iget5_test(struct inode *inode, void *opaque) | 91 | static int afs_iget5_test(struct inode *inode, void *opaque) |
@@ -137,8 +115,7 @@ static int afs_iget5_set(struct inode *inode, void *opaque) | |||
137 | /* | 115 | /* |
138 | * inode retrieval | 116 | * inode retrieval |
139 | */ | 117 | */ |
140 | inline int afs_iget(struct super_block *sb, struct afs_fid *fid, | 118 | inline struct inode *afs_iget(struct super_block *sb, struct afs_fid *fid) |
141 | struct inode **_inode) | ||
142 | { | 119 | { |
143 | struct afs_iget_data data = { .fid = *fid }; | 120 | struct afs_iget_data data = { .fid = *fid }; |
144 | struct afs_super_info *as; | 121 | struct afs_super_info *as; |
@@ -155,20 +132,18 @@ inline int afs_iget(struct super_block *sb, struct afs_fid *fid, | |||
155 | &data); | 132 | &data); |
156 | if (!inode) { | 133 | if (!inode) { |
157 | _leave(" = -ENOMEM"); | 134 | _leave(" = -ENOMEM"); |
158 | return -ENOMEM; | 135 | return ERR_PTR(-ENOMEM); |
159 | } | 136 | } |
160 | 137 | ||
138 | _debug("GOT INODE %p { vl=%x vn=%x, u=%x }", | ||
139 | inode, fid->vid, fid->vnode, fid->unique); | ||
140 | |||
161 | vnode = AFS_FS_I(inode); | 141 | vnode = AFS_FS_I(inode); |
162 | 142 | ||
163 | /* deal with an existing inode */ | 143 | /* deal with an existing inode */ |
164 | if (!(inode->i_state & I_NEW)) { | 144 | if (!(inode->i_state & I_NEW)) { |
165 | ret = afs_vnode_fetch_status(vnode); | 145 | _leave(" = %p", inode); |
166 | if (ret == 0) | 146 | return inode; |
167 | *_inode = inode; | ||
168 | else | ||
169 | iput(inode); | ||
170 | _leave(" = %d", ret); | ||
171 | return ret; | ||
172 | } | 147 | } |
173 | 148 | ||
174 | #ifdef AFS_CACHING_SUPPORT | 149 | #ifdef AFS_CACHING_SUPPORT |
@@ -181,21 +156,19 @@ inline int afs_iget(struct super_block *sb, struct afs_fid *fid, | |||
181 | #endif | 156 | #endif |
182 | 157 | ||
183 | /* okay... it's a new inode */ | 158 | /* okay... it's a new inode */ |
184 | inode->i_flags |= S_NOATIME; | 159 | set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags); |
185 | vnode->flags |= AFS_VNODE_CHANGED; | 160 | ret = afs_vnode_fetch_status(vnode); |
186 | ret = afs_inode_fetch_status(inode); | 161 | if (ret < 0) |
187 | if (ret<0) | 162 | goto bad_inode; |
163 | ret = afs_inode_map_status(vnode); | ||
164 | if (ret < 0) | ||
188 | goto bad_inode; | 165 | goto bad_inode; |
189 | 166 | ||
190 | /* success */ | 167 | /* success */ |
168 | inode->i_flags |= S_NOATIME; | ||
191 | unlock_new_inode(inode); | 169 | unlock_new_inode(inode); |
192 | 170 | _leave(" = %p [CB { v=%u t=%u }]", inode, vnode->cb_version, vnode->cb_type); | |
193 | *_inode = inode; | 171 | return inode; |
194 | _leave(" = 0 [CB { v=%u x=%lu t=%u }]", | ||
195 | vnode->cb_version, | ||
196 | vnode->cb_timeout.timo_jif, | ||
197 | vnode->cb_type); | ||
198 | return 0; | ||
199 | 172 | ||
200 | /* failure */ | 173 | /* failure */ |
201 | bad_inode: | 174 | bad_inode: |
@@ -204,7 +177,7 @@ bad_inode: | |||
204 | iput(inode); | 177 | iput(inode); |
205 | 178 | ||
206 | _leave(" = %d [bad]", ret); | 179 | _leave(" = %d [bad]", ret); |
207 | return ret; | 180 | return ERR_PTR(ret); |
208 | } | 181 | } |
209 | 182 | ||
210 | /* | 183 | /* |
@@ -213,36 +186,13 @@ bad_inode: | |||
213 | int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, | 186 | int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, |
214 | struct kstat *stat) | 187 | struct kstat *stat) |
215 | { | 188 | { |
216 | struct afs_vnode *vnode; | ||
217 | struct inode *inode; | 189 | struct inode *inode; |
218 | int ret; | ||
219 | 190 | ||
220 | inode = dentry->d_inode; | 191 | inode = dentry->d_inode; |
221 | 192 | ||
222 | _enter("{ ino=%lu v=%lu }", inode->i_ino, inode->i_version); | 193 | _enter("{ ino=%lu v=%lu }", inode->i_ino, inode->i_version); |
223 | 194 | ||
224 | vnode = AFS_FS_I(inode); | ||
225 | |||
226 | ret = afs_inode_fetch_status(inode); | ||
227 | if (ret == -ENOENT) { | ||
228 | _leave(" = %d [%d %p]", | ||
229 | ret, atomic_read(&dentry->d_count), dentry->d_inode); | ||
230 | return ret; | ||
231 | } else if (ret < 0) { | ||
232 | make_bad_inode(inode); | ||
233 | _leave(" = %d", ret); | ||
234 | return ret; | ||
235 | } | ||
236 | |||
237 | /* transfer attributes from the inode structure to the stat | ||
238 | * structure */ | ||
239 | generic_fillattr(inode, stat); | 195 | generic_fillattr(inode, stat); |
240 | |||
241 | _leave(" = 0 CB { v=%u x=%u t=%u }", | ||
242 | vnode->cb_version, | ||
243 | vnode->cb_expiry, | ||
244 | vnode->cb_type); | ||
245 | |||
246 | return 0; | 196 | return 0; |
247 | } | 197 | } |
248 | 198 | ||
@@ -260,12 +210,23 @@ void afs_clear_inode(struct inode *inode) | |||
260 | vnode->fid.vnode, | 210 | vnode->fid.vnode, |
261 | vnode->cb_version, | 211 | vnode->cb_version, |
262 | vnode->cb_expiry, | 212 | vnode->cb_expiry, |
263 | vnode->cb_type | 213 | vnode->cb_type); |
264 | ); | ||
265 | 214 | ||
266 | BUG_ON(inode->i_ino != vnode->fid.vnode); | 215 | _debug("CLEAR INODE %p", inode); |
216 | |||
217 | ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode); | ||
218 | |||
219 | afs_give_up_callback(vnode); | ||
220 | |||
221 | if (vnode->server) { | ||
222 | spin_lock(&vnode->server->fs_lock); | ||
223 | rb_erase(&vnode->server_rb, &vnode->server->fs_vnodes); | ||
224 | spin_unlock(&vnode->server->fs_lock); | ||
225 | afs_put_server(vnode->server); | ||
226 | vnode->server = NULL; | ||
227 | } | ||
267 | 228 | ||
268 | afs_vnode_give_up_callback(vnode); | 229 | ASSERT(!vnode->cb_promised); |
269 | 230 | ||
270 | #ifdef AFS_CACHING_SUPPORT | 231 | #ifdef AFS_CACHING_SUPPORT |
271 | cachefs_relinquish_cookie(vnode->cache, 0); | 232 | cachefs_relinquish_cookie(vnode->cache, 0); |
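For context, a minimal caller sketch (not part of this patch) of the reworked afs_iget() above: the function now returns the inode pointer directly and reports failure through ERR_PTR(), so callers test with IS_ERR() instead of checking an int plus a separate out-parameter. The function name below is hypothetical.

	/* hypothetical caller illustrating the new error-pointer convention */
	static struct inode *example_grab_inode(struct super_block *sb,
						struct afs_fid *fid)
	{
		struct inode *inode;

		inode = afs_iget(sb, fid);
		if (IS_ERR(inode))
			return inode;	/* e.g. ERR_PTR(-ENOMEM) propagated as-is */

		/* ... instantiate the dentry, etc. ... */
		return inode;
	}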
diff --git a/fs/afs/internal.h b/fs/afs/internal.h index b6dd20a93cc..afc6f0f3025 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* internal AFS stuff | 1 | /* internal AFS stuff |
2 | * | 2 | * |
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -9,48 +9,321 @@ | |||
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #ifndef AFS_INTERNAL_H | ||
13 | #define AFS_INTERNAL_H | ||
14 | |||
15 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
16 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
17 | #include <linux/fs.h> | 14 | #include <linux/fs.h> |
18 | #include <linux/pagemap.h> | 15 | #include <linux/pagemap.h> |
16 | #include <linux/skbuff.h> | ||
17 | #include <linux/rxrpc.h> | ||
18 | #include "afs.h" | ||
19 | #include "afs_vl.h" | ||
20 | |||
21 | #define AFS_CELL_MAX_ADDRS 15 | ||
22 | |||
23 | struct afs_call; | ||
24 | |||
25 | typedef enum { | ||
26 | AFS_VL_NEW, /* new, uninitialised record */ | ||
27 | AFS_VL_CREATING, /* creating record */ | ||
28 | AFS_VL_VALID, /* record is valid */ | ||
29 | AFS_VL_NO_VOLUME, /* no such volume available */ | ||
30 | AFS_VL_UPDATING, /* update in progress */ | ||
31 | AFS_VL_VOLUME_DELETED, /* volume was deleted */ | ||
32 | AFS_VL_UNCERTAIN, /* uncertain state (update failed) */ | ||
33 | } __attribute__((packed)) afs_vlocation_state_t; | ||
19 | 34 | ||
20 | /* | 35 | /* |
21 | * debug tracing | 36 | * definition of how to wait for the completion of an operation |
22 | */ | 37 | */ |
23 | #define kenter(FMT, a...) printk("==> %s("FMT")\n",__FUNCTION__ , ## a) | 38 | struct afs_wait_mode { |
24 | #define kleave(FMT, a...) printk("<== %s()"FMT"\n",__FUNCTION__ , ## a) | 39 | /* RxRPC received message notification */ |
25 | #define kdebug(FMT, a...) printk(FMT"\n" , ## a) | 40 | void (*rx_wakeup)(struct afs_call *call); |
26 | #define kproto(FMT, a...) printk("### "FMT"\n" , ## a) | ||
27 | #define knet(FMT, a...) printk(FMT"\n" , ## a) | ||
28 | |||
29 | #ifdef __KDEBUG | ||
30 | #define _enter(FMT, a...) kenter(FMT , ## a) | ||
31 | #define _leave(FMT, a...) kleave(FMT , ## a) | ||
32 | #define _debug(FMT, a...) kdebug(FMT , ## a) | ||
33 | #define _proto(FMT, a...) kproto(FMT , ## a) | ||
34 | #define _net(FMT, a...) knet(FMT , ## a) | ||
35 | #else | ||
36 | #define _enter(FMT, a...) do { } while(0) | ||
37 | #define _leave(FMT, a...) do { } while(0) | ||
38 | #define _debug(FMT, a...) do { } while(0) | ||
39 | #define _proto(FMT, a...) do { } while(0) | ||
40 | #define _net(FMT, a...) do { } while(0) | ||
41 | #endif | ||
42 | 41 | ||
43 | static inline void afs_discard_my_signals(void) | 42 | /* synchronous call waiter and call dispatched notification */ |
44 | { | 43 | int (*wait)(struct afs_call *call); |
45 | while (signal_pending(current)) { | 44 | |
46 | siginfo_t sinfo; | 45 | /* asynchronous call completion */ |
46 | void (*async_complete)(void *reply, int error); | ||
47 | }; | ||
48 | |||
49 | extern const struct afs_wait_mode afs_sync_call; | ||
50 | extern const struct afs_wait_mode afs_async_call; | ||
51 | |||
52 | /* | ||
53 | * a record of an in-progress RxRPC call | ||
54 | */ | ||
55 | struct afs_call { | ||
56 | const struct afs_call_type *type; /* type of call */ | ||
57 | const struct afs_wait_mode *wait_mode; /* completion wait mode */ | ||
58 | wait_queue_head_t waitq; /* processes awaiting completion */ | ||
59 | struct work_struct async_work; /* asynchronous work processor */ | ||
60 | struct work_struct work; /* actual work processor */ | ||
61 | struct sk_buff_head rx_queue; /* received packets */ | ||
62 | struct rxrpc_call *rxcall; /* RxRPC call handle */ | ||
63 | struct key *key; /* security for this call */ | ||
64 | struct afs_server *server; /* server affected by incoming CM call */ | ||
65 | void *request; /* request data (first part) */ | ||
66 | void *request2; /* request data (second part) */ | ||
67 | void *buffer; /* reply receive buffer */ | ||
68 | void *reply; /* reply buffer (first part) */ | ||
69 | void *reply2; /* reply buffer (second part) */ | ||
70 | void *reply3; /* reply buffer (third part) */ | ||
71 | enum { /* call state */ | ||
72 | AFS_CALL_REQUESTING, /* request is being sent for outgoing call */ | ||
73 | AFS_CALL_AWAIT_REPLY, /* awaiting reply to outgoing call */ | ||
74 | AFS_CALL_AWAIT_OP_ID, /* awaiting op ID on incoming call */ | ||
75 | AFS_CALL_AWAIT_REQUEST, /* awaiting request data on incoming call */ | ||
76 | AFS_CALL_REPLYING, /* replying to incoming call */ | ||
77 | AFS_CALL_AWAIT_ACK, /* awaiting final ACK of incoming call */ | ||
78 | AFS_CALL_COMPLETE, /* successfully completed */ | ||
79 | AFS_CALL_BUSY, /* server was busy */ | ||
80 | AFS_CALL_ABORTED, /* call was aborted */ | ||
81 | AFS_CALL_ERROR, /* call failed due to error */ | ||
82 | } state; | ||
83 | int error; /* error code */ | ||
84 | unsigned request_size; /* size of request data */ | ||
85 | unsigned reply_max; /* maximum size of reply */ | ||
86 | unsigned reply_size; /* current size of reply */ | ||
87 | unsigned short offset; /* offset into received data store */ | ||
88 | unsigned char unmarshall; /* unmarshalling phase */ | ||
89 | bool incoming; /* T if incoming call */ | ||
90 | u16 service_id; /* RxRPC service ID to call */ | ||
91 | __be16 port; /* target UDP port */ | ||
92 | __be32 operation_ID; /* operation ID for an incoming call */ | ||
93 | u32 count; /* count for use in unmarshalling */ | ||
94 | __be32 tmp; /* place to extract temporary data */ | ||
95 | }; | ||
96 | |||
97 | struct afs_call_type { | ||
98 | /* deliver request or reply data to a call | ||
99 | * - returning an error will cause the call to be aborted | ||
100 | */ | ||
101 | int (*deliver)(struct afs_call *call, struct sk_buff *skb, | ||
102 | bool last); | ||
103 | |||
104 | /* map an abort code to an error number */ | ||
105 | int (*abort_to_error)(u32 abort_code); | ||
106 | |||
107 | /* clean up a call */ | ||
108 | void (*destructor)(struct afs_call *call); | ||
109 | }; | ||
110 | |||
111 | /* | ||
112 | * AFS superblock private data | ||
113 | * - there's one superblock per volume | ||
114 | */ | ||
115 | struct afs_super_info { | ||
116 | struct afs_volume *volume; /* volume record */ | ||
117 | char rwparent; /* T if parent is R/W AFS volume */ | ||
118 | }; | ||
47 | 119 | ||
48 | spin_lock_irq(¤t->sighand->siglock); | 120 | static inline struct afs_super_info *AFS_FS_S(struct super_block *sb) |
49 | dequeue_signal(current,¤t->blocked, &sinfo); | 121 | { |
50 | spin_unlock_irq(¤t->sighand->siglock); | 122 | return sb->s_fs_info; |
51 | } | ||
52 | } | 123 | } |
53 | 124 | ||
125 | extern struct file_system_type afs_fs_type; | ||
126 | |||
127 | /* | ||
128 | * entry in the cached cell catalogue | ||
129 | */ | ||
130 | struct afs_cache_cell { | ||
131 | char name[64]; /* cell name (padded with NULs) */ | ||
132 | struct in_addr vl_servers[15]; /* cached cell VL servers */ | ||
133 | }; | ||
134 | |||
135 | /* | ||
136 | * AFS cell record | ||
137 | */ | ||
138 | struct afs_cell { | ||
139 | atomic_t usage; | ||
140 | struct list_head link; /* main cell list link */ | ||
141 | struct list_head proc_link; /* /proc cell list link */ | ||
142 | struct proc_dir_entry *proc_dir; /* /proc dir for this cell */ | ||
143 | #ifdef AFS_CACHING_SUPPORT | ||
144 | struct cachefs_cookie *cache; /* caching cookie */ | ||
145 | #endif | ||
146 | |||
147 | /* server record management */ | ||
148 | rwlock_t servers_lock; /* active server list lock */ | ||
149 | struct list_head servers; /* active server list */ | ||
150 | |||
151 | /* volume location record management */ | ||
152 | struct rw_semaphore vl_sem; /* volume management serialisation semaphore */ | ||
153 | struct list_head vl_list; /* cell's active VL record list */ | ||
154 | spinlock_t vl_lock; /* vl_list lock */ | ||
155 | unsigned short vl_naddrs; /* number of VL servers in addr list */ | ||
156 | unsigned short vl_curr_svix; /* current server index */ | ||
157 | struct in_addr vl_addrs[AFS_CELL_MAX_ADDRS]; /* cell VL server addresses */ | ||
158 | |||
159 | char name[0]; /* cell name - must go last */ | ||
160 | }; | ||
161 | |||
162 | /* | ||
163 | * entry in the cached volume location catalogue | ||
164 | */ | ||
165 | struct afs_cache_vlocation { | ||
166 | uint8_t name[64 + 1]; /* volume name (lowercase, padded with NULs) */ | ||
167 | uint8_t nservers; /* number of entries used in servers[] */ | ||
168 | uint8_t vidmask; /* voltype mask for vid[] */ | ||
169 | uint8_t srvtmask[8]; /* voltype masks for servers[] */ | ||
170 | #define AFS_VOL_VTM_RW 0x01 /* R/W version of the volume is available (on this server) */ | ||
171 | #define AFS_VOL_VTM_RO 0x02 /* R/O version of the volume is available (on this server) */ | ||
172 | #define AFS_VOL_VTM_BAK 0x04 /* backup version of the volume is available (on this server) */ | ||
173 | |||
174 | afs_volid_t vid[3]; /* volume IDs for R/W, R/O and Bak volumes */ | ||
175 | struct in_addr servers[8]; /* fileserver addresses */ | ||
176 | time_t rtime; /* last retrieval time */ | ||
177 | }; | ||
178 | |||
179 | /* | ||
180 | * volume -> vnode hash table entry | ||
181 | */ | ||
182 | struct afs_cache_vhash { | ||
183 | afs_voltype_t vtype; /* which volume variation */ | ||
184 | uint8_t hash_bucket; /* which hash bucket this represents */ | ||
185 | } __attribute__((packed)); | ||
186 | |||
187 | /* | ||
188 | * AFS volume location record | ||
189 | */ | ||
190 | struct afs_vlocation { | ||
191 | atomic_t usage; | ||
192 | time_t time_of_death; /* time at which put reduced usage to 0 */ | ||
193 | struct list_head link; /* link in cell volume location list */ | ||
194 | struct list_head grave; /* link in master graveyard list */ | ||
195 | struct list_head update; /* link in master update list */ | ||
196 | struct afs_cell *cell; /* cell to which volume belongs */ | ||
197 | #ifdef AFS_CACHING_SUPPORT | ||
198 | struct cachefs_cookie *cache; /* caching cookie */ | ||
199 | #endif | ||
200 | struct afs_cache_vlocation vldb; /* volume information DB record */ | ||
201 | struct afs_volume *vols[3]; /* volume access record pointer (index by type) */ | ||
202 | wait_queue_head_t waitq; /* status change waitqueue */ | ||
203 | time_t update_at; /* time at which record should be updated */ | ||
204 | rwlock_t lock; /* access lock */ | ||
205 | afs_vlocation_state_t state; /* volume location state */ | ||
206 | unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */ | ||
207 | unsigned short upd_busy_cnt; /* EBUSY count during update */ | ||
208 | bool valid; /* T if valid */ | ||
209 | }; | ||
210 | |||
211 | /* | ||
212 | * AFS fileserver record | ||
213 | */ | ||
214 | struct afs_server { | ||
215 | atomic_t usage; | ||
216 | time_t time_of_death; /* time at which put reduced usage to 0 */ | ||
217 | struct in_addr addr; /* server address */ | ||
218 | struct afs_cell *cell; /* cell in which server resides */ | ||
219 | struct list_head link; /* link in cell's server list */ | ||
220 | struct list_head grave; /* link in master graveyard list */ | ||
221 | struct rb_node master_rb; /* link in master by-addr tree */ | ||
222 | struct rw_semaphore sem; /* access lock */ | ||
223 | |||
224 | /* file service access */ | ||
225 | struct rb_root fs_vnodes; /* vnodes backed by this server (ordered by FID) */ | ||
226 | unsigned long fs_act_jif; /* time at which last activity occurred */ | ||
227 | unsigned long fs_dead_jif; /* time at which no longer to be considered dead */ | ||
228 | spinlock_t fs_lock; /* access lock */ | ||
229 | int fs_state; /* 0 or reason FS currently marked dead (-errno) */ | ||
230 | |||
231 | /* callback promise management */ | ||
232 | struct rb_root cb_promises; /* vnode expiration list (ordered earliest first) */ | ||
233 | struct delayed_work cb_updater; /* callback updater */ | ||
234 | struct delayed_work cb_break_work; /* collected break dispatcher */ | ||
235 | wait_queue_head_t cb_break_waitq; /* space available in cb_break waitqueue */ | ||
236 | spinlock_t cb_lock; /* access lock */ | ||
237 | struct afs_callback cb_break[64]; /* ring of callbacks awaiting breaking */ | ||
238 | atomic_t cb_break_n; /* number of pending breaks */ | ||
239 | u8 cb_break_head; /* head of callback breaking ring */ | ||
240 | u8 cb_break_tail; /* tail of callback breaking ring */ | ||
241 | }; | ||
242 | |||
243 | /* | ||
244 | * AFS volume access record | ||
245 | */ | ||
246 | struct afs_volume { | ||
247 | atomic_t usage; | ||
248 | struct afs_cell *cell; /* cell to which belongs (unrefd ptr) */ | ||
249 | struct afs_vlocation *vlocation; /* volume location */ | ||
250 | #ifdef AFS_CACHING_SUPPORT | ||
251 | struct cachefs_cookie *cache; /* caching cookie */ | ||
252 | #endif | ||
253 | afs_volid_t vid; /* volume ID */ | ||
254 | afs_voltype_t type; /* type of volume */ | ||
255 | char type_force; /* force volume type (suppress R/O -> R/W) */ | ||
256 | unsigned short nservers; /* number of server slots filled */ | ||
257 | unsigned short rjservers; /* number of servers discarded due to -ENOMEDIUM */ | ||
258 | struct afs_server *servers[8]; /* servers on which volume resides (ordered) */ | ||
259 | struct rw_semaphore server_sem; /* lock for accessing current server */ | ||
260 | }; | ||
261 | |||
262 | /* | ||
263 | * vnode catalogue entry | ||
264 | */ | ||
265 | struct afs_cache_vnode { | ||
266 | afs_vnodeid_t vnode_id; /* vnode ID */ | ||
267 | unsigned vnode_unique; /* vnode ID uniquifier */ | ||
268 | afs_dataversion_t data_version; /* data version */ | ||
269 | }; | ||
270 | |||
271 | /* | ||
272 | * AFS inode private data | ||
273 | */ | ||
274 | struct afs_vnode { | ||
275 | struct inode vfs_inode; /* the VFS's inode record */ | ||
276 | |||
277 | struct afs_volume *volume; /* volume on which vnode resides */ | ||
278 | struct afs_server *server; /* server currently supplying this file */ | ||
279 | struct afs_fid fid; /* the file identifier for this inode */ | ||
280 | struct afs_file_status status; /* AFS status info for this file */ | ||
281 | #ifdef AFS_CACHING_SUPPORT | ||
282 | struct cachefs_cookie *cache; /* caching cookie */ | ||
283 | #endif | ||
284 | |||
285 | wait_queue_head_t update_waitq; /* status fetch waitqueue */ | ||
286 | unsigned update_cnt; /* number of outstanding ops that will update the | ||
287 | * status */ | ||
288 | spinlock_t lock; /* waitqueue/flags lock */ | ||
289 | unsigned long flags; | ||
290 | #define AFS_VNODE_CB_BROKEN 0 /* set if vnode's callback was broken */ | ||
291 | #define AFS_VNODE_CHANGED 1 /* set if vnode's metadata changed */ | ||
292 | #define AFS_VNODE_MODIFIED 2 /* set if vnode's data modified */ | ||
293 | #define AFS_VNODE_ZAP_DATA 3 /* set if vnode's data should be invalidated */ | ||
294 | #define AFS_VNODE_DELETED 4 /* set if vnode deleted on server */ | ||
295 | #define AFS_VNODE_MOUNTPOINT 5 /* set if vnode is a mountpoint symlink */ | ||
296 | #define AFS_VNODE_DIR_CHANGED 6 /* set if vnode's parent dir metadata changed */ | ||
297 | #define AFS_VNODE_DIR_MODIFIED 7 /* set if vnode's parent dir data modified */ | ||
298 | |||
299 | /* outstanding callback notification on this file */ | ||
300 | struct rb_node server_rb; /* link in server->fs_vnodes */ | ||
301 | struct rb_node cb_promise; /* link in server->cb_promises */ | ||
302 | struct work_struct cb_broken_work; /* work to be done on callback break */ | ||
303 | struct mutex cb_broken_lock; /* lock against multiple attempts to fix break */ | ||
304 | // struct list_head cb_hash_link; /* link in master callback hash */ | ||
305 | time_t cb_expires; /* time at which callback expires */ | ||
306 | time_t cb_expires_at; /* time used to order cb_promise */ | ||
307 | unsigned cb_version; /* callback version */ | ||
308 | unsigned cb_expiry; /* callback expiry time */ | ||
309 | afs_callback_type_t cb_type; /* type of callback */ | ||
310 | bool cb_promised; /* true if promise still holds */ | ||
311 | }; | ||
312 | |||
313 | /*****************************************************************************/ | ||
314 | /* | ||
315 | * callback.c | ||
316 | */ | ||
317 | extern void afs_init_callback_state(struct afs_server *); | ||
318 | extern void afs_broken_callback_work(struct work_struct *); | ||
319 | extern void afs_break_callbacks(struct afs_server *, size_t, | ||
320 | struct afs_callback[]); | ||
321 | extern void afs_give_up_callback(struct afs_vnode *); | ||
322 | extern void afs_dispatch_give_up_callbacks(struct work_struct *); | ||
323 | extern void afs_flush_callback_breaks(struct afs_server *); | ||
324 | extern int __init afs_callback_update_init(void); | ||
325 | extern void __exit afs_callback_update_kill(void); | ||
326 | |||
54 | /* | 327 | /* |
55 | * cell.c | 328 | * cell.c |
56 | */ | 329 | */ |
@@ -60,6 +333,19 @@ extern struct list_head afs_proc_cells; | |||
60 | extern struct cachefs_index_def afs_cache_cell_index_def; | 333 | extern struct cachefs_index_def afs_cache_cell_index_def; |
61 | #endif | 334 | #endif |
62 | 335 | ||
336 | #define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0) | ||
337 | extern int afs_cell_init(char *); | ||
338 | extern struct afs_cell *afs_cell_create(const char *, char *); | ||
339 | extern struct afs_cell *afs_cell_lookup(const char *, unsigned); | ||
340 | extern struct afs_cell *afs_grab_cell(struct afs_cell *); | ||
341 | extern void afs_put_cell(struct afs_cell *); | ||
342 | extern void afs_cell_purge(void); | ||
343 | |||
344 | /* | ||
345 | * cmservice.c | ||
346 | */ | ||
347 | extern bool afs_cm_incoming_call(struct afs_call *); | ||
348 | |||
63 | /* | 349 | /* |
64 | * dir.c | 350 | * dir.c |
65 | */ | 351 | */ |
@@ -77,9 +363,22 @@ extern int afs_cache_get_page_cookie(struct page *, struct cachefs_page **); | |||
77 | #endif | 363 | #endif |
78 | 364 | ||
79 | /* | 365 | /* |
366 | * fsclient.c | ||
367 | */ | ||
368 | extern int afs_fs_fetch_file_status(struct afs_server *, | ||
369 | struct afs_vnode *, | ||
370 | struct afs_volsync *, | ||
371 | const struct afs_wait_mode *); | ||
372 | extern int afs_fs_give_up_callbacks(struct afs_server *, | ||
373 | const struct afs_wait_mode *); | ||
374 | extern int afs_fs_fetch_data(struct afs_server *, struct afs_vnode *, off_t, | ||
375 | size_t, struct page *, struct afs_volsync *, | ||
376 | const struct afs_wait_mode *); | ||
377 | |||
378 | /* | ||
80 | * inode.c | 379 | * inode.c |
81 | */ | 380 | */ |
82 | extern int afs_iget(struct super_block *, struct afs_fid *, struct inode **); | 381 | extern struct inode *afs_iget(struct super_block *, struct afs_fid *); |
83 | extern int afs_inode_getattr(struct vfsmount *, struct dentry *, | 382 | extern int afs_inode_getattr(struct vfsmount *, struct dentry *, |
84 | struct kstat *); | 383 | struct kstat *); |
85 | extern void afs_clear_inode(struct inode *); | 384 | extern void afs_clear_inode(struct inode *); |
@@ -92,15 +391,20 @@ extern struct cachefs_netfs afs_cache_netfs; | |||
92 | #endif | 391 | #endif |
93 | 392 | ||
94 | /* | 393 | /* |
394 | * misc.c | ||
395 | */ | ||
396 | extern int afs_abort_to_error(u32); | ||
397 | |||
398 | /* | ||
95 | * mntpt.c | 399 | * mntpt.c |
96 | */ | 400 | */ |
97 | extern const struct inode_operations afs_mntpt_inode_operations; | 401 | extern const struct inode_operations afs_mntpt_inode_operations; |
98 | extern const struct file_operations afs_mntpt_file_operations; | 402 | extern const struct file_operations afs_mntpt_file_operations; |
99 | extern struct afs_timer afs_mntpt_expiry_timer; | ||
100 | extern struct afs_timer_ops afs_mntpt_expiry_timer_ops; | ||
101 | extern unsigned long afs_mntpt_expiry_timeout; | 403 | extern unsigned long afs_mntpt_expiry_timeout; |
102 | 404 | ||
103 | extern int afs_mntpt_check_symlink(struct afs_vnode *); | 405 | extern int afs_mntpt_check_symlink(struct afs_vnode *); |
406 | extern void afs_mntpt_kill_timer(void); | ||
407 | extern void afs_umount_begin(struct vfsmount *, int); | ||
104 | 408 | ||
105 | /* | 409 | /* |
106 | * super.c | 410 | * super.c |
@@ -108,16 +412,6 @@ extern int afs_mntpt_check_symlink(struct afs_vnode *); | |||
108 | extern int afs_fs_init(void); | 412 | extern int afs_fs_init(void); |
109 | extern void afs_fs_exit(void); | 413 | extern void afs_fs_exit(void); |
110 | 414 | ||
111 | #define AFS_CB_HASH_COUNT (PAGE_SIZE / sizeof(struct list_head)) | ||
112 | |||
113 | extern struct list_head afs_cb_hash_tbl[]; | ||
114 | extern spinlock_t afs_cb_hash_lock; | ||
115 | |||
116 | #define afs_cb_hash(SRV, FID) \ | ||
117 | afs_cb_hash_tbl[((unsigned long)(SRV) + \ | ||
118 | (FID)->vid + (FID)->vnode + (FID)->unique) & \ | ||
119 | (AFS_CB_HASH_COUNT - 1)] | ||
120 | |||
121 | /* | 415 | /* |
122 | * proc.c | 416 | * proc.c |
123 | */ | 417 | */ |
@@ -126,4 +420,217 @@ extern void afs_proc_cleanup(void); | |||
126 | extern int afs_proc_cell_setup(struct afs_cell *); | 420 | extern int afs_proc_cell_setup(struct afs_cell *); |
127 | extern void afs_proc_cell_remove(struct afs_cell *); | 421 | extern void afs_proc_cell_remove(struct afs_cell *); |
128 | 422 | ||
129 | #endif /* AFS_INTERNAL_H */ | 423 | /* |
424 | * rxrpc.c | ||
425 | */ | ||
426 | extern int afs_open_socket(void); | ||
427 | extern void afs_close_socket(void); | ||
428 | extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t, | ||
429 | const struct afs_wait_mode *); | ||
430 | extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *, | ||
431 | size_t, size_t); | ||
432 | extern void afs_flat_call_destructor(struct afs_call *); | ||
433 | extern void afs_transfer_reply(struct afs_call *, struct sk_buff *); | ||
434 | extern void afs_send_empty_reply(struct afs_call *); | ||
435 | extern int afs_extract_data(struct afs_call *, struct sk_buff *, bool, void *, | ||
436 | size_t); | ||
437 | |||
438 | /* | ||
439 | * server.c | ||
440 | */ | ||
441 | extern spinlock_t afs_server_peer_lock; | ||
442 | |||
443 | #define afs_get_server(S) do { atomic_inc(&(S)->usage); } while(0) | ||
444 | |||
445 | extern struct afs_server *afs_lookup_server(struct afs_cell *, | ||
446 | const struct in_addr *); | ||
447 | extern struct afs_server *afs_find_server(const struct in_addr *); | ||
448 | extern void afs_put_server(struct afs_server *); | ||
449 | extern void __exit afs_purge_servers(void); | ||
450 | |||
451 | /* | ||
452 | * vlclient.c | ||
453 | */ | ||
454 | #ifdef AFS_CACHING_SUPPORT | ||
455 | extern struct cachefs_index_def afs_vlocation_cache_index_def; | ||
456 | #endif | ||
457 | |||
458 | extern int afs_vl_get_entry_by_name(struct in_addr *, const char *, | ||
459 | struct afs_cache_vlocation *, | ||
460 | const struct afs_wait_mode *); | ||
461 | extern int afs_vl_get_entry_by_id(struct in_addr *, afs_volid_t, afs_voltype_t, | ||
462 | struct afs_cache_vlocation *, | ||
463 | const struct afs_wait_mode *); | ||
464 | |||
465 | /* | ||
466 | * vlocation.c | ||
467 | */ | ||
468 | #define afs_get_vlocation(V) do { atomic_inc(&(V)->usage); } while(0) | ||
469 | |||
470 | extern int __init afs_vlocation_update_init(void); | ||
471 | extern struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *, | ||
472 | const char *, size_t); | ||
473 | extern void afs_put_vlocation(struct afs_vlocation *); | ||
474 | extern void __exit afs_vlocation_purge(void); | ||
475 | |||
476 | /* | ||
477 | * vnode.c | ||
478 | */ | ||
479 | #ifdef AFS_CACHING_SUPPORT | ||
480 | extern struct cachefs_index_def afs_vnode_cache_index_def; | ||
481 | #endif | ||
482 | |||
483 | extern struct afs_timer_ops afs_vnode_cb_timed_out_ops; | ||
484 | |||
485 | static inline struct afs_vnode *AFS_FS_I(struct inode *inode) | ||
486 | { | ||
487 | return container_of(inode, struct afs_vnode, vfs_inode); | ||
488 | } | ||
489 | |||
490 | static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode) | ||
491 | { | ||
492 | return &vnode->vfs_inode; | ||
493 | } | ||
494 | |||
495 | extern int afs_vnode_fetch_status(struct afs_vnode *); | ||
496 | extern int afs_vnode_fetch_data(struct afs_vnode *vnode, off_t, size_t, | ||
497 | struct page *); | ||
498 | |||
499 | /* | ||
500 | * volume.c | ||
501 | */ | ||
502 | #ifdef AFS_CACHING_SUPPORT | ||
503 | extern struct cachefs_index_def afs_volume_cache_index_def; | ||
504 | #endif | ||
505 | |||
506 | #define afs_get_volume(V) do { atomic_inc(&(V)->usage); } while(0) | ||
507 | |||
508 | extern void afs_put_volume(struct afs_volume *); | ||
509 | extern struct afs_volume *afs_volume_lookup(const char *, struct afs_cell *, | ||
510 | int); | ||
511 | extern struct afs_server *afs_volume_pick_fileserver(struct afs_vnode *); | ||
512 | extern int afs_volume_release_fileserver(struct afs_vnode *, | ||
513 | struct afs_server *, int); | ||
514 | |||
515 | /*****************************************************************************/ | ||
516 | /* | ||
517 | * debug tracing | ||
518 | */ | ||
519 | extern unsigned afs_debug; | ||
520 | |||
521 | #define dbgprintk(FMT,...) \ | ||
522 | printk("[%x%-6.6s] "FMT"\n", smp_processor_id(), current->comm ,##__VA_ARGS__) | ||
523 | |||
524 | /* make sure we maintain the format strings, even when debugging is disabled */ | ||
525 | static inline __attribute__((format(printf,1,2))) | ||
526 | void _dbprintk(const char *fmt, ...) | ||
527 | { | ||
528 | } | ||
529 | |||
530 | #define kenter(FMT,...) dbgprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__) | ||
531 | #define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__) | ||
532 | #define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__) | ||
533 | |||
534 | |||
535 | #if defined(__KDEBUG) | ||
536 | #define _enter(FMT,...) kenter(FMT,##__VA_ARGS__) | ||
537 | #define _leave(FMT,...) kleave(FMT,##__VA_ARGS__) | ||
538 | #define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__) | ||
539 | |||
540 | #elif defined(CONFIG_AFS_DEBUG) | ||
541 | #define AFS_DEBUG_KENTER 0x01 | ||
542 | #define AFS_DEBUG_KLEAVE 0x02 | ||
543 | #define AFS_DEBUG_KDEBUG 0x04 | ||
544 | |||
545 | #define _enter(FMT,...) \ | ||
546 | do { \ | ||
547 | if (unlikely(afs_debug & AFS_DEBUG_KENTER)) \ | ||
548 | kenter(FMT,##__VA_ARGS__); \ | ||
549 | } while (0) | ||
550 | |||
551 | #define _leave(FMT,...) \ | ||
552 | do { \ | ||
553 | if (unlikely(afs_debug & AFS_DEBUG_KLEAVE)) \ | ||
554 | kleave(FMT,##__VA_ARGS__); \ | ||
555 | } while (0) | ||
556 | |||
557 | #define _debug(FMT,...) \ | ||
558 | do { \ | ||
559 | if (unlikely(afs_debug & AFS_DEBUG_KDEBUG)) \ | ||
560 | kdebug(FMT,##__VA_ARGS__); \ | ||
561 | } while (0) | ||
562 | |||
563 | #else | ||
564 | #define _enter(FMT,...) _dbprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__) | ||
565 | #define _leave(FMT,...) _dbprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__) | ||
566 | #define _debug(FMT,...) _dbprintk(" "FMT ,##__VA_ARGS__) | ||
567 | #endif | ||
568 | |||
569 | /* | ||
570 | * debug assertion checking | ||
571 | */ | ||
572 | #if 1 // defined(__KDEBUGALL) | ||
573 | |||
574 | #define ASSERT(X) \ | ||
575 | do { \ | ||
576 | if (unlikely(!(X))) { \ | ||
577 | printk(KERN_ERR "\n"); \ | ||
578 | printk(KERN_ERR "AFS: Assertion failed\n"); \ | ||
579 | BUG(); \ | ||
580 | } \ | ||
581 | } while(0) | ||
582 | |||
583 | #define ASSERTCMP(X, OP, Y) \ | ||
584 | do { \ | ||
585 | if (unlikely(!((X) OP (Y)))) { \ | ||
586 | printk(KERN_ERR "\n"); \ | ||
587 | printk(KERN_ERR "AFS: Assertion failed\n"); \ | ||
588 | printk(KERN_ERR "%lu " #OP " %lu is false\n", \ | ||
589 | (unsigned long)(X), (unsigned long)(Y)); \ | ||
590 | printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \ | ||
591 | (unsigned long)(X), (unsigned long)(Y)); \ | ||
592 | BUG(); \ | ||
593 | } \ | ||
594 | } while(0) | ||
595 | |||
596 | #define ASSERTIF(C, X) \ | ||
597 | do { \ | ||
598 | if (unlikely((C) && !(X))) { \ | ||
599 | printk(KERN_ERR "\n"); \ | ||
600 | printk(KERN_ERR "AFS: Assertion failed\n"); \ | ||
601 | BUG(); \ | ||
602 | } \ | ||
603 | } while(0) | ||
604 | |||
605 | #define ASSERTIFCMP(C, X, OP, Y) \ | ||
606 | do { \ | ||
607 | if (unlikely((C) && !((X) OP (Y)))) { \ | ||
608 | printk(KERN_ERR "\n"); \ | ||
609 | printk(KERN_ERR "AFS: Assertion failed\n"); \ | ||
610 | printk(KERN_ERR "%lu " #OP " %lu is false\n", \ | ||
611 | (unsigned long)(X), (unsigned long)(Y)); \ | ||
612 | printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \ | ||
613 | (unsigned long)(X), (unsigned long)(Y)); \ | ||
614 | BUG(); \ | ||
615 | } \ | ||
616 | } while(0) | ||
617 | |||
618 | #else | ||
619 | |||
620 | #define ASSERT(X) \ | ||
621 | do { \ | ||
622 | } while(0) | ||
623 | |||
624 | #define ASSERTCMP(X, OP, Y) \ | ||
625 | do { \ | ||
626 | } while(0) | ||
627 | |||
628 | #define ASSERTIF(C, X) \ | ||
629 | do { \ | ||
630 | } while(0) | ||
631 | |||
632 | #define ASSERTIFCMP(C, X, OP, Y) \ | ||
633 | do { \ | ||
634 | } while(0) | ||
635 | |||
636 | #endif /* __KDEBUGALL */ | ||
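As a rough usage sketch (the function and field choices below are illustrative, not taken from this patch), the tracing and assertion macros defined above are used like this; with CONFIG_AFS_DEBUG=y the _enter/_leave/_debug lines only print when the matching bit is set in the afs_debug mask exposed as the module's "debug" parameter.

	/* illustrative only: shows macro usage, not real kAFS logic */
	static void example_check_vnode(struct afs_vnode *vnode)
	{
		_enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);

		ASSERT(vnode->volume != NULL);
		ASSERTCMP(vnode->fid.vnode, !=, 0);

		_debug("cb version %u", vnode->cb_version);

		_leave("");
	}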
diff --git a/fs/afs/kafsasyncd.c b/fs/afs/kafsasyncd.c deleted file mode 100644 index 8ca01c23601..00000000000 --- a/fs/afs/kafsasyncd.c +++ /dev/null | |||
@@ -1,247 +0,0 @@ | |||
1 | /* AFS asynchronous operation daemon | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * | ||
12 | * The AFS async daemon is used to do the following: | ||
13 | * - probe "dead" servers to see whether they've come back to life yet. | ||
14 | * - probe "live" servers that we haven't talked to for a while to see if they are better | ||
15 | * candidates for serving than what we're currently using | ||
16 | * - poll volume location servers to keep up to date volume location lists | ||
17 | */ | ||
18 | |||
19 | #include <linux/module.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/completion.h> | ||
23 | #include <linux/freezer.h> | ||
24 | #include "cell.h" | ||
25 | #include "server.h" | ||
26 | #include "volume.h" | ||
27 | #include "kafsasyncd.h" | ||
28 | #include "kafstimod.h" | ||
29 | #include <rxrpc/call.h> | ||
30 | #include <asm/errno.h> | ||
31 | #include "internal.h" | ||
32 | |||
33 | static DECLARE_COMPLETION(kafsasyncd_alive); | ||
34 | static DECLARE_COMPLETION(kafsasyncd_dead); | ||
35 | static DECLARE_WAIT_QUEUE_HEAD(kafsasyncd_sleepq); | ||
36 | static struct task_struct *kafsasyncd_task; | ||
37 | static int kafsasyncd_die; | ||
38 | |||
39 | static int kafsasyncd(void *arg); | ||
40 | |||
41 | static LIST_HEAD(kafsasyncd_async_attnq); | ||
42 | static LIST_HEAD(kafsasyncd_async_busyq); | ||
43 | static DEFINE_SPINLOCK(kafsasyncd_async_lock); | ||
44 | |||
45 | static void kafsasyncd_null_call_attn_func(struct rxrpc_call *call) | ||
46 | { | ||
47 | } | ||
48 | |||
49 | static void kafsasyncd_null_call_error_func(struct rxrpc_call *call) | ||
50 | { | ||
51 | } | ||
52 | |||
53 | /* | ||
54 | * start the async daemon | ||
55 | */ | ||
56 | int afs_kafsasyncd_start(void) | ||
57 | { | ||
58 | int ret; | ||
59 | |||
60 | ret = kernel_thread(kafsasyncd, NULL, 0); | ||
61 | if (ret < 0) | ||
62 | return ret; | ||
63 | |||
64 | wait_for_completion(&kafsasyncd_alive); | ||
65 | |||
66 | return ret; | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * stop the async daemon | ||
71 | */ | ||
72 | void afs_kafsasyncd_stop(void) | ||
73 | { | ||
74 | /* get rid of my daemon */ | ||
75 | kafsasyncd_die = 1; | ||
76 | wake_up(&kafsasyncd_sleepq); | ||
77 | wait_for_completion(&kafsasyncd_dead); | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * probing daemon | ||
82 | */ | ||
83 | static int kafsasyncd(void *arg) | ||
84 | { | ||
85 | struct afs_async_op *op; | ||
86 | int die; | ||
87 | |||
88 | DECLARE_WAITQUEUE(myself, current); | ||
89 | |||
90 | kafsasyncd_task = current; | ||
91 | |||
92 | printk("kAFS: Started kafsasyncd %d\n", current->pid); | ||
93 | |||
94 | daemonize("kafsasyncd"); | ||
95 | |||
96 | complete(&kafsasyncd_alive); | ||
97 | |||
98 | /* loop around looking for things to attend to */ | ||
99 | do { | ||
100 | set_current_state(TASK_INTERRUPTIBLE); | ||
101 | add_wait_queue(&kafsasyncd_sleepq, &myself); | ||
102 | |||
103 | for (;;) { | ||
104 | if (!list_empty(&kafsasyncd_async_attnq) || | ||
105 | signal_pending(current) || | ||
106 | kafsasyncd_die) | ||
107 | break; | ||
108 | |||
109 | schedule(); | ||
110 | set_current_state(TASK_INTERRUPTIBLE); | ||
111 | } | ||
112 | |||
113 | remove_wait_queue(&kafsasyncd_sleepq, &myself); | ||
114 | set_current_state(TASK_RUNNING); | ||
115 | |||
116 | try_to_freeze(); | ||
117 | |||
118 | /* discard pending signals */ | ||
119 | afs_discard_my_signals(); | ||
120 | |||
121 | die = kafsasyncd_die; | ||
122 | |||
123 | /* deal with the next asynchronous operation requiring | ||
124 | * attention */ | ||
125 | if (!list_empty(&kafsasyncd_async_attnq)) { | ||
126 | struct afs_async_op *op; | ||
127 | |||
128 | _debug("@@@ Begin Asynchronous Operation"); | ||
129 | |||
130 | op = NULL; | ||
131 | spin_lock(&kafsasyncd_async_lock); | ||
132 | |||
133 | if (!list_empty(&kafsasyncd_async_attnq)) { | ||
134 | op = list_entry(kafsasyncd_async_attnq.next, | ||
135 | struct afs_async_op, link); | ||
136 | list_move_tail(&op->link, | ||
137 | &kafsasyncd_async_busyq); | ||
138 | } | ||
139 | |||
140 | spin_unlock(&kafsasyncd_async_lock); | ||
141 | |||
142 | _debug("@@@ Operation %p {%p}\n", | ||
143 | op, op ? op->ops : NULL); | ||
144 | |||
145 | if (op) | ||
146 | op->ops->attend(op); | ||
147 | |||
148 | _debug("@@@ End Asynchronous Operation"); | ||
149 | } | ||
150 | |||
151 | } while(!die); | ||
152 | |||
153 | /* need to kill all outstanding asynchronous operations before | ||
154 | * exiting */ | ||
155 | kafsasyncd_task = NULL; | ||
156 | spin_lock(&kafsasyncd_async_lock); | ||
157 | |||
158 | /* fold the busy and attention queues together */ | ||
159 | list_splice_init(&kafsasyncd_async_busyq, | ||
160 | &kafsasyncd_async_attnq); | ||
161 | |||
162 | /* dequeue kafsasyncd from all their wait queues */ | ||
163 | list_for_each_entry(op, &kafsasyncd_async_attnq, link) { | ||
164 | op->call->app_attn_func = kafsasyncd_null_call_attn_func; | ||
165 | op->call->app_error_func = kafsasyncd_null_call_error_func; | ||
166 | remove_wait_queue(&op->call->waitq, &op->waiter); | ||
167 | } | ||
168 | |||
169 | spin_unlock(&kafsasyncd_async_lock); | ||
170 | |||
171 | /* abort all the operations */ | ||
172 | while (!list_empty(&kafsasyncd_async_attnq)) { | ||
173 | op = list_entry(kafsasyncd_async_attnq.next, struct afs_async_op, link); | ||
174 | list_del_init(&op->link); | ||
175 | |||
176 | rxrpc_call_abort(op->call, -EIO); | ||
177 | rxrpc_put_call(op->call); | ||
178 | op->call = NULL; | ||
179 | |||
180 | op->ops->discard(op); | ||
181 | } | ||
182 | |||
183 | /* and that's all */ | ||
184 | _leave(""); | ||
185 | complete_and_exit(&kafsasyncd_dead, 0); | ||
186 | } | ||
187 | |||
188 | /* | ||
189 | * begin an operation | ||
190 | * - place operation on busy queue | ||
191 | */ | ||
192 | void afs_kafsasyncd_begin_op(struct afs_async_op *op) | ||
193 | { | ||
194 | _enter(""); | ||
195 | |||
196 | spin_lock(&kafsasyncd_async_lock); | ||
197 | |||
198 | init_waitqueue_entry(&op->waiter, kafsasyncd_task); | ||
199 | add_wait_queue(&op->call->waitq, &op->waiter); | ||
200 | |||
201 | list_move_tail(&op->link, &kafsasyncd_async_busyq); | ||
202 | |||
203 | spin_unlock(&kafsasyncd_async_lock); | ||
204 | |||
205 | _leave(""); | ||
206 | } | ||
207 | |||
208 | /* | ||
209 | * request attention for an operation | ||
210 | * - move to attention queue | ||
211 | */ | ||
212 | void afs_kafsasyncd_attend_op(struct afs_async_op *op) | ||
213 | { | ||
214 | _enter(""); | ||
215 | |||
216 | spin_lock(&kafsasyncd_async_lock); | ||
217 | |||
218 | list_move_tail(&op->link, &kafsasyncd_async_attnq); | ||
219 | |||
220 | spin_unlock(&kafsasyncd_async_lock); | ||
221 | |||
222 | wake_up(&kafsasyncd_sleepq); | ||
223 | |||
224 | _leave(""); | ||
225 | } | ||
226 | |||
227 | /* | ||
228 | * terminate an operation | ||
229 | * - remove from either queue | ||
230 | */ | ||
231 | void afs_kafsasyncd_terminate_op(struct afs_async_op *op) | ||
232 | { | ||
233 | _enter(""); | ||
234 | |||
235 | spin_lock(&kafsasyncd_async_lock); | ||
236 | |||
237 | if (!list_empty(&op->link)) { | ||
238 | list_del_init(&op->link); | ||
239 | remove_wait_queue(&op->call->waitq, &op->waiter); | ||
240 | } | ||
241 | |||
242 | spin_unlock(&kafsasyncd_async_lock); | ||
243 | |||
244 | wake_up(&kafsasyncd_sleepq); | ||
245 | |||
246 | _leave(""); | ||
247 | } | ||
diff --git a/fs/afs/kafsasyncd.h b/fs/afs/kafsasyncd.h deleted file mode 100644 index 1273eb544c5..00000000000 --- a/fs/afs/kafsasyncd.h +++ /dev/null | |||
@@ -1,50 +0,0 @@ | |||
1 | /* AFS asynchronous operation daemon | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef AFS_KAFSASYNCD_H | ||
13 | #define AFS_KAFSASYNCD_H | ||
14 | |||
15 | #include "types.h" | ||
16 | |||
17 | struct afs_async_op; | ||
18 | |||
19 | struct afs_async_op_ops { | ||
20 | void (*attend)(struct afs_async_op *); | ||
21 | void (*discard)(struct afs_async_op *); | ||
22 | }; | ||
23 | |||
24 | /* | ||
25 | * asynchronous operation record | ||
26 | */ | ||
27 | struct afs_async_op { | ||
28 | struct list_head link; | ||
29 | struct afs_server *server; /* server being contacted */ | ||
30 | struct rxrpc_call *call; /* RxRPC call performing op */ | ||
31 | wait_queue_t waiter; /* wait queue for kafsasyncd */ | ||
32 | const struct afs_async_op_ops *ops; /* operations */ | ||
33 | }; | ||
34 | |||
35 | static inline void afs_async_op_init(struct afs_async_op *op, | ||
36 | const struct afs_async_op_ops *ops) | ||
37 | { | ||
38 | INIT_LIST_HEAD(&op->link); | ||
39 | op->call = NULL; | ||
40 | op->ops = ops; | ||
41 | } | ||
42 | |||
43 | extern int afs_kafsasyncd_start(void); | ||
44 | extern void afs_kafsasyncd_stop(void); | ||
45 | |||
46 | extern void afs_kafsasyncd_begin_op(struct afs_async_op *); | ||
47 | extern void afs_kafsasyncd_attend_op(struct afs_async_op *); | ||
48 | extern void afs_kafsasyncd_terminate_op(struct afs_async_op *); | ||
49 | |||
50 | #endif /* AFS_KAFSASYNCD_H */ | ||
diff --git a/fs/afs/kafstimod.c b/fs/afs/kafstimod.c deleted file mode 100644 index 3526dcccc16..00000000000 --- a/fs/afs/kafstimod.c +++ /dev/null | |||
@@ -1,194 +0,0 @@ | |||
1 | /* AFS timeout daemon | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/completion.h> | ||
16 | #include <linux/freezer.h> | ||
17 | #include "cell.h" | ||
18 | #include "volume.h" | ||
19 | #include "kafstimod.h" | ||
20 | #include <asm/errno.h> | ||
21 | #include "internal.h" | ||
22 | |||
23 | static DECLARE_COMPLETION(kafstimod_alive); | ||
24 | static DECLARE_COMPLETION(kafstimod_dead); | ||
25 | static DECLARE_WAIT_QUEUE_HEAD(kafstimod_sleepq); | ||
26 | static int kafstimod_die; | ||
27 | |||
28 | static LIST_HEAD(kafstimod_list); | ||
29 | static DEFINE_SPINLOCK(kafstimod_lock); | ||
30 | |||
31 | static int kafstimod(void *arg); | ||
32 | |||
33 | /* | ||
34 | * start the timeout daemon | ||
35 | */ | ||
36 | int afs_kafstimod_start(void) | ||
37 | { | ||
38 | int ret; | ||
39 | |||
40 | ret = kernel_thread(kafstimod, NULL, 0); | ||
41 | if (ret < 0) | ||
42 | return ret; | ||
43 | |||
44 | wait_for_completion(&kafstimod_alive); | ||
45 | |||
46 | return ret; | ||
47 | } | ||
48 | |||
49 | /* | ||
50 | * stop the timeout daemon | ||
51 | */ | ||
52 | void afs_kafstimod_stop(void) | ||
53 | { | ||
54 | /* get rid of my daemon */ | ||
55 | kafstimod_die = 1; | ||
56 | wake_up(&kafstimod_sleepq); | ||
57 | wait_for_completion(&kafstimod_dead); | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * timeout processing daemon | ||
62 | */ | ||
63 | static int kafstimod(void *arg) | ||
64 | { | ||
65 | struct afs_timer *timer; | ||
66 | |||
67 | DECLARE_WAITQUEUE(myself, current); | ||
68 | |||
69 | printk("kAFS: Started kafstimod %d\n", current->pid); | ||
70 | |||
71 | daemonize("kafstimod"); | ||
72 | |||
73 | complete(&kafstimod_alive); | ||
74 | |||
75 | /* loop around looking for things to attend to */ | ||
76 | loop: | ||
77 | set_current_state(TASK_INTERRUPTIBLE); | ||
78 | add_wait_queue(&kafstimod_sleepq, &myself); | ||
79 | |||
80 | for (;;) { | ||
81 | unsigned long jif; | ||
82 | signed long timeout; | ||
83 | |||
84 | /* deal with the server being asked to die */ | ||
85 | if (kafstimod_die) { | ||
86 | remove_wait_queue(&kafstimod_sleepq, &myself); | ||
87 | _leave(""); | ||
88 | complete_and_exit(&kafstimod_dead, 0); | ||
89 | } | ||
90 | |||
91 | try_to_freeze(); | ||
92 | |||
93 | /* discard pending signals */ | ||
94 | afs_discard_my_signals(); | ||
95 | |||
96 | /* work out the time to elapse before the next event */ | ||
97 | spin_lock(&kafstimod_lock); | ||
98 | if (list_empty(&kafstimod_list)) { | ||
99 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
100 | } else { | ||
101 | timer = list_entry(kafstimod_list.next, | ||
102 | struct afs_timer, link); | ||
103 | timeout = timer->timo_jif; | ||
104 | jif = jiffies; | ||
105 | |||
106 | if (time_before_eq((unsigned long) timeout, jif)) | ||
107 | goto immediate; | ||
108 | timeout = (long) timeout - (long) jiffies; | ||
109 | } | ||
110 | spin_unlock(&kafstimod_lock); | ||
111 | |||
112 | schedule_timeout(timeout); | ||
113 | |||
114 | set_current_state(TASK_INTERRUPTIBLE); | ||
115 | } | ||
116 | |||
117 | /* the thing on the front of the queue needs processing | ||
118 | * - we come here with the lock held and timer pointing to the expired | ||
119 | * entry | ||
120 | */ | ||
121 | immediate: | ||
122 | remove_wait_queue(&kafstimod_sleepq, &myself); | ||
123 | set_current_state(TASK_RUNNING); | ||
124 | |||
125 | _debug("@@@ Begin Timeout of %p", timer); | ||
126 | |||
127 | /* dequeue the timer */ | ||
128 | list_del_init(&timer->link); | ||
129 | spin_unlock(&kafstimod_lock); | ||
130 | |||
131 | /* call the timeout function */ | ||
132 | timer->ops->timed_out(timer); | ||
133 | |||
134 | _debug("@@@ End Timeout"); | ||
135 | goto loop; | ||
136 | } | ||
137 | |||
138 | /* | ||
139 | * (re-)queue a timer | ||
140 | */ | ||
141 | void afs_kafstimod_add_timer(struct afs_timer *timer, unsigned long timeout) | ||
142 | { | ||
143 | struct afs_timer *ptimer; | ||
144 | struct list_head *_p; | ||
145 | |||
146 | _enter("%p,%lu", timer, timeout); | ||
147 | |||
148 | spin_lock(&kafstimod_lock); | ||
149 | |||
150 | list_del(&timer->link); | ||
151 | |||
152 | /* the timer was deferred or reset - put it back in the queue at the | ||
153 | * right place */ | ||
154 | timer->timo_jif = jiffies + timeout; | ||
155 | |||
156 | list_for_each(_p, &kafstimod_list) { | ||
157 | ptimer = list_entry(_p, struct afs_timer, link); | ||
158 | if (time_before(timer->timo_jif, ptimer->timo_jif)) | ||
159 | break; | ||
160 | } | ||
161 | |||
162 | list_add_tail(&timer->link, _p); /* insert before stopping point */ | ||
163 | |||
164 | spin_unlock(&kafstimod_lock); | ||
165 | |||
166 | wake_up(&kafstimod_sleepq); | ||
167 | |||
168 | _leave(""); | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * dequeue a timer | ||
173 | * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued | ||
174 | */ | ||
175 | int afs_kafstimod_del_timer(struct afs_timer *timer) | ||
176 | { | ||
177 | int ret = 0; | ||
178 | |||
179 | _enter("%p", timer); | ||
180 | |||
181 | spin_lock(&kafstimod_lock); | ||
182 | |||
183 | if (list_empty(&timer->link)) | ||
184 | ret = -ENOENT; | ||
185 | else | ||
186 | list_del_init(&timer->link); | ||
187 | |||
188 | spin_unlock(&kafstimod_lock); | ||
189 | |||
190 | wake_up(&kafstimod_sleepq); | ||
191 | |||
192 | _leave(" = %d", ret); | ||
193 | return ret; | ||
194 | } | ||
diff --git a/fs/afs/kafstimod.h b/fs/afs/kafstimod.h deleted file mode 100644 index 0d39becbbe0..00000000000 --- a/fs/afs/kafstimod.h +++ /dev/null | |||
@@ -1,45 +0,0 @@ | |||
1 | /* AFS timeout daemon | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef AFS_KAFSTIMOD_H | ||
13 | #define AFS_KAFSTIMOD_H | ||
14 | |||
15 | #include "types.h" | ||
16 | |||
17 | struct afs_timer; | ||
18 | |||
19 | struct afs_timer_ops { | ||
20 | /* called when the front of the timer queue has timed out */ | ||
21 | void (*timed_out)(struct afs_timer *); | ||
22 | }; | ||
23 | |||
24 | /* | ||
25 | * AFS timer/timeout record | ||
26 | */ | ||
27 | struct afs_timer { | ||
28 | struct list_head link; /* link in timer queue */ | ||
29 | unsigned long timo_jif; /* timeout time */ | ||
30 | const struct afs_timer_ops *ops; /* timeout expiry function */ | ||
31 | }; | ||
32 | |||
33 | static inline void afs_timer_init(struct afs_timer *timer, | ||
34 | const struct afs_timer_ops *ops) | ||
35 | { | ||
36 | INIT_LIST_HEAD(&timer->link); | ||
37 | timer->ops = ops; | ||
38 | } | ||
39 | |||
40 | extern int afs_kafstimod_start(void); | ||
41 | extern void afs_kafstimod_stop(void); | ||
42 | extern void afs_kafstimod_add_timer(struct afs_timer *, unsigned long); | ||
43 | extern int afs_kafstimod_del_timer(struct afs_timer *); | ||
44 | |||
45 | #endif /* AFS_KAFSTIMOD_H */ | ||
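The kafstimod timer queue removed above is superseded by the kernel's shared workqueue (see the delayed_work members added to struct afs_server earlier in this patch). A hedged sketch of the equivalent pattern, with illustrative names only:

	/* illustrative replacement for an afs_timer: periodic delayed work */
	static void example_updater(struct work_struct *work);
	static DECLARE_DELAYED_WORK(example_update_work, example_updater);

	static void example_updater(struct work_struct *work)
	{
		/* ... perform the periodic update ... */

		/* re-arm, much as afs_kafstimod_add_timer() used to */
		schedule_delayed_work(&example_update_work, 10 * HZ);
	}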
diff --git a/fs/afs/main.c b/fs/afs/main.c index 5bf39f66f4c..0cf1b021ad5 100644 --- a/fs/afs/main.c +++ b/fs/afs/main.c | |||
@@ -13,43 +13,21 @@ | |||
13 | #include <linux/moduleparam.h> | 13 | #include <linux/moduleparam.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/completion.h> | 15 | #include <linux/completion.h> |
16 | #include <rxrpc/rxrpc.h> | ||
17 | #include <rxrpc/transport.h> | ||
18 | #include <rxrpc/call.h> | ||
19 | #include <rxrpc/peer.h> | ||
20 | #include "cache.h" | ||
21 | #include "cell.h" | ||
22 | #include "server.h" | ||
23 | #include "fsclient.h" | ||
24 | #include "cmservice.h" | ||
25 | #include "kafstimod.h" | ||
26 | #include "kafsasyncd.h" | ||
27 | #include "internal.h" | 16 | #include "internal.h" |
28 | 17 | ||
29 | struct rxrpc_transport *afs_transport; | ||
30 | |||
31 | static int afs_adding_peer(struct rxrpc_peer *peer); | ||
32 | static void afs_discarding_peer(struct rxrpc_peer *peer); | ||
33 | |||
34 | |||
35 | MODULE_DESCRIPTION("AFS Client File System"); | 18 | MODULE_DESCRIPTION("AFS Client File System"); |
36 | MODULE_AUTHOR("Red Hat, Inc."); | 19 | MODULE_AUTHOR("Red Hat, Inc."); |
37 | MODULE_LICENSE("GPL"); | 20 | MODULE_LICENSE("GPL"); |
38 | 21 | ||
22 | unsigned afs_debug; | ||
23 | module_param_named(debug, afs_debug, uint, S_IWUSR | S_IRUGO); | ||
24 | MODULE_PARM_DESC(afs_debug, "AFS debugging mask"); | ||
25 | |||
39 | static char *rootcell; | 26 | static char *rootcell; |
40 | 27 | ||
41 | module_param(rootcell, charp, 0); | 28 | module_param(rootcell, charp, 0); |
42 | MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list"); | 29 | MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list"); |
43 | 30 | ||
44 | |||
45 | static struct rxrpc_peer_ops afs_peer_ops = { | ||
46 | .adding = afs_adding_peer, | ||
47 | .discarding = afs_discarding_peer, | ||
48 | }; | ||
49 | |||
50 | struct list_head afs_cb_hash_tbl[AFS_CB_HASH_COUNT]; | ||
51 | DEFINE_SPINLOCK(afs_cb_hash_lock); | ||
52 | |||
53 | #ifdef AFS_CACHING_SUPPORT | 31 | #ifdef AFS_CACHING_SUPPORT |
54 | static struct cachefs_netfs_operations afs_cache_ops = { | 32 | static struct cachefs_netfs_operations afs_cache_ops = { |
55 | .get_page_cookie = afs_cache_get_page_cookie, | 33 | .get_page_cookie = afs_cache_get_page_cookie, |
@@ -67,15 +45,10 @@ struct cachefs_netfs afs_cache_netfs = { | |||
67 | */ | 45 | */ |
68 | static int __init afs_init(void) | 46 | static int __init afs_init(void) |
69 | { | 47 | { |
70 | int loop, ret; | 48 | int ret; |
71 | 49 | ||
72 | printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n"); | 50 | printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n"); |
73 | 51 | ||
74 | /* initialise the callback hash table */ | ||
75 | spin_lock_init(&afs_cb_hash_lock); | ||
76 | for (loop = AFS_CB_HASH_COUNT - 1; loop >= 0; loop--) | ||
77 | INIT_LIST_HEAD(&afs_cb_hash_tbl[loop]); | ||
78 | |||
79 | /* register the /proc stuff */ | 52 | /* register the /proc stuff */ |
80 | ret = afs_proc_init(); | 53 | ret = afs_proc_init(); |
81 | if (ret < 0) | 54 | if (ret < 0) |
@@ -94,22 +67,18 @@ static int __init afs_init(void) | |||
94 | if (ret < 0) | 67 | if (ret < 0) |
95 | goto error_cell_init; | 68 | goto error_cell_init; |
96 | 69 | ||
97 | /* start the timeout daemon */ | 70 | /* initialise the VL update process */ |
98 | ret = afs_kafstimod_start(); | 71 | ret = afs_vlocation_update_init(); |
99 | if (ret < 0) | 72 | if (ret < 0) |
100 | goto error_kafstimod; | 73 | goto error_vl_update_init; |
101 | 74 | ||
102 | /* start the async operation daemon */ | 75 | /* initialise the callback update process */ |
103 | ret = afs_kafsasyncd_start(); | 76 | ret = afs_callback_update_init(); |
104 | if (ret < 0) | ||
105 | goto error_kafsasyncd; | ||
106 | 77 | ||
107 | /* create the RxRPC transport */ | 78 | /* create the RxRPC transport */ |
108 | ret = rxrpc_create_transport(7001, &afs_transport); | 79 | ret = afs_open_socket(); |
109 | if (ret < 0) | 80 | if (ret < 0) |
110 | goto error_transport; | 81 | goto error_open_socket; |
111 | |||
112 | afs_transport->peer_ops = &afs_peer_ops; | ||
113 | 82 | ||
114 | /* register the filesystems */ | 83 | /* register the filesystems */ |
115 | ret = afs_fs_init(); | 84 | ret = afs_fs_init(); |
@@ -119,17 +88,16 @@ static int __init afs_init(void) | |||
119 | return ret; | 88 | return ret; |
120 | 89 | ||
121 | error_fs: | 90 | error_fs: |
122 | rxrpc_put_transport(afs_transport); | 91 | afs_close_socket(); |
123 | error_transport: | 92 | error_open_socket: |
124 | afs_kafsasyncd_stop(); | 93 | error_vl_update_init: |
125 | error_kafsasyncd: | ||
126 | afs_kafstimod_stop(); | ||
127 | error_kafstimod: | ||
128 | error_cell_init: | 94 | error_cell_init: |
129 | #ifdef AFS_CACHING_SUPPORT | 95 | #ifdef AFS_CACHING_SUPPORT |
130 | cachefs_unregister_netfs(&afs_cache_netfs); | 96 | cachefs_unregister_netfs(&afs_cache_netfs); |
131 | error_cache: | 97 | error_cache: |
132 | #endif | 98 | #endif |
99 | afs_callback_update_kill(); | ||
100 | afs_vlocation_purge(); | ||
133 | afs_cell_purge(); | 101 | afs_cell_purge(); |
134 | afs_proc_cleanup(); | 102 | afs_proc_cleanup(); |
135 | printk(KERN_ERR "kAFS: failed to register: %d\n", ret); | 103 | printk(KERN_ERR "kAFS: failed to register: %d\n", ret); |
@@ -149,9 +117,11 @@ static void __exit afs_exit(void) | |||
149 | printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n"); | 117 | printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n"); |
150 | 118 | ||
151 | afs_fs_exit(); | 119 | afs_fs_exit(); |
152 | rxrpc_put_transport(afs_transport); | 120 | afs_close_socket(); |
153 | afs_kafstimod_stop(); | 121 | afs_purge_servers(); |
154 | afs_kafsasyncd_stop(); | 122 | afs_callback_update_kill(); |
123 | afs_vlocation_purge(); | ||
124 | flush_scheduled_work(); | ||
155 | afs_cell_purge(); | 125 | afs_cell_purge(); |
156 | #ifdef AFS_CACHING_SUPPORT | 126 | #ifdef AFS_CACHING_SUPPORT |
157 | cachefs_unregister_netfs(&afs_cache_netfs); | 127 | cachefs_unregister_netfs(&afs_cache_netfs); |
@@ -160,64 +130,3 @@ static void __exit afs_exit(void) | |||
160 | } | 130 | } |
161 | 131 | ||
162 | module_exit(afs_exit); | 132 | module_exit(afs_exit); |
163 | |||
164 | /* | ||
165 | * notification that new peer record is being added | ||
166 | * - called from krxsecd | ||
167 | * - return an error to induce an abort | ||
168 | * - mustn't sleep (caller holds an rwlock) | ||
169 | */ | ||
170 | static int afs_adding_peer(struct rxrpc_peer *peer) | ||
171 | { | ||
172 | struct afs_server *server; | ||
173 | int ret; | ||
174 | |||
175 | _debug("kAFS: Adding new peer %08x\n", ntohl(peer->addr.s_addr)); | ||
176 | |||
177 | /* determine which server the peer resides in (if any) */ | ||
178 | ret = afs_server_find_by_peer(peer, &server); | ||
179 | if (ret < 0) | ||
180 | return ret; /* none that we recognise, so abort */ | ||
181 | |||
182 | _debug("Server %p{u=%d}\n", server, atomic_read(&server->usage)); | ||
183 | |||
184 | _debug("Cell %p{u=%d}\n", | ||
185 | server->cell, atomic_read(&server->cell->usage)); | ||
186 | |||
187 | /* cross-point the structs under a global lock */ | ||
188 | spin_lock(&afs_server_peer_lock); | ||
189 | peer->user = server; | ||
190 | server->peer = peer; | ||
191 | spin_unlock(&afs_server_peer_lock); | ||
192 | |||
193 | afs_put_server(server); | ||
194 | |||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | /* | ||
199 | * notification that a peer record is being discarded | ||
200 | * - called from krxiod or krxsecd | ||
201 | */ | ||
202 | static void afs_discarding_peer(struct rxrpc_peer *peer) | ||
203 | { | ||
204 | struct afs_server *server; | ||
205 | |||
206 | _enter("%p",peer); | ||
207 | |||
208 | _debug("Discarding peer %08x (rtt=%lu.%lumS)\n", | ||
209 | ntohl(peer->addr.s_addr), | ||
210 | (long) (peer->rtt / 1000), | ||
211 | (long) (peer->rtt % 1000)); | ||
212 | |||
213 | /* uncross-point the structs under a global lock */ | ||
214 | spin_lock(&afs_server_peer_lock); | ||
215 | server = peer->user; | ||
216 | if (server) { | ||
217 | peer->user = NULL; | ||
218 | server->peer = NULL; | ||
219 | } | ||
220 | spin_unlock(&afs_server_peer_lock); | ||
221 | |||
222 | _leave(""); | ||
223 | } | ||
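The afs_debug mask introduced above is writable at runtime (S_IWUSR), so the debugging statements scattered through the new code can be switched on and off via /sys/module/.../parameters/debug. A minimal sketch of how such a mask-gated macro could look follows; the real _enter()/_debug() definitions live in internal.h (not shown in this hunk) and the bit assignments here are assumptions for illustration only.

	/* assumed bit layout -- illustration only, not the patch's values */
	#define AFS_DEBUG_KENTER	0x01
	#define AFS_DEBUG_KLEAVE	0x02
	#define AFS_DEBUG_KDEBUG	0x04

	extern unsigned afs_debug;	/* the module parameter above */

	#define _debug(fmt, ...)						\
	do {									\
		if (unlikely(afs_debug & AFS_DEBUG_KDEBUG))			\
			printk(KERN_DEBUG "kAFS: " fmt "\n", ##__VA_ARGS__);	\
	} while (0)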
diff --git a/fs/afs/misc.c b/fs/afs/misc.c index 55bc6778cec..98e9276c46a 100644 --- a/fs/afs/misc.c +++ b/fs/afs/misc.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* miscellaneous bits | 1 | /* miscellaneous bits |
2 | * | 2 | * |
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -12,18 +12,19 @@ | |||
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include "errors.h" | ||
16 | #include "internal.h" | 15 | #include "internal.h" |
16 | #include "afs_fs.h" | ||
17 | 17 | ||
18 | /* | 18 | /* |
19 | * convert an AFS abort code to a Linux error number | 19 | * convert an AFS abort code to a Linux error number |
20 | */ | 20 | */ |
21 | int afs_abort_to_error(int abortcode) | 21 | int afs_abort_to_error(u32 abort_code) |
22 | { | 22 | { |
23 | switch (abortcode) { | 23 | switch (abort_code) { |
24 | case 13: return -EACCES; | ||
24 | case VSALVAGE: return -EIO; | 25 | case VSALVAGE: return -EIO; |
25 | case VNOVNODE: return -ENOENT; | 26 | case VNOVNODE: return -ENOENT; |
26 | case VNOVOL: return -ENXIO; | 27 | case VNOVOL: return -ENOMEDIUM; |
27 | case VVOLEXISTS: return -EEXIST; | 28 | case VVOLEXISTS: return -EEXIST; |
28 | case VNOSERVICE: return -EIO; | 29 | case VNOSERVICE: return -EIO; |
29 | case VOFFLINE: return -ENOENT; | 30 | case VOFFLINE: return -ENOENT; |
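afs_abort_to_error() converts an abort code received from a remote party into a local errno. A minimal sketch of a caller is shown below; the helper name is hypothetical, but the pattern mirrors what afs_deliver_to_call() does later in this patch when an RXRPC_SKB_MARK_REMOTE_ABORT message arrives.

	/* hypothetical helper: record a remote abort against a call */
	static void example_record_abort(struct afs_call *call, u32 abort_code)
	{
		call->error = afs_abort_to_error(abort_code); /* e.g. VNOVNODE -> -ENOENT */
		call->state = AFS_CALL_ABORTED;
	}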
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index ca3fa81b068..08c11a0b66b 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c | |||
@@ -18,10 +18,6 @@ | |||
18 | #include <linux/mount.h> | 18 | #include <linux/mount.h> |
19 | #include <linux/namei.h> | 19 | #include <linux/namei.h> |
20 | #include <linux/mnt_namespace.h> | 20 | #include <linux/mnt_namespace.h> |
21 | #include "super.h" | ||
22 | #include "cell.h" | ||
23 | #include "volume.h" | ||
24 | #include "vnode.h" | ||
25 | #include "internal.h" | 21 | #include "internal.h" |
26 | 22 | ||
27 | 23 | ||
@@ -30,6 +26,7 @@ static struct dentry *afs_mntpt_lookup(struct inode *dir, | |||
30 | struct nameidata *nd); | 26 | struct nameidata *nd); |
31 | static int afs_mntpt_open(struct inode *inode, struct file *file); | 27 | static int afs_mntpt_open(struct inode *inode, struct file *file); |
32 | static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd); | 28 | static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd); |
29 | static void afs_mntpt_expiry_timed_out(struct work_struct *work); | ||
33 | 30 | ||
34 | const struct file_operations afs_mntpt_file_operations = { | 31 | const struct file_operations afs_mntpt_file_operations = { |
35 | .open = afs_mntpt_open, | 32 | .open = afs_mntpt_open, |
@@ -43,16 +40,9 @@ const struct inode_operations afs_mntpt_inode_operations = { | |||
43 | }; | 40 | }; |
44 | 41 | ||
45 | static LIST_HEAD(afs_vfsmounts); | 42 | static LIST_HEAD(afs_vfsmounts); |
43 | static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out); | ||
46 | 44 | ||
47 | static void afs_mntpt_expiry_timed_out(struct afs_timer *timer); | 45 | unsigned long afs_mntpt_expiry_timeout = 10 * 60; |
48 | |||
49 | struct afs_timer_ops afs_mntpt_expiry_timer_ops = { | ||
50 | .timed_out = afs_mntpt_expiry_timed_out, | ||
51 | }; | ||
52 | |||
53 | struct afs_timer afs_mntpt_expiry_timer; | ||
54 | |||
55 | unsigned long afs_mntpt_expiry_timeout = 20; | ||
56 | 46 | ||
57 | /* | 47 | /* |
58 | * check a symbolic link to see whether it actually encodes a mountpoint | 48 | * check a symbolic link to see whether it actually encodes a mountpoint |
@@ -84,7 +74,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode) | |||
84 | 74 | ||
85 | /* examine the symlink's contents */ | 75 | /* examine the symlink's contents */ |
86 | size = vnode->status.size; | 76 | size = vnode->status.size; |
87 | _debug("symlink to %*.*s", size, (int) size, buf); | 77 | _debug("symlink to %*.*s", (int) size, (int) size, buf); |
88 | 78 | ||
89 | if (size > 2 && | 79 | if (size > 2 && |
90 | (buf[0] == '%' || buf[0] == '#') && | 80 | (buf[0] == '%' || buf[0] == '#') && |
@@ -92,7 +82,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode) | |||
92 | ) { | 82 | ) { |
93 | _debug("symlink is a mountpoint"); | 83 | _debug("symlink is a mountpoint"); |
94 | spin_lock(&vnode->lock); | 84 | spin_lock(&vnode->lock); |
95 | vnode->flags |= AFS_VNODE_MOUNTPOINT; | 85 | set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); |
96 | spin_unlock(&vnode->lock); | 86 | spin_unlock(&vnode->lock); |
97 | } | 87 | } |
98 | 88 | ||
@@ -113,7 +103,7 @@ static struct dentry *afs_mntpt_lookup(struct inode *dir, | |||
113 | struct dentry *dentry, | 103 | struct dentry *dentry, |
114 | struct nameidata *nd) | 104 | struct nameidata *nd) |
115 | { | 105 | { |
116 | kenter("%p,%p{%p{%s},%s}", | 106 | _enter("%p,%p{%p{%s},%s}", |
117 | dir, | 107 | dir, |
118 | dentry, | 108 | dentry, |
119 | dentry->d_parent, | 109 | dentry->d_parent, |
@@ -129,7 +119,7 @@ static struct dentry *afs_mntpt_lookup(struct inode *dir, | |||
129 | */ | 119 | */ |
130 | static int afs_mntpt_open(struct inode *inode, struct file *file) | 120 | static int afs_mntpt_open(struct inode *inode, struct file *file) |
131 | { | 121 | { |
132 | kenter("%p,%p{%p{%s},%s}", | 122 | _enter("%p,%p{%p{%s},%s}", |
133 | inode, file, | 123 | inode, file, |
134 | file->f_path.dentry->d_parent, | 124 | file->f_path.dentry->d_parent, |
135 | file->f_path.dentry->d_parent ? | 125 | file->f_path.dentry->d_parent ? |
@@ -152,7 +142,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) | |||
152 | char *buf, *devname = NULL, *options = NULL; | 142 | char *buf, *devname = NULL, *options = NULL; |
153 | int ret; | 143 | int ret; |
154 | 144 | ||
155 | kenter("{%s}", mntpt->d_name.name); | 145 | _enter("{%s}", mntpt->d_name.name); |
156 | 146 | ||
157 | BUG_ON(!mntpt->d_inode); | 147 | BUG_ON(!mntpt->d_inode); |
158 | 148 | ||
@@ -196,13 +186,13 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) | |||
196 | strcat(options, ",rwpath"); | 186 | strcat(options, ",rwpath"); |
197 | 187 | ||
198 | /* try and do the mount */ | 188 | /* try and do the mount */ |
199 | kdebug("--- attempting mount %s -o %s ---", devname, options); | 189 | _debug("--- attempting mount %s -o %s ---", devname, options); |
200 | mnt = vfs_kern_mount(&afs_fs_type, 0, devname, options); | 190 | mnt = vfs_kern_mount(&afs_fs_type, 0, devname, options); |
201 | kdebug("--- mount result %p ---", mnt); | 191 | _debug("--- mount result %p ---", mnt); |
202 | 192 | ||
203 | free_page((unsigned long) devname); | 193 | free_page((unsigned long) devname); |
204 | free_page((unsigned long) options); | 194 | free_page((unsigned long) options); |
205 | kleave(" = %p", mnt); | 195 | _leave(" = %p", mnt); |
206 | return mnt; | 196 | return mnt; |
207 | 197 | ||
208 | error: | 198 | error: |
@@ -212,7 +202,7 @@ error: | |||
212 | free_page((unsigned long) devname); | 202 | free_page((unsigned long) devname); |
213 | if (options) | 203 | if (options) |
214 | free_page((unsigned long) options); | 204 | free_page((unsigned long) options); |
215 | kleave(" = %d", ret); | 205 | _leave(" = %d", ret); |
216 | return ERR_PTR(ret); | 206 | return ERR_PTR(ret); |
217 | } | 207 | } |
218 | 208 | ||
@@ -222,51 +212,81 @@ error: | |||
222 | static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd) | 212 | static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd) |
223 | { | 213 | { |
224 | struct vfsmount *newmnt; | 214 | struct vfsmount *newmnt; |
225 | struct dentry *old_dentry; | ||
226 | int err; | 215 | int err; |
227 | 216 | ||
228 | kenter("%p{%s},{%s:%p{%s}}", | 217 | _enter("%p{%s},{%s:%p{%s}}", |
229 | dentry, | 218 | dentry, |
230 | dentry->d_name.name, | 219 | dentry->d_name.name, |
231 | nd->mnt->mnt_devname, | 220 | nd->mnt->mnt_devname, |
232 | dentry, | 221 | dentry, |
233 | nd->dentry->d_name.name); | 222 | nd->dentry->d_name.name); |
234 | 223 | ||
235 | newmnt = afs_mntpt_do_automount(dentry); | 224 | dput(nd->dentry); |
225 | nd->dentry = dget(dentry); | ||
226 | |||
227 | newmnt = afs_mntpt_do_automount(nd->dentry); | ||
236 | if (IS_ERR(newmnt)) { | 228 | if (IS_ERR(newmnt)) { |
237 | path_release(nd); | 229 | path_release(nd); |
238 | return (void *)newmnt; | 230 | return (void *)newmnt; |
239 | } | 231 | } |
240 | 232 | ||
241 | old_dentry = nd->dentry; | 233 | mntget(newmnt); |
242 | nd->dentry = dentry; | 234 | err = do_add_mount(newmnt, nd, MNT_SHRINKABLE, &afs_vfsmounts); |
243 | err = do_add_mount(newmnt, nd, 0, &afs_vfsmounts); | 235 | switch (err) { |
244 | nd->dentry = old_dentry; | 236 | case 0: |
245 | 237 | path_release(nd); | |
246 | path_release(nd); | ||
247 | |||
248 | if (!err) { | ||
249 | mntget(newmnt); | ||
250 | nd->mnt = newmnt; | 238 | nd->mnt = newmnt; |
251 | dget(newmnt->mnt_root); | 239 | nd->dentry = dget(newmnt->mnt_root); |
252 | nd->dentry = newmnt->mnt_root; | 240 | schedule_delayed_work(&afs_mntpt_expiry_timer, |
241 | afs_mntpt_expiry_timeout * HZ); | ||
242 | break; | ||
243 | case -EBUSY: | ||
244 | /* someone else made a mount here whilst we were busy */ | ||
245 | while (d_mountpoint(nd->dentry) && | ||
246 | follow_down(&nd->mnt, &nd->dentry)) | ||
247 | ; | ||
248 | err = 0; | ||
249 | default: | ||
250 | mntput(newmnt); | ||
251 | break; | ||
253 | } | 252 | } |
254 | 253 | ||
255 | kleave(" = %d", err); | 254 | _leave(" = %d", err); |
256 | return ERR_PTR(err); | 255 | return ERR_PTR(err); |
257 | } | 256 | } |
258 | 257 | ||
259 | /* | 258 | /* |
260 | * handle mountpoint expiry timer going off | 259 | * handle mountpoint expiry timer going off |
261 | */ | 260 | */ |
262 | static void afs_mntpt_expiry_timed_out(struct afs_timer *timer) | 261 | static void afs_mntpt_expiry_timed_out(struct work_struct *work) |
263 | { | 262 | { |
264 | kenter(""); | 263 | _enter(""); |
264 | |||
265 | if (!list_empty(&afs_vfsmounts)) { | ||
266 | mark_mounts_for_expiry(&afs_vfsmounts); | ||
267 | schedule_delayed_work(&afs_mntpt_expiry_timer, | ||
268 | afs_mntpt_expiry_timeout * HZ); | ||
269 | } | ||
270 | |||
271 | _leave(""); | ||
272 | } | ||
265 | 273 | ||
266 | mark_mounts_for_expiry(&afs_vfsmounts); | 274 | /* |
275 | * kill the AFS mountpoint timer if it's still running | ||
276 | */ | ||
277 | void afs_mntpt_kill_timer(void) | ||
278 | { | ||
279 | _enter(""); | ||
267 | 280 | ||
268 | afs_kafstimod_add_timer(&afs_mntpt_expiry_timer, | 281 | ASSERT(list_empty(&afs_vfsmounts)); |
269 | afs_mntpt_expiry_timeout * HZ); | 282 | cancel_delayed_work(&afs_mntpt_expiry_timer); |
283 | flush_scheduled_work(); | ||
284 | } | ||
270 | 285 | ||
271 | kleave(""); | 286 | /* |
287 | * begin unmount by attempting to remove all automounted mountpoints we added | ||
288 | */ | ||
289 | void afs_umount_begin(struct vfsmount *vfsmnt, int flags) | ||
290 | { | ||
291 | shrink_submounts(vfsmnt, &afs_vfsmounts); | ||
272 | } | 292 | } |
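The mountpoint code above swaps the old afs_timer mechanism for an ordinary delayed work item. In outline, the lifecycle looks like the sketch below; this is a restatement of the sequencing already present in the patch, not additional code.

	/* illustrative outline of the expiry lifecycle */
	static void example_mntpt_expiry_lifecycle(void)
	{
		/* armed each time afs_mntpt_follow_link() adds an automount */
		schedule_delayed_work(&afs_mntpt_expiry_timer,
				      afs_mntpt_expiry_timeout * HZ);

		/* afs_mntpt_expiry_timed_out() re-arms itself for as long as
		 * afs_vfsmounts is non-empty */

		/* afs_mntpt_kill_timer() stops it once all automounts are gone */
		cancel_delayed_work(&afs_mntpt_expiry_timer);
		flush_scheduled_work();
	}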
diff --git a/fs/afs/mount.h b/fs/afs/mount.h deleted file mode 100644 index 41b848320e0..00000000000 --- a/fs/afs/mount.h +++ /dev/null | |||
@@ -1,23 +0,0 @@ | |||
1 | /* mount parameters | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef AFS_MOUNT_H | ||
13 | #define AFS_MOUNT_H | ||
14 | |||
15 | struct afs_mountdata { | ||
16 | const char *volume; /* name of volume */ | ||
17 | const char *cell; /* name of cell containing volume */ | ||
18 | const char *cache; /* name of cache block device */ | ||
19 | size_t nservers; /* number of server addresses listed */ | ||
20 | uint32_t servers[10]; /* IP addresses of servers in this cell */ | ||
21 | }; | ||
22 | |||
23 | #endif /* AFS_MOUNT_H */ | ||
diff --git a/fs/afs/proc.c b/fs/afs/proc.c index 5ebcc0cd3dd..d5601f617cd 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c | |||
@@ -13,8 +13,6 @@ | |||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/proc_fs.h> | 14 | #include <linux/proc_fs.h> |
15 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
16 | #include "cell.h" | ||
17 | #include "volume.h" | ||
18 | #include <asm/uaccess.h> | 16 | #include <asm/uaccess.h> |
19 | #include "internal.h" | 17 | #include "internal.h" |
20 | 18 | ||
@@ -315,10 +313,14 @@ static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf, | |||
315 | 313 | ||
316 | if (strcmp(kbuf, "add") == 0) { | 314 | if (strcmp(kbuf, "add") == 0) { |
317 | struct afs_cell *cell; | 315 | struct afs_cell *cell; |
318 | ret = afs_cell_create(name, args, &cell); | 316 | |
319 | if (ret < 0) | 317 | cell = afs_cell_create(name, args); |
318 | if (IS_ERR(cell)) { | ||
319 | ret = PTR_ERR(cell); | ||
320 | goto done; | 320 | goto done; |
321 | } | ||
321 | 322 | ||
323 | afs_put_cell(cell); | ||
322 | printk("kAFS: Added new cell '%s'\n", name); | 324 | printk("kAFS: Added new cell '%s'\n", name); |
323 | } else { | 325 | } else { |
324 | goto inval; | 326 | goto inval; |
@@ -472,7 +474,7 @@ static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file) | |||
472 | struct seq_file *m; | 474 | struct seq_file *m; |
473 | int ret; | 475 | int ret; |
474 | 476 | ||
475 | cell = afs_get_cell_maybe((struct afs_cell **) &PDE(inode)->data); | 477 | cell = PDE(inode)->data; |
476 | if (!cell) | 478 | if (!cell) |
477 | return -ENOENT; | 479 | return -ENOENT; |
478 | 480 | ||
@@ -491,13 +493,7 @@ static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file) | |||
491 | */ | 493 | */ |
492 | static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file) | 494 | static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file) |
493 | { | 495 | { |
494 | struct afs_cell *cell = PDE(inode)->data; | 496 | return seq_release(inode, file); |
495 | int ret; | ||
496 | |||
497 | ret = seq_release(inode, file); | ||
498 | |||
499 | afs_put_cell(cell); | ||
500 | return ret; | ||
501 | } | 497 | } |
502 | 498 | ||
503 | /* | 499 | /* |
@@ -557,6 +553,16 @@ static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v) | |||
557 | up_read(&cell->vl_sem); | 553 | up_read(&cell->vl_sem); |
558 | } | 554 | } |
559 | 555 | ||
556 | const char afs_vlocation_states[][4] = { | ||
557 | [AFS_VL_NEW] = "New", | ||
558 | [AFS_VL_CREATING] = "Crt", | ||
559 | [AFS_VL_VALID] = "Val", | ||
560 | [AFS_VL_NO_VOLUME] = "NoV", | ||
561 | [AFS_VL_UPDATING] = "Upd", | ||
562 | [AFS_VL_VOLUME_DELETED] = "Del", | ||
563 | [AFS_VL_UNCERTAIN] = "Unc", | ||
564 | }; | ||
565 | |||
560 | /* | 566 | /* |
561 | * display a header line followed by a load of volume lines | 567 | * display a header line followed by a load of volume lines |
562 | */ | 568 | */ |
@@ -567,13 +573,14 @@ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v) | |||
567 | 573 | ||
568 | /* display header on line 1 */ | 574 | /* display header on line 1 */ |
569 | if (v == (void *) 1) { | 575 | if (v == (void *) 1) { |
570 | seq_puts(m, "USE VLID[0] VLID[1] VLID[2] NAME\n"); | 576 | seq_puts(m, "USE STT VLID[0] VLID[1] VLID[2] NAME\n"); |
571 | return 0; | 577 | return 0; |
572 | } | 578 | } |
573 | 579 | ||
574 | /* display one cell per line on subsequent lines */ | 580 | /* display one cell per line on subsequent lines */ |
575 | seq_printf(m, "%3d %08x %08x %08x %s\n", | 581 | seq_printf(m, "%3d %s %08x %08x %08x %s\n", |
576 | atomic_read(&vlocation->usage), | 582 | atomic_read(&vlocation->usage), |
583 | afs_vlocation_states[vlocation->state], | ||
577 | vlocation->vldb.vid[0], | 584 | vlocation->vldb.vid[0], |
578 | vlocation->vldb.vid[1], | 585 | vlocation->vldb.vid[1], |
579 | vlocation->vldb.vid[2], | 586 | vlocation->vldb.vid[2], |
@@ -592,11 +599,11 @@ static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file) | |||
592 | struct seq_file *m; | 599 | struct seq_file *m; |
593 | int ret; | 600 | int ret; |
594 | 601 | ||
595 | cell = afs_get_cell_maybe((struct afs_cell**)&PDE(inode)->data); | 602 | cell = PDE(inode)->data; |
596 | if (!cell) | 603 | if (!cell) |
597 | return -ENOENT; | 604 | return -ENOENT; |
598 | 605 | ||
599 | ret = seq_open(file,&afs_proc_cell_vlservers_ops); | 606 | ret = seq_open(file, &afs_proc_cell_vlservers_ops); |
600 | if (ret<0) | 607 | if (ret<0) |
601 | return ret; | 608 | return ret; |
602 | 609 | ||
@@ -612,13 +619,7 @@ static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file) | |||
612 | static int afs_proc_cell_vlservers_release(struct inode *inode, | 619 | static int afs_proc_cell_vlservers_release(struct inode *inode, |
613 | struct file *file) | 620 | struct file *file) |
614 | { | 621 | { |
615 | struct afs_cell *cell = PDE(inode)->data; | 622 | return seq_release(inode, file); |
616 | int ret; | ||
617 | |||
618 | ret = seq_release(inode,file); | ||
619 | |||
620 | afs_put_cell(cell); | ||
621 | return ret; | ||
622 | } | 623 | } |
623 | 624 | ||
624 | /* | 625 | /* |
@@ -703,7 +704,7 @@ static int afs_proc_cell_servers_open(struct inode *inode, struct file *file) | |||
703 | struct seq_file *m; | 704 | struct seq_file *m; |
704 | int ret; | 705 | int ret; |
705 | 706 | ||
706 | cell = afs_get_cell_maybe((struct afs_cell **) &PDE(inode)->data); | 707 | cell = PDE(inode)->data; |
707 | if (!cell) | 708 | if (!cell) |
708 | return -ENOENT; | 709 | return -ENOENT; |
709 | 710 | ||
@@ -722,13 +723,7 @@ static int afs_proc_cell_servers_open(struct inode *inode, struct file *file) | |||
722 | static int afs_proc_cell_servers_release(struct inode *inode, | 723 | static int afs_proc_cell_servers_release(struct inode *inode, |
723 | struct file *file) | 724 | struct file *file) |
724 | { | 725 | { |
725 | struct afs_cell *cell = PDE(inode)->data; | 726 | return seq_release(inode, file); |
726 | int ret; | ||
727 | |||
728 | ret = seq_release(inode, file); | ||
729 | |||
730 | afs_put_cell(cell); | ||
731 | return ret; | ||
732 | } | 727 | } |
733 | 728 | ||
734 | /* | 729 | /* |
@@ -736,7 +731,7 @@ static int afs_proc_cell_servers_release(struct inode *inode, | |||
736 | * first item | 731 | * first item |
737 | */ | 732 | */ |
738 | static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos) | 733 | static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos) |
739 | __acquires(m->private->sv_lock) | 734 | __acquires(m->private->servers_lock) |
740 | { | 735 | { |
741 | struct list_head *_p; | 736 | struct list_head *_p; |
742 | struct afs_cell *cell = m->private; | 737 | struct afs_cell *cell = m->private; |
@@ -745,7 +740,7 @@ static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos) | |||
745 | _enter("cell=%p pos=%Ld", cell, *_pos); | 740 | _enter("cell=%p pos=%Ld", cell, *_pos); |
746 | 741 | ||
747 | /* lock the list against modification */ | 742 | /* lock the list against modification */ |
748 | read_lock(&cell->sv_lock); | 743 | read_lock(&cell->servers_lock); |
749 | 744 | ||
750 | /* allow for the header line */ | 745 | /* allow for the header line */ |
751 | if (!pos) | 746 | if (!pos) |
@@ -753,11 +748,11 @@ static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos) | |||
753 | pos--; | 748 | pos--; |
754 | 749 | ||
755 | /* find the n'th element in the list */ | 750 | /* find the n'th element in the list */ |
756 | list_for_each(_p, &cell->sv_list) | 751 | list_for_each(_p, &cell->servers) |
757 | if (!pos--) | 752 | if (!pos--) |
758 | break; | 753 | break; |
759 | 754 | ||
760 | return _p != &cell->sv_list ? _p : NULL; | 755 | return _p != &cell->servers ? _p : NULL; |
761 | } | 756 | } |
762 | 757 | ||
763 | /* | 758 | /* |
@@ -774,20 +769,20 @@ static void *afs_proc_cell_servers_next(struct seq_file *p, void *v, | |||
774 | (*_pos)++; | 769 | (*_pos)++; |
775 | 770 | ||
776 | _p = v; | 771 | _p = v; |
777 | _p = v == (void *) 1 ? cell->sv_list.next : _p->next; | 772 | _p = v == (void *) 1 ? cell->servers.next : _p->next; |
778 | 773 | ||
779 | return _p != &cell->sv_list ? _p : NULL; | 774 | return _p != &cell->servers ? _p : NULL; |
780 | } | 775 | } |
781 | 776 | ||
782 | /* | 777 | /* |
783 | * clean up after reading from the cells list | 778 | * clean up after reading from the cells list |
784 | */ | 779 | */ |
785 | static void afs_proc_cell_servers_stop(struct seq_file *p, void *v) | 780 | static void afs_proc_cell_servers_stop(struct seq_file *p, void *v) |
786 | __releases(p->private->sv_lock) | 781 | __releases(p->private->servers_lock) |
787 | { | 782 | { |
788 | struct afs_cell *cell = p->private; | 783 | struct afs_cell *cell = p->private; |
789 | 784 | ||
790 | read_unlock(&cell->sv_lock); | 785 | read_unlock(&cell->servers_lock); |
791 | } | 786 | } |
792 | 787 | ||
793 | /* | 788 | /* |
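With the new STT column driven by afs_vlocation_states[], a volumes listing produced by afs_proc_cell_volumes_show() would look roughly like this (the usage count, state and volume IDs below are invented for illustration):

	USE STT VLID[0]  VLID[1]  VLID[2]  NAME
	  1 Val 20000abc 20000abd 20000abe root.cell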
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c new file mode 100644 index 00000000000..b92774231b3 --- /dev/null +++ b/fs/afs/rxrpc.c | |||
@@ -0,0 +1,666 @@ | |||
1 | /* Maintain an RxRPC server socket to do AFS communications through | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <net/sock.h> | ||
13 | #include <net/af_rxrpc.h> | ||
14 | #include <rxrpc/packet.h> | ||
15 | #include "internal.h" | ||
16 | #include "afs_cm.h" | ||
17 | |||
18 | static struct socket *afs_socket; /* my RxRPC socket */ | ||
19 | static struct workqueue_struct *afs_async_calls; | ||
20 | |||
21 | static void afs_wake_up_call_waiter(struct afs_call *); | ||
22 | static int afs_wait_for_call_to_complete(struct afs_call *); | ||
23 | static void afs_wake_up_async_call(struct afs_call *); | ||
24 | static int afs_dont_wait_for_call_to_complete(struct afs_call *); | ||
25 | static void afs_process_async_call(struct work_struct *); | ||
26 | static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *); | ||
27 | static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool); | ||
28 | |||
29 | /* synchronous call management */ | ||
30 | const struct afs_wait_mode afs_sync_call = { | ||
31 | .rx_wakeup = afs_wake_up_call_waiter, | ||
32 | .wait = afs_wait_for_call_to_complete, | ||
33 | }; | ||
34 | |||
35 | /* asynchronous call management */ | ||
36 | const struct afs_wait_mode afs_async_call = { | ||
37 | .rx_wakeup = afs_wake_up_async_call, | ||
38 | .wait = afs_dont_wait_for_call_to_complete, | ||
39 | }; | ||
40 | |||
41 | /* asynchronous incoming call management */ | ||
42 | static const struct afs_wait_mode afs_async_incoming_call = { | ||
43 | .rx_wakeup = afs_wake_up_async_call, | ||
44 | }; | ||
45 | |||
46 | /* asynchronous incoming call initial processing */ | ||
47 | static const struct afs_call_type afs_RXCMxxxx = { | ||
48 | .deliver = afs_deliver_cm_op_id, | ||
49 | .abort_to_error = afs_abort_to_error, | ||
50 | }; | ||
51 | |||
52 | static void afs_collect_incoming_call(struct work_struct *); | ||
53 | |||
54 | static struct sk_buff_head afs_incoming_calls; | ||
55 | static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call); | ||
56 | |||
57 | /* | ||
58 | * open an RxRPC socket and bind it to be a server for callback notifications | ||
59 | * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT | ||
60 | */ | ||
61 | int afs_open_socket(void) | ||
62 | { | ||
63 | struct sockaddr_rxrpc srx; | ||
64 | struct socket *socket; | ||
65 | int ret; | ||
66 | |||
67 | _enter(""); | ||
68 | |||
69 | skb_queue_head_init(&afs_incoming_calls); | ||
70 | |||
71 | afs_async_calls = create_singlethread_workqueue("kafsd"); | ||
72 | if (!afs_async_calls) { | ||
73 | _leave(" = -ENOMEM [wq]"); | ||
74 | return -ENOMEM; | ||
75 | } | ||
76 | |||
77 | ret = sock_create_kern(AF_RXRPC, SOCK_DGRAM, PF_INET, &socket); | ||
78 | if (ret < 0) { | ||
79 | destroy_workqueue(afs_async_calls); | ||
80 | _leave(" = %d [socket]", ret); | ||
81 | return ret; | ||
82 | } | ||
83 | |||
84 | socket->sk->sk_allocation = GFP_NOFS; | ||
85 | |||
86 | /* bind the callback manager's address to make this a server socket */ | ||
87 | srx.srx_family = AF_RXRPC; | ||
88 | srx.srx_service = CM_SERVICE; | ||
89 | srx.transport_type = SOCK_DGRAM; | ||
90 | srx.transport_len = sizeof(srx.transport.sin); | ||
91 | srx.transport.sin.sin_family = AF_INET; | ||
92 | srx.transport.sin.sin_port = htons(AFS_CM_PORT); | ||
93 | memset(&srx.transport.sin.sin_addr, 0, | ||
94 | sizeof(srx.transport.sin.sin_addr)); | ||
95 | |||
96 | ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx)); | ||
97 | if (ret < 0) { | ||
98 | sock_release(socket); | ||
99 | _leave(" = %d [bind]", ret); | ||
100 | return ret; | ||
101 | } | ||
102 | |||
103 | rxrpc_kernel_intercept_rx_messages(socket, afs_rx_interceptor); | ||
104 | |||
105 | afs_socket = socket; | ||
106 | _leave(" = 0"); | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * close the RxRPC socket AFS was using | ||
112 | */ | ||
113 | void afs_close_socket(void) | ||
114 | { | ||
115 | _enter(""); | ||
116 | |||
117 | sock_release(afs_socket); | ||
118 | |||
119 | _debug("dework"); | ||
120 | destroy_workqueue(afs_async_calls); | ||
121 | _leave(""); | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * allocate a call with flat request and reply buffers | ||
126 | */ | ||
127 | struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type, | ||
128 | size_t request_size, size_t reply_size) | ||
129 | { | ||
130 | struct afs_call *call; | ||
131 | |||
132 | call = kzalloc(sizeof(*call), GFP_NOFS); | ||
133 | if (!call) | ||
134 | goto nomem_call; | ||
135 | |||
136 | if (request_size) { | ||
137 | call->request = kmalloc(request_size, GFP_NOFS); | ||
138 | if (!call->request) | ||
139 | goto nomem_request; | ||
140 | } | ||
141 | |||
142 | if (reply_size) { | ||
143 | call->buffer = kmalloc(reply_size, GFP_NOFS); | ||
144 | if (!call->buffer) | ||
145 | goto nomem_buffer; | ||
146 | } | ||
147 | |||
148 | call->type = type; | ||
149 | call->request_size = request_size; | ||
150 | call->reply_max = reply_size; | ||
151 | |||
152 | init_waitqueue_head(&call->waitq); | ||
153 | skb_queue_head_init(&call->rx_queue); | ||
154 | return call; | ||
155 | |||
156 | nomem_buffer: | ||
157 | kfree(call->request); | ||
158 | nomem_request: | ||
159 | kfree(call); | ||
160 | nomem_call: | ||
161 | return NULL; | ||
162 | } | ||
163 | |||
164 | /* | ||
165 | * clean up a call with flat buffer | ||
166 | */ | ||
167 | void afs_flat_call_destructor(struct afs_call *call) | ||
168 | { | ||
169 | _enter(""); | ||
170 | |||
171 | kfree(call->request); | ||
172 | call->request = NULL; | ||
173 | kfree(call->buffer); | ||
174 | call->buffer = NULL; | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * initiate a call | ||
179 | */ | ||
180 | int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, | ||
181 | const struct afs_wait_mode *wait_mode) | ||
182 | { | ||
183 | struct sockaddr_rxrpc srx; | ||
184 | struct rxrpc_call *rxcall; | ||
185 | struct msghdr msg; | ||
186 | struct kvec iov[1]; | ||
187 | int ret; | ||
188 | |||
189 | _enter("%x,{%d},", addr->s_addr, ntohs(call->port)); | ||
190 | |||
191 | call->wait_mode = wait_mode; | ||
192 | INIT_WORK(&call->async_work, afs_process_async_call); | ||
193 | |||
194 | memset(&srx, 0, sizeof(srx)); | ||
195 | srx.srx_family = AF_RXRPC; | ||
196 | srx.srx_service = call->service_id; | ||
197 | srx.transport_type = SOCK_DGRAM; | ||
198 | srx.transport_len = sizeof(srx.transport.sin); | ||
199 | srx.transport.sin.sin_family = AF_INET; | ||
200 | srx.transport.sin.sin_port = call->port; | ||
201 | memcpy(&srx.transport.sin.sin_addr, addr, 4); | ||
202 | |||
203 | /* create a call */ | ||
204 | rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key, | ||
205 | (unsigned long) call, gfp); | ||
206 | if (IS_ERR(rxcall)) { | ||
207 | ret = PTR_ERR(rxcall); | ||
208 | goto error_kill_call; | ||
209 | } | ||
210 | |||
211 | call->rxcall = rxcall; | ||
212 | |||
213 | /* send the request */ | ||
214 | iov[0].iov_base = call->request; | ||
215 | iov[0].iov_len = call->request_size; | ||
216 | |||
217 | msg.msg_name = NULL; | ||
218 | msg.msg_namelen = 0; | ||
219 | msg.msg_iov = (struct iovec *) iov; | ||
220 | msg.msg_iovlen = 1; | ||
221 | msg.msg_control = NULL; | ||
222 | msg.msg_controllen = 0; | ||
223 | msg.msg_flags = 0; | ||
224 | |||
225 | /* have to change the state *before* sending the last packet as RxRPC | ||
226 | * might give us the reply before it returns from sending the | ||
227 | * request */ | ||
228 | call->state = AFS_CALL_AWAIT_REPLY; | ||
229 | ret = rxrpc_kernel_send_data(rxcall, &msg, call->request_size); | ||
230 | if (ret < 0) | ||
231 | goto error_do_abort; | ||
232 | |||
233 | /* at this point, an async call may no longer exist as it may have | ||
234 | * already completed */ | ||
235 | return wait_mode->wait(call); | ||
236 | |||
237 | error_do_abort: | ||
238 | rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT); | ||
239 | rxrpc_kernel_end_call(rxcall); | ||
240 | error_kill_call: | ||
241 | call->type->destructor(call); | ||
242 | ASSERT(skb_queue_empty(&call->rx_queue)); | ||
243 | kfree(call); | ||
244 | _leave(" = %d", ret); | ||
245 | return ret; | ||
246 | } | ||
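afs_make_call() is driven by RPC wrappers elsewhere (fsclient.c, vlclient.c); a sketch of such a wrapper is given below. The call type, constant names and marshalling are simplified assumptions; only the allocation and wait-mode plumbing mirror the patch.

	/* sketch of a synchronous FS RPC wrapper (names partly assumed) */
	static int example_fetch_status(struct afs_server *server,
					const struct afs_call_type *type)
	{
		struct afs_call *call;

		call = afs_alloc_flat_call(type, 16, 256);
		if (!call)
			return -ENOMEM;

		call->service_id = FS_SERVICE;		/* assumed constant name */
		call->port = htons(AFS_FS_PORT);	/* assumed constant name */
		/* ... marshal the request into call->request ... */

		/* block until the reply arrives or the call fails */
		return afs_make_call(&server->addr, call, GFP_NOFS,
				     &afs_sync_call);
	}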
247 | |||
248 | /* | ||
249 | * handles intercepted messages that were arriving in the socket's Rx queue | ||
250 | * - called with the socket receive queue lock held to ensure message ordering | ||
251 | * - called with softirqs disabled | ||
252 | */ | ||
253 | static void afs_rx_interceptor(struct sock *sk, unsigned long user_call_ID, | ||
254 | struct sk_buff *skb) | ||
255 | { | ||
256 | struct afs_call *call = (struct afs_call *) user_call_ID; | ||
257 | |||
258 | _enter("%p,,%u", call, skb->mark); | ||
259 | |||
260 | ASSERTCMP(sk, ==, afs_socket->sk); | ||
261 | |||
262 | if (!call) { | ||
263 | /* it's an incoming call for our callback service */ | ||
264 | __skb_queue_tail(&afs_incoming_calls, skb); | ||
265 | schedule_work(&afs_collect_incoming_call_work); | ||
266 | } else { | ||
267 | /* route the messages directly to the appropriate call */ | ||
268 | __skb_queue_tail(&call->rx_queue, skb); | ||
269 | call->wait_mode->rx_wakeup(call); | ||
270 | } | ||
271 | |||
272 | _leave(""); | ||
273 | } | ||
274 | |||
275 | /* | ||
276 | * deliver messages to a call | ||
277 | */ | ||
278 | static void afs_deliver_to_call(struct afs_call *call) | ||
279 | { | ||
280 | struct sk_buff *skb; | ||
281 | bool last; | ||
282 | u32 abort_code; | ||
283 | int ret; | ||
284 | |||
285 | _enter(""); | ||
286 | |||
287 | while ((call->state == AFS_CALL_AWAIT_REPLY || | ||
288 | call->state == AFS_CALL_AWAIT_OP_ID || | ||
289 | call->state == AFS_CALL_AWAIT_REQUEST || | ||
290 | call->state == AFS_CALL_AWAIT_ACK) && | ||
291 | (skb = skb_dequeue(&call->rx_queue))) { | ||
292 | switch (skb->mark) { | ||
293 | case RXRPC_SKB_MARK_DATA: | ||
294 | _debug("Rcv DATA"); | ||
295 | last = rxrpc_kernel_is_data_last(skb); | ||
296 | ret = call->type->deliver(call, skb, last); | ||
297 | switch (ret) { | ||
298 | case 0: | ||
299 | if (last && | ||
300 | call->state == AFS_CALL_AWAIT_REPLY) | ||
301 | call->state = AFS_CALL_COMPLETE; | ||
302 | break; | ||
303 | case -ENOTCONN: | ||
304 | abort_code = RX_CALL_DEAD; | ||
305 | goto do_abort; | ||
306 | case -ENOTSUPP: | ||
307 | abort_code = RX_INVALID_OPERATION; | ||
308 | goto do_abort; | ||
309 | default: | ||
310 | abort_code = RXGEN_CC_UNMARSHAL; | ||
311 | if (call->state != AFS_CALL_AWAIT_REPLY) | ||
312 | abort_code = RXGEN_SS_UNMARSHAL; | ||
313 | do_abort: | ||
314 | rxrpc_kernel_abort_call(call->rxcall, | ||
315 | abort_code); | ||
316 | call->error = ret; | ||
317 | call->state = AFS_CALL_ERROR; | ||
318 | break; | ||
319 | } | ||
320 | rxrpc_kernel_data_delivered(skb); | ||
321 | skb = NULL; | ||
322 | break; | ||
323 | case RXRPC_SKB_MARK_FINAL_ACK: | ||
324 | _debug("Rcv ACK"); | ||
325 | call->state = AFS_CALL_COMPLETE; | ||
326 | break; | ||
327 | case RXRPC_SKB_MARK_BUSY: | ||
328 | _debug("Rcv BUSY"); | ||
329 | call->error = -EBUSY; | ||
330 | call->state = AFS_CALL_BUSY; | ||
331 | break; | ||
332 | case RXRPC_SKB_MARK_REMOTE_ABORT: | ||
333 | abort_code = rxrpc_kernel_get_abort_code(skb); | ||
334 | call->error = call->type->abort_to_error(abort_code); | ||
335 | call->state = AFS_CALL_ABORTED; | ||
336 | _debug("Rcv ABORT %u -> %d", abort_code, call->error); | ||
337 | break; | ||
338 | case RXRPC_SKB_MARK_NET_ERROR: | ||
339 | call->error = -rxrpc_kernel_get_error_number(skb); | ||
340 | call->state = AFS_CALL_ERROR; | ||
341 | _debug("Rcv NET ERROR %d", call->error); | ||
342 | break; | ||
343 | case RXRPC_SKB_MARK_LOCAL_ERROR: | ||
344 | call->error = -rxrpc_kernel_get_error_number(skb); | ||
345 | call->state = AFS_CALL_ERROR; | ||
346 | _debug("Rcv LOCAL ERROR %d", call->error); | ||
347 | break; | ||
348 | default: | ||
349 | BUG(); | ||
350 | break; | ||
351 | } | ||
352 | |||
353 | rxrpc_kernel_free_skb(skb); | ||
354 | } | ||
355 | |||
356 | /* make sure the queue is empty if the call is done with (we might have | ||
357 | * aborted the call early because of an unmarshalling error) */ | ||
358 | if (call->state >= AFS_CALL_COMPLETE) { | ||
359 | while ((skb = skb_dequeue(&call->rx_queue))) | ||
360 | rxrpc_kernel_free_skb(skb); | ||
361 | if (call->incoming) { | ||
362 | rxrpc_kernel_end_call(call->rxcall); | ||
363 | call->type->destructor(call); | ||
364 | ASSERT(skb_queue_empty(&call->rx_queue)); | ||
365 | kfree(call); | ||
366 | } | ||
367 | } | ||
368 | |||
369 | _leave(""); | ||
370 | } | ||
371 | |||
372 | /* | ||
373 | * wait synchronously for a call to complete | ||
374 | */ | ||
375 | static int afs_wait_for_call_to_complete(struct afs_call *call) | ||
376 | { | ||
377 | struct sk_buff *skb; | ||
378 | int ret; | ||
379 | |||
380 | DECLARE_WAITQUEUE(myself, current); | ||
381 | |||
382 | _enter(""); | ||
383 | |||
384 | add_wait_queue(&call->waitq, &myself); | ||
385 | for (;;) { | ||
386 | set_current_state(TASK_INTERRUPTIBLE); | ||
387 | |||
388 | /* deliver any messages that are in the queue */ | ||
389 | if (!skb_queue_empty(&call->rx_queue)) { | ||
390 | __set_current_state(TASK_RUNNING); | ||
391 | afs_deliver_to_call(call); | ||
392 | continue; | ||
393 | } | ||
394 | |||
395 | ret = call->error; | ||
396 | if (call->state >= AFS_CALL_COMPLETE) | ||
397 | break; | ||
398 | ret = -EINTR; | ||
399 | if (signal_pending(current)) | ||
400 | break; | ||
401 | schedule(); | ||
402 | } | ||
403 | |||
404 | remove_wait_queue(&call->waitq, &myself); | ||
405 | __set_current_state(TASK_RUNNING); | ||
406 | |||
407 | /* kill the call */ | ||
408 | if (call->state < AFS_CALL_COMPLETE) { | ||
409 | _debug("call incomplete"); | ||
410 | rxrpc_kernel_abort_call(call->rxcall, RX_CALL_DEAD); | ||
411 | while ((skb = skb_dequeue(&call->rx_queue))) | ||
412 | rxrpc_kernel_free_skb(skb); | ||
413 | } | ||
414 | |||
415 | _debug("call complete"); | ||
416 | rxrpc_kernel_end_call(call->rxcall); | ||
417 | call->type->destructor(call); | ||
418 | ASSERT(skb_queue_empty(&call->rx_queue)); | ||
419 | kfree(call); | ||
420 | _leave(" = %d", ret); | ||
421 | return ret; | ||
422 | } | ||
423 | |||
424 | /* | ||
425 | * wake up a waiting call | ||
426 | */ | ||
427 | static void afs_wake_up_call_waiter(struct afs_call *call) | ||
428 | { | ||
429 | wake_up(&call->waitq); | ||
430 | } | ||
431 | |||
432 | /* | ||
433 | * wake up an asynchronous call | ||
434 | */ | ||
435 | static void afs_wake_up_async_call(struct afs_call *call) | ||
436 | { | ||
437 | _enter(""); | ||
438 | queue_work(afs_async_calls, &call->async_work); | ||
439 | } | ||
440 | |||
441 | /* | ||
442 | * put a call into asynchronous mode | ||
443 | * - mustn't touch the call descriptor as the call may have completed by the | ||

444 | * time we get here | ||
445 | */ | ||
446 | static int afs_dont_wait_for_call_to_complete(struct afs_call *call) | ||
447 | { | ||
448 | _enter(""); | ||
449 | return -EINPROGRESS; | ||
450 | } | ||
451 | |||
452 | /* | ||
453 | * delete an asynchronous call | ||
454 | */ | ||
455 | static void afs_delete_async_call(struct work_struct *work) | ||
456 | { | ||
457 | struct afs_call *call = | ||
458 | container_of(work, struct afs_call, async_work); | ||
459 | |||
460 | _enter(""); | ||
461 | |||
462 | ASSERT(skb_queue_empty(&call->rx_queue)); | ||
463 | ASSERT(!work_pending(&call->async_work)); | ||
464 | kfree(call); | ||
465 | |||
466 | _leave(""); | ||
467 | } | ||
468 | |||
469 | /* | ||
470 | * perform processing on an asynchronous call | ||
471 | * - on a multiple-thread workqueue this work item may try to run on several | ||
472 | * CPUs at the same time | ||
473 | */ | ||
474 | static void afs_process_async_call(struct work_struct *work) | ||
475 | { | ||
476 | struct afs_call *call = | ||
477 | container_of(work, struct afs_call, async_work); | ||
478 | |||
479 | _enter(""); | ||
480 | |||
481 | if (!skb_queue_empty(&call->rx_queue)) | ||
482 | afs_deliver_to_call(call); | ||
483 | |||
484 | if (call->state >= AFS_CALL_COMPLETE && call->wait_mode) { | ||
485 | if (call->wait_mode->async_complete) | ||
486 | call->wait_mode->async_complete(call->reply, | ||
487 | call->error); | ||
488 | call->reply = NULL; | ||
489 | |||
490 | /* kill the call */ | ||
491 | rxrpc_kernel_end_call(call->rxcall); | ||
492 | if (call->type->destructor) | ||
493 | call->type->destructor(call); | ||
494 | |||
495 | /* we can't just delete the call because the work item may be | ||
496 | * queued */ | ||
497 | PREPARE_WORK(&call->async_work, afs_delete_async_call); | ||
498 | queue_work(afs_async_calls, &call->async_work); | ||
499 | } | ||
500 | |||
501 | _leave(""); | ||
502 | } | ||
503 | |||
504 | /* | ||
505 | * empty a socket buffer into a flat reply buffer | ||
506 | */ | ||
507 | void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb) | ||
508 | { | ||
509 | size_t len = skb->len; | ||
510 | |||
511 | if (skb_copy_bits(skb, 0, call->buffer + call->reply_size, len) < 0) | ||
512 | BUG(); | ||
513 | call->reply_size += len; | ||
514 | } | ||
515 | |||
516 | /* | ||
517 | * accept the backlog of incoming calls | ||
518 | */ | ||
519 | static void afs_collect_incoming_call(struct work_struct *work) | ||
520 | { | ||
521 | struct rxrpc_call *rxcall; | ||
522 | struct afs_call *call = NULL; | ||
523 | struct sk_buff *skb; | ||
524 | |||
525 | while ((skb = skb_dequeue(&afs_incoming_calls))) { | ||
526 | _debug("new call"); | ||
527 | |||
528 | /* don't need the notification */ | ||
529 | rxrpc_kernel_free_skb(skb); | ||
530 | |||
531 | if (!call) { | ||
532 | call = kzalloc(sizeof(struct afs_call), GFP_KERNEL); | ||
533 | if (!call) { | ||
534 | rxrpc_kernel_reject_call(afs_socket); | ||
535 | return; | ||
536 | } | ||
537 | |||
538 | INIT_WORK(&call->async_work, afs_process_async_call); | ||
539 | call->wait_mode = &afs_async_incoming_call; | ||
540 | call->type = &afs_RXCMxxxx; | ||
541 | init_waitqueue_head(&call->waitq); | ||
542 | skb_queue_head_init(&call->rx_queue); | ||
543 | call->state = AFS_CALL_AWAIT_OP_ID; | ||
544 | } | ||
545 | |||
546 | rxcall = rxrpc_kernel_accept_call(afs_socket, | ||
547 | (unsigned long) call); | ||
548 | if (!IS_ERR(rxcall)) { | ||
549 | call->rxcall = rxcall; | ||
550 | call = NULL; | ||
551 | } | ||
552 | } | ||
553 | |||
554 | kfree(call); | ||
555 | } | ||
556 | |||
557 | /* | ||
558 | * grab the operation ID from an incoming cache manager call | ||
559 | */ | ||
560 | static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb, | ||
561 | bool last) | ||
562 | { | ||
563 | size_t len = skb->len; | ||
564 | void *oibuf = (void *) &call->operation_ID; | ||
565 | |||
566 | _enter("{%u},{%zu},%d", call->offset, len, last); | ||
567 | |||
568 | ASSERTCMP(call->offset, <, 4); | ||
569 | |||
570 | /* the operation ID forms the first four bytes of the request data */ | ||
571 | len = min_t(size_t, len, 4 - call->offset); | ||
572 | if (skb_copy_bits(skb, 0, oibuf + call->offset, len) < 0) | ||
573 | BUG(); | ||
574 | if (!pskb_pull(skb, len)) | ||
575 | BUG(); | ||
576 | call->offset += len; | ||
577 | |||
578 | if (call->offset < 4) { | ||
579 | if (last) { | ||
580 | _leave(" = -EBADMSG [op ID short]"); | ||
581 | return -EBADMSG; | ||
582 | } | ||
583 | _leave(" = 0 [incomplete]"); | ||
584 | return 0; | ||
585 | } | ||
586 | |||
587 | call->state = AFS_CALL_AWAIT_REQUEST; | ||
588 | |||
589 | /* ask the cache manager to route the call (it'll change the call type | ||
590 | * if successful) */ | ||
591 | if (!afs_cm_incoming_call(call)) | ||
592 | return -ENOTSUPP; | ||
593 | |||
594 | /* pass responsibility for the remainder of this message off to the | ||
595 | * cache manager op */ | ||
596 | return call->type->deliver(call, skb, last); | ||
597 | } | ||
598 | |||
599 | /* | ||
600 | * send an empty reply | ||
601 | */ | ||
602 | void afs_send_empty_reply(struct afs_call *call) | ||
603 | { | ||
604 | struct msghdr msg; | ||
605 | struct iovec iov[1]; | ||
606 | |||
607 | _enter(""); | ||
608 | |||
609 | iov[0].iov_base = NULL; | ||
610 | iov[0].iov_len = 0; | ||
611 | msg.msg_name = NULL; | ||
612 | msg.msg_namelen = 0; | ||
613 | msg.msg_iov = iov; | ||
614 | msg.msg_iovlen = 0; | ||
615 | msg.msg_control = NULL; | ||
616 | msg.msg_controllen = 0; | ||
617 | msg.msg_flags = 0; | ||
618 | |||
619 | call->state = AFS_CALL_AWAIT_ACK; | ||
620 | switch (rxrpc_kernel_send_data(call->rxcall, &msg, 0)) { | ||
621 | case 0: | ||
622 | _leave(" [replied]"); | ||
623 | return; | ||
624 | |||
625 | case -ENOMEM: | ||
626 | _debug("oom"); | ||
627 | rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT); | ||
628 | default: | ||
629 | rxrpc_kernel_end_call(call->rxcall); | ||
630 | call->rxcall = NULL; | ||
631 | call->type->destructor(call); | ||
632 | ASSERT(skb_queue_empty(&call->rx_queue)); | ||
633 | kfree(call); | ||
634 | _leave(" [error]"); | ||
635 | return; | ||
636 | } | ||
637 | } | ||
638 | |||
639 | /* | ||
640 | * extract a piece of data from the received data socket buffers | ||
641 | */ | ||
642 | int afs_extract_data(struct afs_call *call, struct sk_buff *skb, | ||
643 | bool last, void *buf, size_t count) | ||
644 | { | ||
645 | size_t len = skb->len; | ||
646 | |||
647 | _enter("{%u},{%zu},%d,,%zu", call->offset, len, last, count); | ||
648 | |||
649 | ASSERTCMP(call->offset, <, count); | ||
650 | |||
651 | len = min_t(size_t, len, count - call->offset); | ||
652 | if (skb_copy_bits(skb, 0, buf + call->offset, len) < 0 || | ||
653 | !pskb_pull(skb, len)) | ||
654 | BUG(); | ||
655 | call->offset += len; | ||
656 | |||
657 | if (call->offset < count) { | ||
658 | if (last) { | ||
659 | _leave(" = -EBADMSG [%d < %lu]", call->offset, count); | ||
660 | return -EBADMSG; | ||
661 | } | ||
662 | _leave(" = -EAGAIN"); | ||
663 | return -EAGAIN; | ||
664 | } | ||
665 | return 0; | ||
666 | } | ||
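afs_extract_data() lets a delivery routine pull a fixed-size field out of the reply a few socket buffers at a time, tracking progress in call->offset. A minimal sketch of a consumer (e.g. something in fsclient.c or cmservice.c) is shown below; the wire format is an assumption, the return-code handling mirrors the function above.

	/* sketch: consuming one 32-bit field with afs_extract_data() */
	static int example_deliver(struct afs_call *call, struct sk_buff *skb,
				   bool last)
	{
		__be32 tmp;
		int ret;

		ret = afs_extract_data(call, skb, last, &tmp, 4);
		switch (ret) {
		case 0:		break;		/* got all four bytes */
		case -EAGAIN:	return 0;	/* wait for more packets */
		default:	return ret;	/* -EBADMSG on a short final skb */
		}

		call->offset = 0;		/* reset for the next field */
		_debug("value %u", ntohl(tmp));
		return 0;
	}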
diff --git a/fs/afs/server.c b/fs/afs/server.c index 44b0ce53e91..bde6125c2f2 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* AFS server record management | 1 | /* AFS server record management |
2 | * | 2 | * |
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -11,127 +11,205 @@ | |||
11 | 11 | ||
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <rxrpc/peer.h> | ||
15 | #include <rxrpc/connection.h> | ||
16 | #include "volume.h" | ||
17 | #include "cell.h" | ||
18 | #include "server.h" | ||
19 | #include "transport.h" | ||
20 | #include "vlclient.h" | ||
21 | #include "kafstimod.h" | ||
22 | #include "internal.h" | 14 | #include "internal.h" |
23 | 15 | ||
24 | DEFINE_SPINLOCK(afs_server_peer_lock); | 16 | unsigned afs_server_timeout = 10; /* server timeout in seconds */ |
25 | 17 | ||
26 | #define FS_SERVICE_ID 1 /* AFS Volume Location Service ID */ | 18 | static void afs_reap_server(struct work_struct *); |
27 | #define VL_SERVICE_ID 52 /* AFS Volume Location Service ID */ | ||
28 | 19 | ||
29 | static void __afs_server_timeout(struct afs_timer *timer) | 20 | /* tree of all the servers, indexed by IP address */ |
21 | static struct rb_root afs_servers = RB_ROOT; | ||
22 | static DEFINE_RWLOCK(afs_servers_lock); | ||
23 | |||
24 | /* LRU list of all the servers not currently in use */ | ||
25 | static LIST_HEAD(afs_server_graveyard); | ||
26 | static DEFINE_SPINLOCK(afs_server_graveyard_lock); | ||
27 | static DECLARE_DELAYED_WORK(afs_server_reaper, afs_reap_server); | ||
28 | |||
29 | /* | ||
30 | * install a server record in the master tree | ||
31 | */ | ||
32 | static int afs_install_server(struct afs_server *server) | ||
30 | { | 33 | { |
31 | struct afs_server *server = | 34 | struct afs_server *xserver; |
32 | list_entry(timer, struct afs_server, timeout); | 35 | struct rb_node **pp, *p; |
36 | int ret; | ||
37 | |||
38 | _enter("%p", server); | ||
33 | 39 | ||
34 | _debug("SERVER TIMEOUT [%p{u=%d}]", | 40 | write_lock(&afs_servers_lock); |
35 | server, atomic_read(&server->usage)); | 41 | |
42 | ret = -EEXIST; | ||
43 | pp = &afs_servers.rb_node; | ||
44 | p = NULL; | ||
45 | while (*pp) { | ||
46 | p = *pp; | ||
47 | _debug("- consider %p", p); | ||
48 | xserver = rb_entry(p, struct afs_server, master_rb); | ||
49 | if (server->addr.s_addr < xserver->addr.s_addr) | ||
50 | pp = &(*pp)->rb_left; | ||
51 | else if (server->addr.s_addr > xserver->addr.s_addr) | ||
52 | pp = &(*pp)->rb_right; | ||
53 | else | ||
54 | goto error; | ||
55 | } | ||
36 | 56 | ||
37 | afs_server_do_timeout(server); | 57 | rb_link_node(&server->master_rb, p, pp); |
38 | } | 58 | rb_insert_color(&server->master_rb, &afs_servers); |
59 | ret = 0; | ||
39 | 60 | ||
40 | static const struct afs_timer_ops afs_server_timer_ops = { | 61 | error: |
41 | .timed_out = __afs_server_timeout, | 62 | write_unlock(&afs_servers_lock); |
42 | }; | 63 | return ret; |
64 | } | ||
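afs_install_server() indexes every server by IP address in the global afs_servers rbtree. The complementary removal happens when a record is finally destroyed by the reaper; that path is outside the hunks shown here, so the sketch below is an assumption about its shape rather than a quote from the patch.

	/* sketch (assumed): removal a destructor/reaper would perform */
	static void example_remove_server(struct afs_server *server)
	{
		write_lock(&afs_servers_lock);
		rb_erase(&server->master_rb, &afs_servers);
		write_unlock(&afs_servers_lock);
	}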
43 | 65 | ||
44 | /* | 66 | /* |
45 | * lookup a server record in a cell | 67 | * allocate a new server record |
46 | * - TODO: search the cell's server list | ||
47 | */ | 68 | */ |
48 | int afs_server_lookup(struct afs_cell *cell, const struct in_addr *addr, | 69 | static struct afs_server *afs_alloc_server(struct afs_cell *cell, |
49 | struct afs_server **_server) | 70 | const struct in_addr *addr) |
50 | { | 71 | { |
51 | struct afs_server *server, *active, *zombie; | 72 | struct afs_server *server; |
52 | int loop; | ||
53 | 73 | ||
54 | _enter("%p,%08x,", cell, ntohl(addr->s_addr)); | 74 | _enter(""); |
55 | 75 | ||
56 | /* allocate and initialise a server record */ | ||
57 | server = kzalloc(sizeof(struct afs_server), GFP_KERNEL); | 76 | server = kzalloc(sizeof(struct afs_server), GFP_KERNEL); |
58 | if (!server) { | 77 | if (server) { |
59 | _leave(" = -ENOMEM"); | 78 | atomic_set(&server->usage, 1); |
60 | return -ENOMEM; | 79 | server->cell = cell; |
80 | |||
81 | INIT_LIST_HEAD(&server->link); | ||
82 | INIT_LIST_HEAD(&server->grave); | ||
83 | init_rwsem(&server->sem); | ||
84 | spin_lock_init(&server->fs_lock); | ||
85 | server->fs_vnodes = RB_ROOT; | ||
86 | server->cb_promises = RB_ROOT; | ||
87 | spin_lock_init(&server->cb_lock); | ||
88 | init_waitqueue_head(&server->cb_break_waitq); | ||
89 | INIT_DELAYED_WORK(&server->cb_break_work, | ||
90 | afs_dispatch_give_up_callbacks); | ||
91 | |||
92 | memcpy(&server->addr, addr, sizeof(struct in_addr)); | ||
93 | server->addr.s_addr = addr->s_addr; | ||
61 | } | 94 | } |
62 | 95 | ||
63 | atomic_set(&server->usage, 1); | 96 | _leave(" = %p{%d}", server, atomic_read(&server->usage)); |
64 | 97 | return server; | |
65 | INIT_LIST_HEAD(&server->link); | 98 | } |
66 | init_rwsem(&server->sem); | ||
67 | INIT_LIST_HEAD(&server->fs_callq); | ||
68 | spin_lock_init(&server->fs_lock); | ||
69 | INIT_LIST_HEAD(&server->cb_promises); | ||
70 | spin_lock_init(&server->cb_lock); | ||
71 | |||
72 | for (loop = 0; loop < AFS_SERVER_CONN_LIST_SIZE; loop++) | ||
73 | server->fs_conn_cnt[loop] = 4; | ||
74 | 99 | ||
75 | memcpy(&server->addr, addr, sizeof(struct in_addr)); | 100 | /* |
76 | server->addr.s_addr = addr->s_addr; | 101 | * get an FS-server record for a cell |
102 | */ | ||
103 | struct afs_server *afs_lookup_server(struct afs_cell *cell, | ||
104 | const struct in_addr *addr) | ||
105 | { | ||
106 | struct afs_server *server, *candidate; | ||
77 | 107 | ||
78 | afs_timer_init(&server->timeout, &afs_server_timer_ops); | 108 | _enter("%p,"NIPQUAD_FMT, cell, NIPQUAD(addr->s_addr)); |
79 | 109 | ||
80 | /* add to the cell */ | 110 | /* quick scan of the list to see if we already have the server */ |
81 | write_lock(&cell->sv_lock); | 111 | read_lock(&cell->servers_lock); |
82 | 112 | ||
83 | /* check the active list */ | 113 | list_for_each_entry(server, &cell->servers, link) { |
84 | list_for_each_entry(active, &cell->sv_list, link) { | 114 | if (server->addr.s_addr == addr->s_addr) |
85 | if (active->addr.s_addr == addr->s_addr) | 115 | goto found_server_quickly; |
86 | goto use_active_server; | ||
87 | } | 116 | } |
117 | read_unlock(&cell->servers_lock); | ||
88 | 118 | ||
89 | /* check the inactive list */ | 119 | candidate = afs_alloc_server(cell, addr); |
90 | spin_lock(&cell->sv_gylock); | 120 | if (!candidate) { |
91 | list_for_each_entry(zombie, &cell->sv_graveyard, link) { | 121 | _leave(" = -ENOMEM"); |
92 | if (zombie->addr.s_addr == addr->s_addr) | 122 | return ERR_PTR(-ENOMEM); |
93 | goto resurrect_server; | ||
94 | } | 123 | } |
95 | spin_unlock(&cell->sv_gylock); | ||
96 | 124 | ||
97 | afs_get_cell(cell); | 125 | write_lock(&cell->servers_lock); |
98 | server->cell = cell; | ||
99 | list_add_tail(&server->link, &cell->sv_list); | ||
100 | 126 | ||
101 | write_unlock(&cell->sv_lock); | 127 | /* check the cell's server list again */ |
128 | list_for_each_entry(server, &cell->servers, link) { | ||
129 | if (server->addr.s_addr == addr->s_addr) | ||
130 | goto found_server; | ||
131 | } | ||
132 | |||
133 | _debug("new"); | ||
134 | server = candidate; | ||
135 | if (afs_install_server(server) < 0) | ||
136 | goto server_in_two_cells; | ||
102 | 137 | ||
103 | *_server = server; | 138 | afs_get_cell(cell); |
104 | _leave(" = 0 (%p)", server); | 139 | list_add_tail(&server->link, &cell->servers); |
105 | return 0; | 140 | |
141 | write_unlock(&cell->servers_lock); | ||
142 | _leave(" = %p{%d}", server, atomic_read(&server->usage)); | ||
143 | return server; | ||
144 | |||
145 | /* found a matching server quickly */ | ||
146 | found_server_quickly: | ||
147 | _debug("found quickly"); | ||
148 | afs_get_server(server); | ||
149 | read_unlock(&cell->servers_lock); | ||
150 | no_longer_unused: | ||
151 | if (!list_empty(&server->grave)) { | ||
152 | spin_lock(&afs_server_graveyard_lock); | ||
153 | list_del_init(&server->grave); | ||
154 | spin_unlock(&afs_server_graveyard_lock); | ||
155 | } | ||
156 | _leave(" = %p{%d}", server, atomic_read(&server->usage)); | ||
157 | return server; | ||
158 | |||
159 | /* found a matching server on the second pass */ | ||
160 | found_server: | ||
161 | _debug("found"); | ||
162 | afs_get_server(server); | ||
163 | write_unlock(&cell->servers_lock); | ||
164 | kfree(candidate); | ||
165 | goto no_longer_unused; | ||
166 | |||
167 | /* found a server that seems to be in two cells */ | ||
168 | server_in_two_cells: | ||
169 | write_unlock(&cell->servers_lock); | ||
170 | kfree(candidate); | ||
171 | printk(KERN_NOTICE "kAFS:" | ||
172 | " Server "NIPQUAD_FMT" appears to be in two cells\n", | ||
173 | NIPQUAD(*addr)); | ||
174 | _leave(" = -EEXIST"); | ||
175 | return ERR_PTR(-EEXIST); | ||
176 | } | ||
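The new afs_lookup_server() above follows a classic lookup-or-create shape: a quick scan under the read lock, allocation of a candidate with no locks held, then a re-scan under the write lock before either installing the candidate or discarding it because another thread got there first. The user-space sketch below shows the same pattern with a pthreads read-write lock; every name in it (record, find_or_create, and so on) is hypothetical and it is not kernel code.

/* Minimal sketch of the lookup-or-create pattern used by afs_lookup_server():
 * read-lock scan, unlocked allocation, write-lock re-scan, install or discard.
 * All names are illustrative. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct record {
	struct record *next;
	uint32_t key;		/* e.g. a server's IPv4 address */
	atomic_int usage;	/* reference count */
};

static struct record *records;
static pthread_rwlock_t records_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct record *scan(uint32_t key)
{
	struct record *r;

	for (r = records; r; r = r->next)
		if (r->key == key)
			return r;
	return NULL;
}

static struct record *find_or_create(uint32_t key)
{
	struct record *r, *candidate;

	/* quick scan of the list under the read lock */
	pthread_rwlock_rdlock(&records_lock);
	r = scan(key);
	if (r)
		atomic_fetch_add(&r->usage, 1);
	pthread_rwlock_unlock(&records_lock);
	if (r)
		return r;

	/* allocate a candidate record with no locks held */
	candidate = calloc(1, sizeof(*candidate));
	if (!candidate)
		return NULL;
	candidate->key = key;
	atomic_init(&candidate->usage, 1);

	/* re-check under the write lock; someone may have beaten us to it */
	pthread_rwlock_wrlock(&records_lock);
	r = scan(key);
	if (r) {
		atomic_fetch_add(&r->usage, 1);
		free(candidate);		/* lost the race: discard our copy */
	} else {
		candidate->next = records;	/* install the new record */
		records = candidate;
		r = candidate;
	}
	pthread_rwlock_unlock(&records_lock);
	return r;
}

int main(void)
{
	struct record *r = find_or_create(0xac101249);

	if (!r)
		return 1;
	printf("key %#x, usage %d\n", (unsigned)r->key, atomic_load(&r->usage));
	return 0;
}

The point of allocating before retaking the lock is that the allocation may sleep; re-scanning afterwards is what keeps a server from being installed twice.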
106 | 177 | ||
107 | /* found a matching active server */ | 178 | /* |
108 | use_active_server: | 179 | * look up a server by its IP address |
109 | _debug("active server"); | 180 | */ |
110 | afs_get_server(active); | 181 | struct afs_server *afs_find_server(const struct in_addr *_addr) |
111 | write_unlock(&cell->sv_lock); | 182 | { |
183 | struct afs_server *server = NULL; | ||
184 | struct rb_node *p; | ||
185 | struct in_addr addr = *_addr; | ||
112 | 186 | ||
113 | kfree(server); | 187 | _enter(NIPQUAD_FMT, NIPQUAD(addr.s_addr)); |
114 | 188 | ||
115 | *_server = active; | 189 | read_lock(&afs_servers_lock); |
116 | _leave(" = 0 (%p)", active); | ||
117 | return 0; | ||
118 | 190 | ||
119 | /* found a matching server in the graveyard, so resurrect it and | 191 | p = afs_servers.rb_node; |
120 | * dispose of the new record */ | 192 | while (p) { |
121 | resurrect_server: | 193 | server = rb_entry(p, struct afs_server, master_rb); |
122 | _debug("resurrecting server"); | ||
123 | 194 | ||
124 | list_move_tail(&zombie->link, &cell->sv_list); | 195 | _debug("- consider %p", p); |
125 | afs_get_server(zombie); | ||
126 | afs_kafstimod_del_timer(&zombie->timeout); | ||
127 | spin_unlock(&cell->sv_gylock); | ||
128 | write_unlock(&cell->sv_lock); | ||
129 | 196 | ||
130 | kfree(server); | 197 | if (addr.s_addr < server->addr.s_addr) { |
198 | p = p->rb_left; | ||
199 | } else if (addr.s_addr > server->addr.s_addr) { | ||
200 | p = p->rb_right; | ||
201 | } else { | ||
202 | afs_get_server(server); | ||
203 | goto found; | ||
204 | } | ||
205 | } | ||
131 | 206 | ||
132 | *_server = zombie; | 207 | server = NULL; |
133 | _leave(" = 0 (%p)", zombie); | 208 | found: |
134 | return 0; | 209 | read_unlock(&afs_servers_lock); |
210 | ASSERTIFCMP(server, server->addr.s_addr, ==, addr.s_addr); | ||
211 | _leave(" = %p", server); | ||
212 | return server; | ||
135 | } | 213 | } |
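afs_find_server() keeps one global rb-tree of servers keyed by IPv4 address and descends it with a plain three-way comparison on s_addr. The sketch below mimics that descent with an ordinary unbalanced binary search tree in user space; the kernel's rb_node/rb_entry machinery and rebalancing are omitted, and the types and helpers are illustrative rather than kernel API.

/* Sketch of the descent afs_find_server() performs: order nodes by the raw
 * (network byte order) value of the IPv4 address and walk left/right until
 * an exact match is found.  Only consistency of the ordering matters, so
 * comparing the network-byte-order words directly is fine. */
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *left, *right;
	struct in_addr addr;
};

static struct node *tree_find(struct node *p, struct in_addr addr)
{
	while (p) {
		if (addr.s_addr < p->addr.s_addr)
			p = p->left;
		else if (addr.s_addr > p->addr.s_addr)
			p = p->right;
		else
			return p;	/* exact match */
	}
	return NULL;
}

static struct node *tree_insert(struct node **pp, struct in_addr addr)
{
	while (*pp) {
		if (addr.s_addr < (*pp)->addr.s_addr)
			pp = &(*pp)->left;
		else if (addr.s_addr > (*pp)->addr.s_addr)
			pp = &(*pp)->right;
		else
			return *pp;	/* already present */
	}
	*pp = calloc(1, sizeof(**pp));
	if (*pp)
		(*pp)->addr = addr;
	return *pp;
}

int main(void)
{
	struct node *root = NULL;
	struct in_addr a;

	inet_pton(AF_INET, "172.16.18.73", &a);
	tree_insert(&root, a);
	printf("lookup: %s\n", tree_find(root, a) ? "found" : "not found");
	return 0;
}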
136 | 214 | ||
137 | /* | 215 | /* |
@@ -140,347 +218,105 @@ resurrect_server: | |||
140 | */ | 218 | */ |
141 | void afs_put_server(struct afs_server *server) | 219 | void afs_put_server(struct afs_server *server) |
142 | { | 220 | { |
143 | struct afs_cell *cell; | ||
144 | |||
145 | if (!server) | 221 | if (!server) |
146 | return; | 222 | return; |
147 | 223 | ||
148 | _enter("%p", server); | 224 | _enter("%p{%d}", server, atomic_read(&server->usage)); |
149 | |||
150 | cell = server->cell; | ||
151 | 225 | ||
152 | /* sanity check */ | 226 | ASSERTCMP(atomic_read(&server->usage), >, 0); |
153 | BUG_ON(atomic_read(&server->usage) <= 0); | ||
154 | |||
155 | /* to prevent a race, the decrement and the dequeue must be effectively | ||
156 | * atomic */ | ||
157 | write_lock(&cell->sv_lock); | ||
158 | 227 | ||
159 | if (likely(!atomic_dec_and_test(&server->usage))) { | 228 | if (likely(!atomic_dec_and_test(&server->usage))) { |
160 | write_unlock(&cell->sv_lock); | ||
161 | _leave(""); | 229 | _leave(""); |
162 | return; | 230 | return; |
163 | } | 231 | } |
164 | 232 | ||
165 | spin_lock(&cell->sv_gylock); | 233 | afs_flush_callback_breaks(server); |
166 | list_move_tail(&server->link, &cell->sv_graveyard); | ||
167 | |||
168 | /* time out in 10 secs */ | ||
169 | afs_kafstimod_add_timer(&server->timeout, 10 * HZ); | ||
170 | |||
171 | spin_unlock(&cell->sv_gylock); | ||
172 | write_unlock(&cell->sv_lock); | ||
173 | 234 | ||
174 | _leave(" [killed]"); | 235 | spin_lock(&afs_server_graveyard_lock); |
236 | if (atomic_read(&server->usage) == 0) { | ||
237 | list_move_tail(&server->grave, &afs_server_graveyard); | ||
238 | server->time_of_death = get_seconds(); | ||
239 | schedule_delayed_work(&afs_server_reaper, | ||
240 | afs_server_timeout * HZ); | ||
241 | } | ||
242 | spin_unlock(&afs_server_graveyard_lock); | ||
243 | _leave(" [dead]"); | ||
175 | } | 244 | } |
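When the last reference goes away, afs_put_server() now stamps the record with a time of death, parks it on a global graveyard and kicks a delayed work item; the reaper (afs_reap_server, later in this diff) only destroys records whose grace period has elapsed and re-arms itself for the earliest survivor. The sketch below shows just that grace-period arithmetic in plain C; AFS_SERVER_TIMEOUT is an illustrative stand-in for the module's timeout variable.

/* Grace-period arithmetic of the server reaper: a record stamped with
 * time_of_death may only be destroyed once the timeout has elapsed;
 * otherwise report how long to wait before looking at it again. */
#include <stdio.h>
#include <time.h>

#define AFS_SERVER_TIMEOUT 10	/* seconds; illustrative value */

static long reap_delay(time_t time_of_death, time_t now)
{
	time_t expiry = time_of_death + AFS_SERVER_TIMEOUT;

	return expiry > now ? (long)(expiry - now) : 0;	/* 0 => reap now */
}

int main(void)
{
	time_t now = time(NULL);

	printf("died 3s ago  -> wait %lds\n", reap_delay(now - 3, now));
	printf("died 15s ago -> wait %lds\n", reap_delay(now - 15, now));
	return 0;
}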
176 | 245 | ||
177 | /* | 246 | /* |
178 | * timeout server record | 247 | * destroy a dead server |
179 | * - removes from the cell's graveyard if the usage count is zero | ||
180 | */ | 248 | */ |
181 | void afs_server_do_timeout(struct afs_server *server) | 249 | static void afs_destroy_server(struct afs_server *server) |
182 | { | 250 | { |
183 | struct rxrpc_peer *peer; | ||
184 | struct afs_cell *cell; | ||
185 | int loop; | ||
186 | |||
187 | _enter("%p", server); | 251 | _enter("%p", server); |
188 | 252 | ||
189 | cell = server->cell; | 253 | ASSERTCMP(server->fs_vnodes.rb_node, ==, NULL); |
190 | 254 | ASSERTCMP(server->cb_promises.rb_node, ==, NULL); | |
191 | BUG_ON(atomic_read(&server->usage) < 0); | 255 | ASSERTCMP(server->cb_break_head, ==, server->cb_break_tail); |
192 | 256 | ASSERTCMP(atomic_read(&server->cb_break_n), ==, 0); | |
193 | /* remove from graveyard if still dead */ | ||
194 | spin_lock(&cell->vl_gylock); | ||
195 | if (atomic_read(&server->usage) == 0) | ||
196 | list_del_init(&server->link); | ||
197 | else | ||
198 | server = NULL; | ||
199 | spin_unlock(&cell->vl_gylock); | ||
200 | |||
201 | if (!server) { | ||
202 | _leave(""); | ||
203 | return; /* resurrected */ | ||
204 | } | ||
205 | |||
206 | /* we can now destroy it properly */ | ||
207 | afs_put_cell(cell); | ||
208 | |||
209 | /* uncross-point the structs under a global lock */ | ||
210 | spin_lock(&afs_server_peer_lock); | ||
211 | peer = server->peer; | ||
212 | if (peer) { | ||
213 | server->peer = NULL; | ||
214 | peer->user = NULL; | ||
215 | } | ||
216 | spin_unlock(&afs_server_peer_lock); | ||
217 | |||
218 | /* finish cleaning up the server */ | ||
219 | for (loop = AFS_SERVER_CONN_LIST_SIZE - 1; loop >= 0; loop--) | ||
220 | if (server->fs_conn[loop]) | ||
221 | rxrpc_put_connection(server->fs_conn[loop]); | ||
222 | |||
223 | if (server->vlserver) | ||
224 | rxrpc_put_connection(server->vlserver); | ||
225 | 257 | ||
258 | afs_put_cell(server->cell); | ||
226 | kfree(server); | 259 | kfree(server); |
227 | |||
228 | _leave(" [destroyed]"); | ||
229 | } | 260 | } |
230 | 261 | ||
231 | /* | 262 | /* |
232 | * get a callslot on a connection to the fileserver on the specified server | 263 | * reap dead server records |
233 | */ | 264 | */ |
234 | int afs_server_request_callslot(struct afs_server *server, | 265 | static void afs_reap_server(struct work_struct *work) |
235 | struct afs_server_callslot *callslot) | ||
236 | { | 266 | { |
237 | struct afs_server_callslot *pcallslot; | 267 | LIST_HEAD(corpses); |
238 | struct rxrpc_connection *conn; | 268 | struct afs_server *server; |
239 | int nconn, ret; | 269 | unsigned long delay, expiry; |
240 | 270 | time_t now; | |
241 | _enter("%p,",server); | 271 | |
242 | 272 | now = get_seconds(); | |
243 | INIT_LIST_HEAD(&callslot->link); | 273 | spin_lock(&afs_server_graveyard_lock); |
244 | callslot->task = current; | 274 | |
245 | callslot->conn = NULL; | 275 | while (!list_empty(&afs_server_graveyard)) { |
246 | callslot->nconn = -1; | 276 | server = list_entry(afs_server_graveyard.next, |
247 | callslot->ready = 0; | 277 | struct afs_server, grave); |
248 | 278 | ||
249 | ret = 0; | 279 | /* the queue is ordered most dead first */ |
250 | conn = NULL; | 280 | expiry = server->time_of_death + afs_server_timeout; |
251 | 281 | if (expiry > now) { | |
252 | /* get hold of a callslot first */ | 282 | delay = (expiry - now) * HZ; |
253 | spin_lock(&server->fs_lock); | 283 | if (!schedule_delayed_work(&afs_server_reaper, delay)) { |
254 | 284 | cancel_delayed_work(&afs_server_reaper); | |
255 | /* resurrect the server if its death timeout has expired */ | 285 | schedule_delayed_work(&afs_server_reaper, |
256 | if (server->fs_state) { | 286 | delay); |
257 | if (time_before(jiffies, server->fs_dead_jif)) { | 287 | } |
258 | ret = server->fs_state; | 288 | break; |
259 | spin_unlock(&server->fs_lock); | ||
260 | _leave(" = %d [still dead]", ret); | ||
261 | return ret; | ||
262 | } | 289 | } |
263 | 290 | ||
264 | server->fs_state = 0; | 291 | write_lock(&server->cell->servers_lock); |
265 | } | 292 | write_lock(&afs_servers_lock); |
266 | 293 | if (atomic_read(&server->usage) > 0) { | |
267 | /* try and find a connection that has spare callslots */ | 294 | list_del_init(&server->grave); |
268 | for (nconn = 0; nconn < AFS_SERVER_CONN_LIST_SIZE; nconn++) { | 295 | } else { |
269 | if (server->fs_conn_cnt[nconn] > 0) { | 296 | list_move_tail(&server->grave, &corpses); |
270 | server->fs_conn_cnt[nconn]--; | 297 | list_del_init(&server->link); |
271 | spin_unlock(&server->fs_lock); | 298 | rb_erase(&server->master_rb, &afs_servers); |
272 | callslot->nconn = nconn; | ||
273 | goto obtained_slot; | ||
274 | } | 299 | } |
300 | write_unlock(&afs_servers_lock); | ||
301 | write_unlock(&server->cell->servers_lock); | ||
275 | } | 302 | } |
276 | 303 | ||
277 | /* none were available - wait interruptibly for one to become | 304 | spin_unlock(&afs_server_graveyard_lock); |
278 | * available */ | ||
279 | set_current_state(TASK_INTERRUPTIBLE); | ||
280 | list_add_tail(&callslot->link, &server->fs_callq); | ||
281 | spin_unlock(&server->fs_lock); | ||
282 | |||
283 | while (!callslot->ready && !signal_pending(current)) { | ||
284 | schedule(); | ||
285 | set_current_state(TASK_INTERRUPTIBLE); | ||
286 | } | ||
287 | |||
288 | set_current_state(TASK_RUNNING); | ||
289 | |||
290 | /* even if we were interrupted we may still be queued */ | ||
291 | if (!callslot->ready) { | ||
292 | spin_lock(&server->fs_lock); | ||
293 | list_del_init(&callslot->link); | ||
294 | spin_unlock(&server->fs_lock); | ||
295 | } | ||
296 | |||
297 | nconn = callslot->nconn; | ||
298 | |||
299 | /* if interrupted, we must release any slot we also got before | ||
300 | * returning an error */ | ||
301 | if (signal_pending(current)) { | ||
302 | ret = -EINTR; | ||
303 | goto error_release; | ||
304 | } | ||
305 | |||
306 | /* if we were woken up with an error, then pass that error back to the | ||
307 | * caller */ | ||
308 | if (nconn < 0) { | ||
309 | _leave(" = %d", callslot->errno); | ||
310 | return callslot->errno; | ||
311 | } | ||
312 | |||
313 | /* were we given a connection directly? */ | ||
314 | if (callslot->conn) { | ||
315 | /* yes - use it */ | ||
316 | _leave(" = 0 (nc=%d)", nconn); | ||
317 | return 0; | ||
318 | } | ||
319 | 305 | ||
320 | /* got a callslot, but no connection */ | 306 | /* now reap the corpses we've extracted */ |
321 | obtained_slot: | 307 | while (!list_empty(&corpses)) { |
322 | 308 | server = list_entry(corpses.next, struct afs_server, grave); | |
323 | /* need to get hold of the RxRPC connection */ | 309 | list_del(&server->grave); |
324 | down_write(&server->sem); | 310 | afs_destroy_server(server); |
325 | |||
326 | /* quick check to see if there's an outstanding error */ | ||
327 | ret = server->fs_state; | ||
328 | if (ret) | ||
329 | goto error_release_upw; | ||
330 | |||
331 | if (server->fs_conn[nconn]) { | ||
332 | /* reuse an existing connection */ | ||
333 | rxrpc_get_connection(server->fs_conn[nconn]); | ||
334 | callslot->conn = server->fs_conn[nconn]; | ||
335 | } else { | ||
336 | /* create a new connection */ | ||
337 | ret = rxrpc_create_connection(afs_transport, | ||
338 | htons(7000), | ||
339 | server->addr.s_addr, | ||
340 | FS_SERVICE_ID, | ||
341 | NULL, | ||
342 | &server->fs_conn[nconn]); | ||
343 | |||
344 | if (ret < 0) | ||
345 | goto error_release_upw; | ||
346 | |||
347 | callslot->conn = server->fs_conn[0]; | ||
348 | rxrpc_get_connection(callslot->conn); | ||
349 | } | 311 | } |
350 | |||
351 | up_write(&server->sem); | ||
352 | |||
353 | _leave(" = 0"); | ||
354 | return 0; | ||
355 | |||
356 | /* handle an error occurring */ | ||
357 | error_release_upw: | ||
358 | up_write(&server->sem); | ||
359 | |||
360 | error_release: | ||
361 | /* either release the callslot or pass it along to another deserving | ||
362 | * task */ | ||
363 | spin_lock(&server->fs_lock); | ||
364 | |||
365 | if (nconn < 0) { | ||
366 | /* no callslot allocated */ | ||
367 | } else if (list_empty(&server->fs_callq)) { | ||
368 | /* no one waiting */ | ||
369 | server->fs_conn_cnt[nconn]++; | ||
370 | spin_unlock(&server->fs_lock); | ||
371 | } else { | ||
372 | /* someone's waiting - dequeue them and wake them up */ | ||
373 | pcallslot = list_entry(server->fs_callq.next, | ||
374 | struct afs_server_callslot, link); | ||
375 | list_del_init(&pcallslot->link); | ||
376 | |||
377 | pcallslot->errno = server->fs_state; | ||
378 | if (!pcallslot->errno) { | ||
379 | /* pass the callslot details out to them */ | ||
380 | callslot->conn = xchg(&pcallslot->conn, | ||
381 | callslot->conn); | ||
382 | pcallslot->nconn = nconn; | ||
383 | callslot->nconn = nconn = -1; | ||
384 | } | ||
385 | pcallslot->ready = 1; | ||
386 | wake_up_process(pcallslot->task); | ||
387 | spin_unlock(&server->fs_lock); | ||
388 | } | ||
389 | |||
390 | rxrpc_put_connection(callslot->conn); | ||
391 | callslot->conn = NULL; | ||
392 | |||
393 | _leave(" = %d", ret); | ||
394 | return ret; | ||
395 | } | 312 | } |
396 | 313 | ||
397 | /* | 314 | /* |
398 | * release a callslot back to the server | 315 | * discard all the server records for rmmod |
399 | * - transfers the RxRPC connection to the next pending callslot if possible | ||
400 | */ | 316 | */ |
401 | void afs_server_release_callslot(struct afs_server *server, | 317 | void __exit afs_purge_servers(void) |
402 | struct afs_server_callslot *callslot) | ||
403 | { | 318 | { |
404 | struct afs_server_callslot *pcallslot; | 319 | afs_server_timeout = 0; |
405 | 320 | cancel_delayed_work(&afs_server_reaper); | |
406 | _enter("{ad=%08x,cnt=%u},{%d}", | 321 | schedule_delayed_work(&afs_server_reaper, 0); |
407 | ntohl(server->addr.s_addr), | ||
408 | server->fs_conn_cnt[callslot->nconn], | ||
409 | callslot->nconn); | ||
410 | |||
411 | BUG_ON(callslot->nconn < 0); | ||
412 | |||
413 | spin_lock(&server->fs_lock); | ||
414 | |||
415 | if (list_empty(&server->fs_callq)) { | ||
416 | /* no one waiting */ | ||
417 | server->fs_conn_cnt[callslot->nconn]++; | ||
418 | spin_unlock(&server->fs_lock); | ||
419 | } else { | ||
420 | /* someone's waiting - dequeue them and wake them up */ | ||
421 | pcallslot = list_entry(server->fs_callq.next, | ||
422 | struct afs_server_callslot, link); | ||
423 | list_del_init(&pcallslot->link); | ||
424 | |||
425 | pcallslot->errno = server->fs_state; | ||
426 | if (!pcallslot->errno) { | ||
427 | /* pass the callslot details out to them */ | ||
428 | callslot->conn = xchg(&pcallslot->conn, callslot->conn); | ||
429 | pcallslot->nconn = callslot->nconn; | ||
430 | callslot->nconn = -1; | ||
431 | } | ||
432 | |||
433 | pcallslot->ready = 1; | ||
434 | wake_up_process(pcallslot->task); | ||
435 | spin_unlock(&server->fs_lock); | ||
436 | } | ||
437 | |||
438 | rxrpc_put_connection(callslot->conn); | ||
439 | |||
440 | _leave(""); | ||
441 | } | ||
442 | |||
443 | /* | ||
444 | * get a handle to a connection to the vlserver (volume location) on the | ||
445 | * specified server | ||
446 | */ | ||
447 | int afs_server_get_vlconn(struct afs_server *server, | ||
448 | struct rxrpc_connection **_conn) | ||
449 | { | ||
450 | struct rxrpc_connection *conn; | ||
451 | int ret; | ||
452 | |||
453 | _enter("%p,", server); | ||
454 | |||
455 | ret = 0; | ||
456 | conn = NULL; | ||
457 | down_read(&server->sem); | ||
458 | |||
459 | if (server->vlserver) { | ||
460 | /* reuse an existing connection */ | ||
461 | rxrpc_get_connection(server->vlserver); | ||
462 | conn = server->vlserver; | ||
463 | up_read(&server->sem); | ||
464 | } else { | ||
465 | /* create a new connection */ | ||
466 | up_read(&server->sem); | ||
467 | down_write(&server->sem); | ||
468 | if (!server->vlserver) { | ||
469 | ret = rxrpc_create_connection(afs_transport, | ||
470 | htons(7003), | ||
471 | server->addr.s_addr, | ||
472 | VL_SERVICE_ID, | ||
473 | NULL, | ||
474 | &server->vlserver); | ||
475 | } | ||
476 | if (ret == 0) { | ||
477 | rxrpc_get_connection(server->vlserver); | ||
478 | conn = server->vlserver; | ||
479 | } | ||
480 | up_write(&server->sem); | ||
481 | } | ||
482 | |||
483 | *_conn = conn; | ||
484 | _leave(" = %d", ret); | ||
485 | return ret; | ||
486 | } | 322 | } |
diff --git a/fs/afs/server.h b/fs/afs/server.h deleted file mode 100644 index e1a006829b5..00000000000 --- a/fs/afs/server.h +++ /dev/null | |||
@@ -1,97 +0,0 @@ | |||
1 | /* AFS server record | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef AFS_SERVER_H | ||
13 | #define AFS_SERVER_H | ||
14 | |||
15 | #include "types.h" | ||
16 | #include "kafstimod.h" | ||
17 | #include <rxrpc/peer.h> | ||
18 | #include <linux/rwsem.h> | ||
19 | |||
20 | extern spinlock_t afs_server_peer_lock; | ||
21 | |||
22 | /* | ||
23 | * AFS server record | ||
24 | */ | ||
25 | struct afs_server { | ||
26 | atomic_t usage; | ||
27 | struct afs_cell *cell; /* cell in which server resides */ | ||
28 | struct list_head link; /* link in cell's server list */ | ||
29 | struct rw_semaphore sem; /* access lock */ | ||
30 | struct afs_timer timeout; /* graveyard timeout */ | ||
31 | struct in_addr addr; /* server address */ | ||
32 | struct rxrpc_peer *peer; /* peer record for this server */ | ||
33 | struct rxrpc_connection *vlserver; /* connection to the volume location service */ | ||
34 | |||
35 | /* file service access */ | ||
36 | #define AFS_SERVER_CONN_LIST_SIZE 2 | ||
37 | struct rxrpc_connection *fs_conn[AFS_SERVER_CONN_LIST_SIZE]; /* FS connections */ | ||
38 | unsigned fs_conn_cnt[AFS_SERVER_CONN_LIST_SIZE]; /* per conn call count */ | ||
39 | struct list_head fs_callq; /* queue of processes waiting to make a call */ | ||
40 | spinlock_t fs_lock; /* access lock */ | ||
41 | int fs_state; /* 0 or reason FS currently marked dead (-errno) */ | ||
42 | unsigned fs_rtt; /* FS round trip time */ | ||
43 | unsigned long fs_act_jif; /* time at which last activity occurred */ | ||
44 | unsigned long fs_dead_jif; /* time at which no longer to be considered dead */ | ||
45 | |||
46 | /* callback promise management */ | ||
47 | struct list_head cb_promises; /* as yet unbroken promises from this server */ | ||
48 | spinlock_t cb_lock; /* access lock */ | ||
49 | }; | ||
50 | |||
51 | extern int afs_server_lookup(struct afs_cell *, const struct in_addr *, | ||
52 | struct afs_server **); | ||
53 | |||
54 | #define afs_get_server(S) do { atomic_inc(&(S)->usage); } while(0) | ||
55 | |||
56 | extern void afs_put_server(struct afs_server *); | ||
57 | extern void afs_server_do_timeout(struct afs_server *); | ||
58 | |||
59 | extern int afs_server_find_by_peer(const struct rxrpc_peer *, | ||
60 | struct afs_server **); | ||
61 | |||
62 | extern int afs_server_get_vlconn(struct afs_server *, | ||
63 | struct rxrpc_connection **); | ||
64 | |||
65 | static inline | ||
66 | struct afs_server *afs_server_get_from_peer(struct rxrpc_peer *peer) | ||
67 | { | ||
68 | struct afs_server *server; | ||
69 | |||
70 | spin_lock(&afs_server_peer_lock); | ||
71 | server = peer->user; | ||
72 | if (server) | ||
73 | afs_get_server(server); | ||
74 | spin_unlock(&afs_server_peer_lock); | ||
75 | |||
76 | return server; | ||
77 | } | ||
78 | |||
79 | /* | ||
80 | * AFS server callslot grant record | ||
81 | */ | ||
82 | struct afs_server_callslot { | ||
83 | struct list_head link; /* link in server's list */ | ||
84 | struct task_struct *task; /* process waiting to make call */ | ||
85 | struct rxrpc_connection *conn; /* connection to use (or NULL on error) */ | ||
86 | short nconn; /* connection slot number (-1 on error) */ | ||
87 | char ready; /* T when ready */ | ||
88 | int errno; /* error number if nconn==-1 */ | ||
89 | }; | ||
90 | |||
91 | extern int afs_server_request_callslot(struct afs_server *, | ||
92 | struct afs_server_callslot *); | ||
93 | |||
94 | extern void afs_server_release_callslot(struct afs_server *, | ||
95 | struct afs_server_callslot *); | ||
96 | |||
97 | #endif /* AFS_SERVER_H */ | ||
diff --git a/fs/afs/super.c b/fs/afs/super.c index 0470a5c0b8a..efc4fe69f4f 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* AFS superblock handling | 1 | /* AFS superblock handling |
2 | * | 2 | * |
3 | * Copyright (c) 2002 Red Hat, Inc. All rights reserved. | 3 | * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This software may be freely redistributed under the terms of the | 5 | * This software may be freely redistributed under the terms of the |
6 | * GNU General Public License. | 6 | * GNU General Public License. |
@@ -20,12 +20,6 @@ | |||
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
22 | #include <linux/pagemap.h> | 22 | #include <linux/pagemap.h> |
23 | #include "vnode.h" | ||
24 | #include "volume.h" | ||
25 | #include "cell.h" | ||
26 | #include "cmservice.h" | ||
27 | #include "fsclient.h" | ||
28 | #include "super.h" | ||
29 | #include "internal.h" | 23 | #include "internal.h" |
30 | 24 | ||
31 | #define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */ | 25 | #define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */ |
@@ -63,6 +57,7 @@ static const struct super_operations afs_super_ops = { | |||
63 | .drop_inode = generic_delete_inode, | 57 | .drop_inode = generic_delete_inode, |
64 | .destroy_inode = afs_destroy_inode, | 58 | .destroy_inode = afs_destroy_inode, |
65 | .clear_inode = afs_clear_inode, | 59 | .clear_inode = afs_clear_inode, |
60 | .umount_begin = afs_umount_begin, | ||
66 | .put_super = afs_put_super, | 61 | .put_super = afs_put_super, |
67 | }; | 62 | }; |
68 | 63 | ||
@@ -78,8 +73,6 @@ int __init afs_fs_init(void) | |||
78 | 73 | ||
79 | _enter(""); | 74 | _enter(""); |
80 | 75 | ||
81 | afs_timer_init(&afs_mntpt_expiry_timer, &afs_mntpt_expiry_timer_ops); | ||
82 | |||
83 | /* create ourselves an inode cache */ | 76 | /* create ourselves an inode cache */ |
84 | atomic_set(&afs_count_active_inodes, 0); | 77 | atomic_set(&afs_count_active_inodes, 0); |
85 | 78 | ||
@@ -99,11 +92,11 @@ int __init afs_fs_init(void) | |||
99 | ret = register_filesystem(&afs_fs_type); | 92 | ret = register_filesystem(&afs_fs_type); |
100 | if (ret < 0) { | 93 | if (ret < 0) { |
101 | kmem_cache_destroy(afs_inode_cachep); | 94 | kmem_cache_destroy(afs_inode_cachep); |
102 | kleave(" = %d", ret); | 95 | _leave(" = %d", ret); |
103 | return ret; | 96 | return ret; |
104 | } | 97 | } |
105 | 98 | ||
106 | kleave(" = 0"); | 99 | _leave(" = 0"); |
107 | return 0; | 100 | return 0; |
108 | } | 101 | } |
109 | 102 | ||
@@ -112,6 +105,9 @@ int __init afs_fs_init(void) | |||
112 | */ | 105 | */ |
113 | void __exit afs_fs_exit(void) | 106 | void __exit afs_fs_exit(void) |
114 | { | 107 | { |
108 | _enter(""); | ||
109 | |||
110 | afs_mntpt_kill_timer(); | ||
115 | unregister_filesystem(&afs_fs_type); | 111 | unregister_filesystem(&afs_fs_type); |
116 | 112 | ||
117 | if (atomic_read(&afs_count_active_inodes) != 0) { | 113 | if (atomic_read(&afs_count_active_inodes) != 0) { |
@@ -121,6 +117,7 @@ void __exit afs_fs_exit(void) | |||
121 | } | 117 | } |
122 | 118 | ||
123 | kmem_cache_destroy(afs_inode_cachep); | 119 | kmem_cache_destroy(afs_inode_cachep); |
120 | _leave(""); | ||
124 | } | 121 | } |
125 | 122 | ||
126 | /* | 123 | /* |
@@ -154,9 +151,9 @@ static int want_no_value(char *const *_value, const char *option) | |||
154 | * shamelessly adapted it from the msdos fs | 151 | * shamelessly adapted it from the msdos fs |
155 | */ | 152 | */ |
156 | static int afs_super_parse_options(struct afs_mount_params *params, | 153 | static int afs_super_parse_options(struct afs_mount_params *params, |
157 | char *options, | 154 | char *options, const char **devname) |
158 | const char **devname) | ||
159 | { | 155 | { |
156 | struct afs_cell *cell; | ||
160 | char *key, *value; | 157 | char *key, *value; |
161 | int ret; | 158 | int ret; |
162 | 159 | ||
@@ -165,43 +162,37 @@ static int afs_super_parse_options(struct afs_mount_params *params, | |||
165 | options[PAGE_SIZE - 1] = 0; | 162 | options[PAGE_SIZE - 1] = 0; |
166 | 163 | ||
167 | ret = 0; | 164 | ret = 0; |
168 | while ((key = strsep(&options, ",")) != 0) | 165 | while ((key = strsep(&options, ","))) { |
169 | { | ||
170 | value = strchr(key, '='); | 166 | value = strchr(key, '='); |
171 | if (value) | 167 | if (value) |
172 | *value++ = 0; | 168 | *value++ = 0; |
173 | 169 | ||
174 | printk("kAFS: KEY: %s, VAL:%s\n", key, value ?: "-"); | 170 | _debug("kAFS: KEY: %s, VAL:%s", key, value ?: "-"); |
175 | 171 | ||
176 | if (strcmp(key, "rwpath") == 0) { | 172 | if (strcmp(key, "rwpath") == 0) { |
177 | if (!want_no_value(&value, "rwpath")) | 173 | if (!want_no_value(&value, "rwpath")) |
178 | return -EINVAL; | 174 | return -EINVAL; |
179 | params->rwpath = 1; | 175 | params->rwpath = 1; |
180 | continue; | ||
181 | } else if (strcmp(key, "vol") == 0) { | 176 | } else if (strcmp(key, "vol") == 0) { |
182 | if (!want_arg(&value, "vol")) | 177 | if (!want_arg(&value, "vol")) |
183 | return -EINVAL; | 178 | return -EINVAL; |
184 | *devname = value; | 179 | *devname = value; |
185 | continue; | ||
186 | } else if (strcmp(key, "cell") == 0) { | 180 | } else if (strcmp(key, "cell") == 0) { |
187 | if (!want_arg(&value, "cell")) | 181 | if (!want_arg(&value, "cell")) |
188 | return -EINVAL; | 182 | return -EINVAL; |
183 | cell = afs_cell_lookup(value, strlen(value)); | ||
184 | if (IS_ERR(cell)) | ||
185 | return PTR_ERR(cell); | ||
189 | afs_put_cell(params->default_cell); | 186 | afs_put_cell(params->default_cell); |
190 | ret = afs_cell_lookup(value, | 187 | params->default_cell = cell; |
191 | strlen(value), | 188 | } else { |
192 | ¶ms->default_cell); | 189 | printk("kAFS: Unknown mount option: '%s'\n", key); |
193 | if (ret < 0) | 190 | ret = -EINVAL; |
194 | return -EINVAL; | 191 | goto error; |
195 | continue; | ||
196 | } | 192 | } |
197 | |||
198 | printk("kAFS: Unknown mount option: '%s'\n", key); | ||
199 | ret = -EINVAL; | ||
200 | goto error; | ||
201 | } | 193 | } |
202 | 194 | ||
203 | ret = 0; | 195 | ret = 0; |
204 | |||
205 | error: | 196 | error: |
206 | _leave(" = %d", ret); | 197 | _leave(" = %d", ret); |
207 | return ret; | 198 | return ret; |
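The rewritten afs_super_parse_options() keeps the same strsep()-based loop: split the option string at commas, then split each token at the first '=' into key and value. A stand-alone sketch of that tokenising, with made-up option names, might look like this:

/* strsep()-style mount-option parsing: comma-separated tokens, each
 * optionally of the form key=value.  Option names here are examples only. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

static int parse_options(char *options)
{
	char *key, *value;

	while ((key = strsep(&options, ","))) {
		if (!*key)
			continue;		/* tolerate empty tokens */
		value = strchr(key, '=');
		if (value)
			*value++ = '\0';	/* terminate key, point at value */

		if (strcmp(key, "rwpath") == 0 && !value)
			printf("flag: rwpath\n");
		else if (strcmp(key, "cell") == 0 && value)
			printf("cell = %s\n", value);
		else
			return -1;		/* unknown or malformed option */
	}
	return 0;
}

int main(void)
{
	char opts[] = "cell=example.org,rwpath";	/* must be writable */

	return parse_options(opts) == 0 ? 0 : 1;
}

Note that strsep() modifies the string in place, which is why the kernel code copies and NUL-terminates the options page before parsing it.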
@@ -230,7 +221,7 @@ static int afs_fill_super(struct super_block *sb, void *data, int silent) | |||
230 | struct inode *inode = NULL; | 221 | struct inode *inode = NULL; |
231 | int ret; | 222 | int ret; |
232 | 223 | ||
233 | kenter(""); | 224 | _enter(""); |
234 | 225 | ||
235 | /* allocate a superblock info record */ | 226 | /* allocate a superblock info record */ |
236 | as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL); | 227 | as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL); |
@@ -253,9 +244,9 @@ static int afs_fill_super(struct super_block *sb, void *data, int silent) | |||
253 | fid.vid = as->volume->vid; | 244 | fid.vid = as->volume->vid; |
254 | fid.vnode = 1; | 245 | fid.vnode = 1; |
255 | fid.unique = 1; | 246 | fid.unique = 1; |
256 | ret = afs_iget(sb, &fid, &inode); | 247 | inode = afs_iget(sb, &fid); |
257 | if (ret < 0) | 248 | if (IS_ERR(inode)) |
258 | goto error; | 249 | goto error_inode; |
259 | 250 | ||
260 | ret = -ENOMEM; | 251 | ret = -ENOMEM; |
261 | root = d_alloc_root(inode); | 252 | root = d_alloc_root(inode); |
@@ -264,9 +255,12 @@ static int afs_fill_super(struct super_block *sb, void *data, int silent) | |||
264 | 255 | ||
265 | sb->s_root = root; | 256 | sb->s_root = root; |
266 | 257 | ||
267 | kleave(" = 0"); | 258 | _leave(" = 0"); |
268 | return 0; | 259 | return 0; |
269 | 260 | ||
261 | error_inode: | ||
262 | ret = PTR_ERR(inode); | ||
263 | inode = NULL; | ||
270 | error: | 264 | error: |
271 | iput(inode); | 265 | iput(inode); |
272 | afs_put_volume(as->volume); | 266 | afs_put_volume(as->volume); |
@@ -274,7 +268,7 @@ error: | |||
274 | 268 | ||
275 | sb->s_fs_info = NULL; | 269 | sb->s_fs_info = NULL; |
276 | 270 | ||
277 | kleave(" = %d", ret); | 271 | _leave(" = %d", ret); |
278 | return ret; | 272 | return ret; |
279 | } | 273 | } |
280 | 274 | ||
@@ -290,19 +284,13 @@ static int afs_get_sb(struct file_system_type *fs_type, | |||
290 | { | 284 | { |
291 | struct afs_mount_params params; | 285 | struct afs_mount_params params; |
292 | struct super_block *sb; | 286 | struct super_block *sb; |
287 | struct afs_volume *vol; | ||
293 | int ret; | 288 | int ret; |
294 | 289 | ||
295 | _enter(",,%s,%p", dev_name, options); | 290 | _enter(",,%s,%p", dev_name, options); |
296 | 291 | ||
297 | memset(¶ms, 0, sizeof(params)); | 292 | memset(¶ms, 0, sizeof(params)); |
298 | 293 | ||
299 | /* start the cache manager */ | ||
300 | ret = afscm_start(); | ||
301 | if (ret < 0) { | ||
302 | _leave(" = %d", ret); | ||
303 | return ret; | ||
304 | } | ||
305 | |||
306 | /* parse the options */ | 294 | /* parse the options */ |
307 | if (options) { | 295 | if (options) { |
308 | ret = afs_super_parse_options(¶ms, options, &dev_name); | 296 | ret = afs_super_parse_options(¶ms, options, &dev_name); |
@@ -316,17 +304,20 @@ static int afs_get_sb(struct file_system_type *fs_type, | |||
316 | } | 304 | } |
317 | 305 | ||
318 | /* parse the device name */ | 306 | /* parse the device name */ |
319 | ret = afs_volume_lookup(dev_name, | 307 | vol = afs_volume_lookup(dev_name, params.default_cell, params.rwpath); |
320 | params.default_cell, | 308 | if (IS_ERR(vol)) { |
321 | params.rwpath, | 309 | ret = PTR_ERR(vol); |
322 | ¶ms.volume); | ||
323 | if (ret < 0) | ||
324 | goto error; | 310 | goto error; |
311 | } | ||
312 | |||
313 | params.volume = vol; | ||
325 | 314 | ||
326 | /* allocate a deviceless superblock */ | 315 | /* allocate a deviceless superblock */ |
327 | sb = sget(fs_type, afs_test_super, set_anon_super, ¶ms); | 316 | sb = sget(fs_type, afs_test_super, set_anon_super, ¶ms); |
328 | if (IS_ERR(sb)) | 317 | if (IS_ERR(sb)) { |
318 | ret = PTR_ERR(sb); | ||
329 | goto error; | 319 | goto error; |
320 | } | ||
330 | 321 | ||
331 | sb->s_flags = flags; | 322 | sb->s_flags = flags; |
332 | 323 | ||
@@ -341,13 +332,12 @@ static int afs_get_sb(struct file_system_type *fs_type, | |||
341 | 332 | ||
342 | afs_put_volume(params.volume); | 333 | afs_put_volume(params.volume); |
343 | afs_put_cell(params.default_cell); | 334 | afs_put_cell(params.default_cell); |
344 | _leave(" = 0 [%p]", 0, sb); | 335 | _leave(" = 0 [%p]", sb); |
345 | return 0; | 336 | return 0; |
346 | 337 | ||
347 | error: | 338 | error: |
348 | afs_put_volume(params.volume); | 339 | afs_put_volume(params.volume); |
349 | afs_put_cell(params.default_cell); | 340 | afs_put_cell(params.default_cell); |
350 | afscm_stop(); | ||
351 | _leave(" = %d", ret); | 341 | _leave(" = %d", ret); |
352 | return ret; | 342 | return ret; |
353 | } | 343 | } |
@@ -362,7 +352,6 @@ static void afs_put_super(struct super_block *sb) | |||
362 | _enter(""); | 352 | _enter(""); |
363 | 353 | ||
364 | afs_put_volume(as->volume); | 354 | afs_put_volume(as->volume); |
365 | afscm_stop(); | ||
366 | 355 | ||
367 | _leave(""); | 356 | _leave(""); |
368 | } | 357 | } |
@@ -381,10 +370,8 @@ static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep, | |||
381 | inode_init_once(&vnode->vfs_inode); | 370 | inode_init_once(&vnode->vfs_inode); |
382 | init_waitqueue_head(&vnode->update_waitq); | 371 | init_waitqueue_head(&vnode->update_waitq); |
383 | spin_lock_init(&vnode->lock); | 372 | spin_lock_init(&vnode->lock); |
384 | INIT_LIST_HEAD(&vnode->cb_link); | 373 | INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work); |
385 | INIT_LIST_HEAD(&vnode->cb_hash_link); | 374 | mutex_init(&vnode->cb_broken_lock); |
386 | afs_timer_init(&vnode->cb_timeout, | ||
387 | &afs_vnode_cb_timed_out_ops); | ||
388 | } | 375 | } |
389 | } | 376 | } |
390 | 377 | ||
@@ -407,6 +394,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb) | |||
407 | vnode->volume = NULL; | 394 | vnode->volume = NULL; |
408 | vnode->update_cnt = 0; | 395 | vnode->update_cnt = 0; |
409 | vnode->flags = 0; | 396 | vnode->flags = 0; |
397 | vnode->cb_promised = false; | ||
410 | 398 | ||
411 | return &vnode->vfs_inode; | 399 | return &vnode->vfs_inode; |
412 | } | 400 | } |
@@ -416,8 +404,14 @@ static struct inode *afs_alloc_inode(struct super_block *sb) | |||
416 | */ | 404 | */ |
417 | static void afs_destroy_inode(struct inode *inode) | 405 | static void afs_destroy_inode(struct inode *inode) |
418 | { | 406 | { |
407 | struct afs_vnode *vnode = AFS_FS_I(inode); | ||
408 | |||
419 | _enter("{%lu}", inode->i_ino); | 409 | _enter("{%lu}", inode->i_ino); |
420 | 410 | ||
421 | kmem_cache_free(afs_inode_cachep, AFS_FS_I(inode)); | 411 | _debug("DESTROY INODE %p", inode); |
412 | |||
413 | ASSERTCMP(vnode->server, ==, NULL); | ||
414 | |||
415 | kmem_cache_free(afs_inode_cachep, vnode); | ||
422 | atomic_dec(&afs_count_active_inodes); | 416 | atomic_dec(&afs_count_active_inodes); |
423 | } | 417 | } |
diff --git a/fs/afs/super.h b/fs/afs/super.h deleted file mode 100644 index c95b48edfc7..00000000000 --- a/fs/afs/super.h +++ /dev/null | |||
@@ -1,39 +0,0 @@ | |||
1 | /* AFS filesystem internal private data | ||
2 | * | ||
3 | * Copyright (c) 2002 Red Hat, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software may be freely redistributed under the terms of the | ||
6 | * GNU General Public License. | ||
7 | * | ||
8 | * You should have received a copy of the GNU General Public License | ||
9 | * along with this program; if not, write to the Free Software | ||
10 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
11 | * | ||
12 | * Authors: David Woodhouse <dwmw2@cambridge.redhat.com> | ||
13 | * David Howells <dhowells@redhat.com> | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #ifndef AFS_SUPER_H | ||
18 | #define AFS_SUPER_H | ||
19 | |||
20 | #include <linux/fs.h> | ||
21 | #include "server.h" | ||
22 | |||
23 | /* | ||
24 | * AFS superblock private data | ||
25 | * - there's one superblock per volume | ||
26 | */ | ||
27 | struct afs_super_info { | ||
28 | struct afs_volume *volume; /* volume record */ | ||
29 | char rwparent; /* T if parent is R/W AFS volume */ | ||
30 | }; | ||
31 | |||
32 | static inline struct afs_super_info *AFS_FS_S(struct super_block *sb) | ||
33 | { | ||
34 | return sb->s_fs_info; | ||
35 | } | ||
36 | |||
37 | extern struct file_system_type afs_fs_type; | ||
38 | |||
39 | #endif /* AFS_SUPER_H */ | ||
diff --git a/fs/afs/transport.h b/fs/afs/transport.h deleted file mode 100644 index f56be4b7b1d..00000000000 --- a/fs/afs/transport.h +++ /dev/null | |||
@@ -1,21 +0,0 @@ | |||
1 | /* AFS transport management | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef AFS_TRANSPORT_H | ||
13 | #define AFS_TRANSPORT_H | ||
14 | |||
15 | #include "types.h" | ||
16 | #include <rxrpc/transport.h> | ||
17 | |||
18 | /* the cache manager transport endpoint */ | ||
19 | extern struct rxrpc_transport *afs_transport; | ||
20 | |||
21 | #endif /* AFS_TRANSPORT_H */ | ||
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c index dac9faa70ff..0c7eba17483 100644 --- a/fs/afs/vlclient.c +++ b/fs/afs/vlclient.c | |||
@@ -11,243 +11,76 @@ | |||
11 | 11 | ||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <rxrpc/rxrpc.h> | ||
15 | #include <rxrpc/transport.h> | ||
16 | #include <rxrpc/connection.h> | ||
17 | #include <rxrpc/call.h> | ||
18 | #include "server.h" | ||
19 | #include "volume.h" | ||
20 | #include "vlclient.h" | ||
21 | #include "kafsasyncd.h" | ||
22 | #include "kafstimod.h" | ||
23 | #include "errors.h" | ||
24 | #include "internal.h" | 14 | #include "internal.h" |
25 | 15 | ||
26 | #define VLGETENTRYBYID 503 /* AFS Get Cache Entry By ID operation ID */ | ||
27 | #define VLGETENTRYBYNAME 504 /* AFS Get Cache Entry By Name operation ID */ | ||
28 | #define VLPROBE 514 /* AFS Probe Volume Location Service operation ID */ | ||
29 | |||
30 | static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call); | ||
31 | static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call); | ||
32 | |||
33 | /* | 16 | /* |
34 | * map afs VL abort codes to/from Linux error codes | 17 | * map volume locator abort codes to error codes |
35 | * - called with call->lock held | ||
36 | */ | 18 | */ |
37 | static void afs_rxvl_aemap(struct rxrpc_call *call) | 19 | static int afs_vl_abort_to_error(u32 abort_code) |
38 | { | 20 | { |
39 | int err; | 21 | _enter("%u", abort_code); |
40 | 22 | ||
41 | _enter("{%u,%u,%d}", | 23 | switch (abort_code) { |
42 | call->app_err_state, call->app_abort_code, call->app_errno); | 24 | case AFSVL_IDEXIST: return -EEXIST; |
43 | 25 | case AFSVL_IO: return -EREMOTEIO; | |
44 | switch (call->app_err_state) { | 26 | case AFSVL_NAMEEXIST: return -EEXIST; |
45 | case RXRPC_ESTATE_LOCAL_ABORT: | 27 | case AFSVL_CREATEFAIL: return -EREMOTEIO; |
46 | call->app_abort_code = -call->app_errno; | 28 | case AFSVL_NOENT: return -ENOMEDIUM; |
47 | return; | 29 | case AFSVL_EMPTY: return -ENOMEDIUM; |
48 | 30 | case AFSVL_ENTDELETED: return -ENOMEDIUM; | |
49 | case RXRPC_ESTATE_PEER_ABORT: | 31 | case AFSVL_BADNAME: return -EINVAL; |
50 | switch (call->app_abort_code) { | 32 | case AFSVL_BADINDEX: return -EINVAL; |
51 | case AFSVL_IDEXIST: err = -EEXIST; break; | 33 | case AFSVL_BADVOLTYPE: return -EINVAL; |
52 | case AFSVL_IO: err = -EREMOTEIO; break; | 34 | case AFSVL_BADSERVER: return -EINVAL; |
53 | case AFSVL_NAMEEXIST: err = -EEXIST; break; | 35 | case AFSVL_BADPARTITION: return -EINVAL; |
54 | case AFSVL_CREATEFAIL: err = -EREMOTEIO; break; | 36 | case AFSVL_REPSFULL: return -EFBIG; |
55 | case AFSVL_NOENT: err = -ENOMEDIUM; break; | 37 | case AFSVL_NOREPSERVER: return -ENOENT; |
56 | case AFSVL_EMPTY: err = -ENOMEDIUM; break; | 38 | case AFSVL_DUPREPSERVER: return -EEXIST; |
57 | case AFSVL_ENTDELETED: err = -ENOMEDIUM; break; | 39 | case AFSVL_RWNOTFOUND: return -ENOENT; |
58 | case AFSVL_BADNAME: err = -EINVAL; break; | 40 | case AFSVL_BADREFCOUNT: return -EINVAL; |
59 | case AFSVL_BADINDEX: err = -EINVAL; break; | 41 | case AFSVL_SIZEEXCEEDED: return -EINVAL; |
60 | case AFSVL_BADVOLTYPE: err = -EINVAL; break; | 42 | case AFSVL_BADENTRY: return -EINVAL; |
61 | case AFSVL_BADSERVER: err = -EINVAL; break; | 43 | case AFSVL_BADVOLIDBUMP: return -EINVAL; |
62 | case AFSVL_BADPARTITION: err = -EINVAL; break; | 44 | case AFSVL_IDALREADYHASHED: return -EINVAL; |
63 | case AFSVL_REPSFULL: err = -EFBIG; break; | 45 | case AFSVL_ENTRYLOCKED: return -EBUSY; |
64 | case AFSVL_NOREPSERVER: err = -ENOENT; break; | 46 | case AFSVL_BADVOLOPER: return -EBADRQC; |
65 | case AFSVL_DUPREPSERVER: err = -EEXIST; break; | 47 | case AFSVL_BADRELLOCKTYPE: return -EINVAL; |
66 | case AFSVL_RWNOTFOUND: err = -ENOENT; break; | 48 | case AFSVL_RERELEASE: return -EREMOTEIO; |
67 | case AFSVL_BADREFCOUNT: err = -EINVAL; break; | 49 | case AFSVL_BADSERVERFLAG: return -EINVAL; |
68 | case AFSVL_SIZEEXCEEDED: err = -EINVAL; break; | 50 | case AFSVL_PERM: return -EACCES; |
69 | case AFSVL_BADENTRY: err = -EINVAL; break; | 51 | case AFSVL_NOMEM: return -EREMOTEIO; |
70 | case AFSVL_BADVOLIDBUMP: err = -EINVAL; break; | ||
71 | case AFSVL_IDALREADYHASHED: err = -EINVAL; break; | ||
72 | case AFSVL_ENTRYLOCKED: err = -EBUSY; break; | ||
73 | case AFSVL_BADVOLOPER: err = -EBADRQC; break; | ||
74 | case AFSVL_BADRELLOCKTYPE: err = -EINVAL; break; | ||
75 | case AFSVL_RERELEASE: err = -EREMOTEIO; break; | ||
76 | case AFSVL_BADSERVERFLAG: err = -EINVAL; break; | ||
77 | case AFSVL_PERM: err = -EACCES; break; | ||
78 | case AFSVL_NOMEM: err = -EREMOTEIO; break; | ||
79 | default: | ||
80 | err = afs_abort_to_error(call->app_abort_code); | ||
81 | break; | ||
82 | } | ||
83 | call->app_errno = err; | ||
84 | return; | ||
85 | |||
86 | default: | 52 | default: |
87 | return; | 53 | return afs_abort_to_error(abort_code); |
88 | } | 54 | } |
89 | } | 55 | } |
90 | 56 | ||
91 | #if 0 | ||
92 | /* | 57 | /* |
93 | * probe a volume location server to see if it is still alive -- unused | 58 | * deliver reply data to a VL.GetEntryByXXX call |
94 | */ | 59 | */ |
95 | static int afs_rxvl_probe(struct afs_server *server, int alloc_flags) | 60 | static int afs_deliver_vl_get_entry_by_xxx(struct afs_call *call, |
61 | struct sk_buff *skb, bool last) | ||
96 | { | 62 | { |
97 | struct rxrpc_connection *conn; | 63 | struct afs_cache_vlocation *entry; |
98 | struct rxrpc_call *call; | 64 | __be32 *bp; |
99 | struct kvec piov[1]; | 65 | u32 tmp; |
100 | size_t sent; | 66 | int loop; |
101 | int ret; | ||
102 | __be32 param[1]; | ||
103 | |||
104 | DECLARE_WAITQUEUE(myself, current); | ||
105 | |||
106 | /* get hold of the vlserver connection */ | ||
107 | ret = afs_server_get_vlconn(server, &conn); | ||
108 | if (ret < 0) | ||
109 | goto out; | ||
110 | |||
111 | /* create a call through that connection */ | ||
112 | ret = rxrpc_create_call(conn, NULL, NULL, afs_rxvl_aemap, &call); | ||
113 | if (ret < 0) { | ||
114 | printk("kAFS: Unable to create call: %d\n", ret); | ||
115 | goto out_put_conn; | ||
116 | } | ||
117 | call->app_opcode = VLPROBE; | ||
118 | |||
119 | /* we want to get event notifications from the call */ | ||
120 | add_wait_queue(&call->waitq, &myself); | ||
121 | |||
122 | /* marshall the parameters */ | ||
123 | param[0] = htonl(VLPROBE); | ||
124 | piov[0].iov_len = sizeof(param); | ||
125 | piov[0].iov_base = param; | ||
126 | |||
127 | /* send the parameters to the server */ | ||
128 | ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, | ||
129 | alloc_flags, 0, &sent); | ||
130 | if (ret < 0) | ||
131 | goto abort; | ||
132 | |||
133 | /* wait for the reply to completely arrive */ | ||
134 | for (;;) { | ||
135 | set_current_state(TASK_INTERRUPTIBLE); | ||
136 | if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY || | ||
137 | signal_pending(current)) | ||
138 | break; | ||
139 | schedule(); | ||
140 | } | ||
141 | set_current_state(TASK_RUNNING); | ||
142 | |||
143 | ret = -EINTR; | ||
144 | if (signal_pending(current)) | ||
145 | goto abort; | ||
146 | |||
147 | switch (call->app_call_state) { | ||
148 | case RXRPC_CSTATE_ERROR: | ||
149 | ret = call->app_errno; | ||
150 | goto out_unwait; | ||
151 | |||
152 | case RXRPC_CSTATE_CLNT_GOT_REPLY: | ||
153 | ret = 0; | ||
154 | goto out_unwait; | ||
155 | |||
156 | default: | ||
157 | BUG(); | ||
158 | } | ||
159 | 67 | ||
160 | abort: | 68 | _enter(",,%u", last); |
161 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
162 | rxrpc_call_abort(call, ret); | ||
163 | schedule(); | ||
164 | |||
165 | out_unwait: | ||
166 | set_current_state(TASK_RUNNING); | ||
167 | remove_wait_queue(&call->waitq, &myself); | ||
168 | rxrpc_put_call(call); | ||
169 | out_put_conn: | ||
170 | rxrpc_put_connection(conn); | ||
171 | out: | ||
172 | return ret; | ||
173 | } | ||
174 | #endif | ||
175 | 69 | ||
176 | /* | 70 | afs_transfer_reply(call, skb); |
177 | * look up a volume location database entry by name | 71 | if (!last) |
178 | */ | 72 | return 0; |
179 | int afs_rxvl_get_entry_by_name(struct afs_server *server, | ||
180 | const char *volname, | ||
181 | unsigned volnamesz, | ||
182 | struct afs_cache_vlocation *entry) | ||
183 | { | ||
184 | DECLARE_WAITQUEUE(myself, current); | ||
185 | |||
186 | struct rxrpc_connection *conn; | ||
187 | struct rxrpc_call *call; | ||
188 | struct kvec piov[3]; | ||
189 | unsigned tmp; | ||
190 | size_t sent; | ||
191 | int ret, loop; | ||
192 | __be32 *bp, param[2], zero; | ||
193 | |||
194 | _enter(",%*.*s,%u,", volnamesz, volnamesz, volname, volnamesz); | ||
195 | |||
196 | memset(entry, 0, sizeof(*entry)); | ||
197 | |||
198 | /* get hold of the vlserver connection */ | ||
199 | ret = afs_server_get_vlconn(server, &conn); | ||
200 | if (ret < 0) | ||
201 | goto out; | ||
202 | |||
203 | /* create a call through that connection */ | ||
204 | ret = rxrpc_create_call(conn, NULL, NULL, afs_rxvl_aemap, &call); | ||
205 | if (ret < 0) { | ||
206 | printk("kAFS: Unable to create call: %d\n", ret); | ||
207 | goto out_put_conn; | ||
208 | } | ||
209 | call->app_opcode = VLGETENTRYBYNAME; | ||
210 | 73 | ||
211 | /* we want to get event notifications from the call */ | 74 | if (call->reply_size != call->reply_max) |
212 | add_wait_queue(&call->waitq, &myself); | 75 | return -EBADMSG; |
213 | 76 | ||
214 | /* marshall the parameters */ | 77 | /* unmarshall the reply once we've received all of it */ |
215 | piov[1].iov_len = volnamesz; | 78 | entry = call->reply; |
216 | piov[1].iov_base = (char *) volname; | 79 | bp = call->buffer; |
217 | |||
218 | zero = 0; | ||
219 | piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3; | ||
220 | piov[2].iov_base = &zero; | ||
221 | |||
222 | param[0] = htonl(VLGETENTRYBYNAME); | ||
223 | param[1] = htonl(piov[1].iov_len); | ||
224 | |||
225 | piov[0].iov_len = sizeof(param); | ||
226 | piov[0].iov_base = param; | ||
227 | |||
228 | /* send the parameters to the server */ | ||
229 | ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
230 | 0, &sent); | ||
231 | if (ret < 0) | ||
232 | goto abort; | ||
233 | |||
234 | /* wait for the reply to completely arrive */ | ||
235 | bp = rxrpc_call_alloc_scratch(call, 384); | ||
236 | |||
237 | ret = rxrpc_call_read_data(call, bp, 384, | ||
238 | RXRPC_CALL_READ_BLOCK | | ||
239 | RXRPC_CALL_READ_ALL); | ||
240 | if (ret < 0) { | ||
241 | if (ret == -ECONNABORTED) { | ||
242 | ret = call->app_errno; | ||
243 | goto out_unwait; | ||
244 | } | ||
245 | goto abort; | ||
246 | } | ||
247 | 80 | ||
248 | /* unmarshall the reply */ | ||
249 | for (loop = 0; loop < 64; loop++) | 81 | for (loop = 0; loop < 64; loop++) |
250 | entry->name[loop] = ntohl(*bp++); | 82 | entry->name[loop] = ntohl(*bp++); |
83 | entry->name[loop] = 0; | ||
251 | bp++; /* final NUL */ | 84 | bp++; /* final NUL */ |
252 | 85 | ||
253 | bp++; /* type */ | 86 | bp++; /* type */ |
@@ -260,6 +93,7 @@ int afs_rxvl_get_entry_by_name(struct afs_server *server, | |||
260 | 93 | ||
261 | for (loop = 0; loop < 8; loop++) { | 94 | for (loop = 0; loop < 8; loop++) { |
262 | tmp = ntohl(*bp++); | 95 | tmp = ntohl(*bp++); |
96 | entry->srvtmask[loop] = 0; | ||
263 | if (tmp & AFS_VLSF_RWVOL) | 97 | if (tmp & AFS_VLSF_RWVOL) |
264 | entry->srvtmask[loop] |= AFS_VOL_VTM_RW; | 98 | entry->srvtmask[loop] |= AFS_VOL_VTM_RW; |
265 | if (tmp & AFS_VLSF_ROVOL) | 99 | if (tmp & AFS_VLSF_ROVOL) |
@@ -275,409 +109,104 @@ int afs_rxvl_get_entry_by_name(struct afs_server *server, | |||
275 | bp++; /* clone ID */ | 109 | bp++; /* clone ID */ |
276 | 110 | ||
277 | tmp = ntohl(*bp++); /* flags */ | 111 | tmp = ntohl(*bp++); /* flags */ |
112 | entry->vidmask = 0; | ||
278 | if (tmp & AFS_VLF_RWEXISTS) | 113 | if (tmp & AFS_VLF_RWEXISTS) |
279 | entry->vidmask |= AFS_VOL_VTM_RW; | 114 | entry->vidmask |= AFS_VOL_VTM_RW; |
280 | if (tmp & AFS_VLF_ROEXISTS) | 115 | if (tmp & AFS_VLF_ROEXISTS) |
281 | entry->vidmask |= AFS_VOL_VTM_RO; | 116 | entry->vidmask |= AFS_VOL_VTM_RO; |
282 | if (tmp & AFS_VLF_BACKEXISTS) | 117 | if (tmp & AFS_VLF_BACKEXISTS) |
283 | entry->vidmask |= AFS_VOL_VTM_BAK; | 118 | entry->vidmask |= AFS_VOL_VTM_BAK; |
284 | |||
285 | ret = -ENOMEDIUM; | ||
286 | if (!entry->vidmask) | 119 | if (!entry->vidmask) |
287 | goto abort; | 120 | return -EBADMSG; |
288 | 121 | ||
289 | /* success */ | 122 | _leave(" = 0 [done]"); |
290 | entry->rtime = get_seconds(); | 123 | return 0; |
291 | ret = 0; | ||
292 | |||
293 | out_unwait: | ||
294 | set_current_state(TASK_RUNNING); | ||
295 | remove_wait_queue(&call->waitq, &myself); | ||
296 | rxrpc_put_call(call); | ||
297 | out_put_conn: | ||
298 | rxrpc_put_connection(conn); | ||
299 | out: | ||
300 | _leave(" = %d", ret); | ||
301 | return ret; | ||
302 | |||
303 | abort: | ||
304 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
305 | rxrpc_call_abort(call, ret); | ||
306 | schedule(); | ||
307 | goto out_unwait; | ||
308 | } | 124 | } |
309 | 125 | ||
310 | /* | 126 | /* |
311 | * look up a volume location database entry by ID | 127 | * VL.GetEntryByName operation type |
312 | */ | 128 | */ |
313 | int afs_rxvl_get_entry_by_id(struct afs_server *server, | 129 | static const struct afs_call_type afs_RXVLGetEntryByName = { |
314 | afs_volid_t volid, | 130 | .deliver = afs_deliver_vl_get_entry_by_xxx, |
315 | afs_voltype_t voltype, | 131 | .abort_to_error = afs_vl_abort_to_error, |
316 | struct afs_cache_vlocation *entry) | 132 | .destructor = afs_flat_call_destructor, |
317 | { | 133 | }; |
318 | DECLARE_WAITQUEUE(myself, current); | ||
319 | |||
320 | struct rxrpc_connection *conn; | ||
321 | struct rxrpc_call *call; | ||
322 | struct kvec piov[1]; | ||
323 | unsigned tmp; | ||
324 | size_t sent; | ||
325 | int ret, loop; | ||
326 | __be32 *bp, param[3]; | ||
327 | |||
328 | _enter(",%x,%d,", volid, voltype); | ||
329 | |||
330 | memset(entry, 0, sizeof(*entry)); | ||
331 | |||
332 | /* get hold of the vlserver connection */ | ||
333 | ret = afs_server_get_vlconn(server, &conn); | ||
334 | if (ret < 0) | ||
335 | goto out; | ||
336 | |||
337 | /* create a call through that connection */ | ||
338 | ret = rxrpc_create_call(conn, NULL, NULL, afs_rxvl_aemap, &call); | ||
339 | if (ret < 0) { | ||
340 | printk("kAFS: Unable to create call: %d\n", ret); | ||
341 | goto out_put_conn; | ||
342 | } | ||
343 | call->app_opcode = VLGETENTRYBYID; | ||
344 | |||
345 | /* we want to get event notifications from the call */ | ||
346 | add_wait_queue(&call->waitq, &myself); | ||
347 | |||
348 | /* marshall the parameters */ | ||
349 | param[0] = htonl(VLGETENTRYBYID); | ||
350 | param[1] = htonl(volid); | ||
351 | param[2] = htonl(voltype); | ||
352 | |||
353 | piov[0].iov_len = sizeof(param); | ||
354 | piov[0].iov_base = param; | ||
355 | |||
356 | /* send the parameters to the server */ | ||
357 | ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
358 | 0, &sent); | ||
359 | if (ret < 0) | ||
360 | goto abort; | ||
361 | |||
362 | /* wait for the reply to completely arrive */ | ||
363 | bp = rxrpc_call_alloc_scratch(call, 384); | ||
364 | |||
365 | ret = rxrpc_call_read_data(call, bp, 384, | ||
366 | RXRPC_CALL_READ_BLOCK | | ||
367 | RXRPC_CALL_READ_ALL); | ||
368 | if (ret < 0) { | ||
369 | if (ret == -ECONNABORTED) { | ||
370 | ret = call->app_errno; | ||
371 | goto out_unwait; | ||
372 | } | ||
373 | goto abort; | ||
374 | } | ||
375 | |||
376 | /* unmarshall the reply */ | ||
377 | for (loop = 0; loop < 64; loop++) | ||
378 | entry->name[loop] = ntohl(*bp++); | ||
379 | bp++; /* final NUL */ | ||
380 | |||
381 | bp++; /* type */ | ||
382 | entry->nservers = ntohl(*bp++); | ||
383 | |||
384 | for (loop = 0; loop < 8; loop++) | ||
385 | entry->servers[loop].s_addr = *bp++; | ||
386 | |||
387 | bp += 8; /* partition IDs */ | ||
388 | |||
389 | for (loop = 0; loop < 8; loop++) { | ||
390 | tmp = ntohl(*bp++); | ||
391 | if (tmp & AFS_VLSF_RWVOL) | ||
392 | entry->srvtmask[loop] |= AFS_VOL_VTM_RW; | ||
393 | if (tmp & AFS_VLSF_ROVOL) | ||
394 | entry->srvtmask[loop] |= AFS_VOL_VTM_RO; | ||
395 | if (tmp & AFS_VLSF_BACKVOL) | ||
396 | entry->srvtmask[loop] |= AFS_VOL_VTM_BAK; | ||
397 | } | ||
398 | |||
399 | entry->vid[0] = ntohl(*bp++); | ||
400 | entry->vid[1] = ntohl(*bp++); | ||
401 | entry->vid[2] = ntohl(*bp++); | ||
402 | |||
403 | bp++; /* clone ID */ | ||
404 | |||
405 | tmp = ntohl(*bp++); /* flags */ | ||
406 | if (tmp & AFS_VLF_RWEXISTS) | ||
407 | entry->vidmask |= AFS_VOL_VTM_RW; | ||
408 | if (tmp & AFS_VLF_ROEXISTS) | ||
409 | entry->vidmask |= AFS_VOL_VTM_RO; | ||
410 | if (tmp & AFS_VLF_BACKEXISTS) | ||
411 | entry->vidmask |= AFS_VOL_VTM_BAK; | ||
412 | 134 | ||
413 | ret = -ENOMEDIUM; | 135 | /* |
414 | if (!entry->vidmask) | 136 | * VL.GetEntryById operation type |
415 | goto abort; | 137 | */ |
416 | 138 | static const struct afs_call_type afs_RXVLGetEntryById = { | |
417 | #if 0 /* TODO: remove */ | 139 | .deliver = afs_deliver_vl_get_entry_by_xxx, |
418 | entry->nservers = 3; | 140 | .abort_to_error = afs_vl_abort_to_error, |
419 | entry->servers[0].s_addr = htonl(0xac101249); | 141 | .destructor = afs_flat_call_destructor, |
420 | entry->servers[1].s_addr = htonl(0xac101243); | 142 | }; |
421 | entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/); | ||
422 | |||
423 | entry->srvtmask[0] = AFS_VOL_VTM_RO; | ||
424 | entry->srvtmask[1] = AFS_VOL_VTM_RO; | ||
425 | entry->srvtmask[2] = AFS_VOL_VTM_RO | AFS_VOL_VTM_RW; | ||
426 | #endif | ||
427 | |||
428 | /* success */ | ||
429 | entry->rtime = get_seconds(); | ||
430 | ret = 0; | ||
431 | |||
432 | out_unwait: | ||
433 | set_current_state(TASK_RUNNING); | ||
434 | remove_wait_queue(&call->waitq, &myself); | ||
435 | rxrpc_put_call(call); | ||
436 | out_put_conn: | ||
437 | rxrpc_put_connection(conn); | ||
438 | out: | ||
439 | _leave(" = %d", ret); | ||
440 | return ret; | ||
441 | |||
442 | abort: | ||
443 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
444 | rxrpc_call_abort(call, ret); | ||
445 | schedule(); | ||
446 | goto out_unwait; | ||
447 | } | ||
448 | 143 | ||
449 | /* | 144 | /* |
450 | * look up a volume location database entry by ID asynchronously | 145 | * dispatch a get volume entry by name operation |
451 | */ | 146 | */ |
452 | int afs_rxvl_get_entry_by_id_async(struct afs_async_op *op, | 147 | int afs_vl_get_entry_by_name(struct in_addr *addr, |
453 | afs_volid_t volid, | 148 | const char *volname, |
454 | afs_voltype_t voltype) | 149 | struct afs_cache_vlocation *entry, |
150 | const struct afs_wait_mode *wait_mode) | ||
455 | { | 151 | { |
456 | struct rxrpc_connection *conn; | 152 | struct afs_call *call; |
457 | struct rxrpc_call *call; | 153 | size_t volnamesz, reqsz, padsz; |
458 | struct kvec piov[1]; | 154 | __be32 *bp; |
459 | size_t sent; | ||
460 | int ret; | ||
461 | __be32 param[3]; | ||
462 | |||
463 | _enter(",%x,%d,", volid, voltype); | ||
464 | |||
465 | /* get hold of the vlserver connection */ | ||
466 | ret = afs_server_get_vlconn(op->server, &conn); | ||
467 | if (ret < 0) { | ||
468 | _leave(" = %d", ret); | ||
469 | return ret; | ||
470 | } | ||
471 | 155 | ||
472 | /* create a call through that connection */ | 156 | _enter(""); |
473 | ret = rxrpc_create_call(conn, | ||
474 | afs_rxvl_get_entry_by_id_attn, | ||
475 | afs_rxvl_get_entry_by_id_error, | ||
476 | afs_rxvl_aemap, | ||
477 | &op->call); | ||
478 | rxrpc_put_connection(conn); | ||
479 | |||
480 | if (ret < 0) { | ||
481 | printk("kAFS: Unable to create call: %d\n", ret); | ||
482 | _leave(" = %d", ret); | ||
483 | return ret; | ||
484 | } | ||
485 | 157 | ||
486 | op->call->app_opcode = VLGETENTRYBYID; | 158 | volnamesz = strlen(volname); |
487 | op->call->app_user = op; | 159 | padsz = (4 - (volnamesz & 3)) & 3; |
160 | reqsz = 8 + volnamesz + padsz; | ||
488 | 161 | ||
489 | call = op->call; | 162 | call = afs_alloc_flat_call(&afs_RXVLGetEntryByName, reqsz, 384); |
490 | rxrpc_get_call(call); | 163 | if (!call) |
164 | return -ENOMEM; | ||
491 | 165 | ||
492 | /* send event notifications from the call to kafsasyncd */ | 166 | call->reply = entry; |
493 | afs_kafsasyncd_begin_op(op); | 167 | call->service_id = VL_SERVICE; |
168 | call->port = htons(AFS_VL_PORT); | ||
494 | 169 | ||
495 | /* marshall the parameters */ | 170 | /* marshall the parameters */ |
496 | param[0] = htonl(VLGETENTRYBYID); | 171 | bp = call->request; |
497 | param[1] = htonl(volid); | 172 | *bp++ = htonl(VLGETENTRYBYNAME); |
498 | param[2] = htonl(voltype); | 173 | *bp++ = htonl(volnamesz); |
499 | 174 | memcpy(bp, volname, volnamesz); | |
500 | piov[0].iov_len = sizeof(param); | 175 | if (padsz > 0) |
501 | piov[0].iov_base = param; | 176 | memset((void *) bp + volnamesz, 0, padsz); |
502 | 177 | ||
503 | /* allocate result read buffer in scratch space */ | 178 | /* initiate the call */ |
504 | call->app_scr_ptr = rxrpc_call_alloc_scratch(op->call, 384); | 179 | return afs_make_call(addr, call, GFP_KERNEL, wait_mode); |
505 | |||
506 | /* send the parameters to the server */ | ||
507 | ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS, | ||
508 | 0, &sent); | ||
509 | if (ret < 0) { | ||
510 | rxrpc_call_abort(call, ret); /* handle from kafsasyncd */ | ||
511 | ret = 0; | ||
512 | goto out; | ||
513 | } | ||
514 | |||
515 | /* wait for the reply to completely arrive */ | ||
516 | ret = rxrpc_call_read_data(call, call->app_scr_ptr, 384, 0); | ||
517 | switch (ret) { | ||
518 | case 0: | ||
519 | case -EAGAIN: | ||
520 | case -ECONNABORTED: | ||
521 | ret = 0; | ||
522 | break; /* all handled by kafsasyncd */ | ||
523 | |||
524 | default: | ||
525 | rxrpc_call_abort(call, ret); /* make kafsasyncd handle it */ | ||
526 | ret = 0; | ||
527 | break; | ||
528 | } | ||
529 | |||
530 | out: | ||
531 | rxrpc_put_call(call); | ||
532 | _leave(" = %d", ret); | ||
533 | return ret; | ||
534 | } | 180 | } |
535 | 181 | ||
536 | /* | 182 | /* |
537 | * attend to the asynchronous get VLDB entry by ID | 183 | * dispatch a get volume entry by ID operation |
538 | */ | 184 | */ |
539 | int afs_rxvl_get_entry_by_id_async2(struct afs_async_op *op, | 185 | int afs_vl_get_entry_by_id(struct in_addr *addr, |
540 | struct afs_cache_vlocation *entry) | 186 | afs_volid_t volid, |
187 | afs_voltype_t voltype, | ||
188 | struct afs_cache_vlocation *entry, | ||
189 | const struct afs_wait_mode *wait_mode) | ||
541 | { | 190 | { |
191 | struct afs_call *call; | ||
542 | __be32 *bp; | 192 | __be32 *bp; |
543 | __u32 tmp; | ||
544 | int loop, ret; | ||
545 | |||
546 | _enter("{op=%p cst=%u}", op, op->call->app_call_state); | ||
547 | |||
548 | memset(entry, 0, sizeof(*entry)); | ||
549 | |||
550 | if (op->call->app_call_state == RXRPC_CSTATE_COMPLETE) { | ||
551 | /* operation finished */ | ||
552 | afs_kafsasyncd_terminate_op(op); | ||
553 | |||
554 | bp = op->call->app_scr_ptr; | ||
555 | |||
556 | /* unmarshall the reply */ | ||
557 | for (loop = 0; loop < 64; loop++) | ||
558 | entry->name[loop] = ntohl(*bp++); | ||
559 | bp++; /* final NUL */ | ||
560 | |||
561 | bp++; /* type */ | ||
562 | entry->nservers = ntohl(*bp++); | ||
563 | |||
564 | for (loop = 0; loop < 8; loop++) | ||
565 | entry->servers[loop].s_addr = *bp++; | ||
566 | |||
567 | bp += 8; /* partition IDs */ | ||
568 | |||
569 | for (loop = 0; loop < 8; loop++) { | ||
570 | tmp = ntohl(*bp++); | ||
571 | if (tmp & AFS_VLSF_RWVOL) | ||
572 | entry->srvtmask[loop] |= AFS_VOL_VTM_RW; | ||
573 | if (tmp & AFS_VLSF_ROVOL) | ||
574 | entry->srvtmask[loop] |= AFS_VOL_VTM_RO; | ||
575 | if (tmp & AFS_VLSF_BACKVOL) | ||
576 | entry->srvtmask[loop] |= AFS_VOL_VTM_BAK; | ||
577 | } | ||
578 | |||
579 | entry->vid[0] = ntohl(*bp++); | ||
580 | entry->vid[1] = ntohl(*bp++); | ||
581 | entry->vid[2] = ntohl(*bp++); | ||
582 | |||
583 | bp++; /* clone ID */ | ||
584 | |||
585 | tmp = ntohl(*bp++); /* flags */ | ||
586 | if (tmp & AFS_VLF_RWEXISTS) | ||
587 | entry->vidmask |= AFS_VOL_VTM_RW; | ||
588 | if (tmp & AFS_VLF_ROEXISTS) | ||
589 | entry->vidmask |= AFS_VOL_VTM_RO; | ||
590 | if (tmp & AFS_VLF_BACKEXISTS) | ||
591 | entry->vidmask |= AFS_VOL_VTM_BAK; | ||
592 | |||
593 | ret = -ENOMEDIUM; | ||
594 | if (!entry->vidmask) { | ||
595 | rxrpc_call_abort(op->call, ret); | ||
596 | goto done; | ||
597 | } | ||
598 | |||
599 | #if 0 /* TODO: remove */ | ||
600 | entry->nservers = 3; | ||
601 | entry->servers[0].s_addr = htonl(0xac101249); | ||
602 | entry->servers[1].s_addr = htonl(0xac101243); | ||
603 | entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/); | ||
604 | |||
605 | entry->srvtmask[0] = AFS_VOL_VTM_RO; | ||
606 | entry->srvtmask[1] = AFS_VOL_VTM_RO; | ||
607 | entry->srvtmask[2] = AFS_VOL_VTM_RO | AFS_VOL_VTM_RW; | ||
608 | #endif | ||
609 | |||
610 | /* success */ | ||
611 | entry->rtime = get_seconds(); | ||
612 | ret = 0; | ||
613 | goto done; | ||
614 | } | ||
615 | |||
616 | if (op->call->app_call_state == RXRPC_CSTATE_ERROR) { | ||
617 | /* operation error */ | ||
618 | ret = op->call->app_errno; | ||
619 | goto done; | ||
620 | } | ||
621 | |||
622 | _leave(" = -EAGAIN"); | ||
623 | return -EAGAIN; | ||
624 | |||
625 | done: | ||
626 | rxrpc_put_call(op->call); | ||
627 | op->call = NULL; | ||
628 | _leave(" = %d", ret); | ||
629 | return ret; | ||
630 | } | ||
631 | |||
632 | /* | ||
633 | * handle attention events on an async get-entry-by-ID op | ||
634 | * - called from krxiod | ||
635 | */ | ||
636 | static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call) | ||
637 | { | ||
638 | struct afs_async_op *op = call->app_user; | ||
639 | |||
640 | _enter("{op=%p cst=%u}", op, call->app_call_state); | ||
641 | |||
642 | switch (call->app_call_state) { | ||
643 | case RXRPC_CSTATE_COMPLETE: | ||
644 | afs_kafsasyncd_attend_op(op); | ||
645 | break; | ||
646 | case RXRPC_CSTATE_CLNT_RCV_REPLY: | ||
647 | if (call->app_async_read) | ||
648 | break; | ||
649 | case RXRPC_CSTATE_CLNT_GOT_REPLY: | ||
650 | if (call->app_read_count == 0) | ||
651 | break; | ||
652 | printk("kAFS: Reply bigger than expected" | ||
653 | " {cst=%u asyn=%d mark=%Zu rdy=%Zu pr=%u%s}", | ||
654 | call->app_call_state, | ||
655 | call->app_async_read, | ||
656 | call->app_mark, | ||
657 | call->app_ready_qty, | ||
658 | call->pkt_rcv_count, | ||
659 | call->app_last_rcv ? " last" : ""); | ||
660 | |||
661 | rxrpc_call_abort(call, -EBADMSG); | ||
662 | break; | ||
663 | default: | ||
664 | BUG(); | ||
665 | } | ||
666 | 193 | ||
667 | _leave(""); | 194 | _enter(""); |
668 | } | ||
669 | 195 | ||
670 | /* | 196 | call = afs_alloc_flat_call(&afs_RXVLGetEntryById, 12, 384); |
671 | * handle error events on an async get-entry-by-ID op | 197 | if (!call) |
672 | * - called from krxiod | 198 | return -ENOMEM; |
673 | */ | ||
674 | static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call) | ||
675 | { | ||
676 | struct afs_async_op *op = call->app_user; | ||
677 | 199 | ||
678 | _enter("{op=%p cst=%u}", op, call->app_call_state); | 200 | call->reply = entry; |
201 | call->service_id = VL_SERVICE; | ||
202 | call->port = htons(AFS_VL_PORT); | ||
679 | 203 | ||
680 | afs_kafsasyncd_attend_op(op); | 204 | /* marshall the parameters */ |
205 | bp = call->request; | ||
206 | *bp++ = htonl(VLGETENTRYBYID); | ||
207 | *bp++ = htonl(volid); | ||
208 | *bp = htonl(voltype); | ||
681 | 209 | ||
682 | _leave(""); | 210 | /* initiate the call */ |
211 | return afs_make_call(addr, call, GFP_KERNEL, wait_mode); | ||
683 | } | 212 | } |
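The flat-call marshalling above packs an opcode, the name length, the name itself and zero padding up to a 4-byte boundary, which is what the reqsz arithmetic (8 + volnamesz + padsz) accounts for. A standalone sketch of that request layout, in plain userspace C rather than kernel code; the opcode value and helper name are assumptions for illustration only:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VLGETENTRYBYNAME 504	/* opcode value assumed for illustration */

/* Lay out an opcode + length + padded-name request the way
 * afs_vl_get_entry_by_name() does: 8 bytes of header, then the volume name
 * rounded up to the next 4-byte boundary with zero bytes. */
static size_t marshal_get_entry_by_name(uint8_t *buf, const char *volname)
{
	size_t volnamesz = strlen(volname);
	size_t padsz = (4 - (volnamesz & 3)) & 3;
	uint32_t *bp = (uint32_t *) buf;

	*bp++ = htonl(VLGETENTRYBYNAME);
	*bp++ = htonl((uint32_t) volnamesz);
	memcpy(bp, volname, volnamesz);
	if (padsz > 0)
		memset((uint8_t *) bp + volnamesz, 0, padsz);

	return 8 + volnamesz + padsz;	/* the same reqsz the kernel computes */
}

int main(void)
{
	uint8_t buf[128];

	/* a 9-byte name gets 3 bytes of padding: 8 + 9 + 3 = 20 */
	printf("request is %zu bytes\n",
	       marshal_get_entry_by_name(buf, "root.cell"));
	return 0;
}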
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c index e48728c9217..60cb2f408c7 100644 --- a/fs/afs/vlocation.c +++ b/fs/afs/vlocation.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* volume location management | 1 | /* AFS volume location management |
2 | * | 2 | * |
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -12,130 +12,60 @@ | |||
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/slab.h> | ||
16 | #include <linux/fs.h> | ||
17 | #include <linux/pagemap.h> | ||
18 | #include "volume.h" | ||
19 | #include "cell.h" | ||
20 | #include "cmservice.h" | ||
21 | #include "fsclient.h" | ||
22 | #include "vlclient.h" | ||
23 | #include "kafstimod.h" | ||
24 | #include <rxrpc/connection.h> | ||
25 | #include "internal.h" | 15 | #include "internal.h" |
26 | 16 | ||
27 | #define AFS_VLDB_TIMEOUT HZ*1000 | 17 | unsigned afs_vlocation_timeout = 10; /* volume location timeout in seconds */ |
18 | unsigned afs_vlocation_update_timeout = 10 * 60; | ||
28 | 19 | ||
29 | static void afs_vlocation_update_timer(struct afs_timer *timer); | 20 | static void afs_vlocation_reaper(struct work_struct *); |
30 | static void afs_vlocation_update_attend(struct afs_async_op *op); | 21 | static void afs_vlocation_updater(struct work_struct *); |
31 | static void afs_vlocation_update_discard(struct afs_async_op *op); | ||
32 | static void __afs_put_vlocation(struct afs_vlocation *vlocation); | ||
33 | 22 | ||
34 | static void __afs_vlocation_timeout(struct afs_timer *timer) | 23 | static LIST_HEAD(afs_vlocation_updates); |
35 | { | 24 | static LIST_HEAD(afs_vlocation_graveyard); |
36 | struct afs_vlocation *vlocation = | 25 | static DEFINE_SPINLOCK(afs_vlocation_updates_lock); |
37 | list_entry(timer, struct afs_vlocation, timeout); | 26 | static DEFINE_SPINLOCK(afs_vlocation_graveyard_lock); |
38 | 27 | static DECLARE_DELAYED_WORK(afs_vlocation_reap, afs_vlocation_reaper); | |
39 | _debug("VL TIMEOUT [%s{u=%d}]", | 28 | static DECLARE_DELAYED_WORK(afs_vlocation_update, afs_vlocation_updater); |
40 | vlocation->vldb.name, atomic_read(&vlocation->usage)); | 29 | static struct workqueue_struct *afs_vlocation_update_worker; |
41 | |||
42 | afs_vlocation_do_timeout(vlocation); | ||
43 | } | ||
44 | |||
45 | static const struct afs_timer_ops afs_vlocation_timer_ops = { | ||
46 | .timed_out = __afs_vlocation_timeout, | ||
47 | }; | ||
48 | |||
49 | static const struct afs_timer_ops afs_vlocation_update_timer_ops = { | ||
50 | .timed_out = afs_vlocation_update_timer, | ||
51 | }; | ||
52 | |||
53 | static const struct afs_async_op_ops afs_vlocation_update_op_ops = { | ||
54 | .attend = afs_vlocation_update_attend, | ||
55 | .discard = afs_vlocation_update_discard, | ||
56 | }; | ||
57 | |||
58 | static LIST_HEAD(afs_vlocation_update_pendq); /* queue of VLs awaiting update */ | ||
59 | static struct afs_vlocation *afs_vlocation_update; /* VL currently being updated */ | ||
60 | static DEFINE_SPINLOCK(afs_vlocation_update_lock); /* lock guarding update queue */ | ||
61 | |||
62 | #ifdef AFS_CACHING_SUPPORT | ||
63 | static cachefs_match_val_t afs_vlocation_cache_match(void *target, | ||
64 | const void *entry); | ||
65 | static void afs_vlocation_cache_update(void *source, void *entry); | ||
66 | |||
67 | struct cachefs_index_def afs_vlocation_cache_index_def = { | ||
68 | .name = "vldb", | ||
69 | .data_size = sizeof(struct afs_cache_vlocation), | ||
70 | .keys[0] = { CACHEFS_INDEX_KEYS_ASCIIZ, 64 }, | ||
71 | .match = afs_vlocation_cache_match, | ||
72 | .update = afs_vlocation_cache_update, | ||
73 | }; | ||
74 | #endif | ||
75 | 30 | ||
76 | /* | 31 | /* |
77 | * iterate through the VL servers in a cell until one of them admits knowing | 32 | * iterate through the VL servers in a cell until one of them admits knowing |
78 | * about the volume in question | 33 | * about the volume in question |
79 | * - caller must have cell->vl_sem write-locked | ||
80 | */ | 34 | */ |
81 | static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vlocation, | 35 | static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vl, |
82 | const char *name, | ||
83 | unsigned namesz, | ||
84 | struct afs_cache_vlocation *vldb) | 36 | struct afs_cache_vlocation *vldb) |
85 | { | 37 | { |
86 | struct afs_server *server = NULL; | 38 | struct afs_cell *cell = vl->cell; |
87 | struct afs_cell *cell = vlocation->cell; | 39 | struct in_addr addr; |
88 | int count, ret; | 40 | int count, ret; |
89 | 41 | ||
90 | _enter("%s,%*.*s,%u", cell->name, namesz, namesz, name, namesz); | 42 | _enter("%s,%s", cell->name, vl->vldb.name); |
91 | 43 | ||
44 | down_write(&vl->cell->vl_sem); | ||
92 | ret = -ENOMEDIUM; | 45 | ret = -ENOMEDIUM; |
93 | for (count = cell->vl_naddrs; count > 0; count--) { | 46 | for (count = cell->vl_naddrs; count > 0; count--) { |
94 | _debug("CellServ[%hu]: %08x", | 47 | addr = cell->vl_addrs[cell->vl_curr_svix]; |
95 | cell->vl_curr_svix, | 48 | |
96 | cell->vl_addrs[cell->vl_curr_svix].s_addr); | 49 | _debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr); |
97 | |||
98 | /* try and create a server */ | ||
99 | ret = afs_server_lookup(cell, | ||
100 | &cell->vl_addrs[cell->vl_curr_svix], | ||
101 | &server); | ||
102 | switch (ret) { | ||
103 | case 0: | ||
104 | break; | ||
105 | case -ENOMEM: | ||
106 | case -ENONET: | ||
107 | goto out; | ||
108 | default: | ||
109 | goto rotate; | ||
110 | } | ||
111 | 50 | ||
112 | /* attempt to access the VL server */ | 51 | /* attempt to access the VL server */ |
113 | ret = afs_rxvl_get_entry_by_name(server, name, namesz, vldb); | 52 | ret = afs_vl_get_entry_by_name(&addr, vl->vldb.name, vldb, |
53 | &afs_sync_call); | ||
114 | switch (ret) { | 54 | switch (ret) { |
115 | case 0: | 55 | case 0: |
116 | afs_put_server(server); | ||
117 | goto out; | 56 | goto out; |
118 | case -ENOMEM: | 57 | case -ENOMEM: |
119 | case -ENONET: | 58 | case -ENONET: |
120 | case -ENETUNREACH: | 59 | case -ENETUNREACH: |
121 | case -EHOSTUNREACH: | 60 | case -EHOSTUNREACH: |
122 | case -ECONNREFUSED: | 61 | case -ECONNREFUSED: |
123 | down_write(&server->sem); | ||
124 | if (server->vlserver) { | ||
125 | rxrpc_put_connection(server->vlserver); | ||
126 | server->vlserver = NULL; | ||
127 | } | ||
128 | up_write(&server->sem); | ||
129 | afs_put_server(server); | ||
130 | if (ret == -ENOMEM || ret == -ENONET) | 62 | if (ret == -ENOMEM || ret == -ENONET) |
131 | goto out; | 63 | goto out; |
132 | goto rotate; | 64 | goto rotate; |
133 | case -ENOMEDIUM: | 65 | case -ENOMEDIUM: |
134 | afs_put_server(server); | ||
135 | goto out; | 66 | goto out; |
136 | default: | 67 | default: |
137 | afs_put_server(server); | 68 | ret = -EIO; |
138 | ret = -ENOMEDIUM; | ||
139 | goto rotate; | 69 | goto rotate; |
140 | } | 70 | } |
141 | 71 | ||
@@ -146,6 +76,7 @@ static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vlocation, | |||
146 | } | 76 | } |
147 | 77 | ||
148 | out: | 78 | out: |
79 | up_write(&vl->cell->vl_sem); | ||
149 | _leave(" = %d", ret); | 80 | _leave(" = %d", ret); |
150 | return ret; | 81 | return ret; |
151 | } | 82 | } |
@@ -153,66 +84,56 @@ out: | |||
153 | /* | 84 | /* |
154 | * iterate through the VL servers in a cell until one of them admits knowing | 85 | * iterate through the VL servers in a cell until one of them admits knowing |
155 | * about the volume in question | 86 | * about the volume in question |
156 | * - caller must have cell->vl_sem write-locked | ||
157 | */ | 87 | */ |
158 | static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vlocation, | 88 | static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl, |
159 | afs_volid_t volid, | 89 | afs_volid_t volid, |
160 | afs_voltype_t voltype, | 90 | afs_voltype_t voltype, |
161 | struct afs_cache_vlocation *vldb) | 91 | struct afs_cache_vlocation *vldb) |
162 | { | 92 | { |
163 | struct afs_server *server = NULL; | 93 | struct afs_cell *cell = vl->cell; |
164 | struct afs_cell *cell = vlocation->cell; | 94 | struct in_addr addr; |
165 | int count, ret; | 95 | int count, ret; |
166 | 96 | ||
167 | _enter("%s,%x,%d,", cell->name, volid, voltype); | 97 | _enter("%s,%x,%d,", cell->name, volid, voltype); |
168 | 98 | ||
99 | down_write(&vl->cell->vl_sem); | ||
169 | ret = -ENOMEDIUM; | 100 | ret = -ENOMEDIUM; |
170 | for (count = cell->vl_naddrs; count > 0; count--) { | 101 | for (count = cell->vl_naddrs; count > 0; count--) { |
171 | _debug("CellServ[%hu]: %08x", | 102 | addr = cell->vl_addrs[cell->vl_curr_svix]; |
172 | cell->vl_curr_svix, | 103 | |
173 | cell->vl_addrs[cell->vl_curr_svix].s_addr); | 104 | _debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr); |
174 | |||
175 | /* try and create a server */ | ||
176 | ret = afs_server_lookup(cell, | ||
177 | &cell->vl_addrs[cell->vl_curr_svix], | ||
178 | &server); | ||
179 | switch (ret) { | ||
180 | case 0: | ||
181 | break; | ||
182 | case -ENOMEM: | ||
183 | case -ENONET: | ||
184 | goto out; | ||
185 | default: | ||
186 | goto rotate; | ||
187 | } | ||
188 | 105 | ||
189 | /* attempt to access the VL server */ | 106 | /* attempt to access the VL server */ |
190 | ret = afs_rxvl_get_entry_by_id(server, volid, voltype, vldb); | 107 | ret = afs_vl_get_entry_by_id(&addr, volid, voltype, vldb, |
108 | &afs_sync_call); | ||
191 | switch (ret) { | 109 | switch (ret) { |
192 | case 0: | 110 | case 0: |
193 | afs_put_server(server); | ||
194 | goto out; | 111 | goto out; |
195 | case -ENOMEM: | 112 | case -ENOMEM: |
196 | case -ENONET: | 113 | case -ENONET: |
197 | case -ENETUNREACH: | 114 | case -ENETUNREACH: |
198 | case -EHOSTUNREACH: | 115 | case -EHOSTUNREACH: |
199 | case -ECONNREFUSED: | 116 | case -ECONNREFUSED: |
200 | down_write(&server->sem); | ||
201 | if (server->vlserver) { | ||
202 | rxrpc_put_connection(server->vlserver); | ||
203 | server->vlserver = NULL; | ||
204 | } | ||
205 | up_write(&server->sem); | ||
206 | afs_put_server(server); | ||
207 | if (ret == -ENOMEM || ret == -ENONET) | 117 | if (ret == -ENOMEM || ret == -ENONET) |
208 | goto out; | 118 | goto out; |
209 | goto rotate; | 119 | goto rotate; |
120 | case -EBUSY: | ||
121 | vl->upd_busy_cnt++; | ||
122 | if (vl->upd_busy_cnt <= 3) { | ||
123 | if (vl->upd_busy_cnt > 1) { | ||
124 | /* second+ BUSY - sleep a little bit */ | ||
125 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
126 | schedule_timeout(1); | ||
127 | __set_current_state(TASK_RUNNING); | ||
128 | } | ||
129 | continue; | ||
130 | } | ||
131 | break; | ||
210 | case -ENOMEDIUM: | 132 | case -ENOMEDIUM: |
211 | afs_put_server(server); | 133 | vl->upd_rej_cnt++; |
212 | goto out; | 134 | goto rotate; |
213 | default: | 135 | default: |
214 | afs_put_server(server); | 136 | ret = -EIO; |
215 | ret = -ENOMEDIUM; | ||
216 | goto rotate; | 137 | goto rotate; |
217 | } | 138 | } |
218 | 139 | ||
@@ -220,150 +141,83 @@ static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vlocation, | |||
220 | rotate: | 141 | rotate: |
221 | cell->vl_curr_svix++; | 142 | cell->vl_curr_svix++; |
222 | cell->vl_curr_svix %= cell->vl_naddrs; | 143 | cell->vl_curr_svix %= cell->vl_naddrs; |
144 | vl->upd_busy_cnt = 0; | ||
223 | } | 145 | } |
224 | 146 | ||
225 | out: | 147 | out: |
148 | if (ret < 0 && vl->upd_rej_cnt > 0) { | ||
149 | printk(KERN_NOTICE "kAFS:" | ||
150 | " Active volume no longer valid '%s'\n", | ||
151 | vl->vldb.name); | ||
152 | vl->valid = 0; | ||
153 | ret = -ENOMEDIUM; | ||
154 | } | ||
155 | |||
156 | up_write(&vl->cell->vl_sem); | ||
226 | _leave(" = %d", ret); | 157 | _leave(" = %d", ret); |
227 | return ret; | 158 | return ret; |
228 | } | 159 | } |
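Both accessors now rotate through the cell's VL server addresses directly instead of going through per-server connection objects: hard network errors move on to the next address, VL-busy responses are retried up to three times with a short sleep, and a definite "no such volume" answer is counted so that exhausting the list can invalidate the record. A simplified userspace rendering of the by-ID rotation loop; the canned results and function names are illustrative, not from the patch:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Canned per-attempt results standing in for real VL.GetEntryByID RPCs:
 * server 0 refuses connections, server 1 is busy twice and then answers. */
static const int canned[] = { -ECONNREFUSED, -EBUSY, -EBUSY, 0 };
static unsigned attempt;

static int try_one_server(unsigned ix)
{
	(void) ix;
	return canned[attempt++];
}

/* Mirror of the rotation in afs_vlocation_access_vl_by_id(): hard errors
 * rotate to the next VL address, EBUSY is retried up to three times with a
 * short sleep, and a rejection is remembered as "volume gone". */
static int rotate_through_vl_servers(unsigned naddrs, unsigned *curr_ix)
{
	unsigned busy = 0, rejected = 0;
	unsigned count;
	int ret = -ENOMEDIUM;

	for (count = naddrs; count > 0; count--) {
		ret = try_one_server(*curr_ix);
		switch (ret) {
		case 0:
			return 0;			/* got an answer */
		case -EBUSY:
			if (++busy <= 3) {
				if (busy > 1)
					usleep(10000);	/* brief back-off */
				continue;		/* retry, still costs an iteration */
			}
			break;
		case -ENOMEDIUM:
			rejected++;			/* server says the volume doesn't exist */
			break;
		default:
			break;				/* network error: try the next server */
		}
		*curr_ix = (*curr_ix + 1) % naddrs;	/* rotate */
		busy = 0;
	}
	return rejected ? -ENOMEDIUM : ret;
}

int main(void)
{
	unsigned ix = 0;

	printf("lookup returned %d, ended on server %u\n",
	       rotate_through_vl_servers(4, &ix), ix);
	return 0;
}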
229 | 160 | ||
230 | /* | 161 | /* |
231 | * lookup volume location | 162 | * allocate a volume location record |
232 | * - caller must have cell->vol_sem write-locked | ||
233 | * - iterate through the VL servers in a cell until one of them admits knowing | ||
234 | * about the volume in question | ||
235 | * - lookup in the local cache if not able to find on the VL server | ||
236 | * - insert/update in the local cache if did get a VL response | ||
237 | */ | 163 | */ |
238 | int afs_vlocation_lookup(struct afs_cell *cell, | 164 | static struct afs_vlocation *afs_vlocation_alloc(struct afs_cell *cell, |
239 | const char *name, | 165 | const char *name, |
240 | unsigned namesz, | 166 | size_t namesz) |
241 | struct afs_vlocation **_vlocation) | ||
242 | { | 167 | { |
243 | struct afs_cache_vlocation vldb; | 168 | struct afs_vlocation *vl; |
244 | struct afs_vlocation *vlocation; | 169 | |
245 | afs_voltype_t voltype; | 170 | vl = kzalloc(sizeof(struct afs_vlocation), GFP_KERNEL); |
246 | afs_volid_t vid; | 171 | if (vl) { |
247 | int active = 0, ret; | 172 | vl->cell = cell; |
248 | 173 | vl->state = AFS_VL_NEW; | |
249 | _enter("{%s},%*.*s,%u,", cell->name, namesz, namesz, name, namesz); | 174 | atomic_set(&vl->usage, 1); |
250 | 175 | INIT_LIST_HEAD(&vl->link); | |
251 | if (namesz > sizeof(vlocation->vldb.name)) { | 176 | INIT_LIST_HEAD(&vl->grave); |
252 | _leave(" = -ENAMETOOLONG"); | 177 | INIT_LIST_HEAD(&vl->update); |
253 | return -ENAMETOOLONG; | 178 | init_waitqueue_head(&vl->waitq); |
254 | } | 179 | rwlock_init(&vl->lock); |
255 | 180 | memcpy(vl->vldb.name, name, namesz); | |
256 | /* search the cell's active list first */ | ||
257 | list_for_each_entry(vlocation, &cell->vl_list, link) { | ||
258 | if (namesz < sizeof(vlocation->vldb.name) && | ||
259 | vlocation->vldb.name[namesz] != '\0') | ||
260 | continue; | ||
261 | |||
262 | if (memcmp(vlocation->vldb.name, name, namesz) == 0) | ||
263 | goto found_in_memory; | ||
264 | } | ||
265 | |||
266 | /* search the cell's graveyard list second */ | ||
267 | spin_lock(&cell->vl_gylock); | ||
268 | list_for_each_entry(vlocation, &cell->vl_graveyard, link) { | ||
269 | if (namesz < sizeof(vlocation->vldb.name) && | ||
270 | vlocation->vldb.name[namesz] != '\0') | ||
271 | continue; | ||
272 | |||
273 | if (memcmp(vlocation->vldb.name, name, namesz) == 0) | ||
274 | goto found_in_graveyard; | ||
275 | } | ||
276 | spin_unlock(&cell->vl_gylock); | ||
277 | |||
278 | /* not in the cell's in-memory lists - create a new record */ | ||
279 | vlocation = kzalloc(sizeof(struct afs_vlocation), GFP_KERNEL); | ||
280 | if (!vlocation) | ||
281 | return -ENOMEM; | ||
282 | |||
283 | atomic_set(&vlocation->usage, 1); | ||
284 | INIT_LIST_HEAD(&vlocation->link); | ||
285 | rwlock_init(&vlocation->lock); | ||
286 | memcpy(vlocation->vldb.name, name, namesz); | ||
287 | |||
288 | afs_timer_init(&vlocation->timeout, &afs_vlocation_timer_ops); | ||
289 | afs_timer_init(&vlocation->upd_timer, &afs_vlocation_update_timer_ops); | ||
290 | afs_async_op_init(&vlocation->upd_op, &afs_vlocation_update_op_ops); | ||
291 | |||
292 | afs_get_cell(cell); | ||
293 | vlocation->cell = cell; | ||
294 | |||
295 | list_add_tail(&vlocation->link, &cell->vl_list); | ||
296 | |||
297 | #ifdef AFS_CACHING_SUPPORT | ||
298 | /* we want to store it in the cache, plus it might already be | ||
299 | * encached */ | ||
300 | cachefs_acquire_cookie(cell->cache, | ||
301 | &afs_volume_cache_index_def, | ||
302 | vlocation, | ||
303 | &vlocation->cache); | ||
304 | |||
305 | if (vlocation->valid) | ||
306 | goto found_in_cache; | ||
307 | #endif | ||
308 | |||
309 | /* try to look up an unknown volume in the cell VL databases by name */ | ||
310 | ret = afs_vlocation_access_vl_by_name(vlocation, name, namesz, &vldb); | ||
311 | if (ret < 0) { | ||
312 | printk("kAFS: failed to locate '%*.*s' in cell '%s'\n", | ||
313 | namesz, namesz, name, cell->name); | ||
314 | goto error; | ||
315 | } | 181 | } |
316 | 182 | ||
317 | goto found_on_vlserver; | 183 | _leave(" = %p", vl); |
318 | 184 | return vl; | |
319 | found_in_graveyard: | 185 | } |
320 | /* found in the graveyard - resurrect */ | ||
321 | _debug("found in graveyard"); | ||
322 | atomic_inc(&vlocation->usage); | ||
323 | list_move_tail(&vlocation->link, &cell->vl_list); | ||
324 | spin_unlock(&cell->vl_gylock); | ||
325 | |||
326 | afs_kafstimod_del_timer(&vlocation->timeout); | ||
327 | goto active; | ||
328 | |||
329 | found_in_memory: | ||
330 | /* found in memory - check to see if it's active */ | ||
331 | _debug("found in memory"); | ||
332 | atomic_inc(&vlocation->usage); | ||
333 | 186 | ||
334 | active: | 187 | /* |
335 | active = 1; | 188 | * update record if we found it in the cache |
189 | */ | ||
190 | static int afs_vlocation_update_record(struct afs_vlocation *vl, | ||
191 | struct afs_cache_vlocation *vldb) | ||
192 | { | ||
193 | afs_voltype_t voltype; | ||
194 | afs_volid_t vid; | ||
195 | int ret; | ||
336 | 196 | ||
337 | #ifdef AFS_CACHING_SUPPORT | ||
338 | found_in_cache: | ||
339 | #endif | ||
340 | /* try to look up a cached volume in the cell VL databases by ID */ | 197 | /* try to look up a cached volume in the cell VL databases by ID */ |
341 | _debug("found in cache"); | ||
342 | |||
343 | _debug("Locally Cached: %s %02x { %08x(%x) %08x(%x) %08x(%x) }", | 198 | _debug("Locally Cached: %s %02x { %08x(%x) %08x(%x) %08x(%x) }", |
344 | vlocation->vldb.name, | 199 | vl->vldb.name, |
345 | vlocation->vldb.vidmask, | 200 | vl->vldb.vidmask, |
346 | ntohl(vlocation->vldb.servers[0].s_addr), | 201 | ntohl(vl->vldb.servers[0].s_addr), |
347 | vlocation->vldb.srvtmask[0], | 202 | vl->vldb.srvtmask[0], |
348 | ntohl(vlocation->vldb.servers[1].s_addr), | 203 | ntohl(vl->vldb.servers[1].s_addr), |
349 | vlocation->vldb.srvtmask[1], | 204 | vl->vldb.srvtmask[1], |
350 | ntohl(vlocation->vldb.servers[2].s_addr), | 205 | ntohl(vl->vldb.servers[2].s_addr), |
351 | vlocation->vldb.srvtmask[2] | 206 | vl->vldb.srvtmask[2]); |
352 | ); | ||
353 | 207 | ||
354 | _debug("Vids: %08x %08x %08x", | 208 | _debug("Vids: %08x %08x %08x", |
355 | vlocation->vldb.vid[0], | 209 | vl->vldb.vid[0], |
356 | vlocation->vldb.vid[1], | 210 | vl->vldb.vid[1], |
357 | vlocation->vldb.vid[2]); | 211 | vl->vldb.vid[2]); |
358 | 212 | ||
359 | if (vlocation->vldb.vidmask & AFS_VOL_VTM_RW) { | 213 | if (vl->vldb.vidmask & AFS_VOL_VTM_RW) { |
360 | vid = vlocation->vldb.vid[0]; | 214 | vid = vl->vldb.vid[0]; |
361 | voltype = AFSVL_RWVOL; | 215 | voltype = AFSVL_RWVOL; |
362 | } else if (vlocation->vldb.vidmask & AFS_VOL_VTM_RO) { | 216 | } else if (vl->vldb.vidmask & AFS_VOL_VTM_RO) { |
363 | vid = vlocation->vldb.vid[1]; | 217 | vid = vl->vldb.vid[1]; |
364 | voltype = AFSVL_ROVOL; | 218 | voltype = AFSVL_ROVOL; |
365 | } else if (vlocation->vldb.vidmask & AFS_VOL_VTM_BAK) { | 219 | } else if (vl->vldb.vidmask & AFS_VOL_VTM_BAK) { |
366 | vid = vlocation->vldb.vid[2]; | 220 | vid = vl->vldb.vid[2]; |
367 | voltype = AFSVL_BACKVOL; | 221 | voltype = AFSVL_BACKVOL; |
368 | } else { | 222 | } else { |
369 | BUG(); | 223 | BUG(); |
@@ -371,551 +225,482 @@ found_in_cache: | |||
371 | voltype = 0; | 225 | voltype = 0; |
372 | } | 226 | } |
373 | 227 | ||
374 | ret = afs_vlocation_access_vl_by_id(vlocation, vid, voltype, &vldb); | 228 | /* contact the server to make sure the volume is still available |
229 | * - TODO: need to handle disconnected operation here | ||
230 | */ | ||
231 | ret = afs_vlocation_access_vl_by_id(vl, vid, voltype, vldb); | ||
375 | switch (ret) { | 232 | switch (ret) { |
376 | /* net error */ | 233 | /* net error */ |
377 | default: | 234 | default: |
378 | printk("kAFS: failed to volume '%*.*s' (%x) up in '%s': %d\n", | 235 | printk(KERN_WARNING "kAFS:" |
379 | namesz, namesz, name, vid, cell->name, ret); | 236 | " failed to update volume '%s' (%x) up in '%s': %d\n", |
380 | goto error; | 237 | vl->vldb.name, vid, vl->cell->name, ret); |
238 | _leave(" = %d", ret); | ||
239 | return ret; | ||
381 | 240 | ||
382 | /* pulled from local cache into memory */ | 241 | /* pulled from local cache into memory */ |
383 | case 0: | 242 | case 0: |
384 | goto found_on_vlserver; | 243 | _leave(" = 0"); |
244 | return 0; | ||
385 | 245 | ||
386 | /* uh oh... looks like the volume got deleted */ | 246 | /* uh oh... looks like the volume got deleted */ |
387 | case -ENOMEDIUM: | 247 | case -ENOMEDIUM: |
388 | printk("kAFS: volume '%*.*s' (%x) does not exist '%s'\n", | 248 | printk(KERN_ERR "kAFS:" |
389 | namesz, namesz, name, vid, cell->name); | 249 | " volume '%s' (%x) does not exist '%s'\n", |
250 | vl->vldb.name, vid, vl->cell->name); | ||
390 | 251 | ||
391 | /* TODO: make existing record unavailable */ | 252 | /* TODO: make existing record unavailable */ |
392 | goto error; | 253 | _leave(" = %d", ret); |
254 | return ret; | ||
393 | } | 255 | } |
256 | } | ||
394 | 257 | ||
395 | found_on_vlserver: | 258 | /* |
396 | _debug("Done VL Lookup: %*.*s %02x { %08x(%x) %08x(%x) %08x(%x) }", | 259 | * apply the update to a VL record |
397 | namesz, namesz, name, | 260 | */ |
398 | vldb.vidmask, | 261 | static void afs_vlocation_apply_update(struct afs_vlocation *vl, |
399 | ntohl(vldb.servers[0].s_addr), vldb.srvtmask[0], | 262 | struct afs_cache_vlocation *vldb) |
400 | ntohl(vldb.servers[1].s_addr), vldb.srvtmask[1], | 263 | { |
401 | ntohl(vldb.servers[2].s_addr), vldb.srvtmask[2] | 264 | _debug("Done VL Lookup: %s %02x { %08x(%x) %08x(%x) %08x(%x) }", |
402 | ); | 265 | vldb->name, vldb->vidmask, |
403 | 266 | ntohl(vldb->servers[0].s_addr), vldb->srvtmask[0], | |
404 | _debug("Vids: %08x %08x %08x", vldb.vid[0], vldb.vid[1], vldb.vid[2]); | 267 | ntohl(vldb->servers[1].s_addr), vldb->srvtmask[1], |
268 | ntohl(vldb->servers[2].s_addr), vldb->srvtmask[2]); | ||
405 | 269 | ||
406 | if ((namesz < sizeof(vlocation->vldb.name) && | 270 | _debug("Vids: %08x %08x %08x", |
407 | vlocation->vldb.name[namesz] != '\0') || | 271 | vldb->vid[0], vldb->vid[1], vldb->vid[2]); |
408 | memcmp(vldb.name, name, namesz) != 0) | ||
409 | printk("kAFS: name of volume '%*.*s' changed to '%s' on server\n", | ||
410 | namesz, namesz, name, vldb.name); | ||
411 | 272 | ||
412 | memcpy(&vlocation->vldb, &vldb, sizeof(vlocation->vldb)); | 273 | if (strcmp(vldb->name, vl->vldb.name) != 0) |
274 | printk(KERN_NOTICE "kAFS:" | ||
275 | " name of volume '%s' changed to '%s' on server\n", | ||
276 | vl->vldb.name, vldb->name); | ||
413 | 277 | ||
414 | afs_kafstimod_add_timer(&vlocation->upd_timer, 10 * HZ); | 278 | vl->vldb = *vldb; |
415 | 279 | ||
416 | #ifdef AFS_CACHING_SUPPORT | 280 | #ifdef AFS_CACHING_SUPPORT |
417 | /* update volume entry in local cache */ | 281 | /* update volume entry in local cache */ |
418 | cachefs_update_cookie(vlocation->cache); | 282 | cachefs_update_cookie(vl->cache); |
419 | #endif | ||
420 | |||
421 | *_vlocation = vlocation; | ||
422 | _leave(" = 0 (%p)",vlocation); | ||
423 | return 0; | ||
424 | |||
425 | error: | ||
426 | if (vlocation) { | ||
427 | if (active) { | ||
428 | __afs_put_vlocation(vlocation); | ||
429 | } else { | ||
430 | list_del(&vlocation->link); | ||
431 | #ifdef AFS_CACHING_SUPPORT | ||
432 | cachefs_relinquish_cookie(vlocation->cache, 0); | ||
433 | #endif | 283 | #endif |
434 | afs_put_cell(vlocation->cell); | ||
435 | kfree(vlocation); | ||
436 | } | ||
437 | } | ||
438 | |||
439 | _leave(" = %d", ret); | ||
440 | return ret; | ||
441 | } | 284 | } |
442 | 285 | ||
443 | /* | 286 | /* |
444 | * finish using a volume location record | 287 | * fill in a volume location record, consulting the cache and the VL server |
445 | * - caller must have cell->vol_sem write-locked | 288 | * both |
446 | */ | 289 | */ |
447 | static void __afs_put_vlocation(struct afs_vlocation *vlocation) | 290 | static int afs_vlocation_fill_in_record(struct afs_vlocation *vl) |
448 | { | 291 | { |
449 | struct afs_cell *cell; | 292 | struct afs_cache_vlocation vldb; |
293 | int ret; | ||
450 | 294 | ||
451 | if (!vlocation) | 295 | _enter(""); |
452 | return; | ||
453 | 296 | ||
454 | _enter("%s", vlocation->vldb.name); | 297 | ASSERTCMP(vl->valid, ==, 0); |
455 | 298 | ||
456 | cell = vlocation->cell; | 299 | memset(&vldb, 0, sizeof(vldb)); |
457 | 300 | ||
458 | /* sanity check */ | 301 | /* see if we have an in-cache copy (will set vl->valid if there is) */ |
459 | BUG_ON(atomic_read(&vlocation->usage) <= 0); | 302 | #ifdef AFS_CACHING_SUPPORT |
303 | cachefs_acquire_cookie(cell->cache, | ||
304 | &afs_volume_cache_index_def, | ||
305 | vlocation, | ||
306 | &vl->cache); | ||
307 | #endif | ||
460 | 308 | ||
461 | spin_lock(&cell->vl_gylock); | 309 | if (vl->valid) { |
462 | if (likely(!atomic_dec_and_test(&vlocation->usage))) { | 310 | /* try to update a known volume in the cell VL databases by |
463 | spin_unlock(&cell->vl_gylock); | 311 | * ID as the name may have changed */ |
464 | _leave(""); | 312 | _debug("found in cache"); |
465 | return; | 313 | ret = afs_vlocation_update_record(vl, &vldb); |
314 | } else { | ||
315 | /* try to look up an unknown volume in the cell VL databases by | ||
316 | * name */ | ||
317 | ret = afs_vlocation_access_vl_by_name(vl, &vldb); | ||
318 | if (ret < 0) { | ||
319 | printk("kAFS: failed to locate '%s' in cell '%s'\n", | ||
320 | vl->vldb.name, vl->cell->name); | ||
321 | return ret; | ||
322 | } | ||
466 | } | 323 | } |
467 | 324 | ||
468 | /* move to graveyard queue */ | 325 | afs_vlocation_apply_update(vl, &vldb); |
469 | list_move_tail(&vlocation->link,&cell->vl_graveyard); | 326 | _leave(" = 0"); |
470 | 327 | return 0; | |
471 | /* remove from pending timeout queue (refcounted if actually being | ||
472 | * updated) */ | ||
473 | list_del_init(&vlocation->upd_op.link); | ||
474 | |||
475 | /* time out in 10 secs */ | ||
476 | afs_kafstimod_del_timer(&vlocation->upd_timer); | ||
477 | afs_kafstimod_add_timer(&vlocation->timeout, 10 * HZ); | ||
478 | |||
479 | spin_unlock(&cell->vl_gylock); | ||
480 | |||
481 | _leave(" [killed]"); | ||
482 | } | 328 | } |
483 | 329 | ||
484 | /* | 330 | /* |
485 | * finish using a volume location record | 331 | * queue a vlocation record for updates |
486 | */ | 332 | */ |
487 | void afs_put_vlocation(struct afs_vlocation *vlocation) | 333 | void afs_vlocation_queue_for_updates(struct afs_vlocation *vl) |
488 | { | 334 | { |
489 | if (vlocation) { | 335 | struct afs_vlocation *xvl; |
490 | struct afs_cell *cell = vlocation->cell; | ||
491 | 336 | ||
492 | down_write(&cell->vl_sem); | 337 | /* wait at least 10 minutes before updating... */ |
493 | __afs_put_vlocation(vlocation); | 338 | vl->update_at = get_seconds() + afs_vlocation_update_timeout; |
494 | up_write(&cell->vl_sem); | 339 | |
340 | spin_lock(&afs_vlocation_updates_lock); | ||
341 | |||
342 | if (!list_empty(&afs_vlocation_updates)) { | ||
343 | /* ... but wait at least 1 second more than the newest record | ||
344 | * already queued so that we don't spam the VL server suddenly | ||
345 | * with lots of requests | ||
346 | */ | ||
347 | xvl = list_entry(afs_vlocation_updates.prev, | ||
348 | struct afs_vlocation, update); | ||
349 | if (vl->update_at <= xvl->update_at) | ||
350 | vl->update_at = xvl->update_at + 1; | ||
351 | } else { | ||
352 | queue_delayed_work(afs_vlocation_update_worker, | ||
353 | &afs_vlocation_update, | ||
354 | afs_vlocation_update_timeout * HZ); | ||
495 | } | 355 | } |
356 | |||
357 | list_add_tail(&vl->update, &afs_vlocation_updates); | ||
358 | spin_unlock(&afs_vlocation_updates_lock); | ||
496 | } | 359 | } |
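afs_vlocation_queue_for_updates() spreads records out deliberately: each one is scheduled about ten minutes ahead (afs_vlocation_update_timeout), but never less than a second after the newest record already queued, so a burst of volume lookups cannot turn into a burst of VL RPCs later. The spacing rule in isolation (plain C, times in seconds; names are illustrative):

#include <stdio.h>
#include <time.h>

#define UPDATE_INTERVAL (10 * 60)	/* ten minutes, as in the patch */

/* Compute when a newly queued record should update, given the update time of
 * the newest record already on the queue (or 0 if the queue is empty). */
static time_t next_update_at(time_t now, time_t newest_queued)
{
	time_t at = now + UPDATE_INTERVAL;

	if (newest_queued && at <= newest_queued)
		at = newest_queued + 1;	/* keep at least 1s between requests */
	return at;
}

int main(void)
{
	time_t now = time(NULL);
	time_t a = next_update_at(now, 0);	/* first record: now + 600s */
	time_t b = next_update_at(now, a);	/* second record: a + 1s */
	time_t c = next_update_at(now, b);	/* third record: b + 1s */

	printf("spacing: +%ld, +%ld, +%ld seconds\n",
	       (long) (a - now), (long) (b - now), (long) (c - now));
	return 0;
}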
497 | 360 | ||
498 | /* | 361 | /* |
499 | * timeout vlocation record | 362 | * lookup volume location |
500 | * - removes from the cell's graveyard if the usage count is zero | 363 | * - iterate through the VL servers in a cell until one of them admits knowing |
364 | * about the volume in question | ||
365 | * - lookup in the local cache if not able to find on the VL server | ||
366 | * - insert/update in the local cache if did get a VL response | ||
501 | */ | 367 | */ |
502 | void afs_vlocation_do_timeout(struct afs_vlocation *vlocation) | 368 | struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *cell, |
369 | const char *name, | ||
370 | size_t namesz) | ||
503 | { | 371 | { |
504 | struct afs_cell *cell; | 372 | struct afs_vlocation *vl; |
505 | 373 | int ret; | |
506 | _enter("%s", vlocation->vldb.name); | ||
507 | |||
508 | cell = vlocation->cell; | ||
509 | 374 | ||
510 | BUG_ON(atomic_read(&vlocation->usage) < 0); | 375 | _enter("{%s},%*.*s,%zu", |
376 | cell->name, (int) namesz, (int) namesz, name, namesz); | ||
511 | 377 | ||
512 | /* remove from graveyard if still dead */ | 378 | if (namesz > sizeof(vl->vldb.name)) { |
513 | spin_lock(&cell->vl_gylock); | 379 | _leave(" = -ENAMETOOLONG"); |
514 | if (atomic_read(&vlocation->usage) == 0) | 380 | return ERR_PTR(-ENAMETOOLONG); |
515 | list_del_init(&vlocation->link); | 381 | } |
516 | else | ||
517 | vlocation = NULL; | ||
518 | spin_unlock(&cell->vl_gylock); | ||
519 | 382 | ||
520 | if (!vlocation) { | 383 | /* see if we have an in-memory copy first */ |
521 | _leave(""); | 384 | down_write(&cell->vl_sem); |
522 | return; /* resurrected */ | 385 | spin_lock(&cell->vl_lock); |
386 | list_for_each_entry(vl, &cell->vl_list, link) { | ||
387 | if (vl->vldb.name[namesz] != '\0') | ||
388 | continue; | ||
389 | if (memcmp(vl->vldb.name, name, namesz) == 0) | ||
390 | goto found_in_memory; | ||
523 | } | 391 | } |
392 | spin_unlock(&cell->vl_lock); | ||
524 | 393 | ||
525 | /* we can now destroy it properly */ | 394 | /* not in the cell's in-memory lists - create a new record */ |
526 | #ifdef AFS_CACHING_SUPPORT | 395 | vl = afs_vlocation_alloc(cell, name, namesz); |
527 | cachefs_relinquish_cookie(vlocation->cache, 0); | 396 | if (!vl) { |
528 | #endif | 397 | up_write(&cell->vl_sem); |
529 | afs_put_cell(cell); | 398 | return ERR_PTR(-ENOMEM); |
399 | } | ||
530 | 400 | ||
531 | kfree(vlocation); | 401 | afs_get_cell(cell); |
532 | 402 | ||
533 | _leave(" [destroyed]"); | 403 | list_add_tail(&vl->link, &cell->vl_list); |
534 | } | 404 | vl->state = AFS_VL_CREATING; |
405 | up_write(&cell->vl_sem); | ||
535 | 406 | ||
536 | /* | 407 | fill_in_record: |
537 | * send an update operation to the currently selected server | 408 | ret = afs_vlocation_fill_in_record(vl); |
538 | */ | 409 | if (ret < 0) |
539 | static int afs_vlocation_update_begin(struct afs_vlocation *vlocation) | 410 | goto error_abandon; |
540 | { | 411 | vl->state = AFS_VL_VALID; |
541 | afs_voltype_t voltype; | 412 | wake_up(&vl->waitq); |
542 | afs_volid_t vid; | ||
543 | int ret; | ||
544 | 413 | ||
545 | _enter("%s{ufs=%u ucs=%u}", | 414 | /* schedule for regular updates */ |
546 | vlocation->vldb.name, | 415 | afs_vlocation_queue_for_updates(vl); |
547 | vlocation->upd_first_svix, | 416 | goto success; |
548 | vlocation->upd_curr_svix); | ||
549 | 417 | ||
550 | /* try to look up a cached volume in the cell VL databases by ID */ | 418 | found_in_memory: |
551 | if (vlocation->vldb.vidmask & AFS_VOL_VTM_RW) { | 419 | /* found in memory */ |
552 | vid = vlocation->vldb.vid[0]; | 420 | _debug("found in memory"); |
553 | voltype = AFSVL_RWVOL; | 421 | atomic_inc(&vl->usage); |
554 | } else if (vlocation->vldb.vidmask & AFS_VOL_VTM_RO) { | 422 | spin_unlock(&cell->vl_lock); |
555 | vid = vlocation->vldb.vid[1]; | 423 | if (!list_empty(&vl->grave)) { |
556 | voltype = AFSVL_ROVOL; | 424 | spin_lock(&afs_vlocation_graveyard_lock); |
557 | } else if (vlocation->vldb.vidmask & AFS_VOL_VTM_BAK) { | 425 | list_del_init(&vl->grave); |
558 | vid = vlocation->vldb.vid[2]; | 426 | spin_unlock(&afs_vlocation_graveyard_lock); |
559 | voltype = AFSVL_BACKVOL; | ||
560 | } else { | ||
561 | BUG(); | ||
562 | vid = 0; | ||
563 | voltype = 0; | ||
564 | } | 427 | } |
428 | up_write(&cell->vl_sem); | ||
565 | 429 | ||
566 | /* contact the chosen server */ | 430 | /* see if it was an abandoned record that we might try filling in */ |
567 | ret = afs_server_lookup( | 431 | while (vl->state != AFS_VL_VALID) { |
568 | vlocation->cell, | 432 | afs_vlocation_state_t state = vl->state; |
569 | &vlocation->cell->vl_addrs[vlocation->upd_curr_svix], | ||
570 | &vlocation->upd_op.server); | ||
571 | 433 | ||
572 | switch (ret) { | 434 | _debug("invalid [state %d]", state); |
573 | case 0: | ||
574 | break; | ||
575 | case -ENOMEM: | ||
576 | case -ENONET: | ||
577 | default: | ||
578 | _leave(" = %d", ret); | ||
579 | return ret; | ||
580 | } | ||
581 | 435 | ||
582 | /* initiate the update operation */ | 436 | if ((state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME)) { |
583 | ret = afs_rxvl_get_entry_by_id_async(&vlocation->upd_op, vid, voltype); | 437 | if (cmpxchg(&vl->state, state, AFS_VL_CREATING) == |
584 | if (ret < 0) { | 438 | state) |
585 | _leave(" = %d", ret); | 439 | goto fill_in_record; |
586 | return ret; | 440 | continue; |
441 | } | ||
442 | |||
443 | /* must now wait for creation or update by someone else to | ||
444 | * complete */ | ||
445 | _debug("wait"); | ||
446 | |||
447 | ret = wait_event_interruptible( | ||
448 | vl->waitq, | ||
449 | vl->state == AFS_VL_NEW || | ||
450 | vl->state == AFS_VL_VALID || | ||
451 | vl->state == AFS_VL_NO_VOLUME); | ||
452 | if (ret < 0) | ||
453 | goto error; | ||
587 | } | 454 | } |
588 | 455 | ||
456 | success: | ||
457 | _leave(" = %p",vl); | ||
458 | return vl; | ||
459 | |||
460 | error_abandon: | ||
461 | vl->state = AFS_VL_NEW; | ||
462 | wake_up(&vl->waitq); | ||
463 | error: | ||
464 | ASSERT(vl != NULL); | ||
465 | afs_put_vlocation(vl); | ||
589 | _leave(" = %d", ret); | 466 | _leave(" = %d", ret); |
590 | return ret; | 467 | return ERR_PTR(ret); |
591 | } | 468 | } |
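afs_vlocation_lookup() lets exactly one caller populate a record: whoever finds it in state AFS_VL_NEW (or AFS_VL_NO_VOLUME) claims it with cmpxchg(), moving it to AFS_VL_CREATING, while every other caller sleeps on the record's wait queue until the state settles again. A userspace analogue of that claim-or-wait pattern using C11 atomics and a condition variable; this illustrates the idea only and is not the kernel's implementation:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum rec_state { REC_NEW, REC_CREATING, REC_VALID, REC_NO_VOLUME };

struct record {
	_Atomic int state;
	pthread_mutex_t lock;
	pthread_cond_t settled;
};

/* Either claim the record for filling in, or sleep until whoever claimed it
 * has finished (state back to NEW, VALID or NO_VOLUME). */
static int lookup(struct record *rec)
{
	for (;;) {
		int state = atomic_load(&rec->state);

		if (state == REC_VALID)
			return 0;		/* someone already filled it in */

		if (state == REC_NEW || state == REC_NO_VOLUME) {
			/* try to become the thread that fills it in */
			if (atomic_compare_exchange_strong(&rec->state,
							   &state, REC_CREATING)) {
				/* ... contact the VL server here ... */
				pthread_mutex_lock(&rec->lock);
				atomic_store(&rec->state, REC_VALID);
				pthread_cond_broadcast(&rec->settled);
				pthread_mutex_unlock(&rec->lock);
				return 0;
			}
			continue;		/* lost the race; re-read the state */
		}

		/* someone else is creating it: wait for the state to settle */
		pthread_mutex_lock(&rec->lock);
		while (atomic_load(&rec->state) == REC_CREATING)
			pthread_cond_wait(&rec->settled, &rec->lock);
		pthread_mutex_unlock(&rec->lock);
	}
}

int main(void)
{
	struct record rec = {
		.state = REC_NEW,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.settled = PTHREAD_COND_INITIALIZER,
	};

	printf("lookup -> %d, state now %d (VALID)\n",
	       lookup(&rec), atomic_load(&rec.state));
	return 0;
}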
592 | 469 | ||
593 | /* | 470 | /* |
594 | * abandon updating a VL record | 471 | * finish using a volume location record |
595 | * - does not restart the update timer | ||
596 | */ | 472 | */ |
597 | static void afs_vlocation_update_abandon(struct afs_vlocation *vlocation, | 473 | void afs_put_vlocation(struct afs_vlocation *vl) |
598 | afs_vlocation_upd_t state, | ||
599 | int ret) | ||
600 | { | 474 | { |
601 | _enter("%s,%u", vlocation->vldb.name, state); | 475 | if (!vl) |
602 | 476 | return; | |
603 | if (ret < 0) | ||
604 | printk("kAFS: Abandoning VL update '%s': %d\n", | ||
605 | vlocation->vldb.name, ret); | ||
606 | |||
607 | /* discard the server record */ | ||
608 | afs_put_server(vlocation->upd_op.server); | ||
609 | vlocation->upd_op.server = NULL; | ||
610 | 477 | ||
611 | spin_lock(&afs_vlocation_update_lock); | 478 | _enter("%s", vl->vldb.name); |
612 | afs_vlocation_update = NULL; | ||
613 | vlocation->upd_state = state; | ||
614 | 479 | ||
615 | /* TODO: start updating next VL record on pending list */ | 480 | ASSERTCMP(atomic_read(&vl->usage), >, 0); |
616 | 481 | ||
617 | spin_unlock(&afs_vlocation_update_lock); | 482 | if (likely(!atomic_dec_and_test(&vl->usage))) { |
483 | _leave(""); | ||
484 | return; | ||
485 | } | ||
618 | 486 | ||
619 | _leave(""); | 487 | spin_lock(&afs_vlocation_graveyard_lock); |
488 | if (atomic_read(&vl->usage) == 0) { | ||
489 | _debug("buried"); | ||
490 | list_move_tail(&vl->grave, &afs_vlocation_graveyard); | ||
491 | vl->time_of_death = get_seconds(); | ||
492 | schedule_delayed_work(&afs_vlocation_reap, | ||
493 | afs_vlocation_timeout * HZ); | ||
494 | |||
495 | /* suspend updates on this record */ | ||
496 | if (!list_empty(&vl->update)) { | ||
497 | spin_lock(&afs_vlocation_updates_lock); | ||
498 | list_del_init(&vl->update); | ||
499 | spin_unlock(&afs_vlocation_updates_lock); | ||
500 | } | ||
501 | } | ||
502 | spin_unlock(&afs_vlocation_graveyard_lock); | ||
503 | _leave(" [killed?]"); | ||
620 | } | 504 | } |
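afs_put_vlocation() no longer frees the record when the last reference goes away; it parks it in a graveyard with a time of death, and the reaper destroys whatever has stayed dead for afs_vlocation_timeout seconds while quietly unhooking records that a later lookup resurrected. A minimal, single-threaded sketch of that deferred-destruction idea (the kernel additionally keeps the graveyard ordered most-dead-first and reschedules the reaper for the first not-yet-expired entry):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define DEATH_TIMEOUT 10	/* seconds, like afs_vlocation_timeout */

struct corpse {
	int usage;		/* may be bumped again by a later lookup */
	time_t time_of_death;
	struct corpse *next;
};

static struct corpse *graveyard;

/* Called when the refcount hits zero: park the record instead of freeing. */
static void bury(struct corpse *c)
{
	c->time_of_death = time(NULL);
	c->next = graveyard;
	graveyard = c;
}

/* Periodic reaper: free long-dead records, resurrect re-referenced ones. */
static void reap(void)
{
	struct corpse **pp = &graveyard, *c;
	time_t now = time(NULL);

	while ((c = *pp)) {
		if (c->usage > 0 || c->time_of_death + DEATH_TIMEOUT <= now) {
			*pp = c->next;		/* unhook either way */
			if (c->usage == 0)
				free(c);	/* really dead: destroy */
			/* else: resurrected, its new owner keeps it */
		} else {
			pp = &c->next;		/* not expired yet */
		}
	}
}

int main(void)
{
	struct corpse *c = calloc(1, sizeof(*c));

	bury(c);
	c->usage = 1;		/* pretend a new lookup resurrected it */
	reap();
	printf("graveyard %s\n", graveyard ? "not empty" : "empty");
	free(c);
	return 0;
}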
621 | 505 | ||
622 | /* | 506 | /* |
623 | * handle periodic update timeouts and busy retry timeouts | 507 | * destroy a dead volume location record |
624 | * - called from kafstimod | ||
625 | */ | 508 | */ |
626 | static void afs_vlocation_update_timer(struct afs_timer *timer) | 509 | static void afs_vlocation_destroy(struct afs_vlocation *vl) |
627 | { | 510 | { |
628 | struct afs_vlocation *vlocation = | 511 | _enter("%p", vl); |
629 | list_entry(timer, struct afs_vlocation, upd_timer); | ||
630 | int ret; | ||
631 | 512 | ||
632 | _enter("%s", vlocation->vldb.name); | 513 | #ifdef AFS_CACHING_SUPPORT |
514 | cachefs_relinquish_cookie(vl->cache, 0); | ||
515 | #endif | ||
633 | 516 | ||
634 | /* only update if not in the graveyard (defend against putting too) */ | 517 | afs_put_cell(vl->cell); |
635 | spin_lock(&vlocation->cell->vl_gylock); | 518 | kfree(vl); |
519 | } | ||
520 | |||
521 | /* | ||
522 | * reap dead volume location records | ||
523 | */ | ||
524 | static void afs_vlocation_reaper(struct work_struct *work) | ||
525 | { | ||
526 | LIST_HEAD(corpses); | ||
527 | struct afs_vlocation *vl; | ||
528 | unsigned long delay, expiry; | ||
529 | time_t now; | ||
636 | 530 | ||
637 | if (!atomic_read(&vlocation->usage)) | 531 | _enter(""); |
638 | goto out_unlock1; | ||
639 | 532 | ||
640 | spin_lock(&afs_vlocation_update_lock); | 533 | now = get_seconds(); |
534 | spin_lock(&afs_vlocation_graveyard_lock); | ||
535 | |||
536 | while (!list_empty(&afs_vlocation_graveyard)) { | ||
537 | vl = list_entry(afs_vlocation_graveyard.next, | ||
538 | struct afs_vlocation, grave); | ||
539 | |||
540 | _debug("check %p", vl); | ||
541 | |||
542 | /* the queue is ordered most dead first */ | ||
543 | expiry = vl->time_of_death + afs_vlocation_timeout; | ||
544 | if (expiry > now) { | ||
545 | delay = (expiry - now) * HZ; | ||
546 | _debug("delay %lu", delay); | ||
547 | if (!schedule_delayed_work(&afs_vlocation_reap, | ||
548 | delay)) { | ||
549 | cancel_delayed_work(&afs_vlocation_reap); | ||
550 | schedule_delayed_work(&afs_vlocation_reap, | ||
551 | delay); | ||
552 | } | ||
553 | break; | ||
554 | } | ||
641 | 555 | ||
642 | /* if we were woken up due to EBUSY sleep then restart immediately if | 556 | spin_lock(&vl->cell->vl_lock); |
643 | * possible or else jump to front of pending queue */ | 557 | if (atomic_read(&vl->usage) > 0) { |
644 | if (vlocation->upd_state == AFS_VLUPD_BUSYSLEEP) { | 558 | _debug("no reap"); |
645 | if (afs_vlocation_update) { | 559 | list_del_init(&vl->grave); |
646 | list_add(&vlocation->upd_op.link, | ||
647 | &afs_vlocation_update_pendq); | ||
648 | } else { | 560 | } else { |
649 | afs_get_vlocation(vlocation); | 561 | _debug("reap"); |
650 | afs_vlocation_update = vlocation; | 562 | list_move_tail(&vl->grave, &corpses); |
651 | vlocation->upd_state = AFS_VLUPD_INPROGRESS; | 563 | list_del_init(&vl->link); |
652 | } | 564 | } |
653 | goto out_unlock2; | 565 | spin_unlock(&vl->cell->vl_lock); |
654 | } | 566 | } |
655 | 567 | ||
656 | /* put on pending queue if there's already another update in progress */ | 568 | spin_unlock(&afs_vlocation_graveyard_lock); |
657 | if (afs_vlocation_update) { | ||
658 | vlocation->upd_state = AFS_VLUPD_PENDING; | ||
659 | list_add_tail(&vlocation->upd_op.link, | ||
660 | &afs_vlocation_update_pendq); | ||
661 | goto out_unlock2; | ||
662 | } | ||
663 | 569 | ||
664 | /* hold a ref on it while actually updating */ | 570 | /* now reap the corpses we've extracted */ |
665 | afs_get_vlocation(vlocation); | 571 | while (!list_empty(&corpses)) { |
666 | afs_vlocation_update = vlocation; | 572 | vl = list_entry(corpses.next, struct afs_vlocation, grave); |
667 | vlocation->upd_state = AFS_VLUPD_INPROGRESS; | 573 | list_del(&vl->grave); |
668 | 574 | afs_vlocation_destroy(vl); | |
669 | spin_unlock(&afs_vlocation_update_lock); | ||
670 | spin_unlock(&vlocation->cell->vl_gylock); | ||
671 | |||
672 | /* okay... we can start the update */ | ||
673 | _debug("BEGIN VL UPDATE [%s]", vlocation->vldb.name); | ||
674 | vlocation->upd_first_svix = vlocation->cell->vl_curr_svix; | ||
675 | vlocation->upd_curr_svix = vlocation->upd_first_svix; | ||
676 | vlocation->upd_rej_cnt = 0; | ||
677 | vlocation->upd_busy_cnt = 0; | ||
678 | |||
679 | ret = afs_vlocation_update_begin(vlocation); | ||
680 | if (ret < 0) { | ||
681 | afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, ret); | ||
682 | afs_kafstimod_add_timer(&vlocation->upd_timer, | ||
683 | AFS_VLDB_TIMEOUT); | ||
684 | afs_put_vlocation(vlocation); | ||
685 | } | 575 | } |
686 | 576 | ||
687 | _leave(""); | 577 | _leave(""); |
688 | return; | 578 | } |
689 | 579 | ||
690 | out_unlock2: | 580 | /* |
691 | spin_unlock(&afs_vlocation_update_lock); | 581 | * initialise the VL update process |
692 | out_unlock1: | 582 | */ |
693 | spin_unlock(&vlocation->cell->vl_gylock); | 583 | int __init afs_vlocation_update_init(void) |
694 | _leave(""); | 584 | { |
585 | afs_vlocation_update_worker = | ||
586 | create_singlethread_workqueue("kafs_vlupdated"); | ||
587 | return afs_vlocation_update_worker ? 0 : -ENOMEM; | ||
588 | } | ||
589 | |||
590 | /* | ||
591 | * discard all the volume location records for rmmod | ||
592 | */ | ||
593 | void __exit afs_vlocation_purge(void) | ||
594 | { | ||
595 | afs_vlocation_timeout = 0; | ||
596 | |||
597 | spin_lock(&afs_vlocation_updates_lock); | ||
598 | list_del_init(&afs_vlocation_updates); | ||
599 | spin_unlock(&afs_vlocation_updates_lock); | ||
600 | cancel_delayed_work(&afs_vlocation_update); | ||
601 | queue_delayed_work(afs_vlocation_update_worker, | ||
602 | &afs_vlocation_update, 0); | ||
603 | destroy_workqueue(afs_vlocation_update_worker); | ||
604 | |||
605 | cancel_delayed_work(&afs_vlocation_reap); | ||
606 | schedule_delayed_work(&afs_vlocation_reap, 0); | ||
695 | } | 607 | } |
696 | 608 | ||
697 | /* | 609 | /* |
698 | * attend to an update operation upon which an event happened | 610 | * update a volume location |
699 | * - called in kafsasyncd context | ||
700 | */ | 611 | */ |
701 | static void afs_vlocation_update_attend(struct afs_async_op *op) | 612 | static void afs_vlocation_updater(struct work_struct *work) |
702 | { | 613 | { |
703 | struct afs_cache_vlocation vldb; | 614 | struct afs_cache_vlocation vldb; |
704 | struct afs_vlocation *vlocation = | 615 | struct afs_vlocation *vl, *xvl; |
705 | list_entry(op, struct afs_vlocation, upd_op); | 616 | time_t now; |
706 | unsigned tmp; | 617 | long timeout; |
707 | int ret; | 618 | int ret; |
708 | 619 | ||
709 | _enter("%s", vlocation->vldb.name); | 620 | _enter(""); |
710 | |||
711 | ret = afs_rxvl_get_entry_by_id_async2(op, &vldb); | ||
712 | switch (ret) { | ||
713 | case -EAGAIN: | ||
714 | _leave(" [unfinished]"); | ||
715 | return; | ||
716 | |||
717 | case 0: | ||
718 | _debug("END VL UPDATE: %d\n", ret); | ||
719 | vlocation->valid = 1; | ||
720 | |||
721 | _debug("Done VL Lookup: %02x { %08x(%x) %08x(%x) %08x(%x) }", | ||
722 | vldb.vidmask, | ||
723 | ntohl(vldb.servers[0].s_addr), vldb.srvtmask[0], | ||
724 | ntohl(vldb.servers[1].s_addr), vldb.srvtmask[1], | ||
725 | ntohl(vldb.servers[2].s_addr), vldb.srvtmask[2] | ||
726 | ); | ||
727 | |||
728 | _debug("Vids: %08x %08x %08x", | ||
729 | vldb.vid[0], vldb.vid[1], vldb.vid[2]); | ||
730 | |||
731 | afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, 0); | ||
732 | |||
733 | down_write(&vlocation->cell->vl_sem); | ||
734 | |||
735 | /* actually update the cache */ | ||
736 | if (strncmp(vldb.name, vlocation->vldb.name, | ||
737 | sizeof(vlocation->vldb.name)) != 0) | ||
738 | printk("kAFS: name of volume '%s'" | ||
739 | " changed to '%s' on server\n", | ||
740 | vlocation->vldb.name, vldb.name); | ||
741 | |||
742 | memcpy(&vlocation->vldb, &vldb, sizeof(vlocation->vldb)); | ||
743 | |||
744 | #if 0 | ||
745 | /* TODO update volume entry in local cache */ | ||
746 | #endif | ||
747 | |||
748 | up_write(&vlocation->cell->vl_sem); | ||
749 | |||
750 | if (ret < 0) | ||
751 | printk("kAFS: failed to update local cache: %d\n", ret); | ||
752 | |||
753 | afs_kafstimod_add_timer(&vlocation->upd_timer, | ||
754 | AFS_VLDB_TIMEOUT); | ||
755 | afs_put_vlocation(vlocation); | ||
756 | _leave(" [found]"); | ||
757 | return; | ||
758 | |||
759 | case -ENOMEDIUM: | ||
760 | vlocation->upd_rej_cnt++; | ||
761 | goto try_next; | ||
762 | |||
763 | /* the server is locked - retry in a very short while */ | ||
764 | case -EBUSY: | ||
765 | vlocation->upd_busy_cnt++; | ||
766 | if (vlocation->upd_busy_cnt > 3) | ||
767 | goto try_next; /* too many retries */ | ||
768 | |||
769 | afs_vlocation_update_abandon(vlocation, | ||
770 | AFS_VLUPD_BUSYSLEEP, 0); | ||
771 | afs_kafstimod_add_timer(&vlocation->upd_timer, HZ / 2); | ||
772 | afs_put_vlocation(vlocation); | ||
773 | _leave(" [busy]"); | ||
774 | return; | ||
775 | |||
776 | case -ENETUNREACH: | ||
777 | case -EHOSTUNREACH: | ||
778 | case -ECONNREFUSED: | ||
779 | case -EREMOTEIO: | ||
780 | /* record bad vlserver info in the cell too | ||
781 | * - TODO: use down_write_trylock() if available | ||
782 | */ | ||
783 | if (vlocation->upd_curr_svix == vlocation->cell->vl_curr_svix) | ||
784 | vlocation->cell->vl_curr_svix = | ||
785 | vlocation->cell->vl_curr_svix % | ||
786 | vlocation->cell->vl_naddrs; | ||
787 | |||
788 | case -EBADRQC: | ||
789 | case -EINVAL: | ||
790 | case -EACCES: | ||
791 | case -EBADMSG: | ||
792 | goto try_next; | ||
793 | |||
794 | default: | ||
795 | goto abandon; | ||
796 | } | ||
797 | |||
798 | /* try contacting the next server */ | ||
799 | try_next: | ||
800 | vlocation->upd_busy_cnt = 0; | ||
801 | |||
802 | /* discard the server record */ | ||
803 | afs_put_server(vlocation->upd_op.server); | ||
804 | vlocation->upd_op.server = NULL; | ||
805 | 621 | ||
806 | tmp = vlocation->cell->vl_naddrs; | 622 | now = get_seconds(); |
807 | if (tmp == 0) | ||
808 | goto abandon; | ||
809 | 623 | ||
810 | vlocation->upd_curr_svix++; | 624 | /* find a record to update */ |
811 | if (vlocation->upd_curr_svix >= tmp) | 625 | spin_lock(&afs_vlocation_updates_lock); |
812 | vlocation->upd_curr_svix = 0; | 626 | for (;;) { |
813 | if (vlocation->upd_first_svix >= tmp) | 627 | if (list_empty(&afs_vlocation_updates)) { |
814 | vlocation->upd_first_svix = tmp - 1; | 628 | spin_unlock(&afs_vlocation_updates_lock); |
629 | _leave(" [nothing]"); | ||
630 | return; | ||
631 | } | ||
815 | 632 | ||
816 | /* move to the next server */ | 633 | vl = list_entry(afs_vlocation_updates.next, |
817 | if (vlocation->upd_curr_svix != vlocation->upd_first_svix) { | 634 | struct afs_vlocation, update); |
818 | afs_vlocation_update_begin(vlocation); | 635 | if (atomic_read(&vl->usage) > 0) |
819 | _leave(" [next]"); | 636 | break; |
820 | return; | 637 | list_del_init(&vl->update); |
821 | } | 638 | } |
822 | 639 | ||
823 | /* run out of servers to try - was the volume rejected? */ | 640 | timeout = vl->update_at - now; |
824 | if (vlocation->upd_rej_cnt > 0) { | 641 | if (timeout > 0) { |
825 | printk("kAFS: Active volume no longer valid '%s'\n", | 642 | queue_delayed_work(afs_vlocation_update_worker, |
826 | vlocation->vldb.name); | 643 | &afs_vlocation_update, timeout * HZ); |
827 | vlocation->valid = 0; | 644 | spin_unlock(&afs_vlocation_updates_lock); |
828 | afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, 0); | 645 | _leave(" [nothing]"); |
829 | afs_kafstimod_add_timer(&vlocation->upd_timer, | ||
830 | AFS_VLDB_TIMEOUT); | ||
831 | afs_put_vlocation(vlocation); | ||
832 | _leave(" [invalidated]"); | ||
833 | return; | 646 | return; |
834 | } | 647 | } |
835 | 648 | ||
836 | /* abandon the update */ | 649 | list_del_init(&vl->update); |
837 | abandon: | 650 | atomic_inc(&vl->usage); |
838 | afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, ret); | 651 | spin_unlock(&afs_vlocation_updates_lock); |
839 | afs_kafstimod_add_timer(&vlocation->upd_timer, HZ * 10); | ||
840 | afs_put_vlocation(vlocation); | ||
841 | _leave(" [abandoned]"); | ||
842 | } | ||
843 | 652 | ||
844 | /* | 653 | /* we can now perform the update */ |
845 | * deal with an update operation being discarded | 654 | _debug("update %s", vl->vldb.name); |
846 | * - called in kafsasyncd context when it's dying due to rmmod | 655 | vl->state = AFS_VL_UPDATING; |
847 | * - the call has already been aborted and put()'d | 656 | vl->upd_rej_cnt = 0; |
848 | */ | 657 | vl->upd_busy_cnt = 0; |
849 | static void afs_vlocation_update_discard(struct afs_async_op *op) | ||
850 | { | ||
851 | struct afs_vlocation *vlocation = | ||
852 | list_entry(op, struct afs_vlocation, upd_op); | ||
853 | |||
854 | _enter("%s", vlocation->vldb.name); | ||
855 | 658 | ||
856 | afs_put_server(op->server); | 659 | ret = afs_vlocation_update_record(vl, &vldb); |
857 | op->server = NULL; | 660 | switch (ret) { |
858 | 661 | case 0: | |
859 | afs_put_vlocation(vlocation); | 662 | afs_vlocation_apply_update(vl, &vldb); |
663 | vl->state = AFS_VL_VALID; | ||
664 | break; | ||
665 | case -ENOMEDIUM: | ||
666 | vl->state = AFS_VL_VOLUME_DELETED; | ||
667 | break; | ||
668 | default: | ||
669 | vl->state = AFS_VL_UNCERTAIN; | ||
670 | break; | ||
671 | } | ||
860 | 672 | ||
861 | _leave(""); | 673 | /* and then reschedule */ |
862 | } | 674 | _debug("reschedule"); |
675 | vl->update_at = get_seconds() + afs_vlocation_update_timeout; | ||
863 | 676 | ||
864 | /* | 677 | spin_lock(&afs_vlocation_updates_lock); |
865 | * match a VLDB record stored in the cache | ||
866 | * - may also load target from entry | ||
867 | */ | ||
868 | #ifdef AFS_CACHING_SUPPORT | ||
869 | static cachefs_match_val_t afs_vlocation_cache_match(void *target, | ||
870 | const void *entry) | ||
871 | { | ||
872 | const struct afs_cache_vlocation *vldb = entry; | ||
873 | struct afs_vlocation *vlocation = target; | ||
874 | |||
875 | _enter("{%s},{%s}", vlocation->vldb.name, vldb->name); | ||
876 | |||
877 | if (strncmp(vlocation->vldb.name, vldb->name, sizeof(vldb->name)) == 0 | ||
878 | ) { | ||
879 | if (!vlocation->valid || | ||
880 | vlocation->vldb.rtime == vldb->rtime | ||
881 | ) { | ||
882 | vlocation->vldb = *vldb; | ||
883 | vlocation->valid = 1; | ||
884 | _leave(" = SUCCESS [c->m]"); | ||
885 | return CACHEFS_MATCH_SUCCESS; | ||
886 | } else if (memcmp(&vlocation->vldb, vldb, sizeof(*vldb)) != 0) { | ||
887 | /* delete if VIDs for this name differ */ | ||
888 | if (memcmp(&vlocation->vldb.vid, | ||
889 | &vldb->vid, | ||
890 | sizeof(vldb->vid)) != 0) { | ||
891 | _leave(" = DELETE"); | ||
892 | return CACHEFS_MATCH_SUCCESS_DELETE; | ||
893 | } | ||
894 | 678 | ||
895 | _leave(" = UPDATE"); | 679 | if (!list_empty(&afs_vlocation_updates)) { |
896 | return CACHEFS_MATCH_SUCCESS_UPDATE; | 680 | /* next update in 10 minutes, but wait at least 1 second more |
897 | } else { | 681 | * than the newest record already queued so that we don't spam |
898 | _leave(" = SUCCESS"); | 682 | * the VL server suddenly with lots of requests |
899 | return CACHEFS_MATCH_SUCCESS; | 683 | */ |
900 | } | 684 | xvl = list_entry(afs_vlocation_updates.prev, |
685 | struct afs_vlocation, update); | ||
686 | if (vl->update_at <= xvl->update_at) | ||
687 | vl->update_at = xvl->update_at + 1; | ||
688 | xvl = list_entry(afs_vlocation_updates.next, | ||
689 | struct afs_vlocation, update); | ||
690 | timeout = xvl->update_at - now; | ||
691 | if (timeout < 0) | ||
692 | timeout = 0; | ||
693 | } else { | ||
694 | timeout = afs_vlocation_update_timeout; | ||
901 | } | 695 | } |
902 | 696 | ||
903 | _leave(" = FAILED"); | 697 | ASSERT(list_empty(&vl->update)); |
904 | return CACHEFS_MATCH_FAILED; | ||
905 | } | ||
906 | #endif | ||
907 | 698 | ||
908 | /* | 699 | list_add_tail(&vl->update, &afs_vlocation_updates); |
909 | * update a VLDB record stored in the cache | ||
910 | */ | ||
911 | #ifdef AFS_CACHING_SUPPORT | ||
912 | static void afs_vlocation_cache_update(void *source, void *entry) | ||
913 | { | ||
914 | struct afs_cache_vlocation *vldb = entry; | ||
915 | struct afs_vlocation *vlocation = source; | ||
916 | |||
917 | _enter(""); | ||
918 | 700 | ||
919 | *vldb = vlocation->vldb; | 701 | _debug("timeout %ld", timeout); |
702 | queue_delayed_work(afs_vlocation_update_worker, | ||
703 | &afs_vlocation_update, timeout * HZ); | ||
704 | spin_unlock(&afs_vlocation_updates_lock); | ||
705 | afs_put_vlocation(vl); | ||
920 | } | 706 | } |
921 | #endif | ||
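The replacement vlocation.c (right-hand column above) drives VLDB record refreshes from a single delayed work item: when a record has been updated it is pushed to the back of afs_vlocation_updates with its next update pencilled in roughly ten minutes ahead, staggered to land at least one second after the newest record already queued so that a burst of records cannot hammer the VL server, and the work item is requeued to fire when the oldest queued record falls due. Below is a minimal user-space sketch of just that arithmetic; UPDATE_TIMEOUT, struct vl_record and reschedule() are illustrative names, not the kernel symbols.

/*
 * Simplified, user-space sketch (not the kernel code) of the rescheduling
 * rule used above.  Assumes both queue pointers are non-NULL whenever the
 * queue is non-empty, as in the kernel's list handling.
 */
#include <stdio.h>
#include <time.h>

#define UPDATE_TIMEOUT (10 * 60)	/* nominal 10 minute refresh */

struct vl_record {
	time_t update_at;		/* absolute time of next update */
};

/* work out when 'vl' should next update and what delay to queue the work for */
static long reschedule(struct vl_record *vl,
		       const struct vl_record *queue_tail,	/* newest queued */
		       const struct vl_record *queue_head,	/* oldest queued */
		       time_t now)
{
	long timeout;

	vl->update_at = now + UPDATE_TIMEOUT;

	if (queue_tail) {
		/* stagger at least 1s after the newest queued record so the
		 * VL server isn't hit with a sudden burst of requests */
		if (vl->update_at <= queue_tail->update_at)
			vl->update_at = queue_tail->update_at + 1;
		timeout = queue_head->update_at - now;
		if (timeout < 0)
			timeout = 0;
	} else {
		timeout = UPDATE_TIMEOUT;
	}
	return timeout;
}

int main(void)
{
	time_t now = time(NULL);
	struct vl_record head = { .update_at = now + 30 };
	struct vl_record tail = { .update_at = now + UPDATE_TIMEOUT + 5 };
	struct vl_record vl;

	printf("queue delay %lds, record due in %lds\n",
	       reschedule(&vl, &tail, &head, now),
	       (long)(vl.update_at - now));
	return 0;
}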
diff --git a/fs/afs/vnode.c b/fs/afs/vnode.c index 4ab1ed71028..d2ca1398474 100644 --- a/fs/afs/vnode.c +++ b/fs/afs/vnode.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* AFS vnode management | 1 | /* AFS vnode management |
2 | * | 2 | * |
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -14,72 +14,183 @@ | |||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
17 | #include <linux/pagemap.h> | ||
18 | #include "volume.h" | ||
19 | #include "cell.h" | ||
20 | #include "cmservice.h" | ||
21 | #include "fsclient.h" | ||
22 | #include "vlclient.h" | ||
23 | #include "vnode.h" | ||
24 | #include "internal.h" | 17 | #include "internal.h" |
25 | 18 | ||
26 | static void afs_vnode_cb_timed_out(struct afs_timer *timer); | 19 | #if 0 |
20 | static noinline bool dump_tree_aux(struct rb_node *node, struct rb_node *parent, | ||
21 | int depth, char lr) | ||
22 | { | ||
23 | struct afs_vnode *vnode; | ||
24 | bool bad = false; | ||
25 | |||
26 | if (!node) | ||
27 | return false; | ||
28 | |||
29 | if (node->rb_left) | ||
30 | bad = dump_tree_aux(node->rb_left, node, depth + 2, '/'); | ||
31 | |||
32 | vnode = rb_entry(node, struct afs_vnode, cb_promise); | ||
33 | kdebug("%c %*.*s%c%p {%d}", | ||
34 | rb_is_red(node) ? 'R' : 'B', | ||
35 | depth, depth, "", lr, | ||
36 | vnode, vnode->cb_expires_at); | ||
37 | if (rb_parent(node) != parent) { | ||
38 | printk("BAD: %p != %p\n", rb_parent(node), parent); | ||
39 | bad = true; | ||
40 | } | ||
27 | 41 | ||
28 | struct afs_timer_ops afs_vnode_cb_timed_out_ops = { | 42 | if (node->rb_right) |
29 | .timed_out = afs_vnode_cb_timed_out, | 43 | bad |= dump_tree_aux(node->rb_right, node, depth + 2, '\\'); |
30 | }; | ||
31 | 44 | ||
32 | #ifdef AFS_CACHING_SUPPORT | 45 | return bad; |
33 | static cachefs_match_val_t afs_vnode_cache_match(void *target, | 46 | } |
34 | const void *entry); | ||
35 | static void afs_vnode_cache_update(void *source, void *entry); | ||
36 | 47 | ||
37 | struct cachefs_index_def afs_vnode_cache_index_def = { | 48 | static noinline void dump_tree(const char *name, struct afs_server *server) |
38 | .name = "vnode", | 49 | { |
39 | .data_size = sizeof(struct afs_cache_vnode), | 50 | kenter("%s", name); |
40 | .keys[0] = { CACHEFS_INDEX_KEYS_BIN, 4 }, | 51 | if (dump_tree_aux(server->cb_promises.rb_node, NULL, 0, '-')) |
41 | .match = afs_vnode_cache_match, | 52 | BUG(); |
42 | .update = afs_vnode_cache_update, | 53 | } |
43 | }; | ||
44 | #endif | 54 | #endif |
45 | 55 | ||
46 | /* | 56 | /* |
47 | * handle a callback timing out | 57 | * insert a vnode into the backing server's vnode tree |
48 | * TODO: retain a ref to vnode struct for an outstanding callback timeout | ||
49 | */ | 58 | */ |
50 | static void afs_vnode_cb_timed_out(struct afs_timer *timer) | 59 | static void afs_install_vnode(struct afs_vnode *vnode, |
60 | struct afs_server *server) | ||
51 | { | 61 | { |
52 | struct afs_server *oldserver; | 62 | struct afs_server *old_server = vnode->server; |
53 | struct afs_vnode *vnode; | 63 | struct afs_vnode *xvnode; |
64 | struct rb_node *parent, **p; | ||
54 | 65 | ||
55 | vnode = list_entry(timer, struct afs_vnode, cb_timeout); | 66 | _enter("%p,%p", vnode, server); |
56 | 67 | ||
57 | _enter("%p", vnode); | 68 | if (old_server) { |
69 | spin_lock(&old_server->fs_lock); | ||
70 | rb_erase(&vnode->server_rb, &old_server->fs_vnodes); | ||
71 | spin_unlock(&old_server->fs_lock); | ||
72 | } | ||
58 | 73 | ||
59 | /* set the changed flag in the vnode and release the server */ | 74 | afs_get_server(server); |
60 | spin_lock(&vnode->lock); | 75 | vnode->server = server; |
76 | afs_put_server(old_server); | ||
77 | |||
78 | /* insert into the server's vnode tree in FID order */ | ||
79 | spin_lock(&server->fs_lock); | ||
80 | |||
81 | parent = NULL; | ||
82 | p = &server->fs_vnodes.rb_node; | ||
83 | while (*p) { | ||
84 | parent = *p; | ||
85 | xvnode = rb_entry(parent, struct afs_vnode, server_rb); | ||
86 | if (vnode->fid.vid < xvnode->fid.vid) | ||
87 | p = &(*p)->rb_left; | ||
88 | else if (vnode->fid.vid > xvnode->fid.vid) | ||
89 | p = &(*p)->rb_right; | ||
90 | else if (vnode->fid.vnode < xvnode->fid.vnode) | ||
91 | p = &(*p)->rb_left; | ||
92 | else if (vnode->fid.vnode > xvnode->fid.vnode) | ||
93 | p = &(*p)->rb_right; | ||
94 | else if (vnode->fid.unique < xvnode->fid.unique) | ||
95 | p = &(*p)->rb_left; | ||
96 | else if (vnode->fid.unique > xvnode->fid.unique) | ||
97 | p = &(*p)->rb_right; | ||
98 | else | ||
99 | BUG(); /* can't happen unless afs_iget() malfunctions */ | ||
100 | } | ||
61 | 101 | ||
62 | oldserver = xchg(&vnode->cb_server, NULL); | 102 | rb_link_node(&vnode->server_rb, parent, p); |
63 | if (oldserver) { | 103 | rb_insert_color(&vnode->server_rb, &server->fs_vnodes); |
64 | vnode->flags |= AFS_VNODE_CHANGED; | ||
65 | 104 | ||
66 | spin_lock(&afs_cb_hash_lock); | 105 | spin_unlock(&server->fs_lock); |
67 | list_del_init(&vnode->cb_hash_link); | 106 | _leave(""); |
68 | spin_unlock(&afs_cb_hash_lock); | 107 | } |
69 | 108 | ||
70 | spin_lock(&oldserver->cb_lock); | 109 | /* |
71 | list_del_init(&vnode->cb_link); | 110 | * insert a vnode into the promising server's update/expiration tree |
72 | spin_unlock(&oldserver->cb_lock); | 111 | * - caller must hold vnode->lock |
112 | */ | ||
113 | static void afs_vnode_note_promise(struct afs_vnode *vnode, | ||
114 | struct afs_server *server) | ||
115 | { | ||
116 | struct afs_server *old_server; | ||
117 | struct afs_vnode *xvnode; | ||
118 | struct rb_node *parent, **p; | ||
119 | |||
120 | _enter("%p,%p", vnode, server); | ||
121 | |||
122 | ASSERT(server != NULL); | ||
123 | |||
124 | old_server = vnode->server; | ||
125 | if (vnode->cb_promised) { | ||
126 | if (server == old_server && | ||
127 | vnode->cb_expires == vnode->cb_expires_at) { | ||
128 | _leave(" [no change]"); | ||
129 | return; | ||
130 | } | ||
131 | |||
132 | spin_lock(&old_server->cb_lock); | ||
133 | if (vnode->cb_promised) { | ||
134 | _debug("delete"); | ||
135 | rb_erase(&vnode->cb_promise, &old_server->cb_promises); | ||
136 | vnode->cb_promised = false; | ||
137 | } | ||
138 | spin_unlock(&old_server->cb_lock); | ||
73 | } | 139 | } |
74 | 140 | ||
75 | spin_unlock(&vnode->lock); | 141 | if (vnode->server != server) |
142 | afs_install_vnode(vnode, server); | ||
143 | |||
144 | vnode->cb_expires_at = vnode->cb_expires; | ||
145 | _debug("PROMISE on %p {%lu}", | ||
146 | vnode, (unsigned long) vnode->cb_expires_at); | ||
147 | |||
148 | /* abuse an RB-tree to hold the expiration order (we may have multiple | ||
149 | * items with the same expiration time) */ | ||
150 | spin_lock(&server->cb_lock); | ||
151 | |||
152 | parent = NULL; | ||
153 | p = &server->cb_promises.rb_node; | ||
154 | while (*p) { | ||
155 | parent = *p; | ||
156 | xvnode = rb_entry(parent, struct afs_vnode, cb_promise); | ||
157 | if (vnode->cb_expires_at < xvnode->cb_expires_at) | ||
158 | p = &(*p)->rb_left; | ||
159 | else | ||
160 | p = &(*p)->rb_right; | ||
161 | } | ||
76 | 162 | ||
77 | afs_put_server(oldserver); | 163 | rb_link_node(&vnode->cb_promise, parent, p); |
164 | rb_insert_color(&vnode->cb_promise, &server->cb_promises); | ||
165 | vnode->cb_promised = true; | ||
78 | 166 | ||
167 | spin_unlock(&server->cb_lock); | ||
79 | _leave(""); | 168 | _leave(""); |
80 | } | 169 | } |
81 | 170 | ||
82 | /* | 171 | /* |
172 | * handle remote file deletion by discarding the callback promise | ||
173 | */ | ||
174 | static void afs_vnode_deleted_remotely(struct afs_vnode *vnode) | ||
175 | { | ||
176 | struct afs_server *server; | ||
177 | |||
178 | set_bit(AFS_VNODE_DELETED, &vnode->flags); | ||
179 | |||
180 | server = vnode->server; | ||
181 | if (vnode->cb_promised) { | ||
182 | spin_lock(&server->cb_lock); | ||
183 | if (vnode->cb_promised) { | ||
184 | rb_erase(&vnode->cb_promise, &server->cb_promises); | ||
185 | vnode->cb_promised = false; | ||
186 | } | ||
187 | spin_unlock(&server->cb_lock); | ||
188 | } | ||
189 | |||
190 | afs_put_server(server); | ||
191 | } | ||
192 | |||
193 | /* | ||
83 | * finish off updating the recorded status of a file | 194 | * finish off updating the recorded status of a file |
84 | * - starts callback expiry timer | 195 | * - starts callback expiry timer |
85 | * - adds to server's callback list | 196 | * - adds to server's callback list |
@@ -94,43 +205,19 @@ static void afs_vnode_finalise_status_update(struct afs_vnode *vnode, | |||
94 | 205 | ||
95 | spin_lock(&vnode->lock); | 206 | spin_lock(&vnode->lock); |
96 | 207 | ||
97 | vnode->flags &= ~AFS_VNODE_CHANGED; | 208 | clear_bit(AFS_VNODE_CB_BROKEN, &vnode->flags); |
98 | |||
99 | if (ret == 0) { | ||
100 | /* adjust the callback timeout appropriately */ | ||
101 | afs_kafstimod_add_timer(&vnode->cb_timeout, | ||
102 | vnode->cb_expiry * HZ); | ||
103 | |||
104 | spin_lock(&afs_cb_hash_lock); | ||
105 | list_move_tail(&vnode->cb_hash_link, | ||
106 | &afs_cb_hash(server, &vnode->fid)); | ||
107 | spin_unlock(&afs_cb_hash_lock); | ||
108 | |||
109 | /* swap ref to old callback server with that for new callback | ||
110 | * server */ | ||
111 | oldserver = xchg(&vnode->cb_server, server); | ||
112 | if (oldserver != server) { | ||
113 | if (oldserver) { | ||
114 | spin_lock(&oldserver->cb_lock); | ||
115 | list_del_init(&vnode->cb_link); | ||
116 | spin_unlock(&oldserver->cb_lock); | ||
117 | } | ||
118 | |||
119 | afs_get_server(server); | ||
120 | spin_lock(&server->cb_lock); | ||
121 | list_add_tail(&vnode->cb_link, &server->cb_promises); | ||
122 | spin_unlock(&server->cb_lock); | ||
123 | } else { | ||
124 | /* same server */ | ||
125 | oldserver = NULL; | ||
126 | } | ||
127 | } else if (ret == -ENOENT) { | ||
128 | /* the file was deleted - clear the callback timeout */ | ||
129 | oldserver = xchg(&vnode->cb_server, NULL); | ||
130 | afs_kafstimod_del_timer(&vnode->cb_timeout); | ||
131 | 209 | ||
210 | switch (ret) { | ||
211 | case 0: | ||
212 | afs_vnode_note_promise(vnode, server); | ||
213 | break; | ||
214 | case -ENOENT: | ||
215 | /* the file was deleted on the server */ | ||
132 | _debug("got NOENT from server - marking file deleted"); | 216 | _debug("got NOENT from server - marking file deleted"); |
133 | vnode->flags |= AFS_VNODE_DELETED; | 217 | afs_vnode_deleted_remotely(vnode); |
218 | break; | ||
219 | default: | ||
220 | break; | ||
134 | } | 221 | } |
135 | 222 | ||
136 | vnode->update_cnt--; | 223 | vnode->update_cnt--; |
@@ -162,19 +249,21 @@ int afs_vnode_fetch_status(struct afs_vnode *vnode) | |||
162 | vnode->volume->vlocation->vldb.name, | 249 | vnode->volume->vlocation->vldb.name, |
163 | vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); | 250 | vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); |
164 | 251 | ||
165 | if (!(vnode->flags & AFS_VNODE_CHANGED) && vnode->cb_server) { | 252 | if (!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) && |
253 | vnode->cb_promised) { | ||
166 | _leave(" [unchanged]"); | 254 | _leave(" [unchanged]"); |
167 | return 0; | 255 | return 0; |
168 | } | 256 | } |
169 | 257 | ||
170 | if (vnode->flags & AFS_VNODE_DELETED) { | 258 | if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { |
171 | _leave(" [deleted]"); | 259 | _leave(" [deleted]"); |
172 | return -ENOENT; | 260 | return -ENOENT; |
173 | } | 261 | } |
174 | 262 | ||
175 | spin_lock(&vnode->lock); | 263 | spin_lock(&vnode->lock); |
176 | 264 | ||
177 | if (!(vnode->flags & AFS_VNODE_CHANGED)) { | 265 | if (!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) && |
266 | vnode->cb_promised) { | ||
178 | spin_unlock(&vnode->lock); | 267 | spin_unlock(&vnode->lock); |
179 | _leave(" [unchanged]"); | 268 | _leave(" [unchanged]"); |
180 | return 0; | 269 | return 0; |
@@ -183,17 +272,18 @@ int afs_vnode_fetch_status(struct afs_vnode *vnode) | |||
183 | if (vnode->update_cnt > 0) { | 272 | if (vnode->update_cnt > 0) { |
184 | /* someone else started a fetch */ | 273 | /* someone else started a fetch */ |
185 | set_current_state(TASK_UNINTERRUPTIBLE); | 274 | set_current_state(TASK_UNINTERRUPTIBLE); |
275 | ASSERT(myself.func != NULL); | ||
186 | add_wait_queue(&vnode->update_waitq, &myself); | 276 | add_wait_queue(&vnode->update_waitq, &myself); |
187 | 277 | ||
188 | /* wait for the status to be updated */ | 278 | /* wait for the status to be updated */ |
189 | for (;;) { | 279 | for (;;) { |
190 | if (!(vnode->flags & AFS_VNODE_CHANGED)) | 280 | if (!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) |
191 | break; | 281 | break; |
192 | if (vnode->flags & AFS_VNODE_DELETED) | 282 | if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) |
193 | break; | 283 | break; |
194 | 284 | ||
195 | /* it got updated and invalidated all before we saw | 285 | /* check to see if it got updated and invalidated all |
196 | * it */ | 286 | * before we saw it */ |
197 | if (vnode->update_cnt == 0) { | 287 | if (vnode->update_cnt == 0) { |
198 | remove_wait_queue(&vnode->update_waitq, | 288 | remove_wait_queue(&vnode->update_waitq, |
199 | &myself); | 289 | &myself); |
@@ -213,7 +303,8 @@ int afs_vnode_fetch_status(struct afs_vnode *vnode) | |||
213 | spin_unlock(&vnode->lock); | 303 | spin_unlock(&vnode->lock); |
214 | set_current_state(TASK_RUNNING); | 304 | set_current_state(TASK_RUNNING); |
215 | 305 | ||
216 | return vnode->flags & AFS_VNODE_DELETED ? -ENOENT : 0; | 306 | return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? |
307 | -ENOENT : 0; | ||
217 | } | 308 | } |
218 | 309 | ||
219 | get_anyway: | 310 | get_anyway: |
@@ -226,15 +317,17 @@ get_anyway: | |||
226 | * vnode */ | 317 | * vnode */ |
227 | do { | 318 | do { |
228 | /* pick a server to query */ | 319 | /* pick a server to query */ |
229 | ret = afs_volume_pick_fileserver(vnode->volume, &server); | 320 | server = afs_volume_pick_fileserver(vnode); |
230 | if (ret<0) | 321 | if (IS_ERR(server)) |
231 | return ret; | 322 | return PTR_ERR(server); |
232 | 323 | ||
233 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | 324 | _debug("USING SERVER: %p{%08x}", |
325 | server, ntohl(server->addr.s_addr)); | ||
234 | 326 | ||
235 | ret = afs_rxfs_fetch_file_status(server, vnode, NULL); | 327 | ret = afs_fs_fetch_file_status(server, vnode, NULL, |
328 | &afs_sync_call); | ||
236 | 329 | ||
237 | } while (!afs_volume_release_fileserver(vnode->volume, server, ret)); | 330 | } while (!afs_volume_release_fileserver(vnode, server, ret)); |
238 | 331 | ||
239 | /* adjust the flags */ | 332 | /* adjust the flags */ |
240 | afs_vnode_finalise_status_update(vnode, server, ret); | 333 | afs_vnode_finalise_status_update(vnode, server, ret); |
@@ -247,8 +340,8 @@ get_anyway: | |||
247 | * fetch file data from the volume | 340 | * fetch file data from the volume |
248 | * - TODO implement caching and server failover | 341 | * - TODO implement caching and server failover |
249 | */ | 342 | */ |
250 | int afs_vnode_fetch_data(struct afs_vnode *vnode, | 343 | int afs_vnode_fetch_data(struct afs_vnode *vnode, off_t offset, size_t length, |
251 | struct afs_rxfs_fetch_descriptor *desc) | 344 | struct page *page) |
252 | { | 345 | { |
253 | struct afs_server *server; | 346 | struct afs_server *server; |
254 | int ret; | 347 | int ret; |
@@ -268,15 +361,16 @@ int afs_vnode_fetch_data(struct afs_vnode *vnode, | |||
268 | * vnode */ | 361 | * vnode */ |
269 | do { | 362 | do { |
270 | /* pick a server to query */ | 363 | /* pick a server to query */ |
271 | ret = afs_volume_pick_fileserver(vnode->volume, &server); | 364 | server = afs_volume_pick_fileserver(vnode); |
272 | if (ret < 0) | 365 | if (IS_ERR(server)) |
273 | return ret; | 366 | return PTR_ERR(server); |
274 | 367 | ||
275 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | 368 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); |
276 | 369 | ||
277 | ret = afs_rxfs_fetch_file_data(server, vnode, desc, NULL); | 370 | ret = afs_fs_fetch_data(server, vnode, offset, length, page, |
371 | NULL, &afs_sync_call); | ||
278 | 372 | ||
279 | } while (!afs_volume_release_fileserver(vnode->volume, server, ret)); | 373 | } while (!afs_volume_release_fileserver(vnode, server, ret)); |
280 | 374 | ||
281 | /* adjust the flags */ | 375 | /* adjust the flags */ |
282 | afs_vnode_finalise_status_update(vnode, server, ret); | 376 | afs_vnode_finalise_status_update(vnode, server, ret); |
@@ -284,99 +378,3 @@ int afs_vnode_fetch_data(struct afs_vnode *vnode, | |||
284 | _leave(" = %d", ret); | 378 | _leave(" = %d", ret); |
285 | return ret; | 379 | return ret; |
286 | } | 380 | } |
287 | |||
288 | /* | ||
289 | * break any outstanding callback on a vnode | ||
290 | * - only relevant to server that issued it | ||
291 | */ | ||
292 | int afs_vnode_give_up_callback(struct afs_vnode *vnode) | ||
293 | { | ||
294 | struct afs_server *server; | ||
295 | int ret; | ||
296 | |||
297 | _enter("%s,{%u,%u,%u}", | ||
298 | vnode->volume->vlocation->vldb.name, | ||
299 | vnode->fid.vid, | ||
300 | vnode->fid.vnode, | ||
301 | vnode->fid.unique); | ||
302 | |||
303 | spin_lock(&afs_cb_hash_lock); | ||
304 | list_del_init(&vnode->cb_hash_link); | ||
305 | spin_unlock(&afs_cb_hash_lock); | ||
306 | |||
307 | /* set the changed flag in the vnode and release the server */ | ||
308 | spin_lock(&vnode->lock); | ||
309 | |||
310 | afs_kafstimod_del_timer(&vnode->cb_timeout); | ||
311 | |||
312 | server = xchg(&vnode->cb_server, NULL); | ||
313 | if (server) { | ||
314 | vnode->flags |= AFS_VNODE_CHANGED; | ||
315 | |||
316 | spin_lock(&server->cb_lock); | ||
317 | list_del_init(&vnode->cb_link); | ||
318 | spin_unlock(&server->cb_lock); | ||
319 | } | ||
320 | |||
321 | spin_unlock(&vnode->lock); | ||
322 | |||
323 | ret = 0; | ||
324 | if (server) { | ||
325 | ret = afs_rxfs_give_up_callback(server, vnode); | ||
326 | afs_put_server(server); | ||
327 | } | ||
328 | |||
329 | _leave(" = %d", ret); | ||
330 | return ret; | ||
331 | } | ||
332 | |||
333 | /* | ||
334 | * match a vnode record stored in the cache | ||
335 | */ | ||
336 | #ifdef AFS_CACHING_SUPPORT | ||
337 | static cachefs_match_val_t afs_vnode_cache_match(void *target, | ||
338 | const void *entry) | ||
339 | { | ||
340 | const struct afs_cache_vnode *cvnode = entry; | ||
341 | struct afs_vnode *vnode = target; | ||
342 | |||
343 | _enter("{%x,%x,%Lx},{%x,%x,%Lx}", | ||
344 | vnode->fid.vnode, | ||
345 | vnode->fid.unique, | ||
346 | vnode->status.version, | ||
347 | cvnode->vnode_id, | ||
348 | cvnode->vnode_unique, | ||
349 | cvnode->data_version); | ||
350 | |||
351 | if (vnode->fid.vnode != cvnode->vnode_id) { | ||
352 | _leave(" = FAILED"); | ||
353 | return CACHEFS_MATCH_FAILED; | ||
354 | } | ||
355 | |||
356 | if (vnode->fid.unique != cvnode->vnode_unique || | ||
357 | vnode->status.version != cvnode->data_version) { | ||
358 | _leave(" = DELETE"); | ||
359 | return CACHEFS_MATCH_SUCCESS_DELETE; | ||
360 | } | ||
361 | |||
362 | _leave(" = SUCCESS"); | ||
363 | return CACHEFS_MATCH_SUCCESS; | ||
364 | } | ||
365 | #endif | ||
366 | |||
367 | /* | ||
368 | * update a vnode record stored in the cache | ||
369 | */ | ||
370 | #ifdef AFS_CACHING_SUPPORT | ||
371 | static void afs_vnode_cache_update(void *source, void *entry) | ||
372 | { | ||
373 | struct afs_cache_vnode *cvnode = entry; | ||
374 | struct afs_vnode *vnode = source; | ||
375 | |||
376 | _enter(""); | ||
377 | |||
378 | cvnode->vnode_id = vnode->fid.vnode; | ||
379 | cvnode->vnode_unique = vnode->fid.unique; | ||
380 | cvnode->data_version = vnode->status.version; | ||
381 | } | ||
382 | #endif | ||
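afs_install_vnode() above keeps every vnode a fileserver knows about in that server's fs_vnodes rb-tree, ordered by file identifier: volume ID first, then vnode number, then uniquifier, with an exact duplicate treated as a bug because afs_iget() should only instantiate a given FID once. The fragment below is a self-contained sketch of that three-level comparison; afs_fid_cmp() and struct fid_example are made-up names standing in for the comparison the kernel open-codes in the tree walk.

/* Illustrative FID ordering, mirroring the rb-tree walk above. */
#include <stdio.h>
#include <stdint.h>

struct fid_example {
	uint32_t vid;		/* volume ID */
	uint32_t vnode;		/* vnode number within the volume */
	uint32_t unique;	/* uniquifier (generation) */
};

static int afs_fid_cmp(const struct fid_example *a, const struct fid_example *b)
{
	if (a->vid != b->vid)
		return a->vid < b->vid ? -1 : 1;
	if (a->vnode != b->vnode)
		return a->vnode < b->vnode ? -1 : 1;
	if (a->unique != b->unique)
		return a->unique < b->unique ? -1 : 1;
	return 0;	/* duplicate FID: the kernel code BUG()s here */
}

int main(void)
{
	struct fid_example a = { 1, 42, 1 }, b = { 1, 42, 2 };

	printf("cmp = %d\n", afs_fid_cmp(&a, &b));	/* -1: a sorts to the left */
	return 0;
}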
diff --git a/fs/afs/vnode.h b/fs/afs/vnode.h deleted file mode 100644 index 7f6d05b197a..00000000000 --- a/fs/afs/vnode.h +++ /dev/null | |||
@@ -1,84 +0,0 @@ | |||
1 | /* AFS vnode record | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef AFS_VNODE_H | ||
13 | #define AFS_VNODE_H | ||
14 | |||
15 | #include <linux/fs.h> | ||
16 | #include "server.h" | ||
17 | #include "kafstimod.h" | ||
18 | #include "cache.h" | ||
19 | |||
20 | struct afs_rxfs_fetch_descriptor; | ||
21 | |||
22 | extern struct afs_timer_ops afs_vnode_cb_timed_out_ops; | ||
23 | |||
24 | /* | ||
25 | * vnode catalogue entry | ||
26 | */ | ||
27 | struct afs_cache_vnode { | ||
28 | afs_vnodeid_t vnode_id; /* vnode ID */ | ||
29 | unsigned vnode_unique; /* vnode ID uniquifier */ | ||
30 | afs_dataversion_t data_version; /* data version */ | ||
31 | }; | ||
32 | |||
33 | #ifdef AFS_CACHING_SUPPORT | ||
34 | extern struct cachefs_index_def afs_vnode_cache_index_def; | ||
35 | #endif | ||
36 | |||
37 | /* | ||
38 | * AFS inode private data | ||
39 | */ | ||
40 | struct afs_vnode { | ||
41 | struct inode vfs_inode; /* the VFS's inode record */ | ||
42 | |||
43 | struct afs_volume *volume; /* volume on which vnode resides */ | ||
44 | struct afs_fid fid; /* the file identifier for this inode */ | ||
45 | struct afs_file_status status; /* AFS status info for this file */ | ||
46 | #ifdef AFS_CACHING_SUPPORT | ||
47 | struct cachefs_cookie *cache; /* caching cookie */ | ||
48 | #endif | ||
49 | |||
50 | wait_queue_head_t update_waitq; /* status fetch waitqueue */ | ||
51 | unsigned update_cnt; /* number of outstanding ops that will update the | ||
52 | * status */ | ||
53 | spinlock_t lock; /* waitqueue/flags lock */ | ||
54 | unsigned flags; | ||
55 | #define AFS_VNODE_CHANGED 0x00000001 /* set if vnode reported changed by callback */ | ||
56 | #define AFS_VNODE_DELETED 0x00000002 /* set if vnode deleted on server */ | ||
57 | #define AFS_VNODE_MOUNTPOINT 0x00000004 /* set if vnode is a mountpoint symlink */ | ||
58 | |||
59 | /* outstanding callback notification on this file */ | ||
60 | struct afs_server *cb_server; /* server that made the current promise */ | ||
61 | struct list_head cb_link; /* link in server's promises list */ | ||
62 | struct list_head cb_hash_link; /* link in master callback hash */ | ||
63 | struct afs_timer cb_timeout; /* timeout on promise */ | ||
64 | unsigned cb_version; /* callback version */ | ||
65 | unsigned cb_expiry; /* callback expiry time */ | ||
66 | afs_callback_type_t cb_type; /* type of callback */ | ||
67 | }; | ||
68 | |||
69 | static inline struct afs_vnode *AFS_FS_I(struct inode *inode) | ||
70 | { | ||
71 | return container_of(inode, struct afs_vnode, vfs_inode); | ||
72 | } | ||
73 | |||
74 | static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode) | ||
75 | { | ||
76 | return &vnode->vfs_inode; | ||
77 | } | ||
78 | |||
79 | extern int afs_vnode_fetch_status(struct afs_vnode *); | ||
80 | extern int afs_vnode_fetch_data(struct afs_vnode *, | ||
81 | struct afs_rxfs_fetch_descriptor *); | ||
82 | extern int afs_vnode_give_up_callback(struct afs_vnode *); | ||
83 | |||
84 | #endif /* AFS_VNODE_H */ | ||
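The deleted vnode.h defined AFS_FS_I() with container_of(): given a pointer to the vfs_inode embedded in an afs_vnode, step back to the enclosing structure. Here is a user-space illustration of the same offsetof() arithmetic; the structures are placeholders, not the real afs_vnode layout.

#include <stdio.h>
#include <stddef.h>

/* user-space stand-in for the kernel's container_of() */
#define container_of_example(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inode_example { long i_ino; };

struct vnode_example {
	int flags;
	struct inode_example vfs_inode;	/* embedded, as in struct afs_vnode */
};

int main(void)
{
	struct vnode_example v = { .flags = 3, .vfs_inode = { .i_ino = 7 } };
	struct inode_example *inode = &v.vfs_inode;
	struct vnode_example *vnode =
		container_of_example(inode, struct vnode_example, vfs_inode);

	printf("recovered flags = %d\n", vnode->flags);	/* prints 3 */
	return 0;
}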
diff --git a/fs/afs/volume.c b/fs/afs/volume.c index c82e1bb4f2d..45491cfd4f4 100644 --- a/fs/afs/volume.c +++ b/fs/afs/volume.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* AFS volume management | 1 | /* AFS volume management |
2 | * | 2 | * |
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -15,33 +15,9 @@ | |||
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
17 | #include <linux/pagemap.h> | 17 | #include <linux/pagemap.h> |
18 | #include "volume.h" | ||
19 | #include "vnode.h" | ||
20 | #include "cell.h" | ||
21 | #include "cache.h" | ||
22 | #include "cmservice.h" | ||
23 | #include "fsclient.h" | ||
24 | #include "vlclient.h" | ||
25 | #include "internal.h" | 18 | #include "internal.h" |
26 | 19 | ||
27 | #ifdef __KDEBUG | ||
28 | static const char *afs_voltypes[] = { "R/W", "R/O", "BAK" }; | 20 | static const char *afs_voltypes[] = { "R/W", "R/O", "BAK" }; |
29 | #endif | ||
30 | |||
31 | #ifdef AFS_CACHING_SUPPORT | ||
32 | static cachefs_match_val_t afs_volume_cache_match(void *target, | ||
33 | const void *entry); | ||
34 | static void afs_volume_cache_update(void *source, void *entry); | ||
35 | |||
36 | struct cachefs_index_def afs_volume_cache_index_def = { | ||
37 | .name = "volume", | ||
38 | .data_size = sizeof(struct afs_cache_vhash), | ||
39 | .keys[0] = { CACHEFS_INDEX_KEYS_BIN, 1 }, | ||
40 | .keys[1] = { CACHEFS_INDEX_KEYS_BIN, 1 }, | ||
41 | .match = afs_volume_cache_match, | ||
42 | .update = afs_volume_cache_update, | ||
43 | }; | ||
44 | #endif | ||
45 | 21 | ||
46 | /* | 22 | /* |
47 | * lookup a volume by name | 23 | * lookup a volume by name |
@@ -65,11 +41,12 @@ struct cachefs_index_def afs_volume_cache_index_def = { | |||
65 | * - Rule 3: If parent volume is R/W, then only mount R/W volume unless | 41 | * - Rule 3: If parent volume is R/W, then only mount R/W volume unless |
66 | * explicitly told otherwise | 42 | * explicitly told otherwise |
67 | */ | 43 | */ |
68 | int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath, | 44 | struct afs_volume *afs_volume_lookup(const char *name, struct afs_cell *cell, |
69 | struct afs_volume **_volume) | 45 | int rwpath) |
70 | { | 46 | { |
71 | struct afs_vlocation *vlocation = NULL; | 47 | struct afs_vlocation *vlocation = NULL; |
72 | struct afs_volume *volume = NULL; | 48 | struct afs_volume *volume = NULL; |
49 | struct afs_server *server = NULL; | ||
73 | afs_voltype_t type; | 50 | afs_voltype_t type; |
74 | const char *cellname, *volname, *suffix; | 51 | const char *cellname, *volname, *suffix; |
75 | char srvtmask; | 52 | char srvtmask; |
@@ -79,7 +56,7 @@ int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath, | |||
79 | 56 | ||
80 | if (!name || (name[0] != '%' && name[0] != '#') || !name[1]) { | 57 | if (!name || (name[0] != '%' && name[0] != '#') || !name[1]) { |
81 | printk("kAFS: unparsable volume name\n"); | 58 | printk("kAFS: unparsable volume name\n"); |
82 | return -EINVAL; | 59 | return ERR_PTR(-EINVAL); |
83 | } | 60 | } |
84 | 61 | ||
85 | /* determine the type of volume we're looking for */ | 62 | /* determine the type of volume we're looking for */ |
@@ -128,8 +105,9 @@ int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath, | |||
128 | 105 | ||
129 | /* lookup the cell record */ | 106 | /* lookup the cell record */ |
130 | if (cellname || !cell) { | 107 | if (cellname || !cell) { |
131 | ret = afs_cell_lookup(cellname, cellnamesz, &cell); | 108 | cell = afs_cell_lookup(cellname, cellnamesz); |
132 | if (ret<0) { | 109 | if (IS_ERR(cell)) { |
110 | ret = PTR_ERR(cell); | ||
133 | printk("kAFS: unable to lookup cell '%s'\n", | 111 | printk("kAFS: unable to lookup cell '%s'\n", |
134 | cellname ?: ""); | 112 | cellname ?: ""); |
135 | goto error; | 113 | goto error; |
@@ -139,9 +117,12 @@ int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath, | |||
139 | } | 117 | } |
140 | 118 | ||
141 | /* lookup the volume location record */ | 119 | /* lookup the volume location record */ |
142 | ret = afs_vlocation_lookup(cell, volname, volnamesz, &vlocation); | 120 | vlocation = afs_vlocation_lookup(cell, volname, volnamesz); |
143 | if (ret < 0) | 121 | if (IS_ERR(vlocation)) { |
122 | ret = PTR_ERR(vlocation); | ||
123 | vlocation = NULL; | ||
144 | goto error; | 124 | goto error; |
125 | } | ||
145 | 126 | ||
146 | /* make the final decision on the type we want */ | 127 | /* make the final decision on the type we want */ |
147 | ret = -ENOMEDIUM; | 128 | ret = -ENOMEDIUM; |
@@ -192,13 +173,14 @@ int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath, | |||
192 | /* look up all the applicable server records */ | 173 | /* look up all the applicable server records */ |
193 | for (loop = 0; loop < 8; loop++) { | 174 | for (loop = 0; loop < 8; loop++) { |
194 | if (vlocation->vldb.srvtmask[loop] & (1 << volume->type)) { | 175 | if (vlocation->vldb.srvtmask[loop] & (1 << volume->type)) { |
195 | ret = afs_server_lookup( | 176 | server = afs_lookup_server( |
196 | volume->cell, | 177 | volume->cell, &vlocation->vldb.servers[loop]); |
197 | &vlocation->vldb.servers[loop], | 178 | if (IS_ERR(server)) { |
198 | &volume->servers[volume->nservers]); | 179 | ret = PTR_ERR(server); |
199 | if (ret < 0) | ||
200 | goto error_discard; | 180 | goto error_discard; |
181 | } | ||
201 | 182 | ||
183 | volume->servers[volume->nservers] = server; | ||
202 | volume->nservers++; | 184 | volume->nservers++; |
203 | } | 185 | } |
204 | } | 186 | } |
@@ -219,8 +201,11 @@ int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath, | |||
219 | success: | 201 | success: |
220 | _debug("kAFS selected %s volume %08x", | 202 | _debug("kAFS selected %s volume %08x", |
221 | afs_voltypes[volume->type], volume->vid); | 203 | afs_voltypes[volume->type], volume->vid); |
222 | *_volume = volume; | 204 | up_write(&cell->vl_sem); |
223 | ret = 0; | 205 | afs_put_vlocation(vlocation); |
206 | afs_put_cell(cell); | ||
207 | _leave(" = %p", volume); | ||
208 | return volume; | ||
224 | 209 | ||
225 | /* clean up */ | 210 | /* clean up */ |
226 | error_up: | 211 | error_up: |
@@ -228,9 +213,8 @@ error_up: | |||
228 | error: | 213 | error: |
229 | afs_put_vlocation(vlocation); | 214 | afs_put_vlocation(vlocation); |
230 | afs_put_cell(cell); | 215 | afs_put_cell(cell); |
231 | 216 | _leave(" = %d", ret); | |
232 | _leave(" = %d (%p)", ret, volume); | 217 | return ERR_PTR(ret); |
233 | return ret; | ||
234 | 218 | ||
235 | error_discard: | 219 | error_discard: |
236 | up_write(&cell->vl_sem); | 220 | up_write(&cell->vl_sem); |
@@ -255,10 +239,9 @@ void afs_put_volume(struct afs_volume *volume) | |||
255 | 239 | ||
256 | _enter("%p", volume); | 240 | _enter("%p", volume); |
257 | 241 | ||
258 | vlocation = volume->vlocation; | 242 | ASSERTCMP(atomic_read(&volume->usage), >, 0); |
259 | 243 | ||
260 | /* sanity check */ | 244 | vlocation = volume->vlocation; |
261 | BUG_ON(atomic_read(&volume->usage) <= 0); | ||
262 | 245 | ||
263 | /* to prevent a race, the decrement and the dequeue must be effectively | 246 | /* to prevent a race, the decrement and the dequeue must be effectively |
264 | * atomic */ | 247 | * atomic */ |
@@ -292,14 +275,21 @@ void afs_put_volume(struct afs_volume *volume) | |||
292 | * pick a server to use to try accessing this volume | 275 | * pick a server to use to try accessing this volume |
293 | * - returns with an elevated usage count on the server chosen | 276 | * - returns with an elevated usage count on the server chosen |
294 | */ | 277 | */ |
295 | int afs_volume_pick_fileserver(struct afs_volume *volume, | 278 | struct afs_server *afs_volume_pick_fileserver(struct afs_vnode *vnode) |
296 | struct afs_server **_server) | ||
297 | { | 279 | { |
280 | struct afs_volume *volume = vnode->volume; | ||
298 | struct afs_server *server; | 281 | struct afs_server *server; |
299 | int ret, state, loop; | 282 | int ret, state, loop; |
300 | 283 | ||
301 | _enter("%s", volume->vlocation->vldb.name); | 284 | _enter("%s", volume->vlocation->vldb.name); |
302 | 285 | ||
286 | /* stick with the server we're already using if we can */ | ||
287 | if (vnode->server && vnode->server->fs_state == 0) { | ||
288 | afs_get_server(vnode->server); | ||
289 | _leave(" = %p [current]", vnode->server); | ||
290 | return vnode->server; | ||
291 | } | ||
292 | |||
303 | down_read(&volume->server_sem); | 293 | down_read(&volume->server_sem); |
304 | 294 | ||
305 | /* handle the no-server case */ | 295 | /* handle the no-server case */ |
@@ -307,7 +297,7 @@ int afs_volume_pick_fileserver(struct afs_volume *volume, | |||
307 | ret = volume->rjservers ? -ENOMEDIUM : -ESTALE; | 297 | ret = volume->rjservers ? -ENOMEDIUM : -ESTALE; |
308 | up_read(&volume->server_sem); | 298 | up_read(&volume->server_sem); |
309 | _leave(" = %d [no servers]", ret); | 299 | _leave(" = %d [no servers]", ret); |
310 | return ret; | 300 | return ERR_PTR(ret); |
311 | } | 301 | } |
312 | 302 | ||
313 | /* basically, just search the list for the first live server and use | 303 | /* basically, just search the list for the first live server and use |
@@ -317,15 +307,16 @@ int afs_volume_pick_fileserver(struct afs_volume *volume, | |||
317 | server = volume->servers[loop]; | 307 | server = volume->servers[loop]; |
318 | state = server->fs_state; | 308 | state = server->fs_state; |
319 | 309 | ||
310 | _debug("consider %d [%d]", loop, state); | ||
311 | |||
320 | switch (state) { | 312 | switch (state) { |
321 | /* found an apparently healthy server */ | 313 | /* found an apparently healthy server */ |
322 | case 0: | 314 | case 0: |
323 | afs_get_server(server); | 315 | afs_get_server(server); |
324 | up_read(&volume->server_sem); | 316 | up_read(&volume->server_sem); |
325 | *_server = server; | 317 | _leave(" = %p (picked %08x)", |
326 | _leave(" = 0 (picked %08x)", | 318 | server, ntohl(server->addr.s_addr)); |
327 | ntohl(server->addr.s_addr)); | 319 | return server; |
328 | return 0; | ||
329 | 320 | ||
330 | case -ENETUNREACH: | 321 | case -ENETUNREACH: |
331 | if (ret == 0) | 322 | if (ret == 0) |
@@ -361,7 +352,7 @@ int afs_volume_pick_fileserver(struct afs_volume *volume, | |||
361 | */ | 352 | */ |
362 | up_read(&volume->server_sem); | 353 | up_read(&volume->server_sem); |
363 | _leave(" = %d", ret); | 354 | _leave(" = %d", ret); |
364 | return ret; | 355 | return ERR_PTR(ret); |
365 | } | 356 | } |
366 | 357 | ||
367 | /* | 358 | /* |
@@ -370,10 +361,11 @@ int afs_volume_pick_fileserver(struct afs_volume *volume, | |||
370 | * - records result of using a particular server to access a volume | 361 | * - records result of using a particular server to access a volume |
371 | * - return 0 to try again, 1 if okay or to issue error | 362 | * - return 0 to try again, 1 if okay or to issue error |
372 | */ | 363 | */ |
373 | int afs_volume_release_fileserver(struct afs_volume *volume, | 364 | int afs_volume_release_fileserver(struct afs_vnode *vnode, |
374 | struct afs_server *server, | 365 | struct afs_server *server, |
375 | int result) | 366 | int result) |
376 | { | 367 | { |
368 | struct afs_volume *volume = vnode->volume; | ||
377 | unsigned loop; | 369 | unsigned loop; |
378 | 370 | ||
379 | _enter("%s,%08x,%d", | 371 | _enter("%s,%08x,%d", |
@@ -384,6 +376,7 @@ int afs_volume_release_fileserver(struct afs_volume *volume, | |||
384 | /* success */ | 376 | /* success */ |
385 | case 0: | 377 | case 0: |
386 | server->fs_act_jif = jiffies; | 378 | server->fs_act_jif = jiffies; |
379 | server->fs_state = 0; | ||
387 | break; | 380 | break; |
388 | 381 | ||
389 | /* the fileserver denied all knowledge of the volume */ | 382 | /* the fileserver denied all knowledge of the volume */ |
@@ -391,7 +384,7 @@ int afs_volume_release_fileserver(struct afs_volume *volume, | |||
391 | server->fs_act_jif = jiffies; | 384 | server->fs_act_jif = jiffies; |
392 | down_write(&volume->server_sem); | 385 | down_write(&volume->server_sem); |
393 | 386 | ||
394 | /* first, find where the server is in the active list (if it | 387 | /* firstly, find where the server is in the active list (if it |
395 | * is) */ | 388 | * is) */ |
396 | for (loop = 0; loop < volume->nservers; loop++) | 389 | for (loop = 0; loop < volume->nservers; loop++) |
397 | if (volume->servers[loop] == server) | 390 | if (volume->servers[loop] == server) |
@@ -429,6 +422,7 @@ int afs_volume_release_fileserver(struct afs_volume *volume, | |||
429 | case -ENETUNREACH: | 422 | case -ENETUNREACH: |
430 | case -EHOSTUNREACH: | 423 | case -EHOSTUNREACH: |
431 | case -ECONNREFUSED: | 424 | case -ECONNREFUSED: |
425 | case -ETIME: | ||
432 | case -ETIMEDOUT: | 426 | case -ETIMEDOUT: |
433 | case -EREMOTEIO: | 427 | case -EREMOTEIO: |
434 | /* mark the server as dead | 428 | /* mark the server as dead |
@@ -464,40 +458,3 @@ try_next_server: | |||
464 | _leave(" [try next server]"); | 458 | _leave(" [try next server]"); |
465 | return 0; | 459 | return 0; |
466 | } | 460 | } |
467 | |||
468 | /* | ||
469 | * match a volume hash record stored in the cache | ||
470 | */ | ||
471 | #ifdef AFS_CACHING_SUPPORT | ||
472 | static cachefs_match_val_t afs_volume_cache_match(void *target, | ||
473 | const void *entry) | ||
474 | { | ||
475 | const struct afs_cache_vhash *vhash = entry; | ||
476 | struct afs_volume *volume = target; | ||
477 | |||
478 | _enter("{%u},{%u}", volume->type, vhash->vtype); | ||
479 | |||
480 | if (volume->type == vhash->vtype) { | ||
481 | _leave(" = SUCCESS"); | ||
482 | return CACHEFS_MATCH_SUCCESS; | ||
483 | } | ||
484 | |||
485 | _leave(" = FAILED"); | ||
486 | return CACHEFS_MATCH_FAILED; | ||
487 | } | ||
488 | #endif | ||
489 | |||
490 | /* | ||
491 | * update a volume hash record stored in the cache | ||
492 | */ | ||
493 | #ifdef AFS_CACHING_SUPPORT | ||
494 | static void afs_volume_cache_update(void *source, void *entry) | ||
495 | { | ||
496 | struct afs_cache_vhash *vhash = entry; | ||
497 | struct afs_volume *volume = source; | ||
498 | |||
499 | _enter(""); | ||
500 | |||
501 | vhash->vtype = volume->type; | ||
502 | } | ||
503 | #endif | ||
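The volume.c changes above switch afs_volume_lookup() and afs_volume_pick_fileserver() from returning an int plus an output parameter to returning a pointer that may encode an error, tested with IS_ERR() and decoded with PTR_ERR(). The sketch below is a minimal user-space imitation of that <linux/err.h> idiom, assuming the kernel convention that the top 4095 pointer values are reserved for negated errno codes; err_ptr(), is_err() and ptr_err() are stand-ins, not the real macros.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO_EXAMPLE 4095UL	/* matches the kernel's MAX_ERRNO */

static inline void *err_ptr(long error)	{ return (void *)error; }
static inline long ptr_err(const void *ptr)	{ return (long)ptr; }
static inline int is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO_EXAMPLE;
}

/* stand-in for afs_volume_pick_fileserver(): no usable server -> -ESTALE */
static void *pick_server(int have_live_server)
{
	static int the_server = 42;

	return have_live_server ? (void *)&the_server : err_ptr(-ESTALE);
}

int main(void)
{
	void *server = pick_server(0);

	if (is_err(server))
		printf("pick failed: error %ld\n", ptr_err(server));
	else
		printf("picked server %d\n", *(int *)server);
	return 0;
}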
diff --git a/fs/afs/volume.h b/fs/afs/volume.h deleted file mode 100644 index a605bea2e3a..00000000000 --- a/fs/afs/volume.h +++ /dev/null | |||
@@ -1,126 +0,0 @@ | |||
1 | /* AFS volume management | ||
2 | * | ||
3 | * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef AFS_VOLUME_H | ||
13 | #define AFS_VOLUME_H | ||
14 | |||
15 | #include "types.h" | ||
16 | #include "fsclient.h" | ||
17 | #include "kafstimod.h" | ||
18 | #include "kafsasyncd.h" | ||
19 | #include "cache.h" | ||
20 | |||
21 | typedef enum { | ||
22 | AFS_VLUPD_SLEEP, /* sleeping waiting for update timer to fire */ | ||
23 | AFS_VLUPD_PENDING, /* on pending queue */ | ||
24 | AFS_VLUPD_INPROGRESS, /* op in progress */ | ||
25 | AFS_VLUPD_BUSYSLEEP, /* sleeping because server returned EBUSY */ | ||
26 | } __attribute__((packed)) afs_vlocation_upd_t; | ||
27 | |||
28 | /* | ||
29 | * entry in the cached volume location catalogue | ||
30 | */ | ||
31 | struct afs_cache_vlocation { | ||
32 | uint8_t name[64]; /* volume name (lowercase, padded with NULs) */ | ||
33 | uint8_t nservers; /* number of entries used in servers[] */ | ||
34 | uint8_t vidmask; /* voltype mask for vid[] */ | ||
35 | uint8_t srvtmask[8]; /* voltype masks for servers[] */ | ||
36 | #define AFS_VOL_VTM_RW 0x01 /* R/W version of the volume is available (on this server) */ | ||
37 | #define AFS_VOL_VTM_RO 0x02 /* R/O version of the volume is available (on this server) */ | ||
38 | #define AFS_VOL_VTM_BAK 0x04 /* backup version of the volume is available (on this server) */ | ||
39 | |||
40 | afs_volid_t vid[3]; /* volume IDs for R/W, R/O and Bak volumes */ | ||
41 | struct in_addr servers[8]; /* fileserver addresses */ | ||
42 | time_t rtime; /* last retrieval time */ | ||
43 | }; | ||
44 | |||
45 | #ifdef AFS_CACHING_SUPPORT | ||
46 | extern struct cachefs_index_def afs_vlocation_cache_index_def; | ||
47 | #endif | ||
48 | |||
49 | /* | ||
50 | * volume -> vnode hash table entry | ||
51 | */ | ||
52 | struct afs_cache_vhash { | ||
53 | afs_voltype_t vtype; /* which volume variation */ | ||
54 | uint8_t hash_bucket; /* which hash bucket this represents */ | ||
55 | } __attribute__((packed)); | ||
56 | |||
57 | #ifdef AFS_CACHING_SUPPORT | ||
58 | extern struct cachefs_index_def afs_volume_cache_index_def; | ||
59 | #endif | ||
60 | |||
61 | /* | ||
62 | * AFS volume location record | ||
63 | */ | ||
64 | struct afs_vlocation { | ||
65 | atomic_t usage; | ||
66 | struct list_head link; /* link in cell volume location list */ | ||
67 | struct afs_timer timeout; /* decaching timer */ | ||
68 | struct afs_cell *cell; /* cell to which volume belongs */ | ||
69 | #ifdef AFS_CACHING_SUPPORT | ||
70 | struct cachefs_cookie *cache; /* caching cookie */ | ||
71 | #endif | ||
72 | struct afs_cache_vlocation vldb; /* volume information DB record */ | ||
73 | struct afs_volume *vols[3]; /* volume access record pointer (index by type) */ | ||
74 | rwlock_t lock; /* access lock */ | ||
75 | unsigned long read_jif; /* time at which last read from vlserver */ | ||
76 | struct afs_timer upd_timer; /* update timer */ | ||
77 | struct afs_async_op upd_op; /* update operation */ | ||
78 | afs_vlocation_upd_t upd_state; /* update state */ | ||
79 | unsigned short upd_first_svix; /* first server index during update */ | ||
80 | unsigned short upd_curr_svix; /* current server index during update */ | ||
81 | unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */ | ||
82 | unsigned short upd_busy_cnt; /* EBUSY count during update */ | ||
83 | unsigned short valid; /* T if valid */ | ||
84 | }; | ||
85 | |||
86 | extern int afs_vlocation_lookup(struct afs_cell *, const char *, unsigned, | ||
87 | struct afs_vlocation **); | ||
88 | |||
89 | #define afs_get_vlocation(V) do { atomic_inc(&(V)->usage); } while(0) | ||
90 | |||
91 | extern void afs_put_vlocation(struct afs_vlocation *); | ||
92 | extern void afs_vlocation_do_timeout(struct afs_vlocation *); | ||
93 | |||
94 | /* | ||
95 | * AFS volume access record | ||
96 | */ | ||
97 | struct afs_volume { | ||
98 | atomic_t usage; | ||
99 | struct afs_cell *cell; /* cell to which belongs (unrefd ptr) */ | ||
100 | struct afs_vlocation *vlocation; /* volume location */ | ||
101 | #ifdef AFS_CACHING_SUPPORT | ||
102 | struct cachefs_cookie *cache; /* caching cookie */ | ||
103 | #endif | ||
104 | afs_volid_t vid; /* volume ID */ | ||
105 | afs_voltype_t type; /* type of volume */ | ||
106 | char type_force; /* force volume type (suppress R/O -> R/W) */ | ||
107 | unsigned short nservers; /* number of server slots filled */ | ||
108 | unsigned short rjservers; /* number of servers discarded due to -ENOMEDIUM */ | ||
109 | struct afs_server *servers[8]; /* servers on which volume resides (ordered) */ | ||
110 | struct rw_semaphore server_sem; /* lock for accessing current server */ | ||
111 | }; | ||
112 | |||
113 | extern int afs_volume_lookup(const char *, struct afs_cell *, int, | ||
114 | struct afs_volume **); | ||
115 | |||
116 | #define afs_get_volume(V) do { atomic_inc(&(V)->usage); } while(0) | ||
117 | |||
118 | extern void afs_put_volume(struct afs_volume *); | ||
119 | |||
120 | extern int afs_volume_pick_fileserver(struct afs_volume *, | ||
121 | struct afs_server **); | ||
122 | |||
123 | extern int afs_volume_release_fileserver(struct afs_volume *, | ||
124 | struct afs_server *, int); | ||
125 | |||
126 | #endif /* AFS_VOLUME_H */ | ||
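The deleted volume.h also shows how a cached VLDB record encodes server capabilities: srvtmask[] holds one bitmask per fileserver slot (up to eight) saying which volume flavours are available there, and afs_volume_lookup() in volume.c above keeps exactly the slots whose mask contains the wanted type via srvtmask[loop] & (1 << volume->type). A small, self-contained sketch of that selection follows; the sample masks and the VOL_* index names are invented for illustration, with R/W, R/O and backup assumed to map to type indices 0, 1 and 2 as the bit values imply.

#include <stdio.h>
#include <stdint.h>

#define AFS_VOL_VTM_RW	0x01	/* R/W version available on this server */
#define AFS_VOL_VTM_RO	0x02	/* R/O version available on this server */
#define AFS_VOL_VTM_BAK	0x04	/* backup version available on this server */

enum { VOL_RW = 0, VOL_RO = 1, VOL_BACKUP = 2 };	/* assumed type indices */

int main(void)
{
	/* per-slot volume type masks, as in struct afs_cache_vlocation */
	uint8_t srvtmask[8] = {
		AFS_VOL_VTM_RW | AFS_VOL_VTM_RO,	/* server slot 0 */
		AFS_VOL_VTM_RO,				/* server slot 1 */
		AFS_VOL_VTM_RO | AFS_VOL_VTM_BAK,	/* server slot 2 */
		/* remaining slots unused */
	};
	int wanted = VOL_RO;
	int loop, nservers = 0;

	for (loop = 0; loop < 8; loop++) {
		if (srvtmask[loop] & (1 << wanted)) {
			printf("slot %d serves the R/O volume\n", loop);
			nservers++;
		}
	}
	printf("%d usable server slot(s)\n", nservers);
	return 0;
}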