commit    16cefa8c3863721fd40445a1b34dea18cd16ccfe
tree      c8e58ca06e2edfd667d3e6062a642b80cc58e5e7 /net
parent    4fbef206daead133085fe33905f5e842d38fb8da
parent    d8558f99fbc5ef5d4ae76b893784005056450f82
author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-13 19:46:18 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-13 19:46:18 -0400
Merge git://git.linux-nfs.org/pub/linux/nfs-2.6
* git://git.linux-nfs.org/pub/linux/nfs-2.6: (122 commits)
sunrpc: drop BKL around wrap and unwrap
NFSv4: Make sure unlock is really an unlock when cancelling a lock
NLM: fix source address of callback to client
SUNRPC client: add interface for binding to a local address
SUNRPC server: record the destination address of a request
SUNRPC: cleanup transport creation argument passing
NFSv4: Make the NFS state model work with the nosharedcache mount option
NFS: Error when mounting the same filesystem with different options
NFS: Add the mount option "nosharecache"
NFS: Add support for mounting NFSv4 file systems with string options
NFS: Add final pieces to support in-kernel mount option parsing
NFS: Introduce generic mount client API
NFS: Add enums and match tables for mount option parsing
NFS: Improve debugging output in NFS in-kernel mount client
NFS: Clean up in-kernel NFS mount
NFS: Remake nfsroot_mount as a permanent part of NFS client
SUNRPC: Add a convenient default for the hostname when calling rpc_create()
SUNRPC: Rename rpcb_getport to be consistent with new rpcb_getport_sync name
SUNRPC: Rename rpcb_getport_external routine
SUNRPC: Allow rpcbind requests to be interrupted by a signal.
...
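The subjects above are mostly mount-string and rpcbind work, but the net/sunrpc portion of the merge shown below is dominated by a rework of the RPC credential cache: lookups become lockless RCU walks of the hash chains, and only a cache miss takes the per-cache spinlock to insert, re-checking for a racing insert first. A minimal sketch of that discipline follows; the demo_* names are hypothetical (not the SUNRPC types), and the four-argument hlist iterators match this kernel generation.

/*
 * Illustrative sketch only -- hypothetical demo_* names, not the SUNRPC
 * code.  Readers walk the hash chain under rcu_read_lock(); a miss
 * allocates outside the lock and then re-checks under the per-cache
 * spinlock so that a racing insert wins.  The real rpcauth code also
 * re-checks a "still hashed" bit under cache->lock before taking its
 * reference on the hit path; that detail is reduced to a comment here.
 */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

#define DEMO_HASH_SIZE	16

struct demo_entry {
	struct hlist_node	d_hash;
	unsigned int		d_key;
	atomic_t		d_count;
};

struct demo_cache {
	spinlock_t		lock;
	struct hlist_head	table[DEMO_HASH_SIZE];
};

static struct demo_entry *demo_lookup(struct demo_cache *cache, unsigned int key)
{
	struct hlist_head *head = &cache->table[key & (DEMO_HASH_SIZE - 1)];
	struct demo_entry *entry, *new;
	struct hlist_node *pos;

	/* Fast path: no cache lock, just the RCU read-side section. */
	rcu_read_lock();
	hlist_for_each_entry_rcu(entry, pos, head, d_hash) {
		if (entry->d_key != key)
			continue;
		spin_lock(&cache->lock);	/* real code re-checks HASHED here */
		atomic_inc(&entry->d_count);
		spin_unlock(&cache->lock);
		rcu_read_unlock();
		return entry;
	}
	rcu_read_unlock();

	/* Slow path: allocate outside the lock... */
	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (new == NULL)
		return NULL;
	new->d_key = key;
	atomic_set(&new->d_count, 1);

	/* ...then insert under the lock, unless someone beat us to it. */
	spin_lock(&cache->lock);
	hlist_for_each_entry(entry, pos, head, d_hash) {
		if (entry->d_key != key)
			continue;
		atomic_inc(&entry->d_count);
		spin_unlock(&cache->lock);
		kfree(new);
		return entry;
	}
	hlist_add_head_rcu(&new->d_hash, head);
	spin_unlock(&cache->lock);
	return new;
}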
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/auth.c                     | 370
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c        | 349
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c   |   2
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_mech.c  |   2
-rw-r--r--  net/sunrpc/auth_null.c                |  10
-rw-r--r--  net/sunrpc/auth_unix.c                |  54
-rw-r--r--  net/sunrpc/clnt.c                     | 371
-rw-r--r--  net/sunrpc/rpc_pipe.c                 |  80
-rw-r--r--  net/sunrpc/rpcb_clnt.c                |  65
-rw-r--r--  net/sunrpc/sched.c                    | 209
-rw-r--r--  net/sunrpc/sunrpc_syms.c              |   8
-rw-r--r--  net/sunrpc/svcsock.c                  |  20
-rw-r--r--  net/sunrpc/xprt.c                     |  19
-rw-r--r--  net/sunrpc/xprtsock.c                 |  81
14 files changed, 980 insertions, 660 deletions
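One piece of orientation before the auth.c diff: the expiry-time based garbage collection of unused credentials is replaced by a global unused-credential LRU (cred_unused) reaped from a VM shrinker. A sketch of the pre-2.6.23 shrinker hooks it relies on (set_shrinker()/remove_shrinker(), as used in the hunks below) follows; the demo_* names are hypothetical, not a SUNRPC API.

/*
 * Sketch of the pre-2.6.23 shrinker hooks used below -- demo_* names are
 * hypothetical.  The VM calls the hook with nr_to_scan == 0 simply to
 * ask how many objects are cached, and with nr_to_scan > 0 to request
 * that many be reaped.  The real rpcauth_cache_shrinker() additionally
 * scales its return value by sysctl_vfs_cache_pressure / 100, as the
 * auth.c hunk shows.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>

static int demo_unused;			/* objects sitting on the unused LRU */
static struct shrinker *demo_shrinker;

static void demo_prune(int nr_to_scan)
{
	/* hypothetical reaper: drop up to nr_to_scan unreferenced objects */
}

static int demo_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan)
		demo_prune(nr_to_scan);
	return demo_unused;
}

static int __init demo_init(void)
{
	demo_shrinker = set_shrinker(DEFAULT_SEEKS, demo_shrink);
	return demo_shrinker != NULL ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	if (demo_shrinker != NULL)
		remove_shrinker(demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");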
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 9527f2bb1744..aa55d0a03e6f 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -13,17 +13,22 @@
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/sunrpc/clnt.h> | 14 | #include <linux/sunrpc/clnt.h> |
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | #include <linux/smp_lock.h> | ||
16 | 17 | ||
17 | #ifdef RPC_DEBUG | 18 | #ifdef RPC_DEBUG |
18 | # define RPCDBG_FACILITY RPCDBG_AUTH | 19 | # define RPCDBG_FACILITY RPCDBG_AUTH |
19 | #endif | 20 | #endif |
20 | 21 | ||
21 | static struct rpc_authops * auth_flavors[RPC_AUTH_MAXFLAVOR] = { | 22 | static DEFINE_SPINLOCK(rpc_authflavor_lock); |
23 | static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = { | ||
22 | &authnull_ops, /* AUTH_NULL */ | 24 | &authnull_ops, /* AUTH_NULL */ |
23 | &authunix_ops, /* AUTH_UNIX */ | 25 | &authunix_ops, /* AUTH_UNIX */ |
24 | NULL, /* others can be loadable modules */ | 26 | NULL, /* others can be loadable modules */ |
25 | }; | 27 | }; |
26 | 28 | ||
29 | static LIST_HEAD(cred_unused); | ||
30 | static unsigned long number_cred_unused; | ||
31 | |||
27 | static u32 | 32 | static u32 |
28 | pseudoflavor_to_flavor(u32 flavor) { | 33 | pseudoflavor_to_flavor(u32 flavor) { |
29 | if (flavor >= RPC_AUTH_MAXFLAVOR) | 34 | if (flavor >= RPC_AUTH_MAXFLAVOR) |
@@ -32,55 +37,67 @@ pseudoflavor_to_flavor(u32 flavor) {
32 | } | 37 | } |
33 | 38 | ||
34 | int | 39 | int |
35 | rpcauth_register(struct rpc_authops *ops) | 40 | rpcauth_register(const struct rpc_authops *ops) |
36 | { | 41 | { |
37 | rpc_authflavor_t flavor; | 42 | rpc_authflavor_t flavor; |
43 | int ret = -EPERM; | ||
38 | 44 | ||
39 | if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) | 45 | if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) |
40 | return -EINVAL; | 46 | return -EINVAL; |
41 | if (auth_flavors[flavor] != NULL) | 47 | spin_lock(&rpc_authflavor_lock); |
42 | return -EPERM; /* what else? */ | 48 | if (auth_flavors[flavor] == NULL) { |
43 | auth_flavors[flavor] = ops; | 49 | auth_flavors[flavor] = ops; |
44 | return 0; | 50 | ret = 0; |
51 | } | ||
52 | spin_unlock(&rpc_authflavor_lock); | ||
53 | return ret; | ||
45 | } | 54 | } |
46 | 55 | ||
47 | int | 56 | int |
48 | rpcauth_unregister(struct rpc_authops *ops) | 57 | rpcauth_unregister(const struct rpc_authops *ops) |
49 | { | 58 | { |
50 | rpc_authflavor_t flavor; | 59 | rpc_authflavor_t flavor; |
60 | int ret = -EPERM; | ||
51 | 61 | ||
52 | if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) | 62 | if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) |
53 | return -EINVAL; | 63 | return -EINVAL; |
54 | if (auth_flavors[flavor] != ops) | 64 | spin_lock(&rpc_authflavor_lock); |
55 | return -EPERM; /* what else? */ | 65 | if (auth_flavors[flavor] == ops) { |
56 | auth_flavors[flavor] = NULL; | 66 | auth_flavors[flavor] = NULL; |
57 | return 0; | 67 | ret = 0; |
68 | } | ||
69 | spin_unlock(&rpc_authflavor_lock); | ||
70 | return ret; | ||
58 | } | 71 | } |
59 | 72 | ||
60 | struct rpc_auth * | 73 | struct rpc_auth * |
61 | rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt) | 74 | rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt) |
62 | { | 75 | { |
63 | struct rpc_auth *auth; | 76 | struct rpc_auth *auth; |
64 | struct rpc_authops *ops; | 77 | const struct rpc_authops *ops; |
65 | u32 flavor = pseudoflavor_to_flavor(pseudoflavor); | 78 | u32 flavor = pseudoflavor_to_flavor(pseudoflavor); |
66 | 79 | ||
67 | auth = ERR_PTR(-EINVAL); | 80 | auth = ERR_PTR(-EINVAL); |
68 | if (flavor >= RPC_AUTH_MAXFLAVOR) | 81 | if (flavor >= RPC_AUTH_MAXFLAVOR) |
69 | goto out; | 82 | goto out; |
70 | 83 | ||
71 | /* FIXME - auth_flavors[] really needs an rw lock, | ||
72 | * and module refcounting. */ | ||
73 | #ifdef CONFIG_KMOD | 84 | #ifdef CONFIG_KMOD |
74 | if ((ops = auth_flavors[flavor]) == NULL) | 85 | if ((ops = auth_flavors[flavor]) == NULL) |
75 | request_module("rpc-auth-%u", flavor); | 86 | request_module("rpc-auth-%u", flavor); |
76 | #endif | 87 | #endif |
77 | if ((ops = auth_flavors[flavor]) == NULL) | 88 | spin_lock(&rpc_authflavor_lock); |
89 | ops = auth_flavors[flavor]; | ||
90 | if (ops == NULL || !try_module_get(ops->owner)) { | ||
91 | spin_unlock(&rpc_authflavor_lock); | ||
78 | goto out; | 92 | goto out; |
93 | } | ||
94 | spin_unlock(&rpc_authflavor_lock); | ||
79 | auth = ops->create(clnt, pseudoflavor); | 95 | auth = ops->create(clnt, pseudoflavor); |
96 | module_put(ops->owner); | ||
80 | if (IS_ERR(auth)) | 97 | if (IS_ERR(auth)) |
81 | return auth; | 98 | return auth; |
82 | if (clnt->cl_auth) | 99 | if (clnt->cl_auth) |
83 | rpcauth_destroy(clnt->cl_auth); | 100 | rpcauth_release(clnt->cl_auth); |
84 | clnt->cl_auth = auth; | 101 | clnt->cl_auth = auth; |
85 | 102 | ||
86 | out: | 103 | out: |
@@ -88,7 +105,7 @@ out:
88 | } | 105 | } |
89 | 106 | ||
90 | void | 107 | void |
91 | rpcauth_destroy(struct rpc_auth *auth) | 108 | rpcauth_release(struct rpc_auth *auth) |
92 | { | 109 | { |
93 | if (!atomic_dec_and_test(&auth->au_count)) | 110 | if (!atomic_dec_and_test(&auth->au_count)) |
94 | return; | 111 | return; |
@@ -97,11 +114,31 @@ rpcauth_destroy(struct rpc_auth *auth)
97 | 114 | ||
98 | static DEFINE_SPINLOCK(rpc_credcache_lock); | 115 | static DEFINE_SPINLOCK(rpc_credcache_lock); |
99 | 116 | ||
117 | static void | ||
118 | rpcauth_unhash_cred_locked(struct rpc_cred *cred) | ||
119 | { | ||
120 | hlist_del_rcu(&cred->cr_hash); | ||
121 | smp_mb__before_clear_bit(); | ||
122 | clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); | ||
123 | } | ||
124 | |||
125 | static void | ||
126 | rpcauth_unhash_cred(struct rpc_cred *cred) | ||
127 | { | ||
128 | spinlock_t *cache_lock; | ||
129 | |||
130 | cache_lock = &cred->cr_auth->au_credcache->lock; | ||
131 | spin_lock(cache_lock); | ||
132 | if (atomic_read(&cred->cr_count) == 0) | ||
133 | rpcauth_unhash_cred_locked(cred); | ||
134 | spin_unlock(cache_lock); | ||
135 | } | ||
136 | |||
100 | /* | 137 | /* |
101 | * Initialize RPC credential cache | 138 | * Initialize RPC credential cache |
102 | */ | 139 | */ |
103 | int | 140 | int |
104 | rpcauth_init_credcache(struct rpc_auth *auth, unsigned long expire) | 141 | rpcauth_init_credcache(struct rpc_auth *auth) |
105 | { | 142 | { |
106 | struct rpc_cred_cache *new; | 143 | struct rpc_cred_cache *new; |
107 | int i; | 144 | int i; |
@@ -111,8 +148,7 @@ rpcauth_init_credcache(struct rpc_auth *auth, unsigned long expire)
111 | return -ENOMEM; | 148 | return -ENOMEM; |
112 | for (i = 0; i < RPC_CREDCACHE_NR; i++) | 149 | for (i = 0; i < RPC_CREDCACHE_NR; i++) |
113 | INIT_HLIST_HEAD(&new->hashtable[i]); | 150 | INIT_HLIST_HEAD(&new->hashtable[i]); |
114 | new->expire = expire; | 151 | spin_lock_init(&new->lock); |
115 | new->nextgc = jiffies + (expire >> 1); | ||
116 | auth->au_credcache = new; | 152 | auth->au_credcache = new; |
117 | return 0; | 153 | return 0; |
118 | } | 154 | } |
@@ -121,13 +157,13 @@ rpcauth_init_credcache(struct rpc_auth *auth, unsigned long expire)
121 | * Destroy a list of credentials | 157 | * Destroy a list of credentials |
122 | */ | 158 | */ |
123 | static inline | 159 | static inline |
124 | void rpcauth_destroy_credlist(struct hlist_head *head) | 160 | void rpcauth_destroy_credlist(struct list_head *head) |
125 | { | 161 | { |
126 | struct rpc_cred *cred; | 162 | struct rpc_cred *cred; |
127 | 163 | ||
128 | while (!hlist_empty(head)) { | 164 | while (!list_empty(head)) { |
129 | cred = hlist_entry(head->first, struct rpc_cred, cr_hash); | 165 | cred = list_entry(head->next, struct rpc_cred, cr_lru); |
130 | hlist_del_init(&cred->cr_hash); | 166 | list_del_init(&cred->cr_lru); |
131 | put_rpccred(cred); | 167 | put_rpccred(cred); |
132 | } | 168 | } |
133 | } | 169 | } |
@@ -137,58 +173,95 @@ void rpcauth_destroy_credlist(struct hlist_head *head)
137 | * that are not referenced. | 173 | * that are not referenced. |
138 | */ | 174 | */ |
139 | void | 175 | void |
140 | rpcauth_free_credcache(struct rpc_auth *auth) | 176 | rpcauth_clear_credcache(struct rpc_cred_cache *cache) |
141 | { | 177 | { |
142 | struct rpc_cred_cache *cache = auth->au_credcache; | 178 | LIST_HEAD(free); |
143 | HLIST_HEAD(free); | 179 | struct hlist_head *head; |
144 | struct hlist_node *pos, *next; | ||
145 | struct rpc_cred *cred; | 180 | struct rpc_cred *cred; |
146 | int i; | 181 | int i; |
147 | 182 | ||
148 | spin_lock(&rpc_credcache_lock); | 183 | spin_lock(&rpc_credcache_lock); |
184 | spin_lock(&cache->lock); | ||
149 | for (i = 0; i < RPC_CREDCACHE_NR; i++) { | 185 | for (i = 0; i < RPC_CREDCACHE_NR; i++) { |
150 | hlist_for_each_safe(pos, next, &cache->hashtable[i]) { | 186 | head = &cache->hashtable[i]; |
151 | cred = hlist_entry(pos, struct rpc_cred, cr_hash); | 187 | while (!hlist_empty(head)) { |
152 | __hlist_del(&cred->cr_hash); | 188 | cred = hlist_entry(head->first, struct rpc_cred, cr_hash); |
153 | hlist_add_head(&cred->cr_hash, &free); | 189 | get_rpccred(cred); |
190 | if (!list_empty(&cred->cr_lru)) { | ||
191 | list_del(&cred->cr_lru); | ||
192 | number_cred_unused--; | ||
193 | } | ||
194 | list_add_tail(&cred->cr_lru, &free); | ||
195 | rpcauth_unhash_cred_locked(cred); | ||
154 | } | 196 | } |
155 | } | 197 | } |
198 | spin_unlock(&cache->lock); | ||
156 | spin_unlock(&rpc_credcache_lock); | 199 | spin_unlock(&rpc_credcache_lock); |
157 | rpcauth_destroy_credlist(&free); | 200 | rpcauth_destroy_credlist(&free); |
158 | } | 201 | } |
159 | 202 | ||
160 | static void | 203 | /* |
161 | rpcauth_prune_expired(struct rpc_auth *auth, struct rpc_cred *cred, struct hlist_head *free) | 204 | * Destroy the RPC credential cache |
205 | */ | ||
206 | void | ||
207 | rpcauth_destroy_credcache(struct rpc_auth *auth) | ||
162 | { | 208 | { |
163 | if (atomic_read(&cred->cr_count) != 1) | 209 | struct rpc_cred_cache *cache = auth->au_credcache; |
164 | return; | 210 | |
165 | if (time_after(jiffies, cred->cr_expire + auth->au_credcache->expire)) | 211 | if (cache) { |
166 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 212 | auth->au_credcache = NULL; |
167 | if (!(cred->cr_flags & RPCAUTH_CRED_UPTODATE)) { | 213 | rpcauth_clear_credcache(cache); |
168 | __hlist_del(&cred->cr_hash); | 214 | kfree(cache); |
169 | hlist_add_head(&cred->cr_hash, free); | ||
170 | } | 215 | } |
171 | } | 216 | } |
172 | 217 | ||
173 | /* | 218 | /* |
174 | * Remove stale credentials. Avoid sleeping inside the loop. | 219 | * Remove stale credentials. Avoid sleeping inside the loop. |
175 | */ | 220 | */ |
176 | static void | 221 | static int |
177 | rpcauth_gc_credcache(struct rpc_auth *auth, struct hlist_head *free) | 222 | rpcauth_prune_expired(struct list_head *free, int nr_to_scan) |
178 | { | 223 | { |
179 | struct rpc_cred_cache *cache = auth->au_credcache; | 224 | spinlock_t *cache_lock; |
180 | struct hlist_node *pos, *next; | 225 | struct rpc_cred *cred; |
181 | struct rpc_cred *cred; | ||
182 | int i; | ||
183 | 226 | ||
184 | dprintk("RPC: gc'ing RPC credentials for auth %p\n", auth); | 227 | while (!list_empty(&cred_unused)) { |
185 | for (i = 0; i < RPC_CREDCACHE_NR; i++) { | 228 | cred = list_entry(cred_unused.next, struct rpc_cred, cr_lru); |
186 | hlist_for_each_safe(pos, next, &cache->hashtable[i]) { | 229 | list_del_init(&cred->cr_lru); |
187 | cred = hlist_entry(pos, struct rpc_cred, cr_hash); | 230 | number_cred_unused--; |
188 | rpcauth_prune_expired(auth, cred, free); | 231 | if (atomic_read(&cred->cr_count) != 0) |
232 | continue; | ||
233 | cache_lock = &cred->cr_auth->au_credcache->lock; | ||
234 | spin_lock(cache_lock); | ||
235 | if (atomic_read(&cred->cr_count) == 0) { | ||
236 | get_rpccred(cred); | ||
237 | list_add_tail(&cred->cr_lru, free); | ||
238 | rpcauth_unhash_cred_locked(cred); | ||
239 | nr_to_scan--; | ||
189 | } | 240 | } |
241 | spin_unlock(cache_lock); | ||
242 | if (nr_to_scan == 0) | ||
243 | break; | ||
190 | } | 244 | } |
191 | cache->nextgc = jiffies + cache->expire; | 245 | return nr_to_scan; |
246 | } | ||
247 | |||
248 | /* | ||
249 | * Run memory cache shrinker. | ||
250 | */ | ||
251 | static int | ||
252 | rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask) | ||
253 | { | ||
254 | LIST_HEAD(free); | ||
255 | int res; | ||
256 | |||
257 | if (list_empty(&cred_unused)) | ||
258 | return 0; | ||
259 | spin_lock(&rpc_credcache_lock); | ||
260 | nr_to_scan = rpcauth_prune_expired(&free, nr_to_scan); | ||
261 | res = (number_cred_unused / 100) * sysctl_vfs_cache_pressure; | ||
262 | spin_unlock(&rpc_credcache_lock); | ||
263 | rpcauth_destroy_credlist(&free); | ||
264 | return res; | ||
192 | } | 265 | } |
193 | 266 | ||
194 | /* | 267 | /* |
@@ -198,53 +271,56 @@ struct rpc_cred *
198 | rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, | 271 | rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, |
199 | int flags) | 272 | int flags) |
200 | { | 273 | { |
274 | LIST_HEAD(free); | ||
201 | struct rpc_cred_cache *cache = auth->au_credcache; | 275 | struct rpc_cred_cache *cache = auth->au_credcache; |
202 | HLIST_HEAD(free); | 276 | struct hlist_node *pos; |
203 | struct hlist_node *pos, *next; | 277 | struct rpc_cred *cred = NULL, |
204 | struct rpc_cred *new = NULL, | 278 | *entry, *new; |
205 | *cred = NULL; | ||
206 | int nr = 0; | 279 | int nr = 0; |
207 | 280 | ||
208 | if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) | 281 | if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) |
209 | nr = acred->uid & RPC_CREDCACHE_MASK; | 282 | nr = acred->uid & RPC_CREDCACHE_MASK; |
210 | retry: | 283 | |
211 | spin_lock(&rpc_credcache_lock); | 284 | rcu_read_lock(); |
212 | if (time_before(cache->nextgc, jiffies)) | 285 | hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) { |
213 | rpcauth_gc_credcache(auth, &free); | 286 | if (!entry->cr_ops->crmatch(acred, entry, flags)) |
214 | hlist_for_each_safe(pos, next, &cache->hashtable[nr]) { | 287 | continue; |
215 | struct rpc_cred *entry; | 288 | spin_lock(&cache->lock); |
216 | entry = hlist_entry(pos, struct rpc_cred, cr_hash); | 289 | if (test_bit(RPCAUTH_CRED_HASHED, &entry->cr_flags) == 0) { |
217 | if (entry->cr_ops->crmatch(acred, entry, flags)) { | 290 | spin_unlock(&cache->lock); |
218 | hlist_del(&entry->cr_hash); | 291 | continue; |
219 | cred = entry; | ||
220 | break; | ||
221 | } | 292 | } |
222 | rpcauth_prune_expired(auth, entry, &free); | 293 | cred = get_rpccred(entry); |
294 | spin_unlock(&cache->lock); | ||
295 | break; | ||
223 | } | 296 | } |
224 | if (new) { | 297 | rcu_read_unlock(); |
225 | if (cred) | ||
226 | hlist_add_head(&new->cr_hash, &free); | ||
227 | else | ||
228 | cred = new; | ||
229 | } | ||
230 | if (cred) { | ||
231 | hlist_add_head(&cred->cr_hash, &cache->hashtable[nr]); | ||
232 | get_rpccred(cred); | ||
233 | } | ||
234 | spin_unlock(&rpc_credcache_lock); | ||
235 | 298 | ||
236 | rpcauth_destroy_credlist(&free); | 299 | if (cred != NULL) |
300 | goto found; | ||
237 | 301 | ||
238 | if (!cred) { | 302 | new = auth->au_ops->crcreate(auth, acred, flags); |
239 | new = auth->au_ops->crcreate(auth, acred, flags); | 303 | if (IS_ERR(new)) { |
240 | if (!IS_ERR(new)) { | 304 | cred = new; |
241 | #ifdef RPC_DEBUG | 305 | goto out; |
242 | new->cr_magic = RPCAUTH_CRED_MAGIC; | 306 | } |
243 | #endif | 307 | |
244 | goto retry; | 308 | spin_lock(&cache->lock); |
245 | } else | 309 | hlist_for_each_entry(entry, pos, &cache->hashtable[nr], cr_hash) { |
246 | cred = new; | 310 | if (!entry->cr_ops->crmatch(acred, entry, flags)) |
247 | } else if ((cred->cr_flags & RPCAUTH_CRED_NEW) | 311 | continue; |
312 | cred = get_rpccred(entry); | ||
313 | break; | ||
314 | } | ||
315 | if (cred == NULL) { | ||
316 | cred = new; | ||
317 | set_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); | ||
318 | hlist_add_head_rcu(&cred->cr_hash, &cache->hashtable[nr]); | ||
319 | } else | ||
320 | list_add_tail(&new->cr_lru, &free); | ||
321 | spin_unlock(&cache->lock); | ||
322 | found: | ||
323 | if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) | ||
248 | && cred->cr_ops->cr_init != NULL | 324 | && cred->cr_ops->cr_init != NULL |
249 | && !(flags & RPCAUTH_LOOKUP_NEW)) { | 325 | && !(flags & RPCAUTH_LOOKUP_NEW)) { |
250 | int res = cred->cr_ops->cr_init(auth, cred); | 326 | int res = cred->cr_ops->cr_init(auth, cred); |
@@ -253,8 +329,9 @@ retry:
253 | cred = ERR_PTR(res); | 329 | cred = ERR_PTR(res); |
254 | } | 330 | } |
255 | } | 331 | } |
256 | 332 | rpcauth_destroy_credlist(&free); | |
257 | return (struct rpc_cred *) cred; | 333 | out: |
334 | return cred; | ||
258 | } | 335 | } |
259 | 336 | ||
260 | struct rpc_cred * | 337 | struct rpc_cred * |
@@ -275,10 +352,27 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags)
275 | return ret; | 352 | return ret; |
276 | } | 353 | } |
277 | 354 | ||
355 | void | ||
356 | rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred, | ||
357 | struct rpc_auth *auth, const struct rpc_credops *ops) | ||
358 | { | ||
359 | INIT_HLIST_NODE(&cred->cr_hash); | ||
360 | INIT_LIST_HEAD(&cred->cr_lru); | ||
361 | atomic_set(&cred->cr_count, 1); | ||
362 | cred->cr_auth = auth; | ||
363 | cred->cr_ops = ops; | ||
364 | cred->cr_expire = jiffies; | ||
365 | #ifdef RPC_DEBUG | ||
366 | cred->cr_magic = RPCAUTH_CRED_MAGIC; | ||
367 | #endif | ||
368 | cred->cr_uid = acred->uid; | ||
369 | } | ||
370 | EXPORT_SYMBOL(rpcauth_init_cred); | ||
371 | |||
278 | struct rpc_cred * | 372 | struct rpc_cred * |
279 | rpcauth_bindcred(struct rpc_task *task) | 373 | rpcauth_bindcred(struct rpc_task *task) |
280 | { | 374 | { |
281 | struct rpc_auth *auth = task->tk_auth; | 375 | struct rpc_auth *auth = task->tk_client->cl_auth; |
282 | struct auth_cred acred = { | 376 | struct auth_cred acred = { |
283 | .uid = current->fsuid, | 377 | .uid = current->fsuid, |
284 | .gid = current->fsgid, | 378 | .gid = current->fsgid, |
@@ -288,7 +382,7 @@ rpcauth_bindcred(struct rpc_task *task)
288 | int flags = 0; | 382 | int flags = 0; |
289 | 383 | ||
290 | dprintk("RPC: %5u looking up %s cred\n", | 384 | dprintk("RPC: %5u looking up %s cred\n", |
291 | task->tk_pid, task->tk_auth->au_ops->au_name); | 385 | task->tk_pid, task->tk_client->cl_auth->au_ops->au_name); |
292 | get_group_info(acred.group_info); | 386 | get_group_info(acred.group_info); |
293 | if (task->tk_flags & RPC_TASK_ROOTCREDS) | 387 | if (task->tk_flags & RPC_TASK_ROOTCREDS) |
294 | flags |= RPCAUTH_LOOKUP_ROOTCREDS; | 388 | flags |= RPCAUTH_LOOKUP_ROOTCREDS; |
@@ -304,19 +398,42 @@ rpcauth_bindcred(struct rpc_task *task)
304 | void | 398 | void |
305 | rpcauth_holdcred(struct rpc_task *task) | 399 | rpcauth_holdcred(struct rpc_task *task) |
306 | { | 400 | { |
307 | dprintk("RPC: %5u holding %s cred %p\n", | 401 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
308 | task->tk_pid, task->tk_auth->au_ops->au_name, | 402 | if (cred != NULL) { |
309 | task->tk_msg.rpc_cred); | 403 | get_rpccred(cred); |
310 | if (task->tk_msg.rpc_cred) | 404 | dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid, |
311 | get_rpccred(task->tk_msg.rpc_cred); | 405 | cred->cr_auth->au_ops->au_name, cred); |
406 | } | ||
312 | } | 407 | } |
313 | 408 | ||
314 | void | 409 | void |
315 | put_rpccred(struct rpc_cred *cred) | 410 | put_rpccred(struct rpc_cred *cred) |
316 | { | 411 | { |
317 | cred->cr_expire = jiffies; | 412 | /* Fast path for unhashed credentials */ |
413 | if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) | ||
414 | goto need_lock; | ||
415 | |||
318 | if (!atomic_dec_and_test(&cred->cr_count)) | 416 | if (!atomic_dec_and_test(&cred->cr_count)) |
319 | return; | 417 | return; |
418 | goto out_destroy; | ||
419 | need_lock: | ||
420 | if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock)) | ||
421 | return; | ||
422 | if (!list_empty(&cred->cr_lru)) { | ||
423 | number_cred_unused--; | ||
424 | list_del_init(&cred->cr_lru); | ||
425 | } | ||
426 | if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0) | ||
427 | rpcauth_unhash_cred(cred); | ||
428 | else if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) { | ||
429 | cred->cr_expire = jiffies; | ||
430 | list_add_tail(&cred->cr_lru, &cred_unused); | ||
431 | number_cred_unused++; | ||
432 | spin_unlock(&rpc_credcache_lock); | ||
433 | return; | ||
434 | } | ||
435 | spin_unlock(&rpc_credcache_lock); | ||
436 | out_destroy: | ||
320 | cred->cr_ops->crdestroy(cred); | 437 | cred->cr_ops->crdestroy(cred); |
321 | } | 438 | } |
322 | 439 | ||
@@ -326,7 +443,7 @@ rpcauth_unbindcred(struct rpc_task *task)
326 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 443 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
327 | 444 | ||
328 | dprintk("RPC: %5u releasing %s cred %p\n", | 445 | dprintk("RPC: %5u releasing %s cred %p\n", |
329 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 446 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); |
330 | 447 | ||
331 | put_rpccred(cred); | 448 | put_rpccred(cred); |
332 | task->tk_msg.rpc_cred = NULL; | 449 | task->tk_msg.rpc_cred = NULL; |
@@ -338,7 +455,7 @@ rpcauth_marshcred(struct rpc_task *task, __be32 *p)
338 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 455 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
339 | 456 | ||
340 | dprintk("RPC: %5u marshaling %s cred %p\n", | 457 | dprintk("RPC: %5u marshaling %s cred %p\n", |
341 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 458 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); |
342 | 459 | ||
343 | return cred->cr_ops->crmarshal(task, p); | 460 | return cred->cr_ops->crmarshal(task, p); |
344 | } | 461 | } |
@@ -349,7 +466,7 @@ rpcauth_checkverf(struct rpc_task *task, __be32 *p)
349 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 466 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
350 | 467 | ||
351 | dprintk("RPC: %5u validating %s cred %p\n", | 468 | dprintk("RPC: %5u validating %s cred %p\n", |
352 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 469 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); |
353 | 470 | ||
354 | return cred->cr_ops->crvalidate(task, p); | 471 | return cred->cr_ops->crvalidate(task, p); |
355 | } | 472 | } |
@@ -359,13 +476,17 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
359 | __be32 *data, void *obj) | 476 | __be32 *data, void *obj) |
360 | { | 477 | { |
361 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 478 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
479 | int ret; | ||
362 | 480 | ||
363 | dprintk("RPC: %5u using %s cred %p to wrap rpc data\n", | 481 | dprintk("RPC: %5u using %s cred %p to wrap rpc data\n", |
364 | task->tk_pid, cred->cr_ops->cr_name, cred); | 482 | task->tk_pid, cred->cr_ops->cr_name, cred); |
365 | if (cred->cr_ops->crwrap_req) | 483 | if (cred->cr_ops->crwrap_req) |
366 | return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); | 484 | return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); |
367 | /* By default, we encode the arguments normally. */ | 485 | /* By default, we encode the arguments normally. */ |
368 | return encode(rqstp, data, obj); | 486 | lock_kernel(); |
487 | ret = encode(rqstp, data, obj); | ||
488 | unlock_kernel(); | ||
489 | return ret; | ||
369 | } | 490 | } |
370 | 491 | ||
371 | int | 492 | int |
@@ -373,6 +494,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
373 | __be32 *data, void *obj) | 494 | __be32 *data, void *obj) |
374 | { | 495 | { |
375 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 496 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
497 | int ret; | ||
376 | 498 | ||
377 | dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n", | 499 | dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n", |
378 | task->tk_pid, cred->cr_ops->cr_name, cred); | 500 | task->tk_pid, cred->cr_ops->cr_name, cred); |
@@ -380,7 +502,10 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
380 | return cred->cr_ops->crunwrap_resp(task, decode, rqstp, | 502 | return cred->cr_ops->crunwrap_resp(task, decode, rqstp, |
381 | data, obj); | 503 | data, obj); |
382 | /* By default, we decode the arguments normally. */ | 504 | /* By default, we decode the arguments normally. */ |
383 | return decode(rqstp, data, obj); | 505 | lock_kernel(); |
506 | ret = decode(rqstp, data, obj); | ||
507 | unlock_kernel(); | ||
508 | return ret; | ||
384 | } | 509 | } |
385 | 510 | ||
386 | int | 511 | int |
@@ -390,7 +515,7 @@ rpcauth_refreshcred(struct rpc_task *task)
390 | int err; | 515 | int err; |
391 | 516 | ||
392 | dprintk("RPC: %5u refreshing %s cred %p\n", | 517 | dprintk("RPC: %5u refreshing %s cred %p\n", |
393 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 518 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); |
394 | 519 | ||
395 | err = cred->cr_ops->crrefresh(task); | 520 | err = cred->cr_ops->crrefresh(task); |
396 | if (err < 0) | 521 | if (err < 0) |
@@ -401,17 +526,34 @@ rpcauth_refreshcred(struct rpc_task *task)
401 | void | 526 | void |
402 | rpcauth_invalcred(struct rpc_task *task) | 527 | rpcauth_invalcred(struct rpc_task *task) |
403 | { | 528 | { |
529 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | ||
530 | |||
404 | dprintk("RPC: %5u invalidating %s cred %p\n", | 531 | dprintk("RPC: %5u invalidating %s cred %p\n", |
405 | task->tk_pid, task->tk_auth->au_ops->au_name, task->tk_msg.rpc_cred); | 532 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); |
406 | spin_lock(&rpc_credcache_lock); | 533 | if (cred) |
407 | if (task->tk_msg.rpc_cred) | 534 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
408 | task->tk_msg.rpc_cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | ||
409 | spin_unlock(&rpc_credcache_lock); | ||
410 | } | 535 | } |
411 | 536 | ||
412 | int | 537 | int |
413 | rpcauth_uptodatecred(struct rpc_task *task) | 538 | rpcauth_uptodatecred(struct rpc_task *task) |
414 | { | 539 | { |
415 | return !(task->tk_msg.rpc_cred) || | 540 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
416 | (task->tk_msg.rpc_cred->cr_flags & RPCAUTH_CRED_UPTODATE); | 541 | |
542 | return cred == NULL || | ||
543 | test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0; | ||
544 | } | ||
545 | |||
546 | |||
547 | static struct shrinker *rpc_cred_shrinker; | ||
548 | |||
549 | void __init rpcauth_init_module(void) | ||
550 | { | ||
551 | rpc_init_authunix(); | ||
552 | rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker); | ||
553 | } | ||
554 | |||
555 | void __exit rpcauth_remove_module(void) | ||
556 | { | ||
557 | if (rpc_cred_shrinker != NULL) | ||
558 | remove_shrinker(rpc_cred_shrinker); | ||
417 | } | 559 | } |
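The auth_gss.c changes that follow move the per-credential GSS context under RCU as well: the context pointer is published with rcu_assign_pointer() and the old object is freed through call_rcu(), so gss_cred_get_ctx() can run locklessly. Below is a minimal sketch of that deferred-free pattern with hypothetical demo_* names; callers of demo_replace() are assumed to serialize against each other (the real code uses inode->i_lock for that).

/*
 * Deferred-free sketch -- demo_* names are hypothetical, not the GSS
 * code.  Readers dereference the pointer inside rcu_read_lock(); the
 * updater publishes the replacement with rcu_assign_pointer() and frees
 * the old object via call_rcu() once all readers have finished.
 */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_ctx {
	int		d_data;
	struct rcu_head	d_rcu;
};

static struct demo_ctx *demo_current;	/* read under rcu_read_lock() */

static void demo_free_rcu(struct rcu_head *head)
{
	struct demo_ctx *ctx = container_of(head, struct demo_ctx, d_rcu);

	kfree(ctx);
}

static void demo_replace(struct demo_ctx *new)	/* callers serialize updates */
{
	struct demo_ctx *old = demo_current;

	rcu_assign_pointer(demo_current, new);
	if (old != NULL)
		call_rcu(&old->d_rcu, demo_free_rcu);
}

static int demo_read(void)
{
	struct demo_ctx *ctx;
	int val = -1;

	rcu_read_lock();
	ctx = rcu_dereference(demo_current);
	if (ctx != NULL)
		val = ctx->d_data;
	rcu_read_unlock();
	return val;
}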
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 4e4ccc5b6fea..baf4096d52d4 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -54,9 +54,10 @@
54 | #include <linux/sunrpc/gss_api.h> | 54 | #include <linux/sunrpc/gss_api.h> |
55 | #include <asm/uaccess.h> | 55 | #include <asm/uaccess.h> |
56 | 56 | ||
57 | static struct rpc_authops authgss_ops; | 57 | static const struct rpc_authops authgss_ops; |
58 | 58 | ||
59 | static struct rpc_credops gss_credops; | 59 | static const struct rpc_credops gss_credops; |
60 | static const struct rpc_credops gss_nullops; | ||
60 | 61 | ||
61 | #ifdef RPC_DEBUG | 62 | #ifdef RPC_DEBUG |
62 | # define RPCDBG_FACILITY RPCDBG_AUTH | 63 | # define RPCDBG_FACILITY RPCDBG_AUTH |
@@ -64,7 +65,6 @@ static struct rpc_credops gss_credops;
64 | 65 | ||
65 | #define NFS_NGROUPS 16 | 66 | #define NFS_NGROUPS 16 |
66 | 67 | ||
67 | #define GSS_CRED_EXPIRE (60 * HZ) /* XXX: reasonable? */ | ||
68 | #define GSS_CRED_SLACK 1024 /* XXX: unused */ | 68 | #define GSS_CRED_SLACK 1024 /* XXX: unused */ |
69 | /* length of a krb5 verifier (48), plus data added before arguments when | 69 | /* length of a krb5 verifier (48), plus data added before arguments when |
70 | * using integrity (two 4-byte integers): */ | 70 | * using integrity (two 4-byte integers): */ |
@@ -79,19 +79,16 @@ static struct rpc_credops gss_credops;
79 | /* dump the buffer in `emacs-hexl' style */ | 79 | /* dump the buffer in `emacs-hexl' style */ |
80 | #define isprint(c) ((c > 0x1f) && (c < 0x7f)) | 80 | #define isprint(c) ((c > 0x1f) && (c < 0x7f)) |
81 | 81 | ||
82 | static DEFINE_RWLOCK(gss_ctx_lock); | ||
83 | |||
84 | struct gss_auth { | 82 | struct gss_auth { |
83 | struct kref kref; | ||
85 | struct rpc_auth rpc_auth; | 84 | struct rpc_auth rpc_auth; |
86 | struct gss_api_mech *mech; | 85 | struct gss_api_mech *mech; |
87 | enum rpc_gss_svc service; | 86 | enum rpc_gss_svc service; |
88 | struct list_head upcalls; | ||
89 | struct rpc_clnt *client; | 87 | struct rpc_clnt *client; |
90 | struct dentry *dentry; | 88 | struct dentry *dentry; |
91 | spinlock_t lock; | ||
92 | }; | 89 | }; |
93 | 90 | ||
94 | static void gss_destroy_ctx(struct gss_cl_ctx *); | 91 | static void gss_free_ctx(struct gss_cl_ctx *); |
95 | static struct rpc_pipe_ops gss_upcall_ops; | 92 | static struct rpc_pipe_ops gss_upcall_ops; |
96 | 93 | ||
97 | static inline struct gss_cl_ctx * | 94 | static inline struct gss_cl_ctx * |
@@ -105,20 +102,24 @@ static inline void
105 | gss_put_ctx(struct gss_cl_ctx *ctx) | 102 | gss_put_ctx(struct gss_cl_ctx *ctx) |
106 | { | 103 | { |
107 | if (atomic_dec_and_test(&ctx->count)) | 104 | if (atomic_dec_and_test(&ctx->count)) |
108 | gss_destroy_ctx(ctx); | 105 | gss_free_ctx(ctx); |
109 | } | 106 | } |
110 | 107 | ||
108 | /* gss_cred_set_ctx: | ||
109 | * called by gss_upcall_callback and gss_create_upcall in order | ||
110 | * to set the gss context. The actual exchange of an old context | ||
111 | * and a new one is protected by the inode->i_lock. | ||
112 | */ | ||
111 | static void | 113 | static void |
112 | gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) | 114 | gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) |
113 | { | 115 | { |
114 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); | 116 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); |
115 | struct gss_cl_ctx *old; | 117 | struct gss_cl_ctx *old; |
116 | write_lock(&gss_ctx_lock); | 118 | |
117 | old = gss_cred->gc_ctx; | 119 | old = gss_cred->gc_ctx; |
118 | gss_cred->gc_ctx = ctx; | 120 | rcu_assign_pointer(gss_cred->gc_ctx, ctx); |
119 | cred->cr_flags |= RPCAUTH_CRED_UPTODATE; | 121 | set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
120 | cred->cr_flags &= ~RPCAUTH_CRED_NEW; | 122 | clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); |
121 | write_unlock(&gss_ctx_lock); | ||
122 | if (old) | 123 | if (old) |
123 | gss_put_ctx(old); | 124 | gss_put_ctx(old); |
124 | } | 125 | } |
@@ -129,10 +130,10 @@ gss_cred_is_uptodate_ctx(struct rpc_cred *cred)
129 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); | 130 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); |
130 | int res = 0; | 131 | int res = 0; |
131 | 132 | ||
132 | read_lock(&gss_ctx_lock); | 133 | rcu_read_lock(); |
133 | if ((cred->cr_flags & RPCAUTH_CRED_UPTODATE) && gss_cred->gc_ctx) | 134 | if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) && gss_cred->gc_ctx) |
134 | res = 1; | 135 | res = 1; |
135 | read_unlock(&gss_ctx_lock); | 136 | rcu_read_unlock(); |
136 | return res; | 137 | return res; |
137 | } | 138 | } |
138 | 139 | ||
@@ -171,10 +172,10 @@ gss_cred_get_ctx(struct rpc_cred *cred)
171 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); | 172 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); |
172 | struct gss_cl_ctx *ctx = NULL; | 173 | struct gss_cl_ctx *ctx = NULL; |
173 | 174 | ||
174 | read_lock(&gss_ctx_lock); | 175 | rcu_read_lock(); |
175 | if (gss_cred->gc_ctx) | 176 | if (gss_cred->gc_ctx) |
176 | ctx = gss_get_ctx(gss_cred->gc_ctx); | 177 | ctx = gss_get_ctx(gss_cred->gc_ctx); |
177 | read_unlock(&gss_ctx_lock); | 178 | rcu_read_unlock(); |
178 | return ctx; | 179 | return ctx; |
179 | } | 180 | } |
180 | 181 | ||
@@ -269,10 +270,10 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
269 | } | 270 | } |
270 | 271 | ||
271 | static struct gss_upcall_msg * | 272 | static struct gss_upcall_msg * |
272 | __gss_find_upcall(struct gss_auth *gss_auth, uid_t uid) | 273 | __gss_find_upcall(struct rpc_inode *rpci, uid_t uid) |
273 | { | 274 | { |
274 | struct gss_upcall_msg *pos; | 275 | struct gss_upcall_msg *pos; |
275 | list_for_each_entry(pos, &gss_auth->upcalls, list) { | 276 | list_for_each_entry(pos, &rpci->in_downcall, list) { |
276 | if (pos->uid != uid) | 277 | if (pos->uid != uid) |
277 | continue; | 278 | continue; |
278 | atomic_inc(&pos->count); | 279 | atomic_inc(&pos->count); |
@@ -290,24 +291,24 @@ __gss_find_upcall(struct gss_auth *gss_auth, uid_t uid)
290 | static inline struct gss_upcall_msg * | 291 | static inline struct gss_upcall_msg * |
291 | gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg) | 292 | gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg) |
292 | { | 293 | { |
294 | struct inode *inode = gss_auth->dentry->d_inode; | ||
295 | struct rpc_inode *rpci = RPC_I(inode); | ||
293 | struct gss_upcall_msg *old; | 296 | struct gss_upcall_msg *old; |
294 | 297 | ||
295 | spin_lock(&gss_auth->lock); | 298 | spin_lock(&inode->i_lock); |
296 | old = __gss_find_upcall(gss_auth, gss_msg->uid); | 299 | old = __gss_find_upcall(rpci, gss_msg->uid); |
297 | if (old == NULL) { | 300 | if (old == NULL) { |
298 | atomic_inc(&gss_msg->count); | 301 | atomic_inc(&gss_msg->count); |
299 | list_add(&gss_msg->list, &gss_auth->upcalls); | 302 | list_add(&gss_msg->list, &rpci->in_downcall); |
300 | } else | 303 | } else |
301 | gss_msg = old; | 304 | gss_msg = old; |
302 | spin_unlock(&gss_auth->lock); | 305 | spin_unlock(&inode->i_lock); |
303 | return gss_msg; | 306 | return gss_msg; |
304 | } | 307 | } |
305 | 308 | ||
306 | static void | 309 | static void |
307 | __gss_unhash_msg(struct gss_upcall_msg *gss_msg) | 310 | __gss_unhash_msg(struct gss_upcall_msg *gss_msg) |
308 | { | 311 | { |
309 | if (list_empty(&gss_msg->list)) | ||
310 | return; | ||
311 | list_del_init(&gss_msg->list); | 312 | list_del_init(&gss_msg->list); |
312 | rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); | 313 | rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); |
313 | wake_up_all(&gss_msg->waitqueue); | 314 | wake_up_all(&gss_msg->waitqueue); |
@@ -318,10 +319,14 @@ static void
318 | gss_unhash_msg(struct gss_upcall_msg *gss_msg) | 319 | gss_unhash_msg(struct gss_upcall_msg *gss_msg) |
319 | { | 320 | { |
320 | struct gss_auth *gss_auth = gss_msg->auth; | 321 | struct gss_auth *gss_auth = gss_msg->auth; |
322 | struct inode *inode = gss_auth->dentry->d_inode; | ||
321 | 323 | ||
322 | spin_lock(&gss_auth->lock); | 324 | if (list_empty(&gss_msg->list)) |
323 | __gss_unhash_msg(gss_msg); | 325 | return; |
324 | spin_unlock(&gss_auth->lock); | 326 | spin_lock(&inode->i_lock); |
327 | if (!list_empty(&gss_msg->list)) | ||
328 | __gss_unhash_msg(gss_msg); | ||
329 | spin_unlock(&inode->i_lock); | ||
325 | } | 330 | } |
326 | 331 | ||
327 | static void | 332 | static void |
@@ -330,16 +335,16 @@ gss_upcall_callback(struct rpc_task *task)
330 | struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred, | 335 | struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred, |
331 | struct gss_cred, gc_base); | 336 | struct gss_cred, gc_base); |
332 | struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall; | 337 | struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall; |
338 | struct inode *inode = gss_msg->auth->dentry->d_inode; | ||
333 | 339 | ||
334 | BUG_ON(gss_msg == NULL); | 340 | spin_lock(&inode->i_lock); |
335 | if (gss_msg->ctx) | 341 | if (gss_msg->ctx) |
336 | gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_get_ctx(gss_msg->ctx)); | 342 | gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_get_ctx(gss_msg->ctx)); |
337 | else | 343 | else |
338 | task->tk_status = gss_msg->msg.errno; | 344 | task->tk_status = gss_msg->msg.errno; |
339 | spin_lock(&gss_msg->auth->lock); | ||
340 | gss_cred->gc_upcall = NULL; | 345 | gss_cred->gc_upcall = NULL; |
341 | rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); | 346 | rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); |
342 | spin_unlock(&gss_msg->auth->lock); | 347 | spin_unlock(&inode->i_lock); |
343 | gss_release_msg(gss_msg); | 348 | gss_release_msg(gss_msg); |
344 | } | 349 | } |
345 | 350 | ||
@@ -386,11 +391,12 @@ static inline int
386 | gss_refresh_upcall(struct rpc_task *task) | 391 | gss_refresh_upcall(struct rpc_task *task) |
387 | { | 392 | { |
388 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 393 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
389 | struct gss_auth *gss_auth = container_of(task->tk_client->cl_auth, | 394 | struct gss_auth *gss_auth = container_of(cred->cr_auth, |
390 | struct gss_auth, rpc_auth); | 395 | struct gss_auth, rpc_auth); |
391 | struct gss_cred *gss_cred = container_of(cred, | 396 | struct gss_cred *gss_cred = container_of(cred, |
392 | struct gss_cred, gc_base); | 397 | struct gss_cred, gc_base); |
393 | struct gss_upcall_msg *gss_msg; | 398 | struct gss_upcall_msg *gss_msg; |
399 | struct inode *inode = gss_auth->dentry->d_inode; | ||
394 | int err = 0; | 400 | int err = 0; |
395 | 401 | ||
396 | dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, | 402 | dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, |
@@ -400,7 +406,7 @@ gss_refresh_upcall(struct rpc_task *task)
400 | err = PTR_ERR(gss_msg); | 406 | err = PTR_ERR(gss_msg); |
401 | goto out; | 407 | goto out; |
402 | } | 408 | } |
403 | spin_lock(&gss_auth->lock); | 409 | spin_lock(&inode->i_lock); |
404 | if (gss_cred->gc_upcall != NULL) | 410 | if (gss_cred->gc_upcall != NULL) |
405 | rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL); | 411 | rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL); |
406 | else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { | 412 | else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { |
@@ -411,7 +417,7 @@ gss_refresh_upcall(struct rpc_task *task)
411 | rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL); | 417 | rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL); |
412 | } else | 418 | } else |
413 | err = gss_msg->msg.errno; | 419 | err = gss_msg->msg.errno; |
414 | spin_unlock(&gss_auth->lock); | 420 | spin_unlock(&inode->i_lock); |
415 | gss_release_msg(gss_msg); | 421 | gss_release_msg(gss_msg); |
416 | out: | 422 | out: |
417 | dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n", | 423 | dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n", |
@@ -422,6 +428,7 @@ out:
422 | static inline int | 428 | static inline int |
423 | gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) | 429 | gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) |
424 | { | 430 | { |
431 | struct inode *inode = gss_auth->dentry->d_inode; | ||
425 | struct rpc_cred *cred = &gss_cred->gc_base; | 432 | struct rpc_cred *cred = &gss_cred->gc_base; |
426 | struct gss_upcall_msg *gss_msg; | 433 | struct gss_upcall_msg *gss_msg; |
427 | DEFINE_WAIT(wait); | 434 | DEFINE_WAIT(wait); |
@@ -435,12 +442,11 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
435 | } | 442 | } |
436 | for (;;) { | 443 | for (;;) { |
437 | prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE); | 444 | prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE); |
438 | spin_lock(&gss_auth->lock); | 445 | spin_lock(&inode->i_lock); |
439 | if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { | 446 | if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { |
440 | spin_unlock(&gss_auth->lock); | ||
441 | break; | 447 | break; |
442 | } | 448 | } |
443 | spin_unlock(&gss_auth->lock); | 449 | spin_unlock(&inode->i_lock); |
444 | if (signalled()) { | 450 | if (signalled()) { |
445 | err = -ERESTARTSYS; | 451 | err = -ERESTARTSYS; |
446 | goto out_intr; | 452 | goto out_intr; |
@@ -451,6 +457,7 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
451 | gss_cred_set_ctx(cred, gss_get_ctx(gss_msg->ctx)); | 457 | gss_cred_set_ctx(cred, gss_get_ctx(gss_msg->ctx)); |
452 | else | 458 | else |
453 | err = gss_msg->msg.errno; | 459 | err = gss_msg->msg.errno; |
460 | spin_unlock(&inode->i_lock); | ||
454 | out_intr: | 461 | out_intr: |
455 | finish_wait(&gss_msg->waitqueue, &wait); | 462 | finish_wait(&gss_msg->waitqueue, &wait); |
456 | gss_release_msg(gss_msg); | 463 | gss_release_msg(gss_msg); |
@@ -489,12 +496,11 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
489 | const void *p, *end; | 496 | const void *p, *end; |
490 | void *buf; | 497 | void *buf; |
491 | struct rpc_clnt *clnt; | 498 | struct rpc_clnt *clnt; |
492 | struct gss_auth *gss_auth; | ||
493 | struct rpc_cred *cred; | ||
494 | struct gss_upcall_msg *gss_msg; | 499 | struct gss_upcall_msg *gss_msg; |
500 | struct inode *inode = filp->f_path.dentry->d_inode; | ||
495 | struct gss_cl_ctx *ctx; | 501 | struct gss_cl_ctx *ctx; |
496 | uid_t uid; | 502 | uid_t uid; |
497 | int err = -EFBIG; | 503 | ssize_t err = -EFBIG; |
498 | 504 | ||
499 | if (mlen > MSG_BUF_MAXSIZE) | 505 | if (mlen > MSG_BUF_MAXSIZE) |
500 | goto out; | 506 | goto out; |
@@ -503,7 +509,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
503 | if (!buf) | 509 | if (!buf) |
504 | goto out; | 510 | goto out; |
505 | 511 | ||
506 | clnt = RPC_I(filp->f_path.dentry->d_inode)->private; | 512 | clnt = RPC_I(inode)->private; |
507 | err = -EFAULT; | 513 | err = -EFAULT; |
508 | if (copy_from_user(buf, src, mlen)) | 514 | if (copy_from_user(buf, src, mlen)) |
509 | goto err; | 515 | goto err; |
@@ -519,43 +525,38 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
519 | ctx = gss_alloc_context(); | 525 | ctx = gss_alloc_context(); |
520 | if (ctx == NULL) | 526 | if (ctx == NULL) |
521 | goto err; | 527 | goto err; |
522 | err = 0; | 528 | |
523 | gss_auth = container_of(clnt->cl_auth, struct gss_auth, rpc_auth); | 529 | err = -ENOENT; |
524 | p = gss_fill_context(p, end, ctx, gss_auth->mech); | 530 | /* Find a matching upcall */ |
531 | spin_lock(&inode->i_lock); | ||
532 | gss_msg = __gss_find_upcall(RPC_I(inode), uid); | ||
533 | if (gss_msg == NULL) { | ||
534 | spin_unlock(&inode->i_lock); | ||
535 | goto err_put_ctx; | ||
536 | } | ||
537 | list_del_init(&gss_msg->list); | ||
538 | spin_unlock(&inode->i_lock); | ||
539 | |||
540 | p = gss_fill_context(p, end, ctx, gss_msg->auth->mech); | ||
525 | if (IS_ERR(p)) { | 541 | if (IS_ERR(p)) { |
526 | err = PTR_ERR(p); | 542 | err = PTR_ERR(p); |
527 | if (err != -EACCES) | 543 | gss_msg->msg.errno = (err == -EACCES) ? -EACCES : -EAGAIN; |
528 | goto err_put_ctx; | 544 | goto err_release_msg; |
529 | } | ||
530 | spin_lock(&gss_auth->lock); | ||
531 | gss_msg = __gss_find_upcall(gss_auth, uid); | ||
532 | if (gss_msg) { | ||
533 | if (err == 0 && gss_msg->ctx == NULL) | ||
534 | gss_msg->ctx = gss_get_ctx(ctx); | ||
535 | gss_msg->msg.errno = err; | ||
536 | __gss_unhash_msg(gss_msg); | ||
537 | spin_unlock(&gss_auth->lock); | ||
538 | gss_release_msg(gss_msg); | ||
539 | } else { | ||
540 | struct auth_cred acred = { .uid = uid }; | ||
541 | spin_unlock(&gss_auth->lock); | ||
542 | cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, RPCAUTH_LOOKUP_NEW); | ||
543 | if (IS_ERR(cred)) { | ||
544 | err = PTR_ERR(cred); | ||
545 | goto err_put_ctx; | ||
546 | } | ||
547 | gss_cred_set_ctx(cred, gss_get_ctx(ctx)); | ||
548 | } | 545 | } |
549 | gss_put_ctx(ctx); | 546 | gss_msg->ctx = gss_get_ctx(ctx); |
550 | kfree(buf); | 547 | err = mlen; |
551 | dprintk("RPC: gss_pipe_downcall returning length %Zu\n", mlen); | 548 | |
552 | return mlen; | 549 | err_release_msg: |
550 | spin_lock(&inode->i_lock); | ||
551 | __gss_unhash_msg(gss_msg); | ||
552 | spin_unlock(&inode->i_lock); | ||
553 | gss_release_msg(gss_msg); | ||
553 | err_put_ctx: | 554 | err_put_ctx: |
554 | gss_put_ctx(ctx); | 555 | gss_put_ctx(ctx); |
555 | err: | 556 | err: |
556 | kfree(buf); | 557 | kfree(buf); |
557 | out: | 558 | out: |
558 | dprintk("RPC: gss_pipe_downcall returning %d\n", err); | 559 | dprintk("RPC: gss_pipe_downcall returning %Zd\n", err); |
559 | return err; | 560 | return err; |
560 | } | 561 | } |
561 | 562 | ||
@@ -563,27 +564,21 @@ static void
563 | gss_pipe_release(struct inode *inode) | 564 | gss_pipe_release(struct inode *inode) |
564 | { | 565 | { |
565 | struct rpc_inode *rpci = RPC_I(inode); | 566 | struct rpc_inode *rpci = RPC_I(inode); |
566 | struct rpc_clnt *clnt; | 567 | struct gss_upcall_msg *gss_msg; |
567 | struct rpc_auth *auth; | ||
568 | struct gss_auth *gss_auth; | ||
569 | 568 | ||
570 | clnt = rpci->private; | 569 | spin_lock(&inode->i_lock); |
571 | auth = clnt->cl_auth; | 570 | while (!list_empty(&rpci->in_downcall)) { |
572 | gss_auth = container_of(auth, struct gss_auth, rpc_auth); | ||
573 | spin_lock(&gss_auth->lock); | ||
574 | while (!list_empty(&gss_auth->upcalls)) { | ||
575 | struct gss_upcall_msg *gss_msg; | ||
576 | 571 | ||
577 | gss_msg = list_entry(gss_auth->upcalls.next, | 572 | gss_msg = list_entry(rpci->in_downcall.next, |
578 | struct gss_upcall_msg, list); | 573 | struct gss_upcall_msg, list); |
579 | gss_msg->msg.errno = -EPIPE; | 574 | gss_msg->msg.errno = -EPIPE; |
580 | atomic_inc(&gss_msg->count); | 575 | atomic_inc(&gss_msg->count); |
581 | __gss_unhash_msg(gss_msg); | 576 | __gss_unhash_msg(gss_msg); |
582 | spin_unlock(&gss_auth->lock); | 577 | spin_unlock(&inode->i_lock); |
583 | gss_release_msg(gss_msg); | 578 | gss_release_msg(gss_msg); |
584 | spin_lock(&gss_auth->lock); | 579 | spin_lock(&inode->i_lock); |
585 | } | 580 | } |
586 | spin_unlock(&gss_auth->lock); | 581 | spin_unlock(&inode->i_lock); |
587 | } | 582 | } |
588 | 583 | ||
589 | static void | 584 | static void |
@@ -637,18 +632,13 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) | |||
637 | gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); | 632 | gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); |
638 | if (gss_auth->service == 0) | 633 | if (gss_auth->service == 0) |
639 | goto err_put_mech; | 634 | goto err_put_mech; |
640 | INIT_LIST_HEAD(&gss_auth->upcalls); | ||
641 | spin_lock_init(&gss_auth->lock); | ||
642 | auth = &gss_auth->rpc_auth; | 635 | auth = &gss_auth->rpc_auth; |
643 | auth->au_cslack = GSS_CRED_SLACK >> 2; | 636 | auth->au_cslack = GSS_CRED_SLACK >> 2; |
644 | auth->au_rslack = GSS_VERF_SLACK >> 2; | 637 | auth->au_rslack = GSS_VERF_SLACK >> 2; |
645 | auth->au_ops = &authgss_ops; | 638 | auth->au_ops = &authgss_ops; |
646 | auth->au_flavor = flavor; | 639 | auth->au_flavor = flavor; |
647 | atomic_set(&auth->au_count, 1); | 640 | atomic_set(&auth->au_count, 1); |
648 | 641 | kref_init(&gss_auth->kref); | |
649 | err = rpcauth_init_credcache(auth, GSS_CRED_EXPIRE); | ||
650 | if (err) | ||
651 | goto err_put_mech; | ||
652 | 642 | ||
653 | gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name, | 643 | gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name, |
654 | clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); | 644 | clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); |
@@ -657,7 +647,13 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
657 | goto err_put_mech; | 647 | goto err_put_mech; |
658 | } | 648 | } |
659 | 649 | ||
650 | err = rpcauth_init_credcache(auth); | ||
651 | if (err) | ||
652 | goto err_unlink_pipe; | ||
653 | |||
660 | return auth; | 654 | return auth; |
655 | err_unlink_pipe: | ||
656 | rpc_unlink(gss_auth->dentry); | ||
661 | err_put_mech: | 657 | err_put_mech: |
662 | gss_mech_put(gss_auth->mech); | 658 | gss_mech_put(gss_auth->mech); |
663 | err_free: | 659 | err_free: |
@@ -668,6 +664,25 @@ out_dec:
668 | } | 664 | } |
669 | 665 | ||
670 | static void | 666 | static void |
667 | gss_free(struct gss_auth *gss_auth) | ||
668 | { | ||
669 | rpc_unlink(gss_auth->dentry); | ||
670 | gss_auth->dentry = NULL; | ||
671 | gss_mech_put(gss_auth->mech); | ||
672 | |||
673 | kfree(gss_auth); | ||
674 | module_put(THIS_MODULE); | ||
675 | } | ||
676 | |||
677 | static void | ||
678 | gss_free_callback(struct kref *kref) | ||
679 | { | ||
680 | struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref); | ||
681 | |||
682 | gss_free(gss_auth); | ||
683 | } | ||
684 | |||
685 | static void | ||
671 | gss_destroy(struct rpc_auth *auth) | 686 | gss_destroy(struct rpc_auth *auth) |
672 | { | 687 | { |
673 | struct gss_auth *gss_auth; | 688 | struct gss_auth *gss_auth; |
@@ -675,23 +690,51 @@ gss_destroy(struct rpc_auth *auth)
675 | dprintk("RPC: destroying GSS authenticator %p flavor %d\n", | 690 | dprintk("RPC: destroying GSS authenticator %p flavor %d\n", |
676 | auth, auth->au_flavor); | 691 | auth, auth->au_flavor); |
677 | 692 | ||
693 | rpcauth_destroy_credcache(auth); | ||
694 | |||
678 | gss_auth = container_of(auth, struct gss_auth, rpc_auth); | 695 | gss_auth = container_of(auth, struct gss_auth, rpc_auth); |
679 | rpc_unlink(gss_auth->dentry); | 696 | kref_put(&gss_auth->kref, gss_free_callback); |
680 | gss_auth->dentry = NULL; | 697 | } |
681 | gss_mech_put(gss_auth->mech); | ||
682 | 698 | ||
683 | rpcauth_free_credcache(auth); | 699 | /* |
684 | kfree(gss_auth); | 700 | * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call |
685 | module_put(THIS_MODULE); | 701 | * to the server with the GSS control procedure field set to |
702 | * RPC_GSS_PROC_DESTROY. This should normally cause the server to release | ||
703 | * all RPCSEC_GSS state associated with that context. | ||
704 | */ | ||
705 | static int | ||
706 | gss_destroying_context(struct rpc_cred *cred) | ||
707 | { | ||
708 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); | ||
709 | struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); | ||
710 | struct rpc_task *task; | ||
711 | |||
712 | if (gss_cred->gc_ctx == NULL || | ||
713 | gss_cred->gc_ctx->gc_proc == RPC_GSS_PROC_DESTROY) | ||
714 | return 0; | ||
715 | |||
716 | gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY; | ||
717 | cred->cr_ops = &gss_nullops; | ||
718 | |||
719 | /* Take a reference to ensure the cred will be destroyed either | ||
720 | * by the RPC call or by the put_rpccred() below */ | ||
721 | get_rpccred(cred); | ||
722 | |||
723 | task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC); | ||
724 | if (!IS_ERR(task)) | ||
725 | rpc_put_task(task); | ||
726 | |||
727 | put_rpccred(cred); | ||
728 | return 1; | ||
686 | } | 729 | } |
687 | 730 | ||
688 | /* gss_destroy_cred (and gss_destroy_ctx) are used to clean up after failure | 731 | /* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure |
689 | * to create a new cred or context, so they check that things have been | 732 | * to create a new cred or context, so they check that things have been |
690 | * allocated before freeing them. */ | 733 | * allocated before freeing them. */ |
691 | static void | 734 | static void |
692 | gss_destroy_ctx(struct gss_cl_ctx *ctx) | 735 | gss_do_free_ctx(struct gss_cl_ctx *ctx) |
693 | { | 736 | { |
694 | dprintk("RPC: gss_destroy_ctx\n"); | 737 | dprintk("RPC: gss_free_ctx\n"); |
695 | 738 | ||
696 | if (ctx->gc_gss_ctx) | 739 | if (ctx->gc_gss_ctx) |
697 | gss_delete_sec_context(&ctx->gc_gss_ctx); | 740 | gss_delete_sec_context(&ctx->gc_gss_ctx); |
@@ -701,15 +744,46 @@ gss_destroy_ctx(struct gss_cl_ctx *ctx)
701 | } | 744 | } |
702 | 745 | ||
703 | static void | 746 | static void |
704 | gss_destroy_cred(struct rpc_cred *rc) | 747 | gss_free_ctx_callback(struct rcu_head *head) |
705 | { | 748 | { |
706 | struct gss_cred *cred = container_of(rc, struct gss_cred, gc_base); | 749 | struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu); |
750 | gss_do_free_ctx(ctx); | ||
751 | } | ||
707 | 752 | ||
708 | dprintk("RPC: gss_destroy_cred \n"); | 753 | static void |
754 | gss_free_ctx(struct gss_cl_ctx *ctx) | ||
755 | { | ||
756 | call_rcu(&ctx->gc_rcu, gss_free_ctx_callback); | ||
757 | } | ||
709 | 758 | ||
710 | if (cred->gc_ctx) | 759 | static void |
711 | gss_put_ctx(cred->gc_ctx); | 760 | gss_free_cred(struct gss_cred *gss_cred) |
712 | kfree(cred); | 761 | { |
762 | dprintk("RPC: gss_free_cred %p\n", gss_cred); | ||
763 | kfree(gss_cred); | ||
764 | } | ||
765 | |||
766 | static void | ||
767 | gss_free_cred_callback(struct rcu_head *head) | ||
768 | { | ||
769 | struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu); | ||
770 | gss_free_cred(gss_cred); | ||
771 | } | ||
772 | |||
773 | static void | ||
774 | gss_destroy_cred(struct rpc_cred *cred) | ||
775 | { | ||
776 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); | ||
777 | struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); | ||
778 | struct gss_cl_ctx *ctx = gss_cred->gc_ctx; | ||
779 | |||
780 | if (gss_destroying_context(cred)) | ||
781 | return; | ||
782 | rcu_assign_pointer(gss_cred->gc_ctx, NULL); | ||
783 | call_rcu(&cred->cr_rcu, gss_free_cred_callback); | ||
784 | if (ctx) | ||
785 | gss_put_ctx(ctx); | ||
786 | kref_put(&gss_auth->kref, gss_free_callback); | ||
713 | } | 787 | } |
714 | 788 | ||
715 | /* | 789 | /* |
@@ -734,16 +808,14 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
734 | if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL))) | 808 | if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL))) |
735 | goto out_err; | 809 | goto out_err; |
736 | 810 | ||
737 | atomic_set(&cred->gc_count, 1); | 811 | rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops); |
738 | cred->gc_uid = acred->uid; | ||
739 | /* | 812 | /* |
740 | * Note: in order to force a call to call_refresh(), we deliberately | 813 | * Note: in order to force a call to call_refresh(), we deliberately |
741 | * fail to flag the credential as RPCAUTH_CRED_UPTODATE. | 814 | * fail to flag the credential as RPCAUTH_CRED_UPTODATE. |
742 | */ | 815 | */ |
743 | cred->gc_flags = 0; | 816 | cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW; |
744 | cred->gc_base.cr_ops = &gss_credops; | ||
745 | cred->gc_base.cr_flags = RPCAUTH_CRED_NEW; | ||
746 | cred->gc_service = gss_auth->service; | 817 | cred->gc_service = gss_auth->service; |
818 | kref_get(&gss_auth->kref); | ||
747 | return &cred->gc_base; | 819 | return &cred->gc_base; |
748 | 820 | ||
749 | out_err: | 821 | out_err: |
@@ -774,7 +846,7 @@ gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags) | |||
774 | * we don't really care if the credential has expired or not, | 846 | * we don't really care if the credential has expired or not, |
775 | * since the caller should be prepared to reinitialise it. | 847 | * since the caller should be prepared to reinitialise it. |
776 | */ | 848 | */ |
777 | if ((flags & RPCAUTH_LOOKUP_NEW) && (rc->cr_flags & RPCAUTH_CRED_NEW)) | 849 | if ((flags & RPCAUTH_LOOKUP_NEW) && test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags)) |
778 | goto out; | 850 | goto out; |
779 | /* Don't match with creds that have expired. */ | 851 | /* Don't match with creds that have expired. */ |
780 | if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) | 852 | if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) |
@@ -830,7 +902,7 @@ gss_marshal(struct rpc_task *task, __be32 *p) | |||
830 | mic.data = (u8 *)(p + 1); | 902 | mic.data = (u8 *)(p + 1); |
831 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); | 903 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); |
832 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) { | 904 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) { |
833 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 905 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
834 | } else if (maj_stat != 0) { | 906 | } else if (maj_stat != 0) { |
835 | printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); | 907 | printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); |
836 | goto out_put_ctx; | 908 | goto out_put_ctx; |
@@ -855,6 +927,13 @@ gss_refresh(struct rpc_task *task) | |||
855 | return 0; | 927 | return 0; |
856 | } | 928 | } |
857 | 929 | ||
930 | /* Dummy refresh routine: used only when destroying the context */ | ||
931 | static int | ||
932 | gss_refresh_null(struct rpc_task *task) | ||
933 | { | ||
934 | return -EACCES; | ||
935 | } | ||
936 | |||
858 | static __be32 * | 937 | static __be32 * |
859 | gss_validate(struct rpc_task *task, __be32 *p) | 938 | gss_validate(struct rpc_task *task, __be32 *p) |
860 | { | 939 | { |
@@ -883,12 +962,15 @@ gss_validate(struct rpc_task *task, __be32 *p) | |||
883 | 962 | ||
884 | maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); | 963 | maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); |
885 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | 964 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) |
886 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 965 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
887 | if (maj_stat) | 966 | if (maj_stat) { |
967 | dprintk("RPC: %5u gss_validate: gss_verify_mic returned" | ||
968 | "error 0x%08x\n", task->tk_pid, maj_stat); | ||
888 | goto out_bad; | 969 | goto out_bad; |
970 | } | ||
889 | /* We leave it to unwrap to calculate au_rslack. For now we just | 971 | /* We leave it to unwrap to calculate au_rslack. For now we just |
890 | * calculate the length of the verifier: */ | 972 | * calculate the length of the verifier: */ |
891 | task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2; | 973 | cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2; |
892 | gss_put_ctx(ctx); | 974 | gss_put_ctx(ctx); |
893 | dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n", | 975 | dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n", |
894 | task->tk_pid); | 976 | task->tk_pid); |
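The cr_flags updates above move from plain bitmask arithmetic to the atomic bit helpers, which is what allows lockless users of the credential to flip individual state bits safely. A small sketch of the idiom (the flag names and the struct are illustrative only):

#include <linux/types.h>
#include <linux/bitops.h>

#define MY_CRED_NEW		0	/* bit numbers, not masks */
#define MY_CRED_UPTODATE	1

struct my_cred {
	unsigned long flags;
};

static void my_cred_mark_stale(struct my_cred *cred)
{
	/* Atomic read-modify-write on one bit; no lock is needed
	 * around the whole flags word. */
	clear_bit(MY_CRED_UPTODATE, &cred->flags);
}

static bool my_cred_is_fresh(struct my_cred *cred)
{
	return test_bit(MY_CRED_UPTODATE, &cred->flags);
}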
@@ -917,7 +999,9 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
917 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; | 999 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; |
918 | *p++ = htonl(rqstp->rq_seqno); | 1000 | *p++ = htonl(rqstp->rq_seqno); |
919 | 1001 | ||
1002 | lock_kernel(); | ||
920 | status = encode(rqstp, p, obj); | 1003 | status = encode(rqstp, p, obj); |
1004 | unlock_kernel(); | ||
921 | if (status) | 1005 | if (status) |
922 | return status; | 1006 | return status; |
923 | 1007 | ||
@@ -937,7 +1021,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
937 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic); | 1021 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic); |
938 | status = -EIO; /* XXX? */ | 1022 | status = -EIO; /* XXX? */ |
939 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | 1023 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) |
940 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 1024 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
941 | else if (maj_stat) | 1025 | else if (maj_stat) |
942 | return status; | 1026 | return status; |
943 | q = xdr_encode_opaque(p, NULL, mic.len); | 1027 | q = xdr_encode_opaque(p, NULL, mic.len); |
@@ -1011,7 +1095,9 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
1011 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; | 1095 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; |
1012 | *p++ = htonl(rqstp->rq_seqno); | 1096 | *p++ = htonl(rqstp->rq_seqno); |
1013 | 1097 | ||
1098 | lock_kernel(); | ||
1014 | status = encode(rqstp, p, obj); | 1099 | status = encode(rqstp, p, obj); |
1100 | unlock_kernel(); | ||
1015 | if (status) | 1101 | if (status) |
1016 | return status; | 1102 | return status; |
1017 | 1103 | ||
@@ -1036,7 +1122,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
1036 | /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was | 1122 | /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was |
1037 | * done anyway, so it's safe to put the request on the wire: */ | 1123 | * done anyway, so it's safe to put the request on the wire: */ |
1038 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | 1124 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) |
1039 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 1125 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
1040 | else if (maj_stat) | 1126 | else if (maj_stat) |
1041 | return status; | 1127 | return status; |
1042 | 1128 | ||
@@ -1070,12 +1156,16 @@ gss_wrap_req(struct rpc_task *task, | |||
1070 | /* The spec seems a little ambiguous here, but I think that not | 1156 | /* The spec seems a little ambiguous here, but I think that not |
1071 | * wrapping context destruction requests makes the most sense. | 1157 | * wrapping context destruction requests makes the most sense. |
1072 | */ | 1158 | */ |
1159 | lock_kernel(); | ||
1073 | status = encode(rqstp, p, obj); | 1160 | status = encode(rqstp, p, obj); |
1161 | unlock_kernel(); | ||
1074 | goto out; | 1162 | goto out; |
1075 | } | 1163 | } |
1076 | switch (gss_cred->gc_service) { | 1164 | switch (gss_cred->gc_service) { |
1077 | case RPC_GSS_SVC_NONE: | 1165 | case RPC_GSS_SVC_NONE: |
1166 | lock_kernel(); | ||
1078 | status = encode(rqstp, p, obj); | 1167 | status = encode(rqstp, p, obj); |
1168 | unlock_kernel(); | ||
1079 | break; | 1169 | break; |
1080 | case RPC_GSS_SVC_INTEGRITY: | 1170 | case RPC_GSS_SVC_INTEGRITY: |
1081 | status = gss_wrap_req_integ(cred, ctx, encode, | 1171 | status = gss_wrap_req_integ(cred, ctx, encode, |
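The lock_kernel()/unlock_kernel() pairs added above push the Big Kernel Lock down to just the XDR encode callback, matching the "drop BKL around wrap and unwrap" change in this merge: the caller no longer holds the BKL, so it is taken only where the not-yet-BKL-safe callback runs. Roughly (the helper and its argument types are illustrative, not the committed code):

#include <linux/types.h>
#include <linux/smp_lock.h>

static int wrap_and_encode(int (*encode)(void *rqstp, __be32 *p, void *obj),
			   void *rqstp, __be32 *p, void *obj)
{
	int status;

	lock_kernel();			/* protect the non-BKL-safe callback */
	status = encode(rqstp, p, obj);
	unlock_kernel();
	return status;
}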
@@ -1123,7 +1213,7 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
1123 | 1213 | ||
1124 | maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); | 1214 | maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); |
1125 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | 1215 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) |
1126 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 1216 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
1127 | if (maj_stat != GSS_S_COMPLETE) | 1217 | if (maj_stat != GSS_S_COMPLETE) |
1128 | return status; | 1218 | return status; |
1129 | return 0; | 1219 | return 0; |
@@ -1148,7 +1238,7 @@ gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
1148 | 1238 | ||
1149 | maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf); | 1239 | maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf); |
1150 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | 1240 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) |
1151 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 1241 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
1152 | if (maj_stat != GSS_S_COMPLETE) | 1242 | if (maj_stat != GSS_S_COMPLETE) |
1153 | return status; | 1243 | return status; |
1154 | if (ntohl(*(*p)++) != rqstp->rq_seqno) | 1244 | if (ntohl(*(*p)++) != rqstp->rq_seqno) |
@@ -1188,10 +1278,12 @@ gss_unwrap_resp(struct rpc_task *task, | |||
1188 | break; | 1278 | break; |
1189 | } | 1279 | } |
1190 | /* take into account extra slack for integrity and privacy cases: */ | 1280 | /* take into account extra slack for integrity and privacy cases: */ |
1191 | task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp) | 1281 | cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) |
1192 | + (savedlen - head->iov_len); | 1282 | + (savedlen - head->iov_len); |
1193 | out_decode: | 1283 | out_decode: |
1284 | lock_kernel(); | ||
1194 | status = decode(rqstp, p, obj); | 1285 | status = decode(rqstp, p, obj); |
1286 | unlock_kernel(); | ||
1195 | out: | 1287 | out: |
1196 | gss_put_ctx(ctx); | 1288 | gss_put_ctx(ctx); |
1197 | dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, | 1289 | dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, |
@@ -1199,7 +1291,7 @@ out: | |||
1199 | return status; | 1291 | return status; |
1200 | } | 1292 | } |
1201 | 1293 | ||
1202 | static struct rpc_authops authgss_ops = { | 1294 | static const struct rpc_authops authgss_ops = { |
1203 | .owner = THIS_MODULE, | 1295 | .owner = THIS_MODULE, |
1204 | .au_flavor = RPC_AUTH_GSS, | 1296 | .au_flavor = RPC_AUTH_GSS, |
1205 | #ifdef RPC_DEBUG | 1297 | #ifdef RPC_DEBUG |
@@ -1211,7 +1303,7 @@ static struct rpc_authops authgss_ops = { | |||
1211 | .crcreate = gss_create_cred | 1303 | .crcreate = gss_create_cred |
1212 | }; | 1304 | }; |
1213 | 1305 | ||
1214 | static struct rpc_credops gss_credops = { | 1306 | static const struct rpc_credops gss_credops = { |
1215 | .cr_name = "AUTH_GSS", | 1307 | .cr_name = "AUTH_GSS", |
1216 | .crdestroy = gss_destroy_cred, | 1308 | .crdestroy = gss_destroy_cred, |
1217 | .cr_init = gss_cred_init, | 1309 | .cr_init = gss_cred_init, |
@@ -1223,6 +1315,17 @@ static struct rpc_credops gss_credops = { | |||
1223 | .crunwrap_resp = gss_unwrap_resp, | 1315 | .crunwrap_resp = gss_unwrap_resp, |
1224 | }; | 1316 | }; |
1225 | 1317 | ||
1318 | static const struct rpc_credops gss_nullops = { | ||
1319 | .cr_name = "AUTH_GSS", | ||
1320 | .crdestroy = gss_destroy_cred, | ||
1321 | .crmatch = gss_match, | ||
1322 | .crmarshal = gss_marshal, | ||
1323 | .crrefresh = gss_refresh_null, | ||
1324 | .crvalidate = gss_validate, | ||
1325 | .crwrap_req = gss_wrap_req, | ||
1326 | .crunwrap_resp = gss_unwrap_resp, | ||
1327 | }; | ||
1328 | |||
1226 | static struct rpc_pipe_ops gss_upcall_ops = { | 1329 | static struct rpc_pipe_ops gss_upcall_ops = { |
1227 | .upcall = gss_pipe_upcall, | 1330 | .upcall = gss_pipe_upcall, |
1228 | .downcall = gss_pipe_downcall, | 1331 | .downcall = gss_pipe_downcall, |
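Taken together with gss_destroying_context() at the top of this file's diff, the new gss_nullops table lets a dying credential send one last asynchronous NULL RPC to tear down its context on the server without ever trying to refresh itself (crrefresh returns -EACCES). A sketch of the fire-and-forget call pattern, assuming only the rpc_call_null()/rpc_put_task() interfaces introduced in this series:

#include <linux/err.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

static int send_context_destroy(struct rpc_clnt *clnt, struct rpc_cred *cred)
{
	struct rpc_task *task;

	/* Start the NULL call asynchronously and drop our task
	 * reference right away; rpciod finishes it in the background. */
	task = rpc_call_null(clnt, cred, RPC_TASK_ASYNC);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}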
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 7b1943217053..71b9daefdff3
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -201,7 +201,7 @@ gss_delete_sec_context_kerberos(void *internal_ctx) { | |||
201 | kfree(kctx); | 201 | kfree(kctx); |
202 | } | 202 | } |
203 | 203 | ||
204 | static struct gss_api_ops gss_kerberos_ops = { | 204 | static const struct gss_api_ops gss_kerberos_ops = { |
205 | .gss_import_sec_context = gss_import_sec_context_kerberos, | 205 | .gss_import_sec_context = gss_import_sec_context_kerberos, |
206 | .gss_get_mic = gss_get_mic_kerberos, | 206 | .gss_get_mic = gss_get_mic_kerberos, |
207 | .gss_verify_mic = gss_verify_mic_kerberos, | 207 | .gss_verify_mic = gss_verify_mic_kerberos, |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 7e15aa68ae64..577d590e755f
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -202,7 +202,7 @@ gss_get_mic_spkm3(struct gss_ctx *ctx, | |||
202 | return err; | 202 | return err; |
203 | } | 203 | } |
204 | 204 | ||
205 | static struct gss_api_ops gss_spkm3_ops = { | 205 | static const struct gss_api_ops gss_spkm3_ops = { |
206 | .gss_import_sec_context = gss_import_sec_context_spkm3, | 206 | .gss_import_sec_context = gss_import_sec_context_spkm3, |
207 | .gss_get_mic = gss_get_mic_spkm3, | 207 | .gss_get_mic = gss_get_mic_spkm3, |
208 | .gss_verify_mic = gss_verify_mic_spkm3, | 208 | .gss_verify_mic = gss_verify_mic_spkm3, |
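Both mechanism ops tables above only gain a const qualifier. Declaring an ops table const lets the compiler place it in read-only data and documents that the function-pointer set is never patched at run time; a generic sketch (the struct is made up for illustration):

struct my_ops {
	int  (*init)(void *ctx);
	void (*destroy)(void *ctx);
};

static int  my_init(void *ctx)    { return 0; }
static void my_destroy(void *ctx) { }

/* Lands in .rodata; assigning to my_mech_ops.init later would be a
 * compile-time error. */
static const struct my_ops my_mech_ops = {
	.init	 = my_init,
	.destroy = my_destroy,
};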
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c
index 3df9fccab2f8..537d0e8589dd
--- a/net/sunrpc/auth_null.c
+++ b/net/sunrpc/auth_null.c
@@ -76,7 +76,7 @@ nul_marshal(struct rpc_task *task, __be32 *p) | |||
76 | static int | 76 | static int |
77 | nul_refresh(struct rpc_task *task) | 77 | nul_refresh(struct rpc_task *task) |
78 | { | 78 | { |
79 | task->tk_msg.rpc_cred->cr_flags |= RPCAUTH_CRED_UPTODATE; | 79 | set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_msg.rpc_cred->cr_flags); |
80 | return 0; | 80 | return 0; |
81 | } | 81 | } |
82 | 82 | ||
@@ -101,7 +101,7 @@ nul_validate(struct rpc_task *task, __be32 *p) | |||
101 | return p; | 101 | return p; |
102 | } | 102 | } |
103 | 103 | ||
104 | struct rpc_authops authnull_ops = { | 104 | const struct rpc_authops authnull_ops = { |
105 | .owner = THIS_MODULE, | 105 | .owner = THIS_MODULE, |
106 | .au_flavor = RPC_AUTH_NULL, | 106 | .au_flavor = RPC_AUTH_NULL, |
107 | #ifdef RPC_DEBUG | 107 | #ifdef RPC_DEBUG |
@@ -122,7 +122,7 @@ struct rpc_auth null_auth = { | |||
122 | }; | 122 | }; |
123 | 123 | ||
124 | static | 124 | static |
125 | struct rpc_credops null_credops = { | 125 | const struct rpc_credops null_credops = { |
126 | .cr_name = "AUTH_NULL", | 126 | .cr_name = "AUTH_NULL", |
127 | .crdestroy = nul_destroy_cred, | 127 | .crdestroy = nul_destroy_cred, |
128 | .crmatch = nul_match, | 128 | .crmatch = nul_match, |
@@ -133,9 +133,11 @@ struct rpc_credops null_credops = { | |||
133 | 133 | ||
134 | static | 134 | static |
135 | struct rpc_cred null_cred = { | 135 | struct rpc_cred null_cred = { |
136 | .cr_lru = LIST_HEAD_INIT(null_cred.cr_lru), | ||
137 | .cr_auth = &null_auth, | ||
136 | .cr_ops = &null_credops, | 138 | .cr_ops = &null_credops, |
137 | .cr_count = ATOMIC_INIT(1), | 139 | .cr_count = ATOMIC_INIT(1), |
138 | .cr_flags = RPCAUTH_CRED_UPTODATE, | 140 | .cr_flags = 1UL << RPCAUTH_CRED_UPTODATE, |
139 | #ifdef RPC_DEBUG | 141 | #ifdef RPC_DEBUG |
140 | .cr_magic = RPCAUTH_CRED_MAGIC, | 142 | .cr_magic = RPCAUTH_CRED_MAGIC, |
141 | #endif | 143 | #endif |
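The static null_cred above now needs its cr_lru list head initialised to point at itself and stores its flags as shifted bit numbers, since cr_flags is an unsigned long operated on with set_bit()/clear_bit(). A compact sketch of the same static initialisation (names are illustrative):

#include <linux/list.h>

#define MY_CRED_UPTODATE	1	/* bit number */

struct my_cred {
	struct list_head lru;
	unsigned long	 flags;
};

static struct my_cred my_static_cred = {
	.lru	= LIST_HEAD_INIT(my_static_cred.lru),	/* empty list */
	.flags	= 1UL << MY_CRED_UPTODATE,
};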
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 4e7733aee36e..5ed91e5bcee4
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -20,11 +20,6 @@ struct unx_cred { | |||
20 | gid_t uc_gids[NFS_NGROUPS]; | 20 | gid_t uc_gids[NFS_NGROUPS]; |
21 | }; | 21 | }; |
22 | #define uc_uid uc_base.cr_uid | 22 | #define uc_uid uc_base.cr_uid |
23 | #define uc_count uc_base.cr_count | ||
24 | #define uc_flags uc_base.cr_flags | ||
25 | #define uc_expire uc_base.cr_expire | ||
26 | |||
27 | #define UNX_CRED_EXPIRE (60 * HZ) | ||
28 | 23 | ||
29 | #define UNX_WRITESLACK (21 + (UNX_MAXNODENAME >> 2)) | 24 | #define UNX_WRITESLACK (21 + (UNX_MAXNODENAME >> 2)) |
30 | 25 | ||
@@ -34,15 +29,14 @@ struct unx_cred { | |||
34 | 29 | ||
35 | static struct rpc_auth unix_auth; | 30 | static struct rpc_auth unix_auth; |
36 | static struct rpc_cred_cache unix_cred_cache; | 31 | static struct rpc_cred_cache unix_cred_cache; |
37 | static struct rpc_credops unix_credops; | 32 | static const struct rpc_credops unix_credops; |
38 | 33 | ||
39 | static struct rpc_auth * | 34 | static struct rpc_auth * |
40 | unx_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) | 35 | unx_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) |
41 | { | 36 | { |
42 | dprintk("RPC: creating UNIX authenticator for client %p\n", | 37 | dprintk("RPC: creating UNIX authenticator for client %p\n", |
43 | clnt); | 38 | clnt); |
44 | if (atomic_inc_return(&unix_auth.au_count) == 0) | 39 | atomic_inc(&unix_auth.au_count); |
45 | unix_cred_cache.nextgc = jiffies + (unix_cred_cache.expire >> 1); | ||
46 | return &unix_auth; | 40 | return &unix_auth; |
47 | } | 41 | } |
48 | 42 | ||
@@ -50,7 +44,7 @@ static void | |||
50 | unx_destroy(struct rpc_auth *auth) | 44 | unx_destroy(struct rpc_auth *auth) |
51 | { | 45 | { |
52 | dprintk("RPC: destroying UNIX authenticator %p\n", auth); | 46 | dprintk("RPC: destroying UNIX authenticator %p\n", auth); |
53 | rpcauth_free_credcache(auth); | 47 | rpcauth_clear_credcache(auth->au_credcache); |
54 | } | 48 | } |
55 | 49 | ||
56 | /* | 50 | /* |
@@ -74,8 +68,8 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
74 | if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) | 68 | if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) |
75 | return ERR_PTR(-ENOMEM); | 69 | return ERR_PTR(-ENOMEM); |
76 | 70 | ||
77 | atomic_set(&cred->uc_count, 1); | 71 | rpcauth_init_cred(&cred->uc_base, acred, auth, &unix_credops); |
78 | cred->uc_flags = RPCAUTH_CRED_UPTODATE; | 72 | cred->uc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE; |
79 | if (flags & RPCAUTH_LOOKUP_ROOTCREDS) { | 73 | if (flags & RPCAUTH_LOOKUP_ROOTCREDS) { |
80 | cred->uc_uid = 0; | 74 | cred->uc_uid = 0; |
81 | cred->uc_gid = 0; | 75 | cred->uc_gid = 0; |
@@ -85,22 +79,34 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
85 | if (groups > NFS_NGROUPS) | 79 | if (groups > NFS_NGROUPS) |
86 | groups = NFS_NGROUPS; | 80 | groups = NFS_NGROUPS; |
87 | 81 | ||
88 | cred->uc_uid = acred->uid; | ||
89 | cred->uc_gid = acred->gid; | 82 | cred->uc_gid = acred->gid; |
90 | for (i = 0; i < groups; i++) | 83 | for (i = 0; i < groups; i++) |
91 | cred->uc_gids[i] = GROUP_AT(acred->group_info, i); | 84 | cred->uc_gids[i] = GROUP_AT(acred->group_info, i); |
92 | if (i < NFS_NGROUPS) | 85 | if (i < NFS_NGROUPS) |
93 | cred->uc_gids[i] = NOGROUP; | 86 | cred->uc_gids[i] = NOGROUP; |
94 | } | 87 | } |
95 | cred->uc_base.cr_ops = &unix_credops; | ||
96 | 88 | ||
97 | return (struct rpc_cred *) cred; | 89 | return &cred->uc_base; |
90 | } | ||
91 | |||
92 | static void | ||
93 | unx_free_cred(struct unx_cred *unx_cred) | ||
94 | { | ||
95 | dprintk("RPC: unx_free_cred %p\n", unx_cred); | ||
96 | kfree(unx_cred); | ||
97 | } | ||
98 | |||
99 | static void | ||
100 | unx_free_cred_callback(struct rcu_head *head) | ||
101 | { | ||
102 | struct unx_cred *unx_cred = container_of(head, struct unx_cred, uc_base.cr_rcu); | ||
103 | unx_free_cred(unx_cred); | ||
98 | } | 104 | } |
99 | 105 | ||
100 | static void | 106 | static void |
101 | unx_destroy_cred(struct rpc_cred *cred) | 107 | unx_destroy_cred(struct rpc_cred *cred) |
102 | { | 108 | { |
103 | kfree(cred); | 109 | call_rcu(&cred->cr_rcu, unx_free_cred_callback); |
104 | } | 110 | } |
105 | 111 | ||
106 | /* | 112 | /* |
@@ -111,7 +117,7 @@ unx_destroy_cred(struct rpc_cred *cred) | |||
111 | static int | 117 | static int |
112 | unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags) | 118 | unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags) |
113 | { | 119 | { |
114 | struct unx_cred *cred = (struct unx_cred *) rcred; | 120 | struct unx_cred *cred = container_of(rcred, struct unx_cred, uc_base); |
115 | int i; | 121 | int i; |
116 | 122 | ||
117 | if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) { | 123 | if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) { |
@@ -142,7 +148,7 @@ static __be32 * | |||
142 | unx_marshal(struct rpc_task *task, __be32 *p) | 148 | unx_marshal(struct rpc_task *task, __be32 *p) |
143 | { | 149 | { |
144 | struct rpc_clnt *clnt = task->tk_client; | 150 | struct rpc_clnt *clnt = task->tk_client; |
145 | struct unx_cred *cred = (struct unx_cred *) task->tk_msg.rpc_cred; | 151 | struct unx_cred *cred = container_of(task->tk_msg.rpc_cred, struct unx_cred, uc_base); |
146 | __be32 *base, *hold; | 152 | __be32 *base, *hold; |
147 | int i; | 153 | int i; |
148 | 154 | ||
@@ -175,7 +181,7 @@ unx_marshal(struct rpc_task *task, __be32 *p) | |||
175 | static int | 181 | static int |
176 | unx_refresh(struct rpc_task *task) | 182 | unx_refresh(struct rpc_task *task) |
177 | { | 183 | { |
178 | task->tk_msg.rpc_cred->cr_flags |= RPCAUTH_CRED_UPTODATE; | 184 | set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_msg.rpc_cred->cr_flags); |
179 | return 0; | 185 | return 0; |
180 | } | 186 | } |
181 | 187 | ||
@@ -198,13 +204,18 @@ unx_validate(struct rpc_task *task, __be32 *p) | |||
198 | printk("RPC: giant verf size: %u\n", size); | 204 | printk("RPC: giant verf size: %u\n", size); |
199 | return NULL; | 205 | return NULL; |
200 | } | 206 | } |
201 | task->tk_auth->au_rslack = (size >> 2) + 2; | 207 | task->tk_msg.rpc_cred->cr_auth->au_rslack = (size >> 2) + 2; |
202 | p += (size >> 2); | 208 | p += (size >> 2); |
203 | 209 | ||
204 | return p; | 210 | return p; |
205 | } | 211 | } |
206 | 212 | ||
207 | struct rpc_authops authunix_ops = { | 213 | void __init rpc_init_authunix(void) |
214 | { | ||
215 | spin_lock_init(&unix_cred_cache.lock); | ||
216 | } | ||
217 | |||
218 | const struct rpc_authops authunix_ops = { | ||
208 | .owner = THIS_MODULE, | 219 | .owner = THIS_MODULE, |
209 | .au_flavor = RPC_AUTH_UNIX, | 220 | .au_flavor = RPC_AUTH_UNIX, |
210 | #ifdef RPC_DEBUG | 221 | #ifdef RPC_DEBUG |
@@ -218,7 +229,6 @@ struct rpc_authops authunix_ops = { | |||
218 | 229 | ||
219 | static | 230 | static |
220 | struct rpc_cred_cache unix_cred_cache = { | 231 | struct rpc_cred_cache unix_cred_cache = { |
221 | .expire = UNX_CRED_EXPIRE, | ||
222 | }; | 232 | }; |
223 | 233 | ||
224 | static | 234 | static |
@@ -232,7 +242,7 @@ struct rpc_auth unix_auth = { | |||
232 | }; | 242 | }; |
233 | 243 | ||
234 | static | 244 | static |
235 | struct rpc_credops unix_credops = { | 245 | const struct rpc_credops unix_credops = { |
236 | .cr_name = "AUTH_UNIX", | 246 | .cr_name = "AUTH_UNIX", |
237 | .crdestroy = unx_destroy_cred, | 247 | .crdestroy = unx_destroy_cred, |
238 | .crmatch = unx_match, | 248 | .crmatch = unx_match, |
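Several casts such as (struct unx_cred *)rcred are replaced above with container_of(), which derives the wrapper from the embedded rpc_cred without assuming it is the first member. The idiom in isolation (hypothetical types):

#include <linux/kernel.h>

struct base_cred {
	unsigned long flags;
};

struct wrapped_cred {
	unsigned int	 uid;
	struct base_cred base;		/* deliberately not the first member */
};

static struct wrapped_cred *to_wrapped(struct base_cred *base)
{
	/* Correct whatever the offset of 'base' is; a raw cast of
	 * 'base' would only work if it sat at offset zero. */
	return container_of(base, struct wrapped_cred, base);
}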
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d8fbee40a19c..52429b1ffcc1
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -44,6 +44,12 @@ | |||
44 | dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ | 44 | dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ |
45 | __FUNCTION__, t->tk_status) | 45 | __FUNCTION__, t->tk_status) |
46 | 46 | ||
47 | /* | ||
48 | * All RPC clients are linked into this list | ||
49 | */ | ||
50 | static LIST_HEAD(all_clients); | ||
51 | static DEFINE_SPINLOCK(rpc_client_lock); | ||
52 | |||
47 | static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); | 53 | static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); |
48 | 54 | ||
49 | 55 | ||
@@ -66,6 +72,21 @@ static void call_connect_status(struct rpc_task *task); | |||
66 | static __be32 * call_header(struct rpc_task *task); | 72 | static __be32 * call_header(struct rpc_task *task); |
67 | static __be32 * call_verify(struct rpc_task *task); | 73 | static __be32 * call_verify(struct rpc_task *task); |
68 | 74 | ||
75 | static int rpc_ping(struct rpc_clnt *clnt, int flags); | ||
76 | |||
77 | static void rpc_register_client(struct rpc_clnt *clnt) | ||
78 | { | ||
79 | spin_lock(&rpc_client_lock); | ||
80 | list_add(&clnt->cl_clients, &all_clients); | ||
81 | spin_unlock(&rpc_client_lock); | ||
82 | } | ||
83 | |||
84 | static void rpc_unregister_client(struct rpc_clnt *clnt) | ||
85 | { | ||
86 | spin_lock(&rpc_client_lock); | ||
87 | list_del(&clnt->cl_clients); | ||
88 | spin_unlock(&rpc_client_lock); | ||
89 | } | ||
69 | 90 | ||
70 | static int | 91 | static int |
71 | rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) | 92 | rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) |
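The new all_clients list and rpc_client_lock above form a simple global registry so that debugging code can later walk every client. The pattern, reduced to its essentials (all names here are stand-ins):

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_client {
	struct list_head link;
};

static LIST_HEAD(my_clients);
static DEFINE_SPINLOCK(my_clients_lock);

static void my_client_register(struct my_client *clnt)
{
	spin_lock(&my_clients_lock);
	list_add(&clnt->link, &my_clients);
	spin_unlock(&my_clients_lock);
}

static void my_client_unregister(struct my_client *clnt)
{
	spin_lock(&my_clients_lock);
	list_del(&clnt->link);
	spin_unlock(&my_clients_lock);
}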
@@ -111,6 +132,9 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s | |||
111 | dprintk("RPC: creating %s client for %s (xprt %p)\n", | 132 | dprintk("RPC: creating %s client for %s (xprt %p)\n", |
112 | program->name, servname, xprt); | 133 | program->name, servname, xprt); |
113 | 134 | ||
135 | err = rpciod_up(); | ||
136 | if (err) | ||
137 | goto out_no_rpciod; | ||
114 | err = -EINVAL; | 138 | err = -EINVAL; |
115 | if (!xprt) | 139 | if (!xprt) |
116 | goto out_no_xprt; | 140 | goto out_no_xprt; |
@@ -121,8 +145,6 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s | |||
121 | clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); | 145 | clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); |
122 | if (!clnt) | 146 | if (!clnt) |
123 | goto out_err; | 147 | goto out_err; |
124 | atomic_set(&clnt->cl_users, 0); | ||
125 | atomic_set(&clnt->cl_count, 1); | ||
126 | clnt->cl_parent = clnt; | 148 | clnt->cl_parent = clnt; |
127 | 149 | ||
128 | clnt->cl_server = clnt->cl_inline_name; | 150 | clnt->cl_server = clnt->cl_inline_name; |
@@ -148,6 +170,8 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s | |||
148 | if (clnt->cl_metrics == NULL) | 170 | if (clnt->cl_metrics == NULL) |
149 | goto out_no_stats; | 171 | goto out_no_stats; |
150 | clnt->cl_program = program; | 172 | clnt->cl_program = program; |
173 | INIT_LIST_HEAD(&clnt->cl_tasks); | ||
174 | spin_lock_init(&clnt->cl_lock); | ||
151 | 175 | ||
152 | if (!xprt_bound(clnt->cl_xprt)) | 176 | if (!xprt_bound(clnt->cl_xprt)) |
153 | clnt->cl_autobind = 1; | 177 | clnt->cl_autobind = 1; |
@@ -155,6 +179,8 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s | |||
155 | clnt->cl_rtt = &clnt->cl_rtt_default; | 179 | clnt->cl_rtt = &clnt->cl_rtt_default; |
156 | rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval); | 180 | rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval); |
157 | 181 | ||
182 | kref_init(&clnt->cl_kref); | ||
183 | |||
158 | err = rpc_setup_pipedir(clnt, program->pipe_dir_name); | 184 | err = rpc_setup_pipedir(clnt, program->pipe_dir_name); |
159 | if (err < 0) | 185 | if (err < 0) |
160 | goto out_no_path; | 186 | goto out_no_path; |
@@ -172,6 +198,7 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s | |||
172 | if (clnt->cl_nodelen > UNX_MAXNODENAME) | 198 | if (clnt->cl_nodelen > UNX_MAXNODENAME) |
173 | clnt->cl_nodelen = UNX_MAXNODENAME; | 199 | clnt->cl_nodelen = UNX_MAXNODENAME; |
174 | memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen); | 200 | memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen); |
201 | rpc_register_client(clnt); | ||
175 | return clnt; | 202 | return clnt; |
176 | 203 | ||
177 | out_no_auth: | 204 | out_no_auth: |
@@ -188,6 +215,8 @@ out_no_stats: | |||
188 | out_err: | 215 | out_err: |
189 | xprt_put(xprt); | 216 | xprt_put(xprt); |
190 | out_no_xprt: | 217 | out_no_xprt: |
218 | rpciod_down(); | ||
219 | out_no_rpciod: | ||
191 | return ERR_PTR(err); | 220 | return ERR_PTR(err); |
192 | } | 221 | } |
193 | 222 | ||
@@ -205,13 +234,32 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) | |||
205 | { | 234 | { |
206 | struct rpc_xprt *xprt; | 235 | struct rpc_xprt *xprt; |
207 | struct rpc_clnt *clnt; | 236 | struct rpc_clnt *clnt; |
237 | struct rpc_xprtsock_create xprtargs = { | ||
238 | .proto = args->protocol, | ||
239 | .srcaddr = args->saddress, | ||
240 | .dstaddr = args->address, | ||
241 | .addrlen = args->addrsize, | ||
242 | .timeout = args->timeout | ||
243 | }; | ||
244 | char servername[20]; | ||
208 | 245 | ||
209 | xprt = xprt_create_transport(args->protocol, args->address, | 246 | xprt = xprt_create_transport(&xprtargs); |
210 | args->addrsize, args->timeout); | ||
211 | if (IS_ERR(xprt)) | 247 | if (IS_ERR(xprt)) |
212 | return (struct rpc_clnt *)xprt; | 248 | return (struct rpc_clnt *)xprt; |
213 | 249 | ||
214 | /* | 250 | /* |
251 | * If the caller chooses not to specify a hostname, whip | ||
252 | * up a string representation of the passed-in address. | ||
253 | */ | ||
254 | if (args->servername == NULL) { | ||
255 | struct sockaddr_in *addr = | ||
256 | (struct sockaddr_in *) &args->address; | ||
257 | snprintf(servername, sizeof(servername), NIPQUAD_FMT, | ||
258 | NIPQUAD(addr->sin_addr.s_addr)); | ||
259 | args->servername = servername; | ||
260 | } | ||
261 | |||
262 | /* | ||
215 | * By default, kernel RPC client connects from a reserved port. | 263 | * By default, kernel RPC client connects from a reserved port. |
216 | * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters, | 264 | * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters, |
217 | * but it is always enabled for rpciod, which handles the connect | 265 | * but it is always enabled for rpciod, which handles the connect |
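When the caller of rpc_create() supplies no servername, the code above whips up a dotted-quad string from the IPv4 address it was given. A sketch of just that conversion, assuming the NIPQUAD helpers available in kernels of this vintage:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/in.h>

static void default_servername(char *buf, size_t len,
			       const struct sockaddr_in *sin)
{
	/* NIPQUAD_FMT expands to "%u.%u.%u.%u". */
	snprintf(buf, len, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
}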
@@ -245,8 +293,6 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) | |||
245 | clnt->cl_intr = 1; | 293 | clnt->cl_intr = 1; |
246 | if (args->flags & RPC_CLNT_CREATE_AUTOBIND) | 294 | if (args->flags & RPC_CLNT_CREATE_AUTOBIND) |
247 | clnt->cl_autobind = 1; | 295 | clnt->cl_autobind = 1; |
248 | if (args->flags & RPC_CLNT_CREATE_ONESHOT) | ||
249 | clnt->cl_oneshot = 1; | ||
250 | if (args->flags & RPC_CLNT_CREATE_DISCRTRY) | 296 | if (args->flags & RPC_CLNT_CREATE_DISCRTRY) |
251 | clnt->cl_discrtry = 1; | 297 | clnt->cl_discrtry = 1; |
252 | 298 | ||
@@ -268,24 +314,25 @@ rpc_clone_client(struct rpc_clnt *clnt) | |||
268 | new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); | 314 | new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); |
269 | if (!new) | 315 | if (!new) |
270 | goto out_no_clnt; | 316 | goto out_no_clnt; |
271 | atomic_set(&new->cl_count, 1); | 317 | new->cl_parent = clnt; |
272 | atomic_set(&new->cl_users, 0); | 318 | /* Turn off autobind on clones */ |
319 | new->cl_autobind = 0; | ||
320 | INIT_LIST_HEAD(&new->cl_tasks); | ||
321 | spin_lock_init(&new->cl_lock); | ||
322 | rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); | ||
273 | new->cl_metrics = rpc_alloc_iostats(clnt); | 323 | new->cl_metrics = rpc_alloc_iostats(clnt); |
274 | if (new->cl_metrics == NULL) | 324 | if (new->cl_metrics == NULL) |
275 | goto out_no_stats; | 325 | goto out_no_stats; |
326 | kref_init(&new->cl_kref); | ||
276 | err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); | 327 | err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); |
277 | if (err != 0) | 328 | if (err != 0) |
278 | goto out_no_path; | 329 | goto out_no_path; |
279 | new->cl_parent = clnt; | ||
280 | atomic_inc(&clnt->cl_count); | ||
281 | new->cl_xprt = xprt_get(clnt->cl_xprt); | ||
282 | /* Turn off autobind on clones */ | ||
283 | new->cl_autobind = 0; | ||
284 | new->cl_oneshot = 0; | ||
285 | new->cl_dead = 0; | ||
286 | rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); | ||
287 | if (new->cl_auth) | 330 | if (new->cl_auth) |
288 | atomic_inc(&new->cl_auth->au_count); | 331 | atomic_inc(&new->cl_auth->au_count); |
332 | xprt_get(clnt->cl_xprt); | ||
333 | kref_get(&clnt->cl_kref); | ||
334 | rpc_register_client(new); | ||
335 | rpciod_up(); | ||
289 | return new; | 336 | return new; |
290 | out_no_path: | 337 | out_no_path: |
291 | rpc_free_iostats(new->cl_metrics); | 338 | rpc_free_iostats(new->cl_metrics); |
@@ -298,86 +345,86 @@ out_no_clnt: | |||
298 | 345 | ||
299 | /* | 346 | /* |
300 | * Properly shut down an RPC client, terminating all outstanding | 347 | * Properly shut down an RPC client, terminating all outstanding |
301 | * requests. Note that we must be certain that cl_oneshot and | 348 | * requests. |
302 | * cl_dead are cleared, or else the client would be destroyed | ||
303 | * when the last task releases it. | ||
304 | */ | 349 | */ |
305 | int | 350 | void rpc_shutdown_client(struct rpc_clnt *clnt) |
306 | rpc_shutdown_client(struct rpc_clnt *clnt) | ||
307 | { | 351 | { |
308 | dprintk("RPC: shutting down %s client for %s, tasks=%d\n", | 352 | dprintk("RPC: shutting down %s client for %s\n", |
309 | clnt->cl_protname, clnt->cl_server, | 353 | clnt->cl_protname, clnt->cl_server); |
310 | atomic_read(&clnt->cl_users)); | 354 | |
311 | 355 | while (!list_empty(&clnt->cl_tasks)) { | |
312 | while (atomic_read(&clnt->cl_users) > 0) { | ||
313 | /* Don't let rpc_release_client destroy us */ | ||
314 | clnt->cl_oneshot = 0; | ||
315 | clnt->cl_dead = 0; | ||
316 | rpc_killall_tasks(clnt); | 356 | rpc_killall_tasks(clnt); |
317 | wait_event_timeout(destroy_wait, | 357 | wait_event_timeout(destroy_wait, |
318 | !atomic_read(&clnt->cl_users), 1*HZ); | 358 | list_empty(&clnt->cl_tasks), 1*HZ); |
319 | } | ||
320 | |||
321 | if (atomic_read(&clnt->cl_users) < 0) { | ||
322 | printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n", | ||
323 | clnt, atomic_read(&clnt->cl_users)); | ||
324 | #ifdef RPC_DEBUG | ||
325 | rpc_show_tasks(); | ||
326 | #endif | ||
327 | BUG(); | ||
328 | } | 359 | } |
329 | 360 | ||
330 | return rpc_destroy_client(clnt); | 361 | rpc_release_client(clnt); |
331 | } | 362 | } |
332 | 363 | ||
333 | /* | 364 | /* |
334 | * Delete an RPC client | 365 | * Free an RPC client |
335 | */ | 366 | */ |
336 | int | 367 | static void |
337 | rpc_destroy_client(struct rpc_clnt *clnt) | 368 | rpc_free_client(struct kref *kref) |
338 | { | 369 | { |
339 | if (!atomic_dec_and_test(&clnt->cl_count)) | 370 | struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref); |
340 | return 1; | ||
341 | BUG_ON(atomic_read(&clnt->cl_users) != 0); | ||
342 | 371 | ||
343 | dprintk("RPC: destroying %s client for %s\n", | 372 | dprintk("RPC: destroying %s client for %s\n", |
344 | clnt->cl_protname, clnt->cl_server); | 373 | clnt->cl_protname, clnt->cl_server); |
345 | if (clnt->cl_auth) { | ||
346 | rpcauth_destroy(clnt->cl_auth); | ||
347 | clnt->cl_auth = NULL; | ||
348 | } | ||
349 | if (!IS_ERR(clnt->cl_dentry)) { | 374 | if (!IS_ERR(clnt->cl_dentry)) { |
350 | rpc_rmdir(clnt->cl_dentry); | 375 | rpc_rmdir(clnt->cl_dentry); |
351 | rpc_put_mount(); | 376 | rpc_put_mount(); |
352 | } | 377 | } |
353 | if (clnt->cl_parent != clnt) { | 378 | if (clnt->cl_parent != clnt) { |
354 | rpc_destroy_client(clnt->cl_parent); | 379 | rpc_release_client(clnt->cl_parent); |
355 | goto out_free; | 380 | goto out_free; |
356 | } | 381 | } |
357 | if (clnt->cl_server != clnt->cl_inline_name) | 382 | if (clnt->cl_server != clnt->cl_inline_name) |
358 | kfree(clnt->cl_server); | 383 | kfree(clnt->cl_server); |
359 | out_free: | 384 | out_free: |
385 | rpc_unregister_client(clnt); | ||
360 | rpc_free_iostats(clnt->cl_metrics); | 386 | rpc_free_iostats(clnt->cl_metrics); |
361 | clnt->cl_metrics = NULL; | 387 | clnt->cl_metrics = NULL; |
362 | xprt_put(clnt->cl_xprt); | 388 | xprt_put(clnt->cl_xprt); |
389 | rpciod_down(); | ||
363 | kfree(clnt); | 390 | kfree(clnt); |
364 | return 0; | ||
365 | } | 391 | } |
366 | 392 | ||
367 | /* | 393 | /* |
368 | * Release an RPC client | 394 | * Free an RPC client |
395 | */ | ||
396 | static void | ||
397 | rpc_free_auth(struct kref *kref) | ||
398 | { | ||
399 | struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref); | ||
400 | |||
401 | if (clnt->cl_auth == NULL) { | ||
402 | rpc_free_client(kref); | ||
403 | return; | ||
404 | } | ||
405 | |||
406 | /* | ||
407 | * Note: RPCSEC_GSS may need to send NULL RPC calls in order to | ||
408 | * release remaining GSS contexts. This mechanism ensures | ||
409 | * that it can do so safely. | ||
410 | */ | ||
411 | kref_init(kref); | ||
412 | rpcauth_release(clnt->cl_auth); | ||
413 | clnt->cl_auth = NULL; | ||
414 | kref_put(kref, rpc_free_client); | ||
415 | } | ||
416 | |||
417 | /* | ||
418 | * Release reference to the RPC client | ||
369 | */ | 419 | */ |
370 | void | 420 | void |
371 | rpc_release_client(struct rpc_clnt *clnt) | 421 | rpc_release_client(struct rpc_clnt *clnt) |
372 | { | 422 | { |
373 | dprintk("RPC: rpc_release_client(%p, %d)\n", | 423 | dprintk("RPC: rpc_release_client(%p)\n", clnt); |
374 | clnt, atomic_read(&clnt->cl_users)); | ||
375 | 424 | ||
376 | if (!atomic_dec_and_test(&clnt->cl_users)) | 425 | if (list_empty(&clnt->cl_tasks)) |
377 | return; | 426 | wake_up(&destroy_wait); |
378 | wake_up(&destroy_wait); | 427 | kref_put(&clnt->cl_kref, rpc_free_auth); |
379 | if (clnt->cl_oneshot || clnt->cl_dead) | ||
380 | rpc_destroy_client(clnt); | ||
381 | } | 428 | } |
382 | 429 | ||
383 | /** | 430 | /** |
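The cl_users/cl_count pair, together with the cl_oneshot and cl_dead flags, is replaced above by a single struct kref whose release function frees the client; rpc_free_auth() is interposed so RPCSEC_GSS can still send its final NULL calls. The bare kref lifetime pattern looks like this (a hypothetical object, not rpc_clnt itself):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_client {
	struct kref ref;
};

static void my_client_release(struct kref *kref)
{
	struct my_client *clnt = container_of(kref, struct my_client, ref);

	kfree(clnt);
}

static struct my_client *my_client_alloc(void)
{
	struct my_client *clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);

	if (clnt)
		kref_init(&clnt->ref);		/* reference count starts at 1 */
	return clnt;
}

static void my_client_put(struct my_client *clnt)
{
	/* my_client_release() runs when the last reference is dropped. */
	kref_put(&clnt->ref, my_client_release);
}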
@@ -468,82 +515,96 @@ void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset) | |||
468 | rpc_restore_sigmask(oldset); | 515 | rpc_restore_sigmask(oldset); |
469 | } | 516 | } |
470 | 517 | ||
471 | /* | 518 | static |
472 | * New rpc_call implementation | 519 | struct rpc_task *rpc_do_run_task(struct rpc_clnt *clnt, |
520 | struct rpc_message *msg, | ||
521 | int flags, | ||
522 | const struct rpc_call_ops *ops, | ||
523 | void *data) | ||
524 | { | ||
525 | struct rpc_task *task, *ret; | ||
526 | sigset_t oldset; | ||
527 | |||
528 | task = rpc_new_task(clnt, flags, ops, data); | ||
529 | if (task == NULL) { | ||
530 | rpc_release_calldata(ops, data); | ||
531 | return ERR_PTR(-ENOMEM); | ||
532 | } | ||
533 | |||
534 | /* Mask signals on synchronous RPC calls and RPCSEC_GSS upcalls */ | ||
535 | rpc_task_sigmask(task, &oldset); | ||
536 | if (msg != NULL) { | ||
537 | rpc_call_setup(task, msg, 0); | ||
538 | if (task->tk_status != 0) { | ||
539 | ret = ERR_PTR(task->tk_status); | ||
540 | rpc_put_task(task); | ||
541 | goto out; | ||
542 | } | ||
543 | } | ||
544 | atomic_inc(&task->tk_count); | ||
545 | rpc_execute(task); | ||
546 | ret = task; | ||
547 | out: | ||
548 | rpc_restore_sigmask(&oldset); | ||
549 | return ret; | ||
550 | } | ||
551 | |||
552 | /** | ||
553 | * rpc_call_sync - Perform a synchronous RPC call | ||
554 | * @clnt: pointer to RPC client | ||
555 | * @msg: RPC call parameters | ||
556 | * @flags: RPC call flags | ||
473 | */ | 557 | */ |
474 | int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) | 558 | int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) |
475 | { | 559 | { |
476 | struct rpc_task *task; | 560 | struct rpc_task *task; |
477 | sigset_t oldset; | 561 | int status; |
478 | int status; | ||
479 | |||
480 | /* If this client is slain all further I/O fails */ | ||
481 | if (clnt->cl_dead) | ||
482 | return -EIO; | ||
483 | 562 | ||
484 | BUG_ON(flags & RPC_TASK_ASYNC); | 563 | BUG_ON(flags & RPC_TASK_ASYNC); |
485 | 564 | ||
486 | task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL); | 565 | task = rpc_do_run_task(clnt, msg, flags, &rpc_default_ops, NULL); |
487 | if (task == NULL) | 566 | if (IS_ERR(task)) |
488 | return -ENOMEM; | 567 | return PTR_ERR(task); |
489 | |||
490 | /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */ | ||
491 | rpc_task_sigmask(task, &oldset); | ||
492 | |||
493 | /* Set up the call info struct and execute the task */ | ||
494 | rpc_call_setup(task, msg, 0); | ||
495 | if (task->tk_status == 0) { | ||
496 | atomic_inc(&task->tk_count); | ||
497 | rpc_execute(task); | ||
498 | } | ||
499 | status = task->tk_status; | 568 | status = task->tk_status; |
500 | rpc_put_task(task); | 569 | rpc_put_task(task); |
501 | rpc_restore_sigmask(&oldset); | ||
502 | return status; | 570 | return status; |
503 | } | 571 | } |
504 | 572 | ||
505 | /* | 573 | /** |
506 | * New rpc_call implementation | 574 | * rpc_call_async - Perform an asynchronous RPC call |
575 | * @clnt: pointer to RPC client | ||
576 | * @msg: RPC call parameters | ||
577 | * @flags: RPC call flags | ||
578 | * @ops: RPC call ops | ||
579 | * @data: user call data | ||
507 | */ | 580 | */ |
508 | int | 581 | int |
509 | rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, | 582 | rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, |
510 | const struct rpc_call_ops *tk_ops, void *data) | 583 | const struct rpc_call_ops *tk_ops, void *data) |
511 | { | 584 | { |
512 | struct rpc_task *task; | 585 | struct rpc_task *task; |
513 | sigset_t oldset; | ||
514 | int status; | ||
515 | |||
516 | /* If this client is slain all further I/O fails */ | ||
517 | status = -EIO; | ||
518 | if (clnt->cl_dead) | ||
519 | goto out_release; | ||
520 | |||
521 | flags |= RPC_TASK_ASYNC; | ||
522 | |||
523 | /* Create/initialize a new RPC task */ | ||
524 | status = -ENOMEM; | ||
525 | if (!(task = rpc_new_task(clnt, flags, tk_ops, data))) | ||
526 | goto out_release; | ||
527 | |||
528 | /* Mask signals on GSS_AUTH upcalls */ | ||
529 | rpc_task_sigmask(task, &oldset); | ||
530 | 586 | ||
531 | rpc_call_setup(task, msg, 0); | 587 | task = rpc_do_run_task(clnt, msg, flags|RPC_TASK_ASYNC, tk_ops, data); |
532 | 588 | if (IS_ERR(task)) | |
533 | /* Set up the call info struct and execute the task */ | 589 | return PTR_ERR(task); |
534 | status = task->tk_status; | 590 | rpc_put_task(task); |
535 | if (status == 0) | 591 | return 0; |
536 | rpc_execute(task); | ||
537 | else | ||
538 | rpc_put_task(task); | ||
539 | |||
540 | rpc_restore_sigmask(&oldset); | ||
541 | return status; | ||
542 | out_release: | ||
543 | rpc_release_calldata(tk_ops, data); | ||
544 | return status; | ||
545 | } | 592 | } |
546 | 593 | ||
594 | /** | ||
595 | * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it | ||
596 | * @clnt: pointer to RPC client | ||
597 | * @flags: RPC flags | ||
598 | * @ops: RPC call ops | ||
599 | * @data: user call data | ||
600 | */ | ||
601 | struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, | ||
602 | const struct rpc_call_ops *tk_ops, | ||
603 | void *data) | ||
604 | { | ||
605 | return rpc_do_run_task(clnt, NULL, flags, tk_ops, data); | ||
606 | } | ||
607 | EXPORT_SYMBOL(rpc_run_task); | ||
547 | 608 | ||
548 | void | 609 | void |
549 | rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags) | 610 | rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags) |
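With the sync and async paths folded into rpc_do_run_task(), the newly exported rpc_run_task() hands the caller a referenced task (or an ERR_PTR). A sketch of how a caller might use it, assuming only what the hunk above exports:

#include <linux/err.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

static int start_async_op(struct rpc_clnt *clnt,
			  const struct rpc_call_ops *ops, void *calldata)
{
	struct rpc_task *task;

	task = rpc_run_task(clnt, RPC_TASK_ASYNC, ops, calldata);
	if (IS_ERR(task))
		return PTR_ERR(task);
	/* The task is already running; drop our reference and let the
	 * rpc_call_ops callbacks report completion. */
	rpc_put_task(task);
	return 0;
}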
@@ -745,7 +806,7 @@ call_reserveresult(struct rpc_task *task) | |||
745 | static void | 806 | static void |
746 | call_allocate(struct rpc_task *task) | 807 | call_allocate(struct rpc_task *task) |
747 | { | 808 | { |
748 | unsigned int slack = task->tk_auth->au_cslack; | 809 | unsigned int slack = task->tk_msg.rpc_cred->cr_auth->au_cslack; |
749 | struct rpc_rqst *req = task->tk_rqstp; | 810 | struct rpc_rqst *req = task->tk_rqstp; |
750 | struct rpc_xprt *xprt = task->tk_xprt; | 811 | struct rpc_xprt *xprt = task->tk_xprt; |
751 | struct rpc_procinfo *proc = task->tk_msg.rpc_proc; | 812 | struct rpc_procinfo *proc = task->tk_msg.rpc_proc; |
@@ -843,10 +904,8 @@ call_encode(struct rpc_task *task) | |||
843 | if (encode == NULL) | 904 | if (encode == NULL) |
844 | return; | 905 | return; |
845 | 906 | ||
846 | lock_kernel(); | ||
847 | task->tk_status = rpcauth_wrap_req(task, encode, req, p, | 907 | task->tk_status = rpcauth_wrap_req(task, encode, req, p, |
848 | task->tk_msg.rpc_argp); | 908 | task->tk_msg.rpc_argp); |
849 | unlock_kernel(); | ||
850 | if (task->tk_status == -ENOMEM) { | 909 | if (task->tk_status == -ENOMEM) { |
851 | /* XXX: Is this sane? */ | 910 | /* XXX: Is this sane? */ |
852 | rpc_delay(task, 3*HZ); | 911 | rpc_delay(task, 3*HZ); |
@@ -1177,10 +1236,8 @@ call_decode(struct rpc_task *task) | |||
1177 | task->tk_action = rpc_exit_task; | 1236 | task->tk_action = rpc_exit_task; |
1178 | 1237 | ||
1179 | if (decode) { | 1238 | if (decode) { |
1180 | lock_kernel(); | ||
1181 | task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, | 1239 | task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, |
1182 | task->tk_msg.rpc_resp); | 1240 | task->tk_msg.rpc_resp); |
1183 | unlock_kernel(); | ||
1184 | } | 1241 | } |
1185 | dprintk("RPC: %5u call_decode result %d\n", task->tk_pid, | 1242 | dprintk("RPC: %5u call_decode result %d\n", task->tk_pid, |
1186 | task->tk_status); | 1243 | task->tk_status); |
@@ -1273,9 +1330,9 @@ call_verify(struct rpc_task *task) | |||
1273 | * - if it isn't pointer subtraction in the NFS client may give | 1330 | * - if it isn't pointer subtraction in the NFS client may give |
1274 | * undefined results | 1331 | * undefined results |
1275 | */ | 1332 | */ |
1276 | printk(KERN_WARNING | 1333 | dprintk("RPC: %5u %s: XDR representation not a multiple of" |
1277 | "call_verify: XDR representation not a multiple of" | 1334 | " 4 bytes: 0x%x\n", task->tk_pid, __FUNCTION__, |
1278 | " 4 bytes: 0x%x\n", task->tk_rqstp->rq_rcv_buf.len); | 1335 | task->tk_rqstp->rq_rcv_buf.len); |
1279 | goto out_eio; | 1336 | goto out_eio; |
1280 | } | 1337 | } |
1281 | if ((len -= 3) < 0) | 1338 | if ((len -= 3) < 0) |
@@ -1283,7 +1340,8 @@ call_verify(struct rpc_task *task) | |||
1283 | p += 1; /* skip XID */ | 1340 | p += 1; /* skip XID */ |
1284 | 1341 | ||
1285 | if ((n = ntohl(*p++)) != RPC_REPLY) { | 1342 | if ((n = ntohl(*p++)) != RPC_REPLY) { |
1286 | printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n); | 1343 | dprintk("RPC: %5u %s: not an RPC reply: %x\n", |
1344 | task->tk_pid, __FUNCTION__, n); | ||
1287 | goto out_garbage; | 1345 | goto out_garbage; |
1288 | } | 1346 | } |
1289 | if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { | 1347 | if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { |
@@ -1334,7 +1392,8 @@ call_verify(struct rpc_task *task) | |||
1334 | "authentication.\n", task->tk_client->cl_server); | 1392 | "authentication.\n", task->tk_client->cl_server); |
1335 | break; | 1393 | break; |
1336 | default: | 1394 | default: |
1337 | printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n); | 1395 | dprintk("RPC: %5u %s: unknown auth error: %x\n", |
1396 | task->tk_pid, __FUNCTION__, n); | ||
1338 | error = -EIO; | 1397 | error = -EIO; |
1339 | } | 1398 | } |
1340 | dprintk("RPC: %5u %s: call rejected %d\n", | 1399 | dprintk("RPC: %5u %s: call rejected %d\n", |
@@ -1342,7 +1401,8 @@ call_verify(struct rpc_task *task) | |||
1342 | goto out_err; | 1401 | goto out_err; |
1343 | } | 1402 | } |
1344 | if (!(p = rpcauth_checkverf(task, p))) { | 1403 | if (!(p = rpcauth_checkverf(task, p))) { |
1345 | printk(KERN_WARNING "call_verify: auth check failed\n"); | 1404 | dprintk("RPC: %5u %s: auth check failed\n", |
1405 | task->tk_pid, __FUNCTION__); | ||
1346 | goto out_garbage; /* bad verifier, retry */ | 1406 | goto out_garbage; /* bad verifier, retry */ |
1347 | } | 1407 | } |
1348 | len = p - (__be32 *)iov->iov_base - 1; | 1408 | len = p - (__be32 *)iov->iov_base - 1; |
@@ -1381,7 +1441,8 @@ call_verify(struct rpc_task *task) | |||
1381 | task->tk_pid, __FUNCTION__); | 1441 | task->tk_pid, __FUNCTION__); |
1382 | break; /* retry */ | 1442 | break; /* retry */ |
1383 | default: | 1443 | default: |
1384 | printk(KERN_WARNING "call_verify: server accept status: %x\n", n); | 1444 | dprintk("RPC: %5u %s: server accept status: %x\n", |
1445 | task->tk_pid, __FUNCTION__, n); | ||
1385 | /* Also retry */ | 1446 | /* Also retry */ |
1386 | } | 1447 | } |
1387 | 1448 | ||
@@ -1395,14 +1456,16 @@ out_garbage: | |||
1395 | out_retry: | 1456 | out_retry: |
1396 | return ERR_PTR(-EAGAIN); | 1457 | return ERR_PTR(-EAGAIN); |
1397 | } | 1458 | } |
1398 | printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__); | ||
1399 | out_eio: | 1459 | out_eio: |
1400 | error = -EIO; | 1460 | error = -EIO; |
1401 | out_err: | 1461 | out_err: |
1402 | rpc_exit(task, error); | 1462 | rpc_exit(task, error); |
1463 | dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid, | ||
1464 | __FUNCTION__, error); | ||
1403 | return ERR_PTR(error); | 1465 | return ERR_PTR(error); |
1404 | out_overflow: | 1466 | out_overflow: |
1405 | printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__); | 1467 | dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid, |
1468 | __FUNCTION__); | ||
1406 | goto out_garbage; | 1469 | goto out_garbage; |
1407 | } | 1470 | } |
1408 | 1471 | ||
@@ -1421,7 +1484,7 @@ static struct rpc_procinfo rpcproc_null = { | |||
1421 | .p_decode = rpcproc_decode_null, | 1484 | .p_decode = rpcproc_decode_null, |
1422 | }; | 1485 | }; |
1423 | 1486 | ||
1424 | int rpc_ping(struct rpc_clnt *clnt, int flags) | 1487 | static int rpc_ping(struct rpc_clnt *clnt, int flags) |
1425 | { | 1488 | { |
1426 | struct rpc_message msg = { | 1489 | struct rpc_message msg = { |
1427 | .rpc_proc = &rpcproc_null, | 1490 | .rpc_proc = &rpcproc_null, |
@@ -1432,3 +1495,51 @@ int rpc_ping(struct rpc_clnt *clnt, int flags) | |||
1432 | put_rpccred(msg.rpc_cred); | 1495 | put_rpccred(msg.rpc_cred); |
1433 | return err; | 1496 | return err; |
1434 | } | 1497 | } |
1498 | |||
1499 | struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags) | ||
1500 | { | ||
1501 | struct rpc_message msg = { | ||
1502 | .rpc_proc = &rpcproc_null, | ||
1503 | .rpc_cred = cred, | ||
1504 | }; | ||
1505 | return rpc_do_run_task(clnt, &msg, flags, &rpc_default_ops, NULL); | ||
1506 | } | ||
1507 | EXPORT_SYMBOL(rpc_call_null); | ||
1508 | |||
1509 | #ifdef RPC_DEBUG | ||
1510 | void rpc_show_tasks(void) | ||
1511 | { | ||
1512 | struct rpc_clnt *clnt; | ||
1513 | struct rpc_task *t; | ||
1514 | |||
1515 | spin_lock(&rpc_client_lock); | ||
1516 | if (list_empty(&all_clients)) | ||
1517 | goto out; | ||
1518 | printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout " | ||
1519 | "-rpcwait -action- ---ops--\n"); | ||
1520 | list_for_each_entry(clnt, &all_clients, cl_clients) { | ||
1521 | if (list_empty(&clnt->cl_tasks)) | ||
1522 | continue; | ||
1523 | spin_lock(&clnt->cl_lock); | ||
1524 | list_for_each_entry(t, &clnt->cl_tasks, tk_task) { | ||
1525 | const char *rpc_waitq = "none"; | ||
1526 | |||
1527 | if (RPC_IS_QUEUED(t)) | ||
1528 | rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); | ||
1529 | |||
1530 | printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n", | ||
1531 | t->tk_pid, | ||
1532 | (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), | ||
1533 | t->tk_flags, t->tk_status, | ||
1534 | t->tk_client, | ||
1535 | (t->tk_client ? t->tk_client->cl_prog : 0), | ||
1536 | t->tk_rqstp, t->tk_timeout, | ||
1537 | rpc_waitq, | ||
1538 | t->tk_action, t->tk_ops); | ||
1539 | } | ||
1540 | spin_unlock(&clnt->cl_lock); | ||
1541 | } | ||
1542 | out: | ||
1543 | spin_unlock(&rpc_client_lock); | ||
1544 | } | ||
1545 | #endif | ||
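rpc_show_tasks() above walks the global client list under rpc_client_lock and each client's task list under its per-client cl_lock, so the dump cannot race with task creation or teardown. The two-level locking shape, stripped of the RPC specifics (names are placeholders):

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_client {
	struct list_head link;		/* on the global list */
	spinlock_t	 lock;		/* protects 'tasks' */
	struct list_head tasks;
};

static LIST_HEAD(my_clients);
static DEFINE_SPINLOCK(my_clients_lock);

static void dump_all_clients(void)
{
	struct my_client *clnt;

	spin_lock(&my_clients_lock);
	list_for_each_entry(clnt, &my_clients, link) {
		spin_lock(&clnt->lock);
		/* walk clnt->tasks and printk() each entry here */
		spin_unlock(&clnt->lock);
	}
	spin_unlock(&my_clients_lock);
}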
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 5887457dc936..e787b6a43eee
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -344,7 +344,7 @@ rpc_info_open(struct inode *inode, struct file *file) | |||
344 | mutex_lock(&inode->i_mutex); | 344 | mutex_lock(&inode->i_mutex); |
345 | clnt = RPC_I(inode)->private; | 345 | clnt = RPC_I(inode)->private; |
346 | if (clnt) { | 346 | if (clnt) { |
347 | atomic_inc(&clnt->cl_users); | 347 | kref_get(&clnt->cl_kref); |
348 | m->private = clnt; | 348 | m->private = clnt; |
349 | } else { | 349 | } else { |
350 | single_release(inode, file); | 350 | single_release(inode, file); |
@@ -448,6 +448,15 @@ void rpc_put_mount(void) | |||
448 | simple_release_fs(&rpc_mount, &rpc_mount_count); | 448 | simple_release_fs(&rpc_mount, &rpc_mount_count); |
449 | } | 449 | } |
450 | 450 | ||
451 | static int rpc_delete_dentry(struct dentry *dentry) | ||
452 | { | ||
453 | return 1; | ||
454 | } | ||
455 | |||
456 | static struct dentry_operations rpc_dentry_operations = { | ||
457 | .d_delete = rpc_delete_dentry, | ||
458 | }; | ||
459 | |||
451 | static int | 460 | static int |
452 | rpc_lookup_parent(char *path, struct nameidata *nd) | 461 | rpc_lookup_parent(char *path, struct nameidata *nd) |
453 | { | 462 | { |
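The rpc_dentry_operations added above exist only to make d_delete return 1, so rpc_pipefs dentries are dropped from the dcache as soon as their last user goes away instead of lingering as cached entries. In isolation the hook looks like this (a generic sketch, not the rpc_pipe code itself):

#include <linux/dcache.h>

static int my_delete_dentry(struct dentry *dentry)
{
	return 1;	/* always remove the dentry on last dput() */
}

static struct dentry_operations my_dentry_ops = {
	.d_delete = my_delete_dentry,
};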
@@ -506,7 +515,7 @@ rpc_get_inode(struct super_block *sb, int mode) | |||
506 | * FIXME: This probably has races. | 515 | * FIXME: This probably has races. |
507 | */ | 516 | */ |
508 | static void | 517 | static void |
509 | rpc_depopulate(struct dentry *parent) | 518 | rpc_depopulate(struct dentry *parent, int start, int eof) |
510 | { | 519 | { |
511 | struct inode *dir = parent->d_inode; | 520 | struct inode *dir = parent->d_inode; |
512 | struct list_head *pos, *next; | 521 | struct list_head *pos, *next; |
@@ -518,6 +527,10 @@ repeat: | |||
518 | spin_lock(&dcache_lock); | 527 | spin_lock(&dcache_lock); |
519 | list_for_each_safe(pos, next, &parent->d_subdirs) { | 528 | list_for_each_safe(pos, next, &parent->d_subdirs) { |
520 | dentry = list_entry(pos, struct dentry, d_u.d_child); | 529 | dentry = list_entry(pos, struct dentry, d_u.d_child); |
530 | if (!dentry->d_inode || | ||
531 | dentry->d_inode->i_ino < start || | ||
532 | dentry->d_inode->i_ino >= eof) | ||
533 | continue; | ||
521 | spin_lock(&dentry->d_lock); | 534 | spin_lock(&dentry->d_lock); |
522 | if (!d_unhashed(dentry)) { | 535 | if (!d_unhashed(dentry)) { |
523 | dget_locked(dentry); | 536 | dget_locked(dentry); |
@@ -533,11 +546,11 @@ repeat: | |||
533 | if (n) { | 546 | if (n) { |
534 | do { | 547 | do { |
535 | dentry = dvec[--n]; | 548 | dentry = dvec[--n]; |
536 | if (dentry->d_inode) { | 549 | if (S_ISREG(dentry->d_inode->i_mode)) |
537 | rpc_close_pipes(dentry->d_inode); | ||
538 | simple_unlink(dir, dentry); | 550 | simple_unlink(dir, dentry); |
539 | } | 551 | else if (S_ISDIR(dentry->d_inode->i_mode)) |
540 | inode_dir_notify(dir, DN_DELETE); | 552 | simple_rmdir(dir, dentry); |
553 | d_delete(dentry); | ||
541 | dput(dentry); | 554 | dput(dentry); |
542 | } while (n); | 555 | } while (n); |
543 | goto repeat; | 556 | goto repeat; |
@@ -560,6 +573,7 @@ rpc_populate(struct dentry *parent, | |||
560 | dentry = d_alloc_name(parent, files[i].name); | 573 | dentry = d_alloc_name(parent, files[i].name); |
561 | if (!dentry) | 574 | if (!dentry) |
562 | goto out_bad; | 575 | goto out_bad; |
576 | dentry->d_op = &rpc_dentry_operations; | ||
563 | mode = files[i].mode; | 577 | mode = files[i].mode; |
564 | inode = rpc_get_inode(dir->i_sb, mode); | 578 | inode = rpc_get_inode(dir->i_sb, mode); |
565 | if (!inode) { | 579 | if (!inode) { |
@@ -607,21 +621,14 @@ static int | |||
607 | __rpc_rmdir(struct inode *dir, struct dentry *dentry) | 621 | __rpc_rmdir(struct inode *dir, struct dentry *dentry) |
608 | { | 622 | { |
609 | int error; | 623 | int error; |
610 | 624 | error = simple_rmdir(dir, dentry); | |
611 | shrink_dcache_parent(dentry); | 625 | if (!error) |
612 | if (d_unhashed(dentry)) | 626 | d_delete(dentry); |
613 | return 0; | 627 | return error; |
614 | if ((error = simple_rmdir(dir, dentry)) != 0) | ||
615 | return error; | ||
616 | if (!error) { | ||
617 | inode_dir_notify(dir, DN_DELETE); | ||
618 | d_drop(dentry); | ||
619 | } | ||
620 | return 0; | ||
621 | } | 628 | } |
622 | 629 | ||
623 | static struct dentry * | 630 | static struct dentry * |
624 | rpc_lookup_create(struct dentry *parent, const char *name, int len) | 631 | rpc_lookup_create(struct dentry *parent, const char *name, int len, int exclusive) |
625 | { | 632 | { |
626 | struct inode *dir = parent->d_inode; | 633 | struct inode *dir = parent->d_inode; |
627 | struct dentry *dentry; | 634 | struct dentry *dentry; |
@@ -630,7 +637,9 @@ rpc_lookup_create(struct dentry *parent, const char *name, int len) | |||
630 | dentry = lookup_one_len(name, parent, len); | 637 | dentry = lookup_one_len(name, parent, len); |
631 | if (IS_ERR(dentry)) | 638 | if (IS_ERR(dentry)) |
632 | goto out_err; | 639 | goto out_err; |
633 | if (dentry->d_inode) { | 640 | if (!dentry->d_inode) |
641 | dentry->d_op = &rpc_dentry_operations; | ||
642 | else if (exclusive) { | ||
634 | dput(dentry); | 643 | dput(dentry); |
635 | dentry = ERR_PTR(-EEXIST); | 644 | dentry = ERR_PTR(-EEXIST); |
636 | goto out_err; | 645 | goto out_err; |
@@ -649,7 +658,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd) | |||
649 | 658 | ||
650 | if ((error = rpc_lookup_parent(path, nd)) != 0) | 659 | if ((error = rpc_lookup_parent(path, nd)) != 0) |
651 | return ERR_PTR(error); | 660 | return ERR_PTR(error); |
652 | dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len); | 661 | dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len, 1); |
653 | if (IS_ERR(dentry)) | 662 | if (IS_ERR(dentry)) |
654 | rpc_release_path(nd); | 663 | rpc_release_path(nd); |
655 | return dentry; | 664 | return dentry; |
@@ -681,7 +690,7 @@ out: | |||
681 | rpc_release_path(&nd); | 690 | rpc_release_path(&nd); |
682 | return dentry; | 691 | return dentry; |
683 | err_depopulate: | 692 | err_depopulate: |
684 | rpc_depopulate(dentry); | 693 | rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF); |
685 | __rpc_rmdir(dir, dentry); | 694 | __rpc_rmdir(dir, dentry); |
686 | err_dput: | 695 | err_dput: |
687 | dput(dentry); | 696 | dput(dentry); |
@@ -701,7 +710,7 @@ rpc_rmdir(struct dentry *dentry) | |||
701 | parent = dget_parent(dentry); | 710 | parent = dget_parent(dentry); |
702 | dir = parent->d_inode; | 711 | dir = parent->d_inode; |
703 | mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); | 712 | mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); |
704 | rpc_depopulate(dentry); | 713 | rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF); |
705 | error = __rpc_rmdir(dir, dentry); | 714 | error = __rpc_rmdir(dir, dentry); |
706 | dput(dentry); | 715 | dput(dentry); |
707 | mutex_unlock(&dir->i_mutex); | 716 | mutex_unlock(&dir->i_mutex); |
@@ -716,10 +725,21 @@ rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pi | |||
716 | struct inode *dir, *inode; | 725 | struct inode *dir, *inode; |
717 | struct rpc_inode *rpci; | 726 | struct rpc_inode *rpci; |
718 | 727 | ||
719 | dentry = rpc_lookup_create(parent, name, strlen(name)); | 728 | dentry = rpc_lookup_create(parent, name, strlen(name), 0); |
720 | if (IS_ERR(dentry)) | 729 | if (IS_ERR(dentry)) |
721 | return dentry; | 730 | return dentry; |
722 | dir = parent->d_inode; | 731 | dir = parent->d_inode; |
732 | if (dentry->d_inode) { | ||
733 | rpci = RPC_I(dentry->d_inode); | ||
734 | if (rpci->private != private || | ||
735 | rpci->ops != ops || | ||
736 | rpci->flags != flags) { | ||
737 | dput (dentry); | ||
738 | dentry = ERR_PTR(-EBUSY); | ||
739 | } | ||
740 | rpci->nkern_readwriters++; | ||
741 | goto out; | ||
742 | } | ||
723 | inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR); | 743 | inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR); |
724 | if (!inode) | 744 | if (!inode) |
725 | goto err_dput; | 745 | goto err_dput; |
@@ -730,6 +750,7 @@ rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pi | |||
730 | rpci->private = private; | 750 | rpci->private = private; |
731 | rpci->flags = flags; | 751 | rpci->flags = flags; |
732 | rpci->ops = ops; | 752 | rpci->ops = ops; |
753 | rpci->nkern_readwriters = 1; | ||
733 | inode_dir_notify(dir, DN_CREATE); | 754 | inode_dir_notify(dir, DN_CREATE); |
734 | dget(dentry); | 755 | dget(dentry); |
735 | out: | 756 | out: |
@@ -754,13 +775,11 @@ rpc_unlink(struct dentry *dentry) | |||
754 | parent = dget_parent(dentry); | 775 | parent = dget_parent(dentry); |
755 | dir = parent->d_inode; | 776 | dir = parent->d_inode; |
756 | mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); | 777 | mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); |
757 | if (!d_unhashed(dentry)) { | 778 | if (--RPC_I(dentry->d_inode)->nkern_readwriters == 0) { |
758 | d_drop(dentry); | 779 | rpc_close_pipes(dentry->d_inode); |
759 | if (dentry->d_inode) { | 780 | error = simple_unlink(dir, dentry); |
760 | rpc_close_pipes(dentry->d_inode); | 781 | if (!error) |
761 | error = simple_unlink(dir, dentry); | 782 | d_delete(dentry); |
762 | } | ||
763 | inode_dir_notify(dir, DN_DELETE); | ||
764 | } | 783 | } |
765 | dput(dentry); | 784 | dput(dentry); |
766 | mutex_unlock(&dir->i_mutex); | 785 | mutex_unlock(&dir->i_mutex); |
@@ -833,6 +852,7 @@ init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) | |||
833 | rpci->nreaders = 0; | 852 | rpci->nreaders = 0; |
834 | rpci->nwriters = 0; | 853 | rpci->nwriters = 0; |
835 | INIT_LIST_HEAD(&rpci->in_upcall); | 854 | INIT_LIST_HEAD(&rpci->in_upcall); |
855 | INIT_LIST_HEAD(&rpci->in_downcall); | ||
836 | INIT_LIST_HEAD(&rpci->pipe); | 856 | INIT_LIST_HEAD(&rpci->pipe); |
837 | rpci->pipelen = 0; | 857 | rpci->pipelen = 0; |
838 | init_waitqueue_head(&rpci->waitq); | 858 | init_waitqueue_head(&rpci->waitq); |
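
The rpc_mkpipe()/rpc_unlink() hunks above let several in-kernel users share one rpc_pipefs pipe: the first caller creates the dentry, later callers with the same private data, ops and flags just raise the new nkern_readwriters count, and the pipe is closed and unlinked only when the last user drops it. A minimal caller-side sketch under those assumptions; the helper names, the "idmap" pipe name and the my_* arguments are placeholders, not code from this patch.

	#include <linux/err.h>
	#include <linux/sunrpc/rpc_pipe_fs.h>

	/* my_parent, my_data and my_pipe_ops stand in for a real upcall user. */
	static struct dentry *get_shared_pipe(struct dentry *my_parent, void *my_data,
					      struct rpc_pipe_ops *my_pipe_ops)
	{
		/* First caller creates the pipe; a later caller passing the same
		 * private data, ops and flags gets the same dentry back and only
		 * bumps the in-kernel reader/writer count. */
		return rpc_mkpipe(my_parent, "idmap", my_data, my_pipe_ops, 0);
	}

	static void put_shared_pipe(struct dentry *pipe)
	{
		/* The pipe is torn down only when the last in-kernel user
		 * calls rpc_unlink(). */
		rpc_unlink(pipe);
	}
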
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 6c7aa8a1f0c6..d1740dbab991 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c | |||
@@ -12,6 +12,8 @@ | |||
12 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | 12 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/module.h> | ||
16 | |||
15 | #include <linux/types.h> | 17 | #include <linux/types.h> |
16 | #include <linux/socket.h> | 18 | #include <linux/socket.h> |
17 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
@@ -184,8 +186,8 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr, | |||
184 | .program = &rpcb_program, | 186 | .program = &rpcb_program, |
185 | .version = version, | 187 | .version = version, |
186 | .authflavor = RPC_AUTH_UNIX, | 188 | .authflavor = RPC_AUTH_UNIX, |
187 | .flags = (RPC_CLNT_CREATE_ONESHOT | | 189 | .flags = (RPC_CLNT_CREATE_NOPING | |
188 | RPC_CLNT_CREATE_NOPING), | 190 | RPC_CLNT_CREATE_INTR), |
189 | }; | 191 | }; |
190 | 192 | ||
191 | ((struct sockaddr_in *)srvaddr)->sin_port = htons(RPCBIND_PORT); | 193 | ((struct sockaddr_in *)srvaddr)->sin_port = htons(RPCBIND_PORT); |
@@ -238,6 +240,7 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) | |||
238 | 240 | ||
239 | error = rpc_call_sync(rpcb_clnt, &msg, 0); | 241 | error = rpc_call_sync(rpcb_clnt, &msg, 0); |
240 | 242 | ||
243 | rpc_shutdown_client(rpcb_clnt); | ||
241 | if (error < 0) | 244 | if (error < 0) |
242 | printk(KERN_WARNING "RPC: failed to contact local rpcbind " | 245 | printk(KERN_WARNING "RPC: failed to contact local rpcbind " |
243 | "server (errno %d).\n", -error); | 246 | "server (errno %d).\n", -error); |
@@ -246,21 +249,20 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) | |||
246 | return error; | 249 | return error; |
247 | } | 250 | } |
248 | 251 | ||
249 | #ifdef CONFIG_ROOT_NFS | ||
250 | /** | 252 | /** |
251 | * rpcb_getport_external - obtain the port for an RPC service on a given host | 253 | * rpcb_getport_sync - obtain the port for an RPC service on a given host |
252 | * @sin: address of remote peer | 254 | * @sin: address of remote peer |
253 | * @prog: RPC program number to bind | 255 | * @prog: RPC program number to bind |
254 | * @vers: RPC version number to bind | 256 | * @vers: RPC version number to bind |
255 | * @prot: transport protocol to use to make this request | 257 | * @prot: transport protocol to use to make this request |
256 | * | 258 | * |
257 | * Called from outside the RPC client in a synchronous task context. | 259 | * Called from outside the RPC client in a synchronous task context. |
260 | * Uses default timeout parameters specified by underlying transport. | ||
258 | * | 261 | * |
259 | * For now, this supports only version 2 queries, but is used only by | 262 | * XXX: Needs to support IPv6, and rpcbind versions 3 and 4 |
260 | * mount_clnt for NFS_ROOT. | ||
261 | */ | 263 | */ |
262 | int rpcb_getport_external(struct sockaddr_in *sin, __u32 prog, | 264 | int rpcb_getport_sync(struct sockaddr_in *sin, __u32 prog, |
263 | __u32 vers, int prot) | 265 | __u32 vers, int prot) |
264 | { | 266 | { |
265 | struct rpcbind_args map = { | 267 | struct rpcbind_args map = { |
266 | .r_prog = prog, | 268 | .r_prog = prog, |
@@ -277,15 +279,16 @@ int rpcb_getport_external(struct sockaddr_in *sin, __u32 prog, | |||
277 | char hostname[40]; | 279 | char hostname[40]; |
278 | int status; | 280 | int status; |
279 | 281 | ||
280 | dprintk("RPC: rpcb_getport_external(%u.%u.%u.%u, %u, %u, %d)\n", | 282 | dprintk("RPC: %s(" NIPQUAD_FMT ", %u, %u, %d)\n", |
281 | NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); | 283 | __FUNCTION__, NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); |
282 | 284 | ||
283 | sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); | 285 | sprintf(hostname, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr)); |
284 | rpcb_clnt = rpcb_create(hostname, (struct sockaddr *)sin, prot, 2, 0); | 286 | rpcb_clnt = rpcb_create(hostname, (struct sockaddr *)sin, prot, 2, 0); |
285 | if (IS_ERR(rpcb_clnt)) | 287 | if (IS_ERR(rpcb_clnt)) |
286 | return PTR_ERR(rpcb_clnt); | 288 | return PTR_ERR(rpcb_clnt); |
287 | 289 | ||
288 | status = rpc_call_sync(rpcb_clnt, &msg, 0); | 290 | status = rpc_call_sync(rpcb_clnt, &msg, 0); |
291 | rpc_shutdown_client(rpcb_clnt); | ||
289 | 292 | ||
290 | if (status >= 0) { | 293 | if (status >= 0) { |
291 | if (map.r_port != 0) | 294 | if (map.r_port != 0) |
@@ -294,16 +297,16 @@ int rpcb_getport_external(struct sockaddr_in *sin, __u32 prog, | |||
294 | } | 297 | } |
295 | return status; | 298 | return status; |
296 | } | 299 | } |
297 | #endif | 300 | EXPORT_SYMBOL_GPL(rpcb_getport_sync); |
298 | 301 | ||
299 | /** | 302 | /** |
300 | * rpcb_getport - obtain the port for a given RPC service on a given host | 303 | * rpcb_getport_async - obtain the port for a given RPC service on a given host |
301 | * @task: task that is waiting for portmapper request | 304 | * @task: task that is waiting for portmapper request |
302 | * | 305 | * |
303 | * This one can be called for an ongoing RPC request, and can be used in | 306 | * This one can be called for an ongoing RPC request, and can be used in |
304 | * an async (rpciod) context. | 307 | * an async (rpciod) context. |
305 | */ | 308 | */ |
306 | void rpcb_getport(struct rpc_task *task) | 309 | void rpcb_getport_async(struct rpc_task *task) |
307 | { | 310 | { |
308 | struct rpc_clnt *clnt = task->tk_client; | 311 | struct rpc_clnt *clnt = task->tk_client; |
309 | int bind_version; | 312 | int bind_version; |
@@ -314,17 +317,17 @@ void rpcb_getport(struct rpc_task *task) | |||
314 | struct sockaddr addr; | 317 | struct sockaddr addr; |
315 | int status; | 318 | int status; |
316 | 319 | ||
317 | dprintk("RPC: %5u rpcb_getport(%s, %u, %u, %d)\n", | 320 | dprintk("RPC: %5u %s(%s, %u, %u, %d)\n", |
318 | task->tk_pid, clnt->cl_server, | 321 | task->tk_pid, __FUNCTION__, |
319 | clnt->cl_prog, clnt->cl_vers, xprt->prot); | 322 | clnt->cl_server, clnt->cl_prog, clnt->cl_vers, xprt->prot); |
320 | 323 | ||
321 | /* Autobind on cloned rpc clients is discouraged */ | 324 | /* Autobind on cloned rpc clients is discouraged */ |
322 | BUG_ON(clnt->cl_parent != clnt); | 325 | BUG_ON(clnt->cl_parent != clnt); |
323 | 326 | ||
324 | if (xprt_test_and_set_binding(xprt)) { | 327 | if (xprt_test_and_set_binding(xprt)) { |
325 | status = -EACCES; /* tell caller to check again */ | 328 | status = -EACCES; /* tell caller to check again */ |
326 | dprintk("RPC: %5u rpcb_getport waiting for another binder\n", | 329 | dprintk("RPC: %5u %s: waiting for another binder\n", |
327 | task->tk_pid); | 330 | task->tk_pid, __FUNCTION__); |
328 | goto bailout_nowake; | 331 | goto bailout_nowake; |
329 | } | 332 | } |
330 | 333 | ||
@@ -335,27 +338,28 @@ void rpcb_getport(struct rpc_task *task) | |||
335 | /* Someone else may have bound if we slept */ | 338 | /* Someone else may have bound if we slept */ |
336 | if (xprt_bound(xprt)) { | 339 | if (xprt_bound(xprt)) { |
337 | status = 0; | 340 | status = 0; |
338 | dprintk("RPC: %5u rpcb_getport already bound\n", task->tk_pid); | 341 | dprintk("RPC: %5u %s: already bound\n", |
342 | task->tk_pid, __FUNCTION__); | ||
339 | goto bailout_nofree; | 343 | goto bailout_nofree; |
340 | } | 344 | } |
341 | 345 | ||
342 | if (rpcb_next_version[xprt->bind_index].rpc_proc == NULL) { | 346 | if (rpcb_next_version[xprt->bind_index].rpc_proc == NULL) { |
343 | xprt->bind_index = 0; | 347 | xprt->bind_index = 0; |
344 | status = -EACCES; /* tell caller to try again later */ | 348 | status = -EACCES; /* tell caller to try again later */ |
345 | dprintk("RPC: %5u rpcb_getport no more getport versions " | 349 | dprintk("RPC: %5u %s: no more getport versions available\n", |
346 | "available\n", task->tk_pid); | 350 | task->tk_pid, __FUNCTION__); |
347 | goto bailout_nofree; | 351 | goto bailout_nofree; |
348 | } | 352 | } |
349 | bind_version = rpcb_next_version[xprt->bind_index].rpc_vers; | 353 | bind_version = rpcb_next_version[xprt->bind_index].rpc_vers; |
350 | 354 | ||
351 | dprintk("RPC: %5u rpcb_getport trying rpcbind version %u\n", | 355 | dprintk("RPC: %5u %s: trying rpcbind version %u\n", |
352 | task->tk_pid, bind_version); | 356 | task->tk_pid, __FUNCTION__, bind_version); |
353 | 357 | ||
354 | map = kzalloc(sizeof(struct rpcbind_args), GFP_ATOMIC); | 358 | map = kzalloc(sizeof(struct rpcbind_args), GFP_ATOMIC); |
355 | if (!map) { | 359 | if (!map) { |
356 | status = -ENOMEM; | 360 | status = -ENOMEM; |
357 | dprintk("RPC: %5u rpcb_getport no memory available\n", | 361 | dprintk("RPC: %5u %s: no memory available\n", |
358 | task->tk_pid); | 362 | task->tk_pid, __FUNCTION__); |
359 | goto bailout_nofree; | 363 | goto bailout_nofree; |
360 | } | 364 | } |
361 | map->r_prog = clnt->cl_prog; | 365 | map->r_prog = clnt->cl_prog; |
@@ -373,16 +377,17 @@ void rpcb_getport(struct rpc_task *task) | |||
373 | rpcb_clnt = rpcb_create(clnt->cl_server, &addr, xprt->prot, bind_version, 0); | 377 | rpcb_clnt = rpcb_create(clnt->cl_server, &addr, xprt->prot, bind_version, 0); |
374 | if (IS_ERR(rpcb_clnt)) { | 378 | if (IS_ERR(rpcb_clnt)) { |
375 | status = PTR_ERR(rpcb_clnt); | 379 | status = PTR_ERR(rpcb_clnt); |
376 | dprintk("RPC: %5u rpcb_getport rpcb_create failed, error %ld\n", | 380 | dprintk("RPC: %5u %s: rpcb_create failed, error %ld\n", |
377 | task->tk_pid, PTR_ERR(rpcb_clnt)); | 381 | task->tk_pid, __FUNCTION__, PTR_ERR(rpcb_clnt)); |
378 | goto bailout; | 382 | goto bailout; |
379 | } | 383 | } |
380 | 384 | ||
381 | child = rpc_run_task(rpcb_clnt, RPC_TASK_ASYNC, &rpcb_getport_ops, map); | 385 | child = rpc_run_task(rpcb_clnt, RPC_TASK_ASYNC, &rpcb_getport_ops, map); |
386 | rpc_release_client(rpcb_clnt); | ||
382 | if (IS_ERR(child)) { | 387 | if (IS_ERR(child)) { |
383 | status = -EIO; | 388 | status = -EIO; |
384 | dprintk("RPC: %5u rpcb_getport rpc_run_task failed\n", | 389 | dprintk("RPC: %5u %s: rpc_run_task failed\n", |
385 | task->tk_pid); | 390 | task->tk_pid, __FUNCTION__); |
386 | goto bailout_nofree; | 391 | goto bailout_nofree; |
387 | } | 392 | } |
388 | rpc_put_task(child); | 393 | rpc_put_task(child); |
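
With the rename and EXPORT_SYMBOL_GPL above, rpcb_getport_sync() becomes a general synchronous rpcbind query rather than an NFS_ROOT-only helper. Reading the surrounding code, it appears to return the registered port on success and a negative errno otherwise, and per the XXX note it still handles only IPv4 and rpcbind version 2. A hedged usage sketch; the caller and address setup below are hypothetical, only rpcb_getport_sync() itself comes from the hunk above.

	#include <linux/in.h>
	#include <linux/sunrpc/clnt.h>

	/* Ask the rpcbind service on a known IPv4 server for the port of
	 * NFS version 3 over TCP (program 100003). */
	static int lookup_nfs_port(__be32 server_ip)
	{
		struct sockaddr_in sin = {
			.sin_family	= AF_INET,
			.sin_addr	= { .s_addr = server_ip },
		};

		/* Positive port number on success, negative errno on failure. */
		return rpcb_getport_sync(&sin, 100003, 3, IPPROTO_TCP);
	}
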
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 944d75396fb3..2ac43c41c3a9 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #ifdef RPC_DEBUG | 25 | #ifdef RPC_DEBUG |
26 | #define RPCDBG_FACILITY RPCDBG_SCHED | 26 | #define RPCDBG_FACILITY RPCDBG_SCHED |
27 | #define RPC_TASK_MAGIC_ID 0xf00baa | 27 | #define RPC_TASK_MAGIC_ID 0xf00baa |
28 | static int rpc_task_id; | ||
29 | #endif | 28 | #endif |
30 | 29 | ||
31 | /* | 30 | /* |
@@ -40,7 +39,6 @@ static mempool_t *rpc_task_mempool __read_mostly; | |||
40 | static mempool_t *rpc_buffer_mempool __read_mostly; | 39 | static mempool_t *rpc_buffer_mempool __read_mostly; |
41 | 40 | ||
42 | static void __rpc_default_timer(struct rpc_task *task); | 41 | static void __rpc_default_timer(struct rpc_task *task); |
43 | static void rpciod_killall(void); | ||
44 | static void rpc_async_schedule(struct work_struct *); | 42 | static void rpc_async_schedule(struct work_struct *); |
45 | static void rpc_release_task(struct rpc_task *task); | 43 | static void rpc_release_task(struct rpc_task *task); |
46 | 44 | ||
@@ -50,23 +48,13 @@ static void rpc_release_task(struct rpc_task *task); | |||
50 | static RPC_WAITQ(delay_queue, "delayq"); | 48 | static RPC_WAITQ(delay_queue, "delayq"); |
51 | 49 | ||
52 | /* | 50 | /* |
53 | * All RPC tasks are linked into this list | ||
54 | */ | ||
55 | static LIST_HEAD(all_tasks); | ||
56 | |||
57 | /* | ||
58 | * rpciod-related stuff | 51 | * rpciod-related stuff |
59 | */ | 52 | */ |
60 | static DEFINE_MUTEX(rpciod_mutex); | 53 | static DEFINE_MUTEX(rpciod_mutex); |
61 | static unsigned int rpciod_users; | 54 | static atomic_t rpciod_users = ATOMIC_INIT(0); |
62 | struct workqueue_struct *rpciod_workqueue; | 55 | struct workqueue_struct *rpciod_workqueue; |
63 | 56 | ||
64 | /* | 57 | /* |
65 | * Spinlock for other critical sections of code. | ||
66 | */ | ||
67 | static DEFINE_SPINLOCK(rpc_sched_lock); | ||
68 | |||
69 | /* | ||
70 | * Disable the timer for a given RPC task. Should be called with | 58 | * Disable the timer for a given RPC task. Should be called with |
71 | * queue->lock and bh_disabled in order to avoid races within | 59 | * queue->lock and bh_disabled in order to avoid races within |
72 | * rpc_run_timer(). | 60 | * rpc_run_timer(). |
@@ -267,18 +255,33 @@ static int rpc_wait_bit_interruptible(void *word) | |||
267 | return 0; | 255 | return 0; |
268 | } | 256 | } |
269 | 257 | ||
258 | #ifdef RPC_DEBUG | ||
259 | static void rpc_task_set_debuginfo(struct rpc_task *task) | ||
260 | { | ||
261 | static atomic_t rpc_pid; | ||
262 | |||
263 | task->tk_magic = RPC_TASK_MAGIC_ID; | ||
264 | task->tk_pid = atomic_inc_return(&rpc_pid); | ||
265 | } | ||
266 | #else | ||
267 | static inline void rpc_task_set_debuginfo(struct rpc_task *task) | ||
268 | { | ||
269 | } | ||
270 | #endif | ||
271 | |||
270 | static void rpc_set_active(struct rpc_task *task) | 272 | static void rpc_set_active(struct rpc_task *task) |
271 | { | 273 | { |
274 | struct rpc_clnt *clnt; | ||
272 | if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0) | 275 | if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0) |
273 | return; | 276 | return; |
274 | spin_lock(&rpc_sched_lock); | 277 | rpc_task_set_debuginfo(task); |
275 | #ifdef RPC_DEBUG | ||
276 | task->tk_magic = RPC_TASK_MAGIC_ID; | ||
277 | task->tk_pid = rpc_task_id++; | ||
278 | #endif | ||
279 | /* Add to global list of all tasks */ | 278 | /* Add to global list of all tasks */ |
280 | list_add_tail(&task->tk_task, &all_tasks); | 279 | clnt = task->tk_client; |
281 | spin_unlock(&rpc_sched_lock); | 280 | if (clnt != NULL) { |
281 | spin_lock(&clnt->cl_lock); | ||
282 | list_add_tail(&task->tk_task, &clnt->cl_tasks); | ||
283 | spin_unlock(&clnt->cl_lock); | ||
284 | } | ||
282 | } | 285 | } |
283 | 286 | ||
284 | /* | 287 | /* |
@@ -818,6 +821,7 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons | |||
818 | if (tk_ops->rpc_call_prepare != NULL) | 821 | if (tk_ops->rpc_call_prepare != NULL) |
819 | task->tk_action = rpc_prepare_task; | 822 | task->tk_action = rpc_prepare_task; |
820 | task->tk_calldata = calldata; | 823 | task->tk_calldata = calldata; |
824 | INIT_LIST_HEAD(&task->tk_task); | ||
821 | 825 | ||
822 | /* Initialize retry counters */ | 826 | /* Initialize retry counters */ |
823 | task->tk_garb_retry = 2; | 827 | task->tk_garb_retry = 2; |
@@ -830,7 +834,7 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons | |||
830 | task->tk_workqueue = rpciod_workqueue; | 834 | task->tk_workqueue = rpciod_workqueue; |
831 | 835 | ||
832 | if (clnt) { | 836 | if (clnt) { |
833 | atomic_inc(&clnt->cl_users); | 837 | kref_get(&clnt->cl_kref); |
834 | if (clnt->cl_softrtry) | 838 | if (clnt->cl_softrtry) |
835 | task->tk_flags |= RPC_TASK_SOFT; | 839 | task->tk_flags |= RPC_TASK_SOFT; |
836 | if (!clnt->cl_intr) | 840 | if (!clnt->cl_intr) |
@@ -860,9 +864,7 @@ static void rpc_free_task(struct rcu_head *rcu) | |||
860 | } | 864 | } |
861 | 865 | ||
862 | /* | 866 | /* |
863 | * Create a new task for the specified client. We have to | 867 | * Create a new task for the specified client. |
864 | * clean up after an allocation failure, as the client may | ||
865 | * have specified "oneshot". | ||
866 | */ | 868 | */ |
867 | struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata) | 869 | struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata) |
868 | { | 870 | { |
@@ -870,7 +872,7 @@ struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc | |||
870 | 872 | ||
871 | task = rpc_alloc_task(); | 873 | task = rpc_alloc_task(); |
872 | if (!task) | 874 | if (!task) |
873 | goto cleanup; | 875 | goto out; |
874 | 876 | ||
875 | rpc_init_task(task, clnt, flags, tk_ops, calldata); | 877 | rpc_init_task(task, clnt, flags, tk_ops, calldata); |
876 | 878 | ||
@@ -878,16 +880,6 @@ struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc | |||
878 | task->tk_flags |= RPC_TASK_DYNAMIC; | 880 | task->tk_flags |= RPC_TASK_DYNAMIC; |
879 | out: | 881 | out: |
880 | return task; | 882 | return task; |
881 | |||
882 | cleanup: | ||
883 | /* Check whether to release the client */ | ||
884 | if (clnt) { | ||
885 | printk("rpc_new_task: failed, users=%d, oneshot=%d\n", | ||
886 | atomic_read(&clnt->cl_users), clnt->cl_oneshot); | ||
887 | atomic_inc(&clnt->cl_users); /* pretend we were used ... */ | ||
888 | rpc_release_client(clnt); | ||
889 | } | ||
890 | goto out; | ||
891 | } | 883 | } |
892 | 884 | ||
893 | 885 | ||
@@ -920,11 +912,13 @@ static void rpc_release_task(struct rpc_task *task) | |||
920 | #endif | 912 | #endif |
921 | dprintk("RPC: %5u release task\n", task->tk_pid); | 913 | dprintk("RPC: %5u release task\n", task->tk_pid); |
922 | 914 | ||
923 | /* Remove from global task list */ | 915 | if (!list_empty(&task->tk_task)) { |
924 | spin_lock(&rpc_sched_lock); | 916 | struct rpc_clnt *clnt = task->tk_client; |
925 | list_del(&task->tk_task); | 917 | /* Remove from client task list */ |
926 | spin_unlock(&rpc_sched_lock); | 918 | spin_lock(&clnt->cl_lock); |
927 | 919 | list_del(&task->tk_task); | |
920 | spin_unlock(&clnt->cl_lock); | ||
921 | } | ||
928 | BUG_ON (RPC_IS_QUEUED(task)); | 922 | BUG_ON (RPC_IS_QUEUED(task)); |
929 | 923 | ||
930 | /* Synchronously delete any running timer */ | 924 | /* Synchronously delete any running timer */ |
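
Together, the rpc_set_active() and rpc_release_task() hunks replace the global all_tasks list with a per-client list: an active task is linked onto clnt->cl_tasks under clnt->cl_lock and unlinked again on release, which is also why tk_task now needs INIT_LIST_HEAD() in rpc_init_task(). A small sketch of walking that list, roughly what a per-client task dump or the rpc_killall_tasks() rework below does; the function itself is illustrative, not from the patch.

	#include <linux/spinlock.h>
	#include <linux/sunrpc/clnt.h>
	#include <linux/sunrpc/sched.h>

	/* Walk one client's outstanding tasks under the new per-client lock. */
	static void dump_client_tasks(struct rpc_clnt *clnt)
	{
		struct rpc_task *task;

		spin_lock(&clnt->cl_lock);
		list_for_each_entry(task, &clnt->cl_tasks, tk_task)
			printk(KERN_INFO "RPC task %p flags %04x status %d\n",
			       task, task->tk_flags, task->tk_status);
		spin_unlock(&clnt->cl_lock);
	}
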
@@ -939,29 +933,6 @@ static void rpc_release_task(struct rpc_task *task) | |||
939 | rpc_put_task(task); | 933 | rpc_put_task(task); |
940 | } | 934 | } |
941 | 935 | ||
942 | /** | ||
943 | * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it | ||
944 | * @clnt: pointer to RPC client | ||
945 | * @flags: RPC flags | ||
946 | * @ops: RPC call ops | ||
947 | * @data: user call data | ||
948 | */ | ||
949 | struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, | ||
950 | const struct rpc_call_ops *ops, | ||
951 | void *data) | ||
952 | { | ||
953 | struct rpc_task *task; | ||
954 | task = rpc_new_task(clnt, flags, ops, data); | ||
955 | if (task == NULL) { | ||
956 | rpc_release_calldata(ops, data); | ||
957 | return ERR_PTR(-ENOMEM); | ||
958 | } | ||
959 | atomic_inc(&task->tk_count); | ||
960 | rpc_execute(task); | ||
961 | return task; | ||
962 | } | ||
963 | EXPORT_SYMBOL(rpc_run_task); | ||
964 | |||
965 | /* | 936 | /* |
966 | * Kill all tasks for the given client. | 937 | * Kill all tasks for the given client. |
967 | * XXX: kill their descendants as well? | 938 | * XXX: kill their descendants as well? |
@@ -969,44 +940,25 @@ EXPORT_SYMBOL(rpc_run_task); | |||
969 | void rpc_killall_tasks(struct rpc_clnt *clnt) | 940 | void rpc_killall_tasks(struct rpc_clnt *clnt) |
970 | { | 941 | { |
971 | struct rpc_task *rovr; | 942 | struct rpc_task *rovr; |
972 | struct list_head *le; | ||
973 | 943 | ||
974 | dprintk("RPC: killing all tasks for client %p\n", clnt); | ||
975 | 944 | ||
945 | if (list_empty(&clnt->cl_tasks)) | ||
946 | return; | ||
947 | dprintk("RPC: killing all tasks for client %p\n", clnt); | ||
976 | /* | 948 | /* |
977 | * Spin lock all_tasks to prevent changes... | 949 | * Spin lock all_tasks to prevent changes... |
978 | */ | 950 | */ |
979 | spin_lock(&rpc_sched_lock); | 951 | spin_lock(&clnt->cl_lock); |
980 | alltask_for_each(rovr, le, &all_tasks) { | 952 | list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) { |
981 | if (! RPC_IS_ACTIVATED(rovr)) | 953 | if (! RPC_IS_ACTIVATED(rovr)) |
982 | continue; | 954 | continue; |
983 | if (!clnt || rovr->tk_client == clnt) { | 955 | if (!(rovr->tk_flags & RPC_TASK_KILLED)) { |
984 | rovr->tk_flags |= RPC_TASK_KILLED; | 956 | rovr->tk_flags |= RPC_TASK_KILLED; |
985 | rpc_exit(rovr, -EIO); | 957 | rpc_exit(rovr, -EIO); |
986 | rpc_wake_up_task(rovr); | 958 | rpc_wake_up_task(rovr); |
987 | } | 959 | } |
988 | } | 960 | } |
989 | spin_unlock(&rpc_sched_lock); | 961 | spin_unlock(&clnt->cl_lock); |
990 | } | ||
991 | |||
992 | static void rpciod_killall(void) | ||
993 | { | ||
994 | unsigned long flags; | ||
995 | |||
996 | while (!list_empty(&all_tasks)) { | ||
997 | clear_thread_flag(TIF_SIGPENDING); | ||
998 | rpc_killall_tasks(NULL); | ||
999 | flush_workqueue(rpciod_workqueue); | ||
1000 | if (!list_empty(&all_tasks)) { | ||
1001 | dprintk("RPC: rpciod_killall: waiting for tasks " | ||
1002 | "to exit\n"); | ||
1003 | yield(); | ||
1004 | } | ||
1005 | } | ||
1006 | |||
1007 | spin_lock_irqsave(¤t->sighand->siglock, flags); | ||
1008 | recalc_sigpending(); | ||
1009 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | ||
1010 | } | 962 | } |
1011 | 963 | ||
1012 | /* | 964 | /* |
@@ -1018,28 +970,27 @@ rpciod_up(void) | |||
1018 | struct workqueue_struct *wq; | 970 | struct workqueue_struct *wq; |
1019 | int error = 0; | 971 | int error = 0; |
1020 | 972 | ||
973 | if (atomic_inc_not_zero(&rpciod_users)) | ||
974 | return 0; | ||
975 | |||
1021 | mutex_lock(&rpciod_mutex); | 976 | mutex_lock(&rpciod_mutex); |
1022 | dprintk("RPC: rpciod_up: users %u\n", rpciod_users); | 977 | |
1023 | rpciod_users++; | 978 | /* Guard against races with rpciod_down() */ |
1024 | if (rpciod_workqueue) | 979 | if (rpciod_workqueue != NULL) |
1025 | goto out; | 980 | goto out_ok; |
1026 | /* | ||
1027 | * If there's no pid, we should be the first user. | ||
1028 | */ | ||
1029 | if (rpciod_users > 1) | ||
1030 | printk(KERN_WARNING "rpciod_up: no workqueue, %u users??\n", rpciod_users); | ||
1031 | /* | 981 | /* |
1032 | * Create the rpciod thread and wait for it to start. | 982 | * Create the rpciod thread and wait for it to start. |
1033 | */ | 983 | */ |
984 | dprintk("RPC: creating workqueue rpciod\n"); | ||
1034 | error = -ENOMEM; | 985 | error = -ENOMEM; |
1035 | wq = create_workqueue("rpciod"); | 986 | wq = create_workqueue("rpciod"); |
1036 | if (wq == NULL) { | 987 | if (wq == NULL) |
1037 | printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error); | ||
1038 | rpciod_users--; | ||
1039 | goto out; | 988 | goto out; |
1040 | } | 989 | |
1041 | rpciod_workqueue = wq; | 990 | rpciod_workqueue = wq; |
1042 | error = 0; | 991 | error = 0; |
992 | out_ok: | ||
993 | atomic_inc(&rpciod_users); | ||
1043 | out: | 994 | out: |
1044 | mutex_unlock(&rpciod_mutex); | 995 | mutex_unlock(&rpciod_mutex); |
1045 | return error; | 996 | return error; |
@@ -1048,59 +999,19 @@ out: | |||
1048 | void | 999 | void |
1049 | rpciod_down(void) | 1000 | rpciod_down(void) |
1050 | { | 1001 | { |
1002 | if (!atomic_dec_and_test(&rpciod_users)) | ||
1003 | return; | ||
1004 | |||
1051 | mutex_lock(&rpciod_mutex); | 1005 | mutex_lock(&rpciod_mutex); |
1052 | dprintk("RPC: rpciod_down sema %u\n", rpciod_users); | 1006 | dprintk("RPC: destroying workqueue rpciod\n"); |
1053 | if (rpciod_users) { | ||
1054 | if (--rpciod_users) | ||
1055 | goto out; | ||
1056 | } else | ||
1057 | printk(KERN_WARNING "rpciod_down: no users??\n"); | ||
1058 | 1007 | ||
1059 | if (!rpciod_workqueue) { | 1008 | if (atomic_read(&rpciod_users) == 0 && rpciod_workqueue != NULL) { |
1060 | dprintk("RPC: rpciod_down: Nothing to do!\n"); | 1009 | destroy_workqueue(rpciod_workqueue); |
1061 | goto out; | 1010 | rpciod_workqueue = NULL; |
1062 | } | 1011 | } |
1063 | rpciod_killall(); | ||
1064 | |||
1065 | destroy_workqueue(rpciod_workqueue); | ||
1066 | rpciod_workqueue = NULL; | ||
1067 | out: | ||
1068 | mutex_unlock(&rpciod_mutex); | 1012 | mutex_unlock(&rpciod_mutex); |
1069 | } | 1013 | } |
1070 | 1014 | ||
1071 | #ifdef RPC_DEBUG | ||
1072 | void rpc_show_tasks(void) | ||
1073 | { | ||
1074 | struct list_head *le; | ||
1075 | struct rpc_task *t; | ||
1076 | |||
1077 | spin_lock(&rpc_sched_lock); | ||
1078 | if (list_empty(&all_tasks)) { | ||
1079 | spin_unlock(&rpc_sched_lock); | ||
1080 | return; | ||
1081 | } | ||
1082 | printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout " | ||
1083 | "-rpcwait -action- ---ops--\n"); | ||
1084 | alltask_for_each(t, le, &all_tasks) { | ||
1085 | const char *rpc_waitq = "none"; | ||
1086 | |||
1087 | if (RPC_IS_QUEUED(t)) | ||
1088 | rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); | ||
1089 | |||
1090 | printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n", | ||
1091 | t->tk_pid, | ||
1092 | (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), | ||
1093 | t->tk_flags, t->tk_status, | ||
1094 | t->tk_client, | ||
1095 | (t->tk_client ? t->tk_client->cl_prog : 0), | ||
1096 | t->tk_rqstp, t->tk_timeout, | ||
1097 | rpc_waitq, | ||
1098 | t->tk_action, t->tk_ops); | ||
1099 | } | ||
1100 | spin_unlock(&rpc_sched_lock); | ||
1101 | } | ||
1102 | #endif | ||
1103 | |||
1104 | void | 1015 | void |
1105 | rpc_destroy_mempool(void) | 1016 | rpc_destroy_mempool(void) |
1106 | { | 1017 | { |
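
The rpciod_up()/rpciod_down() rework above turns the counted-mutex scheme into a refcounted singleton: atomic_inc_not_zero() is the lock-free fast path for existing users, and only the first user (or the last one going away) takes the mutex to create or destroy the workqueue. The same pattern in isolation, with every name below a placeholder:

	#include <asm/atomic.h>
	#include <linux/errno.h>
	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	static atomic_t my_users = ATOMIC_INIT(0);
	static DEFINE_MUTEX(my_mutex);
	static struct workqueue_struct *my_wq;

	static int my_service_up(void)
	{
		int error = 0;

		if (atomic_inc_not_zero(&my_users))	/* fast path: already running */
			return 0;

		mutex_lock(&my_mutex);
		if (my_wq == NULL) {
			my_wq = create_workqueue("mywq");
			if (my_wq == NULL)
				error = -ENOMEM;
		}
		if (!error)
			atomic_inc(&my_users);
		mutex_unlock(&my_mutex);
		return error;
	}

	static void my_service_down(void)
	{
		if (!atomic_dec_and_test(&my_users))
			return;
		mutex_lock(&my_mutex);
		/* Re-check under the mutex: a racing my_service_up() may have
		 * revived the user count in the meantime. */
		if (atomic_read(&my_users) == 0 && my_wq != NULL) {
			destroy_workqueue(my_wq);
			my_wq = NULL;
		}
		mutex_unlock(&my_mutex);
	}
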
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index 73075dec83c0..384c4ad5ab86 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
@@ -28,15 +28,11 @@ EXPORT_SYMBOL(rpc_init_task); | |||
28 | EXPORT_SYMBOL(rpc_sleep_on); | 28 | EXPORT_SYMBOL(rpc_sleep_on); |
29 | EXPORT_SYMBOL(rpc_wake_up_next); | 29 | EXPORT_SYMBOL(rpc_wake_up_next); |
30 | EXPORT_SYMBOL(rpc_wake_up_task); | 30 | EXPORT_SYMBOL(rpc_wake_up_task); |
31 | EXPORT_SYMBOL(rpciod_down); | ||
32 | EXPORT_SYMBOL(rpciod_up); | ||
33 | EXPORT_SYMBOL(rpc_new_task); | ||
34 | EXPORT_SYMBOL(rpc_wake_up_status); | 31 | EXPORT_SYMBOL(rpc_wake_up_status); |
35 | 32 | ||
36 | /* RPC client functions */ | 33 | /* RPC client functions */ |
37 | EXPORT_SYMBOL(rpc_clone_client); | 34 | EXPORT_SYMBOL(rpc_clone_client); |
38 | EXPORT_SYMBOL(rpc_bind_new_program); | 35 | EXPORT_SYMBOL(rpc_bind_new_program); |
39 | EXPORT_SYMBOL(rpc_destroy_client); | ||
40 | EXPORT_SYMBOL(rpc_shutdown_client); | 36 | EXPORT_SYMBOL(rpc_shutdown_client); |
41 | EXPORT_SYMBOL(rpc_killall_tasks); | 37 | EXPORT_SYMBOL(rpc_killall_tasks); |
42 | EXPORT_SYMBOL(rpc_call_sync); | 38 | EXPORT_SYMBOL(rpc_call_sync); |
@@ -61,7 +57,7 @@ EXPORT_SYMBOL(rpcauth_unregister); | |||
61 | EXPORT_SYMBOL(rpcauth_create); | 57 | EXPORT_SYMBOL(rpcauth_create); |
62 | EXPORT_SYMBOL(rpcauth_lookupcred); | 58 | EXPORT_SYMBOL(rpcauth_lookupcred); |
63 | EXPORT_SYMBOL(rpcauth_lookup_credcache); | 59 | EXPORT_SYMBOL(rpcauth_lookup_credcache); |
64 | EXPORT_SYMBOL(rpcauth_free_credcache); | 60 | EXPORT_SYMBOL(rpcauth_destroy_credcache); |
65 | EXPORT_SYMBOL(rpcauth_init_credcache); | 61 | EXPORT_SYMBOL(rpcauth_init_credcache); |
66 | EXPORT_SYMBOL(put_rpccred); | 62 | EXPORT_SYMBOL(put_rpccred); |
67 | 63 | ||
@@ -156,6 +152,7 @@ init_sunrpc(void) | |||
156 | cache_register(&ip_map_cache); | 152 | cache_register(&ip_map_cache); |
157 | cache_register(&unix_gid_cache); | 153 | cache_register(&unix_gid_cache); |
158 | init_socket_xprt(); | 154 | init_socket_xprt(); |
155 | rpcauth_init_module(); | ||
159 | out: | 156 | out: |
160 | return err; | 157 | return err; |
161 | } | 158 | } |
@@ -163,6 +160,7 @@ out: | |||
163 | static void __exit | 160 | static void __exit |
164 | cleanup_sunrpc(void) | 161 | cleanup_sunrpc(void) |
165 | { | 162 | { |
163 | rpcauth_remove_module(); | ||
166 | cleanup_socket_xprt(); | 164 | cleanup_socket_xprt(); |
167 | unregister_rpc_pipefs(); | 165 | unregister_rpc_pipefs(); |
168 | rpc_destroy_mempool(); | 166 | rpc_destroy_mempool(); |
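
With rpciod_up/rpciod_down and rpc_new_task no longer exported, callers outside sunrpc are expected to go through the exported client entry points such as rpc_call_sync() and rpc_run_task(); the async pattern below follows the rpc_run_task() call visible in the rpcb_getport_async() hunk earlier. my_call_ops and my_calldata are placeholders, and the actual RPC message would normally be supplied from an .rpc_call_prepare callback (not shown).

	#include <linux/err.h>
	#include <linux/sunrpc/clnt.h>
	#include <linux/sunrpc/sched.h>

	static void my_call_done(struct rpc_task *task, void *calldata)
	{
		/* inspect task->tk_status here */
	}

	static const struct rpc_call_ops my_call_ops = {
		.rpc_call_done	= my_call_done,
	};

	static int start_async_call(struct rpc_clnt *clnt, void *my_calldata)
	{
		struct rpc_task *task;

		task = rpc_run_task(clnt, RPC_TASK_ASYNC, &my_call_ops, my_calldata);
		if (IS_ERR(task))
			return PTR_ERR(task);
		rpc_put_task(task);	/* drop our reference; rpciod runs the task */
		return 0;
	}
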
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 5baf48de2558..64b9b8c743c4 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -644,6 +644,7 @@ svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen) | |||
644 | struct msghdr msg = { | 644 | struct msghdr msg = { |
645 | .msg_flags = MSG_DONTWAIT, | 645 | .msg_flags = MSG_DONTWAIT, |
646 | }; | 646 | }; |
647 | struct sockaddr *sin; | ||
647 | int len; | 648 | int len; |
648 | 649 | ||
649 | len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen, | 650 | len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen, |
@@ -654,6 +655,19 @@ svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen) | |||
654 | memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen); | 655 | memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen); |
655 | rqstp->rq_addrlen = svsk->sk_remotelen; | 656 | rqstp->rq_addrlen = svsk->sk_remotelen; |
656 | 657 | ||
658 | /* Destination address in request is needed for binding the | ||
659 | * source address in RPC callbacks later. | ||
660 | */ | ||
661 | sin = (struct sockaddr *)&svsk->sk_local; | ||
662 | switch (sin->sa_family) { | ||
663 | case AF_INET: | ||
664 | rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr; | ||
665 | break; | ||
666 | case AF_INET6: | ||
667 | rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr; | ||
668 | break; | ||
669 | } | ||
670 | |||
657 | dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", | 671 | dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", |
658 | svsk, iov[0].iov_base, iov[0].iov_len, len); | 672 | svsk, iov[0].iov_base, iov[0].iov_len, len); |
659 | 673 | ||
@@ -1064,6 +1078,12 @@ svc_tcp_accept(struct svc_sock *svsk) | |||
1064 | goto failed; | 1078 | goto failed; |
1065 | memcpy(&newsvsk->sk_remote, sin, slen); | 1079 | memcpy(&newsvsk->sk_remote, sin, slen); |
1066 | newsvsk->sk_remotelen = slen; | 1080 | newsvsk->sk_remotelen = slen; |
1081 | err = kernel_getsockname(newsock, sin, &slen); | ||
1082 | if (unlikely(err < 0)) { | ||
1083 | dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err); | ||
1084 | slen = offsetof(struct sockaddr, sa_data); | ||
1085 | } | ||
1086 | memcpy(&newsvsk->sk_local, sin, slen); | ||
1067 | 1087 | ||
1068 | svc_sock_received(newsvsk); | 1088 | svc_sock_received(newsvsk); |
1069 | 1089 | ||
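
The svcsock.c hunks record the request's destination address: svc_recvfrom() copies the socket's local address into rqstp->rq_daddr per address family, and the TCP accept path saves kernel_getsockname()'s result in sk_local so that information is available later. The point, per the series, is that a callback to the client can then be sourced from the same address the client originally contacted. A hypothetical IPv4 consumer of rq_daddr; how the source address is ultimately handed to the callback's RPC client is not shown in these hunks.

	#include <linux/in.h>
	#include <linux/string.h>
	#include <linux/sunrpc/svc.h>

	/* Build an IPv4 source address for a callback from the destination
	 * address recorded in the request above. */
	static void callback_source_from_request(struct svc_rqst *rqstp,
						 struct sockaddr_in *src)
	{
		memset(src, 0, sizeof(*src));
		src->sin_family = AF_INET;
		src->sin_addr = rqstp->rq_daddr.addr;	/* address the client used */
		src->sin_port = 0;			/* transport picks a source port */
	}
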
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 5b05b73e4c1d..c8c2edccad7e 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -127,7 +127,7 @@ static void xprt_clear_locked(struct rpc_xprt *xprt) | |||
127 | clear_bit(XPRT_LOCKED, &xprt->state); | 127 | clear_bit(XPRT_LOCKED, &xprt->state); |
128 | smp_mb__after_clear_bit(); | 128 | smp_mb__after_clear_bit(); |
129 | } else | 129 | } else |
130 | schedule_work(&xprt->task_cleanup); | 130 | queue_work(rpciod_workqueue, &xprt->task_cleanup); |
131 | } | 131 | } |
132 | 132 | ||
133 | /* | 133 | /* |
@@ -515,7 +515,7 @@ xprt_init_autodisconnect(unsigned long data) | |||
515 | if (xprt_connecting(xprt)) | 515 | if (xprt_connecting(xprt)) |
516 | xprt_release_write(xprt, NULL); | 516 | xprt_release_write(xprt, NULL); |
517 | else | 517 | else |
518 | schedule_work(&xprt->task_cleanup); | 518 | queue_work(rpciod_workqueue, &xprt->task_cleanup); |
519 | return; | 519 | return; |
520 | out_abort: | 520 | out_abort: |
521 | spin_unlock(&xprt->transport_lock); | 521 | spin_unlock(&xprt->transport_lock); |
@@ -886,27 +886,24 @@ void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long i | |||
886 | 886 | ||
887 | /** | 887 | /** |
888 | * xprt_create_transport - create an RPC transport | 888 | * xprt_create_transport - create an RPC transport |
889 | * @proto: requested transport protocol | 889 | * @args: rpc transport creation arguments |
890 | * @ap: remote peer address | ||
891 | * @size: length of address | ||
892 | * @to: timeout parameters | ||
893 | * | 890 | * |
894 | */ | 891 | */ |
895 | struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t size, struct rpc_timeout *to) | 892 | struct rpc_xprt *xprt_create_transport(struct rpc_xprtsock_create *args) |
896 | { | 893 | { |
897 | struct rpc_xprt *xprt; | 894 | struct rpc_xprt *xprt; |
898 | struct rpc_rqst *req; | 895 | struct rpc_rqst *req; |
899 | 896 | ||
900 | switch (proto) { | 897 | switch (args->proto) { |
901 | case IPPROTO_UDP: | 898 | case IPPROTO_UDP: |
902 | xprt = xs_setup_udp(ap, size, to); | 899 | xprt = xs_setup_udp(args); |
903 | break; | 900 | break; |
904 | case IPPROTO_TCP: | 901 | case IPPROTO_TCP: |
905 | xprt = xs_setup_tcp(ap, size, to); | 902 | xprt = xs_setup_tcp(args); |
906 | break; | 903 | break; |
907 | default: | 904 | default: |
908 | printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n", | 905 | printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n", |
909 | proto); | 906 | args->proto); |
910 | return ERR_PTR(-EIO); | 907 | return ERR_PTR(-EIO); |
911 | } | 908 | } |
912 | if (IS_ERR(xprt)) { | 909 | if (IS_ERR(xprt)) { |
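
xprt_create_transport() now takes a single struct rpc_xprtsock_create instead of a growing parameter list; the fields used in these hunks are proto, srcaddr, dstaddr, addrlen and timeout. A sketch of filling the argument block for a UDP transport, assuming the structure is declared in linux/sunrpc/xprt.h; the helper itself is illustrative.

	#include <linux/in.h>
	#include <linux/sunrpc/xprt.h>

	static struct rpc_xprt *make_udp_transport(struct sockaddr_in *server,
						   struct sockaddr_in *local)
	{
		struct rpc_xprtsock_create xprtargs = {
			.proto		= IPPROTO_UDP,
			.srcaddr	= (struct sockaddr *)local,	/* may be NULL */
			.dstaddr	= (struct sockaddr *)server,
			.addrlen	= sizeof(*server),
			.timeout	= NULL,		/* use the transport default */
		};

		return xprt_create_transport(&xprtargs);
	}
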
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index cc33c5880abb..4ae7eed7f617 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -235,6 +235,7 @@ struct sock_xprt { | |||
235 | * Connection of transports | 235 | * Connection of transports |
236 | */ | 236 | */ |
237 | struct delayed_work connect_worker; | 237 | struct delayed_work connect_worker; |
238 | struct sockaddr_storage addr; | ||
238 | unsigned short port; | 239 | unsigned short port; |
239 | 240 | ||
240 | /* | 241 | /* |
@@ -653,8 +654,7 @@ static void xs_destroy(struct rpc_xprt *xprt) | |||
653 | 654 | ||
654 | dprintk("RPC: xs_destroy xprt %p\n", xprt); | 655 | dprintk("RPC: xs_destroy xprt %p\n", xprt); |
655 | 656 | ||
656 | cancel_delayed_work(&transport->connect_worker); | 657 | cancel_rearming_delayed_work(&transport->connect_worker); |
657 | flush_scheduled_work(); | ||
658 | 658 | ||
659 | xprt_disconnect(xprt); | 659 | xprt_disconnect(xprt); |
660 | xs_close(xprt); | 660 | xs_close(xprt); |
@@ -1001,7 +1001,7 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1001 | /* Try to schedule an autoclose RPC calls */ | 1001 | /* Try to schedule an autoclose RPC calls */ |
1002 | set_bit(XPRT_CLOSE_WAIT, &xprt->state); | 1002 | set_bit(XPRT_CLOSE_WAIT, &xprt->state); |
1003 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) | 1003 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) |
1004 | schedule_work(&xprt->task_cleanup); | 1004 | queue_work(rpciod_workqueue, &xprt->task_cleanup); |
1005 | default: | 1005 | default: |
1006 | xprt_disconnect(xprt); | 1006 | xprt_disconnect(xprt); |
1007 | } | 1007 | } |
@@ -1146,31 +1146,36 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) | |||
1146 | sap->sin_port = htons(port); | 1146 | sap->sin_port = htons(port); |
1147 | } | 1147 | } |
1148 | 1148 | ||
1149 | static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock) | 1149 | static int xs_bind(struct sock_xprt *transport, struct socket *sock) |
1150 | { | 1150 | { |
1151 | struct sockaddr_in myaddr = { | 1151 | struct sockaddr_in myaddr = { |
1152 | .sin_family = AF_INET, | 1152 | .sin_family = AF_INET, |
1153 | }; | 1153 | }; |
1154 | struct sockaddr_in *sa; | ||
1154 | int err; | 1155 | int err; |
1155 | unsigned short port = transport->port; | 1156 | unsigned short port = transport->port; |
1156 | 1157 | ||
1158 | if (!transport->xprt.resvport) | ||
1159 | port = 0; | ||
1160 | sa = (struct sockaddr_in *)&transport->addr; | ||
1161 | myaddr.sin_addr = sa->sin_addr; | ||
1157 | do { | 1162 | do { |
1158 | myaddr.sin_port = htons(port); | 1163 | myaddr.sin_port = htons(port); |
1159 | err = kernel_bind(sock, (struct sockaddr *) &myaddr, | 1164 | err = kernel_bind(sock, (struct sockaddr *) &myaddr, |
1160 | sizeof(myaddr)); | 1165 | sizeof(myaddr)); |
1166 | if (!transport->xprt.resvport) | ||
1167 | break; | ||
1161 | if (err == 0) { | 1168 | if (err == 0) { |
1162 | transport->port = port; | 1169 | transport->port = port; |
1163 | dprintk("RPC: xs_bindresvport bound to port %u\n", | 1170 | break; |
1164 | port); | ||
1165 | return 0; | ||
1166 | } | 1171 | } |
1167 | if (port <= xprt_min_resvport) | 1172 | if (port <= xprt_min_resvport) |
1168 | port = xprt_max_resvport; | 1173 | port = xprt_max_resvport; |
1169 | else | 1174 | else |
1170 | port--; | 1175 | port--; |
1171 | } while (err == -EADDRINUSE && port != transport->port); | 1176 | } while (err == -EADDRINUSE && port != transport->port); |
1172 | 1177 | dprintk("RPC: xs_bind "NIPQUAD_FMT":%u: %s (%d)\n", | |
1173 | dprintk("RPC: can't bind to reserved port (%d).\n", -err); | 1178 | NIPQUAD(myaddr.sin_addr), port, err ? "failed" : "ok", err); |
1174 | return err; | 1179 | return err; |
1175 | } | 1180 | } |
1176 | 1181 | ||
@@ -1229,7 +1234,7 @@ static void xs_udp_connect_worker(struct work_struct *work) | |||
1229 | } | 1234 | } |
1230 | xs_reclassify_socket(sock); | 1235 | xs_reclassify_socket(sock); |
1231 | 1236 | ||
1232 | if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { | 1237 | if (xs_bind(transport, sock)) { |
1233 | sock_release(sock); | 1238 | sock_release(sock); |
1234 | goto out; | 1239 | goto out; |
1235 | } | 1240 | } |
@@ -1316,7 +1321,7 @@ static void xs_tcp_connect_worker(struct work_struct *work) | |||
1316 | } | 1321 | } |
1317 | xs_reclassify_socket(sock); | 1322 | xs_reclassify_socket(sock); |
1318 | 1323 | ||
1319 | if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { | 1324 | if (xs_bind(transport, sock)) { |
1320 | sock_release(sock); | 1325 | sock_release(sock); |
1321 | goto out; | 1326 | goto out; |
1322 | } | 1327 | } |
@@ -1410,18 +1415,16 @@ static void xs_connect(struct rpc_task *task) | |||
1410 | dprintk("RPC: xs_connect delayed xprt %p for %lu " | 1415 | dprintk("RPC: xs_connect delayed xprt %p for %lu " |
1411 | "seconds\n", | 1416 | "seconds\n", |
1412 | xprt, xprt->reestablish_timeout / HZ); | 1417 | xprt, xprt->reestablish_timeout / HZ); |
1413 | schedule_delayed_work(&transport->connect_worker, | 1418 | queue_delayed_work(rpciod_workqueue, |
1414 | xprt->reestablish_timeout); | 1419 | &transport->connect_worker, |
1420 | xprt->reestablish_timeout); | ||
1415 | xprt->reestablish_timeout <<= 1; | 1421 | xprt->reestablish_timeout <<= 1; |
1416 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) | 1422 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) |
1417 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; | 1423 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; |
1418 | } else { | 1424 | } else { |
1419 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); | 1425 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); |
1420 | schedule_delayed_work(&transport->connect_worker, 0); | 1426 | queue_delayed_work(rpciod_workqueue, |
1421 | 1427 | &transport->connect_worker, 0); | |
1422 | /* flush_scheduled_work can sleep... */ | ||
1423 | if (!RPC_IS_ASYNC(task)) | ||
1424 | flush_scheduled_work(); | ||
1425 | } | 1428 | } |
1426 | } | 1429 | } |
1427 | 1430 | ||
@@ -1476,7 +1479,7 @@ static struct rpc_xprt_ops xs_udp_ops = { | |||
1476 | .set_buffer_size = xs_udp_set_buffer_size, | 1479 | .set_buffer_size = xs_udp_set_buffer_size, |
1477 | .reserve_xprt = xprt_reserve_xprt_cong, | 1480 | .reserve_xprt = xprt_reserve_xprt_cong, |
1478 | .release_xprt = xprt_release_xprt_cong, | 1481 | .release_xprt = xprt_release_xprt_cong, |
1479 | .rpcbind = rpcb_getport, | 1482 | .rpcbind = rpcb_getport_async, |
1480 | .set_port = xs_set_port, | 1483 | .set_port = xs_set_port, |
1481 | .connect = xs_connect, | 1484 | .connect = xs_connect, |
1482 | .buf_alloc = rpc_malloc, | 1485 | .buf_alloc = rpc_malloc, |
@@ -1493,7 +1496,7 @@ static struct rpc_xprt_ops xs_udp_ops = { | |||
1493 | static struct rpc_xprt_ops xs_tcp_ops = { | 1496 | static struct rpc_xprt_ops xs_tcp_ops = { |
1494 | .reserve_xprt = xprt_reserve_xprt, | 1497 | .reserve_xprt = xprt_reserve_xprt, |
1495 | .release_xprt = xs_tcp_release_xprt, | 1498 | .release_xprt = xs_tcp_release_xprt, |
1496 | .rpcbind = rpcb_getport, | 1499 | .rpcbind = rpcb_getport_async, |
1497 | .set_port = xs_set_port, | 1500 | .set_port = xs_set_port, |
1498 | .connect = xs_connect, | 1501 | .connect = xs_connect, |
1499 | .buf_alloc = rpc_malloc, | 1502 | .buf_alloc = rpc_malloc, |
@@ -1505,12 +1508,12 @@ static struct rpc_xprt_ops xs_tcp_ops = { | |||
1505 | .print_stats = xs_tcp_print_stats, | 1508 | .print_stats = xs_tcp_print_stats, |
1506 | }; | 1509 | }; |
1507 | 1510 | ||
1508 | static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, unsigned int slot_table_size) | 1511 | static struct rpc_xprt *xs_setup_xprt(struct rpc_xprtsock_create *args, unsigned int slot_table_size) |
1509 | { | 1512 | { |
1510 | struct rpc_xprt *xprt; | 1513 | struct rpc_xprt *xprt; |
1511 | struct sock_xprt *new; | 1514 | struct sock_xprt *new; |
1512 | 1515 | ||
1513 | if (addrlen > sizeof(xprt->addr)) { | 1516 | if (args->addrlen > sizeof(xprt->addr)) { |
1514 | dprintk("RPC: xs_setup_xprt: address too large\n"); | 1517 | dprintk("RPC: xs_setup_xprt: address too large\n"); |
1515 | return ERR_PTR(-EBADF); | 1518 | return ERR_PTR(-EBADF); |
1516 | } | 1519 | } |
@@ -1532,8 +1535,10 @@ static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, uns | |||
1532 | return ERR_PTR(-ENOMEM); | 1535 | return ERR_PTR(-ENOMEM); |
1533 | } | 1536 | } |
1534 | 1537 | ||
1535 | memcpy(&xprt->addr, addr, addrlen); | 1538 | memcpy(&xprt->addr, args->dstaddr, args->addrlen); |
1536 | xprt->addrlen = addrlen; | 1539 | xprt->addrlen = args->addrlen; |
1540 | if (args->srcaddr) | ||
1541 | memcpy(&new->addr, args->srcaddr, args->addrlen); | ||
1537 | new->port = xs_get_random_port(); | 1542 | new->port = xs_get_random_port(); |
1538 | 1543 | ||
1539 | return xprt; | 1544 | return xprt; |
@@ -1541,22 +1546,20 @@ static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, uns | |||
1541 | 1546 | ||
1542 | /** | 1547 | /** |
1543 | * xs_setup_udp - Set up transport to use a UDP socket | 1548 | * xs_setup_udp - Set up transport to use a UDP socket |
1544 | * @addr: address of remote server | 1549 | * @args: rpc transport creation arguments |
1545 | * @addrlen: length of address in bytes | ||
1546 | * @to: timeout parameters | ||
1547 | * | 1550 | * |
1548 | */ | 1551 | */ |
1549 | struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to) | 1552 | struct rpc_xprt *xs_setup_udp(struct rpc_xprtsock_create *args) |
1550 | { | 1553 | { |
1551 | struct rpc_xprt *xprt; | 1554 | struct rpc_xprt *xprt; |
1552 | struct sock_xprt *transport; | 1555 | struct sock_xprt *transport; |
1553 | 1556 | ||
1554 | xprt = xs_setup_xprt(addr, addrlen, xprt_udp_slot_table_entries); | 1557 | xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries); |
1555 | if (IS_ERR(xprt)) | 1558 | if (IS_ERR(xprt)) |
1556 | return xprt; | 1559 | return xprt; |
1557 | transport = container_of(xprt, struct sock_xprt, xprt); | 1560 | transport = container_of(xprt, struct sock_xprt, xprt); |
1558 | 1561 | ||
1559 | if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0) | 1562 | if (ntohs(((struct sockaddr_in *)args->dstaddr)->sin_port) != 0) |
1560 | xprt_set_bound(xprt); | 1563 | xprt_set_bound(xprt); |
1561 | 1564 | ||
1562 | xprt->prot = IPPROTO_UDP; | 1565 | xprt->prot = IPPROTO_UDP; |
@@ -1572,8 +1575,8 @@ struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_ | |||
1572 | 1575 | ||
1573 | xprt->ops = &xs_udp_ops; | 1576 | xprt->ops = &xs_udp_ops; |
1574 | 1577 | ||
1575 | if (to) | 1578 | if (args->timeout) |
1576 | xprt->timeout = *to; | 1579 | xprt->timeout = *args->timeout; |
1577 | else | 1580 | else |
1578 | xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); | 1581 | xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); |
1579 | 1582 | ||
@@ -1586,22 +1589,20 @@ struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_ | |||
1586 | 1589 | ||
1587 | /** | 1590 | /** |
1588 | * xs_setup_tcp - Set up transport to use a TCP socket | 1591 | * xs_setup_tcp - Set up transport to use a TCP socket |
1589 | * @addr: address of remote server | 1592 | * @args: rpc transport creation arguments |
1590 | * @addrlen: length of address in bytes | ||
1591 | * @to: timeout parameters | ||
1592 | * | 1593 | * |
1593 | */ | 1594 | */ |
1594 | struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to) | 1595 | struct rpc_xprt *xs_setup_tcp(struct rpc_xprtsock_create *args) |
1595 | { | 1596 | { |
1596 | struct rpc_xprt *xprt; | 1597 | struct rpc_xprt *xprt; |
1597 | struct sock_xprt *transport; | 1598 | struct sock_xprt *transport; |
1598 | 1599 | ||
1599 | xprt = xs_setup_xprt(addr, addrlen, xprt_tcp_slot_table_entries); | 1600 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); |
1600 | if (IS_ERR(xprt)) | 1601 | if (IS_ERR(xprt)) |
1601 | return xprt; | 1602 | return xprt; |
1602 | transport = container_of(xprt, struct sock_xprt, xprt); | 1603 | transport = container_of(xprt, struct sock_xprt, xprt); |
1603 | 1604 | ||
1604 | if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0) | 1605 | if (ntohs(((struct sockaddr_in *)args->dstaddr)->sin_port) != 0) |
1605 | xprt_set_bound(xprt); | 1606 | xprt_set_bound(xprt); |
1606 | 1607 | ||
1607 | xprt->prot = IPPROTO_TCP; | 1608 | xprt->prot = IPPROTO_TCP; |
@@ -1616,8 +1617,8 @@ struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_ | |||
1616 | 1617 | ||
1617 | xprt->ops = &xs_tcp_ops; | 1618 | xprt->ops = &xs_tcp_ops; |
1618 | 1619 | ||
1619 | if (to) | 1620 | if (args->timeout) |
1620 | xprt->timeout = *to; | 1621 | xprt->timeout = *args->timeout; |
1621 | else | 1622 | else |
1622 | xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); | 1623 | xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); |
1623 | 1624 | ||