diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-11-11 23:11:28 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-11-11 23:11:28 -0500 |
commit | 31c1febd7a45229edb3e5d86f354e3c1df543cbb (patch) | |
tree | 9c96b4bb18d47c606d6d85d774d2523f3e47aec0 /net | |
parent | b4a237598aa740562f842db76d97465c44fb74c1 (diff) | |
parent | 0442f14b15f8e7a8b3778a9f8cf640ef89b2df26 (diff) |
Merge tag 'nfsd-4.4' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
"Apologies for coming a little late in the merge window. Fortunately
this is another fairly quiet one:
Mainly smaller bugfixes and cleanup. We're still finding some bugs
from the breakup of the big NFSv4 state lock in 3.17 -- thanks
especially to Andrew Elble and Jeff Layton for tracking down some of
the remaining races"
* tag 'nfsd-4.4' of git://linux-nfs.org/~bfields/linux:
svcrpc: document lack of some memory barriers
nfsd: fix race with open / open upgrade stateids
nfsd: eliminate sending duplicate and repeated delegations
nfsd: remove recurring workqueue job to clean DRC
SUNRPC: drop stale comment in svc_setup_socket()
nfsd: ensure that seqid morphing operations are atomic wrt to copies
nfsd: serialize layout stateid morphing operations
nfsd: improve client_has_state to check for unused openowners
nfsd: fix clid_inuse on mount with security change
sunrpc/cache: make cache flushing more reliable.
nfsd: move include of state.h from trace.c to trace.h
sunrpc: avoid warning in gss_key_timeout
lockd: get rid of reference-counted NSM RPC clients
SUNRPC: Use MSG_SENDPAGE_NOTLAST when calling sendpage()
lockd: create NSM handles per net namespace
nfsd: switch unsigned char flags in svc_fh to bools
nfsd: move svc_fh->fh_maxsize to just after fh_handle
nfsd: drop null test before destroy functions
nfsd: serialize state seqid morphing operations
Diffstat (limited to 'net')
-rw-r--r-- | net/sunrpc/auth_gss/auth_gss.c | 13 | ||||
-rw-r--r-- | net/sunrpc/cache.c | 53 | ||||
-rw-r--r-- | net/sunrpc/svcsock.c | 40 |
3 files changed, 78 insertions, 28 deletions
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index dace13d7638e..799e65b944b9 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -1411,17 +1411,16 @@ gss_key_timeout(struct rpc_cred *rc) | |||
1411 | { | 1411 | { |
1412 | struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base); | 1412 | struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base); |
1413 | struct gss_cl_ctx *ctx; | 1413 | struct gss_cl_ctx *ctx; |
1414 | unsigned long now = jiffies; | 1414 | unsigned long timeout = jiffies + (gss_key_expire_timeo * HZ); |
1415 | unsigned long expire; | 1415 | int ret = 0; |
1416 | 1416 | ||
1417 | rcu_read_lock(); | 1417 | rcu_read_lock(); |
1418 | ctx = rcu_dereference(gss_cred->gc_ctx); | 1418 | ctx = rcu_dereference(gss_cred->gc_ctx); |
1419 | if (ctx) | 1419 | if (!ctx || time_after(timeout, ctx->gc_expiry)) |
1420 | expire = ctx->gc_expiry - (gss_key_expire_timeo * HZ); | 1420 | ret = -EACCES; |
1421 | rcu_read_unlock(); | 1421 | rcu_read_unlock(); |
1422 | if (!ctx || time_after(now, expire)) | 1422 | |
1423 | return -EACCES; | 1423 | return ret; |
1424 | return 0; | ||
1425 | } | 1424 | } |
1426 | 1425 | ||
1427 | static int | 1426 | static int |
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 4a2340a54401..5e4f815c2b34 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -41,13 +41,16 @@ | |||
41 | static bool cache_defer_req(struct cache_req *req, struct cache_head *item); | 41 | static bool cache_defer_req(struct cache_req *req, struct cache_head *item); |
42 | static void cache_revisit_request(struct cache_head *item); | 42 | static void cache_revisit_request(struct cache_head *item); |
43 | 43 | ||
44 | static void cache_init(struct cache_head *h) | 44 | static void cache_init(struct cache_head *h, struct cache_detail *detail) |
45 | { | 45 | { |
46 | time_t now = seconds_since_boot(); | 46 | time_t now = seconds_since_boot(); |
47 | INIT_HLIST_NODE(&h->cache_list); | 47 | INIT_HLIST_NODE(&h->cache_list); |
48 | h->flags = 0; | 48 | h->flags = 0; |
49 | kref_init(&h->ref); | 49 | kref_init(&h->ref); |
50 | h->expiry_time = now + CACHE_NEW_EXPIRY; | 50 | h->expiry_time = now + CACHE_NEW_EXPIRY; |
51 | if (now <= detail->flush_time) | ||
52 | /* ensure it isn't already expired */ | ||
53 | now = detail->flush_time + 1; | ||
51 | h->last_refresh = now; | 54 | h->last_refresh = now; |
52 | } | 55 | } |
53 | 56 | ||
@@ -81,7 +84,7 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | |||
81 | * we might get lost if we need to | 84 | * we might get lost if we need to |
82 | * cache_put it soon. | 85 | * cache_put it soon. |
83 | */ | 86 | */ |
84 | cache_init(new); | 87 | cache_init(new, detail); |
85 | detail->init(new, key); | 88 | detail->init(new, key); |
86 | 89 | ||
87 | write_lock(&detail->hash_lock); | 90 | write_lock(&detail->hash_lock); |
@@ -116,10 +119,15 @@ EXPORT_SYMBOL_GPL(sunrpc_cache_lookup); | |||
116 | 119 | ||
117 | static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch); | 120 | static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch); |
118 | 121 | ||
119 | static void cache_fresh_locked(struct cache_head *head, time_t expiry) | 122 | static void cache_fresh_locked(struct cache_head *head, time_t expiry, |
123 | struct cache_detail *detail) | ||
120 | { | 124 | { |
125 | time_t now = seconds_since_boot(); | ||
126 | if (now <= detail->flush_time) | ||
127 | /* ensure it isn't immediately treated as expired */ | ||
128 | now = detail->flush_time + 1; | ||
121 | head->expiry_time = expiry; | 129 | head->expiry_time = expiry; |
122 | head->last_refresh = seconds_since_boot(); | 130 | head->last_refresh = now; |
123 | smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */ | 131 | smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */ |
124 | set_bit(CACHE_VALID, &head->flags); | 132 | set_bit(CACHE_VALID, &head->flags); |
125 | } | 133 | } |
@@ -149,7 +157,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail, | |||
149 | set_bit(CACHE_NEGATIVE, &old->flags); | 157 | set_bit(CACHE_NEGATIVE, &old->flags); |
150 | else | 158 | else |
151 | detail->update(old, new); | 159 | detail->update(old, new); |
152 | cache_fresh_locked(old, new->expiry_time); | 160 | cache_fresh_locked(old, new->expiry_time, detail); |
153 | write_unlock(&detail->hash_lock); | 161 | write_unlock(&detail->hash_lock); |
154 | cache_fresh_unlocked(old, detail); | 162 | cache_fresh_unlocked(old, detail); |
155 | return old; | 163 | return old; |
@@ -162,7 +170,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail, | |||
162 | cache_put(old, detail); | 170 | cache_put(old, detail); |
163 | return NULL; | 171 | return NULL; |
164 | } | 172 | } |
165 | cache_init(tmp); | 173 | cache_init(tmp, detail); |
166 | detail->init(tmp, old); | 174 | detail->init(tmp, old); |
167 | 175 | ||
168 | write_lock(&detail->hash_lock); | 176 | write_lock(&detail->hash_lock); |
@@ -173,8 +181,8 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail, | |||
173 | hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]); | 181 | hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]); |
174 | detail->entries++; | 182 | detail->entries++; |
175 | cache_get(tmp); | 183 | cache_get(tmp); |
176 | cache_fresh_locked(tmp, new->expiry_time); | 184 | cache_fresh_locked(tmp, new->expiry_time, detail); |
177 | cache_fresh_locked(old, 0); | 185 | cache_fresh_locked(old, 0, detail); |
178 | write_unlock(&detail->hash_lock); | 186 | write_unlock(&detail->hash_lock); |
179 | cache_fresh_unlocked(tmp, detail); | 187 | cache_fresh_unlocked(tmp, detail); |
180 | cache_fresh_unlocked(old, detail); | 188 | cache_fresh_unlocked(old, detail); |
@@ -219,7 +227,8 @@ static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h | |||
219 | rv = cache_is_valid(h); | 227 | rv = cache_is_valid(h); |
220 | if (rv == -EAGAIN) { | 228 | if (rv == -EAGAIN) { |
221 | set_bit(CACHE_NEGATIVE, &h->flags); | 229 | set_bit(CACHE_NEGATIVE, &h->flags); |
222 | cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY); | 230 | cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY, |
231 | detail); | ||
223 | rv = -ENOENT; | 232 | rv = -ENOENT; |
224 | } | 233 | } |
225 | write_unlock(&detail->hash_lock); | 234 | write_unlock(&detail->hash_lock); |
@@ -487,10 +496,13 @@ EXPORT_SYMBOL_GPL(cache_flush); | |||
487 | 496 | ||
488 | void cache_purge(struct cache_detail *detail) | 497 | void cache_purge(struct cache_detail *detail) |
489 | { | 498 | { |
490 | detail->flush_time = LONG_MAX; | 499 | time_t now = seconds_since_boot(); |
500 | if (detail->flush_time >= now) | ||
501 | now = detail->flush_time + 1; | ||
502 | /* 'now' is the maximum value any 'last_refresh' can have */ | ||
503 | detail->flush_time = now; | ||
491 | detail->nextcheck = seconds_since_boot(); | 504 | detail->nextcheck = seconds_since_boot(); |
492 | cache_flush(); | 505 | cache_flush(); |
493 | detail->flush_time = 1; | ||
494 | } | 506 | } |
495 | EXPORT_SYMBOL_GPL(cache_purge); | 507 | EXPORT_SYMBOL_GPL(cache_purge); |
496 | 508 | ||
@@ -1436,6 +1448,7 @@ static ssize_t write_flush(struct file *file, const char __user *buf, | |||
1436 | { | 1448 | { |
1437 | char tbuf[20]; | 1449 | char tbuf[20]; |
1438 | char *bp, *ep; | 1450 | char *bp, *ep; |
1451 | time_t then, now; | ||
1439 | 1452 | ||
1440 | if (*ppos || count > sizeof(tbuf)-1) | 1453 | if (*ppos || count > sizeof(tbuf)-1) |
1441 | return -EINVAL; | 1454 | return -EINVAL; |
@@ -1447,8 +1460,22 @@ static ssize_t write_flush(struct file *file, const char __user *buf, | |||
1447 | return -EINVAL; | 1460 | return -EINVAL; |
1448 | 1461 | ||
1449 | bp = tbuf; | 1462 | bp = tbuf; |
1450 | cd->flush_time = get_expiry(&bp); | 1463 | then = get_expiry(&bp); |
1451 | cd->nextcheck = seconds_since_boot(); | 1464 | now = seconds_since_boot(); |
1465 | cd->nextcheck = now; | ||
1466 | /* Can only set flush_time to 1 second beyond "now", or | ||
1467 | * possibly 1 second beyond flushtime. This is because | ||
1468 | * flush_time never goes backwards so it mustn't get too far | ||
1469 | * ahead of time. | ||
1470 | */ | ||
1471 | if (then >= now) { | ||
1472 | /* Want to flush everything, so behave like cache_purge() */ | ||
1473 | if (cd->flush_time >= now) | ||
1474 | now = cd->flush_time + 1; | ||
1475 | then = now; | ||
1476 | } | ||
1477 | |||
1478 | cd->flush_time = then; | ||
1452 | cache_flush(); | 1479 | cache_flush(); |
1453 | 1480 | ||
1454 | *ppos += count; | 1481 | *ppos += count; |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 0c8120229a03..1413cdcc131c 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -181,7 +181,7 @@ int svc_send_common(struct socket *sock, struct xdr_buf *xdr, | |||
181 | struct page **ppage = xdr->pages; | 181 | struct page **ppage = xdr->pages; |
182 | size_t base = xdr->page_base; | 182 | size_t base = xdr->page_base; |
183 | unsigned int pglen = xdr->page_len; | 183 | unsigned int pglen = xdr->page_len; |
184 | unsigned int flags = MSG_MORE; | 184 | unsigned int flags = MSG_MORE | MSG_SENDPAGE_NOTLAST; |
185 | int slen; | 185 | int slen; |
186 | int len = 0; | 186 | int len = 0; |
187 | 187 | ||
@@ -399,6 +399,31 @@ static int svc_sock_secure_port(struct svc_rqst *rqstp) | |||
399 | return svc_port_is_privileged(svc_addr(rqstp)); | 399 | return svc_port_is_privileged(svc_addr(rqstp)); |
400 | } | 400 | } |
401 | 401 | ||
402 | static bool sunrpc_waitqueue_active(wait_queue_head_t *wq) | ||
403 | { | ||
404 | if (!wq) | ||
405 | return false; | ||
406 | /* | ||
407 | * There should normally be a memory barrier here--see | ||
408 | * wq_has_sleeper(). | ||
409 | * | ||
410 | * It appears that isn't currently necessary, though, basically | ||
411 | * because callers all appear to have sufficient memory barriers | ||
412 | * between the time the relevant change is made and the | ||
413 | * time they call these callbacks. | ||
414 | * | ||
415 | * The nfsd code itself doesn't actually explicitly wait on | ||
416 | * these waitqueues, but it may wait on them for example in | ||
417 | * sendpage() or sendmsg() calls. (And those may be the only | ||
418 | * places, since it uses nonblocking reads.) | ||
419 | * | ||
420 | * Maybe we should add the memory barriers anyway, but these are | ||
421 | * hot paths so we'd need to be convinced there's no significant | ||
422 | * penalty. | ||
423 | */ | ||
424 | return waitqueue_active(wq); | ||
425 | } | ||
426 | |||
402 | /* | 427 | /* |
403 | * INET callback when data has been received on the socket. | 428 | * INET callback when data has been received on the socket. |
404 | */ | 429 | */ |
@@ -414,7 +439,7 @@ static void svc_udp_data_ready(struct sock *sk) | |||
414 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); | 439 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); |
415 | svc_xprt_enqueue(&svsk->sk_xprt); | 440 | svc_xprt_enqueue(&svsk->sk_xprt); |
416 | } | 441 | } |
417 | if (wq && waitqueue_active(wq)) | 442 | if (sunrpc_waitqueue_active(wq)) |
418 | wake_up_interruptible(wq); | 443 | wake_up_interruptible(wq); |
419 | } | 444 | } |
420 | 445 | ||
@@ -432,7 +457,7 @@ static void svc_write_space(struct sock *sk) | |||
432 | svc_xprt_enqueue(&svsk->sk_xprt); | 457 | svc_xprt_enqueue(&svsk->sk_xprt); |
433 | } | 458 | } |
434 | 459 | ||
435 | if (wq && waitqueue_active(wq)) { | 460 | if (sunrpc_waitqueue_active(wq)) { |
436 | dprintk("RPC svc_write_space: someone sleeping on %p\n", | 461 | dprintk("RPC svc_write_space: someone sleeping on %p\n", |
437 | svsk); | 462 | svsk); |
438 | wake_up_interruptible(wq); | 463 | wake_up_interruptible(wq); |
@@ -787,7 +812,7 @@ static void svc_tcp_listen_data_ready(struct sock *sk) | |||
787 | } | 812 | } |
788 | 813 | ||
789 | wq = sk_sleep(sk); | 814 | wq = sk_sleep(sk); |
790 | if (wq && waitqueue_active(wq)) | 815 | if (sunrpc_waitqueue_active(wq)) |
791 | wake_up_interruptible_all(wq); | 816 | wake_up_interruptible_all(wq); |
792 | } | 817 | } |
793 | 818 | ||
@@ -808,7 +833,7 @@ static void svc_tcp_state_change(struct sock *sk) | |||
808 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); | 833 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); |
809 | svc_xprt_enqueue(&svsk->sk_xprt); | 834 | svc_xprt_enqueue(&svsk->sk_xprt); |
810 | } | 835 | } |
811 | if (wq && waitqueue_active(wq)) | 836 | if (sunrpc_waitqueue_active(wq)) |
812 | wake_up_interruptible_all(wq); | 837 | wake_up_interruptible_all(wq); |
813 | } | 838 | } |
814 | 839 | ||
@@ -823,7 +848,7 @@ static void svc_tcp_data_ready(struct sock *sk) | |||
823 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); | 848 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); |
824 | svc_xprt_enqueue(&svsk->sk_xprt); | 849 | svc_xprt_enqueue(&svsk->sk_xprt); |
825 | } | 850 | } |
826 | if (wq && waitqueue_active(wq)) | 851 | if (sunrpc_waitqueue_active(wq)) |
827 | wake_up_interruptible(wq); | 852 | wake_up_interruptible(wq); |
828 | } | 853 | } |
829 | 854 | ||
@@ -1367,7 +1392,6 @@ EXPORT_SYMBOL_GPL(svc_sock_update_bufs); | |||
1367 | 1392 | ||
1368 | /* | 1393 | /* |
1369 | * Initialize socket for RPC use and create svc_sock struct | 1394 | * Initialize socket for RPC use and create svc_sock struct |
1370 | * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF. | ||
1371 | */ | 1395 | */ |
1372 | static struct svc_sock *svc_setup_socket(struct svc_serv *serv, | 1396 | static struct svc_sock *svc_setup_socket(struct svc_serv *serv, |
1373 | struct socket *sock, | 1397 | struct socket *sock, |
@@ -1594,7 +1618,7 @@ static void svc_sock_detach(struct svc_xprt *xprt) | |||
1594 | sk->sk_write_space = svsk->sk_owspace; | 1618 | sk->sk_write_space = svsk->sk_owspace; |
1595 | 1619 | ||
1596 | wq = sk_sleep(sk); | 1620 | wq = sk_sleep(sk); |
1597 | if (wq && waitqueue_active(wq)) | 1621 | if (sunrpc_waitqueue_active(wq)) |
1598 | wake_up_interruptible(wq); | 1622 | wake_up_interruptible(wq); |
1599 | } | 1623 | } |
1600 | 1624 | ||