Diffstat (limited to 'net')

-rw-r--r--  net/sunrpc/auth.c                           20
-rw-r--r--  net/sunrpc/auth_generic.c                    4
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c            6
-rw-r--r--  net/sunrpc/cache.c                         109
-rw-r--r--  net/sunrpc/clnt.c                            1
-rw-r--r--  net/sunrpc/sched.c                           7
-rw-r--r--  net/sunrpc/sunrpc.h                         14
-rw-r--r--  net/sunrpc/svc_xprt.c                       25
-rw-r--r--  net/sunrpc/svcauth_unix.c                    1
-rw-r--r--  net/sunrpc/svcsock.c                       335
-rw-r--r--  net/sunrpc/xprt.c                           15
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c     4
-rw-r--r--  net/sunrpc/xprtsock.c                      242

13 files changed, 603 insertions, 180 deletions
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 0c431c277af5..54a4e042f104 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -385,7 +385,7 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
 EXPORT_SYMBOL_GPL(rpcauth_init_cred);
 
 void
-rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred)
+rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags)
 {
 	task->tk_msg.rpc_cred = get_rpccred(cred);
 	dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid,
@@ -394,7 +394,7 @@ rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred)
 EXPORT_SYMBOL_GPL(rpcauth_generic_bind_cred);
 
 static void
-rpcauth_bind_root_cred(struct rpc_task *task)
+rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags)
 {
 	struct rpc_auth *auth = task->tk_client->cl_auth;
 	struct auth_cred acred = {
@@ -405,7 +405,7 @@ rpcauth_bind_root_cred(struct rpc_task *task)
 
 	dprintk("RPC: %5u looking up %s cred\n",
 		task->tk_pid, task->tk_client->cl_auth->au_ops->au_name);
-	ret = auth->au_ops->lookup_cred(auth, &acred, 0);
+	ret = auth->au_ops->lookup_cred(auth, &acred, lookupflags);
 	if (!IS_ERR(ret))
 		task->tk_msg.rpc_cred = ret;
 	else
@@ -413,14 +413,14 @@ rpcauth_bind_root_cred(struct rpc_task *task)
 }
 
 static void
-rpcauth_bind_new_cred(struct rpc_task *task)
+rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags)
 {
 	struct rpc_auth *auth = task->tk_client->cl_auth;
 	struct rpc_cred *ret;
 
 	dprintk("RPC: %5u looking up %s cred\n",
 		task->tk_pid, auth->au_ops->au_name);
-	ret = rpcauth_lookupcred(auth, 0);
+	ret = rpcauth_lookupcred(auth, lookupflags);
 	if (!IS_ERR(ret))
 		task->tk_msg.rpc_cred = ret;
 	else
@@ -430,12 +430,16 @@ rpcauth_bind_new_cred(struct rpc_task *task)
 void
 rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags)
 {
+	int lookupflags = 0;
+
+	if (flags & RPC_TASK_ASYNC)
+		lookupflags |= RPCAUTH_LOOKUP_NEW;
 	if (cred != NULL)
-		cred->cr_ops->crbind(task, cred);
+		cred->cr_ops->crbind(task, cred, lookupflags);
 	else if (flags & RPC_TASK_ROOTCREDS)
-		rpcauth_bind_root_cred(task);
+		rpcauth_bind_root_cred(task, lookupflags);
 	else
-		rpcauth_bind_new_cred(task);
+		rpcauth_bind_new_cred(task, lookupflags);
 }
 
 void
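The net effect of the auth.c change: a task created with RPC_TASK_ASYNC now propagates RPCAUTH_LOOKUP_NEW into every credential lookup, so the cache may hand back a not-yet-initialised cred instead of sleeping in rpciod context. A minimal userspace sketch of the flag mapping; the constant values are illustrative stand-ins, not the kernel's definitions:

    #include <stdio.h>

    #define RPC_TASK_ASYNC     0x0001  /* stand-in for the kernel flag */
    #define RPCAUTH_LOOKUP_NEW 0x01    /* "accept an uninitialised cred" */

    /* Mirrors the dispatch added to rpcauth_bindcred() above: async
     * tasks must never block waiting for a credential upcall. */
    static int task_to_lookup_flags(int task_flags)
    {
        int lookupflags = 0;

        if (task_flags & RPC_TASK_ASYNC)
            lookupflags |= RPCAUTH_LOOKUP_NEW;
        return lookupflags;
    }

    int main(void)
    {
        printf("sync  -> %#x\n", task_to_lookup_flags(0));
        printf("async -> %#x\n", task_to_lookup_flags(RPC_TASK_ASYNC));
        return 0;
    }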
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 4028502f0528..bf88bf8e9365 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -55,13 +55,13 @@ struct rpc_cred *rpc_lookup_machine_cred(void)
 EXPORT_SYMBOL_GPL(rpc_lookup_machine_cred);
 
 static void
-generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred)
+generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags)
 {
 	struct rpc_auth *auth = task->tk_client->cl_auth;
 	struct auth_cred *acred = &container_of(cred, struct generic_cred, gc_base)->acred;
 	struct rpc_cred *ret;
 
-	ret = auth->au_ops->lookup_cred(auth, acred, 0);
+	ret = auth->au_ops->lookup_cred(auth, acred, lookupflags);
 	if (!IS_ERR(ret))
 		task->tk_msg.rpc_cred = ret;
 	else
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 2e6a148d277c..f6c51e562a02 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1374,8 +1374,10 @@ svcauth_gss_release(struct svc_rqst *rqstp)
 		if (stat)
 			goto out_err;
 		break;
-	default:
-		goto out_err;
+	/*
+	 * For any other gc_svc value, svcauth_gss_accept() already set
+	 * the auth_error appropriately; just fall through:
+	 */
 	}
 
 out:
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 45cdaff9b361..d6eee291a0e2 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -103,23 +103,21 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
 EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
 
 
-static void queue_loose(struct cache_detail *detail, struct cache_head *ch);
+static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
 
-static int cache_fresh_locked(struct cache_head *head, time_t expiry)
+static void cache_fresh_locked(struct cache_head *head, time_t expiry)
 {
 	head->expiry_time = expiry;
 	head->last_refresh = get_seconds();
-	return !test_and_set_bit(CACHE_VALID, &head->flags);
+	set_bit(CACHE_VALID, &head->flags);
 }
 
 static void cache_fresh_unlocked(struct cache_head *head,
-				 struct cache_detail *detail, int new)
+				 struct cache_detail *detail)
 {
-	if (new)
-		cache_revisit_request(head);
 	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
 		cache_revisit_request(head);
-		queue_loose(detail, head);
+		cache_dequeue(detail, head);
 	}
 }
 
@@ -132,7 +130,6 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
 	 */
 	struct cache_head **head;
 	struct cache_head *tmp;
-	int is_new;
 
 	if (!test_bit(CACHE_VALID, &old->flags)) {
 		write_lock(&detail->hash_lock);
@@ -141,9 +138,9 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
 			set_bit(CACHE_NEGATIVE, &old->flags);
 		else
 			detail->update(old, new);
-		is_new = cache_fresh_locked(old, new->expiry_time);
+		cache_fresh_locked(old, new->expiry_time);
 		write_unlock(&detail->hash_lock);
-		cache_fresh_unlocked(old, detail, is_new);
+		cache_fresh_unlocked(old, detail);
 		return old;
 	}
 	write_unlock(&detail->hash_lock);
@@ -167,11 +164,11 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
 	*head = tmp;
 	detail->entries++;
 	cache_get(tmp);
-	is_new = cache_fresh_locked(tmp, new->expiry_time);
+	cache_fresh_locked(tmp, new->expiry_time);
 	cache_fresh_locked(old, 0);
 	write_unlock(&detail->hash_lock);
-	cache_fresh_unlocked(tmp, detail, is_new);
-	cache_fresh_unlocked(old, detail, 0);
+	cache_fresh_unlocked(tmp, detail);
+	cache_fresh_unlocked(old, detail);
 	cache_put(old, detail);
 	return tmp;
 }
@@ -184,6 +181,22 @@ static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
 	return cd->cache_upcall(cd, h);
 }
 
+static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
+{
+	if (!test_bit(CACHE_VALID, &h->flags) ||
+	    h->expiry_time < get_seconds())
+		return -EAGAIN;
+	else if (detail->flush_time > h->last_refresh)
+		return -EAGAIN;
+	else {
+		/* entry is valid */
+		if (test_bit(CACHE_NEGATIVE, &h->flags))
+			return -ENOENT;
+		else
+			return 0;
+	}
+}
+
 /*
  * This is the generic cache management routine for all
  * the authentication caches.
@@ -192,8 +205,10 @@ static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
  *
  *
  * Returns 0 if the cache_head can be used, or cache_puts it and returns
- * -EAGAIN if upcall is pending,
- * -ETIMEDOUT if upcall failed and should be retried,
+ * -EAGAIN if upcall is pending and request has been queued
+ * -ETIMEDOUT if upcall failed or request could not be queued or
+ *            upcall completed but item is still invalid (implying that
+ *            the cache item has been replaced with a newer one).
  * -ENOENT if cache entry was negative
  */
 int cache_check(struct cache_detail *detail,
@@ -203,17 +218,7 @@ int cache_check(struct cache_detail *detail,
 	long refresh_age, age;
 
 	/* First decide return status as best we can */
-	if (!test_bit(CACHE_VALID, &h->flags) ||
-	    h->expiry_time < get_seconds())
-		rv = -EAGAIN;
-	else if (detail->flush_time > h->last_refresh)
-		rv = -EAGAIN;
-	else {
-		/* entry is valid */
-		if (test_bit(CACHE_NEGATIVE, &h->flags))
-			rv = -ENOENT;
-		else rv = 0;
-	}
+	rv = cache_is_valid(detail, h);
 
 	/* now see if we want to start an upcall */
 	refresh_age = (h->expiry_time - h->last_refresh);
@@ -229,10 +234,11 @@ int cache_check(struct cache_detail *detail,
 		switch (cache_make_upcall(detail, h)) {
 		case -EINVAL:
 			clear_bit(CACHE_PENDING, &h->flags);
+			cache_revisit_request(h);
 			if (rv == -EAGAIN) {
 				set_bit(CACHE_NEGATIVE, &h->flags);
-				cache_fresh_unlocked(h, detail,
-					cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY));
+				cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY);
+				cache_fresh_unlocked(h, detail);
 				rv = -ENOENT;
 			}
 			break;
@@ -245,10 +251,14 @@ int cache_check(struct cache_detail *detail,
 		}
 	}
 
-	if (rv == -EAGAIN)
-		if (cache_defer_req(rqstp, h) != 0)
-			rv = -ETIMEDOUT;
-
+	if (rv == -EAGAIN) {
+		if (cache_defer_req(rqstp, h) < 0) {
+			/* Request is not deferred */
+			rv = cache_is_valid(detail, h);
+			if (rv == -EAGAIN)
+				rv = -ETIMEDOUT;
+		}
+	}
 	if (rv)
 		cache_put(h, detail);
 	return rv;
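The split into cache_is_valid() also makes the retry path above testable on its own. A self-contained model of the validity decision, with plain fields standing in for struct cache_head / cache_detail:

    #include <errno.h>
    #include <stdio.h>
    #include <time.h>

    struct entry {                      /* stand-in for struct cache_head */
        int    valid;                   /* CACHE_VALID */
        int    negative;                /* CACHE_NEGATIVE */
        time_t expiry_time;
        time_t last_refresh;
    };

    /* Mirrors cache_is_valid(); flush_time stands in for detail->flush_time. */
    static int entry_is_valid(time_t flush_time, const struct entry *h)
    {
        if (!h->valid || h->expiry_time < time(NULL))
            return -EAGAIN;             /* needs (re)validation via upcall */
        if (flush_time > h->last_refresh)
            return -EAGAIN;             /* administratively flushed */
        return h->negative ? -ENOENT : 0;
    }

    int main(void)
    {
        struct entry e = { .valid = 1, .expiry_time = time(NULL) + 60,
                           .last_refresh = time(NULL) };

        printf("fresh positive -> %d\n", entry_is_valid(0, &e));
        e.negative = 1;
        printf("fresh negative -> %d\n", entry_is_valid(0, &e));
        return 0;
    }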
@@ -396,7 +406,7 @@ static int cache_clean(void)
 		    )
 			continue;
 		if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
-			queue_loose(current_detail, ch);
+			cache_dequeue(current_detail, ch);
 
 		if (atomic_read(&ch->ref.refcount) == 1)
 			break;
@@ -412,8 +422,10 @@ static int cache_clean(void)
 		if (!ch)
 			current_index ++;
 		spin_unlock(&cache_list_lock);
-		if (ch)
+		if (ch) {
+			cache_revisit_request(ch);
 			cache_put(ch, d);
+		}
 	} else
 		spin_unlock(&cache_list_lock);
 
@@ -488,7 +500,7 @@ static int cache_defer_cnt;
 
 static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 {
-	struct cache_deferred_req *dreq;
+	struct cache_deferred_req *dreq, *discard;
 	int hash = DFR_HASH(item);
 
 	if (cache_defer_cnt >= DFR_MAX) {
@@ -496,11 +508,11 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 		 * or continue and drop the oldest below
 		 */
 		if (net_random()&1)
-			return -ETIMEDOUT;
+			return -ENOMEM;
 	}
 	dreq = req->defer(req);
 	if (dreq == NULL)
-		return -ETIMEDOUT;
+		return -ENOMEM;
 
 	dreq->item = item;
 
@@ -513,23 +525,24 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 	list_add(&dreq->hash, &cache_defer_hash[hash]);
 
 	/* it is in, now maybe clean up */
-	dreq = NULL;
+	discard = NULL;
 	if (++cache_defer_cnt > DFR_MAX) {
-		dreq = list_entry(cache_defer_list.prev,
-				  struct cache_deferred_req, recent);
-		list_del(&dreq->recent);
-		list_del(&dreq->hash);
+		discard = list_entry(cache_defer_list.prev,
+				     struct cache_deferred_req, recent);
+		list_del_init(&discard->recent);
+		list_del_init(&discard->hash);
 		cache_defer_cnt--;
 	}
 	spin_unlock(&cache_defer_lock);
 
-	if (dreq) {
+	if (discard)
 		/* there was one too many */
-		dreq->revisit(dreq, 1);
-	}
+		discard->revisit(discard, 1);
+
 	if (!test_bit(CACHE_PENDING, &item->flags)) {
 		/* must have just been validated... */
 		cache_revisit_request(item);
+		return -EAGAIN;
 	}
 	return 0;
 }
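Worth spelling out the admission policy those -ENOMEM returns feed: once DFR_MAX requests are deferred, a coin flip (net_random()&1) refuses half of the newcomers outright, and each admitted one that overflows the cap evicts the oldest entry instead. A reduced, runnable model of the bookkeeping, with the list handling elided to a comment:

    #include <errno.h>
    #include <stdlib.h>

    #define DFR_MAX 300                 /* kernel's cap on deferred requests */

    /* Mirrors the admission policy of cache_defer_req() above. */
    static int defer_admit(int *defer_cnt)
    {
        if (*defer_cnt >= DFR_MAX && (rand() & 1))
            return -ENOMEM;             /* caller falls back to cache_is_valid() */
        if (++(*defer_cnt) > DFR_MAX) {
            /* evict oldest: list_del_init() both lists, then revisit(.., 1) */
            (*defer_cnt)--;
        }
        return 0;
    }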
@@ -551,7 +564,7 @@ static void cache_revisit_request(struct cache_head *item)
 		dreq = list_entry(lp, struct cache_deferred_req, hash);
 		lp = lp->next;
 		if (dreq->item == item) {
-			list_del(&dreq->hash);
+			list_del_init(&dreq->hash);
 			list_move(&dreq->recent, &pending);
 			cache_defer_cnt--;
 		}
@@ -577,7 +590,7 @@ void cache_clean_deferred(void *owner)
 
 	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
 		if (dreq->owner == owner) {
-			list_del(&dreq->hash);
+			list_del_init(&dreq->hash);
 			list_move(&dreq->recent, &pending);
 			cache_defer_cnt--;
 		}
@@ -887,7 +900,7 @@ static int cache_release(struct inode *inode, struct file *filp,
 
 
 
-static void queue_loose(struct cache_detail *detail, struct cache_head *ch)
+static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
 {
 	struct cache_queue *cq;
 	spin_lock(&queue_lock);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index fac0ca93f06b..a417d5ab5dd7 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -288,6 +288,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
 		.srcaddr = args->saddress,
 		.dstaddr = args->address,
 		.addrlen = args->addrsize,
+		.bc_xprt = args->bc_xprt,
 	};
 	char servername[48];
 
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 8f459abe97cf..cef74ba0666c 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -21,6 +21,8 @@
 
 #include <linux/sunrpc/clnt.h>
 
+#include "sunrpc.h"
+
 #ifdef RPC_DEBUG
 #define RPCDBG_FACILITY		RPCDBG_SCHED
 #define RPC_TASK_MAGIC_ID	0xf00baa
@@ -711,11 +713,6 @@ static void rpc_async_schedule(struct work_struct *work)
 	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
 }
 
-struct rpc_buffer {
-	size_t	len;
-	char	data[];
-};
-
 /**
  * rpc_malloc - allocate an RPC buffer
  * @task: RPC task that will use this buffer
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
index 5d9dd742264b..90c292e2738b 100644
--- a/net/sunrpc/sunrpc.h
+++ b/net/sunrpc/sunrpc.h
@@ -27,11 +27,25 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #ifndef _NET_SUNRPC_SUNRPC_H
 #define _NET_SUNRPC_SUNRPC_H
 
+#include <linux/net.h>
+
+/*
+ * Header for dynamically allocated rpc buffers.
+ */
+struct rpc_buffer {
+	size_t	len;
+	char	data[];
+};
+
 static inline int rpc_reply_expected(struct rpc_task *task)
 {
 	return (task->tk_msg.rpc_proc != NULL) &&
 		(task->tk_msg.rpc_proc->p_decode != NULL);
 }
 
+int svc_send_common(struct socket *sock, struct xdr_buf *xdr,
+		    struct page *headpage, unsigned long headoffset,
+		    struct page *tailpage, unsigned long tailoffset);
+
 #endif /* _NET_SUNRPC_SUNRPC_H */
 
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 27d44332f017..df124f78ee48 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -160,6 +160,7 @@ void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
 	mutex_init(&xprt->xpt_mutex);
 	spin_lock_init(&xprt->xpt_lock);
 	set_bit(XPT_BUSY, &xprt->xpt_flags);
+	rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
 }
 EXPORT_SYMBOL_GPL(svc_xprt_init);
 
@@ -710,10 +711,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	spin_unlock_bh(&pool->sp_lock);
 
 	len = 0;
-	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
-		dprintk("svc_recv: found XPT_CLOSE\n");
-		svc_delete_xprt(xprt);
-	} else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
 		struct svc_xprt *newxpt;
 		newxpt = xprt->xpt_ops->xpo_accept(xprt);
 		if (newxpt) {
@@ -739,7 +737,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 			svc_xprt_received(newxpt);
 		}
 		svc_xprt_received(xprt);
-	} else {
+	} else if (!test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
 		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
 			rqstp, pool->sp_id, xprt,
 			atomic_read(&xprt->xpt_ref.refcount));
@@ -752,6 +750,11 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		dprintk("svc: got len=%d\n", len);
 	}
 
+	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
+		dprintk("svc_recv: found XPT_CLOSE\n");
+		svc_delete_xprt(xprt);
+	}
+
 	/* No data, incomplete (TCP) read, or accept() */
 	if (len == 0 || len == -EAGAIN) {
 		rqstp->rq_res.len = 0;
@@ -808,6 +811,7 @@ int svc_send(struct svc_rqst *rqstp)
 	else
 		len = xprt->xpt_ops->xpo_sendto(rqstp);
 	mutex_unlock(&xprt->xpt_mutex);
+	rpc_wake_up(&xprt->xpt_bc_pending);
 	svc_xprt_release(rqstp);
 
 	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
@@ -1166,11 +1170,6 @@ static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
 
 	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);
 
-	lock_kernel();
-	/* bump up the pseudo refcount while traversing */
-	svc_get(serv);
-	unlock_kernel();
-
 	if (!pidx)
 		return SEQ_START_TOKEN;
 	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
@@ -1198,12 +1197,6 @@ static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
 
 static void svc_pool_stats_stop(struct seq_file *m, void *p)
 {
-	struct svc_serv *serv = m->private;
-
-	lock_kernel();
-	/* this function really, really should have been called svc_put() */
-	svc_destroy(serv);
-	unlock_kernel();
 }
 
 static int svc_pool_stats_show(struct seq_file *m, void *p)
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 6caffa34ac01..117f68a8aa40 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -668,6 +668,7 @@ static int unix_gid_find(uid_t uid, struct group_info **gip,
 	case 0:
 		*gip = ug->gi;
 		get_group_info(*gip);
+		cache_put(&ug->h, &unix_gid_cache);
 		return 0;
 	default:
 		return -EAGAIN;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 23128ee191ae..ccc5e83cae5d 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -49,6 +49,7 @@
 #include <linux/sunrpc/msg_prot.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/stats.h>
+#include <linux/sunrpc/xprt.h>
 
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 
@@ -153,49 +154,27 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
 }
 
 /*
- * Generic sendto routine
+ * send routine intended to be shared by the fore- and back-channel
 */
-static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
+int svc_send_common(struct socket *sock, struct xdr_buf *xdr,
+		    struct page *headpage, unsigned long headoffset,
+		    struct page *tailpage, unsigned long tailoffset)
 {
-	struct svc_sock	*svsk =
-		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
-	struct socket	*sock = svsk->sk_sock;
-	int		slen;
-	union {
-		struct cmsghdr	hdr;
-		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
-	} buffer;
-	struct cmsghdr *cmh = &buffer.hdr;
-	int		len = 0;
 	int		result;
 	int		size;
 	struct page	**ppage = xdr->pages;
 	size_t		base = xdr->page_base;
 	unsigned int	pglen = xdr->page_len;
 	unsigned int	flags = MSG_MORE;
-	RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
+	int		slen;
+	int		len = 0;
 
 	slen = xdr->len;
 
-	if (rqstp->rq_prot == IPPROTO_UDP) {
-		struct msghdr msg = {
-			.msg_name	= &rqstp->rq_addr,
-			.msg_namelen	= rqstp->rq_addrlen,
-			.msg_control	= cmh,
-			.msg_controllen	= sizeof(buffer),
-			.msg_flags	= MSG_MORE,
-		};
-
-		svc_set_cmsg_data(rqstp, cmh);
-
-		if (sock_sendmsg(sock, &msg, 0) < 0)
-			goto out;
-	}
-
 	/* send head */
 	if (slen == xdr->head[0].iov_len)
 		flags = 0;
-	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
+	len = kernel_sendpage(sock, headpage, headoffset,
 			      xdr->head[0].iov_len, flags);
 	if (len != xdr->head[0].iov_len)
 		goto out;
@@ -219,16 +198,58 @@ static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
 		base = 0;
 		ppage++;
 	}
+
 	/* send tail */
 	if (xdr->tail[0].iov_len) {
-		result = kernel_sendpage(sock, rqstp->rq_respages[0],
-					 ((unsigned long)xdr->tail[0].iov_base)
-						& (PAGE_SIZE-1),
-					 xdr->tail[0].iov_len, 0);
-
+		result = kernel_sendpage(sock, tailpage, tailoffset,
+					 xdr->tail[0].iov_len, 0);
 		if (result > 0)
 			len += result;
 	}
+
+out:
+	return len;
+}
+
+
+/*
+ * Generic sendto routine
+ */
+static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
+{
+	struct svc_sock	*svsk =
+		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
+	struct socket	*sock = svsk->sk_sock;
+	union {
+		struct cmsghdr	hdr;
+		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
+	} buffer;
+	struct cmsghdr *cmh = &buffer.hdr;
+	int		len = 0;
+	unsigned long tailoff;
+	unsigned long headoff;
+	RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
+
+	if (rqstp->rq_prot == IPPROTO_UDP) {
+		struct msghdr msg = {
+			.msg_name	= &rqstp->rq_addr,
+			.msg_namelen	= rqstp->rq_addrlen,
+			.msg_control	= cmh,
+			.msg_controllen	= sizeof(buffer),
+			.msg_flags	= MSG_MORE,
+		};
+
+		svc_set_cmsg_data(rqstp, cmh);
+
+		if (sock_sendmsg(sock, &msg, 0) < 0)
+			goto out;
+	}
+
+	tailoff = ((unsigned long)xdr->tail[0].iov_base) & (PAGE_SIZE-1);
+	headoff = 0;
+	len = svc_send_common(sock, xdr, rqstp->rq_respages[0], headoff,
+			      rqstp->rq_respages[0], tailoff);
+
 out:
 	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
 		svsk, xdr->head[0].iov_base, xdr->head[0].iov_len,
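The walk in svc_send_common() boils down to: head, each page, tail, with MSG_MORE on everything except the final segment so TCP can coalesce the whole record. A hedged userspace sketch, with plain send() standing in for kernel_sendpage():

    #include <stddef.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Each xdr_buf segment (head, the pages, tail) goes out with
     * MSG_MORE except the last; a short write stops the walk, as in
     * the original. */
    static ssize_t send_segments(int sock, const struct iovec *seg, int nseg)
    {
        ssize_t total = 0;

        for (int i = 0; i < nseg; i++) {
            int flags = (i < nseg - 1) ? MSG_MORE : 0;
            ssize_t n = send(sock, seg[i].iov_base, seg[i].iov_len, flags);

            if (n < 0)
                return total ? total : -1;
            total += n;
            if ((size_t)n < seg[i].iov_len)
                break;          /* partial send: return what went out */
        }
        return total;
    }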
@@ -432,29 +453,49 @@ static void svc_tcp_write_space(struct sock *sk)
 }
 
 /*
+ * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo
+ */
+static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
+				     struct cmsghdr *cmh)
+{
+	struct in_pktinfo *pki = CMSG_DATA(cmh);
+	if (cmh->cmsg_type != IP_PKTINFO)
+		return 0;
+	rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
+	return 1;
+}
+
+/*
+ * See net/ipv6/datagram.c : datagram_recv_ctl
+ */
+static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
+				     struct cmsghdr *cmh)
+{
+	struct in6_pktinfo *pki = CMSG_DATA(cmh);
+	if (cmh->cmsg_type != IPV6_PKTINFO)
+		return 0;
+	ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
+	return 1;
+}
+
+/*
 * Copy the UDP datagram's destination address to the rqstp structure.
 * The 'destination' address in this case is the address to which the
 * peer sent the datagram, i.e. our local address. For multihomed
 * hosts, this can change from msg to msg. Note that only the IP
 * address changes, the port number should remain the same.
 */
-static void svc_udp_get_dest_address(struct svc_rqst *rqstp,
-				     struct cmsghdr *cmh)
+static int svc_udp_get_dest_address(struct svc_rqst *rqstp,
+				    struct cmsghdr *cmh)
 {
-	struct svc_sock *svsk =
-		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
-	switch (svsk->sk_sk->sk_family) {
-	case AF_INET: {
-		struct in_pktinfo *pki = CMSG_DATA(cmh);
-		rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
-		break;
-	}
-	case AF_INET6: {
-		struct in6_pktinfo *pki = CMSG_DATA(cmh);
-		ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
-		break;
-	}
+	switch (cmh->cmsg_level) {
+	case SOL_IP:
+		return svc_udp_get_dest_address4(rqstp, cmh);
+	case SOL_IPV6:
+		return svc_udp_get_dest_address6(rqstp, cmh);
 	}
+
+	return 0;
 }
 
 /*
@@ -531,16 +572,15 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
 
 	rqstp->rq_prot = IPPROTO_UDP;
 
-	if (cmh->cmsg_level != IPPROTO_IP ||
-	    cmh->cmsg_type != IP_PKTINFO) {
+	if (!svc_udp_get_dest_address(rqstp, cmh)) {
 		if (net_ratelimit())
-			printk("rpcsvc: received unknown control message:"
-			       "%d/%d\n",
-			       cmh->cmsg_level, cmh->cmsg_type);
+			printk(KERN_WARNING
+				"svc: received unknown control message %d/%d; "
+				"dropping RPC reply datagram\n",
+				cmh->cmsg_level, cmh->cmsg_type);
 		skb_free_datagram(svsk->sk_sk, skb);
 		return 0;
 	}
-	svc_udp_get_dest_address(rqstp, cmh);
 
 	if (skb_is_nonlinear(skb)) {
 		/* we have to copy */
@@ -651,8 +691,7 @@ static struct svc_xprt_class svc_udp_class = {
 
 static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
 {
-	int one = 1;
-	mm_segment_t oldfs;
+	int err, level, optname, one = 1;
 
 	svc_xprt_init(&svc_udp_class, &svsk->sk_xprt, serv);
 	clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
@@ -671,12 +710,22 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 	set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 
-	oldfs = get_fs();
-	set_fs(KERNEL_DS);
 	/* make sure we get destination address info */
-	svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
-				       (char __user *)&one, sizeof(one));
-	set_fs(oldfs);
+	switch (svsk->sk_sk->sk_family) {
+	case AF_INET:
+		level = SOL_IP;
+		optname = IP_PKTINFO;
+		break;
+	case AF_INET6:
+		level = SOL_IPV6;
+		optname = IPV6_RECVPKTINFO;
+		break;
+	default:
+		BUG();
+	}
+	err = kernel_setsockopt(svsk->sk_sock, level, optname,
+					(char *)&one, sizeof(one));
+	dprintk("svc: kernel_setsockopt returned %d\n", err);
 }
 
 /*
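For reference, the same pktinfo technique is available to userspace servers: enable IP_PKTINFO with setsockopt() (the analogue of the kernel_setsockopt() call above, e.g. `int one = 1; setsockopt(fd, SOL_IP, IP_PKTINFO, &one, sizeof(one));`), then pull the datagram's destination address out of the control messages the way svc_udp_get_dest_address4() does:

    #define _GNU_SOURCE             /* for struct in_pktinfo on glibc */
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Receive one datagram and report the local address it was sent to. */
    static ssize_t recv_with_dest(int fd, void *buf, size_t len,
                                  struct in_addr *dst)
    {
        char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = {
            .msg_iov = &iov, .msg_iovlen = 1,
            .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
        };
        ssize_t n = recvmsg(fd, &msg, 0);

        if (n < 0)
            return n;
        for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c;
             c = CMSG_NXTHDR(&msg, c))
            if (c->cmsg_level == SOL_IP && c->cmsg_type == IP_PKTINFO) {
                struct in_pktinfo pki;

                memcpy(&pki, CMSG_DATA(c), sizeof(pki));
                *dst = pki.ipi_spec_dst;    /* address the peer sent to */
            }
        return n;
    }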
@@ -826,21 +875,15 @@ failed:
 }
 
 /*
- * Receive data from a TCP socket.
+ * Receive data.
+ * If we haven't gotten the record length yet, get the next four bytes.
+ * Otherwise try to gobble up as much as possible up to the complete
+ * record length.
 */
-static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
+static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
 {
-	struct svc_sock	*svsk =
-		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
 	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
 	int		len;
-	struct kvec *vec;
-	int pnum, vlen;
-
-	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
-		svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
-		test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
-		test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
 
 	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
 		/* sndbuf needs to have room for one request
@@ -861,10 +904,6 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 
 	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 
-	/* Receive data. If we haven't got the record length yet, get
-	 * the next four bytes. Otherwise try to gobble up as much as
-	 * possible up to the complete record length.
-	 */
 	if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
 		int		want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
 		struct kvec	iov;
@@ -879,7 +918,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 			dprintk("svc: short recvfrom while reading record "
 				"length (%d of %d)\n", len, want);
 			svc_xprt_received(&svsk->sk_xprt);
-			return -EAGAIN; /* record header not complete */
+			goto err_again; /* record header not complete */
 		}
 
 		svsk->sk_reclen = ntohl(svsk->sk_reclen);
@@ -894,6 +933,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 				"per record not supported\n");
 			goto err_delete;
 		}
+
 		svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK;
 		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
 		if (svsk->sk_reclen > serv->sv_max_mesg) {
@@ -914,17 +954,121 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		dprintk("svc: incomplete TCP record (%d of %d)\n",
 			len, svsk->sk_reclen);
 		svc_xprt_received(&svsk->sk_xprt);
-		return -EAGAIN;	/* record not complete */
+		goto err_again;	/* record not complete */
 	}
 	len = svsk->sk_reclen;
 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 
+	return len;
+error:
+	if (len == -EAGAIN) {
+		dprintk("RPC: TCP recv_record got EAGAIN\n");
+		svc_xprt_received(&svsk->sk_xprt);
+	}
+	return len;
+err_delete:
+	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+err_again:
+	return -EAGAIN;
+}
+
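The four bytes accumulated in sk_reclen are the standard RPC record marker for stream transports (RFC 1831, section 10): the top bit flags the last fragment of a record, the low 31 bits carry the fragment length. A self-contained decoder, with the two masks mirroring the kernel's msg_prot.h definitions:

    #include <stdint.h>
    #include <arpa/inet.h>

    #define LAST_FRAGMENT       (1U << 31)      /* RPC_LAST_STREAM_FRAGMENT */
    #define FRAGMENT_SIZE_MASK  (~LAST_FRAGMENT)/* RPC_FRAGMENT_SIZE_MASK   */

    static void parse_record_marker(uint32_t wire, uint32_t *size, int *last)
    {
        uint32_t host = ntohl(wire);    /* marker travels in network order */

        *last = (host & LAST_FRAGMENT) != 0;
        *size = host & FRAGMENT_SIZE_MASK;
    }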
+static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp,
+			       struct rpc_rqst **reqpp, struct kvec *vec)
+{
+	struct rpc_rqst *req = NULL;
+	u32 *p;
+	u32 xid;
+	u32 calldir;
+	int len;
+
+	len = svc_recvfrom(rqstp, vec, 1, 8);
+	if (len < 0)
+		goto error;
+
+	p = (u32 *)rqstp->rq_arg.head[0].iov_base;
+	xid = *p++;
+	calldir = *p;
+
+	if (calldir == 0) {
+		/* REQUEST is the most common case */
+		vec[0] = rqstp->rq_arg.head[0];
+	} else {
+		/* REPLY */
+		if (svsk->sk_bc_xprt)
+			req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid);
+
+		if (!req) {
+			printk(KERN_NOTICE
+				"%s: Got unrecognized reply: "
+				"calldir 0x%x sk_bc_xprt %p xid %08x\n",
+				__func__, ntohl(calldir),
+				svsk->sk_bc_xprt, xid);
+			vec[0] = rqstp->rq_arg.head[0];
+			goto out;
+		}
+
+		memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
+		       sizeof(struct xdr_buf));
+		/* copy the xid and call direction */
+		memcpy(req->rq_private_buf.head[0].iov_base,
+		       rqstp->rq_arg.head[0].iov_base, 8);
+		vec[0] = req->rq_private_buf.head[0];
+	}
+out:
+	vec[0].iov_base += 8;
+	vec[0].iov_len -= 8;
+	len = svsk->sk_reclen - 8;
+error:
+	*reqpp = req;
+	return len;
+}
+
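svc_process_calldir() exploits the fixed RPC message layout: XID first, then the message-type word (0 = CALL, 1 = REPLY), so a REPLY arriving on a server socket can only be the answer to one of the server's own backchannel callbacks. A small model of the demultiplexing step:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    /* Peek at the first 8 bytes of a TCP record: XID, then call
     * direction.  Returns non-zero for a REPLY. */
    static int record_is_reply(const unsigned char hdr[8], uint32_t *xid)
    {
        uint32_t words[2];

        memcpy(words, hdr, sizeof(words));
        *xid = words[0];                /* left in network order, like rq_xid */
        return ntohl(words[1]) == 1;    /* 0 = CALL, 1 = REPLY */
    }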
+/*
+ * Receive data from a TCP socket.
+ */
+static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
+{
+	struct svc_sock	*svsk =
+		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
+	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
+	int		len;
+	struct kvec *vec;
+	int pnum, vlen;
+	struct rpc_rqst *req = NULL;
+
+	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
+		svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
+		test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
+		test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
+
+	len = svc_tcp_recv_record(svsk, rqstp);
+	if (len < 0)
+		goto error;
+
 	vec = rqstp->rq_vec;
 	vec[0] = rqstp->rq_arg.head[0];
 	vlen = PAGE_SIZE;
+
+	/*
+	 * We have enough data for the whole tcp record. Let's try and read the
+	 * first 8 bytes to get the xid and the call direction. We can use this
+	 * to figure out if this is a call or a reply to a callback. If
+	 * sk_reclen is < 8 (xid and calldir), then this is a malformed packet.
+	 * In that case, don't bother with the calldir and just read the data.
+	 * It will be rejected in svc_process.
+	 */
+	if (len >= 8) {
+		len = svc_process_calldir(svsk, rqstp, &req, vec);
+		if (len < 0)
+			goto err_again;
+		vlen -= 8;
+	}
+
 	pnum = 1;
 	while (vlen < len) {
-		vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
+		vec[pnum].iov_base = (req) ?
+			page_address(req->rq_private_buf.pages[pnum - 1]) :
+			page_address(rqstp->rq_pages[pnum]);
 		vec[pnum].iov_len = PAGE_SIZE;
 		pnum++;
 		vlen += PAGE_SIZE;
@@ -934,8 +1078,18 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	/* Now receive data */
 	len = svc_recvfrom(rqstp, vec, pnum, len);
 	if (len < 0)
-		goto error;
+		goto err_again;
 
+	/*
+	 * Account for the 8 bytes we read earlier
+	 */
+	len += 8;
+
+	if (req) {
+		xprt_complete_rqst(req->rq_task, len);
+		len = 0;
+		goto out;
+	}
 	dprintk("svc: TCP complete record (%d bytes)\n", len);
 	rqstp->rq_arg.len = len;
 	rqstp->rq_arg.page_base = 0;
@@ -949,6 +1103,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	rqstp->rq_xprt_ctxt   = NULL;
 	rqstp->rq_prot	      = IPPROTO_TCP;
 
+out:
 	/* Reset TCP read info */
 	svsk->sk_reclen = 0;
 	svsk->sk_tcplen = 0;
@@ -960,21 +1115,19 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 
 	return len;
 
-err_delete:
-	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
-	return -EAGAIN;
-
-error:
+err_again:
 	if (len == -EAGAIN) {
 		dprintk("RPC: TCP recvfrom got EAGAIN\n");
 		svc_xprt_received(&svsk->sk_xprt);
-	} else {
+		return len;
+	}
+error:
+	if (len != -EAGAIN) {
 		printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
 		       svsk->sk_xprt.xpt_server->sv_name, -len);
-		goto err_delete;
+		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 	}
-
-	return len;
+	return -EAGAIN;
 }
 
 /*
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index f412a852bc73..fd46d42afa89 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -832,6 +832,11 @@ static void xprt_timer(struct rpc_task *task)
 	spin_unlock_bh(&xprt->transport_lock);
 }
 
+static inline int xprt_has_timer(struct rpc_xprt *xprt)
+{
+	return xprt->idle_timeout != 0;
+}
+
 /**
  * xprt_prepare_transmit - reserve the transport before sending a request
  * @task: RPC task about to send a request
@@ -1013,7 +1018,7 @@ void xprt_release(struct rpc_task *task)
 	if (!list_empty(&req->rq_list))
 		list_del(&req->rq_list);
 	xprt->last_used = jiffies;
-	if (list_empty(&xprt->recv))
+	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
 		mod_timer(&xprt->timer,
 				xprt->last_used + xprt->idle_timeout);
 	spin_unlock_bh(&xprt->transport_lock);
@@ -1082,8 +1087,11 @@ found:
 #endif /* CONFIG_NFS_V4_1 */
 
 	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
-	setup_timer(&xprt->timer, xprt_init_autodisconnect,
-			(unsigned long)xprt);
+	if (xprt_has_timer(xprt))
+		setup_timer(&xprt->timer, xprt_init_autodisconnect,
+			    (unsigned long)xprt);
+	else
+		init_timer(&xprt->timer);
 	xprt->last_used = jiffies;
 	xprt->cwnd = RPC_INITCWND;
 	xprt->bind_index = 0;
@@ -1102,7 +1110,6 @@ found:
 
 	dprintk("RPC: created transport %p with %u slots\n", xprt,
 			xprt->max_reqs);
-
 	return xprt;
 }
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 5151f9f6c573..0cf5e8c27a10 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -730,12 +730,12 @@ static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
 		goto err;
 
 	mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
-	if (!mr)
+	if (IS_ERR(mr))
 		goto err_free_frmr;
 
 	pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
 					 RPCSVC_MAXPAGES);
-	if (!pl)
+	if (IS_ERR(pl))
 		goto err_free_mr;
 
 	frmr->mr = mr;
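The bug being fixed: ib_alloc_fast_reg_mr() and ib_alloc_fast_reg_page_list() report failure as an ERR_PTR()-encoded pointer, never NULL, so the old `if (!mr)` test would accept an error value. A userspace model of the kernel's convention (MAX_ERRNO is 4095, as in include/linux/err.h):

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)     /* encode -errno in a pointer */
    {
        return (void *)error;
    }

    static inline int IS_ERR(const void *ptr)   /* top MAX_ERRNO addresses */
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static inline long PTR_ERR(const void *ptr) /* decode -errno again */
    {
        return (long)ptr;
    }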
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 62438f3a914d..bee415465754 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -32,6 +32,7 @@
 #include <linux/tcp.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/xprtsock.h>
 #include <linux/file.h>
 #ifdef CONFIG_NFS_V4_1
@@ -43,6 +44,7 @@
 #include <net/udp.h>
 #include <net/tcp.h>
 
+#include "sunrpc.h"
 /*
  * xprtsock tunables
  */
@@ -2098,6 +2100,134 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
 			xprt->stat.bklog_u);
 }
 
+/*
+ * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
+ * we allocate pages instead of doing a kmalloc like rpc_malloc is because we
+ * want to use the server side send routines.
+ */
+void *bc_malloc(struct rpc_task *task, size_t size)
+{
+	struct page *page;
+	struct rpc_buffer *buf;
+
+	BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer));
+	page = alloc_page(GFP_KERNEL);
+
+	if (!page)
+		return NULL;
+
+	buf = page_address(page);
+	buf->len = PAGE_SIZE;
+
+	return buf->data;
+}
+
+/*
+ * Free the space allocated in the bc_alloc routine
+ */
+void bc_free(void *buffer)
+{
+	struct rpc_buffer *buf;
+
+	if (!buffer)
+		return;
+
+	buf = container_of(buffer, struct rpc_buffer, data);
+	free_page((unsigned long)buf);
+}
+
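bc_malloc()/bc_free() lean on struct rpc_buffer's flexible array member: the page begins with a length header, the caller only ever sees buf->data, and the free side walks back from the data pointer. A runnable userspace model, with malloc() standing in for alloc_page():

    #include <stddef.h>
    #include <stdlib.h>

    #define BUF_PAGE_SIZE 4096          /* stand-in for PAGE_SIZE */

    struct rpc_buffer {                 /* same layout as in sunrpc.h above */
        size_t len;
        char data[];
    };

    static void *bc_malloc_sketch(size_t size)
    {
        struct rpc_buffer *buf;

        if (size > BUF_PAGE_SIZE - sizeof(struct rpc_buffer))
            return NULL;                /* request exceeds one page */
        buf = malloc(BUF_PAGE_SIZE);    /* alloc_page() in the kernel */
        if (!buf)
            return NULL;
        buf->len = BUF_PAGE_SIZE;
        return buf->data;               /* caller never sees the header */
    }

    static void bc_free_sketch(void *buffer)
    {
        if (!buffer)
            return;
        /* walk back from data to the header, like container_of() */
        free((char *)buffer - offsetof(struct rpc_buffer, data));
    }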
+/*
+ * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
+ * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
+ */
+static int bc_sendto(struct rpc_rqst *req)
+{
+	int len;
+	struct xdr_buf *xbufp = &req->rq_snd_buf;
+	struct rpc_xprt *xprt = req->rq_xprt;
+	struct sock_xprt *transport =
+				container_of(xprt, struct sock_xprt, xprt);
+	struct socket *sock = transport->sock;
+	unsigned long headoff;
+	unsigned long tailoff;
+
+	/*
+	 * Set up the rpc header and record marker stuff
+	 */
+	xs_encode_tcp_record_marker(xbufp);
+
+	tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
+	headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
+	len = svc_send_common(sock, xbufp,
+			      virt_to_page(xbufp->head[0].iov_base), headoff,
+			      xbufp->tail[0].iov_base, tailoff);
+
+	if (len != xbufp->len) {
+		printk(KERN_NOTICE "Error sending entire callback!\n");
+		len = -EAGAIN;
+	}
+
+	return len;
+}
+
+/*
+ * The send routine. Borrows from svc_send
+ */
+static int bc_send_request(struct rpc_task *task)
+{
+	struct rpc_rqst *req = task->tk_rqstp;
+	struct svc_xprt	*xprt;
+	struct svc_sock	*svsk;
+	u32		len;
+
+	dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
+	/*
+	 * Get the server socket associated with this callback xprt
+	 */
+	xprt = req->rq_xprt->bc_xprt;
+	svsk = container_of(xprt, struct svc_sock, sk_xprt);
+
+	/*
+	 * Grab the mutex to serialize data as the connection is shared
+	 * with the fore channel
+	 */
+	if (!mutex_trylock(&xprt->xpt_mutex)) {
+		rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
+		if (!mutex_trylock(&xprt->xpt_mutex))
+			return -EAGAIN;
+		rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
+	}
+	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
+		len = -ENOTCONN;
+	else
+		len = bc_sendto(req);
+	mutex_unlock(&xprt->xpt_mutex);
+
+	if (len > 0)
+		len = 0;
+
+	return len;
+}
+
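The lock dance in bc_send_request() is the subtle part: an async task may not sleep on xpt_mutex, so it trylocks, queues itself on xpt_bc_pending (which svc_send() now wakes after dropping the mutex), then trylocks once more to close the race between those two steps. A pthread sketch of the shape, with the rpc wait-queue calls reduced to comments since they have no userspace equivalent:

    #include <errno.h>
    #include <pthread.h>

    /* do_send stands in for bc_sendto(). */
    static int try_send(pthread_mutex_t *m, int (*do_send)(void))
    {
        int rc;

        if (pthread_mutex_trylock(m) != 0) {
            /* rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL): park the
             * task; svc_send() wakes the queue after unlocking. */
            if (pthread_mutex_trylock(m) != 0)
                return -EAGAIN;     /* stay parked until woken and retried */
            /* got the lock after all: rpc_wake_up_queued_task() takes the
             * task back off the wait queue. */
        }
        rc = do_send();
        pthread_mutex_unlock(m);
        return rc;
    }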
+/*
+ * The close routine. Since this is client initiated, we do nothing
+ */
+
+static void bc_close(struct rpc_xprt *xprt)
+{
+	return;
+}
+
+/*
+ * The xprt destroy routine. Again, because this connection is client
+ * initiated, we do nothing
+ */
+
+static void bc_destroy(struct rpc_xprt *xprt)
+{
+	return;
+}
+
 static struct rpc_xprt_ops xs_udp_ops = {
 	.set_buffer_size	= xs_udp_set_buffer_size,
 	.reserve_xprt		= xprt_reserve_xprt_cong,
@@ -2134,6 +2264,22 @@ static struct rpc_xprt_ops xs_tcp_ops = {
 	.print_stats		= xs_tcp_print_stats,
 };
 
+/*
+ * The rpc_xprt_ops for the server backchannel
+ */
+
+static struct rpc_xprt_ops bc_tcp_ops = {
+	.reserve_xprt		= xprt_reserve_xprt,
+	.release_xprt		= xprt_release_xprt,
+	.buf_alloc		= bc_malloc,
+	.buf_free		= bc_free,
+	.send_request		= bc_send_request,
+	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
+	.close			= bc_close,
+	.destroy		= bc_destroy,
+	.print_stats		= xs_tcp_print_stats,
+};
+
 static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
 				      unsigned int slot_table_size)
 {
@@ -2322,11 +2468,93 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 		return ERR_PTR(-EINVAL);
 }
 
+/**
+ * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
+ * @args: rpc transport creation arguments
+ *
+ */
+static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
+{
+	struct sockaddr *addr = args->dstaddr;
+	struct rpc_xprt *xprt;
+	struct sock_xprt *transport;
+	struct svc_sock *bc_sock;
+
+	if (!args->bc_xprt)
+		return ERR_PTR(-EINVAL);
+
+	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
+	if (IS_ERR(xprt))
+		return xprt;
+	transport = container_of(xprt, struct sock_xprt, xprt);
+
+	xprt->prot = IPPROTO_TCP;
+	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
+	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
+	xprt->timeout = &xs_tcp_default_timeout;
+
+	/* backchannel */
+	xprt_set_bound(xprt);
+	xprt->bind_timeout = 0;
+	xprt->connect_timeout = 0;
+	xprt->reestablish_timeout = 0;
+	xprt->idle_timeout = 0;
+
+	/*
+	 * The backchannel uses the same socket connection as the
+	 * forechannel
+	 */
+	xprt->bc_xprt = args->bc_xprt;
+	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
+	bc_sock->sk_bc_xprt = xprt;
+	transport->sock = bc_sock->sk_sock;
+	transport->inet = bc_sock->sk_sk;
+
+	xprt->ops = &bc_tcp_ops;
+
+	switch (addr->sa_family) {
+	case AF_INET:
+		xs_format_peer_addresses(xprt, "tcp",
+					 RPCBIND_NETID_TCP);
+		break;
+	case AF_INET6:
+		xs_format_peer_addresses(xprt, "tcp",
+					 RPCBIND_NETID_TCP6);
+		break;
+	default:
+		kfree(xprt);
+		return ERR_PTR(-EAFNOSUPPORT);
+	}
+
+	if (xprt_bound(xprt))
+		dprintk("RPC: set up xprt to %s (port %s) via %s\n",
+			xprt->address_strings[RPC_DISPLAY_ADDR],
+			xprt->address_strings[RPC_DISPLAY_PORT],
+			xprt->address_strings[RPC_DISPLAY_PROTO]);
+	else
+		dprintk("RPC: set up xprt to %s (autobind) via %s\n",
+			xprt->address_strings[RPC_DISPLAY_ADDR],
+			xprt->address_strings[RPC_DISPLAY_PROTO]);
+
+	/*
+	 * Since we don't want connections for the backchannel, we set
+	 * the xprt status to connected
+	 */
+	xprt_set_connected(xprt);
+
+	if (try_module_get(THIS_MODULE))
+		return xprt;
+	kfree(xprt->slot);
+	kfree(xprt);
+	return ERR_PTR(-EINVAL);
+}
+
2552
2325static struct xprt_class xs_udp_transport = { 2553static struct xprt_class xs_udp_transport = {
2326 .list = LIST_HEAD_INIT(xs_udp_transport.list), 2554 .list = LIST_HEAD_INIT(xs_udp_transport.list),
2327 .name = "udp", 2555 .name = "udp",
2328 .owner = THIS_MODULE, 2556 .owner = THIS_MODULE,
2329 .ident = IPPROTO_UDP, 2557 .ident = XPRT_TRANSPORT_UDP,
2330 .setup = xs_setup_udp, 2558 .setup = xs_setup_udp,
2331}; 2559};
2332 2560
@@ -2334,10 +2562,18 @@ static struct xprt_class xs_tcp_transport = {
 	.list		= LIST_HEAD_INIT(xs_tcp_transport.list),
 	.name		= "tcp",
 	.owner		= THIS_MODULE,
-	.ident		= IPPROTO_TCP,
+	.ident		= XPRT_TRANSPORT_TCP,
 	.setup		= xs_setup_tcp,
 };
 
+static struct xprt_class	xs_bc_tcp_transport = {
+	.list		= LIST_HEAD_INIT(xs_bc_tcp_transport.list),
+	.name		= "tcp NFSv4.1 backchannel",
+	.owner		= THIS_MODULE,
+	.ident		= XPRT_TRANSPORT_BC_TCP,
+	.setup		= xs_setup_bc_tcp,
+};
+
2341/** 2577/**
2342 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client 2578 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
2343 * 2579 *
@@ -2351,6 +2587,7 @@ int init_socket_xprt(void)
 
 	xprt_register_transport(&xs_udp_transport);
 	xprt_register_transport(&xs_tcp_transport);
+	xprt_register_transport(&xs_bc_tcp_transport);
 
 	return 0;
 }
@@ -2370,6 +2607,7 @@ void cleanup_socket_xprt(void)
 
 	xprt_unregister_transport(&xs_udp_transport);
 	xprt_unregister_transport(&xs_tcp_transport);
+	xprt_unregister_transport(&xs_bc_tcp_transport);
 }
 
 static int param_set_uint_minmax(const char *val, struct kernel_param *kp,