path: root/net/sunrpc/cache.c
Diffstat (limited to 'net/sunrpc/cache.c')
-rw-r--r--  net/sunrpc/cache.c  345
1 file changed, 237 insertions(+), 108 deletions(-)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2b06410e584e..72ad836e4fe0 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -28,21 +28,21 @@
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
 #include <linux/pagemap.h>
-#include <linux/smp_lock.h>
 #include <asm/ioctls.h>
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/cache.h>
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/rpc_pipe_fs.h>
+#include "netns.h"
 
 #define RPCDBG_FACILITY RPCDBG_CACHE
 
-static int cache_defer_req(struct cache_req *req, struct cache_head *item);
+static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
 static void cache_revisit_request(struct cache_head *item);
 
 static void cache_init(struct cache_head *h)
 {
-        time_t now = get_seconds();
+        time_t now = seconds_since_boot();
         h->next = NULL;
         h->flags = 0;
         kref_init(&h->ref);
@@ -52,7 +52,7 @@ static void cache_init(struct cache_head *h)
 
 static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
 {
-        return (h->expiry_time < get_seconds()) ||
+        return (h->expiry_time < seconds_since_boot()) ||
                 (detail->flush_time > h->last_refresh);
 }
 
@@ -127,7 +127,8 @@ static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
 static void cache_fresh_locked(struct cache_head *head, time_t expiry)
 {
         head->expiry_time = expiry;
-        head->last_refresh = get_seconds();
+        head->last_refresh = seconds_since_boot();
+        smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
         set_bit(CACHE_VALID, &head->flags);
 }
 
@@ -208,11 +209,36 @@ static inline int cache_is_valid(struct cache_detail *detail, struct cache_head
                 /* entry is valid */
                 if (test_bit(CACHE_NEGATIVE, &h->flags))
                         return -ENOENT;
-                else
+                else {
+                        /*
+                         * In combination with write barrier in
+                         * sunrpc_cache_update, ensures that anyone
+                         * using the cache entry after this sees the
+                         * updated contents:
+                         */
+                        smp_rmb();
                         return 0;
+                }
         }
 }
 
+static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
+{
+        int rv;
+
+        write_lock(&detail->hash_lock);
+        rv = cache_is_valid(detail, h);
+        if (rv != -EAGAIN) {
+                write_unlock(&detail->hash_lock);
+                return rv;
+        }
+        set_bit(CACHE_NEGATIVE, &h->flags);
+        cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
+        write_unlock(&detail->hash_lock);
+        cache_fresh_unlocked(h, detail);
+        return -ENOENT;
+}
+
 /*
  * This is the generic cache management routine for all
  * the authentication caches.
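The barrier pair introduced above follows the standard publish/consume discipline: cache_fresh_locked() fills in the entry, issues smp_wmb(), and only then sets CACHE_VALID, while cache_is_valid() tests the bit and issues smp_rmb() before the caller reads the contents. A minimal userspace sketch of the same ordering, using C11 fences in place of the kernel primitives (the names entry, publish and consume are illustrative, not from the patch):

        #include <stdatomic.h>
        #include <stdbool.h>

        struct entry {
                int payload;            /* stands in for the cache contents */
                atomic_bool valid;      /* stands in for the CACHE_VALID bit */
        };

        /* Writer, mirroring cache_fresh_locked(): fill, barrier, set flag. */
        static void publish(struct entry *e, int value)
        {
                e->payload = value;
                atomic_thread_fence(memory_order_release);      /* ~ smp_wmb() */
                atomic_store_explicit(&e->valid, true, memory_order_relaxed);
        }

        /* Reader, mirroring cache_is_valid(): test flag, barrier, then read. */
        static bool consume(struct entry *e, int *out)
        {
                if (!atomic_load_explicit(&e->valid, memory_order_relaxed))
                        return false;
                atomic_thread_fence(memory_order_acquire);      /* ~ smp_rmb() */
                *out = e->payload;
                return true;
        }

The release fence before the flag store and the acquire fence after the flag load give exactly the ordering the kernel comment describes: a reader that sees the flag set is guaranteed to see the updated contents.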
@@ -238,7 +264,7 @@ int cache_check(struct cache_detail *detail,
 
         /* now see if we want to start an upcall */
         refresh_age = (h->expiry_time - h->last_refresh);
-        age = get_seconds() - h->last_refresh;
+        age = seconds_since_boot() - h->last_refresh;
 
         if (rqstp == NULL) {
                 if (rv == -EAGAIN)
@@ -251,14 +277,8 @@ int cache_check(struct cache_detail *detail,
                 case -EINVAL:
                         clear_bit(CACHE_PENDING, &h->flags);
                         cache_revisit_request(h);
-                        if (rv == -EAGAIN) {
-                                set_bit(CACHE_NEGATIVE, &h->flags);
-                                cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY);
-                                cache_fresh_unlocked(h, detail);
-                                rv = -ENOENT;
-                        }
+                        rv = try_to_negate_entry(detail, h);
                         break;
-
                 case -EAGAIN:
                         clear_bit(CACHE_PENDING, &h->flags);
                         cache_revisit_request(h);
@@ -268,8 +288,11 @@ int cache_check(struct cache_detail *detail,
         }
 
         if (rv == -EAGAIN) {
-                if (cache_defer_req(rqstp, h) < 0) {
-                        /* Request is not deferred */
+                if (!cache_defer_req(rqstp, h)) {
+                        /*
+                         * Request was not deferred; handle it as best
+                         * we can ourselves:
+                         */
                         rv = cache_is_valid(detail, h);
                         if (rv == -EAGAIN)
                                 rv = -ETIMEDOUT;
@@ -388,11 +411,11 @@ static int cache_clean(void)
                         return -1;
                 }
                 current_detail = list_entry(next, struct cache_detail, others);
-                if (current_detail->nextcheck > get_seconds())
+                if (current_detail->nextcheck > seconds_since_boot())
                         current_index = current_detail->hash_size;
                 else {
                         current_index = 0;
-                        current_detail->nextcheck = get_seconds()+30*60;
+                        current_detail->nextcheck = seconds_since_boot()+30*60;
                 }
         }
 
@@ -477,7 +500,7 @@ EXPORT_SYMBOL_GPL(cache_flush);
 void cache_purge(struct cache_detail *detail)
 {
         detail->flush_time = LONG_MAX;
-        detail->nextcheck = get_seconds();
+        detail->nextcheck = seconds_since_boot();
         cache_flush();
         detail->flush_time = 1;
 }
@@ -506,81 +529,157 @@ EXPORT_SYMBOL_GPL(cache_purge);
 
 static DEFINE_SPINLOCK(cache_defer_lock);
 static LIST_HEAD(cache_defer_list);
-static struct list_head cache_defer_hash[DFR_HASHSIZE];
+static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
 static int cache_defer_cnt;
 
-static int cache_defer_req(struct cache_req *req, struct cache_head *item)
+static void __unhash_deferred_req(struct cache_deferred_req *dreq)
+{
+        hlist_del_init(&dreq->hash);
+        if (!list_empty(&dreq->recent)) {
+                list_del_init(&dreq->recent);
+                cache_defer_cnt--;
+        }
+}
+
+static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
 {
-        struct cache_deferred_req *dreq, *discard;
         int hash = DFR_HASH(item);
 
-        if (cache_defer_cnt >= DFR_MAX) {
-                /* too much in the cache, randomly drop this one,
-                 * or continue and drop the oldest below
-                 */
-                if (net_random()&1)
-                        return -ENOMEM;
-        }
-        dreq = req->defer(req);
-        if (dreq == NULL)
-                return -ENOMEM;
+        INIT_LIST_HEAD(&dreq->recent);
+        hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
+}
+
+static void setup_deferral(struct cache_deferred_req *dreq,
+                           struct cache_head *item,
+                           int count_me)
+{
 
         dreq->item = item;
 
         spin_lock(&cache_defer_lock);
 
-        list_add(&dreq->recent, &cache_defer_list);
+        __hash_deferred_req(dreq, item);
 
-        if (cache_defer_hash[hash].next == NULL)
-                INIT_LIST_HEAD(&cache_defer_hash[hash]);
-        list_add(&dreq->hash, &cache_defer_hash[hash]);
-
-        /* it is in, now maybe clean up */
-        discard = NULL;
-        if (++cache_defer_cnt > DFR_MAX) {
-                discard = list_entry(cache_defer_list.prev,
-                                     struct cache_deferred_req, recent);
-                list_del_init(&discard->recent);
-                list_del_init(&discard->hash);
-                cache_defer_cnt--;
+        if (count_me) {
+                cache_defer_cnt++;
+                list_add(&dreq->recent, &cache_defer_list);
         }
+
         spin_unlock(&cache_defer_lock);
 
+}
+
+struct thread_deferred_req {
+        struct cache_deferred_req handle;
+        struct completion completion;
+};
+
+static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
+{
+        struct thread_deferred_req *dr =
+                container_of(dreq, struct thread_deferred_req, handle);
+        complete(&dr->completion);
+}
+
+static void cache_wait_req(struct cache_req *req, struct cache_head *item)
+{
+        struct thread_deferred_req sleeper;
+        struct cache_deferred_req *dreq = &sleeper.handle;
+
+        sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
+        dreq->revisit = cache_restart_thread;
+
+        setup_deferral(dreq, item, 0);
+
+        if (!test_bit(CACHE_PENDING, &item->flags) ||
+            wait_for_completion_interruptible_timeout(
+                    &sleeper.completion, req->thread_wait) <= 0) {
+                /* The completion wasn't completed, so we need
+                 * to clean up
+                 */
+                spin_lock(&cache_defer_lock);
+                if (!hlist_unhashed(&sleeper.handle.hash)) {
+                        __unhash_deferred_req(&sleeper.handle);
+                        spin_unlock(&cache_defer_lock);
+                } else {
+                        /* cache_revisit_request already removed
+                         * this from the hash table, but hasn't
+                         * called ->revisit yet.  It will very soon
+                         * and we need to wait for it.
+                         */
+                        spin_unlock(&cache_defer_lock);
+                        wait_for_completion(&sleeper.completion);
+                }
+        }
+}
+
+static void cache_limit_defers(void)
+{
+        /* Make sure we haven't exceed the limit of allowed deferred
+         * requests.
+         */
+        struct cache_deferred_req *discard = NULL;
+
+        if (cache_defer_cnt <= DFR_MAX)
+                return;
+
+        spin_lock(&cache_defer_lock);
+
+        /* Consider removing either the first or the last */
+        if (cache_defer_cnt > DFR_MAX) {
+                if (net_random() & 1)
+                        discard = list_entry(cache_defer_list.next,
+                                             struct cache_deferred_req, recent);
+                else
+                        discard = list_entry(cache_defer_list.prev,
                                             struct cache_deferred_req, recent);
+                __unhash_deferred_req(discard);
+        }
+        spin_unlock(&cache_defer_lock);
         if (discard)
-                /* there was one too many */
                 discard->revisit(discard, 1);
+}
 
-        if (!test_bit(CACHE_PENDING, &item->flags)) {
-                /* must have just been validated... */
-                cache_revisit_request(item);
-                return -EAGAIN;
+/* Return true if and only if a deferred request is queued. */
+static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
+{
+        struct cache_deferred_req *dreq;
+
+        if (req->thread_wait) {
+                cache_wait_req(req, item);
+                if (!test_bit(CACHE_PENDING, &item->flags))
+                        return false;
         }
-        return 0;
+        dreq = req->defer(req);
+        if (dreq == NULL)
+                return false;
+        setup_deferral(dreq, item, 1);
+        if (!test_bit(CACHE_PENDING, &item->flags))
+                /* Bit could have been cleared before we managed to
+                 * set up the deferral, so need to revisit just in case
+                 */
                 cache_revisit_request(item);
+
+        cache_limit_defers();
+        return true;
 }
 
 static void cache_revisit_request(struct cache_head *item)
 {
         struct cache_deferred_req *dreq;
         struct list_head pending;
-
-        struct list_head *lp;
+        struct hlist_node *lp, *tmp;
         int hash = DFR_HASH(item);
 
         INIT_LIST_HEAD(&pending);
         spin_lock(&cache_defer_lock);
 
-        lp = cache_defer_hash[hash].next;
-        if (lp) {
-                while (lp != &cache_defer_hash[hash]) {
-                        dreq = list_entry(lp, struct cache_deferred_req, hash);
-                        lp = lp->next;
-                        if (dreq->item == item) {
-                                list_del_init(&dreq->hash);
-                                list_move(&dreq->recent, &pending);
-                                cache_defer_cnt--;
-                        }
+        hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
+                if (dreq->item == item) {
+                        __unhash_deferred_req(dreq);
+                        list_add(&dreq->recent, &pending);
                 }
-        }
+
         spin_unlock(&cache_defer_lock);
 
         while (!list_empty(&pending)) {
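The subtle part of the new cache_wait_req() is the cleanup path: a waiter that times out may find that cache_revisit_request() has already unhashed its on-stack handle but not yet called ->revisit, and returning at that point would let the notifier touch freed stack memory, so the code falls through to an unconditional wait_for_completion(). A rough userspace analogue of that two-case cleanup, with pthreads standing in for the kernel's completion (all names here are illustrative):

        #include <pthread.h>
        #include <stdbool.h>
        #include <time.h>

        struct waiter {
                pthread_cond_t cond;    /* stands in for struct completion */
                bool hashed;            /* like !hlist_unhashed(&handle.hash) */
                bool completed;
        };

        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

        /* Notifier: unhash under the lock, complete after dropping it,
         * mirroring cache_revisit_request() + cache_restart_thread(). */
        static void revisit(struct waiter *w)
        {
                pthread_mutex_lock(&lock);
                w->hashed = false;              /* __unhash_deferred_req() */
                pthread_mutex_unlock(&lock);
                /* window where the waiter sees "unhashed but not completed" */
                pthread_mutex_lock(&lock);
                w->completed = true;            /* complete(&dr->completion) */
                pthread_cond_signal(&w->cond);
                pthread_mutex_unlock(&lock);
        }

        /* Waiter: mirrors the timeout cleanup in cache_wait_req(). */
        static void wait_or_clean_up(struct waiter *w, const struct timespec *deadline)
        {
                int rc = 0;

                pthread_mutex_lock(&lock);
                while (!w->completed && rc == 0)
                        rc = pthread_cond_timedwait(&w->cond, &lock, deadline);
                if (!w->completed) {
                        if (w->hashed) {
                                /* still hashed: nobody saw us, safe to leave */
                                w->hashed = false;
                        } else {
                                /* already unhashed: the notifier is about to
                                 * signal us, so wait unconditionally, as the
                                 * kernel does with wait_for_completion() */
                                while (!w->completed)
                                        pthread_cond_wait(&w->cond, &lock);
                        }
                }
                pthread_mutex_unlock(&lock);
        }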
@@ -601,9 +700,8 @@ void cache_clean_deferred(void *owner)
 
         list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
                 if (dreq->owner == owner) {
-                        list_del_init(&dreq->hash);
-                        list_move(&dreq->recent, &pending);
-                        cache_defer_cnt--;
+                        __unhash_deferred_req(dreq);
+                        list_add(&dreq->recent, &pending);
                 }
         }
         spin_unlock(&cache_defer_lock);
@@ -902,7 +1000,7 @@ static int cache_release(struct inode *inode, struct file *filp,
                 filp->private_data = NULL;
                 kfree(rp);
 
-                cd->last_close = get_seconds();
+                cd->last_close = seconds_since_boot();
                 atomic_dec(&cd->readers);
         }
         module_put(cd->owner);
@@ -1015,6 +1113,23 @@ static void warn_no_listener(struct cache_detail *detail)
         }
 }
 
+static bool cache_listeners_exist(struct cache_detail *detail)
+{
+        if (atomic_read(&detail->readers))
+                return true;
+        if (detail->last_close == 0)
+                /* This cache was never opened */
+                return false;
+        if (detail->last_close < seconds_since_boot() - 30)
+                /*
+                 * We allow for the possibility that someone might
+                 * restart a userspace daemon without restarting the
+                 * server; but after 30 seconds, we give up.
+                 */
+                return false;
+        return true;
+}
+
 /*
  * register an upcall request to user-space and queue it up for read() by the
  * upcall daemon.
@@ -1033,10 +1148,9 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
         char *bp;
         int len;
 
-        if (atomic_read(&detail->readers) == 0 &&
-            detail->last_close < get_seconds() - 30) {
-                        warn_no_listener(detail);
-                        return -EINVAL;
+        if (!cache_listeners_exist(detail)) {
+                warn_no_listener(detail);
+                return -EINVAL;
         }
 
         buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -1095,13 +1209,19 @@ int qword_get(char **bpp, char *dest, int bufsize)
         if (bp[0] == '\\' && bp[1] == 'x') {
                 /* HEX STRING */
                 bp += 2;
-                while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
-                        int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
-                        bp++;
-                        byte <<= 4;
-                        byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
-                        *dest++ = byte;
-                        bp++;
+                while (len < bufsize) {
+                        int h, l;
+
+                        h = hex_to_bin(bp[0]);
+                        if (h < 0)
+                                break;
+
+                        l = hex_to_bin(bp[1]);
+                        if (l < 0)
+                                break;
+
+                        *dest++ = (h << 4) | l;
+                        bp += 2;
                         len++;
                 }
         } else {
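The rewritten hex loop delegates digit classification and conversion to hex_to_bin(), which returns the nibble value or -1 for a non-hex character, so the separate isxdigit() tests and the open-coded ASCII arithmetic both disappear. A self-contained userspace rendition of the same loop (hex_to_bin() is re-implemented here for illustration; the kernel's version lives in lib/hexdump.c):

        #include <stdio.h>

        /* Userspace stand-in for the kernel's hex_to_bin(): returns the
         * value of a hex digit, or -1 if the character is not one. */
        static int hex_to_bin(char ch)
        {
                if (ch >= '0' && ch <= '9')
                        return ch - '0';
                if (ch >= 'a' && ch <= 'f')
                        return ch - 'a' + 10;
                if (ch >= 'A' && ch <= 'F')
                        return ch - 'A' + 10;
                return -1;
        }

        /* Decode pairs of hex digits exactly as the rewritten qword_get()
         * loop does; stops at the first non-hex byte or a full buffer.
         * Reading bp[1] is safe: if bp[0] was the final byte before the
         * NUL, hex_to_bin('\0') returns -1 and the loop breaks. */
        static int decode_hex(const char *bp, char *dest, int bufsize)
        {
                int len = 0;

                while (len < bufsize) {
                        int h = hex_to_bin(bp[0]);
                        if (h < 0)
                                break;
                        int l = hex_to_bin(bp[1]);
                        if (l < 0)
                                break;
                        *dest++ = (h << 4) | l;
                        bp += 2;
                        len++;
                }
                return len;
        }

        int main(void)
        {
                char out[16];
                int n = decode_hex("6e6673", out, sizeof(out));  /* "nfs" */
                printf("%.*s (%d bytes)\n", n, out, n);
                return 0;
        }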
@@ -1219,7 +1339,8 @@ static int c_show(struct seq_file *m, void *p)
 
         ifdebug(CACHE)
                 seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
-                           cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags);
+                           convert_to_wallclock(cp->expiry_time),
+                           atomic_read(&cp->ref.refcount), cp->flags);
         cache_get(cp);
         if (cache_check(cd, cp, NULL))
                 /* cache_check does a cache_put on failure */
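Because expiry_time is now kept in seconds since boot, values shown to userspace have to be translated back to wallclock time first, which is what convert_to_wallclock() does here. The helper itself is not part of this file; a plausible reconstruction, consistent with how this diff uses it and with seconds_since_boot(), would be (treat the bodies as an approximation, not a quote from the header):

        /* Plausible definitions of the time helpers this diff relies on;
         * the real ones live in include/linux/sunrpc/cache.h. */
        static inline time_t seconds_since_boot(void)
        {
                struct timespec boot;
                getboottime(&boot);
                return get_seconds() - boot.tv_sec;
        }

        static inline time_t convert_to_wallclock(time_t sinceboot)
        {
                struct timespec boot;
                getboottime(&boot);
                return boot.tv_sec + sinceboot;
        }

The point of the switch is that a monotonic "since boot" clock is immune to wallclock steps (NTP, settimeofday), so cache entries no longer expire early or late when the system time jumps.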
@@ -1285,7 +1406,7 @@ static ssize_t read_flush(struct file *file, char __user *buf,
         unsigned long p = *ppos;
         size_t len;
 
-        sprintf(tbuf, "%lu\n", cd->flush_time);
+        sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
         len = strlen(tbuf);
         if (p >= len)
                 return 0;
@@ -1303,19 +1424,20 @@ static ssize_t write_flush(struct file *file, const char __user *buf,
                            struct cache_detail *cd)
 {
         char tbuf[20];
-        char *ep;
-        long flushtime;
+        char *bp, *ep;
+
         if (*ppos || count > sizeof(tbuf)-1)
                 return -EINVAL;
         if (copy_from_user(tbuf, buf, count))
                 return -EFAULT;
         tbuf[count] = 0;
-        flushtime = simple_strtoul(tbuf, &ep, 0);
+        simple_strtoul(tbuf, &ep, 0);
         if (*ep && *ep != '\n')
                 return -EINVAL;
 
-        cd->flush_time = flushtime;
-        cd->nextcheck = get_seconds();
+        bp = tbuf;
+        cd->flush_time = get_expiry(&bp);
+        cd->nextcheck = seconds_since_boot();
         cache_flush();
 
         *ppos += count;
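write_flush() now parses the user-supplied flush time with get_expiry(), so the wallclock timestamp written by userspace lands on the same seconds-since-boot scale that flush_time and nextcheck use internally; the remaining simple_strtoul() call only validates the input. A sketch of get_expiry() in the same spirit as the helpers above (again an approximation of the header's helper, assuming the kernel's get_int() string parser):

        /* Approximate sketch of get_expiry() (include/linux/sunrpc/cache.h):
         * parse an integer count of wallclock seconds and rebase it to the
         * seconds-since-boot scale used internally. */
        static inline time_t get_expiry(char **bpp)
        {
                int rv;
                struct timespec boot;

                if (get_int(bpp, &rv))
                        return 0;       /* parse failure: treat as expired */
                getboottime(&boot);
                return rv - boot.tv_sec;
        }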
@@ -1348,15 +1470,10 @@ static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
 static long cache_ioctl_procfs(struct file *filp,
                                unsigned int cmd, unsigned long arg)
 {
-        long ret;
         struct inode *inode = filp->f_path.dentry->d_inode;
         struct cache_detail *cd = PDE(inode)->data;
 
-        lock_kernel();
-        ret = cache_ioctl(inode, filp, cmd, arg, cd);
-        unlock_kernel();
-
-        return ret;
+        return cache_ioctl(inode, filp, cmd, arg, cd);
 }
 
 static int cache_open_procfs(struct inode *inode, struct file *filp)
@@ -1441,10 +1558,13 @@ static const struct file_operations cache_flush_operations_procfs = {
         .read           = read_flush_procfs,
         .write          = write_flush_procfs,
         .release        = release_flush_procfs,
+        .llseek         = no_llseek,
 };
 
-static void remove_cache_proc_entries(struct cache_detail *cd)
+static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
 {
+        struct sunrpc_net *sn;
+
         if (cd->u.procfs.proc_ent == NULL)
                 return;
         if (cd->u.procfs.flush_ent)
@@ -1454,15 +1574,18 @@ static void remove_cache_proc_entries(struct cache_detail *cd)
         if (cd->u.procfs.content_ent)
                 remove_proc_entry("content", cd->u.procfs.proc_ent);
         cd->u.procfs.proc_ent = NULL;
-        remove_proc_entry(cd->name, proc_net_rpc);
+        sn = net_generic(net, sunrpc_net_id);
+        remove_proc_entry(cd->name, sn->proc_net_rpc);
 }
 
 #ifdef CONFIG_PROC_FS
-static int create_cache_proc_entries(struct cache_detail *cd)
+static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
 {
         struct proc_dir_entry *p;
+        struct sunrpc_net *sn;
 
-        cd->u.procfs.proc_ent = proc_mkdir(cd->name, proc_net_rpc);
+        sn = net_generic(net, sunrpc_net_id);
+        cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
         if (cd->u.procfs.proc_ent == NULL)
                 goto out_nomem;
         cd->u.procfs.channel_ent = NULL;
@@ -1493,11 +1616,11 @@ static int create_cache_proc_entries(struct cache_detail *cd)
         }
         return 0;
 out_nomem:
-        remove_cache_proc_entries(cd);
+        remove_cache_proc_entries(cd, net);
         return -ENOMEM;
 }
 #else /* CONFIG_PROC_FS */
-static int create_cache_proc_entries(struct cache_detail *cd)
+static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
 {
         return 0;
 }
@@ -1508,23 +1631,33 @@ void __init cache_initialize(void)
         INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean);
 }
 
-int cache_register(struct cache_detail *cd)
+int cache_register_net(struct cache_detail *cd, struct net *net)
 {
         int ret;
 
         sunrpc_init_cache_detail(cd);
-        ret = create_cache_proc_entries(cd);
+        ret = create_cache_proc_entries(cd, net);
         if (ret)
                 sunrpc_destroy_cache_detail(cd);
         return ret;
 }
+
+int cache_register(struct cache_detail *cd)
+{
+        return cache_register_net(cd, &init_net);
+}
 EXPORT_SYMBOL_GPL(cache_register);
 
-void cache_unregister(struct cache_detail *cd)
+void cache_unregister_net(struct cache_detail *cd, struct net *net)
 {
-        remove_cache_proc_entries(cd);
+        remove_cache_proc_entries(cd, net);
         sunrpc_destroy_cache_detail(cd);
 }
+
+void cache_unregister(struct cache_detail *cd)
+{
+        cache_unregister_net(cd, &init_net);
+}
 EXPORT_SYMBOL_GPL(cache_unregister);
 
 static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
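With the proc entries now created under the per-namespace sunrpc_net, callers pick a namespace explicitly through cache_register_net()/cache_unregister_net(), while the old cache_register()/cache_unregister() survive as init_net wrappers so existing callers keep working. A hedged sketch of a caller after this change (my_cache and the module hooks are invented for the example, not taken from the patch):

        /* Illustrative caller; only a minimal subset of cache_detail
         * fields is shown and all names here are hypothetical. */
        static struct cache_detail my_cache = {
                .owner          = THIS_MODULE,
                .hash_size      = 64,
                .name           = "my_cache",
        };

        static int __init my_module_init(void)
        {
                /* creates /proc/net/rpc/my_cache in the initial namespace;
                 * identical to what the compatibility wrapper
                 * cache_register() now does internally */
                return cache_register_net(&my_cache, &init_net);
        }

        static void __exit my_module_exit(void)
        {
                cache_unregister_net(&my_cache, &init_net);
        }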
@@ -1555,13 +1688,8 @@ static long cache_ioctl_pipefs(struct file *filp,
 {
         struct inode *inode = filp->f_dentry->d_inode;
         struct cache_detail *cd = RPC_I(inode)->private;
-        long ret;
 
-        lock_kernel();
-        ret = cache_ioctl(inode, filp, cmd, arg, cd);
-        unlock_kernel();
-
-        return ret;
+        return cache_ioctl(inode, filp, cmd, arg, cd);
 }
 
 static int cache_open_pipefs(struct inode *inode, struct file *filp)
@@ -1646,6 +1774,7 @@ const struct file_operations cache_flush_operations_pipefs = {
         .read           = read_flush_pipefs,
         .write          = write_flush_pipefs,
         .release        = release_flush_pipefs,
+        .llseek         = no_llseek,
 };
 
 int sunrpc_cache_register_pipefs(struct dentry *parent,