path: root/net/sunrpc/cache.c
author	Trond Myklebust <Trond.Myklebust@netapp.com>	2009-08-09 15:14:28 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2009-08-09 15:14:28 -0400
commit	da77005f0d64486cd760f43d9b7cc2379262a363
tree	fa64dac28c47940dd730fff68f692fd5bbd73d78 /net/sunrpc/cache.c
parent	5b7a1b9f9214cb89dd164b43ca3fab7af4058e06
SUNRPC: Remove the global temporary write buffer in net/sunrpc/cache.c
While we do want to protect against multiple concurrent readers and writers on each upcall/downcall pipe, we don't want to limit concurrent reading and writing to separate caches. This patch therefore replaces the static buffer 'write_buf', which can only be used by one writer at a time, with use of the page cache as the temporary buffer for downcalls. We still fall back to using the old global buffer if the downcall is larger than PAGE_CACHE_SIZE, since this is apparently needed by the SPKM security context initialisation.

It then replaces the use of the global 'queue_io_mutex' with the inode->i_mutex in cache_read() and cache_write().

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
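In outline, the patch splits the old monolithic cache_write() into a fast path that borrows page 0 of the pipe's own page-cache mapping as a scratch buffer, and a slow path that keeps the old static buffer. A condensed sketch of that fast path follows; the name cache_downcall_sketch is illustrative only and error handling is trimmed, so see the full diff below for the real cache_downcall():

/* Condensed sketch of the new fast path: each pipe uses page 0 of its own
 * mapping as a temporary downcall buffer, so writers to different caches
 * no longer contend on a single static buffer. */
static ssize_t cache_downcall_sketch(struct address_space *mapping,
				     const char __user *buf,
				     size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret;

	if (count >= PAGE_CACHE_SIZE)	/* too big for one page: use the */
		return cache_slow_downcall(buf, count, cd);	/* old 8k buffer */

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		return cache_slow_downcall(buf, count, cd);

	kaddr = kmap(page);		/* map, copy in, and parse */
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);	/* find_or_create_page() returns the page locked */
	page_cache_release(page);
	return ret;
}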
Diffstat (limited to 'net/sunrpc/cache.c')
-rw-r--r--	net/sunrpc/cache.c	95
1 file changed, 70 insertions(+), 25 deletions(-)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 062d4f4307eb..c8e7d2d07260 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -27,6 +27,7 @@
 #include <linux/net.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <linux/pagemap.h>
 #include <asm/ioctls.h>
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/cache.h>
@@ -702,13 +703,14 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
 {
 	struct cache_reader *rp = filp->private_data;
 	struct cache_request *rq;
-	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+	struct inode *inode = filp->f_path.dentry->d_inode;
+	struct cache_detail *cd = PDE(inode)->data;
 	int err;
 
 	if (count == 0)
 		return 0;
 
-	mutex_lock(&queue_io_mutex); /* protect against multiple concurrent
+	mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
 			      * readers on this file */
  again:
 	spin_lock(&queue_lock);
@@ -721,7 +723,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
 	}
 	if (rp->q.list.next == &cd->queue) {
 		spin_unlock(&queue_lock);
-		mutex_unlock(&queue_io_mutex);
+		mutex_unlock(&inode->i_mutex);
 		BUG_ON(rp->offset);
 		return 0;
 	}
@@ -768,38 +770,81 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
 	}
 	if (err == -EAGAIN)
 		goto again;
-	mutex_unlock(&queue_io_mutex);
+	mutex_unlock(&inode->i_mutex);
 	return err ? err : count;
 }
 
-static char write_buf[8192]; /* protected by queue_io_mutex */
+static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
+				 size_t count, struct cache_detail *cd)
+{
+	ssize_t ret;
 
-static ssize_t
-cache_write(struct file *filp, const char __user *buf, size_t count,
-	    loff_t *ppos)
+	if (copy_from_user(kaddr, buf, count))
+		return -EFAULT;
+	kaddr[count] = '\0';
+	ret = cd->cache_parse(cd, kaddr, count);
+	if (!ret)
+		ret = count;
+	return ret;
+}
+
+static ssize_t cache_slow_downcall(const char __user *buf,
+				   size_t count, struct cache_detail *cd)
 {
-	int err;
-	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+	static char write_buf[8192]; /* protected by queue_io_mutex */
+	ssize_t ret = -EINVAL;
 
-	if (count == 0)
-		return 0;
 	if (count >= sizeof(write_buf))
-		return -EINVAL;
-
+		goto out;
 	mutex_lock(&queue_io_mutex);
+	ret = cache_do_downcall(write_buf, buf, count, cd);
+	mutex_unlock(&queue_io_mutex);
+out:
+	return ret;
+}
 
-	if (copy_from_user(write_buf, buf, count)) {
-		mutex_unlock(&queue_io_mutex);
-		return -EFAULT;
-	}
-	write_buf[count] = '\0';
-	if (cd->cache_parse)
-		err = cd->cache_parse(cd, write_buf, count);
-	else
-		err = -EINVAL;
+static ssize_t cache_downcall(struct address_space *mapping,
+			      const char __user *buf,
+			      size_t count, struct cache_detail *cd)
+{
+	struct page *page;
+	char *kaddr;
+	ssize_t ret = -ENOMEM;
+
+	if (count >= PAGE_CACHE_SIZE)
+		goto out_slow;
+
+	page = find_or_create_page(mapping, 0, GFP_KERNEL);
+	if (!page)
+		goto out_slow;
+
+	kaddr = kmap(page);
+	ret = cache_do_downcall(kaddr, buf, count, cd);
+	kunmap(page);
+	unlock_page(page);
+	page_cache_release(page);
+	return ret;
+out_slow:
+	return cache_slow_downcall(buf, count, cd);
+}
 
-	mutex_unlock(&queue_io_mutex);
-	return err ? err : count;
+static ssize_t
+cache_write(struct file *filp, const char __user *buf, size_t count,
+	    loff_t *ppos)
+{
+	struct address_space *mapping = filp->f_mapping;
+	struct inode *inode = filp->f_path.dentry->d_inode;
+	struct cache_detail *cd = PDE(inode)->data;
+	ssize_t ret = -EINVAL;
+
+	if (!cd->cache_parse)
+		goto out;
+
+	mutex_lock(&inode->i_mutex);
+	ret = cache_downcall(mapping, buf, count, cd);
+	mutex_unlock(&inode->i_mutex);
+out:
+	return ret;
 }
 
 static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
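Taken together, reads and writes on a cache pipe now serialize on that pipe's own inode->i_mutex, and the global queue_io_mutex survives only to guard the slow-path static buffer. A minimal before/after sketch of the locking; do_cache_read() is a hypothetical helper standing in for the queue-walking body of cache_read():

/* Before: one mutex serialized readers and writers of every cache pipe. */
static DEFINE_MUTEX(queue_io_mutex);

static ssize_t cache_read_before(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	ssize_t ret;

	mutex_lock(&queue_io_mutex);	/* blocks users of *all* caches */
	ret = do_cache_read(filp, buf, count);
	mutex_unlock(&queue_io_mutex);
	return ret;
}

/* After: each pipe's inode->i_mutex serializes only its own users, so a
 * downcall to e.g. auth.unix.ip no longer delays a read on nfs4.idtoname. */
static ssize_t cache_read_after(struct file *filp, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);	/* contends only within this pipe */
	ret = do_cache_read(filp, buf, count);
	mutex_unlock(&inode->i_mutex);
	return ret;
}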