Diffstat (limited to 'fs')
| -rw-r--r-- | fs/Kconfig | 30 |
| -rw-r--r-- | fs/Makefile | 3 |
| -rw-r--r-- | fs/lockd/Makefile | 2 |
| -rw-r--r-- | fs/lockd/clntlock.c | 13 |
| -rw-r--r-- | fs/lockd/grace.c | 59 |
| -rw-r--r-- | fs/lockd/host.c | 350 |
| -rw-r--r-- | fs/lockd/mon.c | 2 |
| -rw-r--r-- | fs/lockd/svc.c | 88 |
| -rw-r--r-- | fs/lockd/svc4proc.c | 31 |
| -rw-r--r-- | fs/lockd/svclock.c | 18 |
| -rw-r--r-- | fs/lockd/svcproc.c | 31 |
| -rw-r--r-- | fs/lockd/svcsubs.c | 2 |
| -rw-r--r-- | fs/lockd/xdr.c | 2 |
| -rw-r--r-- | fs/lockd/xdr4.c | 2 |
| -rw-r--r-- | fs/nfs/callback.c | 3 |
| -rw-r--r-- | fs/nfsd/lockd.c | 1 |
| -rw-r--r-- | fs/nfsd/nfs3proc.c | 8 |
| -rw-r--r-- | fs/nfsd/nfs4callback.c | 7 |
| -rw-r--r-- | fs/nfsd/nfs4proc.c | 8 |
| -rw-r--r-- | fs/nfsd/nfs4state.c | 34 |
| -rw-r--r-- | fs/nfsd/nfs4xdr.c | 171 |
| -rw-r--r-- | fs/nfsd/nfsctl.c | 5 |
| -rw-r--r-- | fs/nfsd/nfsfh.c | 30 |
| -rw-r--r-- | fs/nfsd/nfsproc.c | 6 |
| -rw-r--r-- | fs/nfsd/nfssvc.c | 20 |
| -rw-r--r-- | fs/nfsd/vfs.c | 63 |
| -rw-r--r-- | fs/proc/proc_misc.c | 4 |
27 files changed, 621 insertions, 372 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index f54a157a0296..501f012e0c6f 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
| @@ -433,6 +433,14 @@ config FS_POSIX_ACL | |||
| 433 | bool | 433 | bool |
| 434 | default n | 434 | default n |
| 435 | 435 | ||
| 436 | config FILE_LOCKING | ||
| 437 | bool "Enable POSIX file locking API" if EMBEDDED | ||
| 438 | default y | ||
| 439 | help | ||
| 440 | This option enables standard file locking support, required | ||
| 441 | for filesystems like NFS and for the flock() system | ||
| 442 | call. Disabling this option saves about 11k. | ||
| 443 | |||
| 436 | source "fs/xfs/Kconfig" | 444 | source "fs/xfs/Kconfig" |
| 437 | source "fs/gfs2/Kconfig" | 445 | source "fs/gfs2/Kconfig" |
| 438 | 446 | ||
| @@ -1779,6 +1787,28 @@ config SUNRPC_XPRT_RDMA | |||
| 1779 | 1787 | ||
| 1780 | If unsure, say N. | 1788 | If unsure, say N. |
| 1781 | 1789 | ||
| 1790 | config SUNRPC_REGISTER_V4 | ||
| 1791 | bool "Register local RPC services via rpcbind v4 (EXPERIMENTAL)" | ||
| 1792 | depends on SUNRPC && EXPERIMENTAL | ||
| 1793 | default n | ||
| 1794 | help | ||
| 1795 | Sun added support for registering RPC services at an IPv6 | ||
| 1796 | address by creating two new versions of the rpcbind protocol | ||
| 1797 | (RFC 1833). | ||
| 1798 | |||
| 1799 | This option enables support in the kernel RPC server for | ||
| 1800 | registering kernel RPC services via version 4 of the rpcbind | ||
| 1801 | protocol. If you enable this option, you must run a portmapper | ||
| 1802 | daemon that supports rpcbind protocol version 4. | ||
| 1803 | |||
| 1804 | Serving NFS over IPv6 from knfsd (the kernel's NFS server) | ||
| 1805 | requires that you enable this option and use a portmapper that | ||
| 1806 | supports rpcbind version 4. | ||
| 1807 | |||
| 1808 | If unsure, say N to get traditional behavior (register kernel | ||
| 1809 | RPC services using only rpcbind version 2). Distributions | ||
| 1810 | using the legacy Linux portmapper daemon must say N here. | ||
| 1811 | |||
| 1782 | config RPCSEC_GSS_KRB5 | 1812 | config RPCSEC_GSS_KRB5 |
| 1783 | tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)" | 1813 | tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)" |
| 1784 | depends on SUNRPC && EXPERIMENTAL | 1814 | depends on SUNRPC && EXPERIMENTAL |
diff --git a/fs/Makefile b/fs/Makefile
index de404b00eb0c..b6f27dc26b72 100644
--- a/fs/Makefile
+++ b/fs/Makefile
| @@ -7,7 +7,7 @@ | |||
| 7 | 7 | ||
| 8 | obj-y := open.o read_write.o file_table.o super.o \ | 8 | obj-y := open.o read_write.o file_table.o super.o \ |
| 9 | char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \ | 9 | char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \ |
| 10 | ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \ | 10 | ioctl.o readdir.o select.o fifo.o dcache.o inode.o \ |
| 11 | attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \ | 11 | attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \ |
| 12 | seq_file.o xattr.o libfs.o fs-writeback.o \ | 12 | seq_file.o xattr.o libfs.o fs-writeback.o \ |
| 13 | pnode.o drop_caches.o splice.o sync.o utimes.o \ | 13 | pnode.o drop_caches.o splice.o sync.o utimes.o \ |
| @@ -27,6 +27,7 @@ obj-$(CONFIG_ANON_INODES) += anon_inodes.o | |||
| 27 | obj-$(CONFIG_SIGNALFD) += signalfd.o | 27 | obj-$(CONFIG_SIGNALFD) += signalfd.o |
| 28 | obj-$(CONFIG_TIMERFD) += timerfd.o | 28 | obj-$(CONFIG_TIMERFD) += timerfd.o |
| 29 | obj-$(CONFIG_EVENTFD) += eventfd.o | 29 | obj-$(CONFIG_EVENTFD) += eventfd.o |
| 30 | obj-$(CONFIG_FILE_LOCKING) += locks.o | ||
| 30 | obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o | 31 | obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o |
| 31 | 32 | ||
| 32 | nfsd-$(CONFIG_NFSD) := nfsctl.o | 33 | nfsd-$(CONFIG_NFSD) := nfsctl.o |
diff --git a/fs/lockd/Makefile b/fs/lockd/Makefile
index 7725a0a9a555..97f6073ab339 100644
--- a/fs/lockd/Makefile
+++ b/fs/lockd/Makefile
| @@ -5,6 +5,6 @@ | |||
| 5 | obj-$(CONFIG_LOCKD) += lockd.o | 5 | obj-$(CONFIG_LOCKD) += lockd.o |
| 6 | 6 | ||
| 7 | lockd-objs-y := clntlock.o clntproc.o host.o svc.o svclock.o svcshare.o \ | 7 | lockd-objs-y := clntlock.o clntproc.o host.o svc.o svclock.o svcshare.o \ |
| 8 | svcproc.o svcsubs.o mon.o xdr.o | 8 | svcproc.o svcsubs.o mon.o xdr.o grace.o |
| 9 | lockd-objs-$(CONFIG_LOCKD_V4) += xdr4.o svc4proc.o | 9 | lockd-objs-$(CONFIG_LOCKD_V4) += xdr4.o svc4proc.o |
| 10 | lockd-objs := $(lockd-objs-y) | 10 | lockd-objs := $(lockd-objs-y) |
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 0b45fd3a4bfd..8307dd64bf46 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
| @@ -54,14 +54,13 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init) | |||
| 54 | u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4; | 54 | u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4; |
| 55 | int status; | 55 | int status; |
| 56 | 56 | ||
| 57 | status = lockd_up(nlm_init->protocol); | 57 | status = lockd_up(); |
| 58 | if (status < 0) | 58 | if (status < 0) |
| 59 | return ERR_PTR(status); | 59 | return ERR_PTR(status); |
| 60 | 60 | ||
| 61 | host = nlmclnt_lookup_host((struct sockaddr_in *)nlm_init->address, | 61 | host = nlmclnt_lookup_host(nlm_init->address, nlm_init->addrlen, |
| 62 | nlm_init->protocol, nlm_version, | 62 | nlm_init->protocol, nlm_version, |
| 63 | nlm_init->hostname, | 63 | nlm_init->hostname); |
| 64 | strlen(nlm_init->hostname)); | ||
| 65 | if (host == NULL) { | 64 | if (host == NULL) { |
| 66 | lockd_down(); | 65 | lockd_down(); |
| 67 | return ERR_PTR(-ENOLCK); | 66 | return ERR_PTR(-ENOLCK); |
| @@ -142,7 +141,7 @@ int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout) | |||
| 142 | /* | 141 | /* |
| 143 | * The server lockd has called us back to tell us the lock was granted | 142 | * The server lockd has called us back to tell us the lock was granted |
| 144 | */ | 143 | */ |
| 145 | __be32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock) | 144 | __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock) |
| 146 | { | 145 | { |
| 147 | const struct file_lock *fl = &lock->fl; | 146 | const struct file_lock *fl = &lock->fl; |
| 148 | const struct nfs_fh *fh = &lock->fh; | 147 | const struct nfs_fh *fh = &lock->fh; |
| @@ -166,7 +165,7 @@ __be32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock | |||
| 166 | */ | 165 | */ |
| 167 | if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid) | 166 | if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid) |
| 168 | continue; | 167 | continue; |
| 169 | if (!nlm_cmp_addr(&block->b_host->h_addr, addr)) | 168 | if (!nlm_cmp_addr(nlm_addr(block->b_host), addr)) |
| 170 | continue; | 169 | continue; |
| 171 | if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode) ,fh) != 0) | 170 | if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode) ,fh) != 0) |
| 172 | continue; | 171 | continue; |
| @@ -216,7 +215,7 @@ reclaimer(void *ptr) | |||
| 216 | /* This one ensures that our parent doesn't terminate while the | 215 | /* This one ensures that our parent doesn't terminate while the |
| 217 | * reclaim is in progress */ | 216 | * reclaim is in progress */ |
| 218 | lock_kernel(); | 217 | lock_kernel(); |
| 219 | lockd_up(0); /* note: this cannot fail as lockd is already running */ | 218 | lockd_up(); /* note: this cannot fail as lockd is already running */ |
| 220 | 219 | ||
| 221 | dprintk("lockd: reclaiming locks for host %s\n", host->h_name); | 220 | dprintk("lockd: reclaiming locks for host %s\n", host->h_name); |
| 222 | 221 | ||
diff --git a/fs/lockd/grace.c b/fs/lockd/grace.c
new file mode 100644
index 000000000000..183cc1f0af1c
--- /dev/null
+++ b/fs/lockd/grace.c
| @@ -0,0 +1,59 @@ | |||
| 1 | /* | ||
| 2 | * Common code for control of lockd and nfsv4 grace periods. | ||
| 3 | */ | ||
| 4 | |||
| 5 | #include <linux/module.h> | ||
| 6 | #include <linux/lockd/bind.h> | ||
| 7 | |||
| 8 | static LIST_HEAD(grace_list); | ||
| 9 | static DEFINE_SPINLOCK(grace_lock); | ||
| 10 | |||
| 11 | /** | ||
| 12 | * locks_start_grace | ||
| 13 | * @lm: who this grace period is for | ||
| 14 | * | ||
| 15 | * A grace period is a period during which locks should not be given | ||
| 16 | * out. Currently grace periods are only enforced by the two lock | ||
| 17 | * managers (lockd and nfsd), using the locks_in_grace() function to | ||
| 18 | * check when they are in a grace period. | ||
| 19 | * | ||
| 20 | * This function is called to start a grace period. | ||
| 21 | */ | ||
| 22 | void locks_start_grace(struct lock_manager *lm) | ||
| 23 | { | ||
| 24 | spin_lock(&grace_lock); | ||
| 25 | list_add(&lm->list, &grace_list); | ||
| 26 | spin_unlock(&grace_lock); | ||
| 27 | } | ||
| 28 | EXPORT_SYMBOL_GPL(locks_start_grace); | ||
| 29 | |||
| 30 | /** | ||
| 31 | * locks_end_grace | ||
| 32 | * @lm: who this grace period is for | ||
| 33 | * | ||
| 34 | * Call this function to state that the given lock manager is ready to | ||
| 35 | * resume regular locking. The grace period will not end until all lock | ||
| 36 | * managers that called locks_start_grace() also call locks_end_grace(). | ||
| 37 | * Note that callers count on it being safe to call this more than once, | ||
| 38 | * and the second call should be a no-op. | ||
| 39 | */ | ||
| 40 | void locks_end_grace(struct lock_manager *lm) | ||
| 41 | { | ||
| 42 | spin_lock(&grace_lock); | ||
| 43 | list_del_init(&lm->list); | ||
| 44 | spin_unlock(&grace_lock); | ||
| 45 | } | ||
| 46 | EXPORT_SYMBOL_GPL(locks_end_grace); | ||
| 47 | |||
| 48 | /** | ||
| 49 | * locks_in_grace | ||
| 50 | * | ||
| 51 | * Lock managers call this function to determine when it is OK for them | ||
| 52 | * to answer ordinary lock requests, and when they should accept only | ||
| 53 | * lock reclaims. | ||
| 54 | */ | ||
| 55 | int locks_in_grace(void) | ||
| 56 | { | ||
| 57 | return !list_empty(&grace_list); | ||
| 58 | } | ||
| 59 | EXPORT_SYMBOL_GPL(locks_in_grace); | ||
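The three exported helpers above are the whole grace-period interface. As an illustration only (not part of this commit), a hypothetical lock manager would use them roughly as follows; the names my_manager/my_*() and the header chosen are assumptions made for the sketch:

/*
 * Illustrative sketch only -- not code from this commit.
 */
#include <linux/fs.h>		/* assumed home of the declarations */

static struct lock_manager my_manager;

static void my_service_start(void)
{
	/* Refuse ordinary lock requests until clients have reclaimed. */
	locks_start_grace(&my_manager);
}

static void my_service_grace_done(void)
{
	/* Safe to call more than once; a second call is a no-op. */
	locks_end_grace(&my_manager);
}

static int my_handle_lock_request(int reclaim)
{
	/* While any registered manager is in grace, only reclaims pass. */
	if (locks_in_grace() && !reclaim)
		return -EAGAIN;
	return 0;
}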
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index a17664c7eacc..9fd8889097b7 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
| @@ -11,16 +11,17 @@ | |||
| 11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
| 12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
| 13 | #include <linux/in.h> | 13 | #include <linux/in.h> |
| 14 | #include <linux/in6.h> | ||
| 14 | #include <linux/sunrpc/clnt.h> | 15 | #include <linux/sunrpc/clnt.h> |
| 15 | #include <linux/sunrpc/svc.h> | 16 | #include <linux/sunrpc/svc.h> |
| 16 | #include <linux/lockd/lockd.h> | 17 | #include <linux/lockd/lockd.h> |
| 17 | #include <linux/lockd/sm_inter.h> | 18 | #include <linux/lockd/sm_inter.h> |
| 18 | #include <linux/mutex.h> | 19 | #include <linux/mutex.h> |
| 19 | 20 | ||
| 21 | #include <net/ipv6.h> | ||
| 20 | 22 | ||
| 21 | #define NLMDBG_FACILITY NLMDBG_HOSTCACHE | 23 | #define NLMDBG_FACILITY NLMDBG_HOSTCACHE |
| 22 | #define NLM_HOST_NRHASH 32 | 24 | #define NLM_HOST_NRHASH 32 |
| 23 | #define NLM_ADDRHASH(addr) (ntohl(addr) & (NLM_HOST_NRHASH-1)) | ||
| 24 | #define NLM_HOST_REBIND (60 * HZ) | 25 | #define NLM_HOST_REBIND (60 * HZ) |
| 25 | #define NLM_HOST_EXPIRE (300 * HZ) | 26 | #define NLM_HOST_EXPIRE (300 * HZ) |
| 26 | #define NLM_HOST_COLLECT (120 * HZ) | 27 | #define NLM_HOST_COLLECT (120 * HZ) |
| @@ -30,42 +31,115 @@ static unsigned long next_gc; | |||
| 30 | static int nrhosts; | 31 | static int nrhosts; |
| 31 | static DEFINE_MUTEX(nlm_host_mutex); | 32 | static DEFINE_MUTEX(nlm_host_mutex); |
| 32 | 33 | ||
| 33 | |||
| 34 | static void nlm_gc_hosts(void); | 34 | static void nlm_gc_hosts(void); |
| 35 | static struct nsm_handle * __nsm_find(const struct sockaddr_in *, | 35 | static struct nsm_handle *nsm_find(const struct sockaddr *sap, |
| 36 | const char *, unsigned int, int); | 36 | const size_t salen, |
| 37 | static struct nsm_handle * nsm_find(const struct sockaddr_in *sin, | 37 | const char *hostname, |
| 38 | const char *hostname, | 38 | const size_t hostname_len, |
| 39 | unsigned int hostname_len); | 39 | const int create); |
| 40 | |||
| 41 | struct nlm_lookup_host_info { | ||
| 42 | const int server; /* search for server|client */ | ||
| 43 | const struct sockaddr *sap; /* address to search for */ | ||
| 44 | const size_t salen; /* its length */ | ||
| 45 | const unsigned short protocol; /* transport to search for */ | ||
| 46 | const u32 version; /* NLM version to search for */ | ||
| 47 | const char *hostname; /* remote's hostname */ | ||
| 48 | const size_t hostname_len; /* its length */ | ||
| 49 | const struct sockaddr *src_sap; /* our address (optional) */ | ||
| 50 | const size_t src_len; /* its length */ | ||
| 51 | }; | ||
| 52 | |||
| 53 | /* | ||
| 54 | * Hash function must work well on big- and little-endian platforms | ||
| 55 | */ | ||
| 56 | static unsigned int __nlm_hash32(const __be32 n) | ||
| 57 | { | ||
| 58 | unsigned int hash = (__force u32)n ^ ((__force u32)n >> 16); | ||
| 59 | return hash ^ (hash >> 8); | ||
| 60 | } | ||
| 61 | |||
| 62 | static unsigned int __nlm_hash_addr4(const struct sockaddr *sap) | ||
| 63 | { | ||
| 64 | const struct sockaddr_in *sin = (struct sockaddr_in *)sap; | ||
| 65 | return __nlm_hash32(sin->sin_addr.s_addr); | ||
| 66 | } | ||
| 67 | |||
| 68 | static unsigned int __nlm_hash_addr6(const struct sockaddr *sap) | ||
| 69 | { | ||
| 70 | const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; | ||
| 71 | const struct in6_addr addr = sin6->sin6_addr; | ||
| 72 | return __nlm_hash32(addr.s6_addr32[0]) ^ | ||
| 73 | __nlm_hash32(addr.s6_addr32[1]) ^ | ||
| 74 | __nlm_hash32(addr.s6_addr32[2]) ^ | ||
| 75 | __nlm_hash32(addr.s6_addr32[3]); | ||
| 76 | } | ||
| 77 | |||
| 78 | static unsigned int nlm_hash_address(const struct sockaddr *sap) | ||
| 79 | { | ||
| 80 | unsigned int hash; | ||
| 81 | |||
| 82 | switch (sap->sa_family) { | ||
| 83 | case AF_INET: | ||
| 84 | hash = __nlm_hash_addr4(sap); | ||
| 85 | break; | ||
| 86 | case AF_INET6: | ||
| 87 | hash = __nlm_hash_addr6(sap); | ||
| 88 | break; | ||
| 89 | default: | ||
| 90 | hash = 0; | ||
| 91 | } | ||
| 92 | return hash & (NLM_HOST_NRHASH - 1); | ||
| 93 | } | ||
| 94 | |||
| 95 | static void nlm_clear_port(struct sockaddr *sap) | ||
| 96 | { | ||
| 97 | switch (sap->sa_family) { | ||
| 98 | case AF_INET: | ||
| 99 | ((struct sockaddr_in *)sap)->sin_port = 0; | ||
| 100 | break; | ||
| 101 | case AF_INET6: | ||
| 102 | ((struct sockaddr_in6 *)sap)->sin6_port = 0; | ||
| 103 | break; | ||
| 104 | } | ||
| 105 | } | ||
| 106 | |||
| 107 | static void nlm_display_address(const struct sockaddr *sap, | ||
| 108 | char *buf, const size_t len) | ||
| 109 | { | ||
| 110 | const struct sockaddr_in *sin = (struct sockaddr_in *)sap; | ||
| 111 | const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; | ||
| 112 | |||
| 113 | switch (sap->sa_family) { | ||
| 114 | case AF_UNSPEC: | ||
| 115 | snprintf(buf, len, "unspecified"); | ||
| 116 | break; | ||
| 117 | case AF_INET: | ||
| 118 | snprintf(buf, len, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr)); | ||
| 119 | break; | ||
| 120 | case AF_INET6: | ||
| 121 | if (ipv6_addr_v4mapped(&sin6->sin6_addr)) | ||
| 122 | snprintf(buf, len, NIPQUAD_FMT, | ||
| 123 | NIPQUAD(sin6->sin6_addr.s6_addr32[3])); | ||
| 124 | else | ||
| 125 | snprintf(buf, len, NIP6_FMT, NIP6(sin6->sin6_addr)); | ||
| 126 | break; | ||
| 127 | default: | ||
| 128 | snprintf(buf, len, "unsupported address family"); | ||
| 129 | break; | ||
| 130 | } | ||
| 131 | } | ||
| 40 | 132 | ||
| 41 | /* | 133 | /* |
| 42 | * Common host lookup routine for server & client | 134 | * Common host lookup routine for server & client |
| 43 | */ | 135 | */ |
| 44 | static struct nlm_host *nlm_lookup_host(int server, | 136 | static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni) |
| 45 | const struct sockaddr_in *sin, | ||
| 46 | int proto, u32 version, | ||
| 47 | const char *hostname, | ||
| 48 | unsigned int hostname_len, | ||
| 49 | const struct sockaddr_in *ssin) | ||
| 50 | { | 137 | { |
| 51 | struct hlist_head *chain; | 138 | struct hlist_head *chain; |
| 52 | struct hlist_node *pos; | 139 | struct hlist_node *pos; |
| 53 | struct nlm_host *host; | 140 | struct nlm_host *host; |
| 54 | struct nsm_handle *nsm = NULL; | 141 | struct nsm_handle *nsm = NULL; |
| 55 | int hash; | ||
| 56 | |||
| 57 | dprintk("lockd: nlm_lookup_host("NIPQUAD_FMT"->"NIPQUAD_FMT | ||
| 58 | ", p=%d, v=%u, my role=%s, name=%.*s)\n", | ||
| 59 | NIPQUAD(ssin->sin_addr.s_addr), | ||
| 60 | NIPQUAD(sin->sin_addr.s_addr), proto, version, | ||
| 61 | server? "server" : "client", | ||
| 62 | hostname_len, | ||
| 63 | hostname? hostname : "<none>"); | ||
| 64 | 142 | ||
| 65 | |||
| 66 | hash = NLM_ADDRHASH(sin->sin_addr.s_addr); | ||
| 67 | |||
| 68 | /* Lock hash table */ | ||
| 69 | mutex_lock(&nlm_host_mutex); | 143 | mutex_lock(&nlm_host_mutex); |
| 70 | 144 | ||
| 71 | if (time_after_eq(jiffies, next_gc)) | 145 | if (time_after_eq(jiffies, next_gc)) |
| @@ -78,22 +152,22 @@ static struct nlm_host *nlm_lookup_host(int server, | |||
| 78 | * different NLM rpc_clients into one single nlm_host object. | 152 | * different NLM rpc_clients into one single nlm_host object. |
| 79 | * This would allow us to have one nlm_host per address. | 153 | * This would allow us to have one nlm_host per address. |
| 80 | */ | 154 | */ |
| 81 | chain = &nlm_hosts[hash]; | 155 | chain = &nlm_hosts[nlm_hash_address(ni->sap)]; |
| 82 | hlist_for_each_entry(host, pos, chain, h_hash) { | 156 | hlist_for_each_entry(host, pos, chain, h_hash) { |
| 83 | if (!nlm_cmp_addr(&host->h_addr, sin)) | 157 | if (!nlm_cmp_addr(nlm_addr(host), ni->sap)) |
| 84 | continue; | 158 | continue; |
| 85 | 159 | ||
| 86 | /* See if we have an NSM handle for this client */ | 160 | /* See if we have an NSM handle for this client */ |
| 87 | if (!nsm) | 161 | if (!nsm) |
| 88 | nsm = host->h_nsmhandle; | 162 | nsm = host->h_nsmhandle; |
| 89 | 163 | ||
| 90 | if (host->h_proto != proto) | 164 | if (host->h_proto != ni->protocol) |
| 91 | continue; | 165 | continue; |
| 92 | if (host->h_version != version) | 166 | if (host->h_version != ni->version) |
| 93 | continue; | 167 | continue; |
| 94 | if (host->h_server != server) | 168 | if (host->h_server != ni->server) |
| 95 | continue; | 169 | continue; |
| 96 | if (!nlm_cmp_addr(&host->h_saddr, ssin)) | 170 | if (!nlm_cmp_addr(nlm_srcaddr(host), ni->src_sap)) |
| 97 | continue; | 171 | continue; |
| 98 | 172 | ||
| 99 | /* Move to head of hash chain. */ | 173 | /* Move to head of hash chain. */ |
| @@ -101,30 +175,41 @@ static struct nlm_host *nlm_lookup_host(int server, | |||
| 101 | hlist_add_head(&host->h_hash, chain); | 175 | hlist_add_head(&host->h_hash, chain); |
| 102 | 176 | ||
| 103 | nlm_get_host(host); | 177 | nlm_get_host(host); |
| 178 | dprintk("lockd: nlm_lookup_host found host %s (%s)\n", | ||
| 179 | host->h_name, host->h_addrbuf); | ||
| 104 | goto out; | 180 | goto out; |
| 105 | } | 181 | } |
| 106 | if (nsm) | ||
| 107 | atomic_inc(&nsm->sm_count); | ||
| 108 | |||
| 109 | host = NULL; | ||
| 110 | 182 | ||
| 111 | /* Sadly, the host isn't in our hash table yet. See if | 183 | /* |
| 112 | * we have an NSM handle for it. If not, create one. | 184 | * The host wasn't in our hash table. If we don't |
| 185 | * have an NSM handle for it yet, create one. | ||
| 113 | */ | 186 | */ |
| 114 | if (!nsm && !(nsm = nsm_find(sin, hostname, hostname_len))) | 187 | if (nsm) |
| 115 | goto out; | 188 | atomic_inc(&nsm->sm_count); |
| 189 | else { | ||
| 190 | host = NULL; | ||
| 191 | nsm = nsm_find(ni->sap, ni->salen, | ||
| 192 | ni->hostname, ni->hostname_len, 1); | ||
| 193 | if (!nsm) { | ||
| 194 | dprintk("lockd: nlm_lookup_host failed; " | ||
| 195 | "no nsm handle\n"); | ||
| 196 | goto out; | ||
| 197 | } | ||
| 198 | } | ||
| 116 | 199 | ||
| 117 | host = kzalloc(sizeof(*host), GFP_KERNEL); | 200 | host = kzalloc(sizeof(*host), GFP_KERNEL); |
| 118 | if (!host) { | 201 | if (!host) { |
| 119 | nsm_release(nsm); | 202 | nsm_release(nsm); |
| 203 | dprintk("lockd: nlm_lookup_host failed; no memory\n"); | ||
| 120 | goto out; | 204 | goto out; |
| 121 | } | 205 | } |
| 122 | host->h_name = nsm->sm_name; | 206 | host->h_name = nsm->sm_name; |
| 123 | host->h_addr = *sin; | 207 | memcpy(nlm_addr(host), ni->sap, ni->salen); |
| 124 | host->h_addr.sin_port = 0; /* ouch! */ | 208 | host->h_addrlen = ni->salen; |
| 125 | host->h_saddr = *ssin; | 209 | nlm_clear_port(nlm_addr(host)); |
| 126 | host->h_version = version; | 210 | memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len); |
| 127 | host->h_proto = proto; | 211 | host->h_version = ni->version; |
| 212 | host->h_proto = ni->protocol; | ||
| 128 | host->h_rpcclnt = NULL; | 213 | host->h_rpcclnt = NULL; |
| 129 | mutex_init(&host->h_mutex); | 214 | mutex_init(&host->h_mutex); |
| 130 | host->h_nextrebind = jiffies + NLM_HOST_REBIND; | 215 | host->h_nextrebind = jiffies + NLM_HOST_REBIND; |
| @@ -135,7 +220,7 @@ static struct nlm_host *nlm_lookup_host(int server, | |||
| 135 | host->h_state = 0; /* pseudo NSM state */ | 220 | host->h_state = 0; /* pseudo NSM state */ |
| 136 | host->h_nsmstate = 0; /* real NSM state */ | 221 | host->h_nsmstate = 0; /* real NSM state */ |
| 137 | host->h_nsmhandle = nsm; | 222 | host->h_nsmhandle = nsm; |
| 138 | host->h_server = server; | 223 | host->h_server = ni->server; |
| 139 | hlist_add_head(&host->h_hash, chain); | 224 | hlist_add_head(&host->h_hash, chain); |
| 140 | INIT_LIST_HEAD(&host->h_lockowners); | 225 | INIT_LIST_HEAD(&host->h_lockowners); |
| 141 | spin_lock_init(&host->h_lock); | 226 | spin_lock_init(&host->h_lock); |
| @@ -143,6 +228,15 @@ static struct nlm_host *nlm_lookup_host(int server, | |||
| 143 | INIT_LIST_HEAD(&host->h_reclaim); | 228 | INIT_LIST_HEAD(&host->h_reclaim); |
| 144 | 229 | ||
| 145 | nrhosts++; | 230 | nrhosts++; |
| 231 | |||
| 232 | nlm_display_address((struct sockaddr *)&host->h_addr, | ||
| 233 | host->h_addrbuf, sizeof(host->h_addrbuf)); | ||
| 234 | nlm_display_address((struct sockaddr *)&host->h_srcaddr, | ||
| 235 | host->h_srcaddrbuf, sizeof(host->h_srcaddrbuf)); | ||
| 236 | |||
| 237 | dprintk("lockd: nlm_lookup_host created host %s\n", | ||
| 238 | host->h_name); | ||
| 239 | |||
| 146 | out: | 240 | out: |
| 147 | mutex_unlock(&nlm_host_mutex); | 241 | mutex_unlock(&nlm_host_mutex); |
| 148 | return host; | 242 | return host; |
| @@ -170,33 +264,103 @@ nlm_destroy_host(struct nlm_host *host) | |||
| 170 | kfree(host); | 264 | kfree(host); |
| 171 | } | 265 | } |
| 172 | 266 | ||
| 173 | /* | 267 | /** |
| 174 | * Find an NLM server handle in the cache. If there is none, create it. | 268 | * nlmclnt_lookup_host - Find an NLM host handle matching a remote server |
| 269 | * @sap: network address of server | ||
| 270 | * @salen: length of server address | ||
| 271 | * @protocol: transport protocol to use | ||
| 272 | * @version: NLM protocol version | ||
| 273 | * @hostname: '\0'-terminated hostname of server | ||
| 274 | * | ||
| 275 | * Returns an nlm_host structure that matches the passed-in | ||
| 276 | * [server address, transport protocol, NLM version, server hostname]. | ||
| 277 | * If one doesn't already exist in the host cache, a new handle is | ||
| 278 | * created and returned. | ||
| 175 | */ | 279 | */ |
| 176 | struct nlm_host *nlmclnt_lookup_host(const struct sockaddr_in *sin, | 280 | struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, |
| 177 | int proto, u32 version, | 281 | const size_t salen, |
| 178 | const char *hostname, | 282 | const unsigned short protocol, |
| 179 | unsigned int hostname_len) | 283 | const u32 version, const char *hostname) |
| 180 | { | 284 | { |
| 181 | struct sockaddr_in ssin = {0}; | 285 | const struct sockaddr source = { |
| 182 | 286 | .sa_family = AF_UNSPEC, | |
| 183 | return nlm_lookup_host(0, sin, proto, version, | 287 | }; |
| 184 | hostname, hostname_len, &ssin); | 288 | struct nlm_lookup_host_info ni = { |
| 289 | .server = 0, | ||
| 290 | .sap = sap, | ||
| 291 | .salen = salen, | ||
| 292 | .protocol = protocol, | ||
| 293 | .version = version, | ||
| 294 | .hostname = hostname, | ||
| 295 | .hostname_len = strlen(hostname), | ||
| 296 | .src_sap = &source, | ||
| 297 | .src_len = sizeof(source), | ||
| 298 | }; | ||
| 299 | |||
| 300 | dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__, | ||
| 301 | (hostname ? hostname : "<none>"), version, | ||
| 302 | (protocol == IPPROTO_UDP ? "udp" : "tcp")); | ||
| 303 | |||
| 304 | return nlm_lookup_host(&ni); | ||
| 185 | } | 305 | } |
| 186 | 306 | ||
| 187 | /* | 307 | /** |
| 188 | * Find an NLM client handle in the cache. If there is none, create it. | 308 | * nlmsvc_lookup_host - Find an NLM host handle matching a remote client |
| 309 | * @rqstp: incoming NLM request | ||
| 310 | * @hostname: name of client host | ||
| 311 | * @hostname_len: length of client hostname | ||
| 312 | * | ||
| 313 | * Returns an nlm_host structure that matches the [client address, | ||
| 314 | * transport protocol, NLM version, client hostname] of the passed-in | ||
| 315 | * NLM request. If one doesn't already exist in the host cache, a | ||
| 316 | * new handle is created and returned. | ||
| 317 | * | ||
| 318 | * Before possibly creating a new nlm_host, construct a sockaddr | ||
| 319 | * for a specific source address in case the local system has | ||
| 320 | * multiple network addresses. The family of the address in | ||
| 321 | * rq_daddr is guaranteed to be the same as the family of the | ||
| 322 | * address in rq_addr, so it's safe to use the same family for | ||
| 323 | * the source address. | ||
| 189 | */ | 324 | */ |
| 190 | struct nlm_host * | 325 | struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, |
| 191 | nlmsvc_lookup_host(struct svc_rqst *rqstp, | 326 | const char *hostname, |
| 192 | const char *hostname, unsigned int hostname_len) | 327 | const size_t hostname_len) |
| 193 | { | 328 | { |
| 194 | struct sockaddr_in ssin = {0}; | 329 | struct sockaddr_in sin = { |
| 330 | .sin_family = AF_INET, | ||
| 331 | }; | ||
| 332 | struct sockaddr_in6 sin6 = { | ||
| 333 | .sin6_family = AF_INET6, | ||
| 334 | }; | ||
| 335 | struct nlm_lookup_host_info ni = { | ||
| 336 | .server = 1, | ||
| 337 | .sap = svc_addr(rqstp), | ||
| 338 | .salen = rqstp->rq_addrlen, | ||
| 339 | .protocol = rqstp->rq_prot, | ||
| 340 | .version = rqstp->rq_vers, | ||
| 341 | .hostname = hostname, | ||
| 342 | .hostname_len = hostname_len, | ||
| 343 | .src_len = rqstp->rq_addrlen, | ||
| 344 | }; | ||
| 345 | |||
| 346 | dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__, | ||
| 347 | (int)hostname_len, hostname, rqstp->rq_vers, | ||
| 348 | (rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp")); | ||
| 349 | |||
| 350 | switch (ni.sap->sa_family) { | ||
| 351 | case AF_INET: | ||
| 352 | sin.sin_addr.s_addr = rqstp->rq_daddr.addr.s_addr; | ||
| 353 | ni.src_sap = (struct sockaddr *)&sin; | ||
| 354 | break; | ||
| 355 | case AF_INET6: | ||
| 356 | ipv6_addr_copy(&sin6.sin6_addr, &rqstp->rq_daddr.addr6); | ||
| 357 | ni.src_sap = (struct sockaddr *)&sin6; | ||
| 358 | break; | ||
| 359 | default: | ||
| 360 | return NULL; | ||
| 361 | } | ||
| 195 | 362 | ||
| 196 | ssin.sin_addr = rqstp->rq_daddr.addr; | 363 | return nlm_lookup_host(&ni); |
| 197 | return nlm_lookup_host(1, svc_addr_in(rqstp), | ||
| 198 | rqstp->rq_prot, rqstp->rq_vers, | ||
| 199 | hostname, hostname_len, &ssin); | ||
| 200 | } | 364 | } |
| 201 | 365 | ||
| 202 | /* | 366 | /* |
| @@ -207,9 +371,8 @@ nlm_bind_host(struct nlm_host *host) | |||
| 207 | { | 371 | { |
| 208 | struct rpc_clnt *clnt; | 372 | struct rpc_clnt *clnt; |
| 209 | 373 | ||
| 210 | dprintk("lockd: nlm_bind_host("NIPQUAD_FMT"->"NIPQUAD_FMT")\n", | 374 | dprintk("lockd: nlm_bind_host %s (%s), my addr=%s\n", |
| 211 | NIPQUAD(host->h_saddr.sin_addr), | 375 | host->h_name, host->h_addrbuf, host->h_srcaddrbuf); |
| 212 | NIPQUAD(host->h_addr.sin_addr)); | ||
| 213 | 376 | ||
| 214 | /* Lock host handle */ | 377 | /* Lock host handle */ |
| 215 | mutex_lock(&host->h_mutex); | 378 | mutex_lock(&host->h_mutex); |
| @@ -221,7 +384,7 @@ nlm_bind_host(struct nlm_host *host) | |||
| 221 | if (time_after_eq(jiffies, host->h_nextrebind)) { | 384 | if (time_after_eq(jiffies, host->h_nextrebind)) { |
| 222 | rpc_force_rebind(clnt); | 385 | rpc_force_rebind(clnt); |
| 223 | host->h_nextrebind = jiffies + NLM_HOST_REBIND; | 386 | host->h_nextrebind = jiffies + NLM_HOST_REBIND; |
| 224 | dprintk("lockd: next rebind in %ld jiffies\n", | 387 | dprintk("lockd: next rebind in %lu jiffies\n", |
| 225 | host->h_nextrebind - jiffies); | 388 | host->h_nextrebind - jiffies); |
| 226 | } | 389 | } |
| 227 | } else { | 390 | } else { |
| @@ -234,9 +397,9 @@ nlm_bind_host(struct nlm_host *host) | |||
| 234 | }; | 397 | }; |
| 235 | struct rpc_create_args args = { | 398 | struct rpc_create_args args = { |
| 236 | .protocol = host->h_proto, | 399 | .protocol = host->h_proto, |
| 237 | .address = (struct sockaddr *)&host->h_addr, | 400 | .address = nlm_addr(host), |
| 238 | .addrsize = sizeof(host->h_addr), | 401 | .addrsize = host->h_addrlen, |
| 239 | .saddress = (struct sockaddr *)&host->h_saddr, | 402 | .saddress = nlm_srcaddr(host), |
| 240 | .timeout = &timeparms, | 403 | .timeout = &timeparms, |
| 241 | .servername = host->h_name, | 404 | .servername = host->h_name, |
| 242 | .program = &nlm_program, | 405 | .program = &nlm_program, |
| @@ -324,12 +487,16 @@ void nlm_host_rebooted(const struct sockaddr_in *sin, | |||
| 324 | struct nsm_handle *nsm; | 487 | struct nsm_handle *nsm; |
| 325 | struct nlm_host *host; | 488 | struct nlm_host *host; |
| 326 | 489 | ||
| 327 | dprintk("lockd: nlm_host_rebooted(%s, %u.%u.%u.%u)\n", | 490 | nsm = nsm_find((struct sockaddr *)sin, sizeof(*sin), |
| 328 | hostname, NIPQUAD(sin->sin_addr)); | 491 | hostname, hostname_len, 0); |
| 329 | 492 | if (nsm == NULL) { | |
| 330 | /* Find the NSM handle for this peer */ | 493 | dprintk("lockd: never saw rebooted peer '%.*s' before\n", |
| 331 | if (!(nsm = __nsm_find(sin, hostname, hostname_len, 0))) | 494 | hostname_len, hostname); |
| 332 | return; | 495 | return; |
| 496 | } | ||
| 497 | |||
| 498 | dprintk("lockd: nlm_host_rebooted(%.*s, %s)\n", | ||
| 499 | hostname_len, hostname, nsm->sm_addrbuf); | ||
| 333 | 500 | ||
| 334 | /* When reclaiming locks on this peer, make sure that | 501 | /* When reclaiming locks on this peer, make sure that |
| 335 | * we set up a new notification */ | 502 | * we set up a new notification */ |
| @@ -461,22 +628,23 @@ nlm_gc_hosts(void) | |||
| 461 | static LIST_HEAD(nsm_handles); | 628 | static LIST_HEAD(nsm_handles); |
| 462 | static DEFINE_SPINLOCK(nsm_lock); | 629 | static DEFINE_SPINLOCK(nsm_lock); |
| 463 | 630 | ||
| 464 | static struct nsm_handle * | 631 | static struct nsm_handle *nsm_find(const struct sockaddr *sap, |
| 465 | __nsm_find(const struct sockaddr_in *sin, | 632 | const size_t salen, |
| 466 | const char *hostname, unsigned int hostname_len, | 633 | const char *hostname, |
| 467 | int create) | 634 | const size_t hostname_len, |
| 635 | const int create) | ||
| 468 | { | 636 | { |
| 469 | struct nsm_handle *nsm = NULL; | 637 | struct nsm_handle *nsm = NULL; |
| 470 | struct nsm_handle *pos; | 638 | struct nsm_handle *pos; |
| 471 | 639 | ||
| 472 | if (!sin) | 640 | if (!sap) |
| 473 | return NULL; | 641 | return NULL; |
| 474 | 642 | ||
| 475 | if (hostname && memchr(hostname, '/', hostname_len) != NULL) { | 643 | if (hostname && memchr(hostname, '/', hostname_len) != NULL) { |
| 476 | if (printk_ratelimit()) { | 644 | if (printk_ratelimit()) { |
| 477 | printk(KERN_WARNING "Invalid hostname \"%.*s\" " | 645 | printk(KERN_WARNING "Invalid hostname \"%.*s\" " |
| 478 | "in NFS lock request\n", | 646 | "in NFS lock request\n", |
| 479 | hostname_len, hostname); | 647 | (int)hostname_len, hostname); |
| 480 | } | 648 | } |
| 481 | return NULL; | 649 | return NULL; |
| 482 | } | 650 | } |
| @@ -489,7 +657,7 @@ retry: | |||
| 489 | if (strlen(pos->sm_name) != hostname_len | 657 | if (strlen(pos->sm_name) != hostname_len |
| 490 | || memcmp(pos->sm_name, hostname, hostname_len)) | 658 | || memcmp(pos->sm_name, hostname, hostname_len)) |
| 491 | continue; | 659 | continue; |
| 492 | } else if (!nlm_cmp_addr(&pos->sm_addr, sin)) | 660 | } else if (!nlm_cmp_addr(nsm_addr(pos), sap)) |
| 493 | continue; | 661 | continue; |
| 494 | atomic_inc(&pos->sm_count); | 662 | atomic_inc(&pos->sm_count); |
| 495 | kfree(nsm); | 663 | kfree(nsm); |
| @@ -509,10 +677,13 @@ retry: | |||
| 509 | if (nsm == NULL) | 677 | if (nsm == NULL) |
| 510 | return NULL; | 678 | return NULL; |
| 511 | 679 | ||
| 512 | nsm->sm_addr = *sin; | 680 | memcpy(nsm_addr(nsm), sap, salen); |
| 681 | nsm->sm_addrlen = salen; | ||
| 513 | nsm->sm_name = (char *) (nsm + 1); | 682 | nsm->sm_name = (char *) (nsm + 1); |
| 514 | memcpy(nsm->sm_name, hostname, hostname_len); | 683 | memcpy(nsm->sm_name, hostname, hostname_len); |
| 515 | nsm->sm_name[hostname_len] = '\0'; | 684 | nsm->sm_name[hostname_len] = '\0'; |
| 685 | nlm_display_address((struct sockaddr *)&nsm->sm_addr, | ||
| 686 | nsm->sm_addrbuf, sizeof(nsm->sm_addrbuf)); | ||
| 516 | atomic_set(&nsm->sm_count, 1); | 687 | atomic_set(&nsm->sm_count, 1); |
| 517 | goto retry; | 688 | goto retry; |
| 518 | 689 | ||
| @@ -521,13 +692,6 @@ found: | |||
| 521 | return nsm; | 692 | return nsm; |
| 522 | } | 693 | } |
| 523 | 694 | ||
| 524 | static struct nsm_handle * | ||
| 525 | nsm_find(const struct sockaddr_in *sin, const char *hostname, | ||
| 526 | unsigned int hostname_len) | ||
| 527 | { | ||
| 528 | return __nsm_find(sin, hostname, hostname_len, 1); | ||
| 529 | } | ||
| 530 | |||
| 531 | /* | 695 | /* |
| 532 | * Release an NSM handle | 696 | * Release an NSM handle |
| 533 | */ | 697 | */ |
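With the host cache now keyed by generic socket addresses, host.c can track AF_INET6 peers as well. As a hedged illustration (the address, variable names, and NLM version are assumptions, not code from this commit), a client-side lookup of an NLMv4 server over IPv6 would look roughly like this:

/* Illustrative sketch only -- exercises the new nlmclnt_lookup_host() signature. */
struct sockaddr_in6 server = {
	.sin6_family	= AF_INET6,
	.sin6_addr	= IN6ADDR_LOOPBACK_INIT,	/* placeholder address */
};
struct nlm_host *host;

host = nlmclnt_lookup_host((struct sockaddr *)&server, sizeof(server),
			   IPPROTO_TCP, 4, "example-server");
if (host == NULL) {
	/* lookup or allocation failed; callers typically return -ENOLCK */
}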
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index e4d563543b11..4e7e958e8f67 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
| @@ -51,7 +51,7 @@ nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res) | |||
| 51 | 51 | ||
| 52 | memset(&args, 0, sizeof(args)); | 52 | memset(&args, 0, sizeof(args)); |
| 53 | args.mon_name = nsm->sm_name; | 53 | args.mon_name = nsm->sm_name; |
| 54 | args.addr = nsm->sm_addr.sin_addr.s_addr; | 54 | args.addr = nsm_addr_in(nsm)->sin_addr.s_addr; |
| 55 | args.prog = NLM_PROGRAM; | 55 | args.prog = NLM_PROGRAM; |
| 56 | args.vers = 3; | 56 | args.vers = 3; |
| 57 | args.proc = NLMPROC_NSM_NOTIFY; | 57 | args.proc = NLMPROC_NSM_NOTIFY; |
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 5bd9bf0fa9df..c631a83931ce 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
| @@ -51,7 +51,6 @@ static DEFINE_MUTEX(nlmsvc_mutex); | |||
| 51 | static unsigned int nlmsvc_users; | 51 | static unsigned int nlmsvc_users; |
| 52 | static struct task_struct *nlmsvc_task; | 52 | static struct task_struct *nlmsvc_task; |
| 53 | static struct svc_rqst *nlmsvc_rqst; | 53 | static struct svc_rqst *nlmsvc_rqst; |
| 54 | int nlmsvc_grace_period; | ||
| 55 | unsigned long nlmsvc_timeout; | 54 | unsigned long nlmsvc_timeout; |
| 56 | 55 | ||
| 57 | /* | 56 | /* |
| @@ -85,27 +84,23 @@ static unsigned long get_lockd_grace_period(void) | |||
| 85 | return nlm_timeout * 5 * HZ; | 84 | return nlm_timeout * 5 * HZ; |
| 86 | } | 85 | } |
| 87 | 86 | ||
| 88 | unsigned long get_nfs_grace_period(void) | 87 | static struct lock_manager lockd_manager = { |
| 89 | { | 88 | }; |
| 90 | unsigned long lockdgrace = get_lockd_grace_period(); | ||
| 91 | unsigned long nfsdgrace = 0; | ||
| 92 | |||
| 93 | if (nlmsvc_ops) | ||
| 94 | nfsdgrace = nlmsvc_ops->get_grace_period(); | ||
| 95 | |||
| 96 | return max(lockdgrace, nfsdgrace); | ||
| 97 | } | ||
| 98 | EXPORT_SYMBOL(get_nfs_grace_period); | ||
| 99 | 89 | ||
| 100 | static unsigned long set_grace_period(void) | 90 | static void grace_ender(struct work_struct *not_used) |
| 101 | { | 91 | { |
| 102 | nlmsvc_grace_period = 1; | 92 | locks_end_grace(&lockd_manager); |
| 103 | return get_nfs_grace_period() + jiffies; | ||
| 104 | } | 93 | } |
| 105 | 94 | ||
| 106 | static inline void clear_grace_period(void) | 95 | static DECLARE_DELAYED_WORK(grace_period_end, grace_ender); |
| 96 | |||
| 97 | static void set_grace_period(void) | ||
| 107 | { | 98 | { |
| 108 | nlmsvc_grace_period = 0; | 99 | unsigned long grace_period = get_lockd_grace_period(); |
| 100 | |||
| 101 | locks_start_grace(&lockd_manager); | ||
| 102 | cancel_delayed_work_sync(&grace_period_end); | ||
| 103 | schedule_delayed_work(&grace_period_end, grace_period); | ||
| 109 | } | 104 | } |
| 110 | 105 | ||
| 111 | /* | 106 | /* |
| @@ -116,7 +111,6 @@ lockd(void *vrqstp) | |||
| 116 | { | 111 | { |
| 117 | int err = 0, preverr = 0; | 112 | int err = 0, preverr = 0; |
| 118 | struct svc_rqst *rqstp = vrqstp; | 113 | struct svc_rqst *rqstp = vrqstp; |
| 119 | unsigned long grace_period_expire; | ||
| 120 | 114 | ||
| 121 | /* try_to_freeze() is called from svc_recv() */ | 115 | /* try_to_freeze() is called from svc_recv() */ |
| 122 | set_freezable(); | 116 | set_freezable(); |
| @@ -139,7 +133,7 @@ lockd(void *vrqstp) | |||
| 139 | nlm_timeout = LOCKD_DFLT_TIMEO; | 133 | nlm_timeout = LOCKD_DFLT_TIMEO; |
| 140 | nlmsvc_timeout = nlm_timeout * HZ; | 134 | nlmsvc_timeout = nlm_timeout * HZ; |
| 141 | 135 | ||
| 142 | grace_period_expire = set_grace_period(); | 136 | set_grace_period(); |
| 143 | 137 | ||
| 144 | /* | 138 | /* |
| 145 | * The main request loop. We don't terminate until the last | 139 | * The main request loop. We don't terminate until the last |
| @@ -153,21 +147,12 @@ lockd(void *vrqstp) | |||
| 153 | flush_signals(current); | 147 | flush_signals(current); |
| 154 | if (nlmsvc_ops) { | 148 | if (nlmsvc_ops) { |
| 155 | nlmsvc_invalidate_all(); | 149 | nlmsvc_invalidate_all(); |
| 156 | grace_period_expire = set_grace_period(); | 150 | set_grace_period(); |
| 157 | } | 151 | } |
| 158 | continue; | 152 | continue; |
| 159 | } | 153 | } |
| 160 | 154 | ||
| 161 | /* | 155 | timeout = nlmsvc_retry_blocked(); |
| 162 | * Retry any blocked locks that have been notified by | ||
| 163 | * the VFS. Don't do this during grace period. | ||
| 164 | * (Theoretically, there shouldn't even be blocked locks | ||
| 165 | * during grace period). | ||
| 166 | */ | ||
| 167 | if (!nlmsvc_grace_period) { | ||
| 168 | timeout = nlmsvc_retry_blocked(); | ||
| 169 | } else if (time_before(grace_period_expire, jiffies)) | ||
| 170 | clear_grace_period(); | ||
| 171 | 156 | ||
| 172 | /* | 157 | /* |
| 173 | * Find a socket with data available and call its | 158 | * Find a socket with data available and call its |
| @@ -195,6 +180,7 @@ lockd(void *vrqstp) | |||
| 195 | svc_process(rqstp); | 180 | svc_process(rqstp); |
| 196 | } | 181 | } |
| 197 | flush_signals(current); | 182 | flush_signals(current); |
| 183 | cancel_delayed_work_sync(&grace_period_end); | ||
| 198 | if (nlmsvc_ops) | 184 | if (nlmsvc_ops) |
| 199 | nlmsvc_invalidate_all(); | 185 | nlmsvc_invalidate_all(); |
| 200 | nlm_shutdown_hosts(); | 186 | nlm_shutdown_hosts(); |
| @@ -203,25 +189,28 @@ lockd(void *vrqstp) | |||
| 203 | } | 189 | } |
| 204 | 190 | ||
| 205 | /* | 191 | /* |
| 206 | * Make any sockets that are needed but not present. | 192 | * Ensure there are active UDP and TCP listeners for lockd. |
| 207 | * If nlm_udpport or nlm_tcpport were set as module | 193 | * |
| 208 | * options, make those sockets unconditionally | 194 | * Even if we have only TCP NFS mounts and/or TCP NFSDs, some |
| 195 | * local services (such as rpc.statd) still require UDP, and | ||
| 196 | * some NFS servers do not yet support NLM over TCP. | ||
| 197 | * | ||
| 198 | * Returns zero if all listeners are available; otherwise a | ||
| 199 | * negative errno value is returned. | ||
| 209 | */ | 200 | */ |
| 210 | static int make_socks(struct svc_serv *serv, int proto) | 201 | static int make_socks(struct svc_serv *serv) |
| 211 | { | 202 | { |
| 212 | static int warned; | 203 | static int warned; |
| 213 | struct svc_xprt *xprt; | 204 | struct svc_xprt *xprt; |
| 214 | int err = 0; | 205 | int err = 0; |
| 215 | 206 | ||
| 216 | if (proto == IPPROTO_UDP || nlm_udpport) { | 207 | xprt = svc_find_xprt(serv, "udp", 0, 0); |
| 217 | xprt = svc_find_xprt(serv, "udp", 0, 0); | 208 | if (!xprt) |
| 218 | if (!xprt) | 209 | err = svc_create_xprt(serv, "udp", nlm_udpport, |
| 219 | err = svc_create_xprt(serv, "udp", nlm_udpport, | 210 | SVC_SOCK_DEFAULTS); |
| 220 | SVC_SOCK_DEFAULTS); | 211 | else |
| 221 | else | 212 | svc_xprt_put(xprt); |
| 222 | svc_xprt_put(xprt); | 213 | if (err >= 0) { |
| 223 | } | ||
| 224 | if (err >= 0 && (proto == IPPROTO_TCP || nlm_tcpport)) { | ||
| 225 | xprt = svc_find_xprt(serv, "tcp", 0, 0); | 214 | xprt = svc_find_xprt(serv, "tcp", 0, 0); |
| 226 | if (!xprt) | 215 | if (!xprt) |
| 227 | err = svc_create_xprt(serv, "tcp", nlm_tcpport, | 216 | err = svc_create_xprt(serv, "tcp", nlm_tcpport, |
| @@ -241,8 +230,7 @@ static int make_socks(struct svc_serv *serv, int proto) | |||
| 241 | /* | 230 | /* |
| 242 | * Bring up the lockd process if it's not already up. | 231 | * Bring up the lockd process if it's not already up. |
| 243 | */ | 232 | */ |
| 244 | int | 233 | int lockd_up(void) |
| 245 | lockd_up(int proto) /* Maybe add a 'family' option when IPv6 is supported ?? */ | ||
| 246 | { | 234 | { |
| 247 | struct svc_serv *serv; | 235 | struct svc_serv *serv; |
| 248 | int error = 0; | 236 | int error = 0; |
| @@ -251,11 +239,8 @@ lockd_up(int proto) /* Maybe add a 'family' option when IPv6 is supported ?? */ | |||
| 251 | /* | 239 | /* |
| 252 | * Check whether we're already up and running. | 240 | * Check whether we're already up and running. |
| 253 | */ | 241 | */ |
| 254 | if (nlmsvc_rqst) { | 242 | if (nlmsvc_rqst) |
| 255 | if (proto) | ||
| 256 | error = make_socks(nlmsvc_rqst->rq_server, proto); | ||
| 257 | goto out; | 243 | goto out; |
| 258 | } | ||
| 259 | 244 | ||
| 260 | /* | 245 | /* |
| 261 | * Sanity check: if there's no pid, | 246 | * Sanity check: if there's no pid, |
| @@ -266,13 +251,14 @@ lockd_up(int proto) /* Maybe add a 'family' option when IPv6 is supported ?? */ | |||
| 266 | "lockd_up: no pid, %d users??\n", nlmsvc_users); | 251 | "lockd_up: no pid, %d users??\n", nlmsvc_users); |
| 267 | 252 | ||
| 268 | error = -ENOMEM; | 253 | error = -ENOMEM; |
| 269 | serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL); | 254 | serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, AF_INET, NULL); |
| 270 | if (!serv) { | 255 | if (!serv) { |
| 271 | printk(KERN_WARNING "lockd_up: create service failed\n"); | 256 | printk(KERN_WARNING "lockd_up: create service failed\n"); |
| 272 | goto out; | 257 | goto out; |
| 273 | } | 258 | } |
| 274 | 259 | ||
| 275 | if ((error = make_socks(serv, proto)) < 0) | 260 | error = make_socks(serv); |
| 261 | if (error < 0) | ||
| 276 | goto destroy_and_out; | 262 | goto destroy_and_out; |
| 277 | 263 | ||
| 278 | /* | 264 | /* |
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 4a714f64515b..014f6ce48172 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
| @@ -88,12 +88,6 @@ nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 88 | dprintk("lockd: TEST4 called\n"); | 88 | dprintk("lockd: TEST4 called\n"); |
| 89 | resp->cookie = argp->cookie; | 89 | resp->cookie = argp->cookie; |
| 90 | 90 | ||
| 91 | /* Don't accept test requests during grace period */ | ||
| 92 | if (nlmsvc_grace_period) { | ||
| 93 | resp->status = nlm_lck_denied_grace_period; | ||
| 94 | return rc; | ||
| 95 | } | ||
| 96 | |||
| 97 | /* Obtain client and file */ | 91 | /* Obtain client and file */ |
| 98 | if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) | 92 | if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) |
| 99 | return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; | 93 | return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; |
| @@ -122,12 +116,6 @@ nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 122 | 116 | ||
| 123 | resp->cookie = argp->cookie; | 117 | resp->cookie = argp->cookie; |
| 124 | 118 | ||
| 125 | /* Don't accept new lock requests during grace period */ | ||
| 126 | if (nlmsvc_grace_period && !argp->reclaim) { | ||
| 127 | resp->status = nlm_lck_denied_grace_period; | ||
| 128 | return rc; | ||
| 129 | } | ||
| 130 | |||
| 131 | /* Obtain client and file */ | 119 | /* Obtain client and file */ |
| 132 | if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) | 120 | if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) |
| 133 | return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; | 121 | return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; |
| @@ -146,7 +134,8 @@ nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 146 | 134 | ||
| 147 | /* Now try to lock the file */ | 135 | /* Now try to lock the file */ |
| 148 | resp->status = nlmsvc_lock(rqstp, file, host, &argp->lock, | 136 | resp->status = nlmsvc_lock(rqstp, file, host, &argp->lock, |
| 149 | argp->block, &argp->cookie); | 137 | argp->block, &argp->cookie, |
| 138 | argp->reclaim); | ||
| 150 | if (resp->status == nlm_drop_reply) | 139 | if (resp->status == nlm_drop_reply) |
| 151 | rc = rpc_drop_reply; | 140 | rc = rpc_drop_reply; |
| 152 | else | 141 | else |
| @@ -169,7 +158,7 @@ nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 169 | resp->cookie = argp->cookie; | 158 | resp->cookie = argp->cookie; |
| 170 | 159 | ||
| 171 | /* Don't accept requests during grace period */ | 160 | /* Don't accept requests during grace period */ |
| 172 | if (nlmsvc_grace_period) { | 161 | if (locks_in_grace()) { |
| 173 | resp->status = nlm_lck_denied_grace_period; | 162 | resp->status = nlm_lck_denied_grace_period; |
| 174 | return rpc_success; | 163 | return rpc_success; |
| 175 | } | 164 | } |
| @@ -202,7 +191,7 @@ nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 202 | resp->cookie = argp->cookie; | 191 | resp->cookie = argp->cookie; |
| 203 | 192 | ||
| 204 | /* Don't accept new lock requests during grace period */ | 193 | /* Don't accept new lock requests during grace period */ |
| 205 | if (nlmsvc_grace_period) { | 194 | if (locks_in_grace()) { |
| 206 | resp->status = nlm_lck_denied_grace_period; | 195 | resp->status = nlm_lck_denied_grace_period; |
| 207 | return rpc_success; | 196 | return rpc_success; |
| 208 | } | 197 | } |
| @@ -231,7 +220,7 @@ nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 231 | resp->cookie = argp->cookie; | 220 | resp->cookie = argp->cookie; |
| 232 | 221 | ||
| 233 | dprintk("lockd: GRANTED called\n"); | 222 | dprintk("lockd: GRANTED called\n"); |
| 234 | resp->status = nlmclnt_grant(svc_addr_in(rqstp), &argp->lock); | 223 | resp->status = nlmclnt_grant(svc_addr(rqstp), &argp->lock); |
| 235 | dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); | 224 | dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); |
| 236 | return rpc_success; | 225 | return rpc_success; |
| 237 | } | 226 | } |
| @@ -341,7 +330,7 @@ nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 341 | resp->cookie = argp->cookie; | 330 | resp->cookie = argp->cookie; |
| 342 | 331 | ||
| 343 | /* Don't accept new lock requests during grace period */ | 332 | /* Don't accept new lock requests during grace period */ |
| 344 | if (nlmsvc_grace_period && !argp->reclaim) { | 333 | if (locks_in_grace() && !argp->reclaim) { |
| 345 | resp->status = nlm_lck_denied_grace_period; | 334 | resp->status = nlm_lck_denied_grace_period; |
| 346 | return rpc_success; | 335 | return rpc_success; |
| 347 | } | 336 | } |
| @@ -374,7 +363,7 @@ nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 374 | resp->cookie = argp->cookie; | 363 | resp->cookie = argp->cookie; |
| 375 | 364 | ||
| 376 | /* Don't accept requests during grace period */ | 365 | /* Don't accept requests during grace period */ |
| 377 | if (nlmsvc_grace_period) { | 366 | if (locks_in_grace()) { |
| 378 | resp->status = nlm_lck_denied_grace_period; | 367 | resp->status = nlm_lck_denied_grace_period; |
| 379 | return rpc_success; | 368 | return rpc_success; |
| 380 | } | 369 | } |
| @@ -432,11 +421,9 @@ nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp, | |||
| 432 | { | 421 | { |
| 433 | struct sockaddr_in saddr; | 422 | struct sockaddr_in saddr; |
| 434 | 423 | ||
| 435 | memcpy(&saddr, svc_addr_in(rqstp), sizeof(saddr)); | ||
| 436 | |||
| 437 | dprintk("lockd: SM_NOTIFY called\n"); | 424 | dprintk("lockd: SM_NOTIFY called\n"); |
| 438 | if (saddr.sin_addr.s_addr != htonl(INADDR_LOOPBACK) | 425 | |
| 439 | || ntohs(saddr.sin_port) >= 1024) { | 426 | if (!nlm_privileged_requester(rqstp)) { |
| 440 | char buf[RPC_MAX_ADDRBUFLEN]; | 427 | char buf[RPC_MAX_ADDRBUFLEN]; |
| 441 | printk(KERN_WARNING "lockd: rejected NSM callback from %s\n", | 428 | printk(KERN_WARNING "lockd: rejected NSM callback from %s\n", |
| 442 | svc_print_addr(rqstp, buf, sizeof(buf))); | 429 | svc_print_addr(rqstp, buf, sizeof(buf))); |
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index cf0d5c2c318d..6063a8e4b9f3 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
| @@ -360,7 +360,7 @@ nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block) | |||
| 360 | __be32 | 360 | __be32 |
| 361 | nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, | 361 | nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, |
| 362 | struct nlm_host *host, struct nlm_lock *lock, int wait, | 362 | struct nlm_host *host, struct nlm_lock *lock, int wait, |
| 363 | struct nlm_cookie *cookie) | 363 | struct nlm_cookie *cookie, int reclaim) |
| 364 | { | 364 | { |
| 365 | struct nlm_block *block = NULL; | 365 | struct nlm_block *block = NULL; |
| 366 | int error; | 366 | int error; |
| @@ -406,6 +406,15 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, | |||
| 406 | goto out; | 406 | goto out; |
| 407 | } | 407 | } |
| 408 | 408 | ||
| 409 | if (locks_in_grace() && !reclaim) { | ||
| 410 | ret = nlm_lck_denied_grace_period; | ||
| 411 | goto out; | ||
| 412 | } | ||
| 413 | if (reclaim && !locks_in_grace()) { | ||
| 414 | ret = nlm_lck_denied_grace_period; | ||
| 415 | goto out; | ||
| 416 | } | ||
| 417 | |||
| 409 | if (!wait) | 418 | if (!wait) |
| 410 | lock->fl.fl_flags &= ~FL_SLEEP; | 419 | lock->fl.fl_flags &= ~FL_SLEEP; |
| 411 | error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); | 420 | error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); |
| @@ -502,6 +511,10 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, | |||
| 502 | goto out; | 511 | goto out; |
| 503 | } | 512 | } |
| 504 | 513 | ||
| 514 | if (locks_in_grace()) { | ||
| 515 | ret = nlm_lck_denied_grace_period; | ||
| 516 | goto out; | ||
| 517 | } | ||
| 505 | error = vfs_test_lock(file->f_file, &lock->fl); | 518 | error = vfs_test_lock(file->f_file, &lock->fl); |
| 506 | if (error == FILE_LOCK_DEFERRED) { | 519 | if (error == FILE_LOCK_DEFERRED) { |
| 507 | ret = nlmsvc_defer_lock_rqst(rqstp, block); | 520 | ret = nlmsvc_defer_lock_rqst(rqstp, block); |
| @@ -582,6 +595,9 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock) | |||
| 582 | (long long)lock->fl.fl_start, | 595 | (long long)lock->fl.fl_start, |
| 583 | (long long)lock->fl.fl_end); | 596 | (long long)lock->fl.fl_end); |
| 584 | 597 | ||
| 598 | if (locks_in_grace()) | ||
| 599 | return nlm_lck_denied_grace_period; | ||
| 600 | |||
| 585 | mutex_lock(&file->f_mutex); | 601 | mutex_lock(&file->f_mutex); |
| 586 | block = nlmsvc_lookup_block(file, lock); | 602 | block = nlmsvc_lookup_block(file, lock); |
| 587 | mutex_unlock(&file->f_mutex); | 603 | mutex_unlock(&file->f_mutex); |
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 76262c1986f2..548b0bb2b84d 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
| @@ -117,12 +117,6 @@ nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 117 | dprintk("lockd: TEST called\n"); | 117 | dprintk("lockd: TEST called\n"); |
| 118 | resp->cookie = argp->cookie; | 118 | resp->cookie = argp->cookie; |
| 119 | 119 | ||
| 120 | /* Don't accept test requests during grace period */ | ||
| 121 | if (nlmsvc_grace_period) { | ||
| 122 | resp->status = nlm_lck_denied_grace_period; | ||
| 123 | return rc; | ||
| 124 | } | ||
| 125 | |||
| 126 | /* Obtain client and file */ | 120 | /* Obtain client and file */ |
| 127 | if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) | 121 | if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) |
| 128 | return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; | 122 | return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; |
| @@ -152,12 +146,6 @@ nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 152 | 146 | ||
| 153 | resp->cookie = argp->cookie; | 147 | resp->cookie = argp->cookie; |
| 154 | 148 | ||
| 155 | /* Don't accept new lock requests during grace period */ | ||
| 156 | if (nlmsvc_grace_period && !argp->reclaim) { | ||
| 157 | resp->status = nlm_lck_denied_grace_period; | ||
| 158 | return rc; | ||
| 159 | } | ||
| 160 | |||
| 161 | /* Obtain client and file */ | 149 | /* Obtain client and file */ |
| 162 | if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) | 150 | if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) |
| 163 | return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; | 151 | return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; |
| @@ -176,7 +164,8 @@ nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 176 | 164 | ||
| 177 | /* Now try to lock the file */ | 165 | /* Now try to lock the file */ |
| 178 | resp->status = cast_status(nlmsvc_lock(rqstp, file, host, &argp->lock, | 166 | resp->status = cast_status(nlmsvc_lock(rqstp, file, host, &argp->lock, |
| 179 | argp->block, &argp->cookie)); | 167 | argp->block, &argp->cookie, |
| 168 | argp->reclaim)); | ||
| 180 | if (resp->status == nlm_drop_reply) | 169 | if (resp->status == nlm_drop_reply) |
| 181 | rc = rpc_drop_reply; | 170 | rc = rpc_drop_reply; |
| 182 | else | 171 | else |
| @@ -199,7 +188,7 @@ nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 199 | resp->cookie = argp->cookie; | 188 | resp->cookie = argp->cookie; |
| 200 | 189 | ||
| 201 | /* Don't accept requests during grace period */ | 190 | /* Don't accept requests during grace period */ |
| 202 | if (nlmsvc_grace_period) { | 191 | if (locks_in_grace()) { |
| 203 | resp->status = nlm_lck_denied_grace_period; | 192 | resp->status = nlm_lck_denied_grace_period; |
| 204 | return rpc_success; | 193 | return rpc_success; |
| 205 | } | 194 | } |
| @@ -232,7 +221,7 @@ nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 232 | resp->cookie = argp->cookie; | 221 | resp->cookie = argp->cookie; |
| 233 | 222 | ||
| 234 | /* Don't accept new lock requests during grace period */ | 223 | /* Don't accept new lock requests during grace period */ |
| 235 | if (nlmsvc_grace_period) { | 224 | if (locks_in_grace()) { |
| 236 | resp->status = nlm_lck_denied_grace_period; | 225 | resp->status = nlm_lck_denied_grace_period; |
| 237 | return rpc_success; | 226 | return rpc_success; |
| 238 | } | 227 | } |
| @@ -261,7 +250,7 @@ nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 261 | resp->cookie = argp->cookie; | 250 | resp->cookie = argp->cookie; |
| 262 | 251 | ||
| 263 | dprintk("lockd: GRANTED called\n"); | 252 | dprintk("lockd: GRANTED called\n"); |
| 264 | resp->status = nlmclnt_grant(svc_addr_in(rqstp), &argp->lock); | 253 | resp->status = nlmclnt_grant(svc_addr(rqstp), &argp->lock); |
| 265 | dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); | 254 | dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); |
| 266 | return rpc_success; | 255 | return rpc_success; |
| 267 | } | 256 | } |
| @@ -373,7 +362,7 @@ nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 373 | resp->cookie = argp->cookie; | 362 | resp->cookie = argp->cookie; |
| 374 | 363 | ||
| 375 | /* Don't accept new lock requests during grace period */ | 364 | /* Don't accept new lock requests during grace period */ |
| 376 | if (nlmsvc_grace_period && !argp->reclaim) { | 365 | if (locks_in_grace() && !argp->reclaim) { |
| 377 | resp->status = nlm_lck_denied_grace_period; | 366 | resp->status = nlm_lck_denied_grace_period; |
| 378 | return rpc_success; | 367 | return rpc_success; |
| 379 | } | 368 | } |
| @@ -406,7 +395,7 @@ nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
| 406 | resp->cookie = argp->cookie; | 395 | resp->cookie = argp->cookie; |
| 407 | 396 | ||
| 408 | /* Don't accept requests during grace period */ | 397 | /* Don't accept requests during grace period */ |
| 409 | if (nlmsvc_grace_period) { | 398 | if (locks_in_grace()) { |
| 410 | resp->status = nlm_lck_denied_grace_period; | 399 | resp->status = nlm_lck_denied_grace_period; |
| 411 | return rpc_success; | 400 | return rpc_success; |
| 412 | } | 401 | } |
| @@ -464,11 +453,9 @@ nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp, | |||
| 464 | { | 453 | { |
| 465 | struct sockaddr_in saddr; | 454 | struct sockaddr_in saddr; |
| 466 | 455 | ||
| 467 | memcpy(&saddr, svc_addr_in(rqstp), sizeof(saddr)); | ||
| 468 | |||
| 469 | dprintk("lockd: SM_NOTIFY called\n"); | 456 | dprintk("lockd: SM_NOTIFY called\n"); |
| 470 | if (saddr.sin_addr.s_addr != htonl(INADDR_LOOPBACK) | 457 | |
| 471 | || ntohs(saddr.sin_port) >= 1024) { | 458 | if (!nlm_privileged_requester(rqstp)) { |
| 472 | char buf[RPC_MAX_ADDRBUFLEN]; | 459 | char buf[RPC_MAX_ADDRBUFLEN]; |
| 473 | printk(KERN_WARNING "lockd: rejected NSM callback from %s\n", | 460 | printk(KERN_WARNING "lockd: rejected NSM callback from %s\n", |
| 474 | svc_print_addr(rqstp, buf, sizeof(buf))); | 461 | svc_print_addr(rqstp, buf, sizeof(buf))); |
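The SM_NOTIFY handler above drops its open-coded source check in favour of nlm_privileged_requester(). Only the rule visible in the removed lines is certain: the notification must originate from the loopback address and a reserved (below 1024) port. A minimal IPv4-only sketch of such a check follows; the helper's real definition (and any IPv6 handling) is not part of this hunk, so treat the body as an assumption.

    #include <linux/in.h>
    #include <linux/sunrpc/svc.h>

    /* Sketch: accept NSM reboot notifications only from a privileged
     * port on the IPv4 loopback address (the rule the removed lines
     * enforced inline). */
    static bool privileged_requester_ipv4(struct svc_rqst *rqstp)
    {
            struct sockaddr_in *sin = svc_addr_in(rqstp);

            if (sin->sin_addr.s_addr != htonl(INADDR_LOOPBACK))
                    return false;                   /* not from this host */
            return ntohs(sin->sin_port) < 1024;     /* reserved port only */
    }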
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index 198b4e55b373..34c2766e27c7 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c | |||
| @@ -418,7 +418,7 @@ EXPORT_SYMBOL_GPL(nlmsvc_unlock_all_by_sb); | |||
| 418 | static int | 418 | static int |
| 419 | nlmsvc_match_ip(void *datap, struct nlm_host *host) | 419 | nlmsvc_match_ip(void *datap, struct nlm_host *host) |
| 420 | { | 420 | { |
| 421 | return nlm_cmp_addr(&host->h_saddr, datap); | 421 | return nlm_cmp_addr(nlm_srcaddr(host), datap); |
| 422 | } | 422 | } |
| 423 | 423 | ||
| 424 | /** | 424 | /** |
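nlmsvc_match_ip() now reaches the host's source address through nlm_srcaddr() instead of touching h_saddr directly, which suggests the nlm_host address fields are being hidden behind accessors (presumably so they can hold more than an IPv4 sockaddr_in). The comparison helper itself is not shown in this hunk; the sketch below is an assumed IPv4-only shape for an nlm_cmp_addr()-style test, inferred only from the call site above.

    #include <linux/in.h>
    #include <linux/socket.h>

    /* Assumed shape of a family-aware address match, IPv4 case only:
     * two addresses compare equal when both are AF_INET and their
     * 32-bit addresses match (ports are deliberately ignored). */
    static bool cmp_addr_ipv4(const struct sockaddr *a, const struct sockaddr *b)
    {
            const struct sockaddr_in *a4 = (const struct sockaddr_in *)a;
            const struct sockaddr_in *b4 = (const struct sockaddr_in *)b;

            if (a->sa_family != AF_INET || b->sa_family != AF_INET)
                    return false;
            return a4->sin_addr.s_addr == b4->sin_addr.s_addr;
    }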
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c index 3e459e18cc31..1f226290c67c 100644 --- a/fs/lockd/xdr.c +++ b/fs/lockd/xdr.c | |||
| @@ -351,8 +351,6 @@ nlmsvc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp) | |||
| 351 | argp->state = ntohl(*p++); | 351 | argp->state = ntohl(*p++); |
| 352 | /* Preserve the address in network byte order */ | 352 | /* Preserve the address in network byte order */ |
| 353 | argp->addr = *p++; | 353 | argp->addr = *p++; |
| 354 | argp->vers = *p++; | ||
| 355 | argp->proto = *p++; | ||
| 356 | return xdr_argsize_check(rqstp, p); | 354 | return xdr_argsize_check(rqstp, p); |
| 357 | } | 355 | } |
| 358 | 356 | ||
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c index 43ff9397e6c6..50c493a8ad8e 100644 --- a/fs/lockd/xdr4.c +++ b/fs/lockd/xdr4.c | |||
| @@ -358,8 +358,6 @@ nlm4svc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp | |||
| 358 | argp->state = ntohl(*p++); | 358 | argp->state = ntohl(*p++); |
| 359 | /* Preserve the address in network byte order */ | 359 | /* Preserve the address in network byte order */ |
| 360 | argp->addr = *p++; | 360 | argp->addr = *p++; |
| 361 | argp->vers = *p++; | ||
| 362 | argp->proto = *p++; | ||
| 363 | return xdr_argsize_check(rqstp, p); | 361 | return xdr_argsize_check(rqstp, p); |
| 364 | } | 362 | } |
| 365 | 363 | ||
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index f447f4b4476c..6a09760c5960 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c | |||
| @@ -105,7 +105,8 @@ int nfs_callback_up(void) | |||
| 105 | mutex_lock(&nfs_callback_mutex); | 105 | mutex_lock(&nfs_callback_mutex); |
| 106 | if (nfs_callback_info.users++ || nfs_callback_info.task != NULL) | 106 | if (nfs_callback_info.users++ || nfs_callback_info.task != NULL) |
| 107 | goto out; | 107 | goto out; |
| 108 | serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL); | 108 | serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, |
| 109 | AF_INET, NULL); | ||
| 109 | ret = -ENOMEM; | 110 | ret = -ENOMEM; |
| 110 | if (!serv) | 111 | if (!serv) |
| 111 | goto out_err; | 112 | goto out_err; |
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c index 15c6faeec77c..b2786a5f9afe 100644 --- a/fs/nfsd/lockd.c +++ b/fs/nfsd/lockd.c | |||
| @@ -70,7 +70,6 @@ nlm_fclose(struct file *filp) | |||
| 70 | static struct nlmsvc_binding nfsd_nlm_ops = { | 70 | static struct nlmsvc_binding nfsd_nlm_ops = { |
| 71 | .fopen = nlm_fopen, /* open file for locking */ | 71 | .fopen = nlm_fopen, /* open file for locking */ |
| 72 | .fclose = nlm_fclose, /* close file */ | 72 | .fclose = nlm_fclose, /* close file */ |
| 73 | .get_grace_period = get_nfs4_grace_period, | ||
| 74 | }; | 73 | }; |
| 75 | 74 | ||
| 76 | void | 75 | void |
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index 4d617ea28cfc..9dbd2eb91281 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c | |||
| @@ -63,7 +63,8 @@ nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp, | |||
| 63 | SVCFH_fmt(&argp->fh)); | 63 | SVCFH_fmt(&argp->fh)); |
| 64 | 64 | ||
| 65 | fh_copy(&resp->fh, &argp->fh); | 65 | fh_copy(&resp->fh, &argp->fh); |
| 66 | nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP); | 66 | nfserr = fh_verify(rqstp, &resp->fh, 0, |
| 67 | NFSD_MAY_NOP | NFSD_MAY_BYPASS_GSS_ON_ROOT); | ||
| 67 | if (nfserr) | 68 | if (nfserr) |
| 68 | RETURN_STATUS(nfserr); | 69 | RETURN_STATUS(nfserr); |
| 69 | 70 | ||
| @@ -530,7 +531,7 @@ nfsd3_proc_fsstat(struct svc_rqst * rqstp, struct nfsd_fhandle *argp, | |||
| 530 | dprintk("nfsd: FSSTAT(3) %s\n", | 531 | dprintk("nfsd: FSSTAT(3) %s\n", |
| 531 | SVCFH_fmt(&argp->fh)); | 532 | SVCFH_fmt(&argp->fh)); |
| 532 | 533 | ||
| 533 | nfserr = nfsd_statfs(rqstp, &argp->fh, &resp->stats); | 534 | nfserr = nfsd_statfs(rqstp, &argp->fh, &resp->stats, 0); |
| 534 | fh_put(&argp->fh); | 535 | fh_put(&argp->fh); |
| 535 | RETURN_STATUS(nfserr); | 536 | RETURN_STATUS(nfserr); |
| 536 | } | 537 | } |
| @@ -558,7 +559,8 @@ nfsd3_proc_fsinfo(struct svc_rqst * rqstp, struct nfsd_fhandle *argp, | |||
| 558 | resp->f_maxfilesize = ~(u32) 0; | 559 | resp->f_maxfilesize = ~(u32) 0; |
| 559 | resp->f_properties = NFS3_FSF_DEFAULT; | 560 | resp->f_properties = NFS3_FSF_DEFAULT; |
| 560 | 561 | ||
| 561 | nfserr = fh_verify(rqstp, &argp->fh, 0, NFSD_MAY_NOP); | 562 | nfserr = fh_verify(rqstp, &argp->fh, 0, |
| 563 | NFSD_MAY_NOP | NFSD_MAY_BYPASS_GSS_ON_ROOT); | ||
| 562 | 564 | ||
| 563 | /* Check special features of the file system. May request | 565 | /* Check special features of the file system. May request |
| 564 | * different read/write sizes for file systems known to have | 566 | * different read/write sizes for file systems known to have |
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 702fa577aa6e..094747a1227c 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c | |||
| @@ -225,7 +225,8 @@ encode_cb_recall(struct xdr_stream *xdr, struct nfs4_cb_recall *cb_rec) | |||
| 225 | 225 | ||
| 226 | RESERVE_SPACE(12+sizeof(cb_rec->cbr_stateid) + len); | 226 | RESERVE_SPACE(12+sizeof(cb_rec->cbr_stateid) + len); |
| 227 | WRITE32(OP_CB_RECALL); | 227 | WRITE32(OP_CB_RECALL); |
| 228 | WRITEMEM(&cb_rec->cbr_stateid, sizeof(stateid_t)); | 228 | WRITE32(cb_rec->cbr_stateid.si_generation); |
| 229 | WRITEMEM(&cb_rec->cbr_stateid.si_opaque, sizeof(stateid_opaque_t)); | ||
| 229 | WRITE32(cb_rec->cbr_trunc); | 230 | WRITE32(cb_rec->cbr_trunc); |
| 230 | WRITE32(len); | 231 | WRITE32(len); |
| 231 | WRITEMEM(cb_rec->cbr_fhval, len); | 232 | WRITEMEM(cb_rec->cbr_fhval, len); |
| @@ -379,6 +380,7 @@ static int do_probe_callback(void *data) | |||
| 379 | .addrsize = sizeof(addr), | 380 | .addrsize = sizeof(addr), |
| 380 | .timeout = &timeparms, | 381 | .timeout = &timeparms, |
| 381 | .program = &cb_program, | 382 | .program = &cb_program, |
| 383 | .prognumber = cb->cb_prog, | ||
| 382 | .version = nfs_cb_version[1]->number, | 384 | .version = nfs_cb_version[1]->number, |
| 383 | .authflavor = RPC_AUTH_UNIX, /* XXX: need AUTH_GSS... */ | 385 | .authflavor = RPC_AUTH_UNIX, /* XXX: need AUTH_GSS... */ |
| 384 | .flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET), | 386 | .flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET), |
| @@ -396,9 +398,6 @@ static int do_probe_callback(void *data) | |||
| 396 | addr.sin_port = htons(cb->cb_port); | 398 | addr.sin_port = htons(cb->cb_port); |
| 397 | addr.sin_addr.s_addr = htonl(cb->cb_addr); | 399 | addr.sin_addr.s_addr = htonl(cb->cb_addr); |
| 398 | 400 | ||
| 399 | /* Initialize rpc_stat */ | ||
| 400 | memset(args.program->stats, 0, sizeof(struct rpc_stat)); | ||
| 401 | |||
| 402 | /* Create RPC client */ | 401 | /* Create RPC client */ |
| 403 | client = rpc_create(&args); | 402 | client = rpc_create(&args); |
| 404 | if (IS_ERR(client)) { | 403 | if (IS_ERR(client)) { |
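Two details in the callback changes above are worth spelling out. The recall encoder now writes the stateid field by field: the generation goes through WRITE32, so it is byte-swapped like any other XDR word, and the opaque part is copied verbatim, instead of dumping the raw stateid_t struct. And rpc_create_args gains .prognumber = cb->cb_prog, which reads as letting the callback client carry the program number the NFSv4 client asked for while still sharing the static cb_program description; that reading is an inference from the field name, not something this hunk proves. A sketch of the encoder fragment, reusing the RESERVE_SPACE/WRITE32/WRITEMEM macros already defined in this file:

    /* Sketch: stateid encoded field by field in wire order.  The
     * macros and the stateid field names are the ones used above. */
    static void encode_stateid_sketch(struct xdr_stream *xdr, stateid_t *sid)
    {
            __be32 *p;

            RESERVE_SPACE(4 + sizeof(stateid_opaque_t));
            WRITE32(sid->si_generation);
            WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
    }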
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index e5b51ffafc6c..669461e291ae 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c | |||
| @@ -201,10 +201,10 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
| 201 | /* Openowner is now set, so sequence id will get bumped. Now we need | 201 | /* Openowner is now set, so sequence id will get bumped. Now we need |
| 202 | * these checks before we do any creates: */ | 202 | * these checks before we do any creates: */ |
| 203 | status = nfserr_grace; | 203 | status = nfserr_grace; |
| 204 | if (nfs4_in_grace() && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS) | 204 | if (locks_in_grace() && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS) |
| 205 | goto out; | 205 | goto out; |
| 206 | status = nfserr_no_grace; | 206 | status = nfserr_no_grace; |
| 207 | if (!nfs4_in_grace() && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) | 207 | if (!locks_in_grace() && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) |
| 208 | goto out; | 208 | goto out; |
| 209 | 209 | ||
| 210 | switch (open->op_claim_type) { | 210 | switch (open->op_claim_type) { |
| @@ -575,7 +575,7 @@ nfsd4_remove(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
| 575 | { | 575 | { |
| 576 | __be32 status; | 576 | __be32 status; |
| 577 | 577 | ||
| 578 | if (nfs4_in_grace()) | 578 | if (locks_in_grace()) |
| 579 | return nfserr_grace; | 579 | return nfserr_grace; |
| 580 | status = nfsd_unlink(rqstp, &cstate->current_fh, 0, | 580 | status = nfsd_unlink(rqstp, &cstate->current_fh, 0, |
| 581 | remove->rm_name, remove->rm_namelen); | 581 | remove->rm_name, remove->rm_namelen); |
| @@ -596,7 +596,7 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
| 596 | 596 | ||
| 597 | if (!cstate->save_fh.fh_dentry) | 597 | if (!cstate->save_fh.fh_dentry) |
| 598 | return status; | 598 | return status; |
| 599 | if (nfs4_in_grace() && !(cstate->save_fh.fh_export->ex_flags | 599 | if (locks_in_grace() && !(cstate->save_fh.fh_export->ex_flags |
| 600 | & NFSEXP_NOSUBTREECHECK)) | 600 | & NFSEXP_NOSUBTREECHECK)) |
| 601 | return nfserr_grace; | 601 | return nfserr_grace; |
| 602 | status = nfsd_rename(rqstp, &cstate->save_fh, rename->rn_sname, | 602 | status = nfsd_rename(rqstp, &cstate->save_fh, rename->rn_sname, |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 1578d7a2667e..0cc7ff5d5ab5 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
| @@ -61,7 +61,6 @@ | |||
| 61 | static time_t lease_time = 90; /* default lease time */ | 61 | static time_t lease_time = 90; /* default lease time */ |
| 62 | static time_t user_lease_time = 90; | 62 | static time_t user_lease_time = 90; |
| 63 | static time_t boot_time; | 63 | static time_t boot_time; |
| 64 | static int in_grace = 1; | ||
| 65 | static u32 current_ownerid = 1; | 64 | static u32 current_ownerid = 1; |
| 66 | static u32 current_fileid = 1; | 65 | static u32 current_fileid = 1; |
| 67 | static u32 current_delegid = 1; | 66 | static u32 current_delegid = 1; |
| @@ -1640,7 +1639,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta | |||
| 1640 | case NFS4_OPEN_CLAIM_NULL: | 1639 | case NFS4_OPEN_CLAIM_NULL: |
| 1641 | /* Let's not give out any delegations till everyone's | 1640 | /* Let's not give out any delegations till everyone's |
| 1642 | * had the chance to reclaim theirs.... */ | 1641 | * had the chance to reclaim theirs.... */ |
| 1643 | if (nfs4_in_grace()) | 1642 | if (locks_in_grace()) |
| 1644 | goto out; | 1643 | goto out; |
| 1645 | if (!atomic_read(&cb->cb_set) || !sop->so_confirmed) | 1644 | if (!atomic_read(&cb->cb_set) || !sop->so_confirmed) |
| 1646 | goto out; | 1645 | goto out; |
| @@ -1816,12 +1815,15 @@ out: | |||
| 1816 | return status; | 1815 | return status; |
| 1817 | } | 1816 | } |
| 1818 | 1817 | ||
| 1818 | struct lock_manager nfsd4_manager = { | ||
| 1819 | }; | ||
| 1820 | |||
| 1819 | static void | 1821 | static void |
| 1820 | end_grace(void) | 1822 | nfsd4_end_grace(void) |
| 1821 | { | 1823 | { |
| 1822 | dprintk("NFSD: end of grace period\n"); | 1824 | dprintk("NFSD: end of grace period\n"); |
| 1823 | nfsd4_recdir_purge_old(); | 1825 | nfsd4_recdir_purge_old(); |
| 1824 | in_grace = 0; | 1826 | locks_end_grace(&nfsd4_manager); |
| 1825 | } | 1827 | } |
| 1826 | 1828 | ||
| 1827 | static time_t | 1829 | static time_t |
| @@ -1838,8 +1840,8 @@ nfs4_laundromat(void) | |||
| 1838 | nfs4_lock_state(); | 1840 | nfs4_lock_state(); |
| 1839 | 1841 | ||
| 1840 | dprintk("NFSD: laundromat service - starting\n"); | 1842 | dprintk("NFSD: laundromat service - starting\n"); |
| 1841 | if (in_grace) | 1843 | if (locks_in_grace()) |
| 1842 | end_grace(); | 1844 | nfsd4_end_grace(); |
| 1843 | list_for_each_safe(pos, next, &client_lru) { | 1845 | list_for_each_safe(pos, next, &client_lru) { |
| 1844 | clp = list_entry(pos, struct nfs4_client, cl_lru); | 1846 | clp = list_entry(pos, struct nfs4_client, cl_lru); |
| 1845 | if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) { | 1847 | if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) { |
| @@ -1974,7 +1976,7 @@ check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags) | |||
| 1974 | return nfserr_bad_stateid; | 1976 | return nfserr_bad_stateid; |
| 1975 | else if (ONE_STATEID(stateid) && (flags & RD_STATE)) | 1977 | else if (ONE_STATEID(stateid) && (flags & RD_STATE)) |
| 1976 | return nfs_ok; | 1978 | return nfs_ok; |
| 1977 | else if (nfs4_in_grace()) { | 1979 | else if (locks_in_grace()) { |
| 1978 | /* Answer in remaining cases depends on existance of | 1980 | /* Answer in remaining cases depends on existance of |
| 1979 | * conflicting state; so we must wait out the grace period. */ | 1981 | * conflicting state; so we must wait out the grace period. */ |
| 1980 | return nfserr_grace; | 1982 | return nfserr_grace; |
| @@ -1993,7 +1995,7 @@ check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags) | |||
| 1993 | static inline int | 1995 | static inline int |
| 1994 | io_during_grace_disallowed(struct inode *inode, int flags) | 1996 | io_during_grace_disallowed(struct inode *inode, int flags) |
| 1995 | { | 1997 | { |
| 1996 | return nfs4_in_grace() && (flags & (RD_STATE | WR_STATE)) | 1998 | return locks_in_grace() && (flags & (RD_STATE | WR_STATE)) |
| 1997 | && mandatory_lock(inode); | 1999 | && mandatory_lock(inode); |
| 1998 | } | 2000 | } |
| 1999 | 2001 | ||
| @@ -2693,10 +2695,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
| 2693 | filp = lock_stp->st_vfs_file; | 2695 | filp = lock_stp->st_vfs_file; |
| 2694 | 2696 | ||
| 2695 | status = nfserr_grace; | 2697 | status = nfserr_grace; |
| 2696 | if (nfs4_in_grace() && !lock->lk_reclaim) | 2698 | if (locks_in_grace() && !lock->lk_reclaim) |
| 2697 | goto out; | 2699 | goto out; |
| 2698 | status = nfserr_no_grace; | 2700 | status = nfserr_no_grace; |
| 2699 | if (!nfs4_in_grace() && lock->lk_reclaim) | 2701 | if (!locks_in_grace() && lock->lk_reclaim) |
| 2700 | goto out; | 2702 | goto out; |
| 2701 | 2703 | ||
| 2702 | locks_init_lock(&file_lock); | 2704 | locks_init_lock(&file_lock); |
| @@ -2779,7 +2781,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
| 2779 | int error; | 2781 | int error; |
| 2780 | __be32 status; | 2782 | __be32 status; |
| 2781 | 2783 | ||
| 2782 | if (nfs4_in_grace()) | 2784 | if (locks_in_grace()) |
| 2783 | return nfserr_grace; | 2785 | return nfserr_grace; |
| 2784 | 2786 | ||
| 2785 | if (check_lock_length(lockt->lt_offset, lockt->lt_length)) | 2787 | if (check_lock_length(lockt->lt_offset, lockt->lt_length)) |
| @@ -3192,9 +3194,9 @@ __nfs4_state_start(void) | |||
| 3192 | unsigned long grace_time; | 3194 | unsigned long grace_time; |
| 3193 | 3195 | ||
| 3194 | boot_time = get_seconds(); | 3196 | boot_time = get_seconds(); |
| 3195 | grace_time = get_nfs_grace_period(); | 3197 | grace_time = get_nfs4_grace_period(); |
| 3196 | lease_time = user_lease_time; | 3198 | lease_time = user_lease_time; |
| 3197 | in_grace = 1; | 3199 | locks_start_grace(&nfsd4_manager); |
| 3198 | printk(KERN_INFO "NFSD: starting %ld-second grace period\n", | 3200 | printk(KERN_INFO "NFSD: starting %ld-second grace period\n", |
| 3199 | grace_time/HZ); | 3201 | grace_time/HZ); |
| 3200 | laundry_wq = create_singlethread_workqueue("nfsd4"); | 3202 | laundry_wq = create_singlethread_workqueue("nfsd4"); |
| @@ -3213,12 +3215,6 @@ nfs4_state_start(void) | |||
| 3213 | return; | 3215 | return; |
| 3214 | } | 3216 | } |
| 3215 | 3217 | ||
| 3216 | int | ||
| 3217 | nfs4_in_grace(void) | ||
| 3218 | { | ||
| 3219 | return in_grace; | ||
| 3220 | } | ||
| 3221 | |||
| 3222 | time_t | 3218 | time_t |
| 3223 | nfs4_lease_time(void) | 3219 | nfs4_lease_time(void) |
| 3224 | { | 3220 | { |
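With in_grace and nfs4_in_grace() gone, the NFSv4 server tracks its grace period through the shared lock_manager machinery this series adds under fs/lockd (see the grace.c entry in the diffstat): a manager registers with locks_start_grace(), later drops out with locks_end_grace(), and locks_in_grace() answers whether any registered manager is still in its grace period. The prototypes below are inferred from the call sites above; a sketch of the expected usage:

    #include <linux/fs.h>   /* struct lock_manager; header placement assumed */

    static struct lock_manager demo_manager;

    /* Sketch: one lock manager driving the shared grace period. */
    static void demo_state_start(void)
    {
            locks_start_grace(&demo_manager);       /* reclaims only from here on */
    }

    static void demo_laundromat_pass(void)
    {
            if (locks_in_grace())
                    locks_end_grace(&demo_manager); /* this manager is done reclaiming */
    }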
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 14ba4d9b2859..afcdf4b76843 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
| @@ -413,6 +413,18 @@ out_nfserr: | |||
| 413 | } | 413 | } |
| 414 | 414 | ||
| 415 | static __be32 | 415 | static __be32 |
| 416 | nfsd4_decode_stateid(struct nfsd4_compoundargs *argp, stateid_t *sid) | ||
| 417 | { | ||
| 418 | DECODE_HEAD; | ||
| 419 | |||
| 420 | READ_BUF(sizeof(stateid_t)); | ||
| 421 | READ32(sid->si_generation); | ||
| 422 | COPYMEM(&sid->si_opaque, sizeof(stateid_opaque_t)); | ||
| 423 | |||
| 424 | DECODE_TAIL; | ||
| 425 | } | ||
| 426 | |||
| 427 | static __be32 | ||
| 416 | nfsd4_decode_access(struct nfsd4_compoundargs *argp, struct nfsd4_access *access) | 428 | nfsd4_decode_access(struct nfsd4_compoundargs *argp, struct nfsd4_access *access) |
| 417 | { | 429 | { |
| 418 | DECODE_HEAD; | 430 | DECODE_HEAD; |
| @@ -429,10 +441,9 @@ nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close) | |||
| 429 | DECODE_HEAD; | 441 | DECODE_HEAD; |
| 430 | 442 | ||
| 431 | close->cl_stateowner = NULL; | 443 | close->cl_stateowner = NULL; |
| 432 | READ_BUF(4 + sizeof(stateid_t)); | 444 | READ_BUF(4); |
| 433 | READ32(close->cl_seqid); | 445 | READ32(close->cl_seqid); |
| 434 | READ32(close->cl_stateid.si_generation); | 446 | return nfsd4_decode_stateid(argp, &close->cl_stateid); |
| 435 | COPYMEM(&close->cl_stateid.si_opaque, sizeof(stateid_opaque_t)); | ||
| 436 | 447 | ||
| 437 | DECODE_TAIL; | 448 | DECODE_TAIL; |
| 438 | } | 449 | } |
| @@ -493,13 +504,7 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create | |||
| 493 | static inline __be32 | 504 | static inline __be32 |
| 494 | nfsd4_decode_delegreturn(struct nfsd4_compoundargs *argp, struct nfsd4_delegreturn *dr) | 505 | nfsd4_decode_delegreturn(struct nfsd4_compoundargs *argp, struct nfsd4_delegreturn *dr) |
| 495 | { | 506 | { |
| 496 | DECODE_HEAD; | 507 | return nfsd4_decode_stateid(argp, &dr->dr_stateid); |
| 497 | |||
| 498 | READ_BUF(sizeof(stateid_t)); | ||
| 499 | READ32(dr->dr_stateid.si_generation); | ||
| 500 | COPYMEM(&dr->dr_stateid.si_opaque, sizeof(stateid_opaque_t)); | ||
| 501 | |||
| 502 | DECODE_TAIL; | ||
| 503 | } | 508 | } |
| 504 | 509 | ||
| 505 | static inline __be32 | 510 | static inline __be32 |
| @@ -542,20 +547,22 @@ nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock) | |||
| 542 | READ32(lock->lk_is_new); | 547 | READ32(lock->lk_is_new); |
| 543 | 548 | ||
| 544 | if (lock->lk_is_new) { | 549 | if (lock->lk_is_new) { |
| 545 | READ_BUF(36); | 550 | READ_BUF(4); |
| 546 | READ32(lock->lk_new_open_seqid); | 551 | READ32(lock->lk_new_open_seqid); |
| 547 | READ32(lock->lk_new_open_stateid.si_generation); | 552 | status = nfsd4_decode_stateid(argp, &lock->lk_new_open_stateid); |
| 548 | 553 | if (status) | |
| 549 | COPYMEM(&lock->lk_new_open_stateid.si_opaque, sizeof(stateid_opaque_t)); | 554 | return status; |
| 555 | READ_BUF(8 + sizeof(clientid_t)); | ||
| 550 | READ32(lock->lk_new_lock_seqid); | 556 | READ32(lock->lk_new_lock_seqid); |
| 551 | COPYMEM(&lock->lk_new_clientid, sizeof(clientid_t)); | 557 | COPYMEM(&lock->lk_new_clientid, sizeof(clientid_t)); |
| 552 | READ32(lock->lk_new_owner.len); | 558 | READ32(lock->lk_new_owner.len); |
| 553 | READ_BUF(lock->lk_new_owner.len); | 559 | READ_BUF(lock->lk_new_owner.len); |
| 554 | READMEM(lock->lk_new_owner.data, lock->lk_new_owner.len); | 560 | READMEM(lock->lk_new_owner.data, lock->lk_new_owner.len); |
| 555 | } else { | 561 | } else { |
| 556 | READ_BUF(20); | 562 | status = nfsd4_decode_stateid(argp, &lock->lk_old_lock_stateid); |
| 557 | READ32(lock->lk_old_lock_stateid.si_generation); | 563 | if (status) |
| 558 | COPYMEM(&lock->lk_old_lock_stateid.si_opaque, sizeof(stateid_opaque_t)); | 564 | return status; |
| 565 | READ_BUF(4); | ||
| 559 | READ32(lock->lk_old_lock_seqid); | 566 | READ32(lock->lk_old_lock_seqid); |
| 560 | } | 567 | } |
| 561 | 568 | ||
| @@ -587,13 +594,15 @@ nfsd4_decode_locku(struct nfsd4_compoundargs *argp, struct nfsd4_locku *locku) | |||
| 587 | DECODE_HEAD; | 594 | DECODE_HEAD; |
| 588 | 595 | ||
| 589 | locku->lu_stateowner = NULL; | 596 | locku->lu_stateowner = NULL; |
| 590 | READ_BUF(24 + sizeof(stateid_t)); | 597 | READ_BUF(8); |
| 591 | READ32(locku->lu_type); | 598 | READ32(locku->lu_type); |
| 592 | if ((locku->lu_type < NFS4_READ_LT) || (locku->lu_type > NFS4_WRITEW_LT)) | 599 | if ((locku->lu_type < NFS4_READ_LT) || (locku->lu_type > NFS4_WRITEW_LT)) |
| 593 | goto xdr_error; | 600 | goto xdr_error; |
| 594 | READ32(locku->lu_seqid); | 601 | READ32(locku->lu_seqid); |
| 595 | READ32(locku->lu_stateid.si_generation); | 602 | status = nfsd4_decode_stateid(argp, &locku->lu_stateid); |
| 596 | COPYMEM(&locku->lu_stateid.si_opaque, sizeof(stateid_opaque_t)); | 603 | if (status) |
| 604 | return status; | ||
| 605 | READ_BUF(16); | ||
| 597 | READ64(locku->lu_offset); | 606 | READ64(locku->lu_offset); |
| 598 | READ64(locku->lu_length); | 607 | READ64(locku->lu_length); |
| 599 | 608 | ||
| @@ -678,8 +687,10 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open) | |||
| 678 | READ32(open->op_delegate_type); | 687 | READ32(open->op_delegate_type); |
| 679 | break; | 688 | break; |
| 680 | case NFS4_OPEN_CLAIM_DELEGATE_CUR: | 689 | case NFS4_OPEN_CLAIM_DELEGATE_CUR: |
| 681 | READ_BUF(sizeof(stateid_t) + 4); | 690 | status = nfsd4_decode_stateid(argp, &open->op_delegate_stateid); |
| 682 | COPYMEM(&open->op_delegate_stateid, sizeof(stateid_t)); | 691 | if (status) |
| 692 | return status; | ||
| 693 | READ_BUF(4); | ||
| 683 | READ32(open->op_fname.len); | 694 | READ32(open->op_fname.len); |
| 684 | READ_BUF(open->op_fname.len); | 695 | READ_BUF(open->op_fname.len); |
| 685 | SAVEMEM(open->op_fname.data, open->op_fname.len); | 696 | SAVEMEM(open->op_fname.data, open->op_fname.len); |
| @@ -699,9 +710,10 @@ nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_con | |||
| 699 | DECODE_HEAD; | 710 | DECODE_HEAD; |
| 700 | 711 | ||
| 701 | open_conf->oc_stateowner = NULL; | 712 | open_conf->oc_stateowner = NULL; |
| 702 | READ_BUF(4 + sizeof(stateid_t)); | 713 | status = nfsd4_decode_stateid(argp, &open_conf->oc_req_stateid); |
| 703 | READ32(open_conf->oc_req_stateid.si_generation); | 714 | if (status) |
| 704 | COPYMEM(&open_conf->oc_req_stateid.si_opaque, sizeof(stateid_opaque_t)); | 715 | return status; |
| 716 | READ_BUF(4); | ||
| 705 | READ32(open_conf->oc_seqid); | 717 | READ32(open_conf->oc_seqid); |
| 706 | 718 | ||
| 707 | DECODE_TAIL; | 719 | DECODE_TAIL; |
| @@ -713,9 +725,10 @@ nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_d | |||
| 713 | DECODE_HEAD; | 725 | DECODE_HEAD; |
| 714 | 726 | ||
| 715 | open_down->od_stateowner = NULL; | 727 | open_down->od_stateowner = NULL; |
| 716 | READ_BUF(12 + sizeof(stateid_t)); | 728 | status = nfsd4_decode_stateid(argp, &open_down->od_stateid); |
| 717 | READ32(open_down->od_stateid.si_generation); | 729 | if (status) |
| 718 | COPYMEM(&open_down->od_stateid.si_opaque, sizeof(stateid_opaque_t)); | 730 | return status; |
| 731 | READ_BUF(12); | ||
| 719 | READ32(open_down->od_seqid); | 732 | READ32(open_down->od_seqid); |
| 720 | READ32(open_down->od_share_access); | 733 | READ32(open_down->od_share_access); |
| 721 | READ32(open_down->od_share_deny); | 734 | READ32(open_down->od_share_deny); |
| @@ -743,9 +756,10 @@ nfsd4_decode_read(struct nfsd4_compoundargs *argp, struct nfsd4_read *read) | |||
| 743 | { | 756 | { |
| 744 | DECODE_HEAD; | 757 | DECODE_HEAD; |
| 745 | 758 | ||
| 746 | READ_BUF(sizeof(stateid_t) + 12); | 759 | status = nfsd4_decode_stateid(argp, &read->rd_stateid); |
| 747 | READ32(read->rd_stateid.si_generation); | 760 | if (status) |
| 748 | COPYMEM(&read->rd_stateid.si_opaque, sizeof(stateid_opaque_t)); | 761 | return status; |
| 762 | READ_BUF(12); | ||
| 749 | READ64(read->rd_offset); | 763 | READ64(read->rd_offset); |
| 750 | READ32(read->rd_length); | 764 | READ32(read->rd_length); |
| 751 | 765 | ||
| @@ -834,15 +848,13 @@ nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp, | |||
| 834 | static __be32 | 848 | static __be32 |
| 835 | nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr) | 849 | nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr) |
| 836 | { | 850 | { |
| 837 | DECODE_HEAD; | 851 | __be32 status; |
| 838 | |||
| 839 | READ_BUF(sizeof(stateid_t)); | ||
| 840 | READ32(setattr->sa_stateid.si_generation); | ||
| 841 | COPYMEM(&setattr->sa_stateid.si_opaque, sizeof(stateid_opaque_t)); | ||
| 842 | if ((status = nfsd4_decode_fattr(argp, setattr->sa_bmval, &setattr->sa_iattr, &setattr->sa_acl))) | ||
| 843 | goto out; | ||
| 844 | 852 | ||
| 845 | DECODE_TAIL; | 853 | status = nfsd4_decode_stateid(argp, &setattr->sa_stateid); |
| 854 | if (status) | ||
| 855 | return status; | ||
| 856 | return nfsd4_decode_fattr(argp, setattr->sa_bmval, | ||
| 857 | &setattr->sa_iattr, &setattr->sa_acl); | ||
| 846 | } | 858 | } |
| 847 | 859 | ||
| 848 | static __be32 | 860 | static __be32 |
| @@ -927,9 +939,10 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write) | |||
| 927 | int len; | 939 | int len; |
| 928 | DECODE_HEAD; | 940 | DECODE_HEAD; |
| 929 | 941 | ||
| 930 | READ_BUF(sizeof(stateid_opaque_t) + 20); | 942 | status = nfsd4_decode_stateid(argp, &write->wr_stateid); |
| 931 | READ32(write->wr_stateid.si_generation); | 943 | if (status) |
| 932 | COPYMEM(&write->wr_stateid.si_opaque, sizeof(stateid_opaque_t)); | 944 | return status; |
| 945 | READ_BUF(16); | ||
| 933 | READ64(write->wr_offset); | 946 | READ64(write->wr_offset); |
| 934 | READ32(write->wr_stable_how); | 947 | READ32(write->wr_stable_how); |
| 935 | if (write->wr_stable_how > 2) | 948 | if (write->wr_stable_how > 2) |
| @@ -1183,7 +1196,6 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp) | |||
| 1183 | * Header routine to setup seqid operation replay cache | 1196 | * Header routine to setup seqid operation replay cache |
| 1184 | */ | 1197 | */ |
| 1185 | #define ENCODE_SEQID_OP_HEAD \ | 1198 | #define ENCODE_SEQID_OP_HEAD \ |
| 1186 | __be32 *p; \ | ||
| 1187 | __be32 *save; \ | 1199 | __be32 *save; \ |
| 1188 | \ | 1200 | \ |
| 1189 | save = resp->p; | 1201 | save = resp->p; |
| @@ -1950,6 +1962,17 @@ fail: | |||
| 1950 | return -EINVAL; | 1962 | return -EINVAL; |
| 1951 | } | 1963 | } |
| 1952 | 1964 | ||
| 1965 | static void | ||
| 1966 | nfsd4_encode_stateid(struct nfsd4_compoundres *resp, stateid_t *sid) | ||
| 1967 | { | ||
| 1968 | ENCODE_HEAD; | ||
| 1969 | |||
| 1970 | RESERVE_SPACE(sizeof(stateid_t)); | ||
| 1971 | WRITE32(sid->si_generation); | ||
| 1972 | WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t)); | ||
| 1973 | ADJUST_ARGS(); | ||
| 1974 | } | ||
| 1975 | |||
| 1953 | static __be32 | 1976 | static __be32 |
| 1954 | nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_access *access) | 1977 | nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_access *access) |
| 1955 | { | 1978 | { |
| @@ -1969,12 +1992,9 @@ nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_c | |||
| 1969 | { | 1992 | { |
| 1970 | ENCODE_SEQID_OP_HEAD; | 1993 | ENCODE_SEQID_OP_HEAD; |
| 1971 | 1994 | ||
| 1972 | if (!nfserr) { | 1995 | if (!nfserr) |
| 1973 | RESERVE_SPACE(sizeof(stateid_t)); | 1996 | nfsd4_encode_stateid(resp, &close->cl_stateid); |
| 1974 | WRITE32(close->cl_stateid.si_generation); | 1997 | |
| 1975 | WRITEMEM(&close->cl_stateid.si_opaque, sizeof(stateid_opaque_t)); | ||
| 1976 | ADJUST_ARGS(); | ||
| 1977 | } | ||
| 1978 | ENCODE_SEQID_OP_TAIL(close->cl_stateowner); | 1998 | ENCODE_SEQID_OP_TAIL(close->cl_stateowner); |
| 1979 | return nfserr; | 1999 | return nfserr; |
| 1980 | } | 2000 | } |
| @@ -2074,12 +2094,9 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo | |||
| 2074 | { | 2094 | { |
| 2075 | ENCODE_SEQID_OP_HEAD; | 2095 | ENCODE_SEQID_OP_HEAD; |
| 2076 | 2096 | ||
| 2077 | if (!nfserr) { | 2097 | if (!nfserr) |
| 2078 | RESERVE_SPACE(4 + sizeof(stateid_t)); | 2098 | nfsd4_encode_stateid(resp, &lock->lk_resp_stateid); |
| 2079 | WRITE32(lock->lk_resp_stateid.si_generation); | 2099 | else if (nfserr == nfserr_denied) |
| 2080 | WRITEMEM(&lock->lk_resp_stateid.si_opaque, sizeof(stateid_opaque_t)); | ||
| 2081 | ADJUST_ARGS(); | ||
| 2082 | } else if (nfserr == nfserr_denied) | ||
| 2083 | nfsd4_encode_lock_denied(resp, &lock->lk_denied); | 2100 | nfsd4_encode_lock_denied(resp, &lock->lk_denied); |
| 2084 | 2101 | ||
| 2085 | ENCODE_SEQID_OP_TAIL(lock->lk_replay_owner); | 2102 | ENCODE_SEQID_OP_TAIL(lock->lk_replay_owner); |
| @@ -2099,13 +2116,9 @@ nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_l | |||
| 2099 | { | 2116 | { |
| 2100 | ENCODE_SEQID_OP_HEAD; | 2117 | ENCODE_SEQID_OP_HEAD; |
| 2101 | 2118 | ||
| 2102 | if (!nfserr) { | 2119 | if (!nfserr) |
| 2103 | RESERVE_SPACE(sizeof(stateid_t)); | 2120 | nfsd4_encode_stateid(resp, &locku->lu_stateid); |
| 2104 | WRITE32(locku->lu_stateid.si_generation); | 2121 | |
| 2105 | WRITEMEM(&locku->lu_stateid.si_opaque, sizeof(stateid_opaque_t)); | ||
| 2106 | ADJUST_ARGS(); | ||
| 2107 | } | ||
| 2108 | |||
| 2109 | ENCODE_SEQID_OP_TAIL(locku->lu_stateowner); | 2122 | ENCODE_SEQID_OP_TAIL(locku->lu_stateowner); |
| 2110 | return nfserr; | 2123 | return nfserr; |
| 2111 | } | 2124 | } |
| @@ -2128,14 +2141,14 @@ nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_li | |||
| 2128 | static __be32 | 2141 | static __be32 |
| 2129 | nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open) | 2142 | nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open) |
| 2130 | { | 2143 | { |
| 2144 | ENCODE_HEAD; | ||
| 2131 | ENCODE_SEQID_OP_HEAD; | 2145 | ENCODE_SEQID_OP_HEAD; |
| 2132 | 2146 | ||
| 2133 | if (nfserr) | 2147 | if (nfserr) |
| 2134 | goto out; | 2148 | goto out; |
| 2135 | 2149 | ||
| 2136 | RESERVE_SPACE(36 + sizeof(stateid_t)); | 2150 | nfsd4_encode_stateid(resp, &open->op_stateid); |
| 2137 | WRITE32(open->op_stateid.si_generation); | 2151 | RESERVE_SPACE(40); |
| 2138 | WRITEMEM(&open->op_stateid.si_opaque, sizeof(stateid_opaque_t)); | ||
| 2139 | WRITECINFO(open->op_cinfo); | 2152 | WRITECINFO(open->op_cinfo); |
| 2140 | WRITE32(open->op_rflags); | 2153 | WRITE32(open->op_rflags); |
| 2141 | WRITE32(2); | 2154 | WRITE32(2); |
| @@ -2148,8 +2161,8 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op | |||
| 2148 | case NFS4_OPEN_DELEGATE_NONE: | 2161 | case NFS4_OPEN_DELEGATE_NONE: |
| 2149 | break; | 2162 | break; |
| 2150 | case NFS4_OPEN_DELEGATE_READ: | 2163 | case NFS4_OPEN_DELEGATE_READ: |
| 2151 | RESERVE_SPACE(20 + sizeof(stateid_t)); | 2164 | nfsd4_encode_stateid(resp, &open->op_delegate_stateid); |
| 2152 | WRITEMEM(&open->op_delegate_stateid, sizeof(stateid_t)); | 2165 | RESERVE_SPACE(20); |
| 2153 | WRITE32(open->op_recall); | 2166 | WRITE32(open->op_recall); |
| 2154 | 2167 | ||
| 2155 | /* | 2168 | /* |
| @@ -2162,8 +2175,8 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op | |||
| 2162 | ADJUST_ARGS(); | 2175 | ADJUST_ARGS(); |
| 2163 | break; | 2176 | break; |
| 2164 | case NFS4_OPEN_DELEGATE_WRITE: | 2177 | case NFS4_OPEN_DELEGATE_WRITE: |
| 2165 | RESERVE_SPACE(32 + sizeof(stateid_t)); | 2178 | nfsd4_encode_stateid(resp, &open->op_delegate_stateid); |
| 2166 | WRITEMEM(&open->op_delegate_stateid, sizeof(stateid_t)); | 2179 | RESERVE_SPACE(32); |
| 2167 | WRITE32(0); | 2180 | WRITE32(0); |
| 2168 | 2181 | ||
| 2169 | /* | 2182 | /* |
| @@ -2195,13 +2208,9 @@ static __be32 | |||
| 2195 | nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_confirm *oc) | 2208 | nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_confirm *oc) |
| 2196 | { | 2209 | { |
| 2197 | ENCODE_SEQID_OP_HEAD; | 2210 | ENCODE_SEQID_OP_HEAD; |
| 2198 | 2211 | ||
| 2199 | if (!nfserr) { | 2212 | if (!nfserr) |
| 2200 | RESERVE_SPACE(sizeof(stateid_t)); | 2213 | nfsd4_encode_stateid(resp, &oc->oc_resp_stateid); |
| 2201 | WRITE32(oc->oc_resp_stateid.si_generation); | ||
| 2202 | WRITEMEM(&oc->oc_resp_stateid.si_opaque, sizeof(stateid_opaque_t)); | ||
| 2203 | ADJUST_ARGS(); | ||
| 2204 | } | ||
| 2205 | 2214 | ||
| 2206 | ENCODE_SEQID_OP_TAIL(oc->oc_stateowner); | 2215 | ENCODE_SEQID_OP_TAIL(oc->oc_stateowner); |
| 2207 | return nfserr; | 2216 | return nfserr; |
| @@ -2211,13 +2220,9 @@ static __be32 | |||
| 2211 | nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_downgrade *od) | 2220 | nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_downgrade *od) |
| 2212 | { | 2221 | { |
| 2213 | ENCODE_SEQID_OP_HEAD; | 2222 | ENCODE_SEQID_OP_HEAD; |
| 2214 | 2223 | ||
| 2215 | if (!nfserr) { | 2224 | if (!nfserr) |
| 2216 | RESERVE_SPACE(sizeof(stateid_t)); | 2225 | nfsd4_encode_stateid(resp, &od->od_stateid); |
| 2217 | WRITE32(od->od_stateid.si_generation); | ||
| 2218 | WRITEMEM(&od->od_stateid.si_opaque, sizeof(stateid_opaque_t)); | ||
| 2219 | ADJUST_ARGS(); | ||
| 2220 | } | ||
| 2221 | 2226 | ||
| 2222 | ENCODE_SEQID_OP_TAIL(od->od_stateowner); | 2227 | ENCODE_SEQID_OP_TAIL(od->od_stateowner); |
| 2223 | return nfserr; | 2228 | return nfserr; |
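The stateid handling scattered through the decoders and encoders above is consolidated into nfsd4_decode_stateid() and nfsd4_encode_stateid(), with the callers adjusted so READ_BUF and RESERVE_SPACE now cover only the bytes around the stateid. The helpers treat the value in its RFC 3530 wire shape: a 32-bit seqid (si_generation) followed by 12 opaque bytes (si_opaque), sixteen bytes in total. An illustrative layout, with names of my own choosing:

    #include <stdint.h>

    /* Illustrative only: the XDR shape the new helpers read and write
     * (RFC 3530 stateid4: uint32 seqid followed by opaque other[12]). */
    struct stateid4_wire {
            uint32_t seqid;         /* si_generation, one XDR word     */
            uint8_t  other[12];     /* si_opaque, copied byte for byte */
    };

    _Static_assert(sizeof(struct stateid4_wire) == 16,
                   "stateid4 occupies four XDR words on the wire");

Relatedly, ENCODE_SEQID_OP_HEAD no longer declares the scratch pointer p, so an encoder that still writes words directly (nfsd4_encode_open above) now declares ENCODE_HEAD itself.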
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index c53e65f8f3a2..97543df58242 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c | |||
| @@ -614,10 +614,9 @@ static ssize_t __write_ports(struct file *file, char *buf, size_t size) | |||
| 614 | return -EINVAL; | 614 | return -EINVAL; |
| 615 | err = nfsd_create_serv(); | 615 | err = nfsd_create_serv(); |
| 616 | if (!err) { | 616 | if (!err) { |
| 617 | int proto = 0; | 617 | err = svc_addsock(nfsd_serv, fd, buf); |
| 618 | err = svc_addsock(nfsd_serv, fd, buf, &proto); | ||
| 619 | if (err >= 0) { | 618 | if (err >= 0) { |
| 620 | err = lockd_up(proto); | 619 | err = lockd_up(); |
| 621 | if (err < 0) | 620 | if (err < 0) |
| 622 | svc_sock_names(buf+strlen(buf)+1, nfsd_serv, buf); | 621 | svc_sock_names(buf+strlen(buf)+1, nfsd_serv, buf); |
| 623 | } | 622 | } |
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index ea37c96f0445..cd25d91895a1 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c | |||
| @@ -302,17 +302,27 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access) | |||
| 302 | if (error) | 302 | if (error) |
| 303 | goto out; | 303 | goto out; |
| 304 | 304 | ||
| 305 | if (!(access & NFSD_MAY_LOCK)) { | 305 | /* |
| 306 | /* | 306 | * pseudoflavor restrictions are not enforced on NLM, |
| 307 | * pseudoflavor restrictions are not enforced on NLM, | 307 | * which clients virtually always use auth_sys for, |
| 308 | * which clients virtually always use auth_sys for, | 308 | * even while using RPCSEC_GSS for NFS. |
| 309 | * even while using RPCSEC_GSS for NFS. | 309 | */ |
| 310 | */ | 310 | if (access & NFSD_MAY_LOCK) |
| 311 | error = check_nfsd_access(exp, rqstp); | 311 | goto skip_pseudoflavor_check; |
| 312 | if (error) | 312 | /* |
| 313 | goto out; | 313 | * Clients may expect to be able to use auth_sys during mount, |
| 314 | } | 314 | * even if they use gss for everything else; see section 2.3.2 |
| 315 | * of rfc 2623. | ||
| 316 | */ | ||
| 317 | if (access & NFSD_MAY_BYPASS_GSS_ON_ROOT | ||
| 318 | && exp->ex_path.dentry == dentry) | ||
| 319 | goto skip_pseudoflavor_check; | ||
| 320 | |||
| 321 | error = check_nfsd_access(exp, rqstp); | ||
| 322 | if (error) | ||
| 323 | goto out; | ||
| 315 | 324 | ||
| 325 | skip_pseudoflavor_check: | ||
| 316 | /* Finally, check access permissions. */ | 326 | /* Finally, check access permissions. */ |
| 317 | error = nfsd_permission(rqstp, exp, dentry, access); | 327 | error = nfsd_permission(rqstp, exp, dentry, access); |
| 318 | 328 | ||
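fh_verify() now has two ways past the per-export pseudoflavor check: NLM lock requests (NFSD_MAY_LOCK), as before, and operations that pass NFSD_MAY_BYPASS_GSS_ON_ROOT when the filehandle names the export root, so a client can issue its mount-time GETATTR/FSINFO/STATFS with auth_sys even against a gss-only export (RFC 2623, section 2.3.2). The same predicate, pulled out as a standalone sketch (the helper name is illustrative; the flow is the one added above):

    /* Sketch: when fh_verify() may skip check_nfsd_access(). */
    static bool may_bypass_pseudoflavor(int access, struct svc_export *exp,
                                        struct dentry *dentry)
    {
            if (access & NFSD_MAY_LOCK)             /* NLM traffic is auth_sys */
                    return true;
            if ((access & NFSD_MAY_BYPASS_GSS_ON_ROOT) &&
                exp->ex_path.dentry == dentry)      /* mount-time op on export root */
                    return true;
            return false;
    }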
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index 0766f95d236a..5cffeca7acef 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c | |||
| @@ -65,7 +65,8 @@ nfsd_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp, | |||
| 65 | dprintk("nfsd: GETATTR %s\n", SVCFH_fmt(&argp->fh)); | 65 | dprintk("nfsd: GETATTR %s\n", SVCFH_fmt(&argp->fh)); |
| 66 | 66 | ||
| 67 | fh_copy(&resp->fh, &argp->fh); | 67 | fh_copy(&resp->fh, &argp->fh); |
| 68 | nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP); | 68 | nfserr = fh_verify(rqstp, &resp->fh, 0, |
| 69 | NFSD_MAY_NOP | NFSD_MAY_BYPASS_GSS_ON_ROOT); | ||
| 69 | return nfsd_return_attrs(nfserr, resp); | 70 | return nfsd_return_attrs(nfserr, resp); |
| 70 | } | 71 | } |
| 71 | 72 | ||
| @@ -521,7 +522,8 @@ nfsd_proc_statfs(struct svc_rqst * rqstp, struct nfsd_fhandle *argp, | |||
| 521 | 522 | ||
| 522 | dprintk("nfsd: STATFS %s\n", SVCFH_fmt(&argp->fh)); | 523 | dprintk("nfsd: STATFS %s\n", SVCFH_fmt(&argp->fh)); |
| 523 | 524 | ||
| 524 | nfserr = nfsd_statfs(rqstp, &argp->fh, &resp->stats); | 525 | nfserr = nfsd_statfs(rqstp, &argp->fh, &resp->stats, |
| 526 | NFSD_MAY_BYPASS_GSS_ON_ROOT); | ||
| 525 | fh_put(&argp->fh); | 527 | fh_put(&argp->fh); |
| 526 | return nfserr; | 528 | return nfserr; |
| 527 | } | 529 | } |
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 80292ff5e924..59eeb46f82c5 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c | |||
| @@ -229,6 +229,7 @@ int nfsd_create_serv(void) | |||
| 229 | 229 | ||
| 230 | atomic_set(&nfsd_busy, 0); | 230 | atomic_set(&nfsd_busy, 0); |
| 231 | nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize, | 231 | nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize, |
| 232 | AF_INET, | ||
| 232 | nfsd_last_thread, nfsd, THIS_MODULE); | 233 | nfsd_last_thread, nfsd, THIS_MODULE); |
| 233 | if (nfsd_serv == NULL) | 234 | if (nfsd_serv == NULL) |
| 234 | err = -ENOMEM; | 235 | err = -ENOMEM; |
| @@ -243,25 +244,20 @@ static int nfsd_init_socks(int port) | |||
| 243 | if (!list_empty(&nfsd_serv->sv_permsocks)) | 244 | if (!list_empty(&nfsd_serv->sv_permsocks)) |
| 244 | return 0; | 245 | return 0; |
| 245 | 246 | ||
| 246 | error = lockd_up(IPPROTO_UDP); | 247 | error = svc_create_xprt(nfsd_serv, "udp", port, |
| 247 | if (error >= 0) { | ||
| 248 | error = svc_create_xprt(nfsd_serv, "udp", port, | ||
| 249 | SVC_SOCK_DEFAULTS); | 248 | SVC_SOCK_DEFAULTS); |
| 250 | if (error < 0) | ||
| 251 | lockd_down(); | ||
| 252 | } | ||
| 253 | if (error < 0) | 249 | if (error < 0) |
| 254 | return error; | 250 | return error; |
| 255 | 251 | ||
| 256 | error = lockd_up(IPPROTO_TCP); | 252 | error = svc_create_xprt(nfsd_serv, "tcp", port, |
| 257 | if (error >= 0) { | ||
| 258 | error = svc_create_xprt(nfsd_serv, "tcp", port, | ||
| 259 | SVC_SOCK_DEFAULTS); | 253 | SVC_SOCK_DEFAULTS); |
| 260 | if (error < 0) | ||
| 261 | lockd_down(); | ||
| 262 | } | ||
| 263 | if (error < 0) | 254 | if (error < 0) |
| 264 | return error; | 255 | return error; |
| 256 | |||
| 257 | error = lockd_up(); | ||
| 258 | if (error < 0) | ||
| 259 | return error; | ||
| 260 | |||
| 265 | return 0; | 261 | return 0; |
| 266 | } | 262 | } |
| 267 | 263 | ||
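nfsd's socket setup no longer brings lockd up once per transport protocol: it creates its own UDP and TCP transports first and then makes a single, argument-less lockd_up() call, and the write_ports path in nfsctl.c above follows the same pattern after svc_addsock(). A condensed sketch of the resulting order; error unwinding of already-created transports is left out, and the include paths are the usual nfsd ones rather than something this hunk shows.

    #include <linux/sunrpc/svc_xprt.h>
    #include <linux/sunrpc/svcsock.h>
    #include <linux/lockd/bind.h>

    /* Sketch: create the server's transports, then start lockd once. */
    static int demo_init_socks(struct svc_serv *serv, unsigned short port)
    {
            int error;

            error = svc_create_xprt(serv, "udp", port, SVC_SOCK_DEFAULTS);
            if (error < 0)
                    return error;

            error = svc_create_xprt(serv, "tcp", port, SVC_SOCK_DEFAULTS);
            if (error < 0)
                    return error;

            return lockd_up();      /* no per-protocol argument any more */
    }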
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 18060bed5267..aa1d0d6489a1 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
| @@ -83,7 +83,6 @@ struct raparm_hbucket { | |||
| 83 | spinlock_t pb_lock; | 83 | spinlock_t pb_lock; |
| 84 | } ____cacheline_aligned_in_smp; | 84 | } ____cacheline_aligned_in_smp; |
| 85 | 85 | ||
| 86 | static struct raparms * raparml; | ||
| 87 | #define RAPARM_HASH_BITS 4 | 86 | #define RAPARM_HASH_BITS 4 |
| 88 | #define RAPARM_HASH_SIZE (1<<RAPARM_HASH_BITS) | 87 | #define RAPARM_HASH_SIZE (1<<RAPARM_HASH_BITS) |
| 89 | #define RAPARM_HASH_MASK (RAPARM_HASH_SIZE-1) | 88 | #define RAPARM_HASH_MASK (RAPARM_HASH_SIZE-1) |
| @@ -1866,9 +1865,9 @@ out: | |||
| 1866 | * N.B. After this call fhp needs an fh_put | 1865 | * N.B. After this call fhp needs an fh_put |
| 1867 | */ | 1866 | */ |
| 1868 | __be32 | 1867 | __be32 |
| 1869 | nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat) | 1868 | nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access) |
| 1870 | { | 1869 | { |
| 1871 | __be32 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP); | 1870 | __be32 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access); |
| 1872 | if (!err && vfs_statfs(fhp->fh_dentry,stat)) | 1871 | if (!err && vfs_statfs(fhp->fh_dentry,stat)) |
| 1873 | err = nfserr_io; | 1872 | err = nfserr_io; |
| 1874 | return err; | 1873 | return err; |
| @@ -1966,11 +1965,20 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp, | |||
| 1966 | void | 1965 | void |
| 1967 | nfsd_racache_shutdown(void) | 1966 | nfsd_racache_shutdown(void) |
| 1968 | { | 1967 | { |
| 1969 | if (!raparml) | 1968 | struct raparms *raparm, *last_raparm; |
| 1970 | return; | 1969 | unsigned int i; |
| 1970 | |||
| 1971 | dprintk("nfsd: freeing readahead buffers.\n"); | 1971 | dprintk("nfsd: freeing readahead buffers.\n"); |
| 1972 | kfree(raparml); | 1972 | |
| 1973 | raparml = NULL; | 1973 | for (i = 0; i < RAPARM_HASH_SIZE; i++) { |
| 1974 | raparm = raparm_hash[i].pb_head; | ||
| 1975 | while(raparm) { | ||
| 1976 | last_raparm = raparm; | ||
| 1977 | raparm = raparm->p_next; | ||
| 1978 | kfree(last_raparm); | ||
| 1979 | } | ||
| 1980 | raparm_hash[i].pb_head = NULL; | ||
| 1981 | } | ||
| 1974 | } | 1982 | } |
| 1975 | /* | 1983 | /* |
| 1976 | * Initialize readahead param cache | 1984 | * Initialize readahead param cache |
| @@ -1981,35 +1989,38 @@ nfsd_racache_init(int cache_size) | |||
| 1981 | int i; | 1989 | int i; |
| 1982 | int j = 0; | 1990 | int j = 0; |
| 1983 | int nperbucket; | 1991 | int nperbucket; |
| 1992 | struct raparms **raparm = NULL; | ||
| 1984 | 1993 | ||
| 1985 | 1994 | ||
| 1986 | if (raparml) | 1995 | if (raparm_hash[0].pb_head) |
| 1987 | return 0; | 1996 | return 0; |
| 1988 | if (cache_size < 2*RAPARM_HASH_SIZE) | 1997 | nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE); |
| 1989 | cache_size = 2*RAPARM_HASH_SIZE; | 1998 | if (nperbucket < 2) |
| 1990 | raparml = kcalloc(cache_size, sizeof(struct raparms), GFP_KERNEL); | 1999 | nperbucket = 2; |
| 1991 | 2000 | cache_size = nperbucket * RAPARM_HASH_SIZE; | |
| 1992 | if (!raparml) { | ||
| 1993 | printk(KERN_WARNING | ||
| 1994 | "nfsd: Could not allocate memory read-ahead cache.\n"); | ||
| 1995 | return -ENOMEM; | ||
| 1996 | } | ||
| 1997 | 2001 | ||
| 1998 | dprintk("nfsd: allocating %d readahead buffers.\n", cache_size); | 2002 | dprintk("nfsd: allocating %d readahead buffers.\n", cache_size); |
| 1999 | for (i = 0 ; i < RAPARM_HASH_SIZE ; i++) { | 2003 | |
| 2000 | raparm_hash[i].pb_head = NULL; | 2004 | for (i = 0; i < RAPARM_HASH_SIZE; i++) { |
| 2001 | spin_lock_init(&raparm_hash[i].pb_lock); | 2005 | spin_lock_init(&raparm_hash[i].pb_lock); |
| 2002 | } | 2006 | |
| 2003 | nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE); | 2007 | raparm = &raparm_hash[i].pb_head; |
| 2004 | for (i = 0; i < cache_size - 1; i++) { | 2008 | for (j = 0; j < nperbucket; j++) { |
| 2005 | if (i % nperbucket == 0) | 2009 | *raparm = kzalloc(sizeof(struct raparms), GFP_KERNEL); |
| 2006 | raparm_hash[j++].pb_head = raparml + i; | 2010 | if (!*raparm) |
| 2007 | if (i % nperbucket < nperbucket-1) | 2011 | goto out_nomem; |
| 2008 | raparml[i].p_next = raparml + i + 1; | 2012 | raparm = &(*raparm)->p_next; |
| 2013 | } | ||
| 2014 | *raparm = NULL; | ||
| 2009 | } | 2015 | } |
| 2010 | 2016 | ||
| 2011 | nfsdstats.ra_size = cache_size; | 2017 | nfsdstats.ra_size = cache_size; |
| 2012 | return 0; | 2018 | return 0; |
| 2019 | |||
| 2020 | out_nomem: | ||
| 2021 | dprintk("nfsd: kmalloc failed, freeing readahead buffers\n"); | ||
| 2022 | nfsd_racache_shutdown(); | ||
| 2023 | return -ENOMEM; | ||
| 2013 | } | 2024 | } |
| 2014 | 2025 | ||
| 2015 | #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) | 2026 | #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) |
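The read-ahead parameter cache switches from one kcalloc()'d array, threaded into the hash buckets by index arithmetic, to per-entry kzalloc() allocations chained into a singly linked list per bucket; nfsd_racache_shutdown() then simply walks each bucket freeing nodes, which also makes it reusable as the error path when an allocation fails partway through initialisation. A stripped-down sketch of the pointer-to-pointer list building used above (type and function names are mine):

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct demo_node {
            struct demo_node *next;
    };

    /* Sketch: grow one hash bucket by chaining freshly allocated nodes
     * through a pointer-to-pointer cursor; on failure the caller frees
     * whatever was already linked in. */
    static int build_bucket(struct demo_node **head, unsigned int n)
    {
            struct demo_node **pp = head;
            unsigned int i;

            for (i = 0; i < n; i++) {
                    *pp = kzalloc(sizeof(**pp), GFP_KERNEL);
                    if (!*pp)
                            return -ENOMEM;
                    pp = &(*pp)->next;
            }
            *pp = NULL;     /* kzalloc already zeroed ->next; kept for clarity */
            return 0;
    }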
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 66c1ab87656c..b675a49c1823 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c | |||
| @@ -683,6 +683,7 @@ static int cmdline_read_proc(char *page, char **start, off_t off, | |||
| 683 | return proc_calc_metrics(page, start, off, count, eof, len); | 683 | return proc_calc_metrics(page, start, off, count, eof, len); |
| 684 | } | 684 | } |
| 685 | 685 | ||
| 686 | #ifdef CONFIG_FILE_LOCKING | ||
| 686 | static int locks_open(struct inode *inode, struct file *filp) | 687 | static int locks_open(struct inode *inode, struct file *filp) |
| 687 | { | 688 | { |
| 688 | return seq_open(filp, &locks_seq_operations); | 689 | return seq_open(filp, &locks_seq_operations); |
| @@ -694,6 +695,7 @@ static const struct file_operations proc_locks_operations = { | |||
| 694 | .llseek = seq_lseek, | 695 | .llseek = seq_lseek, |
| 695 | .release = seq_release, | 696 | .release = seq_release, |
| 696 | }; | 697 | }; |
| 698 | #endif /* CONFIG_FILE_LOCKING */ | ||
| 697 | 699 | ||
| 698 | static int execdomains_read_proc(char *page, char **start, off_t off, | 700 | static int execdomains_read_proc(char *page, char **start, off_t off, |
| 699 | int count, int *eof, void *data) | 701 | int count, int *eof, void *data) |
| @@ -887,7 +889,9 @@ void __init proc_misc_init(void) | |||
| 887 | #ifdef CONFIG_PRINTK | 889 | #ifdef CONFIG_PRINTK |
| 888 | proc_create("kmsg", S_IRUSR, NULL, &proc_kmsg_operations); | 890 | proc_create("kmsg", S_IRUSR, NULL, &proc_kmsg_operations); |
| 889 | #endif | 891 | #endif |
| 892 | #ifdef CONFIG_FILE_LOCKING | ||
| 890 | proc_create("locks", 0, NULL, &proc_locks_operations); | 893 | proc_create("locks", 0, NULL, &proc_locks_operations); |
| 894 | #endif | ||
| 891 | proc_create("devices", 0, NULL, &proc_devinfo_operations); | 895 | proc_create("devices", 0, NULL, &proc_devinfo_operations); |
| 892 | proc_create("cpuinfo", 0, NULL, &proc_cpuinfo_operations); | 896 | proc_create("cpuinfo", 0, NULL, &proc_cpuinfo_operations); |
| 893 | #ifdef CONFIG_BLOCK | 897 | #ifdef CONFIG_BLOCK |
