author    Trond Myklebust <Trond.Myklebust@netapp.com>	2007-07-02 13:58:33 -0400
committer Trond Myklebust <Trond.Myklebust@netapp.com>	2007-07-10 23:40:39 -0400
commit    9f958ab8858c75df800e0121b1920182820cbc39
tree      754bb9d68aac077825b40796e2acf716bce5df08
parent    88d9093997e1c73ca98db41b5605dbde7783845f
NFSv4: Reduce the chances of an open_owner identifier collision

Currently we just use a 32-bit counter, so an identifier can be handed out
again as soon as the counter wraps, even if the original owner is still in
use. Switch to 64-bit identifiers that are seeded with a random value and
tracked in per-client red-black trees, so an identifier is never allocated
twice while it remains live.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
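For reference, the owner opaque emitted by the new XDR code is 16 bytes: an 8-byte ASCII tag ("open id:" or "lock id:") followed by the 64-bit identifier in XDR (big-endian) byte order. That is why open_owner_id_maxsz and lock_owner_id_maxsz grow from (1 + 1) to (1 + 4) 32-bit words, and why each affected RESERVE_SPACE() call grows by 12 bytes. The helper below is an illustrative userspace sketch, not part of the patch, and its name is made up for illustration:

	#include <stdint.h>
	#include <string.h>

	/* Illustrative only: mirrors the WRITEMEM("open id:", 8) + WRITE64(id)
	 * sequence added to encode_openhdr()/encode_lock(); the resulting 16
	 * bytes form the owner opaque whose length word (16) precedes it. */
	static void sketch_encode_owner(unsigned char buf[16], const char tag[8],
					uint64_t id)
	{
		int i;

		memcpy(buf, tag, 8);		/* "open id:" or "lock id:" */
		for (i = 0; i < 8; i++)		/* 64-bit id, big-endian (XDR) */
			buf[8 + i] = (unsigned char)(id >> (56 - 8 * i));
	}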
Diffstat (limited to 'fs')

-rw-r--r--  fs/nfs/client.c    |   3
-rw-r--r--  fs/nfs/nfs4_fs.h   |  17
-rw-r--r--  fs/nfs/nfs4proc.c  |   8
-rw-r--r--  fs/nfs/nfs4state.c | 193
-rw-r--r--  fs/nfs/nfs4xdr.c   |  27

5 files changed, 185 insertions, 63 deletions
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 6b424407d631..ccb455053ee4 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -130,7 +130,6 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
 #ifdef CONFIG_NFS_V4
 	init_rwsem(&clp->cl_sem);
 	INIT_LIST_HEAD(&clp->cl_delegations);
-	INIT_LIST_HEAD(&clp->cl_state_owners);
 	spin_lock_init(&clp->cl_lock);
 	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
@@ -154,7 +153,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
 #ifdef CONFIG_NFS_V4
 	if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
 		nfs4_kill_renewd(clp);
-	BUG_ON(!list_empty(&clp->cl_state_owners));
+	BUG_ON(!RB_EMPTY_ROOT(&clp->cl_state_owners));
 	if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
 		nfs_idmap_delete(clp);
 #endif
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index c97a0ad8430e..44b56c915f72 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -70,19 +70,25 @@ static inline void nfs_confirm_seqid(struct nfs_seqid_counter *seqid, int status
 		seqid->flags |= NFS_SEQID_CONFIRMED;
 }
 
+struct nfs_unique_id {
+	struct rb_node rb_node;
+	__u64 id;
+};
+
 /*
  * NFS4 state_owners and lock_owners are simply labels for ordered
  * sequences of RPC calls. Their sole purpose is to provide once-only
  * semantics by allowing the server to identify replayed requests.
  */
 struct nfs4_state_owner {
-	spinlock_t so_lock;
-	struct list_head so_list;	/* per-clientid list of state_owners */
+	struct nfs_unique_id so_owner_id;
 	struct nfs_client *so_client;
-	u32 so_id;			/* 32-bit identifier, unique */
-	atomic_t so_count;
+	struct rb_node so_client_node;
 
 	struct rpc_cred *so_cred;	/* Associated cred */
+
+	spinlock_t so_lock;
+	atomic_t so_count;
 	struct list_head so_states;
 	struct list_head so_delegations;
 	struct nfs_seqid_counter so_seqid;
@@ -108,7 +114,7 @@ struct nfs4_lock_state {
 #define NFS_LOCK_INITIALIZED 1
 	int ls_flags;
 	struct nfs_seqid_counter ls_seqid;
-	u32 ls_id;
+	struct nfs_unique_id ls_id;
 	nfs4_stateid ls_stateid;
 	atomic_t ls_count;
 };
@@ -189,7 +195,6 @@ extern void nfs4_renew_state(struct work_struct *);
 
 /* nfs4state.c */
 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp);
-extern u32 nfs4_alloc_lockowner_id(struct nfs_client *);
 
 extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
 extern void nfs4_put_state_owner(struct nfs4_state_owner *);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 84d0b7e0dd67..1840ebc78fd3 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -253,7 +253,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
 	p->o_arg.fh = NFS_FH(dir);
 	p->o_arg.open_flags = flags,
 	p->o_arg.clientid = server->nfs_client->cl_clientid;
-	p->o_arg.id = sp->so_id;
+	p->o_arg.id = sp->so_owner_id.id;
 	p->o_arg.name = &p->path.dentry->d_name;
 	p->o_arg.server = server;
 	p->o_arg.bitmask = server->attr_bitmask;
@@ -651,7 +651,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
 	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
 		return;
 	/* Update sequence id. */
-	data->o_arg.id = sp->so_id;
+	data->o_arg.id = sp->so_owner_id.id;
 	data->o_arg.clientid = sp->so_client->cl_clientid;
 	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
 		msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
@@ -3029,7 +3029,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
 	if (status != 0)
 		goto out;
 	lsp = request->fl_u.nfs4_fl.owner;
-	arg.lock_owner.id = lsp->ls_id;
+	arg.lock_owner.id = lsp->ls_id.id;
 	status = rpc_call_sync(server->client, &msg, 0);
 	switch (status) {
 		case 0:
@@ -3243,7 +3243,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
 		goto out_free;
 	p->arg.lock_stateid = &lsp->ls_stateid;
 	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
-	p->arg.lock_owner.id = lsp->ls_id;
+	p->arg.lock_owner.id = lsp->ls_id.id;
 	p->lsp = lsp;
 	atomic_inc(&lsp->ls_count);
 	p->ctx = get_nfs_open_context(ctx);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0f79d56e97f0..ab0b5ab60e60 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -44,6 +44,7 @@
 #include <linux/nfs_idmap.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
+#include <linux/random.h>
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
 
@@ -69,18 +70,14 @@ static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
 	return status;
 }
 
-u32
-nfs4_alloc_lockowner_id(struct nfs_client *clp)
-{
-	return clp->cl_lockowner_id ++;
-}
-
 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
 {
 	struct nfs4_state_owner *sp;
+	struct rb_node *pos;
 	struct rpc_cred *cred = NULL;
 
-	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
+	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
 		if (list_empty(&sp->so_states))
 			continue;
 		cred = get_rpccred(sp->so_cred);
@@ -92,32 +89,129 @@ struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
 static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
 {
 	struct nfs4_state_owner *sp;
+	struct rb_node *pos;
 
-	if (!list_empty(&clp->cl_state_owners)) {
-		sp = list_entry(clp->cl_state_owners.next,
-				struct nfs4_state_owner, so_list);
+	pos = rb_first(&clp->cl_state_owners);
+	if (pos != NULL) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
 		return get_rpccred(sp->so_cred);
 	}
 	return NULL;
 }
 
+static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
+		__u64 minval, int maxbits)
+{
+	struct rb_node **p, *parent;
+	struct nfs_unique_id *pos;
+	__u64 mask = ~0ULL;
+
+	if (maxbits < 64)
+		mask = (1ULL << maxbits) - 1ULL;
+
+	/* Ensure distribution is more or less flat */
+	get_random_bytes(&new->id, sizeof(new->id));
+	new->id &= mask;
+	if (new->id < minval)
+		new->id += minval;
+retry:
+	p = &root->rb_node;
+	parent = NULL;
+
+	while (*p != NULL) {
+		parent = *p;
+		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
+
+		if (new->id < pos->id)
+			p = &(*p)->rb_left;
+		else if (new->id > pos->id)
+			p = &(*p)->rb_right;
+		else
+			goto id_exists;
+	}
+	rb_link_node(&new->rb_node, parent, p);
+	rb_insert_color(&new->rb_node, root);
+	return;
+id_exists:
+	for (;;) {
+		new->id++;
+		if (new->id < minval || (new->id & mask) != new->id) {
+			new->id = minval;
+			break;
+		}
+		parent = rb_next(parent);
+		if (parent == NULL)
+			break;
+		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
+		if (new->id < pos->id)
+			break;
+	}
+	goto retry;
+}
+
+static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
+{
+	rb_erase(&id->rb_node, root);
+}
+
 static struct nfs4_state_owner *
 nfs4_find_state_owner(struct nfs_client *clp, struct rpc_cred *cred)
 {
+	struct rb_node **p = &clp->cl_state_owners.rb_node,
+		       *parent = NULL;
 	struct nfs4_state_owner *sp, *res = NULL;
 
-	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
-		if (sp->so_cred != cred)
-			continue;
-		atomic_inc(&sp->so_count);
-		/* Move to the head of the list */
-		list_move(&sp->so_list, &clp->cl_state_owners);
-		res = sp;
-		break;
+	while (*p != NULL) {
+		parent = *p;
+		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
+
+		if (cred < sp->so_cred)
+			p = &parent->rb_left;
+		else if (cred > sp->so_cred)
+			p = &parent->rb_right;
+		else {
+			atomic_inc(&sp->so_count);
+			res = sp;
+			break;
+		}
 	}
 	return res;
 }
 
+static struct nfs4_state_owner *
+nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
+{
+	struct rb_node **p = &clp->cl_state_owners.rb_node,
+		       *parent = NULL;
+	struct nfs4_state_owner *sp;
+
+	while (*p != NULL) {
+		parent = *p;
+		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
+
+		if (new->so_cred < sp->so_cred)
+			p = &parent->rb_left;
+		else if (new->so_cred > sp->so_cred)
+			p = &parent->rb_right;
+		else {
+			atomic_inc(&sp->so_count);
+			return sp;
+		}
+	}
+	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
+	rb_link_node(&new->so_client_node, parent, p);
+	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
+	return new;
+}
+
+static void
+nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
+{
+	if (!RB_EMPTY_NODE(&sp->so_client_node))
+		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
+	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
+}
+
 /*
  * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
  * create a new state_owner.
@@ -145,10 +239,14 @@ nfs4_alloc_state_owner(void)
 void
 nfs4_drop_state_owner(struct nfs4_state_owner *sp)
 {
-	struct nfs_client *clp = sp->so_client;
-	spin_lock(&clp->cl_lock);
-	list_del_init(&sp->so_list);
-	spin_unlock(&clp->cl_lock);
+	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
+		struct nfs_client *clp = sp->so_client;
+
+		spin_lock(&clp->cl_lock);
+		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
+		RB_CLEAR_NODE(&sp->so_client_node);
+		spin_unlock(&clp->cl_lock);
+	}
 }
 
 /*
@@ -160,22 +258,24 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
 	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_state_owner *sp, *new;
 
-	new = nfs4_alloc_state_owner();
 	spin_lock(&clp->cl_lock);
 	sp = nfs4_find_state_owner(clp, cred);
-	if (sp == NULL && new != NULL) {
-		list_add(&new->so_list, &clp->cl_state_owners);
-		new->so_client = clp;
-		new->so_id = nfs4_alloc_lockowner_id(clp);
-		new->so_cred = get_rpccred(cred);
-		sp = new;
-		new = NULL;
-	}
 	spin_unlock(&clp->cl_lock);
-	kfree(new);
 	if (sp != NULL)
 		return sp;
-	return NULL;
+	new = nfs4_alloc_state_owner();
+	if (new == NULL)
+		return NULL;
+	new->so_client = clp;
+	new->so_cred = cred;
+	spin_lock(&clp->cl_lock);
+	sp = nfs4_insert_state_owner(clp, new);
+	spin_unlock(&clp->cl_lock);
+	if (sp == new)
+		get_rpccred(cred);
+	else
+		kfree(new);
+	return sp;
 }
 
 /*
@@ -189,7 +289,7 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
 
 	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
 		return;
-	list_del(&sp->so_list);
+	nfs4_remove_state_owner(clp, sp);
 	spin_unlock(&clp->cl_lock);
 	put_rpccred(cred);
 	kfree(sp);
@@ -386,12 +486,22 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 	atomic_set(&lsp->ls_count, 1);
 	lsp->ls_owner = fl_owner;
 	spin_lock(&clp->cl_lock);
-	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
+	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
 	spin_unlock(&clp->cl_lock);
 	INIT_LIST_HEAD(&lsp->ls_locks);
 	return lsp;
 }
 
+static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
+{
+	struct nfs_client *clp = lsp->ls_state->owner->so_client;
+
+	spin_lock(&clp->cl_lock);
+	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
+	spin_unlock(&clp->cl_lock);
+	kfree(lsp);
+}
+
 /*
  * Return a compatible lock_state. If no initialized lock_state structure
  * exists, return an uninitialized one.
@@ -421,7 +531,8 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_
 		return NULL;
 	}
 	spin_unlock(&state->state_lock);
-	kfree(new);
+	if (new != NULL)
+		nfs4_free_lock_state(new);
 	return lsp;
 }
 
@@ -442,7 +553,7 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
 	if (list_empty(&state->lock_states))
 		clear_bit(LK_STATE_IN_USE, &state->flags);
 	spin_unlock(&state->state_lock);
-	kfree(lsp);
+	nfs4_free_lock_state(lsp);
 }
 
 static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
@@ -719,11 +830,13 @@ out_err:
 static void nfs4_state_mark_reclaim(struct nfs_client *clp)
 {
 	struct nfs4_state_owner *sp;
+	struct rb_node *pos;
 	struct nfs4_state *state;
 	struct nfs4_lock_state *lock;
 
 	/* Reset all sequence ids to zero */
-	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
+	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
 		sp->so_seqid.counter = 0;
 		sp->so_seqid.flags = 0;
 		spin_lock(&sp->so_lock);
@@ -742,6 +855,7 @@ static int reclaimer(void *ptr)
 {
 	struct nfs_client *clp = ptr;
 	struct nfs4_state_owner *sp;
+	struct rb_node *pos;
 	struct nfs4_state_recovery_ops *ops;
 	struct rpc_cred *cred;
 	int status = 0;
@@ -787,7 +901,8 @@ restart_loop:
 	/* Mark all delegations for reclaim */
 	nfs_delegation_mark_reclaim(clp);
 	/* Note: list is protected by exclusive lock on cl->cl_sem */
-	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
+	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
 		status = nfs4_reclaim_open_state(ops, sp);
 		if (status < 0) {
 			if (status == -NFS4ERR_NO_GRACE) {
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 4c8f67d47523..c08738441f73 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -68,10 +68,10 @@ static int nfs4_stat_to_errno(int);
 #endif
 
 /* lock,open owner id:
- * we currently use size 1 (u32) out of (NFS4_OPAQUE_LIMIT >> 2)
+ * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2)
  */
-#define open_owner_id_maxsz	(1 + 1)
-#define lock_owner_id_maxsz	(1 + 1)
+#define open_owner_id_maxsz	(1 + 4)
+#define lock_owner_id_maxsz	(1 + 4)
 #define compound_encode_hdr_maxsz	(3 + (NFS4_MAXTAGLEN >> 2))
 #define compound_decode_hdr_maxsz	(3 + (NFS4_MAXTAGLEN >> 2))
 #define op_encode_hdr_maxsz	(1)
@@ -827,13 +827,14 @@ static int encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args)
 	WRITE64(nfs4_lock_length(args->fl));
 	WRITE32(args->new_lock_owner);
 	if (args->new_lock_owner){
-		RESERVE_SPACE(4+NFS4_STATEID_SIZE+20);
+		RESERVE_SPACE(4+NFS4_STATEID_SIZE+32);
 		WRITE32(args->open_seqid->sequence->counter);
 		WRITEMEM(args->open_stateid->data, NFS4_STATEID_SIZE);
 		WRITE32(args->lock_seqid->sequence->counter);
 		WRITE64(args->lock_owner.clientid);
-		WRITE32(4);
-		WRITE32(args->lock_owner.id);
+		WRITE32(16);
+		WRITEMEM("lock id:", 8);
+		WRITE64(args->lock_owner.id);
 	}
 	else {
 		RESERVE_SPACE(NFS4_STATEID_SIZE+4);
@@ -848,14 +849,15 @@ static int encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *arg
 {
 	__be32 *p;
 
-	RESERVE_SPACE(40);
+	RESERVE_SPACE(52);
 	WRITE32(OP_LOCKT);
 	WRITE32(nfs4_lock_type(args->fl, 0));
 	WRITE64(args->fl->fl_start);
 	WRITE64(nfs4_lock_length(args->fl));
 	WRITE64(args->lock_owner.clientid);
-	WRITE32(4);
-	WRITE32(args->lock_owner.id);
+	WRITE32(16);
+	WRITEMEM("lock id:", 8);
+	WRITE64(args->lock_owner.id);
 
 	return 0;
 }
@@ -920,10 +922,11 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
 	WRITE32(OP_OPEN);
 	WRITE32(arg->seqid->sequence->counter);
 	encode_share_access(xdr, arg->open_flags);
-	RESERVE_SPACE(16);
+	RESERVE_SPACE(28);
 	WRITE64(arg->clientid);
-	WRITE32(4);
-	WRITE32(arg->id);
+	WRITE32(16);
+	WRITEMEM("open id:", 8);
+	WRITE64(arg->id);
 }
 
 static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)