author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-29 17:53:25 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-29 17:53:25 -0400
commit		71db34fc4330f7c784397acb9f1e6ee7f7b32eb2 (patch)
tree		77dd08f6f778a799dcd0c48eb72d0742349df235 /net/sunrpc
parent		50483c3268918ee51a56d1baa39b9149d2d0d521 (diff)
parent		797a9d797f8483bb67f265c761b76dcd5a077a23 (diff)
Merge branch 'for-3.4' of git://linux-nfs.org/~bfields/linux
Pull nfsd changes from Bruce Fields:
 "Highlights:

   - Benny Halevy and Tigran Mkrtchyan implemented some more 4.1
     features, moving us closer to a complete 4.1 implementation.

   - Bernd Schubert fixed a long-standing problem with readdir cookies
     on ext2/3/4.

   - Jeff Layton performed a long-overdue overhaul of the server reboot
     recovery code, which will allow us to deprecate the current code
     (a rather unusual user of the vfs) and give us some needed
     flexibility for further improvements.

   - Like the client, we now support numeric uids and gids in the
     auth_sys case, allowing easier upgrades from NFSv2/v3 to v4.x.

  Plus miscellaneous bugfixes and cleanup. Thanks to everyone!

  There are also some delegation fixes waiting on vfs review that I
  suppose will have to wait for 3.5. With that done, I think we'll
  finally turn off the "EXPERIMENTAL" dependency for v4 (though that's
  mostly symbolic, as it's been on by default in distros for a while).
  And the list of 4.1 todos should be achievable for 3.5 as well:

    http://wiki.linux-nfs.org/wiki/index.php/Server_4.0_and_4.1_issues

  though we may still want a bit more experience with it before turning
  it on by default."

* 'for-3.4' of git://linux-nfs.org/~bfields/linux: (55 commits)
  nfsd: only register cld pipe notifier when CONFIG_NFSD_V4 is enabled
  nfsd4: use auth_unix unconditionally on backchannel
  nfsd: fix NULL pointer dereference in cld_pipe_downcall
  nfsd4: memory corruption in numeric_name_to_id()
  sunrpc: skip portmap calls on sessions backchannel
  nfsd4: allow numeric idmapping
  nfsd: don't allow legacy client tracker init for anything but init_net
  nfsd: add notifier to handle mount/unmount of rpc_pipefs sb
  nfsd: add the infrastructure to handle the cld upcall
  nfsd: add a header describing upcall to nfsdcld
  nfsd: add a per-net-namespace struct for nfsd
  sunrpc: create nfsd dir in rpc_pipefs
  nfsd: add nfsd4_client_tracking_ops struct and a way to set it
  nfsd: convert nfs4_client->cl_cb_flags to a generic flags field
  NFSD: Fix nfs4_verifier memory alignment
  NFSD: Fix warnings when NFSD_DEBUG is not defined
  nfsd: vfs_llseek() with 32 or 64 bit offsets (hashes)
  nfsd: rename 'int access' to 'int may_flags' in nfsd_open()
  ext4: return 32/64-bit dir name hash according to usage type
  fs: add new FMODE flags: FMODE_32bithash and FMODE_64bithash
  ...
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/cache.c                        |   2
-rw-r--r--  net/sunrpc/rpc_pipe.c                     |   5
-rw-r--r--  net/sunrpc/svcauth_unix.c                 |   2
-rw-r--r--  net/sunrpc/svcsock.c                      |   2
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma.c            |   1
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_marshal.c    |  66
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c   |  20
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c     |  26
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c  |  10
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h           |   7
-rw-r--r--  net/sunrpc/xprtsock.c                     |   1
11 files changed, 59 insertions(+), 83 deletions(-)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index f21ece088764..de0b0f39d9d8 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -830,6 +830,8 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
 {
 	ssize_t ret;
 
+	if (count == 0)
+		return -EINVAL;
 	if (copy_from_user(kaddr, buf, count))
 		return -EFAULT;
 	kaddr[count] = '\0';
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index c84c0e0c41cb..0af37fc46818 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1014,6 +1014,7 @@ enum {
 	RPCAUTH_statd,
 	RPCAUTH_nfsd4_cb,
 	RPCAUTH_cache,
+	RPCAUTH_nfsd,
 	RPCAUTH_RootEOF
 };
 
@@ -1046,6 +1047,10 @@ static const struct rpc_filelist files[] = {
 		.name = "cache",
 		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
 	},
+	[RPCAUTH_nfsd] = {
+		.name = "nfsd",
+		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
+	},
 };
 
 /*
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index bcd574f2ac56..521d8f7dc833 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -507,7 +507,7 @@ static int unix_gid_parse(struct cache_detail *cd,
 	time_t expiry;
 	struct unix_gid ug, *ugp;
 
-	if (mlen <= 0 || mesg[mlen-1] != '\n')
+	if (mesg[mlen - 1] != '\n')
 		return -EINVAL;
 	mesg[mlen-1] = 0;
 
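
The cache.c and svcauth_unix.c hunks above work as a pair: cache_do_downcall() now rejects empty writes once in the common path, which is why unix_gid_parse() can drop its own mlen check before reading mesg[mlen - 1]. A minimal userspace sketch of the pattern, with illustrative names and memcpy() standing in for copy_from_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stands in for a per-cache parser such as unix_gid_parse(): it may
 * assume mlen >= 1, so mesg[mlen - 1] is always a valid access. */
static int parse_line(char *mesg, int mlen)
{
	if (mesg[mlen - 1] != '\n')
		return -EINVAL;
	mesg[mlen - 1] = '\0';
	printf("parsed: '%s'\n", mesg);
	return 0;
}

/* Stands in for cache_do_downcall(): validate the write size once in
 * the common path before NUL-terminating and dispatching. */
static int do_downcall(char *kaddr, const char *buf, size_t count)
{
	if (count == 0)
		return -EINVAL;
	memcpy(kaddr, buf, count);	/* kernel uses copy_from_user() */
	kaddr[count] = '\0';
	return parse_line(kaddr, (int)count);
}

int main(void)
{
	char page[64];

	do_downcall(page, "100 2 10 20\n", 12);	/* accepted */
	do_downcall(page, "", 0);		/* rejected up front */
	return 0;
}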
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 40ae884db865..824d32fb3121 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1381,8 +1381,6 @@ void svc_sock_update_bufs(struct svc_serv *serv)
 	spin_lock_bh(&serv->sv_lock);
 	list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list)
 		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
-	list_for_each_entry(svsk, &serv->sv_tempsocks, sk_xprt.xpt_list)
-		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 	spin_unlock_bh(&serv->sv_lock);
 }
 EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index 09af4fab1a45..8343737e85f4 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -47,6 +47,7 @@
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/svc_rdma.h>
+#include "xprt_rdma.h"
 
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
index 9530ef2d40dc..8d2edddf48cf 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
@@ -60,21 +60,11 @@ static u32 *decode_read_list(u32 *va, u32 *vaend)
 	struct rpcrdma_read_chunk *ch = (struct rpcrdma_read_chunk *)va;
 
 	while (ch->rc_discrim != xdr_zero) {
-		u64 ch_offset;
-
 		if (((unsigned long)ch + sizeof(struct rpcrdma_read_chunk)) >
 		    (unsigned long)vaend) {
 			dprintk("svcrdma: vaend=%p, ch=%p\n", vaend, ch);
 			return NULL;
 		}
-
-		ch->rc_discrim = ntohl(ch->rc_discrim);
-		ch->rc_position = ntohl(ch->rc_position);
-		ch->rc_target.rs_handle = ntohl(ch->rc_target.rs_handle);
-		ch->rc_target.rs_length = ntohl(ch->rc_target.rs_length);
-		va = (u32 *)&ch->rc_target.rs_offset;
-		xdr_decode_hyper(va, &ch_offset);
-		put_unaligned(ch_offset, (u64 *)va);
 		ch++;
 	}
 	return (u32 *)&ch->rc_position;
@@ -91,7 +81,7 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
 	*byte_count = 0;
 	*ch_count = 0;
 	for (; ch->rc_discrim != 0; ch++) {
-		*byte_count = *byte_count + ch->rc_target.rs_length;
+		*byte_count = *byte_count + ntohl(ch->rc_target.rs_length);
 		*ch_count = *ch_count + 1;
 	}
 }
@@ -108,7 +98,8 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
  */
 static u32 *decode_write_list(u32 *va, u32 *vaend)
 {
-	int ch_no;
+	int nchunks;
+
 	struct rpcrdma_write_array *ary =
 		(struct rpcrdma_write_array *)va;
 
@@ -121,37 +112,24 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
 		dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
 		return NULL;
 	}
-	ary->wc_discrim = ntohl(ary->wc_discrim);
-	ary->wc_nchunks = ntohl(ary->wc_nchunks);
+	nchunks = ntohl(ary->wc_nchunks);
 	if (((unsigned long)&ary->wc_array[0] +
-	     (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) >
+	     (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
 	    (unsigned long)vaend) {
 		dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
-			ary, ary->wc_nchunks, vaend);
+			ary, nchunks, vaend);
 		return NULL;
 	}
-	for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) {
-		u64 ch_offset;
-
-		ary->wc_array[ch_no].wc_target.rs_handle =
-			ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
-		ary->wc_array[ch_no].wc_target.rs_length =
-			ntohl(ary->wc_array[ch_no].wc_target.rs_length);
-		va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
-		xdr_decode_hyper(va, &ch_offset);
-		put_unaligned(ch_offset, (u64 *)va);
-	}
-
 	/*
 	 * rs_length is the 2nd 4B field in wc_target and taking its
 	 * address skips the list terminator
 	 */
-	return (u32 *)&ary->wc_array[ch_no].wc_target.rs_length;
+	return (u32 *)&ary->wc_array[nchunks].wc_target.rs_length;
 }
 
 static u32 *decode_reply_array(u32 *va, u32 *vaend)
 {
-	int ch_no;
+	int nchunks;
 	struct rpcrdma_write_array *ary =
 		(struct rpcrdma_write_array *)va;
 
@@ -164,28 +142,15 @@ static u32 *decode_reply_array(u32 *va, u32 *vaend)
 		dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
 		return NULL;
 	}
-	ary->wc_discrim = ntohl(ary->wc_discrim);
-	ary->wc_nchunks = ntohl(ary->wc_nchunks);
+	nchunks = ntohl(ary->wc_nchunks);
 	if (((unsigned long)&ary->wc_array[0] +
-	     (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) >
+	     (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
 	    (unsigned long)vaend) {
 		dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
-			ary, ary->wc_nchunks, vaend);
+			ary, nchunks, vaend);
 		return NULL;
 	}
-	for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) {
-		u64 ch_offset;
-
-		ary->wc_array[ch_no].wc_target.rs_handle =
-			ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
-		ary->wc_array[ch_no].wc_target.rs_length =
-			ntohl(ary->wc_array[ch_no].wc_target.rs_length);
-		va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
-		xdr_decode_hyper(va, &ch_offset);
-		put_unaligned(ch_offset, (u64 *)va);
-	}
-
-	return (u32 *)&ary->wc_array[ch_no];
+	return (u32 *)&ary->wc_array[nchunks];
 }
 
 int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req,
@@ -386,13 +351,14 @@ void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *ary,
 
 void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary,
 				     int chunk_no,
-				     u32 rs_handle, u64 rs_offset,
+				     __be32 rs_handle,
+				     __be64 rs_offset,
 				     u32 write_len)
 {
 	struct rpcrdma_segment *seg = &ary->wc_array[chunk_no].wc_target;
-	seg->rs_handle = htonl(rs_handle);
+	seg->rs_handle = rs_handle;
+	seg->rs_offset = rs_offset;
 	seg->rs_length = htonl(write_len);
-	xdr_encode_hyper((u32 *) &seg->rs_offset, rs_offset);
 }
 
 void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *xprt,
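
The svc_rdma_marshal.c changes above share one theme: the received chunk lists are left in network byte order, and each field is converted with ntohl() or xdr_decode_hyper() at the point of use, instead of being byte-swapped in place (which destroyed the on-the-wire data for any later pass over the buffer). A standalone userspace sketch of that read-only decode, using a simplified stand-in for struct rpcrdma_segment and a decode_hyper() helper modeled on the kernel's xdr_decode_hyper():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Simplified stand-in for struct rpcrdma_segment: all fields arrive
 * big-endian; the 64-bit offset is two 32-bit XDR words and may be
 * unaligned within the receive buffer. */
struct segment {
	uint32_t rs_handle;
	uint32_t rs_length;
	uint32_t rs_offset[2];
};

/* Modeled on xdr_decode_hyper(): assemble a 64-bit value from two
 * big-endian 32-bit words without assuming 8-byte alignment. */
static uint64_t decode_hyper(const uint32_t *p)
{
	uint32_t hi, lo;

	memcpy(&hi, &p[0], sizeof(hi));
	memcpy(&lo, &p[1], sizeof(lo));
	return ((uint64_t)ntohl(hi) << 32) | ntohl(lo);
}

int main(void)
{
	/* A "received" segment, encoded as it would appear on the wire. */
	struct segment seg = {
		.rs_handle = htonl(0x1234),
		.rs_length = htonl(4096),
		.rs_offset = { htonl(0x0), htonl(0x8000) },
	};

	/* Decode at the point of use; seg itself is never modified, so
	 * the buffer can be walked again later (e.g. when building the
	 * reply) without a second byte-swap corrupting it. */
	printf("handle=%" PRIu32 " length=%" PRIu32 " offset=0x%" PRIx64 "\n",
	       ntohl(seg.rs_handle), ntohl(seg.rs_length),
	       decode_hyper(seg.rs_offset));
	return 0;
}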
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index df67211c4baf..41cb63b623df 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -147,7 +147,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
 	page_off = 0;
 	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
 	ch_no = 0;
-	ch_bytes = ch->rc_target.rs_length;
+	ch_bytes = ntohl(ch->rc_target.rs_length);
 	head->arg.head[0] = rqstp->rq_arg.head[0];
 	head->arg.tail[0] = rqstp->rq_arg.tail[0];
 	head->arg.pages = &head->pages[head->count];
@@ -183,7 +183,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
 			ch_no++;
 			ch++;
 			chl_map->ch[ch_no].start = sge_no;
-			ch_bytes = ch->rc_target.rs_length;
+			ch_bytes = ntohl(ch->rc_target.rs_length);
 			/* If bytes remaining account for next chunk */
 			if (byte_count) {
 				head->arg.page_len += ch_bytes;
@@ -281,11 +281,12 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
 	offset = 0;
 	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
 	for (ch_no = 0; ch_no < ch_count; ch_no++) {
+		int len = ntohl(ch->rc_target.rs_length);
 		rpl_map->sge[ch_no].iov_base = frmr->kva + offset;
-		rpl_map->sge[ch_no].iov_len = ch->rc_target.rs_length;
+		rpl_map->sge[ch_no].iov_len = len;
 		chl_map->ch[ch_no].count = 1;
 		chl_map->ch[ch_no].start = ch_no;
-		offset += ch->rc_target.rs_length;
+		offset += len;
 		ch++;
 	}
 
@@ -316,7 +317,7 @@ static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
 	for (i = 0; i < count; i++) {
 		ctxt->sge[i].length = 0; /* in case map fails */
 		if (!frmr) {
-			BUG_ON(0 == virt_to_page(vec[i].iov_base));
+			BUG_ON(!virt_to_page(vec[i].iov_base));
 			off = (unsigned long)vec[i].iov_base & ~PAGE_MASK;
 			ctxt->sge[i].addr =
 				ib_dma_map_page(xprt->sc_cm_id->device,
@@ -426,6 +427,7 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
 
 	for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
 	     ch->rc_discrim != 0; ch++, ch_no++) {
+		u64 rs_offset;
 next_sge:
 		ctxt = svc_rdma_get_context(xprt);
 		ctxt->direction = DMA_FROM_DEVICE;
@@ -440,10 +442,10 @@ next_sge:
 		read_wr.opcode = IB_WR_RDMA_READ;
 		ctxt->wr_op = read_wr.opcode;
 		read_wr.send_flags = IB_SEND_SIGNALED;
-		read_wr.wr.rdma.rkey = ch->rc_target.rs_handle;
-		read_wr.wr.rdma.remote_addr =
-			get_unaligned(&(ch->rc_target.rs_offset)) +
-			sgl_offset;
+		read_wr.wr.rdma.rkey = ntohl(ch->rc_target.rs_handle);
+		xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
+				 &rs_offset);
+		read_wr.wr.rdma.remote_addr = rs_offset + sgl_offset;
 		read_wr.sg_list = ctxt->sge;
 		read_wr.num_sge =
 			rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 249a835b703f..42eb7ba0b903 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -409,21 +409,21 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
 		u64 rs_offset;
 
 		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
-		write_len = min(xfer_len, arg_ch->rs_length);
+		write_len = min(xfer_len, ntohl(arg_ch->rs_length));
 
 		/* Prepare the response chunk given the length actually
 		 * written */
-		rs_offset = get_unaligned(&(arg_ch->rs_offset));
+		xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
 		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
 						arg_ch->rs_handle,
-						rs_offset,
+						arg_ch->rs_offset,
 						write_len);
 		chunk_off = 0;
 		while (write_len) {
 			int this_write;
 			this_write = min(write_len, max_write);
 			ret = send_write(xprt, rqstp,
-					 arg_ch->rs_handle,
+					 ntohl(arg_ch->rs_handle),
 					 rs_offset + chunk_off,
 					 xdr_off,
 					 this_write,
@@ -457,6 +457,7 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
 	u32 xdr_off;
 	int chunk_no;
 	int chunk_off;
+	int nchunks;
 	struct rpcrdma_segment *ch;
 	struct rpcrdma_write_array *arg_ary;
 	struct rpcrdma_write_array *res_ary;
@@ -476,26 +477,27 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
 	max_write = xprt->sc_max_sge * PAGE_SIZE;
 
 	/* xdr offset starts at RPC message */
+	nchunks = ntohl(arg_ary->wc_nchunks);
 	for (xdr_off = 0, chunk_no = 0;
-	     xfer_len && chunk_no < arg_ary->wc_nchunks;
+	     xfer_len && chunk_no < nchunks;
 	     chunk_no++) {
 		u64 rs_offset;
 		ch = &arg_ary->wc_array[chunk_no].wc_target;
-		write_len = min(xfer_len, ch->rs_length);
+		write_len = min(xfer_len, htonl(ch->rs_length));
 
 		/* Prepare the reply chunk given the length actually
 		 * written */
-		rs_offset = get_unaligned(&(ch->rs_offset));
+		xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
 		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
-						ch->rs_handle, rs_offset,
+						ch->rs_handle, ch->rs_offset,
 						write_len);
 		chunk_off = 0;
 		while (write_len) {
 			int this_write;
 
 			this_write = min(write_len, max_write);
 			ret = send_write(xprt, rqstp,
-					 ch->rs_handle,
+					 ntohl(ch->rs_handle),
 					 rs_offset + chunk_off,
 					 xdr_off,
 					 this_write,
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 894cb42db91d..73b428bef598 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -51,6 +51,7 @@
 #include <rdma/rdma_cm.h>
 #include <linux/sunrpc/svc_rdma.h>
 #include <linux/export.h>
+#include "xprt_rdma.h"
 
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 
@@ -90,12 +91,6 @@ struct svc_xprt_class svc_rdma_class = {
 	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
 };
 
-/* WR context cache. Created in svc_rdma.c */
-extern struct kmem_cache *svc_rdma_ctxt_cachep;
-
-/* Workqueue created in svc_rdma.c */
-extern struct workqueue_struct *svc_rdma_wq;
-
 struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 {
 	struct svc_rdma_op_ctxt *ctxt;
@@ -150,9 +145,6 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 	atomic_dec(&xprt->sc_ctxt_used);
 }
 
-/* Temporary NFS request map cache. Created in svc_rdma.c */
-extern struct kmem_cache *svc_rdma_map_cachep;
-
 /*
  * Temporary NFS req mappings are shared across all transport
  * instances. These are short lived and should be bounded by the number
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 08c5d5a128fc..9a66c95b5837 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -343,4 +343,11 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
  */
 int rpcrdma_marshal_req(struct rpc_rqst *);
 
+/* Temporary NFS request map cache. Created in svc_rdma.c */
+extern struct kmem_cache *svc_rdma_map_cachep;
+/* WR context cache. Created in svc_rdma.c */
+extern struct kmem_cache *svc_rdma_ctxt_cachep;
+/* Workqueue created in svc_rdma.c */
+extern struct workqueue_struct *svc_rdma_wq;
+
 #endif	/* _LINUX_SUNRPC_XPRT_RDMA_H */
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 92bc5181dbeb..890b03f8d877 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2475,6 +2475,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
 static struct rpc_xprt_ops bc_tcp_ops = {
 	.reserve_xprt = xprt_reserve_xprt,
 	.release_xprt = xprt_release_xprt,
+	.rpcbind = xs_local_rpcbind,
 	.buf_alloc = bc_malloc,
 	.buf_free = bc_free,
 	.send_request = bc_send_request,
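
The xprtsock.c hunk matches "sunrpc: skip portmap calls on sessions backchannel" from the shortlog above: bc_tcp_ops gains a .rpcbind callback, xs_local_rpcbind(), which simply marks the transport as bound so the generic connect path never issues a real portmapper query. A sketch of that ops-table pattern, with made-up names that only mirror the shape of the kernel code:

#include <stdbool.h>
#include <stdio.h>

struct xprt {
	const char *name;
	bool bound;
};

struct xprt_ops {
	void (*rpcbind)(struct xprt *xprt);
};

/* Stands in for xs_local_rpcbind(): mark the transport as already
 * bound and return, rather than querying rpcbind/portmap. */
static void local_rpcbind(struct xprt *xprt)
{
	xprt->bound = true;
}

static const struct xprt_ops backchannel_ops = {
	.rpcbind = local_rpcbind,
};

int main(void)
{
	struct xprt bc = { .name = "backchannel", .bound = false };

	/* The generic path calls whatever .rpcbind the transport
	 * provides; here that is the no-lookup variant. */
	backchannel_ops.rpcbind(&bc);
	printf("%s bound=%d\n", bc.name, bc.bound);
	return 0;
}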