author     Linus Torvalds <torvalds@linux-foundation.org>  2015-06-27 13:14:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-06-27 13:14:39 -0400
commit     d2c3ac7e7e39ec6d37e4114ae7444948561e59af (patch)
tree       c0e6a9eed0139de4e757fbf64498fe8c0b291e44 /net/sunrpc
parent     546fac60739ef8d7cbf8ce0b8251a519f68b2804 (diff)
parent     901f1379f6c9dc2d73b51971d129a6f7d5b9b20a (diff)
Merge branch 'for-4.2' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
 "A relatively quiet cycle, with a mix of cleanup and smaller bugfixes"

* 'for-4.2' of git://linux-nfs.org/~bfields/linux: (24 commits)
  sunrpc: use sg_init_one() in krb5_rc4_setup_enc/seq_key()
  nfsd: wrap too long lines in nfsd4_encode_read
  nfsd: fput rd_file from XDR encode context
  nfsd: take struct file setup fully into nfs4_preprocess_stateid_op
  nfsd: refactor nfs4_preprocess_stateid_op
  nfsd: clean up raparams handling
  nfsd: use swap() in sort_pacl_range()
  rpcrdma: Merge svcrdma and xprtrdma modules into one
  svcrdma: Add a separate "max data segs macro for svcrdma
  svcrdma: Replace GFP_KERNEL in a loop with GFP_NOFAIL
  svcrdma: Keep rpcrdma_msg fields in network byte-order
  svcrdma: Fix byte-swapping in svc_rdma_sendto.c
  nfsd: Update callback sequnce id only CB_SEQUENCE success
  nfsd: Reset cb_status in nfsd4_cb_prepare() at retrying
  svcrdma: Remove svc_rdma_xdr_decode_deferred_req()
  SUNRPC: Move EXPORT_SYMBOL for svc_process
  uapi/nfs: Add NFSv4.1 ACL definitions
  nfsd: Remove dead declarations
  nfsd: work around a gcc-5.1 warning
  nfsd: Checking for acl support does not require fetching any acls
  ...
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/Kconfig                         |  28
-rw-r--r--  net/sunrpc/Makefile                        |   3
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c      |   8
-rw-r--r--  net/sunrpc/svc.c                           |   2
-rw-r--r--  net/sunrpc/xprtrdma/Makefile               |  14
-rw-r--r--  net/sunrpc/xprtrdma/module.c               |  46
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma.c             |   8
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_marshal.c     | 140
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c    |   2
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c      |  16
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c   |  36
-rw-r--r--  net/sunrpc/xprtrdma/transport.c            |  13
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h            |  11
13 files changed, 129 insertions(+), 198 deletions(-)
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 9068e72aa73c..04ce2c0b660e 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -48,28 +48,16 @@ config SUNRPC_DEBUG
 
 	  If unsure, say Y.
 
-config SUNRPC_XPRT_RDMA_CLIENT
-	tristate "RPC over RDMA Client Support"
+config SUNRPC_XPRT_RDMA
+	tristate "RPC-over-RDMA transport"
 	depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS
 	default SUNRPC && INFINIBAND
 	help
-	  This option allows the NFS client to support an RDMA-enabled
-	  transport.
+	  This option allows the NFS client and server to use RDMA
+	  transports (InfiniBand, iWARP, or RoCE).
 
-	  To compile RPC client RDMA transport support as a module,
-	  choose M here: the module will be called xprtrdma.
+	  To compile this support as a module, choose M. The module
+	  will be called rpcrdma.ko.
 
-	  If unsure, say N.
-
-config SUNRPC_XPRT_RDMA_SERVER
-	tristate "RPC over RDMA Server Support"
-	depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS
-	default SUNRPC && INFINIBAND
-	help
-	  This option allows the NFS server to support an RDMA-enabled
-	  transport.
-
-	  To compile RPC server RDMA transport support as a module,
-	  choose M here: the module will be called svcrdma.
-
-	  If unsure, say N.
+	  If unsure, or you know there is no RDMA capability on your
+	  hardware platform, say N.
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index 15e6f6c23c5d..936ad0a15371 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -5,8 +5,7 @@
 
 obj-$(CONFIG_SUNRPC) += sunrpc.o
 obj-$(CONFIG_SUNRPC_GSS) += auth_gss/
-
-obj-y += xprtrdma/
+obj-$(CONFIG_SUNRPC_XPRT_RDMA) += xprtrdma/
 
 sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
 	    auth.o auth_null.o auth_unix.o auth_generic.o \
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index b5408e8a37f2..fee3c15a4b52 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -881,9 +881,7 @@ krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
 	if (err)
 		goto out_err;
 
-	sg_init_table(sg, 1);
-	sg_set_buf(sg, &zeroconstant, 4);
-
+	sg_init_one(sg, &zeroconstant, 4);
 	err = crypto_hash_digest(&desc, sg, 4, Kseq);
 	if (err)
 		goto out_err;
@@ -951,9 +949,7 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
 	if (err)
 		goto out_err;
 
-	sg_init_table(sg, 1);
-	sg_set_buf(sg, zeroconstant, 4);
-
+	sg_init_one(sg, zeroconstant, 4);
 	err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
 	if (err)
 		goto out_err;
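For reference, sg_init_one() is the scatterlist helper that collapses the two-call sequence removed above into one call. A minimal sketch of the equivalence (the helper function names below are illustrative, not from the patch):

	#include <linux/scatterlist.h>

	/* The open-coded pair the patch removes ... */
	static void single_entry_open_coded(struct scatterlist *sg,
					    const void *buf, unsigned int len)
	{
		sg_init_table(sg, 1);		/* clear the entry, mark it as the end */
		sg_set_buf(sg, buf, len);	/* point the entry at buf */
	}

	/* ... is what sg_init_one() does in a single call. */
	static void single_entry_helper(struct scatterlist *sg,
					const void *buf, unsigned int len)
	{
		sg_init_one(sg, buf, len);
	}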
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 78974e4d9ad2..852ae606b02a 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1290,7 +1290,6 @@ err_bad:
 	svc_putnl(resv, ntohl(rpc_stat));
 	goto sendit;
 }
-EXPORT_SYMBOL_GPL(svc_process);
 
 /*
  * Process the RPC request.
@@ -1338,6 +1337,7 @@ out_drop:
 	svc_drop(rqstp);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(svc_process);
 
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 /*
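The two hunks above only move the export: the usual kernel convention is that EXPORT_SYMBOL_GPL() sits directly after the closing brace of the function it exports, and svc_process()'s export had drifted next to an unrelated error path. A hypothetical sketch of the convention (example_process is not a real kernel symbol):

	#include <linux/export.h>

	int example_process(void)
	{
		return 0;
	}
	EXPORT_SYMBOL_GPL(example_process);	/* export directly follows the body */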
diff --git a/net/sunrpc/xprtrdma/Makefile b/net/sunrpc/xprtrdma/Makefile
index 579f72bbcf4b..48913de240bd 100644
--- a/net/sunrpc/xprtrdma/Makefile
+++ b/net/sunrpc/xprtrdma/Makefile
@@ -1,9 +1,7 @@
-obj-$(CONFIG_SUNRPC_XPRT_RDMA_CLIENT) += xprtrdma.o
+obj-$(CONFIG_SUNRPC_XPRT_RDMA) += rpcrdma.o
 
-xprtrdma-y := transport.o rpc_rdma.o verbs.o \
-	fmr_ops.o frwr_ops.o physical_ops.o
-
-obj-$(CONFIG_SUNRPC_XPRT_RDMA_SERVER) += svcrdma.o
-
-svcrdma-y := svc_rdma.o svc_rdma_transport.o \
-	svc_rdma_marshal.o svc_rdma_sendto.o svc_rdma_recvfrom.o
+rpcrdma-y := transport.o rpc_rdma.o verbs.o \
+	fmr_ops.o frwr_ops.o physical_ops.o \
+	svc_rdma.o svc_rdma_transport.o \
+	svc_rdma_marshal.o svc_rdma_sendto.o svc_rdma_recvfrom.o \
+	module.o
diff --git a/net/sunrpc/xprtrdma/module.c b/net/sunrpc/xprtrdma/module.c
new file mode 100644
index 000000000000..560712bd9fa2
--- /dev/null
+++ b/net/sunrpc/xprtrdma/module.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015 Oracle. All rights reserved.
+ */
+
+/* rpcrdma.ko module initialization
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sunrpc/svc_rdma.h>
+#include "xprt_rdma.h"
+
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+# define RPCDBG_FACILITY	RPCDBG_TRANS
+#endif
+
+MODULE_AUTHOR("Open Grid Computing and Network Appliance, Inc.");
+MODULE_DESCRIPTION("RPC/RDMA Transport");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("svcrdma");
+MODULE_ALIAS("xprtrdma");
+
+static void __exit rpc_rdma_cleanup(void)
+{
+	xprt_rdma_cleanup();
+	svc_rdma_cleanup();
+}
+
+static int __init rpc_rdma_init(void)
+{
+	int rc;
+
+	rc = svc_rdma_init();
+	if (rc)
+		goto out;
+
+	rc = xprt_rdma_init();
+	if (rc)
+		svc_rdma_cleanup();
+
+out:
+	return rc;
+}
+
+module_init(rpc_rdma_init);
+module_exit(rpc_rdma_cleanup);
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index c1b6270262c2..2cd252f023a5 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -38,8 +38,7 @@
  *
  * Author: Tom Tucker <tom@opengridcomputing.com>
  */
-#include <linux/module.h>
-#include <linux/init.h>
+
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/sysctl.h>
@@ -295,8 +294,3 @@ int svc_rdma_init(void)
 	destroy_workqueue(svc_rdma_wq);
 	return -ENOMEM;
 }
-MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
-MODULE_DESCRIPTION("SVC RDMA Transport");
-MODULE_LICENSE("Dual BSD/GPL");
-module_init(svc_rdma_init);
-module_exit(svc_rdma_cleanup);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
index b681855cf970..e2fca7617242 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
@@ -50,12 +50,12 @@
 /*
  * Decodes a read chunk list. The expected format is as follows:
  *    descrim  : xdr_one
- *    position : u32 offset into XDR stream
- *    handle   : u32 RKEY
+ *    position : __be32 offset into XDR stream
+ *    handle   : __be32 RKEY
  *    . . .
  *  end-of-list: xdr_zero
  */
-static u32 *decode_read_list(u32 *va, u32 *vaend)
+static __be32 *decode_read_list(__be32 *va, __be32 *vaend)
 {
 	struct rpcrdma_read_chunk *ch = (struct rpcrdma_read_chunk *)va;
 
@@ -67,20 +67,20 @@ static u32 *decode_read_list(u32 *va, u32 *vaend)
 		}
 		ch++;
 	}
-	return (u32 *)&ch->rc_position;
+	return &ch->rc_position;
 }
 
 /*
  * Decodes a write chunk list. The expected format is as follows:
  *    descrim  : xdr_one
  *    nchunks  : <count>
- *    handle   : u32 RKEY              ---+
- *    length   : u32 <len of segment>     |
+ *    handle   : __be32 RKEY           ---+
+ *    length   : __be32 <len of segment>  |
  *    offset   : remove va + <count>      |
  *    . . .                               |
  *                                     ---+
  */
-static u32 *decode_write_list(u32 *va, u32 *vaend)
+static __be32 *decode_write_list(__be32 *va, __be32 *vaend)
 {
 	unsigned long start, end;
 	int nchunks;
@@ -90,14 +90,14 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
 
 	/* Check for not write-array */
 	if (ary->wc_discrim == xdr_zero)
-		return (u32 *)&ary->wc_nchunks;
+		return &ary->wc_nchunks;
 
 	if ((unsigned long)ary + sizeof(struct rpcrdma_write_array) >
 	    (unsigned long)vaend) {
 		dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
 		return NULL;
 	}
-	nchunks = ntohl(ary->wc_nchunks);
+	nchunks = be32_to_cpu(ary->wc_nchunks);
 
 	start = (unsigned long)&ary->wc_array[0];
 	end = (unsigned long)vaend;
@@ -112,10 +112,10 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
 	 * rs_length is the 2nd 4B field in wc_target and taking its
 	 * address skips the list terminator
 	 */
-	return (u32 *)&ary->wc_array[nchunks].wc_target.rs_length;
+	return &ary->wc_array[nchunks].wc_target.rs_length;
 }
 
-static u32 *decode_reply_array(u32 *va, u32 *vaend)
+static __be32 *decode_reply_array(__be32 *va, __be32 *vaend)
 {
 	unsigned long start, end;
 	int nchunks;
@@ -124,14 +124,14 @@ static u32 *decode_reply_array(u32 *va, u32 *vaend)
 
 	/* Check for no reply-array */
 	if (ary->wc_discrim == xdr_zero)
-		return (u32 *)&ary->wc_nchunks;
+		return &ary->wc_nchunks;
 
 	if ((unsigned long)ary + sizeof(struct rpcrdma_write_array) >
 	    (unsigned long)vaend) {
 		dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
 		return NULL;
 	}
-	nchunks = ntohl(ary->wc_nchunks);
+	nchunks = be32_to_cpu(ary->wc_nchunks);
 
 	start = (unsigned long)&ary->wc_array[0];
 	end = (unsigned long)vaend;
@@ -142,15 +142,14 @@ static u32 *decode_reply_array(u32 *va, u32 *vaend)
 			ary, nchunks, vaend);
 		return NULL;
 	}
-	return (u32 *)&ary->wc_array[nchunks];
+	return (__be32 *)&ary->wc_array[nchunks];
 }
 
 int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req,
 			    struct svc_rqst *rqstp)
 {
 	struct rpcrdma_msg *rmsgp = NULL;
-	u32 *va;
-	u32 *vaend;
+	__be32 *va, *vaend;
 	u32 hdr_len;
 
 	rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
@@ -162,22 +161,17 @@ int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req,
 		return -EINVAL;
 	}
 
-	/* Decode the header */
-	rmsgp->rm_xid = ntohl(rmsgp->rm_xid);
-	rmsgp->rm_vers = ntohl(rmsgp->rm_vers);
-	rmsgp->rm_credit = ntohl(rmsgp->rm_credit);
-	rmsgp->rm_type = ntohl(rmsgp->rm_type);
-
-	if (rmsgp->rm_vers != RPCRDMA_VERSION)
+	if (rmsgp->rm_vers != rpcrdma_version)
 		return -ENOSYS;
 
 	/* Pull in the extra for the padded case and bump our pointer */
-	if (rmsgp->rm_type == RDMA_MSGP) {
+	if (rmsgp->rm_type == rdma_msgp) {
 		int hdrlen;
+
 		rmsgp->rm_body.rm_padded.rm_align =
-			ntohl(rmsgp->rm_body.rm_padded.rm_align);
+			be32_to_cpu(rmsgp->rm_body.rm_padded.rm_align);
 		rmsgp->rm_body.rm_padded.rm_thresh =
-			ntohl(rmsgp->rm_body.rm_padded.rm_thresh);
+			be32_to_cpu(rmsgp->rm_body.rm_padded.rm_thresh);
 
 		va = &rmsgp->rm_body.rm_padded.rm_pempty[4];
 		rqstp->rq_arg.head[0].iov_base = va;
@@ -192,7 +186,7 @@ int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req,
 	 * chunk list and a reply chunk list.
 	 */
 	va = &rmsgp->rm_body.rm_chunks[0];
-	vaend = (u32 *)((unsigned long)rmsgp + rqstp->rq_arg.len);
+	vaend = (__be32 *)((unsigned long)rmsgp + rqstp->rq_arg.len);
 	va = decode_read_list(va, vaend);
 	if (!va)
 		return -EINVAL;
@@ -211,76 +205,20 @@ int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req,
 	return hdr_len;
 }
 
-int svc_rdma_xdr_decode_deferred_req(struct svc_rqst *rqstp)
-{
-	struct rpcrdma_msg *rmsgp = NULL;
-	struct rpcrdma_read_chunk *ch;
-	struct rpcrdma_write_array *ary;
-	u32 *va;
-	u32 hdrlen;
-
-	dprintk("svcrdma: processing deferred RDMA header on rqstp=%p\n",
-		rqstp);
-	rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
-
-	/* Pull in the extra for the padded case and bump our pointer */
-	if (rmsgp->rm_type == RDMA_MSGP) {
-		va = &rmsgp->rm_body.rm_padded.rm_pempty[4];
-		rqstp->rq_arg.head[0].iov_base = va;
-		hdrlen = (u32)((unsigned long)va - (unsigned long)rmsgp);
-		rqstp->rq_arg.head[0].iov_len -= hdrlen;
-		return hdrlen;
-	}
-
-	/*
-	 * Skip all chunks to find RPC msg. These were previously processed
-	 */
-	va = &rmsgp->rm_body.rm_chunks[0];
-
-	/* Skip read-list */
-	for (ch = (struct rpcrdma_read_chunk *)va;
-	     ch->rc_discrim != xdr_zero; ch++);
-	va = (u32 *)&ch->rc_position;
-
-	/* Skip write-list */
-	ary = (struct rpcrdma_write_array *)va;
-	if (ary->wc_discrim == xdr_zero)
-		va = (u32 *)&ary->wc_nchunks;
-	else
-		/*
-		 * rs_length is the 2nd 4B field in wc_target and taking its
-		 * address skips the list terminator
-		 */
-		va = (u32 *)&ary->wc_array[ary->wc_nchunks].wc_target.rs_length;
-
-	/* Skip reply-array */
-	ary = (struct rpcrdma_write_array *)va;
-	if (ary->wc_discrim == xdr_zero)
-		va = (u32 *)&ary->wc_nchunks;
-	else
-		va = (u32 *)&ary->wc_array[ary->wc_nchunks];
-
-	rqstp->rq_arg.head[0].iov_base = va;
-	hdrlen = (unsigned long)va - (unsigned long)rmsgp;
-	rqstp->rq_arg.head[0].iov_len -= hdrlen;
-
-	return hdrlen;
-}
-
 int svc_rdma_xdr_encode_error(struct svcxprt_rdma *xprt,
 			      struct rpcrdma_msg *rmsgp,
-			      enum rpcrdma_errcode err, u32 *va)
+			      enum rpcrdma_errcode err, __be32 *va)
 {
-	u32 *startp = va;
+	__be32 *startp = va;
 
-	*va++ = htonl(rmsgp->rm_xid);
-	*va++ = htonl(rmsgp->rm_vers);
-	*va++ = htonl(xprt->sc_max_requests);
-	*va++ = htonl(RDMA_ERROR);
-	*va++ = htonl(err);
+	*va++ = rmsgp->rm_xid;
+	*va++ = rmsgp->rm_vers;
+	*va++ = cpu_to_be32(xprt->sc_max_requests);
+	*va++ = rdma_error;
+	*va++ = cpu_to_be32(err);
 	if (err == ERR_VERS) {
-		*va++ = htonl(RPCRDMA_VERSION);
-		*va++ = htonl(RPCRDMA_VERSION);
+		*va++ = rpcrdma_version;
+		*va++ = rpcrdma_version;
 	}
 
 	return (int)((unsigned long)va - (unsigned long)startp);
@@ -297,7 +235,7 @@ int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *rmsgp)
 		&rmsgp->rm_body.rm_chunks[1];
 	if (wr_ary->wc_discrim)
 		wr_ary = (struct rpcrdma_write_array *)
-			&wr_ary->wc_array[ntohl(wr_ary->wc_nchunks)].
+			&wr_ary->wc_array[be32_to_cpu(wr_ary->wc_nchunks)].
 			wc_target.rs_length;
 	else
 		wr_ary = (struct rpcrdma_write_array *)
@@ -306,7 +244,7 @@ int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *rmsgp)
 	/* skip reply array */
 	if (wr_ary->wc_discrim)
 		wr_ary = (struct rpcrdma_write_array *)
-			&wr_ary->wc_array[ntohl(wr_ary->wc_nchunks)];
+			&wr_ary->wc_array[be32_to_cpu(wr_ary->wc_nchunks)];
 	else
 		wr_ary = (struct rpcrdma_write_array *)
 			&wr_ary->wc_nchunks;
@@ -325,7 +263,7 @@ void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *rmsgp, int chunks)
 	ary = (struct rpcrdma_write_array *)
 		&rmsgp->rm_body.rm_chunks[1];
 	ary->wc_discrim = xdr_one;
-	ary->wc_nchunks = htonl(chunks);
+	ary->wc_nchunks = cpu_to_be32(chunks);
 
 	/* write-list terminator */
 	ary->wc_array[chunks].wc_target.rs_handle = xdr_zero;
@@ -338,7 +276,7 @@ void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *ary,
 				     int chunks)
 {
 	ary->wc_discrim = xdr_one;
-	ary->wc_nchunks = htonl(chunks);
+	ary->wc_nchunks = cpu_to_be32(chunks);
 }
 
 void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary,
@@ -350,7 +288,7 @@ void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary,
 	struct rpcrdma_segment *seg = &ary->wc_array[chunk_no].wc_target;
 	seg->rs_handle = rs_handle;
 	seg->rs_offset = rs_offset;
-	seg->rs_length = htonl(write_len);
+	seg->rs_length = cpu_to_be32(write_len);
 }
 
 void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *xprt,
@@ -358,10 +296,10 @@ void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *xprt,
 				  struct rpcrdma_msg *rdma_resp,
 				  enum rpcrdma_proc rdma_type)
 {
-	rdma_resp->rm_xid = htonl(rdma_argp->rm_xid);
-	rdma_resp->rm_vers = htonl(rdma_argp->rm_vers);
-	rdma_resp->rm_credit = htonl(xprt->sc_max_requests);
-	rdma_resp->rm_type = htonl(rdma_type);
+	rdma_resp->rm_xid = rdma_argp->rm_xid;
+	rdma_resp->rm_vers = rdma_argp->rm_vers;
+	rdma_resp->rm_credit = cpu_to_be32(xprt->sc_max_requests);
+	rdma_resp->rm_type = cpu_to_be32(rdma_type);
 
 	/* Encode <nul> chunks lists */
 	rdma_resp->rm_body.rm_chunks[0] = xdr_zero;
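The pattern running through this file: on-the-wire XDR words keep the sparse-checked __be32 type, conversion happens only where a CPU-order value is actually needed, and protocol constants (rpcrdma_version, rdma_msgp, rdma_error) are assumed to be defined in wire byte-order so they compare directly against header fields. A self-contained sketch of that pattern, with hypothetical ex_* names:

	#include <linux/types.h>
	#include <linux/errno.h>
	#include <asm/byteorder.h>

	/* Constants compared against wire data are kept in wire (big-endian)
	 * byte-order, so no swap is needed at the comparison site and sparse
	 * can type-check every access. */
	#define EX_VERSION	1
	#define ex_version	cpu_to_be32(EX_VERSION)

	static int ex_check_version(__be32 rm_vers)
	{
		if (rm_vers != ex_version)	/* __be32 == __be32: no swapping */
			return -ENOSYS;
		return 0;
	}

	static u32 ex_nchunks(__be32 wc_nchunks)
	{
		/* convert exactly once, where a CPU-order count is needed */
		return be32_to_cpu(wc_nchunks);
	}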
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 86b44164172b..2e1348bde325 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -85,7 +85,7 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
 
 	/* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
 	rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
-	if (be32_to_cpu(rmsgp->rm_type) == RDMA_NOMSG)
+	if (rmsgp->rm_type == rdma_nomsg)
 		rqstp->rq_arg.pages = &rqstp->rq_pages[0];
 	else
 		rqstp->rq_arg.pages = &rqstp->rq_pages[1];
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 7de33d1af9b6..d25cd430f9ff 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -240,6 +240,7 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
 	u32 xdr_off;
 	int chunk_off;
 	int chunk_no;
+	int nchunks;
 	struct rpcrdma_write_array *arg_ary;
 	struct rpcrdma_write_array *res_ary;
 	int ret;
@@ -251,14 +252,15 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
 		&rdma_resp->rm_body.rm_chunks[1];
 
 	/* Write chunks start at the pagelist */
+	nchunks = be32_to_cpu(arg_ary->wc_nchunks);
 	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
-	     xfer_len && chunk_no < arg_ary->wc_nchunks;
+	     xfer_len && chunk_no < nchunks;
 	     chunk_no++) {
 		struct rpcrdma_segment *arg_ch;
 		u64 rs_offset;
 
 		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
-		write_len = min(xfer_len, ntohl(arg_ch->rs_length));
+		write_len = min(xfer_len, be32_to_cpu(arg_ch->rs_length));
 
 		/* Prepare the response chunk given the length actually
 		 * written */
@@ -270,7 +272,7 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
 		chunk_off = 0;
 		while (write_len) {
 			ret = send_write(xprt, rqstp,
-					 ntohl(arg_ch->rs_handle),
+					 be32_to_cpu(arg_ch->rs_handle),
 					 rs_offset + chunk_off,
 					 xdr_off,
 					 write_len,
@@ -318,13 +320,13 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
 		&rdma_resp->rm_body.rm_chunks[2];
 
 	/* xdr offset starts at RPC message */
-	nchunks = ntohl(arg_ary->wc_nchunks);
+	nchunks = be32_to_cpu(arg_ary->wc_nchunks);
 	for (xdr_off = 0, chunk_no = 0;
 	     xfer_len && chunk_no < nchunks;
 	     chunk_no++) {
 		u64 rs_offset;
 		ch = &arg_ary->wc_array[chunk_no].wc_target;
-		write_len = min(xfer_len, htonl(ch->rs_length));
+		write_len = min(xfer_len, be32_to_cpu(ch->rs_length));
 
 		/* Prepare the reply chunk given the length actually
 		 * written */
@@ -335,7 +337,7 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
 		chunk_off = 0;
 		while (write_len) {
 			ret = send_write(xprt, rqstp,
-					 ntohl(ch->rs_handle),
+					 be32_to_cpu(ch->rs_handle),
 					 rs_offset + chunk_off,
 					 xdr_off,
 					 write_len,
@@ -515,7 +517,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	inline_bytes = rqstp->rq_res.len;
 
 	/* Create the RDMA response header */
-	res_page = svc_rdma_get_page();
+	res_page = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
 	rdma_resp = page_address(res_page);
 	reply_ary = svc_rdma_get_reply_array(rdma_argp);
 	if (reply_ary)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index f4cfa764d76f..6b36279e4288 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -91,7 +91,7 @@ struct svc_xprt_class svc_rdma_class = {
 	.xcl_name = "rdma",
 	.xcl_owner = THIS_MODULE,
 	.xcl_ops = &svc_rdma_ops,
-	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
+	.xcl_max_payload = RPCRDMA_MAXPAYLOAD,
 	.xcl_ident = XPRT_TRANSPORT_RDMA,
 };
 
@@ -99,12 +99,8 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 {
 	struct svc_rdma_op_ctxt *ctxt;
 
-	while (1) {
-		ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
-		if (ctxt)
-			break;
-		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-	}
+	ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep,
+				GFP_KERNEL | __GFP_NOFAIL);
 	ctxt->xprt = xprt;
 	INIT_LIST_HEAD(&ctxt->dto_q);
 	ctxt->count = 0;
@@ -156,12 +152,8 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 struct svc_rdma_req_map *svc_rdma_get_req_map(void)
 {
 	struct svc_rdma_req_map *map;
-	while (1) {
-		map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
-		if (map)
-			break;
-		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-	}
+	map = kmem_cache_alloc(svc_rdma_map_cachep,
+			       GFP_KERNEL | __GFP_NOFAIL);
 	map->count = 0;
 	return map;
 }
@@ -493,18 +485,6 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
 	return cma_xprt;
 }
 
-struct page *svc_rdma_get_page(void)
-{
-	struct page *page;
-
-	while ((page = alloc_page(GFP_KERNEL)) == NULL) {
-		/* If we can't get memory, wait a bit and try again */
-		printk(KERN_INFO "svcrdma: out of memory...retrying in 1s\n");
-		schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
-	}
-	return page;
-}
-
 int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 {
 	struct ib_recv_wr recv_wr, *bad_recv_wr;
@@ -523,7 +503,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
 			goto err_put_ctxt;
 		}
-		page = svc_rdma_get_page();
+		page = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
 		ctxt->pages[sge_no] = page;
 		pa = ib_dma_map_page(xprt->sc_cm_id->device,
 				     page, 0, PAGE_SIZE,
@@ -1318,11 +1298,11 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	struct ib_send_wr err_wr;
 	struct page *p;
 	struct svc_rdma_op_ctxt *ctxt;
-	u32 *va;
+	__be32 *va;
 	int length;
 	int ret;
 
-	p = svc_rdma_get_page();
+	p = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
 	va = page_address(p);
 
 	/* XDR encode error */
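The allocation changes above all trade open-coded sleep-and-retry loops (including the removed svc_rdma_get_page() helper) for __GFP_NOFAIL, which makes the allocator itself block and retry until the allocation succeeds, so the result never needs a NULL check. A hypothetical before/after sketch, with cachep standing in for the svcrdma caches:

	#include <linux/slab.h>
	#include <linux/sched.h>
	#include <linux/jiffies.h>

	/* Before: poll the allocator every 500ms until it succeeds. */
	static void *ex_alloc_before(struct kmem_cache *cachep)
	{
		void *p;

		while ((p = kmem_cache_alloc(cachep, GFP_KERNEL)) == NULL)
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		return p;
	}

	/* After: let the allocator do the waiting internally. */
	static void *ex_alloc_after(struct kmem_cache *cachep)
	{
		return kmem_cache_alloc(cachep, GFP_KERNEL | __GFP_NOFAIL);
	}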
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 54f23b1be986..436da2caec95 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -48,7 +48,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/sunrpc/addr.h>
@@ -59,11 +58,6 @@
 # define RPCDBG_FACILITY	RPCDBG_TRANS
 #endif
 
-MODULE_LICENSE("Dual BSD/GPL");
-
-MODULE_DESCRIPTION("RPC/RDMA Transport for Linux kernel NFS");
-MODULE_AUTHOR("Network Appliance, Inc.");
-
 /*
  * tunables
  */
@@ -711,7 +705,7 @@ static struct xprt_class xprt_rdma = {
 	.setup		= xprt_setup_rdma,
 };
 
-static void __exit xprt_rdma_cleanup(void)
+void xprt_rdma_cleanup(void)
 {
 	int rc;
 
@@ -728,7 +722,7 @@ static void __exit xprt_rdma_cleanup(void)
 			__func__, rc);
 }
 
-static int __init xprt_rdma_init(void)
+int xprt_rdma_init(void)
 {
 	int rc;
 
@@ -753,6 +747,3 @@ static int __init xprt_rdma_init(void)
 #endif
 	return 0;
 }
-
-module_init(xprt_rdma_init);
-module_exit(xprt_rdma_cleanup);
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 78e0b8beaa36..58163b88738c 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -480,6 +480,11 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
  */
 int rpcrdma_marshal_req(struct rpc_rqst *);
 
+/* RPC/RDMA module init - xprtrdma/transport.c
+ */
+int xprt_rdma_init(void);
+void xprt_rdma_cleanup(void);
+
 /* Temporary NFS request map cache. Created in svc_rdma.c */
 extern struct kmem_cache *svc_rdma_map_cachep;
 /* WR context cache. Created in svc_rdma.c */
@@ -487,10 +492,4 @@ extern struct kmem_cache *svc_rdma_ctxt_cachep;
 /* Workqueue created in svc_rdma.c */
 extern struct workqueue_struct *svc_rdma_wq;
 
-#if RPCSVC_MAXPAYLOAD < (RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT)
-#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
-#else
-#define RPCSVC_MAXPAYLOAD_RDMA (RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT)
-#endif
-
 #endif				/* _LINUX_SUNRPC_XPRT_RDMA_H */