aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTom Tucker <tom@opengridcomputing.com>2008-05-28 14:54:04 -0400
committerTom Tucker <tom@opengridcomputing.com>2008-07-02 16:01:53 -0400
commitab96dddbedf4bb8a7a0fe44012efc1d99598c36f (patch)
treebb3fcfe50264d9a0eb63b768aab8154d70185975
parente1441b9a41c33aa9236008a7cfe49a8e723fb397 (diff)
svcrdma: Add a type for keeping NFS RPC mapping
Create a new data structure to hold the remote client address space to local server address space mapping. Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
-rw-r--r--include/linux/sunrpc/svc_rdma.h27
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma.c19
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c26
3 files changed, 72 insertions, 0 deletions
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 05eb4664d0dd..bd8749cc8084 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -86,6 +86,31 @@ struct svc_rdma_op_ctxt {
86 struct page *pages[RPCSVC_MAXPAGES]; 86 struct page *pages[RPCSVC_MAXPAGES];
87}; 87};
88 88
89/*
90 * NFS requests are mapped on the client side by the chunk lists in
91 * the RPCRDMA header. During the fetching of the RPC from the client
92 * and the writing of the reply to the client, the memory in the
93 * client and the memory in the server must be mapped as contiguous
94 * vaddr/len for access by the hardware. These data structures keep
95 * these mappings.
96 *
97 * For an RDMA_WRITE, the 'sge' maps the RPC REPLY. For RDMA_READ, the
98 * 'sge' in the svc_rdma_req_map maps the server side RPC reply and the
99 * 'ch' field maps the read-list of the RPCRDMA header to the 'sge'
100 * mapping of the reply.
101 */
/*
 * Describes one chunk of an RPCRDMA chunk list as a contiguous run of
 * entries in the svc_rdma_req_map 'sge' array below: the chunk starts
 * at sge[start] and covers 'count' consecutive entries.
 */
struct svc_rdma_chunk_sge {
	int start;	/* index of the first sge for this chunk */
	int count;	/* number of sges for this chunk */
};
/*
 * Scratch mapping used while fetching an RPC from the client or writing
 * the reply back.  The anonymous union is used either as a plain
 * vaddr/len (kvec) array or as a chunk-list description referencing
 * those sges, depending on the transfer direction (see the block
 * comment above); 'count' is the number of valid entries in whichever
 * array is in use.  Obtain with svc_rdma_get_req_map(), release with
 * svc_rdma_put_req_map().
 */
struct svc_rdma_req_map {
	unsigned long count;	/* valid entries in the union's array */
	union {
		struct kvec sge[RPCSVC_MAXPAGES];
		struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES];
	};
};
113
89#define RDMACTXT_F_LAST_CTXT 2 114#define RDMACTXT_F_LAST_CTXT 2
90 115
91struct svcxprt_rdma { 116struct svcxprt_rdma {
@@ -173,6 +198,8 @@ extern int svc_rdma_post_recv(struct svcxprt_rdma *);
173extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); 198extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
174extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *); 199extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
175extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int); 200extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
201extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
202extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
176extern void svc_sq_reap(struct svcxprt_rdma *); 203extern void svc_sq_reap(struct svcxprt_rdma *);
177extern void svc_rq_reap(struct svcxprt_rdma *); 204extern void svc_rq_reap(struct svcxprt_rdma *);
178extern struct svc_xprt_class svc_rdma_class; 205extern struct svc_xprt_class svc_rdma_class;
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index 88c0ca20bb1e..171f2053e90c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -69,6 +69,9 @@ atomic_t rdma_stat_rq_prod;
69atomic_t rdma_stat_sq_poll; 69atomic_t rdma_stat_sq_poll;
70atomic_t rdma_stat_sq_prod; 70atomic_t rdma_stat_sq_prod;
71 71
72/* Temporary NFS request map cache */
73struct kmem_cache *svc_rdma_map_cachep;
74
72/* 75/*
73 * This function implements reading and resetting an atomic_t stat 76 * This function implements reading and resetting an atomic_t stat
74 * variable through read/write to a proc file. Any write to the file 77 * variable through read/write to a proc file. Any write to the file
@@ -241,6 +244,7 @@ void svc_rdma_cleanup(void)
241 svcrdma_table_header = NULL; 244 svcrdma_table_header = NULL;
242 } 245 }
243 svc_unreg_xprt_class(&svc_rdma_class); 246 svc_unreg_xprt_class(&svc_rdma_class);
247 kmem_cache_destroy(svc_rdma_map_cachep);
244} 248}
245 249
246int svc_rdma_init(void) 250int svc_rdma_init(void)
@@ -255,9 +259,24 @@ int svc_rdma_init(void)
255 svcrdma_table_header = 259 svcrdma_table_header =
256 register_sysctl_table(svcrdma_root_table); 260 register_sysctl_table(svcrdma_root_table);
257 261
262 /* Create the temporary map cache */
263 svc_rdma_map_cachep = kmem_cache_create("svc_rdma_map_cache",
264 sizeof(struct svc_rdma_req_map),
265 0,
266 SLAB_HWCACHE_ALIGN,
267 NULL);
268 if (!svc_rdma_map_cachep) {
269 printk(KERN_INFO "Could not allocate map cache.\n");
270 goto err;
271 }
272
258 /* Register RDMA with the SVC transport switch */ 273 /* Register RDMA with the SVC transport switch */
259 svc_reg_xprt_class(&svc_rdma_class); 274 svc_reg_xprt_class(&svc_rdma_class);
260 return 0; 275 return 0;
276
277 err:
278 unregister_sysctl_table(svcrdma_table_header);
279 return -ENOMEM;
261} 280}
262MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>"); 281MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
263MODULE_DESCRIPTION("SVC RDMA Transport"); 282MODULE_DESCRIPTION("SVC RDMA Transport");
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index e132509d1db0..ae90758d8e9b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -173,6 +173,32 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
173 atomic_dec(&xprt->sc_ctxt_used); 173 atomic_dec(&xprt->sc_ctxt_used);
174} 174}
175 175
176/* Temporary NFS request map cache. Created in svc_rdma.c */
177extern struct kmem_cache *svc_rdma_map_cachep;
178
179/*
180 * Temporary NFS req mappings are shared across all transport
181 * instances. These are short lived and should be bounded by the number
182 * of concurrent server threads * depth of the SQ.
183 */
184struct svc_rdma_req_map *svc_rdma_get_req_map(void)
185{
186 struct svc_rdma_req_map *map;
187 while (1) {
188 map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
189 if (map)
190 break;
191 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
192 }
193 map->count = 0;
194 return map;
195}
196
/*
 * Return a request map obtained from svc_rdma_get_req_map() to the
 * shared cache.
 */
void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
	kmem_cache_free(svc_rdma_map_cachep, map);
}
201
176/* ib_cq event handler */ 202/* ib_cq event handler */
177static void cq_event_handler(struct ib_event *event, void *context) 203static void cq_event_handler(struct ib_event *event, void *context)
178{ 204{