Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma.c            19
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c  26
2 files changed, 45 insertions, 0 deletions
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index 88c0ca20bb1e..171f2053e90c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -69,6 +69,9 @@ atomic_t rdma_stat_rq_prod;
 atomic_t rdma_stat_sq_poll;
 atomic_t rdma_stat_sq_prod;
 
+/* Temporary NFS request map cache */
+struct kmem_cache *svc_rdma_map_cachep;
+
 /*
  * This function implements reading and resetting an atomic_t stat
  * variable through read/write to a proc file. Any write to the file
@@ -241,6 +244,7 @@ void svc_rdma_cleanup(void)
 		svcrdma_table_header = NULL;
 	}
 	svc_unreg_xprt_class(&svc_rdma_class);
+	kmem_cache_destroy(svc_rdma_map_cachep);
 }
 
 int svc_rdma_init(void)
@@ -255,9 +259,24 @@ int svc_rdma_init(void)
 		svcrdma_table_header =
 			register_sysctl_table(svcrdma_root_table);
 
+	/* Create the temporary map cache */
+	svc_rdma_map_cachep = kmem_cache_create("svc_rdma_map_cache",
+						sizeof(struct svc_rdma_req_map),
+						0,
+						SLAB_HWCACHE_ALIGN,
+						NULL);
+	if (!svc_rdma_map_cachep) {
+		printk(KERN_INFO "Could not allocate map cache.\n");
+		goto err;
+	}
+
 	/* Register RDMA with the SVC transport switch */
 	svc_reg_xprt_class(&svc_rdma_class);
 	return 0;
+
+ err:
+	unregister_sysctl_table(svcrdma_table_header);
+	return -ENOMEM;
 }
 MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
 MODULE_DESCRIPTION("SVC RDMA Transport");
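
Note: the object being cached above, struct svc_rdma_req_map, is not defined in this diff (it is presumably declared in the svcrdma header). As a rough sketch only, inferred from the sizeof() in the hunk above and the map->count initialization in the second file below, the cached object could look something like this; the sge array and its bound are assumptions for illustration, not taken from the patch:

	struct svc_rdma_req_map {
		unsigned long count;			/* entries in use */
		struct kvec sge[RPCSVC_MAXPAGES];	/* assumed: per-request scratch kvec space */
	};
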
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index e132509d1db0..ae90758d8e9b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -173,6 +173,32 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 	atomic_dec(&xprt->sc_ctxt_used);
 }
 
+/* Temporary NFS request map cache. Created in svc_rdma.c */
+extern struct kmem_cache *svc_rdma_map_cachep;
+
+/*
+ * Temporary NFS req mappings are shared across all transport
+ * instances. These are short lived and should be bounded by the number
+ * of concurrent server threads * depth of the SQ.
+ */
+struct svc_rdma_req_map *svc_rdma_get_req_map(void)
+{
+	struct svc_rdma_req_map *map;
+	while (1) {
+		map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
+		if (map)
+			break;
+		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
+	}
+	map->count = 0;
+	return map;
+}
+
+void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
+{
+	kmem_cache_free(svc_rdma_map_cachep, map);
+}
+
 /* ib_cq event handler */
 static void cq_event_handler(struct ib_event *event, void *context)
 {
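
The intended call pattern for the two new helpers is not shown in this patch. Below is a minimal caller-side sketch assuming the hypothetical struct layout noted earlier; build_sge_list() is a made-up name used only to illustrate the get/put pairing:

	/* Hypothetical caller illustrating the get/put pairing. */
	static int build_sge_list(struct xdr_buf *xdr)
	{
		struct svc_rdma_req_map *map;

		/* May sleep (500 ms retry loop) but never returns NULL. */
		map = svc_rdma_get_req_map();

		/* Record the RPC header segment in the scratch map. */
		map->sge[map->count].iov_base = xdr->head[0].iov_base;
		map->sge[map->count].iov_len = xdr->head[0].iov_len;
		map->count++;

		/* ... build and post the RDMA work request from map->sge[] ... */

		/* Return the map to the shared slab cache when done. */
		svc_rdma_put_req_map(map);
		return 0;
	}

Because svc_rdma_get_req_map() retries with schedule_timeout_uninterruptible() instead of failing, callers must be running in a context that can sleep, and every map taken from the shared cache must be handed back via svc_rdma_put_req_map() once the request completes.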