diff options
author | Tom Tucker <tom@opengridcomputing.com> | 2008-05-28 14:54:04 -0400 |
---|---|---|
committer | Tom Tucker <tom@opengridcomputing.com> | 2008-07-02 16:01:53 -0400 |
commit | ab96dddbedf4bb8a7a0fe44012efc1d99598c36f (patch) | |
tree | bb3fcfe50264d9a0eb63b768aab8154d70185975 /net | |
parent | e1441b9a41c33aa9236008a7cfe49a8e723fb397 (diff) |
svcrdma: Add a type for keeping NFS RPC mapping
Create a new data structure to hold the mapping from the remote
client's address space to the local server's address space.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Diffstat (limited to 'net')
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma.c | 19 | ||||
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_transport.c | 26 |
2 files changed, 45 insertions, 0 deletions
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c index 88c0ca20bb1e..171f2053e90c 100644 --- a/net/sunrpc/xprtrdma/svc_rdma.c +++ b/net/sunrpc/xprtrdma/svc_rdma.c | |||
@@ -69,6 +69,9 @@ atomic_t rdma_stat_rq_prod; | |||
69 | atomic_t rdma_stat_sq_poll; | 69 | atomic_t rdma_stat_sq_poll; |
70 | atomic_t rdma_stat_sq_prod; | 70 | atomic_t rdma_stat_sq_prod; |
71 | 71 | ||
72 | /* Temporary NFS request map cache */ | ||
73 | struct kmem_cache *svc_rdma_map_cachep; | ||
74 | |||
72 | /* | 75 | /* |
73 | * This function implements reading and resetting an atomic_t stat | 76 | * This function implements reading and resetting an atomic_t stat |
74 | * variable through read/write to a proc file. Any write to the file | 77 | * variable through read/write to a proc file. Any write to the file |
@@ -241,6 +244,7 @@ void svc_rdma_cleanup(void) | |||
241 | svcrdma_table_header = NULL; | 244 | svcrdma_table_header = NULL; |
242 | } | 245 | } |
243 | svc_unreg_xprt_class(&svc_rdma_class); | 246 | svc_unreg_xprt_class(&svc_rdma_class); |
247 | kmem_cache_destroy(svc_rdma_map_cachep); | ||
244 | } | 248 | } |
245 | 249 | ||
246 | int svc_rdma_init(void) | 250 | int svc_rdma_init(void) |
@@ -255,9 +259,24 @@ int svc_rdma_init(void) | |||
255 | svcrdma_table_header = | 259 | svcrdma_table_header = |
256 | register_sysctl_table(svcrdma_root_table); | 260 | register_sysctl_table(svcrdma_root_table); |
257 | 261 | ||
262 | /* Create the temporary map cache */ | ||
263 | svc_rdma_map_cachep = kmem_cache_create("svc_rdma_map_cache", | ||
264 | sizeof(struct svc_rdma_req_map), | ||
265 | 0, | ||
266 | SLAB_HWCACHE_ALIGN, | ||
267 | NULL); | ||
268 | if (!svc_rdma_map_cachep) { | ||
269 | printk(KERN_INFO "Could not allocate map cache.\n"); | ||
270 | goto err; | ||
271 | } | ||
272 | |||
258 | /* Register RDMA with the SVC transport switch */ | 273 | /* Register RDMA with the SVC transport switch */ |
259 | svc_reg_xprt_class(&svc_rdma_class); | 274 | svc_reg_xprt_class(&svc_rdma_class); |
260 | return 0; | 275 | return 0; |
276 | |||
277 | err: | ||
278 | unregister_sysctl_table(svcrdma_table_header); | ||
279 | return -ENOMEM; | ||
261 | } | 280 | } |
262 | MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>"); | 281 | MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>"); |
263 | MODULE_DESCRIPTION("SVC RDMA Transport"); | 282 | MODULE_DESCRIPTION("SVC RDMA Transport"); |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index e132509d1db0..ae90758d8e9b 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -173,6 +173,32 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages) | |||
173 | atomic_dec(&xprt->sc_ctxt_used); | 173 | atomic_dec(&xprt->sc_ctxt_used); |
174 | } | 174 | } |
175 | 175 | ||
176 | /* Temporary NFS request map cache. Created in svc_rdma.c */ | ||
177 | extern struct kmem_cache *svc_rdma_map_cachep; | ||
178 | |||
179 | /* | ||
180 | * Temporary NFS req mappings are shared across all transport | ||
181 | * instances. These are short lived and should be bounded by the number | ||
182 | * of concurrent server threads * depth of the SQ. | ||
183 | */ | ||
184 | struct svc_rdma_req_map *svc_rdma_get_req_map(void) | ||
185 | { | ||
186 | struct svc_rdma_req_map *map; | ||
187 | while (1) { | ||
188 | map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL); | ||
189 | if (map) | ||
190 | break; | ||
191 | schedule_timeout_uninterruptible(msecs_to_jiffies(500)); | ||
192 | } | ||
193 | map->count = 0; | ||
194 | return map; | ||
195 | } | ||
196 | |||
197 | void svc_rdma_put_req_map(struct svc_rdma_req_map *map) | ||
198 | { | ||
199 | kmem_cache_free(svc_rdma_map_cachep, map); | ||
200 | } | ||
201 | |||
176 | /* ib_cq event handler */ | 202 | /* ib_cq event handler */ |
177 | static void cq_event_handler(struct ib_event *event, void *context) | 203 | static void cq_event_handler(struct ib_event *event, void *context) |
178 | { | 204 | { |