author		Tom Tucker <tom@opengridcomputing.com>	2008-04-30 21:44:39 -0400
committer	Tom Tucker <tom@opengridcomputing.com>	2008-05-19 08:33:52 -0400
commit		8740767376b32a7772607e1b2b07cde0c24120cc (patch)
tree		62e9831548e0cdd2e70951a9ab9df4b93ca440aa
parent		02e7452de74d308ca642f54f7e5ef801ced60a92 (diff)

svcrdma: Use standard Linux lists for context cache

Replace the one-off linked list used to implement the context cache
with standard Linux list_head lists. Add a context counter to catch
resource leaks. A WARN_ON will be added later to ensure that we've
freed all contexts.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
 include/linux/sunrpc/svc_rdma.h          |  5
 net/sunrpc/xprtrdma/svc_rdma_transport.c | 47
 2 files changed, 29 insertions(+), 23 deletions(-)
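For readers unfamiliar with the pattern this patch adopts, here is a minimal
userspace sketch of the same free-list idea. The tiny helpers below stand in
for their <linux/list.h> namesakes, and struct demo_ctxt with the demo flow
in main() is illustrative only, not code from this patch:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the <linux/list.h> helpers used by the patch. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

/* Recover the enclosing structure from a pointer to its embedded member. */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative miniature of svc_rdma_op_ctxt. */
struct demo_ctxt {
	int id;
	struct list_head free_list;
};

int main(void)
{
	struct list_head free_q;	/* plays the role of sc_ctxt_free */
	struct demo_ctxt *ctxt;
	int i;

	INIT_LIST_HEAD(&free_q);

	/* create_context_cache(): populate the free list. */
	for (i = 0; i < 3; i++) {
		ctxt = malloc(sizeof(*ctxt));
		ctxt->id = i;
		INIT_LIST_HEAD(&ctxt->free_list);
		list_add(&ctxt->free_list, &free_q);
	}

	/* svc_rdma_get_context(): unlink the first free context. */
	ctxt = list_entry(free_q.next, struct demo_ctxt, free_list);
	list_del_init(&ctxt->free_list);
	printf("got ctxt %d\n", ctxt->id);

	/* svc_rdma_put_context(): link it back onto the free list. */
	list_add(&ctxt->free_list, &free_q);

	/* destroy_context_cache(): drain and free everything. */
	while (!list_empty(&free_q)) {
		ctxt = list_entry(free_q.next, struct demo_ctxt, free_list);
		list_del_init(&ctxt->free_list);
		free(ctxt);
	}
	return 0;
}

The list_entry() trick is what lets one generic list_head implementation
serve every structure: given a pointer to the embedded member, subtracting
its offset recovers the enclosing object.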
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index c447c417b37b..701439064d21 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -72,7 +72,7 @@ extern atomic_t rdma_stat_sq_prod;
  */
 struct svc_rdma_op_ctxt {
 	struct svc_rdma_op_ctxt *read_hdr;
-	struct svc_rdma_op_ctxt *next;
+	struct list_head free_list;
 	struct xdr_buf arg;
 	struct list_head dto_q;
 	enum ib_wr_opcode wr_op;
@@ -104,7 +104,8 @@ struct svcxprt_rdma {
 
 	struct ib_pd *sc_pd;
 
-	struct svc_rdma_op_ctxt *sc_ctxt_head;
+	atomic_t sc_ctxt_used;
+	struct list_head sc_ctxt_free;
 	int sc_ctxt_cnt;
 	int sc_ctxt_bump;
 	int sc_ctxt_max;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 34141eaf25a0..817cf4de746c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -103,8 +103,8 @@ static int rdma_bump_context_cache(struct svcxprt_rdma *xprt)
 	spin_lock_bh(&xprt->sc_ctxt_lock);
 	if (ctxt) {
 		at_least_one = 1;
-		ctxt->next = xprt->sc_ctxt_head;
-		xprt->sc_ctxt_head = ctxt;
+		INIT_LIST_HEAD(&ctxt->free_list);
+		list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
 	} else {
 		/* kmalloc failed...give up for now */
 		xprt->sc_ctxt_cnt--;
@@ -123,7 +123,7 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 
 	while (1) {
 		spin_lock_bh(&xprt->sc_ctxt_lock);
-		if (unlikely(xprt->sc_ctxt_head == NULL)) {
+		if (unlikely(list_empty(&xprt->sc_ctxt_free))) {
 			/* Try to bump my cache. */
 			spin_unlock_bh(&xprt->sc_ctxt_lock);
 
@@ -136,12 +136,15 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
 			continue;
 		}
-		ctxt = xprt->sc_ctxt_head;
-		xprt->sc_ctxt_head = ctxt->next;
+		ctxt = list_entry(xprt->sc_ctxt_free.next,
+				  struct svc_rdma_op_ctxt,
+				  free_list);
+		list_del_init(&ctxt->free_list);
 		spin_unlock_bh(&xprt->sc_ctxt_lock);
 		ctxt->xprt = xprt;
 		INIT_LIST_HEAD(&ctxt->dto_q);
 		ctxt->count = 0;
+		atomic_inc(&xprt->sc_ctxt_used);
 		break;
 	}
 	return ctxt;
@@ -163,10 +166,11 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 			    ctxt->sge[i].addr,
 			    ctxt->sge[i].length,
 			    ctxt->direction);
+
 	spin_lock_bh(&xprt->sc_ctxt_lock);
-	ctxt->next = xprt->sc_ctxt_head;
-	xprt->sc_ctxt_head = ctxt;
+	list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
 	spin_unlock_bh(&xprt->sc_ctxt_lock);
+	atomic_dec(&xprt->sc_ctxt_used);
 }
 
 /* ib_cq event handler */
@@ -412,28 +416,29 @@ static void create_context_cache(struct svcxprt_rdma *xprt,
 	xprt->sc_ctxt_max = ctxt_max;
 	xprt->sc_ctxt_bump = ctxt_bump;
 	xprt->sc_ctxt_cnt = 0;
-	xprt->sc_ctxt_head = NULL;
+	atomic_set(&xprt->sc_ctxt_used, 0);
+
+	INIT_LIST_HEAD(&xprt->sc_ctxt_free);
 	for (i = 0; i < ctxt_count; i++) {
 		ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
 		if (ctxt) {
-			ctxt->next = xprt->sc_ctxt_head;
-			xprt->sc_ctxt_head = ctxt;
+			INIT_LIST_HEAD(&ctxt->free_list);
+			list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
 			xprt->sc_ctxt_cnt++;
 		}
 	}
 }
 
-static void destroy_context_cache(struct svc_rdma_op_ctxt *ctxt)
+static void destroy_context_cache(struct svcxprt_rdma *xprt)
 {
-	struct svc_rdma_op_ctxt *next;
-	if (!ctxt)
-		return;
-
-	do {
-		next = ctxt->next;
-		kfree(ctxt);
-		ctxt = next;
-	} while (next);
+	while (!list_empty(&xprt->sc_ctxt_free)) {
+		struct svc_rdma_op_ctxt *ctxt;
+		ctxt = list_entry(xprt->sc_ctxt_free.next,
+				  struct svc_rdma_op_ctxt,
+				  free_list);
+		list_del_init(&ctxt->free_list);
+		kfree(ctxt);
+	}
 }
 
 static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
@@ -470,7 +475,7 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
 			    reqs +
 			    cma_xprt->sc_sq_depth +
 			    RPCRDMA_MAX_THREADS + 1); /* max */
-	if (!cma_xprt->sc_ctxt_head) {
+	if (list_empty(&cma_xprt->sc_ctxt_free)) {
 		kfree(cma_xprt);
 		return NULL;
 	}
@@ -976,7 +981,7 @@ static void svc_rdma_free(struct svc_xprt *xprt)
 	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
 		ib_dealloc_pd(rdma->sc_pd);
 
-	destroy_context_cache(rdma->sc_ctxt_head);
+	destroy_context_cache(rdma);
 	kfree(rdma);
 }
 
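The commit message defers the promised leak check to a later patch, so its
exact form is not part of this commit. Assuming it ends up in svc_rdma_free()
after destroy_context_cache() has drained the free list, a plausible sketch
(hypothetical, not committed code) would be:

static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	destroy_context_cache(rdma);
	/* Every context taken with svc_rdma_get_context() should have been
	 * returned with svc_rdma_put_context() by now; a nonzero count
	 * means a context leaked. Hypothetical check, see note above. */
	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
	kfree(rdma);
}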