author	Emil Goode <emilgoode@gmail.com>	2012-08-19 13:59:40 -0400
committer	Roland Dreier <roland@purestorage.com>	2012-09-30 23:32:14 -0400
commit	c079c28714e4d1a0b7cad120f84217e0fcde09a6 (patch)
tree	12349a5ebbdfd7bbf847a76aa413aade7bfcbe5a /drivers/infiniband/hw/cxgb4
parent	979570e02981d4a8fc20b3cc8fd651856c98ee9d (diff)
RDMA/cxgb4: Fix error handling in create_qp()
The variable ret is assigned return values in a couple of places, but
its value is never returned. This patch makes use of the ret variable
so that the caller gets correct error codes returned.

The following changes are also introduced:

- The alloc_oc_sq function can return -ENOSYS or -ENOMEM, so we want
  to get the return value from it.
- Change the label names to improve readability.

Signed-off-by: Emil Goode <emilgoode@gmail.com>
Acked-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
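For readers unfamiliar with the idiom, the sketch below shows the general
shape of the error handling this patch moves create_qp() toward: a single
ret variable records the first failure, and each descriptively named label
unwinds exactly the resources allocated before the failing step. This is a
stand-alone userspace illustration with invented names (struct thing,
init_thing, destroy_thing, and malloc stand-ins for the queue allocations);
it is not code from the driver.

	/* Hypothetical example of goto-based unwinding with one 'ret'
	 * that carries the real error code back to the caller. */
	#include <errno.h>
	#include <stdlib.h>

	struct thing {
		void *sq;	/* stands in for the send-queue allocation */
		void *rq;	/* stands in for the receive-queue allocation */
		void *rqt;	/* stands in for the RQT pool entry */
	};

	static int init_thing(struct thing *t)
	{
		int ret;

		t->sq = malloc(64);
		if (!t->sq)
			return -ENOMEM;	/* nothing to unwind yet */

		t->rq = malloc(64);
		if (!t->rq) {
			ret = -ENOMEM;	/* record the cause of failure */
			goto free_sq;	/* then unwind in reverse order */
		}

		t->rqt = malloc(64);
		if (!t->rqt) {
			ret = -ENOMEM;
			goto free_rq;
		}

		return 0;		/* success: caller owns the resources */

	free_rq:
		free(t->rq);
	free_sq:
		free(t->sq);
		return ret;		/* propagate the real error code */
	}

	static void destroy_thing(struct thing *t)
	{
		free(t->rqt);
		free(t->rq);
		free(t->sq);
	}

	int main(void)
	{
		struct thing t;
		int ret = init_thing(&t);

		if (ret)
			return 1;
		destroy_thing(&t);
		return 0;
	}

The payoff, as the message notes for alloc_oc_sq(), is that the caller can
see the actual failure (e.g. -ENOSYS vs. -ENOMEM) rather than a hard-coded
-ENOMEM, and adding an allocation in the middle of the sequence only
requires adding one label to the unwind chain.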
Diffstat (limited to 'drivers/infiniband/hw/cxgb4')
-rw-r--r--	drivers/infiniband/hw/cxgb4/qp.c	62
1 file changed, 38 insertions(+), 24 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 45aedf1d9338..e2bf9c68cfc8 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -137,19 +137,25 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		return -ENOMEM;
 
 	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
-	if (!wq->rq.qid)
-		goto err1;
+	if (!wq->rq.qid) {
+		ret = -ENOMEM;
+		goto free_sq_qid;
+	}
 
 	if (!user) {
 		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
 				 GFP_KERNEL);
-		if (!wq->sq.sw_sq)
-			goto err2;
+		if (!wq->sq.sw_sq) {
+			ret = -ENOMEM;
+			goto free_rq_qid;
+		}
 
 		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
 				 GFP_KERNEL);
-		if (!wq->rq.sw_rq)
-			goto err3;
+		if (!wq->rq.sw_rq) {
+			ret = -ENOMEM;
+			goto free_sw_sq;
+		}
 	}
 
 	/*
@@ -157,15 +163,23 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	 */
 	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
 	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
-	if (!wq->rq.rqt_hwaddr)
-		goto err4;
+	if (!wq->rq.rqt_hwaddr) {
+		ret = -ENOMEM;
+		goto free_sw_rq;
+	}
 
 	if (user) {
-		if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
-			goto err5;
+		ret = alloc_oc_sq(rdev, &wq->sq);
+		if (ret)
+			goto free_hwaddr;
+
+		ret = alloc_host_sq(rdev, &wq->sq);
+		if (ret)
+			goto free_sq;
 	} else
-		if (alloc_host_sq(rdev, &wq->sq))
-			goto err5;
+		ret = alloc_host_sq(rdev, &wq->sq);
+		if (ret)
+			goto free_hwaddr;
 	memset(wq->sq.queue, 0, wq->sq.memsize);
 	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
 
@@ -173,7 +187,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 					  wq->rq.memsize, &(wq->rq.dma_addr),
 					  GFP_KERNEL);
 	if (!wq->rq.queue)
-		goto err6;
+		goto free_sq;
 	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
 		__func__, wq->sq.queue,
 		(unsigned long long)virt_to_phys(wq->sq.queue),
@@ -201,7 +215,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb) {
 		ret = -ENOMEM;
-		goto err7;
+		goto free_dma;
 	}
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
@@ -266,33 +280,33 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 
 	ret = c4iw_ofld_send(rdev, skb);
 	if (ret)
-		goto err7;
+		goto free_dma;
 	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
 	if (ret)
-		goto err7;
+		goto free_dma;
 
 	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
 	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
 	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
 
 	return 0;
-err7:
+free_dma:
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  wq->rq.memsize, wq->rq.queue,
 			  dma_unmap_addr(&wq->rq, mapping));
-err6:
+free_sq:
 	dealloc_sq(rdev, &wq->sq);
-err5:
+free_hwaddr:
 	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
-err4:
+free_sw_rq:
 	kfree(wq->rq.sw_rq);
-err3:
+free_sw_sq:
 	kfree(wq->sq.sw_sq);
-err2:
+free_rq_qid:
 	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
-err1:
+free_sq_qid:
 	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
-	return -ENOMEM;
+	return ret;
 }
 
 static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,