author     Roland Dreier <rolandd@cisco.com>     2006-01-31 23:45:51 -0500
committer  Roland Dreier <rolandd@cisco.com>     2006-03-20 13:08:07 -0500
commit     d9b98b0f11ee7dd39429001ba289f095b9d66917 (patch)
tree       c473a7b348640cb80c4584ef5ea8dd055efed577 /drivers/infiniband
parent     7705a8792b0fc82fd7d4dd923724606bbfd9fb20 (diff)
IB/mthca: Make functions that never fail return void
The function mthca_free_err_wqe() can never fail, so get rid of its
return value. That means handle_error_cqe() doesn't have to check
what mthca_free_err_wqe() returns, which means it can't fail either
and doesn't need to return anything. All this results in
simpler source code and a slight object code improvement:
add/remove: 0/0 grow/shrink: 0/2 up/down: 0/-10 (-10)
function                               old     new   delta
mthca_free_err_wqe                      83      81      -2
mthca_poll_cq                         1758    1750      -8
Signed-off-by: Roland Dreier <rolandd@cisco.com>
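
As an aside for readers unfamiliar with the pattern, here is a minimal,
self-contained C sketch of the same idea; it is not mthca code, and the
names (release_resource, handle_completion) are invented for the example.
A helper whose every path succeeds is declared void, so its caller has no
error value to check or propagate.

#include <stdio.h>

/* Hypothetical stand-in for a work queue entry. */
struct resource {
        int in_use;
};

/*
 * This routine cannot fail, so it returns void rather than an int that
 * would always be 0 -- the same change made to mthca_free_err_wqe().
 */
static void release_resource(struct resource *res)
{
        res->in_use = 0;
}

/* The caller needs no "err = ...; if (err) return err;" dance. */
static void handle_completion(struct resource *res)
{
        release_resource(res);
}

int main(void)
{
        struct resource r = { .in_use = 1 };

        handle_completion(&r);
        printf("in_use = %d\n", r.in_use);
        return 0;
}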
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c  | 25
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h |  4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c  |  8
3 files changed, 15 insertions, 22 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 96f1a86bf049..16a851b341f8 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -324,12 +324,11 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
         wake_up(&cq->wait);
 }
 
-static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
-                            struct mthca_qp *qp, int wqe_index, int is_send,
-                            struct mthca_err_cqe *cqe,
-                            struct ib_wc *entry, int *free_cqe)
+static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
+                             struct mthca_qp *qp, int wqe_index, int is_send,
+                             struct mthca_err_cqe *cqe,
+                             struct ib_wc *entry, int *free_cqe)
 {
-        int err;
         int dbd;
         __be32 new_wqe;
 
@@ -412,11 +411,9 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
          * error case, so we don't have to check the doorbell count, etc.
          */
         if (mthca_is_memfree(dev))
-                return 0;
+                return;
 
-        err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
-        if (err)
-                return err;
+        mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
 
         /*
          * If we're at the end of the WQE chain, or we've used up our
@@ -424,15 +421,13 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
          * the next poll operation.
          */
         if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
-                return 0;
+                return;
 
         cqe->db_cnt = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
         cqe->wqe = new_wqe;
         cqe->syndrome = SYNDROME_WR_FLUSH_ERR;
 
         *free_cqe = 0;
-
-        return 0;
 }
 
 static inline int mthca_poll_one(struct mthca_dev *dev,
@@ -518,9 +513,9 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
         }
 
         if (is_error) {
-                err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
-                                       (struct mthca_err_cqe *) cqe,
-                                       entry, &free_cqe);
+                handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
+                                 (struct mthca_err_cqe *) cqe,
+                                 entry, &free_cqe);
                 goto out;
         }
 
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index e481037288d6..c98628ab8a09 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -495,8 +495,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                           struct ib_send_wr **bad_wr);
 int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                              struct ib_recv_wr **bad_wr);
-int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
-                       int index, int *dbd, __be32 *new_wqe);
+void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
+                        int index, int *dbd, __be32 *new_wqe);
 int mthca_alloc_qp(struct mthca_dev *dev,
                    struct mthca_pd *pd,
                    struct mthca_cq *send_cq,
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index fba608ed7df2..79245717e98f 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -2182,8 +2182,8 @@ out:
         return err;
 }
 
-int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
-                       int index, int *dbd, __be32 *new_wqe)
+void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
+                        int index, int *dbd, __be32 *new_wqe)
 {
         struct mthca_next_seg *next;
 
@@ -2193,7 +2193,7 @@ int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
          */
         if (qp->ibqp.srq) {
                 *new_wqe = 0;
-                return 0;
+                return;
         }
 
         if (is_send)
@@ -2207,8 +2207,6 @@ int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
                    (next->ee_nds & cpu_to_be32(0x3f));
         else
                 *new_wqe = 0;
-
-        return 0;
 }
 
 int __devinit mthca_init_qp_table(struct mthca_dev *dev)