author     Joachim Fenkes <fenkes@de.ibm.com>   2007-07-09 09:29:03 -0400
committer  Roland Dreier <rolandd@cisco.com>    2007-07-09 23:12:27 -0400
commit     9844b71baa60270110eabaa9589d3260443d1a71 (patch)
tree       30e443f3329bf1c0b4e78d0787c8e33158dff59e /drivers/infiniband/hw/ehca/ehca_reqs.c
parent     15f001ec47b049051f679f8b8c965ac9aae95b3e (diff)
IB/ehca: Lock renaming, static initializers
- Rename all spinlock flags to "flags", matching the vast majority of kernel
code.
- Move hcall_lock into the only file it's used in.
- Replace spin_lock_init() and friends with static initializers for
  global variables (see the sketch below).
Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
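
For readers unfamiliar with the idioms the bullets refer to, here is a minimal sketch; the lock and function names are hypothetical and not taken from the ehca driver. It shows a global lock declared with the DEFINE_SPINLOCK() static initializer instead of a runtime spin_lock_init() call, and the conventional `flags` name for the IRQ state saved by spin_lock_irqsave():

```c
#include <linux/spinlock.h>

/* Before: a global lock that needs a runtime spin_lock_init() call. */
static spinlock_t old_style_lock;		/* hypothetical lock */

static void old_style_setup(void)
{
	spin_lock_init(&old_style_lock);
}

/* After: a static initializer, no init code required. */
static DEFINE_SPINLOCK(new_style_lock);		/* hypothetical lock */

static void locked_work(void)
{
	unsigned long flags;	/* conventional name, rather than spl_flags */

	spin_lock_irqsave(&new_style_lock, flags);
	/* ... critical section with local interrupts disabled ... */
	spin_unlock_irqrestore(&new_style_lock, flags);
}
```

The static initializer also removes an ordering concern: the lock is usable as soon as the object exists, before any init function has run.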
Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_reqs.c')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c  24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index b5664fa34de3..73f0c0652a00 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -363,10 +363,10 @@ int ehca_post_send(struct ib_qp *qp,
 	struct ehca_wqe *wqe_p;
 	int wqe_cnt = 0;
 	int ret = 0;
-	unsigned long spl_flags;
+	unsigned long flags;
 
 	/* LOCK the QUEUE */
-	spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
+	spin_lock_irqsave(&my_qp->spinlock_s, flags);
 
 	/* loop processes list of send reqs */
 	for (cur_send_wr = send_wr; cur_send_wr != NULL;
@@ -408,7 +408,7 @@ int ehca_post_send(struct ib_qp *qp,
 
 post_send_exit0:
 	/* UNLOCK the QUEUE */
-	spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags);
+	spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
 	iosync(); /* serialize GAL register access */
 	hipz_update_sqa(my_qp, wqe_cnt);
 	return ret;
@@ -423,7 +423,7 @@ static int internal_post_recv(struct ehca_qp *my_qp,
 	struct ehca_wqe *wqe_p;
 	int wqe_cnt = 0;
 	int ret = 0;
-	unsigned long spl_flags;
+	unsigned long flags;
 
 	if (unlikely(!HAS_RQ(my_qp))) {
 		ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
@@ -432,7 +432,7 @@ static int internal_post_recv(struct ehca_qp *my_qp,
 	}
 
 	/* LOCK the QUEUE */
-	spin_lock_irqsave(&my_qp->spinlock_r, spl_flags);
+	spin_lock_irqsave(&my_qp->spinlock_r, flags);
 
 	/* loop processes list of send reqs */
 	for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
@@ -473,7 +473,7 @@ static int internal_post_recv(struct ehca_qp *my_qp,
 	} /* eof for cur_recv_wr */
 
 post_recv_exit0:
-	spin_unlock_irqrestore(&my_qp->spinlock_r, spl_flags);
+	spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
 	iosync(); /* serialize GAL register access */
 	hipz_update_rqa(my_qp, wqe_cnt);
 	return ret;
@@ -536,7 +536,7 @@ poll_cq_one_read_cqe:
 	if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
 		struct ehca_qp *qp=ehca_cq_get_qp(my_cq, cqe->local_qp_number);
 		int purgeflag;
-		unsigned long spl_flags;
+		unsigned long flags;
 		if (!qp) {
 			ehca_err(cq->device, "cq_num=%x qp_num=%x "
 				 "could not find qp -> ignore cqe",
@@ -546,9 +546,9 @@ poll_cq_one_read_cqe:
 			/* ignore this purged cqe */
 			goto poll_cq_one_read_cqe;
 		}
-		spin_lock_irqsave(&qp->spinlock_s, spl_flags);
+		spin_lock_irqsave(&qp->spinlock_s, flags);
 		purgeflag = qp->sqerr_purgeflag;
-		spin_unlock_irqrestore(&qp->spinlock_s, spl_flags);
+		spin_unlock_irqrestore(&qp->spinlock_s, flags);
 
 		if (purgeflag) {
 			ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x "
@@ -633,7 +633,7 @@ int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
 	int nr;
 	struct ib_wc *current_wc = wc;
 	int ret = 0;
-	unsigned long spl_flags;
+	unsigned long flags;
 
 	if (num_entries < 1) {
 		ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
@@ -642,14 +642,14 @@ int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
 		goto poll_cq_exit0;
 	}
 
-	spin_lock_irqsave(&my_cq->spinlock, spl_flags);
+	spin_lock_irqsave(&my_cq->spinlock, flags);
 	for (nr = 0; nr < num_entries; nr++) {
 		ret = ehca_poll_cq_one(cq, current_wc);
 		if (ret)
 			break;
 		current_wc++;
 	} /* eof for nr */
-	spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
+	spin_unlock_irqrestore(&my_cq->spinlock, flags);
 	if (ret == -EAGAIN || !ret)
 		ret = nr;
 