author     Tom Tucker <tom@opengridcomputing.com>    2006-10-03 10:46:41 -0400
committer  Roland Dreier <rolandd@cisco.com>         2006-10-10 12:51:13 -0400
commit     e52e6080ca10e0a8ce2a35c86965945cdfa8ed7e
tree       c76aa076673165e66d905e5271c8deca90329f68 /drivers/infiniband/hw/amso1100
parent     ebf7a227dd1d810203a19642655d2fa293f395dd
RDMA/amso1100: Add spinlocks to serialize ib_post_send/ib_post_recv
The AMSO driver was not thread-safe in the post WR code, and it had code
that would sleep if the WR post FIFO was full.  Since these functions can
be called at interrupt level, I changed the sleep to a udelay.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/amso1100')
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_qp.c  16
1 file changed, 12 insertions(+), 4 deletions(-)
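
Before the diff itself, a minimal sketch of the pattern the patch applies to both post paths. The helper name below is hypothetical and not part of the driver, and the sketch assumes the driver's existing declarations (struct c2_dev, struct c2_qp, union c2wr, qp_wr_post(), c2_activity()); the real c2_post_send()/c2_post_receive() do considerably more work, building the adapter WR from the ib_*_wr and walking the request chain.

/*
 * Hypothetical sketch, not driver code: the serialization added by this
 * patch.  qp->lock is taken with irqsave because the post paths may be
 * entered at interrupt level, and it is held across both the WR FIFO
 * post and the activity-FIFO doorbell so concurrent posters stay ordered.
 */
static int c2_post_one_wr_sketch(struct c2_dev *c2dev, struct c2_qp *qp,
				 union c2wr *wr, u32 msg_size)
{
	unsigned long lock_flags;
	int err;

	spin_lock_irqsave(&qp->lock, lock_flags);
	err = qp_wr_post(&qp->sq_mq, wr, qp, msg_size);
	if (!err)
		c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
	spin_unlock_irqrestore(&qp->lock, lock_flags);

	return err;
}

The patch keeps the lock held until after c2_activity(), which appears intended to keep a given WR's FIFO post and doorbell from interleaving with another poster's.
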
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 12261132b077..5bcf697aa335 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -35,6 +35,8 @@
  *
  */
 
+#include <linux/delay.h>
+
 #include "c2.h"
 #include "c2_vq.h"
 #include "c2_status.h"
@@ -705,10 +707,8 @@ static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
 	 * cannot get on the bus and the card and system hang in a
 	 * deadlock -- thus the need for this code. [TOT]
 	 */
-	while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(0);
-	}
+	while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000)
+		udelay(10);
 
 	__raw_writel(C2_HINT_MAKE(mq_index, shared),
 		     c2dev->regs + PCI_BAR0_ADAPTER_HINT);
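
A note on the hunk above: with the new qp->lock taken via spin_lock_irqsave() and the post paths callable at interrupt level, this code may no longer sleep, so the schedule_timeout() wait on the adapter hint register is replaced by a udelay() busy-wait. Restated as a hypothetical standalone helper (the name is not in the driver):

/* Hypothetical helper, not in the driver: poll the adapter hint register
 * with udelay() because sleeping (set_current_state()/schedule_timeout())
 * is not allowed in atomic or interrupt context. */
static void c2_wait_adapter_hint_sketch(struct c2_dev *c2dev)
{
	while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000)
		udelay(10);
}
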
@@ -766,6 +766,7 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
 	struct c2_dev *c2dev = to_c2dev(ibqp->device);
 	struct c2_qp *qp = to_c2qp(ibqp);
 	union c2wr wr;
+	unsigned long lock_flags;
 	int err = 0;
 
 	u32 flags;
@@ -881,8 +882,10 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
 		/*
 		 * Post the puppy!
 		 */
+		spin_lock_irqsave(&qp->lock, lock_flags);
 		err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
 		if (err) {
+			spin_unlock_irqrestore(&qp->lock, lock_flags);
 			break;
 		}
 
@@ -890,6 +893,7 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
 		 * Enqueue mq index to activity FIFO.
 		 */
 		c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
+		spin_unlock_irqrestore(&qp->lock, lock_flags);
 
 		ib_wr = ib_wr->next;
 	}
@@ -905,6 +909,7 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
 	struct c2_dev *c2dev = to_c2dev(ibqp->device);
 	struct c2_qp *qp = to_c2qp(ibqp);
 	union c2wr wr;
+	unsigned long lock_flags;
 	int err = 0;
 
 	if (qp->state > IB_QPS_RTS)
@@ -945,8 +950,10 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
 			break;
 		}
 
+		spin_lock_irqsave(&qp->lock, lock_flags);
 		err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
 		if (err) {
+			spin_unlock_irqrestore(&qp->lock, lock_flags);
 			break;
 		}
 
@@ -954,6 +961,7 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
 		 * Enqueue mq index to activity FIFO
 		 */
 		c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
+		spin_unlock_irqrestore(&qp->lock, lock_flags);
 
 		ib_wr = ib_wr->next;
 	}