Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_rc.c')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_rc.c	55
1 file changed, 28 insertions, 27 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index b4b88d0b53f5..1915771fd038 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -98,13 +98,21 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 	case OP(RDMA_READ_RESPONSE_LAST):
 	case OP(RDMA_READ_RESPONSE_ONLY):
 	case OP(ATOMIC_ACKNOWLEDGE):
-		qp->s_ack_state = OP(ACKNOWLEDGE);
+		/*
+		 * We can increment the tail pointer now that the last
+		 * response has been sent instead of only being
+		 * constructed.
+		 */
+		if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
+			qp->s_tail_ack_queue = 0;
 		/* FALLTHROUGH */
+	case OP(SEND_ONLY):
 	case OP(ACKNOWLEDGE):
 		/* Check for no next entry in the queue. */
 		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
 			if (qp->s_flags & IPATH_S_ACK_PENDING)
 				goto normal;
+			qp->s_ack_state = OP(ACKNOWLEDGE);
 			goto bail;
 		}
 
@@ -117,12 +125,8 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 		if (len > pmtu) {
 			len = pmtu;
 			qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
-		} else {
+		} else
 			qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
-			if (++qp->s_tail_ack_queue >
-			    IPATH_MAX_RDMA_ATOMIC)
-				qp->s_tail_ack_queue = 0;
-		}
 		ohdr->u.aeth = ipath_compute_aeth(qp);
 		hwords++;
 		qp->s_ack_rdma_psn = e->psn;
@@ -139,8 +143,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 				cpu_to_be32(e->atomic_data);
 			hwords += sizeof(ohdr->u.at) / sizeof(u32);
 			bth2 = e->psn;
-			if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
-				qp->s_tail_ack_queue = 0;
 		}
 		bth0 = qp->s_ack_state << 24;
 		break;
@@ -156,8 +158,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 			ohdr->u.aeth = ipath_compute_aeth(qp);
 			hwords++;
 			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
-			if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
-				qp->s_tail_ack_queue = 0;
 		}
 		bth0 = qp->s_ack_state << 24;
 		bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
@@ -171,7 +171,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 		 * the ACK before setting s_ack_state to ACKNOWLEDGE
 		 * (see above).
 		 */
-		qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
+		qp->s_ack_state = OP(SEND_ONLY);
 		qp->s_flags &= ~IPATH_S_ACK_PENDING;
 		qp->s_cur_sge = NULL;
 		if (qp->s_nak_state)
@@ -223,23 +223,18 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 	/* Sending responses has higher priority over sending requests. */
 	if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
 	     (qp->s_flags & IPATH_S_ACK_PENDING) ||
-	     qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE) &&
+	     qp->s_ack_state != OP(ACKNOWLEDGE)) &&
 	    ipath_make_rc_ack(qp, ohdr, pmtu, bth0p, bth2p))
 		goto done;
 
 	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
-	    qp->s_rnr_timeout)
+	    qp->s_rnr_timeout || qp->s_wait_credit)
 		goto bail;
 
 	/* Limit the number of packets sent without an ACK. */
 	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) {
 		qp->s_wait_credit = 1;
 		dev->n_rc_stalls++;
-		spin_lock(&dev->pending_lock);
-		if (list_empty(&qp->timerwait))
-			list_add_tail(&qp->timerwait,
-				      &dev->pending[dev->pending_index]);
-		spin_unlock(&dev->pending_lock);
 		goto bail;
 	}
 
@@ -587,9 +582,12 @@ static void send_rc_ack(struct ipath_qp *qp)
 	u32 hwords;
 	struct ipath_ib_header hdr;
 	struct ipath_other_headers *ohdr;
+	unsigned long flags;
 
 	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
-	if (qp->r_head_ack_queue != qp->s_tail_ack_queue)
+	if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
+	    (qp->s_flags & IPATH_S_ACK_PENDING) ||
+	    qp->s_ack_state != OP(ACKNOWLEDGE))
 		goto queue_ack;
 
 	/* Construct the header. */
@@ -640,11 +638,11 @@ static void send_rc_ack(struct ipath_qp *qp)
 		dev->n_rc_qacks++;
 
 queue_ack:
-	spin_lock_irq(&qp->s_lock);
+	spin_lock_irqsave(&qp->s_lock, flags);
 	qp->s_flags |= IPATH_S_ACK_PENDING;
 	qp->s_nak_state = qp->r_nak_state;
 	qp->s_ack_psn = qp->r_ack_psn;
-	spin_unlock_irq(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 
 	/* Call ipath_do_rc_send() in another thread. */
 	tasklet_hi_schedule(&qp->s_task);
@@ -1261,6 +1259,7 @@ ack_err:
 	wc.dlid_path_bits = 0;
 	wc.port_num = 0;
 	ipath_sqerror_qp(qp, &wc);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 bail:
 	return;
 }
@@ -1294,6 +1293,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 	struct ipath_ack_entry *e;
 	u8 i, prev;
 	int old_req;
+	unsigned long flags;
 
 	if (diff > 0) {
 		/*
@@ -1327,7 +1327,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 	psn &= IPATH_PSN_MASK;
 	e = NULL;
 	old_req = 1;
-	spin_lock_irq(&qp->s_lock);
+	spin_lock_irqsave(&qp->s_lock, flags);
 	for (i = qp->r_head_ack_queue; ; i = prev) {
 		if (i == qp->s_tail_ack_queue)
 			old_req = 0;
@@ -1425,7 +1425,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 		 * after all the previous RDMA reads and atomics.
 		 */
 		if (i == qp->r_head_ack_queue) {
-			spin_unlock_irq(&qp->s_lock);
+			spin_unlock_irqrestore(&qp->s_lock, flags);
 			qp->r_nak_state = 0;
 			qp->r_ack_psn = qp->r_psn - 1;
 			goto send_ack;
@@ -1439,11 +1439,10 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 		break;
 	}
 	qp->r_nak_state = 0;
-	spin_unlock_irq(&qp->s_lock);
 	tasklet_hi_schedule(&qp->s_task);
 
 unlock_done:
-	spin_unlock_irq(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 done:
 	return 1;
 
@@ -1453,10 +1452,12 @@ send_ack:
 
 static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
 {
-	spin_lock_irq(&qp->s_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
 	qp->state = IB_QPS_ERR;
 	ipath_error_qp(qp, err);
-	spin_unlock_irq(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 }
 
 /**