-rw-r--r--  drivers/infiniband/hw/efa/efa_com_cmd.c   | 24
-rw-r--r--  drivers/infiniband/hw/efa/efa_verbs.c     | 21
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c         | 13
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.h         |  1
-rw-r--r--  drivers/infiniband/hw/hfi1/fault.c        |  5
-rw-r--r--  drivers/infiniband/hw/hfi1/hfi.h          | 31
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.c          | 21
-rw-r--r--  drivers/infiniband/hw/hfi1/rc.c           | 53
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c         | 26
-rw-r--r--  drivers/infiniband/hw/hfi1/tid_rdma.c     |  4
-rw-r--r--  drivers/infiniband/hw/hfi1/ud.c           |  4
-rw-r--r--  drivers/infiniband/hw/hfi1/user_sdma.c    | 12
-rw-r--r--  drivers/infiniband/hw/hfi1/user_sdma.h    |  1
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c        | 14
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.h        |  1
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs_txreq.c  |  2
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs_txreq.h  |  3
17 files changed, 174 insertions(+), 62 deletions(-)
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index 14227725521c..c0016648804c 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -139,9 +139,11 @@ int efa_com_destroy_qp(struct efa_com_dev *edev,
                               sizeof(qp_cmd),
                               (struct efa_admin_acq_entry *)&cmd_completion,
                               sizeof(cmd_completion));
-       if (err)
+       if (err) {
                ibdev_err(edev->efa_dev, "Failed to destroy qp-%u [%d]\n",
                          qp_cmd.qp_handle, err);
+               return err;
+       }
 
        return 0;
 }
@@ -199,9 +201,11 @@ int efa_com_destroy_cq(struct efa_com_dev *edev,
                               (struct efa_admin_acq_entry *)&destroy_resp,
                               sizeof(destroy_resp));
 
-       if (err)
+       if (err) {
                ibdev_err(edev->efa_dev, "Failed to destroy CQ-%u [%d]\n",
                          params->cq_idx, err);
+               return err;
+       }
 
        return 0;
 }
@@ -273,10 +277,12 @@ int efa_com_dereg_mr(struct efa_com_dev *edev,
                               sizeof(mr_cmd),
                               (struct efa_admin_acq_entry *)&cmd_completion,
                               sizeof(cmd_completion));
-       if (err)
+       if (err) {
                ibdev_err(edev->efa_dev,
                          "Failed to de-register mr(lkey-%u) [%d]\n",
                          mr_cmd.l_key, err);
+               return err;
+       }
 
        return 0;
 }
@@ -327,9 +333,11 @@ int efa_com_destroy_ah(struct efa_com_dev *edev,
                               sizeof(ah_cmd),
                               (struct efa_admin_acq_entry *)&cmd_completion,
                               sizeof(cmd_completion));
-       if (err)
+       if (err) {
                ibdev_err(edev->efa_dev, "Failed to destroy ah-%d pd-%d [%d]\n",
                          ah_cmd.ah, ah_cmd.pd, err);
+               return err;
+       }
 
        return 0;
 }
@@ -387,10 +395,12 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
                               get_resp,
                               sizeof(*get_resp));
 
-       if (err)
+       if (err) {
                ibdev_err(edev->efa_dev,
                          "Failed to submit get_feature command %d [%d]\n",
                          feature_id, err);
+               return err;
+       }
 
        return 0;
 }
@@ -534,10 +544,12 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,
                               (struct efa_admin_acq_entry *)set_resp,
                               sizeof(*set_resp));
 
-       if (err)
+       if (err) {
                ibdev_err(edev->efa_dev,
                          "Failed to submit set_feature command %d error: %d\n",
                          feature_id, err);
+               return err;
+       }
 
        return 0;
 }
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 0fea5d63fdbe..fb6115244d4c 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -204,6 +204,7 @@ static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
                             void *obj, u64 address, u64 length, u8 mmap_flag)
 {
        struct efa_mmap_entry *entry;
+       u32 next_mmap_page;
        int err;
 
        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
@@ -216,15 +217,19 @@ static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
        entry->mmap_flag = mmap_flag;
 
        xa_lock(&ucontext->mmap_xa);
+       if (check_add_overflow(ucontext->mmap_xa_page,
+                              (u32)(length >> PAGE_SHIFT),
+                              &next_mmap_page))
+               goto err_unlock;
+
        entry->mmap_page = ucontext->mmap_xa_page;
-       ucontext->mmap_xa_page += DIV_ROUND_UP(length, PAGE_SIZE);
+       ucontext->mmap_xa_page = next_mmap_page;
        err = __xa_insert(&ucontext->mmap_xa, entry->mmap_page, entry,
                          GFP_KERNEL);
+       if (err)
+               goto err_unlock;
+
        xa_unlock(&ucontext->mmap_xa);
-       if (err) {
-               kfree(entry);
-               return EFA_MMAP_INVALID;
-       }
 
        ibdev_dbg(
                &dev->ibdev,
@@ -232,6 +237,12 @@ static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
                entry->obj, entry->address, entry->length, get_mmap_key(entry));
 
        return get_mmap_key(entry);
+
+err_unlock:
+       xa_unlock(&ucontext->mmap_xa);
+       kfree(entry);
+       return EFA_MMAP_INVALID;
+
 }
 
 int efa_query_device(struct ib_device *ibdev,
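The mmap_entry_insert() fix above replaces the unchecked `ucontext->mmap_xa_page += DIV_ROUND_UP(length, PAGE_SIZE)` with check_add_overflow(), so an oversized length can no longer wrap the 32-bit page counter and alias an existing mmap key, and both failure paths now unwind through a single err_unlock label. A minimal userspace sketch of the same guard, built on the __builtin_add_overflow() primitive that the kernel's check_add_overflow() wraps (advance_page_counter and its test values are illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

/* Advance a 32-bit page counter, refusing updates that would wrap. */
static int advance_page_counter(uint32_t *counter, uint64_t length,
                                uint32_t page_shift)
{
        uint32_t next;

        /* __builtin_add_overflow() returns true when the sum wraps */
        if (__builtin_add_overflow(*counter, (uint32_t)(length >> page_shift),
                                   &next))
                return -1;      /* reject: caller unwinds, as err_unlock does */
        *counter = next;
        return 0;
}

int main(void)
{
        uint32_t counter = 0xffffff00;

        printf("small: %d\n", advance_page_counter(&counter, 16 << 12, 12));
        printf("huge:  %d\n", advance_page_counter(&counter, (uint64_t)-4096, 12));
        return 0;
}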
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 4221a99ee7f4..d5b643a1d9fd 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -14032,6 +14032,19 @@ static void init_kdeth_qp(struct hfi1_devdata *dd)
 }
 
 /**
+ * hfi1_get_qp_map
+ * @dd: device data
+ * @idx: index to read
+ */
+u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
+{
+       u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
+
+       reg >>= (idx % 8) * 8;
+       return reg;
+}
+
+/**
  * init_qpmap_table
  * @dd - device data
  * @first_ctxt - first context
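hfi1_get_qp_map() isolates one byte of the RCV_QP_MAP_TABLE CSR array: idx / 8 selects the 64-bit register, (idx % 8) * 8 is the shift down to the wanted byte, and the u8 return type truncates away the rest. A standalone illustration of that indexing (the register contents here are fabricated test data, not real CSR values):

#include <stdint.h>
#include <stdio.h>

/* Extract byte 'idx' from an array of 64-bit registers, mirroring
 * read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8) >> ((idx % 8) * 8). */
static uint8_t get_map_entry(const uint64_t *regs, uint8_t idx)
{
        uint64_t reg = regs[idx / 8];

        return (uint8_t)(reg >> ((idx % 8) * 8));
}

int main(void)
{
        /* two fake registers: bytes 0..15 hold the values 0..15 */
        uint64_t regs[2] = { 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL };

        printf("%u %u\n", get_map_entry(regs, 3), get_map_entry(regs, 10)); /* 3 10 */
        return 0;
}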
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 4e6c3556ec48..b76cf81f927f 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -1445,6 +1445,7 @@ void clear_all_interrupts(struct hfi1_devdata *dd);
 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr);
 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr);
 void reset_interrupts(struct hfi1_devdata *dd);
+u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx);
 
 /*
  * Interrupt source table.
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index 3fd3315d0fb0..93613e5def9b 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -153,6 +153,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
        char *dash;
        unsigned long range_start, range_end, i;
        bool remove = false;
+       unsigned long bound = 1U << BITS_PER_BYTE;
 
        end = strchr(ptr, ',');
        if (end)
@@ -178,6 +179,10 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
                                  BITS_PER_BYTE);
                        break;
                }
+               /* Check the inputs */
+               if (range_start >= bound || range_end >= bound)
+                       break;
+
                for (i = range_start; i <= range_end; i++) {
                        if (remove)
                                clear_bit(i, fault->opcodes);
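The added bounds check in fault_opcodes_write() rejects any parsed range whose endpoints reach 1 << BITS_PER_BYTE (256) before the set_bit()/clear_bit() loop runs; without it, input such as "0-1000" would index past the 256-bit opcodes bitmap. A plain-C sketch of the same validate-then-mark pattern (the bitmap helpers are simplified stand-ins for the kernel's, and the sample ranges are illustrative):

#include <stdint.h>
#include <stdio.h>

#define OPCODE_BITS 256
static uint64_t opcodes[OPCODE_BITS / 64];

static void set_opcode(unsigned long i)
{
        opcodes[i / 64] |= 1ULL << (i % 64);
}

/* Returns -1 and touches nothing if the range would overflow the bitmap. */
static int mark_opcode_range(unsigned long start, unsigned long end)
{
        if (start >= OPCODE_BITS || end >= OPCODE_BITS || start > end)
                return -1;
        for (unsigned long i = start; i <= end; i++)
                set_opcode(i);
        return 0;
}

int main(void)
{
        printf("ok:  %d\n", mark_opcode_range(0x60, 0x7f)); /* in range */
        printf("bad: %d\n", mark_opcode_range(0, 1000));    /* rejected */
        return 0;
}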
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index b458c218842b..fa45350a9a1d 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -539,6 +539,37 @@ static inline void hfi1_16B_set_qpn(struct opa_16b_mgmt *mgmt,
        mgmt->src_qpn = cpu_to_be32(src_qp & OPA_16B_MGMT_QPN_MASK);
 }
 
+/**
+ * hfi1_get_rc_ohdr - get extended header
+ * @opah - the opa header
+ */
+static inline struct ib_other_headers *
+hfi1_get_rc_ohdr(struct hfi1_opa_header *opah)
+{
+       struct ib_other_headers *ohdr;
+       struct ib_header *hdr = NULL;
+       struct hfi1_16b_header *hdr_16b = NULL;
+
+       /* Find out where the BTH is */
+       if (opah->hdr_type == HFI1_PKT_TYPE_9B) {
+               hdr = &opah->ibh;
+               if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
+                       ohdr = &hdr->u.oth;
+               else
+                       ohdr = &hdr->u.l.oth;
+       } else {
+               u8 l4;
+
+               hdr_16b = &opah->opah;
+               l4 = hfi1_16B_get_l4(hdr_16b);
+               if (l4 == OPA_16B_L4_IB_LOCAL)
+                       ohdr = &hdr_16b->u.oth;
+               else
+                       ohdr = &hdr_16b->u.l.oth;
+       }
+       return ohdr;
+}
+
 struct rvt_sge_state;
 
 /*
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 16ba9d52e1b9..4e5c2d1b8cfa 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -952,6 +952,22 @@ void sc_disable(struct send_context *sc)
                }
        }
        spin_unlock(&sc->release_lock);
+
+       write_seqlock(&sc->waitlock);
+       while (!list_empty(&sc->piowait)) {
+               struct iowait *wait;
+               struct rvt_qp *qp;
+               struct hfi1_qp_priv *priv;
+
+               wait = list_first_entry(&sc->piowait, struct iowait, list);
+               qp = iowait_to_qp(wait);
+               priv = qp->priv;
+               list_del_init(&priv->s_iowait.list);
+               priv->s_iowait.lock = NULL;
+               hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
+       }
+       write_sequnlock(&sc->waitlock);
+
        spin_unlock_irq(&sc->alloc_lock);
 }
 
@@ -1427,7 +1443,8 @@ void sc_stop(struct send_context *sc, int flag)
  * @cb: optional callback to call when the buffer is finished sending
  * @arg: argument for cb
  *
- * Return a pointer to a PIO buffer if successful, NULL if not enough room.
+ * Return a pointer to a PIO buffer, NULL if not enough room, -ECOMM
+ * when the link is down.
  */
 struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
                                pio_release_cb cb, void *arg)
@@ -1443,7 +1460,7 @@ struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
        spin_lock_irqsave(&sc->alloc_lock, flags);
        if (!(sc->flags & SCF_ENABLED)) {
                spin_unlock_irqrestore(&sc->alloc_lock, flags);
-               goto done;
+               return ERR_PTR(-ECOMM);
        }
 
 retry:
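With this change sc_buffer_alloc() has three outcomes instead of two: a buffer pointer on success, NULL when the context is merely out of room, and ERR_PTR(-ECOMM) when the send context is disabled because the link went down. That is why callers throughout this series (rc.c, ud.c, and verbs.c below) move from a plain NULL test to IS_ERR_OR_NULL(). A userspace re-derivation of the encoding trick, assuming the kernel convention that the last 4095 pointer values are reserved for errno codes:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline int IS_ERR_OR_NULL(const void *ptr)
{
        return !ptr || IS_ERR(ptr);
}

int main(void)
{
        void *busy = NULL;            /* no room: try again later */
        void *down = ERR_PTR(-ECOMM); /* link down: fail the send */

        if (IS_ERR_OR_NULL(busy))
                printf("busy -> requeue (err=%ld)\n", IS_ERR(busy) ? PTR_ERR(busy) : 0L);
        if (IS_ERR_OR_NULL(down))
                printf("down -> abort (err=%ld)\n", IS_ERR(down) ? PTR_ERR(down) : 0L);
        return 0;
}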
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index a922edcf23d6..7c8cfb149da0 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -1432,7 +1432,7 @@ void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
        pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
                         sc_to_vlt(ppd->dd, sc5), plen);
        pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
-       if (!pbuf) {
+       if (IS_ERR_OR_NULL(pbuf)) {
                /*
                 * We have no room to send at the moment. Pass
                 * responsibility for sending the ACK to the send engine
@@ -1701,6 +1701,36 @@ static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
        }
 }
 
+/**
+ * hfi1_rc_verbs_aborted - handle abort status
+ * @qp: the QP
+ * @opah: the opa header
+ *
+ * This code modifies both the ACK bit in BTH[2]
+ * and the s_flags to go into send one mode.
+ *
+ * This serves to throttle the send engine to only
+ * send a single packet in the likely case the
+ * link has gone down.
+ */
+void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah)
+{
+       struct ib_other_headers *ohdr = hfi1_get_rc_ohdr(opah);
+       u8 opcode = ib_bth_get_opcode(ohdr);
+       u32 psn;
+
+       /* ignore responses */
+       if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+            opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
+           opcode == TID_OP(READ_RESP) ||
+           opcode == TID_OP(WRITE_RESP))
+               return;
+
+       psn = ib_bth_get_psn(ohdr) | IB_BTH_REQ_ACK;
+       ohdr->bth[2] = cpu_to_be32(psn);
+       qp->s_flags |= RVT_S_SEND_ONE;
+}
+
 /*
  * This should be called with the QP s_lock held and interrupts disabled.
  */
@@ -1709,8 +1739,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
        struct ib_other_headers *ohdr;
        struct hfi1_qp_priv *priv = qp->priv;
        struct rvt_swqe *wqe;
-       struct ib_header *hdr = NULL;
-       struct hfi1_16b_header *hdr_16b = NULL;
        u32 opcode, head, tail;
        u32 psn;
        struct tid_rdma_request *req;
@@ -1719,24 +1747,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
        if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
                return;
 
-       /* Find out where the BTH is */
-       if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
-               hdr = &opah->ibh;
-               if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
-                       ohdr = &hdr->u.oth;
-               else
-                       ohdr = &hdr->u.l.oth;
-       } else {
-               u8 l4;
-
-               hdr_16b = &opah->opah;
-               l4 = hfi1_16B_get_l4(hdr_16b);
-               if (l4 == OPA_16B_L4_IB_LOCAL)
-                       ohdr = &hdr_16b->u.oth;
-               else
-                       ohdr = &hdr_16b->u.l.oth;
-       }
-
+       ohdr = hfi1_get_rc_ohdr(opah);
        opcode = ib_bth_get_opcode(ohdr);
        if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
             opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index b0110728f541..28b66bd70b74 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -405,19 +405,33 @@ static void sdma_flush(struct sdma_engine *sde)
        struct sdma_txreq *txp, *txp_next;
        LIST_HEAD(flushlist);
        unsigned long flags;
+       uint seq;
 
        /* flush from head to tail */
        sdma_flush_descq(sde);
        spin_lock_irqsave(&sde->flushlist_lock, flags);
        /* copy flush list */
-       list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
-               list_del_init(&txp->list);
-               list_add_tail(&txp->list, &flushlist);
-       }
+       list_splice_init(&sde->flushlist, &flushlist);
        spin_unlock_irqrestore(&sde->flushlist_lock, flags);
        /* flush from flush list */
        list_for_each_entry_safe(txp, txp_next, &flushlist, list)
                complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
+       /* wakeup QPs orphaned on the dmawait list */
+       do {
+               struct iowait *w, *nw;
+
+               seq = read_seqbegin(&sde->waitlock);
+               if (!list_empty(&sde->dmawait)) {
+                       write_seqlock(&sde->waitlock);
+                       list_for_each_entry_safe(w, nw, &sde->dmawait, list) {
+                               if (w->wakeup) {
+                                       w->wakeup(w, SDMA_AVAIL_REASON);
+                                       list_del_init(&w->list);
+                               }
+                       }
+                       write_sequnlock(&sde->waitlock);
+               }
+       } while (read_seqretry(&sde->waitlock, seq));
 }
 
 /*
@@ -2413,7 +2427,7 @@ unlock_noconn:
        list_add_tail(&tx->list, &sde->flushlist);
        spin_unlock(&sde->flushlist_lock);
        iowait_inc_wait_count(wait, tx->num_desc);
-       schedule_work(&sde->flush_worker);
+       queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
        ret = -ECOMM;
        goto unlock;
 nodesc:
@@ -2511,7 +2525,7 @@ unlock_noconn:
                iowait_inc_wait_count(wait, tx->num_desc);
        }
        spin_unlock(&sde->flushlist_lock);
-       schedule_work(&sde->flush_worker);
+       queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
        ret = -ECOMM;
        goto update_tail;
 nodesc:
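The new wakeup loop in sdma_flush() uses the optimistic seqlock idiom: sample the dmawait list under read_seqbegin(), take the write side only when there is work, and loop while read_seqretry() reports that a writer (including this function's own write section) changed the sequence. A stripped-down kernel-C sketch of that idiom (mylock, mylist, and drain_one are placeholder names, not driver symbols):

#include <linux/seqlock.h>
#include <linux/list.h>

static DEFINE_SEQLOCK(mylock);
static LIST_HEAD(mylist);

/* Drain 'mylist' with the read-then-upgrade pattern from sdma_flush().
 * The cheap read side avoids the exclusive lock when the list is already
 * empty; our own write section bumps the sequence, so the loop makes one
 * extra confirming pass and exits once the list stays empty. */
static void drain_waiters(void (*drain_one)(struct list_head *entry))
{
        unsigned int seq;

        do {
                seq = read_seqbegin(&mylock);
                if (!list_empty(&mylist)) {
                        struct list_head *pos, *tmp;

                        write_seqlock(&mylock);
                        list_for_each_safe(pos, tmp, &mylist) {
                                list_del_init(pos);
                                drain_one(pos);
                        }
                        write_sequnlock(&mylock);
                }
        } while (read_seqretry(&mylock, seq));
}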
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 6fb93032fbef..aa9c8d3ef87b 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -312,9 +312,7 @@ static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi,
        if (qp->ibqp.qp_num == 0)
                ctxt = 0;
        else
-               ctxt = ((qp->ibqp.qp_num >> dd->qos_shift) %
-                       (dd->n_krcv_queues - 1)) + 1;
-
+               ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift);
        return dd->rcd[ctxt];
 }
 
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index f88ad425664a..4cb0fce5c096 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -683,7 +683,7 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
        pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
        if (ctxt) {
                pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
-               if (pbuf) {
+               if (!IS_ERR_OR_NULL(pbuf)) {
                        trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
                        ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
                                                 &hdr, hwords);
@@ -738,7 +738,7 @@ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
        pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
        if (ctxt) {
                pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
-               if (pbuf) {
+               if (!IS_ERR_OR_NULL(pbuf)) {
                        trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
                        ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
                                                 &hdr, hwords);
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 8bfbc6d7ea34..fd754a16475a 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -130,20 +130,16 @@ static int defer_packet_queue(
 {
        struct hfi1_user_sdma_pkt_q *pq =
                container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
-       struct user_sdma_txreq *tx =
-               container_of(txreq, struct user_sdma_txreq, txreq);
 
-       if (sdma_progress(sde, seq, txreq)) {
-               if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
-                       goto eagain;
-       }
+       write_seqlock(&sde->waitlock);
+       if (sdma_progress(sde, seq, txreq))
+               goto eagain;
        /*
         * We are assuming that if the list is enqueued somewhere, it
         * is to the dmawait list since that is the only place where
         * it is supposed to be enqueued.
         */
        xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
-       write_seqlock(&sde->waitlock);
        if (list_empty(&pq->busy.list)) {
                iowait_get_priority(&pq->busy);
                iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
@@ -151,6 +147,7 @@ static int defer_packet_queue(
        write_sequnlock(&sde->waitlock);
        return -EBUSY;
 eagain:
+       write_sequnlock(&sde->waitlock);
        return -EAGAIN;
 }
 
@@ -804,7 +801,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
 
        tx->flags = 0;
        tx->req = req;
-       tx->busycount = 0;
        INIT_LIST_HEAD(&tx->list);
 
        /*
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index 14dfd757dafd..4d8510b0fc38 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -245,7 +245,6 @@ struct user_sdma_txreq {
        struct list_head list;
        struct user_sdma_request *req;
        u16 flags;
-       unsigned int busycount;
        u16 seqnum;
 };
 
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index a2b26a635baf..bad3229bad37 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -638,6 +638,8 @@ static void verbs_sdma_complete(
                struct hfi1_opa_header *hdr;
 
                hdr = &tx->phdr.hdr;
+               if (unlikely(status == SDMA_TXREQ_S_ABORTED))
+                       hfi1_rc_verbs_aborted(qp, hdr);
                hfi1_rc_send_complete(qp, hdr);
        }
        spin_unlock(&qp->s_lock);
@@ -1037,10 +1039,10 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
        if (cb)
                iowait_pio_inc(&priv->s_iowait);
        pbuf = sc_buffer_alloc(sc, plen, cb, qp);
-       if (unlikely(!pbuf)) {
+       if (unlikely(IS_ERR_OR_NULL(pbuf))) {
                if (cb)
                        verbs_pio_complete(qp, 0);
-               if (ppd->host_link_state != HLS_UP_ACTIVE) {
+               if (IS_ERR(pbuf)) {
                        /*
                         * If we have filled the PIO buffers to capacity and are
                         * not in an active state this request is not going to
@@ -1095,15 +1097,15 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                                &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
 
 pio_bail:
+       spin_lock_irqsave(&qp->s_lock, flags);
        if (qp->s_wqe) {
-               spin_lock_irqsave(&qp->s_lock, flags);
                rvt_send_complete(qp, qp->s_wqe, wc_status);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
        } else if (qp->ibqp.qp_type == IB_QPT_RC) {
-               spin_lock_irqsave(&qp->s_lock, flags);
+               if (unlikely(wc_status == IB_WC_GENERAL_ERR))
+                       hfi1_rc_verbs_aborted(qp, &ps->s_txreq->phdr.hdr);
                hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
        }
+       spin_unlock_irqrestore(&qp->s_lock, flags);
 
        ret = 0;
 
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 7ecb8ed4a1d9..ae9582ddbc8f 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -416,6 +416,7 @@ void hfi1_rc_hdrerr(
 
 u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
 
+void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah);
 void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah);
 
 void hfi1_ud_rcv(struct hfi1_packet *packet);
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index c4ab2d5b4502..8f766dd3f61c 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -100,7 +100,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                struct hfi1_qp_priv *priv;
 
-               tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
+               tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
                if (tx)
                        goto out;
                priv = qp->priv;
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index b002e96eb335..bfa6e081cb56 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -72,6 +72,7 @@ struct hfi1_ibdev;
 struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
                                struct rvt_qp *qp);
 
+#define VERBS_TXREQ_GFP (GFP_ATOMIC | __GFP_NOWARN)
 static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
                                            struct rvt_qp *qp)
        __must_hold(&qp->slock)
@@ -79,7 +80,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
        struct verbs_txreq *tx;
        struct hfi1_qp_priv *priv = qp->priv;
 
-       tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
+       tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
        if (unlikely(!tx)) {
                /* call slow path to get the lock */
                tx = __get_txreq(dev, qp);
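Routing both txreq allocations through VERBS_TXREQ_GFP adds __GFP_NOWARN to GFP_ATOMIC, which suppresses the kernel's allocation-failure backtrace for a failure the driver already tolerates: as the verbs_txreq.c hunk shows, get_txreq() falls back to the __get_txreq() slow path when the cache allocation fails, so the warning would be noise. A hedged sketch of the same fast-path/slow-path shape for an arbitrary cache (all my_* names are illustrative, not driver symbols):

#include <linux/slab.h>

#define MYDRV_TXREQ_GFP (GFP_ATOMIC | __GFP_NOWARN)

struct my_txreq;
struct my_txreq *my_get_txreq_slowpath(struct kmem_cache *cache);

/* Fast path: atomic, warning-free allocation; the caller has a slow
 * path, so a transient failure here is expected and handled. */
static struct my_txreq *my_get_txreq(struct kmem_cache *cache)
{
        struct my_txreq *tx = kmem_cache_alloc(cache, MYDRV_TXREQ_GFP);

        if (unlikely(!tx))
                tx = my_get_txreq_slowpath(cache); /* may sleep or queue */
        return tx;
}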