author     Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/infiniband/hw/qib
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c

Diffstat (limited to 'drivers/infiniband/hw/qib'):
 27 files changed, 768 insertions(+), 261 deletions(-)
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
index 7c03a70c55a2..8349f9c5064c 100644
--- a/drivers/infiniband/hw/qib/Kconfig
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -1,6 +1,6 @@
 config INFINIBAND_QIB
 	tristate "QLogic PCIe HCA support"
-	depends on 64BIT && NET
+	depends on 64BIT
 	---help---
 	This is a low-level driver for QLogic PCIe QLE InfiniBand host
 	channel adapters. This driver does not support the QLogic
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 61de0654820e..769a1d9da4b7 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -653,7 +653,7 @@ struct diag_observer_list_elt;
 
 /* device data struct now contains only "general per-device" info.
  * fields related to a physical IB port are in a qib_pportdata struct,
- * described above) while fields only used by a particualr chip-type are in
+ * described above) while fields only used by a particular chip-type are in
  * a qib_chipdata struct, whose contents are opaque to this file.
  */
 struct qib_devdata {
@@ -766,7 +766,7 @@ struct qib_devdata {
 	void (*f_sdma_hw_start_up)(struct qib_pportdata *);
 	void (*f_sdma_init_early)(struct qib_pportdata *);
 	void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
-	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
+	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
 	u32 (*f_hdrqempty)(struct qib_ctxtdata *);
 	u64 (*f_portcntr)(struct qib_pportdata *, u32);
 	u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
@@ -1406,7 +1406,7 @@ extern struct mutex qib_mutex;
  */
 #define qib_early_err(dev, fmt, ...) \
 	do { \
-		dev_info(dev, KERN_ERR QIB_DRV_NAME ": " fmt, ##__VA_ARGS__); \
+		dev_err(dev, fmt, ##__VA_ARGS__); \
 	} while (0)
 
 #define qib_dev_err(dd, fmt, ...) \
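
The qib_early_err() hunk deserves a gloss: dev_info() always logs at KERN_INFO, so splicing KERN_ERR into its format string never raised the level; dev_err() applies the error level itself and adds the driver prefix. A minimal sketch of the before/after (hypothetical message, not from this patch):

	/* sketch only: dev_info() vs dev_err() log levels */
	#include <linux/device.h>

	static void report_probe_failure(struct device *dev, int err)
	{
		/* old pattern: still logs at "info" level; the KERN_ERR
		 * bytes just end up embedded in the message text */
		dev_info(dev, KERN_ERR "probe failed: %d\n", err);

		/* new pattern: logs at "err" level, device prefix added
		 * by the driver core */
		dev_err(dev, "probe failed: %d\n", err);
	}
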
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
index a86cbf880f98..5246aa486bbe 100644
--- a/drivers/infiniband/hw/qib/qib_cq.c
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -100,7 +100,8 @@ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
 	wc->head = next;
 
 	if (cq->notify == IB_CQ_NEXT_COMP ||
-	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
+	    (cq->notify == IB_CQ_SOLICITED &&
+	     (solicited || entry->status != IB_WC_SUCCESS))) {
 		cq->notify = IB_CQ_NONE;
 		cq->triggered++;
 		/*
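
The qib_cq.c change widens the completion-event trigger: with IB_CQ_SOLICITED armed, an event now fires for error completions too, not only for completions of solicited requests. A standalone sketch of the gating predicate (names hypothetical):

	/* sketch: an armed "solicited" CQ must also signal errors */
	enum notify_state { NOTIFY_NONE, NOTIFY_SOLICITED, NOTIFY_NEXT_COMP };

	static int should_signal(enum notify_state armed, int solicited,
				 int is_error)
	{
		return armed == NOTIFY_NEXT_COMP ||
		       (armed == NOTIFY_SOLICITED && (solicited || is_error));
	}
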
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 05dcf0d9a7d3..204c4dd9dce0 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -136,7 +136,8 @@ static const struct file_operations diag_file_ops = {
 	.write = qib_diag_write,
 	.read = qib_diag_read,
 	.open = qib_diag_open,
-	.release = qib_diag_release
+	.release = qib_diag_release,
+	.llseek = default_llseek,
 };
 
 static atomic_t diagpkt_count = ATOMIC_INIT(0);
@@ -149,6 +150,7 @@ static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
 static const struct file_operations diagpkt_file_ops = {
 	.owner = THIS_MODULE,
 	.write = qib_diagpkt_write,
+	.llseek = noop_llseek,
 };
 
 int qib_diag_add(struct qib_devdata *dd)
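
Both qib_diag hunks (and the similar ones in qib_file_ops.c and qib_fs.c below) come from the BKL-removal work that made every file_operations spell out its llseek behavior: default_llseek where the file offset is meaningful, noop_llseek where it is not. A minimal sketch of the idiom (hypothetical handler):

	#include <linux/fs.h>

	static ssize_t demo_write(struct file *fp, const char __user *buf,
				  size_t count, loff_t *off)
	{
		return count;	/* pretend all bytes were consumed */
	}

	/* the offset is irrelevant for this interface, so seeks "succeed"
	 * without moving anything: noop_llseek */
	static const struct file_operations demo_fops = {
		.owner = THIS_MODULE,
		.write = demo_write,
		.llseek = noop_llseek,
	};
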
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 9cd193603fb1..23e584f4c36c 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -71,6 +71,11 @@ MODULE_DESCRIPTION("QLogic IB driver");
  */
 #define QIB_PIO_MAXIBHDR 128
 
+/*
+ * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
+ */
+#define QIB_MAX_PKT_RECV 64
+
 struct qlogic_ib_stats qib_stats;
 
 const char *qib_get_unit_name(int unit)
@@ -284,14 +289,147 @@ static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
  * Returns 1 if error was a CRC, else 0.
  * Needed for some chip's synthesized error counters.
  */
-static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
-			  u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
-			  struct qib_message_header *hdr)
+static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
+			  u32 ctxt, u32 eflags, u32 l, u32 etail,
+			  __le32 *rhf_addr, struct qib_message_header *rhdr)
 {
 	u32 ret = 0;
 
 	if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
 		ret = 1;
+	else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
+		/* For TIDERR and RC QPs preemptively schedule a NAK */
+		struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
+		struct qib_other_headers *ohdr = NULL;
+		struct qib_ibport *ibp = &ppd->ibport_data;
+		struct qib_qp *qp = NULL;
+		u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
+		u16 lid = be16_to_cpu(hdr->lrh[1]);
+		int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+		u32 qp_num;
+		u32 opcode;
+		u32 psn;
+		int diff;
+		unsigned long flags;
+
+		/* Sanity check packet */
+		if (tlen < 24)
+			goto drop;
+
+		if (lid < QIB_MULTICAST_LID_BASE) {
+			lid &= ~((1 << ppd->lmc) - 1);
+			if (unlikely(lid != ppd->lid))
+				goto drop;
+		}
+
+		/* Check for GRH */
+		if (lnh == QIB_LRH_BTH)
+			ohdr = &hdr->u.oth;
+		else if (lnh == QIB_LRH_GRH) {
+			u32 vtf;
+
+			ohdr = &hdr->u.l.oth;
+			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+				goto drop;
+			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+				goto drop;
+		} else
+			goto drop;
+
+		/* Get opcode and PSN from packet */
+		opcode = be32_to_cpu(ohdr->bth[0]);
+		opcode >>= 24;
+		psn = be32_to_cpu(ohdr->bth[2]);
+
+		/* Get the destination QP number. */
+		qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+		if (qp_num != QIB_MULTICAST_QPN) {
+			int ruc_res;
+			qp = qib_lookup_qpn(ibp, qp_num);
+			if (!qp)
+				goto drop;
+
+			/*
+			 * Handle only RC QPs - for other QP types drop error
+			 * packet.
+			 */
+			spin_lock(&qp->r_lock);
+
+			/* Check for valid receive state. */
+			if (!(ib_qib_state_ops[qp->state] &
+			      QIB_PROCESS_RECV_OK)) {
+				ibp->n_pkt_drops++;
+				goto unlock;
+			}
+
+			switch (qp->ibqp.qp_type) {
+			case IB_QPT_RC:
+				spin_lock_irqsave(&qp->s_lock, flags);
+				ruc_res =
+					qib_ruc_check_hdr(
+						ibp, hdr,
+						lnh == QIB_LRH_GRH,
+						qp,
+						be32_to_cpu(ohdr->bth[0]));
+				if (ruc_res) {
+					spin_unlock_irqrestore(&qp->s_lock,
+							       flags);
+					goto unlock;
+				}
+				spin_unlock_irqrestore(&qp->s_lock, flags);
+
+				/* Only deal with RDMA Writes for now */
+				if (opcode <
+				    IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
+					diff = qib_cmp24(psn, qp->r_psn);
+					if (!qp->r_nak_state && diff >= 0) {
+						ibp->n_rc_seqnak++;
+						qp->r_nak_state =
+							IB_NAK_PSN_ERROR;
+						/* Use the expected PSN. */
+						qp->r_ack_psn = qp->r_psn;
+						/*
+						 * Wait to send the sequence
+						 * NAK until all packets
+						 * in the receive queue have
+						 * been processed.
+						 * Otherwise, we end up
+						 * propagating congestion.
+						 */
+						if (list_empty(&qp->rspwait)) {
+							qp->r_flags |=
+								QIB_R_RSP_NAK;
+							atomic_inc(
+								&qp->refcount);
+							list_add_tail(
+							 &qp->rspwait,
+							 &rcd->qp_wait_list);
+						}
+					} /* Out of sequence NAK */
+				} /* QP Request NAKs */
+				break;
+			case IB_QPT_SMI:
+			case IB_QPT_GSI:
+			case IB_QPT_UD:
+			case IB_QPT_UC:
+			default:
+				/* For now don't handle any other QP types */
+				break;
+			}
+
+unlock:
+			spin_unlock(&qp->r_lock);
+			/*
+			 * Notify qib_destroy_qp() if it is waiting
+			 * for us to finish.
+			 */
+			if (atomic_dec_and_test(&qp->refcount))
+				wake_up(&qp->wait);
+		} /* Unicast QP */
+	} /* Valid packet with TIDErr */
+
+drop:
 	return ret;
 }
 
@@ -335,7 +473,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 		smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
 	}
 
-	for (last = 0, i = 1; !last && i <= 64; i += !last) {
+	for (last = 0, i = 1; !last; i += !last) {
 		hdr = dd->f_get_msgheader(dd, rhf_addr);
 		eflags = qib_hdrget_err_flags(rhf_addr);
 		etype = qib_hdrget_rcv_type(rhf_addr);
@@ -371,7 +509,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 		 * packets; only qibhdrerr should be set.
 		 */
 		if (unlikely(eflags))
-			crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
+			crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
 					       etail, rhf_addr, hdr);
 		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
 			qib_ib_rcv(rcd, hdr, ebuf, tlen);
@@ -384,6 +522,9 @@ move_along:
 		l += rsize;
 		if (l >= maxcnt)
 			l = 0;
+		if (i == QIB_MAX_PKT_RECV)
+			last = 1;
+
 		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
 		if (dd->flags & QIB_NODMA_RTAIL) {
 			u32 seq = qib_hdrget_seq(rhf_addr);
@@ -402,7 +543,7 @@
 		 */
 		lval = l;
 		if (!last && !(i & 0xf)) {
-			dd->f_update_usrhead(rcd, lval, updegr, etail);
+			dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 			updegr = 0;
 		}
 	}
@@ -444,7 +585,7 @@ bail:
 	 * if no packets were processed.
 	 */
 	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
-	dd->f_update_usrhead(rcd, lval, updegr, etail);
+	dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 	return crcs;
 }
 
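
Two things happen in qib_kreceive(): the hard-coded `i <= 64` loop bound becomes the named budget QIB_MAX_PKT_RECV, checked after the packet is counted, and the packet count i is now passed through f_update_usrhead() so the chip-specific code can tune interrupt mitigation. A condensed sketch of the budget pattern (hypothetical ring state, not the driver's actual structures):

	#define RX_BUDGET 64

	/* sketch: process at most RX_BUDGET packets per interrupt so one
	 * busy receive context cannot monopolize the CPU; assumes at
	 * least one packet is pending */
	static unsigned drain_ring(unsigned pending)
	{
		unsigned i, last = 0;

		for (i = 1; !last; i += !last) {
			/* ... handle one packet here ... */
			if (--pending == 0)
				last = 1;	/* ring drained */
			if (i == RX_BUDGET)
				last = 1;	/* budget hit, return early */
		}
		return i;	/* count feeds the head/timeout update */
	}
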
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 6b11645edf35..406fca50d036 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -63,7 +63,8 @@ static const struct file_operations qib_file_ops = {
 	.open = qib_open,
 	.release = qib_close,
 	.poll = qib_poll,
-	.mmap = qib_mmapf
+	.mmap = qib_mmapf,
+	.llseek = noop_llseek,
 };
 
 /*
@@ -1378,17 +1379,17 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
 	/* find device (with ACTIVE ports) with fewest ctxts in use */
 	for (ndev = 0; ndev < devmax; ndev++) {
 		struct qib_devdata *dd = qib_lookup(ndev);
-		unsigned cused = 0, cfree = 0;
+		unsigned cused = 0, cfree = 0, pusable = 0;
 		if (!dd)
 			continue;
 		if (port && port <= dd->num_pports &&
 		    usable(dd->pport + port - 1))
-			dusable = 1;
+			pusable = 1;
 		else
 			for (i = 0; i < dd->num_pports; i++)
 				if (usable(dd->pport + i))
-					dusable++;
-		if (!dusable)
+					pusable++;
+		if (!pusable)
 			continue;
 		for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
 		     ctxt++)
@@ -1396,7 +1397,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
 				cused++;
 			else
 				cfree++;
-		if (cfree && cused < inuse) {
+		if (pusable && cfree && cused < inuse) {
 			udd = dd;
 			inuse = cused;
 		}
@@ -1538,7 +1539,7 @@ done_chk_sdma:
 
 	/*
 	 * If process has NOT already set it's affinity, select and
-	 * reserve a processor for it, as a rendevous for all
+	 * reserve a processor for it, as a rendezvous for all
 	 * users of the driver. If they don't actually later
 	 * set affinity to this cpu, or set it to some other cpu,
 	 * it just means that sooner or later we don't recommend
@@ -1656,7 +1657,7 @@ static int qib_do_user_init(struct file *fp,
 	 * 0 to 1. So for those chips, we turn it off and then back on.
 	 * This will (very briefly) affect any other open ctxts, but the
 	 * duration is very short, and therefore isn't an issue. We
-	 * explictly set the in-memory tail copy to 0 beforehand, so we
+	 * explicitly set the in-memory tail copy to 0 beforehand, so we
 	 * don't have to wait to be sure the DMA update has happened
 	 * (chip resets head/tail to 0 on transition to enable).
 	 */
@@ -1722,7 +1723,7 @@ static int qib_close(struct inode *in, struct file *fp)
 
 	mutex_lock(&qib_mutex);
 
-	fd = (struct qib_filedata *) fp->private_data;
+	fd = fp->private_data;
 	fp->private_data = NULL;
 	rcd = fd->rcd;
 	if (!rcd) {
@@ -1808,7 +1809,7 @@ static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
 	struct qib_ctxtdata *rcd = ctxt_fp(fp);
 	struct qib_filedata *fd;
 
-	fd = (struct qib_filedata *) fp->private_data;
+	fd = fp->private_data;
 
 	info.num_active = qib_count_active_units();
 	info.unit = rcd->dd->unit;
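
The dusable-to-pusable rename in get_a_ctxt() is also a bug fix: dusable was declared outside the device loop and never reset, so one device with a usable port could make every later device look usable; pusable is per-iteration and is rechecked before a device is selected. A distilled sketch of the bug class (hypothetical types):

	#include <limits.h>

	struct cand { int usable_ports; int in_use; };

	/* sketch: per-candidate state must be reset inside the loop;
	 * hoisting it out (the old dusable) lets candidate 0's usable
	 * port leak into every later candidate's evaluation */
	static int pick(const struct cand *c, int n)
	{
		int best = -1, best_load = INT_MAX, i;

		for (i = 0; i < n; i++) {
			int pusable = c[i].usable_ports; /* fresh each pass */

			if (!pusable)
				continue;
			if (c[i].in_use < best_load) {
				best = i;
				best_load = c[i].in_use;
			}
		}
		return best;
	}
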
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 9f989c0ba9d3..df7fa251dcdc 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -58,6 +58,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
 		goto bail;
 	}
 
+	inode->i_ino = get_next_ino();
 	inode->i_mode = mode;
 	inode->i_uid = 0;
 	inode->i_gid = 0;
@@ -367,6 +368,7 @@ bail:
 static const struct file_operations flash_ops = {
 	.read = flash_read,
 	.write = flash_write,
+	.llseek = default_llseek,
 };
 
 static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
@@ -451,17 +453,14 @@ static int remove_file(struct dentry *parent, char *name)
 		goto bail;
 	}
 
-	spin_lock(&dcache_lock);
 	spin_lock(&tmp->d_lock);
 	if (!(d_unhashed(tmp) && tmp->d_inode)) {
-		dget_locked(tmp);
+		dget_dlock(tmp);
 		__d_drop(tmp);
 		spin_unlock(&tmp->d_lock);
-		spin_unlock(&dcache_lock);
 		simple_unlink(parent->d_inode, tmp);
 	} else {
 		spin_unlock(&tmp->d_lock);
-		spin_unlock(&dcache_lock);
 	}
 
 	ret = 0;
@@ -553,13 +552,13 @@ bail:
 	return ret;
 }
 
-static int qibfs_get_sb(struct file_system_type *fs_type, int flags,
-			const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags,
+			const char *dev_name, void *data)
 {
-	int ret = get_sb_single(fs_type, flags, data,
-				qibfs_fill_super, mnt);
-	if (ret >= 0)
-		qib_super = mnt->mnt_sb;
+	struct dentry *ret;
+	ret = mount_single(fs_type, flags, data, qibfs_fill_super);
+	if (!IS_ERR(ret))
+		qib_super = ret->d_sb;
 	return ret;
 }
 
@@ -601,7 +600,7 @@ int qibfs_remove(struct qib_devdata *dd)
 static struct file_system_type qibfs_fs_type = {
 	.owner = THIS_MODULE,
 	.name = "ipathfs",
-	.get_sb = qibfs_get_sb,
+	.mount = qibfs_mount,
 	.kill_sb = qibfs_kill_super,
 };
 
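
The qibfs hunks track the 2.6.37-era VFS conversion from .get_sb (an int return plus a vfsmount out-parameter) to .mount (returning the root dentry or an ERR_PTR). A minimal sketch of the new-style registration (hypothetical filesystem, trivial fill_super):

	#include <linux/fs.h>
	#include <linux/module.h>

	static int demofs_fill_super(struct super_block *sb, void *data,
				     int silent)
	{
		return 0;	/* a real fs would populate sb->s_root here */
	}

	/* errors now travel as ERR_PTR-encoded dentries instead of a
	 * separate int/vfsmount pair */
	static struct dentry *demofs_mount(struct file_system_type *fs_type,
					   int flags, const char *dev_name,
					   void *data)
	{
		return mount_single(fs_type, flags, data, demofs_fill_super);
	}

	static struct file_system_type demofs_type = {
		.owner = THIS_MODULE,
		.name = "demofs",
		.mount = demofs_mount,
		.kill_sb = kill_anon_super,
	};
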
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a5e29dbb9537..d8ca0a0b970d 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1799,7 +1799,7 @@ static int qib_6120_setup_reset(struct qib_devdata *dd)
 	/*
 	 * Keep chip from being accessed until we are ready. Use
 	 * writeq() directly, to allow the write even though QIB_PRESENT
-	 * isnt' set.
+	 * isn't set.
 	 */
 	dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
 	dd->int_counter = 0; /* so we check interrupts work again */
@@ -2074,7 +2074,7 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd)
 }
 
 static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
@@ -2171,7 +2171,7 @@ static void rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op,
 			 * Init the context registers also; if we were
 			 * disabled, tail and head should both be zero
 			 * already from the enable, but since we don't
-			 * know, we have to do it explictly.
+			 * know, we have to do it explicitly.
 			 */
 			val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
 			qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 6fd8d74e7392..c765a2eb04cf 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1692,8 +1692,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 	wake_up(&ppd->cpspec->autoneg_wait);
-	cancel_delayed_work(&ppd->cpspec->autoneg_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 
 	shutdown_7220_relock_poll(ppd->dd);
 	val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
@@ -2112,7 +2111,7 @@ static int qib_setup_7220_reset(struct qib_devdata *dd)
 	/*
 	 * Keep chip from being accessed until we are ready. Use
 	 * writeq() directly, to allow the write even though QIB_PRESENT
-	 * isnt' set.
+	 * isn't set.
 	 */
 	dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
 	dd->int_counter = 0; /* so we check interrupts work again */
@@ -2297,7 +2296,7 @@ static void qib_7220_config_ctxts(struct qib_devdata *dd)
 	nchipctxts = qib_read_kreg32(dd, kr_portcnt);
 	dd->cspec->numctxts = nchipctxts;
 	if (qib_n_krcv_queues > 1) {
-		dd->qpn_mask = 0x3f;
+		dd->qpn_mask = 0x3e;
 		dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
 		if (dd->first_user_ctxt > nchipctxts)
 			dd->first_user_ctxt = nchipctxts;
@@ -2480,7 +2479,7 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
 		 * we command the link down. As with width, only write the
 		 * actual register if the link is currently down, otherwise
 		 * takes effect on next link change. Since setting is being
-		 * explictly requested (via MAD or sysfs), clear autoneg
+		 * explicitly requested (via MAD or sysfs), clear autoneg
 		 * failure status if speed autoneg is enabled.
 		 */
 		ppd->link_speed_enabled = val;
@@ -2703,7 +2702,7 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
 }
 
 static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
@@ -2779,7 +2778,7 @@ static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
 			 * Init the context registers also; if we were
 			 * disabled, tail and head should both be zero
 			 * already from the enable, but since we don't
-			 * know, we have to do it explictly.
+			 * know, we have to do it explicitly.
 			 */
 			val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
 			qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
@@ -3515,8 +3514,8 @@ static void try_7220_autoneg(struct qib_pportdata *ppd)
 
 	toggle_7220_rclkrls(ppd->dd);
 	/* 2 msec is minimum length of a poll cycle */
-	schedule_delayed_work(&ppd->cpspec->autoneg_work,
-			      msecs_to_jiffies(2));
+	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+			   msecs_to_jiffies(2));
 }
 
 /*
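
Replacing cancel_delayed_work() + flush_scheduled_work() with cancel_delayed_work_sync() waits for just the one work item instead of flushing the entire global workqueue, which both scales better and avoids deadlocks if the caller holds a lock that some unrelated queued work also wants. A minimal sketch of the teardown idiom (hypothetical driver state):

	#include <linux/workqueue.h>

	struct demo_port {
		struct delayed_work autoneg_work;
	};

	static void demo_shutdown(struct demo_port *p)
	{
		/* guarantees the item is neither pending nor running on
		 * return; no global flush needed */
		cancel_delayed_work_sync(&p->autoneg_work);
	}
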
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 584d443b5335..8ec5237031a0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -71,6 +71,9 @@ static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
 
 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
+static void serdes_7322_los_enable(struct qib_pportdata *, int);
+static int serdes_7322_init_old(struct qib_pportdata *);
+static int serdes_7322_init_new(struct qib_pportdata *);
 
 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
 
@@ -111,6 +114,21 @@ static ushort qib_singleport;
 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
 
+/*
+ * Receive header queue sizes
+ */
+static unsigned qib_rcvhdrcnt;
+module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
+
+static unsigned qib_rcvhdrsize;
+module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
+
+static unsigned qib_rcvhdrentsize;
+module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
+
 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
 /* for read back, default index is ~5m copper cable */
 static char txselect_list[MAX_ATTEN_LEN] = "10";
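
The three new knobs follow the usual convention for optional tunables: a zero default means "let the driver compute it", and a nonzero value set at module load overrides the computed default (the rcvhdrcnt and rcvhdrentsize hunks further down apply them). A minimal sketch of that convention (hypothetical parameter):

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	/* 0 (the default) means "auto": the driver picks at init time */
	static unsigned demo_ringsize;
	module_param_named(ringsize, demo_ringsize, uint, S_IRUGO);
	MODULE_PARM_DESC(ringsize, "receive ring entries (0 = driver default)");

	static unsigned pick_ringsize(unsigned computed_default)
	{
		return demo_ringsize ? demo_ringsize : computed_default;
	}
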
@@ -314,7 +332,7 @@ MODULE_PARM_DESC(txselect, \
 #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
 
 /*
- * Per-context kernel registers. Acess only with qib_read_kreg_ctxt()
+ * Per-context kernel registers. Access only with qib_read_kreg_ctxt()
  * or qib_write_kreg_ctxt()
  */
 #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
@@ -451,6 +469,8 @@ static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
 #define IB_7322_LT_STATE_RECOVERIDLE    0x0f
 #define IB_7322_LT_STATE_CFGENH         0x10
 #define IB_7322_LT_STATE_CFGTEST        0x11
+#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
+#define IB_7322_LT_STATE_CFGWAITENH     0x13
 
 /* link state machine states from IBC */
 #define IB_7322_L_STATE_DOWN            0x0
@@ -480,8 +500,10 @@ static const u8 qib_7322_physportstate[0x20] = {
 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
 	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
 	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
-	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
-	[0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
+	[IB_7322_LT_STATE_CFGWAITRMTTEST] =
+		IB_PHYSPORTSTATE_CFG_TRAIN,
+	[IB_7322_LT_STATE_CFGWAITENH] =
+		IB_PHYSPORTSTATE_CFG_WAIT_ENH,
 	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
 	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
 	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
@@ -544,6 +566,7 @@ static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
 
 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
 #define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
+#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
 
 #define H1_FORCE_VAL 8
@@ -1673,10 +1696,14 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
 		break;
 	}
 
-	if (ibclt == IB_7322_LT_STATE_CFGTEST &&
+	if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
+	      ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
+	     ibclt == IB_7322_LT_STATE_LINKUP) &&
 	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
 		force_h1(ppd);
 		ppd->cpspec->qdr_reforce = 1;
+		if (!ppd->dd->cspec->r1)
+			serdes_7322_los_enable(ppd, 0);
 	} else if (ppd->cpspec->qdr_reforce &&
 		   (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
 		   (ibclt == IB_7322_LT_STATE_CFGENH ||
@@ -1692,18 +1719,37 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
 	     ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
 		adj_tx_serdes(ppd);
 
-	if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
-	    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
-		ppd->cpspec->qdr_dfe_on = 1;
-		ppd->cpspec->qdr_dfe_time = 0;
-		/* On link down, reenable QDR adaptation */
-		qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
-				    ppd->dd->cspec->r1 ?
-				    QDR_STATIC_ADAPT_DOWN_R1 :
-				    QDR_STATIC_ADAPT_DOWN);
+	if (ibclt != IB_7322_LT_STATE_LINKUP) {
+		u8 ltstate = qib_7322_phys_portstate(ibcst);
+		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
+					  LinkTrainingState);
+		if (!ppd->dd->cspec->r1 &&
+		    pibclt == IB_7322_LT_STATE_LINKUP &&
+		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
+			/* If the link went down (but not into recovery),
+			 * turn LOS back on */
+			serdes_7322_los_enable(ppd, 1);
+		if (!ppd->cpspec->qdr_dfe_on &&
+		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
+			ppd->cpspec->qdr_dfe_on = 1;
+			ppd->cpspec->qdr_dfe_time = 0;
+			/* On link down, reenable QDR adaptation */
+			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+					    ppd->dd->cspec->r1 ?
+					    QDR_STATIC_ADAPT_DOWN_R1 :
+					    QDR_STATIC_ADAPT_DOWN);
+			printk(KERN_INFO QIB_DRV_NAME
+			       " IB%u:%u re-enabled QDR adaptation "
+			       "ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
+		}
 	}
 }
 
+static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
+
 /*
  * This is per-pport error handling.
  * will likely get it's own MSIx interrupt (one for each port,
@@ -2323,6 +2369,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
 	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
 
+	/* Hold the link state machine for mezz boards */
+	if (IS_QMH(dd) || IS_QME(dd))
+		qib_set_ib_7322_lstate(ppd, 0,
+				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
 	/* Also enable IBSTATUSCHG interrupt. */
 	val = qib_read_kreg_port(ppd, krp_errmask);
 	qib_write_kreg_port(ppd, krp_errmask,
@@ -2348,10 +2399,9 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 	wake_up(&ppd->cpspec->autoneg_wait);
-	cancel_delayed_work(&ppd->cpspec->autoneg_work);
+	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 	if (ppd->dd->cspec->r1)
-		cancel_delayed_work(&ppd->cpspec->ipg_work);
-	flush_scheduled_work();
+		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
 
 	ppd->cpspec->chase_end = 0;
 	if (ppd->cpspec->chase_timer.data) /* if initted */
@@ -2648,7 +2698,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
 			if (!(pins & mask)) {
 				++handled;
 				qd->t_insert = get_jiffies_64();
-				schedule_work(&qd->work);
+				queue_work(ib_wq, &qd->work);
 			}
 		}
 	}
@@ -2785,7 +2835,6 @@ static irqreturn_t qib_7322intr(int irq, void *data)
 				ctxtrbits &= ~rmask;
 				if (dd->rcd[i]) {
 					qib_kreceive(dd->rcd[i], NULL, &npkts);
-					adjust_rcv_timeout(dd->rcd[i], npkts);
 				}
 			}
 			rmask <<= 1;
@@ -2835,7 +2884,6 @@ static irqreturn_t qib_7322pintr(int irq, void *data)
 		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
 
 	qib_kreceive(rcd, NULL, &npkts);
-	adjust_rcv_timeout(rcd, npkts);
 
 	return IRQ_HANDLED;
 }
@@ -3157,6 +3205,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
 	case BOARD_QME7342:
 		n = "InfiniPath_QME7342";
 		break;
+	case 8:
+		n = "InfiniPath_QME7362";
+		dd->flags |= QIB_HAS_QSFP;
+		break;
 	case 15:
 		n = "InfiniPath_QLE7342_TEST";
 		dd->flags |= QIB_HAS_QSFP;
@@ -3253,7 +3305,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
 	/*
 	 * Keep chip from being accessed until we are ready. Use
 	 * writeq() directly, to allow the write even though QIB_PRESENT
-	 * isnt' set.
+	 * isn't set.
 	 */
 	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
 	dd->flags |= QIB_DOING_RESET;
@@ -3475,11 +3527,6 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
 	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
 	dd->cspec->numctxts = nchipctxts;
 	if (qib_n_krcv_queues > 1 && dd->num_pports) {
-		/*
-		 * Set the mask for which bits from the QPN are used
-		 * to select a context number.
-		 */
-		dd->qpn_mask = 0x3f;
 		dd->first_user_ctxt = NUM_IB_PORTS +
 			(qib_n_krcv_queues - 1) * dd->num_pports;
 		if (dd->first_user_ctxt > nchipctxts)
@@ -3530,8 +3577,11 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
 
 	/* kr_rcvegrcnt changes based on the number of contexts enabled */
 	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
-	dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
-			    dd->num_pports > 1 ? 1024U : 2048U);
+	if (qib_rcvhdrcnt)
+		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
+	else
+		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+				    dd->num_pports > 1 ? 1024U : 2048U);
 }
 
 static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
@@ -3683,7 +3733,7 @@ static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
 		/*
 		 * As with width, only write the actual register if the
 		 * link is currently down, otherwise takes effect on next
-		 * link change. Since setting is being explictly requested
+		 * link change. Since setting is being explicitly requested
 		 * (via MAD or sysfs), clear autoneg failure status if speed
 		 * autoneg is enabled.
 		 */
@@ -4002,8 +4052,14 @@ static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
 }
 
 static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
+	/*
+	 * Need to write timeout register before updating rcvhdrhead to ensure
+	 * that the timer is enabled on reception of a packet.
+	 */
+	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
+		adjust_rcv_timeout(rcd, npkts);
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
@@ -4113,7 +4169,7 @@ static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
 			 * Init the context registers also; if we were
 			 * disabled, tail and head should both be zero
 			 * already from the enable, but since we don't
-			 * know, we have to do it explictly.
+			 * know, we have to do it explicitly.
 			 */
 			val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
 			qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
@@ -4926,8 +4982,8 @@ static void try_7322_autoneg(struct qib_pportdata *ppd)
 	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
 	qib_7322_mini_pcs_reset(ppd);
 	/* 2 msec is minimum length of a poll cycle */
-	schedule_delayed_work(&ppd->cpspec->autoneg_work,
-			      msecs_to_jiffies(2));
+	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+			   msecs_to_jiffies(2));
 }
 
 /*
@@ -5057,7 +5113,8 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
 		ib_free_send_mad(send_buf);
 retry:
 	delay = 2 << ppd->cpspec->ipg_tries;
-	schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
+			   msecs_to_jiffies(delay));
 }
 
 /*
@@ -5522,7 +5579,7 @@ static void qsfp_7322_event(struct work_struct *work)
 			u64 now = get_jiffies_64();
 			if (time_after64(now, pwrup))
 				break;
-			msleep(1);
+			msleep(20);
 		}
 		ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
 		/*
@@ -5531,9 +5588,16 @@ static void qsfp_7322_event(struct work_struct *work)
 		 * even on failure to read cable information. We don't
 		 * get here for QME, so IS_QME check not needed here.
 		 */
-		le2 = (!ret && qd->cache.atten[1] >= qib_long_atten &&
-		       !ppd->dd->cspec->r1 && QSFP_IS_CU(qd->cache.tech)) ?
-			LE2_5m : LE2_DEFAULT;
+		if (!ret && !ppd->dd->cspec->r1) {
+			if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
+				le2 = LE2_QME;
+			else if (qd->cache.atten[1] >= qib_long_atten &&
+				 QSFP_IS_CU(qd->cache.tech))
+				le2 = LE2_5m;
+			else
+				le2 = LE2_DEFAULT;
+		} else
+			le2 = LE2_DEFAULT;
 		ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
 		init_txdds_table(ppd, 0);
 	}
@@ -5579,6 +5643,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
 	u32 pidx, unit, port, deflt, h1;
 	unsigned long val;
 	int any = 0, seth1;
+	int txdds_size;
 
 	str = txselect_list;
 
@@ -5587,6 +5652,10 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
 		dd->pport[pidx].cpspec->no_eep = deflt;
 
+	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
+	if (IS_QME(dd) || IS_QMH(dd))
+		txdds_size += TXDDS_MFG_SZ;
+
 	while (*nxt && nxt[1]) {
 		str = ++nxt;
 		unit = simple_strtoul(str, &nxt, 0);
@@ -5609,7 +5678,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
 				;
 			continue;
 		}
-		if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)
+		if (val >= txdds_size)
 			continue;
 		seth1 = 0;
 		h1 = 0; /* gcc thinks it might be used uninitted */
@@ -5633,6 +5702,11 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
 			ppd->cpspec->h1_val = h1;
 			/* now change the IBC and serdes, overriding generic */
 			init_txdds_table(ppd, 1);
+			/* Re-enable the physical state machine on mezz boards
+			 * now that the correct settings have been set. */
+			if (IS_QMH(dd) || IS_QME(dd))
+				qib_set_ib_7322_lstate(ppd, 0,
+					QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
 			any++;
 		}
 		if (*nxt == '\n')
@@ -5661,10 +5735,11 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
 		return -ENOSPC;
 	}
 	val = simple_strtoul(str, &n, 0);
-	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
+	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+				TXDDS_MFG_SZ)) {
 		printk(KERN_INFO QIB_DRV_NAME
 		       "txselect_values must start with a number < %d\n",
-		       TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+		       TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
 		return -EINVAL;
 	}
 	strcpy(txselect_list, str);
@@ -5810,7 +5885,8 @@ static void write_7322_initregs(struct qib_devdata *dd)
 		unsigned n, regno;
 		unsigned long flags;
 
-		if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
+		if (dd->n_krcv_queues < 2 ||
+		    !dd->pport[pidx].link_speed_supported)
 			continue;
 
 		ppd = &dd->pport[pidx];
@@ -6097,8 +6173,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 		ppd++;
 	}
 
-	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
-	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+	dd->rcvhdrentsize = qib_rcvhdrentsize ?
+		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
+	dd->rcvhdrsize = qib_rcvhdrsize ?
+		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
 	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
 
 	/* we always allocate at least 2048 bytes for eager buffers */
@@ -6495,7 +6573,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
 			/* make sure we see an updated copy next time around */
 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
 			sleeps++;
-			msleep(1);
+			msleep(20);
 		}
 
 		switch (which) {
@@ -6993,6 +7071,12 @@ static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
 	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */
 };
 
+static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
+	/* amp, pre, main, post */
+	{ 0, 0, 0, 0 },         /* QME7342 mfg settings */
+	{ 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
+};
+
 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
 					       unsigned atten)
 {
@@ -7066,6 +7150,16 @@ static void find_best_ent(struct qib_pportdata *ppd,
 		*sdr_dds = &txdds_extra_sdr[idx];
 		*ddr_dds = &txdds_extra_ddr[idx];
 		*qdr_dds = &txdds_extra_qdr[idx];
+	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
+		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+					  TXDDS_MFG_SZ)) {
+		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+		printk(KERN_INFO QIB_DRV_NAME
+			" IB%u:%u use idx %u into txdds_mfg\n",
+			ppd->dd->unit, ppd->port, idx);
+		*sdr_dds = &txdds_extra_mfg[idx];
+		*ddr_dds = &txdds_extra_mfg[idx];
+		*qdr_dds = &txdds_extra_mfg[idx];
 	} else {
 		/* this shouldn't happen, it's range checked */
 		*sdr_dds = txdds_sdr + qib_long_atten;
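
find_best_ent() treats no_eep as a flat index into three concatenated tables: [0, TXDDS_TABLE_SZ) is the regular table, the next TXDDS_EXTRA_SZ entries the extra table, and, on QME/QMH boards only, the final TXDDS_MFG_SZ entries the new manufacturing table. A tiny sketch of that range dispatch (sizes and names hypothetical):

	/* sketch: dispatch a flat index across concatenated tables, as
	 * no_eep selects among txdds / txdds_extra / txdds_extra_mfg */
	#define TBL_SZ   16
	#define EXTRA_SZ 13
	#define MFG_SZ    2

	static const char *pick_table(unsigned idx, unsigned *off)
	{
		if (idx < TBL_SZ) {
			*off = idx;
			return "main";
		} else if (idx < TBL_SZ + EXTRA_SZ) {
			*off = idx - TBL_SZ;
			return "extra";
		} else if (idx < TBL_SZ + EXTRA_SZ + MFG_SZ) {
			*off = idx - (TBL_SZ + EXTRA_SZ);
			return "mfg";	/* QME/QMH boards only */
		}
		*off = 0;
		return "default";	/* range-checked earlier */
	}
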
@@ -7210,9 +7304,35 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
 	}
 }
 
+static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
+{
+	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
+	u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
+
+	if (enable && !state) {
+		printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
+		       ppd->dd->unit, ppd->port);
+		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+	} else if (!enable && state) {
+		printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
+		       ppd->dd->unit, ppd->port);
+		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+	}
+	qib_write_kreg_port(ppd, krp_serdesctrl, data);
+}
+
 static int serdes_7322_init(struct qib_pportdata *ppd)
 {
-	u64 data;
+	int ret = 0;
+	if (ppd->dd->cspec->r1)
+		ret = serdes_7322_init_old(ppd);
+	else
+		ret = serdes_7322_init_new(ppd);
+	return ret;
+}
+
+static int serdes_7322_init_old(struct qib_pportdata *ppd)
+{
 	u32 le_val;
 
 	/*
@@ -7270,11 +7390,7 @@ static int serdes_7322_init(struct qib_pportdata *ppd) | |||
7270 | ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ | 7390 | ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ |
7271 | ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ | 7391 | ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ |
7272 | 7392 | ||
7273 | data = qib_read_kreg_port(ppd, krp_serdesctrl); | 7393 | serdes_7322_los_enable(ppd, 1); |
7274 | /* Turn off IB latency mode */ | ||
7275 | data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE); | ||
7276 | qib_write_kreg_port(ppd, krp_serdesctrl, data | | ||
7277 | SYM_MASK(IBSerdesCtrl_0, RXLOSEN)); | ||
7278 | 7394 | ||
7279 | /* rxbistena; set 0 to avoid effects of it switch later */ | 7395 | /* rxbistena; set 0 to avoid effects of it switch later */ |
7280 | ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15); | 7396 | ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15); |
@@ -7314,6 +7430,206 @@ static int serdes_7322_init(struct qib_pportdata *ppd) | |||
7314 | return 0; | 7430 | return 0; |
7315 | } | 7431 | } |
7316 | 7432 | ||
7433 | static int serdes_7322_init_new(struct qib_pportdata *ppd) | ||
7434 | { | ||
7435 | u64 tstart; | ||
7436 | u32 le_val, rxcaldone; | ||
7437 | int chan, chan_done = (1 << SERDES_CHANS) - 1; | ||
7438 | |||
7439 | /* | ||
7440 | * Initialize the Tx DDS tables. Also done every QSFP event, | ||
7441 | * for adapters with QSFP | ||
7442 | */ | ||
7443 | init_txdds_table(ppd, 0); | ||
7444 | |||
7445 | /* Clear cmode-override, may be set from older driver */ | ||
7446 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); | ||
7447 | |||
7448 | /* ensure no tx overrides from earlier driver loads */ | ||
7449 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | ||
7450 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
7451 | reset_tx_deemphasis_override)); | ||
7452 | |||
7453 | /* START OF LSI SUGGESTED SERDES BRINGUP */ | ||
7454 | /* Reset - Calibration Setup */ | ||
7455 | /* Stop DFE adaptation */ | ||
7456 | ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1)); | ||
7457 | /* Disable LE1 */ | ||
7458 | ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5)); | ||
7459 | /* Disable autoadapt for LE1 */ | ||
7460 | ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15)); | ||
7461 | /* Disable LE2 */ | ||
7462 | ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6)); | ||
7463 | /* Disable VGA */ | ||
7464 | ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0)); | ||
7465 | /* Disable AFE Offset Cancel */ | ||
7466 | ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12)); | ||
7467 | /* Disable Timing Loop */ | ||
7468 | ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3)); | ||
7469 | /* Disable Frequency Loop */ | ||
7470 | ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4)); | ||
7471 | /* Disable Baseline Wander Correction */ | ||
7472 | ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13)); | ||
7473 | /* Disable RX Calibration */ | ||
7474 | ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10)); | ||
7475 | /* Disable RX Offset Calibration */ | ||
7476 | ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4)); | ||
7477 | /* Select BB CDR */ | ||
7478 | ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15)); | ||
7479 | /* CDR Step Size */ | ||
7480 | ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8)); | ||
7481 | /* Enable phase Calibration */ | ||
7482 | ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5)); | ||
7483 | /* DFE Bandwidth [2:14-12] */ | ||
7484 | ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12)); | ||
7485 | /* DFE Config (4 taps only) */ | ||
7486 | ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0)); | ||
7487 | /* Gain Loop Bandwidth */ | ||
7488 | if (!ppd->dd->cspec->r1) { | ||
7489 | ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12)); | ||
7490 | ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8)); | ||
7491 | } else { | ||
7492 | ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11)); | ||
7493 | } | ||
7494 | /* Baseline Wander Correction Gain [13:4-0] (leave as default) */ | ||
7495 | /* Baseline Wander Correction Gain [3:7-5] (leave as default) */ | ||
7496 | /* Data Rate Select [5:7-6] (leave as default) */ | ||
7497 | /* RX Parallel Word Width [3:10-8] (leave as default) */ | ||
7498 | |||
7499 | /* RX RESET */ | ||
7500 | /* Single- or Multi-channel reset */ | ||
7501 | /* RX Analog reset */ | ||
7502 | /* RX Digital reset */ | ||
7503 | ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13)); | ||
7504 | msleep(20); | ||
7505 | /* RX Analog reset */ | ||
7506 | ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14)); | ||
7507 | msleep(20); | ||
7508 | /* RX Digital reset */ | ||
7509 | ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13)); | ||
7510 | msleep(20); | ||
7511 | |||
7512 | /* setup LoS params; these are subsystem, so chan == 5 */ | ||
7513 | /* LoS filter threshold_count on, ch 0-3, set to 8 */ | ||
7514 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); | ||
7515 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); | ||
7516 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); | ||
7517 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); | ||
7518 | |||
7519 | /* LoS filter threshold_count off, ch 0-3, set to 4 */ | ||
7520 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); | ||
7521 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); | ||
7522 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); | ||
7523 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); | ||
7524 | |||
7525 | /* LoS filter select enabled */ | ||
7526 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); | ||
7527 | |||
7528 | /* LoS target data: SDR=4, DDR=2, QDR=1 */ | ||
7529 | ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */ | ||
7530 | ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ | ||
7531 | ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ | ||
7532 | |||
7533 | /* Turn on LOS on initial SERDES init */ | ||
7534 | serdes_7322_los_enable(ppd, 1); | ||
7535 | /* FLoop LOS gate: PPM filter enabled */ | ||
7536 | ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10); | ||
7537 | |||
7538 | /* RX LATCH CALIBRATION */ | ||
7539 | /* Enable Eyefinder Phase Calibration latch */ | ||
7540 | ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0)); | ||
7541 | /* Enable RX Offset Calibration latch */ | ||
7542 | ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4)); | ||
7543 | msleep(20); | ||
7544 | /* Start Calibration */ | ||
7545 | ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10)); | ||
7546 | tstart = get_jiffies_64(); | ||
7547 | while (chan_done && | ||
7548 | !time_after64(get_jiffies_64(), | ||
7549 | tstart + msecs_to_jiffies(500))) { | ||
7550 | msleep(20); | ||
7551 | for (chan = 0; chan < SERDES_CHANS; ++chan) { | ||
7552 | rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), | ||
7553 | (chan + (chan >> 1)), | ||
7554 | 25, 0, 0); | ||
7555 | if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 && | ||
7556 | (~chan_done & (1 << chan)) == 0) | ||
7557 | chan_done &= ~(1 << chan); | ||
7558 | } | ||
7559 | } | ||
7560 | if (chan_done) { | ||
7561 | printk(KERN_INFO QIB_DRV_NAME | ||
7562 | " Serdes %d calibration not done after .5 sec: 0x%x\n", | ||
7563 | IBSD(ppd->hw_pidx), chan_done); | ||
7564 | } else { | ||
7565 | for (chan = 0; chan < SERDES_CHANS; ++chan) { | ||
7566 | rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), | ||
7567 | (chan + (chan >> 1)), | ||
7568 | 25, 0, 0); | ||
7569 | if ((~rxcaldone & (u32)BMASK(10, 10)) == 0) | ||
7570 | printk(KERN_INFO QIB_DRV_NAME | ||
7571 | " Serdes %d chan %d calibration " | ||
7572 | "failed\n", IBSD(ppd->hw_pidx), chan); | ||
7573 | } | ||
7574 | } | ||
7575 | |||
7576 | /* Turn off Calibration */ | ||
7577 | ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10)); | ||
7578 | msleep(20); | ||
7579 | |||
7580 | /* BRING RX UP */ | ||
7581 | /* Set LE2 value (May be overridden in qsfp_7322_event) */ | ||
7582 | le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; | ||
7583 | ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7)); | ||
7584 | /* Set LE2 Loop bandwidth */ | ||
7585 | ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5)); | ||
7586 | /* Enable LE2 */ | ||
7587 | ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6)); | ||
7588 | msleep(20); | ||
7589 | /* Enable H0 only */ | ||
7590 | ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1)); | ||
7591 | /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */ | ||
7592 | le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; | ||
7593 | ibsd_wr_allchans(ppd, 21, le_val, 0xfffe); | ||
7594 | /* Enable VGA */ | ||
7595 | ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0)); | ||
7596 | msleep(20); | ||
7597 | /* Set Frequency Loop Bandwidth */ | ||
7598 | ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5)); | ||
7599 | /* Enable Frequency Loop */ | ||
7600 | ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4)); | ||
7601 | /* Set Timing Loop Bandwidth */ | ||
7602 | ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9)); | ||
7603 | /* Enable Timing Loop */ | ||
7604 | ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3)); | ||
7605 | msleep(50); | ||
7606 | /* Enable DFE | ||
7607 | * Set receive adaptation mode. SDR and DDR adaptation are | ||
7608 | * always on, and QDR is initially enabled; later disabled. | ||
7609 | */ | ||
7610 | qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL); | ||
7611 | qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL); | ||
7612 | qib_write_kreg_port(ppd, krp_static_adapt_dis(2), | ||
7613 | ppd->dd->cspec->r1 ? | ||
7614 | QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); | ||
7615 | ppd->cpspec->qdr_dfe_on = 1; | ||
7616 | /* Disable LE1 */ | ||
7617 | ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5)); | ||
7618 | /* Disable auto adapt for LE1 */ | ||
7619 | ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15)); | ||
7620 | msleep(20); | ||
7621 | /* Enable AFE Offset Cancel */ | ||
7622 | ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12)); | ||
7623 | /* Enable Baseline Wander Correction */ | ||
7624 | ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13)); | ||
7625 | /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */ | ||
7626 | ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11)); | ||
7627 | /* VGA output common mode */ | ||
7628 | ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2)); | ||
7629 | |||
7630 | return 0; | ||
7631 | } | ||
7632 | |||
7317 | /* start adjust QMH serdes parameters */ | 7633 | /* start adjust QMH serdes parameters */ |
7318 | 7634 | ||
7319 | static void set_man_code(struct qib_pportdata *ppd, int chan, int code) | 7635 | static void set_man_code(struct qib_pportdata *ppd, int chan, int code) |
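serdes_7322_init_new() brings up RX with a bounded poll: it clears one bit in chan_done per calibrated channel and gives up after half a second. A minimal userspace sketch of the same pattern; read_cal_done() is a stand-in for the driver's ahb_mod() read:

    /* Bounded polling with a per-channel done bitmask, as in
     * serdes_7322_init_new().  read_cal_done() fakes the hardware. */
    #include <stdio.h>
    #include <time.h>

    #define SERDES_CHANS 4

    static int read_cal_done(int chan)
    {
        static int calls;               /* pretend chans finish in order */
        return ++calls > chan * 3;
    }

    static double elapsed_ms(const struct timespec *t0)
    {
        struct timespec t1;
        clock_gettime(CLOCK_MONOTONIC, &t1);
        return (t1.tv_sec - t0->tv_sec) * 1000.0 +
               (t1.tv_nsec - t0->tv_nsec) / 1e6;
    }

    int main(void)
    {
        int chan, chan_done = (1 << SERDES_CHANS) - 1; /* 1 bit/pending */
        struct timespec t0;

        clock_gettime(CLOCK_MONOTONIC, &t0);
        while (chan_done && elapsed_ms(&t0) < 500.0)
            for (chan = 0; chan < SERDES_CHANS; ++chan)
                if (read_cal_done(chan))
                    chan_done &= ~(1 << chan);  /* channel finished */

        if (chan_done)
            printf("calibration not done after .5 sec: 0x%x\n", chan_done);
        else
            printf("all channels calibrated\n");
        return 0;
    }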
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index f1d16d3a01f6..a01f3fce8eb3 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c | |||
@@ -80,7 +80,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */ | |||
80 | module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO); | 80 | module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO); |
81 | MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism"); | 81 | MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism"); |
82 | 82 | ||
83 | struct workqueue_struct *qib_wq; | ||
84 | struct workqueue_struct *qib_cq_wq; | 83 | struct workqueue_struct *qib_cq_wq; |
85 | 84 | ||
86 | static void verify_interrupt(unsigned long); | 85 | static void verify_interrupt(unsigned long); |
@@ -92,9 +91,11 @@ unsigned long *qib_cpulist; | |||
92 | /* set number of contexts we'll actually use */ | 91 | /* set number of contexts we'll actually use */ |
93 | void qib_set_ctxtcnt(struct qib_devdata *dd) | 92 | void qib_set_ctxtcnt(struct qib_devdata *dd) |
94 | { | 93 | { |
95 | if (!qib_cfgctxts) | 94 | if (!qib_cfgctxts) { |
96 | dd->cfgctxts = dd->first_user_ctxt + num_online_cpus(); | 95 | dd->cfgctxts = dd->first_user_ctxt + num_online_cpus(); |
97 | else if (qib_cfgctxts < dd->num_pports) | 96 | if (dd->cfgctxts > dd->ctxtcnt) |
97 | dd->cfgctxts = dd->ctxtcnt; | ||
98 | } else if (qib_cfgctxts < dd->num_pports) | ||
98 | dd->cfgctxts = dd->ctxtcnt; | 99 | dd->cfgctxts = dd->ctxtcnt; |
99 | else if (qib_cfgctxts <= dd->ctxtcnt) | 100 | else if (qib_cfgctxts <= dd->ctxtcnt) |
100 | dd->cfgctxts = qib_cfgctxts; | 101 | dd->cfgctxts = qib_cfgctxts; |
@@ -268,23 +269,20 @@ static void init_shadow_tids(struct qib_devdata *dd) | |||
268 | struct page **pages; | 269 | struct page **pages; |
269 | dma_addr_t *addrs; | 270 | dma_addr_t *addrs; |
270 | 271 | ||
271 | pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); | 272 | pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); |
272 | if (!pages) { | 273 | if (!pages) { |
273 | qib_dev_err(dd, "failed to allocate shadow page * " | 274 | qib_dev_err(dd, "failed to allocate shadow page * " |
274 | "array, no expected sends!\n"); | 275 | "array, no expected sends!\n"); |
275 | goto bail; | 276 | goto bail; |
276 | } | 277 | } |
277 | 278 | ||
278 | addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); | 279 | addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); |
279 | if (!addrs) { | 280 | if (!addrs) { |
280 | qib_dev_err(dd, "failed to allocate shadow dma handle " | 281 | qib_dev_err(dd, "failed to allocate shadow dma handle " |
281 | "array, no expected sends!\n"); | 282 | "array, no expected sends!\n"); |
282 | goto bail_free; | 283 | goto bail_free; |
283 | } | 284 | } |
284 | 285 | ||
285 | memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); | ||
286 | memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); | ||
287 | |||
288 | dd->pageshadow = pages; | 286 | dd->pageshadow = pages; |
289 | dd->physshadow = addrs; | 287 | dd->physshadow = addrs; |
290 | return; | 288 | return; |
@@ -348,7 +346,7 @@ done: | |||
348 | * @dd: the qlogic_ib device | 346 | * @dd: the qlogic_ib device |
349 | * | 347 | * |
350 | * sanity check at least some of the values after reset, and | 348 | * sanity check at least some of the values after reset, and |
351 | * ensure no receive or transmit (explictly, in case reset | 349 | * ensure no receive or transmit (explicitly, in case reset |
352 | * failed | 350 | * failed |
353 | */ | 351 | */ |
354 | static int init_after_reset(struct qib_devdata *dd) | 352 | static int init_after_reset(struct qib_devdata *dd) |
@@ -1045,24 +1043,10 @@ static int __init qlogic_ib_init(void) | |||
1045 | if (ret) | 1043 | if (ret) |
1046 | goto bail; | 1044 | goto bail; |
1047 | 1045 | ||
1048 | /* | ||
1049 | * We create our own workqueue mainly because we want to be | ||
1050 | * able to flush it when devices are being removed. We can't | ||
1051 | * use schedule_work()/flush_scheduled_work() because both | ||
1052 | * unregister_netdev() and linkwatch_event take the rtnl lock, | ||
1053 | * so flush_scheduled_work() can deadlock during device | ||
1054 | * removal. | ||
1055 | */ | ||
1056 | qib_wq = create_workqueue("qib"); | ||
1057 | if (!qib_wq) { | ||
1058 | ret = -ENOMEM; | ||
1059 | goto bail_dev; | ||
1060 | } | ||
1061 | |||
1062 | qib_cq_wq = create_singlethread_workqueue("qib_cq"); | 1046 | qib_cq_wq = create_singlethread_workqueue("qib_cq"); |
1063 | if (!qib_cq_wq) { | 1047 | if (!qib_cq_wq) { |
1064 | ret = -ENOMEM; | 1048 | ret = -ENOMEM; |
1065 | goto bail_wq; | 1049 | goto bail_dev; |
1066 | } | 1050 | } |
1067 | 1051 | ||
1068 | /* | 1052 | /* |
@@ -1092,8 +1076,6 @@ bail_unit: | |||
1092 | idr_destroy(&qib_unit_table); | 1076 | idr_destroy(&qib_unit_table); |
1093 | bail_cq_wq: | 1077 | bail_cq_wq: |
1094 | destroy_workqueue(qib_cq_wq); | 1078 | destroy_workqueue(qib_cq_wq); |
1095 | bail_wq: | ||
1096 | destroy_workqueue(qib_wq); | ||
1097 | bail_dev: | 1079 | bail_dev: |
1098 | qib_dev_cleanup(); | 1080 | qib_dev_cleanup(); |
1099 | bail: | 1081 | bail: |
@@ -1117,7 +1099,6 @@ static void __exit qlogic_ib_cleanup(void) | |||
1117 | 1099 | ||
1118 | pci_unregister_driver(&qib_driver); | 1100 | pci_unregister_driver(&qib_driver); |
1119 | 1101 | ||
1120 | destroy_workqueue(qib_wq); | ||
1121 | destroy_workqueue(qib_cq_wq); | 1102 | destroy_workqueue(qib_cq_wq); |
1122 | 1103 | ||
1123 | qib_cpulist_count = 0; | 1104 | qib_cpulist_count = 0; |
@@ -1243,6 +1224,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev, | |||
1243 | qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot " | 1224 | qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot " |
1244 | "work if CONFIG_PCI_MSI is not enabled\n", | 1225 | "work if CONFIG_PCI_MSI is not enabled\n", |
1245 | ent->device); | 1226 | ent->device); |
1227 | dd = ERR_PTR(-ENODEV); | ||
1246 | #endif | 1228 | #endif |
1247 | break; | 1229 | break; |
1248 | 1230 | ||
@@ -1289,7 +1271,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev, | |||
1289 | 1271 | ||
1290 | if (qib_mini_init || initfail || ret) { | 1272 | if (qib_mini_init || initfail || ret) { |
1291 | qib_stop_timers(dd); | 1273 | qib_stop_timers(dd); |
1292 | flush_scheduled_work(); | 1274 | flush_workqueue(ib_wq); |
1293 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | 1275 | for (pidx = 0; pidx < dd->num_pports; ++pidx) |
1294 | dd->f_quiet_serdes(dd->pport + pidx); | 1276 | dd->f_quiet_serdes(dd->pport + pidx); |
1295 | if (qib_mini_init) | 1277 | if (qib_mini_init) |
@@ -1338,8 +1320,8 @@ static void __devexit qib_remove_one(struct pci_dev *pdev) | |||
1338 | 1320 | ||
1339 | qib_stop_timers(dd); | 1321 | qib_stop_timers(dd); |
1340 | 1322 | ||
1341 | /* wait until all of our (qsfp) schedule_work() calls complete */ | 1323 | /* wait until all of our (qsfp) queue_work() calls complete */ |
1342 | flush_scheduled_work(); | 1324 | flush_workqueue(ib_wq); |
1343 | 1325 | ||
1344 | ret = qibfs_remove(dd); | 1326 | ret = qibfs_remove(dd); |
1345 | if (ret) | 1327 | if (ret) |
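Two of the cleanups above are mechanical substitutions: vmalloc()+memset() becomes a single vzalloc(), and the global flush_scheduled_work() becomes a targeted flush_workqueue(ib_wq). A userspace analogue of the allocation half, with calloc() standing in for vzalloc():

    /* Request zeroed memory in one call instead of allocating and
     * clearing separately; sizes are illustrative. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t nctxts = 16, ntids = 512;

        /* Before: allocate, then clear by hand. */
        void **pages = malloc(nctxts * ntids * sizeof(void *));
        if (!pages)
            return 1;
        memset(pages, 0, nctxts * ntids * sizeof(void *));

        /* After: one call that returns zeroed memory. */
        void **pages2 = calloc(nctxts * ntids, sizeof(void *));
        if (!pages2) {
            free(pages);
            return 1;
        }

        printf("both start zeroed: %p %p\n", pages[0], pages2[0]);
        free(pages);
        free(pages2);
        return 0;
    }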
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c index 54a40828a106..6ae57d23004a 100644 --- a/drivers/infiniband/hw/qib/qib_intr.c +++ b/drivers/infiniband/hw/qib/qib_intr.c | |||
@@ -96,8 +96,12 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs) | |||
96 | * states, or if it transitions from any of the up (INIT or better) | 96 | * states, or if it transitions from any of the up (INIT or better) |
97 | * states into any of the down states (except link recovery), then | 97 | * states into any of the down states (except link recovery), then |
98 | * call the chip-specific code to take appropriate actions. | 98 | * call the chip-specific code to take appropriate actions. |
99 | * | ||
100 | * ppd->lflags could be 0 if this is the first time the interrupt | ||
101 | * handler has been called but the link is already up. | ||
99 | */ | 102 | */ |
100 | if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) && | 103 | if (lstate >= IB_PORT_INIT && |
104 | (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) && | ||
101 | ltstate == IB_PHYSPORTSTATE_LINKUP) { | 105 | ltstate == IB_PHYSPORTSTATE_LINKUP) { |
102 | /* transitioned to UP */ | 106 | /* transitioned to UP */ |
103 | if (dd->f_ib_updown(ppd, 1, ibcs)) | 107 | if (dd->f_ib_updown(ppd, 1, ibcs)) |
@@ -131,7 +135,8 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs) | |||
131 | /* start a 75msec timer to clear symbol errors */ | 135 | /* start a 75msec timer to clear symbol errors */ |
132 | mod_timer(&ppd->symerr_clear_timer, | 136 | mod_timer(&ppd->symerr_clear_timer, |
133 | msecs_to_jiffies(75)); | 137 | msecs_to_jiffies(75)); |
134 | } else if (ltstate == IB_PHYSPORTSTATE_LINKUP) { | 138 | } else if (ltstate == IB_PHYSPORTSTATE_LINKUP && |
139 | !(ppd->lflags & QIBL_LINKACTIVE)) { | ||
135 | /* active, but not active deferred */ | 140 |
136 | qib_hol_up(ppd); /* useful only for 6120 now */ | 141 | qib_hol_up(ppd); /* useful only for 6120 now */ |
137 | *ppd->statusp |= | 142 | *ppd->statusp |= |
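The reworked condition treats ppd->lflags == 0, the state before the handler has ever run, the same as QIBL_LINKDOWN, so a link that is already up at the first interrupt still takes the up transition. A small sketch of that guard; the flag values are illustrative:

    /* First-interrupt fix: an all-zero flags word means "never
     * initialized", which must count as "was down". */
    #include <stdio.h>

    #define QIBL_LINKDOWN   0x01    /* illustrative values */
    #define QIBL_LINKACTIVE 0x02

    static int take_linkup_transition(unsigned lflags, int now_up)
    {
        /* old check, (lflags & QIBL_LINKDOWN), misses lflags == 0 */
        return now_up && (!lflags || (lflags & QIBL_LINKDOWN));
    }

    int main(void)
    {
        printf("fresh boot, link up: %d\n", take_linkup_transition(0, 1));
        printf("was down, link up:  %d\n",
               take_linkup_transition(QIBL_LINKDOWN, 1));
        printf("already active:     %d\n",
               take_linkup_transition(QIBL_LINKACTIVE, 1));
        return 0;
    }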
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c index 4b80eb153d57..8fd19a47df0c 100644 --- a/drivers/infiniband/hw/qib/qib_keys.c +++ b/drivers/infiniband/hw/qib/qib_keys.c | |||
@@ -136,7 +136,6 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | |||
136 | struct qib_mregion *mr; | 136 | struct qib_mregion *mr; |
137 | unsigned n, m; | 137 | unsigned n, m; |
138 | size_t off; | 138 | size_t off; |
139 | int ret = 0; | ||
140 | unsigned long flags; | 139 | unsigned long flags; |
141 | 140 | ||
142 | /* | 141 | /* |
@@ -152,6 +151,8 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | |||
152 | if (!dev->dma_mr) | 151 | if (!dev->dma_mr) |
153 | goto bail; | 152 | goto bail; |
154 | atomic_inc(&dev->dma_mr->refcount); | 153 | atomic_inc(&dev->dma_mr->refcount); |
154 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
155 | |||
155 | isge->mr = dev->dma_mr; | 156 | isge->mr = dev->dma_mr; |
156 | isge->vaddr = (void *) sge->addr; | 157 | isge->vaddr = (void *) sge->addr; |
157 | isge->length = sge->length; | 158 | isge->length = sge->length; |
@@ -170,19 +171,34 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | |||
170 | off + sge->length > mr->length || | 171 | off + sge->length > mr->length || |
171 | (mr->access_flags & acc) != acc)) | 172 | (mr->access_flags & acc) != acc)) |
172 | goto bail; | 173 | goto bail; |
174 | atomic_inc(&mr->refcount); | ||
175 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
173 | 176 | ||
174 | off += mr->offset; | 177 | off += mr->offset; |
175 | m = 0; | 178 | if (mr->page_shift) { |
176 | n = 0; | 179 | /* |
177 | while (off >= mr->map[m]->segs[n].length) { | 180 | page sizes are a uniform power of 2, so no loop is necessary: |
178 | off -= mr->map[m]->segs[n].length; | 181 | entries_spanned_by_off is the number of times the loop below |
179 | n++; | 182 | would have executed. |
180 | if (n >= QIB_SEGSZ) { | 183 | */ |
181 | m++; | 184 | size_t entries_spanned_by_off; |
182 | n = 0; | 185 | |
186 | entries_spanned_by_off = off >> mr->page_shift; | ||
187 | off -= (entries_spanned_by_off << mr->page_shift); | ||
188 | m = entries_spanned_by_off/QIB_SEGSZ; | ||
189 | n = entries_spanned_by_off%QIB_SEGSZ; | ||
190 | } else { | ||
191 | m = 0; | ||
192 | n = 0; | ||
193 | while (off >= mr->map[m]->segs[n].length) { | ||
194 | off -= mr->map[m]->segs[n].length; | ||
195 | n++; | ||
196 | if (n >= QIB_SEGSZ) { | ||
197 | m++; | ||
198 | n = 0; | ||
199 | } | ||
183 | } | 200 | } |
184 | } | 201 | } |
185 | atomic_inc(&mr->refcount); | ||
186 | isge->mr = mr; | 202 | isge->mr = mr; |
187 | isge->vaddr = mr->map[m]->segs[n].vaddr + off; | 203 | isge->vaddr = mr->map[m]->segs[n].vaddr + off; |
188 | isge->length = mr->map[m]->segs[n].length - off; | 204 | isge->length = mr->map[m]->segs[n].length - off; |
@@ -190,10 +206,10 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | |||
190 | isge->m = m; | 206 | isge->m = m; |
191 | isge->n = n; | 207 | isge->n = n; |
192 | ok: | 208 | ok: |
193 | ret = 1; | 209 | return 1; |
194 | bail: | 210 | bail: |
195 | spin_unlock_irqrestore(&rkt->lock, flags); | 211 | spin_unlock_irqrestore(&rkt->lock, flags); |
196 | return ret; | 212 | return 0; |
197 | } | 213 | } |
198 | 214 | ||
199 | /** | 215 | /** |
@@ -214,7 +230,6 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | |||
214 | struct qib_mregion *mr; | 230 | struct qib_mregion *mr; |
215 | unsigned n, m; | 231 | unsigned n, m; |
216 | size_t off; | 232 | size_t off; |
217 | int ret = 0; | ||
218 | unsigned long flags; | 233 | unsigned long flags; |
219 | 234 | ||
220 | /* | 235 | /* |
@@ -231,6 +246,8 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | |||
231 | if (!dev->dma_mr) | 246 | if (!dev->dma_mr) |
232 | goto bail; | 247 | goto bail; |
233 | atomic_inc(&dev->dma_mr->refcount); | 248 | atomic_inc(&dev->dma_mr->refcount); |
249 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
250 | |||
234 | sge->mr = dev->dma_mr; | 251 | sge->mr = dev->dma_mr; |
235 | sge->vaddr = (void *) vaddr; | 252 | sge->vaddr = (void *) vaddr; |
236 | sge->length = len; | 253 | sge->length = len; |
@@ -248,19 +265,34 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | |||
248 | if (unlikely(vaddr < mr->iova || off + len > mr->length || | 265 | if (unlikely(vaddr < mr->iova || off + len > mr->length || |
249 | (mr->access_flags & acc) == 0)) | 266 | (mr->access_flags & acc) == 0)) |
250 | goto bail; | 267 | goto bail; |
268 | atomic_inc(&mr->refcount); | ||
269 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
251 | 270 | ||
252 | off += mr->offset; | 271 | off += mr->offset; |
253 | m = 0; | 272 | if (mr->page_shift) { |
254 | n = 0; | 273 | /* |
255 | while (off >= mr->map[m]->segs[n].length) { | 274 | page sizes are a uniform power of 2, so no loop is necessary: |
256 | off -= mr->map[m]->segs[n].length; | 275 | entries_spanned_by_off is the number of times the loop below |
257 | n++; | 276 | would have executed. |
258 | if (n >= QIB_SEGSZ) { | 277 | */ |
259 | m++; | 278 | size_t entries_spanned_by_off; |
260 | n = 0; | 279 | |
280 | entries_spanned_by_off = off >> mr->page_shift; | ||
281 | off -= (entries_spanned_by_off << mr->page_shift); | ||
282 | m = entries_spanned_by_off/QIB_SEGSZ; | ||
283 | n = entries_spanned_by_off%QIB_SEGSZ; | ||
284 | } else { | ||
285 | m = 0; | ||
286 | n = 0; | ||
287 | while (off >= mr->map[m]->segs[n].length) { | ||
288 | off -= mr->map[m]->segs[n].length; | ||
289 | n++; | ||
290 | if (n >= QIB_SEGSZ) { | ||
291 | m++; | ||
292 | n = 0; | ||
293 | } | ||
261 | } | 294 | } |
262 | } | 295 | } |
263 | atomic_inc(&mr->refcount); | ||
264 | sge->mr = mr; | 296 | sge->mr = mr; |
265 | sge->vaddr = mr->map[m]->segs[n].vaddr + off; | 297 | sge->vaddr = mr->map[m]->segs[n].vaddr + off; |
266 | sge->length = mr->map[m]->segs[n].length - off; | 298 | sge->length = mr->map[m]->segs[n].length - off; |
@@ -268,10 +300,10 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | |||
268 | sge->m = m; | 300 | sge->m = m; |
269 | sge->n = n; | 301 | sge->n = n; |
270 | ok: | 302 | ok: |
271 | ret = 1; | 303 | return 1; |
272 | bail: | 304 | bail: |
273 | spin_unlock_irqrestore(&rkt->lock, flags); | 305 | spin_unlock_irqrestore(&rkt->lock, flags); |
274 | return ret; | 306 | return 0; |
275 | } | 307 | } |
276 | 308 | ||
277 | /* | 309 | /* |
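The page_shift fast path above replaces the per-segment walk with direct arithmetic whenever all segments share one power-of-two size. A standalone demo that checks the two computations agree; QIB_SEGSZ and the page size are illustrative:

    /* off >> page_shift counts how many segments the old loop would
     * have stepped over; divide/modulo by QIB_SEGSZ recovers (m, n). */
    #include <assert.h>
    #include <stdio.h>

    #define QIB_SEGSZ 8

    int main(void)
    {
        unsigned page_shift = 12;          /* 4 KiB pages */
        size_t seg_len = (size_t)1 << page_shift;
        size_t off = 5 * seg_len + 123;    /* offset into the region */

        /* Slow path: walk fixed-length segments one by one. */
        size_t o = off;
        unsigned m = 0, n = 0;
        while (o >= seg_len) {
            o -= seg_len;
            if (++n >= QIB_SEGSZ) {
                m++;
                n = 0;
            }
        }

        /* Fast path: one shift, one divide, one modulo. */
        size_t spanned = off >> page_shift;
        size_t off2 = off - (spanned << page_shift);
        unsigned m2 = spanned / QIB_SEGSZ;
        unsigned n2 = spanned % QIB_SEGSZ;

        assert(m == m2 && n == n2 && o == off2);
        printf("off=%zu -> map %u, seg %u, residual %zu\n",
               off, m2, n2, off2);
        return 0;
    }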
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 94b0d1f3a8f0..8fd3df5bf04d 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c | |||
@@ -464,8 +464,9 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
464 | memset(smp->data, 0, sizeof(smp->data)); | 464 | memset(smp->data, 0, sizeof(smp->data)); |
465 | 465 | ||
466 | /* Only return the mkey if the protection field allows it. */ | 466 | /* Only return the mkey if the protection field allows it. */ |
467 | if (smp->method == IB_MGMT_METHOD_SET || ibp->mkey == smp->mkey || | 467 | if (!(smp->method == IB_MGMT_METHOD_GET && |
468 | ibp->mkeyprot == 0) | 468 | ibp->mkey != smp->mkey && |
469 | ibp->mkeyprot == 1)) | ||
469 | pip->mkey = ibp->mkey; | 470 | pip->mkey = ibp->mkey; |
470 | pip->gid_prefix = ibp->gid_prefix; | 471 | pip->gid_prefix = ibp->gid_prefix; |
471 | lid = ppd->lid; | 472 | lid = ppd->lid; |
@@ -668,8 +669,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
668 | lid = be16_to_cpu(pip->lid); | 669 | lid = be16_to_cpu(pip->lid); |
669 | /* Must be a valid unicast LID address. */ | 670 | /* Must be a valid unicast LID address. */ |
670 | if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE) | 671 | if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE) |
671 | goto err; | 672 | smp->status |= IB_SMP_INVALID_FIELD; |
672 | if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { | 673 | else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { |
673 | if (ppd->lid != lid) | 674 | if (ppd->lid != lid) |
674 | qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT); | 675 | qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT); |
675 | if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) | 676 | if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) |
@@ -683,8 +684,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
683 | msl = pip->neighbormtu_mastersmsl & 0xF; | 684 | msl = pip->neighbormtu_mastersmsl & 0xF; |
684 | /* Must be a valid unicast LID address. */ | 685 | /* Must be a valid unicast LID address. */ |
685 | if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE) | 686 | if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE) |
686 | goto err; | 687 | smp->status |= IB_SMP_INVALID_FIELD; |
687 | if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { | 688 | else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { |
688 | spin_lock_irqsave(&ibp->lock, flags); | 689 | spin_lock_irqsave(&ibp->lock, flags); |
689 | if (ibp->sm_ah) { | 690 | if (ibp->sm_ah) { |
690 | if (smlid != ibp->sm_lid) | 691 | if (smlid != ibp->sm_lid) |
@@ -705,10 +706,11 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
705 | lwe = pip->link_width_enabled; | 706 | lwe = pip->link_width_enabled; |
706 | if (lwe) { | 707 | if (lwe) { |
707 | if (lwe == 0xFF) | 708 | if (lwe == 0xFF) |
708 | lwe = ppd->link_width_supported; | 709 | set_link_width_enabled(ppd, ppd->link_width_supported); |
709 | else if (lwe >= 16 || (lwe & ~ppd->link_width_supported)) | 710 | else if (lwe >= 16 || (lwe & ~ppd->link_width_supported)) |
710 | goto err; | 711 | smp->status |= IB_SMP_INVALID_FIELD; |
711 | set_link_width_enabled(ppd, lwe); | 712 | else if (lwe != ppd->link_width_enabled) |
713 | set_link_width_enabled(ppd, lwe); | ||
712 | } | 714 | } |
713 | 715 | ||
714 | lse = pip->linkspeedactive_enabled & 0xF; | 716 | lse = pip->linkspeedactive_enabled & 0xF; |
@@ -719,10 +721,12 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
719 | * speeds. | 721 | * speeds. |
720 | */ | 722 | */ |
721 | if (lse == 15) | 723 | if (lse == 15) |
722 | lse = ppd->link_speed_supported; | 724 | set_link_speed_enabled(ppd, |
725 | ppd->link_speed_supported); | ||
723 | else if (lse >= 8 || (lse & ~ppd->link_speed_supported)) | 726 | else if (lse >= 8 || (lse & ~ppd->link_speed_supported)) |
724 | goto err; | 727 | smp->status |= IB_SMP_INVALID_FIELD; |
725 | set_link_speed_enabled(ppd, lse); | 728 | else if (lse != ppd->link_speed_enabled) |
729 | set_link_speed_enabled(ppd, lse); | ||
726 | } | 730 | } |
727 | 731 | ||
728 | /* Set link down default state. */ | 732 | /* Set link down default state. */ |
@@ -738,7 +742,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
738 | IB_LINKINITCMD_POLL); | 742 | IB_LINKINITCMD_POLL); |
739 | break; | 743 | break; |
740 | default: | 744 | default: |
741 | goto err; | 745 | smp->status |= IB_SMP_INVALID_FIELD; |
742 | } | 746 | } |
743 | 747 | ||
744 | ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6; | 748 | ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6; |
@@ -748,15 +752,17 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
748 | 752 | ||
749 | mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); | 753 | mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); |
750 | if (mtu == -1) | 754 | if (mtu == -1) |
751 | goto err; | 755 | smp->status |= IB_SMP_INVALID_FIELD; |
752 | qib_set_mtu(ppd, mtu); | 756 | else |
757 | qib_set_mtu(ppd, mtu); | ||
753 | 758 | ||
754 | /* Set operational VLs */ | 759 | /* Set operational VLs */ |
755 | vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF; | 760 | vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF; |
756 | if (vls) { | 761 | if (vls) { |
757 | if (vls > ppd->vls_supported) | 762 | if (vls > ppd->vls_supported) |
758 | goto err; | 763 | smp->status |= IB_SMP_INVALID_FIELD; |
759 | (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls); | 764 | else |
765 | (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls); | ||
760 | } | 766 | } |
761 | 767 | ||
762 | if (pip->mkey_violations == 0) | 768 | if (pip->mkey_violations == 0) |
@@ -770,10 +776,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
770 | 776 | ||
771 | ore = pip->localphyerrors_overrunerrors; | 777 | ore = pip->localphyerrors_overrunerrors; |
772 | if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) | 778 | if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) |
773 | goto err; | 779 | smp->status |= IB_SMP_INVALID_FIELD; |
774 | 780 | ||
775 | if (set_overrunthreshold(ppd, (ore & 0xF))) | 781 | if (set_overrunthreshold(ppd, (ore & 0xF))) |
776 | goto err; | 782 | smp->status |= IB_SMP_INVALID_FIELD; |
777 | 783 | ||
778 | ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; | 784 | ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; |
779 | 785 | ||
@@ -792,7 +798,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
792 | state = pip->linkspeed_portstate & 0xF; | 798 | state = pip->linkspeed_portstate & 0xF; |
793 | lstate = (pip->portphysstate_linkdown >> 4) & 0xF; | 799 | lstate = (pip->portphysstate_linkdown >> 4) & 0xF; |
794 | if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) | 800 | if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) |
795 | goto err; | 801 | smp->status |= IB_SMP_INVALID_FIELD; |
796 | 802 | ||
797 | /* | 803 | /* |
798 | * Only state changes of DOWN, ARM, and ACTIVE are valid | 804 | * Only state changes of DOWN, ARM, and ACTIVE are valid |
@@ -812,8 +818,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
812 | lstate = QIB_IB_LINKDOWN; | 818 | lstate = QIB_IB_LINKDOWN; |
813 | else if (lstate == 3) | 819 | else if (lstate == 3) |
814 | lstate = QIB_IB_LINKDOWN_DISABLE; | 820 | lstate = QIB_IB_LINKDOWN_DISABLE; |
815 | else | 821 | else { |
816 | goto err; | 822 | smp->status |= IB_SMP_INVALID_FIELD; |
823 | break; | ||
824 | } | ||
817 | spin_lock_irqsave(&ppd->lflags_lock, flags); | 825 | spin_lock_irqsave(&ppd->lflags_lock, flags); |
818 | ppd->lflags &= ~QIBL_LINKV; | 826 | ppd->lflags &= ~QIBL_LINKV; |
819 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | 827 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); |
@@ -835,8 +843,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
835 | qib_set_linkstate(ppd, QIB_IB_LINKACTIVE); | 843 | qib_set_linkstate(ppd, QIB_IB_LINKACTIVE); |
836 | break; | 844 | break; |
837 | default: | 845 | default: |
838 | /* XXX We have already partially updated our state! */ | 846 | smp->status |= IB_SMP_INVALID_FIELD; |
839 | goto err; | ||
840 | } | 847 | } |
841 | 848 | ||
842 | ret = subn_get_portinfo(smp, ibdev, port); | 849 | ret = subn_get_portinfo(smp, ibdev, port); |
@@ -844,7 +851,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
844 | if (clientrereg) | 851 | if (clientrereg) |
845 | pip->clientrereg_resv_subnetto |= 0x80; | 852 | pip->clientrereg_resv_subnetto |= 0x80; |
846 | 853 | ||
847 | goto done; | 854 | goto get_only; |
848 | 855 | ||
849 | err: | 856 | err: |
850 | smp->status |= IB_SMP_INVALID_FIELD; | 857 | smp->status |= IB_SMP_INVALID_FIELD; |
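subn_set_portinfo() now records each invalid field in smp->status with IB_SMP_INVALID_FIELD and keeps going, instead of bailing to err and leaving later fields unprocessed. A sketch of that accumulate-and-continue pattern; the constant and the field checks are illustrative:

    /* Accumulate a status bit per bad field rather than aborting on
     * the first one, so valid fields in the same Set still apply. */
    #include <stdio.h>

    #define IB_SMP_INVALID_FIELD 0x1c   /* illustrative value */

    struct smp { unsigned short status; };

    static void set_portinfo(struct smp *smp, int lid, int mtu)
    {
        if (lid == 0)                   /* must be a valid unicast LID */
            smp->status |= IB_SMP_INVALID_FIELD;
        else
            printf("lid set to %d\n", lid);

        if (mtu < 0)                    /* a bad MTU no longer aborts */
            smp->status |= IB_SMP_INVALID_FIELD;
        else
            printf("mtu set to %d\n", mtu);
    }

    int main(void)
    {
        struct smp smp = { 0 };
        set_portinfo(&smp, 0, 2048);    /* bad LID, good MTU */
        printf("status=0x%x, MTU still applied\n", smp.status);
        return 0;
    }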
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h index 147aff9117d7..7840ab593bcf 100644 --- a/drivers/infiniband/hw/qib/qib_mad.h +++ b/drivers/infiniband/hw/qib/qib_mad.h | |||
@@ -73,7 +73,7 @@ struct ib_mad_notice_attr { | |||
73 | 73 | ||
74 | struct { | 74 | struct { |
75 | __be16 reserved; | 75 | __be16 reserved; |
76 | __be16 lid; /* LID where change occured */ | 76 | __be16 lid; /* LID where change occurred */ |
77 | u8 reserved2; | 77 | u8 reserved2; |
78 | u8 local_changes; /* low bit - local changes */ | 78 | u8 local_changes; /* low bit - local changes */ |
79 | __be32 new_cap_mask; /* new capability mask */ | 79 | __be32 new_cap_mask; /* new capability mask */ |
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c index 5f95f0f6385d..08944e2ee334 100644 --- a/drivers/infiniband/hw/qib/qib_mr.c +++ b/drivers/infiniband/hw/qib/qib_mr.c | |||
@@ -39,7 +39,6 @@ | |||
39 | /* Fast memory region */ | 39 | /* Fast memory region */ |
40 | struct qib_fmr { | 40 | struct qib_fmr { |
41 | struct ib_fmr ibfmr; | 41 | struct ib_fmr ibfmr; |
42 | u8 page_shift; | ||
43 | struct qib_mregion mr; /* must be last */ | 42 | struct qib_mregion mr; /* must be last */ |
44 | }; | 43 | }; |
45 | 44 | ||
@@ -107,6 +106,7 @@ static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table) | |||
107 | goto bail; | 106 | goto bail; |
108 | } | 107 | } |
109 | mr->mr.mapsz = m; | 108 | mr->mr.mapsz = m; |
109 | mr->mr.page_shift = 0; | ||
110 | mr->mr.max_segs = count; | 110 | mr->mr.max_segs = count; |
111 | 111 | ||
112 | /* | 112 | /* |
@@ -231,6 +231,8 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
231 | mr->mr.access_flags = mr_access_flags; | 231 | mr->mr.access_flags = mr_access_flags; |
232 | mr->umem = umem; | 232 | mr->umem = umem; |
233 | 233 | ||
234 | if (is_power_of_2(umem->page_size)) | ||
235 | mr->mr.page_shift = ilog2(umem->page_size); | ||
234 | m = 0; | 236 | m = 0; |
235 | n = 0; | 237 | n = 0; |
236 | list_for_each_entry(chunk, &umem->chunk_list, list) { | 238 | list_for_each_entry(chunk, &umem->chunk_list, list) { |
@@ -390,7 +392,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | |||
390 | fmr->mr.offset = 0; | 392 | fmr->mr.offset = 0; |
391 | fmr->mr.access_flags = mr_access_flags; | 393 | fmr->mr.access_flags = mr_access_flags; |
392 | fmr->mr.max_segs = fmr_attr->max_pages; | 394 | fmr->mr.max_segs = fmr_attr->max_pages; |
393 | fmr->page_shift = fmr_attr->page_shift; | 395 | fmr->mr.page_shift = fmr_attr->page_shift; |
394 | 396 | ||
395 | atomic_set(&fmr->mr.refcount, 0); | 397 | atomic_set(&fmr->mr.refcount, 0); |
396 | ret = &fmr->ibfmr; | 398 | ret = &fmr->ibfmr; |
@@ -437,7 +439,7 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, | |||
437 | spin_lock_irqsave(&rkt->lock, flags); | 439 | spin_lock_irqsave(&rkt->lock, flags); |
438 | fmr->mr.user_base = iova; | 440 | fmr->mr.user_base = iova; |
439 | fmr->mr.iova = iova; | 441 | fmr->mr.iova = iova; |
440 | ps = 1 << fmr->page_shift; | 442 | ps = 1 << fmr->mr.page_shift; |
441 | fmr->mr.length = list_len * ps; | 443 | fmr->mr.length = list_len * ps; |
442 | m = 0; | 444 | m = 0; |
443 | n = 0; | 445 | n = 0; |
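With page_shift moved from qib_fmr into the generic qib_mregion, both user registrations (when umem->page_size is a power of two) and FMRs record it, and region length falls out as list_len << page_shift. A small sketch; ilog2u() stands in for the kernel's ilog2():

    /* Record the page shift once at registration so later lookups can
     * use shift arithmetic; values are illustrative. */
    #include <stdio.h>

    static int is_power_of_2(unsigned long n)
    {
        return n && !(n & (n - 1));
    }

    static unsigned ilog2u(unsigned long n) /* stand-in for ilog2() */
    {
        unsigned s = 0;
        while (n >>= 1)
            s++;
        return s;
    }

    int main(void)
    {
        unsigned long page_size = 4096;
        unsigned long list_len = 32;        /* mapped pages */
        unsigned page_shift = 0;

        if (is_power_of_2(page_size))
            page_shift = ilog2u(page_size);

        printf("page_shift=%u, region length=%lu\n",
               page_shift, list_len * (1UL << page_shift));
        return 0;
    }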
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 7fa6e5592630..891cc2ff5f00 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c | |||
@@ -103,16 +103,20 @@ int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
103 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 103 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
104 | } else | 104 | } else |
105 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 105 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
106 | if (ret) | 106 | if (ret) { |
107 | qib_early_err(&pdev->dev, | 107 | qib_early_err(&pdev->dev, |
108 | "Unable to set DMA consistent mask: %d\n", ret); | 108 | "Unable to set DMA consistent mask: %d\n", ret); |
109 | goto bail; | ||
110 | } | ||
109 | 111 | ||
110 | pci_set_master(pdev); | 112 | pci_set_master(pdev); |
111 | ret = pci_enable_pcie_error_reporting(pdev); | 113 | ret = pci_enable_pcie_error_reporting(pdev); |
112 | if (ret) | 114 | if (ret) { |
113 | qib_early_err(&pdev->dev, | 115 | qib_early_err(&pdev->dev, |
114 | "Unable to enable pcie error reporting: %d\n", | 116 | "Unable to enable pcie error reporting: %d\n", |
115 | ret); | 117 | ret); |
118 | ret = 0; | ||
119 | } | ||
116 | goto done; | 120 | goto done; |
117 | 121 | ||
118 | bail: | 122 | bail: |
@@ -522,11 +526,8 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd) | |||
522 | */ | 526 | */ |
523 | devid = parent->device; | 527 | devid = parent->device; |
524 | if (devid >= 0x25e2 && devid <= 0x25fa) { | 528 | if (devid >= 0x25e2 && devid <= 0x25fa) { |
525 | u8 rev; | ||
526 | |||
527 | /* 5000 P/V/X/Z */ | 529 | /* 5000 P/V/X/Z */ |
528 | pci_read_config_byte(parent, PCI_REVISION_ID, &rev); | 530 | if (parent->revision <= 0xb2) |
529 | if (rev <= 0xb2) | ||
530 | bits = 1U << 10; | 531 | bits = 1U << 10; |
531 | else | 532 | else |
532 | bits = 7U << 10; | 533 | bits = 7U << 10; |
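qib_pcie_init() now separates fatal from tolerable setup failures: a DMA-mask error bails out, while a PCIe-AER enable failure is only logged and the return code cleared so probing continues. A sketch of that pattern with stand-in helpers:

    /* Fatal vs. non-fatal init errors: the first failure aborts, the
     * second is reported and forgiven.  Both helpers fake failure. */
    #include <stdio.h>

    static int set_dma_mask(void) { return -1; }  /* pretend failure */
    static int enable_aer(void)   { return -5; }  /* pretend failure */

    static int pcie_init(void)
    {
        int ret = set_dma_mask();
        if (ret) {
            fprintf(stderr, "Unable to set DMA mask: %d\n", ret);
            return ret;             /* fatal: device cannot DMA */
        }

        ret = enable_aer();
        if (ret) {
            fprintf(stderr, "Unable to enable AER: %d\n", ret);
            ret = 0;                /* non-fatal: keep probing */
        }
        return ret;
    }

    int main(void)
    {
        printf("pcie_init() -> %d\n", pcie_init());
        return 0;
    }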
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 6c39851d2ded..e16751f8639e 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
@@ -48,13 +48,12 @@ static inline unsigned mk_qpn(struct qib_qpn_table *qpt, | |||
48 | 48 | ||
49 | static inline unsigned find_next_offset(struct qib_qpn_table *qpt, | 49 | static inline unsigned find_next_offset(struct qib_qpn_table *qpt, |
50 | struct qpn_map *map, unsigned off, | 50 | struct qpn_map *map, unsigned off, |
51 | unsigned r) | 51 | unsigned n) |
52 | { | 52 | { |
53 | if (qpt->mask) { | 53 | if (qpt->mask) { |
54 | off++; | 54 | off++; |
55 | if ((off & qpt->mask) >> 1 != r) | 55 | if (((off & qpt->mask) >> 1) >= n) |
56 | off = ((off & qpt->mask) ? | 56 | off = (off | qpt->mask) + 2; |
57 | (off | qpt->mask) + 1 : off) | (r << 1); | ||
58 | } else | 57 | } else |
59 | off = find_next_zero_bit(map->page, BITS_PER_PAGE, off); | 58 | off = find_next_zero_bit(map->page, BITS_PER_PAGE, off); |
60 | return off; | 59 | return off; |
@@ -123,7 +122,6 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, | |||
123 | u32 i, offset, max_scan, qpn; | 122 | u32 i, offset, max_scan, qpn; |
124 | struct qpn_map *map; | 123 | struct qpn_map *map; |
125 | u32 ret; | 124 | u32 ret; |
126 | int r; | ||
127 | 125 | ||
128 | if (type == IB_QPT_SMI || type == IB_QPT_GSI) { | 126 | if (type == IB_QPT_SMI || type == IB_QPT_GSI) { |
129 | unsigned n; | 127 | unsigned n; |
@@ -139,15 +137,11 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, | |||
139 | goto bail; | 137 | goto bail; |
140 | } | 138 | } |
141 | 139 | ||
142 | r = smp_processor_id(); | 140 | qpn = qpt->last + 2; |
143 | if (r >= dd->n_krcv_queues) | ||
144 | r %= dd->n_krcv_queues; | ||
145 | qpn = qpt->last + 1; | ||
146 | if (qpn >= QPN_MAX) | 141 | if (qpn >= QPN_MAX) |
147 | qpn = 2; | 142 | qpn = 2; |
148 | if (qpt->mask && ((qpn & qpt->mask) >> 1) != r) | 143 | if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues) |
149 | qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) | | 144 | qpn = (qpn | qpt->mask) + 2; |
150 | (r << 1); | ||
151 | offset = qpn & BITS_PER_PAGE_MASK; | 145 | offset = qpn & BITS_PER_PAGE_MASK; |
152 | map = &qpt->map[qpn / BITS_PER_PAGE]; | 146 | map = &qpt->map[qpn / BITS_PER_PAGE]; |
153 | max_scan = qpt->nmaps - !offset; | 147 | max_scan = qpt->nmaps - !offset; |
@@ -163,7 +157,8 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, | |||
163 | ret = qpn; | 157 | ret = qpn; |
164 | goto bail; | 158 | goto bail; |
165 | } | 159 | } |
166 | offset = find_next_offset(qpt, map, offset, r); | 160 | offset = find_next_offset(qpt, map, offset, |
161 | dd->n_krcv_queues); | ||
167 | qpn = mk_qpn(qpt, map, offset); | 162 | qpn = mk_qpn(qpt, map, offset); |
168 | /* | 163 | /* |
169 | * This test differs from alloc_pidmap(). | 164 | * This test differs from alloc_pidmap(). |
@@ -183,13 +178,13 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, | |||
183 | if (qpt->nmaps == QPNMAP_ENTRIES) | 178 | if (qpt->nmaps == QPNMAP_ENTRIES) |
184 | break; | 179 | break; |
185 | map = &qpt->map[qpt->nmaps++]; | 180 | map = &qpt->map[qpt->nmaps++]; |
186 | offset = qpt->mask ? (r << 1) : 0; | 181 | offset = 0; |
187 | } else if (map < &qpt->map[qpt->nmaps]) { | 182 | } else if (map < &qpt->map[qpt->nmaps]) { |
188 | ++map; | 183 | ++map; |
189 | offset = qpt->mask ? (r << 1) : 0; | 184 | offset = 0; |
190 | } else { | 185 | } else { |
191 | map = &qpt->map[0]; | 186 | map = &qpt->map[0]; |
192 | offset = qpt->mask ? (r << 1) : 2; | 187 | offset = 2; |
193 | } | 188 | } |
194 | qpn = mk_qpn(qpt, map, offset); | 189 | qpn = mk_qpn(qpt, map, offset); |
195 | } | 190 | } |
@@ -468,6 +463,10 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) | |||
468 | qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); | 463 | qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); |
469 | del_timer(&qp->s_timer); | 464 | del_timer(&qp->s_timer); |
470 | } | 465 | } |
466 | |||
467 | if (qp->s_flags & QIB_S_ANY_WAIT_SEND) | ||
468 | qp->s_flags &= ~QIB_S_ANY_WAIT_SEND; | ||
469 | |||
471 | spin_lock(&dev->pending_lock); | 470 | spin_lock(&dev->pending_lock); |
472 | if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) { | 471 | if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) { |
473 | qp->s_flags &= ~QIB_S_ANY_WAIT_IO; | 472 | qp->s_flags &= ~QIB_S_ANY_WAIT_IO; |
@@ -1061,7 +1060,6 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd, | |||
1061 | } | 1060 | } |
1062 | qp->ibqp.qp_num = err; | 1061 | qp->ibqp.qp_num = err; |
1063 | qp->port_num = init_attr->port_num; | 1062 | qp->port_num = init_attr->port_num; |
1064 | qp->processor_id = smp_processor_id(); | ||
1065 | qib_reset_qp(qp, init_attr->qp_type); | 1063 | qib_reset_qp(qp, init_attr->qp_type); |
1066 | break; | 1064 | break; |
1067 | 1065 | ||
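QPN allocation no longer stripes by the allocating CPU: find_next_offset() just skips any QPN whose masked bits would select a receive queue at or beyond n_krcv_queues, rounding up past the whole masked range. A sketch of that stepping; the mask and queue count are illustrative:

    /* Step to the next candidate QPN offset, skipping offsets that map
     * to a nonexistent kernel receive queue (mirrors the new
     * find_next_offset() arithmetic). */
    #include <stdio.h>

    static unsigned next_off(unsigned off, unsigned mask, unsigned nq)
    {
        off++;
        if (mask && ((off & mask) >> 1) >= nq)
            off = (off | mask) + 2;     /* jump past the masked range */
        return off;
    }

    int main(void)
    {
        unsigned mask = 0xe;            /* 3 queue-select bits, say */
        unsigned nq = 3;                /* only queues 0..2 exist */
        unsigned off = 0;
        int i;

        for (i = 0; i < 8; i++) {
            off = next_off(off, mask, nq);
            printf("offset %2u -> rcv queue %u\n", off, (off & mask) >> 1);
        }
        return 0;
    }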
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c index 35b3604b691d..3374a52232c1 100644 --- a/drivers/infiniband/hw/qib/qib_qsfp.c +++ b/drivers/infiniband/hw/qib/qib_qsfp.c | |||
@@ -485,7 +485,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd, | |||
485 | goto bail; | 485 | goto bail; |
486 | /* We see a module, but it may be unwise to look yet. Just schedule */ | 486 | /* We see a module, but it may be unwise to look yet. Just schedule */ |
487 | qd->t_insert = get_jiffies_64(); | 487 | qd->t_insert = get_jiffies_64(); |
488 | schedule_work(&qd->work); | 488 | queue_work(ib_wq, &qd->work); |
489 | bail: | 489 | bail: |
490 | return; | 490 | return; |
491 | } | 491 | } |
@@ -493,10 +493,9 @@ bail: | |||
493 | void qib_qsfp_deinit(struct qib_qsfp_data *qd) | 493 | void qib_qsfp_deinit(struct qib_qsfp_data *qd) |
494 | { | 494 | { |
495 | /* | 495 | /* |
496 | * There is nothing to do here for now. our | 496 | * There is nothing to do here for now. Our work is scheduled |
497 | * work is scheduled with schedule_work(), and | 497 | * with queue_work(), and flush_workqueue() from remove_one |
498 | * flush_scheduled_work() from remove_one will | 498 | * will block until all work set up with queue_work() |
499 | * block until all work ssetup with schedule_work() | ||
500 | * completes. | 499 | * completes. |
501 | */ | 500 | */ |
502 | } | 501 | } |
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h index 19b527bafd57..c109bbdc90ac 100644 --- a/drivers/infiniband/hw/qib/qib_qsfp.h +++ b/drivers/infiniband/hw/qib/qib_qsfp.h | |||
@@ -79,6 +79,8 @@ | |||
79 | extern const char *const qib_qsfp_devtech[16]; | 79 | extern const char *const qib_qsfp_devtech[16]; |
80 | /* Active Equalization includes fiber, copper full EQ, and copper near Eq */ | 80 | /* Active Equalization includes fiber, copper full EQ, and copper near Eq */ |
81 | #define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1) | 81 | #define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1) |
82 | /* Active Equalization includes fiber, copper full EQ, and copper far Eq */ | ||
83 | #define QSFP_IS_ACTIVE_FAR(tech) ((0x32FF >> ((tech) >> 4)) & 1) | ||
82 | /* Attenuation should be valid for copper other than full/near Eq */ | 84 | /* Attenuation should be valid for copper other than full/near Eq */ |
83 | #define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1) | 85 | #define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1) |
84 | /* Length is only valid if technology is "copper" */ | 86 | /* Length is only valid if technology is "copper" */ |
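These QSFP_* macros classify a module by using the technology byte's high nibble as an index into a 16-entry truth table packed into a single constant, so each test is one shift and one mask. A small demo that evaluates both "active" tables over all sixteen nibbles:

    /* The packed-truth-table trick behind QSFP_IS_ACTIVE and the new
     * QSFP_IS_ACTIVE_FAR: bit N of the constant answers for tech
     * nibble N. */
    #include <stdio.h>

    #define QSFP_IS_ACTIVE(tech)     ((0xA2FF >> ((tech) >> 4)) & 1)
    #define QSFP_IS_ACTIVE_FAR(tech) ((0x32FF >> ((tech) >> 4)) & 1)

    int main(void)
    {
        unsigned nibble;

        for (nibble = 0; nibble < 16; nibble++) {
            unsigned tech = nibble << 4;  /* tech code, low nibble 0 */
            printf("tech 0x%02x: active=%u active_far=%u\n",
                   tech, QSFP_IS_ACTIVE(tech), QSFP_IS_ACTIVE_FAR(tech));
        }
        return 0;
    }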
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index a0931119bd78..eca0c41f1226 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
@@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) | |||
1005 | * there are still requests that haven't been acked. | 1005 | * there are still requests that haven't been acked. |
1006 | */ | 1006 | */ |
1007 | if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && | 1007 | if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && |
1008 | !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN))) | 1008 | !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) && |
1009 | (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
1009 | start_timer(qp); | 1010 | start_timer(qp); |
1010 | 1011 | ||
1011 | while (qp->s_last != qp->s_acked) { | 1012 | while (qp->s_last != qp->s_acked) { |
@@ -1407,6 +1408,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, | |||
1407 | struct qib_ctxtdata *rcd) | 1408 | struct qib_ctxtdata *rcd) |
1408 | { | 1409 | { |
1409 | struct qib_swqe *wqe; | 1410 | struct qib_swqe *wqe; |
1411 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1410 | enum ib_wc_status status; | 1412 | enum ib_wc_status status; |
1411 | unsigned long flags; | 1413 | unsigned long flags; |
1412 | int diff; | 1414 | int diff; |
@@ -1414,7 +1416,32 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, | |||
1414 | u32 aeth; | 1416 | u32 aeth; |
1415 | u64 val; | 1417 | u64 val; |
1416 | 1418 | ||
1419 | if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) { | ||
1420 | /* | ||
1421 | * If ACK'd PSN on SDMA busy list try to make progress to | ||
1422 | * reclaim SDMA credits. | ||
1423 | */ | ||
1424 | if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) && | ||
1425 | (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) { | ||
1426 | |||
1427 | /* | ||
1428 | * If send tasklet not running attempt to progress | ||
1429 | * SDMA queue. | ||
1430 | */ | ||
1431 | if (!(qp->s_flags & QIB_S_BUSY)) { | ||
1432 | /* Acquire SDMA Lock */ | ||
1433 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
1434 | /* Invoke sdma make progress */ | ||
1435 | qib_sdma_make_progress(ppd); | ||
1436 | /* Release SDMA Lock */ | ||
1437 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
1438 | } | ||
1439 | } | ||
1440 | } | ||
1441 | |||
1417 | spin_lock_irqsave(&qp->s_lock, flags); | 1442 | spin_lock_irqsave(&qp->s_lock, flags); |
1443 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
1444 | goto ack_done; | ||
1418 | 1445 | ||
1419 | /* Ignore invalid responses. */ | 1446 | /* Ignore invalid responses. */ |
1420 | if (qib_cmp24(psn, qp->s_next_psn) >= 0) | 1447 | if (qib_cmp24(psn, qp->s_next_psn) >= 0) |
@@ -2068,7 +2095,10 @@ send_last: | |||
2068 | goto nack_op_err; | 2095 | goto nack_op_err; |
2069 | if (!ret) | 2096 | if (!ret) |
2070 | goto rnr_nak; | 2097 | goto rnr_nak; |
2071 | goto send_last_imm; | 2098 | wc.ex.imm_data = ohdr->u.rc.imm_data; |
2099 | hdrsize += 4; | ||
2100 | wc.wc_flags = IB_WC_WITH_IMM; | ||
2101 | goto send_last; | ||
2072 | 2102 | ||
2073 | case OP(RDMA_READ_REQUEST): { | 2103 | case OP(RDMA_READ_REQUEST): { |
2074 | struct qib_ack_entry *e; | 2104 | struct qib_ack_entry *e; |
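The new qib_rc_rcv_resp() prologue compares PSNs with qib_cmp24() to decide whether the ACKed PSN has reached the sending window before nudging the SDMA queue. A sketch of serial-number comparison on a 24-bit circular space; this formulation is an assumption for illustration, not a copy of the driver's helper:

    /* Sign-extend the 24-bit difference so ordering survives wraparound
     * (relies on arithmetic right shift of signed values, which common
     * compilers provide). */
    #include <stdint.h>
    #include <stdio.h>

    static int cmp24(uint32_t a, uint32_t b)
    {
        return ((int32_t)((a - b) << 8)) >> 8;
    }

    int main(void)
    {
        printf("%d\n", cmp24(5, 3));               /* > 0: a after b  */
        printf("%d\n", cmp24(3, 5));               /* < 0: a before b */
        printf("%d\n", cmp24(0x000001, 0xFFFFFE)); /* > 0 across wrap */
        return 0;
    }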
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c index 6f31ca5039db..ddde72e11edb 100644 --- a/drivers/infiniband/hw/qib/qib_twsi.c +++ b/drivers/infiniband/hw/qib/qib_twsi.c | |||
@@ -41,7 +41,7 @@ | |||
41 | * QLogic_IB "Two Wire Serial Interface" driver. | 41 | * QLogic_IB "Two Wire Serial Interface" driver. |
42 | * Originally written for a not-quite-i2c serial eeprom, which is | 42 | * Originally written for a not-quite-i2c serial eeprom, which is |
43 | * still used on some supported boards. Later boards have added a | 43 | * still used on some supported boards. Later boards have added a |
44 | * variety of other uses, most board-specific, so teh bit-boffing | 44 | * variety of other uses, most board-specific, so the bit-boffing |
45 | * part has been split off to this file, while the other parts | 45 | * part has been split off to this file, while the other parts |
46 | * have been moved to chip-specific files. | 46 | * have been moved to chip-specific files. |
47 | * | 47 | * |
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index b9c8b6346c1b..32ccf3c824ca 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c | |||
@@ -457,8 +457,10 @@ rdma_first: | |||
457 | } | 457 | } |
458 | if (opcode == OP(RDMA_WRITE_ONLY)) | 458 | if (opcode == OP(RDMA_WRITE_ONLY)) |
459 | goto rdma_last; | 459 | goto rdma_last; |
460 | else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) | 460 | else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) { |
461 | wc.ex.imm_data = ohdr->u.rc.imm_data; | ||
461 | goto rdma_last_imm; | 462 | goto rdma_last_imm; |
463 | } | ||
462 | /* FALLTHROUGH */ | 464 | /* FALLTHROUGH */ |
463 | case OP(RDMA_WRITE_MIDDLE): | 465 | case OP(RDMA_WRITE_MIDDLE): |
464 | /* Check for invalid length PMTU or posted rwqe len. */ | 466 | /* Check for invalid length PMTU or posted rwqe len. */ |
@@ -471,8 +473,8 @@ rdma_first: | |||
471 | break; | 473 | break; |
472 | 474 | ||
473 | case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): | 475 | case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): |
474 | rdma_last_imm: | ||
475 | wc.ex.imm_data = ohdr->u.imm_data; | 476 | wc.ex.imm_data = ohdr->u.imm_data; |
477 | rdma_last_imm: | ||
476 | hdrsize += 4; | 478 | hdrsize += 4; |
477 | wc.wc_flags = IB_WC_WITH_IMM; | 479 | wc.wc_flags = IB_WC_WITH_IMM; |
478 | 480 | ||
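Moving the wc.ex.imm_data assignment above the rdma_last_imm label matters because the two immediate-carrying opcodes read different header union members; each path must load its own field before jumping into the shared tail. A sketch of why the order matters; the header layout is illustrative:

    /* Two opcodes share one completion tail but carry the immediate in
     * different union members, so each loads it before the goto. */
    #include <stdio.h>

    union hdr {
        unsigned imm_data;                      /* plain layout */
        struct { unsigned rsvd, imm_data; } rc; /* RC-style layout */
    };

    int main(void)
    {
        union hdr u = { .rc = { 0, 0xabcd } };
        int write_only_with_imm = 1;    /* pretend opcode */
        unsigned imm;

        if (write_only_with_imm) {
            imm = u.rc.imm_data;        /* load from RC header first */
            goto rdma_last_imm;
        }
        imm = u.imm_data;               /* WRITE_LAST+IMMEDIATE path */
    rdma_last_imm:
        printf("imm_data = 0x%x\n", imm);
        return 0;
    }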
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index e1b3da2a1f85..828609fa4d28 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c | |||
@@ -116,7 +116,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) | |||
116 | } | 116 | } |
117 | 117 | ||
118 | /* | 118 | /* |
119 | * A GRH is expected to preceed the data even if not | 119 | * A GRH is expected to precede the data even if not |
120 | * present on the wire. | 120 | * present on the wire. |
121 | */ | 121 | */ |
122 | length = swqe->length; | 122 | length = swqe->length; |
@@ -445,13 +445,14 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
445 | qkey = be32_to_cpu(ohdr->u.ud.deth[0]); | 445 | qkey = be32_to_cpu(ohdr->u.ud.deth[0]); |
446 | src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK; | 446 | src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK; |
447 | 447 | ||
448 | /* Get the number of bytes the message was padded by. */ | 448 | /* |
449 | * Get the number of bytes the message was padded by | ||
450 | * and drop incomplete packets. | ||
451 | */ | ||
449 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; | 452 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; |
450 | if (unlikely(tlen < (hdrsize + pad + 4))) { | 453 | if (unlikely(tlen < (hdrsize + pad + 4))) |
451 | /* Drop incomplete packets. */ | 454 | goto drop; |
452 | ibp->n_pkt_drops++; | 455 | |
453 | goto bail; | ||
454 | } | ||
455 | tlen -= hdrsize + pad + 4; | 456 | tlen -= hdrsize + pad + 4; |
456 | 457 | ||
457 | /* | 458 | /* |
@@ -460,10 +461,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
460 | */ | 461 | */ |
461 | if (qp->ibqp.qp_num) { | 462 | if (qp->ibqp.qp_num) { |
462 | if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE || | 463 | if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE || |
463 | hdr->lrh[3] == IB_LID_PERMISSIVE)) { | 464 | hdr->lrh[3] == IB_LID_PERMISSIVE)) |
464 | ibp->n_pkt_drops++; | 465 | goto drop; |
465 | goto bail; | ||
466 | } | ||
467 | if (qp->ibqp.qp_num > 1) { | 466 | if (qp->ibqp.qp_num > 1) { |
468 | u16 pkey1, pkey2; | 467 | u16 pkey1, pkey2; |
469 | 468 | ||
@@ -476,7 +475,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
476 | 0xF, | 475 | 0xF, |
477 | src_qp, qp->ibqp.qp_num, | 476 | src_qp, qp->ibqp.qp_num, |
478 | hdr->lrh[3], hdr->lrh[1]); | 477 | hdr->lrh[3], hdr->lrh[1]); |
479 | goto bail; | 478 | return; |
480 | } | 479 | } |
481 | } | 480 | } |
482 | if (unlikely(qkey != qp->qkey)) { | 481 | if (unlikely(qkey != qp->qkey)) { |
@@ -484,30 +483,24 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
484 | (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, | 483 | (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, |
485 | src_qp, qp->ibqp.qp_num, | 484 | src_qp, qp->ibqp.qp_num, |
486 | hdr->lrh[3], hdr->lrh[1]); | 485 | hdr->lrh[3], hdr->lrh[1]); |
487 | goto bail; | 486 | return; |
488 | } | 487 | } |
489 | /* Drop invalid MAD packets (see 13.5.3.1). */ | 488 | /* Drop invalid MAD packets (see 13.5.3.1). */ |
490 | if (unlikely(qp->ibqp.qp_num == 1 && | 489 | if (unlikely(qp->ibqp.qp_num == 1 && |
491 | (tlen != 256 || | 490 | (tlen != 256 || |
492 | (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) { | 491 | (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) |
493 | ibp->n_pkt_drops++; | 492 | goto drop; |
494 | goto bail; | ||
495 | } | ||
496 | } else { | 493 | } else { |
497 | struct ib_smp *smp; | 494 | struct ib_smp *smp; |
498 | 495 | ||
499 | /* Drop invalid MAD packets (see 13.5.3.1). */ | 496 | /* Drop invalid MAD packets (see 13.5.3.1). */ |
500 | if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) { | 497 | if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) |
501 | ibp->n_pkt_drops++; | 498 | goto drop; |
502 | goto bail; | ||
503 | } | ||
504 | smp = (struct ib_smp *) data; | 499 | smp = (struct ib_smp *) data; |
505 | if ((hdr->lrh[1] == IB_LID_PERMISSIVE || | 500 | if ((hdr->lrh[1] == IB_LID_PERMISSIVE || |
506 | hdr->lrh[3] == IB_LID_PERMISSIVE) && | 501 | hdr->lrh[3] == IB_LID_PERMISSIVE) && |
507 | smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { | 502 | smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) |
508 | ibp->n_pkt_drops++; | 503 | goto drop; |
509 | goto bail; | ||
510 | } | ||
511 | } | 504 | } |
512 | 505 | ||
513 | /* | 506 | /* |
@@ -519,17 +512,15 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
519 | opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { | 512 | opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { |
520 | wc.ex.imm_data = ohdr->u.ud.imm_data; | 513 | wc.ex.imm_data = ohdr->u.ud.imm_data; |
521 | wc.wc_flags = IB_WC_WITH_IMM; | 514 | wc.wc_flags = IB_WC_WITH_IMM; |
522 | hdrsize += sizeof(u32); | 515 | tlen -= sizeof(u32); |
523 | } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { | 516 | } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { |
524 | wc.ex.imm_data = 0; | 517 | wc.ex.imm_data = 0; |
525 | wc.wc_flags = 0; | 518 | wc.wc_flags = 0; |
526 | } else { | 519 | } else |
527 | ibp->n_pkt_drops++; | 520 | goto drop; |
528 | goto bail; | ||
529 | } | ||
530 | 521 | ||
531 | /* | 522 | /* |
532 | * A GRH is expected to preceed the data even if not | 523 | * A GRH is expected to precede the data even if not |
533 | * present on the wire. | 524 | * present on the wire. |
534 | */ | 525 | */ |
535 | wc.byte_len = tlen + sizeof(struct ib_grh); | 526 | wc.byte_len = tlen + sizeof(struct ib_grh); |
@@ -556,8 +547,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
556 | /* Silently drop packets which are too big. */ | 547 | /* Silently drop packets which are too big. */ |
557 | if (unlikely(wc.byte_len > qp->r_len)) { | 548 | if (unlikely(wc.byte_len > qp->r_len)) { |
558 | qp->r_flags |= QIB_R_REUSE_SGE; | 549 | qp->r_flags |= QIB_R_REUSE_SGE; |
559 | ibp->n_pkt_drops++; | 550 | goto drop; |
560 | return; | ||
561 | } | 551 | } |
562 | if (has_grh) { | 552 | if (has_grh) { |
563 | qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, | 553 | qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, |
@@ -594,5 +584,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
594 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | 584 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, |
595 | (ohdr->bth[0] & | 585 | (ohdr->bth[0] & |
596 | cpu_to_be32(IB_BTH_SOLICITED)) != 0); | 586 | cpu_to_be32(IB_BTH_SOLICITED)) != 0); |
597 | bail:; | 587 | return; |
588 | |||
589 | drop: | ||
590 | ibp->n_pkt_drops++; | ||
598 | } | 591 | } |
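Two things happen in the qib_ud.c hunks above. First, every silent-drop exit used to duplicate "ibp->n_pkt_drops++; goto bail;"; the patch funnels them all through a single drop: label at the end of qib_ud_rcv(), while errors already reported through qib_bad_pqkey() simply return. Second, the immediate-data accounting is fixed: by the time the opcode is examined, tlen has already had hdrsize + pad + 4 subtracted, so growing hdrsize afterwards no longer affected tlen and wc.byte_len over-counted by the 4-byte immediate word; subtracting sizeof(u32) from tlen directly removes the overcount. A condensed sketch of the consolidated error path (not the full function; names as in the driver):

    static void rcv_sketch(struct qib_ibport *ibp, u32 tlen, u32 hdrsize, u32 pad)
    {
            if (unlikely(tlen < hdrsize + pad + 4))  /* 4 bytes of ICRC */
                    goto drop;
            tlen -= hdrsize + pad + 4;
            /* ... validation, SGE copy, completion entry ... */
            return;
    drop:
            ibp->n_pkt_drops++;  /* one place to count silent drops */
    }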
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index d7a26c1d4f37..7689e49c13c9 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c | |||
@@ -51,8 +51,8 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages, | |||
51 | /* | 51 | /* |
52 | * Call with current->mm->mmap_sem held. | 52 | * Call with current->mm->mmap_sem held. |
53 | */ | 53 | */ |
54 | static int __get_user_pages(unsigned long start_page, size_t num_pages, | 54 | static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, |
55 | struct page **p, struct vm_area_struct **vma) | 55 | struct page **p, struct vm_area_struct **vma) |
56 | { | 56 | { |
57 | unsigned long lock_limit; | 57 | unsigned long lock_limit; |
58 | size_t got; | 58 | size_t got; |
@@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages, | |||
136 | 136 | ||
137 | down_write(¤t->mm->mmap_sem); | 137 | down_write(¤t->mm->mmap_sem); |
138 | 138 | ||
139 | ret = __get_user_pages(start_page, num_pages, p, NULL); | 139 | ret = __qib_get_user_pages(start_page, num_pages, p, NULL); |
140 | 140 | ||
141 | up_write(¤t->mm->mmap_sem); | 141 | up_write(¤t->mm->mmap_sem); |
142 | 142 | ||
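The rename in qib_user_pages.c is purely mechanical: the static helper gains a qib_ prefix, presumably to keep it distinct from the core mm code's __get_user_pages(), which became a visible kernel symbol around this time; a driver-local function with the identical name invites collisions and confusing backtraces. Prefixing driver statics with the driver name is the usual defensive pattern:

    /* driver-local helper; the qib_ prefix keeps it from shadowing
     * the kernel's own __get_user_pages() in mm */
    static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
                                    struct page **p, struct vm_area_struct **vma);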
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c index 4c19e06b5e85..82442085cbe6 100644 --- a/drivers/infiniband/hw/qib/qib_user_sdma.c +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c | |||
@@ -239,7 +239,7 @@ static int qib_user_sdma_num_pages(const struct iovec *iov) | |||
239 | } | 239 | } |
240 | 240 | ||
241 | /* | 241 | /* |
242 | * Truncate length to page boundry. | 242 | * Truncate length to page boundary. |
243 | */ | 243 | */ |
244 | static int qib_user_sdma_page_length(unsigned long addr, unsigned long len) | 244 | static int qib_user_sdma_page_length(unsigned long addr, unsigned long len) |
245 | { | 245 | { |
@@ -382,6 +382,7 @@ static void qib_user_sdma_free_pkt_list(struct device *dev, | |||
382 | 382 | ||
383 | kmem_cache_free(pq->pkt_slab, pkt); | 383 | kmem_cache_free(pq->pkt_slab, pkt); |
384 | } | 384 | } |
385 | INIT_LIST_HEAD(list); | ||
385 | } | 386 | } |
386 | 387 | ||
387 | /* | 388 | /* |
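The one-line addition to qib_user_sdma_free_pkt_list() reinitializes the list head once every packet on it has been freed. Without it the head still points at freed memory, so a second call on the same list (or any later reuse) walks stale pointers; after INIT_LIST_HEAD() the list is simply empty. A minimal sketch of the pattern, with a stand-in packet type and kfree() in place of the driver's slab cache:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct pkt {
            struct list_head list;
            /* payload ... */
    };

    static void free_pkt_list(struct list_head *list)
    {
            struct pkt *p, *next;

            list_for_each_entry_safe(p, next, list, list) {
                    list_del(&p->list);
                    kfree(p);
            }
            INIT_LIST_HEAD(list);  /* leave the list valid (empty) for reuse */
    }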
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index bd57c1273225..95e5b47223b3 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h | |||
@@ -301,6 +301,7 @@ struct qib_mregion { | |||
301 | int access_flags; | 301 | int access_flags; |
302 | u32 max_segs; /* number of qib_segs in all the arrays */ | 302 | u32 max_segs; /* number of qib_segs in all the arrays */ |
303 | u32 mapsz; /* size of the map array */ | 303 | u32 mapsz; /* size of the map array */ |
304 | u8 page_shift; /* 0 - non unform/non powerof2 sizes */ | ||
304 | atomic_t refcount; | 305 | atomic_t refcount; |
305 | struct qib_segarray *map[0]; /* the segments */ | 306 | struct qib_segarray *map[0]; /* the segments */ |
306 | }; | 307 | }; |
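The new page_shift member in qib_mregion records log2 of the page size when every page backing the region has the same power-of-2 size, with 0 flagging the general case. The payoff is in rkey/lkey resolution: a uniform page size lets the byte offset into the region be turned into a segment index with shift arithmetic instead of walking the segment arrays. Roughly what the lookup fast path can do with it (a sketch, assuming QIB_SEGSZ is the per-array segment count from qib_verbs.h):

    if (mr->page_shift) {
            /* number of segments the offset spans, without looping */
            size_t entries = off >> mr->page_shift;

            off -= entries << mr->page_shift;  /* remainder within the page */
            m = entries / QIB_SEGSZ;           /* which map array */
            n = entries % QIB_SEGSZ;           /* which segment within it */
    }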
@@ -435,7 +436,6 @@ struct qib_qp { | |||
435 | spinlock_t r_lock; /* used for APM */ | 436 | spinlock_t r_lock; /* used for APM */ |
436 | spinlock_t s_lock; | 437 | spinlock_t s_lock; |
437 | atomic_t s_dma_busy; | 438 | atomic_t s_dma_busy; |
438 | unsigned processor_id; /* Processor ID QP is bound to */ | ||
439 | u32 s_flags; | 439 | u32 s_flags; |
440 | u32 s_cur_size; /* size of send packet in bytes */ | 440 | u32 s_cur_size; /* size of send packet in bytes */ |
441 | u32 s_len; /* total length of s_sge */ | 441 | u32 s_len; /* total length of s_sge */ |
@@ -805,7 +805,6 @@ static inline int qib_send_ok(struct qib_qp *qp) | |||
805 | !(qp->s_flags & QIB_S_ANY_WAIT_SEND)); | 805 | !(qp->s_flags & QIB_S_ANY_WAIT_SEND)); |
806 | } | 806 | } |
807 | 807 | ||
808 | extern struct workqueue_struct *qib_wq; | ||
809 | extern struct workqueue_struct *qib_cq_wq; | 808 | extern struct workqueue_struct *qib_cq_wq; |
810 | 809 | ||
811 | /* | 810 | /* |
@@ -813,13 +812,8 @@ extern struct workqueue_struct *qib_cq_wq; | |||
813 | */ | 812 | */ |
814 | static inline void qib_schedule_send(struct qib_qp *qp) | 813 | static inline void qib_schedule_send(struct qib_qp *qp) |
815 | { | 814 | { |
816 | if (qib_send_ok(qp)) { | 815 | if (qib_send_ok(qp)) |
817 | if (qp->processor_id == smp_processor_id()) | 816 | queue_work(ib_wq, &qp->s_work); |
818 | queue_work(qib_wq, &qp->s_work); | ||
819 | else | ||
820 | queue_work_on(qp->processor_id, | ||
821 | qib_wq, &qp->s_work); | ||
822 | } | ||
823 | } | 817 | } |
824 | 818 | ||
825 | static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) | 819 | static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) |
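Finally, the qib_verbs.h hunks retire the driver-private qib_wq workqueue together with the per-QP processor_id field that pinned send work to a CPU. With the concurrency-managed workqueue infrastructure, queue_work() already runs work on the submitting CPU when it can, so the hand-rolled queue_work()/queue_work_on() split bought nothing; the send work is instead queued on ib_wq, the shared workqueue the InfiniBand core declares in <rdma/ib_verbs.h> (hence the dropped extern for qib_wq). The result is one fewer workqueue to create and flush, and qib_schedule_send() collapses to a single queue_work(ib_wq, &qp->s_work) call.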