aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMike Marciniszyn <mike.marciniszyn@intel.com>2013-05-30 18:25:25 -0400
committerRoland Dreier <roland@purestorage.com>2013-06-21 20:19:38 -0400
commit8469ba39a6b77917e8879680aed17229bf72f263 (patch)
treef064dfaf3387f3ed5d59d69941b90825fe628729
parentfedaf4ffc224a194e2d13a3ec2abe5df0bc94258 (diff)
IB/qib: Add DCA support
This patch adds DCA cache warming for systems that support DCA. The code uses
cpu affinity notification to react to an affinity change from a user mode
program like irqbalance and (re-)program the chip accordingly. This
notification avoids reading the current cpu on every interrupt.

Reviewed-by: Dean Luick <dean.luick@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>

[ Add Kconfig dependency on SMP && GENERIC_HARDIRQS to avoid failure to
  build due to undefined struct irq_affinity_notify.  - Roland ]

Signed-off-by: Roland Dreier <roland@purestorage.com>
-rw-r--r--drivers/infiniband/hw/qib/Kconfig8
-rw-r--r--drivers/infiniband/hw/qib/qib.h13
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c334
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c41
6 files changed, 404 insertions, 12 deletions
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
index 1e603a375069..d03ca4c1ff25 100644
--- a/drivers/infiniband/hw/qib/Kconfig
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -5,3 +5,11 @@ config INFINIBAND_QIB
5 This is a low-level driver for Intel PCIe QLE InfiniBand host 5 This is a low-level driver for Intel PCIe QLE InfiniBand host
6 channel adapters. This driver does not support the Intel 6 channel adapters. This driver does not support the Intel
7 HyperTransport card (model QHT7140). 7 HyperTransport card (model QHT7140).
8
9config INFINIBAND_QIB_DCA
10 bool "QIB DCA support"
11 depends on INFINIBAND_QIB && DCA && SMP && GENERIC_HARDIRQS && !(INFINIBAND_QIB=y && DCA=m)
12 default y
13 ---help---
14 Setting this enables DCA support on some Intel chip sets
15 with the iba7322 HCA.
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 4d11575c2010..cecbd43f9212 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -428,9 +428,19 @@ struct qib_verbs_txreq {
428#define ACTIVITY_TIMER 5 428#define ACTIVITY_TIMER 5
429 429
430#define MAX_NAME_SIZE 64 430#define MAX_NAME_SIZE 64
431
432#ifdef CONFIG_INFINIBAND_QIB_DCA
433struct qib_irq_notify;
434#endif
435
431struct qib_msix_entry { 436struct qib_msix_entry {
432 struct msix_entry msix; 437 struct msix_entry msix;
433 void *arg; 438 void *arg;
439#ifdef CONFIG_INFINIBAND_QIB_DCA
440 int dca;
441 int rcv;
442 struct qib_irq_notify *notifier;
443#endif
434 char name[MAX_NAME_SIZE]; 444 char name[MAX_NAME_SIZE];
435 cpumask_var_t mask; 445 cpumask_var_t mask;
436}; 446};
@@ -828,6 +838,9 @@ struct qib_devdata {
828 struct qib_ctxtdata *); 838 struct qib_ctxtdata *);
829 void (*f_writescratch)(struct qib_devdata *, u32); 839 void (*f_writescratch)(struct qib_devdata *, u32);
830 int (*f_tempsense_rd)(struct qib_devdata *, int regnum); 840 int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
841#ifdef CONFIG_INFINIBAND_QIB_DCA
842 int (*f_notify_dca)(struct qib_devdata *, unsigned long event);
843#endif
831 844
832 char *boardname; /* human readable board info */ 845 char *boardname; /* human readable board info */
833 846
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 0232ae56b1fa..84e593d6007b 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -3464,6 +3464,13 @@ static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
3464 return -ENXIO; 3464 return -ENXIO;
3465} 3465}
3466 3466
3467#ifdef CONFIG_INFINIBAND_QIB_DCA
3468static int qib_6120_notify_dca(struct qib_devdata *dd, unsigned long event)
3469{
3470 return 0;
3471}
3472#endif
3473
3467/* Dummy function, as 6120 boards never disable EEPROM Write */ 3474/* Dummy function, as 6120 boards never disable EEPROM Write */
3468static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen) 3475static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
3469{ 3476{
@@ -3539,6 +3546,9 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
3539 dd->f_xgxs_reset = qib_6120_xgxs_reset; 3546 dd->f_xgxs_reset = qib_6120_xgxs_reset;
3540 dd->f_writescratch = writescratch; 3547 dd->f_writescratch = writescratch;
3541 dd->f_tempsense_rd = qib_6120_tempsense_rd; 3548 dd->f_tempsense_rd = qib_6120_tempsense_rd;
3549#ifdef CONFIG_INFINIBAND_QIB_DCA
3550 dd->f_notify_dca = qib_6120_notify_dca;
3551#endif
3542 /* 3552 /*
3543 * Do remaining pcie setup and save pcie values in dd. 3553 * Do remaining pcie setup and save pcie values in dd.
3544 * Any error printing is already done by the init code. 3554 * Any error printing is already done by the init code.
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 64d0ecb90cdc..454c2e7668fe 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -4513,6 +4513,13 @@ bail:
4513 return ret; 4513 return ret;
4514} 4514}
4515 4515
4516#ifdef CONFIG_INFINIBAND_QIB_DCA
4517static int qib_7220_notify_dca(struct qib_devdata *dd, unsigned long event)
4518{
4519 return 0;
4520}
4521#endif
4522
4516/* Dummy function, as 7220 boards never disable EEPROM Write */ 4523/* Dummy function, as 7220 boards never disable EEPROM Write */
4517static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen) 4524static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
4518{ 4525{
@@ -4587,6 +4594,9 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
4587 dd->f_xgxs_reset = qib_7220_xgxs_reset; 4594 dd->f_xgxs_reset = qib_7220_xgxs_reset;
4588 dd->f_writescratch = writescratch; 4595 dd->f_writescratch = writescratch;
4589 dd->f_tempsense_rd = qib_7220_tempsense_rd; 4596 dd->f_tempsense_rd = qib_7220_tempsense_rd;
4597#ifdef CONFIG_INFINIBAND_QIB_DCA
4598 dd->f_notify_dca = qib_7220_notify_dca;
4599#endif
4590 /* 4600 /*
4591 * Do remaining pcie setup and save pcie values in dd. 4601 * Do remaining pcie setup and save pcie values in dd.
4592 * Any error printing is already done by the init code. 4602 * Any error printing is already done by the init code.
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 3f6b21e9dc11..46ffea033be0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -44,6 +44,9 @@
44#include <linux/module.h> 44#include <linux/module.h>
45#include <rdma/ib_verbs.h> 45#include <rdma/ib_verbs.h>
46#include <rdma/ib_smi.h> 46#include <rdma/ib_smi.h>
47#ifdef CONFIG_INFINIBAND_QIB_DCA
48#include <linux/dca.h>
49#endif
47 50
48#include "qib.h" 51#include "qib.h"
49#include "qib_7322_regs.h" 52#include "qib_7322_regs.h"
@@ -519,6 +522,14 @@ static const u8 qib_7322_physportstate[0x20] = {
519 [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN 522 [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
520}; 523};
521 524
525#ifdef CONFIG_INFINIBAND_QIB_DCA
526struct qib_irq_notify {
527 int rcv;
528 void *arg;
529 struct irq_affinity_notify notify;
530};
531#endif
532
522struct qib_chip_specific { 533struct qib_chip_specific {
523 u64 __iomem *cregbase; 534 u64 __iomem *cregbase;
524 u64 *cntrs; 535 u64 *cntrs;
@@ -546,6 +557,12 @@ struct qib_chip_specific {
546 u32 lastbuf_for_pio; 557 u32 lastbuf_for_pio;
547 u32 stay_in_freeze; 558 u32 stay_in_freeze;
548 u32 recovery_ports_initted; 559 u32 recovery_ports_initted;
560#ifdef CONFIG_INFINIBAND_QIB_DCA
561 u32 dca_ctrl;
562 int rhdr_cpu[18];
563 int sdma_cpu[2];
564 u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
565#endif
549 struct qib_msix_entry *msix_entries; 566 struct qib_msix_entry *msix_entries;
550 unsigned long *sendchkenable; 567 unsigned long *sendchkenable;
551 unsigned long *sendgrhchk; 568 unsigned long *sendgrhchk;
@@ -642,28 +659,76 @@ static struct {
642 irq_handler_t handler; 659 irq_handler_t handler;
643 int lsb; 660 int lsb;
644 int port; /* 0 if not port-specific, else port # */ 661 int port; /* 0 if not port-specific, else port # */
662 int dca;
645} irq_table[] = { 663} irq_table[] = {
646 { "", qib_7322intr, -1, 0 }, 664 { "", qib_7322intr, -1, 0, 0 },
647 { " (buf avail)", qib_7322bufavail, 665 { " (buf avail)", qib_7322bufavail,
648 SYM_LSB(IntStatus, SendBufAvail), 0 }, 666 SYM_LSB(IntStatus, SendBufAvail), 0, 0},
649 { " (sdma 0)", sdma_intr, 667 { " (sdma 0)", sdma_intr,
650 SYM_LSB(IntStatus, SDmaInt_0), 1 }, 668 SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
651 { " (sdma 1)", sdma_intr, 669 { " (sdma 1)", sdma_intr,
652 SYM_LSB(IntStatus, SDmaInt_1), 2 }, 670 SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
653 { " (sdmaI 0)", sdma_idle_intr, 671 { " (sdmaI 0)", sdma_idle_intr,
654 SYM_LSB(IntStatus, SDmaIdleInt_0), 1 }, 672 SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
655 { " (sdmaI 1)", sdma_idle_intr, 673 { " (sdmaI 1)", sdma_idle_intr,
656 SYM_LSB(IntStatus, SDmaIdleInt_1), 2 }, 674 SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
657 { " (sdmaP 0)", sdma_progress_intr, 675 { " (sdmaP 0)", sdma_progress_intr,
658 SYM_LSB(IntStatus, SDmaProgressInt_0), 1 }, 676 SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
659 { " (sdmaP 1)", sdma_progress_intr, 677 { " (sdmaP 1)", sdma_progress_intr,
660 SYM_LSB(IntStatus, SDmaProgressInt_1), 2 }, 678 SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
661 { " (sdmaC 0)", sdma_cleanup_intr, 679 { " (sdmaC 0)", sdma_cleanup_intr,
662 SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 }, 680 SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
663 { " (sdmaC 1)", sdma_cleanup_intr, 681 { " (sdmaC 1)", sdma_cleanup_intr,
664 SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 }, 682 SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0},
665}; 683};
666 684
685#ifdef CONFIG_INFINIBAND_QIB_DCA
686
687static const struct dca_reg_map {
688 int shadow_inx;
689 int lsb;
690 u64 mask;
691 u16 regno;
692} dca_rcvhdr_reg_map[] = {
693 { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
694 ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
695 { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
696 ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
697 { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
698 ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
699 { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
700 ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
701 { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
702 ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
703 { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
704 ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
705 { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
706 ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
707 { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
708 ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
709 { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
710 ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
711 { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
712 ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
713 { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
714 ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
715 { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
716 ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
717 { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
718 ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
719 { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
720 ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
721 { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
722 ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
723 { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
724 ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
725 { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
726 ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
727 { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
728 ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
729};
730#endif
731
667/* ibcctrl bits */ 732/* ibcctrl bits */
668#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 733#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
669/* cycle through TS1/TS2 till OK */ 734/* cycle through TS1/TS2 till OK */
@@ -686,6 +751,13 @@ static void write_7322_init_portregs(struct qib_pportdata *);
686static void setup_7322_link_recovery(struct qib_pportdata *, u32); 751static void setup_7322_link_recovery(struct qib_pportdata *, u32);
687static void check_7322_rxe_status(struct qib_pportdata *); 752static void check_7322_rxe_status(struct qib_pportdata *);
688static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *); 753static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
754#ifdef CONFIG_INFINIBAND_QIB_DCA
755static void qib_setup_dca(struct qib_devdata *dd);
756static void setup_dca_notifier(struct qib_devdata *dd,
757 struct qib_msix_entry *m);
758static void reset_dca_notifier(struct qib_devdata *dd,
759 struct qib_msix_entry *m);
760#endif
689 761
690/** 762/**
691 * qib_read_ureg32 - read 32-bit virtualized per-context register 763 * qib_read_ureg32 - read 32-bit virtualized per-context register
@@ -2558,6 +2630,162 @@ static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2558 qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink); 2630 qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2559} 2631}
2560 2632
2633#ifdef CONFIG_INFINIBAND_QIB_DCA
2634
2635static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2636{
2637 switch (event) {
2638 case DCA_PROVIDER_ADD:
2639 if (dd->flags & QIB_DCA_ENABLED)
2640 break;
2641 if (!dca_add_requester(&dd->pcidev->dev)) {
2642 qib_devinfo(dd->pcidev, "DCA enabled\n");
2643 dd->flags |= QIB_DCA_ENABLED;
2644 qib_setup_dca(dd);
2645 }
2646 break;
2647 case DCA_PROVIDER_REMOVE:
2648 if (dd->flags & QIB_DCA_ENABLED) {
2649 dca_remove_requester(&dd->pcidev->dev);
2650 dd->flags &= ~QIB_DCA_ENABLED;
2651 dd->cspec->dca_ctrl = 0;
2652 qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2653 dd->cspec->dca_ctrl);
2654 }
2655 break;
2656 }
2657 return 0;
2658}
2659
2660static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
2661{
2662 struct qib_devdata *dd = rcd->dd;
2663 struct qib_chip_specific *cspec = dd->cspec;
2664
2665 if (!(dd->flags & QIB_DCA_ENABLED))
2666 return;
2667 if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2668 const struct dca_reg_map *rmp;
2669
2670 cspec->rhdr_cpu[rcd->ctxt] = cpu;
2671 rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2672 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2673 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2674 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2675 qib_devinfo(dd->pcidev,
2676 "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
2677 (long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2678 qib_write_kreg(dd, rmp->regno,
2679 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2680 cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2681 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2682 }
2683}
2684
2685static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2686{
2687 struct qib_devdata *dd = ppd->dd;
2688 struct qib_chip_specific *cspec = dd->cspec;
2689 unsigned pidx = ppd->port - 1;
2690
2691 if (!(dd->flags & QIB_DCA_ENABLED))
2692 return;
2693 if (cspec->sdma_cpu[pidx] != cpu) {
2694 cspec->sdma_cpu[pidx] = cpu;
2695 cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2696 SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2697 SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2698 cspec->dca_rcvhdr_ctrl[4] |=
2699 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2700 (ppd->hw_pidx ?
2701 SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2702 SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2703 qib_devinfo(dd->pcidev,
2704 "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2705 (long long) cspec->dca_rcvhdr_ctrl[4]);
2706 qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2707 cspec->dca_rcvhdr_ctrl[4]);
2708 cspec->dca_ctrl |= ppd->hw_pidx ?
2709 SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2710 SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2711 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2712 }
2713}
2714
2715static void qib_setup_dca(struct qib_devdata *dd)
2716{
2717 struct qib_chip_specific *cspec = dd->cspec;
2718 int i;
2719
2720 for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2721 cspec->rhdr_cpu[i] = -1;
2722 for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2723 cspec->sdma_cpu[i] = -1;
2724 cspec->dca_rcvhdr_ctrl[0] =
2725 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2726 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2727 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2728 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2729 cspec->dca_rcvhdr_ctrl[1] =
2730 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2731 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2732 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2733 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2734 cspec->dca_rcvhdr_ctrl[2] =
2735 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2736 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2737 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2738 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2739 cspec->dca_rcvhdr_ctrl[3] =
2740 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2741 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2742 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2743 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2744 cspec->dca_rcvhdr_ctrl[4] =
2745 (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2746 (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2747 for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2748 qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2749 cspec->dca_rcvhdr_ctrl[i]);
2750 for (i = 0; i < cspec->num_msix_entries; i++)
2751 setup_dca_notifier(dd, &cspec->msix_entries[i]);
2752}
2753
2754static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2755 const cpumask_t *mask)
2756{
2757 struct qib_irq_notify *n =
2758 container_of(notify, struct qib_irq_notify, notify);
2759 int cpu = cpumask_first(mask);
2760
2761 if (n->rcv) {
2762 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2763 qib_update_rhdrq_dca(rcd, cpu);
2764 } else {
2765 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2766 qib_update_sdma_dca(ppd, cpu);
2767 }
2768}
2769
2770static void qib_irq_notifier_release(struct kref *ref)
2771{
2772 struct qib_irq_notify *n =
2773 container_of(ref, struct qib_irq_notify, notify.kref);
2774 struct qib_devdata *dd;
2775
2776 if (n->rcv) {
2777 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2778 dd = rcd->dd;
2779 } else {
2780 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2781 dd = ppd->dd;
2782 }
2783 qib_devinfo(dd->pcidev,
2784 "release on HCA notify 0x%p n 0x%p\n", ref, n);
2785 kfree(n);
2786}
2787#endif
2788
2561/* 2789/*
2562 * Disable MSIx interrupt if enabled, call generic MSIx code 2790 * Disable MSIx interrupt if enabled, call generic MSIx code
2563 * to cleanup, and clear pending MSIx interrupts. 2791 * to cleanup, and clear pending MSIx interrupts.
@@ -2575,6 +2803,9 @@ static void qib_7322_nomsix(struct qib_devdata *dd)
2575 2803
2576 dd->cspec->num_msix_entries = 0; 2804 dd->cspec->num_msix_entries = 0;
2577 for (i = 0; i < n; i++) { 2805 for (i = 0; i < n; i++) {
2806#ifdef CONFIG_INFINIBAND_QIB_DCA
2807 reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
2808#endif
2578 irq_set_affinity_hint( 2809 irq_set_affinity_hint(
2579 dd->cspec->msix_entries[i].msix.vector, NULL); 2810 dd->cspec->msix_entries[i].msix.vector, NULL);
2580 free_cpumask_var(dd->cspec->msix_entries[i].mask); 2811 free_cpumask_var(dd->cspec->msix_entries[i].mask);
@@ -2602,6 +2833,15 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2602{ 2833{
2603 int i; 2834 int i;
2604 2835
2836#ifdef CONFIG_INFINIBAND_QIB_DCA
2837 if (dd->flags & QIB_DCA_ENABLED) {
2838 dca_remove_requester(&dd->pcidev->dev);
2839 dd->flags &= ~QIB_DCA_ENABLED;
2840 dd->cspec->dca_ctrl = 0;
2841 qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2842 }
2843#endif
2844
2605 qib_7322_free_irq(dd); 2845 qib_7322_free_irq(dd);
2606 kfree(dd->cspec->cntrs); 2846 kfree(dd->cspec->cntrs);
2607 kfree(dd->cspec->sendchkenable); 2847 kfree(dd->cspec->sendchkenable);
@@ -3068,6 +3308,53 @@ static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3068 return IRQ_HANDLED; 3308 return IRQ_HANDLED;
3069} 3309}
3070 3310
3311#ifdef CONFIG_INFINIBAND_QIB_DCA
3312
3313static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3314{
3315 if (!m->dca)
3316 return;
3317 qib_devinfo(dd->pcidev,
3318 "Disabling notifier on HCA %d irq %d\n",
3319 dd->unit,
3320 m->msix.vector);
3321 irq_set_affinity_notifier(
3322 m->msix.vector,
3323 NULL);
3324 m->notifier = NULL;
3325}
3326
3327static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3328{
3329 struct qib_irq_notify *n;
3330
3331 if (!m->dca)
3332 return;
3333 n = kzalloc(sizeof(*n), GFP_KERNEL);
3334 if (n) {
3335 int ret;
3336
3337 m->notifier = n;
3338 n->notify.irq = m->msix.vector;
3339 n->notify.notify = qib_irq_notifier_notify;
3340 n->notify.release = qib_irq_notifier_release;
3341 n->arg = m->arg;
3342 n->rcv = m->rcv;
3343 qib_devinfo(dd->pcidev,
3344 "set notifier irq %d rcv %d notify %p\n",
3345 n->notify.irq, n->rcv, &n->notify);
3346 ret = irq_set_affinity_notifier(
3347 n->notify.irq,
3348 &n->notify);
3349 if (ret) {
3350 m->notifier = NULL;
3351 kfree(n);
3352 }
3353 }
3354}
3355
3356#endif
3357
3071/* 3358/*
3072 * Set up our chip-specific interrupt handler. 3359 * Set up our chip-specific interrupt handler.
3073 * The interrupt type has already been setup, so 3360 * The interrupt type has already been setup, so
@@ -3149,6 +3436,9 @@ try_intx:
3149 void *arg; 3436 void *arg;
3150 u64 val; 3437 u64 val;
3151 int lsb, reg, sh; 3438 int lsb, reg, sh;
3439#ifdef CONFIG_INFINIBAND_QIB_DCA
3440 int dca = 0;
3441#endif
3152 3442
3153 dd->cspec->msix_entries[msixnum]. 3443 dd->cspec->msix_entries[msixnum].
3154 name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1] 3444 name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
@@ -3161,6 +3451,9 @@ try_intx:
3161 arg = dd->pport + irq_table[i].port - 1; 3451 arg = dd->pport + irq_table[i].port - 1;
3162 } else 3452 } else
3163 arg = dd; 3453 arg = dd;
3454#ifdef CONFIG_INFINIBAND_QIB_DCA
3455 dca = irq_table[i].dca;
3456#endif
3164 lsb = irq_table[i].lsb; 3457 lsb = irq_table[i].lsb;
3165 handler = irq_table[i].handler; 3458 handler = irq_table[i].handler;
3166 snprintf(dd->cspec->msix_entries[msixnum].name, 3459 snprintf(dd->cspec->msix_entries[msixnum].name,
@@ -3178,6 +3471,9 @@ try_intx:
3178 continue; 3471 continue;
3179 if (qib_krcvq01_no_msi && ctxt < 2) 3472 if (qib_krcvq01_no_msi && ctxt < 2)
3180 continue; 3473 continue;
3474#ifdef CONFIG_INFINIBAND_QIB_DCA
3475 dca = 1;
3476#endif
3181 lsb = QIB_I_RCVAVAIL_LSB + ctxt; 3477 lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3182 handler = qib_7322pintr; 3478 handler = qib_7322pintr;
3183 snprintf(dd->cspec->msix_entries[msixnum].name, 3479 snprintf(dd->cspec->msix_entries[msixnum].name,
@@ -3203,6 +3499,11 @@ try_intx:
3203 goto try_intx; 3499 goto try_intx;
3204 } 3500 }
3205 dd->cspec->msix_entries[msixnum].arg = arg; 3501 dd->cspec->msix_entries[msixnum].arg = arg;
3502#ifdef CONFIG_INFINIBAND_QIB_DCA
3503 dd->cspec->msix_entries[msixnum].dca = dca;
3504 dd->cspec->msix_entries[msixnum].rcv =
3505 handler == qib_7322pintr;
3506#endif
3206 if (lsb >= 0) { 3507 if (lsb >= 0) {
3207 reg = lsb / IBA7322_REDIRECT_VEC_PER_REG; 3508 reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3208 sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) * 3509 sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
@@ -6885,6 +7186,9 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
6885 dd->f_sdma_init_early = qib_7322_sdma_init_early; 7186 dd->f_sdma_init_early = qib_7322_sdma_init_early;
6886 dd->f_writescratch = writescratch; 7187 dd->f_writescratch = writescratch;
6887 dd->f_tempsense_rd = qib_7322_tempsense_rd; 7188 dd->f_tempsense_rd = qib_7322_tempsense_rd;
7189#ifdef CONFIG_INFINIBAND_QIB_DCA
7190 dd->f_notify_dca = qib_7322_notify_dca;
7191#endif
6888 /* 7192 /*
6889 * Do remaining PCIe setup and save PCIe values in dd. 7193 * Do remaining PCIe setup and save PCIe values in dd.
6890 * Any error printing is already done by the init code. 7194 * Any error printing is already done by the init code.
@@ -6921,7 +7225,7 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
6921 actual_cnt -= dd->num_pports; 7225 actual_cnt -= dd->num_pports;
6922 7226
6923 tabsize = actual_cnt; 7227 tabsize = actual_cnt;
6924 dd->cspec->msix_entries = kmalloc(tabsize * 7228 dd->cspec->msix_entries = kzalloc(tabsize *
6925 sizeof(struct qib_msix_entry), GFP_KERNEL); 7229 sizeof(struct qib_msix_entry), GFP_KERNEL);
6926 if (!dd->cspec->msix_entries) { 7230 if (!dd->cspec->msix_entries) {
6927 qib_dev_err(dd, "No memory for MSIx table\n"); 7231 qib_dev_err(dd, "No memory for MSIx table\n");
@@ -6941,7 +7245,13 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
6941 7245
6942 /* clear diagctrl register, in case diags were running and crashed */ 7246 /* clear diagctrl register, in case diags were running and crashed */
6943 qib_write_kreg(dd, kr_hwdiagctrl, 0); 7247 qib_write_kreg(dd, kr_hwdiagctrl, 0);
6944 7248#ifdef CONFIG_INFINIBAND_QIB_DCA
7249 if (!dca_add_requester(&pdev->dev)) {
7250 qib_devinfo(dd->pcidev, "DCA enabled\n");
7251 dd->flags |= QIB_DCA_ENABLED;
7252 qib_setup_dca(dd);
7253 }
7254#endif
6945 goto bail; 7255 goto bail;
6946 7256
6947bail_cleanup: 7257bail_cleanup:
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 173f805790da..4b64c885fa0d 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -39,6 +39,9 @@
39#include <linux/idr.h> 39#include <linux/idr.h>
40#include <linux/module.h> 40#include <linux/module.h>
41#include <linux/printk.h> 41#include <linux/printk.h>
42#ifdef CONFIG_INFINIBAND_QIB_DCA
43#include <linux/dca.h>
44#endif
42 45
43#include "qib.h" 46#include "qib.h"
44#include "qib_common.h" 47#include "qib_common.h"
@@ -1158,6 +1161,35 @@ struct pci_driver qib_driver = {
1158 .err_handler = &qib_pci_err_handler, 1161 .err_handler = &qib_pci_err_handler,
1159}; 1162};
1160 1163
1164#ifdef CONFIG_INFINIBAND_QIB_DCA
1165
1166static int qib_notify_dca(struct notifier_block *, unsigned long, void *);
1167static struct notifier_block dca_notifier = {
1168 .notifier_call = qib_notify_dca,
1169 .next = NULL,
1170 .priority = 0
1171};
1172
1173static int qib_notify_dca_device(struct device *device, void *data)
1174{
1175 struct qib_devdata *dd = dev_get_drvdata(device);
1176 unsigned long event = *(unsigned long *)data;
1177
1178 return dd->f_notify_dca(dd, event);
1179}
1180
1181static int qib_notify_dca(struct notifier_block *nb, unsigned long event,
1182 void *p)
1183{
1184 int rval;
1185
1186 rval = driver_for_each_device(&qib_driver.driver, NULL,
1187 &event, qib_notify_dca_device);
1188 return rval ? NOTIFY_BAD : NOTIFY_DONE;
1189}
1190
1191#endif
1192
1161/* 1193/*
1162 * Do all the generic driver unit- and chip-independent memory 1194 * Do all the generic driver unit- and chip-independent memory
1163 * allocation and initialization. 1195 * allocation and initialization.
@@ -1182,6 +1214,9 @@ static int __init qlogic_ib_init(void)
1182 */ 1214 */
1183 idr_init(&qib_unit_table); 1215 idr_init(&qib_unit_table);
1184 1216
1217#ifdef CONFIG_INFINIBAND_QIB_DCA
1218 dca_register_notify(&dca_notifier);
1219#endif
1185 ret = pci_register_driver(&qib_driver); 1220 ret = pci_register_driver(&qib_driver);
1186 if (ret < 0) { 1221 if (ret < 0) {
1187 pr_err("Unable to register driver: error %d\n", -ret); 1222 pr_err("Unable to register driver: error %d\n", -ret);
@@ -1194,6 +1229,9 @@ static int __init qlogic_ib_init(void)
1194 goto bail; /* all OK */ 1229 goto bail; /* all OK */
1195 1230
1196bail_unit: 1231bail_unit:
1232#ifdef CONFIG_INFINIBAND_QIB_DCA
1233 dca_unregister_notify(&dca_notifier);
1234#endif
1197 idr_destroy(&qib_unit_table); 1235 idr_destroy(&qib_unit_table);
1198 destroy_workqueue(qib_cq_wq); 1236 destroy_workqueue(qib_cq_wq);
1199bail_dev: 1237bail_dev:
@@ -1217,6 +1255,9 @@ static void __exit qlogic_ib_cleanup(void)
1217 "Unable to cleanup counter filesystem: error %d\n", 1255 "Unable to cleanup counter filesystem: error %d\n",
1218 -ret); 1256 -ret);
1219 1257
1258#ifdef CONFIG_INFINIBAND_QIB_DCA
1259 dca_unregister_notify(&dca_notifier);
1260#endif
1220 pci_unregister_driver(&qib_driver); 1261 pci_unregister_driver(&qib_driver);
1221 1262
1222 destroy_workqueue(qib_cq_wq); 1263 destroy_workqueue(qib_cq_wq);