Diffstat (limited to 'drivers/ntb')
-rw-r--r--  drivers/ntb/ntb_hw.c         121
-rw-r--r--  drivers/ntb/ntb_hw.h           7
-rw-r--r--  drivers/ntb/ntb_regs.h        16
-rw-r--r--  drivers/ntb/ntb_transport.c   77
4 files changed, 156 insertions, 65 deletions
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index 1cb6e51e6bda..170e8e60cdb7 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -141,6 +141,24 @@ void ntb_unregister_event_callback(struct ntb_device *ndev)
 	ndev->event_cb = NULL;
 }
 
+static void ntb_irq_work(unsigned long data)
+{
+	struct ntb_db_cb *db_cb = (struct ntb_db_cb *)data;
+	int rc;
+
+	rc = db_cb->callback(db_cb->data, db_cb->db_num);
+	if (rc)
+		tasklet_schedule(&db_cb->irq_work);
+	else {
+		struct ntb_device *ndev = db_cb->ndev;
+		unsigned long mask;
+
+		mask = readw(ndev->reg_ofs.ldb_mask);
+		clear_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
+		writew(mask, ndev->reg_ofs.ldb_mask);
+	}
+}
+
 /**
  * ntb_register_db_callback() - register a callback for doorbell interrupt
  * @ndev: pointer to ntb_device instance
@@ -155,7 +173,7 @@ void ntb_unregister_event_callback(struct ntb_device *ndev)
  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  */
 int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
-			     void *data, void (*func)(void *data, int db_num))
+			     void *data, int (*func)(void *data, int db_num))
 {
 	unsigned long mask;
 
@@ -166,6 +184,10 @@ int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
 
 	ndev->db_cb[idx].callback = func;
 	ndev->db_cb[idx].data = data;
+	ndev->db_cb[idx].ndev = ndev;
+
+	tasklet_init(&ndev->db_cb[idx].irq_work, ntb_irq_work,
+		     (unsigned long) &ndev->db_cb[idx]);
 
 	/* unmask interrupt */
 	mask = readw(ndev->reg_ofs.ldb_mask);
@@ -194,6 +216,8 @@ void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx)
 	set_bit(idx * ndev->bits_per_vector, &mask);
 	writew(mask, ndev->reg_ofs.ldb_mask);
 
+	tasklet_disable(&ndev->db_cb[idx].irq_work);
+
 	ndev->db_cb[idx].callback = NULL;
 }
 
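With the callback now returning int, a client handler signals whether it left work pending. A minimal sketch of that contract follows; my_dev, my_process_one() and the budget value are hypothetical, not part of this patch.

/* Hypothetical client callback for the new int-returning signature. */
static int my_db_handler(void *data, int db_num)
{
	struct my_dev *dev = data;
	int budget = 16, done = 0;

	/* Drain a bounded amount of work per tasklet pass. */
	while (done < budget && my_process_one(dev))
		done++;

	/* Nonzero: ntb_irq_work() reschedules the tasklet and keeps the
	 * doorbell masked; zero: the mask bit is cleared (IRQ re-armed).
	 */
	return done;
}

/* registered via ntb_register_db_callback(ndev, idx, dev, my_db_handler); */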
@@ -678,6 +702,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
 			return -EINVAL;
 
 		ndev->limits.max_mw = SNB_ERRATA_MAX_MW;
+		ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
 		ndev->reg_ofs.spad_write = ndev->mw[1].vbase +
 					   SNB_SPAD_OFFSET;
 		ndev->reg_ofs.rdb = ndev->mw[1].vbase +
@@ -688,8 +713,21 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
 		 */
 		writeq(ndev->mw[1].bar_sz + 0x1000, ndev->reg_base +
 		       SNB_PBAR4LMT_OFFSET);
+		/* HW errata on the Limit registers. They can only be
+		 * written when the base register is 4GB aligned and
+		 * < 32bit. This should already be the case based on the
+		 * driver defaults, but write the Limit registers first
+		 * just in case.
+		 */
 	} else {
 		ndev->limits.max_mw = SNB_MAX_MW;
+
+		/* HW Errata on bit 14 of b2bdoorbell register. Writes
+		 * will not be mirrored to the remote system. Shrink
+		 * the number of bits by one, since bit 14 is the last
+		 * bit.
+		 */
+		ndev->limits.max_db_bits = SNB_MAX_DB_BITS - 1;
 		ndev->reg_ofs.spad_write = ndev->reg_base +
 					   SNB_B2B_SPAD_OFFSET;
 		ndev->reg_ofs.rdb = ndev->reg_base +
@@ -699,6 +737,12 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
 		 * something silly
 		 */
 		writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET);
+		/* HW errata on the Limit registers. They can only be
+		 * written when the base register is 4GB aligned and
+		 * < 32bit. This should already be the case based on the
+		 * driver defaults, but write the Limit registers first
+		 * just in case.
+		 */
 	}
 
 	/* The Xeon errata workaround requires setting SBAR Base
@@ -769,6 +813,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
 		 * have an equal amount.
 		 */
 		ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
+		ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
 		/* Note: The SDOORBELL is the cause of the errata. You REALLY
 		 * don't want to touch it.
 		 */
@@ -793,6 +838,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
 		 * have an equal amount.
 		 */
 		ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
+		ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
 		ndev->reg_ofs.rdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
 		ndev->reg_ofs.ldb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
 		ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_SDBMSK_OFFSET;
@@ -819,7 +865,6 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
 	ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_SLINK_STATUS_OFFSET;
 	ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET;
 
-	ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
 	ndev->limits.msix_cnt = SNB_MSIX_CNT;
 	ndev->bits_per_vector = SNB_DB_BITS_PER_VEC;
 
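Taken together, the max_db_bits changes make the doorbell budget depend on the configuration instead of being set once at the end of setup. A short recap of the Sandy Bridge numbers used above (values from ntb_regs.h; this comment block is only a summary, not driver code):

/* 16 doorbell bits on SNB:
 *   bit 15     - reserved for the link interrupt (SNB_LINK_DB),
 *                so SNB_MAX_DB_BITS = 15
 *   bit 14     - unusable when the B2B doorbell register is used
 *                (writes are not mirrored to the remote side),
 *                so that path gets SNB_MAX_DB_BITS - 1 = 14
 *   remaining  - available to doorbell callbacks / transport queues
 */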
@@ -934,12 +979,16 @@ static irqreturn_t bwd_callback_msix_irq(int irq, void *data)
 {
 	struct ntb_db_cb *db_cb = data;
 	struct ntb_device *ndev = db_cb->ndev;
+	unsigned long mask;
 
 	dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
 		db_cb->db_num);
 
-	if (db_cb->callback)
-		db_cb->callback(db_cb->data, db_cb->db_num);
+	mask = readw(ndev->reg_ofs.ldb_mask);
+	set_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
+	writew(mask, ndev->reg_ofs.ldb_mask);
+
+	tasklet_schedule(&db_cb->irq_work);
 
 	/* No need to check for the specific HB irq, any interrupt means
 	 * we're connected.
@@ -955,12 +1004,16 @@ static irqreturn_t xeon_callback_msix_irq(int irq, void *data)
 {
 	struct ntb_db_cb *db_cb = data;
 	struct ntb_device *ndev = db_cb->ndev;
+	unsigned long mask;
 
 	dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
 		db_cb->db_num);
 
-	if (db_cb->callback)
-		db_cb->callback(db_cb->data, db_cb->db_num);
+	mask = readw(ndev->reg_ofs.ldb_mask);
+	set_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
+	writew(mask, ndev->reg_ofs.ldb_mask);
+
+	tasklet_schedule(&db_cb->irq_work);
 
 	/* On Sandybridge, there are 16 bits in the interrupt register
 	 * but only 4 vectors. So, 5 bits are assigned to the first 3
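Both MSI-X handlers now follow the usual mask-in-hardirq, process-in-bottom-half shape: the hard interrupt only masks its doorbell bits and schedules the per-callback tasklet, and ntb_irq_work() re-enables the bits once the callback reports no pending work. A generic sketch of that pattern, with demo_* names standing in for the driver specifics:

static void demo_hardirq(struct demo_dev *d)
{
	demo_mask_irq(d);		/* cheap register write in IRQ context */
	tasklet_schedule(&d->work);	/* defer the real work */
}

static void demo_tasklet(unsigned long data)
{
	struct demo_dev *d = (struct demo_dev *)data;

	if (demo_process_some(d))	/* work left: stay masked, run again */
		tasklet_schedule(&d->work);
	else
		demo_unmask_irq(d);	/* drained: re-arm the interrupt */
}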
@@ -986,7 +1039,7 @@ static irqreturn_t xeon_event_msix_irq(int irq, void *dev)
 		dev_err(&ndev->pdev->dev, "Error determining link status\n");
 
 	/* bit 15 is always the link bit */
-	writew(1 << ndev->limits.max_db_bits, ndev->reg_ofs.ldb);
+	writew(1 << SNB_LINK_DB, ndev->reg_ofs.ldb);
 
 	return IRQ_HANDLED;
 }
@@ -1075,6 +1128,10 @@ static int ntb_setup_msix(struct ntb_device *ndev)
1075 "Only %d MSI-X vectors. Limiting the number of queues to that number.\n", 1128 "Only %d MSI-X vectors. Limiting the number of queues to that number.\n",
1076 rc); 1129 rc);
1077 msix_entries = rc; 1130 msix_entries = rc;
1131
1132 rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
1133 if (rc)
1134 goto err1;
1078 } 1135 }
1079 1136
1080 for (i = 0; i < msix_entries; i++) { 1137 for (i = 0; i < msix_entries; i++) {
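The added call relies on the legacy pci_enable_msix() convention of this era: a positive return value is the number of vectors that could have been allocated, so the caller shrinks its request and calls again. A condensed sketch of that retry (demo_enable_msix() is illustrative, not the driver's helper):

static int demo_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
			    int nvec)
{
	int rc;

	rc = pci_enable_msix(pdev, entries, nvec);
	if (rc > 0) {
		/* Positive return: only 'rc' vectors are available, retry. */
		nvec = rc;
		rc = pci_enable_msix(pdev, entries, nvec);
	}
	if (rc)
		return rc;	/* still failing: fall back to MSI or intx */

	return nvec;		/* number of vectors actually enabled */
}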
@@ -1176,9 +1233,10 @@ static int ntb_setup_interrupts(struct ntb_device *ndev)
 	 */
 	if (ndev->hw_type == BWD_HW)
 		writeq(~0, ndev->reg_ofs.ldb_mask);
-	else
-		writew(~(1 << ndev->limits.max_db_bits),
-		       ndev->reg_ofs.ldb_mask);
+	else {
+		u16 var = 1 << SNB_LINK_DB;
+		writew(~var, ndev->reg_ofs.ldb_mask);
+	}
 
 	rc = ntb_setup_msix(ndev);
 	if (!rc)
@@ -1286,6 +1344,39 @@ static void ntb_free_debugfs(struct ntb_device *ndev)
 	}
 }
 
+static void ntb_hw_link_up(struct ntb_device *ndev)
+{
+	if (ndev->conn_type == NTB_CONN_TRANSPARENT)
+		ntb_link_event(ndev, NTB_LINK_UP);
+	else {
+		u32 ntb_cntl;
+
+		/* Let's bring the NTB link up */
+		ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
+		ntb_cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
+		ntb_cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
+		ntb_cntl |= NTB_CNTL_P2S_BAR45_SNOOP | NTB_CNTL_S2P_BAR45_SNOOP;
+		writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
+	}
+}
+
+static void ntb_hw_link_down(struct ntb_device *ndev)
+{
+	u32 ntb_cntl;
+
+	if (ndev->conn_type == NTB_CONN_TRANSPARENT) {
+		ntb_link_event(ndev, NTB_LINK_DOWN);
+		return;
+	}
+
+	/* Bring NTB link down */
+	ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
+	ntb_cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
+	ntb_cntl &= ~(NTB_CNTL_P2S_BAR45_SNOOP | NTB_CNTL_S2P_BAR45_SNOOP);
+	ntb_cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
+	writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
+}
+
 static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct ntb_device *ndev;
@@ -1374,9 +1465,7 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (rc)
 		goto err6;
 
-	/* Let's bring the NTB link up */
-	writel(NTB_CNTL_BAR23_SNOOP | NTB_CNTL_BAR45_SNOOP,
-	       ndev->reg_ofs.lnk_cntl);
+	ntb_hw_link_up(ndev);
 
 	return 0;
 
@@ -1406,12 +1495,8 @@ static void ntb_pci_remove(struct pci_dev *pdev)
 {
 	struct ntb_device *ndev = pci_get_drvdata(pdev);
 	int i;
-	u32 ntb_cntl;
 
-	/* Bring NTB link down */
-	ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
-	ntb_cntl |= NTB_CNTL_LINK_DISABLE;
-	writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
+	ntb_hw_link_down(ndev);
 
 	ntb_transport_free(ndev->ntb_transport);
 
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
index 0a31cedae7d4..bbdb7edca10c 100644
--- a/drivers/ntb/ntb_hw.h
+++ b/drivers/ntb/ntb_hw.h
@@ -106,10 +106,11 @@ struct ntb_mw {
 };
 
 struct ntb_db_cb {
-	void (*callback) (void *data, int db_num);
+	int (*callback)(void *data, int db_num);
 	unsigned int db_num;
 	void *data;
 	struct ntb_device *ndev;
+	struct tasklet_struct irq_work;
 };
 
 struct ntb_device {
@@ -228,8 +229,8 @@ struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
 void ntb_unregister_transport(struct ntb_device *ndev);
 void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr);
 int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
-			     void *data, void (*db_cb_func) (void *data,
-							      int db_num));
+			     void *data, int (*db_cb_func)(void *data,
+							    int db_num));
 void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
 int ntb_register_event_callback(struct ntb_device *ndev,
 				void (*event_cb_func) (void *handle,
diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h
index aa4bdd393c58..9774506419d7 100644
--- a/drivers/ntb/ntb_regs.h
+++ b/drivers/ntb/ntb_regs.h
@@ -55,6 +55,7 @@
 #define SNB_MAX_COMPAT_SPADS	16
 /* Reserve the uppermost bit for link interrupt */
 #define SNB_MAX_DB_BITS		15
+#define SNB_LINK_DB		15
 #define SNB_DB_BITS_PER_VEC	5
 #define SNB_MAX_MW		2
 #define SNB_ERRATA_MAX_MW	1
@@ -75,9 +76,6 @@
 #define SNB_SBAR2XLAT_OFFSET	0x0030
 #define SNB_SBAR4XLAT_OFFSET	0x0038
 #define SNB_SBAR0BASE_OFFSET	0x0040
-#define SNB_SBAR0BASE_OFFSET	0x0040
-#define SNB_SBAR2BASE_OFFSET	0x0048
-#define SNB_SBAR4BASE_OFFSET	0x0050
 #define SNB_SBAR2BASE_OFFSET	0x0048
 #define SNB_SBAR4BASE_OFFSET	0x0050
 #define SNB_NTBCNTL_OFFSET	0x0058
@@ -145,11 +143,13 @@
 #define BWD_LTSSMSTATEJMP_FORCEDETECT	(1 << 2)
 #define BWD_IBIST_ERR_OFLOW	0x7FFF7FFF
 
 #define NTB_CNTL_CFG_LOCK		(1 << 0)
 #define NTB_CNTL_LINK_DISABLE		(1 << 1)
-#define NTB_CNTL_BAR23_SNOOP		(1 << 2)
-#define NTB_CNTL_BAR45_SNOOP		(1 << 6)
+#define NTB_CNTL_S2P_BAR23_SNOOP	(1 << 2)
+#define NTB_CNTL_P2S_BAR23_SNOOP	(1 << 4)
+#define NTB_CNTL_S2P_BAR45_SNOOP	(1 << 6)
+#define NTB_CNTL_P2S_BAR45_SNOOP	(1 << 8)
 #define BWD_CNTL_LINK_DOWN		(1 << 16)
 
 #define NTB_PPD_OFFSET		0x00D4
 #define SNB_PPD_CONN_TYPE	0x0003
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index d0222f13d154..3217f394d45b 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -119,7 +119,6 @@ struct ntb_transport_qp {
 
 	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
 			    void *data, int len);
-	struct tasklet_struct rx_work;
 	struct list_head rx_pend_q;
 	struct list_head rx_free_q;
 	spinlock_t ntb_rx_pend_q_lock;
@@ -584,11 +583,8 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
 	return 0;
 }
 
-static void ntb_qp_link_cleanup(struct work_struct *work)
+static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
 {
-	struct ntb_transport_qp *qp = container_of(work,
-						   struct ntb_transport_qp,
-						   link_cleanup);
 	struct ntb_transport *nt = qp->transport;
 	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
 
@@ -602,6 +598,16 @@ static void ntb_qp_link_cleanup(struct work_struct *work)
 
 	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
 	qp->qp_link = NTB_LINK_DOWN;
+}
+
+static void ntb_qp_link_cleanup_work(struct work_struct *work)
+{
+	struct ntb_transport_qp *qp = container_of(work,
+						   struct ntb_transport_qp,
+						   link_cleanup);
+	struct ntb_transport *nt = qp->transport;
+
+	ntb_qp_link_cleanup(qp);
 
 	if (nt->transport_link == NTB_LINK_UP)
 		schedule_delayed_work(&qp->link_work,
@@ -613,22 +619,20 @@ static void ntb_qp_link_down(struct ntb_transport_qp *qp)
 	schedule_work(&qp->link_cleanup);
 }
 
-static void ntb_transport_link_cleanup(struct work_struct *work)
+static void ntb_transport_link_cleanup(struct ntb_transport *nt)
 {
-	struct ntb_transport *nt = container_of(work, struct ntb_transport,
-						link_cleanup);
 	int i;
 
+	/* Pass along the info to any clients */
+	for (i = 0; i < nt->max_qps; i++)
+		if (!test_bit(i, &nt->qp_bitmap))
+			ntb_qp_link_cleanup(&nt->qps[i]);
+
 	if (nt->transport_link == NTB_LINK_DOWN)
 		cancel_delayed_work_sync(&nt->link_work);
 	else
 		nt->transport_link = NTB_LINK_DOWN;
 
-	/* Pass along the info to any clients */
-	for (i = 0; i < nt->max_qps; i++)
-		if (!test_bit(i, &nt->qp_bitmap))
-			ntb_qp_link_down(&nt->qps[i]);
-
 	/* The scratchpad registers keep the values if the remote side
 	 * goes down, blast them now to give them a sane value the next
 	 * time they are accessed
@@ -637,6 +641,14 @@ static void ntb_transport_link_cleanup(struct work_struct *work)
 		ntb_write_local_spad(nt->ndev, i, 0);
 }
 
+static void ntb_transport_link_cleanup_work(struct work_struct *work)
+{
+	struct ntb_transport *nt = container_of(work, struct ntb_transport,
+						link_cleanup);
+
+	ntb_transport_link_cleanup(nt);
+}
+
 static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
 {
 	struct ntb_transport *nt = data;
@@ -880,7 +892,7 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
 	}
 
 	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
-	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);
+	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
 
 	spin_lock_init(&qp->ntb_rx_pend_q_lock);
 	spin_lock_init(&qp->ntb_rx_free_q_lock);
@@ -936,7 +948,7 @@ int ntb_transport_init(struct pci_dev *pdev)
 	}
 
 	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
-	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);
+	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
 
 	rc = ntb_register_event_callback(nt->ndev,
 					 ntb_transport_event_callback);
@@ -972,7 +984,7 @@ void ntb_transport_free(void *transport)
 	struct ntb_device *ndev = nt->ndev;
 	int i;
 
-	nt->transport_link = NTB_LINK_DOWN;
+	ntb_transport_link_cleanup(nt);
 
 	/* verify that all the qp's are freed */
 	for (i = 0; i < nt->max_qps; i++) {
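The cleanup paths above are split into a plain function plus a thin work_struct wrapper so the same teardown can run both from the workqueue and synchronously, as ntb_transport_free() now does. A generic sketch of that split, with demo_* names standing in for the transport types:

struct demo_ctx {
	struct work_struct cleanup;
	/* ... state being torn down ... */
};

/* Callable directly from a free/remove path. */
static void demo_cleanup(struct demo_ctx *ctx)
{
	/* tear the state down synchronously */
}

/* Thin wrapper so the same teardown can also run from a workqueue. */
static void demo_cleanup_work(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, cleanup);

	demo_cleanup(ctx);
}

/* paired with INIT_WORK(&ctx->cleanup, demo_cleanup_work); */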
@@ -1188,11 +1200,14 @@ err:
 	goto out;
 }
 
-static void ntb_transport_rx(unsigned long data)
+static int ntb_transport_rxc_db(void *data, int db_num)
 {
-	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
+	struct ntb_transport_qp *qp = data;
 	int rc, i;
 
+	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
+		__func__, db_num);
+
 	/* Limit the number of packets processed in a single interrupt to
 	 * provide fairness to others
 	 */
@@ -1204,16 +1219,8 @@ static void ntb_transport_rx(unsigned long data)
 
 	if (qp->dma_chan)
 		dma_async_issue_pending(qp->dma_chan);
-}
-
-static void ntb_transport_rxc_db(void *data, int db_num)
-{
-	struct ntb_transport_qp *qp = data;
-
-	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
-		__func__, db_num);
 
-	tasklet_schedule(&qp->rx_work);
+	return i;
 }
 
 static void ntb_tx_copy_callback(void *data)
@@ -1432,11 +1439,12 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
 	qp->tx_handler = handlers->tx_handler;
 	qp->event_handler = handlers->event_handler;
 
+	dmaengine_get();
 	qp->dma_chan = dma_find_channel(DMA_MEMCPY);
-	if (!qp->dma_chan)
+	if (!qp->dma_chan) {
+		dmaengine_put();
 		dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
-	else
-		dmaengine_get();
+	}
 
 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
 		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
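The reordering above matters because dma_find_channel() is only expected to return channels while a dmaengine client reference is held, and the reference has to be dropped again when no channel is found so get/put stay balanced. A minimal sketch of that pairing (demo_grab_memcpy_chan() is illustrative only):

/* Take the client reference, look up a channel, release on failure. */
static struct dma_chan *demo_grab_memcpy_chan(void)
{
	struct dma_chan *chan;

	dmaengine_get();			/* hold the dmaengine client reference */
	chan = dma_find_channel(DMA_MEMCPY);	/* meaningful only while the ref is held */
	if (!chan)
		dmaengine_put();		/* nothing found: release, fall back to CPU copies */

	return chan;	/* caller calls dmaengine_put() when done with the channel */
}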
@@ -1458,25 +1466,23 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
 				     &qp->tx_free_q);
 	}
 
-	tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
-
 	rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
 				      ntb_transport_rxc_db);
 	if (rc)
-		goto err3;
+		goto err2;
 
 	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
 
 	return qp;
 
-err3:
-	tasklet_disable(&qp->rx_work);
 err2:
 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
 		kfree(entry);
 err1:
 	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
 		kfree(entry);
+	if (qp->dma_chan)
+		dmaengine_put();
 	set_bit(free_queue, &nt->qp_bitmap);
 err:
 	return NULL;
@@ -1515,7 +1521,6 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 	}
 
 	ntb_unregister_db_callback(qp->ndev, qp->qp_num);
-	tasklet_disable(&qp->rx_work);
 
 	cancel_delayed_work_sync(&qp->link_work);
 