aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-06-12 16:14:30 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-06-12 16:14:30 -0400
commit7259f6452577f2df48f03d07e8302b8535cad74c (patch)
tree24d70385f3e275dfa8fe726aa13069da29e1faf0
parent467590e055f5c714fb457803250415879d0da9e5 (diff)
parentc9160b69258ef46ab62c27a09decb8fef311e700 (diff)
Merge tag 'ntb-4.18' of git://github.com/jonmason/ntb
Pull NTB updates from Jon Mason: - reorg and clean-up of the Intel NTB driver - a trivial comment change - change GFP_ATOMIC to GFP_KERNEL where appropriate * tag 'ntb-4.18' of git://github.com/jonmason/ntb: ntb: ntb_transport: Replace GFP_ATOMIC with GFP_KERNEL in ntb_transport_create_queue ntb: ntb_transport: Replace GFP_ATOMIC with GFP_KERNEL in ntb_transport_setup_qp_mw NTB: ntb_hw_idt: fix typo 'can by' to 'can be' ntb: intel: change references of skx to gen3 ntb: intel: split out the gen3 code ntb: intel: header definitions refactor
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c2
-rw-r--r--drivers/ntb/hw/intel/Makefile1
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c (renamed from drivers/ntb/hw/intel/ntb_hw_intel.c)713
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.h182
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen3.c597
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen3.h110
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.h203
-rw-r--r--drivers/ntb/ntb_transport.c6
8 files changed, 1001 insertions, 813 deletions
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index 8d98872d0983..dbe72f116017 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -1401,7 +1401,7 @@ static int idt_ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx,
1401 * 5. Doorbell operations 1401 * 5. Doorbell operations
1402 * 1402 *
1403 * Doorbell functionality of IDT PCIe-switches is pretty unusual. First of 1403 * Doorbell functionality of IDT PCIe-switches is pretty unusual. First of
1404 * all there is global doorbell register which state can by changed by any 1404 * all there is global doorbell register which state can be changed by any
1405 * NT-function of the IDT device in accordance with global permissions. These 1405 * NT-function of the IDT device in accordance with global permissions. These
1406 * permissions configs are not supported by NTB API, so it must be done by 1406 * permissions configs are not supported by NTB API, so it must be done by
1407 * either BIOS or EEPROM settings. In the same way the state of the global 1407 * either BIOS or EEPROM settings. In the same way the state of the global
diff --git a/drivers/ntb/hw/intel/Makefile b/drivers/ntb/hw/intel/Makefile
index 1b434568d2ad..4ff22af967c6 100644
--- a/drivers/ntb/hw/intel/Makefile
+++ b/drivers/ntb/hw/intel/Makefile
@@ -1 +1,2 @@
1obj-$(CONFIG_NTB_INTEL) += ntb_hw_intel.o 1obj-$(CONFIG_NTB_INTEL) += ntb_hw_intel.o
2ntb_hw_intel-y := ntb_hw_gen1.o ntb_hw_gen3.o
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index 156b45cd4a19..ffdee98e8ece 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -45,9 +45,6 @@
45 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 45 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 * 46 *
47 * Intel PCIe NTB Linux driver 47 * Intel PCIe NTB Linux driver
48 *
49 * Contact Information:
50 * Jon Mason <jon.mason@intel.com>
51 */ 48 */
52 49
53#include <linux/debugfs.h> 50#include <linux/debugfs.h>
@@ -61,6 +58,8 @@
61#include <linux/ntb.h> 58#include <linux/ntb.h>
62 59
63#include "ntb_hw_intel.h" 60#include "ntb_hw_intel.h"
61#include "ntb_hw_gen1.h"
62#include "ntb_hw_gen3.h"
64 63
65#define NTB_NAME "ntb_hw_intel" 64#define NTB_NAME "ntb_hw_intel"
66#define NTB_DESC "Intel(R) PCI-E Non-Transparent Bridge Driver" 65#define NTB_DESC "Intel(R) PCI-E Non-Transparent Bridge Driver"
@@ -80,14 +79,7 @@ static const struct intel_ntb_alt_reg xeon_sec_reg;
80static const struct intel_ntb_alt_reg xeon_b2b_reg; 79static const struct intel_ntb_alt_reg xeon_b2b_reg;
81static const struct intel_ntb_xlat_reg xeon_pri_xlat; 80static const struct intel_ntb_xlat_reg xeon_pri_xlat;
82static const struct intel_ntb_xlat_reg xeon_sec_xlat; 81static const struct intel_ntb_xlat_reg xeon_sec_xlat;
83static struct intel_b2b_addr xeon_b2b_usd_addr;
84static struct intel_b2b_addr xeon_b2b_dsd_addr;
85static const struct intel_ntb_reg skx_reg;
86static const struct intel_ntb_alt_reg skx_pri_reg;
87static const struct intel_ntb_alt_reg skx_b2b_reg;
88static const struct intel_ntb_xlat_reg skx_sec_xlat;
89static const struct ntb_dev_ops intel_ntb_ops; 82static const struct ntb_dev_ops intel_ntb_ops;
90static const struct ntb_dev_ops intel_ntb3_ops;
91 83
92static const struct file_operations intel_ntb_debugfs_info; 84static const struct file_operations intel_ntb_debugfs_info;
93static struct dentry *debugfs_dir; 85static struct dentry *debugfs_dir;
@@ -146,68 +138,8 @@ module_param_named(xeon_b2b_dsd_bar5_addr32,
146MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32, 138MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
147 "XEON B2B DSD split-BAR 5 32-bit address"); 139 "XEON B2B DSD split-BAR 5 32-bit address");
148 140
149static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
150static int xeon_init_isr(struct intel_ntb_dev *ndev);
151
152#ifndef ioread64
153#ifdef readq
154#define ioread64 readq
155#else
156#define ioread64 _ioread64
157static inline u64 _ioread64(void __iomem *mmio)
158{
159 u64 low, high;
160
161 low = ioread32(mmio);
162 high = ioread32(mmio + sizeof(u32));
163 return low | (high << 32);
164}
165#endif
166#endif
167
168#ifndef iowrite64
169#ifdef writeq
170#define iowrite64 writeq
171#else
172#define iowrite64 _iowrite64
173static inline void _iowrite64(u64 val, void __iomem *mmio)
174{
175 iowrite32(val, mmio);
176 iowrite32(val >> 32, mmio + sizeof(u32));
177}
178#endif
179#endif
180
181static inline int pdev_is_xeon(struct pci_dev *pdev)
182{
183 switch (pdev->device) {
184 case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
185 case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
186 case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
187 case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
188 case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
189 case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
190 case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
191 case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
192 case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
193 case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
194 case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
195 case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
196 case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
197 case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
198 case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
199 return 1;
200 }
201 return 0;
202}
203
204static inline int pdev_is_skx_xeon(struct pci_dev *pdev)
205{
206 if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)
207 return 1;
208 141
209 return 0; 142static int xeon_init_isr(struct intel_ntb_dev *ndev);
210}
211 143
212static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev) 144static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
213{ 145{
@@ -241,7 +173,7 @@ static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
241 return !!flag; 173 return !!flag;
242} 174}
243 175
244static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx) 176int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
245{ 177{
246 if (idx < 0 || idx >= ndev->mw_count) 178 if (idx < 0 || idx >= ndev->mw_count)
247 return -EINVAL; 179 return -EINVAL;
@@ -268,7 +200,7 @@ static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
268 return 0; 200 return 0;
269} 201}
270 202
271static inline u64 ndev_db_read(struct intel_ntb_dev *ndev, 203u64 ndev_db_read(struct intel_ntb_dev *ndev,
272 void __iomem *mmio) 204 void __iomem *mmio)
273{ 205{
274 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) 206 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
@@ -277,7 +209,7 @@ static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
277 return ndev->reg->db_ioread(mmio); 209 return ndev->reg->db_ioread(mmio);
278} 210}
279 211
280static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits, 212int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
281 void __iomem *mmio) 213 void __iomem *mmio)
282{ 214{
283 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) 215 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
@@ -429,7 +361,7 @@ static irqreturn_t ndev_irq_isr(int irq, void *dev)
429 return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq); 361 return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
430} 362}
431 363
432static int ndev_init_isr(struct intel_ntb_dev *ndev, 364int ndev_init_isr(struct intel_ntb_dev *ndev,
433 int msix_min, int msix_max, 365 int msix_min, int msix_max,
434 int msix_shift, int total_shift) 366 int msix_shift, int total_shift)
435{ 367{
@@ -557,169 +489,6 @@ static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
557 } 489 }
558} 490}
559 491
560static ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
561 size_t count, loff_t *offp)
562{
563 struct intel_ntb_dev *ndev;
564 void __iomem *mmio;
565 char *buf;
566 size_t buf_size;
567 ssize_t ret, off;
568 union { u64 v64; u32 v32; u16 v16; } u;
569
570 ndev = filp->private_data;
571 mmio = ndev->self_mmio;
572
573 buf_size = min(count, 0x800ul);
574
575 buf = kmalloc(buf_size, GFP_KERNEL);
576 if (!buf)
577 return -ENOMEM;
578
579 off = 0;
580
581 off += scnprintf(buf + off, buf_size - off,
582 "NTB Device Information:\n");
583
584 off += scnprintf(buf + off, buf_size - off,
585 "Connection Topology -\t%s\n",
586 ntb_topo_string(ndev->ntb.topo));
587
588 off += scnprintf(buf + off, buf_size - off,
589 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
590 off += scnprintf(buf + off, buf_size - off,
591 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);
592
593 if (!ndev->reg->link_is_up(ndev))
594 off += scnprintf(buf + off, buf_size - off,
595 "Link Status -\t\tDown\n");
596 else {
597 off += scnprintf(buf + off, buf_size - off,
598 "Link Status -\t\tUp\n");
599 off += scnprintf(buf + off, buf_size - off,
600 "Link Speed -\t\tPCI-E Gen %u\n",
601 NTB_LNK_STA_SPEED(ndev->lnk_sta));
602 off += scnprintf(buf + off, buf_size - off,
603 "Link Width -\t\tx%u\n",
604 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
605 }
606
607 off += scnprintf(buf + off, buf_size - off,
608 "Memory Window Count -\t%u\n", ndev->mw_count);
609 off += scnprintf(buf + off, buf_size - off,
610 "Scratchpad Count -\t%u\n", ndev->spad_count);
611 off += scnprintf(buf + off, buf_size - off,
612 "Doorbell Count -\t%u\n", ndev->db_count);
613 off += scnprintf(buf + off, buf_size - off,
614 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
615 off += scnprintf(buf + off, buf_size - off,
616 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
617
618 off += scnprintf(buf + off, buf_size - off,
619 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
620 off += scnprintf(buf + off, buf_size - off,
621 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
622 off += scnprintf(buf + off, buf_size - off,
623 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
624
625 u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
626 off += scnprintf(buf + off, buf_size - off,
627 "Doorbell Mask -\t\t%#llx\n", u.v64);
628
629 u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
630 off += scnprintf(buf + off, buf_size - off,
631 "Doorbell Bell -\t\t%#llx\n", u.v64);
632
633 off += scnprintf(buf + off, buf_size - off,
634 "\nNTB Incoming XLAT:\n");
635
636 u.v64 = ioread64(mmio + SKX_IMBAR1XBASE_OFFSET);
637 off += scnprintf(buf + off, buf_size - off,
638 "IMBAR1XBASE -\t\t%#018llx\n", u.v64);
639
640 u.v64 = ioread64(mmio + SKX_IMBAR2XBASE_OFFSET);
641 off += scnprintf(buf + off, buf_size - off,
642 "IMBAR2XBASE -\t\t%#018llx\n", u.v64);
643
644 u.v64 = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
645 off += scnprintf(buf + off, buf_size - off,
646 "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64);
647
648 u.v64 = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
649 off += scnprintf(buf + off, buf_size - off,
650 "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64);
651
652 if (ntb_topo_is_b2b(ndev->ntb.topo)) {
653 off += scnprintf(buf + off, buf_size - off,
654 "\nNTB Outgoing B2B XLAT:\n");
655
656 u.v64 = ioread64(mmio + SKX_EMBAR1XBASE_OFFSET);
657 off += scnprintf(buf + off, buf_size - off,
658 "EMBAR1XBASE -\t\t%#018llx\n", u.v64);
659
660 u.v64 = ioread64(mmio + SKX_EMBAR2XBASE_OFFSET);
661 off += scnprintf(buf + off, buf_size - off,
662 "EMBAR2XBASE -\t\t%#018llx\n", u.v64);
663
664 u.v64 = ioread64(mmio + SKX_EMBAR1XLMT_OFFSET);
665 off += scnprintf(buf + off, buf_size - off,
666 "EMBAR1XLMT -\t\t%#018llx\n", u.v64);
667
668 u.v64 = ioread64(mmio + SKX_EMBAR2XLMT_OFFSET);
669 off += scnprintf(buf + off, buf_size - off,
670 "EMBAR2XLMT -\t\t%#018llx\n", u.v64);
671
672 off += scnprintf(buf + off, buf_size - off,
673 "\nNTB Secondary BAR:\n");
674
675 u.v64 = ioread64(mmio + SKX_EMBAR0_OFFSET);
676 off += scnprintf(buf + off, buf_size - off,
677 "EMBAR0 -\t\t%#018llx\n", u.v64);
678
679 u.v64 = ioread64(mmio + SKX_EMBAR1_OFFSET);
680 off += scnprintf(buf + off, buf_size - off,
681 "EMBAR1 -\t\t%#018llx\n", u.v64);
682
683 u.v64 = ioread64(mmio + SKX_EMBAR2_OFFSET);
684 off += scnprintf(buf + off, buf_size - off,
685 "EMBAR2 -\t\t%#018llx\n", u.v64);
686 }
687
688 off += scnprintf(buf + off, buf_size - off,
689 "\nNTB Statistics:\n");
690
691 u.v16 = ioread16(mmio + SKX_USMEMMISS_OFFSET);
692 off += scnprintf(buf + off, buf_size - off,
693 "Upstream Memory Miss -\t%u\n", u.v16);
694
695 off += scnprintf(buf + off, buf_size - off,
696 "\nNTB Hardware Errors:\n");
697
698 if (!pci_read_config_word(ndev->ntb.pdev,
699 SKX_DEVSTS_OFFSET, &u.v16))
700 off += scnprintf(buf + off, buf_size - off,
701 "DEVSTS -\t\t%#06x\n", u.v16);
702
703 if (!pci_read_config_word(ndev->ntb.pdev,
704 SKX_LINK_STATUS_OFFSET, &u.v16))
705 off += scnprintf(buf + off, buf_size - off,
706 "LNKSTS -\t\t%#06x\n", u.v16);
707
708 if (!pci_read_config_dword(ndev->ntb.pdev,
709 SKX_UNCERRSTS_OFFSET, &u.v32))
710 off += scnprintf(buf + off, buf_size - off,
711 "UNCERRSTS -\t\t%#06x\n", u.v32);
712
713 if (!pci_read_config_dword(ndev->ntb.pdev,
714 SKX_CORERRSTS_OFFSET, &u.v32))
715 off += scnprintf(buf + off, buf_size - off,
716 "CORERRSTS -\t\t%#06x\n", u.v32);
717
718 ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
719 kfree(buf);
720 return ret;
721}
722
723static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf, 492static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
724 size_t count, loff_t *offp) 493 size_t count, loff_t *offp)
725{ 494{
@@ -879,7 +648,7 @@ static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
879 "LMT45 -\t\t\t%#018llx\n", u.v64); 648 "LMT45 -\t\t\t%#018llx\n", u.v64);
880 } 649 }
881 650
882 if (pdev_is_xeon(pdev)) { 651 if (pdev_is_gen1(pdev)) {
883 if (ntb_topo_is_b2b(ndev->ntb.topo)) { 652 if (ntb_topo_is_b2b(ndev->ntb.topo)) {
884 off += scnprintf(buf + off, buf_size - off, 653 off += scnprintf(buf + off, buf_size - off,
885 "\nNTB Outgoing B2B XLAT:\n"); 654 "\nNTB Outgoing B2B XLAT:\n");
@@ -991,9 +760,9 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
991{ 760{
992 struct intel_ntb_dev *ndev = filp->private_data; 761 struct intel_ntb_dev *ndev = filp->private_data;
993 762
994 if (pdev_is_xeon(ndev->ntb.pdev)) 763 if (pdev_is_gen1(ndev->ntb.pdev))
995 return ndev_ntb_debugfs_read(filp, ubuf, count, offp); 764 return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
996 else if (pdev_is_skx_xeon(ndev->ntb.pdev)) 765 else if (pdev_is_gen3(ndev->ntb.pdev))
997 return ndev_ntb3_debugfs_read(filp, ubuf, count, offp); 766 return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
998 767
999 return -ENXIO; 768 return -ENXIO;
@@ -1023,7 +792,7 @@ static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
1023 debugfs_remove_recursive(ndev->debugfs_dir); 792 debugfs_remove_recursive(ndev->debugfs_dir);
1024} 793}
1025 794
1026static int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx) 795int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx)
1027{ 796{
1028 if (pidx != NTB_DEF_PEER_IDX) 797 if (pidx != NTB_DEF_PEER_IDX)
1029 return -EINVAL; 798 return -EINVAL;
@@ -1031,10 +800,10 @@ static int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx)
1031 return ntb_ndev(ntb)->mw_count; 800 return ntb_ndev(ntb)->mw_count;
1032} 801}
1033 802
1034static int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx, 803int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
1035 resource_size_t *addr_align, 804 resource_size_t *addr_align,
1036 resource_size_t *size_align, 805 resource_size_t *size_align,
1037 resource_size_t *size_max) 806 resource_size_t *size_max)
1038{ 807{
1039 struct intel_ntb_dev *ndev = ntb_ndev(ntb); 808 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1040 resource_size_t bar_size, mw_size; 809 resource_size_t bar_size, mw_size;
@@ -1170,9 +939,8 @@ static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
1170 return 0; 939 return 0;
1171} 940}
1172 941
1173static u64 intel_ntb_link_is_up(struct ntb_dev *ntb, 942u64 intel_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed,
1174 enum ntb_speed *speed, 943 enum ntb_width *width)
1175 enum ntb_width *width)
1176{ 944{
1177 struct intel_ntb_dev *ndev = ntb_ndev(ntb); 945 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1178 946
@@ -1224,7 +992,7 @@ static int intel_ntb_link_enable(struct ntb_dev *ntb,
1224 return 0; 992 return 0;
1225} 993}
1226 994
1227static int intel_ntb_link_disable(struct ntb_dev *ntb) 995int intel_ntb_link_disable(struct ntb_dev *ntb)
1228{ 996{
1229 struct intel_ntb_dev *ndev; 997 struct intel_ntb_dev *ndev;
1230 u32 ntb_cntl; 998 u32 ntb_cntl;
@@ -1248,14 +1016,14 @@ static int intel_ntb_link_disable(struct ntb_dev *ntb)
1248 return 0; 1016 return 0;
1249} 1017}
1250 1018
1251static int intel_ntb_peer_mw_count(struct ntb_dev *ntb) 1019int intel_ntb_peer_mw_count(struct ntb_dev *ntb)
1252{ 1020{
1253 /* Numbers of inbound and outbound memory windows match */ 1021 /* Numbers of inbound and outbound memory windows match */
1254 return ntb_ndev(ntb)->mw_count; 1022 return ntb_ndev(ntb)->mw_count;
1255} 1023}
1256 1024
1257static int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx, 1025int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
1258 phys_addr_t *base, resource_size_t *size) 1026 phys_addr_t *base, resource_size_t *size)
1259{ 1027{
1260 struct intel_ntb_dev *ndev = ntb_ndev(ntb); 1028 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1261 int bar; 1029 int bar;
@@ -1283,12 +1051,12 @@ static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
1283 return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB); 1051 return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
1284} 1052}
1285 1053
1286static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb) 1054u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
1287{ 1055{
1288 return ntb_ndev(ntb)->db_valid_mask; 1056 return ntb_ndev(ntb)->db_valid_mask;
1289} 1057}
1290 1058
1291static int intel_ntb_db_vector_count(struct ntb_dev *ntb) 1059int intel_ntb_db_vector_count(struct ntb_dev *ntb)
1292{ 1060{
1293 struct intel_ntb_dev *ndev; 1061 struct intel_ntb_dev *ndev;
1294 1062
@@ -1297,7 +1065,7 @@ static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
1297 return ndev->db_vec_count; 1065 return ndev->db_vec_count;
1298} 1066}
1299 1067
1300static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector) 1068u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
1301{ 1069{
1302 struct intel_ntb_dev *ndev = ntb_ndev(ntb); 1070 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1303 1071
@@ -1325,7 +1093,7 @@ static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
1325 ndev->self_reg->db_bell); 1093 ndev->self_reg->db_bell);
1326} 1094}
1327 1095
1328static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits) 1096int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
1329{ 1097{
1330 struct intel_ntb_dev *ndev = ntb_ndev(ntb); 1098 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1331 1099
@@ -1334,7 +1102,7 @@ static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
1334 ndev->self_reg->db_mask); 1102 ndev->self_reg->db_mask);
1335} 1103}
1336 1104
1337static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) 1105int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
1338{ 1106{
1339 struct intel_ntb_dev *ndev = ntb_ndev(ntb); 1107 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1340 1108
@@ -1343,9 +1111,8 @@ static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
1343 ndev->self_reg->db_mask); 1111 ndev->self_reg->db_mask);
1344} 1112}
1345 1113
1346static int intel_ntb_peer_db_addr(struct ntb_dev *ntb, 1114int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
1347 phys_addr_t *db_addr, 1115 resource_size_t *db_size)
1348 resource_size_t *db_size)
1349{ 1116{
1350 struct intel_ntb_dev *ndev = ntb_ndev(ntb); 1117 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1351 1118
@@ -1362,12 +1129,12 @@ static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
1362 ndev->peer_reg->db_bell); 1129 ndev->peer_reg->db_bell);
1363} 1130}
1364 1131
1365static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb) 1132int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
1366{ 1133{
1367 return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD); 1134 return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
1368} 1135}
1369 1136
1370static int intel_ntb_spad_count(struct ntb_dev *ntb) 1137int intel_ntb_spad_count(struct ntb_dev *ntb)
1371{ 1138{
1372 struct intel_ntb_dev *ndev; 1139 struct intel_ntb_dev *ndev;
1373 1140
@@ -1376,7 +1143,7 @@ static int intel_ntb_spad_count(struct ntb_dev *ntb)
1376 return ndev->spad_count; 1143 return ndev->spad_count;
1377} 1144}
1378 1145
1379static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx) 1146u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
1380{ 1147{
1381 struct intel_ntb_dev *ndev = ntb_ndev(ntb); 1148 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1382 1149
@@ -1385,8 +1152,7 @@ static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
1385 ndev->self_reg->spad); 1152 ndev->self_reg->spad);
1386} 1153}
1387 1154
1388static int intel_ntb_spad_write(struct ntb_dev *ntb, 1155int intel_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
1389 int idx, u32 val)
1390{ 1156{
1391 struct intel_ntb_dev *ndev = ntb_ndev(ntb); 1157 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1392 1158
@@ -1395,8 +1161,8 @@ static int intel_ntb_spad_write(struct ntb_dev *ntb,
1395 ndev->self_reg->spad); 1161 ndev->self_reg->spad);
1396} 1162}
1397 1163
1398static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx, 1164int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
1399 phys_addr_t *spad_addr) 1165 phys_addr_t *spad_addr)
1400{ 1166{
1401 struct intel_ntb_dev *ndev = ntb_ndev(ntb); 1167 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1402 1168
@@ -1404,7 +1170,7 @@ static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
1404 ndev->peer_reg->spad); 1170 ndev->peer_reg->spad);
1405} 1171}
1406 1172
1407static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx) 1173u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
1408{ 1174{
1409 struct intel_ntb_dev *ndev = ntb_ndev(ntb); 1175 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1410 1176
@@ -1413,8 +1179,8 @@ static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
1413 ndev->peer_reg->spad); 1179 ndev->peer_reg->spad);
1414} 1180}
1415 1181
1416static int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, 1182int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
1417 int sidx, u32 val) 1183 u32 val)
1418{ 1184{
1419 struct intel_ntb_dev *ndev = ntb_ndev(ntb); 1185 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1420 1186
@@ -1423,336 +1189,6 @@ static int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
1423 ndev->peer_reg->spad); 1189 ndev->peer_reg->spad);
1424} 1190}
1425 1191
1426/* Skylake Xeon NTB */
1427
1428static int skx_poll_link(struct intel_ntb_dev *ndev)
1429{
1430 u16 reg_val;
1431 int rc;
1432
1433 ndev->reg->db_iowrite(ndev->db_link_mask,
1434 ndev->self_mmio +
1435 ndev->self_reg->db_clear);
1436
1437 rc = pci_read_config_word(ndev->ntb.pdev,
1438 SKX_LINK_STATUS_OFFSET, &reg_val);
1439 if (rc)
1440 return 0;
1441
1442 if (reg_val == ndev->lnk_sta)
1443 return 0;
1444
1445 ndev->lnk_sta = reg_val;
1446
1447 return 1;
1448}
1449
1450static u64 skx_db_ioread(void __iomem *mmio)
1451{
1452 return ioread64(mmio);
1453}
1454
1455static void skx_db_iowrite(u64 bits, void __iomem *mmio)
1456{
1457 iowrite64(bits, mmio);
1458}
1459
1460static int skx_init_isr(struct intel_ntb_dev *ndev)
1461{
1462 int i;
1463
1464 /*
1465 * The MSIX vectors and the interrupt status bits are not lined up
1466 * on Skylake. By default the link status bit is bit 32, however it
1467 * is by default MSIX vector0. We need to fixup to line them up.
1468 * The vectors at reset is 1-32,0. We need to reprogram to 0-32.
1469 */
1470
1471 for (i = 0; i < SKX_DB_MSIX_VECTOR_COUNT; i++)
1472 iowrite8(i, ndev->self_mmio + SKX_INTVEC_OFFSET + i);
1473
1474 /* move link status down one as workaround */
1475 if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) {
1476 iowrite8(SKX_DB_MSIX_VECTOR_COUNT - 2,
1477 ndev->self_mmio + SKX_INTVEC_OFFSET +
1478 (SKX_DB_MSIX_VECTOR_COUNT - 1));
1479 }
1480
1481 return ndev_init_isr(ndev, SKX_DB_MSIX_VECTOR_COUNT,
1482 SKX_DB_MSIX_VECTOR_COUNT,
1483 SKX_DB_MSIX_VECTOR_SHIFT,
1484 SKX_DB_TOTAL_SHIFT);
1485}
1486
1487static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
1488 const struct intel_b2b_addr *addr,
1489 const struct intel_b2b_addr *peer_addr)
1490{
1491 struct pci_dev *pdev;
1492 void __iomem *mmio;
1493 phys_addr_t bar_addr;
1494
1495 pdev = ndev->ntb.pdev;
1496 mmio = ndev->self_mmio;
1497
1498 /* setup incoming bar limits == base addrs (zero length windows) */
1499 bar_addr = addr->bar2_addr64;
1500 iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
1501 bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
1502 dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);
1503
1504 bar_addr = addr->bar4_addr64;
1505 iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
1506 bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
1507 dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);
1508
1509 /* zero incoming translation addrs */
1510 iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET);
1511 iowrite64(0, mmio + SKX_IMBAR2XBASE_OFFSET);
1512
1513 ndev->peer_mmio = ndev->self_mmio;
1514
1515 return 0;
1516}
1517
1518static int skx_init_ntb(struct intel_ntb_dev *ndev)
1519{
1520 int rc;
1521
1522
1523 ndev->mw_count = XEON_MW_COUNT;
1524 ndev->spad_count = SKX_SPAD_COUNT;
1525 ndev->db_count = SKX_DB_COUNT;
1526 ndev->db_link_mask = SKX_DB_LINK_BIT;
1527
1528 /* DB fixup for using 31 right now */
1529 if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
1530 ndev->db_link_mask |= BIT_ULL(31);
1531
1532 switch (ndev->ntb.topo) {
1533 case NTB_TOPO_B2B_USD:
1534 case NTB_TOPO_B2B_DSD:
1535 ndev->self_reg = &skx_pri_reg;
1536 ndev->peer_reg = &skx_b2b_reg;
1537 ndev->xlat_reg = &skx_sec_xlat;
1538
1539 if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
1540 rc = skx_setup_b2b_mw(ndev,
1541 &xeon_b2b_dsd_addr,
1542 &xeon_b2b_usd_addr);
1543 } else {
1544 rc = skx_setup_b2b_mw(ndev,
1545 &xeon_b2b_usd_addr,
1546 &xeon_b2b_dsd_addr);
1547 }
1548
1549 if (rc)
1550 return rc;
1551
1552 /* Enable Bus Master and Memory Space on the secondary side */
1553 iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
1554 ndev->self_mmio + SKX_SPCICMD_OFFSET);
1555
1556 break;
1557
1558 default:
1559 return -EINVAL;
1560 }
1561
1562 ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
1563
1564 ndev->reg->db_iowrite(ndev->db_valid_mask,
1565 ndev->self_mmio +
1566 ndev->self_reg->db_mask);
1567
1568 return 0;
1569}
1570
1571static int skx_init_dev(struct intel_ntb_dev *ndev)
1572{
1573 struct pci_dev *pdev;
1574 u8 ppd;
1575 int rc;
1576
1577 pdev = ndev->ntb.pdev;
1578
1579 ndev->reg = &skx_reg;
1580
1581 rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
1582 if (rc)
1583 return -EIO;
1584
1585 ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
1586 dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
1587 ntb_topo_string(ndev->ntb.topo));
1588 if (ndev->ntb.topo == NTB_TOPO_NONE)
1589 return -EINVAL;
1590
1591 if (pdev_is_skx_xeon(pdev))
1592 ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD;
1593
1594 rc = skx_init_ntb(ndev);
1595 if (rc)
1596 return rc;
1597
1598 return skx_init_isr(ndev);
1599}
1600
1601static int intel_ntb3_link_enable(struct ntb_dev *ntb,
1602 enum ntb_speed max_speed,
1603 enum ntb_width max_width)
1604{
1605 struct intel_ntb_dev *ndev;
1606 u32 ntb_ctl;
1607
1608 ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1609
1610 dev_dbg(&ntb->pdev->dev,
1611 "Enabling link with max_speed %d max_width %d\n",
1612 max_speed, max_width);
1613
1614 if (max_speed != NTB_SPEED_AUTO)
1615 dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
1616 if (max_width != NTB_WIDTH_AUTO)
1617 dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);
1618
1619 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
1620 ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
1621 ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
1622 ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
1623 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
1624
1625 return 0;
1626}
1627static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
1628 dma_addr_t addr, resource_size_t size)
1629{
1630 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1631 unsigned long xlat_reg, limit_reg;
1632 resource_size_t bar_size, mw_size;
1633 void __iomem *mmio;
1634 u64 base, limit, reg_val;
1635 int bar;
1636
1637 if (pidx != NTB_DEF_PEER_IDX)
1638 return -EINVAL;
1639
1640 if (idx >= ndev->b2b_idx && !ndev->b2b_off)
1641 idx += 1;
1642
1643 bar = ndev_mw_to_bar(ndev, idx);
1644 if (bar < 0)
1645 return bar;
1646
1647 bar_size = pci_resource_len(ndev->ntb.pdev, bar);
1648
1649 if (idx == ndev->b2b_idx)
1650 mw_size = bar_size - ndev->b2b_off;
1651 else
1652 mw_size = bar_size;
1653
1654 /* hardware requires that addr is aligned to bar size */
1655 if (addr & (bar_size - 1))
1656 return -EINVAL;
1657
1658 /* make sure the range fits in the usable mw size */
1659 if (size > mw_size)
1660 return -EINVAL;
1661
1662 mmio = ndev->self_mmio;
1663 xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
1664 limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
1665 base = pci_resource_start(ndev->ntb.pdev, bar);
1666
1667 /* Set the limit if supported, if size is not mw_size */
1668 if (limit_reg && size != mw_size)
1669 limit = base + size;
1670 else
1671 limit = base + mw_size;
1672
1673 /* set and verify setting the translation address */
1674 iowrite64(addr, mmio + xlat_reg);
1675 reg_val = ioread64(mmio + xlat_reg);
1676 if (reg_val != addr) {
1677 iowrite64(0, mmio + xlat_reg);
1678 return -EIO;
1679 }
1680
1681 dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);
1682
1683 /* set and verify setting the limit */
1684 iowrite64(limit, mmio + limit_reg);
1685 reg_val = ioread64(mmio + limit_reg);
1686 if (reg_val != limit) {
1687 iowrite64(base, mmio + limit_reg);
1688 iowrite64(0, mmio + xlat_reg);
1689 return -EIO;
1690 }
1691
1692 dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);
1693
1694 /* setup the EP */
1695 limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
1696 base = ioread64(mmio + SKX_EMBAR1_OFFSET + (8 * idx));
1697 base &= ~0xf;
1698
1699 if (limit_reg && size != mw_size)
1700 limit = base + size;
1701 else
1702 limit = base + mw_size;
1703
1704 /* set and verify setting the limit */
1705 iowrite64(limit, mmio + limit_reg);
1706 reg_val = ioread64(mmio + limit_reg);
1707 if (reg_val != limit) {
1708 iowrite64(base, mmio + limit_reg);
1709 iowrite64(0, mmio + xlat_reg);
1710 return -EIO;
1711 }
1712
1713 dev_dbg(&ntb->pdev->dev, "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);
1714
1715 return 0;
1716}
1717
1718static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
1719{
1720 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1721 int bit;
1722
1723 if (db_bits & ~ndev->db_valid_mask)
1724 return -EINVAL;
1725
1726 while (db_bits) {
1727 bit = __ffs(db_bits);
1728 iowrite32(1, ndev->peer_mmio +
1729 ndev->peer_reg->db_bell + (bit * 4));
1730 db_bits &= db_bits - 1;
1731 }
1732
1733 return 0;
1734}
1735
1736static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
1737{
1738 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1739
1740 return ndev_db_read(ndev,
1741 ndev->self_mmio +
1742 ndev->self_reg->db_clear);
1743}
1744
1745static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
1746{
1747 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1748
1749 return ndev_db_write(ndev, db_bits,
1750 ndev->self_mmio +
1751 ndev->self_reg->db_clear);
1752}
1753
1754/* XEON */
1755
1756static u64 xeon_db_ioread(void __iomem *mmio) 1192static u64 xeon_db_ioread(void __iomem *mmio)
1757{ 1193{
1758 return (u64)ioread16(mmio); 1194 return (u64)ioread16(mmio);
@@ -1785,7 +1221,7 @@ static int xeon_poll_link(struct intel_ntb_dev *ndev)
1785 return 1; 1221 return 1;
1786} 1222}
1787 1223
1788static int xeon_link_is_up(struct intel_ntb_dev *ndev) 1224int xeon_link_is_up(struct intel_ntb_dev *ndev)
1789{ 1225{
1790 if (ndev->ntb.topo == NTB_TOPO_SEC) 1226 if (ndev->ntb.topo == NTB_TOPO_SEC)
1791 return 1; 1227 return 1;
@@ -1793,7 +1229,7 @@ static int xeon_link_is_up(struct intel_ntb_dev *ndev)
1793 return NTB_LNK_STA_ACTIVE(ndev->lnk_sta); 1229 return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
1794} 1230}
1795 1231
1796static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd) 1232enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
1797{ 1233{
1798 switch (ppd & XEON_PPD_TOPO_MASK) { 1234 switch (ppd & XEON_PPD_TOPO_MASK) {
1799 case XEON_PPD_TOPO_B2B_USD: 1235 case XEON_PPD_TOPO_B2B_USD:
@@ -2410,7 +1846,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
2410 1846
2411 node = dev_to_node(&pdev->dev); 1847 node = dev_to_node(&pdev->dev);
2412 1848
2413 if (pdev_is_xeon(pdev)) { 1849 if (pdev_is_gen1(pdev)) {
2414 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node); 1850 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
2415 if (!ndev) { 1851 if (!ndev) {
2416 rc = -ENOMEM; 1852 rc = -ENOMEM;
@@ -2427,7 +1863,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
2427 if (rc) 1863 if (rc)
2428 goto err_init_dev; 1864 goto err_init_dev;
2429 1865
2430 } else if (pdev_is_skx_xeon(pdev)) { 1866 } else if (pdev_is_gen3(pdev)) {
2431 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node); 1867 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
2432 if (!ndev) { 1868 if (!ndev) {
2433 rc = -ENOMEM; 1869 rc = -ENOMEM;
@@ -2441,7 +1877,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
2441 if (rc) 1877 if (rc)
2442 goto err_init_pci; 1878 goto err_init_pci;
2443 1879
2444 rc = skx_init_dev(ndev); 1880 rc = gen3_init_dev(ndev);
2445 if (rc) 1881 if (rc)
2446 goto err_init_dev; 1882 goto err_init_dev;
2447 1883
@@ -2466,7 +1902,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
2466 1902
2467err_register: 1903err_register:
2468 ndev_deinit_debugfs(ndev); 1904 ndev_deinit_debugfs(ndev);
2469 if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev)) 1905 if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev))
2470 xeon_deinit_dev(ndev); 1906 xeon_deinit_dev(ndev);
2471err_init_dev: 1907err_init_dev:
2472 intel_ntb_deinit_pci(ndev); 1908 intel_ntb_deinit_pci(ndev);
@@ -2482,7 +1918,7 @@ static void intel_ntb_pci_remove(struct pci_dev *pdev)
2482 1918
2483 ntb_unregister_device(&ndev->ntb); 1919 ntb_unregister_device(&ndev->ntb);
2484 ndev_deinit_debugfs(ndev); 1920 ndev_deinit_debugfs(ndev);
2485 if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev)) 1921 if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev))
2486 xeon_deinit_dev(ndev); 1922 xeon_deinit_dev(ndev);
2487 intel_ntb_deinit_pci(ndev); 1923 intel_ntb_deinit_pci(ndev);
2488 kfree(ndev); 1924 kfree(ndev);
@@ -2537,50 +1973,20 @@ static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
2537 .bar2_xlat = XEON_SBAR23XLAT_OFFSET, 1973 .bar2_xlat = XEON_SBAR23XLAT_OFFSET,
2538}; 1974};
2539 1975
2540static struct intel_b2b_addr xeon_b2b_usd_addr = { 1976struct intel_b2b_addr xeon_b2b_usd_addr = {
2541 .bar2_addr64 = XEON_B2B_BAR2_ADDR64, 1977 .bar2_addr64 = XEON_B2B_BAR2_ADDR64,
2542 .bar4_addr64 = XEON_B2B_BAR4_ADDR64, 1978 .bar4_addr64 = XEON_B2B_BAR4_ADDR64,
2543 .bar4_addr32 = XEON_B2B_BAR4_ADDR32, 1979 .bar4_addr32 = XEON_B2B_BAR4_ADDR32,
2544 .bar5_addr32 = XEON_B2B_BAR5_ADDR32, 1980 .bar5_addr32 = XEON_B2B_BAR5_ADDR32,
2545}; 1981};
2546 1982
2547static struct intel_b2b_addr xeon_b2b_dsd_addr = { 1983struct intel_b2b_addr xeon_b2b_dsd_addr = {
2548 .bar2_addr64 = XEON_B2B_BAR2_ADDR64, 1984 .bar2_addr64 = XEON_B2B_BAR2_ADDR64,
2549 .bar4_addr64 = XEON_B2B_BAR4_ADDR64, 1985 .bar4_addr64 = XEON_B2B_BAR4_ADDR64,
2550 .bar4_addr32 = XEON_B2B_BAR4_ADDR32, 1986 .bar4_addr32 = XEON_B2B_BAR4_ADDR32,
2551 .bar5_addr32 = XEON_B2B_BAR5_ADDR32, 1987 .bar5_addr32 = XEON_B2B_BAR5_ADDR32,
2552}; 1988};
2553 1989
2554static const struct intel_ntb_reg skx_reg = {
2555 .poll_link = skx_poll_link,
2556 .link_is_up = xeon_link_is_up,
2557 .db_ioread = skx_db_ioread,
2558 .db_iowrite = skx_db_iowrite,
2559 .db_size = sizeof(u32),
2560 .ntb_ctl = SKX_NTBCNTL_OFFSET,
2561 .mw_bar = {2, 4},
2562};
2563
2564static const struct intel_ntb_alt_reg skx_pri_reg = {
2565 .db_bell = SKX_EM_DOORBELL_OFFSET,
2566 .db_clear = SKX_IM_INT_STATUS_OFFSET,
2567 .db_mask = SKX_IM_INT_DISABLE_OFFSET,
2568 .spad = SKX_IM_SPAD_OFFSET,
2569};
2570
2571static const struct intel_ntb_alt_reg skx_b2b_reg = {
2572 .db_bell = SKX_IM_DOORBELL_OFFSET,
2573 .db_clear = SKX_EM_INT_STATUS_OFFSET,
2574 .db_mask = SKX_EM_INT_DISABLE_OFFSET,
2575 .spad = SKX_B2B_SPAD_OFFSET,
2576};
2577
2578static const struct intel_ntb_xlat_reg skx_sec_xlat = {
2579/* .bar0_base = SKX_EMBAR0_OFFSET, */
2580 .bar2_limit = SKX_IMBAR1XLMT_OFFSET,
2581 .bar2_xlat = SKX_IMBAR1XBASE_OFFSET,
2582};
2583
2584/* operations for primary side of local ntb */ 1990/* operations for primary side of local ntb */
2585static const struct ntb_dev_ops intel_ntb_ops = { 1991static const struct ntb_dev_ops intel_ntb_ops = {
2586 .mw_count = intel_ntb_mw_count, 1992 .mw_count = intel_ntb_mw_count,
@@ -2610,33 +2016,6 @@ static const struct ntb_dev_ops intel_ntb_ops = {
2610 .peer_spad_write = intel_ntb_peer_spad_write, 2016 .peer_spad_write = intel_ntb_peer_spad_write,
2611}; 2017};
2612 2018
2613static const struct ntb_dev_ops intel_ntb3_ops = {
2614 .mw_count = intel_ntb_mw_count,
2615 .mw_get_align = intel_ntb_mw_get_align,
2616 .mw_set_trans = intel_ntb3_mw_set_trans,
2617 .peer_mw_count = intel_ntb_peer_mw_count,
2618 .peer_mw_get_addr = intel_ntb_peer_mw_get_addr,
2619 .link_is_up = intel_ntb_link_is_up,
2620 .link_enable = intel_ntb3_link_enable,
2621 .link_disable = intel_ntb_link_disable,
2622 .db_valid_mask = intel_ntb_db_valid_mask,
2623 .db_vector_count = intel_ntb_db_vector_count,
2624 .db_vector_mask = intel_ntb_db_vector_mask,
2625 .db_read = intel_ntb3_db_read,
2626 .db_clear = intel_ntb3_db_clear,
2627 .db_set_mask = intel_ntb_db_set_mask,
2628 .db_clear_mask = intel_ntb_db_clear_mask,
2629 .peer_db_addr = intel_ntb_peer_db_addr,
2630 .peer_db_set = intel_ntb3_peer_db_set,
2631 .spad_is_unsafe = intel_ntb_spad_is_unsafe,
2632 .spad_count = intel_ntb_spad_count,
2633 .spad_read = intel_ntb_spad_read,
2634 .spad_write = intel_ntb_spad_write,
2635 .peer_spad_addr = intel_ntb_peer_spad_addr,
2636 .peer_spad_read = intel_ntb_peer_spad_read,
2637 .peer_spad_write = intel_ntb_peer_spad_write,
2638};
2639
2640static const struct file_operations intel_ntb_debugfs_info = { 2019static const struct file_operations intel_ntb_debugfs_info = {
2641 .owner = THIS_MODULE, 2020 .owner = THIS_MODULE,
2642 .open = simple_open, 2021 .open = simple_open,
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.h b/drivers/ntb/hw/intel/ntb_hw_gen1.h
new file mode 100644
index 000000000000..ad8ec1444436
--- /dev/null
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.h
@@ -0,0 +1,182 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2012-2017 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * BSD LICENSE
14 *
15 * Copyright(c) 2012-2017 Intel Corporation. All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * * Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * * Redistributions in binary form must reproduce the above copy
24 * notice, this list of conditions and the following disclaimer in
25 * the documentation and/or other materials provided with the
26 * distribution.
27 * * Neither the name of Intel Corporation nor the names of its
28 * contributors may be used to endorse or promote products derived
29 * from this software without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
37 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#ifndef _NTB_INTEL_GEN1_H_
45#define _NTB_INTEL_GEN1_H_
46
47#include "ntb_hw_intel.h"
48
49/* Intel Gen1 Xeon hardware */
50#define XEON_PBAR23LMT_OFFSET 0x0000
51#define XEON_PBAR45LMT_OFFSET 0x0008
52#define XEON_PBAR4LMT_OFFSET 0x0008
53#define XEON_PBAR5LMT_OFFSET 0x000c
54#define XEON_PBAR23XLAT_OFFSET 0x0010
55#define XEON_PBAR45XLAT_OFFSET 0x0018
56#define XEON_PBAR4XLAT_OFFSET 0x0018
57#define XEON_PBAR5XLAT_OFFSET 0x001c
58#define XEON_SBAR23LMT_OFFSET 0x0020
59#define XEON_SBAR45LMT_OFFSET 0x0028
60#define XEON_SBAR4LMT_OFFSET 0x0028
61#define XEON_SBAR5LMT_OFFSET 0x002c
62#define XEON_SBAR23XLAT_OFFSET 0x0030
63#define XEON_SBAR45XLAT_OFFSET 0x0038
64#define XEON_SBAR4XLAT_OFFSET 0x0038
65#define XEON_SBAR5XLAT_OFFSET 0x003c
66#define XEON_SBAR0BASE_OFFSET 0x0040
67#define XEON_SBAR23BASE_OFFSET 0x0048
68#define XEON_SBAR45BASE_OFFSET 0x0050
69#define XEON_SBAR4BASE_OFFSET 0x0050
70#define XEON_SBAR5BASE_OFFSET 0x0054
71#define XEON_SBDF_OFFSET 0x005c
72#define XEON_NTBCNTL_OFFSET 0x0058
73#define XEON_PDOORBELL_OFFSET 0x0060
74#define XEON_PDBMSK_OFFSET 0x0062
75#define XEON_SDOORBELL_OFFSET 0x0064
76#define XEON_SDBMSK_OFFSET 0x0066
77#define XEON_USMEMMISS_OFFSET 0x0070
78#define XEON_SPAD_OFFSET 0x0080
79#define XEON_PBAR23SZ_OFFSET 0x00d0
80#define XEON_PBAR45SZ_OFFSET 0x00d1
81#define XEON_PBAR4SZ_OFFSET 0x00d1
82#define XEON_SBAR23SZ_OFFSET 0x00d2
83#define XEON_SBAR45SZ_OFFSET 0x00d3
84#define XEON_SBAR4SZ_OFFSET 0x00d3
85#define XEON_PPD_OFFSET 0x00d4
86#define XEON_PBAR5SZ_OFFSET 0x00d5
87#define XEON_SBAR5SZ_OFFSET 0x00d6
88#define XEON_WCCNTRL_OFFSET 0x00e0
89#define XEON_UNCERRSTS_OFFSET 0x014c
90#define XEON_CORERRSTS_OFFSET 0x0158
91#define XEON_LINK_STATUS_OFFSET 0x01a2
92#define XEON_SPCICMD_OFFSET 0x0504
93#define XEON_DEVCTRL_OFFSET 0x0598
94#define XEON_DEVSTS_OFFSET 0x059a
95#define XEON_SLINK_STATUS_OFFSET 0x05a2
96#define XEON_B2B_SPAD_OFFSET 0x0100
97#define XEON_B2B_DOORBELL_OFFSET 0x0140
98#define XEON_B2B_XLAT_OFFSETL 0x0144
99#define XEON_B2B_XLAT_OFFSETU 0x0148
100#define XEON_PPD_CONN_MASK 0x03
101#define XEON_PPD_CONN_TRANSPARENT 0x00
102#define XEON_PPD_CONN_B2B 0x01
103#define XEON_PPD_CONN_RP 0x02
104#define XEON_PPD_DEV_MASK 0x10
105#define XEON_PPD_DEV_USD 0x00
106#define XEON_PPD_DEV_DSD 0x10
107#define XEON_PPD_SPLIT_BAR_MASK 0x40
108
109#define XEON_PPD_TOPO_MASK (XEON_PPD_CONN_MASK | XEON_PPD_DEV_MASK)
110#define XEON_PPD_TOPO_PRI_USD (XEON_PPD_CONN_RP | XEON_PPD_DEV_USD)
111#define XEON_PPD_TOPO_PRI_DSD (XEON_PPD_CONN_RP | XEON_PPD_DEV_DSD)
112#define XEON_PPD_TOPO_SEC_USD (XEON_PPD_CONN_TRANSPARENT | XEON_PPD_DEV_USD)
113#define XEON_PPD_TOPO_SEC_DSD (XEON_PPD_CONN_TRANSPARENT | XEON_PPD_DEV_DSD)
114#define XEON_PPD_TOPO_B2B_USD (XEON_PPD_CONN_B2B | XEON_PPD_DEV_USD)
115#define XEON_PPD_TOPO_B2B_DSD (XEON_PPD_CONN_B2B | XEON_PPD_DEV_DSD)
116
117#define XEON_MW_COUNT 2
118#define HSX_SPLIT_BAR_MW_COUNT 3
119#define XEON_DB_COUNT 15
120#define XEON_DB_LINK 15
121#define XEON_DB_LINK_BIT BIT_ULL(XEON_DB_LINK)
122#define XEON_DB_MSIX_VECTOR_COUNT 4
123#define XEON_DB_MSIX_VECTOR_SHIFT 5
124#define XEON_DB_TOTAL_SHIFT 16
125#define XEON_SPAD_COUNT 16
126
127/* Use the following addresses for translation between b2b ntb devices in case
128 * the hardware default values are not reliable. */
129#define XEON_B2B_BAR0_ADDR 0x1000000000000000ull
130#define XEON_B2B_BAR2_ADDR64 0x2000000000000000ull
131#define XEON_B2B_BAR4_ADDR64 0x4000000000000000ull
132#define XEON_B2B_BAR4_ADDR32 0x20000000u
133#define XEON_B2B_BAR5_ADDR32 0x40000000u
134
135/* The peer ntb secondary config space is 32KB fixed size */
136#define XEON_B2B_MIN_SIZE 0x8000
137
138/* flags to indicate hardware errata */
139#define NTB_HWERR_SDOORBELL_LOCKUP BIT_ULL(0)
140#define NTB_HWERR_SB01BASE_LOCKUP BIT_ULL(1)
141#define NTB_HWERR_B2BDOORBELL_BIT14 BIT_ULL(2)
142#define NTB_HWERR_MSIX_VECTOR32_BAD BIT_ULL(3)
143
144extern struct intel_b2b_addr xeon_b2b_usd_addr;
145extern struct intel_b2b_addr xeon_b2b_dsd_addr;
146
147int ndev_init_isr(struct intel_ntb_dev *ndev, int msix_min, int msix_max,
148 int msix_shift, int total_shift);
149enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
150u64 ndev_db_read(struct intel_ntb_dev *ndev, void __iomem *mmio);
151int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
152 void __iomem *mmio);
153int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx);
154int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx);
155int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
156 resource_size_t *addr_align, resource_size_t *size_align,
157 resource_size_t *size_max);
158int intel_ntb_peer_mw_count(struct ntb_dev *ntb);
159int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
160 phys_addr_t *base, resource_size_t *size);
161u64 intel_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed,
162 enum ntb_width *width);
163int intel_ntb_link_disable(struct ntb_dev *ntb);
164u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb);
165int intel_ntb_db_vector_count(struct ntb_dev *ntb);
166u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector);
167int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits);
168int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits);
169int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
170 resource_size_t *db_size);
171int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb);
172int intel_ntb_spad_count(struct ntb_dev *ntb);
173u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx);
174int intel_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val);
175u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx);
176int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
177 u32 val);
178int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
179 phys_addr_t *spad_addr);
180int xeon_link_is_up(struct intel_ntb_dev *ndev);
181
182#endif
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.c b/drivers/ntb/hw/intel/ntb_hw_gen3.c
new file mode 100644
index 000000000000..b3fa24778f94
--- /dev/null
+++ b/drivers/ntb/hw/intel/ntb_hw_gen3.c
@@ -0,0 +1,597 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2017 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * BSD LICENSE
14 *
15 * Copyright(c) 2017 Intel Corporation. All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * * Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * * Redistributions in binary form must reproduce the above copy
24 * notice, this list of conditions and the following disclaimer in
25 * the documentation and/or other materials provided with the
26 * distribution.
27 * * Neither the name of Intel Corporation nor the names of its
28 * contributors may be used to endorse or promote products derived
29 * from this software without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
37 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 *
43 * Intel PCIe GEN3 NTB Linux driver
44 *
45 */
46
47#include <linux/debugfs.h>
48#include <linux/delay.h>
49#include <linux/init.h>
50#include <linux/interrupt.h>
51#include <linux/module.h>
52#include <linux/pci.h>
53#include <linux/random.h>
54#include <linux/slab.h>
55#include <linux/ntb.h>
56
57#include "ntb_hw_intel.h"
58#include "ntb_hw_gen1.h"
59#include "ntb_hw_gen3.h"
60
/* Forward declaration: referenced by the gen3_reg table below. */
static int gen3_poll_link(struct intel_ntb_dev *ndev);

/* Top-level register description for gen3 (Skylake) NTB hardware. */
static const struct intel_ntb_reg gen3_reg = {
	.poll_link		= gen3_poll_link,
	.link_is_up		= xeon_link_is_up,
	.db_ioread		= gen3_db_ioread,
	.db_iowrite		= gen3_db_iowrite,
	.db_size		= sizeof(u32),
	.ntb_ctl		= GEN3_NTBCNTL_OFFSET,
	.mw_bar			= {2, 4},
};

/* Register offsets for the primary (self) side. */
static const struct intel_ntb_alt_reg gen3_pri_reg = {
	.db_bell		= GEN3_EM_DOORBELL_OFFSET,
	.db_clear		= GEN3_IM_INT_STATUS_OFFSET,
	.db_mask		= GEN3_IM_INT_DISABLE_OFFSET,
	.spad			= GEN3_IM_SPAD_OFFSET,
};

/* Register offsets for the peer in back-to-back topologies. */
static const struct intel_ntb_alt_reg gen3_b2b_reg = {
	.db_bell		= GEN3_IM_DOORBELL_OFFSET,
	.db_clear		= GEN3_EM_INT_STATUS_OFFSET,
	.db_mask		= GEN3_EM_INT_DISABLE_OFFSET,
	.spad			= GEN3_B2B_SPAD_OFFSET,
};

/* Secondary-side translation registers (bar0_base intentionally unused). */
static const struct intel_ntb_xlat_reg gen3_sec_xlat = {
/*	.bar0_base		= GEN3_EMBAR0_OFFSET, */
	.bar2_limit		= GEN3_IMBAR1XLMT_OFFSET,
	.bar2_xlat		= GEN3_IMBAR1XBASE_OFFSET,
};
92
93static int gen3_poll_link(struct intel_ntb_dev *ndev)
94{
95 u16 reg_val;
96 int rc;
97
98 ndev->reg->db_iowrite(ndev->db_link_mask,
99 ndev->self_mmio +
100 ndev->self_reg->db_clear);
101
102 rc = pci_read_config_word(ndev->ntb.pdev,
103 GEN3_LINK_STATUS_OFFSET, &reg_val);
104 if (rc)
105 return 0;
106
107 if (reg_val == ndev->lnk_sta)
108 return 0;
109
110 ndev->lnk_sta = reg_val;
111
112 return 1;
113}
114
115static int gen3_init_isr(struct intel_ntb_dev *ndev)
116{
117 int i;
118
119 /*
120 * The MSIX vectors and the interrupt status bits are not lined up
121 * on Skylake. By default the link status bit is bit 32, however it
122 * is by default MSIX vector0. We need to fixup to line them up.
123 * The vectors at reset is 1-32,0. We need to reprogram to 0-32.
124 */
125
126 for (i = 0; i < GEN3_DB_MSIX_VECTOR_COUNT; i++)
127 iowrite8(i, ndev->self_mmio + GEN3_INTVEC_OFFSET + i);
128
129 /* move link status down one as workaround */
130 if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) {
131 iowrite8(GEN3_DB_MSIX_VECTOR_COUNT - 2,
132 ndev->self_mmio + GEN3_INTVEC_OFFSET +
133 (GEN3_DB_MSIX_VECTOR_COUNT - 1));
134 }
135
136 return ndev_init_isr(ndev, GEN3_DB_MSIX_VECTOR_COUNT,
137 GEN3_DB_MSIX_VECTOR_COUNT,
138 GEN3_DB_MSIX_VECTOR_SHIFT,
139 GEN3_DB_TOTAL_SHIFT);
140}
141
/*
 * gen3_setup_b2b_mw() - initialize the incoming memory windows for b2b.
 * @ndev:	NTB device being configured
 * @addr:	translation addresses programmed as the incoming limits
 * @peer_addr:	peer-side addresses; currently unused here — presumably kept
 *		for signature symmetry with the gen1 setup path (TODO confirm)
 *
 * Collapses both incoming windows to zero length (limit == base address)
 * and clears their translation bases, so nothing is exposed until a peer
 * explicitly programs a translation.
 *
 * Return: always 0.
 */
static int gen3_setup_b2b_mw(struct intel_ntb_dev *ndev,
			     const struct intel_b2b_addr *addr,
			     const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	phys_addr_t bar_addr;

	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

	/* setup incoming bar limits == base addrs (zero length windows) */
	bar_addr = addr->bar2_addr64;
	iowrite64(bar_addr, mmio + GEN3_IMBAR1XLMT_OFFSET);
	bar_addr = ioread64(mmio + GEN3_IMBAR1XLMT_OFFSET); /* read back to verify */
	dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);

	bar_addr = addr->bar4_addr64;
	iowrite64(bar_addr, mmio + GEN3_IMBAR2XLMT_OFFSET);
	bar_addr = ioread64(mmio + GEN3_IMBAR2XLMT_OFFSET);
	dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);

	/* zero incoming translation addrs */
	iowrite64(0, mmio + GEN3_IMBAR1XBASE_OFFSET);
	iowrite64(0, mmio + GEN3_IMBAR2XBASE_OFFSET);

	/* NOTE(review): peer registers appear reachable via the self
	 * mapping on gen3 b2b — confirm against the hardware spec. */
	ndev->peer_mmio = ndev->self_mmio;

	return 0;
}
172
173static int gen3_init_ntb(struct intel_ntb_dev *ndev)
174{
175 int rc;
176
177
178 ndev->mw_count = XEON_MW_COUNT;
179 ndev->spad_count = GEN3_SPAD_COUNT;
180 ndev->db_count = GEN3_DB_COUNT;
181 ndev->db_link_mask = GEN3_DB_LINK_BIT;
182
183 /* DB fixup for using 31 right now */
184 if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
185 ndev->db_link_mask |= BIT_ULL(31);
186
187 switch (ndev->ntb.topo) {
188 case NTB_TOPO_B2B_USD:
189 case NTB_TOPO_B2B_DSD:
190 ndev->self_reg = &gen3_pri_reg;
191 ndev->peer_reg = &gen3_b2b_reg;
192 ndev->xlat_reg = &gen3_sec_xlat;
193
194 if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
195 rc = gen3_setup_b2b_mw(ndev,
196 &xeon_b2b_dsd_addr,
197 &xeon_b2b_usd_addr);
198 } else {
199 rc = gen3_setup_b2b_mw(ndev,
200 &xeon_b2b_usd_addr,
201 &xeon_b2b_dsd_addr);
202 }
203
204 if (rc)
205 return rc;
206
207 /* Enable Bus Master and Memory Space on the secondary side */
208 iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
209 ndev->self_mmio + GEN3_SPCICMD_OFFSET);
210
211 break;
212
213 default:
214 return -EINVAL;
215 }
216
217 ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
218
219 ndev->reg->db_iowrite(ndev->db_valid_mask,
220 ndev->self_mmio +
221 ndev->self_reg->db_mask);
222
223 return 0;
224}
225
226int gen3_init_dev(struct intel_ntb_dev *ndev)
227{
228 struct pci_dev *pdev;
229 u8 ppd;
230 int rc;
231
232 pdev = ndev->ntb.pdev;
233
234 ndev->reg = &gen3_reg;
235
236 rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
237 if (rc)
238 return -EIO;
239
240 ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
241 dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
242 ntb_topo_string(ndev->ntb.topo));
243 if (ndev->ntb.topo == NTB_TOPO_NONE)
244 return -EINVAL;
245
246 ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD;
247
248 rc = gen3_init_ntb(ndev);
249 if (rc)
250 return rc;
251
252 return gen3_init_isr(ndev);
253}
254
/*
 * ndev_ntb3_debugfs_read() - debugfs read handler reporting gen3 NTB state.
 * @filp:	debugfs file; private_data is the struct intel_ntb_dev
 * @ubuf:	userspace buffer to copy the report into
 * @count:	size of @ubuf
 * @offp:	file offset, advanced by simple_read_from_buffer()
 *
 * Formats link state, window/doorbell/scratchpad configuration, the
 * incoming (and, for b2b topologies, outgoing) translation registers and
 * the hardware error/status words into a text buffer for userspace.
 *
 * Return: number of bytes copied, or a negative errno.
 */
ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
			       size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u; /* scratch for register reads */

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	/* cap the scratch buffer at 2KB regardless of the requested size */
	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev))
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	/* live register values, via the generic doorbell accessor */
	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + GEN3_IMBAR1XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR1XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN3_IMBAR2XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR2XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN3_IMBAR1XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN3_IMBAR2XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64);

	/* outgoing registers only exist on the b2b topologies */
	if (ntb_topo_is_b2b(ndev->ntb.topo)) {
		off += scnprintf(buf + off, buf_size - off,
				 "\nNTB Outgoing B2B XLAT:\n");

		u.v64 = ioread64(mmio + GEN3_EMBAR1XBASE_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1XBASE -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + GEN3_EMBAR2XBASE_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2XBASE -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + GEN3_EMBAR1XLMT_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1XLMT -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + GEN3_EMBAR2XLMT_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2XLMT -\t\t%#018llx\n", u.v64);

		off += scnprintf(buf + off, buf_size - off,
				 "\nNTB Secondary BAR:\n");

		u.v64 = ioread64(mmio + GEN3_EMBAR0_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR0 -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + GEN3_EMBAR1_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1 -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + GEN3_EMBAR2_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2 -\t\t%#018llx\n", u.v64);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Statistics:\n");

	u.v16 = ioread16(mmio + GEN3_USMEMMISS_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Upstream Memory Miss -\t%u\n", u.v16);

	/* error/status words live in config space; skip any that fail */
	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Hardware Errors:\n");

	if (!pci_read_config_word(ndev->ntb.pdev,
				  GEN3_DEVSTS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "DEVSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_word(ndev->ntb.pdev,
				  GEN3_LINK_STATUS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "LNKSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   GEN3_UNCERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "UNCERRSTS -\t\t%#06x\n", u.v32);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   GEN3_CORERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "CORERRSTS -\t\t%#06x\n", u.v32);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}
417
418static int intel_ntb3_link_enable(struct ntb_dev *ntb,
419 enum ntb_speed max_speed,
420 enum ntb_width max_width)
421{
422 struct intel_ntb_dev *ndev;
423 u32 ntb_ctl;
424
425 ndev = container_of(ntb, struct intel_ntb_dev, ntb);
426
427 dev_dbg(&ntb->pdev->dev,
428 "Enabling link with max_speed %d max_width %d\n",
429 max_speed, max_width);
430
431 if (max_speed != NTB_SPEED_AUTO)
432 dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
433 if (max_width != NTB_WIDTH_AUTO)
434 dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);
435
436 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
437 ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
438 ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
439 ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
440 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
441
442 return 0;
443}
444static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
445 dma_addr_t addr, resource_size_t size)
446{
447 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
448 unsigned long xlat_reg, limit_reg;
449 resource_size_t bar_size, mw_size;
450 void __iomem *mmio;
451 u64 base, limit, reg_val;
452 int bar;
453
454 if (pidx != NTB_DEF_PEER_IDX)
455 return -EINVAL;
456
457 if (idx >= ndev->b2b_idx && !ndev->b2b_off)
458 idx += 1;
459
460 bar = ndev_mw_to_bar(ndev, idx);
461 if (bar < 0)
462 return bar;
463
464 bar_size = pci_resource_len(ndev->ntb.pdev, bar);
465
466 if (idx == ndev->b2b_idx)
467 mw_size = bar_size - ndev->b2b_off;
468 else
469 mw_size = bar_size;
470
471 /* hardware requires that addr is aligned to bar size */
472 if (addr & (bar_size - 1))
473 return -EINVAL;
474
475 /* make sure the range fits in the usable mw size */
476 if (size > mw_size)
477 return -EINVAL;
478
479 mmio = ndev->self_mmio;
480 xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
481 limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
482 base = pci_resource_start(ndev->ntb.pdev, bar);
483
484 /* Set the limit if supported, if size is not mw_size */
485 if (limit_reg && size != mw_size)
486 limit = base + size;
487 else
488 limit = base + mw_size;
489
490 /* set and verify setting the translation address */
491 iowrite64(addr, mmio + xlat_reg);
492 reg_val = ioread64(mmio + xlat_reg);
493 if (reg_val != addr) {
494 iowrite64(0, mmio + xlat_reg);
495 return -EIO;
496 }
497
498 dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);
499
500 /* set and verify setting the limit */
501 iowrite64(limit, mmio + limit_reg);
502 reg_val = ioread64(mmio + limit_reg);
503 if (reg_val != limit) {
504 iowrite64(base, mmio + limit_reg);
505 iowrite64(0, mmio + xlat_reg);
506 return -EIO;
507 }
508
509 dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);
510
511 /* setup the EP */
512 limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
513 base = ioread64(mmio + GEN3_EMBAR1_OFFSET + (8 * idx));
514 base &= ~0xf;
515
516 if (limit_reg && size != mw_size)
517 limit = base + size;
518 else
519 limit = base + mw_size;
520
521 /* set and verify setting the limit */
522 iowrite64(limit, mmio + limit_reg);
523 reg_val = ioread64(mmio + limit_reg);
524 if (reg_val != limit) {
525 iowrite64(base, mmio + limit_reg);
526 iowrite64(0, mmio + xlat_reg);
527 return -EIO;
528 }
529
530 dev_dbg(&ntb->pdev->dev, "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);
531
532 return 0;
533}
534
535static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
536{
537 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
538 int bit;
539
540 if (db_bits & ~ndev->db_valid_mask)
541 return -EINVAL;
542
543 while (db_bits) {
544 bit = __ffs(db_bits);
545 iowrite32(1, ndev->peer_mmio +
546 ndev->peer_reg->db_bell + (bit * 4));
547 db_bits &= db_bits - 1;
548 }
549
550 return 0;
551}
552
553static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
554{
555 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
556
557 return ndev_db_read(ndev,
558 ndev->self_mmio +
559 ndev->self_reg->db_clear);
560}
561
562static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
563{
564 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
565
566 return ndev_db_write(ndev, db_bits,
567 ndev->self_mmio +
568 ndev->self_reg->db_clear);
569}
570
/*
 * NTB device operations for gen3 hardware.  Non-exported on purpose: this
 * table mixes gen3-specific callbacks (intel_ntb3_*) with shared helpers
 * (intel_ntb_*) declared in ntb_hw_gen1.h.
 */
const struct ntb_dev_ops intel_ntb3_ops = {
	.mw_count		= intel_ntb_mw_count,
	.mw_get_align		= intel_ntb_mw_get_align,
	.mw_set_trans		= intel_ntb3_mw_set_trans,
	.peer_mw_count		= intel_ntb_peer_mw_count,
	.peer_mw_get_addr	= intel_ntb_peer_mw_get_addr,
	.link_is_up		= intel_ntb_link_is_up,
	.link_enable		= intel_ntb3_link_enable,
	.link_disable		= intel_ntb_link_disable,
	.db_valid_mask		= intel_ntb_db_valid_mask,
	.db_vector_count	= intel_ntb_db_vector_count,
	.db_vector_mask		= intel_ntb_db_vector_mask,
	.db_read		= intel_ntb3_db_read,
	.db_clear		= intel_ntb3_db_clear,
	.db_set_mask		= intel_ntb_db_set_mask,
	.db_clear_mask		= intel_ntb_db_clear_mask,
	.peer_db_addr		= intel_ntb_peer_db_addr,
	.peer_db_set		= intel_ntb3_peer_db_set,
	.spad_is_unsafe		= intel_ntb_spad_is_unsafe,
	.spad_count		= intel_ntb_spad_count,
	.spad_read		= intel_ntb_spad_read,
	.spad_write		= intel_ntb_spad_write,
	.peer_spad_addr		= intel_ntb_peer_spad_addr,
	.peer_spad_read		= intel_ntb_peer_spad_read,
	.peer_spad_write	= intel_ntb_peer_spad_write,
};
597
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.h b/drivers/ntb/hw/intel/ntb_hw_gen3.h
new file mode 100644
index 000000000000..75fb86ca27bb
--- /dev/null
+++ b/drivers/ntb/hw/intel/ntb_hw_gen3.h
@@ -0,0 +1,110 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2012-2017 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * BSD LICENSE
14 *
15 * Copyright(c) 2012-2017 Intel Corporation. All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * * Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
 23 * * Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in
25 * the documentation and/or other materials provided with the
26 * distribution.
27 * * Neither the name of Intel Corporation nor the names of its
28 * contributors may be used to endorse or promote products derived
29 * from this software without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
37 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#ifndef _NTB_INTEL_GEN3_H_
45#define _NTB_INTEL_GEN3_H_
46
47#include "ntb_hw_intel.h"
48
49/* Intel Skylake Xeon hardware */
50#define GEN3_IMBAR1SZ_OFFSET 0x00d0
51#define GEN3_IMBAR2SZ_OFFSET 0x00d1
52#define GEN3_EMBAR1SZ_OFFSET 0x00d2
53#define GEN3_EMBAR2SZ_OFFSET 0x00d3
54#define GEN3_DEVCTRL_OFFSET 0x0098
55#define GEN3_DEVSTS_OFFSET 0x009a
56#define GEN3_UNCERRSTS_OFFSET 0x014c
57#define GEN3_CORERRSTS_OFFSET 0x0158
58#define GEN3_LINK_STATUS_OFFSET 0x01a2
59
60#define GEN3_NTBCNTL_OFFSET 0x0000
61#define GEN3_IMBAR1XBASE_OFFSET 0x0010 /* SBAR2XLAT */
62#define GEN3_IMBAR1XLMT_OFFSET 0x0018 /* SBAR2LMT */
63#define GEN3_IMBAR2XBASE_OFFSET 0x0020 /* SBAR4XLAT */
64#define GEN3_IMBAR2XLMT_OFFSET 0x0028 /* SBAR4LMT */
65#define GEN3_IM_INT_STATUS_OFFSET 0x0040
66#define GEN3_IM_INT_DISABLE_OFFSET 0x0048
67#define GEN3_IM_SPAD_OFFSET 0x0080 /* SPAD */
68#define GEN3_USMEMMISS_OFFSET 0x0070
69#define GEN3_INTVEC_OFFSET 0x00d0
70#define GEN3_IM_DOORBELL_OFFSET 0x0100 /* SDOORBELL0 */
71#define GEN3_B2B_SPAD_OFFSET 0x0180 /* B2B SPAD */
72#define GEN3_EMBAR0XBASE_OFFSET 0x4008 /* B2B_XLAT */
73#define GEN3_EMBAR1XBASE_OFFSET 0x4010 /* PBAR2XLAT */
74#define GEN3_EMBAR1XLMT_OFFSET 0x4018 /* PBAR2LMT */
75#define GEN3_EMBAR2XBASE_OFFSET 0x4020 /* PBAR4XLAT */
76#define GEN3_EMBAR2XLMT_OFFSET 0x4028 /* PBAR4LMT */
77#define GEN3_EM_INT_STATUS_OFFSET 0x4040
78#define GEN3_EM_INT_DISABLE_OFFSET 0x4048
79#define GEN3_EM_SPAD_OFFSET 0x4080 /* remote SPAD */
80#define GEN3_EM_DOORBELL_OFFSET 0x4100 /* PDOORBELL0 */
81#define GEN3_SPCICMD_OFFSET 0x4504 /* SPCICMD */
82#define GEN3_EMBAR0_OFFSET 0x4510 /* SBAR0BASE */
83#define GEN3_EMBAR1_OFFSET 0x4518 /* SBAR23BASE */
84#define GEN3_EMBAR2_OFFSET 0x4520 /* SBAR45BASE */
85
86#define GEN3_DB_COUNT 32
87#define GEN3_DB_LINK 32
88#define GEN3_DB_LINK_BIT BIT_ULL(GEN3_DB_LINK)
89#define GEN3_DB_MSIX_VECTOR_COUNT 33
90#define GEN3_DB_MSIX_VECTOR_SHIFT 1
91#define GEN3_DB_TOTAL_SHIFT 33
92#define GEN3_SPAD_COUNT 16
93
/* Read the full 64-bit doorbell register in a single MMIO access. */
static inline u64 gen3_db_ioread(void __iomem *mmio)
{
	return ioread64(mmio);
}
98
/* Write the full 64-bit doorbell register in a single MMIO access. */
static inline void gen3_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite64(bits, mmio);
}
103
104ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
105 size_t count, loff_t *offp);
106int gen3_init_dev(struct intel_ntb_dev *ndev);
107
108extern const struct ntb_dev_ops intel_ntb3_ops;
109
110#endif
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index 4415aa7ea775..c49ff8970ce3 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -54,6 +54,7 @@
54#include <linux/ntb.h> 54#include <linux/ntb.h>
55#include <linux/pci.h> 55#include <linux/pci.h>
56 56
57/* PCI device IDs */
57#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725 58#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
58#define PCI_DEVICE_ID_INTEL_NTB_PS_JSF 0x3726 59#define PCI_DEVICE_ID_INTEL_NTB_PS_JSF 0x3726
59#define PCI_DEVICE_ID_INTEL_NTB_SS_JSF 0x3727 60#define PCI_DEVICE_ID_INTEL_NTB_SS_JSF 0x3727
@@ -71,132 +72,7 @@
71#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F 72#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F
72#define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX 0x201C 73#define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX 0x201C
73 74
74/* Intel Xeon hardware */
75
76#define XEON_PBAR23LMT_OFFSET 0x0000
77#define XEON_PBAR45LMT_OFFSET 0x0008
78#define XEON_PBAR4LMT_OFFSET 0x0008
79#define XEON_PBAR5LMT_OFFSET 0x000c
80#define XEON_PBAR23XLAT_OFFSET 0x0010
81#define XEON_PBAR45XLAT_OFFSET 0x0018
82#define XEON_PBAR4XLAT_OFFSET 0x0018
83#define XEON_PBAR5XLAT_OFFSET 0x001c
84#define XEON_SBAR23LMT_OFFSET 0x0020
85#define XEON_SBAR45LMT_OFFSET 0x0028
86#define XEON_SBAR4LMT_OFFSET 0x0028
87#define XEON_SBAR5LMT_OFFSET 0x002c
88#define XEON_SBAR23XLAT_OFFSET 0x0030
89#define XEON_SBAR45XLAT_OFFSET 0x0038
90#define XEON_SBAR4XLAT_OFFSET 0x0038
91#define XEON_SBAR5XLAT_OFFSET 0x003c
92#define XEON_SBAR0BASE_OFFSET 0x0040
93#define XEON_SBAR23BASE_OFFSET 0x0048
94#define XEON_SBAR45BASE_OFFSET 0x0050
95#define XEON_SBAR4BASE_OFFSET 0x0050
96#define XEON_SBAR5BASE_OFFSET 0x0054
97#define XEON_SBDF_OFFSET 0x005c
98#define XEON_NTBCNTL_OFFSET 0x0058
99#define XEON_PDOORBELL_OFFSET 0x0060
100#define XEON_PDBMSK_OFFSET 0x0062
101#define XEON_SDOORBELL_OFFSET 0x0064
102#define XEON_SDBMSK_OFFSET 0x0066
103#define XEON_USMEMMISS_OFFSET 0x0070
104#define XEON_SPAD_OFFSET 0x0080
105#define XEON_PBAR23SZ_OFFSET 0x00d0
106#define XEON_PBAR45SZ_OFFSET 0x00d1
107#define XEON_PBAR4SZ_OFFSET 0x00d1
108#define XEON_SBAR23SZ_OFFSET 0x00d2
109#define XEON_SBAR45SZ_OFFSET 0x00d3
110#define XEON_SBAR4SZ_OFFSET 0x00d3
111#define XEON_PPD_OFFSET 0x00d4
112#define XEON_PBAR5SZ_OFFSET 0x00d5
113#define XEON_SBAR5SZ_OFFSET 0x00d6
114#define XEON_WCCNTRL_OFFSET 0x00e0
115#define XEON_UNCERRSTS_OFFSET 0x014c
116#define XEON_CORERRSTS_OFFSET 0x0158
117#define XEON_LINK_STATUS_OFFSET 0x01a2
118#define XEON_SPCICMD_OFFSET 0x0504
119#define XEON_DEVCTRL_OFFSET 0x0598
120#define XEON_DEVSTS_OFFSET 0x059a
121#define XEON_SLINK_STATUS_OFFSET 0x05a2
122#define XEON_B2B_SPAD_OFFSET 0x0100
123#define XEON_B2B_DOORBELL_OFFSET 0x0140
124#define XEON_B2B_XLAT_OFFSETL 0x0144
125#define XEON_B2B_XLAT_OFFSETU 0x0148
126#define XEON_PPD_CONN_MASK 0x03
127#define XEON_PPD_CONN_TRANSPARENT 0x00
128#define XEON_PPD_CONN_B2B 0x01
129#define XEON_PPD_CONN_RP 0x02
130#define XEON_PPD_DEV_MASK 0x10
131#define XEON_PPD_DEV_USD 0x00
132#define XEON_PPD_DEV_DSD 0x10
133#define XEON_PPD_SPLIT_BAR_MASK 0x40
134
135#define XEON_PPD_TOPO_MASK (XEON_PPD_CONN_MASK | XEON_PPD_DEV_MASK)
136#define XEON_PPD_TOPO_PRI_USD (XEON_PPD_CONN_RP | XEON_PPD_DEV_USD)
137#define XEON_PPD_TOPO_PRI_DSD (XEON_PPD_CONN_RP | XEON_PPD_DEV_DSD)
138#define XEON_PPD_TOPO_SEC_USD (XEON_PPD_CONN_TRANSPARENT | XEON_PPD_DEV_USD)
139#define XEON_PPD_TOPO_SEC_DSD (XEON_PPD_CONN_TRANSPARENT | XEON_PPD_DEV_DSD)
140#define XEON_PPD_TOPO_B2B_USD (XEON_PPD_CONN_B2B | XEON_PPD_DEV_USD)
141#define XEON_PPD_TOPO_B2B_DSD (XEON_PPD_CONN_B2B | XEON_PPD_DEV_DSD)
142
143#define XEON_MW_COUNT 2
144#define HSX_SPLIT_BAR_MW_COUNT 3
145#define XEON_DB_COUNT 15
146#define XEON_DB_LINK 15
147#define XEON_DB_LINK_BIT BIT_ULL(XEON_DB_LINK)
148#define XEON_DB_MSIX_VECTOR_COUNT 4
149#define XEON_DB_MSIX_VECTOR_SHIFT 5
150#define XEON_DB_TOTAL_SHIFT 16
151#define XEON_SPAD_COUNT 16
152
153/* Intel Skylake Xeon hardware */
154#define SKX_IMBAR1SZ_OFFSET 0x00d0
155#define SKX_IMBAR2SZ_OFFSET 0x00d1
156#define SKX_EMBAR1SZ_OFFSET 0x00d2
157#define SKX_EMBAR2SZ_OFFSET 0x00d3
158#define SKX_DEVCTRL_OFFSET 0x0098
159#define SKX_DEVSTS_OFFSET 0x009a
160#define SKX_UNCERRSTS_OFFSET 0x014c
161#define SKX_CORERRSTS_OFFSET 0x0158
162#define SKX_LINK_STATUS_OFFSET 0x01a2
163
164#define SKX_NTBCNTL_OFFSET 0x0000
165#define SKX_IMBAR1XBASE_OFFSET 0x0010 /* SBAR2XLAT */
166#define SKX_IMBAR1XLMT_OFFSET 0x0018 /* SBAR2LMT */
167#define SKX_IMBAR2XBASE_OFFSET 0x0020 /* SBAR4XLAT */
168#define SKX_IMBAR2XLMT_OFFSET 0x0028 /* SBAR4LMT */
169#define SKX_IM_INT_STATUS_OFFSET 0x0040
170#define SKX_IM_INT_DISABLE_OFFSET 0x0048
171#define SKX_IM_SPAD_OFFSET 0x0080 /* SPAD */
172#define SKX_USMEMMISS_OFFSET 0x0070
173#define SKX_INTVEC_OFFSET 0x00d0
174#define SKX_IM_DOORBELL_OFFSET 0x0100 /* SDOORBELL0 */
175#define SKX_B2B_SPAD_OFFSET 0x0180 /* B2B SPAD */
176#define SKX_EMBAR0XBASE_OFFSET 0x4008 /* B2B_XLAT */
177#define SKX_EMBAR1XBASE_OFFSET 0x4010 /* PBAR2XLAT */
178#define SKX_EMBAR1XLMT_OFFSET 0x4018 /* PBAR2LMT */
179#define SKX_EMBAR2XBASE_OFFSET 0x4020 /* PBAR4XLAT */
180#define SKX_EMBAR2XLMT_OFFSET 0x4028 /* PBAR4LMT */
181#define SKX_EM_INT_STATUS_OFFSET 0x4040
182#define SKX_EM_INT_DISABLE_OFFSET 0x4048
183#define SKX_EM_SPAD_OFFSET 0x4080 /* remote SPAD */
184#define SKX_EM_DOORBELL_OFFSET 0x4100 /* PDOORBELL0 */
185#define SKX_SPCICMD_OFFSET 0x4504 /* SPCICMD */
186#define SKX_EMBAR0_OFFSET 0x4510 /* SBAR0BASE */
187#define SKX_EMBAR1_OFFSET 0x4518 /* SBAR23BASE */
188#define SKX_EMBAR2_OFFSET 0x4520 /* SBAR45BASE */
189
190#define SKX_DB_COUNT 32
191#define SKX_DB_LINK 32
192#define SKX_DB_LINK_BIT BIT_ULL(SKX_DB_LINK)
193#define SKX_DB_MSIX_VECTOR_COUNT 33
194#define SKX_DB_MSIX_VECTOR_SHIFT 1
195#define SKX_DB_TOTAL_SHIFT 33
196#define SKX_SPAD_COUNT 16
197
198/* Ntb control and link status */ 75/* Ntb control and link status */
199
200#define NTB_CTL_CFG_LOCK BIT(0) 76#define NTB_CTL_CFG_LOCK BIT(0)
201#define NTB_CTL_DISABLE BIT(1) 77#define NTB_CTL_DISABLE BIT(1)
202#define NTB_CTL_S2P_BAR2_SNOOP BIT(2) 78#define NTB_CTL_S2P_BAR2_SNOOP BIT(2)
@@ -213,23 +89,6 @@
213#define NTB_LNK_STA_SPEED(x) ((x) & NTB_LNK_STA_SPEED_MASK) 89#define NTB_LNK_STA_SPEED(x) ((x) & NTB_LNK_STA_SPEED_MASK)
214#define NTB_LNK_STA_WIDTH(x) (((x) & NTB_LNK_STA_WIDTH_MASK) >> 4) 90#define NTB_LNK_STA_WIDTH(x) (((x) & NTB_LNK_STA_WIDTH_MASK) >> 4)
215 91
216/* Use the following addresses for translation between b2b ntb devices in case
217 * the hardware default values are not reliable. */
218#define XEON_B2B_BAR0_ADDR 0x1000000000000000ull
219#define XEON_B2B_BAR2_ADDR64 0x2000000000000000ull
220#define XEON_B2B_BAR4_ADDR64 0x4000000000000000ull
221#define XEON_B2B_BAR4_ADDR32 0x20000000u
222#define XEON_B2B_BAR5_ADDR32 0x40000000u
223
224/* The peer ntb secondary config space is 32KB fixed size */
225#define XEON_B2B_MIN_SIZE 0x8000
226
227/* flags to indicate hardware errata */
228#define NTB_HWERR_SDOORBELL_LOCKUP BIT_ULL(0)
229#define NTB_HWERR_SB01BASE_LOCKUP BIT_ULL(1)
230#define NTB_HWERR_B2BDOORBELL_BIT14 BIT_ULL(2)
231#define NTB_HWERR_MSIX_VECTOR32_BAD BIT_ULL(3)
232
233/* flags to indicate unsafe api */ 92/* flags to indicate unsafe api */
234#define NTB_UNSAFE_DB BIT_ULL(0) 93#define NTB_UNSAFE_DB BIT_ULL(0)
235#define NTB_UNSAFE_SPAD BIT_ULL(1) 94#define NTB_UNSAFE_SPAD BIT_ULL(1)
@@ -328,4 +187,64 @@ struct intel_ntb_dev {
328#define hb_ndev(__work) container_of(__work, struct intel_ntb_dev, \ 187#define hb_ndev(__work) container_of(__work, struct intel_ntb_dev, \
329 hb_timer.work) 188 hb_timer.work)
330 189
190static inline int pdev_is_gen1(struct pci_dev *pdev)
191{
192 switch (pdev->device) {
193 case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
194 case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
195 case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
196 case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
197 case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
198 case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
199 case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
200 case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
201 case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
202 case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
203 case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
204 case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
205 case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
206 case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
207 case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
208 return 1;
209 }
210 return 0;
211}
212
213static inline int pdev_is_gen3(struct pci_dev *pdev)
214{
215 if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)
216 return 1;
217
218 return 0;
219}
220
/*
 * Provide ioread64 on architectures that lack it: prefer the native readq,
 * otherwise fall back to two 32-bit reads (low dword first, matching
 * little-endian register layout).
 *
 * NOTE(review): the split fallback is not atomic — the hardware may update
 * the register between the two reads, so a torn value can be observed;
 * presumably callers tolerate this, but confirm for new users.
 */
#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
	u64 low, high;

	low = ioread32(mmio);
	high = ioread32(mmio + sizeof(u32));
	return low | (high << 32);
}
#endif
#endif
236
/*
 * Provide iowrite64 on architectures that lack it: prefer the native writeq,
 * otherwise fall back to two 32-bit writes (low dword first).
 *
 * NOTE(review): the split fallback is not atomic — hardware may act on a
 * half-written register between the two stores; confirm this is acceptable
 * for any new call site.
 */
#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
	iowrite32(val, mmio);
	iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif
249
331#endif 250#endif
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 9878c48826e3..8145be34328b 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -637,7 +637,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
637 */ 637 */
638 node = dev_to_node(&ndev->dev); 638 node = dev_to_node(&ndev->dev);
639 for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) { 639 for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
640 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node); 640 entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
641 if (!entry) 641 if (!entry)
642 return -ENOMEM; 642 return -ENOMEM;
643 643
@@ -1828,7 +1828,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
1828 qp->rx_dma_chan ? "DMA" : "CPU"); 1828 qp->rx_dma_chan ? "DMA" : "CPU");
1829 1829
1830 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 1830 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1831 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node); 1831 entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
1832 if (!entry) 1832 if (!entry)
1833 goto err1; 1833 goto err1;
1834 1834
@@ -1839,7 +1839,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
1839 qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES; 1839 qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES;
1840 1840
1841 for (i = 0; i < qp->tx_max_entry; i++) { 1841 for (i = 0; i < qp->tx_max_entry; i++) {
1842 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node); 1842 entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
1843 if (!entry) 1843 if (!entry)
1844 goto err2; 1844 goto err2;
1845 1845