author     Dave Jiang <dave.jiang@intel.com>    2018-01-29 15:22:24 -0500
committer  Jon Mason <jdmason@kudzu.us>         2018-06-11 15:20:59 -0400
commit     f6e51c354b60c177a4287f236d353b430d3dc6c1
tree       05ac8d016d9d9ccde66a105183d3e73a6b851bcf
parent     a9065055ed09fe6e59e5bbfd12c8de629c53005d
ntb: intel: split out the gen3 code
Move the Intel hw gen3 code to its own source file. ntb_hw_intel.c was
getting too large, making it hard to maintain with future hardware
changes.
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Jon Mason <jdmason@kudzu.us>
-rw-r--r--  drivers/ntb/hw/intel/Makefile                                                |   1
-rw-r--r--  drivers/ntb/hw/intel/ntb_hw_gen1.c (renamed from drivers/ntb/hw/intel/ntb_hw_intel.c) | 694
-rw-r--r--  drivers/ntb/hw/intel/ntb_hw_gen1.h                                           |  40
-rw-r--r--  drivers/ntb/hw/intel/ntb_hw_gen3.c                                           | 597
-rw-r--r--  drivers/ntb/hw/intel/ntb_hw_gen3.h                                           |  19
-rw-r--r--  drivers/ntb/hw/intel/ntb_hw_intel.h                                          |  60
6 files changed, 754 insertions(+), 657 deletions(-)
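For orientation before the hunks: the pattern repeated throughout this patch is that
helpers needed by both hardware generations drop their static qualifier in
ntb_hw_gen1.c and gain declarations in ntb_hw_gen1.h, while the Skylake-only (gen3)
code moves wholesale into the new ntb_hw_gen3.c. The condensed sketch below
illustrates that pattern with one doorbell helper; it is assembled from fragments of
this patch and is not a complete listing.

	/* ntb_hw_gen1.h: shared helper now declared for use outside gen1 */
	u64 ndev_db_read(struct intel_ntb_dev *ndev, void __iomem *mmio);

	/* ntb_hw_gen1.c: 'static inline' dropped so the symbol is visible
	 * to the gen3 object linked into the same module
	 */
	u64 ndev_db_read(struct intel_ntb_dev *ndev, void __iomem *mmio)
	{
		if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
			pr_warn_once("%s: NTB unsafe doorbell access", __func__);

		return ndev->reg->db_ioread(mmio);
	}

	/* ntb_hw_gen3.c: a gen3-specific callback built on the shared helper */
	static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
	{
		struct intel_ntb_dev *ndev = ntb_ndev(ntb);

		return ndev_db_read(ndev,
				    ndev->self_mmio +
				    ndev->self_reg->db_clear);
	}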
diff --git a/drivers/ntb/hw/intel/Makefile b/drivers/ntb/hw/intel/Makefile
index 1b434568d2ad..4ff22af967c6 100644
--- a/drivers/ntb/hw/intel/Makefile
+++ b/drivers/ntb/hw/intel/Makefile
@@ -1 +1,2 @@ | |||
1 | obj-$(CONFIG_NTB_INTEL) += ntb_hw_intel.o | 1 | obj-$(CONFIG_NTB_INTEL) += ntb_hw_intel.o |
2 | ntb_hw_intel-y := ntb_hw_gen1.o ntb_hw_gen3.o | ||
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index 44bf2f4eb068..f2554ac8afac 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -60,9 +60,9 @@ | |||
60 | #include <linux/slab.h> | 60 | #include <linux/slab.h> |
61 | #include <linux/ntb.h> | 61 | #include <linux/ntb.h> |
62 | 62 | ||
63 | #include "ntb_hw_intel.h" | ||
63 | #include "ntb_hw_gen1.h" | 64 | #include "ntb_hw_gen1.h" |
64 | #include "ntb_hw_gen3.h" | 65 | #include "ntb_hw_gen3.h" |
65 | #include "ntb_hw_intel.h" | ||
66 | 66 | ||
67 | #define NTB_NAME "ntb_hw_intel" | 67 | #define NTB_NAME "ntb_hw_intel" |
68 | #define NTB_DESC "Intel(R) PCI-E Non-Transparent Bridge Driver" | 68 | #define NTB_DESC "Intel(R) PCI-E Non-Transparent Bridge Driver" |
@@ -82,14 +82,7 @@ static const struct intel_ntb_alt_reg xeon_sec_reg; | |||
82 | static const struct intel_ntb_alt_reg xeon_b2b_reg; | 82 | static const struct intel_ntb_alt_reg xeon_b2b_reg; |
83 | static const struct intel_ntb_xlat_reg xeon_pri_xlat; | 83 | static const struct intel_ntb_xlat_reg xeon_pri_xlat; |
84 | static const struct intel_ntb_xlat_reg xeon_sec_xlat; | 84 | static const struct intel_ntb_xlat_reg xeon_sec_xlat; |
85 | static struct intel_b2b_addr xeon_b2b_usd_addr; | ||
86 | static struct intel_b2b_addr xeon_b2b_dsd_addr; | ||
87 | static const struct intel_ntb_reg skx_reg; | ||
88 | static const struct intel_ntb_alt_reg skx_pri_reg; | ||
89 | static const struct intel_ntb_alt_reg skx_b2b_reg; | ||
90 | static const struct intel_ntb_xlat_reg skx_sec_xlat; | ||
91 | static const struct ntb_dev_ops intel_ntb_ops; | 85 | static const struct ntb_dev_ops intel_ntb_ops; |
92 | static const struct ntb_dev_ops intel_ntb3_ops; | ||
93 | 86 | ||
94 | static const struct file_operations intel_ntb_debugfs_info; | 87 | static const struct file_operations intel_ntb_debugfs_info; |
95 | static struct dentry *debugfs_dir; | 88 | static struct dentry *debugfs_dir; |
@@ -148,68 +141,8 @@ module_param_named(xeon_b2b_dsd_bar5_addr32, | |||
148 | MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32, | 141 | MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32, |
149 | "XEON B2B DSD split-BAR 5 32-bit address"); | 142 | "XEON B2B DSD split-BAR 5 32-bit address"); |
150 | 143 | ||
151 | static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd); | ||
152 | static int xeon_init_isr(struct intel_ntb_dev *ndev); | ||
153 | |||
154 | #ifndef ioread64 | ||
155 | #ifdef readq | ||
156 | #define ioread64 readq | ||
157 | #else | ||
158 | #define ioread64 _ioread64 | ||
159 | static inline u64 _ioread64(void __iomem *mmio) | ||
160 | { | ||
161 | u64 low, high; | ||
162 | |||
163 | low = ioread32(mmio); | ||
164 | high = ioread32(mmio + sizeof(u32)); | ||
165 | return low | (high << 32); | ||
166 | } | ||
167 | #endif | ||
168 | #endif | ||
169 | 144 | ||
170 | #ifndef iowrite64 | 145 | static int xeon_init_isr(struct intel_ntb_dev *ndev); |
171 | #ifdef writeq | ||
172 | #define iowrite64 writeq | ||
173 | #else | ||
174 | #define iowrite64 _iowrite64 | ||
175 | static inline void _iowrite64(u64 val, void __iomem *mmio) | ||
176 | { | ||
177 | iowrite32(val, mmio); | ||
178 | iowrite32(val >> 32, mmio + sizeof(u32)); | ||
179 | } | ||
180 | #endif | ||
181 | #endif | ||
182 | |||
183 | static inline int pdev_is_xeon(struct pci_dev *pdev) | ||
184 | { | ||
185 | switch (pdev->device) { | ||
186 | case PCI_DEVICE_ID_INTEL_NTB_SS_JSF: | ||
187 | case PCI_DEVICE_ID_INTEL_NTB_SS_SNB: | ||
188 | case PCI_DEVICE_ID_INTEL_NTB_SS_IVT: | ||
189 | case PCI_DEVICE_ID_INTEL_NTB_SS_HSX: | ||
190 | case PCI_DEVICE_ID_INTEL_NTB_SS_BDX: | ||
191 | case PCI_DEVICE_ID_INTEL_NTB_PS_JSF: | ||
192 | case PCI_DEVICE_ID_INTEL_NTB_PS_SNB: | ||
193 | case PCI_DEVICE_ID_INTEL_NTB_PS_IVT: | ||
194 | case PCI_DEVICE_ID_INTEL_NTB_PS_HSX: | ||
195 | case PCI_DEVICE_ID_INTEL_NTB_PS_BDX: | ||
196 | case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF: | ||
197 | case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB: | ||
198 | case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT: | ||
199 | case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX: | ||
200 | case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX: | ||
201 | return 1; | ||
202 | } | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static inline int pdev_is_skx_xeon(struct pci_dev *pdev) | ||
207 | { | ||
208 | if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX) | ||
209 | return 1; | ||
210 | |||
211 | return 0; | ||
212 | } | ||
213 | 146 | ||
214 | static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev) | 147 | static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev) |
215 | { | 148 | { |
@@ -243,7 +176,7 @@ static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev, | |||
243 | return !!flag; | 176 | return !!flag; |
244 | } | 177 | } |
245 | 178 | ||
246 | static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx) | 179 | int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx) |
247 | { | 180 | { |
248 | if (idx < 0 || idx >= ndev->mw_count) | 181 | if (idx < 0 || idx >= ndev->mw_count) |
249 | return -EINVAL; | 182 | return -EINVAL; |
@@ -270,7 +203,7 @@ static inline int ndev_db_addr(struct intel_ntb_dev *ndev, | |||
270 | return 0; | 203 | return 0; |
271 | } | 204 | } |
272 | 205 | ||
273 | static inline u64 ndev_db_read(struct intel_ntb_dev *ndev, | 206 | u64 ndev_db_read(struct intel_ntb_dev *ndev, |
274 | void __iomem *mmio) | 207 | void __iomem *mmio) |
275 | { | 208 | { |
276 | if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) | 209 | if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) |
@@ -279,7 +212,7 @@ static inline u64 ndev_db_read(struct intel_ntb_dev *ndev, | |||
279 | return ndev->reg->db_ioread(mmio); | 212 | return ndev->reg->db_ioread(mmio); |
280 | } | 213 | } |
281 | 214 | ||
282 | static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits, | 215 | int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits, |
283 | void __iomem *mmio) | 216 | void __iomem *mmio) |
284 | { | 217 | { |
285 | if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) | 218 | if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) |
@@ -431,7 +364,7 @@ static irqreturn_t ndev_irq_isr(int irq, void *dev) | |||
431 | return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq); | 364 | return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq); |
432 | } | 365 | } |
433 | 366 | ||
434 | static int ndev_init_isr(struct intel_ntb_dev *ndev, | 367 | int ndev_init_isr(struct intel_ntb_dev *ndev, |
435 | int msix_min, int msix_max, | 368 | int msix_min, int msix_max, |
436 | int msix_shift, int total_shift) | 369 | int msix_shift, int total_shift) |
437 | { | 370 | { |
@@ -559,169 +492,6 @@ static void ndev_deinit_isr(struct intel_ntb_dev *ndev) | |||
559 | } | 492 | } |
560 | } | 493 | } |
561 | 494 | ||
562 | static ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf, | ||
563 | size_t count, loff_t *offp) | ||
564 | { | ||
565 | struct intel_ntb_dev *ndev; | ||
566 | void __iomem *mmio; | ||
567 | char *buf; | ||
568 | size_t buf_size; | ||
569 | ssize_t ret, off; | ||
570 | union { u64 v64; u32 v32; u16 v16; } u; | ||
571 | |||
572 | ndev = filp->private_data; | ||
573 | mmio = ndev->self_mmio; | ||
574 | |||
575 | buf_size = min(count, 0x800ul); | ||
576 | |||
577 | buf = kmalloc(buf_size, GFP_KERNEL); | ||
578 | if (!buf) | ||
579 | return -ENOMEM; | ||
580 | |||
581 | off = 0; | ||
582 | |||
583 | off += scnprintf(buf + off, buf_size - off, | ||
584 | "NTB Device Information:\n"); | ||
585 | |||
586 | off += scnprintf(buf + off, buf_size - off, | ||
587 | "Connection Topology -\t%s\n", | ||
588 | ntb_topo_string(ndev->ntb.topo)); | ||
589 | |||
590 | off += scnprintf(buf + off, buf_size - off, | ||
591 | "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl); | ||
592 | off += scnprintf(buf + off, buf_size - off, | ||
593 | "LNK STA -\t\t%#06x\n", ndev->lnk_sta); | ||
594 | |||
595 | if (!ndev->reg->link_is_up(ndev)) | ||
596 | off += scnprintf(buf + off, buf_size - off, | ||
597 | "Link Status -\t\tDown\n"); | ||
598 | else { | ||
599 | off += scnprintf(buf + off, buf_size - off, | ||
600 | "Link Status -\t\tUp\n"); | ||
601 | off += scnprintf(buf + off, buf_size - off, | ||
602 | "Link Speed -\t\tPCI-E Gen %u\n", | ||
603 | NTB_LNK_STA_SPEED(ndev->lnk_sta)); | ||
604 | off += scnprintf(buf + off, buf_size - off, | ||
605 | "Link Width -\t\tx%u\n", | ||
606 | NTB_LNK_STA_WIDTH(ndev->lnk_sta)); | ||
607 | } | ||
608 | |||
609 | off += scnprintf(buf + off, buf_size - off, | ||
610 | "Memory Window Count -\t%u\n", ndev->mw_count); | ||
611 | off += scnprintf(buf + off, buf_size - off, | ||
612 | "Scratchpad Count -\t%u\n", ndev->spad_count); | ||
613 | off += scnprintf(buf + off, buf_size - off, | ||
614 | "Doorbell Count -\t%u\n", ndev->db_count); | ||
615 | off += scnprintf(buf + off, buf_size - off, | ||
616 | "Doorbell Vector Count -\t%u\n", ndev->db_vec_count); | ||
617 | off += scnprintf(buf + off, buf_size - off, | ||
618 | "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift); | ||
619 | |||
620 | off += scnprintf(buf + off, buf_size - off, | ||
621 | "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask); | ||
622 | off += scnprintf(buf + off, buf_size - off, | ||
623 | "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask); | ||
624 | off += scnprintf(buf + off, buf_size - off, | ||
625 | "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask); | ||
626 | |||
627 | u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask); | ||
628 | off += scnprintf(buf + off, buf_size - off, | ||
629 | "Doorbell Mask -\t\t%#llx\n", u.v64); | ||
630 | |||
631 | u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell); | ||
632 | off += scnprintf(buf + off, buf_size - off, | ||
633 | "Doorbell Bell -\t\t%#llx\n", u.v64); | ||
634 | |||
635 | off += scnprintf(buf + off, buf_size - off, | ||
636 | "\nNTB Incoming XLAT:\n"); | ||
637 | |||
638 | u.v64 = ioread64(mmio + SKX_IMBAR1XBASE_OFFSET); | ||
639 | off += scnprintf(buf + off, buf_size - off, | ||
640 | "IMBAR1XBASE -\t\t%#018llx\n", u.v64); | ||
641 | |||
642 | u.v64 = ioread64(mmio + SKX_IMBAR2XBASE_OFFSET); | ||
643 | off += scnprintf(buf + off, buf_size - off, | ||
644 | "IMBAR2XBASE -\t\t%#018llx\n", u.v64); | ||
645 | |||
646 | u.v64 = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET); | ||
647 | off += scnprintf(buf + off, buf_size - off, | ||
648 | "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64); | ||
649 | |||
650 | u.v64 = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET); | ||
651 | off += scnprintf(buf + off, buf_size - off, | ||
652 | "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64); | ||
653 | |||
654 | if (ntb_topo_is_b2b(ndev->ntb.topo)) { | ||
655 | off += scnprintf(buf + off, buf_size - off, | ||
656 | "\nNTB Outgoing B2B XLAT:\n"); | ||
657 | |||
658 | u.v64 = ioread64(mmio + SKX_EMBAR1XBASE_OFFSET); | ||
659 | off += scnprintf(buf + off, buf_size - off, | ||
660 | "EMBAR1XBASE -\t\t%#018llx\n", u.v64); | ||
661 | |||
662 | u.v64 = ioread64(mmio + SKX_EMBAR2XBASE_OFFSET); | ||
663 | off += scnprintf(buf + off, buf_size - off, | ||
664 | "EMBAR2XBASE -\t\t%#018llx\n", u.v64); | ||
665 | |||
666 | u.v64 = ioread64(mmio + SKX_EMBAR1XLMT_OFFSET); | ||
667 | off += scnprintf(buf + off, buf_size - off, | ||
668 | "EMBAR1XLMT -\t\t%#018llx\n", u.v64); | ||
669 | |||
670 | u.v64 = ioread64(mmio + SKX_EMBAR2XLMT_OFFSET); | ||
671 | off += scnprintf(buf + off, buf_size - off, | ||
672 | "EMBAR2XLMT -\t\t%#018llx\n", u.v64); | ||
673 | |||
674 | off += scnprintf(buf + off, buf_size - off, | ||
675 | "\nNTB Secondary BAR:\n"); | ||
676 | |||
677 | u.v64 = ioread64(mmio + SKX_EMBAR0_OFFSET); | ||
678 | off += scnprintf(buf + off, buf_size - off, | ||
679 | "EMBAR0 -\t\t%#018llx\n", u.v64); | ||
680 | |||
681 | u.v64 = ioread64(mmio + SKX_EMBAR1_OFFSET); | ||
682 | off += scnprintf(buf + off, buf_size - off, | ||
683 | "EMBAR1 -\t\t%#018llx\n", u.v64); | ||
684 | |||
685 | u.v64 = ioread64(mmio + SKX_EMBAR2_OFFSET); | ||
686 | off += scnprintf(buf + off, buf_size - off, | ||
687 | "EMBAR2 -\t\t%#018llx\n", u.v64); | ||
688 | } | ||
689 | |||
690 | off += scnprintf(buf + off, buf_size - off, | ||
691 | "\nNTB Statistics:\n"); | ||
692 | |||
693 | u.v16 = ioread16(mmio + SKX_USMEMMISS_OFFSET); | ||
694 | off += scnprintf(buf + off, buf_size - off, | ||
695 | "Upstream Memory Miss -\t%u\n", u.v16); | ||
696 | |||
697 | off += scnprintf(buf + off, buf_size - off, | ||
698 | "\nNTB Hardware Errors:\n"); | ||
699 | |||
700 | if (!pci_read_config_word(ndev->ntb.pdev, | ||
701 | SKX_DEVSTS_OFFSET, &u.v16)) | ||
702 | off += scnprintf(buf + off, buf_size - off, | ||
703 | "DEVSTS -\t\t%#06x\n", u.v16); | ||
704 | |||
705 | if (!pci_read_config_word(ndev->ntb.pdev, | ||
706 | SKX_LINK_STATUS_OFFSET, &u.v16)) | ||
707 | off += scnprintf(buf + off, buf_size - off, | ||
708 | "LNKSTS -\t\t%#06x\n", u.v16); | ||
709 | |||
710 | if (!pci_read_config_dword(ndev->ntb.pdev, | ||
711 | SKX_UNCERRSTS_OFFSET, &u.v32)) | ||
712 | off += scnprintf(buf + off, buf_size - off, | ||
713 | "UNCERRSTS -\t\t%#06x\n", u.v32); | ||
714 | |||
715 | if (!pci_read_config_dword(ndev->ntb.pdev, | ||
716 | SKX_CORERRSTS_OFFSET, &u.v32)) | ||
717 | off += scnprintf(buf + off, buf_size - off, | ||
718 | "CORERRSTS -\t\t%#06x\n", u.v32); | ||
719 | |||
720 | ret = simple_read_from_buffer(ubuf, count, offp, buf, off); | ||
721 | kfree(buf); | ||
722 | return ret; | ||
723 | } | ||
724 | |||
725 | static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf, | 495 | static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf, |
726 | size_t count, loff_t *offp) | 496 | size_t count, loff_t *offp) |
727 | { | 497 | { |
@@ -1025,7 +795,7 @@ static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev) | |||
1025 | debugfs_remove_recursive(ndev->debugfs_dir); | 795 | debugfs_remove_recursive(ndev->debugfs_dir); |
1026 | } | 796 | } |
1027 | 797 | ||
1028 | static int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx) | 798 | int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx) |
1029 | { | 799 | { |
1030 | if (pidx != NTB_DEF_PEER_IDX) | 800 | if (pidx != NTB_DEF_PEER_IDX) |
1031 | return -EINVAL; | 801 | return -EINVAL; |
@@ -1033,10 +803,10 @@ static int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx) | |||
1033 | return ntb_ndev(ntb)->mw_count; | 803 | return ntb_ndev(ntb)->mw_count; |
1034 | } | 804 | } |
1035 | 805 | ||
1036 | static int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx, | 806 | int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx, |
1037 | resource_size_t *addr_align, | 807 | resource_size_t *addr_align, |
1038 | resource_size_t *size_align, | 808 | resource_size_t *size_align, |
1039 | resource_size_t *size_max) | 809 | resource_size_t *size_max) |
1040 | { | 810 | { |
1041 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | 811 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); |
1042 | resource_size_t bar_size, mw_size; | 812 | resource_size_t bar_size, mw_size; |
@@ -1172,9 +942,8 @@ static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, | |||
1172 | return 0; | 942 | return 0; |
1173 | } | 943 | } |
1174 | 944 | ||
1175 | static u64 intel_ntb_link_is_up(struct ntb_dev *ntb, | 945 | u64 intel_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed, |
1176 | enum ntb_speed *speed, | 946 | enum ntb_width *width) |
1177 | enum ntb_width *width) | ||
1178 | { | 947 | { |
1179 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | 948 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); |
1180 | 949 | ||
@@ -1226,7 +995,7 @@ static int intel_ntb_link_enable(struct ntb_dev *ntb, | |||
1226 | return 0; | 995 | return 0; |
1227 | } | 996 | } |
1228 | 997 | ||
1229 | static int intel_ntb_link_disable(struct ntb_dev *ntb) | 998 | int intel_ntb_link_disable(struct ntb_dev *ntb) |
1230 | { | 999 | { |
1231 | struct intel_ntb_dev *ndev; | 1000 | struct intel_ntb_dev *ndev; |
1232 | u32 ntb_cntl; | 1001 | u32 ntb_cntl; |
@@ -1250,14 +1019,14 @@ static int intel_ntb_link_disable(struct ntb_dev *ntb) | |||
1250 | return 0; | 1019 | return 0; |
1251 | } | 1020 | } |
1252 | 1021 | ||
1253 | static int intel_ntb_peer_mw_count(struct ntb_dev *ntb) | 1022 | int intel_ntb_peer_mw_count(struct ntb_dev *ntb) |
1254 | { | 1023 | { |
1255 | /* Numbers of inbound and outbound memory windows match */ | 1024 | /* Numbers of inbound and outbound memory windows match */ |
1256 | return ntb_ndev(ntb)->mw_count; | 1025 | return ntb_ndev(ntb)->mw_count; |
1257 | } | 1026 | } |
1258 | 1027 | ||
1259 | static int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx, | 1028 | int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx, |
1260 | phys_addr_t *base, resource_size_t *size) | 1029 | phys_addr_t *base, resource_size_t *size) |
1261 | { | 1030 | { |
1262 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | 1031 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); |
1263 | int bar; | 1032 | int bar; |
@@ -1285,12 +1054,12 @@ static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb) | |||
1285 | return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB); | 1054 | return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB); |
1286 | } | 1055 | } |
1287 | 1056 | ||
1288 | static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb) | 1057 | u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb) |
1289 | { | 1058 | { |
1290 | return ntb_ndev(ntb)->db_valid_mask; | 1059 | return ntb_ndev(ntb)->db_valid_mask; |
1291 | } | 1060 | } |
1292 | 1061 | ||
1293 | static int intel_ntb_db_vector_count(struct ntb_dev *ntb) | 1062 | int intel_ntb_db_vector_count(struct ntb_dev *ntb) |
1294 | { | 1063 | { |
1295 | struct intel_ntb_dev *ndev; | 1064 | struct intel_ntb_dev *ndev; |
1296 | 1065 | ||
@@ -1299,7 +1068,7 @@ static int intel_ntb_db_vector_count(struct ntb_dev *ntb) | |||
1299 | return ndev->db_vec_count; | 1068 | return ndev->db_vec_count; |
1300 | } | 1069 | } |
1301 | 1070 | ||
1302 | static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector) | 1071 | u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector) |
1303 | { | 1072 | { |
1304 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | 1073 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); |
1305 | 1074 | ||
@@ -1327,7 +1096,7 @@ static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits) | |||
1327 | ndev->self_reg->db_bell); | 1096 | ndev->self_reg->db_bell); |
1328 | } | 1097 | } |
1329 | 1098 | ||
1330 | static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits) | 1099 | int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits) |
1331 | { | 1100 | { |
1332 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | 1101 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); |
1333 | 1102 | ||
@@ -1336,7 +1105,7 @@ static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits) | |||
1336 | ndev->self_reg->db_mask); | 1105 | ndev->self_reg->db_mask); |
1337 | } | 1106 | } |
1338 | 1107 | ||
1339 | static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) | 1108 | int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) |
1340 | { | 1109 | { |
1341 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | 1110 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); |
1342 | 1111 | ||
@@ -1345,9 +1114,8 @@ static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) | |||
1345 | ndev->self_reg->db_mask); | 1114 | ndev->self_reg->db_mask); |
1346 | } | 1115 | } |
1347 | 1116 | ||
1348 | static int intel_ntb_peer_db_addr(struct ntb_dev *ntb, | 1117 | int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, |
1349 | phys_addr_t *db_addr, | 1118 | resource_size_t *db_size) |
1350 | resource_size_t *db_size) | ||
1351 | { | 1119 | { |
1352 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | 1120 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); |
1353 | 1121 | ||
@@ -1364,12 +1132,12 @@ static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) | |||
1364 | ndev->peer_reg->db_bell); | 1132 | ndev->peer_reg->db_bell); |
1365 | } | 1133 | } |
1366 | 1134 | ||
1367 | static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb) | 1135 | int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb) |
1368 | { | 1136 | { |
1369 | return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD); | 1137 | return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD); |
1370 | } | 1138 | } |
1371 | 1139 | ||
1372 | static int intel_ntb_spad_count(struct ntb_dev *ntb) | 1140 | int intel_ntb_spad_count(struct ntb_dev *ntb) |
1373 | { | 1141 | { |
1374 | struct intel_ntb_dev *ndev; | 1142 | struct intel_ntb_dev *ndev; |
1375 | 1143 | ||
@@ -1378,7 +1146,7 @@ static int intel_ntb_spad_count(struct ntb_dev *ntb) | |||
1378 | return ndev->spad_count; | 1146 | return ndev->spad_count; |
1379 | } | 1147 | } |
1380 | 1148 | ||
1381 | static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx) | 1149 | u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx) |
1382 | { | 1150 | { |
1383 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | 1151 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); |
1384 | 1152 | ||
@@ -1387,8 +1155,7 @@ static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx) | |||
1387 | ndev->self_reg->spad); | 1155 | ndev->self_reg->spad); |
1388 | } | 1156 | } |
1389 | 1157 | ||
1390 | static int intel_ntb_spad_write(struct ntb_dev *ntb, | 1158 | int intel_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val) |
1391 | int idx, u32 val) | ||
1392 | { | 1159 | { |
1393 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | 1160 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); |
1394 | 1161 | ||
@@ -1397,8 +1164,8 @@ static int intel_ntb_spad_write(struct ntb_dev *ntb, | |||
1397 | ndev->self_reg->spad); | 1164 | ndev->self_reg->spad); |
1398 | } | 1165 | } |
1399 | 1166 | ||
1400 | static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx, | 1167 | int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx, |
1401 | phys_addr_t *spad_addr) | 1168 | phys_addr_t *spad_addr) |
1402 | { | 1169 | { |
1403 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | 1170 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); |
1404 | 1171 | ||
@@ -1406,7 +1173,7 @@ static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx, | |||
1406 | ndev->peer_reg->spad); | 1173 | ndev->peer_reg->spad); |
1407 | } | 1174 | } |
1408 | 1175 | ||
1409 | static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx) | 1176 | u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx) |
1410 | { | 1177 | { |
1411 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | 1178 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); |
1412 | 1179 | ||
@@ -1415,8 +1182,8 @@ static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx) | |||
1415 | ndev->peer_reg->spad); | 1182 | ndev->peer_reg->spad); |
1416 | } | 1183 | } |
1417 | 1184 | ||
1418 | static int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, | 1185 | int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx, |
1419 | int sidx, u32 val) | 1186 | u32 val) |
1420 | { | 1187 | { |
1421 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | 1188 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); |
1422 | 1189 | ||
@@ -1425,336 +1192,6 @@ static int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, | |||
1425 | ndev->peer_reg->spad); | 1192 | ndev->peer_reg->spad); |
1426 | } | 1193 | } |
1427 | 1194 | ||
1428 | /* Skylake Xeon NTB */ | ||
1429 | |||
1430 | static int skx_poll_link(struct intel_ntb_dev *ndev) | ||
1431 | { | ||
1432 | u16 reg_val; | ||
1433 | int rc; | ||
1434 | |||
1435 | ndev->reg->db_iowrite(ndev->db_link_mask, | ||
1436 | ndev->self_mmio + | ||
1437 | ndev->self_reg->db_clear); | ||
1438 | |||
1439 | rc = pci_read_config_word(ndev->ntb.pdev, | ||
1440 | SKX_LINK_STATUS_OFFSET, ®_val); | ||
1441 | if (rc) | ||
1442 | return 0; | ||
1443 | |||
1444 | if (reg_val == ndev->lnk_sta) | ||
1445 | return 0; | ||
1446 | |||
1447 | ndev->lnk_sta = reg_val; | ||
1448 | |||
1449 | return 1; | ||
1450 | } | ||
1451 | |||
1452 | static u64 skx_db_ioread(void __iomem *mmio) | ||
1453 | { | ||
1454 | return ioread64(mmio); | ||
1455 | } | ||
1456 | |||
1457 | static void skx_db_iowrite(u64 bits, void __iomem *mmio) | ||
1458 | { | ||
1459 | iowrite64(bits, mmio); | ||
1460 | } | ||
1461 | |||
1462 | static int skx_init_isr(struct intel_ntb_dev *ndev) | ||
1463 | { | ||
1464 | int i; | ||
1465 | |||
1466 | /* | ||
1467 | * The MSIX vectors and the interrupt status bits are not lined up | ||
1468 | * on Skylake. By default the link status bit is bit 32, however it | ||
1469 | * is by default MSIX vector0. We need to fixup to line them up. | ||
1470 | * The vectors at reset is 1-32,0. We need to reprogram to 0-32. | ||
1471 | */ | ||
1472 | |||
1473 | for (i = 0; i < SKX_DB_MSIX_VECTOR_COUNT; i++) | ||
1474 | iowrite8(i, ndev->self_mmio + SKX_INTVEC_OFFSET + i); | ||
1475 | |||
1476 | /* move link status down one as workaround */ | ||
1477 | if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) { | ||
1478 | iowrite8(SKX_DB_MSIX_VECTOR_COUNT - 2, | ||
1479 | ndev->self_mmio + SKX_INTVEC_OFFSET + | ||
1480 | (SKX_DB_MSIX_VECTOR_COUNT - 1)); | ||
1481 | } | ||
1482 | |||
1483 | return ndev_init_isr(ndev, SKX_DB_MSIX_VECTOR_COUNT, | ||
1484 | SKX_DB_MSIX_VECTOR_COUNT, | ||
1485 | SKX_DB_MSIX_VECTOR_SHIFT, | ||
1486 | SKX_DB_TOTAL_SHIFT); | ||
1487 | } | ||
1488 | |||
1489 | static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, | ||
1490 | const struct intel_b2b_addr *addr, | ||
1491 | const struct intel_b2b_addr *peer_addr) | ||
1492 | { | ||
1493 | struct pci_dev *pdev; | ||
1494 | void __iomem *mmio; | ||
1495 | phys_addr_t bar_addr; | ||
1496 | |||
1497 | pdev = ndev->ntb.pdev; | ||
1498 | mmio = ndev->self_mmio; | ||
1499 | |||
1500 | /* setup incoming bar limits == base addrs (zero length windows) */ | ||
1501 | bar_addr = addr->bar2_addr64; | ||
1502 | iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET); | ||
1503 | bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET); | ||
1504 | dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr); | ||
1505 | |||
1506 | bar_addr = addr->bar4_addr64; | ||
1507 | iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET); | ||
1508 | bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET); | ||
1509 | dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr); | ||
1510 | |||
1511 | /* zero incoming translation addrs */ | ||
1512 | iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET); | ||
1513 | iowrite64(0, mmio + SKX_IMBAR2XBASE_OFFSET); | ||
1514 | |||
1515 | ndev->peer_mmio = ndev->self_mmio; | ||
1516 | |||
1517 | return 0; | ||
1518 | } | ||
1519 | |||
1520 | static int skx_init_ntb(struct intel_ntb_dev *ndev) | ||
1521 | { | ||
1522 | int rc; | ||
1523 | |||
1524 | |||
1525 | ndev->mw_count = XEON_MW_COUNT; | ||
1526 | ndev->spad_count = SKX_SPAD_COUNT; | ||
1527 | ndev->db_count = SKX_DB_COUNT; | ||
1528 | ndev->db_link_mask = SKX_DB_LINK_BIT; | ||
1529 | |||
1530 | /* DB fixup for using 31 right now */ | ||
1531 | if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) | ||
1532 | ndev->db_link_mask |= BIT_ULL(31); | ||
1533 | |||
1534 | switch (ndev->ntb.topo) { | ||
1535 | case NTB_TOPO_B2B_USD: | ||
1536 | case NTB_TOPO_B2B_DSD: | ||
1537 | ndev->self_reg = &skx_pri_reg; | ||
1538 | ndev->peer_reg = &skx_b2b_reg; | ||
1539 | ndev->xlat_reg = &skx_sec_xlat; | ||
1540 | |||
1541 | if (ndev->ntb.topo == NTB_TOPO_B2B_USD) { | ||
1542 | rc = skx_setup_b2b_mw(ndev, | ||
1543 | &xeon_b2b_dsd_addr, | ||
1544 | &xeon_b2b_usd_addr); | ||
1545 | } else { | ||
1546 | rc = skx_setup_b2b_mw(ndev, | ||
1547 | &xeon_b2b_usd_addr, | ||
1548 | &xeon_b2b_dsd_addr); | ||
1549 | } | ||
1550 | |||
1551 | if (rc) | ||
1552 | return rc; | ||
1553 | |||
1554 | /* Enable Bus Master and Memory Space on the secondary side */ | ||
1555 | iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER, | ||
1556 | ndev->self_mmio + SKX_SPCICMD_OFFSET); | ||
1557 | |||
1558 | break; | ||
1559 | |||
1560 | default: | ||
1561 | return -EINVAL; | ||
1562 | } | ||
1563 | |||
1564 | ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; | ||
1565 | |||
1566 | ndev->reg->db_iowrite(ndev->db_valid_mask, | ||
1567 | ndev->self_mmio + | ||
1568 | ndev->self_reg->db_mask); | ||
1569 | |||
1570 | return 0; | ||
1571 | } | ||
1572 | |||
1573 | static int skx_init_dev(struct intel_ntb_dev *ndev) | ||
1574 | { | ||
1575 | struct pci_dev *pdev; | ||
1576 | u8 ppd; | ||
1577 | int rc; | ||
1578 | |||
1579 | pdev = ndev->ntb.pdev; | ||
1580 | |||
1581 | ndev->reg = &skx_reg; | ||
1582 | |||
1583 | rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd); | ||
1584 | if (rc) | ||
1585 | return -EIO; | ||
1586 | |||
1587 | ndev->ntb.topo = xeon_ppd_topo(ndev, ppd); | ||
1588 | dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd, | ||
1589 | ntb_topo_string(ndev->ntb.topo)); | ||
1590 | if (ndev->ntb.topo == NTB_TOPO_NONE) | ||
1591 | return -EINVAL; | ||
1592 | |||
1593 | if (pdev_is_skx_xeon(pdev)) | ||
1594 | ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD; | ||
1595 | |||
1596 | rc = skx_init_ntb(ndev); | ||
1597 | if (rc) | ||
1598 | return rc; | ||
1599 | |||
1600 | return skx_init_isr(ndev); | ||
1601 | } | ||
1602 | |||
1603 | static int intel_ntb3_link_enable(struct ntb_dev *ntb, | ||
1604 | enum ntb_speed max_speed, | ||
1605 | enum ntb_width max_width) | ||
1606 | { | ||
1607 | struct intel_ntb_dev *ndev; | ||
1608 | u32 ntb_ctl; | ||
1609 | |||
1610 | ndev = container_of(ntb, struct intel_ntb_dev, ntb); | ||
1611 | |||
1612 | dev_dbg(&ntb->pdev->dev, | ||
1613 | "Enabling link with max_speed %d max_width %d\n", | ||
1614 | max_speed, max_width); | ||
1615 | |||
1616 | if (max_speed != NTB_SPEED_AUTO) | ||
1617 | dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed); | ||
1618 | if (max_width != NTB_WIDTH_AUTO) | ||
1619 | dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width); | ||
1620 | |||
1621 | ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); | ||
1622 | ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK); | ||
1623 | ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP; | ||
1624 | ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP; | ||
1625 | iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl); | ||
1626 | |||
1627 | return 0; | ||
1628 | } | ||
1629 | static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, | ||
1630 | dma_addr_t addr, resource_size_t size) | ||
1631 | { | ||
1632 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | ||
1633 | unsigned long xlat_reg, limit_reg; | ||
1634 | resource_size_t bar_size, mw_size; | ||
1635 | void __iomem *mmio; | ||
1636 | u64 base, limit, reg_val; | ||
1637 | int bar; | ||
1638 | |||
1639 | if (pidx != NTB_DEF_PEER_IDX) | ||
1640 | return -EINVAL; | ||
1641 | |||
1642 | if (idx >= ndev->b2b_idx && !ndev->b2b_off) | ||
1643 | idx += 1; | ||
1644 | |||
1645 | bar = ndev_mw_to_bar(ndev, idx); | ||
1646 | if (bar < 0) | ||
1647 | return bar; | ||
1648 | |||
1649 | bar_size = pci_resource_len(ndev->ntb.pdev, bar); | ||
1650 | |||
1651 | if (idx == ndev->b2b_idx) | ||
1652 | mw_size = bar_size - ndev->b2b_off; | ||
1653 | else | ||
1654 | mw_size = bar_size; | ||
1655 | |||
1656 | /* hardware requires that addr is aligned to bar size */ | ||
1657 | if (addr & (bar_size - 1)) | ||
1658 | return -EINVAL; | ||
1659 | |||
1660 | /* make sure the range fits in the usable mw size */ | ||
1661 | if (size > mw_size) | ||
1662 | return -EINVAL; | ||
1663 | |||
1664 | mmio = ndev->self_mmio; | ||
1665 | xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10); | ||
1666 | limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10); | ||
1667 | base = pci_resource_start(ndev->ntb.pdev, bar); | ||
1668 | |||
1669 | /* Set the limit if supported, if size is not mw_size */ | ||
1670 | if (limit_reg && size != mw_size) | ||
1671 | limit = base + size; | ||
1672 | else | ||
1673 | limit = base + mw_size; | ||
1674 | |||
1675 | /* set and verify setting the translation address */ | ||
1676 | iowrite64(addr, mmio + xlat_reg); | ||
1677 | reg_val = ioread64(mmio + xlat_reg); | ||
1678 | if (reg_val != addr) { | ||
1679 | iowrite64(0, mmio + xlat_reg); | ||
1680 | return -EIO; | ||
1681 | } | ||
1682 | |||
1683 | dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val); | ||
1684 | |||
1685 | /* set and verify setting the limit */ | ||
1686 | iowrite64(limit, mmio + limit_reg); | ||
1687 | reg_val = ioread64(mmio + limit_reg); | ||
1688 | if (reg_val != limit) { | ||
1689 | iowrite64(base, mmio + limit_reg); | ||
1690 | iowrite64(0, mmio + xlat_reg); | ||
1691 | return -EIO; | ||
1692 | } | ||
1693 | |||
1694 | dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val); | ||
1695 | |||
1696 | /* setup the EP */ | ||
1697 | limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000; | ||
1698 | base = ioread64(mmio + SKX_EMBAR1_OFFSET + (8 * idx)); | ||
1699 | base &= ~0xf; | ||
1700 | |||
1701 | if (limit_reg && size != mw_size) | ||
1702 | limit = base + size; | ||
1703 | else | ||
1704 | limit = base + mw_size; | ||
1705 | |||
1706 | /* set and verify setting the limit */ | ||
1707 | iowrite64(limit, mmio + limit_reg); | ||
1708 | reg_val = ioread64(mmio + limit_reg); | ||
1709 | if (reg_val != limit) { | ||
1710 | iowrite64(base, mmio + limit_reg); | ||
1711 | iowrite64(0, mmio + xlat_reg); | ||
1712 | return -EIO; | ||
1713 | } | ||
1714 | |||
1715 | dev_dbg(&ntb->pdev->dev, "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val); | ||
1716 | |||
1717 | return 0; | ||
1718 | } | ||
1719 | |||
1720 | static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits) | ||
1721 | { | ||
1722 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | ||
1723 | int bit; | ||
1724 | |||
1725 | if (db_bits & ~ndev->db_valid_mask) | ||
1726 | return -EINVAL; | ||
1727 | |||
1728 | while (db_bits) { | ||
1729 | bit = __ffs(db_bits); | ||
1730 | iowrite32(1, ndev->peer_mmio + | ||
1731 | ndev->peer_reg->db_bell + (bit * 4)); | ||
1732 | db_bits &= db_bits - 1; | ||
1733 | } | ||
1734 | |||
1735 | return 0; | ||
1736 | } | ||
1737 | |||
1738 | static u64 intel_ntb3_db_read(struct ntb_dev *ntb) | ||
1739 | { | ||
1740 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | ||
1741 | |||
1742 | return ndev_db_read(ndev, | ||
1743 | ndev->self_mmio + | ||
1744 | ndev->self_reg->db_clear); | ||
1745 | } | ||
1746 | |||
1747 | static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits) | ||
1748 | { | ||
1749 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | ||
1750 | |||
1751 | return ndev_db_write(ndev, db_bits, | ||
1752 | ndev->self_mmio + | ||
1753 | ndev->self_reg->db_clear); | ||
1754 | } | ||
1755 | |||
1756 | /* XEON */ | ||
1757 | |||
1758 | static u64 xeon_db_ioread(void __iomem *mmio) | 1195 | static u64 xeon_db_ioread(void __iomem *mmio) |
1759 | { | 1196 | { |
1760 | return (u64)ioread16(mmio); | 1197 | return (u64)ioread16(mmio); |
@@ -1787,7 +1224,7 @@ static int xeon_poll_link(struct intel_ntb_dev *ndev) | |||
1787 | return 1; | 1224 | return 1; |
1788 | } | 1225 | } |
1789 | 1226 | ||
1790 | static int xeon_link_is_up(struct intel_ntb_dev *ndev) | 1227 | int xeon_link_is_up(struct intel_ntb_dev *ndev) |
1791 | { | 1228 | { |
1792 | if (ndev->ntb.topo == NTB_TOPO_SEC) | 1229 | if (ndev->ntb.topo == NTB_TOPO_SEC) |
1793 | return 1; | 1230 | return 1; |
@@ -1795,7 +1232,7 @@ static int xeon_link_is_up(struct intel_ntb_dev *ndev) | |||
1795 | return NTB_LNK_STA_ACTIVE(ndev->lnk_sta); | 1232 | return NTB_LNK_STA_ACTIVE(ndev->lnk_sta); |
1796 | } | 1233 | } |
1797 | 1234 | ||
1798 | static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd) | 1235 | enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd) |
1799 | { | 1236 | { |
1800 | switch (ppd & XEON_PPD_TOPO_MASK) { | 1237 | switch (ppd & XEON_PPD_TOPO_MASK) { |
1801 | case XEON_PPD_TOPO_B2B_USD: | 1238 | case XEON_PPD_TOPO_B2B_USD: |
@@ -2539,50 +1976,20 @@ static const struct intel_ntb_xlat_reg xeon_sec_xlat = { | |||
2539 | .bar2_xlat = XEON_SBAR23XLAT_OFFSET, | 1976 | .bar2_xlat = XEON_SBAR23XLAT_OFFSET, |
2540 | }; | 1977 | }; |
2541 | 1978 | ||
2542 | static struct intel_b2b_addr xeon_b2b_usd_addr = { | 1979 | struct intel_b2b_addr xeon_b2b_usd_addr = { |
2543 | .bar2_addr64 = XEON_B2B_BAR2_ADDR64, | 1980 | .bar2_addr64 = XEON_B2B_BAR2_ADDR64, |
2544 | .bar4_addr64 = XEON_B2B_BAR4_ADDR64, | 1981 | .bar4_addr64 = XEON_B2B_BAR4_ADDR64, |
2545 | .bar4_addr32 = XEON_B2B_BAR4_ADDR32, | 1982 | .bar4_addr32 = XEON_B2B_BAR4_ADDR32, |
2546 | .bar5_addr32 = XEON_B2B_BAR5_ADDR32, | 1983 | .bar5_addr32 = XEON_B2B_BAR5_ADDR32, |
2547 | }; | 1984 | }; |
2548 | 1985 | ||
2549 | static struct intel_b2b_addr xeon_b2b_dsd_addr = { | 1986 | struct intel_b2b_addr xeon_b2b_dsd_addr = { |
2550 | .bar2_addr64 = XEON_B2B_BAR2_ADDR64, | 1987 | .bar2_addr64 = XEON_B2B_BAR2_ADDR64, |
2551 | .bar4_addr64 = XEON_B2B_BAR4_ADDR64, | 1988 | .bar4_addr64 = XEON_B2B_BAR4_ADDR64, |
2552 | .bar4_addr32 = XEON_B2B_BAR4_ADDR32, | 1989 | .bar4_addr32 = XEON_B2B_BAR4_ADDR32, |
2553 | .bar5_addr32 = XEON_B2B_BAR5_ADDR32, | 1990 | .bar5_addr32 = XEON_B2B_BAR5_ADDR32, |
2554 | }; | 1991 | }; |
2555 | 1992 | ||
2556 | static const struct intel_ntb_reg skx_reg = { | ||
2557 | .poll_link = skx_poll_link, | ||
2558 | .link_is_up = xeon_link_is_up, | ||
2559 | .db_ioread = skx_db_ioread, | ||
2560 | .db_iowrite = skx_db_iowrite, | ||
2561 | .db_size = sizeof(u32), | ||
2562 | .ntb_ctl = SKX_NTBCNTL_OFFSET, | ||
2563 | .mw_bar = {2, 4}, | ||
2564 | }; | ||
2565 | |||
2566 | static const struct intel_ntb_alt_reg skx_pri_reg = { | ||
2567 | .db_bell = SKX_EM_DOORBELL_OFFSET, | ||
2568 | .db_clear = SKX_IM_INT_STATUS_OFFSET, | ||
2569 | .db_mask = SKX_IM_INT_DISABLE_OFFSET, | ||
2570 | .spad = SKX_IM_SPAD_OFFSET, | ||
2571 | }; | ||
2572 | |||
2573 | static const struct intel_ntb_alt_reg skx_b2b_reg = { | ||
2574 | .db_bell = SKX_IM_DOORBELL_OFFSET, | ||
2575 | .db_clear = SKX_EM_INT_STATUS_OFFSET, | ||
2576 | .db_mask = SKX_EM_INT_DISABLE_OFFSET, | ||
2577 | .spad = SKX_B2B_SPAD_OFFSET, | ||
2578 | }; | ||
2579 | |||
2580 | static const struct intel_ntb_xlat_reg skx_sec_xlat = { | ||
2581 | /* .bar0_base = SKX_EMBAR0_OFFSET, */ | ||
2582 | .bar2_limit = SKX_IMBAR1XLMT_OFFSET, | ||
2583 | .bar2_xlat = SKX_IMBAR1XBASE_OFFSET, | ||
2584 | }; | ||
2585 | |||
2586 | /* operations for primary side of local ntb */ | 1993 | /* operations for primary side of local ntb */ |
2587 | static const struct ntb_dev_ops intel_ntb_ops = { | 1994 | static const struct ntb_dev_ops intel_ntb_ops = { |
2588 | .mw_count = intel_ntb_mw_count, | 1995 | .mw_count = intel_ntb_mw_count, |
@@ -2612,33 +2019,6 @@ static const struct ntb_dev_ops intel_ntb_ops = { | |||
2612 | .peer_spad_write = intel_ntb_peer_spad_write, | 2019 | .peer_spad_write = intel_ntb_peer_spad_write, |
2613 | }; | 2020 | }; |
2614 | 2021 | ||
2615 | static const struct ntb_dev_ops intel_ntb3_ops = { | ||
2616 | .mw_count = intel_ntb_mw_count, | ||
2617 | .mw_get_align = intel_ntb_mw_get_align, | ||
2618 | .mw_set_trans = intel_ntb3_mw_set_trans, | ||
2619 | .peer_mw_count = intel_ntb_peer_mw_count, | ||
2620 | .peer_mw_get_addr = intel_ntb_peer_mw_get_addr, | ||
2621 | .link_is_up = intel_ntb_link_is_up, | ||
2622 | .link_enable = intel_ntb3_link_enable, | ||
2623 | .link_disable = intel_ntb_link_disable, | ||
2624 | .db_valid_mask = intel_ntb_db_valid_mask, | ||
2625 | .db_vector_count = intel_ntb_db_vector_count, | ||
2626 | .db_vector_mask = intel_ntb_db_vector_mask, | ||
2627 | .db_read = intel_ntb3_db_read, | ||
2628 | .db_clear = intel_ntb3_db_clear, | ||
2629 | .db_set_mask = intel_ntb_db_set_mask, | ||
2630 | .db_clear_mask = intel_ntb_db_clear_mask, | ||
2631 | .peer_db_addr = intel_ntb_peer_db_addr, | ||
2632 | .peer_db_set = intel_ntb3_peer_db_set, | ||
2633 | .spad_is_unsafe = intel_ntb_spad_is_unsafe, | ||
2634 | .spad_count = intel_ntb_spad_count, | ||
2635 | .spad_read = intel_ntb_spad_read, | ||
2636 | .spad_write = intel_ntb_spad_write, | ||
2637 | .peer_spad_addr = intel_ntb_peer_spad_addr, | ||
2638 | .peer_spad_read = intel_ntb_peer_spad_read, | ||
2639 | .peer_spad_write = intel_ntb_peer_spad_write, | ||
2640 | }; | ||
2641 | |||
2642 | static const struct file_operations intel_ntb_debugfs_info = { | 2022 | static const struct file_operations intel_ntb_debugfs_info = { |
2643 | .owner = THIS_MODULE, | 2023 | .owner = THIS_MODULE, |
2644 | .open = simple_open, | 2024 | .open = simple_open, |
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.h b/drivers/ntb/hw/intel/ntb_hw_gen1.h
index fa61dcb4e812..ad8ec1444436 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.h
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.h
@@ -44,6 +44,8 @@ | |||
44 | #ifndef _NTB_INTEL_GEN1_H_ | 44 | #ifndef _NTB_INTEL_GEN1_H_ |
45 | #define _NTB_INTEL_GEN1_H_ | 45 | #define _NTB_INTEL_GEN1_H_ |
46 | 46 | ||
47 | #include "ntb_hw_intel.h" | ||
48 | |||
47 | /* Intel Gen1 Xeon hardware */ | 49 | /* Intel Gen1 Xeon hardware */ |
48 | #define XEON_PBAR23LMT_OFFSET 0x0000 | 50 | #define XEON_PBAR23LMT_OFFSET 0x0000 |
49 | #define XEON_PBAR45LMT_OFFSET 0x0008 | 51 | #define XEON_PBAR45LMT_OFFSET 0x0008 |
@@ -139,4 +141,42 @@ | |||
139 | #define NTB_HWERR_B2BDOORBELL_BIT14 BIT_ULL(2) | 141 | #define NTB_HWERR_B2BDOORBELL_BIT14 BIT_ULL(2) |
140 | #define NTB_HWERR_MSIX_VECTOR32_BAD BIT_ULL(3) | 142 | #define NTB_HWERR_MSIX_VECTOR32_BAD BIT_ULL(3) |
141 | 143 | ||
144 | extern struct intel_b2b_addr xeon_b2b_usd_addr; | ||
145 | extern struct intel_b2b_addr xeon_b2b_dsd_addr; | ||
146 | |||
147 | int ndev_init_isr(struct intel_ntb_dev *ndev, int msix_min, int msix_max, | ||
148 | int msix_shift, int total_shift); | ||
149 | enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd); | ||
150 | u64 ndev_db_read(struct intel_ntb_dev *ndev, void __iomem *mmio); | ||
151 | int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits, | ||
152 | void __iomem *mmio); | ||
153 | int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx); | ||
154 | int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx); | ||
155 | int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx, | ||
156 | resource_size_t *addr_align, resource_size_t *size_align, | ||
157 | resource_size_t *size_max); | ||
158 | int intel_ntb_peer_mw_count(struct ntb_dev *ntb); | ||
159 | int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx, | ||
160 | phys_addr_t *base, resource_size_t *size); | ||
161 | u64 intel_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed, | ||
162 | enum ntb_width *width); | ||
163 | int intel_ntb_link_disable(struct ntb_dev *ntb); | ||
164 | u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb); | ||
165 | int intel_ntb_db_vector_count(struct ntb_dev *ntb); | ||
166 | u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector); | ||
167 | int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits); | ||
168 | int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits); | ||
169 | int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, | ||
170 | resource_size_t *db_size); | ||
171 | int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb); | ||
172 | int intel_ntb_spad_count(struct ntb_dev *ntb); | ||
173 | u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx); | ||
174 | int intel_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val); | ||
175 | u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx); | ||
176 | int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx, | ||
177 | u32 val); | ||
178 | int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx, | ||
179 | phys_addr_t *spad_addr); | ||
180 | int xeon_link_is_up(struct intel_ntb_dev *ndev); | ||
181 | |||
142 | #endif | 182 | #endif |
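With the declarations above, ntb_hw_gen1.h becomes the interface between the two
objects: the new gen3 file includes all three headers and builds its Skylake-specific
callbacks on the exported gen1 helpers. A minimal consumer, condensed from the new
file that follows, looks like this (sketch only, not the full file):

	#include "ntb_hw_intel.h"
	#include "ntb_hw_gen1.h"	/* exported gen1 helpers, xeon_b2b_*_addr */
	#include "ntb_hw_gen3.h"

	/* gen3 clears doorbells through its own db_clear register, while the
	 * unsafe-flag handling stays in the shared gen1 helper
	 */
	static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
	{
		struct intel_ntb_dev *ndev = ntb_ndev(ntb);

		return ndev_db_write(ndev, db_bits,
				     ndev->self_mmio +
				     ndev->self_reg->db_clear);
	}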
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.c b/drivers/ntb/hw/intel/ntb_hw_gen3.c
new file mode 100644
index 000000000000..52cd8cdf7697
--- /dev/null
+++ b/drivers/ntb/hw/intel/ntb_hw_gen3.c
@@ -0,0 +1,597 @@ | |||
1 | /* | ||
2 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | * redistributing this file, you may do so under either license. | ||
4 | * | ||
5 | * GPL LICENSE SUMMARY | ||
6 | * | ||
7 | * Copyright(c) 2017 Intel Corporation. All rights reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of version 2 of the GNU General Public License as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * BSD LICENSE | ||
14 | * | ||
15 | * Copyright(c) 2017 Intel Corporation. All rights reserved. | ||
16 | * | ||
17 | * Redistribution and use in source and binary forms, with or without | ||
18 | * modification, are permitted provided that the following conditions | ||
19 | * are met: | ||
20 | * | ||
21 | * * Redistributions of source code must retain the above copyright | ||
22 | * notice, this list of conditions and the following disclaimer. | ||
23 | * * Redistributions in binary form must reproduce the above copy | ||
24 | * notice, this list of conditions and the following disclaimer in | ||
25 | * the documentation and/or other materials provided with the | ||
26 | * distribution. | ||
27 | * * Neither the name of Intel Corporation nor the names of its | ||
28 | * contributors may be used to endorse or promote products derived | ||
29 | * from this software without specific prior written permission. | ||
30 | * | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
36 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
37 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
38 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
39 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
40 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
41 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
42 | * | ||
43 | * Intel PCIe NTB Linux driver | ||
44 | * | ||
45 | * Contact Information: | ||
46 | * Jon Mason <jon.mason@intel.com> | ||
47 | */ | ||
48 | |||
49 | #include <linux/debugfs.h> | ||
50 | #include <linux/delay.h> | ||
51 | #include <linux/init.h> | ||
52 | #include <linux/interrupt.h> | ||
53 | #include <linux/module.h> | ||
54 | #include <linux/pci.h> | ||
55 | #include <linux/random.h> | ||
56 | #include <linux/slab.h> | ||
57 | #include <linux/ntb.h> | ||
58 | |||
59 | #include "ntb_hw_intel.h" | ||
60 | #include "ntb_hw_gen1.h" | ||
61 | #include "ntb_hw_gen3.h" | ||
62 | |||
63 | static const struct intel_ntb_reg skx_reg = { | ||
64 | .poll_link = skx_poll_link, | ||
65 | .link_is_up = xeon_link_is_up, | ||
66 | .db_ioread = skx_db_ioread, | ||
67 | .db_iowrite = skx_db_iowrite, | ||
68 | .db_size = sizeof(u32), | ||
69 | .ntb_ctl = SKX_NTBCNTL_OFFSET, | ||
70 | .mw_bar = {2, 4}, | ||
71 | }; | ||
72 | |||
73 | static const struct intel_ntb_alt_reg skx_pri_reg = { | ||
74 | .db_bell = SKX_EM_DOORBELL_OFFSET, | ||
75 | .db_clear = SKX_IM_INT_STATUS_OFFSET, | ||
76 | .db_mask = SKX_IM_INT_DISABLE_OFFSET, | ||
77 | .spad = SKX_IM_SPAD_OFFSET, | ||
78 | }; | ||
79 | |||
80 | static const struct intel_ntb_alt_reg skx_b2b_reg = { | ||
81 | .db_bell = SKX_IM_DOORBELL_OFFSET, | ||
82 | .db_clear = SKX_EM_INT_STATUS_OFFSET, | ||
83 | .db_mask = SKX_EM_INT_DISABLE_OFFSET, | ||
84 | .spad = SKX_B2B_SPAD_OFFSET, | ||
85 | }; | ||
86 | |||
87 | static const struct intel_ntb_xlat_reg skx_sec_xlat = { | ||
88 | /* .bar0_base = SKX_EMBAR0_OFFSET, */ | ||
89 | .bar2_limit = SKX_IMBAR1XLMT_OFFSET, | ||
90 | .bar2_xlat = SKX_IMBAR1XBASE_OFFSET, | ||
91 | }; | ||
92 | |||
93 | int skx_poll_link(struct intel_ntb_dev *ndev) | ||
94 | { | ||
95 | u16 reg_val; | ||
96 | int rc; | ||
97 | |||
98 | ndev->reg->db_iowrite(ndev->db_link_mask, | ||
99 | ndev->self_mmio + | ||
100 | ndev->self_reg->db_clear); | ||
101 | |||
102 | rc = pci_read_config_word(ndev->ntb.pdev, | ||
103 | SKX_LINK_STATUS_OFFSET, ®_val); | ||
104 | if (rc) | ||
105 | return 0; | ||
106 | |||
107 | if (reg_val == ndev->lnk_sta) | ||
108 | return 0; | ||
109 | |||
110 | ndev->lnk_sta = reg_val; | ||
111 | |||
112 | return 1; | ||
113 | } | ||
114 | |||
115 | static int skx_init_isr(struct intel_ntb_dev *ndev) | ||
116 | { | ||
117 | int i; | ||
118 | |||
119 | /* | ||
120 | * The MSIX vectors and the interrupt status bits are not lined up | ||
121 | * on Skylake. By default the link status bit is bit 32, however it | ||
122 | * is by default MSIX vector0. We need to fixup to line them up. | ||
123 | * The vectors at reset is 1-32,0. We need to reprogram to 0-32. | ||
124 | */ | ||
125 | |||
126 | for (i = 0; i < SKX_DB_MSIX_VECTOR_COUNT; i++) | ||
127 | iowrite8(i, ndev->self_mmio + SKX_INTVEC_OFFSET + i); | ||
128 | |||
129 | /* move link status down one as workaround */ | ||
130 | if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) { | ||
131 | iowrite8(SKX_DB_MSIX_VECTOR_COUNT - 2, | ||
132 | ndev->self_mmio + SKX_INTVEC_OFFSET + | ||
133 | (SKX_DB_MSIX_VECTOR_COUNT - 1)); | ||
134 | } | ||
135 | |||
136 | return ndev_init_isr(ndev, SKX_DB_MSIX_VECTOR_COUNT, | ||
137 | SKX_DB_MSIX_VECTOR_COUNT, | ||
138 | SKX_DB_MSIX_VECTOR_SHIFT, | ||
139 | SKX_DB_TOTAL_SHIFT); | ||
140 | } | ||
141 | |||
142 | static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, | ||
143 | const struct intel_b2b_addr *addr, | ||
144 | const struct intel_b2b_addr *peer_addr) | ||
145 | { | ||
146 | struct pci_dev *pdev; | ||
147 | void __iomem *mmio; | ||
148 | phys_addr_t bar_addr; | ||
149 | |||
150 | pdev = ndev->ntb.pdev; | ||
151 | mmio = ndev->self_mmio; | ||
152 | |||
153 | /* setup incoming bar limits == base addrs (zero length windows) */ | ||
154 | bar_addr = addr->bar2_addr64; | ||
155 | iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET); | ||
156 | bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET); | ||
157 | dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr); | ||
158 | |||
159 | bar_addr = addr->bar4_addr64; | ||
160 | iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET); | ||
161 | bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET); | ||
162 | dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr); | ||
163 | |||
164 | /* zero incoming translation addrs */ | ||
165 | iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET); | ||
166 | iowrite64(0, mmio + SKX_IMBAR2XBASE_OFFSET); | ||
167 | |||
168 | ndev->peer_mmio = ndev->self_mmio; | ||
169 | |||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | static int skx_init_ntb(struct intel_ntb_dev *ndev) | ||
174 | { | ||
175 | int rc; | ||
176 | |||
177 | |||
178 | ndev->mw_count = XEON_MW_COUNT; | ||
179 | ndev->spad_count = SKX_SPAD_COUNT; | ||
180 | ndev->db_count = SKX_DB_COUNT; | ||
181 | ndev->db_link_mask = SKX_DB_LINK_BIT; | ||
182 | |||
183 | /* DB fixup for using 31 right now */ | ||
184 | if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) | ||
185 | ndev->db_link_mask |= BIT_ULL(31); | ||
186 | |||
187 | switch (ndev->ntb.topo) { | ||
188 | case NTB_TOPO_B2B_USD: | ||
189 | case NTB_TOPO_B2B_DSD: | ||
190 | ndev->self_reg = &skx_pri_reg; | ||
191 | ndev->peer_reg = &skx_b2b_reg; | ||
192 | ndev->xlat_reg = &skx_sec_xlat; | ||
193 | |||
194 | if (ndev->ntb.topo == NTB_TOPO_B2B_USD) { | ||
195 | rc = skx_setup_b2b_mw(ndev, | ||
196 | &xeon_b2b_dsd_addr, | ||
197 | &xeon_b2b_usd_addr); | ||
198 | } else { | ||
199 | rc = skx_setup_b2b_mw(ndev, | ||
200 | &xeon_b2b_usd_addr, | ||
201 | &xeon_b2b_dsd_addr); | ||
202 | } | ||
203 | |||
204 | if (rc) | ||
205 | return rc; | ||
206 | |||
207 | /* Enable Bus Master and Memory Space on the secondary side */ | ||
208 | iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER, | ||
209 | ndev->self_mmio + SKX_SPCICMD_OFFSET); | ||
210 | |||
211 | break; | ||
212 | |||
213 | default: | ||
214 | return -EINVAL; | ||
215 | } | ||
216 | |||
217 | ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; | ||
218 | |||
219 | ndev->reg->db_iowrite(ndev->db_valid_mask, | ||
220 | ndev->self_mmio + | ||
221 | ndev->self_reg->db_mask); | ||
222 | |||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | int skx_init_dev(struct intel_ntb_dev *ndev) | ||
227 | { | ||
228 | struct pci_dev *pdev; | ||
229 | u8 ppd; | ||
230 | int rc; | ||
231 | |||
232 | pdev = ndev->ntb.pdev; | ||
233 | |||
234 | ndev->reg = &skx_reg; | ||
235 | |||
236 | rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd); | ||
237 | if (rc) | ||
238 | return -EIO; | ||
239 | |||
240 | ndev->ntb.topo = xeon_ppd_topo(ndev, ppd); | ||
241 | dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd, | ||
242 | ntb_topo_string(ndev->ntb.topo)); | ||
243 | if (ndev->ntb.topo == NTB_TOPO_NONE) | ||
244 | return -EINVAL; | ||
245 | |||
246 | ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD; | ||
247 | |||
248 | rc = skx_init_ntb(ndev); | ||
249 | if (rc) | ||
250 | return rc; | ||
251 | |||
252 | return skx_init_isr(ndev); | ||
253 | } | ||
254 | |||
255 | ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf, | ||
256 | size_t count, loff_t *offp) | ||
257 | { | ||
258 | struct intel_ntb_dev *ndev; | ||
259 | void __iomem *mmio; | ||
260 | char *buf; | ||
261 | size_t buf_size; | ||
262 | ssize_t ret, off; | ||
263 | union { u64 v64; u32 v32; u16 v16; } u; | ||
264 | |||
265 | ndev = filp->private_data; | ||
266 | mmio = ndev->self_mmio; | ||
267 | |||
268 | buf_size = min(count, 0x800ul); | ||
269 | |||
270 | buf = kmalloc(buf_size, GFP_KERNEL); | ||
271 | if (!buf) | ||
272 | return -ENOMEM; | ||
273 | |||
274 | off = 0; | ||
275 | |||
276 | off += scnprintf(buf + off, buf_size - off, | ||
277 | "NTB Device Information:\n"); | ||
278 | |||
279 | off += scnprintf(buf + off, buf_size - off, | ||
280 | "Connection Topology -\t%s\n", | ||
281 | ntb_topo_string(ndev->ntb.topo)); | ||
282 | |||
283 | off += scnprintf(buf + off, buf_size - off, | ||
284 | "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl); | ||
285 | off += scnprintf(buf + off, buf_size - off, | ||
286 | "LNK STA -\t\t%#06x\n", ndev->lnk_sta); | ||
287 | |||
288 | if (!ndev->reg->link_is_up(ndev)) | ||
289 | off += scnprintf(buf + off, buf_size - off, | ||
290 | "Link Status -\t\tDown\n"); | ||
291 | else { | ||
292 | off += scnprintf(buf + off, buf_size - off, | ||
293 | "Link Status -\t\tUp\n"); | ||
294 | off += scnprintf(buf + off, buf_size - off, | ||
295 | "Link Speed -\t\tPCI-E Gen %u\n", | ||
296 | NTB_LNK_STA_SPEED(ndev->lnk_sta)); | ||
297 | off += scnprintf(buf + off, buf_size - off, | ||
298 | "Link Width -\t\tx%u\n", | ||
299 | NTB_LNK_STA_WIDTH(ndev->lnk_sta)); | ||
300 | } | ||
301 | |||
302 | off += scnprintf(buf + off, buf_size - off, | ||
303 | "Memory Window Count -\t%u\n", ndev->mw_count); | ||
304 | off += scnprintf(buf + off, buf_size - off, | ||
305 | "Scratchpad Count -\t%u\n", ndev->spad_count); | ||
306 | off += scnprintf(buf + off, buf_size - off, | ||
307 | "Doorbell Count -\t%u\n", ndev->db_count); | ||
308 | off += scnprintf(buf + off, buf_size - off, | ||
309 | "Doorbell Vector Count -\t%u\n", ndev->db_vec_count); | ||
310 | off += scnprintf(buf + off, buf_size - off, | ||
311 | "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift); | ||
312 | |||
313 | off += scnprintf(buf + off, buf_size - off, | ||
314 | "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask); | ||
315 | off += scnprintf(buf + off, buf_size - off, | ||
316 | "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask); | ||
317 | off += scnprintf(buf + off, buf_size - off, | ||
318 | "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask); | ||
319 | |||
320 | u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask); | ||
321 | off += scnprintf(buf + off, buf_size - off, | ||
322 | "Doorbell Mask -\t\t%#llx\n", u.v64); | ||
323 | |||
324 | u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell); | ||
325 | off += scnprintf(buf + off, buf_size - off, | ||
326 | "Doorbell Bell -\t\t%#llx\n", u.v64); | ||
327 | |||
328 | off += scnprintf(buf + off, buf_size - off, | ||
329 | "\nNTB Incoming XLAT:\n"); | ||
330 | |||
331 | u.v64 = ioread64(mmio + SKX_IMBAR1XBASE_OFFSET); | ||
332 | off += scnprintf(buf + off, buf_size - off, | ||
333 | "IMBAR1XBASE -\t\t%#018llx\n", u.v64); | ||
334 | |||
335 | u.v64 = ioread64(mmio + SKX_IMBAR2XBASE_OFFSET); | ||
336 | off += scnprintf(buf + off, buf_size - off, | ||
337 | "IMBAR2XBASE -\t\t%#018llx\n", u.v64); | ||
338 | |||
339 | u.v64 = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET); | ||
340 | off += scnprintf(buf + off, buf_size - off, | ||
341 | "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64); | ||
342 | |||
343 | u.v64 = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET); | ||
344 | off += scnprintf(buf + off, buf_size - off, | ||
345 | "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64); | ||
346 | |||
347 | if (ntb_topo_is_b2b(ndev->ntb.topo)) { | ||
348 | off += scnprintf(buf + off, buf_size - off, | ||
349 | "\nNTB Outgoing B2B XLAT:\n"); | ||
350 | |||
351 | u.v64 = ioread64(mmio + SKX_EMBAR1XBASE_OFFSET); | ||
352 | off += scnprintf(buf + off, buf_size - off, | ||
353 | "EMBAR1XBASE -\t\t%#018llx\n", u.v64); | ||
354 | |||
355 | u.v64 = ioread64(mmio + SKX_EMBAR2XBASE_OFFSET); | ||
356 | off += scnprintf(buf + off, buf_size - off, | ||
357 | "EMBAR2XBASE -\t\t%#018llx\n", u.v64); | ||
358 | |||
359 | u.v64 = ioread64(mmio + SKX_EMBAR1XLMT_OFFSET); | ||
360 | off += scnprintf(buf + off, buf_size - off, | ||
361 | "EMBAR1XLMT -\t\t%#018llx\n", u.v64); | ||
362 | |||
363 | u.v64 = ioread64(mmio + SKX_EMBAR2XLMT_OFFSET); | ||
364 | off += scnprintf(buf + off, buf_size - off, | ||
365 | "EMBAR2XLMT -\t\t%#018llx\n", u.v64); | ||
366 | |||
367 | off += scnprintf(buf + off, buf_size - off, | ||
368 | "\nNTB Secondary BAR:\n"); | ||
369 | |||
370 | u.v64 = ioread64(mmio + SKX_EMBAR0_OFFSET); | ||
371 | off += scnprintf(buf + off, buf_size - off, | ||
372 | "EMBAR0 -\t\t%#018llx\n", u.v64); | ||
373 | |||
374 | u.v64 = ioread64(mmio + SKX_EMBAR1_OFFSET); | ||
375 | off += scnprintf(buf + off, buf_size - off, | ||
376 | "EMBAR1 -\t\t%#018llx\n", u.v64); | ||
377 | |||
378 | u.v64 = ioread64(mmio + SKX_EMBAR2_OFFSET); | ||
379 | off += scnprintf(buf + off, buf_size - off, | ||
380 | "EMBAR2 -\t\t%#018llx\n", u.v64); | ||
381 | } | ||
382 | |||
383 | off += scnprintf(buf + off, buf_size - off, | ||
384 | "\nNTB Statistics:\n"); | ||
385 | |||
386 | u.v16 = ioread16(mmio + SKX_USMEMMISS_OFFSET); | ||
387 | off += scnprintf(buf + off, buf_size - off, | ||
388 | "Upstream Memory Miss -\t%u\n", u.v16); | ||
389 | |||
390 | off += scnprintf(buf + off, buf_size - off, | ||
391 | "\nNTB Hardware Errors:\n"); | ||
392 | |||
393 | if (!pci_read_config_word(ndev->ntb.pdev, | ||
394 | SKX_DEVSTS_OFFSET, &u.v16)) | ||
395 | off += scnprintf(buf + off, buf_size - off, | ||
396 | "DEVSTS -\t\t%#06x\n", u.v16); | ||
397 | |||
398 | if (!pci_read_config_word(ndev->ntb.pdev, | ||
399 | SKX_LINK_STATUS_OFFSET, &u.v16)) | ||
400 | off += scnprintf(buf + off, buf_size - off, | ||
401 | "LNKSTS -\t\t%#06x\n", u.v16); | ||
402 | |||
403 | if (!pci_read_config_dword(ndev->ntb.pdev, | ||
404 | SKX_UNCERRSTS_OFFSET, &u.v32)) | ||
405 | off += scnprintf(buf + off, buf_size - off, | ||
406 | "UNCERRSTS -\t\t%#06x\n", u.v32); | ||
407 | |||
408 | if (!pci_read_config_dword(ndev->ntb.pdev, | ||
409 | SKX_CORERRSTS_OFFSET, &u.v32)) | ||
410 | off += scnprintf(buf + off, buf_size - off, | ||
411 | "CORERRSTS -\t\t%#06x\n", u.v32); | ||
412 | |||
413 | ret = simple_read_from_buffer(ubuf, count, offp, buf, off); | ||
414 | kfree(buf); | ||
415 | return ret; | ||
416 | } | ||
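The handler above is deliberately non-static: after the split it is declared in ntb_hw_gen3.h and called from the generation dispatch that stays in ntb_hw_gen1.c. A rough, abridged sketch of that dispatch (the gen1 counterpart ndev_ntb_debugfs_read and the exact error handling are assumptions here, not part of this hunk):

static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
                                 size_t count, loff_t *offp)
{
        struct intel_ntb_dev *ndev = filp->private_data;
        struct pci_dev *pdev = ndev->ntb.pdev;

        /* Route the debugfs "info" read to the matching hardware generation. */
        if (pdev_is_skx_xeon(pdev))
                return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);

        return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
}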
417 | |||
418 | static int intel_ntb3_link_enable(struct ntb_dev *ntb, | ||
419 | enum ntb_speed max_speed, | ||
420 | enum ntb_width max_width) | ||
421 | { | ||
422 | struct intel_ntb_dev *ndev; | ||
423 | u32 ntb_ctl; | ||
424 | |||
425 | ndev = container_of(ntb, struct intel_ntb_dev, ntb); | ||
426 | |||
427 | dev_dbg(&ntb->pdev->dev, | ||
428 | "Enabling link with max_speed %d max_width %d\n", | ||
429 | max_speed, max_width); | ||
430 | |||
431 | if (max_speed != NTB_SPEED_AUTO) | ||
432 | dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed); | ||
433 | if (max_width != NTB_WIDTH_AUTO) | ||
434 | dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width); | ||
435 | |||
436 | ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); | ||
437 | ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK); | ||
438 | ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP; | ||
439 | ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP; | ||
440 | iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl); | ||
441 | |||
442 | return 0; | ||
443 | } | ||
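Gen3 link enable only clears the disable and config-lock bits and turns on BAR2/BAR4 snooping; the speed and width hints are ignored. Clients never call this op directly, they go through the wrappers in include/linux/ntb.h. A minimal, illustrative caller (the function name is made up for the sketch):

static int example_bring_up_link(struct ntb_dev *ntb)
{
        /* Dispatches to intel_ntb3_link_enable() on gen3 devices. */
        return ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
}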
444 | static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, | ||
445 | dma_addr_t addr, resource_size_t size) | ||
446 | { | ||
447 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | ||
448 | unsigned long xlat_reg, limit_reg; | ||
449 | resource_size_t bar_size, mw_size; | ||
450 | void __iomem *mmio; | ||
451 | u64 base, limit, reg_val; | ||
452 | int bar; | ||
453 | |||
454 | if (pidx != NTB_DEF_PEER_IDX) | ||
455 | return -EINVAL; | ||
456 | |||
457 | if (idx >= ndev->b2b_idx && !ndev->b2b_off) | ||
458 | idx += 1; | ||
459 | |||
460 | bar = ndev_mw_to_bar(ndev, idx); | ||
461 | if (bar < 0) | ||
462 | return bar; | ||
463 | |||
464 | bar_size = pci_resource_len(ndev->ntb.pdev, bar); | ||
465 | |||
466 | if (idx == ndev->b2b_idx) | ||
467 | mw_size = bar_size - ndev->b2b_off; | ||
468 | else | ||
469 | mw_size = bar_size; | ||
470 | |||
471 | /* hardware requires that addr is aligned to bar size */ | ||
472 | if (addr & (bar_size - 1)) | ||
473 | return -EINVAL; | ||
474 | |||
475 | /* make sure the range fits in the usable mw size */ | ||
476 | if (size > mw_size) | ||
477 | return -EINVAL; | ||
478 | |||
479 | mmio = ndev->self_mmio; | ||
480 | xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10); | ||
481 | limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10); | ||
482 | base = pci_resource_start(ndev->ntb.pdev, bar); | ||
483 | |||
484 | /* Set the limit if supported, if size is not mw_size */ | ||
485 | if (limit_reg && size != mw_size) | ||
486 | limit = base + size; | ||
487 | else | ||
488 | limit = base + mw_size; | ||
489 | |||
490 | /* set and verify setting the translation address */ | ||
491 | iowrite64(addr, mmio + xlat_reg); | ||
492 | reg_val = ioread64(mmio + xlat_reg); | ||
493 | if (reg_val != addr) { | ||
494 | iowrite64(0, mmio + xlat_reg); | ||
495 | return -EIO; | ||
496 | } | ||
497 | |||
498 | dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val); | ||
499 | |||
500 | /* set and verify setting the limit */ | ||
501 | iowrite64(limit, mmio + limit_reg); | ||
502 | reg_val = ioread64(mmio + limit_reg); | ||
503 | if (reg_val != limit) { | ||
504 | iowrite64(base, mmio + limit_reg); | ||
505 | iowrite64(0, mmio + xlat_reg); | ||
506 | return -EIO; | ||
507 | } | ||
508 | |||
509 | dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val); | ||
510 | |||
511 | /* setup the EP */ | ||
512 | limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000; | ||
513 | base = ioread64(mmio + SKX_EMBAR1_OFFSET + (8 * idx)); | ||
514 | base &= ~0xf; | ||
515 | |||
516 | if (limit_reg && size != mw_size) | ||
517 | limit = base + size; | ||
518 | else | ||
519 | limit = base + mw_size; | ||
520 | |||
521 | /* set and verify setting the limit */ | ||
522 | iowrite64(limit, mmio + limit_reg); | ||
523 | reg_val = ioread64(mmio + limit_reg); | ||
524 | if (reg_val != limit) { | ||
525 | iowrite64(base, mmio + limit_reg); | ||
526 | iowrite64(0, mmio + xlat_reg); | ||
527 | return -EIO; | ||
528 | } | ||
529 | |||
530 | dev_dbg(&ntb->pdev->dev, "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val); | ||
531 | |||
532 | return 0; | ||
533 | } | ||
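A typical client drives this op through the common API: query the alignment constraints, obtain a DMA buffer that satisfies them, then program the translation. The sketch below is illustrative only (example_setup_inbound_mw is not a real function, and a real client such as ntb_transport retries rather than failing on misalignment); it mirrors the BAR-size alignment rule enforced above.

static int example_setup_inbound_mw(struct ntb_dev *ntb, int widx)
{
        resource_size_t addr_align, size_align, size_max;
        dma_addr_t dma_addr;
        void *vaddr;
        int rc;

        rc = ntb_mw_get_align(ntb, NTB_DEF_PEER_IDX, widx,
                              &addr_align, &size_align, &size_max);
        if (rc)
                return rc;

        vaddr = dma_alloc_coherent(&ntb->pdev->dev, size_max, &dma_addr,
                                   GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        /* Gen3 rejects a translation address not aligned to the BAR size. */
        if (!IS_ALIGNED(dma_addr, addr_align)) {
                dma_free_coherent(&ntb->pdev->dev, size_max, vaddr, dma_addr);
                return -ENOMEM;
        }

        /* Lands in intel_ntb3_mw_set_trans() on gen3 hardware. */
        return ntb_mw_set_trans(ntb, NTB_DEF_PEER_IDX, widx, dma_addr, size_max);
}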
534 | |||
535 | static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits) | ||
536 | { | ||
537 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | ||
538 | int bit; | ||
539 | |||
540 | if (db_bits & ~ndev->db_valid_mask) | ||
541 | return -EINVAL; | ||
542 | |||
543 | while (db_bits) { | ||
544 | bit = __ffs(db_bits); | ||
545 | iowrite32(1, ndev->peer_mmio + | ||
546 | ndev->peer_reg->db_bell + (bit * 4)); | ||
547 | db_bits &= db_bits - 1; | ||
548 | } | ||
549 | |||
550 | return 0; | ||
551 | } | ||
552 | |||
553 | static u64 intel_ntb3_db_read(struct ntb_dev *ntb) | ||
554 | { | ||
555 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | ||
556 | |||
557 | return ndev_db_read(ndev, | ||
558 | ndev->self_mmio + | ||
559 | ndev->self_reg->db_clear); | ||
560 | } | ||
561 | |||
562 | static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits) | ||
563 | { | ||
564 | struct intel_ntb_dev *ndev = ntb_ndev(ntb); | ||
565 | |||
566 | return ndev_db_write(ndev, db_bits, | ||
567 | ndev->self_mmio + | ||
568 | ndev->self_reg->db_clear); | ||
569 | } | ||
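On gen3 the peer doorbell is rung by writing a 1 into a per-bit 32-bit register (hence the bit * 4 offset above), while the local side reads and acknowledges bits through the clear register. An illustrative round trip via the common API (function names are made up; a real client's db_event context would be its own state, not the ntb_dev):

static void example_ring_peer(struct ntb_dev *ntb, int qp_num)
{
        /* Reaches intel_ntb3_peer_db_set() on gen3 devices. */
        ntb_peer_db_set(ntb, BIT_ULL(qp_num));
}

static void example_db_event(void *ctx, int vec)
{
        struct ntb_dev *ntb = ctx;      /* assumed for this sketch */
        u64 db_bits;

        /* Read the pending bits, then write them back to acknowledge them. */
        db_bits = ntb_db_read(ntb);
        ntb_db_clear(ntb, db_bits);
}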
570 | |||
571 | const struct ntb_dev_ops intel_ntb3_ops = { | ||
572 | .mw_count = intel_ntb_mw_count, | ||
573 | .mw_get_align = intel_ntb_mw_get_align, | ||
574 | .mw_set_trans = intel_ntb3_mw_set_trans, | ||
575 | .peer_mw_count = intel_ntb_peer_mw_count, | ||
576 | .peer_mw_get_addr = intel_ntb_peer_mw_get_addr, | ||
577 | .link_is_up = intel_ntb_link_is_up, | ||
578 | .link_enable = intel_ntb3_link_enable, | ||
579 | .link_disable = intel_ntb_link_disable, | ||
580 | .db_valid_mask = intel_ntb_db_valid_mask, | ||
581 | .db_vector_count = intel_ntb_db_vector_count, | ||
582 | .db_vector_mask = intel_ntb_db_vector_mask, | ||
583 | .db_read = intel_ntb3_db_read, | ||
584 | .db_clear = intel_ntb3_db_clear, | ||
585 | .db_set_mask = intel_ntb_db_set_mask, | ||
586 | .db_clear_mask = intel_ntb_db_clear_mask, | ||
587 | .peer_db_addr = intel_ntb_peer_db_addr, | ||
588 | .peer_db_set = intel_ntb3_peer_db_set, | ||
589 | .spad_is_unsafe = intel_ntb_spad_is_unsafe, | ||
590 | .spad_count = intel_ntb_spad_count, | ||
591 | .spad_read = intel_ntb_spad_read, | ||
592 | .spad_write = intel_ntb_spad_write, | ||
593 | .peer_spad_addr = intel_ntb_peer_spad_addr, | ||
594 | .peer_spad_read = intel_ntb_peer_spad_read, | ||
595 | .peer_spad_write = intel_ntb_peer_spad_write, | ||
596 | }; | ||
597 | |||
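With the ops table now exported from the gen3 file, the shared probe path that remains in ntb_hw_gen1.c picks between the two generations. Abridged sketch only (example_select_ops is not a real function; PCI, ISR and debugfs setup are omitted):

static int example_select_ops(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
{
        if (pdev_is_xeon(pdev)) {
                ndev->ntb.ops = &intel_ntb_ops;
                return xeon_init_dev(ndev);
        } else if (pdev_is_skx_xeon(pdev)) {
                ndev->ntb.ops = &intel_ntb3_ops;
                return skx_init_dev(ndev);
        }

        return -EINVAL;
}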
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.h b/drivers/ntb/hw/intel/ntb_hw_gen3.h index 889453ca2ce6..09fd1d3e6b5b 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen3.h +++ b/drivers/ntb/hw/intel/ntb_hw_gen3.h | |||
@@ -44,6 +44,8 @@ | |||
44 | #ifndef _NTB_INTEL_GEN3_H_ | 44 | #ifndef _NTB_INTEL_GEN3_H_ |
45 | #define _NTB_INTEL_GEN3_H_ | 45 | #define _NTB_INTEL_GEN3_H_ |
46 | 46 | ||
47 | #include "ntb_hw_intel.h" | ||
48 | |||
47 | /* Intel Skylake Xeon hardware */ | 49 | /* Intel Skylake Xeon hardware */ |
48 | #define SKX_IMBAR1SZ_OFFSET 0x00d0 | 50 | #define SKX_IMBAR1SZ_OFFSET 0x00d0 |
49 | #define SKX_IMBAR2SZ_OFFSET 0x00d1 | 51 | #define SKX_IMBAR2SZ_OFFSET 0x00d1 |
@@ -89,4 +91,21 @@ | |||
89 | #define SKX_DB_TOTAL_SHIFT 33 | 91 | #define SKX_DB_TOTAL_SHIFT 33 |
90 | #define SKX_SPAD_COUNT 16 | 92 | #define SKX_SPAD_COUNT 16 |
91 | 93 | ||
94 | static inline u64 skx_db_ioread(void __iomem *mmio) | ||
95 | { | ||
96 | return ioread64(mmio); | ||
97 | } | ||
98 | |||
99 | static inline void skx_db_iowrite(u64 bits, void __iomem *mmio) | ||
100 | { | ||
101 | iowrite64(bits, mmio); | ||
102 | } | ||
103 | |||
104 | ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf, | ||
105 | size_t count, loff_t *offp); | ||
106 | int skx_init_dev(struct intel_ntb_dev *ndev); | ||
107 | int skx_poll_link(struct intel_ntb_dev *ndev); | ||
108 | |||
109 | extern const struct ntb_dev_ops intel_ntb3_ops; | ||
110 | |||
92 | #endif | 111 | #endif |
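The two inline accessors above exist so the doorbell paths shared with gen1 can go through the per-generation register descriptor rather than hard-coding 64-bit MMIO. In ntb_hw_gen3.c they are wired into the skx_reg table, roughly as below (other fields omitted; treat this as a sketch, not the full definition):

static const struct intel_ntb_reg skx_reg = {
        .poll_link      = skx_poll_link,
        .db_ioread      = skx_db_ioread,
        .db_iowrite     = skx_db_iowrite,
        /* ... remaining register-layout fields ... */
};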
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h index bdfa302e0152..46d757c3850e 100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.h +++ b/drivers/ntb/hw/intel/ntb_hw_intel.h | |||
@@ -187,4 +187,64 @@ struct intel_ntb_dev { | |||
187 | #define hb_ndev(__work) container_of(__work, struct intel_ntb_dev, \ | 187 | #define hb_ndev(__work) container_of(__work, struct intel_ntb_dev, \ |
188 | hb_timer.work) | 188 | hb_timer.work) |
189 | 189 | ||
190 | static inline int pdev_is_xeon(struct pci_dev *pdev) | ||
191 | { | ||
192 | switch (pdev->device) { | ||
193 | case PCI_DEVICE_ID_INTEL_NTB_SS_JSF: | ||
194 | case PCI_DEVICE_ID_INTEL_NTB_SS_SNB: | ||
195 | case PCI_DEVICE_ID_INTEL_NTB_SS_IVT: | ||
196 | case PCI_DEVICE_ID_INTEL_NTB_SS_HSX: | ||
197 | case PCI_DEVICE_ID_INTEL_NTB_SS_BDX: | ||
198 | case PCI_DEVICE_ID_INTEL_NTB_PS_JSF: | ||
199 | case PCI_DEVICE_ID_INTEL_NTB_PS_SNB: | ||
200 | case PCI_DEVICE_ID_INTEL_NTB_PS_IVT: | ||
201 | case PCI_DEVICE_ID_INTEL_NTB_PS_HSX: | ||
202 | case PCI_DEVICE_ID_INTEL_NTB_PS_BDX: | ||
203 | case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF: | ||
204 | case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB: | ||
205 | case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT: | ||
206 | case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX: | ||
207 | case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX: | ||
208 | return 1; | ||
209 | } | ||
210 | return 0; | ||
211 | } | ||
212 | |||
213 | static inline int pdev_is_skx_xeon(struct pci_dev *pdev) | ||
214 | { | ||
215 | if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX) | ||
216 | return 1; | ||
217 | |||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | #ifndef ioread64 | ||
222 | #ifdef readq | ||
223 | #define ioread64 readq | ||
224 | #else | ||
225 | #define ioread64 _ioread64 | ||
226 | static inline u64 _ioread64(void __iomem *mmio) | ||
227 | { | ||
228 | u64 low, high; | ||
229 | |||
230 | low = ioread32(mmio); | ||
231 | high = ioread32(mmio + sizeof(u32)); | ||
232 | return low | (high << 32); | ||
233 | } | ||
234 | #endif | ||
235 | #endif | ||
236 | |||
237 | #ifndef iowrite64 | ||
238 | #ifdef writeq | ||
239 | #define iowrite64 writeq | ||
240 | #else | ||
241 | #define iowrite64 _iowrite64 | ||
242 | static inline void _iowrite64(u64 val, void __iomem *mmio) | ||
243 | { | ||
244 | iowrite32(val, mmio); | ||
245 | iowrite32(val >> 32, mmio + sizeof(u32)); | ||
246 | } | ||
247 | #endif | ||
248 | #endif | ||
249 | |||
190 | #endif | 250 | #endif |