aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-11-20 01:41:53 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-11-20 01:41:53 -0500
commitc8a0739b185d11d6e2ca7ad9f5835841d1cfc765 (patch)
tree7f3c2cc0d7ed32fe4f0580217c3734e3ecc1505c
parent020aae3ee58c1af0e7ffc4e2cc9fe4dc630338cb (diff)
parent4201a9918c49bece71d25b2ef30cbadb1fc528e8 (diff)
Merge tag 'ntb-4.15' of git://github.com/jonmason/ntb
Pull ntb updates from Jon Mason: "Support for the switchtec ntb and related changes. Also, a couple of bug fixes" [ The timing isn't great. I had asked people to send me pull requests before my family vacation, and this code has not even been in linux-next as far as I can tell. But Logan Gunthorpe pleaded for its inclusion because the Switchtec driver has apparently been around for a while, just never in linux-next - Linus ] * tag 'ntb-4.15' of git://github.com/jonmason/ntb: ntb: intel: remove b2b memory window workaround for Skylake NTB NTB: make idt_89hpes_cfg const NTB: switchtec_ntb: Update switchtec documentation with notes for NTB NTB: switchtec_ntb: Add memory window support NTB: switchtec_ntb: Implement scratchpad registers NTB: switchtec_ntb: Implement doorbell registers NTB: switchtec_ntb: Add link management NTB: switchtec_ntb: Add skeleton NTB driver NTB: switchtec_ntb: Initialize hardware for doorbells and messages NTB: switchtec_ntb: Initialize hardware for memory windows NTB: switchtec_ntb: Introduce initial NTB driver NTB: Add check and comment for link up to mw_count() and mw_get_align() NTB: Ensure ntb_mw_get_align() is only called when the link is up NTB: switchtec: Add link event notifier callback NTB: switchtec: Add NTB hardware register definitions NTB: switchtec: Export class symbol for use in upper layer driver NTB: switchtec: Move structure definitions into a common header ntb: update maintainer list for Intel NTB driver
-rw-r--r--Documentation/switchtec.txt12
-rw-r--r--MAINTAINERS7
-rw-r--r--drivers/ntb/hw/Kconfig1
-rw-r--r--drivers/ntb/hw/Makefile1
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c16
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c75
-rw-r--r--drivers/ntb/hw/mscc/Kconfig9
-rw-r--r--drivers/ntb/hw/mscc/Makefile1
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c1216
-rw-r--r--drivers/ntb/ntb_transport.c20
-rw-r--r--drivers/ntb/test/ntb_perf.c18
-rw-r--r--drivers/ntb/test/ntb_tool.c6
-rw-r--r--drivers/pci/switch/switchtec.c316
-rw-r--r--include/linux/ntb.h11
-rw-r--r--include/linux/switchtec.h373
15 files changed, 1715 insertions, 367 deletions
diff --git a/Documentation/switchtec.txt b/Documentation/switchtec.txt
index a0a9c7b3d4d5..f788264921ff 100644
--- a/Documentation/switchtec.txt
+++ b/Documentation/switchtec.txt
@@ -78,3 +78,15 @@ The following IOCTLs are also supported by the device:
78 between PCI Function Framework number (used by the event system) 78 between PCI Function Framework number (used by the event system)
79 and Switchtec Logic Port ID and Partition number (which is more 79 and Switchtec Logic Port ID and Partition number (which is more
80 user friendly). 80 user friendly).
81
82
83Non-Transparent Bridge (NTB) Driver
84===================================
85
86An NTB driver is provided for the switchtec hardware in switchtec_ntb.
87Currently, it only supports switches configured with exactly 2
88partitions. It also requires the following configuration settings:
89
90* Both partitions must be able to access each other's GAS spaces.
91 Thus, the bits in the GAS Access Vector under Management Settings
92 must be set to support this.
diff --git a/MAINTAINERS b/MAINTAINERS
index bf3cf8a8974f..290e13fa9b77 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9726,12 +9726,11 @@ S: Supported
9726F: drivers/ntb/hw/idt/ 9726F: drivers/ntb/hw/idt/
9727 9727
9728NTB INTEL DRIVER 9728NTB INTEL DRIVER
9729M: Jon Mason <jdmason@kudzu.us>
9730M: Dave Jiang <dave.jiang@intel.com> 9729M: Dave Jiang <dave.jiang@intel.com>
9731L: linux-ntb@googlegroups.com 9730L: linux-ntb@googlegroups.com
9732S: Supported 9731S: Supported
9733W: https://github.com/jonmason/ntb/wiki 9732W: https://github.com/davejiang/linux/wiki
9734T: git git://github.com/jonmason/ntb.git 9733T: git https://github.com/davejiang/linux.git
9735F: drivers/ntb/hw/intel/ 9734F: drivers/ntb/hw/intel/
9736 9735
9737NTFS FILESYSTEM 9736NTFS FILESYSTEM
@@ -10443,6 +10442,8 @@ F: Documentation/switchtec.txt
10443F: Documentation/ABI/testing/sysfs-class-switchtec 10442F: Documentation/ABI/testing/sysfs-class-switchtec
10444F: drivers/pci/switch/switchtec* 10443F: drivers/pci/switch/switchtec*
10445F: include/uapi/linux/switchtec_ioctl.h 10444F: include/uapi/linux/switchtec_ioctl.h
10445F: include/linux/switchtec.h
10446F: drivers/ntb/hw/mscc/
10446 10447
10447PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support) 10448PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
10448M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> 10449M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
diff --git a/drivers/ntb/hw/Kconfig b/drivers/ntb/hw/Kconfig
index a89243c9fdd3..e51b581fd102 100644
--- a/drivers/ntb/hw/Kconfig
+++ b/drivers/ntb/hw/Kconfig
@@ -1,3 +1,4 @@
1source "drivers/ntb/hw/amd/Kconfig" 1source "drivers/ntb/hw/amd/Kconfig"
2source "drivers/ntb/hw/idt/Kconfig" 2source "drivers/ntb/hw/idt/Kconfig"
3source "drivers/ntb/hw/intel/Kconfig" 3source "drivers/ntb/hw/intel/Kconfig"
4source "drivers/ntb/hw/mscc/Kconfig"
diff --git a/drivers/ntb/hw/Makefile b/drivers/ntb/hw/Makefile
index 87332c3905f0..923c442db750 100644
--- a/drivers/ntb/hw/Makefile
+++ b/drivers/ntb/hw/Makefile
@@ -1,3 +1,4 @@
1obj-$(CONFIG_NTB_AMD) += amd/ 1obj-$(CONFIG_NTB_AMD) += amd/
2obj-$(CONFIG_NTB_IDT) += idt/ 2obj-$(CONFIG_NTB_IDT) += idt/
3obj-$(CONFIG_NTB_INTEL) += intel/ 3obj-$(CONFIG_NTB_INTEL) += intel/
4obj-$(CONFIG_NTB_SWITCHTEC) += mscc/
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index d44d7ef38fe8..0cd79f367f7c 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2628,35 +2628,35 @@ static void idt_pci_remove(struct pci_dev *pdev)
2628/* 2628/*
2629 * IDT PCIe-switch models ports configuration structures 2629 * IDT PCIe-switch models ports configuration structures
2630 */ 2630 */
2631static struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = { 2631static const struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = {
2632 .name = "89HPES24NT6AG2", 2632 .name = "89HPES24NT6AG2",
2633 .port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12} 2633 .port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12}
2634}; 2634};
2635static struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = { 2635static const struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = {
2636 .name = "89HPES32NT8AG2", 2636 .name = "89HPES32NT8AG2",
2637 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} 2637 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
2638}; 2638};
2639static struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = { 2639static const struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = {
2640 .name = "89HPES32NT8BG2", 2640 .name = "89HPES32NT8BG2",
2641 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} 2641 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
2642}; 2642};
2643static struct idt_89hpes_cfg idt_89hpes12nt12g2_config = { 2643static const struct idt_89hpes_cfg idt_89hpes12nt12g2_config = {
2644 .name = "89HPES12NT12G2", 2644 .name = "89HPES12NT12G2",
2645 .port_cnt = 3, .ports = {0, 8, 16} 2645 .port_cnt = 3, .ports = {0, 8, 16}
2646}; 2646};
2647static struct idt_89hpes_cfg idt_89hpes16nt16g2_config = { 2647static const struct idt_89hpes_cfg idt_89hpes16nt16g2_config = {
2648 .name = "89HPES16NT16G2", 2648 .name = "89HPES16NT16G2",
2649 .port_cnt = 4, .ports = {0, 8, 12, 16} 2649 .port_cnt = 4, .ports = {0, 8, 12, 16}
2650}; 2650};
2651static struct idt_89hpes_cfg idt_89hpes24nt24g2_config = { 2651static const struct idt_89hpes_cfg idt_89hpes24nt24g2_config = {
2652 .name = "89HPES24NT24G2", 2652 .name = "89HPES24NT24G2",
2653 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} 2653 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
2654}; 2654};
2655static struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = { 2655static const struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = {
2656 .name = "89HPES32NT24AG2", 2656 .name = "89HPES32NT24AG2",
2657 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} 2657 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
2658}; 2658};
2659static struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = { 2659static const struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = {
2660 .name = "89HPES32NT24BG2", 2660 .name = "89HPES32NT24BG2",
2661 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} 2661 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
2662}; 2662};
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 2557e2c05b90..4de074a86073 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -1742,89 +1742,18 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
1742{ 1742{
1743 struct pci_dev *pdev; 1743 struct pci_dev *pdev;
1744 void __iomem *mmio; 1744 void __iomem *mmio;
1745 resource_size_t bar_size;
1746 phys_addr_t bar_addr; 1745 phys_addr_t bar_addr;
1747 int b2b_bar;
1748 u8 bar_sz;
1749 1746
1750 pdev = ndev->ntb.pdev; 1747 pdev = ndev->ntb.pdev;
1751 mmio = ndev->self_mmio; 1748 mmio = ndev->self_mmio;
1752 1749
1753 if (ndev->b2b_idx == UINT_MAX) {
1754 dev_dbg(&pdev->dev, "not using b2b mw\n");
1755 b2b_bar = 0;
1756 ndev->b2b_off = 0;
1757 } else {
1758 b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
1759 if (b2b_bar < 0)
1760 return -EIO;
1761
1762 dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
1763
1764 bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
1765
1766 dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
1767
1768 if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) {
1769 dev_dbg(&pdev->dev, "b2b using first half of bar\n");
1770 ndev->b2b_off = bar_size >> 1;
1771 } else if (bar_size >= XEON_B2B_MIN_SIZE) {
1772 dev_dbg(&pdev->dev, "b2b using whole bar\n");
1773 ndev->b2b_off = 0;
1774 --ndev->mw_count;
1775 } else {
1776 dev_dbg(&pdev->dev, "b2b bar size is too small\n");
1777 return -EIO;
1778 }
1779 }
1780
1781 /*
1782 * Reset the secondary bar sizes to match the primary bar sizes,
1783 * except disable or halve the size of the b2b secondary bar.
1784 */
1785 pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz);
1786 dev_dbg(&pdev->dev, "IMBAR1SZ %#x\n", bar_sz);
1787 if (b2b_bar == 1) {
1788 if (ndev->b2b_off)
1789 bar_sz -= 1;
1790 else
1791 bar_sz = 0;
1792 }
1793
1794 pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz);
1795 pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz);
1796 dev_dbg(&pdev->dev, "EMBAR1SZ %#x\n", bar_sz);
1797
1798 pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz);
1799 dev_dbg(&pdev->dev, "IMBAR2SZ %#x\n", bar_sz);
1800 if (b2b_bar == 2) {
1801 if (ndev->b2b_off)
1802 bar_sz -= 1;
1803 else
1804 bar_sz = 0;
1805 }
1806
1807 pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz);
1808 pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz);
1809 dev_dbg(&pdev->dev, "EMBAR2SZ %#x\n", bar_sz);
1810
1811 /* SBAR01 hit by first part of the b2b bar */
1812 if (b2b_bar == 0)
1813 bar_addr = addr->bar0_addr;
1814 else if (b2b_bar == 1)
1815 bar_addr = addr->bar2_addr64;
1816 else if (b2b_bar == 2)
1817 bar_addr = addr->bar4_addr64;
1818 else
1819 return -EIO;
1820
1821 /* setup incoming bar limits == base addrs (zero length windows) */ 1750 /* setup incoming bar limits == base addrs (zero length windows) */
1822 bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0); 1751 bar_addr = addr->bar2_addr64;
1823 iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET); 1752 iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
1824 bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET); 1753 bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
1825 dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr); 1754 dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);
1826 1755
1827 bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); 1756 bar_addr = addr->bar4_addr64;
1828 iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET); 1757 iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
1829 bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET); 1758 bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
1830 dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr); 1759 dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);
diff --git a/drivers/ntb/hw/mscc/Kconfig b/drivers/ntb/hw/mscc/Kconfig
new file mode 100644
index 000000000000..013ed6716438
--- /dev/null
+++ b/drivers/ntb/hw/mscc/Kconfig
@@ -0,0 +1,9 @@
1config NTB_SWITCHTEC
2 tristate "MicroSemi Switchtec Non-Transparent Bridge Support"
3 select PCI_SW_SWITCHTEC
4 help
5 Enables NTB support for Switchtec PCI switches. This also
6 selects the Switchtec management driver as they share the same
7 hardware interface.
8
9 If unsure, say N.
diff --git a/drivers/ntb/hw/mscc/Makefile b/drivers/ntb/hw/mscc/Makefile
new file mode 100644
index 000000000000..064686ead1ba
--- /dev/null
+++ b/drivers/ntb/hw/mscc/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_NTB_SWITCHTEC) += ntb_hw_switchtec.o
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
new file mode 100644
index 000000000000..afe8ed6f3b23
--- /dev/null
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -0,0 +1,1216 @@
1/*
2 * Microsemi Switchtec(tm) PCIe Management Driver
3 * Copyright (c) 2017, Microsemi Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16#include <linux/switchtec.h>
17#include <linux/module.h>
18#include <linux/delay.h>
19#include <linux/kthread.h>
20#include <linux/interrupt.h>
21#include <linux/ntb.h>
22
23MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
24MODULE_VERSION("0.1");
25MODULE_LICENSE("GPL");
26MODULE_AUTHOR("Microsemi Corporation");
27
28static ulong max_mw_size = SZ_2M;
29module_param(max_mw_size, ulong, 0644);
30MODULE_PARM_DESC(max_mw_size,
31 "Max memory window size reported to the upper layer");
32
33static bool use_lut_mws;
34module_param(use_lut_mws, bool, 0644);
35MODULE_PARM_DESC(use_lut_mws,
36 "Enable the use of the LUT based memory windows");
37
38#ifndef ioread64
39#ifdef readq
40#define ioread64 readq
41#else
42#define ioread64 _ioread64
43static inline u64 _ioread64(void __iomem *mmio)
44{
45 u64 low, high;
46
47 low = ioread32(mmio);
48 high = ioread32(mmio + sizeof(u32));
49 return low | (high << 32);
50}
51#endif
52#endif
53
54#ifndef iowrite64
55#ifdef writeq
56#define iowrite64 writeq
57#else
58#define iowrite64 _iowrite64
59static inline void _iowrite64(u64 val, void __iomem *mmio)
60{
61 iowrite32(val, mmio);
62 iowrite32(val >> 32, mmio + sizeof(u32));
63}
64#endif
65#endif
66
67#define SWITCHTEC_NTB_MAGIC 0x45CC0001
68#define MAX_MWS 128
69
70struct shared_mw {
71 u32 magic;
72 u32 link_sta;
73 u32 partition_id;
74 u64 mw_sizes[MAX_MWS];
75 u32 spad[128];
76};
77
78#define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
79#define LUT_SIZE SZ_64K
80
81struct switchtec_ntb {
82 struct ntb_dev ntb;
83 struct switchtec_dev *stdev;
84
85 int self_partition;
86 int peer_partition;
87
88 int doorbell_irq;
89 int message_irq;
90
91 struct ntb_info_regs __iomem *mmio_ntb;
92 struct ntb_ctrl_regs __iomem *mmio_ctrl;
93 struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
94 struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
95 struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
96 struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
97
98 struct shared_mw *self_shared;
99 struct shared_mw __iomem *peer_shared;
100 dma_addr_t self_shared_dma;
101
102 u64 db_mask;
103 u64 db_valid_mask;
104 int db_shift;
105 int db_peer_shift;
106
107 /* synchronize rmw access of db_mask and hw reg */
108 spinlock_t db_mask_lock;
109
110 int nr_direct_mw;
111 int nr_lut_mw;
112 int direct_mw_to_bar[MAX_DIRECT_MW];
113
114 int peer_nr_direct_mw;
115 int peer_nr_lut_mw;
116 int peer_direct_mw_to_bar[MAX_DIRECT_MW];
117
118 bool link_is_up;
119 enum ntb_speed link_speed;
120 enum ntb_width link_width;
121};
122
123static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
124{
125 return container_of(ntb, struct switchtec_ntb, ntb);
126}
127
128static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
129 struct ntb_ctrl_regs __iomem *ctl,
130 u32 op, int wait_status)
131{
132 static const char * const op_text[] = {
133 [NTB_CTRL_PART_OP_LOCK] = "lock",
134 [NTB_CTRL_PART_OP_CFG] = "configure",
135 [NTB_CTRL_PART_OP_RESET] = "reset",
136 };
137
138 int i;
139 u32 ps;
140 int status;
141
142 switch (op) {
143 case NTB_CTRL_PART_OP_LOCK:
144 status = NTB_CTRL_PART_STATUS_LOCKING;
145 break;
146 case NTB_CTRL_PART_OP_CFG:
147 status = NTB_CTRL_PART_STATUS_CONFIGURING;
148 break;
149 case NTB_CTRL_PART_OP_RESET:
150 status = NTB_CTRL_PART_STATUS_RESETTING;
151 break;
152 default:
153 return -EINVAL;
154 }
155
156 iowrite32(op, &ctl->partition_op);
157
158 for (i = 0; i < 1000; i++) {
159 if (msleep_interruptible(50) != 0) {
160 iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
161 return -EINTR;
162 }
163
164 ps = ioread32(&ctl->partition_status) & 0xFFFF;
165
166 if (ps != status)
167 break;
168 }
169
170 if (ps == wait_status)
171 return 0;
172
173 if (ps == status) {
174 dev_err(&sndev->stdev->dev,
175 "Timed out while peforming %s (%d). (%08x)",
176 op_text[op], op,
177 ioread32(&ctl->partition_status));
178
179 return -ETIMEDOUT;
180 }
181
182 return -EIO;
183}
184
185static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
186 u32 val)
187{
188 if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_self_dbmsg->omsg))
189 return -EINVAL;
190
191 iowrite32(val, &sndev->mmio_self_dbmsg->omsg[idx].msg);
192
193 return 0;
194}
195
196static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
197{
198 struct switchtec_ntb *sndev = ntb_sndev(ntb);
199 int nr_direct_mw = sndev->peer_nr_direct_mw;
200 int nr_lut_mw = sndev->peer_nr_lut_mw - 1;
201
202 if (pidx != NTB_DEF_PEER_IDX)
203 return -EINVAL;
204
205 if (!use_lut_mws)
206 nr_lut_mw = 0;
207
208 return nr_direct_mw + nr_lut_mw;
209}
210
211static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
212{
213 return mw_idx - sndev->nr_direct_mw + 1;
214}
215
216static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
217{
218 return mw_idx - sndev->peer_nr_direct_mw + 1;
219}
220
221static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
222 int widx, resource_size_t *addr_align,
223 resource_size_t *size_align,
224 resource_size_t *size_max)
225{
226 struct switchtec_ntb *sndev = ntb_sndev(ntb);
227 int lut;
228 resource_size_t size;
229
230 if (pidx != NTB_DEF_PEER_IDX)
231 return -EINVAL;
232
233 lut = widx >= sndev->peer_nr_direct_mw;
234 size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
235
236 if (size == 0)
237 return -EINVAL;
238
239 if (addr_align)
240 *addr_align = lut ? size : SZ_4K;
241
242 if (size_align)
243 *size_align = lut ? size : SZ_4K;
244
245 if (size_max)
246 *size_max = size;
247
248 return 0;
249}
250
251static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
252{
253 struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
254 int bar = sndev->peer_direct_mw_to_bar[idx];
255 u32 ctl_val;
256
257 ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
258 ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
259 iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
260 iowrite32(0, &ctl->bar_entry[bar].win_size);
261 iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
262}
263
264static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
265{
266 struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
267
268 iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
269}
270
271static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
272 dma_addr_t addr, resource_size_t size)
273{
274 int xlate_pos = ilog2(size);
275 int bar = sndev->peer_direct_mw_to_bar[idx];
276 struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
277 u32 ctl_val;
278
279 ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
280 ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
281
282 iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
283 iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
284 iowrite64(sndev->self_partition | addr,
285 &ctl->bar_entry[bar].xlate_addr);
286}
287
288static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
289 dma_addr_t addr, resource_size_t size)
290{
291 struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
292
293 iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
294 &ctl->lut_entry[peer_lut_index(sndev, idx)]);
295}
296
297static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
298 dma_addr_t addr, resource_size_t size)
299{
300 struct switchtec_ntb *sndev = ntb_sndev(ntb);
301 struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
302 int xlate_pos = ilog2(size);
303 int nr_direct_mw = sndev->peer_nr_direct_mw;
304 int rc;
305
306 if (pidx != NTB_DEF_PEER_IDX)
307 return -EINVAL;
308
309 dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap",
310 widx, pidx, &addr, &size);
311
312 if (widx >= switchtec_ntb_mw_count(ntb, pidx))
313 return -EINVAL;
314
315 if (xlate_pos < 12)
316 return -EINVAL;
317
318 rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
319 NTB_CTRL_PART_STATUS_LOCKED);
320 if (rc)
321 return rc;
322
323 if (addr == 0 || size == 0) {
324 if (widx < nr_direct_mw)
325 switchtec_ntb_mw_clr_direct(sndev, widx);
326 else
327 switchtec_ntb_mw_clr_lut(sndev, widx);
328 } else {
329 if (widx < nr_direct_mw)
330 switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
331 else
332 switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
333 }
334
335 rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
336 NTB_CTRL_PART_STATUS_NORMAL);
337
338 if (rc == -EIO) {
339 dev_err(&sndev->stdev->dev,
340 "Hardware reported an error configuring mw %d: %08x",
341 widx, ioread32(&ctl->bar_error));
342
343 if (widx < nr_direct_mw)
344 switchtec_ntb_mw_clr_direct(sndev, widx);
345 else
346 switchtec_ntb_mw_clr_lut(sndev, widx);
347
348 switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
349 NTB_CTRL_PART_STATUS_NORMAL);
350 }
351
352 return rc;
353}
354
355static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
356{
357 struct switchtec_ntb *sndev = ntb_sndev(ntb);
358
359 return sndev->nr_direct_mw + (use_lut_mws ? sndev->nr_lut_mw - 1 : 0);
360}
361
362static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
363 int idx, phys_addr_t *base,
364 resource_size_t *size)
365{
366 int bar = sndev->direct_mw_to_bar[idx];
367 size_t offset = 0;
368
369 if (bar < 0)
370 return -EINVAL;
371
372 if (idx == 0) {
373 /*
374 * This is the direct BAR shared with the LUTs
375 * which means the actual window will be offset
376 * by the size of all the LUT entries.
377 */
378
379 offset = LUT_SIZE * sndev->nr_lut_mw;
380 }
381
382 if (base)
383 *base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
384
385 if (size) {
386 *size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
387 if (offset && *size > offset)
388 *size = offset;
389
390 if (*size > max_mw_size)
391 *size = max_mw_size;
392 }
393
394 return 0;
395}
396
397static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
398 int idx, phys_addr_t *base,
399 resource_size_t *size)
400{
401 int bar = sndev->direct_mw_to_bar[0];
402 int offset;
403
404 offset = LUT_SIZE * lut_index(sndev, idx);
405
406 if (base)
407 *base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
408
409 if (size)
410 *size = LUT_SIZE;
411
412 return 0;
413}
414
415static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
416 phys_addr_t *base,
417 resource_size_t *size)
418{
419 struct switchtec_ntb *sndev = ntb_sndev(ntb);
420
421 if (idx < sndev->nr_direct_mw)
422 return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
423 else if (idx < switchtec_ntb_peer_mw_count(ntb))
424 return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
425 else
426 return -EINVAL;
427}
428
429static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
430 int partition,
431 enum ntb_speed *speed,
432 enum ntb_width *width)
433{
434 struct switchtec_dev *stdev = sndev->stdev;
435
436 u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
437 u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);
438
439 if (speed)
440 *speed = (linksta >> 16) & 0xF;
441
442 if (width)
443 *width = (linksta >> 20) & 0x3F;
444}
445
446static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
447{
448 enum ntb_speed self_speed, peer_speed;
449 enum ntb_width self_width, peer_width;
450
451 if (!sndev->link_is_up) {
452 sndev->link_speed = NTB_SPEED_NONE;
453 sndev->link_width = NTB_WIDTH_NONE;
454 return;
455 }
456
457 switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
458 &self_speed, &self_width);
459 switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
460 &peer_speed, &peer_width);
461
462 sndev->link_speed = min(self_speed, peer_speed);
463 sndev->link_width = min(self_width, peer_width);
464}
465
466enum {
467 LINK_MESSAGE = 0,
468 MSG_LINK_UP = 1,
469 MSG_LINK_DOWN = 2,
470 MSG_CHECK_LINK = 3,
471};
472
473static void switchtec_ntb_check_link(struct switchtec_ntb *sndev)
474{
475 int link_sta;
476 int old = sndev->link_is_up;
477
478 link_sta = sndev->self_shared->link_sta;
479 if (link_sta) {
480 u64 peer = ioread64(&sndev->peer_shared->magic);
481
482 if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
483 link_sta = peer >> 32;
484 else
485 link_sta = 0;
486 }
487
488 sndev->link_is_up = link_sta;
489 switchtec_ntb_set_link_speed(sndev);
490
491 if (link_sta != old) {
492 switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
493 ntb_link_event(&sndev->ntb);
494 dev_info(&sndev->stdev->dev, "ntb link %s",
495 link_sta ? "up" : "down");
496 }
497}
498
499static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
500{
501 struct switchtec_ntb *sndev = stdev->sndev;
502
503 switchtec_ntb_check_link(sndev);
504}
505
506static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
507 enum ntb_speed *speed,
508 enum ntb_width *width)
509{
510 struct switchtec_ntb *sndev = ntb_sndev(ntb);
511
512 if (speed)
513 *speed = sndev->link_speed;
514 if (width)
515 *width = sndev->link_width;
516
517 return sndev->link_is_up;
518}
519
520static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
521 enum ntb_speed max_speed,
522 enum ntb_width max_width)
523{
524 struct switchtec_ntb *sndev = ntb_sndev(ntb);
525
526 dev_dbg(&sndev->stdev->dev, "enabling link");
527
528 sndev->self_shared->link_sta = 1;
529 switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
530
531 switchtec_ntb_check_link(sndev);
532
533 return 0;
534}
535
536static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
537{
538 struct switchtec_ntb *sndev = ntb_sndev(ntb);
539
540 dev_dbg(&sndev->stdev->dev, "disabling link");
541
542 sndev->self_shared->link_sta = 0;
543 switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
544
545 switchtec_ntb_check_link(sndev);
546
547 return 0;
548}
549
550static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
551{
552 struct switchtec_ntb *sndev = ntb_sndev(ntb);
553
554 return sndev->db_valid_mask;
555}
556
557static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
558{
559 return 1;
560}
561
562static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
563{
564 struct switchtec_ntb *sndev = ntb_sndev(ntb);
565
566 if (db_vector < 0 || db_vector > 1)
567 return 0;
568
569 return sndev->db_valid_mask;
570}
571
572static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
573{
574 u64 ret;
575 struct switchtec_ntb *sndev = ntb_sndev(ntb);
576
577 ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
578
579 return ret & sndev->db_valid_mask;
580}
581
582static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
583{
584 struct switchtec_ntb *sndev = ntb_sndev(ntb);
585
586 iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);
587
588 return 0;
589}
590
591static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
592{
593 unsigned long irqflags;
594 struct switchtec_ntb *sndev = ntb_sndev(ntb);
595
596 if (db_bits & ~sndev->db_valid_mask)
597 return -EINVAL;
598
599 spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
600
601 sndev->db_mask |= db_bits << sndev->db_shift;
602 iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
603
604 spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
605
606 return 0;
607}
608
609static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
610{
611 unsigned long irqflags;
612 struct switchtec_ntb *sndev = ntb_sndev(ntb);
613
614 if (db_bits & ~sndev->db_valid_mask)
615 return -EINVAL;
616
617 spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
618
619 sndev->db_mask &= ~(db_bits << sndev->db_shift);
620 iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
621
622 spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
623
624 return 0;
625}
626
627static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
628{
629 struct switchtec_ntb *sndev = ntb_sndev(ntb);
630
631 return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
632}
633
634static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
635 phys_addr_t *db_addr,
636 resource_size_t *db_size)
637{
638 struct switchtec_ntb *sndev = ntb_sndev(ntb);
639 unsigned long offset;
640
641 offset = (unsigned long)sndev->mmio_self_dbmsg->odb -
642 (unsigned long)sndev->stdev->mmio;
643
644 offset += sndev->db_shift / 8;
645
646 if (db_addr)
647 *db_addr = pci_resource_start(ntb->pdev, 0) + offset;
648 if (db_size)
649 *db_size = sizeof(u32);
650
651 return 0;
652}
653
654static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
655{
656 struct switchtec_ntb *sndev = ntb_sndev(ntb);
657
658 iowrite64(db_bits << sndev->db_peer_shift,
659 &sndev->mmio_self_dbmsg->odb);
660
661 return 0;
662}
663
664static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
665{
666 struct switchtec_ntb *sndev = ntb_sndev(ntb);
667
668 return ARRAY_SIZE(sndev->self_shared->spad);
669}
670
671static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
672{
673 struct switchtec_ntb *sndev = ntb_sndev(ntb);
674
675 if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
676 return 0;
677
678 if (!sndev->self_shared)
679 return 0;
680
681 return sndev->self_shared->spad[idx];
682}
683
684static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
685{
686 struct switchtec_ntb *sndev = ntb_sndev(ntb);
687
688 if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
689 return -EINVAL;
690
691 if (!sndev->self_shared)
692 return -EIO;
693
694 sndev->self_shared->spad[idx] = val;
695
696 return 0;
697}
698
699static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
700 int sidx)
701{
702 struct switchtec_ntb *sndev = ntb_sndev(ntb);
703
704 if (pidx != NTB_DEF_PEER_IDX)
705 return -EINVAL;
706
707 if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
708 return 0;
709
710 if (!sndev->peer_shared)
711 return 0;
712
713 return ioread32(&sndev->peer_shared->spad[sidx]);
714}
715
716static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
717 int sidx, u32 val)
718{
719 struct switchtec_ntb *sndev = ntb_sndev(ntb);
720
721 if (pidx != NTB_DEF_PEER_IDX)
722 return -EINVAL;
723
724 if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
725 return -EINVAL;
726
727 if (!sndev->peer_shared)
728 return -EIO;
729
730 iowrite32(val, &sndev->peer_shared->spad[sidx]);
731
732 return 0;
733}
734
735static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
736 int sidx, phys_addr_t *spad_addr)
737{
738 struct switchtec_ntb *sndev = ntb_sndev(ntb);
739 unsigned long offset;
740
741 if (pidx != NTB_DEF_PEER_IDX)
742 return -EINVAL;
743
744 offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
745 (unsigned long)sndev->stdev->mmio;
746
747 if (spad_addr)
748 *spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
749
750 return 0;
751}
752
/*
 * NTB hardware-abstraction callbacks registered with the NTB core via
 * sndev->ntb.ops.  Only the operations this driver implements are
 * populated; the core treats missing entries as unsupported.
 */
static const struct ntb_dev_ops switchtec_ntb_ops = {
	.mw_count = switchtec_ntb_mw_count,
	.mw_get_align = switchtec_ntb_mw_get_align,
	.mw_set_trans = switchtec_ntb_mw_set_trans,
	.peer_mw_count = switchtec_ntb_peer_mw_count,
	.peer_mw_get_addr = switchtec_ntb_peer_mw_get_addr,
	.link_is_up = switchtec_ntb_link_is_up,
	.link_enable = switchtec_ntb_link_enable,
	.link_disable = switchtec_ntb_link_disable,
	.db_valid_mask = switchtec_ntb_db_valid_mask,
	.db_vector_count = switchtec_ntb_db_vector_count,
	.db_vector_mask = switchtec_ntb_db_vector_mask,
	.db_read = switchtec_ntb_db_read,
	.db_clear = switchtec_ntb_db_clear,
	.db_set_mask = switchtec_ntb_db_set_mask,
	.db_clear_mask = switchtec_ntb_db_clear_mask,
	.db_read_mask = switchtec_ntb_db_read_mask,
	.peer_db_addr = switchtec_ntb_peer_db_addr,
	.peer_db_set = switchtec_ntb_peer_db_set,
	.spad_count = switchtec_ntb_spad_count,
	.spad_read = switchtec_ntb_spad_read,
	.spad_write = switchtec_ntb_spad_write,
	.peer_spad_read = switchtec_ntb_peer_spad_read,
	.peer_spad_write = switchtec_ntb_peer_spad_write,
	.peer_spad_addr = switchtec_ntb_peer_spad_addr,
};
779
780static void switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
781{
782 u64 part_map;
783
784 sndev->ntb.pdev = sndev->stdev->pdev;
785 sndev->ntb.topo = NTB_TOPO_SWITCH;
786 sndev->ntb.ops = &switchtec_ntb_ops;
787
788 sndev->self_partition = sndev->stdev->partition;
789
790 sndev->mmio_ntb = sndev->stdev->mmio_ntb;
791 part_map = ioread64(&sndev->mmio_ntb->ep_map);
792 part_map &= ~(1 << sndev->self_partition);
793 sndev->peer_partition = ffs(part_map) - 1;
794
795 dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d (%llx)",
796 sndev->self_partition, sndev->stdev->partition_count,
797 part_map);
798
799 sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
800 SWITCHTEC_NTB_REG_CTRL_OFFSET;
801 sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
802 SWITCHTEC_NTB_REG_DBMSG_OFFSET;
803
804 sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
805 sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
806 sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
807}
808
809static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
810{
811 int i;
812 int cnt = 0;
813
814 for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
815 u32 r = ioread32(&ctrl->bar_entry[i].ctl);
816
817 if (r & NTB_CTRL_BAR_VALID)
818 map[cnt++] = i;
819 }
820
821 return cnt;
822}
823
824static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
825{
826 sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
827 sndev->mmio_self_ctrl);
828
829 sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
830 sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
831
832 dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut",
833 sndev->nr_direct_mw, sndev->nr_lut_mw);
834
835 sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
836 sndev->mmio_peer_ctrl);
837
838 sndev->peer_nr_lut_mw =
839 ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
840 sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
841
842 dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut",
843 sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
844
845}
846
847/*
848 * There are 64 doorbells in the switch hardware but this is
849 * shared among all partitions. So we must split them in half
850 * (32 for each partition). However, the message interrupts are
851 * also shared with the top 4 doorbells so we just limit this to
852 * 28 doorbells per partition
853 */
854static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
855{
856 sndev->db_valid_mask = 0x0FFFFFFF;
857
858 if (sndev->self_partition < sndev->peer_partition) {
859 sndev->db_shift = 0;
860 sndev->db_peer_shift = 32;
861 } else {
862 sndev->db_shift = 32;
863 sndev->db_peer_shift = 0;
864 }
865
866 sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
867 iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
868 iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
869 &sndev->mmio_self_dbmsg->odb_mask);
870}
871
872static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
873{
874 int i;
875 u32 msg_map = 0;
876
877 for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
878 int m = i | sndev->peer_partition << 2;
879
880 msg_map |= m << i * 8;
881 }
882
883 iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
884
885 for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
886 iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
887 &sndev->mmio_self_dbmsg->imsg[i]);
888}
889
/*
 * Program the partition's requester-ID table so TLPs from the root
 * complex and the host bridge pass through the NTB.  The partition
 * configuration must be locked before the table is written and is
 * applied (returned to the normal state) afterwards.
 *
 * Returns 0 on success, -EFAULT if the hardware table is too small,
 * or the error from the partition operation.
 */
static int switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
{
	int rc = 0;
	u16 req_id;
	u32 error;

	/* Host bridge requester ID as observed by the switch */
	req_id = ioread16(&sndev->mmio_ntb->requester_id);

	/* We need two table slots: root complex + host bridge */
	if (ioread32(&sndev->mmio_self_ctrl->req_id_table_size) < 2) {
		dev_err(&sndev->stdev->dev,
			"Not enough requester IDs available.");
		return -EFAULT;
	}

	/* Lock the partition configuration before modifying the table */
	rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
				   NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
		  &sndev->mmio_self_ctrl->partition_ctrl);

	/*
	 * Root Complex Requester ID (which is 0:00.0)
	 */
	iowrite32(0 << 16 | NTB_CTRL_REQ_ID_EN,
		  &sndev->mmio_self_ctrl->req_id_table[0]);

	/*
	 * Host Bridge Requester ID (as read from the mmap address)
	 */
	iowrite32(req_id << 16 | NTB_CTRL_REQ_ID_EN,
		  &sndev->mmio_self_ctrl->req_id_table[1]);

	/* Apply the new configuration and unlock the partition */
	rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
				   NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc == -EIO) {
		/* -EIO from the CFG op means the hardware rejected the table */
		error = ioread32(&sndev->mmio_self_ctrl->req_id_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up the requester ID table: %08x",
			error);
	}

	return rc;
}
937
938static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
939{
940 int i;
941
942 memset(sndev->self_shared, 0, LUT_SIZE);
943 sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
944 sndev->self_shared->partition_id = sndev->stdev->partition;
945
946 for (i = 0; i < sndev->nr_direct_mw; i++) {
947 int bar = sndev->direct_mw_to_bar[i];
948 resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
949
950 if (i == 0)
951 sz = min_t(resource_size_t, sz,
952 LUT_SIZE * sndev->nr_lut_mw);
953
954 sndev->self_shared->mw_sizes[i] = sz;
955 }
956
957 for (i = 0; i < sndev->nr_lut_mw; i++) {
958 int idx = sndev->nr_direct_mw + i;
959
960 sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
961 }
962}
963
/*
 * Set up the shared memory window used to exchange per-side information
 * with the peer: allocate a coherent buffer for our side, program the
 * peer partition's first LUT entry to point at it, and map the peer's
 * corresponding window so we can read theirs.
 *
 * Returns 0 on success or a negative errno; on failure the coherent
 * buffer is freed before returning.
 */
static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int bar = sndev->direct_mw_to_bar[0];
	u32 ctl_val;
	int rc;

	/* DMA-coherent buffer the peer's LUT window will translate into */
	sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
						 LUT_SIZE,
						 &sndev->self_shared_dma,
						 GFP_KERNEL);
	if (!sndev->self_shared) {
		dev_err(&sndev->stdev->dev,
			"unable to allocate memory for shared mw");
		return -ENOMEM;
	}

	switchtec_ntb_init_shared(sndev);

	/* The peer partition must be locked while its BAR/LUT is changed */
	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		goto unalloc_and_exit;

	/*
	 * Enable LUT translation on the first direct-window BAR:
	 * keep the low control byte, set window size (log2, bits 8+)
	 * and LUT entry count (bits 14+).
	 */
	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val &= 0xFF;
	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
	ctl_val |= ilog2(LUT_SIZE) << 8;
	ctl_val |= (sndev->nr_lut_mw - 1) << 14;
	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);

	/* First LUT entry: enabled, targets our partition and DMA buffer */
	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) |
		   sndev->self_shared_dma),
		  &ctl->lut_entry[0]);

	/* Apply the configuration and return the partition to normal */
	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up shared MW: %08x / %08x",
			bar_error, lut_error);
		goto unalloc_and_exit;
	}

	/* Map the peer's shared window so we can read their side */
	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, bar, LUT_SIZE);
	if (!sndev->peer_shared) {
		rc = -ENOMEM;
		goto unalloc_and_exit;
	}

	dev_dbg(&sndev->stdev->dev, "Shared MW Ready");
	return 0;

unalloc_and_exit:
	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
			  sndev->self_shared, sndev->self_shared_dma);

	return rc;
}
1027
1028static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
1029{
1030 if (sndev->peer_shared)
1031 pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
1032
1033 if (sndev->self_shared)
1034 dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1035 sndev->self_shared,
1036 sndev->self_shared_dma);
1037}
1038
1039static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
1040{
1041 struct switchtec_ntb *sndev = dev;
1042
1043 dev_dbg(&sndev->stdev->dev, "doorbell\n");
1044
1045 ntb_db_event(&sndev->ntb, 0);
1046
1047 return IRQ_HANDLED;
1048}
1049
1050static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
1051{
1052 int i;
1053 struct switchtec_ntb *sndev = dev;
1054
1055 for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1056 u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
1057
1058 if (msg & NTB_DBMSG_IMSG_STATUS) {
1059 dev_dbg(&sndev->stdev->dev, "message: %d %08x\n", i,
1060 (u32)msg);
1061 iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
1062
1063 if (i == LINK_MESSAGE)
1064 switchtec_ntb_check_link(sndev);
1065 }
1066 }
1067
1068 return IRQ_HANDLED;
1069}
1070
/*
 * Pick interrupt vectors for doorbells and messages (avoiding the event
 * vector already used by the switchtec driver), program the inbound
 * doorbell vector map accordingly, and request the two IRQs.
 *
 * Returns 0 on success; on failure any IRQ already requested is freed.
 */
static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
{
	int i;
	int rc;
	int doorbell_irq = 0;
	int message_irq = 0;
	int event_irq;
	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);

	/* Vector already claimed by the switchtec event handler */
	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);

	/* Choose the lowest vectors not colliding with event_irq */
	while (doorbell_irq == event_irq)
		doorbell_irq++;
	while (message_irq == doorbell_irq ||
	       message_irq == event_irq)
		message_irq++;

	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d",
		event_irq, doorbell_irq, message_irq);

	/* Route all doorbells except the top 4 to the doorbell vector... */
	for (i = 0; i < idb_vecs - 4; i++)
		iowrite8(doorbell_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);

	/* ...and the top 4 (shared with messages) to the message vector */
	for (; i < idb_vecs; i++)
		iowrite8(message_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);

	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);

	rc = request_irq(sndev->doorbell_irq,
			 switchtec_ntb_doorbell_isr, 0,
			 "switchtec_ntb_doorbell", sndev);
	if (rc)
		return rc;

	rc = request_irq(sndev->message_irq,
			 switchtec_ntb_message_isr, 0,
			 "switchtec_ntb_message", sndev);
	if (rc) {
		/* Unwind the doorbell IRQ so we fail cleanly */
		free_irq(sndev->doorbell_irq, sndev);
		return rc;
	}

	return 0;
}
1118
/* Release the doorbell and message IRQs requested at init time. */
static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
{
	free_irq(sndev->doorbell_irq, sndev);
	free_irq(sndev->message_irq, sndev);
}
1124
/*
 * Class-interface add callback: called for every registered switchtec
 * device.  Creates and registers an NTB device for devices with the NTB
 * class code, performing hardware initialization in order (memory
 * windows, doorbells, messages, requester IDs, shared window, IRQs)
 * and unwinding in reverse on failure.
 *
 * Returns 0 on success, -ENODEV for non-NTB devices, or a negative
 * errno from a failed init step.
 */
static int switchtec_ntb_add(struct device *dev,
			     struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev;
	int rc;

	stdev->sndev = NULL;

	/* Only bind to switches exposing the NTB class code */
	if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
		return -ENODEV;

	/* Proceed anyway, but warn: the driver assumes exactly 2 partitions */
	if (stdev->partition_count != 2)
		dev_warn(dev, "ntb driver only supports 2 partitions");

	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
	if (!sndev)
		return -ENOMEM;

	sndev->stdev = stdev;
	switchtec_ntb_init_sndev(sndev);
	switchtec_ntb_init_mw(sndev);
	switchtec_ntb_init_db(sndev);
	switchtec_ntb_init_msgs(sndev);

	rc = switchtec_ntb_init_req_id_table(sndev);
	if (rc)
		goto free_and_exit;

	rc = switchtec_ntb_init_shared_mw(sndev);
	if (rc)
		goto free_and_exit;

	rc = switchtec_ntb_init_db_msg_irq(sndev);
	if (rc)
		goto deinit_shared_and_exit;

	rc = ntb_register_device(&sndev->ntb);
	if (rc)
		goto deinit_and_exit;

	/* Publish the NTB device and hook link-event notifications */
	stdev->sndev = sndev;
	stdev->link_notifier = switchtec_ntb_link_notification;
	dev_info(dev, "NTB device registered");

	return 0;

deinit_and_exit:
	switchtec_ntb_deinit_db_msg_irq(sndev);
deinit_shared_and_exit:
	switchtec_ntb_deinit_shared_mw(sndev);
free_and_exit:
	kfree(sndev);
	dev_err(dev, "failed to register ntb device: %d", rc);
	return rc;
}
1181
/*
 * Class-interface remove callback: unregister the NTB device and tear
 * down hardware state in reverse order of switchtec_ntb_add().  The
 * link notifier is cleared first so no further link callbacks arrive
 * during teardown.
 */
void switchtec_ntb_remove(struct device *dev,
			  struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev = stdev->sndev;

	/* Nothing to do if switchtec_ntb_add() never bound this device */
	if (!sndev)
		return;

	stdev->link_notifier = NULL;
	stdev->sndev = NULL;
	ntb_unregister_device(&sndev->ntb);
	switchtec_ntb_deinit_db_msg_irq(sndev);
	switchtec_ntb_deinit_shared_mw(sndev);
	kfree(sndev);
	dev_info(dev, "ntb device unregistered");
}
1199
/*
 * Class interface hooked to the switchtec class so this driver is
 * notified when switchtec devices appear or disappear.
 */
static struct class_interface switchtec_interface = {
	.add_dev = switchtec_ntb_add,
	.remove_dev = switchtec_ntb_remove,
};
1204
/*
 * Module init: bind the class interface to the switchtec class
 * (exported by the switchtec driver) and register it; registration
 * triggers switchtec_ntb_add() for existing devices.
 */
static int __init switchtec_ntb_init(void)
{
	switchtec_interface.class = switchtec_class;
	return class_interface_register(&switchtec_interface);
}
module_init(switchtec_ntb_init);
1211
/*
 * Module exit: unregistering the class interface invokes
 * switchtec_ntb_remove() for every bound device.
 */
static void __exit switchtec_ntb_exit(void)
{
	class_interface_unregister(&switchtec_interface);
}
module_exit(switchtec_ntb_exit);
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index f58d8e305323..045e3dd4750e 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -191,8 +191,6 @@ struct ntb_transport_qp {
191struct ntb_transport_mw { 191struct ntb_transport_mw {
192 phys_addr_t phys_addr; 192 phys_addr_t phys_addr;
193 resource_size_t phys_size; 193 resource_size_t phys_size;
194 resource_size_t xlat_align;
195 resource_size_t xlat_align_size;
196 void __iomem *vbase; 194 void __iomem *vbase;
197 size_t xlat_size; 195 size_t xlat_size;
198 size_t buff_size; 196 size_t buff_size;
@@ -687,13 +685,20 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
687 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; 685 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
688 struct pci_dev *pdev = nt->ndev->pdev; 686 struct pci_dev *pdev = nt->ndev->pdev;
689 size_t xlat_size, buff_size; 687 size_t xlat_size, buff_size;
688 resource_size_t xlat_align;
689 resource_size_t xlat_align_size;
690 int rc; 690 int rc;
691 691
692 if (!size) 692 if (!size)
693 return -EINVAL; 693 return -EINVAL;
694 694
695 xlat_size = round_up(size, mw->xlat_align_size); 695 rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align,
696 buff_size = round_up(size, mw->xlat_align); 696 &xlat_align_size, NULL);
697 if (rc)
698 return rc;
699
700 xlat_size = round_up(size, xlat_align_size);
701 buff_size = round_up(size, xlat_align);
697 702
698 /* No need to re-setup */ 703 /* No need to re-setup */
699 if (mw->xlat_size == xlat_size) 704 if (mw->xlat_size == xlat_size)
@@ -722,7 +727,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
722 * is a requirement of the hardware. It is recommended to setup CMA 727 * is a requirement of the hardware. It is recommended to setup CMA
723 * for BAR sizes equal or greater than 4MB. 728 * for BAR sizes equal or greater than 4MB.
724 */ 729 */
725 if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) { 730 if (!IS_ALIGNED(mw->dma_addr, xlat_align)) {
726 dev_err(&pdev->dev, "DMA memory %pad is not aligned\n", 731 dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
727 &mw->dma_addr); 732 &mw->dma_addr);
728 ntb_free_mw(nt, num_mw); 733 ntb_free_mw(nt, num_mw);
@@ -1104,11 +1109,6 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
1104 for (i = 0; i < mw_count; i++) { 1109 for (i = 0; i < mw_count; i++) {
1105 mw = &nt->mw_vec[i]; 1110 mw = &nt->mw_vec[i];
1106 1111
1107 rc = ntb_mw_get_align(ndev, PIDX, i, &mw->xlat_align,
1108 &mw->xlat_align_size, NULL);
1109 if (rc)
1110 goto err1;
1111
1112 rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr, 1112 rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr,
1113 &mw->phys_size); 1113 &mw->phys_size);
1114 if (rc) 1114 if (rc)
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 759f772fa00c..427112cf101a 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -108,8 +108,6 @@ MODULE_PARM_DESC(on_node, "Run threads only on NTB device node (default: true)")
108struct perf_mw { 108struct perf_mw {
109 phys_addr_t phys_addr; 109 phys_addr_t phys_addr;
110 resource_size_t phys_size; 110 resource_size_t phys_size;
111 resource_size_t xlat_align;
112 resource_size_t xlat_align_size;
113 void __iomem *vbase; 111 void __iomem *vbase;
114 size_t xlat_size; 112 size_t xlat_size;
115 size_t buf_size; 113 size_t buf_size;
@@ -472,13 +470,20 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
472{ 470{
473 struct perf_mw *mw = &perf->mw; 471 struct perf_mw *mw = &perf->mw;
474 size_t xlat_size, buf_size; 472 size_t xlat_size, buf_size;
473 resource_size_t xlat_align;
474 resource_size_t xlat_align_size;
475 int rc; 475 int rc;
476 476
477 if (!size) 477 if (!size)
478 return -EINVAL; 478 return -EINVAL;
479 479
480 xlat_size = round_up(size, mw->xlat_align_size); 480 rc = ntb_mw_get_align(perf->ntb, PIDX, 0, &xlat_align,
481 buf_size = round_up(size, mw->xlat_align); 481 &xlat_align_size, NULL);
482 if (rc)
483 return rc;
484
485 xlat_size = round_up(size, xlat_align_size);
486 buf_size = round_up(size, xlat_align);
482 487
483 if (mw->xlat_size == xlat_size) 488 if (mw->xlat_size == xlat_size)
484 return 0; 489 return 0;
@@ -567,11 +572,6 @@ static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
567 572
568 mw = &perf->mw; 573 mw = &perf->mw;
569 574
570 rc = ntb_mw_get_align(ntb, PIDX, 0, &mw->xlat_align,
571 &mw->xlat_align_size, NULL);
572 if (rc)
573 return rc;
574
575 rc = ntb_peer_mw_get_addr(ntb, 0, &mw->phys_addr, &mw->phys_size); 575 rc = ntb_peer_mw_get_addr(ntb, 0, &mw->phys_addr, &mw->phys_size);
576 if (rc) 576 if (rc)
577 return rc; 577 return rc;
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index a69815c45ce6..91526a986caa 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -753,9 +753,9 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep,
753 753
754 phys_addr_t base; 754 phys_addr_t base;
755 resource_size_t mw_size; 755 resource_size_t mw_size;
756 resource_size_t align_addr; 756 resource_size_t align_addr = 0;
757 resource_size_t align_size; 757 resource_size_t align_size = 0;
758 resource_size_t max_size; 758 resource_size_t max_size = 0;
759 759
760 buf_size = min_t(size_t, size, 512); 760 buf_size = min_t(size_t, size, 512);
761 761
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index da45dbea20ce..730cc897b94d 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -13,6 +13,7 @@
13 * 13 *
14 */ 14 */
15 15
16#include <linux/switchtec.h>
16#include <linux/switchtec_ioctl.h> 17#include <linux/switchtec_ioctl.h>
17 18
18#include <linux/interrupt.h> 19#include <linux/interrupt.h>
@@ -20,8 +21,6 @@
20#include <linux/fs.h> 21#include <linux/fs.h>
21#include <linux/uaccess.h> 22#include <linux/uaccess.h>
22#include <linux/poll.h> 23#include <linux/poll.h>
23#include <linux/pci.h>
24#include <linux/cdev.h>
25#include <linux/wait.h> 24#include <linux/wait.h>
26 25
27MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); 26MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
@@ -34,265 +33,10 @@ module_param(max_devices, int, 0644);
34MODULE_PARM_DESC(max_devices, "max number of switchtec device instances"); 33MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
35 34
36static dev_t switchtec_devt; 35static dev_t switchtec_devt;
37static struct class *switchtec_class;
38static DEFINE_IDA(switchtec_minor_ida); 36static DEFINE_IDA(switchtec_minor_ida);
39 37
40#define MICROSEMI_VENDOR_ID 0x11f8 38struct class *switchtec_class;
41#define MICROSEMI_NTB_CLASSCODE 0x068000 39EXPORT_SYMBOL_GPL(switchtec_class);
42#define MICROSEMI_MGMT_CLASSCODE 0x058000
43
44#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
45#define SWITCHTEC_MAX_PFF_CSR 48
46
47#define SWITCHTEC_EVENT_OCCURRED BIT(0)
48#define SWITCHTEC_EVENT_CLEAR BIT(0)
49#define SWITCHTEC_EVENT_EN_LOG BIT(1)
50#define SWITCHTEC_EVENT_EN_CLI BIT(2)
51#define SWITCHTEC_EVENT_EN_IRQ BIT(3)
52#define SWITCHTEC_EVENT_FATAL BIT(4)
53
54enum {
55 SWITCHTEC_GAS_MRPC_OFFSET = 0x0000,
56 SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000,
57 SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800,
58 SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000,
59 SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
60 SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000,
61 SWITCHTEC_GAS_NTB_OFFSET = 0x10000,
62 SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000,
63};
64
65struct mrpc_regs {
66 u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
67 u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
68 u32 cmd;
69 u32 status;
70 u32 ret_value;
71} __packed;
72
73enum mrpc_status {
74 SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
75 SWITCHTEC_MRPC_STATUS_DONE = 2,
76 SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
77 SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
78};
79
80struct sw_event_regs {
81 u64 event_report_ctrl;
82 u64 reserved1;
83 u64 part_event_bitmap;
84 u64 reserved2;
85 u32 global_summary;
86 u32 reserved3[3];
87 u32 stack_error_event_hdr;
88 u32 stack_error_event_data;
89 u32 reserved4[4];
90 u32 ppu_error_event_hdr;
91 u32 ppu_error_event_data;
92 u32 reserved5[4];
93 u32 isp_error_event_hdr;
94 u32 isp_error_event_data;
95 u32 reserved6[4];
96 u32 sys_reset_event_hdr;
97 u32 reserved7[5];
98 u32 fw_exception_hdr;
99 u32 reserved8[5];
100 u32 fw_nmi_hdr;
101 u32 reserved9[5];
102 u32 fw_non_fatal_hdr;
103 u32 reserved10[5];
104 u32 fw_fatal_hdr;
105 u32 reserved11[5];
106 u32 twi_mrpc_comp_hdr;
107 u32 twi_mrpc_comp_data;
108 u32 reserved12[4];
109 u32 twi_mrpc_comp_async_hdr;
110 u32 twi_mrpc_comp_async_data;
111 u32 reserved13[4];
112 u32 cli_mrpc_comp_hdr;
113 u32 cli_mrpc_comp_data;
114 u32 reserved14[4];
115 u32 cli_mrpc_comp_async_hdr;
116 u32 cli_mrpc_comp_async_data;
117 u32 reserved15[4];
118 u32 gpio_interrupt_hdr;
119 u32 gpio_interrupt_data;
120 u32 reserved16[4];
121} __packed;
122
123enum {
124 SWITCHTEC_CFG0_RUNNING = 0x04,
125 SWITCHTEC_CFG1_RUNNING = 0x05,
126 SWITCHTEC_IMG0_RUNNING = 0x03,
127 SWITCHTEC_IMG1_RUNNING = 0x07,
128};
129
130struct sys_info_regs {
131 u32 device_id;
132 u32 device_version;
133 u32 firmware_version;
134 u32 reserved1;
135 u32 vendor_table_revision;
136 u32 table_format_version;
137 u32 partition_id;
138 u32 cfg_file_fmt_version;
139 u16 cfg_running;
140 u16 img_running;
141 u32 reserved2[57];
142 char vendor_id[8];
143 char product_id[16];
144 char product_revision[4];
145 char component_vendor[8];
146 u16 component_id;
147 u8 component_revision;
148} __packed;
149
150struct flash_info_regs {
151 u32 flash_part_map_upd_idx;
152
153 struct active_partition_info {
154 u32 address;
155 u32 build_version;
156 u32 build_string;
157 } active_img;
158
159 struct active_partition_info active_cfg;
160 struct active_partition_info inactive_img;
161 struct active_partition_info inactive_cfg;
162
163 u32 flash_length;
164
165 struct partition_info {
166 u32 address;
167 u32 length;
168 } cfg0;
169
170 struct partition_info cfg1;
171 struct partition_info img0;
172 struct partition_info img1;
173 struct partition_info nvlog;
174 struct partition_info vendor[8];
175};
176
177struct ntb_info_regs {
178 u8 partition_count;
179 u8 partition_id;
180 u16 reserved1;
181 u64 ep_map;
182 u16 requester_id;
183} __packed;
184
185struct part_cfg_regs {
186 u32 status;
187 u32 state;
188 u32 port_cnt;
189 u32 usp_port_mode;
190 u32 usp_pff_inst_id;
191 u32 vep_pff_inst_id;
192 u32 dsp_pff_inst_id[47];
193 u32 reserved1[11];
194 u16 vep_vector_number;
195 u16 usp_vector_number;
196 u32 port_event_bitmap;
197 u32 reserved2[3];
198 u32 part_event_summary;
199 u32 reserved3[3];
200 u32 part_reset_hdr;
201 u32 part_reset_data[5];
202 u32 mrpc_comp_hdr;
203 u32 mrpc_comp_data[5];
204 u32 mrpc_comp_async_hdr;
205 u32 mrpc_comp_async_data[5];
206 u32 dyn_binding_hdr;
207 u32 dyn_binding_data[5];
208 u32 reserved4[159];
209} __packed;
210
211enum {
212 SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
213 SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
214 SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
215 SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
216};
217
218struct pff_csr_regs {
219 u16 vendor_id;
220 u16 device_id;
221 u32 pci_cfg_header[15];
222 u32 pci_cap_region[48];
223 u32 pcie_cap_region[448];
224 u32 indirect_gas_window[128];
225 u32 indirect_gas_window_off;
226 u32 reserved[127];
227 u32 pff_event_summary;
228 u32 reserved2[3];
229 u32 aer_in_p2p_hdr;
230 u32 aer_in_p2p_data[5];
231 u32 aer_in_vep_hdr;
232 u32 aer_in_vep_data[5];
233 u32 dpc_hdr;
234 u32 dpc_data[5];
235 u32 cts_hdr;
236 u32 cts_data[5];
237 u32 reserved3[6];
238 u32 hotplug_hdr;
239 u32 hotplug_data[5];
240 u32 ier_hdr;
241 u32 ier_data[5];
242 u32 threshold_hdr;
243 u32 threshold_data[5];
244 u32 power_mgmt_hdr;
245 u32 power_mgmt_data[5];
246 u32 tlp_throttling_hdr;
247 u32 tlp_throttling_data[5];
248 u32 force_speed_hdr;
249 u32 force_speed_data[5];
250 u32 credit_timeout_hdr;
251 u32 credit_timeout_data[5];
252 u32 link_state_hdr;
253 u32 link_state_data[5];
254 u32 reserved4[174];
255} __packed;
256
257struct switchtec_dev {
258 struct pci_dev *pdev;
259 struct device dev;
260 struct cdev cdev;
261
262 int partition;
263 int partition_count;
264 int pff_csr_count;
265 char pff_local[SWITCHTEC_MAX_PFF_CSR];
266
267 void __iomem *mmio;
268 struct mrpc_regs __iomem *mmio_mrpc;
269 struct sw_event_regs __iomem *mmio_sw_event;
270 struct sys_info_regs __iomem *mmio_sys_info;
271 struct flash_info_regs __iomem *mmio_flash_info;
272 struct ntb_info_regs __iomem *mmio_ntb;
273 struct part_cfg_regs __iomem *mmio_part_cfg;
274 struct part_cfg_regs __iomem *mmio_part_cfg_all;
275 struct pff_csr_regs __iomem *mmio_pff_csr;
276
277 /*
278 * The mrpc mutex must be held when accessing the other
279 * mrpc_ fields, alive flag and stuser->state field
280 */
281 struct mutex mrpc_mutex;
282 struct list_head mrpc_queue;
283 int mrpc_busy;
284 struct work_struct mrpc_work;
285 struct delayed_work mrpc_timeout;
286 bool alive;
287
288 wait_queue_head_t event_wq;
289 atomic_t event_cnt;
290};
291
292static struct switchtec_dev *to_stdev(struct device *dev)
293{
294 return container_of(dev, struct switchtec_dev, dev);
295}
296 40
297enum mrpc_state { 41enum mrpc_state {
298 MRPC_IDLE = 0, 42 MRPC_IDLE = 0,
@@ -1234,6 +978,49 @@ static const struct file_operations switchtec_fops = {
1234 .compat_ioctl = switchtec_dev_ioctl, 978 .compat_ioctl = switchtec_dev_ioctl,
1235}; 979};
1236 980
981static void link_event_work(struct work_struct *work)
982{
983 struct switchtec_dev *stdev;
984
985 stdev = container_of(work, struct switchtec_dev, link_event_work);
986
987 if (stdev->link_notifier)
988 stdev->link_notifier(stdev);
989}
990
991static void check_link_state_events(struct switchtec_dev *stdev)
992{
993 int idx;
994 u32 reg;
995 int count;
996 int occurred = 0;
997
998 for (idx = 0; idx < stdev->pff_csr_count; idx++) {
999 reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
1000 dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
1001 count = (reg >> 5) & 0xFF;
1002
1003 if (count != stdev->link_event_count[idx]) {
1004 occurred = 1;
1005 stdev->link_event_count[idx] = count;
1006 }
1007 }
1008
1009 if (occurred)
1010 schedule_work(&stdev->link_event_work);
1011}
1012
1013static void enable_link_state_events(struct switchtec_dev *stdev)
1014{
1015 int idx;
1016
1017 for (idx = 0; idx < stdev->pff_csr_count; idx++) {
1018 iowrite32(SWITCHTEC_EVENT_CLEAR |
1019 SWITCHTEC_EVENT_EN_IRQ,
1020 &stdev->mmio_pff_csr[idx].link_state_hdr);
1021 }
1022}
1023
1237static void stdev_release(struct device *dev) 1024static void stdev_release(struct device *dev)
1238{ 1025{
1239 struct switchtec_dev *stdev = to_stdev(dev); 1026 struct switchtec_dev *stdev = to_stdev(dev);
@@ -1286,6 +1073,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
1286 stdev->mrpc_busy = 0; 1073 stdev->mrpc_busy = 0;
1287 INIT_WORK(&stdev->mrpc_work, mrpc_event_work); 1074 INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
1288 INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work); 1075 INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
1076 INIT_WORK(&stdev->link_event_work, link_event_work);
1289 init_waitqueue_head(&stdev->event_wq); 1077 init_waitqueue_head(&stdev->event_wq);
1290 atomic_set(&stdev->event_cnt, 0); 1078 atomic_set(&stdev->event_cnt, 0);
1291 1079
@@ -1329,6 +1117,9 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
1329 if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ)) 1117 if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
1330 return 0; 1118 return 0;
1331 1119
1120 if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
1121 return 0;
1122
1332 dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr); 1123 dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
1333 hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED); 1124 hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
1334 iowrite32(hdr, hdr_reg); 1125 iowrite32(hdr, hdr_reg);
@@ -1348,6 +1139,7 @@ static int mask_all_events(struct switchtec_dev *stdev, int eid)
1348 for (idx = 0; idx < stdev->pff_csr_count; idx++) { 1139 for (idx = 0; idx < stdev->pff_csr_count; idx++) {
1349 if (!stdev->pff_local[idx]) 1140 if (!stdev->pff_local[idx])
1350 continue; 1141 continue;
1142
1351 count += mask_event(stdev, eid, idx); 1143 count += mask_event(stdev, eid, idx);
1352 } 1144 }
1353 } else { 1145 } else {
@@ -1372,6 +1164,8 @@ static irqreturn_t switchtec_event_isr(int irq, void *dev)
1372 iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr); 1164 iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
1373 } 1165 }
1374 1166
1167 check_link_state_events(stdev);
1168
1375 for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) 1169 for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
1376 event_count += mask_all_events(stdev, eid); 1170 event_count += mask_all_events(stdev, eid);
1377 1171
@@ -1481,6 +1275,9 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
1481 struct switchtec_dev *stdev; 1275 struct switchtec_dev *stdev;
1482 int rc; 1276 int rc;
1483 1277
1278 if (pdev->class == MICROSEMI_NTB_CLASSCODE)
1279 request_module_nowait("ntb_hw_switchtec");
1280
1484 stdev = stdev_create(pdev); 1281 stdev = stdev_create(pdev);
1485 if (IS_ERR(stdev)) 1282 if (IS_ERR(stdev))
1486 return PTR_ERR(stdev); 1283 return PTR_ERR(stdev);
@@ -1498,6 +1295,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
1498 iowrite32(SWITCHTEC_EVENT_CLEAR | 1295 iowrite32(SWITCHTEC_EVENT_CLEAR |
1499 SWITCHTEC_EVENT_EN_IRQ, 1296 SWITCHTEC_EVENT_EN_IRQ,
1500 &stdev->mmio_part_cfg->mrpc_comp_hdr); 1297 &stdev->mmio_part_cfg->mrpc_comp_hdr);
1298 enable_link_state_events(stdev);
1501 1299
1502 rc = cdev_device_add(&stdev->cdev, &stdev->dev); 1300 rc = cdev_device_add(&stdev->cdev, &stdev->dev);
1503 if (rc) 1301 if (rc)
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index 609e232c00da..c308964777eb 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -70,6 +70,7 @@ struct pci_dev;
70 * @NTB_TOPO_SEC: On secondary side of remote ntb. 70 * @NTB_TOPO_SEC: On secondary side of remote ntb.
71 * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb. 71 * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb.
72 * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb. 72 * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb.
73 * @NTB_TOPO_SWITCH: Connected via a switch which supports ntb.
73 */ 74 */
74enum ntb_topo { 75enum ntb_topo {
75 NTB_TOPO_NONE = -1, 76 NTB_TOPO_NONE = -1,
@@ -77,6 +78,7 @@ enum ntb_topo {
77 NTB_TOPO_SEC, 78 NTB_TOPO_SEC,
78 NTB_TOPO_B2B_USD, 79 NTB_TOPO_B2B_USD,
79 NTB_TOPO_B2B_DSD, 80 NTB_TOPO_B2B_DSD,
81 NTB_TOPO_SWITCH,
80}; 82};
81 83
82static inline int ntb_topo_is_b2b(enum ntb_topo topo) 84static inline int ntb_topo_is_b2b(enum ntb_topo topo)
@@ -97,6 +99,7 @@ static inline char *ntb_topo_string(enum ntb_topo topo)
97 case NTB_TOPO_SEC: return "NTB_TOPO_SEC"; 99 case NTB_TOPO_SEC: return "NTB_TOPO_SEC";
98 case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD"; 100 case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD";
99 case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD"; 101 case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD";
102 case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH";
100 } 103 }
101 return "NTB_TOPO_INVALID"; 104 return "NTB_TOPO_INVALID";
102} 105}
@@ -730,7 +733,8 @@ static inline int ntb_link_disable(struct ntb_dev *ntb)
730 * Hardware and topology may support a different number of memory windows. 733 * Hardware and topology may support a different number of memory windows.
731 * Moreover different peer devices can support different number of memory 734 * Moreover different peer devices can support different number of memory
732 * windows. Simply speaking this method returns the number of possible inbound 735 * windows. Simply speaking this method returns the number of possible inbound
733 * memory windows to share with specified peer device. 736 * memory windows to share with specified peer device. Note: this may return
737 * zero if the link is not up yet.
734 * 738 *
735 * Return: the number of memory windows. 739 * Return: the number of memory windows.
736 */ 740 */
@@ -751,7 +755,7 @@ static inline int ntb_mw_count(struct ntb_dev *ntb, int pidx)
751 * Get the alignments of an inbound memory window with specified index. 755 * Get the alignments of an inbound memory window with specified index.
752 * NULL may be given for any output parameter if the value is not needed. 756 * NULL may be given for any output parameter if the value is not needed.
753 * The alignment and size parameters may be used for allocation of proper 757 * The alignment and size parameters may be used for allocation of proper
754 * shared memory. 758 * shared memory. Note: this must only be called when the link is up.
755 * 759 *
756 * Return: Zero on success, otherwise a negative error number. 760 * Return: Zero on success, otherwise a negative error number.
757 */ 761 */
@@ -760,6 +764,9 @@ static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
760 resource_size_t *size_align, 764 resource_size_t *size_align,
761 resource_size_t *size_max) 765 resource_size_t *size_max)
762{ 766{
767 if (!(ntb_link_is_up(ntb, NULL, NULL) & (1 << pidx)))
768 return -ENOTCONN;
769
763 return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align, 770 return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align,
764 size_max); 771 size_max);
765} 772}
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
new file mode 100644
index 000000000000..09d73d0d1aa8
--- /dev/null
+++ b/include/linux/switchtec.h
@@ -0,0 +1,373 @@
1/*
2 * Microsemi Switchtec PCIe Driver
3 * Copyright (c) 2017, Microsemi Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16#ifndef _SWITCHTEC_H
17#define _SWITCHTEC_H
18
19#include <linux/pci.h>
20#include <linux/cdev.h>
21
/* PCI vendor ID and class codes used to match Microsemi Switchtec functions */
#define MICROSEMI_VENDOR_ID 0x11f8
#define MICROSEMI_NTB_CLASSCODE 0x068000
#define MICROSEMI_MGMT_CLASSCODE 0x058000

/* Size, in bytes, of the MRPC input/output payload buffers */
#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
/* Maximum number of PFF CSR register sets the hardware can expose */
#define SWITCHTEC_MAX_PFF_CSR 48

/*
 * Bits in the per-event header registers (the *_hdr fields below).
 * OCCURRED and CLEAR intentionally alias bit 0: the same bit reports an
 * event on read and is rewritten to acknowledge it.
 */
#define SWITCHTEC_EVENT_OCCURRED BIT(0)
#define SWITCHTEC_EVENT_CLEAR BIT(0)
#define SWITCHTEC_EVENT_EN_LOG BIT(1)	/* log the event */
#define SWITCHTEC_EVENT_EN_CLI BIT(2)	/* report on the CLI interface */
#define SWITCHTEC_EVENT_EN_IRQ BIT(3)	/* raise an interrupt */
#define SWITCHTEC_EVENT_FATAL BIT(4)	/* event is fatal */
35
/*
 * Byte offsets of the major register regions within the device's
 * Global Address Space (GAS) BAR; used to derive the mmio_* pointers
 * in struct switchtec_dev.
 */
enum {
	SWITCHTEC_GAS_MRPC_OFFSET = 0x0000,
	SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000,
	SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800,
	SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000,
	SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
	SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000,
	SWITCHTEC_GAS_NTB_OFFSET = 0x10000,
	SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000,
};
46
/*
 * MRPC (mailbox RPC) register region.  Presumably a command is issued
 * by filling input_data and writing cmd, then status/ret_value and
 * output_data report the result — confirm against the driver's MRPC
 * path.  Layout mirrors hardware registers, hence __packed.
 */
struct mrpc_regs {
	u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];	/* command payload in */
	u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];	/* command result out */
	u32 cmd;
	u32 status;		/* see enum mrpc_status */
	u32 ret_value;		/* command-specific return code */
} __packed;
54
/* Values observed in mrpc_regs.status while a command executes */
enum mrpc_status {
	SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,	/* command still running */
	SWITCHTEC_MRPC_STATUS_DONE = 2,		/* completed, results valid */
	SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
	/* 0x100 is above the 8-bit hardware range — presumably a
	 * driver-side sentinel for an aborted wait; confirm in switchtec.c */
	SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
};
61
/*
 * Global (switch-wide) event registers at SWITCHTEC_GAS_SW_EVENT_OFFSET.
 * Each event is a header word (*_hdr, carrying the SWITCHTEC_EVENT_*
 * bits) usually followed by event data words.  Layout mirrors hardware,
 * hence __packed.
 */
struct sw_event_regs {
	u64 event_report_ctrl;
	u64 reserved1;
	u64 part_event_bitmap;	/* which partitions have pending events */
	u64 reserved2;
	u32 global_summary;
	u32 reserved3[3];
	u32 stack_error_event_hdr;
	u32 stack_error_event_data;
	u32 reserved4[4];
	u32 ppu_error_event_hdr;
	u32 ppu_error_event_data;
	u32 reserved5[4];
	u32 isp_error_event_hdr;
	u32 isp_error_event_data;
	u32 reserved6[4];
	u32 sys_reset_event_hdr;
	u32 reserved7[5];
	u32 fw_exception_hdr;
	u32 reserved8[5];
	u32 fw_nmi_hdr;
	u32 reserved9[5];
	u32 fw_non_fatal_hdr;
	u32 reserved10[5];
	u32 fw_fatal_hdr;
	u32 reserved11[5];
	u32 twi_mrpc_comp_hdr;
	u32 twi_mrpc_comp_data;
	u32 reserved12[4];
	u32 twi_mrpc_comp_async_hdr;
	u32 twi_mrpc_comp_async_data;
	u32 reserved13[4];
	u32 cli_mrpc_comp_hdr;
	u32 cli_mrpc_comp_data;
	u32 reserved14[4];
	u32 cli_mrpc_comp_async_hdr;
	u32 cli_mrpc_comp_async_data;
	u32 reserved15[4];
	u32 gpio_interrupt_hdr;
	u32 gpio_interrupt_data;
	u32 reserved16[4];
} __packed;
104
/*
 * Apparently the values reported in sys_info_regs.cfg_running and
 * img_running, identifying which flash config/image partition the
 * device booted from — confirm against the hardware spec.
 */
enum {
	SWITCHTEC_CFG0_RUNNING = 0x04,
	SWITCHTEC_CFG1_RUNNING = 0x05,
	SWITCHTEC_IMG0_RUNNING = 0x03,
	SWITCHTEC_IMG1_RUNNING = 0x07,
};
111
/*
 * Device identification registers at SWITCHTEC_GAS_SYS_INFO_OFFSET.
 * The char[] fields are fixed-width identification strings read from
 * hardware — not guaranteed NUL-terminated; copy with an explicit
 * bound.  Layout mirrors hardware, hence __packed.
 */
struct sys_info_regs {
	u32 device_id;
	u32 device_version;
	u32 firmware_version;
	u32 reserved1;
	u32 vendor_table_revision;
	u32 table_format_version;
	u32 partition_id;
	u32 cfg_file_fmt_version;
	u16 cfg_running;	/* see SWITCHTEC_CFG*_RUNNING */
	u16 img_running;	/* see SWITCHTEC_IMG*_RUNNING */
	u32 reserved2[57];
	char vendor_id[8];
	char product_id[16];
	char product_revision[4];
	char component_vendor[8];
	u16 component_id;
	u8 component_revision;
} __packed;
131
132struct flash_info_regs {
133 u32 flash_part_map_upd_idx;
134
135 struct active_partition_info {
136 u32 address;
137 u32 build_version;
138 u32 build_string;
139 } active_img;
140
141 struct active_partition_info active_cfg;
142 struct active_partition_info inactive_img;
143 struct active_partition_info inactive_cfg;
144
145 u32 flash_length;
146
147 struct partition_info {
148 u32 address;
149 u32 length;
150 } cfg0;
151
152 struct partition_info cfg1;
153 struct partition_info img0;
154 struct partition_info img1;
155 struct partition_info nvlog;
156 struct partition_info vendor[8];
157};
158
/* Byte offsets of the NTB sub-regions within the GAS NTB window */
enum {
	SWITCHTEC_NTB_REG_INFO_OFFSET = 0x0000,		/* struct ntb_info_regs */
	SWITCHTEC_NTB_REG_CTRL_OFFSET = 0x4000,		/* struct ntb_ctrl_regs */
	SWITCHTEC_NTB_REG_DBMSG_OFFSET = 0x64000,	/* struct ntb_dbmsg_regs */
};
164
/*
 * NTB identification registers.  Note the u64 ep_map sits at offset 4,
 * which is misaligned for its type — __packed is essential here to
 * match the hardware layout.
 */
struct ntb_info_regs {
	u8  partition_count;
	u8  partition_id;
	u16 reserved1;
	u64 ep_map;
	u16 requester_id;
} __packed;
172
/*
 * Per-partition configuration and event registers at
 * SWITCHTEC_GAS_PART_CFG_OFFSET.  The *_hdr/_data pairs follow the
 * SWITCHTEC_EVENT_* header convention; reserved4 pads the structure so
 * consecutive partitions' register sets can be indexed as an array.
 * Layout mirrors hardware, hence __packed.
 */
struct part_cfg_regs {
	u32 status;
	u32 state;
	u32 port_cnt;
	u32 usp_port_mode;
	u32 usp_pff_inst_id;	/* PFF instance of the upstream port */
	u32 vep_pff_inst_id;	/* PFF instance of the virtual EP */
	u32 dsp_pff_inst_id[47];	/* PFF instances of downstream ports */
	u32 reserved1[11];
	u16 vep_vector_number;
	u16 usp_vector_number;
	u32 port_event_bitmap;
	u32 reserved2[3];
	u32 part_event_summary;	/* which of the events below are pending */
	u32 reserved3[3];
	u32 part_reset_hdr;
	u32 part_reset_data[5];
	u32 mrpc_comp_hdr;
	u32 mrpc_comp_data[5];
	u32 mrpc_comp_async_hdr;
	u32 mrpc_comp_async_data[5];
	u32 dyn_binding_hdr;
	u32 dyn_binding_data[5];
	u32 reserved4[159];
} __packed;
198
/*
 * Field values for struct ntb_ctrl_regs: partition_op commands, the
 * partition_status states they drive, and bit flags for the BAR,
 * requester-ID and LUT control registers.
 */
enum {
	NTB_CTRL_PART_OP_LOCK = 0x1,
	NTB_CTRL_PART_OP_CFG = 0x2,
	NTB_CTRL_PART_OP_RESET = 0x3,

	NTB_CTRL_PART_STATUS_NORMAL = 0x1,
	NTB_CTRL_PART_STATUS_LOCKED = 0x2,
	NTB_CTRL_PART_STATUS_LOCKING = 0x3,
	NTB_CTRL_PART_STATUS_CONFIGURING = 0x4,
	NTB_CTRL_PART_STATUS_RESETTING = 0x5,

	NTB_CTRL_BAR_VALID = 1 << 0,
	NTB_CTRL_BAR_DIR_WIN_EN = 1 << 4,	/* direct memory window */
	NTB_CTRL_BAR_LUT_WIN_EN = 1 << 5,	/* LUT-based window */

	NTB_CTRL_REQ_ID_EN = 1 << 0,

	NTB_CTRL_LUT_EN = 1 << 0,

	NTB_PART_CTRL_ID_PROT_DIS = 1 << 0,
};
220
/*
 * NTB partition control registers at SWITCHTEC_NTB_REG_CTRL_OFFSET:
 * BAR/memory-window setup (bar_entry), the requester-ID translation
 * table and the LUT window table.  Layout mirrors hardware, hence
 * __packed.
 */
struct ntb_ctrl_regs {
	u32 partition_status;	/* NTB_CTRL_PART_STATUS_* */
	u32 partition_op;	/* NTB_CTRL_PART_OP_* */
	u32 partition_ctrl;
	u32 bar_setup;
	u32 bar_error;
	u16 lut_table_entries;
	u16 lut_table_offset;
	u32 lut_error;
	u16 req_id_table_size;
	u16 req_id_table_offset;
	u32 req_id_error;
	u32 reserved1[7];
	struct {
		u32 ctl;		/* NTB_CTRL_BAR_* flags */
		u32 win_size;
		u64 xlate_addr;		/* peer address the window maps to */
	} bar_entry[6];
	u32 reserved2[216];
	u32 req_id_table[256];
	u32 reserved3[512];
	u64 lut_entry[512];
} __packed;
244
/*
 * When an imsg entry is read as a single u64, its status byte starts at
 * bit 32 and its mask byte at bit 40 (matching the u8 fields below).
 */
#define NTB_DBMSG_IMSG_STATUS BIT_ULL(32)
#define NTB_DBMSG_IMSG_MASK BIT_ULL(40)

/*
 * NTB doorbell and message registers at SWITCHTEC_NTB_REG_DBMSG_OFFSET.
 * odb/idb are the outbound/inbound doorbell bits with their masks;
 * omsg/imsg are the outbound/inbound message slots.  Layout mirrors
 * hardware, hence __packed.
 */
struct ntb_dbmsg_regs {
	u32 reserved1[1024];
	u64 odb;
	u64 odb_mask;
	u64 idb;
	u64 idb_mask;
	u8 idb_vec_map[64];	/* doorbell bit -> interrupt vector */
	u32 msg_map;
	u32 reserved2;
	struct {
		u32 msg;
		u32 status;
	} omsg[4];

	struct {
		u32 msg;
		u8 status;
		u8 mask;
		u8 src;		/* originating partition — confirm */
		u8 reserved;
	} imsg[4];

	u8 reserved3[3928];
	u8 msix_table[1024];
	u8 reserved4[3072];
	u8 pba[24];		/* MSI-X pending bit array */
	u8 reserved5[4072];
} __packed;
276
/*
 * Bits in part_cfg_regs.part_event_summary — apparently one bit per
 * event header in that structure, in declaration order; confirm against
 * the hardware spec.
 */
enum {
	SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
	SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
	SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
	SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
};
283
/*
 * Per-PFF CSR region at SWITCHTEC_GAS_PFF_CSR_OFFSET: a shadow of the
 * function's PCI config space (header, capabilities, extended
 * capabilities) followed by per-event header/data register pairs using
 * the SWITCHTEC_EVENT_* bits.  Layout mirrors hardware, hence __packed.
 */
struct pff_csr_regs {
	u16 vendor_id;
	u16 device_id;
	u32 pci_cfg_header[15];
	u32 pci_cap_region[48];
	u32 pcie_cap_region[448];
	u32 indirect_gas_window[128];
	u32 indirect_gas_window_off;
	u32 reserved[127];
	u32 pff_event_summary;	/* which of the events below are pending */
	u32 reserved2[3];
	u32 aer_in_p2p_hdr;
	u32 aer_in_p2p_data[5];
	u32 aer_in_vep_hdr;
	u32 aer_in_vep_data[5];
	u32 dpc_hdr;
	u32 dpc_data[5];
	u32 cts_hdr;
	u32 cts_data[5];
	u32 reserved3[6];
	u32 hotplug_hdr;
	u32 hotplug_data[5];
	u32 ier_hdr;
	u32 ier_data[5];
	u32 threshold_hdr;
	u32 threshold_data[5];
	u32 power_mgmt_hdr;
	u32 power_mgmt_data[5];
	u32 tlp_throttling_hdr;
	u32 tlp_throttling_data[5];
	u32 force_speed_hdr;
	u32 force_speed_data[5];
	u32 credit_timeout_hdr;
	u32 credit_timeout_data[5];
	u32 link_state_hdr;
	u32 link_state_data[5];
	u32 reserved4[174];
} __packed;
322
struct switchtec_ntb;

/*
 * Driver state for one Switchtec management endpoint: the PCI device,
 * the character device exposed to userspace, cached pointers into the
 * GAS BAR regions, MRPC command queuing state, and event/link
 * notification state shared with the NTB driver.
 */
struct switchtec_dev {
	struct pci_dev *pdev;
	struct device dev;
	struct cdev cdev;		/* userspace char device interface */

	int partition;			/* partition this function belongs to */
	int partition_count;
	int pff_csr_count;		/* number of valid entries in mmio_pff_csr */
	char pff_local[SWITCHTEC_MAX_PFF_CSR];	/* nonzero if PFF idx is in our partition */

	/* cached pointers into the GAS BAR (see SWITCHTEC_GAS_*_OFFSET) */
	void __iomem *mmio;
	struct mrpc_regs __iomem *mmio_mrpc;
	struct sw_event_regs __iomem *mmio_sw_event;
	struct sys_info_regs __iomem *mmio_sys_info;
	struct flash_info_regs __iomem *mmio_flash_info;
	struct ntb_info_regs __iomem *mmio_ntb;
	struct part_cfg_regs __iomem *mmio_part_cfg;		/* our partition */
	struct part_cfg_regs __iomem *mmio_part_cfg_all;	/* all partitions */
	struct pff_csr_regs __iomem *mmio_pff_csr;

	/*
	 * The mrpc mutex must be held when accessing the other
	 * mrpc_ fields, alive flag and stuser->state field
	 */
	struct mutex mrpc_mutex;
	struct list_head mrpc_queue;	/* pending MRPC commands */
	int mrpc_busy;
	struct work_struct mrpc_work;
	struct delayed_work mrpc_timeout;
	bool alive;

	wait_queue_head_t event_wq;	/* woken on new events; paired with event_cnt */
	atomic_t event_cnt;

	/* link-state event plumbing; link_notifier is set by the NTB driver */
	struct work_struct link_event_work;
	void (*link_notifier)(struct switchtec_dev *stdev);
	u8 link_event_count[SWITCHTEC_MAX_PFF_CSR];

	struct switchtec_ntb *sndev;	/* state owned by ntb_hw_switchtec */
};
365
366static inline struct switchtec_dev *to_stdev(struct device *dev)
367{
368 return container_of(dev, struct switchtec_dev, dev);
369}
370
371extern struct class *switchtec_class;
372
373#endif