Diffstat (limited to 'drivers/net/dsa')
32 files changed, 6685 insertions, 435 deletions
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 71bb3aebded4..c6c5ecdbcaef 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -41,7 +41,7 @@ config NET_DSA_MT7530
41 | 41 | ||
42 | config NET_DSA_MV88E6060 | 42 | config NET_DSA_MV88E6060 |
43 | tristate "Marvell 88E6060 ethernet switch chip support" | 43 | tristate "Marvell 88E6060 ethernet switch chip support" |
44 | depends on NET_DSA && NET_DSA_LEGACY | 44 | depends on NET_DSA |
45 | select NET_DSA_TAG_TRAILER | 45 | select NET_DSA_TAG_TRAILER |
46 | ---help--- | 46 | ---help--- |
47 | This enables support for the Marvell 88E6060 ethernet switch | 47 | This enables support for the Marvell 88E6060 ethernet switch |
@@ -51,6 +51,8 @@ source "drivers/net/dsa/microchip/Kconfig"
51 | 51 | ||
52 | source "drivers/net/dsa/mv88e6xxx/Kconfig" | 52 | source "drivers/net/dsa/mv88e6xxx/Kconfig" |
53 | 53 | ||
54 | source "drivers/net/dsa/sja1105/Kconfig" | ||
55 | |||
54 | config NET_DSA_QCA8K | 56 | config NET_DSA_QCA8K |
55 | tristate "Qualcomm Atheros QCA8K Ethernet switch family support" | 57 | tristate "Qualcomm Atheros QCA8K Ethernet switch family support" |
56 | depends on NET_DSA | 58 | depends on NET_DSA |
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 82e5d794c41f..fefb6aaa82ba 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx.o
18 | obj-y += b53/ | 18 | obj-y += b53/ |
19 | obj-y += microchip/ | 19 | obj-y += microchip/ |
20 | obj-y += mv88e6xxx/ | 20 | obj-y += mv88e6xxx/ |
21 | obj-y += sja1105/ | ||
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0852e5e08177..c8040ecf4425 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -428,7 +428,6 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable,
428 | b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); | 428 | b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); |
429 | 429 | ||
430 | dev->vlan_enabled = enable; | 430 | dev->vlan_enabled = enable; |
431 | dev->vlan_filtering_enabled = enable_filtering; | ||
432 | } | 431 | } |
433 | 432 | ||
434 | static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) | 433 | static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) |
@@ -665,7 +664,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
665 | b53_do_vlan_op(dev, VTA_CMD_CLEAR); | 664 | b53_do_vlan_op(dev, VTA_CMD_CLEAR); |
666 | } | 665 | } |
667 | 666 | ||
668 | b53_enable_vlan(dev, false, dev->vlan_filtering_enabled); | 667 | b53_enable_vlan(dev, false, ds->vlan_filtering); |
669 | 668 | ||
670 | b53_for_each_port(dev, i) | 669 | b53_for_each_port(dev, i) |
671 | b53_write16(dev, B53_VLAN_PAGE, | 670 | b53_write16(dev, B53_VLAN_PAGE, |
@@ -966,6 +965,13 @@ static int b53_setup(struct dsa_switch *ds)
966 | b53_disable_port(ds, port); | 965 | b53_disable_port(ds, port); |
967 | } | 966 | } |
968 | 967 | ||
968 | /* Let DSA handle the case were multiple bridges span the same switch | ||
969 | * device and different VLAN awareness settings are requested, which | ||
970 | * would be breaking filtering semantics for any of the other bridge | ||
971 | * devices. (not hardware supported) | ||
972 | */ | ||
973 | ds->vlan_filtering_is_global = true; | ||
974 | |||
969 | return ret; | 975 | return ret; |
970 | } | 976 | } |
971 | 977 | ||
@@ -1275,35 +1281,17 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up);
1275 | int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) | 1281 | int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) |
1276 | { | 1282 | { |
1277 | struct b53_device *dev = ds->priv; | 1283 | struct b53_device *dev = ds->priv; |
1278 | struct net_device *bridge_dev; | ||
1279 | unsigned int i; | ||
1280 | u16 pvid, new_pvid; | 1284 | u16 pvid, new_pvid; |
1281 | 1285 | ||
1282 | /* Handle the case were multiple bridges span the same switch device | ||
1283 | * and one of them has a different setting than what is being requested | ||
1284 | * which would be breaking filtering semantics for any of the other | ||
1285 | * bridge devices. | ||
1286 | */ | ||
1287 | b53_for_each_port(dev, i) { | ||
1288 | bridge_dev = dsa_to_port(ds, i)->bridge_dev; | ||
1289 | if (bridge_dev && | ||
1290 | bridge_dev != dsa_to_port(ds, port)->bridge_dev && | ||
1291 | br_vlan_enabled(bridge_dev) != vlan_filtering) { | ||
1292 | netdev_err(bridge_dev, | ||
1293 | "VLAN filtering is global to the switch!\n"); | ||
1294 | return -EINVAL; | ||
1295 | } | ||
1296 | } | ||
1297 | |||
1298 | b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); | 1286 | b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); |
1299 | new_pvid = pvid; | 1287 | new_pvid = pvid; |
1300 | if (dev->vlan_filtering_enabled && !vlan_filtering) { | 1288 | if (!vlan_filtering) { |
1301 | /* Filtering is currently enabled, use the default PVID since | 1289 | /* Filtering is currently enabled, use the default PVID since |
1302 | * the bridge does not expect tagging anymore | 1290 | * the bridge does not expect tagging anymore |
1303 | */ | 1291 | */ |
1304 | dev->ports[port].pvid = pvid; | 1292 | dev->ports[port].pvid = pvid; |
1305 | new_pvid = b53_default_pvid(dev); | 1293 | new_pvid = b53_default_pvid(dev); |
1306 | } else if (!dev->vlan_filtering_enabled && vlan_filtering) { | 1294 | } else { |
1307 | /* Filtering is currently disabled, restore the previous PVID */ | 1295 | /* Filtering is currently disabled, restore the previous PVID */ |
1308 | new_pvid = dev->ports[port].pvid; | 1296 | new_pvid = dev->ports[port].pvid; |
1309 | } | 1297 | } |
@@ -1329,7 +1317,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
1329 | if (vlan->vid_end > dev->num_vlans) | 1317 | if (vlan->vid_end > dev->num_vlans) |
1330 | return -ERANGE; | 1318 | return -ERANGE; |
1331 | 1319 | ||
1332 | b53_enable_vlan(dev, true, dev->vlan_filtering_enabled); | 1320 | b53_enable_vlan(dev, true, ds->vlan_filtering); |
1333 | 1321 | ||
1334 | return 0; | 1322 | return 0; |
1335 | } | 1323 | } |
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index e3441dcf2d21..f25bc80c4ffc 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -139,7 +139,6 @@ struct b53_device {
139 | unsigned int num_vlans; | 139 | unsigned int num_vlans; |
140 | struct b53_vlan *vlans; | 140 | struct b53_vlan *vlans; |
141 | bool vlan_enabled; | 141 | bool vlan_enabled; |
142 | bool vlan_filtering_enabled; | ||
143 | unsigned int num_ports; | 142 | unsigned int num_ports; |
144 | struct b53_port *ports; | 143 | struct b53_port *ports; |
145 | }; | 144 | }; |
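The b53 hunks above drop the driver's private vlan_filtering_enabled bookkeeping: b53_setup() now marks VLAN filtering as global to the switch (ds->vlan_filtering_is_global) and the remaining code reads the current state from ds->vlan_filtering, leaving the "two bridges want different settings" conflict check that b53_vlan_filtering() used to open-code to the DSA core. The stand-alone model below sketches that policy only; model_switch, request_vlan_filtering() and the demo values are invented for the example and are not the kernel's DSA implementation.

#include <stdbool.h>
#include <stdio.h>

#define NUM_PORTS 5

struct model_switch {
        bool vlan_filtering_is_global;          /* declared once by the driver */
        bool port_in_bridge[NUM_PORTS];         /* which ports joined some bridge */
        bool port_wants_filtering[NUM_PORTS];
};

/* Accept the request only if no other bridged port currently disagrees,
 * which is the situation the removed b53 loop used to detect by hand. */
static int request_vlan_filtering(struct model_switch *sw, int port, bool on)
{
        int i;

        if (sw->vlan_filtering_is_global) {
                for (i = 0; i < NUM_PORTS; i++) {
                        if (i == port || !sw->port_in_bridge[i])
                                continue;
                        if (sw->port_wants_filtering[i] != on)
                                return -1;      /* conflicting request */
                }
        }
        sw->port_wants_filtering[port] = on;
        return 0;
}

int main(void)
{
        struct model_switch sw = { .vlan_filtering_is_global = true };

        sw.port_in_bridge[1] = true;
        sw.port_in_bridge[2] = true;

        /* Port 2 is still non-filtering, so only the matching request succeeds. */
        printf("port 1 -> off: %s\n", request_vlan_filtering(&sw, 1, false) ? "rejected" : "ok");
        printf("port 1 -> on:  %s\n", request_vlan_filtering(&sw, 1, true) ? "rejected" : "ok");
        return 0;
}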
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index c8e3f05e1d72..4ccb3239f5f7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1188,10 +1188,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
1188 | if (ret) | 1188 | if (ret) |
1189 | goto out_mdio; | 1189 | goto out_mdio; |
1190 | 1190 | ||
1191 | pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n", | 1191 | dev_info(&pdev->dev, |
1192 | priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, | 1192 | "Starfighter 2 top: %x.%02x, core: %x.%02x, IRQs: %d, %d\n", |
1193 | priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff, | 1193 | priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, |
1194 | priv->core, priv->irq0, priv->irq1); | 1194 | priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff, |
1195 | priv->irq0, priv->irq1); | ||
1195 | 1196 | ||
1196 | return 0; | 1197 | return 0; |
1197 | 1198 | ||
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index e6234d209787..4212bc4a5f31 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -886,6 +886,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
886 | fs->m_ext.data[1])) | 886 | fs->m_ext.data[1])) |
887 | return -EINVAL; | 887 | return -EINVAL; |
888 | 888 | ||
889 | if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES) | ||
890 | return -EINVAL; | ||
891 | |||
889 | if (fs->location != RX_CLS_LOC_ANY && | 892 | if (fs->location != RX_CLS_LOC_ANY && |
890 | test_bit(fs->location, priv->cfp.used)) | 893 | test_bit(fs->location, priv->cfp.used)) |
891 | return -EBUSY; | 894 | return -EBUSY; |
@@ -974,6 +977,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
974 | struct cfp_rule *rule; | 977 | struct cfp_rule *rule; |
975 | int ret; | 978 | int ret; |
976 | 979 | ||
980 | if (loc >= CFP_NUM_RULES) | ||
981 | return -EINVAL; | ||
982 | |||
977 | /* Refuse deleting unused rules, and those that are not unique since | 983 | /* Refuse deleting unused rules, and those that are not unique since |
978 | * that could leave IPv6 rules with one of the chained rule in the | 984 | * that could leave IPv6 rules with one of the chained rule in the |
979 | * table. | 985 | * table. |
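Both bcm_sf2_cfp.c hunks add the same kind of guard: the user-supplied rule location is rejected if it is neither the "place it anywhere" sentinel nor a valid index below CFP_NUM_RULES, before it is ever used against the priv->cfp.used bitmap. Below is a minimal stand-alone sketch of that validate-before-index pattern; RULE_LOC_ANY, NUM_RULES and rule_set() are made-up stand-ins for RX_CLS_LOC_ANY, CFP_NUM_RULES and the driver entry points.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RULE_LOC_ANY 0xffffffffu        /* stand-in for RX_CLS_LOC_ANY */
#define NUM_RULES    256u               /* stand-in for CFP_NUM_RULES */

static uint8_t used_rules[NUM_RULES / 8];       /* poor man's bitmap */

static bool rule_loc_is_used(uint32_t loc)
{
        return used_rules[loc / 8] & (1u << (loc % 8));
}

static int rule_set(uint32_t loc)
{
        /* Bounds check first, so the bitmap is never indexed out of range. */
        if (loc != RULE_LOC_ANY && loc >= NUM_RULES)
                return -22;                     /* -EINVAL */
        if (loc != RULE_LOC_ANY && rule_loc_is_used(loc))
                return -16;                     /* -EBUSY */
        /* ... pick a slot, program the hardware, mark the slot used ... */
        return 0;
}

int main(void)
{
        printf("loc=3:     %d\n", rule_set(3));         /* accepted */
        printf("loc=99999: %d\n", rule_set(99999));     /* caught by the new check */
        return 0;
}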
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index d8328866908c..553831df58fe 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -4,7 +4,25 @@
4 | * | 4 | * |
5 | * Copyright (C) 2010 Lantiq Deutschland | 5 | * Copyright (C) 2010 Lantiq Deutschland |
6 | * Copyright (C) 2012 John Crispin <john@phrozen.org> | 6 | * Copyright (C) 2012 John Crispin <john@phrozen.org> |
7 | * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de> | 7 | * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de> |
8 | * | ||
9 | * The VLAN and bridge model the GSWIP hardware uses does not directly | ||
10 | * matches the model DSA uses. | ||
11 | * | ||
12 | * The hardware has 64 possible table entries for bridges with one VLAN | ||
13 | * ID, one flow id and a list of ports for each bridge. All entries which | ||
14 | * match the same flow ID are combined in the mac learning table, they | ||
15 | * act as one global bridge. | ||
16 | * The hardware does not support VLAN filter on the port, but on the | ||
17 | * bridge, this driver converts the DSA model to the hardware. | ||
18 | * | ||
19 | * The CPU gets all the exception frames which do not match any forwarding | ||
20 | * rule and the CPU port is also added to all bridges. This makes it possible | ||
21 | * to handle all the special cases easily in software. | ||
22 | * At the initialization the driver allocates one bridge table entry for | ||
23 | * each switch port which is used when the port is used without an | ||
24 | * explicit bridge. This prevents the frames from being forwarded | ||
25 | * between all LAN ports by default. | ||
8 | */ | 26 | */ |
9 | 27 | ||
10 | #include <linux/clk.h> | 28 | #include <linux/clk.h> |
@@ -148,19 +166,29 @@
148 | #define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */ | 166 | #define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */ |
149 | #define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */ | 167 | #define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */ |
150 | #define GSWIP_PCE_GCTRL_0 0x456 | 168 | #define GSWIP_PCE_GCTRL_0 0x456 |
169 | #define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */ | ||
151 | #define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3) | 170 | #define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3) |
152 | #define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */ | 171 | #define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */ |
153 | #define GSWIP_PCE_GCTRL_1 0x457 | 172 | #define GSWIP_PCE_GCTRL_1 0x457 |
154 | #define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */ | 173 | #define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */ |
155 | #define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */ | 174 | #define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */ |
156 | #define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA)) | 175 | #define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA)) |
157 | #define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) | 176 | #define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */ |
177 | #define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */ | ||
178 | #define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */ | ||
158 | #define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0 | 179 | #define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0 |
159 | #define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1 | 180 | #define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1 |
160 | #define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2 | 181 | #define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2 |
161 | #define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3 | 182 | #define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3 |
162 | #define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7 | 183 | #define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7 |
163 | #define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0) | 184 | #define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0) |
185 | #define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA)) | ||
186 | #define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */ | ||
187 | #define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */ | ||
188 | #define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */ | ||
189 | #define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */ | ||
190 | #define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */ | ||
191 | #define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA)) | ||
164 | 192 | ||
165 | #define GSWIP_MAC_FLEN 0x8C5 | 193 | #define GSWIP_MAC_FLEN 0x8C5 |
166 | #define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC)) | 194 | #define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC)) |
@@ -183,6 +211,11 @@
183 | #define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */ | 211 | #define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */ |
184 | #define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */ | 212 | #define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */ |
185 | 213 | ||
214 | #define GSWIP_TABLE_ACTIVE_VLAN 0x01 | ||
215 | #define GSWIP_TABLE_VLAN_MAPPING 0x02 | ||
216 | #define GSWIP_TABLE_MAC_BRIDGE 0x0b | ||
217 | #define GSWIP_TABLE_MAC_BRIDGE_STATIC 0x01 /* Static not, aging entry */ | ||
218 | |||
186 | #define XRX200_GPHY_FW_ALIGN (16 * 1024) | 219 | #define XRX200_GPHY_FW_ALIGN (16 * 1024) |
187 | 220 | ||
188 | struct gswip_hw_info { | 221 | struct gswip_hw_info { |
@@ -202,6 +235,12 @@ struct gswip_gphy_fw {
202 | char *fw_name; | 235 | char *fw_name; |
203 | }; | 236 | }; |
204 | 237 | ||
238 | struct gswip_vlan { | ||
239 | struct net_device *bridge; | ||
240 | u16 vid; | ||
241 | u8 fid; | ||
242 | }; | ||
243 | |||
205 | struct gswip_priv { | 244 | struct gswip_priv { |
206 | __iomem void *gswip; | 245 | __iomem void *gswip; |
207 | __iomem void *mdio; | 246 | __iomem void *mdio; |
@@ -211,8 +250,22 @@ struct gswip_priv {
211 | struct dsa_switch *ds; | 250 | struct dsa_switch *ds; |
212 | struct device *dev; | 251 | struct device *dev; |
213 | struct regmap *rcu_regmap; | 252 | struct regmap *rcu_regmap; |
253 | struct gswip_vlan vlans[64]; | ||
214 | int num_gphy_fw; | 254 | int num_gphy_fw; |
215 | struct gswip_gphy_fw *gphy_fw; | 255 | struct gswip_gphy_fw *gphy_fw; |
256 | u32 port_vlan_filter; | ||
257 | }; | ||
258 | |||
259 | struct gswip_pce_table_entry { | ||
260 | u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index | ||
261 | u16 table; // PCE_TBL_CTRL.ADDR = pData->table | ||
262 | u16 key[8]; | ||
263 | u16 val[5]; | ||
264 | u16 mask; | ||
265 | u8 gmap; | ||
266 | bool type; | ||
267 | bool valid; | ||
268 | bool key_mode; | ||
216 | }; | 269 | }; |
217 | 270 | ||
218 | struct gswip_rmon_cnt_desc { | 271 | struct gswip_rmon_cnt_desc { |
@@ -447,10 +500,153 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
447 | return of_mdiobus_register(ds->slave_mii_bus, mdio_np); | 500 | return of_mdiobus_register(ds->slave_mii_bus, mdio_np); |
448 | } | 501 | } |
449 | 502 | ||
503 | static int gswip_pce_table_entry_read(struct gswip_priv *priv, | ||
504 | struct gswip_pce_table_entry *tbl) | ||
505 | { | ||
506 | int i; | ||
507 | int err; | ||
508 | u16 crtl; | ||
509 | u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD : | ||
510 | GSWIP_PCE_TBL_CTRL_OPMOD_ADRD; | ||
511 | |||
512 | err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, | ||
513 | GSWIP_PCE_TBL_CTRL_BAS); | ||
514 | if (err) | ||
515 | return err; | ||
516 | |||
517 | gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR); | ||
518 | gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | | ||
519 | GSWIP_PCE_TBL_CTRL_OPMOD_MASK, | ||
520 | tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS, | ||
521 | GSWIP_PCE_TBL_CTRL); | ||
522 | |||
523 | err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, | ||
524 | GSWIP_PCE_TBL_CTRL_BAS); | ||
525 | if (err) | ||
526 | return err; | ||
527 | |||
528 | for (i = 0; i < ARRAY_SIZE(tbl->key); i++) | ||
529 | tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i)); | ||
530 | |||
531 | for (i = 0; i < ARRAY_SIZE(tbl->val); i++) | ||
532 | tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i)); | ||
533 | |||
534 | tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK); | ||
535 | |||
536 | crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL); | ||
537 | |||
538 | tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE); | ||
539 | tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD); | ||
540 | tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7; | ||
541 | |||
542 | return 0; | ||
543 | } | ||
544 | |||
545 | static int gswip_pce_table_entry_write(struct gswip_priv *priv, | ||
546 | struct gswip_pce_table_entry *tbl) | ||
547 | { | ||
548 | int i; | ||
549 | int err; | ||
550 | u16 crtl; | ||
551 | u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR : | ||
552 | GSWIP_PCE_TBL_CTRL_OPMOD_ADWR; | ||
553 | |||
554 | err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, | ||
555 | GSWIP_PCE_TBL_CTRL_BAS); | ||
556 | if (err) | ||
557 | return err; | ||
558 | |||
559 | gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR); | ||
560 | gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | | ||
561 | GSWIP_PCE_TBL_CTRL_OPMOD_MASK, | ||
562 | tbl->table | addr_mode, | ||
563 | GSWIP_PCE_TBL_CTRL); | ||
564 | |||
565 | for (i = 0; i < ARRAY_SIZE(tbl->key); i++) | ||
566 | gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i)); | ||
567 | |||
568 | for (i = 0; i < ARRAY_SIZE(tbl->val); i++) | ||
569 | gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i)); | ||
570 | |||
571 | gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | | ||
572 | GSWIP_PCE_TBL_CTRL_OPMOD_MASK, | ||
573 | tbl->table | addr_mode, | ||
574 | GSWIP_PCE_TBL_CTRL); | ||
575 | |||
576 | gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK); | ||
577 | |||
578 | crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL); | ||
579 | crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD | | ||
580 | GSWIP_PCE_TBL_CTRL_GMAP_MASK); | ||
581 | if (tbl->type) | ||
582 | crtl |= GSWIP_PCE_TBL_CTRL_TYPE; | ||
583 | if (tbl->valid) | ||
584 | crtl |= GSWIP_PCE_TBL_CTRL_VLD; | ||
585 | crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK; | ||
586 | crtl |= GSWIP_PCE_TBL_CTRL_BAS; | ||
587 | gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL); | ||
588 | |||
589 | return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, | ||
590 | GSWIP_PCE_TBL_CTRL_BAS); | ||
591 | } | ||
592 | |||
593 | /* Add the LAN port into a bridge with the CPU port by | ||
594 | * default. This prevents automatic forwarding of | ||
595 | * packages between the LAN ports when no explicit | ||
596 | * bridge is configured. | ||
597 | */ | ||
598 | static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add) | ||
599 | { | ||
600 | struct gswip_pce_table_entry vlan_active = {0,}; | ||
601 | struct gswip_pce_table_entry vlan_mapping = {0,}; | ||
602 | unsigned int cpu_port = priv->hw_info->cpu_port; | ||
603 | unsigned int max_ports = priv->hw_info->max_ports; | ||
604 | int err; | ||
605 | |||
606 | if (port >= max_ports) { | ||
607 | dev_err(priv->dev, "single port for %i supported\n", port); | ||
608 | return -EIO; | ||
609 | } | ||
610 | |||
611 | vlan_active.index = port + 1; | ||
612 | vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; | ||
613 | vlan_active.key[0] = 0; /* vid */ | ||
614 | vlan_active.val[0] = port + 1 /* fid */; | ||
615 | vlan_active.valid = add; | ||
616 | err = gswip_pce_table_entry_write(priv, &vlan_active); | ||
617 | if (err) { | ||
618 | dev_err(priv->dev, "failed to write active VLAN: %d\n", err); | ||
619 | return err; | ||
620 | } | ||
621 | |||
622 | if (!add) | ||
623 | return 0; | ||
624 | |||
625 | vlan_mapping.index = port + 1; | ||
626 | vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; | ||
627 | vlan_mapping.val[0] = 0 /* vid */; | ||
628 | vlan_mapping.val[1] = BIT(port) | BIT(cpu_port); | ||
629 | vlan_mapping.val[2] = 0; | ||
630 | err = gswip_pce_table_entry_write(priv, &vlan_mapping); | ||
631 | if (err) { | ||
632 | dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); | ||
633 | return err; | ||
634 | } | ||
635 | |||
636 | return 0; | ||
637 | } | ||
638 | |||
450 | static int gswip_port_enable(struct dsa_switch *ds, int port, | 639 | static int gswip_port_enable(struct dsa_switch *ds, int port, |
451 | struct phy_device *phydev) | 640 | struct phy_device *phydev) |
452 | { | 641 | { |
453 | struct gswip_priv *priv = ds->priv; | 642 | struct gswip_priv *priv = ds->priv; |
643 | int err; | ||
644 | |||
645 | if (!dsa_is_cpu_port(ds, port)) { | ||
646 | err = gswip_add_single_port_br(priv, port, true); | ||
647 | if (err) | ||
648 | return err; | ||
649 | } | ||
454 | 650 | ||
455 | /* RMON Counter Enable for port */ | 651 | /* RMON Counter Enable for port */ |
456 | gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port)); | 652 | gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port)); |
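gswip_pce_table_entry_read() and gswip_pce_table_entry_write() above both follow the same access discipline: wait until the self-clearing BAS busy bit is idle, program the address and control registers, start the operation, then wait for BAS to clear again (the driver wraps the waiting in gswip_switch_r_timeout()). The snippet below is a stand-alone sketch of that kick-and-poll pattern against a simulated register; CTRL_BUSY, fake_reg and wait_not_busy() are invented for the illustration.

#include <stdint.h>
#include <stdio.h>

#define CTRL_BUSY (1u << 15)       /* plays the role of GSWIP_PCE_TBL_CTRL_BAS */

static uint32_t fake_reg;          /* stands in for a memory-mapped register */
static int fake_countdown = 3;     /* "hardware" finishes after three polls */

static uint32_t reg_read(void)
{
        if (fake_countdown && --fake_countdown == 0)
                fake_reg &= ~CTRL_BUSY;         /* hardware clears the bit itself */
        return fake_reg;
}

static void reg_write(uint32_t val)
{
        fake_reg = val;
}

/* Poll until the busy bit clears; the real driver bounds the wait with a
 * timeout helper rather than a fixed iteration count. */
static int wait_not_busy(unsigned int max_tries)
{
        while (max_tries--) {
                if (!(reg_read() & CTRL_BUSY))
                        return 0;
        }
        return -110;    /* -ETIMEDOUT */
}

int main(void)
{
        if (wait_not_busy(10))          /* make sure no operation is pending */
                return 1;
        reg_write(CTRL_BUSY);           /* kick the table operation */
        printf("operation done: %d\n", wait_not_busy(10));
        return 0;
}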
@@ -461,8 +657,6 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
461 | GSWIP_FDMA_PCTRLp(port)); | 657 | GSWIP_FDMA_PCTRLp(port)); |
462 | gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN, | 658 | gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN, |
463 | GSWIP_SDMA_PCTRLp(port)); | 659 | GSWIP_SDMA_PCTRLp(port)); |
464 | gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS, | ||
465 | GSWIP_PCE_PCTRL_0p(port)); | ||
466 | 660 | ||
467 | if (!dsa_is_cpu_port(ds, port)) { | 661 | if (!dsa_is_cpu_port(ds, port)) { |
468 | u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO | | 662 | u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO | |
@@ -535,6 +729,39 @@ static int gswip_pce_load_microcode(struct gswip_priv *priv)
535 | return 0; | 729 | return 0; |
536 | } | 730 | } |
537 | 731 | ||
732 | static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port, | ||
733 | bool vlan_filtering) | ||
734 | { | ||
735 | struct gswip_priv *priv = ds->priv; | ||
736 | struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; | ||
737 | |||
738 | /* Do not allow changing the VLAN filtering options while in bridge */ | ||
739 | if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering && bridge) | ||
740 | return -EIO; | ||
741 | |||
742 | if (vlan_filtering) { | ||
743 | /* Use port based VLAN tag */ | ||
744 | gswip_switch_mask(priv, | ||
745 | GSWIP_PCE_VCTRL_VSR, | ||
746 | GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR | | ||
747 | GSWIP_PCE_VCTRL_VEMR, | ||
748 | GSWIP_PCE_VCTRL(port)); | ||
749 | gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0, | ||
750 | GSWIP_PCE_PCTRL_0p(port)); | ||
751 | } else { | ||
752 | /* Use port based VLAN tag */ | ||
753 | gswip_switch_mask(priv, | ||
754 | GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR | | ||
755 | GSWIP_PCE_VCTRL_VEMR, | ||
756 | GSWIP_PCE_VCTRL_VSR, | ||
757 | GSWIP_PCE_VCTRL(port)); | ||
758 | gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM, | ||
759 | GSWIP_PCE_PCTRL_0p(port)); | ||
760 | } | ||
761 | |||
762 | return 0; | ||
763 | } | ||
764 | |||
538 | static int gswip_setup(struct dsa_switch *ds) | 765 | static int gswip_setup(struct dsa_switch *ds) |
539 | { | 766 | { |
540 | struct gswip_priv *priv = ds->priv; | 767 | struct gswip_priv *priv = ds->priv; |
@@ -547,8 +774,10 @@ static int gswip_setup(struct dsa_switch *ds)
547 | gswip_switch_w(priv, 0, GSWIP_SWRES); | 774 | gswip_switch_w(priv, 0, GSWIP_SWRES); |
548 | 775 | ||
549 | /* disable port fetch/store dma on all ports */ | 776 | /* disable port fetch/store dma on all ports */ |
550 | for (i = 0; i < priv->hw_info->max_ports; i++) | 777 | for (i = 0; i < priv->hw_info->max_ports; i++) { |
551 | gswip_port_disable(ds, i); | 778 | gswip_port_disable(ds, i); |
779 | gswip_port_vlan_filtering(ds, i, false); | ||
780 | } | ||
552 | 781 | ||
553 | /* enable Switch */ | 782 | /* enable Switch */ |
554 | gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB); | 783 | gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB); |
@@ -578,6 +807,10 @@ static int gswip_setup(struct dsa_switch *ds)
578 | gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN, | 807 | gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN, |
579 | GSWIP_FDMA_PCTRLp(cpu_port)); | 808 | GSWIP_FDMA_PCTRLp(cpu_port)); |
580 | 809 | ||
810 | /* accept special tag in ingress direction */ | ||
811 | gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS, | ||
812 | GSWIP_PCE_PCTRL_0p(cpu_port)); | ||
813 | |||
581 | gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN, | 814 | gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN, |
582 | GSWIP_MAC_CTRL_2p(cpu_port)); | 815 | GSWIP_MAC_CTRL_2p(cpu_port)); |
583 | gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN); | 816 | gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN); |
@@ -587,10 +820,15 @@ static int gswip_setup(struct dsa_switch *ds)
587 | /* VLAN aware Switching */ | 820 | /* VLAN aware Switching */ |
588 | gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0); | 821 | gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0); |
589 | 822 | ||
590 | /* Mac Address Table Lock */ | 823 | /* Flush MAC Table */ |
591 | gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_1_MAC_GLOCK | | 824 | gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0); |
592 | GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD, | 825 | |
593 | GSWIP_PCE_GCTRL_1); | 826 | err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0, |
827 | GSWIP_PCE_GCTRL_0_MTFL); | ||
828 | if (err) { | ||
829 | dev_err(priv->dev, "MAC flushing didn't finish\n"); | ||
830 | return err; | ||
831 | } | ||
594 | 832 | ||
595 | gswip_port_enable(ds, cpu_port, NULL); | 833 | gswip_port_enable(ds, cpu_port, NULL); |
596 | return 0; | 834 | return 0; |
@@ -602,6 +840,551 @@ static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
602 | return DSA_TAG_PROTO_GSWIP; | 840 | return DSA_TAG_PROTO_GSWIP; |
603 | } | 841 | } |
604 | 842 | ||
843 | static int gswip_vlan_active_create(struct gswip_priv *priv, | ||
844 | struct net_device *bridge, | ||
845 | int fid, u16 vid) | ||
846 | { | ||
847 | struct gswip_pce_table_entry vlan_active = {0,}; | ||
848 | unsigned int max_ports = priv->hw_info->max_ports; | ||
849 | int idx = -1; | ||
850 | int err; | ||
851 | int i; | ||
852 | |||
853 | /* Look for a free slot */ | ||
854 | for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { | ||
855 | if (!priv->vlans[i].bridge) { | ||
856 | idx = i; | ||
857 | break; | ||
858 | } | ||
859 | } | ||
860 | |||
861 | if (idx == -1) | ||
862 | return -ENOSPC; | ||
863 | |||
864 | if (fid == -1) | ||
865 | fid = idx; | ||
866 | |||
867 | vlan_active.index = idx; | ||
868 | vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; | ||
869 | vlan_active.key[0] = vid; | ||
870 | vlan_active.val[0] = fid; | ||
871 | vlan_active.valid = true; | ||
872 | |||
873 | err = gswip_pce_table_entry_write(priv, &vlan_active); | ||
874 | if (err) { | ||
875 | dev_err(priv->dev, "failed to write active VLAN: %d\n", err); | ||
876 | return err; | ||
877 | } | ||
878 | |||
879 | priv->vlans[idx].bridge = bridge; | ||
880 | priv->vlans[idx].vid = vid; | ||
881 | priv->vlans[idx].fid = fid; | ||
882 | |||
883 | return idx; | ||
884 | } | ||
885 | |||
886 | static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx) | ||
887 | { | ||
888 | struct gswip_pce_table_entry vlan_active = {0,}; | ||
889 | int err; | ||
890 | |||
891 | vlan_active.index = idx; | ||
892 | vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; | ||
893 | vlan_active.valid = false; | ||
894 | err = gswip_pce_table_entry_write(priv, &vlan_active); | ||
895 | if (err) | ||
896 | dev_err(priv->dev, "failed to delete active VLAN: %d\n", err); | ||
897 | priv->vlans[idx].bridge = NULL; | ||
898 | |||
899 | return err; | ||
900 | } | ||
901 | |||
902 | static int gswip_vlan_add_unaware(struct gswip_priv *priv, | ||
903 | struct net_device *bridge, int port) | ||
904 | { | ||
905 | struct gswip_pce_table_entry vlan_mapping = {0,}; | ||
906 | unsigned int max_ports = priv->hw_info->max_ports; | ||
907 | unsigned int cpu_port = priv->hw_info->cpu_port; | ||
908 | bool active_vlan_created = false; | ||
909 | int idx = -1; | ||
910 | int i; | ||
911 | int err; | ||
912 | |||
913 | /* Check if there is already a page for this bridge */ | ||
914 | for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { | ||
915 | if (priv->vlans[i].bridge == bridge) { | ||
916 | idx = i; | ||
917 | break; | ||
918 | } | ||
919 | } | ||
920 | |||
921 | /* If this bridge is not programmed yet, add a Active VLAN table | ||
922 | * entry in a free slot and prepare the VLAN mapping table entry. | ||
923 | */ | ||
924 | if (idx == -1) { | ||
925 | idx = gswip_vlan_active_create(priv, bridge, -1, 0); | ||
926 | if (idx < 0) | ||
927 | return idx; | ||
928 | active_vlan_created = true; | ||
929 | |||
930 | vlan_mapping.index = idx; | ||
931 | vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; | ||
932 | /* VLAN ID byte, maps to the VLAN ID of vlan active table */ | ||
933 | vlan_mapping.val[0] = 0; | ||
934 | } else { | ||
935 | /* Read the existing VLAN mapping entry from the switch */ | ||
936 | vlan_mapping.index = idx; | ||
937 | vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; | ||
938 | err = gswip_pce_table_entry_read(priv, &vlan_mapping); | ||
939 | if (err) { | ||
940 | dev_err(priv->dev, "failed to read VLAN mapping: %d\n", | ||
941 | err); | ||
942 | return err; | ||
943 | } | ||
944 | } | ||
945 | |||
946 | /* Update the VLAN mapping entry and write it to the switch */ | ||
947 | vlan_mapping.val[1] |= BIT(cpu_port); | ||
948 | vlan_mapping.val[1] |= BIT(port); | ||
949 | err = gswip_pce_table_entry_write(priv, &vlan_mapping); | ||
950 | if (err) { | ||
951 | dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); | ||
952 | /* In case an Active VLAN was creaetd delete it again */ | ||
953 | if (active_vlan_created) | ||
954 | gswip_vlan_active_remove(priv, idx); | ||
955 | return err; | ||
956 | } | ||
957 | |||
958 | gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port)); | ||
959 | return 0; | ||
960 | } | ||
961 | |||
962 | static int gswip_vlan_add_aware(struct gswip_priv *priv, | ||
963 | struct net_device *bridge, int port, | ||
964 | u16 vid, bool untagged, | ||
965 | bool pvid) | ||
966 | { | ||
967 | struct gswip_pce_table_entry vlan_mapping = {0,}; | ||
968 | unsigned int max_ports = priv->hw_info->max_ports; | ||
969 | unsigned int cpu_port = priv->hw_info->cpu_port; | ||
970 | bool active_vlan_created = false; | ||
971 | int idx = -1; | ||
972 | int fid = -1; | ||
973 | int i; | ||
974 | int err; | ||
975 | |||
976 | /* Check if there is already a page for this bridge */ | ||
977 | for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { | ||
978 | if (priv->vlans[i].bridge == bridge) { | ||
979 | if (fid != -1 && fid != priv->vlans[i].fid) | ||
980 | dev_err(priv->dev, "one bridge with multiple flow ids\n"); | ||
981 | fid = priv->vlans[i].fid; | ||
982 | if (priv->vlans[i].vid == vid) { | ||
983 | idx = i; | ||
984 | break; | ||
985 | } | ||
986 | } | ||
987 | } | ||
988 | |||
989 | /* If this bridge is not programmed yet, add a Active VLAN table | ||
990 | * entry in a free slot and prepare the VLAN mapping table entry. | ||
991 | */ | ||
992 | if (idx == -1) { | ||
993 | idx = gswip_vlan_active_create(priv, bridge, fid, vid); | ||
994 | if (idx < 0) | ||
995 | return idx; | ||
996 | active_vlan_created = true; | ||
997 | |||
998 | vlan_mapping.index = idx; | ||
999 | vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; | ||
1000 | /* VLAN ID byte, maps to the VLAN ID of vlan active table */ | ||
1001 | vlan_mapping.val[0] = vid; | ||
1002 | } else { | ||
1003 | /* Read the existing VLAN mapping entry from the switch */ | ||
1004 | vlan_mapping.index = idx; | ||
1005 | vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; | ||
1006 | err = gswip_pce_table_entry_read(priv, &vlan_mapping); | ||
1007 | if (err) { | ||
1008 | dev_err(priv->dev, "failed to read VLAN mapping: %d\n", | ||
1009 | err); | ||
1010 | return err; | ||
1011 | } | ||
1012 | } | ||
1013 | |||
1014 | vlan_mapping.val[0] = vid; | ||
1015 | /* Update the VLAN mapping entry and write it to the switch */ | ||
1016 | vlan_mapping.val[1] |= BIT(cpu_port); | ||
1017 | vlan_mapping.val[2] |= BIT(cpu_port); | ||
1018 | vlan_mapping.val[1] |= BIT(port); | ||
1019 | if (untagged) | ||
1020 | vlan_mapping.val[2] &= ~BIT(port); | ||
1021 | else | ||
1022 | vlan_mapping.val[2] |= BIT(port); | ||
1023 | err = gswip_pce_table_entry_write(priv, &vlan_mapping); | ||
1024 | if (err) { | ||
1025 | dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); | ||
1026 | /* In case an Active VLAN was creaetd delete it again */ | ||
1027 | if (active_vlan_created) | ||
1028 | gswip_vlan_active_remove(priv, idx); | ||
1029 | return err; | ||
1030 | } | ||
1031 | |||
1032 | if (pvid) | ||
1033 | gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port)); | ||
1034 | |||
1035 | return 0; | ||
1036 | } | ||
1037 | |||
1038 | static int gswip_vlan_remove(struct gswip_priv *priv, | ||
1039 | struct net_device *bridge, int port, | ||
1040 | u16 vid, bool pvid, bool vlan_aware) | ||
1041 | { | ||
1042 | struct gswip_pce_table_entry vlan_mapping = {0,}; | ||
1043 | unsigned int max_ports = priv->hw_info->max_ports; | ||
1044 | unsigned int cpu_port = priv->hw_info->cpu_port; | ||
1045 | int idx = -1; | ||
1046 | int i; | ||
1047 | int err; | ||
1048 | |||
1049 | /* Check if there is already a page for this bridge */ | ||
1050 | for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { | ||
1051 | if (priv->vlans[i].bridge == bridge && | ||
1052 | (!vlan_aware || priv->vlans[i].vid == vid)) { | ||
1053 | idx = i; | ||
1054 | break; | ||
1055 | } | ||
1056 | } | ||
1057 | |||
1058 | if (idx == -1) { | ||
1059 | dev_err(priv->dev, "bridge to leave does not exists\n"); | ||
1060 | return -ENOENT; | ||
1061 | } | ||
1062 | |||
1063 | vlan_mapping.index = idx; | ||
1064 | vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; | ||
1065 | err = gswip_pce_table_entry_read(priv, &vlan_mapping); | ||
1066 | if (err) { | ||
1067 | dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err); | ||
1068 | return err; | ||
1069 | } | ||
1070 | |||
1071 | vlan_mapping.val[1] &= ~BIT(port); | ||
1072 | vlan_mapping.val[2] &= ~BIT(port); | ||
1073 | err = gswip_pce_table_entry_write(priv, &vlan_mapping); | ||
1074 | if (err) { | ||
1075 | dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); | ||
1076 | return err; | ||
1077 | } | ||
1078 | |||
1079 | /* In case all ports are removed from the bridge, remove the VLAN */ | ||
1080 | if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) { | ||
1081 | err = gswip_vlan_active_remove(priv, idx); | ||
1082 | if (err) { | ||
1083 | dev_err(priv->dev, "failed to write active VLAN: %d\n", | ||
1084 | err); | ||
1085 | return err; | ||
1086 | } | ||
1087 | } | ||
1088 | |||
1089 | /* GSWIP 2.2 (GRX300) and later program here the VID directly. */ | ||
1090 | if (pvid) | ||
1091 | gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port)); | ||
1092 | |||
1093 | return 0; | ||
1094 | } | ||
1095 | |||
1096 | static int gswip_port_bridge_join(struct dsa_switch *ds, int port, | ||
1097 | struct net_device *bridge) | ||
1098 | { | ||
1099 | struct gswip_priv *priv = ds->priv; | ||
1100 | int err; | ||
1101 | |||
1102 | /* When the bridge uses VLAN filtering we have to configure VLAN | ||
1103 | * specific bridges. No bridge is configured here. | ||
1104 | */ | ||
1105 | if (!br_vlan_enabled(bridge)) { | ||
1106 | err = gswip_vlan_add_unaware(priv, bridge, port); | ||
1107 | if (err) | ||
1108 | return err; | ||
1109 | priv->port_vlan_filter &= ~BIT(port); | ||
1110 | } else { | ||
1111 | priv->port_vlan_filter |= BIT(port); | ||
1112 | } | ||
1113 | return gswip_add_single_port_br(priv, port, false); | ||
1114 | } | ||
1115 | |||
1116 | static void gswip_port_bridge_leave(struct dsa_switch *ds, int port, | ||
1117 | struct net_device *bridge) | ||
1118 | { | ||
1119 | struct gswip_priv *priv = ds->priv; | ||
1120 | |||
1121 | gswip_add_single_port_br(priv, port, true); | ||
1122 | |||
1123 | /* When the bridge uses VLAN filtering we have to configure VLAN | ||
1124 | * specific bridges. No bridge is configured here. | ||
1125 | */ | ||
1126 | if (!br_vlan_enabled(bridge)) | ||
1127 | gswip_vlan_remove(priv, bridge, port, 0, true, false); | ||
1128 | } | ||
1129 | |||
1130 | static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port, | ||
1131 | const struct switchdev_obj_port_vlan *vlan) | ||
1132 | { | ||
1133 | struct gswip_priv *priv = ds->priv; | ||
1134 | struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; | ||
1135 | unsigned int max_ports = priv->hw_info->max_ports; | ||
1136 | u16 vid; | ||
1137 | int i; | ||
1138 | int pos = max_ports; | ||
1139 | |||
1140 | /* We only support VLAN filtering on bridges */ | ||
1141 | if (!dsa_is_cpu_port(ds, port) && !bridge) | ||
1142 | return -EOPNOTSUPP; | ||
1143 | |||
1144 | for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { | ||
1145 | int idx = -1; | ||
1146 | |||
1147 | /* Check if there is already a page for this VLAN */ | ||
1148 | for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { | ||
1149 | if (priv->vlans[i].bridge == bridge && | ||
1150 | priv->vlans[i].vid == vid) { | ||
1151 | idx = i; | ||
1152 | break; | ||
1153 | } | ||
1154 | } | ||
1155 | |||
1156 | /* If this VLAN is not programmed yet, we have to reserve | ||
1157 | * one entry in the VLAN table. Make sure we start at the | ||
1158 | * next position round. | ||
1159 | */ | ||
1160 | if (idx == -1) { | ||
1161 | /* Look for a free slot */ | ||
1162 | for (; pos < ARRAY_SIZE(priv->vlans); pos++) { | ||
1163 | if (!priv->vlans[pos].bridge) { | ||
1164 | idx = pos; | ||
1165 | pos++; | ||
1166 | break; | ||
1167 | } | ||
1168 | } | ||
1169 | |||
1170 | if (idx == -1) | ||
1171 | return -ENOSPC; | ||
1172 | } | ||
1173 | } | ||
1174 | |||
1175 | return 0; | ||
1176 | } | ||
1177 | |||
1178 | static void gswip_port_vlan_add(struct dsa_switch *ds, int port, | ||
1179 | const struct switchdev_obj_port_vlan *vlan) | ||
1180 | { | ||
1181 | struct gswip_priv *priv = ds->priv; | ||
1182 | struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; | ||
1183 | bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; | ||
1184 | bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; | ||
1185 | u16 vid; | ||
1186 | |||
1187 | /* We have to receive all packets on the CPU port and should not | ||
1188 | * do any VLAN filtering here. This is also called with bridge | ||
1189 | * NULL and then we do not know for which bridge to configure | ||
1190 | * this. | ||
1191 | */ | ||
1192 | if (dsa_is_cpu_port(ds, port)) | ||
1193 | return; | ||
1194 | |||
1195 | for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) | ||
1196 | gswip_vlan_add_aware(priv, bridge, port, vid, untagged, pvid); | ||
1197 | } | ||
1198 | |||
1199 | static int gswip_port_vlan_del(struct dsa_switch *ds, int port, | ||
1200 | const struct switchdev_obj_port_vlan *vlan) | ||
1201 | { | ||
1202 | struct gswip_priv *priv = ds->priv; | ||
1203 | struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; | ||
1204 | bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; | ||
1205 | u16 vid; | ||
1206 | int err; | ||
1207 | |||
1208 | /* We have to receive all packets on the CPU port and should not | ||
1209 | * do any VLAN filtering here. This is also called with bridge | ||
1210 | * NULL and then we do not know for which bridge to configure | ||
1211 | * this. | ||
1212 | */ | ||
1213 | if (dsa_is_cpu_port(ds, port)) | ||
1214 | return 0; | ||
1215 | |||
1216 | for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { | ||
1217 | err = gswip_vlan_remove(priv, bridge, port, vid, pvid, true); | ||
1218 | if (err) | ||
1219 | return err; | ||
1220 | } | ||
1221 | |||
1222 | return 0; | ||
1223 | } | ||
1224 | |||
1225 | static void gswip_port_fast_age(struct dsa_switch *ds, int port) | ||
1226 | { | ||
1227 | struct gswip_priv *priv = ds->priv; | ||
1228 | struct gswip_pce_table_entry mac_bridge = {0,}; | ||
1229 | int i; | ||
1230 | int err; | ||
1231 | |||
1232 | for (i = 0; i < 2048; i++) { | ||
1233 | mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; | ||
1234 | mac_bridge.index = i; | ||
1235 | |||
1236 | err = gswip_pce_table_entry_read(priv, &mac_bridge); | ||
1237 | if (err) { | ||
1238 | dev_err(priv->dev, "failed to read mac brigde: %d\n", | ||
1239 | err); | ||
1240 | return; | ||
1241 | } | ||
1242 | |||
1243 | if (!mac_bridge.valid) | ||
1244 | continue; | ||
1245 | |||
1246 | if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) | ||
1247 | continue; | ||
1248 | |||
1249 | if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port) | ||
1250 | continue; | ||
1251 | |||
1252 | mac_bridge.valid = false; | ||
1253 | err = gswip_pce_table_entry_write(priv, &mac_bridge); | ||
1254 | if (err) { | ||
1255 | dev_err(priv->dev, "failed to write mac brigde: %d\n", | ||
1256 | err); | ||
1257 | return; | ||
1258 | } | ||
1259 | } | ||
1260 | } | ||
1261 | |||
1262 | static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) | ||
1263 | { | ||
1264 | struct gswip_priv *priv = ds->priv; | ||
1265 | u32 stp_state; | ||
1266 | |||
1267 | switch (state) { | ||
1268 | case BR_STATE_DISABLED: | ||
1269 | gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0, | ||
1270 | GSWIP_SDMA_PCTRLp(port)); | ||
1271 | return; | ||
1272 | case BR_STATE_BLOCKING: | ||
1273 | case BR_STATE_LISTENING: | ||
1274 | stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN; | ||
1275 | break; | ||
1276 | case BR_STATE_LEARNING: | ||
1277 | stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING; | ||
1278 | break; | ||
1279 | case BR_STATE_FORWARDING: | ||
1280 | stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING; | ||
1281 | break; | ||
1282 | default: | ||
1283 | dev_err(priv->dev, "invalid STP state: %d\n", state); | ||
1284 | return; | ||
1285 | } | ||
1286 | |||
1287 | gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN, | ||
1288 | GSWIP_SDMA_PCTRLp(port)); | ||
1289 | gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state, | ||
1290 | GSWIP_PCE_PCTRL_0p(port)); | ||
1291 | } | ||
1292 | |||
1293 | static int gswip_port_fdb(struct dsa_switch *ds, int port, | ||
1294 | const unsigned char *addr, u16 vid, bool add) | ||
1295 | { | ||
1296 | struct gswip_priv *priv = ds->priv; | ||
1297 | struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; | ||
1298 | struct gswip_pce_table_entry mac_bridge = {0,}; | ||
1299 | unsigned int cpu_port = priv->hw_info->cpu_port; | ||
1300 | int fid = -1; | ||
1301 | int i; | ||
1302 | int err; | ||
1303 | |||
1304 | if (!bridge) | ||
1305 | return -EINVAL; | ||
1306 | |||
1307 | for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) { | ||
1308 | if (priv->vlans[i].bridge == bridge) { | ||
1309 | fid = priv->vlans[i].fid; | ||
1310 | break; | ||
1311 | } | ||
1312 | } | ||
1313 | |||
1314 | if (fid == -1) { | ||
1315 | dev_err(priv->dev, "Port not part of a bridge\n"); | ||
1316 | return -EINVAL; | ||
1317 | } | ||
1318 | |||
1319 | mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; | ||
1320 | mac_bridge.key_mode = true; | ||
1321 | mac_bridge.key[0] = addr[5] | (addr[4] << 8); | ||
1322 | mac_bridge.key[1] = addr[3] | (addr[2] << 8); | ||
1323 | mac_bridge.key[2] = addr[1] | (addr[0] << 8); | ||
1324 | mac_bridge.key[3] = fid; | ||
1325 | mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */ | ||
1326 | mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC; | ||
1327 | mac_bridge.valid = add; | ||
1328 | |||
1329 | err = gswip_pce_table_entry_write(priv, &mac_bridge); | ||
1330 | if (err) | ||
1331 | dev_err(priv->dev, "failed to write mac brigde: %d\n", err); | ||
1332 | |||
1333 | return err; | ||
1334 | } | ||
1335 | |||
1336 | static int gswip_port_fdb_add(struct dsa_switch *ds, int port, | ||
1337 | const unsigned char *addr, u16 vid) | ||
1338 | { | ||
1339 | return gswip_port_fdb(ds, port, addr, vid, true); | ||
1340 | } | ||
1341 | |||
1342 | static int gswip_port_fdb_del(struct dsa_switch *ds, int port, | ||
1343 | const unsigned char *addr, u16 vid) | ||
1344 | { | ||
1345 | return gswip_port_fdb(ds, port, addr, vid, false); | ||
1346 | } | ||
1347 | |||
1348 | static int gswip_port_fdb_dump(struct dsa_switch *ds, int port, | ||
1349 | dsa_fdb_dump_cb_t *cb, void *data) | ||
1350 | { | ||
1351 | struct gswip_priv *priv = ds->priv; | ||
1352 | struct gswip_pce_table_entry mac_bridge = {0,}; | ||
1353 | unsigned char addr[6]; | ||
1354 | int i; | ||
1355 | int err; | ||
1356 | |||
1357 | for (i = 0; i < 2048; i++) { | ||
1358 | mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; | ||
1359 | mac_bridge.index = i; | ||
1360 | |||
1361 | err = gswip_pce_table_entry_read(priv, &mac_bridge); | ||
1362 | if (err) { | ||
1363 | dev_err(priv->dev, "failed to write mac brigde: %d\n", | ||
1364 | err); | ||
1365 | return err; | ||
1366 | } | ||
1367 | |||
1368 | if (!mac_bridge.valid) | ||
1369 | continue; | ||
1370 | |||
1371 | addr[5] = mac_bridge.key[0] & 0xff; | ||
1372 | addr[4] = (mac_bridge.key[0] >> 8) & 0xff; | ||
1373 | addr[3] = mac_bridge.key[1] & 0xff; | ||
1374 | addr[2] = (mac_bridge.key[1] >> 8) & 0xff; | ||
1375 | addr[1] = mac_bridge.key[2] & 0xff; | ||
1376 | addr[0] = (mac_bridge.key[2] >> 8) & 0xff; | ||
1377 | if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) { | ||
1378 | if (mac_bridge.val[0] & BIT(port)) | ||
1379 | cb(addr, 0, true, data); | ||
1380 | } else { | ||
1381 | if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) | ||
1382 | cb(addr, 0, false, data); | ||
1383 | } | ||
1384 | } | ||
1385 | return 0; | ||
1386 | } | ||
1387 | |||
605 | static void gswip_phylink_validate(struct dsa_switch *ds, int port, | 1388 | static void gswip_phylink_validate(struct dsa_switch *ds, int port, |
606 | unsigned long *supported, | 1389 | unsigned long *supported, |
607 | struct phylink_link_state *state) | 1390 | struct phylink_link_state *state) |
@@ -809,6 +1592,17 @@ static const struct dsa_switch_ops gswip_switch_ops = {
809 | .setup = gswip_setup, | 1592 | .setup = gswip_setup, |
810 | .port_enable = gswip_port_enable, | 1593 | .port_enable = gswip_port_enable, |
811 | .port_disable = gswip_port_disable, | 1594 | .port_disable = gswip_port_disable, |
1595 | .port_bridge_join = gswip_port_bridge_join, | ||
1596 | .port_bridge_leave = gswip_port_bridge_leave, | ||
1597 | .port_fast_age = gswip_port_fast_age, | ||
1598 | .port_vlan_filtering = gswip_port_vlan_filtering, | ||
1599 | .port_vlan_prepare = gswip_port_vlan_prepare, | ||
1600 | .port_vlan_add = gswip_port_vlan_add, | ||
1601 | .port_vlan_del = gswip_port_vlan_del, | ||
1602 | .port_stp_state_set = gswip_port_stp_state_set, | ||
1603 | .port_fdb_add = gswip_port_fdb_add, | ||
1604 | .port_fdb_del = gswip_port_fdb_del, | ||
1605 | .port_fdb_dump = gswip_port_fdb_dump, | ||
812 | .phylink_validate = gswip_phylink_validate, | 1606 | .phylink_validate = gswip_phylink_validate, |
813 | .phylink_mac_config = gswip_phylink_mac_config, | 1607 | .phylink_mac_config = gswip_phylink_mac_config, |
814 | .phylink_mac_link_down = gswip_phylink_mac_link_down, | 1608 | .phylink_mac_link_down = gswip_phylink_mac_link_down, |
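Two details of the MAC bridge table handling added above are easy to miss: gswip_port_fdb() packs the six address bytes into the three 16-bit key words (with the FID in key[3]), and gswip_port_fdb_dump() reverses exactly that layout while walking all 2048 entries. The round-trip below reproduces just the byte layout in plain user-space C; mac_to_key() and key_to_mac() are names made up for the sketch.

#include <stdint.h>
#include <stdio.h>

/* Same packing as gswip_port_fdb(): addr[5] ends up in the low byte of key[0]. */
static void mac_to_key(const uint8_t addr[6], uint16_t key[3])
{
        key[0] = addr[5] | (addr[4] << 8);
        key[1] = addr[3] | (addr[2] << 8);
        key[2] = addr[1] | (addr[0] << 8);
}

/* Same unpacking as gswip_port_fdb_dump(). */
static void key_to_mac(const uint16_t key[3], uint8_t addr[6])
{
        addr[5] = key[0] & 0xff;
        addr[4] = (key[0] >> 8) & 0xff;
        addr[3] = key[1] & 0xff;
        addr[2] = (key[1] >> 8) & 0xff;
        addr[1] = key[2] & 0xff;
        addr[0] = (key[2] >> 8) & 0xff;
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint16_t key[3];
        uint8_t back[6];

        mac_to_key(mac, key);
        key_to_mac(key, back);
        printf("key = %04x %04x %04x, back = %02x:%02x:%02x:%02x:%02x:%02x\n",
               key[0], key[1], key[2],
               back[0], back[1], back[2], back[3], back[4], back[5]);
        return 0;
}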
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index f16e1d7d8615..c026d15721f6 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -1144,6 +1144,7 @@ static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
1144 | interface = PHY_INTERFACE_MODE_GMII; | 1144 | interface = PHY_INTERFACE_MODE_GMII; |
1145 | if (gbit) | 1145 | if (gbit) |
1146 | break; | 1146 | break; |
1147 | /* fall through */ | ||
1147 | case 0: | 1148 | case 0: |
1148 | interface = PHY_INTERFACE_MODE_MII; | 1149 | interface = PHY_INTERFACE_MODE_MII; |
1149 | break; | 1150 | break; |
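The single ksz9477.c change only adds a /* fall through */ annotation, telling the compiler that dropping from the gigabit case into case 0 is intentional rather than a missing break. For reference, a stand-alone example of the same construct (interface_name() and its arguments are invented; later kernels spell the annotation with the fallthrough pseudo-keyword instead of a comment):

#include <stdio.h>

/* With -Wimplicit-fallthrough, the comment (or an explicit fallthrough
 * attribute) is what silences the warning on the case 2 -> case 0 drop. */
static const char *interface_name(int mode, int gbit)
{
        const char *iface = "unknown";

        switch (mode) {
        case 2:
                if (gbit)
                        return "gmii";
                /* fall through */
        case 0:
                iface = "mii";
                break;
        case 1:
                iface = "rmii";
                break;
        }
        return iface;
}

int main(void)
{
        printf("%s %s %s\n",
               interface_name(2, 1),    /* gmii */
               interface_name(2, 0),    /* falls through to mii */
               interface_name(1, 0));   /* rmii */
        return 0;
}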
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 7357b4fc0185..8d531c5f21f3 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -828,11 +828,9 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
828 | mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK, | 828 | mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK, |
829 | VLAN_ATTR(MT7530_VLAN_TRANSPARENT)); | 829 | VLAN_ATTR(MT7530_VLAN_TRANSPARENT)); |
830 | 830 | ||
831 | priv->ports[port].vlan_filtering = false; | ||
832 | |||
833 | for (i = 0; i < MT7530_NUM_PORTS; i++) { | 831 | for (i = 0; i < MT7530_NUM_PORTS; i++) { |
834 | if (dsa_is_user_port(ds, i) && | 832 | if (dsa_is_user_port(ds, i) && |
835 | priv->ports[i].vlan_filtering) { | 833 | dsa_port_is_vlan_filtering(&ds->ports[i])) { |
836 | all_user_ports_removed = false; | 834 | all_user_ports_removed = false; |
837 | break; | 835 | break; |
838 | } | 836 | } |
@@ -891,8 +889,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
891 | * And the other port's port matrix cannot be broken when the | 889 | * And the other port's port matrix cannot be broken when the |
892 | * other port is still a VLAN-aware port. | 890 | * other port is still a VLAN-aware port. |
893 | */ | 891 | */ |
894 | if (!priv->ports[i].vlan_filtering && | 892 | if (dsa_is_user_port(ds, i) && i != port && |
895 | dsa_is_user_port(ds, i) && i != port) { | 893 | !dsa_port_is_vlan_filtering(&ds->ports[i])) { |
896 | if (dsa_to_port(ds, i)->bridge_dev != bridge) | 894 | if (dsa_to_port(ds, i)->bridge_dev != bridge) |
897 | continue; | 895 | continue; |
898 | if (priv->ports[i].enable) | 896 | if (priv->ports[i].enable) |
@@ -910,8 +908,6 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
910 | PCR_MATRIX(BIT(MT7530_CPU_PORT))); | 908 | PCR_MATRIX(BIT(MT7530_CPU_PORT))); |
911 | priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT)); | 909 | priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT)); |
912 | 910 | ||
913 | mt7530_port_set_vlan_unaware(ds, port); | ||
914 | |||
915 | mutex_unlock(&priv->reg_mutex); | 911 | mutex_unlock(&priv->reg_mutex); |
916 | } | 912 | } |
917 | 913 | ||
@@ -1013,10 +1009,6 @@ static int
1013 | mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, | 1009 | mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, |
1014 | bool vlan_filtering) | 1010 | bool vlan_filtering) |
1015 | { | 1011 | { |
1016 | struct mt7530_priv *priv = ds->priv; | ||
1017 | |||
1018 | priv->ports[port].vlan_filtering = vlan_filtering; | ||
1019 | |||
1020 | if (vlan_filtering) { | 1012 | if (vlan_filtering) { |
1021 | /* The port is being kept as VLAN-unaware port when bridge is | 1013 | /* The port is being kept as VLAN-unaware port when bridge is |
1022 | * set up with vlan_filtering not being set, Otherwise, the | 1014 | * set up with vlan_filtering not being set, Otherwise, the |
@@ -1025,6 +1017,8 @@ mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
1025 | */ | 1017 | */ |
1026 | mt7530_port_set_vlan_aware(ds, port); | 1018 | mt7530_port_set_vlan_aware(ds, port); |
1027 | mt7530_port_set_vlan_aware(ds, MT7530_CPU_PORT); | 1019 | mt7530_port_set_vlan_aware(ds, MT7530_CPU_PORT); |
1020 | } else { | ||
1021 | mt7530_port_set_vlan_unaware(ds, port); | ||
1028 | } | 1022 | } |
1029 | 1023 | ||
1030 | return 0; | 1024 | return 0; |
@@ -1139,7 +1133,7 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port,
1139 | /* The port is kept as VLAN-unaware if bridge with vlan_filtering not | 1133 | /* The port is kept as VLAN-unaware if bridge with vlan_filtering not |
1140 | * being set. | 1134 | * being set. |
1141 | */ | 1135 | */ |
1142 | if (!priv->ports[port].vlan_filtering) | 1136 | if (!dsa_port_is_vlan_filtering(&ds->ports[port])) |
1143 | return; | 1137 | return; |
1144 | 1138 | ||
1145 | mutex_lock(&priv->reg_mutex); | 1139 | mutex_lock(&priv->reg_mutex); |
@@ -1170,7 +1164,7 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
1170 | /* The port is kept as VLAN-unaware if bridge with vlan_filtering not | 1164 | /* The port is kept as VLAN-unaware if bridge with vlan_filtering not |
1171 | * being set. | 1165 | * being set. |
1172 | */ | 1166 | */ |
1173 | if (!priv->ports[port].vlan_filtering) | 1167 | if (!dsa_port_is_vlan_filtering(&ds->ports[port])) |
1174 | return 0; | 1168 | return 0; |
1175 | 1169 | ||
1176 | mutex_lock(&priv->reg_mutex); | 1170 | mutex_lock(&priv->reg_mutex); |
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index a95ed958df5b..1eec7bdc283a 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -410,7 +410,6 @@ struct mt7530_port {
410 | bool enable; | 410 | bool enable; |
411 | u32 pm; | 411 | u32 pm; |
412 | u16 pvid; | 412 | u16 pvid; |
413 | bool vlan_filtering; | ||
414 | }; | 413 | }; |
415 | 414 | ||
416 | /* struct mt7530_priv - This is the main data structure for holding the state | 415 | /* struct mt7530_priv - This is the main data structure for holding the state |
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 0b3e51f248c2..2a2489b5196d 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -1,11 +1,7 @@
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * net/dsa/mv88e6060.c - Driver for Marvell 88e6060 switch chips | 3 | * net/dsa/mv88e6060.c - Driver for Marvell 88e6060 switch chips |
3 | * Copyright (c) 2008-2009 Marvell Semiconductor | 4 | * Copyright (c) 2008-2009 Marvell Semiconductor |
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | */ | 5 | */ |
10 | 6 | ||
11 | #include <linux/delay.h> | 7 | #include <linux/delay.h> |
@@ -18,40 +14,16 @@
18 | #include <net/dsa.h> | 14 | #include <net/dsa.h> |
19 | #include "mv88e6060.h" | 15 | #include "mv88e6060.h" |
20 | 16 | ||
21 | static int reg_read(struct dsa_switch *ds, int addr, int reg) | 17 | static int reg_read(struct mv88e6060_priv *priv, int addr, int reg) |
22 | { | 18 | { |
23 | struct mv88e6060_priv *priv = ds->priv; | ||
24 | |||
25 | return mdiobus_read_nested(priv->bus, priv->sw_addr + addr, reg); | 19 | return mdiobus_read_nested(priv->bus, priv->sw_addr + addr, reg); |
26 | } | 20 | } |
27 | 21 | ||
28 | #define REG_READ(addr, reg) \ | 22 | static int reg_write(struct mv88e6060_priv *priv, int addr, int reg, u16 val) |
29 | ({ \ | ||
30 | int __ret; \ | ||
31 | \ | ||
32 | __ret = reg_read(ds, addr, reg); \ | ||
33 | if (__ret < 0) \ | ||
34 | return __ret; \ | ||
35 | __ret; \ | ||
36 | }) | ||
37 | |||
38 | |||
39 | static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) | ||
40 | { | 23 | { |
41 | struct mv88e6060_priv *priv = ds->priv; | ||
42 | |||
43 | return mdiobus_write_nested(priv->bus, priv->sw_addr + addr, reg, val); | 24 | return mdiobus_write_nested(priv->bus, priv->sw_addr + addr, reg, val); |
44 | } | 25 | } |
45 | 26 | ||
46 | #define REG_WRITE(addr, reg, val) \ | ||
47 | ({ \ | ||
48 | int __ret; \ | ||
49 | \ | ||
50 | __ret = reg_write(ds, addr, reg, val); \ | ||
51 | if (__ret < 0) \ | ||
52 | return __ret; \ | ||
53 | }) | ||
54 | |||
55 | static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr) | 27 | static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr) |
56 | { | 28 | { |
57 | int ret; | 29 | int ret; |
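The hunk above removes the REG_READ()/REG_WRITE() statement-expression macros, whose hidden `return` statements made error handling invisible at the call sites; the rest of the file now calls reg_read()/reg_write() directly and checks each result explicitly, as the reworked mv88e6060_switch_reset() further down shows. Here is a stand-alone before/after sketch of that conversion (the dummy reg_read() and the two *_style() helpers are invented; statement expressions are a GCC/clang extension, just as in the original macro):

#include <stdio.h>

static int reg_read(int addr, int reg)
{
        /* Pretend MDIO access; a negative value would signal an I/O error. */
        return (addr << 8) | reg;
}

/* Old style: evaluates to the register value, but may return from the
 * *calling* function behind the reader's back. */
#define REG_READ(addr, reg)                             \
        ({                                              \
                int __ret = reg_read(addr, reg);        \
                if (__ret < 0)                          \
                        return __ret;                   \
                __ret;                                  \
        })

static int old_style(void)
{
        int val = REG_READ(1, 2);       /* hidden early return on error */

        return val;
}

/* New style: the error path is spelled out and visible at the call site. */
static int new_style(void)
{
        int val = reg_read(1, 2);

        if (val < 0)
                return val;
        return val;
}

int main(void)
{
        printf("old=%d new=%d\n", old_style(), new_style());
        return 0;
}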
@@ -76,28 +48,7 @@ static enum dsa_tag_protocol mv88e6060_get_tag_protocol(struct dsa_switch *ds,
76 | return DSA_TAG_PROTO_TRAILER; | 48 | return DSA_TAG_PROTO_TRAILER; |
77 | } | 49 | } |
78 | 50 | ||
79 | static const char *mv88e6060_drv_probe(struct device *dsa_dev, | 51 | static int mv88e6060_switch_reset(struct mv88e6060_priv *priv) |
80 | struct device *host_dev, int sw_addr, | ||
81 | void **_priv) | ||
82 | { | ||
83 | struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); | ||
84 | struct mv88e6060_priv *priv; | ||
85 | const char *name; | ||
86 | |||
87 | name = mv88e6060_get_name(bus, sw_addr); | ||
88 | if (name) { | ||
89 | priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL); | ||
90 | if (!priv) | ||
91 | return NULL; | ||
92 | *_priv = priv; | ||
93 | priv->bus = bus; | ||
94 | priv->sw_addr = sw_addr; | ||
95 | } | ||
96 | |||
97 | return name; | ||
98 | } | ||
99 | |||
100 | static int mv88e6060_switch_reset(struct dsa_switch *ds) | ||
101 | { | 52 | { |
102 | int i; | 53 | int i; |
103 | int ret; | 54 | int ret; |
@@ -105,23 +56,32 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds) | |||
105 | 56 | ||
106 | /* Set all ports to the disabled state. */ | 57 | /* Set all ports to the disabled state. */ |
107 | for (i = 0; i < MV88E6060_PORTS; i++) { | 58 | for (i = 0; i < MV88E6060_PORTS; i++) { |
108 | ret = REG_READ(REG_PORT(i), PORT_CONTROL); | 59 | ret = reg_read(priv, REG_PORT(i), PORT_CONTROL); |
109 | REG_WRITE(REG_PORT(i), PORT_CONTROL, | 60 | if (ret < 0) |
110 | ret & ~PORT_CONTROL_STATE_MASK); | 61 | return ret; |
62 | ret = reg_write(priv, REG_PORT(i), PORT_CONTROL, | ||
63 | ret & ~PORT_CONTROL_STATE_MASK); | ||
64 | if (ret) | ||
65 | return ret; | ||
111 | } | 66 | } |
112 | 67 | ||
113 | /* Wait for transmit queues to drain. */ | 68 | /* Wait for transmit queues to drain. */ |
114 | usleep_range(2000, 4000); | 69 | usleep_range(2000, 4000); |
115 | 70 | ||
116 | /* Reset the switch. */ | 71 | /* Reset the switch. */ |
117 | REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, | 72 | ret = reg_write(priv, REG_GLOBAL, GLOBAL_ATU_CONTROL, |
118 | GLOBAL_ATU_CONTROL_SWRESET | | 73 | GLOBAL_ATU_CONTROL_SWRESET | |
119 | GLOBAL_ATU_CONTROL_LEARNDIS); | 74 | GLOBAL_ATU_CONTROL_LEARNDIS); |
75 | if (ret) | ||
76 | return ret; | ||
120 | 77 | ||
121 | /* Wait up to one second for reset to complete. */ | 78 | /* Wait up to one second for reset to complete. */ |
122 | timeout = jiffies + 1 * HZ; | 79 | timeout = jiffies + 1 * HZ; |
123 | while (time_before(jiffies, timeout)) { | 80 | while (time_before(jiffies, timeout)) { |
124 | ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS); | 81 | ret = reg_read(priv, REG_GLOBAL, GLOBAL_STATUS); |
82 | if (ret < 0) | ||
83 | return ret; | ||
84 | |||
125 | if (ret & GLOBAL_STATUS_INIT_READY) | 85 | if (ret & GLOBAL_STATUS_INIT_READY) |
126 | break; | 86 | break; |
127 | 87 | ||
@@ -133,61 +93,69 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds) | |||
133 | return 0; | 93 | return 0; |
134 | } | 94 | } |
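For readers unfamiliar with the jiffies-based polling done above ("Wait up to one second for reset to complete"), here is a rough userspace analogue: poll a ready flag against a wall-clock deadline instead of jiffies/time_before(). All names and the fake init_ready() condition are hypothetical; this is an illustration of the pattern, not driver code.

	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	/* Pretend the hardware reports "init ready" on the fourth poll. */
	static bool init_ready(int iteration)
	{
		return iteration >= 3;
	}

	int main(void)
	{
		struct timespec start, now;
		int i = 0;

		clock_gettime(CLOCK_MONOTONIC, &start);
		for (;;) {
			if (init_ready(i++)) {
				printf("reset complete after %d polls\n", i);
				return 0;
			}
			clock_gettime(CLOCK_MONOTONIC, &now);
			/* Give up after roughly one second, like the driver. */
			if (now.tv_sec - start.tv_sec >= 1) {
				fprintf(stderr, "timed out waiting for reset\n");
				return 1;
			}
		}
	}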
135 | 95 | ||
136 | static int mv88e6060_setup_global(struct dsa_switch *ds) | 96 | static int mv88e6060_setup_global(struct mv88e6060_priv *priv) |
137 | { | 97 | { |
98 | int ret; | ||
99 | |||
138 | /* Disable discarding of frames with excessive collisions, | 100 | /* Disable discarding of frames with excessive collisions, |
139 | * set the maximum frame size to 1536 bytes, and mask all | 101 | * set the maximum frame size to 1536 bytes, and mask all |
140 | * interrupt sources. | 102 | * interrupt sources. |
141 | */ | 103 | */ |
142 | REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536); | 104 | ret = reg_write(priv, REG_GLOBAL, GLOBAL_CONTROL, |
105 | GLOBAL_CONTROL_MAX_FRAME_1536); | ||
106 | if (ret) | ||
107 | return ret; | ||
143 | 108 | ||
144 | /* Disable automatic address learning. | 109 | /* Disable automatic address learning. |
145 | */ | 110 | */ |
146 | REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, | 111 | return reg_write(priv, REG_GLOBAL, GLOBAL_ATU_CONTROL, |
147 | GLOBAL_ATU_CONTROL_LEARNDIS); | 112 | GLOBAL_ATU_CONTROL_LEARNDIS); |
148 | |||
149 | return 0; | ||
150 | } | 113 | } |
151 | 114 | ||
152 | static int mv88e6060_setup_port(struct dsa_switch *ds, int p) | 115 | static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p) |
153 | { | 116 | { |
154 | int addr = REG_PORT(p); | 117 | int addr = REG_PORT(p); |
118 | int ret; | ||
155 | 119 | ||
156 | /* Do not force flow control, disable Ingress and Egress | 120 | /* Do not force flow control, disable Ingress and Egress |
157 | * Header tagging, disable VLAN tunneling, and set the port | 121 | * Header tagging, disable VLAN tunneling, and set the port |
158 | * state to Forwarding. Additionally, if this is the CPU | 122 | * state to Forwarding. Additionally, if this is the CPU |
159 | * port, enable Ingress and Egress Trailer tagging mode. | 123 | * port, enable Ingress and Egress Trailer tagging mode. |
160 | */ | 124 | */ |
161 | REG_WRITE(addr, PORT_CONTROL, | 125 | ret = reg_write(priv, addr, PORT_CONTROL, |
162 | dsa_is_cpu_port(ds, p) ? | 126 | dsa_is_cpu_port(priv->ds, p) ? |
163 | PORT_CONTROL_TRAILER | | 127 | PORT_CONTROL_TRAILER | |
164 | PORT_CONTROL_INGRESS_MODE | | 128 | PORT_CONTROL_INGRESS_MODE | |
165 | PORT_CONTROL_STATE_FORWARDING : | 129 | PORT_CONTROL_STATE_FORWARDING : |
166 | PORT_CONTROL_STATE_FORWARDING); | 130 | PORT_CONTROL_STATE_FORWARDING); |
131 | if (ret) | ||
132 | return ret; | ||
167 | 133 | ||
168 | /* Port based VLAN map: give each port its own address | 134 | /* Port based VLAN map: give each port its own address |
169 | * database, allow the CPU port to talk to each of the 'real' | 135 | * database, allow the CPU port to talk to each of the 'real' |
170 | * ports, and allow each of the 'real' ports to only talk to | 136 | * ports, and allow each of the 'real' ports to only talk to |
171 | * the CPU port. | 137 | * the CPU port. |
172 | */ | 138 | */ |
173 | REG_WRITE(addr, PORT_VLAN_MAP, | 139 | ret = reg_write(priv, addr, PORT_VLAN_MAP, |
174 | ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) | | 140 | ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) | |
175 | (dsa_is_cpu_port(ds, p) ? dsa_user_ports(ds) : | 141 | (dsa_is_cpu_port(priv->ds, p) ? |
176 | BIT(dsa_to_port(ds, p)->cpu_dp->index))); | 142 | dsa_user_ports(priv->ds) : |
143 | BIT(dsa_to_port(priv->ds, p)->cpu_dp->index))); | ||
144 | if (ret) | ||
145 | return ret; | ||
177 | 146 | ||
178 | /* Port Association Vector: when learning source addresses | 147 | /* Port Association Vector: when learning source addresses |
179 | * of packets, add the address to the address database using | 148 | * of packets, add the address to the address database using |
180 | * a port bitmap that has only the bit for this port set and | 149 | * a port bitmap that has only the bit for this port set and |
181 | * the other bits clear. | 150 | * the other bits clear. |
182 | */ | 151 | */ |
183 | REG_WRITE(addr, PORT_ASSOC_VECTOR, BIT(p)); | 152 | return reg_write(priv, addr, PORT_ASSOC_VECTOR, BIT(p)); |
184 | |||
185 | return 0; | ||
186 | } | 153 | } |
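To make the port-based VLAN map comment above concrete, here is a minimal userspace sketch (not driver code) of the PORT_VLAN_MAP values mv88e6060_setup_port() ends up programming: the CPU port may reach every user port, each user port may only reach the CPU port, and each port gets its own address database. NUM_PORTS and CPU_PORT are example values, and PORT_VLAN_MAP_DBNUM_SHIFT = 12 is an assumption taken from mv88e6060.h, which is not part of this hunk.

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_PORTS			6
	#define CPU_PORT			5
	#define PORT_VLAN_MAP_DBNUM_SHIFT	12	/* assumed, see mv88e6060.h */

	int main(void)
	{
		uint16_t user_mask = 0;
		int p;

		/* Equivalent of dsa_user_ports(): every non-CPU port. */
		for (p = 0; p < NUM_PORTS; p++)
			if (p != CPU_PORT)
				user_mask |= 1 << p;

		for (p = 0; p < NUM_PORTS; p++) {
			/* Per-port address database in the DBNum field. */
			uint16_t map = (p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT;

			/* CPU port sees all user ports; user ports see only the CPU port. */
			map |= (p == CPU_PORT) ? user_mask : 1 << CPU_PORT;
			printf("port %d: PORT_VLAN_MAP = 0x%04x\n", p, map);
		}
		return 0;
	}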
187 | 154 | ||
188 | static int mv88e6060_setup_addr(struct dsa_switch *ds) | 155 | static int mv88e6060_setup_addr(struct mv88e6060_priv *priv) |
189 | { | 156 | { |
190 | u8 addr[ETH_ALEN]; | 157 | u8 addr[ETH_ALEN]; |
158 | int ret; | ||
191 | u16 val; | 159 | u16 val; |
192 | 160 | ||
193 | eth_random_addr(addr); | 161 | eth_random_addr(addr); |
@@ -199,34 +167,43 @@ static int mv88e6060_setup_addr(struct dsa_switch *ds) | |||
199 | */ | 167 | */ |
200 | val &= 0xfeff; | 168 | val &= 0xfeff; |
201 | 169 | ||
202 | REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, val); | 170 | ret = reg_write(priv, REG_GLOBAL, GLOBAL_MAC_01, val); |
203 | REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]); | 171 | if (ret) |
204 | REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]); | 172 | return ret; |
173 | |||
174 | ret = reg_write(priv, REG_GLOBAL, GLOBAL_MAC_23, | ||
175 | (addr[2] << 8) | addr[3]); | ||
176 | if (ret) | ||
177 | return ret; | ||
205 | 178 | ||
206 | return 0; | 179 | return reg_write(priv, REG_GLOBAL, GLOBAL_MAC_45, |
180 | (addr[4] << 8) | addr[5]); | ||
207 | } | 181 | } |
208 | 182 | ||
209 | static int mv88e6060_setup(struct dsa_switch *ds) | 183 | static int mv88e6060_setup(struct dsa_switch *ds) |
210 | { | 184 | { |
185 | struct mv88e6060_priv *priv = ds->priv; | ||
211 | int ret; | 186 | int ret; |
212 | int i; | 187 | int i; |
213 | 188 | ||
214 | ret = mv88e6060_switch_reset(ds); | 189 | priv->ds = ds; |
190 | |||
191 | ret = mv88e6060_switch_reset(priv); | ||
215 | if (ret < 0) | 192 | if (ret < 0) |
216 | return ret; | 193 | return ret; |
217 | 194 | ||
218 | /* @@@ initialise atu */ | 195 | /* @@@ initialise atu */ |
219 | 196 | ||
220 | ret = mv88e6060_setup_global(ds); | 197 | ret = mv88e6060_setup_global(priv); |
221 | if (ret < 0) | 198 | if (ret < 0) |
222 | return ret; | 199 | return ret; |
223 | 200 | ||
224 | ret = mv88e6060_setup_addr(ds); | 201 | ret = mv88e6060_setup_addr(priv); |
225 | if (ret < 0) | 202 | if (ret < 0) |
226 | return ret; | 203 | return ret; |
227 | 204 | ||
228 | for (i = 0; i < MV88E6060_PORTS; i++) { | 205 | for (i = 0; i < MV88E6060_PORTS; i++) { |
229 | ret = mv88e6060_setup_port(ds, i); | 206 | ret = mv88e6060_setup_port(priv, i); |
230 | if (ret < 0) | 207 | if (ret < 0) |
231 | return ret; | 208 | return ret; |
232 | } | 209 | } |
@@ -243,51 +220,93 @@ static int mv88e6060_port_to_phy_addr(int port) | |||
243 | 220 | ||
244 | static int mv88e6060_phy_read(struct dsa_switch *ds, int port, int regnum) | 221 | static int mv88e6060_phy_read(struct dsa_switch *ds, int port, int regnum) |
245 | { | 222 | { |
223 | struct mv88e6060_priv *priv = ds->priv; | ||
246 | int addr; | 224 | int addr; |
247 | 225 | ||
248 | addr = mv88e6060_port_to_phy_addr(port); | 226 | addr = mv88e6060_port_to_phy_addr(port); |
249 | if (addr == -1) | 227 | if (addr == -1) |
250 | return 0xffff; | 228 | return 0xffff; |
251 | 229 | ||
252 | return reg_read(ds, addr, regnum); | 230 | return reg_read(priv, addr, regnum); |
253 | } | 231 | } |
254 | 232 | ||
255 | static int | 233 | static int |
256 | mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) | 234 | mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) |
257 | { | 235 | { |
236 | struct mv88e6060_priv *priv = ds->priv; | ||
258 | int addr; | 237 | int addr; |
259 | 238 | ||
260 | addr = mv88e6060_port_to_phy_addr(port); | 239 | addr = mv88e6060_port_to_phy_addr(port); |
261 | if (addr == -1) | 240 | if (addr == -1) |
262 | return 0xffff; | 241 | return 0xffff; |
263 | 242 | ||
264 | return reg_write(ds, addr, regnum, val); | 243 | return reg_write(priv, addr, regnum, val); |
265 | } | 244 | } |
266 | 245 | ||
267 | static const struct dsa_switch_ops mv88e6060_switch_ops = { | 246 | static const struct dsa_switch_ops mv88e6060_switch_ops = { |
268 | .get_tag_protocol = mv88e6060_get_tag_protocol, | 247 | .get_tag_protocol = mv88e6060_get_tag_protocol, |
269 | .probe = mv88e6060_drv_probe, | ||
270 | .setup = mv88e6060_setup, | 248 | .setup = mv88e6060_setup, |
271 | .phy_read = mv88e6060_phy_read, | 249 | .phy_read = mv88e6060_phy_read, |
272 | .phy_write = mv88e6060_phy_write, | 250 | .phy_write = mv88e6060_phy_write, |
273 | }; | 251 | }; |
274 | 252 | ||
275 | static struct dsa_switch_driver mv88e6060_switch_drv = { | 253 | static int mv88e6060_probe(struct mdio_device *mdiodev) |
276 | .ops = &mv88e6060_switch_ops, | ||
277 | }; | ||
278 | |||
279 | static int __init mv88e6060_init(void) | ||
280 | { | 254 | { |
281 | register_switch_driver(&mv88e6060_switch_drv); | 255 | struct device *dev = &mdiodev->dev; |
282 | return 0; | 256 | struct mv88e6060_priv *priv; |
257 | struct dsa_switch *ds; | ||
258 | const char *name; | ||
259 | |||
260 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | ||
261 | if (!priv) | ||
262 | return -ENOMEM; | ||
263 | |||
264 | priv->bus = mdiodev->bus; | ||
265 | priv->sw_addr = mdiodev->addr; | ||
266 | |||
267 | name = mv88e6060_get_name(priv->bus, priv->sw_addr); | ||
268 | if (!name) | ||
269 | return -ENODEV; | ||
270 | |||
271 | dev_info(dev, "switch %s detected\n", name); | ||
272 | |||
273 | ds = dsa_switch_alloc(dev, MV88E6060_PORTS); | ||
274 | if (!ds) | ||
275 | return -ENOMEM; | ||
276 | |||
277 | ds->priv = priv; | ||
278 | ds->dev = dev; | ||
279 | ds->ops = &mv88e6060_switch_ops; | ||
280 | |||
281 | dev_set_drvdata(dev, ds); | ||
282 | |||
283 | return dsa_register_switch(ds); | ||
283 | } | 284 | } |
284 | module_init(mv88e6060_init); | ||
285 | 285 | ||
286 | static void __exit mv88e6060_cleanup(void) | 286 | static void mv88e6060_remove(struct mdio_device *mdiodev) |
287 | { | 287 | { |
288 | unregister_switch_driver(&mv88e6060_switch_drv); | 288 | struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); |
289 | |||
290 | dsa_unregister_switch(ds); | ||
289 | } | 291 | } |
290 | module_exit(mv88e6060_cleanup); | 292 | |
293 | static const struct of_device_id mv88e6060_of_match[] = { | ||
294 | { | ||
295 | .compatible = "marvell,mv88e6060", | ||
296 | }, | ||
297 | { /* sentinel */ }, | ||
298 | }; | ||
299 | |||
300 | static struct mdio_driver mv88e6060_driver = { | ||
301 | .probe = mv88e6060_probe, | ||
302 | .remove = mv88e6060_remove, | ||
303 | .mdiodrv.driver = { | ||
304 | .name = "mv88e6060", | ||
305 | .of_match_table = mv88e6060_of_match, | ||
306 | }, | ||
307 | }; | ||
308 | |||
309 | mdio_module_driver(mv88e6060_driver); | ||
291 | 310 | ||
292 | MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>"); | 311 | MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>"); |
293 | MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip"); | 312 | MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip"); |
diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h index 10249bd16292..c0e7a0f2fb6a 100644 --- a/drivers/net/dsa/mv88e6060.h +++ b/drivers/net/dsa/mv88e6060.h | |||
@@ -117,6 +117,7 @@ struct mv88e6060_priv { | |||
117 | */ | 117 | */ |
118 | struct mii_bus *bus; | 118 | struct mii_bus *bus; |
119 | int sw_addr; | 119 | int sw_addr; |
120 | struct dsa_switch *ds; | ||
120 | }; | 121 | }; |
121 | 122 | ||
122 | #endif | 123 | #endif |
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile index 50de304abe2f..e85755dde90b 100644 --- a/drivers/net/dsa/mv88e6xxx/Makefile +++ b/drivers/net/dsa/mv88e6xxx/Makefile | |||
@@ -12,3 +12,4 @@ mv88e6xxx-objs += phy.o | |||
12 | mv88e6xxx-objs += port.o | 12 | mv88e6xxx-objs += port.o |
13 | mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o | 13 | mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o |
14 | mv88e6xxx-objs += serdes.o | 14 | mv88e6xxx-objs += serdes.o |
15 | mv88e6xxx-objs += smi.o | ||
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index f4e2db44ad91..28414db979b0 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include "port.h" | 43 | #include "port.h" |
44 | #include "ptp.h" | 44 | #include "ptp.h" |
45 | #include "serdes.h" | 45 | #include "serdes.h" |
46 | #include "smi.h" | ||
46 | 47 | ||
47 | static void assert_reg_lock(struct mv88e6xxx_chip *chip) | 48 | static void assert_reg_lock(struct mv88e6xxx_chip *chip) |
48 | { | 49 | { |
@@ -52,149 +53,6 @@ static void assert_reg_lock(struct mv88e6xxx_chip *chip) | |||
52 | } | 53 | } |
53 | } | 54 | } |
54 | 55 | ||
55 | /* The switch ADDR[4:1] configuration pins define the chip SMI device address | ||
56 | * (ADDR[0] is always zero, thus only even SMI addresses can be strapped). | ||
57 | * | ||
58 | * When ADDR is all zero, the chip uses Single-chip Addressing Mode, assuming it | ||
59 | * is the only device connected to the SMI master. In this mode it responds to | ||
60 | * all 32 possible SMI addresses, and thus maps directly the internal devices. | ||
61 | * | ||
62 | * When ADDR is non-zero, the chip uses Multi-chip Addressing Mode, allowing | ||
63 | * multiple devices to share the SMI interface. In this mode it responds to only | ||
64 | * 2 registers, used to indirectly access the internal SMI devices. | ||
65 | */ | ||
66 | |||
67 | static int mv88e6xxx_smi_read(struct mv88e6xxx_chip *chip, | ||
68 | int addr, int reg, u16 *val) | ||
69 | { | ||
70 | if (!chip->smi_ops) | ||
71 | return -EOPNOTSUPP; | ||
72 | |||
73 | return chip->smi_ops->read(chip, addr, reg, val); | ||
74 | } | ||
75 | |||
76 | static int mv88e6xxx_smi_write(struct mv88e6xxx_chip *chip, | ||
77 | int addr, int reg, u16 val) | ||
78 | { | ||
79 | if (!chip->smi_ops) | ||
80 | return -EOPNOTSUPP; | ||
81 | |||
82 | return chip->smi_ops->write(chip, addr, reg, val); | ||
83 | } | ||
84 | |||
85 | static int mv88e6xxx_smi_single_chip_read(struct mv88e6xxx_chip *chip, | ||
86 | int addr, int reg, u16 *val) | ||
87 | { | ||
88 | int ret; | ||
89 | |||
90 | ret = mdiobus_read_nested(chip->bus, addr, reg); | ||
91 | if (ret < 0) | ||
92 | return ret; | ||
93 | |||
94 | *val = ret & 0xffff; | ||
95 | |||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | static int mv88e6xxx_smi_single_chip_write(struct mv88e6xxx_chip *chip, | ||
100 | int addr, int reg, u16 val) | ||
101 | { | ||
102 | int ret; | ||
103 | |||
104 | ret = mdiobus_write_nested(chip->bus, addr, reg, val); | ||
105 | if (ret < 0) | ||
106 | return ret; | ||
107 | |||
108 | return 0; | ||
109 | } | ||
110 | |||
111 | static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_single_chip_ops = { | ||
112 | .read = mv88e6xxx_smi_single_chip_read, | ||
113 | .write = mv88e6xxx_smi_single_chip_write, | ||
114 | }; | ||
115 | |||
116 | static int mv88e6xxx_smi_multi_chip_wait(struct mv88e6xxx_chip *chip) | ||
117 | { | ||
118 | int ret; | ||
119 | int i; | ||
120 | |||
121 | for (i = 0; i < 16; i++) { | ||
122 | ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_CMD); | ||
123 | if (ret < 0) | ||
124 | return ret; | ||
125 | |||
126 | if ((ret & SMI_CMD_BUSY) == 0) | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | return -ETIMEDOUT; | ||
131 | } | ||
132 | |||
133 | static int mv88e6xxx_smi_multi_chip_read(struct mv88e6xxx_chip *chip, | ||
134 | int addr, int reg, u16 *val) | ||
135 | { | ||
136 | int ret; | ||
137 | |||
138 | /* Wait for the bus to become free. */ | ||
139 | ret = mv88e6xxx_smi_multi_chip_wait(chip); | ||
140 | if (ret < 0) | ||
141 | return ret; | ||
142 | |||
143 | /* Transmit the read command. */ | ||
144 | ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD, | ||
145 | SMI_CMD_OP_22_READ | (addr << 5) | reg); | ||
146 | if (ret < 0) | ||
147 | return ret; | ||
148 | |||
149 | /* Wait for the read command to complete. */ | ||
150 | ret = mv88e6xxx_smi_multi_chip_wait(chip); | ||
151 | if (ret < 0) | ||
152 | return ret; | ||
153 | |||
154 | /* Read the data. */ | ||
155 | ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_DATA); | ||
156 | if (ret < 0) | ||
157 | return ret; | ||
158 | |||
159 | *val = ret & 0xffff; | ||
160 | |||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | static int mv88e6xxx_smi_multi_chip_write(struct mv88e6xxx_chip *chip, | ||
165 | int addr, int reg, u16 val) | ||
166 | { | ||
167 | int ret; | ||
168 | |||
169 | /* Wait for the bus to become free. */ | ||
170 | ret = mv88e6xxx_smi_multi_chip_wait(chip); | ||
171 | if (ret < 0) | ||
172 | return ret; | ||
173 | |||
174 | /* Transmit the data to write. */ | ||
175 | ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_DATA, val); | ||
176 | if (ret < 0) | ||
177 | return ret; | ||
178 | |||
179 | /* Transmit the write command. */ | ||
180 | ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD, | ||
181 | SMI_CMD_OP_22_WRITE | (addr << 5) | reg); | ||
182 | if (ret < 0) | ||
183 | return ret; | ||
184 | |||
185 | /* Wait for the write command to complete. */ | ||
186 | ret = mv88e6xxx_smi_multi_chip_wait(chip); | ||
187 | if (ret < 0) | ||
188 | return ret; | ||
189 | |||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_multi_chip_ops = { | ||
194 | .read = mv88e6xxx_smi_multi_chip_read, | ||
195 | .write = mv88e6xxx_smi_multi_chip_write, | ||
196 | }; | ||
197 | |||
198 | int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val) | 56 | int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val) |
199 | { | 57 | { |
200 | int err; | 58 | int err; |
@@ -553,11 +411,28 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link, | |||
553 | int speed, int duplex, int pause, | 411 | int speed, int duplex, int pause, |
554 | phy_interface_t mode) | 412 | phy_interface_t mode) |
555 | { | 413 | { |
414 | struct phylink_link_state state; | ||
556 | int err; | 415 | int err; |
557 | 416 | ||
558 | if (!chip->info->ops->port_set_link) | 417 | if (!chip->info->ops->port_set_link) |
559 | return 0; | 418 | return 0; |
560 | 419 | ||
420 | if (!chip->info->ops->port_link_state) | ||
421 | return 0; | ||
422 | |||
423 | err = chip->info->ops->port_link_state(chip, port, &state); | ||
424 | if (err) | ||
425 | return err; | ||
426 | |||
427 | /* Has anything actually changed? We don't expect the | ||
428 | * interface mode to change without one of the other | ||
429 | * parameters also changing | ||
430 | */ | ||
431 | if (state.link == link && | ||
432 | state.speed == speed && | ||
433 | state.duplex == duplex) | ||
434 | return 0; | ||
435 | |||
561 | /* Port's MAC control must not be changed unless the link is down */ | 436 | /* Port's MAC control must not be changed unless the link is down */ |
562 | err = chip->info->ops->port_set_link(chip, port, 0); | 437 | err = chip->info->ops->port_set_link(chip, port, 0); |
563 | if (err) | 438 | if (err) |
@@ -2411,6 +2286,9 @@ static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port) | |||
2411 | 2286 | ||
2412 | mutex_lock(&chip->reg_lock); | 2287 | mutex_lock(&chip->reg_lock); |
2413 | 2288 | ||
2289 | if (mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED)) | ||
2290 | dev_err(chip->dev, "failed to disable port\n"); | ||
2291 | |||
2414 | if (chip->info->ops->serdes_irq_free) | 2292 | if (chip->info->ops->serdes_irq_free) |
2415 | chip->info->ops->serdes_irq_free(chip, port); | 2293 | chip->info->ops->serdes_irq_free(chip, port); |
2416 | 2294 | ||
@@ -2579,8 +2457,18 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) | |||
2579 | 2457 | ||
2580 | /* Setup Switch Port Registers */ | 2458 | /* Setup Switch Port Registers */ |
2581 | for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { | 2459 | for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { |
2582 | if (dsa_is_unused_port(ds, i)) | 2460 | if (dsa_is_unused_port(ds, i)) { |
2461 | err = mv88e6xxx_port_set_state(chip, i, | ||
2462 | BR_STATE_DISABLED); | ||
2463 | if (err) | ||
2464 | goto unlock; | ||
2465 | |||
2466 | err = mv88e6xxx_serdes_power(chip, i, false); | ||
2467 | if (err) | ||
2468 | goto unlock; | ||
2469 | |||
2583 | continue; | 2470 | continue; |
2471 | } | ||
2584 | 2472 | ||
2585 | err = mv88e6xxx_setup_port(chip, i); | 2473 | err = mv88e6xxx_setup_port(chip, i); |
2586 | if (err) | 2474 | if (err) |
@@ -4615,30 +4503,6 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev) | |||
4615 | return chip; | 4503 | return chip; |
4616 | } | 4504 | } |
4617 | 4505 | ||
4618 | static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, | ||
4619 | struct mii_bus *bus, int sw_addr) | ||
4620 | { | ||
4621 | if (sw_addr == 0) | ||
4622 | chip->smi_ops = &mv88e6xxx_smi_single_chip_ops; | ||
4623 | else if (chip->info->multi_chip) | ||
4624 | chip->smi_ops = &mv88e6xxx_smi_multi_chip_ops; | ||
4625 | else | ||
4626 | return -EINVAL; | ||
4627 | |||
4628 | chip->bus = bus; | ||
4629 | chip->sw_addr = sw_addr; | ||
4630 | |||
4631 | return 0; | ||
4632 | } | ||
4633 | |||
4634 | static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip) | ||
4635 | { | ||
4636 | int i; | ||
4637 | |||
4638 | for (i = 0; i < mv88e6xxx_num_ports(chip); i++) | ||
4639 | chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID; | ||
4640 | } | ||
4641 | |||
4642 | static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds, | 4506 | static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds, |
4643 | int port) | 4507 | int port) |
4644 | { | 4508 | { |
@@ -4647,58 +4511,6 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds, | |||
4647 | return chip->info->tag_protocol; | 4511 | return chip->info->tag_protocol; |
4648 | } | 4512 | } |
4649 | 4513 | ||
4650 | #if IS_ENABLED(CONFIG_NET_DSA_LEGACY) | ||
4651 | static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, | ||
4652 | struct device *host_dev, int sw_addr, | ||
4653 | void **priv) | ||
4654 | { | ||
4655 | struct mv88e6xxx_chip *chip; | ||
4656 | struct mii_bus *bus; | ||
4657 | int err; | ||
4658 | |||
4659 | bus = dsa_host_dev_to_mii_bus(host_dev); | ||
4660 | if (!bus) | ||
4661 | return NULL; | ||
4662 | |||
4663 | chip = mv88e6xxx_alloc_chip(dsa_dev); | ||
4664 | if (!chip) | ||
4665 | return NULL; | ||
4666 | |||
4667 | /* Legacy SMI probing will only support chips similar to 88E6085 */ | ||
4668 | chip->info = &mv88e6xxx_table[MV88E6085]; | ||
4669 | |||
4670 | err = mv88e6xxx_smi_init(chip, bus, sw_addr); | ||
4671 | if (err) | ||
4672 | goto free; | ||
4673 | |||
4674 | err = mv88e6xxx_detect(chip); | ||
4675 | if (err) | ||
4676 | goto free; | ||
4677 | |||
4678 | mv88e6xxx_ports_cmode_init(chip); | ||
4679 | |||
4680 | mutex_lock(&chip->reg_lock); | ||
4681 | err = mv88e6xxx_switch_reset(chip); | ||
4682 | mutex_unlock(&chip->reg_lock); | ||
4683 | if (err) | ||
4684 | goto free; | ||
4685 | |||
4686 | mv88e6xxx_phy_init(chip); | ||
4687 | |||
4688 | err = mv88e6xxx_mdios_register(chip, NULL); | ||
4689 | if (err) | ||
4690 | goto free; | ||
4691 | |||
4692 | *priv = chip; | ||
4693 | |||
4694 | return chip->info->name; | ||
4695 | free: | ||
4696 | devm_kfree(dsa_dev, chip); | ||
4697 | |||
4698 | return NULL; | ||
4699 | } | ||
4700 | #endif | ||
4701 | |||
4702 | static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port, | 4514 | static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port, |
4703 | const struct switchdev_obj_port_mdb *mdb) | 4515 | const struct switchdev_obj_port_mdb *mdb) |
4704 | { | 4516 | { |
@@ -4753,9 +4565,6 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port, | |||
4753 | } | 4565 | } |
4754 | 4566 | ||
4755 | static const struct dsa_switch_ops mv88e6xxx_switch_ops = { | 4567 | static const struct dsa_switch_ops mv88e6xxx_switch_ops = { |
4756 | #if IS_ENABLED(CONFIG_NET_DSA_LEGACY) | ||
4757 | .probe = mv88e6xxx_drv_probe, | ||
4758 | #endif | ||
4759 | .get_tag_protocol = mv88e6xxx_get_tag_protocol, | 4568 | .get_tag_protocol = mv88e6xxx_get_tag_protocol, |
4760 | .setup = mv88e6xxx_setup, | 4569 | .setup = mv88e6xxx_setup, |
4761 | .adjust_link = mv88e6xxx_adjust_link, | 4570 | .adjust_link = mv88e6xxx_adjust_link, |
@@ -4801,10 +4610,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { | |||
4801 | .get_ts_info = mv88e6xxx_get_ts_info, | 4610 | .get_ts_info = mv88e6xxx_get_ts_info, |
4802 | }; | 4611 | }; |
4803 | 4612 | ||
4804 | static struct dsa_switch_driver mv88e6xxx_switch_drv = { | ||
4805 | .ops = &mv88e6xxx_switch_ops, | ||
4806 | }; | ||
4807 | |||
4808 | static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) | 4613 | static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) |
4809 | { | 4614 | { |
4810 | struct device *dev = chip->dev; | 4615 | struct device *dev = chip->dev; |
@@ -4915,7 +4720,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) | |||
4915 | if (err) | 4720 | if (err) |
4916 | goto out; | 4721 | goto out; |
4917 | 4722 | ||
4918 | mv88e6xxx_ports_cmode_init(chip); | ||
4919 | mv88e6xxx_phy_init(chip); | 4723 | mv88e6xxx_phy_init(chip); |
4920 | 4724 | ||
4921 | if (chip->info->ops->get_eeprom) { | 4725 | if (chip->info->ops->get_eeprom) { |
@@ -4932,12 +4736,17 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) | |||
4932 | if (err) | 4736 | if (err) |
4933 | goto out; | 4737 | goto out; |
4934 | 4738 | ||
4935 | chip->irq = of_irq_get(np, 0); | 4739 | if (np) { |
4936 | if (chip->irq == -EPROBE_DEFER) { | 4740 | chip->irq = of_irq_get(np, 0); |
4937 | err = chip->irq; | 4741 | if (chip->irq == -EPROBE_DEFER) { |
4938 | goto out; | 4742 | err = chip->irq; |
4743 | goto out; | ||
4744 | } | ||
4939 | } | 4745 | } |
4940 | 4746 | ||
4747 | if (pdata) | ||
4748 | chip->irq = pdata->irq; | ||
4749 | |||
4941 | /* Has to be performed before the MDIO bus is created, because | 4750 | /* Has to be performed before the MDIO bus is created, because |
4942 | * the PHYs will link their interrupts to these interrupt | 4751 | * the PHYs will link their interrupts to these interrupt |
4943 | * controllers | 4752 | * controllers |
@@ -5047,19 +4856,7 @@ static struct mdio_driver mv88e6xxx_driver = { | |||
5047 | }, | 4856 | }, |
5048 | }; | 4857 | }; |
5049 | 4858 | ||
5050 | static int __init mv88e6xxx_init(void) | 4859 | mdio_module_driver(mv88e6xxx_driver); |
5051 | { | ||
5052 | register_switch_driver(&mv88e6xxx_switch_drv); | ||
5053 | return mdio_driver_register(&mv88e6xxx_driver); | ||
5054 | } | ||
5055 | module_init(mv88e6xxx_init); | ||
5056 | |||
5057 | static void __exit mv88e6xxx_cleanup(void) | ||
5058 | { | ||
5059 | mdio_driver_unregister(&mv88e6xxx_driver); | ||
5060 | unregister_switch_driver(&mv88e6xxx_switch_drv); | ||
5061 | } | ||
5062 | module_exit(mv88e6xxx_cleanup); | ||
5063 | 4860 | ||
5064 | MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>"); | 4861 | MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>"); |
5065 | MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips"); | 4862 | MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips"); |
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 19c07dff0440..faa3fa889f19 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h | |||
@@ -21,17 +21,6 @@ | |||
21 | #include <linux/timecounter.h> | 21 | #include <linux/timecounter.h> |
22 | #include <net/dsa.h> | 22 | #include <net/dsa.h> |
23 | 23 | ||
24 | #define SMI_CMD 0x00 | ||
25 | #define SMI_CMD_BUSY BIT(15) | ||
26 | #define SMI_CMD_CLAUSE_22 BIT(12) | ||
27 | #define SMI_CMD_OP_22_WRITE ((1 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22) | ||
28 | #define SMI_CMD_OP_22_READ ((2 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22) | ||
29 | #define SMI_CMD_OP_45_WRITE_ADDR ((0 << 10) | SMI_CMD_BUSY) | ||
30 | #define SMI_CMD_OP_45_WRITE_DATA ((1 << 10) | SMI_CMD_BUSY) | ||
31 | #define SMI_CMD_OP_45_READ_DATA ((2 << 10) | SMI_CMD_BUSY) | ||
32 | #define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY) | ||
33 | #define SMI_DATA 0x01 | ||
34 | |||
35 | #define MV88E6XXX_N_FID 4096 | 24 | #define MV88E6XXX_N_FID 4096 |
36 | 25 | ||
37 | /* PVT limits for 4-bit port and 5-bit switch */ | 26 | /* PVT limits for 4-bit port and 5-bit switch */ |
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index dce84a2a65c7..c44b2822e4dd 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c | |||
@@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, | |||
427 | return 0; | 427 | return 0; |
428 | 428 | ||
429 | lane = mv88e6390x_serdes_get_lane(chip, port); | 429 | lane = mv88e6390x_serdes_get_lane(chip, port); |
430 | if (lane < 0) | 430 | if (lane < 0 && lane != -ENODEV) |
431 | return lane; | 431 | return lane; |
432 | 432 | ||
433 | if (chip->ports[port].serdes_irq) { | 433 | if (lane >= 0) { |
434 | err = mv88e6390_serdes_irq_disable(chip, port, lane); | 434 | if (chip->ports[port].serdes_irq) { |
435 | err = mv88e6390_serdes_irq_disable(chip, port, lane); | ||
436 | if (err) | ||
437 | return err; | ||
438 | } | ||
439 | |||
440 | err = mv88e6390x_serdes_power(chip, port, false); | ||
435 | if (err) | 441 | if (err) |
436 | return err; | 442 | return err; |
437 | } | 443 | } |
438 | 444 | ||
439 | err = mv88e6390x_serdes_power(chip, port, false); | 445 | chip->ports[port].cmode = 0; |
440 | if (err) | ||
441 | return err; | ||
442 | 446 | ||
443 | if (cmode) { | 447 | if (cmode) { |
444 | err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); | 448 | err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); |
@@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, | |||
452 | if (err) | 456 | if (err) |
453 | return err; | 457 | return err; |
454 | 458 | ||
459 | chip->ports[port].cmode = cmode; | ||
460 | |||
461 | lane = mv88e6390x_serdes_get_lane(chip, port); | ||
462 | if (lane < 0) | ||
463 | return lane; | ||
464 | |||
455 | err = mv88e6390x_serdes_power(chip, port, true); | 465 | err = mv88e6390x_serdes_power(chip, port, true); |
456 | if (err) | 466 | if (err) |
457 | return err; | 467 | return err; |
@@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, | |||
463 | } | 473 | } |
464 | } | 474 | } |
465 | 475 | ||
466 | chip->ports[port].cmode = cmode; | ||
467 | |||
468 | return 0; | 476 | return 0; |
469 | } | 477 | } |
470 | 478 | ||
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index c7bed263a0f4..39c85e98fb92 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h | |||
@@ -52,7 +52,6 @@ | |||
52 | #define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005 | 52 | #define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005 |
53 | #define MV88E6185_PORT_STS_CMODE_PHY 0x0006 | 53 | #define MV88E6185_PORT_STS_CMODE_PHY 0x0006 |
54 | #define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007 | 54 | #define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007 |
55 | #define MV88E6XXX_PORT_STS_CMODE_INVALID 0xff | ||
56 | 55 | ||
57 | /* Offset 0x01: MAC (or PCS or Physical) Control Register */ | 56 | /* Offset 0x01: MAC (or PCS or Physical) Control Register */ |
58 | #define MV88E6XXX_PORT_MAC_CTL 0x01 | 57 | #define MV88E6XXX_PORT_MAC_CTL 0x01 |
diff --git a/drivers/net/dsa/mv88e6xxx/smi.c b/drivers/net/dsa/mv88e6xxx/smi.c new file mode 100644 index 000000000000..96f7d2685bdc --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/smi.c | |||
@@ -0,0 +1,158 @@ | |||
1 | /* | ||
2 | * Marvell 88E6xxx System Management Interface (SMI) support | ||
3 | * | ||
4 | * Copyright (c) 2008 Marvell Semiconductor | ||
5 | * | ||
6 | * Copyright (c) 2019 Vivien Didelot <vivien.didelot@gmail.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include "chip.h" | ||
15 | #include "smi.h" | ||
16 | |||
17 | /* The switch ADDR[4:1] configuration pins define the chip SMI device address | ||
18 | * (ADDR[0] is always zero, thus only even SMI addresses can be strapped). | ||
19 | * | ||
20 | * When ADDR is all zero, the chip uses Single-chip Addressing Mode, assuming it | ||
21 | * is the only device connected to the SMI master. In this mode it responds to | ||
22 | * all 32 possible SMI addresses, and thus maps directly the internal devices. | ||
23 | * | ||
24 | * When ADDR is non-zero, the chip uses Multi-chip Addressing Mode, allowing | ||
25 | * multiple devices to share the SMI interface. In this mode it responds to only | ||
26 | * 2 registers, used to indirectly access the internal SMI devices. | ||
27 | */ | ||
28 | |||
29 | static int mv88e6xxx_smi_direct_read(struct mv88e6xxx_chip *chip, | ||
30 | int dev, int reg, u16 *data) | ||
31 | { | ||
32 | int ret; | ||
33 | |||
34 | ret = mdiobus_read_nested(chip->bus, dev, reg); | ||
35 | if (ret < 0) | ||
36 | return ret; | ||
37 | |||
38 | *data = ret & 0xffff; | ||
39 | |||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | static int mv88e6xxx_smi_direct_write(struct mv88e6xxx_chip *chip, | ||
44 | int dev, int reg, u16 data) | ||
45 | { | ||
46 | int ret; | ||
47 | |||
48 | ret = mdiobus_write_nested(chip->bus, dev, reg, data); | ||
49 | if (ret < 0) | ||
50 | return ret; | ||
51 | |||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip, | ||
56 | int dev, int reg, int bit, int val) | ||
57 | { | ||
58 | u16 data; | ||
59 | int err; | ||
60 | int i; | ||
61 | |||
62 | for (i = 0; i < 16; i++) { | ||
63 | err = mv88e6xxx_smi_direct_read(chip, dev, reg, &data); | ||
64 | if (err) | ||
65 | return err; | ||
66 | |||
67 | if (!!(data >> bit) == !!val) | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | return -ETIMEDOUT; | ||
72 | } | ||
73 | |||
74 | static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_direct_ops = { | ||
75 | .read = mv88e6xxx_smi_direct_read, | ||
76 | .write = mv88e6xxx_smi_direct_write, | ||
77 | }; | ||
78 | |||
79 | /* Offset 0x00: SMI Command Register | ||
80 | * Offset 0x01: SMI Data Register | ||
81 | */ | ||
82 | |||
83 | static int mv88e6xxx_smi_indirect_read(struct mv88e6xxx_chip *chip, | ||
84 | int dev, int reg, u16 *data) | ||
85 | { | ||
86 | int err; | ||
87 | |||
88 | err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr, | ||
89 | MV88E6XXX_SMI_CMD, 15, 0); | ||
90 | if (err) | ||
91 | return err; | ||
92 | |||
93 | err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr, | ||
94 | MV88E6XXX_SMI_CMD, | ||
95 | MV88E6XXX_SMI_CMD_BUSY | | ||
96 | MV88E6XXX_SMI_CMD_MODE_22 | | ||
97 | MV88E6XXX_SMI_CMD_OP_22_READ | | ||
98 | (dev << 5) | reg); | ||
99 | if (err) | ||
100 | return err; | ||
101 | |||
102 | err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr, | ||
103 | MV88E6XXX_SMI_CMD, 15, 0); | ||
104 | if (err) | ||
105 | return err; | ||
106 | |||
107 | return mv88e6xxx_smi_direct_read(chip, chip->sw_addr, | ||
108 | MV88E6XXX_SMI_DATA, data); | ||
109 | } | ||
110 | |||
111 | static int mv88e6xxx_smi_indirect_write(struct mv88e6xxx_chip *chip, | ||
112 | int dev, int reg, u16 data) | ||
113 | { | ||
114 | int err; | ||
115 | |||
116 | err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr, | ||
117 | MV88E6XXX_SMI_CMD, 15, 0); | ||
118 | if (err) | ||
119 | return err; | ||
120 | |||
121 | err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr, | ||
122 | MV88E6XXX_SMI_DATA, data); | ||
123 | if (err) | ||
124 | return err; | ||
125 | |||
126 | err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr, | ||
127 | MV88E6XXX_SMI_CMD, | ||
128 | MV88E6XXX_SMI_CMD_BUSY | | ||
129 | MV88E6XXX_SMI_CMD_MODE_22 | | ||
130 | MV88E6XXX_SMI_CMD_OP_22_WRITE | | ||
131 | (dev << 5) | reg); | ||
132 | if (err) | ||
133 | return err; | ||
134 | |||
135 | return mv88e6xxx_smi_direct_wait(chip, chip->sw_addr, | ||
136 | MV88E6XXX_SMI_CMD, 15, 0); | ||
137 | } | ||
138 | |||
139 | static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_indirect_ops = { | ||
140 | .read = mv88e6xxx_smi_indirect_read, | ||
141 | .write = mv88e6xxx_smi_indirect_write, | ||
142 | }; | ||
143 | |||
144 | int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, | ||
145 | struct mii_bus *bus, int sw_addr) | ||
146 | { | ||
147 | if (sw_addr == 0) | ||
148 | chip->smi_ops = &mv88e6xxx_smi_direct_ops; | ||
149 | else if (chip->info->multi_chip) | ||
150 | chip->smi_ops = &mv88e6xxx_smi_indirect_ops; | ||
151 | else | ||
152 | return -EINVAL; | ||
153 | |||
154 | chip->bus = bus; | ||
155 | chip->sw_addr = sw_addr; | ||
156 | |||
157 | return 0; | ||
158 | } | ||
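The following userspace-only sketch walks through the indirect (Multi-chip) access sequence implemented above: wait for BUSY to clear, write the command, wait again, then read SMI_DATA. The fake register array stands in for mdiobus_{read,write}_nested(); the register offsets and bits match the MV88E6XXX_SMI_* definitions in smi.h below, while everything else (names, the canned 0xbeef result) is hypothetical.

	#include <stdint.h>
	#include <stdio.h>

	#define SMI_CMD		0x00
	#define SMI_CMD_BUSY	0x8000
	#define SMI_CMD_MODE_22	0x1000
	#define SMI_CMD_OP_READ	0x0800
	#define SMI_DATA	0x01

	static uint16_t regs[2];	/* the only two registers visible in indirect mode */

	static void bus_write(int reg, uint16_t val)
	{
		regs[reg] = val;
		if (reg == SMI_CMD && (val & SMI_CMD_BUSY)) {
			/* A real switch performs the internal access here and
			 * then clears BUSY; fake a fixed result instead.
			 */
			regs[SMI_DATA] = 0xbeef;
			regs[SMI_CMD] &= ~SMI_CMD_BUSY;
		}
	}

	static int wait_not_busy(void)
	{
		int i;

		for (i = 0; i < 16; i++)
			if (!(regs[SMI_CMD] & SMI_CMD_BUSY))
				return 0;
		return -1;	/* timed out */
	}

	static int indirect_read(int dev, int reg, uint16_t *val)
	{
		if (wait_not_busy())
			return -1;
		/* Clause 22 read of internal device 'dev', register 'reg'. */
		bus_write(SMI_CMD, SMI_CMD_BUSY | SMI_CMD_MODE_22 |
				   SMI_CMD_OP_READ | (dev << 5) | reg);
		if (wait_not_busy())
			return -1;
		*val = regs[SMI_DATA];
		return 0;
	}

	int main(void)
	{
		uint16_t val;

		if (!indirect_read(0x10, 0x03, &val))
			printf("read 0x%04x\n", val);
		return 0;
	}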
diff --git a/drivers/net/dsa/mv88e6xxx/smi.h b/drivers/net/dsa/mv88e6xxx/smi.h new file mode 100644 index 000000000000..35e6403b65dc --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/smi.h | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * Marvell 88E6xxx System Management Interface (SMI) support | ||
3 | * | ||
4 | * Copyright (c) 2008 Marvell Semiconductor | ||
5 | * | ||
6 | * Copyright (c) 2019 Vivien Didelot <vivien.didelot@gmail.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #ifndef _MV88E6XXX_SMI_H | ||
15 | #define _MV88E6XXX_SMI_H | ||
16 | |||
17 | #include "chip.h" | ||
18 | |||
19 | /* Offset 0x00: SMI Command Register */ | ||
20 | #define MV88E6XXX_SMI_CMD 0x00 | ||
21 | #define MV88E6XXX_SMI_CMD_BUSY 0x8000 | ||
22 | #define MV88E6XXX_SMI_CMD_MODE_MASK 0x1000 | ||
23 | #define MV88E6XXX_SMI_CMD_MODE_45 0x0000 | ||
24 | #define MV88E6XXX_SMI_CMD_MODE_22 0x1000 | ||
25 | #define MV88E6XXX_SMI_CMD_OP_MASK 0x0c00 | ||
26 | #define MV88E6XXX_SMI_CMD_OP_22_WRITE 0x0400 | ||
27 | #define MV88E6XXX_SMI_CMD_OP_22_READ 0x0800 | ||
28 | #define MV88E6XXX_SMI_CMD_OP_45_WRITE_ADDR 0x0000 | ||
29 | #define MV88E6XXX_SMI_CMD_OP_45_WRITE_DATA 0x0400 | ||
30 | #define MV88E6XXX_SMI_CMD_OP_45_READ_DATA 0x0800 | ||
31 | #define MV88E6XXX_SMI_CMD_OP_45_READ_DATA_INC 0x0c00 | ||
32 | #define MV88E6XXX_SMI_CMD_DEV_ADDR_MASK 0x003e | ||
33 | #define MV88E6XXX_SMI_CMD_REG_ADDR_MASK 0x001f | ||
34 | |||
35 | /* Offset 0x01: SMI Data Register */ | ||
36 | #define MV88E6XXX_SMI_DATA 0x01 | ||
37 | |||
38 | int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, | ||
39 | struct mii_bus *bus, int sw_addr); | ||
40 | |||
41 | static inline int mv88e6xxx_smi_read(struct mv88e6xxx_chip *chip, | ||
42 | int dev, int reg, u16 *data) | ||
43 | { | ||
44 | if (chip->smi_ops && chip->smi_ops->read) | ||
45 | return chip->smi_ops->read(chip, dev, reg, data); | ||
46 | |||
47 | return -EOPNOTSUPP; | ||
48 | } | ||
49 | |||
50 | static inline int mv88e6xxx_smi_write(struct mv88e6xxx_chip *chip, | ||
51 | int dev, int reg, u16 data) | ||
52 | { | ||
53 | if (chip->smi_ops && chip->smi_ops->write) | ||
54 | return chip->smi_ops->write(chip, dev, reg, data); | ||
55 | |||
56 | return -EOPNOTSUPP; | ||
57 | } | ||
58 | |||
59 | #endif /* _MV88E6XXX_SMI_H */ | ||
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 576b37d12a63..c4fa400efdcc 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c | |||
@@ -481,6 +481,155 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) | |||
481 | qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask); | 481 | qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask); |
482 | } | 482 | } |
483 | 483 | ||
484 | static u32 | ||
485 | qca8k_port_to_phy(int port) | ||
486 | { | ||
487 | /* From Andrew Lunn: | ||
488 | * Port 0 has no internal phy. | ||
489 | * Port 1 has an internal PHY at MDIO address 0. | ||
490 | * Port 2 has an internal PHY at MDIO address 1. | ||
491 | * ... | ||
492 | * Port 5 has an internal PHY at MDIO address 4. | ||
493 | * Port 6 has no internal PHY. | ||
494 | */ | ||
495 | |||
496 | return port - 1; | ||
497 | } | ||
498 | |||
499 | static int | ||
500 | qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data) | ||
501 | { | ||
502 | u32 phy, val; | ||
503 | |||
504 | if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) | ||
505 | return -EINVAL; | ||
506 | |||
507 | /* The caller is responsible for not passing bad ports, | ||
508 | * but guard against out-of-range accesses anyway. | ||
509 | */ | ||
510 | phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR; | ||
511 | val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | | ||
512 | QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | | ||
513 | QCA8K_MDIO_MASTER_REG_ADDR(regnum) | | ||
514 | QCA8K_MDIO_MASTER_DATA(data); | ||
515 | |||
516 | qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val); | ||
517 | |||
518 | return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, | ||
519 | QCA8K_MDIO_MASTER_BUSY); | ||
520 | } | ||
521 | |||
522 | static int | ||
523 | qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum) | ||
524 | { | ||
525 | u32 phy, val; | ||
526 | |||
527 | if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) | ||
528 | return -EINVAL; | ||
529 | |||
530 | /* The caller is responsible for not passing bad ports, | ||
531 | * but guard against out-of-range accesses anyway. | ||
532 | */ | ||
533 | phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR; | ||
534 | val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | | ||
535 | QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | | ||
536 | QCA8K_MDIO_MASTER_REG_ADDR(regnum); | ||
537 | |||
538 | qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val); | ||
539 | |||
540 | if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, | ||
541 | QCA8K_MDIO_MASTER_BUSY)) | ||
542 | return -ETIMEDOUT; | ||
543 | |||
544 | val = (qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL) & | ||
545 | QCA8K_MDIO_MASTER_DATA_MASK); | ||
546 | |||
547 | return val; | ||
548 | } | ||
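As a rough sketch of how qca8k_mdio_write()/qca8k_mdio_read() above assemble the control word, the standalone program below builds the value for a write to register 0x1f of the PHY behind user port 3, using the bit layout of the QCA8K_MDIO_MASTER_* macros added to qca8k.h later in this diff. The local macro names and the sample phy/reg/data values are illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	#define MDIO_MASTER_BUSY	(1u << 31)
	#define MDIO_MASTER_EN		(1u << 30)
	#define MDIO_MASTER_WRITE	0u
	#define MDIO_MASTER_PHY_ADDR(x)	((uint32_t)(x) << 21)
	#define MDIO_MASTER_REG_ADDR(x)	((uint32_t)(x) << 16)
	#define MDIO_MASTER_DATA(x)	((uint32_t)(x))

	int main(void)
	{
		int port = 3;			/* user port 3 ... */
		uint32_t phy = port - 1;	/* ... has internal PHY 2 */
		uint32_t val;

		val = MDIO_MASTER_BUSY | MDIO_MASTER_EN | MDIO_MASTER_WRITE |
		      MDIO_MASTER_PHY_ADDR(phy) | MDIO_MASTER_REG_ADDR(0x1f) |
		      MDIO_MASTER_DATA(0x1234);

		printf("MDIO_MASTER_CTRL <- 0x%08x\n", val);	/* 0xc05f1234 */
		return 0;
	}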
549 | |||
550 | static int | ||
551 | qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data) | ||
552 | { | ||
553 | struct qca8k_priv *priv = ds->priv; | ||
554 | |||
555 | return qca8k_mdio_write(priv, port, regnum, data); | ||
556 | } | ||
557 | |||
558 | static int | ||
559 | qca8k_phy_read(struct dsa_switch *ds, int port, int regnum) | ||
560 | { | ||
561 | struct qca8k_priv *priv = ds->priv; | ||
562 | int ret; | ||
563 | |||
564 | ret = qca8k_mdio_read(priv, port, regnum); | ||
565 | |||
566 | if (ret < 0) | ||
567 | return 0xffff; | ||
568 | |||
569 | return ret; | ||
570 | } | ||
571 | |||
572 | static int | ||
573 | qca8k_setup_mdio_bus(struct qca8k_priv *priv) | ||
574 | { | ||
575 | u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg; | ||
576 | struct device_node *ports, *port; | ||
577 | int err; | ||
578 | |||
579 | ports = of_get_child_by_name(priv->dev->of_node, "ports"); | ||
580 | if (!ports) | ||
581 | return -EINVAL; | ||
582 | |||
583 | for_each_available_child_of_node(ports, port) { | ||
584 | err = of_property_read_u32(port, "reg", ®); | ||
585 | if (err) | ||
586 | return err; | ||
587 | |||
588 | if (!dsa_is_user_port(priv->ds, reg)) | ||
589 | continue; | ||
590 | |||
591 | if (of_property_read_bool(port, "phy-handle")) | ||
592 | external_mdio_mask |= BIT(reg); | ||
593 | else | ||
594 | internal_mdio_mask |= BIT(reg); | ||
595 | } | ||
596 | |||
597 | if (!external_mdio_mask && !internal_mdio_mask) { | ||
598 | dev_err(priv->dev, "no PHYs are defined.\n"); | ||
599 | return -EINVAL; | ||
600 | } | ||
601 | |||
602 | /* The QCA8K_MDIO_MASTER_EN bit, which grants access to PHYs through | ||
603 | * the MDIO_MASTER register, also _disconnects_ the external MDC | ||
604 | * passthrough to the internal PHYs. It's not possible to use both | ||
605 | * configurations at the same time! | ||
606 | * | ||
607 | * Because this came up during the review process: | ||
608 | * If the external mdio-bus driver is capable magically disabling | ||
609 | * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's | ||
610 | * accessors for the time being, it would be possible to pull this | ||
611 | * off. | ||
612 | */ | ||
613 | if (!!external_mdio_mask && !!internal_mdio_mask) { | ||
614 | dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n"); | ||
615 | return -EINVAL; | ||
616 | } | ||
617 | |||
618 | if (external_mdio_mask) { | ||
619 | /* Make sure to disable the internal mdio bus in case | ||
620 | * a dt-overlay and driver reload changed the configuration | ||
621 | */ | ||
622 | |||
623 | qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL, | ||
624 | QCA8K_MDIO_MASTER_EN); | ||
625 | return 0; | ||
626 | } | ||
627 | |||
628 | priv->ops.phy_read = qca8k_phy_read; | ||
629 | priv->ops.phy_write = qca8k_phy_write; | ||
630 | return 0; | ||
631 | } | ||
632 | |||
484 | static int | 633 | static int |
485 | qca8k_setup(struct dsa_switch *ds) | 634 | qca8k_setup(struct dsa_switch *ds) |
486 | { | 635 | { |
@@ -502,6 +651,10 @@ qca8k_setup(struct dsa_switch *ds) | |||
502 | if (IS_ERR(priv->regmap)) | 651 | if (IS_ERR(priv->regmap)) |
503 | pr_warn("regmap initialization failed"); | 652 | pr_warn("regmap initialization failed"); |
504 | 653 | ||
654 | ret = qca8k_setup_mdio_bus(priv); | ||
655 | if (ret) | ||
656 | return ret; | ||
657 | |||
505 | /* Initialize CPU port pad mode (xMII type, delays...) */ | 658 | /* Initialize CPU port pad mode (xMII type, delays...) */ |
506 | phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn); | 659 | phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn); |
507 | if (phy_mode < 0) { | 660 | if (phy_mode < 0) { |
@@ -624,22 +777,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy) | |||
624 | qca8k_port_set_status(priv, port, 1); | 777 | qca8k_port_set_status(priv, port, 1); |
625 | } | 778 | } |
626 | 779 | ||
627 | static int | ||
628 | qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum) | ||
629 | { | ||
630 | struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; | ||
631 | |||
632 | return mdiobus_read(priv->bus, phy, regnum); | ||
633 | } | ||
634 | |||
635 | static int | ||
636 | qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val) | ||
637 | { | ||
638 | struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; | ||
639 | |||
640 | return mdiobus_write(priv->bus, phy, regnum, val); | ||
641 | } | ||
642 | |||
643 | static void | 780 | static void |
644 | qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) | 781 | qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) |
645 | { | 782 | { |
@@ -879,8 +1016,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = { | |||
879 | .setup = qca8k_setup, | 1016 | .setup = qca8k_setup, |
880 | .adjust_link = qca8k_adjust_link, | 1017 | .adjust_link = qca8k_adjust_link, |
881 | .get_strings = qca8k_get_strings, | 1018 | .get_strings = qca8k_get_strings, |
882 | .phy_read = qca8k_phy_read, | ||
883 | .phy_write = qca8k_phy_write, | ||
884 | .get_ethtool_stats = qca8k_get_ethtool_stats, | 1019 | .get_ethtool_stats = qca8k_get_ethtool_stats, |
885 | .get_sset_count = qca8k_get_sset_count, | 1020 | .get_sset_count = qca8k_get_sset_count, |
886 | .get_mac_eee = qca8k_get_mac_eee, | 1021 | .get_mac_eee = qca8k_get_mac_eee, |
@@ -923,7 +1058,8 @@ qca8k_sw_probe(struct mdio_device *mdiodev) | |||
923 | return -ENOMEM; | 1058 | return -ENOMEM; |
924 | 1059 | ||
925 | priv->ds->priv = priv; | 1060 | priv->ds->priv = priv; |
926 | priv->ds->ops = &qca8k_switch_ops; | 1061 | priv->ops = qca8k_switch_ops; |
1062 | priv->ds->ops = &priv->ops; | ||
927 | mutex_init(&priv->reg_mutex); | 1063 | mutex_init(&priv->reg_mutex); |
928 | dev_set_drvdata(&mdiodev->dev, priv); | 1064 | dev_set_drvdata(&mdiodev->dev, priv); |
929 | 1065 | ||
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index d146e54c8a6c..249fd62268e5 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h | |||
@@ -49,6 +49,18 @@ | |||
49 | #define QCA8K_MIB_FLUSH BIT(24) | 49 | #define QCA8K_MIB_FLUSH BIT(24) |
50 | #define QCA8K_MIB_CPU_KEEP BIT(20) | 50 | #define QCA8K_MIB_CPU_KEEP BIT(20) |
51 | #define QCA8K_MIB_BUSY BIT(17) | 51 | #define QCA8K_MIB_BUSY BIT(17) |
52 | #define QCA8K_MDIO_MASTER_CTRL 0x3c | ||
53 | #define QCA8K_MDIO_MASTER_BUSY BIT(31) | ||
54 | #define QCA8K_MDIO_MASTER_EN BIT(30) | ||
55 | #define QCA8K_MDIO_MASTER_READ BIT(27) | ||
56 | #define QCA8K_MDIO_MASTER_WRITE 0 | ||
57 | #define QCA8K_MDIO_MASTER_SUP_PRE BIT(26) | ||
58 | #define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21) | ||
59 | #define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16) | ||
60 | #define QCA8K_MDIO_MASTER_DATA(x) (x) | ||
61 | #define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0) | ||
62 | #define QCA8K_MDIO_MASTER_MAX_PORTS 5 | ||
63 | #define QCA8K_MDIO_MASTER_MAX_REG 32 | ||
52 | #define QCA8K_GOL_MAC_ADDR0 0x60 | 64 | #define QCA8K_GOL_MAC_ADDR0 0x60 |
53 | #define QCA8K_GOL_MAC_ADDR1 0x64 | 65 | #define QCA8K_GOL_MAC_ADDR1 0x64 |
54 | #define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) | 66 | #define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) |
@@ -169,6 +181,7 @@ struct qca8k_priv { | |||
169 | struct dsa_switch *ds; | 181 | struct dsa_switch *ds; |
170 | struct mutex reg_mutex; | 182 | struct mutex reg_mutex; |
171 | struct device *dev; | 183 | struct device *dev; |
184 | struct dsa_switch_ops ops; | ||
172 | }; | 185 | }; |
173 | 186 | ||
174 | struct qca8k_mib_desc { | 187 | struct qca8k_mib_desc { |
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig new file mode 100644 index 000000000000..757751a89819 --- /dev/null +++ b/drivers/net/dsa/sja1105/Kconfig | |||
@@ -0,0 +1,17 @@ | |||
1 | config NET_DSA_SJA1105 | ||
2 | tristate "NXP SJA1105 Ethernet switch family support" | ||
3 | depends on NET_DSA && SPI | ||
4 | select NET_DSA_TAG_SJA1105 | ||
5 | select PACKING | ||
6 | select CRC32 | ||
7 | help | ||
8 | This is the driver for the NXP SJA1105 automotive Ethernet switch | ||
9 | family. These are 5-port devices and are managed over an SPI | ||
10 | interface. Probing is handled based on OF bindings and so is the | ||
11 | linkage to phylib. The driver supports the following revisions: | ||
12 | - SJA1105E (Gen. 1, No TT-Ethernet) | ||
13 | - SJA1105T (Gen. 1, TT-Ethernet) | ||
14 | - SJA1105P (Gen. 2, No SGMII, No TT-Ethernet) | ||
15 | - SJA1105Q (Gen. 2, No SGMII, TT-Ethernet) | ||
16 | - SJA1105R (Gen. 2, SGMII, No TT-Ethernet) | ||
17 | - SJA1105S (Gen. 2, SGMII, TT-Ethernet) | ||
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile new file mode 100644 index 000000000000..1c2b55fec959 --- /dev/null +++ b/drivers/net/dsa/sja1105/Makefile | |||
@@ -0,0 +1,9 @@ | |||
1 | obj-$(CONFIG_NET_DSA_SJA1105) += sja1105.o | ||
2 | |||
3 | sja1105-objs := \ | ||
4 | sja1105_spi.o \ | ||
5 | sja1105_main.o \ | ||
6 | sja1105_ethtool.o \ | ||
7 | sja1105_clocking.o \ | ||
8 | sja1105_static_config.o \ | ||
9 | sja1105_dynamic_config.o \ | ||
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h new file mode 100644 index 000000000000..b043bfc408f2 --- /dev/null +++ b/drivers/net/dsa/sja1105/sja1105.h | |||
@@ -0,0 +1,159 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 | ||
2 | * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH | ||
3 | * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> | ||
4 | */ | ||
5 | #ifndef _SJA1105_H | ||
6 | #define _SJA1105_H | ||
7 | |||
8 | #include <linux/dsa/sja1105.h> | ||
9 | #include <net/dsa.h> | ||
10 | #include <linux/mutex.h> | ||
11 | #include "sja1105_static_config.h" | ||
12 | |||
13 | #define SJA1105_NUM_PORTS 5 | ||
14 | #define SJA1105_NUM_TC 8 | ||
15 | #define SJA1105ET_FDB_BIN_SIZE 4 | ||
16 | /* The hardware value is in multiples of 10 ms. | ||
17 | * The passed parameter is in multiples of 1 ms. | ||
18 | */ | ||
19 | #define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10) | ||
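A tiny illustration of the unit conversion described in the comment above: the DSA core passes the bridge ageing time in milliseconds, while the hardware field counts 10 ms ticks. The value 300000 ms is just the usual 300 s bridge default used as an example.

	#include <stdio.h>

	#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)

	int main(void)
	{
		unsigned int bridge_ageing_ms = 300000;	/* e.g. 300 s bridge default */

		/* Prints 30000, i.e. 300 s expressed in 10 ms ticks. */
		printf("hardware ageing field: %u\n",
		       SJA1105_AGEING_TIME_MS(bridge_ageing_ms));
		return 0;
	}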
20 | |||
21 | /* Keeps the different addresses between E/T and P/Q/R/S */ | ||
22 | struct sja1105_regs { | ||
23 | u64 device_id; | ||
24 | u64 prod_id; | ||
25 | u64 status; | ||
26 | u64 port_control; | ||
27 | u64 rgu; | ||
28 | u64 config; | ||
29 | u64 rmii_pll1; | ||
30 | u64 pad_mii_tx[SJA1105_NUM_PORTS]; | ||
31 | u64 cgu_idiv[SJA1105_NUM_PORTS]; | ||
32 | u64 rgmii_pad_mii_tx[SJA1105_NUM_PORTS]; | ||
33 | u64 mii_tx_clk[SJA1105_NUM_PORTS]; | ||
34 | u64 mii_rx_clk[SJA1105_NUM_PORTS]; | ||
35 | u64 mii_ext_tx_clk[SJA1105_NUM_PORTS]; | ||
36 | u64 mii_ext_rx_clk[SJA1105_NUM_PORTS]; | ||
37 | u64 rgmii_tx_clk[SJA1105_NUM_PORTS]; | ||
38 | u64 rmii_ref_clk[SJA1105_NUM_PORTS]; | ||
39 | u64 rmii_ext_tx_clk[SJA1105_NUM_PORTS]; | ||
40 | u64 mac[SJA1105_NUM_PORTS]; | ||
41 | u64 mac_hl1[SJA1105_NUM_PORTS]; | ||
42 | u64 mac_hl2[SJA1105_NUM_PORTS]; | ||
43 | u64 qlevel[SJA1105_NUM_PORTS]; | ||
44 | }; | ||
45 | |||
46 | struct sja1105_info { | ||
47 | u64 device_id; | ||
48 | /* Needed for distinction between P and R, and between Q and S | ||
49 | * (since the parts with/without SGMII share the same | ||
50 | * switch core and device_id) | ||
51 | */ | ||
52 | u64 part_no; | ||
53 | const struct sja1105_dynamic_table_ops *dyn_ops; | ||
54 | const struct sja1105_table_ops *static_ops; | ||
55 | const struct sja1105_regs *regs; | ||
56 | int (*reset_cmd)(const void *ctx, const void *data); | ||
57 | int (*setup_rgmii_delay)(const void *ctx, int port); | ||
58 | const char *name; | ||
59 | }; | ||
60 | |||
61 | struct sja1105_private { | ||
62 | struct sja1105_static_config static_config; | ||
63 | bool rgmii_rx_delay[SJA1105_NUM_PORTS]; | ||
64 | bool rgmii_tx_delay[SJA1105_NUM_PORTS]; | ||
65 | const struct sja1105_info *info; | ||
66 | struct gpio_desc *reset_gpio; | ||
67 | struct spi_device *spidev; | ||
68 | struct dsa_switch *ds; | ||
69 | struct sja1105_port ports[SJA1105_NUM_PORTS]; | ||
70 | /* Serializes transmission of management frames so that | ||
71 | * the switch doesn't confuse them with one another. | ||
72 | */ | ||
73 | struct mutex mgmt_lock; | ||
74 | }; | ||
75 | |||
76 | #include "sja1105_dynamic_config.h" | ||
77 | |||
78 | struct sja1105_spi_message { | ||
79 | u64 access; | ||
80 | u64 read_count; | ||
81 | u64 address; | ||
82 | }; | ||
83 | |||
84 | typedef enum { | ||
85 | SPI_READ = 0, | ||
86 | SPI_WRITE = 1, | ||
87 | } sja1105_spi_rw_mode_t; | ||
88 | |||
89 | /* From sja1105_spi.c */ | ||
90 | int sja1105_spi_send_packed_buf(const struct sja1105_private *priv, | ||
91 | sja1105_spi_rw_mode_t rw, u64 reg_addr, | ||
92 | void *packed_buf, size_t size_bytes); | ||
93 | int sja1105_spi_send_int(const struct sja1105_private *priv, | ||
94 | sja1105_spi_rw_mode_t rw, u64 reg_addr, | ||
95 | u64 *value, u64 size_bytes); | ||
96 | int sja1105_spi_send_long_packed_buf(const struct sja1105_private *priv, | ||
97 | sja1105_spi_rw_mode_t rw, u64 base_addr, | ||
98 | void *packed_buf, u64 buf_len); | ||
99 | int sja1105_static_config_upload(struct sja1105_private *priv); | ||
100 | |||
101 | extern struct sja1105_info sja1105e_info; | ||
102 | extern struct sja1105_info sja1105t_info; | ||
103 | extern struct sja1105_info sja1105p_info; | ||
104 | extern struct sja1105_info sja1105q_info; | ||
105 | extern struct sja1105_info sja1105r_info; | ||
106 | extern struct sja1105_info sja1105s_info; | ||
107 | |||
108 | /* From sja1105_clocking.c */ | ||
109 | |||
110 | typedef enum { | ||
111 | XMII_MAC = 0, | ||
112 | XMII_PHY = 1, | ||
113 | } sja1105_mii_role_t; | ||
114 | |||
115 | typedef enum { | ||
116 | XMII_MODE_MII = 0, | ||
117 | XMII_MODE_RMII = 1, | ||
118 | XMII_MODE_RGMII = 2, | ||
119 | } sja1105_phy_interface_t; | ||
120 | |||
121 | typedef enum { | ||
122 | SJA1105_SPEED_10MBPS = 3, | ||
123 | SJA1105_SPEED_100MBPS = 2, | ||
124 | SJA1105_SPEED_1000MBPS = 1, | ||
125 | SJA1105_SPEED_AUTO = 0, | ||
126 | } sja1105_speed_t; | ||
127 | |||
128 | int sja1105_clocking_setup_port(struct sja1105_private *priv, int port); | ||
129 | int sja1105_clocking_setup(struct sja1105_private *priv); | ||
130 | |||
131 | /* From sja1105_ethtool.c */ | ||
132 | void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data); | ||
133 | void sja1105_get_strings(struct dsa_switch *ds, int port, | ||
134 | u32 stringset, u8 *data); | ||
135 | int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset); | ||
136 | |||
137 | /* From sja1105_dynamic_config.c */ | ||
138 | int sja1105_dynamic_config_read(struct sja1105_private *priv, | ||
139 | enum sja1105_blk_idx blk_idx, | ||
140 | int index, void *entry); | ||
141 | int sja1105_dynamic_config_write(struct sja1105_private *priv, | ||
142 | enum sja1105_blk_idx blk_idx, | ||
143 | int index, void *entry, bool keep); | ||
144 | |||
145 | u8 sja1105_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid); | ||
146 | |||
147 | /* Common implementations for the static and dynamic configs */ | ||
148 | size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr, | ||
149 | enum packing_op op); | ||
150 | size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr, | ||
151 | enum packing_op op); | ||
152 | size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr, | ||
153 | enum packing_op op); | ||
154 | size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr, | ||
155 | enum packing_op op); | ||
156 | size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr, | ||
157 | enum packing_op op); | ||
158 | |||
159 | #endif | ||
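
The header above only declares the SPI helpers; to make their intended use concrete, here is a minimal sketch (not part of the patch) of how a caller could combine sja1105_spi_send_int() with the per-variant regs->device_id address to verify the chip generation at probe time. The helper, the SPI_READ constant and the regs/info fields come from this header; the function name, the assumed 4-byte register width and the error handling are illustrative only.

static int sja1105_check_device_id_sketch(struct sja1105_private *priv)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u64 device_id;
	int rc;

	/* Read the (assumed 4-byte) DEVICE_ID register over SPI */
	rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id,
				  &device_id, 4);
	if (rc < 0)
		return rc;

	/* Compare against the compiled-in expectation for this variant */
	if (device_id != priv->info->device_id)
		return -ENODEV;

	return 0;
}
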
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c new file mode 100644 index 000000000000..94bfe0ee50a8 --- /dev/null +++ b/drivers/net/dsa/sja1105/sja1105_clocking.c | |||
@@ -0,0 +1,601 @@ | |||
1 | // SPDX-License-Identifier: BSD-3-Clause | ||
2 | /* Copyright (c) 2016-2018, NXP Semiconductors | ||
3 | * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> | ||
4 | */ | ||
5 | #include <linux/packing.h> | ||
6 | #include "sja1105.h" | ||
7 | |||
8 | #define SJA1105_SIZE_CGU_CMD 4 | ||
9 | |||
10 | struct sja1105_cfg_pad_mii_tx { | ||
11 | u64 d32_os; | ||
12 | u64 d32_ipud; | ||
13 | u64 d10_os; | ||
14 | u64 d10_ipud; | ||
15 | u64 ctrl_os; | ||
16 | u64 ctrl_ipud; | ||
17 | u64 clk_os; | ||
18 | u64 clk_ih; | ||
19 | u64 clk_ipud; | ||
20 | }; | ||
21 | |||
22 | /* UM10944 Table 82. | ||
23 | * IDIV_0_C to IDIV_4_C control registers | ||
24 | * (addr. 10000Bh to 10000Fh) | ||
25 | */ | ||
26 | struct sja1105_cgu_idiv { | ||
27 | u64 clksrc; | ||
28 | u64 autoblock; | ||
29 | u64 idiv; | ||
30 | u64 pd; | ||
31 | }; | ||
32 | |||
33 | /* PLL_1_C control register | ||
34 | * | ||
35 | * SJA1105 E/T: UM10944 Table 81 (address 10000Ah) | ||
36 | * SJA1105 P/Q/R/S: UM11040 Table 116 (address 10000Ah) | ||
37 | */ | ||
38 | struct sja1105_cgu_pll_ctrl { | ||
39 | u64 pllclksrc; | ||
40 | u64 msel; | ||
41 | u64 autoblock; | ||
42 | u64 psel; | ||
43 | u64 direct; | ||
44 | u64 fbsel; | ||
45 | u64 bypass; | ||
46 | u64 pd; | ||
47 | }; | ||
48 | |||
49 | enum { | ||
50 | CLKSRC_MII0_TX_CLK = 0x00, | ||
51 | CLKSRC_MII0_RX_CLK = 0x01, | ||
52 | CLKSRC_MII1_TX_CLK = 0x02, | ||
53 | CLKSRC_MII1_RX_CLK = 0x03, | ||
54 | CLKSRC_MII2_TX_CLK = 0x04, | ||
55 | CLKSRC_MII2_RX_CLK = 0x05, | ||
56 | CLKSRC_MII3_TX_CLK = 0x06, | ||
57 | CLKSRC_MII3_RX_CLK = 0x07, | ||
58 | CLKSRC_MII4_TX_CLK = 0x08, | ||
59 | CLKSRC_MII4_RX_CLK = 0x09, | ||
60 | CLKSRC_PLL0 = 0x0B, | ||
61 | CLKSRC_PLL1 = 0x0E, | ||
62 | CLKSRC_IDIV0 = 0x11, | ||
63 | CLKSRC_IDIV1 = 0x12, | ||
64 | CLKSRC_IDIV2 = 0x13, | ||
65 | CLKSRC_IDIV3 = 0x14, | ||
66 | CLKSRC_IDIV4 = 0x15, | ||
67 | }; | ||
68 | |||
69 | /* UM10944 Table 83. | ||
70 | * MIIx clock control registers 1 to 30 | ||
71 | * (addresses 100013h to 100035h) | ||
72 | */ | ||
73 | struct sja1105_cgu_mii_ctrl { | ||
74 | u64 clksrc; | ||
75 | u64 autoblock; | ||
76 | u64 pd; | ||
77 | }; | ||
78 | |||
79 | static void sja1105_cgu_idiv_packing(void *buf, struct sja1105_cgu_idiv *idiv, | ||
80 | enum packing_op op) | ||
81 | { | ||
82 | const int size = 4; | ||
83 | |||
84 | sja1105_packing(buf, &idiv->clksrc, 28, 24, size, op); | ||
85 | sja1105_packing(buf, &idiv->autoblock, 11, 11, size, op); | ||
86 | sja1105_packing(buf, &idiv->idiv, 5, 2, size, op); | ||
87 | sja1105_packing(buf, &idiv->pd, 0, 0, size, op); | ||
88 | } | ||
89 | |||
90 | static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port, | ||
91 | bool enabled, int factor) | ||
92 | { | ||
93 | const struct sja1105_regs *regs = priv->info->regs; | ||
94 | struct device *dev = priv->ds->dev; | ||
95 | struct sja1105_cgu_idiv idiv; | ||
96 | u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; | ||
97 | |||
98 | if (enabled && factor != 1 && factor != 10) { | ||
99 | dev_err(dev, "idiv factor must be 1 or 10\n"); | ||
100 | return -ERANGE; | ||
101 | } | ||
102 | |||
103 | /* Payload for packed_buf */ | ||
104 | idiv.clksrc = 0x0A; /* 25MHz */ | ||
105 | idiv.autoblock = 1; /* Block clk automatically */ | ||
106 | idiv.idiv = factor - 1; /* Divide by 1 or 10 */ | ||
107 | idiv.pd = enabled ? 0 : 1; /* Power down? */ | ||
108 | sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK); | ||
109 | |||
110 | return sja1105_spi_send_packed_buf(priv, SPI_WRITE, | ||
111 | regs->cgu_idiv[port], packed_buf, | ||
112 | SJA1105_SIZE_CGU_CMD); | ||
113 | } | ||
114 | |||
115 | static void | ||
116 | sja1105_cgu_mii_control_packing(void *buf, struct sja1105_cgu_mii_ctrl *cmd, | ||
117 | enum packing_op op) | ||
118 | { | ||
119 | const int size = 4; | ||
120 | |||
121 | sja1105_packing(buf, &cmd->clksrc, 28, 24, size, op); | ||
122 | sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op); | ||
123 | sja1105_packing(buf, &cmd->pd, 0, 0, size, op); | ||
124 | } | ||
125 | |||
126 | static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv, | ||
127 | int port, sja1105_mii_role_t role) | ||
128 | { | ||
129 | const struct sja1105_regs *regs = priv->info->regs; | ||
130 | struct sja1105_cgu_mii_ctrl mii_tx_clk; | ||
131 | const int mac_clk_sources[] = { | ||
132 | CLKSRC_MII0_TX_CLK, | ||
133 | CLKSRC_MII1_TX_CLK, | ||
134 | CLKSRC_MII2_TX_CLK, | ||
135 | CLKSRC_MII3_TX_CLK, | ||
136 | CLKSRC_MII4_TX_CLK, | ||
137 | }; | ||
138 | const int phy_clk_sources[] = { | ||
139 | CLKSRC_IDIV0, | ||
140 | CLKSRC_IDIV1, | ||
141 | CLKSRC_IDIV2, | ||
142 | CLKSRC_IDIV3, | ||
143 | CLKSRC_IDIV4, | ||
144 | }; | ||
145 | u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; | ||
146 | int clksrc; | ||
147 | |||
148 | if (role == XMII_MAC) | ||
149 | clksrc = mac_clk_sources[port]; | ||
150 | else | ||
151 | clksrc = phy_clk_sources[port]; | ||
152 | |||
153 | /* Payload for packed_buf */ | ||
154 | mii_tx_clk.clksrc = clksrc; | ||
155 | mii_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */ | ||
156 | mii_tx_clk.pd = 0; /* Power Down off => enabled */ | ||
157 | sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK); | ||
158 | |||
159 | return sja1105_spi_send_packed_buf(priv, SPI_WRITE, | ||
160 | regs->mii_tx_clk[port], packed_buf, | ||
161 | SJA1105_SIZE_CGU_CMD); | ||
162 | } | ||
163 | |||
164 | static int | ||
165 | sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port) | ||
166 | { | ||
167 | const struct sja1105_regs *regs = priv->info->regs; | ||
168 | struct sja1105_cgu_mii_ctrl mii_rx_clk; | ||
169 | u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; | ||
170 | const int clk_sources[] = { | ||
171 | CLKSRC_MII0_RX_CLK, | ||
172 | CLKSRC_MII1_RX_CLK, | ||
173 | CLKSRC_MII2_RX_CLK, | ||
174 | CLKSRC_MII3_RX_CLK, | ||
175 | CLKSRC_MII4_RX_CLK, | ||
176 | }; | ||
177 | |||
178 | /* Payload for packed_buf */ | ||
179 | mii_rx_clk.clksrc = clk_sources[port]; | ||
180 | mii_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */ | ||
181 | mii_rx_clk.pd = 0; /* Power Down off => enabled */ | ||
182 | sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK); | ||
183 | |||
184 | return sja1105_spi_send_packed_buf(priv, SPI_WRITE, | ||
185 | regs->mii_rx_clk[port], packed_buf, | ||
186 | SJA1105_SIZE_CGU_CMD); | ||
187 | } | ||
188 | |||
189 | static int | ||
190 | sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port) | ||
191 | { | ||
192 | const struct sja1105_regs *regs = priv->info->regs; | ||
193 | struct sja1105_cgu_mii_ctrl mii_ext_tx_clk; | ||
194 | u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; | ||
195 | const int clk_sources[] = { | ||
196 | CLKSRC_IDIV0, | ||
197 | CLKSRC_IDIV1, | ||
198 | CLKSRC_IDIV2, | ||
199 | CLKSRC_IDIV3, | ||
200 | CLKSRC_IDIV4, | ||
201 | }; | ||
202 | |||
203 | /* Payload for packed_buf */ | ||
204 | mii_ext_tx_clk.clksrc = clk_sources[port]; | ||
205 | mii_ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */ | ||
206 | mii_ext_tx_clk.pd = 0; /* Power Down off => enabled */ | ||
207 | sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK); | ||
208 | |||
209 | return sja1105_spi_send_packed_buf(priv, SPI_WRITE, | ||
210 | regs->mii_ext_tx_clk[port], | ||
211 | packed_buf, SJA1105_SIZE_CGU_CMD); | ||
212 | } | ||
213 | |||
214 | static int | ||
215 | sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port) | ||
216 | { | ||
217 | const struct sja1105_regs *regs = priv->info->regs; | ||
218 | struct sja1105_cgu_mii_ctrl mii_ext_rx_clk; | ||
219 | u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; | ||
220 | const int clk_sources[] = { | ||
221 | CLKSRC_IDIV0, | ||
222 | CLKSRC_IDIV1, | ||
223 | CLKSRC_IDIV2, | ||
224 | CLKSRC_IDIV3, | ||
225 | CLKSRC_IDIV4, | ||
226 | }; | ||
227 | |||
228 | /* Payload for packed_buf */ | ||
229 | mii_ext_rx_clk.clksrc = clk_sources[port]; | ||
230 | mii_ext_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */ | ||
231 | mii_ext_rx_clk.pd = 0; /* Power Down off => enabled */ | ||
232 | sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK); | ||
233 | |||
234 | return sja1105_spi_send_packed_buf(priv, SPI_WRITE, | ||
235 | regs->mii_ext_rx_clk[port], | ||
236 | packed_buf, SJA1105_SIZE_CGU_CMD); | ||
237 | } | ||
238 | |||
239 | static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port, | ||
240 | sja1105_mii_role_t role) | ||
241 | { | ||
242 | struct device *dev = priv->ds->dev; | ||
243 | int rc; | ||
244 | |||
245 | dev_dbg(dev, "Configuring MII-%s clocking\n", | ||
246 | (role == XMII_MAC) ? "MAC" : "PHY"); | ||
247 | /* If role is MAC, disable IDIV | ||
248 | * If role is PHY, enable IDIV and configure for 1/1 divider | ||
249 | */ | ||
250 | rc = sja1105_cgu_idiv_config(priv, port, (role == XMII_PHY), 1); | ||
251 | if (rc < 0) | ||
252 | return rc; | ||
253 | |||
254 | /* Configure CLKSRC of MII_TX_CLK_n | ||
255 | * * If role is MAC, select TX_CLK_n | ||
256 | * * If role is PHY, select IDIV_n | ||
257 | */ | ||
258 | rc = sja1105_cgu_mii_tx_clk_config(priv, port, role); | ||
259 | if (rc < 0) | ||
260 | return rc; | ||
261 | |||
262 | /* Configure CLKSRC of MII_RX_CLK_n | ||
263 | * Select RX_CLK_n | ||
264 | */ | ||
265 | rc = sja1105_cgu_mii_rx_clk_config(priv, port); | ||
266 | if (rc < 0) | ||
267 | return rc; | ||
268 | |||
269 | if (role == XMII_PHY) { | ||
270 | /* Per MII spec, the PHY (which is us) drives the TX_CLK pin */ | ||
271 | |||
272 | /* Configure CLKSRC of EXT_TX_CLK_n | ||
273 | * Select IDIV_n | ||
274 | */ | ||
275 | rc = sja1105_cgu_mii_ext_tx_clk_config(priv, port); | ||
276 | if (rc < 0) | ||
277 | return rc; | ||
278 | |||
279 | /* Configure CLKSRC of EXT_RX_CLK_n | ||
280 | * Select IDIV_n | ||
281 | */ | ||
282 | rc = sja1105_cgu_mii_ext_rx_clk_config(priv, port); | ||
283 | if (rc < 0) | ||
284 | return rc; | ||
285 | } | ||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | static void | ||
290 | sja1105_cgu_pll_control_packing(void *buf, struct sja1105_cgu_pll_ctrl *cmd, | ||
291 | enum packing_op op) | ||
292 | { | ||
293 | const int size = 4; | ||
294 | |||
295 | sja1105_packing(buf, &cmd->pllclksrc, 28, 24, size, op); | ||
296 | sja1105_packing(buf, &cmd->msel, 23, 16, size, op); | ||
297 | sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op); | ||
298 | sja1105_packing(buf, &cmd->psel, 9, 8, size, op); | ||
299 | sja1105_packing(buf, &cmd->direct, 7, 7, size, op); | ||
300 | sja1105_packing(buf, &cmd->fbsel, 6, 6, size, op); | ||
301 | sja1105_packing(buf, &cmd->bypass, 1, 1, size, op); | ||
302 | sja1105_packing(buf, &cmd->pd, 0, 0, size, op); | ||
303 | } | ||
304 | |||
305 | static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv, | ||
306 | int port, sja1105_speed_t speed) | ||
307 | { | ||
308 | const struct sja1105_regs *regs = priv->info->regs; | ||
309 | struct sja1105_cgu_mii_ctrl txc; | ||
310 | u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; | ||
311 | int clksrc; | ||
312 | |||
313 | if (speed == SJA1105_SPEED_1000MBPS) { | ||
314 | clksrc = CLKSRC_PLL0; | ||
315 | } else { | ||
316 | int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2, | ||
317 | CLKSRC_IDIV3, CLKSRC_IDIV4}; | ||
318 | clksrc = clk_sources[port]; | ||
319 | } | ||
320 | |||
321 | /* RGMII: 125MHz for 1000, 25MHz for 100, 2.5MHz for 10 */ | ||
322 | txc.clksrc = clksrc; | ||
323 | /* Autoblock clk while changing clksrc */ | ||
324 | txc.autoblock = 1; | ||
325 | /* Power Down off => enabled */ | ||
326 | txc.pd = 0; | ||
327 | sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK); | ||
328 | |||
329 | return sja1105_spi_send_packed_buf(priv, SPI_WRITE, | ||
330 | regs->rgmii_tx_clk[port], | ||
331 | packed_buf, SJA1105_SIZE_CGU_CMD); | ||
332 | } | ||
333 | |||
334 | /* AGU */ | ||
335 | static void | ||
336 | sja1105_cfg_pad_mii_tx_packing(void *buf, struct sja1105_cfg_pad_mii_tx *cmd, | ||
337 | enum packing_op op) | ||
338 | { | ||
339 | const int size = 4; | ||
340 | |||
341 | sja1105_packing(buf, &cmd->d32_os, 28, 27, size, op); | ||
342 | sja1105_packing(buf, &cmd->d32_ipud, 25, 24, size, op); | ||
343 | sja1105_packing(buf, &cmd->d10_os, 20, 19, size, op); | ||
344 | sja1105_packing(buf, &cmd->d10_ipud, 17, 16, size, op); | ||
345 | sja1105_packing(buf, &cmd->ctrl_os, 12, 11, size, op); | ||
346 | sja1105_packing(buf, &cmd->ctrl_ipud, 9, 8, size, op); | ||
347 | sja1105_packing(buf, &cmd->clk_os, 4, 3, size, op); | ||
348 | sja1105_packing(buf, &cmd->clk_ih, 2, 2, size, op); | ||
349 | sja1105_packing(buf, &cmd->clk_ipud, 1, 0, size, op); | ||
350 | } | ||
351 | |||
352 | static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv, | ||
353 | int port) | ||
354 | { | ||
355 | const struct sja1105_regs *regs = priv->info->regs; | ||
356 | struct sja1105_cfg_pad_mii_tx pad_mii_tx; | ||
357 | u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; | ||
358 | |||
359 | /* Payload */ | ||
360 | pad_mii_tx.d32_os = 3; /* TXD[3:2] output stage: */ | ||
361 | /* high noise/high speed */ | ||
362 | pad_mii_tx.d10_os = 3; /* TXD[1:0] output stage: */ | ||
363 | /* high noise/high speed */ | ||
364 | pad_mii_tx.d32_ipud = 2; /* TXD[3:2] input stage: */ | ||
365 | /* plain input (default) */ | ||
366 | pad_mii_tx.d10_ipud = 2; /* TXD[1:0] input stage: */ | ||
367 | /* plain input (default) */ | ||
368 | pad_mii_tx.ctrl_os = 3; /* TX_CTL / TX_ER output stage */ | ||
369 | pad_mii_tx.ctrl_ipud = 2; /* TX_CTL / TX_ER input stage (default) */ | ||
370 | pad_mii_tx.clk_os = 3; /* TX_CLK output stage */ | ||
371 | pad_mii_tx.clk_ih = 0; /* TX_CLK input hysteresis (default) */ | ||
372 | pad_mii_tx.clk_ipud = 2; /* TX_CLK input stage (default) */ | ||
373 | sja1105_cfg_pad_mii_tx_packing(packed_buf, &pad_mii_tx, PACK); | ||
374 | |||
375 | return sja1105_spi_send_packed_buf(priv, SPI_WRITE, | ||
376 | regs->rgmii_pad_mii_tx[port], | ||
377 | packed_buf, SJA1105_SIZE_CGU_CMD); | ||
378 | } | ||
379 | |||
380 | static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port) | ||
381 | { | ||
382 | struct device *dev = priv->ds->dev; | ||
383 | struct sja1105_mac_config_entry *mac; | ||
384 | sja1105_speed_t speed; | ||
385 | int rc; | ||
386 | |||
387 | mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; | ||
388 | speed = mac[port].speed; | ||
389 | |||
390 | dev_dbg(dev, "Configuring port %d RGMII at speed %dMbps\n", | ||
391 | port, speed); | ||
392 | |||
393 | switch (speed) { | ||
394 | case SJA1105_SPEED_1000MBPS: | ||
395 | /* 1000Mbps, IDIV disabled (125 MHz) */ | ||
396 | rc = sja1105_cgu_idiv_config(priv, port, false, 1); | ||
397 | break; | ||
398 | case SJA1105_SPEED_100MBPS: | ||
399 | /* 100Mbps, IDIV enabled, divide by 1 (25 MHz) */ | ||
400 | rc = sja1105_cgu_idiv_config(priv, port, true, 1); | ||
401 | break; | ||
402 | case SJA1105_SPEED_10MBPS: | ||
403 | /* 10Mbps, IDIV enabled, divide by 10 (2.5 MHz) */ | ||
404 | rc = sja1105_cgu_idiv_config(priv, port, true, 10); | ||
405 | break; | ||
406 | case SJA1105_SPEED_AUTO: | ||
407 | /* Skip CGU configuration if there is no speed available | ||
408 | * (e.g. link is not established yet) | ||
409 | */ | ||
410 | dev_dbg(dev, "Speed not available, skipping CGU config\n"); | ||
411 | return 0; | ||
412 | default: | ||
413 | rc = -EINVAL; | ||
414 | } | ||
415 | |||
416 | if (rc < 0) { | ||
417 | dev_err(dev, "Failed to configure idiv\n"); | ||
418 | return rc; | ||
419 | } | ||
420 | rc = sja1105_cgu_rgmii_tx_clk_config(priv, port, speed); | ||
421 | if (rc < 0) { | ||
422 | dev_err(dev, "Failed to configure RGMII Tx clock\n"); | ||
423 | return rc; | ||
424 | } | ||
425 | rc = sja1105_rgmii_cfg_pad_tx_config(priv, port); | ||
426 | if (rc < 0) { | ||
427 | dev_err(dev, "Failed to configure Tx pad registers\n"); | ||
428 | return rc; | ||
429 | } | ||
430 | if (!priv->info->setup_rgmii_delay) | ||
431 | return 0; | ||
432 | |||
433 | return priv->info->setup_rgmii_delay(priv, port); | ||
434 | } | ||
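
As a quick cross-check of the speed handling above, the sketch below (plain userspace C, not kernel code) derives the TX clock each branch should produce: PLL0 supplies 125 MHz for gigabit, while the 25 MHz IDIV input (see sja1105_cgu_idiv_config()) is divided by 1 or 10 for 100 and 10 Mbps, matching the "125MHz for 1000, 25MHz for 100, 2.5MHz for 10" comment in sja1105_cgu_rgmii_tx_clk_config().

#include <assert.h>

static int rgmii_tx_clk_khz(int speed_mbps)
{
	switch (speed_mbps) {
	case 1000:
		return 125000;		/* CLKSRC_PLL0, IDIV powered down */
	case 100:
		return 25000 / 1;	/* IDIV enabled, divide by 1 */
	case 10:
		return 25000 / 10;	/* IDIV enabled, divide by 10 */
	default:
		return -1;		/* SJA1105_SPEED_AUTO: nothing to do */
	}
}

int main(void)
{
	assert(rgmii_tx_clk_khz(1000) == 125000);
	assert(rgmii_tx_clk_khz(100) == 25000);
	assert(rgmii_tx_clk_khz(10) == 2500);
	return 0;
}
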
435 | |||
436 | static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv, | ||
437 | int port) | ||
438 | { | ||
439 | const struct sja1105_regs *regs = priv->info->regs; | ||
440 | struct sja1105_cgu_mii_ctrl ref_clk; | ||
441 | u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; | ||
442 | const int clk_sources[] = { | ||
443 | CLKSRC_MII0_TX_CLK, | ||
444 | CLKSRC_MII1_TX_CLK, | ||
445 | CLKSRC_MII2_TX_CLK, | ||
446 | CLKSRC_MII3_TX_CLK, | ||
447 | CLKSRC_MII4_TX_CLK, | ||
448 | }; | ||
449 | |||
450 | /* Payload for packed_buf */ | ||
451 | ref_clk.clksrc = clk_sources[port]; | ||
452 | ref_clk.autoblock = 1; /* Autoblock clk while changing clksrc */ | ||
453 | ref_clk.pd = 0; /* Power Down off => enabled */ | ||
454 | sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK); | ||
455 | |||
456 | return sja1105_spi_send_packed_buf(priv, SPI_WRITE, | ||
457 | regs->rmii_ref_clk[port], | ||
458 | packed_buf, SJA1105_SIZE_CGU_CMD); | ||
459 | } | ||
460 | |||
461 | static int | ||
462 | sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port) | ||
463 | { | ||
464 | const struct sja1105_regs *regs = priv->info->regs; | ||
465 | struct sja1105_cgu_mii_ctrl ext_tx_clk; | ||
466 | u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; | ||
467 | |||
468 | /* Payload for packed_buf */ | ||
469 | ext_tx_clk.clksrc = CLKSRC_PLL1; | ||
470 | ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */ | ||
471 | ext_tx_clk.pd = 0; /* Power Down off => enabled */ | ||
472 | sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK); | ||
473 | |||
474 | return sja1105_spi_send_packed_buf(priv, SPI_WRITE, | ||
475 | regs->rmii_ext_tx_clk[port], | ||
476 | packed_buf, SJA1105_SIZE_CGU_CMD); | ||
477 | } | ||
478 | |||
479 | static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv) | ||
480 | { | ||
481 | const struct sja1105_regs *regs = priv->info->regs; | ||
482 | u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; | ||
483 | struct sja1105_cgu_pll_ctrl pll = {0}; | ||
484 | struct device *dev = priv->ds->dev; | ||
485 | int rc; | ||
486 | |||
487 | /* PLL1 must be enabled and output 50 MHz. | ||
488 | * This is done by first writing 0x0A010941 to | ||
489 | * the PLL_1_C register and then deasserting | ||
490 | * power down (PD) by writing 0x0A010940. | ||
491 | */ | ||
492 | |||
493 | /* Step 1: PLL1 setup for 50 MHz */ | ||
494 | pll.pllclksrc = 0xA; | ||
495 | pll.msel = 0x1; | ||
496 | pll.autoblock = 0x1; | ||
497 | pll.psel = 0x1; | ||
498 | pll.direct = 0x0; | ||
499 | pll.fbsel = 0x1; | ||
500 | pll.bypass = 0x0; | ||
501 | pll.pd = 0x1; | ||
502 | |||
503 | sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK); | ||
504 | rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1, | ||
505 | packed_buf, SJA1105_SIZE_CGU_CMD); | ||
506 | if (rc < 0) { | ||
507 | dev_err(dev, "failed to configure PLL1 for 50MHz\n"); | ||
508 | return rc; | ||
509 | } | ||
510 | |||
511 | /* Step 2: Enable PLL1 */ | ||
512 | pll.pd = 0x0; | ||
513 | |||
514 | sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK); | ||
515 | rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1, | ||
516 | packed_buf, SJA1105_SIZE_CGU_CMD); | ||
517 | if (rc < 0) { | ||
518 | dev_err(dev, "failed to enable PLL1\n"); | ||
519 | return rc; | ||
520 | } | ||
521 | return rc; | ||
522 | } | ||
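
The two magic numbers in the comment at the top of this function can be reproduced from the bit positions used by sja1105_cgu_pll_control_packing(). The standalone check below (userspace C, plain shifts instead of the kernel packing helpers) confirms that the field values chosen above pack to 0x0A010941 with PD set and to 0x0A010940 once PD is deasserted.

#include <assert.h>
#include <stdint.h>

static uint32_t pll1_word(uint32_t pd)
{
	return (0xAu << 24) |	/* pllclksrc, bits 28:24 */
	       (0x1u << 16) |	/* msel,      bits 23:16 */
	       (0x1u << 11) |	/* autoblock, bit  11    */
	       (0x1u << 8)  |	/* psel,      bits 9:8   */
	       (0x1u << 6)  |	/* fbsel,     bit  6     */
	       pd;		/* pd,        bit  0     */
}

int main(void)
{
	assert(pll1_word(1) == 0x0A010941);	/* step 1: setup, powered down */
	assert(pll1_word(0) == 0x0A010940);	/* step 2: PD cleared, PLL enabled */
	return 0;
}
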
523 | |||
524 | static int sja1105_rmii_clocking_setup(struct sja1105_private *priv, int port, | ||
525 | sja1105_mii_role_t role) | ||
526 | { | ||
527 | struct device *dev = priv->ds->dev; | ||
528 | int rc; | ||
529 | |||
530 | dev_dbg(dev, "Configuring RMII-%s clocking\n", | ||
531 | (role == XMII_MAC) ? "MAC" : "PHY"); | ||
532 | /* AH1601.pdf chapter 2.5.1. Sources */ | ||
533 | if (role == XMII_MAC) { | ||
534 | /* Configure and enable PLL1 for 50 MHz output */ | ||
535 | rc = sja1105_cgu_rmii_pll_config(priv); | ||
536 | if (rc < 0) | ||
537 | return rc; | ||
538 | } | ||
539 | /* Disable IDIV for this port */ | ||
540 | rc = sja1105_cgu_idiv_config(priv, port, false, 1); | ||
541 | if (rc < 0) | ||
542 | return rc; | ||
543 | /* Source to sink mappings */ | ||
544 | rc = sja1105_cgu_rmii_ref_clk_config(priv, port); | ||
545 | if (rc < 0) | ||
546 | return rc; | ||
547 | if (role == XMII_MAC) { | ||
548 | rc = sja1105_cgu_rmii_ext_tx_clk_config(priv, port); | ||
549 | if (rc < 0) | ||
550 | return rc; | ||
551 | } | ||
552 | return 0; | ||
553 | } | ||
554 | |||
555 | int sja1105_clocking_setup_port(struct sja1105_private *priv, int port) | ||
556 | { | ||
557 | struct sja1105_xmii_params_entry *mii; | ||
558 | struct device *dev = priv->ds->dev; | ||
559 | sja1105_phy_interface_t phy_mode; | ||
560 | sja1105_mii_role_t role; | ||
561 | int rc; | ||
562 | |||
563 | mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; | ||
564 | |||
565 | /* RGMII etc */ | ||
566 | phy_mode = mii->xmii_mode[port]; | ||
567 | /* MAC or PHY, for applicable types (not RGMII) */ | ||
568 | role = mii->phy_mac[port]; | ||
569 | |||
570 | switch (phy_mode) { | ||
571 | case XMII_MODE_MII: | ||
572 | rc = sja1105_mii_clocking_setup(priv, port, role); | ||
573 | break; | ||
574 | case XMII_MODE_RMII: | ||
575 | rc = sja1105_rmii_clocking_setup(priv, port, role); | ||
576 | break; | ||
577 | case XMII_MODE_RGMII: | ||
578 | rc = sja1105_rgmii_clocking_setup(priv, port); | ||
579 | break; | ||
580 | default: | ||
581 | dev_err(dev, "Invalid interface mode specified: %d\n", | ||
582 | phy_mode); | ||
583 | return -EINVAL; | ||
584 | } | ||
585 | if (rc) | ||
586 | dev_err(dev, "Clocking setup for port %d failed: %d\n", | ||
587 | port, rc); | ||
588 | return rc; | ||
589 | } | ||
590 | |||
591 | int sja1105_clocking_setup(struct sja1105_private *priv) | ||
592 | { | ||
593 | int port, rc; | ||
594 | |||
595 | for (port = 0; port < SJA1105_NUM_PORTS; port++) { | ||
596 | rc = sja1105_clocking_setup_port(priv, port); | ||
597 | if (rc < 0) | ||
598 | return rc; | ||
599 | } | ||
600 | return 0; | ||
601 | } | ||
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c new file mode 100644 index 000000000000..e73ab28bf632 --- /dev/null +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c | |||
@@ -0,0 +1,532 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> | ||
3 | */ | ||
4 | #include "sja1105.h" | ||
5 | |||
6 | #define SJA1105_SIZE_DYN_CMD 4 | ||
7 | |||
8 | #define SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY \ | ||
9 | SJA1105_SIZE_DYN_CMD | ||
10 | |||
11 | #define SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD \ | ||
12 | (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_L2_LOOKUP_ENTRY) | ||
13 | |||
14 | #define SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD \ | ||
15 | (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY) | ||
16 | |||
17 | #define SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD \ | ||
18 | (SJA1105_SIZE_DYN_CMD + 4 + SJA1105_SIZE_VLAN_LOOKUP_ENTRY) | ||
19 | |||
20 | #define SJA1105_SIZE_L2_FORWARDING_DYN_CMD \ | ||
21 | (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_L2_FORWARDING_ENTRY) | ||
22 | |||
23 | #define SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD \ | ||
24 | (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY) | ||
25 | |||
26 | #define SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD \ | ||
27 | (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY) | ||
28 | |||
29 | #define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \ | ||
30 | SJA1105_SIZE_DYN_CMD | ||
31 | |||
32 | #define SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD \ | ||
33 | SJA1105_SIZE_DYN_CMD | ||
34 | |||
35 | #define SJA1105_MAX_DYN_CMD_SIZE \ | ||
36 | SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD | ||
37 | |||
38 | static void | ||
39 | sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, | ||
40 | enum packing_op op) | ||
41 | { | ||
42 | u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY; | ||
43 | const int size = SJA1105_SIZE_DYN_CMD; | ||
44 | |||
45 | sja1105_packing(p, &cmd->valid, 31, 31, size, op); | ||
46 | sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op); | ||
47 | sja1105_packing(p, &cmd->errors, 29, 29, size, op); | ||
48 | sja1105_packing(p, &cmd->valident, 27, 27, size, op); | ||
49 | /* Hack - The hardware takes the 'index' field within | ||
50 | * struct sja1105_l2_lookup_entry as the index on which this command | ||
51 | * will operate. However it will ignore everything else, so 'index' | ||
52 | * is logically part of command but physically part of entry. | ||
53 | * Populate the 'index' entry field from within the command callback, | ||
54 | * such that our API doesn't need to ask for a full-blown entry | ||
55 | * structure when e.g. a delete is requested. | ||
56 | */ | ||
57 | sja1105_packing(buf, &cmd->index, 29, 20, | ||
58 | SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, op); | ||
59 | /* TODO hostcmd */ | ||
60 | } | ||
61 | |||
62 | static void | ||
63 | sja1105et_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, | ||
64 | enum packing_op op) | ||
65 | { | ||
66 | u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY; | ||
67 | const int size = SJA1105_SIZE_DYN_CMD; | ||
68 | |||
69 | sja1105_packing(p, &cmd->valid, 31, 31, size, op); | ||
70 | sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op); | ||
71 | sja1105_packing(p, &cmd->errors, 29, 29, size, op); | ||
72 | sja1105_packing(p, &cmd->valident, 27, 27, size, op); | ||
73 | /* Hack - see comments above. */ | ||
74 | sja1105_packing(buf, &cmd->index, 29, 20, | ||
75 | SJA1105ET_SIZE_L2_LOOKUP_ENTRY, op); | ||
76 | } | ||
77 | |||
78 | static void | ||
79 | sja1105et_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, | ||
80 | enum packing_op op) | ||
81 | { | ||
82 | u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY; | ||
83 | u64 mgmtroute = 1; | ||
84 | |||
85 | sja1105et_l2_lookup_cmd_packing(buf, cmd, op); | ||
86 | if (op == PACK) | ||
87 | sja1105_pack(p, &mgmtroute, 26, 26, SJA1105_SIZE_DYN_CMD); | ||
88 | } | ||
89 | |||
90 | static size_t sja1105et_mgmt_route_entry_packing(void *buf, void *entry_ptr, | ||
91 | enum packing_op op) | ||
92 | { | ||
93 | struct sja1105_mgmt_entry *entry = entry_ptr; | ||
94 | const size_t size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY; | ||
95 | |||
96 | /* UM10944: To specify if a PTP egress timestamp shall be captured on | ||
97 | * each port upon transmission of the frame, the LSB of VLANID in the | ||
98 | * ENTRY field provided by the host must be set. | ||
99 | * Bit 1 of VLANID then specifies the register in which the timestamp | ||
100 | * for this port is stored. | ||
101 | */ | ||
102 | sja1105_packing(buf, &entry->tsreg, 85, 85, size, op); | ||
103 | sja1105_packing(buf, &entry->takets, 84, 84, size, op); | ||
104 | sja1105_packing(buf, &entry->macaddr, 83, 36, size, op); | ||
105 | sja1105_packing(buf, &entry->destports, 35, 31, size, op); | ||
106 | sja1105_packing(buf, &entry->enfport, 30, 30, size, op); | ||
107 | return size; | ||
108 | } | ||
109 | |||
110 | /* In E/T, entry is at addresses 0x27-0x28. There is a 4 byte gap at 0x29, | ||
111 | * and command is at 0x2a. Similarly in P/Q/R/S there is a 1 register gap | ||
112 | * between entry (0x2d, 0x2e) and command (0x30). | ||
113 | */ | ||
114 | static void | ||
115 | sja1105_vlan_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, | ||
116 | enum packing_op op) | ||
117 | { | ||
118 | u8 *p = buf + SJA1105_SIZE_VLAN_LOOKUP_ENTRY + 4; | ||
119 | const int size = SJA1105_SIZE_DYN_CMD; | ||
120 | |||
121 | sja1105_packing(p, &cmd->valid, 31, 31, size, op); | ||
122 | sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op); | ||
123 | sja1105_packing(p, &cmd->valident, 27, 27, size, op); | ||
124 | /* Hack - see comments above, applied for 'vlanid' field of | ||
125 | * struct sja1105_vlan_lookup_entry. | ||
126 | */ | ||
127 | sja1105_packing(buf, &cmd->index, 38, 27, | ||
128 | SJA1105_SIZE_VLAN_LOOKUP_ENTRY, op); | ||
129 | } | ||
130 | |||
131 | static void | ||
132 | sja1105_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, | ||
133 | enum packing_op op) | ||
134 | { | ||
135 | u8 *p = buf + SJA1105_SIZE_L2_FORWARDING_ENTRY; | ||
136 | const int size = SJA1105_SIZE_DYN_CMD; | ||
137 | |||
138 | sja1105_packing(p, &cmd->valid, 31, 31, size, op); | ||
139 | sja1105_packing(p, &cmd->errors, 30, 30, size, op); | ||
140 | sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op); | ||
141 | sja1105_packing(p, &cmd->index, 4, 0, size, op); | ||
142 | } | ||
143 | |||
144 | static void | ||
145 | sja1105et_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, | ||
146 | enum packing_op op) | ||
147 | { | ||
148 | const int size = SJA1105_SIZE_DYN_CMD; | ||
149 | /* Yup, user manual definitions are reversed */ | ||
150 | u8 *reg1 = buf + 4; | ||
151 | |||
152 | sja1105_packing(reg1, &cmd->valid, 31, 31, size, op); | ||
153 | sja1105_packing(reg1, &cmd->index, 26, 24, size, op); | ||
154 | } | ||
155 | |||
156 | static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr, | ||
157 | enum packing_op op) | ||
158 | { | ||
159 | const int size = SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY; | ||
160 | struct sja1105_mac_config_entry *entry = entry_ptr; | ||
161 | /* Yup, user manual definitions are reversed */ | ||
162 | u8 *reg1 = buf + 4; | ||
163 | u8 *reg2 = buf; | ||
164 | |||
165 | sja1105_packing(reg1, &entry->speed, 30, 29, size, op); | ||
166 | sja1105_packing(reg1, &entry->drpdtag, 23, 23, size, op); | ||
167 | sja1105_packing(reg1, &entry->drpuntag, 22, 22, size, op); | ||
168 | sja1105_packing(reg1, &entry->retag, 21, 21, size, op); | ||
169 | sja1105_packing(reg1, &entry->dyn_learn, 20, 20, size, op); | ||
170 | sja1105_packing(reg1, &entry->egress, 19, 19, size, op); | ||
171 | sja1105_packing(reg1, &entry->ingress, 18, 18, size, op); | ||
172 | sja1105_packing(reg1, &entry->ing_mirr, 17, 17, size, op); | ||
173 | sja1105_packing(reg1, &entry->egr_mirr, 16, 16, size, op); | ||
174 | sja1105_packing(reg1, &entry->vlanprio, 14, 12, size, op); | ||
175 | sja1105_packing(reg1, &entry->vlanid, 11, 0, size, op); | ||
176 | sja1105_packing(reg2, &entry->tp_delin, 31, 16, size, op); | ||
177 | sja1105_packing(reg2, &entry->tp_delout, 15, 0, size, op); | ||
178 | /* MAC configuration table entries which can't be reconfigured: | ||
179 | * top, base, enabled, ifg, maxage, drpnona664 | ||
180 | */ | ||
181 | /* Bogus return value, not used anywhere */ | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | static void | ||
186 | sja1105pqrs_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, | ||
187 | enum packing_op op) | ||
188 | { | ||
189 | const int size = SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY; | ||
190 | u8 *p = buf + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY; | ||
191 | |||
192 | sja1105_packing(p, &cmd->valid, 31, 31, size, op); | ||
193 | sja1105_packing(p, &cmd->errors, 30, 30, size, op); | ||
194 | sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op); | ||
195 | sja1105_packing(p, &cmd->index, 2, 0, size, op); | ||
196 | } | ||
197 | |||
198 | static void | ||
199 | sja1105et_l2_lookup_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, | ||
200 | enum packing_op op) | ||
201 | { | ||
202 | sja1105_packing(buf, &cmd->valid, 31, 31, | ||
203 | SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, op); | ||
204 | } | ||
205 | |||
206 | static size_t | ||
207 | sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr, | ||
208 | enum packing_op op) | ||
209 | { | ||
210 | struct sja1105_l2_lookup_params_entry *entry = entry_ptr; | ||
211 | |||
212 | sja1105_packing(buf, &entry->poly, 7, 0, | ||
213 | SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, op); | ||
214 | /* Bogus return value, not used anywhere */ | ||
215 | return 0; | ||
216 | } | ||
217 | |||
218 | static void | ||
219 | sja1105et_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, | ||
220 | enum packing_op op) | ||
221 | { | ||
222 | const int size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD; | ||
223 | |||
224 | sja1105_packing(buf, &cmd->valid, 31, 31, size, op); | ||
225 | sja1105_packing(buf, &cmd->errors, 30, 30, size, op); | ||
226 | } | ||
227 | |||
228 | static size_t | ||
229 | sja1105et_general_params_entry_packing(void *buf, void *entry_ptr, | ||
230 | enum packing_op op) | ||
231 | { | ||
232 | struct sja1105_general_params_entry *entry = entry_ptr; | ||
233 | const int size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD; | ||
234 | |||
235 | sja1105_packing(buf, &entry->mirr_port, 2, 0, size, op); | ||
236 | /* Bogus return value, not used anywhere */ | ||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | #define OP_READ BIT(0) | ||
241 | #define OP_WRITE BIT(1) | ||
242 | #define OP_DEL BIT(2) | ||
243 | |||
244 | /* SJA1105E/T: First generation */ | ||
245 | struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { | ||
246 | [BLK_IDX_L2_LOOKUP] = { | ||
247 | .entry_packing = sja1105et_l2_lookup_entry_packing, | ||
248 | .cmd_packing = sja1105et_l2_lookup_cmd_packing, | ||
249 | .access = (OP_READ | OP_WRITE | OP_DEL), | ||
250 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT, | ||
251 | .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD, | ||
252 | .addr = 0x20, | ||
253 | }, | ||
254 | [BLK_IDX_MGMT_ROUTE] = { | ||
255 | .entry_packing = sja1105et_mgmt_route_entry_packing, | ||
256 | .cmd_packing = sja1105et_mgmt_route_cmd_packing, | ||
257 | .access = (OP_READ | OP_WRITE), | ||
258 | .max_entry_count = SJA1105_NUM_PORTS, | ||
259 | .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD, | ||
260 | .addr = 0x20, | ||
261 | }, | ||
262 | [BLK_IDX_L2_POLICING] = {0}, | ||
263 | [BLK_IDX_VLAN_LOOKUP] = { | ||
264 | .entry_packing = sja1105_vlan_lookup_entry_packing, | ||
265 | .cmd_packing = sja1105_vlan_lookup_cmd_packing, | ||
266 | .access = (OP_WRITE | OP_DEL), | ||
267 | .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT, | ||
268 | .packed_size = SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD, | ||
269 | .addr = 0x27, | ||
270 | }, | ||
271 | [BLK_IDX_L2_FORWARDING] = { | ||
272 | .entry_packing = sja1105_l2_forwarding_entry_packing, | ||
273 | .cmd_packing = sja1105_l2_forwarding_cmd_packing, | ||
274 | .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT, | ||
275 | .access = OP_WRITE, | ||
276 | .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD, | ||
277 | .addr = 0x24, | ||
278 | }, | ||
279 | [BLK_IDX_MAC_CONFIG] = { | ||
280 | .entry_packing = sja1105et_mac_config_entry_packing, | ||
281 | .cmd_packing = sja1105et_mac_config_cmd_packing, | ||
282 | .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, | ||
283 | .access = OP_WRITE, | ||
284 | .packed_size = SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD, | ||
285 | .addr = 0x36, | ||
286 | }, | ||
287 | [BLK_IDX_L2_LOOKUP_PARAMS] = { | ||
288 | .entry_packing = sja1105et_l2_lookup_params_entry_packing, | ||
289 | .cmd_packing = sja1105et_l2_lookup_params_cmd_packing, | ||
290 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, | ||
291 | .access = OP_WRITE, | ||
292 | .packed_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, | ||
293 | .addr = 0x38, | ||
294 | }, | ||
295 | [BLK_IDX_L2_FORWARDING_PARAMS] = {0}, | ||
296 | [BLK_IDX_GENERAL_PARAMS] = { | ||
297 | .entry_packing = sja1105et_general_params_entry_packing, | ||
298 | .cmd_packing = sja1105et_general_params_cmd_packing, | ||
299 | .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT, | ||
300 | .access = OP_WRITE, | ||
301 | .packed_size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD, | ||
302 | .addr = 0x34, | ||
303 | }, | ||
304 | [BLK_IDX_XMII_PARAMS] = {0}, | ||
305 | }; | ||
306 | |||
307 | /* SJA1105P/Q/R/S: Second generation: TODO */ | ||
308 | struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = { | ||
309 | [BLK_IDX_L2_LOOKUP] = { | ||
310 | .entry_packing = sja1105pqrs_l2_lookup_entry_packing, | ||
311 | .cmd_packing = sja1105pqrs_l2_lookup_cmd_packing, | ||
312 | .access = (OP_READ | OP_WRITE | OP_DEL), | ||
313 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT, | ||
314 | .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD, | ||
315 | .addr = 0x24, | ||
316 | }, | ||
317 | [BLK_IDX_L2_POLICING] = {0}, | ||
318 | [BLK_IDX_VLAN_LOOKUP] = { | ||
319 | .entry_packing = sja1105_vlan_lookup_entry_packing, | ||
320 | .cmd_packing = sja1105_vlan_lookup_cmd_packing, | ||
321 | .access = (OP_READ | OP_WRITE | OP_DEL), | ||
322 | .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT, | ||
323 | .packed_size = SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD, | ||
324 | .addr = 0x2D, | ||
325 | }, | ||
326 | [BLK_IDX_L2_FORWARDING] = { | ||
327 | .entry_packing = sja1105_l2_forwarding_entry_packing, | ||
328 | .cmd_packing = sja1105_l2_forwarding_cmd_packing, | ||
329 | .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT, | ||
330 | .access = OP_WRITE, | ||
331 | .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD, | ||
332 | .addr = 0x2A, | ||
333 | }, | ||
334 | [BLK_IDX_MAC_CONFIG] = { | ||
335 | .entry_packing = sja1105pqrs_mac_config_entry_packing, | ||
336 | .cmd_packing = sja1105pqrs_mac_config_cmd_packing, | ||
337 | .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, | ||
338 | .access = (OP_READ | OP_WRITE), | ||
339 | .packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD, | ||
340 | .addr = 0x4B, | ||
341 | }, | ||
342 | [BLK_IDX_L2_LOOKUP_PARAMS] = { | ||
343 | .entry_packing = sja1105et_l2_lookup_params_entry_packing, | ||
344 | .cmd_packing = sja1105et_l2_lookup_params_cmd_packing, | ||
345 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, | ||
346 | .access = (OP_READ | OP_WRITE), | ||
347 | .packed_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, | ||
348 | .addr = 0x38, | ||
349 | }, | ||
350 | [BLK_IDX_L2_FORWARDING_PARAMS] = {0}, | ||
351 | [BLK_IDX_GENERAL_PARAMS] = { | ||
352 | .entry_packing = sja1105et_general_params_entry_packing, | ||
353 | .cmd_packing = sja1105et_general_params_cmd_packing, | ||
354 | .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT, | ||
355 | .access = OP_WRITE, | ||
356 | .packed_size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD, | ||
357 | .addr = 0x34, | ||
358 | }, | ||
359 | [BLK_IDX_XMII_PARAMS] = {0}, | ||
360 | }; | ||
361 | |||
362 | int sja1105_dynamic_config_read(struct sja1105_private *priv, | ||
363 | enum sja1105_blk_idx blk_idx, | ||
364 | int index, void *entry) | ||
365 | { | ||
366 | const struct sja1105_dynamic_table_ops *ops; | ||
367 | struct sja1105_dyn_cmd cmd = {0}; | ||
368 | /* SPI payload buffer */ | ||
369 | u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0}; | ||
370 | int retries = 3; | ||
371 | int rc; | ||
372 | |||
373 | if (blk_idx >= BLK_IDX_MAX_DYN) | ||
374 | return -ERANGE; | ||
375 | |||
376 | ops = &priv->info->dyn_ops[blk_idx]; | ||
377 | |||
378 | if (index >= ops->max_entry_count) | ||
379 | return -ERANGE; | ||
380 | if (!(ops->access & OP_READ)) | ||
381 | return -EOPNOTSUPP; | ||
382 | if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE) | ||
383 | return -ERANGE; | ||
384 | if (!ops->cmd_packing) | ||
385 | return -EOPNOTSUPP; | ||
386 | if (!ops->entry_packing) | ||
387 | return -EOPNOTSUPP; | ||
388 | |||
389 | cmd.valid = true; /* Trigger action on table entry */ | ||
390 | cmd.rdwrset = SPI_READ; /* Action is read */ | ||
391 | cmd.index = index; | ||
392 | ops->cmd_packing(packed_buf, &cmd, PACK); | ||
393 | |||
394 | /* Send SPI write operation: read config table entry */ | ||
395 | rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr, | ||
396 | packed_buf, ops->packed_size); | ||
397 | if (rc < 0) | ||
398 | return rc; | ||
399 | |||
400 | /* Loop until we have confirmation that hardware has finished | ||
401 | * processing the command and has cleared the VALID field | ||
402 | */ | ||
403 | do { | ||
404 | memset(packed_buf, 0, ops->packed_size); | ||
405 | |||
406 | /* Retrieve the read operation's result */ | ||
407 | rc = sja1105_spi_send_packed_buf(priv, SPI_READ, ops->addr, | ||
408 | packed_buf, ops->packed_size); | ||
409 | if (rc < 0) | ||
410 | return rc; | ||
411 | |||
412 | cmd = (struct sja1105_dyn_cmd) {0}; | ||
413 | ops->cmd_packing(packed_buf, &cmd, UNPACK); | ||
414 | /* UM10944: [valident] will always be found cleared | ||
415 | * during a read access with MGMTROUTE set. | ||
416 | * So don't error out in that case. | ||
417 | */ | ||
418 | if (!cmd.valident && blk_idx != BLK_IDX_MGMT_ROUTE) | ||
419 | return -EINVAL; | ||
420 | cpu_relax(); | ||
421 | } while (cmd.valid && --retries); | ||
422 | |||
423 | if (cmd.valid) | ||
424 | return -ETIMEDOUT; | ||
425 | |||
426 | /* Don't dereference possibly NULL pointer - maybe caller | ||
427 | * only wanted to see whether the entry existed or not. | ||
428 | */ | ||
429 | if (entry) | ||
430 | ops->entry_packing(packed_buf, entry, UNPACK); | ||
431 | return 0; | ||
432 | } | ||
433 | |||
434 | int sja1105_dynamic_config_write(struct sja1105_private *priv, | ||
435 | enum sja1105_blk_idx blk_idx, | ||
436 | int index, void *entry, bool keep) | ||
437 | { | ||
438 | const struct sja1105_dynamic_table_ops *ops; | ||
439 | struct sja1105_dyn_cmd cmd = {0}; | ||
440 | /* SPI payload buffer */ | ||
441 | u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0}; | ||
442 | int rc; | ||
443 | |||
444 | if (blk_idx >= BLK_IDX_MAX_DYN) | ||
445 | return -ERANGE; | ||
446 | |||
447 | ops = &priv->info->dyn_ops[blk_idx]; | ||
448 | |||
449 | if (index >= ops->max_entry_count) | ||
450 | return -ERANGE; | ||
451 | if (!(ops->access & OP_WRITE)) | ||
452 | return -EOPNOTSUPP; | ||
453 | if (!keep && !(ops->access & OP_DEL)) | ||
454 | return -EOPNOTSUPP; | ||
455 | if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE) | ||
456 | return -ERANGE; | ||
457 | |||
458 | cmd.valident = keep; /* If false, deletes entry */ | ||
459 | cmd.valid = true; /* Trigger action on table entry */ | ||
460 | cmd.rdwrset = SPI_WRITE; /* Action is write */ | ||
461 | cmd.index = index; | ||
462 | |||
463 | if (!ops->cmd_packing) | ||
464 | return -EOPNOTSUPP; | ||
465 | ops->cmd_packing(packed_buf, &cmd, PACK); | ||
466 | |||
467 | if (!ops->entry_packing) | ||
468 | return -EOPNOTSUPP; | ||
469 | /* Don't dereference potentially NULL pointer if just | ||
470 | * deleting a table entry is what was requested. For cases | ||
471 | * where 'index' field is physically part of entry structure, | ||
472 | * and needed here, we deal with that in the cmd_packing callback. | ||
473 | */ | ||
474 | if (keep) | ||
475 | ops->entry_packing(packed_buf, entry, PACK); | ||
476 | |||
477 | /* Send SPI write operation: write config table entry */ | ||
478 | rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr, | ||
479 | packed_buf, ops->packed_size); | ||
480 | if (rc < 0) | ||
481 | return rc; | ||
482 | |||
483 | cmd = (struct sja1105_dyn_cmd) {0}; | ||
484 | ops->cmd_packing(packed_buf, &cmd, UNPACK); | ||
485 | if (cmd.errors) | ||
486 | return -EINVAL; | ||
487 | |||
488 | return 0; | ||
489 | } | ||
490 | |||
491 | static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly) | ||
492 | { | ||
493 | int i; | ||
494 | |||
495 | for (i = 0; i < 8; i++) { | ||
496 | if ((crc ^ byte) & (1 << 7)) { | ||
497 | crc <<= 1; | ||
498 | crc ^= poly; | ||
499 | } else { | ||
500 | crc <<= 1; | ||
501 | } | ||
502 | byte <<= 1; | ||
503 | } | ||
504 | return crc; | ||
505 | } | ||
506 | |||
507 | /* CRC8 algorithm with non-reversed input, non-reversed output, | ||
508 | * no input xor and no output xor. Code customized for receiving | ||
509 | * the SJA1105 E/T FDB keys (vlanid, macaddr) as input. CRC polynomial | ||
510 | * is also received as an argument, in the Koopman notation in which | ||
511 | * the switch hardware stores it. | ||
512 | */ | ||
513 | u8 sja1105_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid) | ||
514 | { | ||
515 | struct sja1105_l2_lookup_params_entry *l2_lookup_params = | ||
516 | priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS].entries; | ||
517 | u64 poly_koopman = l2_lookup_params->poly; | ||
518 | /* Convert polynomial from Koopman to 'normal' notation */ | ||
519 | u8 poly = (u8)(1 + (poly_koopman << 1)); | ||
520 | u64 vlanid = l2_lookup_params->shared_learn ? 0 : vid; | ||
521 | u64 input = (vlanid << 48) | ether_addr_to_u64(addr); | ||
522 | u8 crc = 0; /* seed */ | ||
523 | int i; | ||
524 | |||
525 | /* Mask the eight bytes starting from MSB one at a time */ | ||
526 | for (i = 56; i >= 0; i -= 8) { | ||
527 | u8 byte = (input & (0xffull << i)) >> i; | ||
528 | |||
529 | crc = sja1105_crc8_add(crc, byte, poly); | ||
530 | } | ||
531 | return crc; | ||
532 | } | ||
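
For debugging FDB placement from the host side, the hash above can be recomputed in userspace. The sketch below mirrors sja1105_fdb_hash() and sja1105_crc8_add() with standard C types; it ignores the shared-learning special case (where the driver forces the VLAN ID to zero), and the 0x97 Koopman polynomial in the example call is only a placeholder for whatever value was actually programmed into the L2 Lookup Parameters table.

#include <stdint.h>
#include <stdio.h>

static uint8_t crc8_add(uint8_t crc, uint8_t byte, uint8_t poly)
{
	int i;

	for (i = 0; i < 8; i++) {
		if ((crc ^ byte) & 0x80)
			crc = (uint8_t)((crc << 1) ^ poly);
		else
			crc <<= 1;
		byte <<= 1;
	}
	return crc;
}

static uint8_t fdb_hash(const uint8_t addr[6], uint16_t vid, uint8_t poly_koopman)
{
	/* Same Koopman -> normal notation conversion as the driver */
	uint8_t poly = (uint8_t)(1 + (poly_koopman << 1));
	/* Same input layout as (vlanid << 48) | ether_addr_to_u64(addr) */
	uint64_t input = (uint64_t)vid << 48;
	uint8_t crc = 0;
	int i;

	for (i = 0; i < 6; i++)
		input |= (uint64_t)addr[i] << (8 * (5 - i));
	for (i = 56; i >= 0; i -= 8)
		crc = crc8_add(crc, (uint8_t)(input >> i), poly);
	return crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 };

	printf("bin = %u\n", fdb_hash(mac, 1, 0x97));
	return 0;
}
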
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h new file mode 100644 index 000000000000..77be59546a55 --- /dev/null +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h | |||
@@ -0,0 +1,43 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 | ||
2 | * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com> | ||
3 | */ | ||
4 | #ifndef _SJA1105_DYNAMIC_CONFIG_H | ||
5 | #define _SJA1105_DYNAMIC_CONFIG_H | ||
6 | |||
7 | #include "sja1105.h" | ||
8 | #include <linux/packing.h> | ||
9 | |||
10 | struct sja1105_dyn_cmd { | ||
11 | u64 valid; | ||
12 | u64 rdwrset; | ||
13 | u64 errors; | ||
14 | u64 valident; | ||
15 | u64 index; | ||
16 | }; | ||
17 | |||
18 | struct sja1105_dynamic_table_ops { | ||
19 | /* This returns size_t just to keep same prototype as the | ||
20 | * static config ops, of which we are reusing some functions. | ||
21 | */ | ||
22 | size_t (*entry_packing)(void *buf, void *entry_ptr, enum packing_op op); | ||
23 | void (*cmd_packing)(void *buf, struct sja1105_dyn_cmd *cmd, | ||
24 | enum packing_op op); | ||
25 | size_t max_entry_count; | ||
26 | size_t packed_size; | ||
27 | u64 addr; | ||
28 | u8 access; | ||
29 | }; | ||
30 | |||
31 | struct sja1105_mgmt_entry { | ||
32 | u64 tsreg; | ||
33 | u64 takets; | ||
34 | u64 macaddr; | ||
35 | u64 destports; | ||
36 | u64 enfport; | ||
37 | u64 index; | ||
38 | }; | ||
39 | |||
40 | extern struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN]; | ||
41 | extern struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN]; | ||
42 | |||
43 | #endif | ||
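
To show how the pieces in this header fit together, here is a hedged usage sketch of installing an FDB entry through the dynamic interface declared in sja1105.h. struct sja1105_l2_lookup_entry lives in sja1105_static_config.h (outside this hunk), so the exact field names used below are assumptions; the call itself matches the sja1105_dynamic_config_write() prototype and the keep semantics documented in that function.

static int sja1105_install_fdb_entry_sketch(struct sja1105_private *priv,
					    int index, u64 macaddr, u16 vid,
					    u64 destports)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};

	/* Field names assumed from sja1105_static_config.h */
	l2_lookup.macaddr = macaddr;
	l2_lookup.vlanid = vid;
	l2_lookup.destports = destports;
	l2_lookup.index = index;

	/* keep = true installs/overwrites the entry at this index;
	 * calling again with keep = false would delete it instead.
	 */
	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					    index, &l2_lookup, true);
}
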
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c new file mode 100644 index 000000000000..ab581a28cd41 --- /dev/null +++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c | |||
@@ -0,0 +1,419 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> | ||
3 | */ | ||
4 | #include "sja1105.h" | ||
5 | |||
6 | #define SJA1105_SIZE_MAC_AREA (0x02 * 4) | ||
7 | #define SJA1105_SIZE_HL1_AREA (0x10 * 4) | ||
8 | #define SJA1105_SIZE_HL2_AREA (0x4 * 4) | ||
9 | #define SJA1105_SIZE_QLEVEL_AREA (0x8 * 4) /* 0x4 to 0xB */ | ||
10 | |||
11 | struct sja1105_port_status_mac { | ||
12 | u64 n_runt; | ||
13 | u64 n_soferr; | ||
14 | u64 n_alignerr; | ||
15 | u64 n_miierr; | ||
16 | u64 typeerr; | ||
17 | u64 sizeerr; | ||
18 | u64 tctimeout; | ||
19 | u64 priorerr; | ||
20 | u64 nomaster; | ||
21 | u64 memov; | ||
22 | u64 memerr; | ||
23 | u64 invtyp; | ||
24 | u64 intcyov; | ||
25 | u64 domerr; | ||
26 | u64 pcfbagdrop; | ||
27 | u64 spcprior; | ||
28 | u64 ageprior; | ||
29 | u64 portdrop; | ||
30 | u64 lendrop; | ||
31 | u64 bagdrop; | ||
32 | u64 policeerr; | ||
33 | u64 drpnona664err; | ||
34 | u64 spcerr; | ||
35 | u64 agedrp; | ||
36 | }; | ||
37 | |||
38 | struct sja1105_port_status_hl1 { | ||
39 | u64 n_n664err; | ||
40 | u64 n_vlanerr; | ||
41 | u64 n_unreleased; | ||
42 | u64 n_sizeerr; | ||
43 | u64 n_crcerr; | ||
44 | u64 n_vlnotfound; | ||
45 | u64 n_ctpolerr; | ||
46 | u64 n_polerr; | ||
47 | u64 n_rxfrmsh; | ||
48 | u64 n_rxfrm; | ||
49 | u64 n_rxbytesh; | ||
50 | u64 n_rxbyte; | ||
51 | u64 n_txfrmsh; | ||
52 | u64 n_txfrm; | ||
53 | u64 n_txbytesh; | ||
54 | u64 n_txbyte; | ||
55 | }; | ||
56 | |||
57 | struct sja1105_port_status_hl2 { | ||
58 | u64 n_qfull; | ||
59 | u64 n_part_drop; | ||
60 | u64 n_egr_disabled; | ||
61 | u64 n_not_reach; | ||
62 | u64 qlevel_hwm[8]; /* Only for P/Q/R/S */ | ||
63 | u64 qlevel[8]; /* Only for P/Q/R/S */ | ||
64 | }; | ||
65 | |||
66 | struct sja1105_port_status { | ||
67 | struct sja1105_port_status_mac mac; | ||
68 | struct sja1105_port_status_hl1 hl1; | ||
69 | struct sja1105_port_status_hl2 hl2; | ||
70 | }; | ||
71 | |||
72 | static void | ||
73 | sja1105_port_status_mac_unpack(void *buf, | ||
74 | struct sja1105_port_status_mac *status) | ||
75 | { | ||
76 | /* Make pointer arithmetic work on 4 bytes */ | ||
77 | u32 *p = buf; | ||
78 | |||
79 | sja1105_unpack(p + 0x0, &status->n_runt, 31, 24, 4); | ||
80 | sja1105_unpack(p + 0x0, &status->n_soferr, 23, 16, 4); | ||
81 | sja1105_unpack(p + 0x0, &status->n_alignerr, 15, 8, 4); | ||
82 | sja1105_unpack(p + 0x0, &status->n_miierr, 7, 0, 4); | ||
83 | sja1105_unpack(p + 0x1, &status->typeerr, 27, 27, 4); | ||
84 | sja1105_unpack(p + 0x1, &status->sizeerr, 26, 26, 4); | ||
85 | sja1105_unpack(p + 0x1, &status->tctimeout, 25, 25, 4); | ||
86 | sja1105_unpack(p + 0x1, &status->priorerr, 24, 24, 4); | ||
87 | sja1105_unpack(p + 0x1, &status->nomaster, 23, 23, 4); | ||
88 | sja1105_unpack(p + 0x1, &status->memov, 22, 22, 4); | ||
89 | sja1105_unpack(p + 0x1, &status->memerr, 21, 21, 4); | ||
90 | sja1105_unpack(p + 0x1, &status->invtyp, 19, 19, 4); | ||
91 | sja1105_unpack(p + 0x1, &status->intcyov, 18, 18, 4); | ||
92 | sja1105_unpack(p + 0x1, &status->domerr, 17, 17, 4); | ||
93 | sja1105_unpack(p + 0x1, &status->pcfbagdrop, 16, 16, 4); | ||
94 | sja1105_unpack(p + 0x1, &status->spcprior, 15, 12, 4); | ||
95 | sja1105_unpack(p + 0x1, &status->ageprior, 11, 8, 4); | ||
96 | sja1105_unpack(p + 0x1, &status->portdrop, 6, 6, 4); | ||
97 | sja1105_unpack(p + 0x1, &status->lendrop, 5, 5, 4); | ||
98 | sja1105_unpack(p + 0x1, &status->bagdrop, 4, 4, 4); | ||
99 | sja1105_unpack(p + 0x1, &status->policeerr, 3, 3, 4); | ||
100 | sja1105_unpack(p + 0x1, &status->drpnona664err, 2, 2, 4); | ||
101 | sja1105_unpack(p + 0x1, &status->spcerr, 1, 1, 4); | ||
102 | sja1105_unpack(p + 0x1, &status->agedrp, 0, 0, 4); | ||
103 | } | ||
104 | |||
105 | static void | ||
106 | sja1105_port_status_hl1_unpack(void *buf, | ||
107 | struct sja1105_port_status_hl1 *status) | ||
108 | { | ||
109 | /* Make pointer arithmetic work on 4 bytes */ | ||
110 | u32 *p = buf; | ||
111 | |||
112 | sja1105_unpack(p + 0xF, &status->n_n664err, 31, 0, 4); | ||
113 | sja1105_unpack(p + 0xE, &status->n_vlanerr, 31, 0, 4); | ||
114 | sja1105_unpack(p + 0xD, &status->n_unreleased, 31, 0, 4); | ||
115 | sja1105_unpack(p + 0xC, &status->n_sizeerr, 31, 0, 4); | ||
116 | sja1105_unpack(p + 0xB, &status->n_crcerr, 31, 0, 4); | ||
117 | sja1105_unpack(p + 0xA, &status->n_vlnotfound, 31, 0, 4); | ||
118 | sja1105_unpack(p + 0x9, &status->n_ctpolerr, 31, 0, 4); | ||
119 | sja1105_unpack(p + 0x8, &status->n_polerr, 31, 0, 4); | ||
120 | sja1105_unpack(p + 0x7, &status->n_rxfrmsh, 31, 0, 4); | ||
121 | sja1105_unpack(p + 0x6, &status->n_rxfrm, 31, 0, 4); | ||
122 | sja1105_unpack(p + 0x5, &status->n_rxbytesh, 31, 0, 4); | ||
123 | sja1105_unpack(p + 0x4, &status->n_rxbyte, 31, 0, 4); | ||
124 | sja1105_unpack(p + 0x3, &status->n_txfrmsh, 31, 0, 4); | ||
125 | sja1105_unpack(p + 0x2, &status->n_txfrm, 31, 0, 4); | ||
126 | sja1105_unpack(p + 0x1, &status->n_txbytesh, 31, 0, 4); | ||
127 | sja1105_unpack(p + 0x0, &status->n_txbyte, 31, 0, 4); | ||
128 | status->n_rxfrm += status->n_rxfrmsh << 32; | ||
129 | status->n_rxbyte += status->n_rxbytesh << 32; | ||
130 | status->n_txfrm += status->n_txfrmsh << 32; | ||
131 | status->n_txbyte += status->n_txbytesh << 32; | ||
132 | } | ||
133 | |||
134 | static void | ||
135 | sja1105_port_status_hl2_unpack(void *buf, | ||
136 | struct sja1105_port_status_hl2 *status) | ||
137 | { | ||
138 | /* Make pointer arithmetic work on 4 bytes */ | ||
139 | u32 *p = buf; | ||
140 | |||
141 | sja1105_unpack(p + 0x3, &status->n_qfull, 31, 0, 4); | ||
142 | sja1105_unpack(p + 0x2, &status->n_part_drop, 31, 0, 4); | ||
143 | sja1105_unpack(p + 0x1, &status->n_egr_disabled, 31, 0, 4); | ||
144 | sja1105_unpack(p + 0x0, &status->n_not_reach, 31, 0, 4); | ||
145 | } | ||
146 | |||
147 | static void | ||
148 | sja1105pqrs_port_status_qlevel_unpack(void *buf, | ||
149 | struct sja1105_port_status_hl2 *status) | ||
150 | { | ||
151 | /* Make pointer arithmetic work on 4 bytes */ | ||
152 | u32 *p = buf; | ||
153 | int i; | ||
154 | |||
155 | for (i = 0; i < 8; i++) { | ||
156 | sja1105_unpack(p + i, &status->qlevel_hwm[i], 24, 16, 4); | ||
157 | sja1105_unpack(p + i, &status->qlevel[i], 8, 0, 4); | ||
158 | } | ||
159 | } | ||
160 | |||
161 | static int sja1105_port_status_get_mac(struct sja1105_private *priv, | ||
162 | struct sja1105_port_status_mac *status, | ||
163 | int port) | ||
164 | { | ||
165 | const struct sja1105_regs *regs = priv->info->regs; | ||
166 | u8 packed_buf[SJA1105_SIZE_MAC_AREA] = {0}; | ||
167 | int rc; | ||
168 | |||
169 | /* MAC area */ | ||
170 | rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac[port], | ||
171 | packed_buf, SJA1105_SIZE_MAC_AREA); | ||
172 | if (rc < 0) | ||
173 | return rc; | ||
174 | |||
175 | sja1105_port_status_mac_unpack(packed_buf, status); | ||
176 | |||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | static int sja1105_port_status_get_hl1(struct sja1105_private *priv, | ||
181 | struct sja1105_port_status_hl1 *status, | ||
182 | int port) | ||
183 | { | ||
184 | const struct sja1105_regs *regs = priv->info->regs; | ||
185 | u8 packed_buf[SJA1105_SIZE_HL1_AREA] = {0}; | ||
186 | int rc; | ||
187 | |||
188 | rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac_hl1[port], | ||
189 | packed_buf, SJA1105_SIZE_HL1_AREA); | ||
190 | if (rc < 0) | ||
191 | return rc; | ||
192 | |||
193 | sja1105_port_status_hl1_unpack(packed_buf, status); | ||
194 | |||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static int sja1105_port_status_get_hl2(struct sja1105_private *priv, | ||
199 | struct sja1105_port_status_hl2 *status, | ||
200 | int port) | ||
201 | { | ||
202 | const struct sja1105_regs *regs = priv->info->regs; | ||
203 | u8 packed_buf[SJA1105_SIZE_QLEVEL_AREA] = {0}; | ||
204 | int rc; | ||
205 | |||
206 | rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac_hl2[port], | ||
207 | packed_buf, SJA1105_SIZE_HL2_AREA); | ||
208 | if (rc < 0) | ||
209 | return rc; | ||
210 | |||
211 | sja1105_port_status_hl2_unpack(packed_buf, status); | ||
212 | |||
213 | /* Code below is strictly P/Q/R/S specific. */ | ||
214 | if (priv->info->device_id == SJA1105E_DEVICE_ID || | ||
215 | priv->info->device_id == SJA1105T_DEVICE_ID) | ||
216 | return 0; | ||
217 | |||
218 | rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->qlevel[port], | ||
219 | packed_buf, SJA1105_SIZE_QLEVEL_AREA); | ||
220 | if (rc < 0) | ||
221 | return rc; | ||
222 | |||
223 | sja1105pqrs_port_status_qlevel_unpack(packed_buf, status); | ||
224 | |||
225 | return 0; | ||
226 | } | ||
227 | |||
228 | static int sja1105_port_status_get(struct sja1105_private *priv, | ||
229 | struct sja1105_port_status *status, | ||
230 | int port) | ||
231 | { | ||
232 | int rc; | ||
233 | |||
234 | rc = sja1105_port_status_get_mac(priv, &status->mac, port); | ||
235 | if (rc < 0) | ||
236 | return rc; | ||
237 | rc = sja1105_port_status_get_hl1(priv, &status->hl1, port); | ||
238 | if (rc < 0) | ||
239 | return rc; | ||
240 | rc = sja1105_port_status_get_hl2(priv, &status->hl2, port); | ||
241 | if (rc < 0) | ||
242 | return rc; | ||
243 | |||
244 | return 0; | ||
245 | } | ||
246 | |||
247 | static char sja1105_port_stats[][ETH_GSTRING_LEN] = { | ||
248 | /* MAC-Level Diagnostic Counters */ | ||
249 | "n_runt", | ||
250 | "n_soferr", | ||
251 | "n_alignerr", | ||
252 | "n_miierr", | ||
253 | /* MAC-Level Diagnostic Flags */ | ||
254 | "typeerr", | ||
255 | "sizeerr", | ||
256 | "tctimeout", | ||
257 | "priorerr", | ||
258 | "nomaster", | ||
259 | "memov", | ||
260 | "memerr", | ||
261 | "invtyp", | ||
262 | "intcyov", | ||
263 | "domerr", | ||
264 | "pcfbagdrop", | ||
265 | "spcprior", | ||
266 | "ageprior", | ||
267 | "portdrop", | ||
268 | "lendrop", | ||
269 | "bagdrop", | ||
270 | "policeerr", | ||
271 | "drpnona664err", | ||
272 | "spcerr", | ||
273 | "agedrp", | ||
274 | /* High-Level Diagnostic Counters */ | ||
275 | "n_n664err", | ||
276 | "n_vlanerr", | ||
277 | "n_unreleased", | ||
278 | "n_sizeerr", | ||
279 | "n_crcerr", | ||
280 | "n_vlnotfound", | ||
281 | "n_ctpolerr", | ||
282 | "n_polerr", | ||
283 | "n_rxfrm", | ||
284 | "n_rxbyte", | ||
285 | "n_txfrm", | ||
286 | "n_txbyte", | ||
287 | "n_qfull", | ||
288 | "n_part_drop", | ||
289 | "n_egr_disabled", | ||
290 | "n_not_reach", | ||
291 | }; | ||
292 | |||
293 | static char sja1105pqrs_extra_port_stats[][ETH_GSTRING_LEN] = { | ||
294 | /* Queue Levels */ | ||
295 | "qlevel_hwm_0", | ||
296 | "qlevel_hwm_1", | ||
297 | "qlevel_hwm_2", | ||
298 | "qlevel_hwm_3", | ||
299 | "qlevel_hwm_4", | ||
300 | "qlevel_hwm_5", | ||
301 | "qlevel_hwm_6", | ||
302 | "qlevel_hwm_7", | ||
303 | "qlevel_0", | ||
304 | "qlevel_1", | ||
305 | "qlevel_2", | ||
306 | "qlevel_3", | ||
307 | "qlevel_4", | ||
308 | "qlevel_5", | ||
309 | "qlevel_6", | ||
310 | "qlevel_7", | ||
311 | }; | ||
312 | |||
313 | void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data) | ||
314 | { | ||
315 | struct sja1105_private *priv = ds->priv; | ||
316 | struct sja1105_port_status status; | ||
317 | int rc, i, k = 0; | ||
318 | |||
319 | memset(&status, 0, sizeof(status)); | ||
320 | |||
321 | rc = sja1105_port_status_get(priv, &status, port); | ||
322 | if (rc < 0) { | ||
323 | dev_err(ds->dev, "Failed to read port %d counters: %d\n", | ||
324 | port, rc); | ||
325 | return; | ||
326 | } | ||
327 | memset(data, 0, ARRAY_SIZE(sja1105_port_stats) * sizeof(u64)); | ||
328 | data[k++] = status.mac.n_runt; | ||
329 | data[k++] = status.mac.n_soferr; | ||
330 | data[k++] = status.mac.n_alignerr; | ||
331 | data[k++] = status.mac.n_miierr; | ||
332 | data[k++] = status.mac.typeerr; | ||
333 | data[k++] = status.mac.sizeerr; | ||
334 | data[k++] = status.mac.tctimeout; | ||
335 | data[k++] = status.mac.priorerr; | ||
336 | data[k++] = status.mac.nomaster; | ||
337 | data[k++] = status.mac.memov; | ||
338 | data[k++] = status.mac.memerr; | ||
339 | data[k++] = status.mac.invtyp; | ||
340 | data[k++] = status.mac.intcyov; | ||
341 | data[k++] = status.mac.domerr; | ||
342 | data[k++] = status.mac.pcfbagdrop; | ||
343 | data[k++] = status.mac.spcprior; | ||
344 | data[k++] = status.mac.ageprior; | ||
345 | data[k++] = status.mac.portdrop; | ||
346 | data[k++] = status.mac.lendrop; | ||
347 | data[k++] = status.mac.bagdrop; | ||
348 | data[k++] = status.mac.policeerr; | ||
349 | data[k++] = status.mac.drpnona664err; | ||
350 | data[k++] = status.mac.spcerr; | ||
351 | data[k++] = status.mac.agedrp; | ||
352 | data[k++] = status.hl1.n_n664err; | ||
353 | data[k++] = status.hl1.n_vlanerr; | ||
354 | data[k++] = status.hl1.n_unreleased; | ||
355 | data[k++] = status.hl1.n_sizeerr; | ||
356 | data[k++] = status.hl1.n_crcerr; | ||
357 | data[k++] = status.hl1.n_vlnotfound; | ||
358 | data[k++] = status.hl1.n_ctpolerr; | ||
359 | data[k++] = status.hl1.n_polerr; | ||
360 | data[k++] = status.hl1.n_rxfrm; | ||
361 | data[k++] = status.hl1.n_rxbyte; | ||
362 | data[k++] = status.hl1.n_txfrm; | ||
363 | data[k++] = status.hl1.n_txbyte; | ||
364 | data[k++] = status.hl2.n_qfull; | ||
365 | data[k++] = status.hl2.n_part_drop; | ||
366 | data[k++] = status.hl2.n_egr_disabled; | ||
367 | data[k++] = status.hl2.n_not_reach; | ||
368 | |||
369 | if (priv->info->device_id == SJA1105E_DEVICE_ID || | ||
370 | priv->info->device_id == SJA1105T_DEVICE_ID) | ||
371 | return; | ||
372 | |||
373 | memset(data + k, 0, ARRAY_SIZE(sja1105pqrs_extra_port_stats) * | ||
374 | sizeof(u64)); | ||
375 | for (i = 0; i < 8; i++) { | ||
376 | data[k++] = status.hl2.qlevel_hwm[i]; | ||
377 | data[k++] = status.hl2.qlevel[i]; | ||
378 | } | ||
379 | } | ||
380 | |||
381 | void sja1105_get_strings(struct dsa_switch *ds, int port, | ||
382 | u32 stringset, u8 *data) | ||
383 | { | ||
384 | struct sja1105_private *priv = ds->priv; | ||
385 | u8 *p = data; | ||
386 | int i; | ||
387 | |||
388 | switch (stringset) { | ||
389 | case ETH_SS_STATS: | ||
390 | for (i = 0; i < ARRAY_SIZE(sja1105_port_stats); i++) { | ||
391 | strlcpy(p, sja1105_port_stats[i], ETH_GSTRING_LEN); | ||
392 | p += ETH_GSTRING_LEN; | ||
393 | } | ||
394 | if (priv->info->device_id == SJA1105E_DEVICE_ID || | ||
395 | priv->info->device_id == SJA1105T_DEVICE_ID) | ||
396 | return; | ||
397 | for (i = 0; i < ARRAY_SIZE(sja1105pqrs_extra_port_stats); i++) { | ||
398 | strlcpy(p, sja1105pqrs_extra_port_stats[i], | ||
399 | ETH_GSTRING_LEN); | ||
400 | p += ETH_GSTRING_LEN; | ||
401 | } | ||
402 | break; | ||
403 | } | ||
404 | } | ||
405 | |||
406 | int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset) | ||
407 | { | ||
408 | int count = ARRAY_SIZE(sja1105_port_stats); | ||
409 | struct sja1105_private *priv = ds->priv; | ||
410 | |||
411 | if (sset != ETH_SS_STATS) | ||
412 | return -EOPNOTSUPP; | ||
413 | |||
414 | if (priv->info->device_id == SJA1105PR_DEVICE_ID || | ||
415 | priv->info->device_id == SJA1105QS_DEVICE_ID) | ||
416 | count += ARRAY_SIZE(sja1105pqrs_extra_port_stats); | ||
417 | |||
418 | return count; | ||
419 | } | ||
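/* The three ethtool callbacks above have to stay in sync: get_sset_count()
 * returns the number of strings, get_strings() emits them in a fixed order,
 * and get_ethtool_stats() fills data[] in exactly that order. As a usage
 * sketch (assuming a user port named, say, swp2):
 *
 *   ethtool -S swp2
 *
 * prints each counter name from sja1105_port_stats (plus the P/Q/R/S-only
 * extras) next to the corresponding u64 value.
 */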
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c new file mode 100644 index 000000000000..50ff625c85d6 --- /dev/null +++ b/drivers/net/dsa/sja1105/sja1105_main.c | |||
@@ -0,0 +1,1675 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH | ||
3 | * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> | ||
4 | */ | ||
5 | |||
6 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
7 | |||
8 | #include <linux/delay.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/printk.h> | ||
11 | #include <linux/spi/spi.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/gpio/consumer.h> | ||
14 | #include <linux/phylink.h> | ||
15 | #include <linux/of.h> | ||
16 | #include <linux/of_net.h> | ||
17 | #include <linux/of_mdio.h> | ||
18 | #include <linux/of_device.h> | ||
19 | #include <linux/netdev_features.h> | ||
20 | #include <linux/netdevice.h> | ||
21 | #include <linux/if_bridge.h> | ||
22 | #include <linux/if_ether.h> | ||
23 | #include <linux/dsa/8021q.h> | ||
24 | #include "sja1105.h" | ||
25 | |||
26 | static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len, | ||
27 | unsigned int startup_delay) | ||
28 | { | ||
29 | gpiod_set_value_cansleep(gpio, 1); | ||
30 | /* Wait for minimum reset pulse length */ | ||
31 | msleep(pulse_len); | ||
32 | gpiod_set_value_cansleep(gpio, 0); | ||
33 | /* Wait until chip is ready after reset */ | ||
34 | msleep(startup_delay); | ||
35 | } | ||
36 | |||
37 | static void | ||
38 | sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd, | ||
39 | int from, int to, bool allow) | ||
40 | { | ||
41 | if (allow) { | ||
42 | l2_fwd[from].bc_domain |= BIT(to); | ||
43 | l2_fwd[from].reach_port |= BIT(to); | ||
44 | l2_fwd[from].fl_domain |= BIT(to); | ||
45 | } else { | ||
46 | l2_fwd[from].bc_domain &= ~BIT(to); | ||
47 | l2_fwd[from].reach_port &= ~BIT(to); | ||
48 | l2_fwd[from].fl_domain &= ~BIT(to); | ||
49 | } | ||
50 | } | ||
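/* Example: sja1105_port_allow_traffic(l2_fwd, 1, 4, true) sets bit 4 in the
 * broadcast, reachability and flood domains of port 1, i.e. frames received
 * on port 1 may from then on be forwarded, broadcast or flooded towards
 * port 4; passing false clears the same bits again.
 */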
51 | |||
52 | /* Structure used to temporarily transport device tree | ||
53 | * settings into sja1105_setup | ||
54 | */ | ||
55 | struct sja1105_dt_port { | ||
56 | phy_interface_t phy_mode; | ||
57 | sja1105_mii_role_t role; | ||
58 | }; | ||
59 | |||
60 | static int sja1105_init_mac_settings(struct sja1105_private *priv) | ||
61 | { | ||
62 | struct sja1105_mac_config_entry default_mac = { | ||
63 | /* Enable all 8 priority queues on egress. | ||
64 | * Every queue i holds top[i] - base[i] frames. | ||
65 | * Sum of top[i] - base[i] is 511 (max hardware limit). | ||
66 | */ | ||
67 | .top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF}, | ||
68 | .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0}, | ||
69 | .enabled = {true, true, true, true, true, true, true, true}, | ||
70 | /* Keep standard IFG of 12 bytes on egress. */ | ||
71 | .ifg = 0, | ||
72 | /* Always put the MAC speed in automatic mode, where it can be | ||
73 | * retrieved from the PHY object through phylib and | ||
74 | * sja1105_adjust_port_config. | ||
75 | */ | ||
76 | .speed = SJA1105_SPEED_AUTO, | ||
77 | /* No static correction for 1-step 1588 events */ | ||
78 | .tp_delin = 0, | ||
79 | .tp_delout = 0, | ||
80 | /* Disable aging for critical TTEthernet traffic */ | ||
81 | .maxage = 0xFF, | ||
82 | /* Internal VLAN (pvid) to apply to untagged ingress */ | ||
83 | .vlanprio = 0, | ||
84 | .vlanid = 0, | ||
85 | .ing_mirr = false, | ||
86 | .egr_mirr = false, | ||
87 | /* Don't drop traffic with other EtherType than ETH_P_IP */ | ||
88 | .drpnona664 = false, | ||
89 | /* Don't drop double-tagged traffic */ | ||
90 | .drpdtag = false, | ||
91 | /* Don't drop untagged traffic */ | ||
92 | .drpuntag = false, | ||
93 | /* Don't retag 802.1p (VID 0) traffic with the pvid */ | ||
94 | .retag = false, | ||
95 | /* Disable learning and I/O on user ports by default - | ||
96 | * STP will enable it. | ||
97 | */ | ||
98 | .dyn_learn = false, | ||
99 | .egress = false, | ||
100 | .ingress = false, | ||
101 | }; | ||
102 | struct sja1105_mac_config_entry *mac; | ||
103 | struct sja1105_table *table; | ||
104 | int i; | ||
105 | |||
106 | table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG]; | ||
107 | |||
108 | /* Discard previous MAC Configuration Table */ | ||
109 | if (table->entry_count) { | ||
110 | kfree(table->entries); | ||
111 | table->entry_count = 0; | ||
112 | } | ||
113 | |||
114 | table->entries = kcalloc(SJA1105_NUM_PORTS, | ||
115 | table->ops->unpacked_entry_size, GFP_KERNEL); | ||
116 | if (!table->entries) | ||
117 | return -ENOMEM; | ||
118 | |||
119 | /* Override table based on phylib DT bindings */ | ||
120 | table->entry_count = SJA1105_NUM_PORTS; | ||
121 | |||
122 | mac = table->entries; | ||
123 | |||
124 | for (i = 0; i < SJA1105_NUM_PORTS; i++) { | ||
125 | mac[i] = default_mac; | ||
126 | if (i == dsa_upstream_port(priv->ds, i)) { | ||
127 | /* STP doesn't get called for CPU port, so we need to | ||
128 | * set the I/O parameters statically. | ||
129 | */ | ||
130 | mac[i].dyn_learn = true; | ||
131 | mac[i].ingress = true; | ||
132 | mac[i].egress = true; | ||
133 | } | ||
134 | } | ||
135 | |||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static int sja1105_init_mii_settings(struct sja1105_private *priv, | ||
140 | struct sja1105_dt_port *ports) | ||
141 | { | ||
142 | struct device *dev = &priv->spidev->dev; | ||
143 | struct sja1105_xmii_params_entry *mii; | ||
144 | struct sja1105_table *table; | ||
145 | int i; | ||
146 | |||
147 | table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS]; | ||
148 | |||
149 | /* Discard previous xMII Mode Parameters Table */ | ||
150 | if (table->entry_count) { | ||
151 | kfree(table->entries); | ||
152 | table->entry_count = 0; | ||
153 | } | ||
154 | |||
155 | table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT, | ||
156 | table->ops->unpacked_entry_size, GFP_KERNEL); | ||
157 | if (!table->entries) | ||
158 | return -ENOMEM; | ||
159 | |||
160 | /* Override table based on phylib DT bindings */ | ||
161 | table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT; | ||
162 | |||
163 | mii = table->entries; | ||
164 | |||
165 | for (i = 0; i < SJA1105_NUM_PORTS; i++) { | ||
166 | switch (ports[i].phy_mode) { | ||
167 | case PHY_INTERFACE_MODE_MII: | ||
168 | mii->xmii_mode[i] = XMII_MODE_MII; | ||
169 | break; | ||
170 | case PHY_INTERFACE_MODE_RMII: | ||
171 | mii->xmii_mode[i] = XMII_MODE_RMII; | ||
172 | break; | ||
173 | case PHY_INTERFACE_MODE_RGMII: | ||
174 | case PHY_INTERFACE_MODE_RGMII_ID: | ||
175 | case PHY_INTERFACE_MODE_RGMII_RXID: | ||
176 | case PHY_INTERFACE_MODE_RGMII_TXID: | ||
177 | mii->xmii_mode[i] = XMII_MODE_RGMII; | ||
178 | break; | ||
179 | default: | ||
180 | dev_err(dev, "Unsupported PHY mode %s!\n", | ||
181 | phy_modes(ports[i].phy_mode)); | ||
182 | } | ||
183 | |||
184 | mii->phy_mac[i] = ports[i].role; | ||
185 | } | ||
186 | return 0; | ||
187 | } | ||
188 | |||
189 | static int sja1105_init_static_fdb(struct sja1105_private *priv) | ||
190 | { | ||
191 | struct sja1105_table *table; | ||
192 | |||
193 | table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; | ||
194 | |||
195 | /* We only populate the FDB table through dynamic | ||
196 | * L2 Address Lookup entries | ||
197 | */ | ||
198 | if (table->entry_count) { | ||
199 | kfree(table->entries); | ||
200 | table->entry_count = 0; | ||
201 | } | ||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | static int sja1105_init_l2_lookup_params(struct sja1105_private *priv) | ||
206 | { | ||
207 | struct sja1105_table *table; | ||
208 | struct sja1105_l2_lookup_params_entry default_l2_lookup_params = { | ||
209 | /* Learned FDB entries are forgotten after 300 seconds */ | ||
210 | .maxage = SJA1105_AGEING_TIME_MS(300000), | ||
211 | /* All entries within a FDB bin are available for learning */ | ||
212 | .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE, | ||
213 | /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */ | ||
214 | .poly = 0x97, | ||
215 | /* This selects between Independent VLAN Learning (IVL) and | ||
216 | * Shared VLAN Learning (SVL) | ||
217 | */ | ||
218 | .shared_learn = false, | ||
219 | /* Don't discard management traffic based on ENFPORT - | ||
220 | * we don't perform SMAC port enforcement anyway, so | ||
221 | * what we are setting here doesn't matter. | ||
222 | */ | ||
223 | .no_enf_hostprt = false, | ||
224 | /* Don't learn SMAC for mac_fltres1 and mac_fltres0. | ||
225 | * Maybe correlate with no_linklocal_learn from bridge driver? | ||
226 | */ | ||
227 | .no_mgmt_learn = true, | ||
228 | }; | ||
229 | |||
230 | table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; | ||
231 | |||
232 | if (table->entry_count) { | ||
233 | kfree(table->entries); | ||
234 | table->entry_count = 0; | ||
235 | } | ||
236 | |||
237 | table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, | ||
238 | table->ops->unpacked_entry_size, GFP_KERNEL); | ||
239 | if (!table->entries) | ||
240 | return -ENOMEM; | ||
241 | |||
242 | table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT; | ||
243 | |||
244 | /* This table only has a single entry */ | ||
245 | ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] = | ||
246 | default_l2_lookup_params; | ||
247 | |||
248 | return 0; | ||
249 | } | ||
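/* The "Koopman notation" used for .poly above keeps the x^8..x^1 coefficients
 * of the CRC-8 polynomial and drops the implicit +1: the full bit vector for
 * x^8 + x^5 + x^3 + x^2 + x + 1 is 0x12F, and 0x12F >> 1 = 0x97.
 */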
250 | |||
251 | static int sja1105_init_static_vlan(struct sja1105_private *priv) | ||
252 | { | ||
253 | struct sja1105_table *table; | ||
254 | struct sja1105_vlan_lookup_entry pvid = { | ||
255 | .ving_mirr = 0, | ||
256 | .vegr_mirr = 0, | ||
257 | .vmemb_port = 0, | ||
258 | .vlan_bc = 0, | ||
259 | .tag_port = 0, | ||
260 | .vlanid = 0, | ||
261 | }; | ||
262 | int i; | ||
263 | |||
264 | table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; | ||
265 | |||
266 | /* The static VLAN table will only contain the initial pvid of 0. | ||
267 | * All other VLANs are to be configured through dynamic entries, | ||
268 | * and kept in the static configuration table as backing memory. | ||
269 | * The pvid of 0 is sufficient to pass traffic while the ports are | ||
270 | * standalone and when vlan_filtering is disabled. When filtering | ||
271 | * gets enabled, the switchdev core sets up the VLAN ID 1 and sets | ||
272 | * it as the new pvid. Actually 'pvid 1' still comes up in 'bridge | ||
273 | * vlan' even when vlan_filtering is off, but it has no effect. | ||
274 | */ | ||
275 | if (table->entry_count) { | ||
276 | kfree(table->entries); | ||
277 | table->entry_count = 0; | ||
278 | } | ||
279 | |||
280 | table->entries = kcalloc(1, table->ops->unpacked_entry_size, | ||
281 | GFP_KERNEL); | ||
282 | if (!table->entries) | ||
283 | return -ENOMEM; | ||
284 | |||
285 | table->entry_count = 1; | ||
286 | |||
287 | /* VLAN ID 0: all DT-defined ports are members; no restrictions on | ||
288 | * forwarding; always transmit priority-tagged frames as untagged. | ||
289 | */ | ||
290 | for (i = 0; i < SJA1105_NUM_PORTS; i++) { | ||
291 | pvid.vmemb_port |= BIT(i); | ||
292 | pvid.vlan_bc |= BIT(i); | ||
293 | pvid.tag_port &= ~BIT(i); | ||
294 | } | ||
295 | |||
296 | ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; | ||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | static int sja1105_init_l2_forwarding(struct sja1105_private *priv) | ||
301 | { | ||
302 | struct sja1105_l2_forwarding_entry *l2fwd; | ||
303 | struct sja1105_table *table; | ||
304 | int i, j; | ||
305 | |||
306 | table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING]; | ||
307 | |||
308 | if (table->entry_count) { | ||
309 | kfree(table->entries); | ||
310 | table->entry_count = 0; | ||
311 | } | ||
312 | |||
313 | table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT, | ||
314 | table->ops->unpacked_entry_size, GFP_KERNEL); | ||
315 | if (!table->entries) | ||
316 | return -ENOMEM; | ||
317 | |||
318 | table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT; | ||
319 | |||
320 | l2fwd = table->entries; | ||
321 | |||
322 | /* First 5 entries define the forwarding rules */ | ||
323 | for (i = 0; i < SJA1105_NUM_PORTS; i++) { | ||
324 | unsigned int upstream = dsa_upstream_port(priv->ds, i); | ||
325 | |||
326 | for (j = 0; j < SJA1105_NUM_TC; j++) | ||
327 | l2fwd[i].vlan_pmap[j] = j; | ||
328 | |||
329 | if (i == upstream) | ||
330 | continue; | ||
331 | |||
332 | sja1105_port_allow_traffic(l2fwd, i, upstream, true); | ||
333 | sja1105_port_allow_traffic(l2fwd, upstream, i, true); | ||
334 | } | ||
335 | /* Next 8 entries define VLAN PCP mapping from ingress to egress. | ||
336 | * Create a one-to-one mapping. | ||
337 | */ | ||
338 | for (i = 0; i < SJA1105_NUM_TC; i++) | ||
339 | for (j = 0; j < SJA1105_NUM_PORTS; j++) | ||
340 | l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i; | ||
341 | |||
342 | return 0; | ||
343 | } | ||
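/* Resulting layout of the 13-entry L2 Forwarding table (5 ports plus 8
 * traffic classes): entries 0-4 are the per-port rows carrying the
 * reachability, broadcast and flood masks, wired up so that each user port
 * initially reaches only its upstream (CPU) port; entries 5-12 are the
 * per-priority rows holding the VLAN PCP maps, filled in as the one-to-one
 * mapping described above.
 */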
344 | |||
345 | static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv) | ||
346 | { | ||
347 | struct sja1105_l2_forwarding_params_entry default_l2fwd_params = { | ||
348 | /* Disallow dynamic reconfiguration of vlan_pmap */ | ||
349 | .max_dynp = 0, | ||
350 | /* Use a single memory partition for all ingress queues */ | ||
351 | .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 }, | ||
352 | }; | ||
353 | struct sja1105_table *table; | ||
354 | |||
355 | table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; | ||
356 | |||
357 | if (table->entry_count) { | ||
358 | kfree(table->entries); | ||
359 | table->entry_count = 0; | ||
360 | } | ||
361 | |||
362 | table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT, | ||
363 | table->ops->unpacked_entry_size, GFP_KERNEL); | ||
364 | if (!table->entries) | ||
365 | return -ENOMEM; | ||
366 | |||
367 | table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT; | ||
368 | |||
369 | /* This table only has a single entry */ | ||
370 | ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] = | ||
371 | default_l2fwd_params; | ||
372 | |||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | static int sja1105_init_general_params(struct sja1105_private *priv) | ||
377 | { | ||
378 | struct sja1105_general_params_entry default_general_params = { | ||
379 | /* Disallow dynamic changing of the mirror port */ | ||
380 | .mirr_ptacu = 0, | ||
381 | .switchid = priv->ds->index, | ||
382 | /* Priority queue for link-local frames trapped to CPU */ | ||
383 | .hostprio = 0, | ||
384 | .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A, | ||
385 | .mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK, | ||
386 | .incl_srcpt1 = true, | ||
387 | .send_meta1 = false, | ||
388 | .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B, | ||
389 | .mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK, | ||
390 | .incl_srcpt0 = true, | ||
391 | .send_meta0 = false, | ||
392 | /* The destination for traffic matching mac_fltres1 and | ||
393 | * mac_fltres0 on all ports except host_port. Such traffic | ||
394 | * received on host_port itself would be dropped, except | ||
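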
395 | * by installing a temporary 'management route' | ||
396 | */ | ||
397 | .host_port = dsa_upstream_port(priv->ds, 0), | ||
398 | /* Same as host port */ | ||
399 | .mirr_port = dsa_upstream_port(priv->ds, 0), | ||
400 | /* Link-local traffic received on casc_port will be forwarded | ||
401 | * to host_port without embedding the source port and device ID | ||
402 | * info in the destination MAC address (presumably because it | ||
403 | * is a cascaded port and a downstream SJA switch already did | ||
404 | * that). Default to an invalid port (to disable the feature) | ||
405 | * and overwrite this if we find any DSA (cascaded) ports. | ||
406 | */ | ||
407 | .casc_port = SJA1105_NUM_PORTS, | ||
408 | /* No TTEthernet */ | ||
409 | .vllupformat = 0, | ||
410 | .vlmarker = 0, | ||
411 | .vlmask = 0, | ||
412 | /* Only update correctionField for 1-step PTP (L2 transport) */ | ||
413 | .ignore2stf = 0, | ||
414 | /* Forcefully disable VLAN filtering by telling | ||
415 | * the switch that VLAN has a different EtherType. | ||
416 | */ | ||
417 | .tpid = ETH_P_SJA1105, | ||
418 | .tpid2 = ETH_P_SJA1105, | ||
419 | }; | ||
420 | struct sja1105_table *table; | ||
421 | int i, k = 0; | ||
422 | |||
423 | for (i = 0; i < SJA1105_NUM_PORTS; i++) { | ||
424 | if (dsa_is_dsa_port(priv->ds, i)) | ||
425 | default_general_params.casc_port = i; | ||
426 | else if (dsa_is_user_port(priv->ds, i)) | ||
427 | priv->ports[i].mgmt_slot = k++; | ||
428 | } | ||
429 | |||
430 | table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; | ||
431 | |||
432 | if (table->entry_count) { | ||
433 | kfree(table->entries); | ||
434 | table->entry_count = 0; | ||
435 | } | ||
436 | |||
437 | table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT, | ||
438 | table->ops->unpacked_entry_size, GFP_KERNEL); | ||
439 | if (!table->entries) | ||
440 | return -ENOMEM; | ||
441 | |||
442 | table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT; | ||
443 | |||
444 | /* This table only has a single entry */ | ||
445 | ((struct sja1105_general_params_entry *)table->entries)[0] = | ||
446 | default_general_params; | ||
447 | |||
448 | return 0; | ||
449 | } | ||
450 | |||
451 | #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000) | ||
452 | |||
453 | static inline void | ||
454 | sja1105_setup_policer(struct sja1105_l2_policing_entry *policing, | ||
455 | int index) | ||
456 | { | ||
457 | policing[index].sharindx = index; | ||
458 | policing[index].smax = 65535; /* Burst size in bytes */ | ||
459 | policing[index].rate = SJA1105_RATE_MBPS(1000); | ||
460 | policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; | ||
461 | policing[index].partition = 0; | ||
462 | } | ||
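/* Worked example for the defaults above: SJA1105_RATE_MBPS(1000) expands to
 * (1000 * 64000) / 1000 = 64000 rate units, i.e. the macro maps Mbps to the
 * hardware's rate granularity of 64 units per Mbps, and maxlen comes out to
 * ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN = 1514 + 4 + 4 = 1522 bytes, a
 * full-sized single-tagged frame.
 */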
463 | |||
464 | static int sja1105_init_l2_policing(struct sja1105_private *priv) | ||
465 | { | ||
466 | struct sja1105_l2_policing_entry *policing; | ||
467 | struct sja1105_table *table; | ||
468 | int i, j, k; | ||
469 | |||
470 | table = &priv->static_config.tables[BLK_IDX_L2_POLICING]; | ||
471 | |||
472 | /* Discard previous L2 Policing Table */ | ||
473 | if (table->entry_count) { | ||
474 | kfree(table->entries); | ||
475 | table->entry_count = 0; | ||
476 | } | ||
477 | |||
478 | table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT, | ||
479 | table->ops->unpacked_entry_size, GFP_KERNEL); | ||
480 | if (!table->entries) | ||
481 | return -ENOMEM; | ||
482 | |||
483 | table->entry_count = SJA1105_MAX_L2_POLICING_COUNT; | ||
484 | |||
485 | policing = table->entries; | ||
486 | |||
487 | /* k sweeps through all unicast policers (0-39). | ||
488 | * bcast sweeps through policers 40-44. | ||
489 | */ | ||
490 | for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) { | ||
491 | int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i; | ||
492 | |||
493 | for (j = 0; j < SJA1105_NUM_TC; j++, k++) | ||
494 | sja1105_setup_policer(policing, k); | ||
495 | |||
496 | /* Set up this port's policer for broadcast traffic */ | ||
497 | sja1105_setup_policer(policing, bcast); | ||
498 | } | ||
499 | return 0; | ||
500 | } | ||
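/* Index layout produced by the loop above, with 5 ports and 8 traffic
 * classes: policers 0-7 serve port 0 (one per TC), 8-15 serve port 1, and so
 * on up to 39, while policers 40-44 are the per-port broadcast policers.
 * For port 2, for instance, k sweeps 16-23 and bcast = (5 * 8) + 2 = 42.
 */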
501 | |||
502 | static int sja1105_static_config_load(struct sja1105_private *priv, | ||
503 | struct sja1105_dt_port *ports) | ||
504 | { | ||
505 | int rc; | ||
506 | |||
507 | sja1105_static_config_free(&priv->static_config); | ||
508 | rc = sja1105_static_config_init(&priv->static_config, | ||
509 | priv->info->static_ops, | ||
510 | priv->info->device_id); | ||
511 | if (rc) | ||
512 | return rc; | ||
513 | |||
514 | /* Build static configuration */ | ||
515 | rc = sja1105_init_mac_settings(priv); | ||
516 | if (rc < 0) | ||
517 | return rc; | ||
518 | rc = sja1105_init_mii_settings(priv, ports); | ||
519 | if (rc < 0) | ||
520 | return rc; | ||
521 | rc = sja1105_init_static_fdb(priv); | ||
522 | if (rc < 0) | ||
523 | return rc; | ||
524 | rc = sja1105_init_static_vlan(priv); | ||
525 | if (rc < 0) | ||
526 | return rc; | ||
527 | rc = sja1105_init_l2_lookup_params(priv); | ||
528 | if (rc < 0) | ||
529 | return rc; | ||
530 | rc = sja1105_init_l2_forwarding(priv); | ||
531 | if (rc < 0) | ||
532 | return rc; | ||
533 | rc = sja1105_init_l2_forwarding_params(priv); | ||
534 | if (rc < 0) | ||
535 | return rc; | ||
536 | rc = sja1105_init_l2_policing(priv); | ||
537 | if (rc < 0) | ||
538 | return rc; | ||
539 | rc = sja1105_init_general_params(priv); | ||
540 | if (rc < 0) | ||
541 | return rc; | ||
542 | |||
543 | /* Send initial configuration to hardware via SPI */ | ||
544 | return sja1105_static_config_upload(priv); | ||
545 | } | ||
546 | |||
547 | static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, | ||
548 | const struct sja1105_dt_port *ports) | ||
549 | { | ||
550 | int i; | ||
551 | |||
552 | for (i = 0; i < SJA1105_NUM_PORTS; i++) { | ||
553 | if (ports[i].role == XMII_MAC) | ||
554 | continue; | ||
555 | |||
556 | if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID || | ||
557 | ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) | ||
558 | priv->rgmii_rx_delay[i] = true; | ||
559 | |||
560 | if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || | ||
561 | ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) | ||
562 | priv->rgmii_tx_delay[i] = true; | ||
563 | |||
564 | if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) && | ||
565 | !priv->info->setup_rgmii_delay) | ||
566 | return -EINVAL; | ||
567 | } | ||
568 | return 0; | ||
569 | } | ||
570 | |||
571 | static int sja1105_parse_ports_node(struct sja1105_private *priv, | ||
572 | struct sja1105_dt_port *ports, | ||
573 | struct device_node *ports_node) | ||
574 | { | ||
575 | struct device *dev = &priv->spidev->dev; | ||
576 | struct device_node *child; | ||
577 | |||
578 | for_each_child_of_node(ports_node, child) { | ||
579 | struct device_node *phy_node; | ||
580 | int phy_mode; | ||
581 | u32 index; | ||
582 | |||
583 | /* Get switch port number from DT */ | ||
584 | if (of_property_read_u32(child, "reg", &index) < 0) { | ||
585 | dev_err(dev, "Port number not defined in device tree " | ||
586 | "(property \"reg\")\n"); | ||
587 | return -ENODEV; | ||
588 | } | ||
589 | |||
590 | /* Get PHY mode from DT */ | ||
591 | phy_mode = of_get_phy_mode(child); | ||
592 | if (phy_mode < 0) { | ||
593 | dev_err(dev, "Failed to read phy-mode or " | ||
594 | "phy-interface-type property for port %d\n", | ||
595 | index); | ||
596 | return -ENODEV; | ||
597 | } | ||
598 | ports[index].phy_mode = phy_mode; | ||
599 | |||
600 | phy_node = of_parse_phandle(child, "phy-handle", 0); | ||
601 | if (!phy_node) { | ||
602 | if (!of_phy_is_fixed_link(child)) { | ||
603 | dev_err(dev, "phy-handle or fixed-link " | ||
604 | "properties missing!\n"); | ||
605 | return -ENODEV; | ||
606 | } | ||
607 | /* phy-handle is missing, but fixed-link isn't. | ||
608 | * So it's a fixed link. Default to PHY role. | ||
609 | */ | ||
610 | ports[index].role = XMII_PHY; | ||
611 | } else { | ||
612 | /* phy-handle present => put port in MAC role */ | ||
613 | ports[index].role = XMII_MAC; | ||
614 | of_node_put(phy_node); | ||
615 | } | ||
616 | |||
617 | /* The MAC/PHY role can be overridden with explicit bindings */ | ||
618 | if (of_property_read_bool(child, "sja1105,role-mac")) | ||
619 | ports[index].role = XMII_MAC; | ||
620 | else if (of_property_read_bool(child, "sja1105,role-phy")) | ||
621 | ports[index].role = XMII_PHY; | ||
622 | } | ||
623 | |||
624 | return 0; | ||
625 | } | ||
626 | |||
627 | static int sja1105_parse_dt(struct sja1105_private *priv, | ||
628 | struct sja1105_dt_port *ports) | ||
629 | { | ||
630 | struct device *dev = &priv->spidev->dev; | ||
631 | struct device_node *switch_node = dev->of_node; | ||
632 | struct device_node *ports_node; | ||
633 | int rc; | ||
634 | |||
635 | ports_node = of_get_child_by_name(switch_node, "ports"); | ||
636 | if (!ports_node) { | ||
637 | dev_err(dev, "Incorrect bindings: absent \"ports\" node\n"); | ||
638 | return -ENODEV; | ||
639 | } | ||
640 | |||
641 | rc = sja1105_parse_ports_node(priv, ports, ports_node); | ||
642 | of_node_put(ports_node); | ||
643 | |||
644 | return rc; | ||
645 | } | ||
646 | |||
647 | /* Convert back and forth MAC speed from Mbps to SJA1105 encoding */ | ||
648 | static int sja1105_speed[] = { | ||
649 | [SJA1105_SPEED_AUTO] = 0, | ||
650 | [SJA1105_SPEED_10MBPS] = 10, | ||
651 | [SJA1105_SPEED_100MBPS] = 100, | ||
652 | [SJA1105_SPEED_1000MBPS] = 1000, | ||
653 | }; | ||
654 | |||
655 | static sja1105_speed_t sja1105_get_speed_cfg(unsigned int speed_mbps) | ||
656 | { | ||
657 | int i; | ||
658 | |||
659 | for (i = SJA1105_SPEED_AUTO; i <= SJA1105_SPEED_1000MBPS; i++) | ||
660 | if (sja1105_speed[i] == speed_mbps) | ||
661 | return i; | ||
662 | return -EINVAL; | ||
663 | } | ||
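/* Example: sja1105_get_speed_cfg(1000) returns SJA1105_SPEED_1000MBPS, a
 * speed of 0 maps back to SJA1105_SPEED_AUTO, and anything not listed in
 * sja1105_speed[] yields -EINVAL.
 */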
664 | |||
665 | /* Set link speed and enable/disable traffic I/O in the MAC configuration | ||
666 | * for a specific port. | ||
667 | * | ||
668 | * @speed_mbps: If 0, leave the speed unchanged, else adapt MAC to PHY speed. | ||
669 | * @enabled: Manage Rx and Tx settings for this port. If false, overrides the | ||
670 | * settings from the STP state, but not persistently (does not | ||
671 | * overwrite the static MAC info for this port). | ||
672 | */ | ||
673 | static int sja1105_adjust_port_config(struct sja1105_private *priv, int port, | ||
674 | int speed_mbps, bool enabled) | ||
675 | { | ||
676 | struct sja1105_mac_config_entry dyn_mac; | ||
677 | struct sja1105_xmii_params_entry *mii; | ||
678 | struct sja1105_mac_config_entry *mac; | ||
679 | struct device *dev = priv->ds->dev; | ||
680 | sja1105_phy_interface_t phy_mode; | ||
681 | sja1105_speed_t speed; | ||
682 | int rc; | ||
683 | |||
684 | mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; | ||
685 | mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; | ||
686 | |||
687 | speed = sja1105_get_speed_cfg(speed_mbps); | ||
688 | if (speed_mbps && speed < 0) { | ||
689 | dev_err(dev, "Invalid speed %iMbps\n", speed_mbps); | ||
690 | return -EINVAL; | ||
691 | } | ||
692 | |||
693 | /* If requested, overwrite SJA1105_SPEED_AUTO from the static MAC | ||
694 | * configuration table, since this will be used for the clocking setup, | ||
695 | * and we no longer need to store it in the static config (already told | ||
696 | * hardware we want auto during upload phase). | ||
697 | */ | ||
698 | if (speed_mbps) | ||
699 | mac[port].speed = speed; | ||
700 | else | ||
701 | mac[port].speed = SJA1105_SPEED_AUTO; | ||
702 | |||
703 | /* On P/Q/R/S, one can read from the device via the MAC reconfiguration | ||
704 | * tables. On E/T, MAC reconfig tables are not readable, only writable. | ||
705 | * We have to *know* what the MAC looks like. For the sake of keeping | ||
706 | * the code common, we'll use the static configuration tables as a | ||
707 | * reasonable approximation for both E/T and P/Q/R/S. | ||
708 | */ | ||
709 | dyn_mac = mac[port]; | ||
710 | dyn_mac.ingress = enabled && mac[port].ingress; | ||
711 | dyn_mac.egress = enabled && mac[port].egress; | ||
712 | |||
713 | /* Write to the dynamic reconfiguration tables */ | ||
714 | rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, | ||
715 | port, &dyn_mac, true); | ||
716 | if (rc < 0) { | ||
717 | dev_err(dev, "Failed to write MAC config: %d\n", rc); | ||
718 | return rc; | ||
719 | } | ||
720 | |||
721 | /* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at | ||
722 | * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and | ||
723 | * RMII no change of the clock setup is required. Actually, changing | ||
724 | * the clock setup does interrupt the clock signal for a certain time | ||
725 | * which causes trouble for all PHYs relying on this signal. | ||
726 | */ | ||
727 | if (!enabled) | ||
728 | return 0; | ||
729 | |||
730 | phy_mode = mii->xmii_mode[port]; | ||
731 | if (phy_mode != XMII_MODE_RGMII) | ||
732 | return 0; | ||
733 | |||
734 | return sja1105_clocking_setup_port(priv, port); | ||
735 | } | ||
736 | |||
737 | static void sja1105_adjust_link(struct dsa_switch *ds, int port, | ||
738 | struct phy_device *phydev) | ||
739 | { | ||
740 | struct sja1105_private *priv = ds->priv; | ||
741 | |||
742 | if (!phydev->link) | ||
743 | sja1105_adjust_port_config(priv, port, 0, false); | ||
744 | else | ||
745 | sja1105_adjust_port_config(priv, port, phydev->speed, true); | ||
746 | } | ||
747 | |||
748 | static void sja1105_phylink_validate(struct dsa_switch *ds, int port, | ||
749 | unsigned long *supported, | ||
750 | struct phylink_link_state *state) | ||
751 | { | ||
752 | /* Construct a new mask which exhaustively contains all link features | ||
753 | * supported by the MAC, and then apply that (logical AND) to what will | ||
754 | * be sent to the PHY for "marketing". | ||
755 | */ | ||
756 | __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; | ||
757 | struct sja1105_private *priv = ds->priv; | ||
758 | struct sja1105_xmii_params_entry *mii; | ||
759 | |||
760 | mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; | ||
761 | |||
762 | /* The MAC does not support pause frames, and also doesn't | ||
763 | * support half-duplex traffic modes. | ||
764 | */ | ||
765 | phylink_set(mask, Autoneg); | ||
766 | phylink_set(mask, MII); | ||
767 | phylink_set(mask, 10baseT_Full); | ||
768 | phylink_set(mask, 100baseT_Full); | ||
769 | if (mii->xmii_mode[port] == XMII_MODE_RGMII) | ||
770 | phylink_set(mask, 1000baseT_Full); | ||
771 | |||
772 | bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); | ||
773 | bitmap_and(state->advertising, state->advertising, mask, | ||
774 | __ETHTOOL_LINK_MODE_MASK_NBITS); | ||
775 | } | ||
776 | |||
777 | /* First-generation switches have a 4-way set associative TCAM that | ||
778 | * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of | ||
779 | * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin). | ||
780 | * For the placement of a newly learnt FDB entry, the switch selects the bin | ||
781 | * based on a hash function, and the way within that bin incrementally. | ||
782 | */ | ||
783 | static inline int sja1105et_fdb_index(int bin, int way) | ||
784 | { | ||
785 | return bin * SJA1105ET_FDB_BIN_SIZE + way; | ||
786 | } | ||
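/* With a bin size of 4 and 1024 FDB entries this gives 256 bins of 4 ways
 * each; e.g. bin 37, way 2 maps to TCAM index 37 * 4 + 2 = 150.
 */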
787 | |||
788 | static int sja1105_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin, | ||
789 | const u8 *addr, u16 vid, | ||
790 | struct sja1105_l2_lookup_entry *match, | ||
791 | int *last_unused) | ||
792 | { | ||
793 | int way; | ||
794 | |||
795 | for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) { | ||
796 | struct sja1105_l2_lookup_entry l2_lookup = {0}; | ||
797 | int index = sja1105et_fdb_index(bin, way); | ||
798 | |||
799 | /* Skip unused entries, optionally marking them | ||
800 | * into the return value | ||
801 | */ | ||
802 | if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, | ||
803 | index, &l2_lookup)) { | ||
804 | if (last_unused) | ||
805 | *last_unused = way; | ||
806 | continue; | ||
807 | } | ||
808 | |||
809 | if (l2_lookup.macaddr == ether_addr_to_u64(addr) && | ||
810 | l2_lookup.vlanid == vid) { | ||
811 | if (match) | ||
812 | *match = l2_lookup; | ||
813 | return way; | ||
814 | } | ||
815 | } | ||
816 | /* Return an invalid entry index if not found */ | ||
817 | return -1; | ||
818 | } | ||
819 | |||
820 | static int sja1105_fdb_add(struct dsa_switch *ds, int port, | ||
821 | const unsigned char *addr, u16 vid) | ||
822 | { | ||
823 | struct sja1105_l2_lookup_entry l2_lookup = {0}; | ||
824 | struct sja1105_private *priv = ds->priv; | ||
825 | struct device *dev = ds->dev; | ||
826 | int last_unused = -1; | ||
827 | int bin, way; | ||
828 | |||
829 | bin = sja1105_fdb_hash(priv, addr, vid); | ||
830 | |||
831 | way = sja1105_is_fdb_entry_in_bin(priv, bin, addr, vid, | ||
832 | &l2_lookup, &last_unused); | ||
833 | if (way >= 0) { | ||
834 | /* We have an FDB entry. Is our port in the destination | ||
835 | * mask? If yes, we need to do nothing. If not, we need | ||
836 | * to rewrite the entry by adding this port to it. | ||
837 | */ | ||
838 | if (l2_lookup.destports & BIT(port)) | ||
839 | return 0; | ||
840 | l2_lookup.destports |= BIT(port); | ||
841 | } else { | ||
843 | |||
844 | /* We don't have an FDB entry. We construct a new one and | ||
845 | * try to find a place for it within the FDB table. | ||
846 | */ | ||
847 | l2_lookup.macaddr = ether_addr_to_u64(addr); | ||
848 | l2_lookup.destports = BIT(port); | ||
849 | l2_lookup.vlanid = vid; | ||
850 | |||
851 | if (last_unused >= 0) { | ||
852 | way = last_unused; | ||
853 | } else { | ||
854 | /* Bin is full, need to evict somebody. | ||
855 | * Choose victim at random. If you get these messages | ||
856 | * often, you may need to consider changing the | ||
857 | * distribution function: | ||
858 | * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly | ||
859 | */ | ||
860 | way = get_random_u32() % SJA1105ET_FDB_BIN_SIZE; | ||
862 | dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n", | ||
863 | bin, addr, way); | ||
864 | /* Evict entry */ | ||
865 | sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, | ||
866 | sja1105et_fdb_index(bin, way), NULL, false); | ||
867 | } | ||
868 | } | ||
869 | l2_lookup.index = sja1105et_fdb_index(bin, way); | ||
870 | |||
871 | return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, | ||
872 | l2_lookup.index, &l2_lookup, | ||
873 | true); | ||
874 | } | ||
875 | |||
876 | static int sja1105_fdb_del(struct dsa_switch *ds, int port, | ||
877 | const unsigned char *addr, u16 vid) | ||
878 | { | ||
879 | struct sja1105_l2_lookup_entry l2_lookup = {0}; | ||
880 | struct sja1105_private *priv = ds->priv; | ||
881 | int index, bin, way; | ||
882 | bool keep; | ||
883 | |||
884 | bin = sja1105_fdb_hash(priv, addr, vid); | ||
885 | way = sja1105_is_fdb_entry_in_bin(priv, bin, addr, vid, | ||
886 | &l2_lookup, NULL); | ||
887 | if (way < 0) | ||
888 | return 0; | ||
889 | index = sja1105et_fdb_index(bin, way); | ||
890 | |||
891 | /* We have an FDB entry. Is our port in the destination mask? If yes, | ||
892 | * we need to remove it. If the resulting port mask becomes empty, we | ||
893 | * need to completely evict the FDB entry. | ||
894 | * Otherwise we just write it back. | ||
895 | */ | ||
896 | if (l2_lookup.destports & BIT(port)) | ||
897 | l2_lookup.destports &= ~BIT(port); | ||
898 | if (l2_lookup.destports) | ||
899 | keep = true; | ||
900 | else | ||
901 | keep = false; | ||
902 | |||
903 | return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, | ||
904 | index, &l2_lookup, keep); | ||
905 | } | ||
906 | |||
907 | static int sja1105_fdb_dump(struct dsa_switch *ds, int port, | ||
908 | dsa_fdb_dump_cb_t *cb, void *data) | ||
909 | { | ||
910 | struct sja1105_private *priv = ds->priv; | ||
911 | struct device *dev = ds->dev; | ||
912 | int i; | ||
913 | |||
914 | for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { | ||
915 | struct sja1105_l2_lookup_entry l2_lookup = {0}; | ||
916 | u8 macaddr[ETH_ALEN]; | ||
917 | int rc; | ||
918 | |||
919 | rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, | ||
920 | i, &l2_lookup); | ||
921 | /* No fdb entry at i, not an issue */ | ||
922 | if (rc == -EINVAL) | ||
923 | continue; | ||
924 | if (rc) { | ||
925 | dev_err(dev, "Failed to dump FDB: %d\n", rc); | ||
926 | return rc; | ||
927 | } | ||
928 | |||
929 | /* FDB dump callback is per port. This means we have to | ||
930 | * disregard a valid entry if it's not for this port, even if | ||
931 | * only to revisit it later. This is inefficient because the | ||
932 | * 1024-sized FDB table needs to be traversed 4 times through | ||
933 | * SPI during a 'bridge fdb show' command. | ||
934 | */ | ||
935 | if (!(l2_lookup.destports & BIT(port))) | ||
936 | continue; | ||
937 | u64_to_ether_addr(l2_lookup.macaddr, macaddr); | ||
938 | cb(macaddr, l2_lookup.vlanid, false, data); | ||
939 | } | ||
940 | return 0; | ||
941 | } | ||
942 | |||
943 | /* This callback needs to be present */ | ||
944 | static int sja1105_mdb_prepare(struct dsa_switch *ds, int port, | ||
945 | const struct switchdev_obj_port_mdb *mdb) | ||
946 | { | ||
947 | return 0; | ||
948 | } | ||
949 | |||
950 | static void sja1105_mdb_add(struct dsa_switch *ds, int port, | ||
951 | const struct switchdev_obj_port_mdb *mdb) | ||
952 | { | ||
953 | sja1105_fdb_add(ds, port, mdb->addr, mdb->vid); | ||
954 | } | ||
955 | |||
956 | static int sja1105_mdb_del(struct dsa_switch *ds, int port, | ||
957 | const struct switchdev_obj_port_mdb *mdb) | ||
958 | { | ||
959 | return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid); | ||
960 | } | ||
961 | |||
962 | static int sja1105_bridge_member(struct dsa_switch *ds, int port, | ||
963 | struct net_device *br, bool member) | ||
964 | { | ||
965 | struct sja1105_l2_forwarding_entry *l2_fwd; | ||
966 | struct sja1105_private *priv = ds->priv; | ||
967 | int i, rc; | ||
968 | |||
969 | l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries; | ||
970 | |||
971 | for (i = 0; i < SJA1105_NUM_PORTS; i++) { | ||
972 | /* Add this port to the forwarding matrix of the | ||
973 | * other ports in the same bridge, and vice versa. | ||
974 | */ | ||
975 | if (!dsa_is_user_port(ds, i)) | ||
976 | continue; | ||
977 | /* For the ports already under the bridge, only one thing needs | ||
978 | * to be done, and that is to add this port to their | ||
979 | * reachability domain. So we can perform the SPI write for | ||
980 | * them immediately. However, for this port itself (the one | ||
981 | * that is new to the bridge), we need to add all other ports | ||
982 | * to its reachability domain. So we do that incrementally in | ||
983 | * this loop, and perform the SPI write only at the end, once | ||
984 | * the domain contains all other bridge ports. | ||
985 | */ | ||
986 | if (i == port) | ||
987 | continue; | ||
988 | if (dsa_to_port(ds, i)->bridge_dev != br) | ||
989 | continue; | ||
990 | sja1105_port_allow_traffic(l2_fwd, i, port, member); | ||
991 | sja1105_port_allow_traffic(l2_fwd, port, i, member); | ||
992 | |||
993 | rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING, | ||
994 | i, &l2_fwd[i], true); | ||
995 | if (rc < 0) | ||
996 | return rc; | ||
997 | } | ||
998 | |||
999 | return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING, | ||
1000 | port, &l2_fwd[port], true); | ||
1001 | } | ||
1002 | |||
1003 | static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, | ||
1004 | u8 state) | ||
1005 | { | ||
1006 | struct sja1105_private *priv = ds->priv; | ||
1007 | struct sja1105_mac_config_entry *mac; | ||
1008 | |||
1009 | mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; | ||
1010 | |||
1011 | switch (state) { | ||
1012 | case BR_STATE_DISABLED: | ||
1013 | case BR_STATE_BLOCKING: | ||
1014 | /* From UM10944 description of DRPDTAG (why put this there?): | ||
1015 | * "Management traffic flows to the port regardless of the state | ||
1016 | * of the INGRESS flag". So BPDUs will still be allowed to pass. | ||
1017 | * At the moment no difference between DISABLED and BLOCKING. | ||
1018 | */ | ||
1019 | mac[port].ingress = false; | ||
1020 | mac[port].egress = false; | ||
1021 | mac[port].dyn_learn = false; | ||
1022 | break; | ||
1023 | case BR_STATE_LISTENING: | ||
1024 | mac[port].ingress = true; | ||
1025 | mac[port].egress = false; | ||
1026 | mac[port].dyn_learn = false; | ||
1027 | break; | ||
1028 | case BR_STATE_LEARNING: | ||
1029 | mac[port].ingress = true; | ||
1030 | mac[port].egress = false; | ||
1031 | mac[port].dyn_learn = true; | ||
1032 | break; | ||
1033 | case BR_STATE_FORWARDING: | ||
1034 | mac[port].ingress = true; | ||
1035 | mac[port].egress = true; | ||
1036 | mac[port].dyn_learn = true; | ||
1037 | break; | ||
1038 | default: | ||
1039 | dev_err(ds->dev, "invalid STP state: %d\n", state); | ||
1040 | return; | ||
1041 | } | ||
1042 | |||
1043 | sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, | ||
1044 | &mac[port], true); | ||
1045 | } | ||
1046 | |||
1047 | static int sja1105_bridge_join(struct dsa_switch *ds, int port, | ||
1048 | struct net_device *br) | ||
1049 | { | ||
1050 | return sja1105_bridge_member(ds, port, br, true); | ||
1051 | } | ||
1052 | |||
1053 | static void sja1105_bridge_leave(struct dsa_switch *ds, int port, | ||
1054 | struct net_device *br) | ||
1055 | { | ||
1056 | sja1105_bridge_member(ds, port, br, false); | ||
1057 | } | ||
1058 | |||
1059 | static u8 sja1105_stp_state_get(struct sja1105_private *priv, int port) | ||
1060 | { | ||
1061 | struct sja1105_mac_config_entry *mac; | ||
1062 | |||
1063 | mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; | ||
1064 | |||
1065 | if (!mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn) | ||
1066 | return BR_STATE_BLOCKING; | ||
1067 | if (mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn) | ||
1068 | return BR_STATE_LISTENING; | ||
1069 | if (mac[port].ingress && !mac[port].egress && mac[port].dyn_learn) | ||
1070 | return BR_STATE_LEARNING; | ||
1071 | if (mac[port].ingress && mac[port].egress && mac[port].dyn_learn) | ||
1072 | return BR_STATE_FORWARDING; | ||
1073 | return -EINVAL; | ||
1074 | } | ||
1075 | |||
1076 | /* For situations where we need to change a setting at runtime that is only | ||
1077 | * available through the static configuration, resetting the switch in order | ||
1078 | * to upload the new static config is unavoidable. Back up the settings we | ||
1079 | * modify at runtime (currently only MAC) and restore them after uploading, | ||
1080 | * such that this operation is relatively seamless. | ||
1081 | */ | ||
1082 | static int sja1105_static_config_reload(struct sja1105_private *priv) | ||
1083 | { | ||
1084 | struct sja1105_mac_config_entry *mac; | ||
1085 | int speed_mbps[SJA1105_NUM_PORTS]; | ||
1086 | u8 stp_state[SJA1105_NUM_PORTS]; | ||
1087 | int rc, i; | ||
1088 | |||
1089 | mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; | ||
1090 | |||
1091 | /* Back up settings changed by sja1105_adjust_port_config and | ||
1092 | * sja1105_bridge_stp_state_set and restore their defaults. | ||
1093 | */ | ||
1094 | for (i = 0; i < SJA1105_NUM_PORTS; i++) { | ||
1095 | speed_mbps[i] = sja1105_speed[mac[i].speed]; | ||
1096 | mac[i].speed = SJA1105_SPEED_AUTO; | ||
1097 | if (i == dsa_upstream_port(priv->ds, i)) { | ||
1098 | mac[i].ingress = true; | ||
1099 | mac[i].egress = true; | ||
1100 | mac[i].dyn_learn = true; | ||
1101 | } else { | ||
1102 | stp_state[i] = sja1105_stp_state_get(priv, i); | ||
1103 | mac[i].ingress = false; | ||
1104 | mac[i].egress = false; | ||
1105 | mac[i].dyn_learn = false; | ||
1106 | } | ||
1107 | } | ||
1108 | |||
1109 | /* Reset switch and send updated static configuration */ | ||
1110 | rc = sja1105_static_config_upload(priv); | ||
1111 | if (rc < 0) | ||
1112 | goto out; | ||
1113 | |||
1114 | /* Configure the CGU (PLLs) for MII and RMII PHYs. | ||
1115 | * For these interfaces there is no dynamic configuration | ||
1116 | * needed, since PLLs have same settings at all speeds. | ||
1117 | */ | ||
1118 | rc = sja1105_clocking_setup(priv); | ||
1119 | if (rc < 0) | ||
1120 | goto out; | ||
1121 | |||
1122 | for (i = 0; i < SJA1105_NUM_PORTS; i++) { | ||
1123 | bool enabled = (speed_mbps[i] != 0); | ||
1124 | |||
1125 | if (i != dsa_upstream_port(priv->ds, i)) | ||
1126 | sja1105_bridge_stp_state_set(priv->ds, i, stp_state[i]); | ||
1127 | |||
1128 | rc = sja1105_adjust_port_config(priv, i, speed_mbps[i], | ||
1129 | enabled); | ||
1130 | if (rc < 0) | ||
1131 | goto out; | ||
1132 | } | ||
1133 | out: | ||
1134 | return rc; | ||
1135 | } | ||
1136 | |||
1137 | /* The TPID setting belongs to the General Parameters table, | ||
1138 | * which can only be partially reconfigured at runtime (and not the TPID). | ||
1139 | * So a switch reset is required. | ||
1140 | */ | ||
1141 | static int sja1105_change_tpid(struct sja1105_private *priv, | ||
1142 | u16 tpid, u16 tpid2) | ||
1143 | { | ||
1144 | struct sja1105_general_params_entry *general_params; | ||
1145 | struct sja1105_table *table; | ||
1146 | |||
1147 | table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; | ||
1148 | general_params = table->entries; | ||
1149 | general_params->tpid = tpid; | ||
1150 | general_params->tpid2 = tpid2; | ||
1151 | return sja1105_static_config_reload(priv); | ||
1152 | } | ||
1153 | |||
1154 | static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid) | ||
1155 | { | ||
1156 | struct sja1105_mac_config_entry *mac; | ||
1157 | |||
1158 | mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; | ||
1159 | |||
1160 | mac[port].vlanid = pvid; | ||
1161 | |||
1162 | return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, | ||
1163 | &mac[port], true); | ||
1164 | } | ||
1165 | |||
1166 | static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid) | ||
1167 | { | ||
1168 | struct sja1105_vlan_lookup_entry *vlan; | ||
1169 | int count, i; | ||
1170 | |||
1171 | vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; | ||
1172 | count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count; | ||
1173 | |||
1174 | for (i = 0; i < count; i++) | ||
1175 | if (vlan[i].vlanid == vid) | ||
1176 | return i; | ||
1177 | |||
1178 | /* Return an invalid entry index if not found */ | ||
1179 | return -1; | ||
1180 | } | ||
1181 | |||
1182 | static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid, | ||
1183 | bool enabled, bool untagged) | ||
1184 | { | ||
1185 | struct sja1105_vlan_lookup_entry *vlan; | ||
1186 | struct sja1105_table *table; | ||
1187 | bool keep = true; | ||
1188 | int match, rc; | ||
1189 | |||
1190 | table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; | ||
1191 | |||
1192 | match = sja1105_is_vlan_configured(priv, vid); | ||
1193 | if (match < 0) { | ||
1194 | /* Can't delete a missing entry. */ | ||
1195 | if (!enabled) | ||
1196 | return 0; | ||
1197 | rc = sja1105_table_resize(table, table->entry_count + 1); | ||
1198 | if (rc) | ||
1199 | return rc; | ||
1200 | match = table->entry_count - 1; | ||
1201 | } | ||
1202 | /* Assign pointer after the resize (it's new memory) */ | ||
1203 | vlan = table->entries; | ||
1204 | vlan[match].vlanid = vid; | ||
1205 | if (enabled) { | ||
1206 | vlan[match].vlan_bc |= BIT(port); | ||
1207 | vlan[match].vmemb_port |= BIT(port); | ||
1208 | } else { | ||
1209 | vlan[match].vlan_bc &= ~BIT(port); | ||
1210 | vlan[match].vmemb_port &= ~BIT(port); | ||
1211 | } | ||
1212 | /* Also unset tag_port if removing this VLAN was requested, | ||
1213 | * just so we don't have a confusing bitmap (no practical purpose). | ||
1214 | */ | ||
1215 | if (untagged || !enabled) | ||
1216 | vlan[match].tag_port &= ~BIT(port); | ||
1217 | else | ||
1218 | vlan[match].tag_port |= BIT(port); | ||
1219 | /* If there's no port left as member of this VLAN, | ||
1220 | * it's time for it to go. | ||
1221 | */ | ||
1222 | if (!vlan[match].vmemb_port) | ||
1223 | keep = false; | ||
1224 | |||
1225 | dev_dbg(priv->ds->dev, | ||
1226 | "%s: port %d, vid %llu, broadcast domain 0x%llx, " | ||
1227 | "port members 0x%llx, tagged ports 0x%llx, keep %d\n", | ||
1228 | __func__, port, vlan[match].vlanid, vlan[match].vlan_bc, | ||
1229 | vlan[match].vmemb_port, vlan[match].tag_port, keep); | ||
1230 | |||
1231 | rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid, | ||
1232 | &vlan[match], keep); | ||
1233 | if (rc < 0) | ||
1234 | return rc; | ||
1235 | |||
1236 | if (!keep) | ||
1237 | return sja1105_table_delete_entry(table, match); | ||
1238 | |||
1239 | return 0; | ||
1240 | } | ||
1241 | |||
1242 | static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled) | ||
1243 | { | ||
1244 | int rc, i; | ||
1245 | |||
1246 | for (i = 0; i < SJA1105_NUM_PORTS; i++) { | ||
1247 | rc = dsa_port_setup_8021q_tagging(ds, i, enabled); | ||
1248 | if (rc < 0) { | ||
1249 | dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n", | ||
1250 | i, rc); | ||
1251 | return rc; | ||
1252 | } | ||
1253 | } | ||
1254 | dev_info(ds->dev, "%s switch tagging\n", | ||
1255 | enabled ? "Enabled" : "Disabled"); | ||
1256 | return 0; | ||
1257 | } | ||
1258 | |||
1259 | static enum dsa_tag_protocol | ||
1260 | sja1105_get_tag_protocol(struct dsa_switch *ds, int port) | ||
1261 | { | ||
1262 | return DSA_TAG_PROTO_SJA1105; | ||
1263 | } | ||
1264 | |||
1265 | /* This callback needs to be present */ | ||
1266 | static int sja1105_vlan_prepare(struct dsa_switch *ds, int port, | ||
1267 | const struct switchdev_obj_port_vlan *vlan) | ||
1268 | { | ||
1269 | return 0; | ||
1270 | } | ||
1271 | |||
1272 | static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) | ||
1273 | { | ||
1274 | struct sja1105_private *priv = ds->priv; | ||
1275 | int rc; | ||
1276 | |||
1277 | if (enabled) | ||
1278 | /* Enable VLAN filtering. */ | ||
1279 | rc = sja1105_change_tpid(priv, ETH_P_8021Q, ETH_P_8021AD); | ||
1280 | else | ||
1281 | /* Disable VLAN filtering. */ | ||
1282 | rc = sja1105_change_tpid(priv, ETH_P_SJA1105, ETH_P_SJA1105); | ||
1283 | if (rc) | ||
1284 | dev_err(ds->dev, "Failed to change VLAN Ethertype\n"); | ||
1285 | |||
1286 | /* Switch port identification based on 802.1Q is only possible | ||
1287 | * if we are not under a vlan_filtering bridge. So make sure | ||
1288 | * the two configurations are mutually exclusive. | ||
1289 | */ | ||
1290 | return sja1105_setup_8021q_tagging(ds, !enabled); | ||
1291 | } | ||
1292 | |||
1293 | static void sja1105_vlan_add(struct dsa_switch *ds, int port, | ||
1294 | const struct switchdev_obj_port_vlan *vlan) | ||
1295 | { | ||
1296 | struct sja1105_private *priv = ds->priv; | ||
1297 | u16 vid; | ||
1298 | int rc; | ||
1299 | |||
1300 | for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { | ||
1301 | rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags & | ||
1302 | BRIDGE_VLAN_INFO_UNTAGGED); | ||
1303 | if (rc < 0) { | ||
1304 | dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n", | ||
1305 | vid, port, rc); | ||
1306 | return; | ||
1307 | } | ||
1308 | if (vlan->flags & BRIDGE_VLAN_INFO_PVID) { | ||
1309 | rc = sja1105_pvid_apply(ds->priv, port, vid); | ||
1310 | if (rc < 0) { | ||
1311 | dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n", | ||
1312 | vid, port, rc); | ||
1313 | return; | ||
1314 | } | ||
1315 | } | ||
1316 | } | ||
1317 | } | ||
1318 | |||
1319 | static int sja1105_vlan_del(struct dsa_switch *ds, int port, | ||
1320 | const struct switchdev_obj_port_vlan *vlan) | ||
1321 | { | ||
1322 | struct sja1105_private *priv = ds->priv; | ||
1323 | u16 vid; | ||
1324 | int rc; | ||
1325 | |||
1326 | for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { | ||
1327 | rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags & | ||
1328 | BRIDGE_VLAN_INFO_UNTAGGED); | ||
1329 | if (rc < 0) { | ||
1330 | dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n", | ||
1331 | vid, port, rc); | ||
1332 | return rc; | ||
1333 | } | ||
1334 | } | ||
1335 | return 0; | ||
1336 | } | ||
1337 | |||
1338 | /* The programming model for the SJA1105 switch is "all-at-once" via static | ||
1339 | * configuration tables. Some of these can be dynamically modified at runtime, | ||
1340 | * but not the xMII mode parameters table. | ||
1341 | * Furthermore, some PHYs may not have crystals for generating their clocks | ||
1342 | * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's | ||
1343 | * ref_clk pin. So port clocking needs to be initialized early, before | ||
1344 | * connecting to PHYs is attempted, otherwise they won't respond through MDIO. | ||
1345 | * Setting the correct PHY link speed does not matter at this point. | ||
1346 | * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY | ||
1347 | * bindings are not yet parsed by DSA core. We need to parse early so that we | ||
1348 | * can populate the xMII mode parameters table. | ||
1349 | */ | ||
1350 | static int sja1105_setup(struct dsa_switch *ds) | ||
1351 | { | ||
1352 | struct sja1105_dt_port ports[SJA1105_NUM_PORTS]; | ||
1353 | struct sja1105_private *priv = ds->priv; | ||
1354 | int rc; | ||
1355 | |||
1356 | rc = sja1105_parse_dt(priv, ports); | ||
1357 | if (rc < 0) { | ||
1358 | dev_err(ds->dev, "Failed to parse DT: %d\n", rc); | ||
1359 | return rc; | ||
1360 | } | ||
1361 | |||
1362 | /* Error out early if internal delays are required through DT | ||
1363 | * and we can't apply them. | ||
1364 | */ | ||
1365 | rc = sja1105_parse_rgmii_delays(priv, ports); | ||
1366 | if (rc < 0) { | ||
1367 | dev_err(ds->dev, "RGMII delay not supported\n"); | ||
1368 | return rc; | ||
1369 | } | ||
1370 | |||
1371 | /* Create and send configuration down to device */ | ||
1372 | rc = sja1105_static_config_load(priv, ports); | ||
1373 | if (rc < 0) { | ||
1374 | dev_err(ds->dev, "Failed to load static config: %d\n", rc); | ||
1375 | return rc; | ||
1376 | } | ||
1377 | /* Configure the CGU (PHY link modes and speeds) */ | ||
1378 | rc = sja1105_clocking_setup(priv); | ||
1379 | if (rc < 0) { | ||
1380 | dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc); | ||
1381 | return rc; | ||
1382 | } | ||
1383 | /* On SJA1105, VLAN filtering per se is always enabled in hardware. | ||
1384 | * The only thing we can do to disable it is lie about what the 802.1Q | ||
1385 | * EtherType is. | ||
1386 | * So it will still try to apply VLAN filtering, but all ingress | ||
1387 | * traffic (except frames received with EtherType of ETH_P_SJA1105) | ||
1388 | * will be internally tagged with a distorted VLAN header where the | ||
1389 | * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid. | ||
1390 | */ | ||
1391 | ds->vlan_filtering_is_global = true; | ||
1392 | |||
1393 | /* The DSA/switchdev model brings up switch ports in standalone mode by | ||
1394 | * default, and that means vlan_filtering is 0 since they're not under | ||
1395 | * a bridge, so it's safe to set up switch tagging at this time. | ||
1396 | */ | ||
1397 | return sja1105_setup_8021q_tagging(ds, true); | ||
1398 | } | ||
1399 | |||
1400 | static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, | ||
1401 | struct sk_buff *skb) | ||
1402 | { | ||
1403 | struct sja1105_mgmt_entry mgmt_route = {0}; | ||
1404 | struct sja1105_private *priv = ds->priv; | ||
1405 | struct ethhdr *hdr; | ||
1406 | int timeout = 10; | ||
1407 | int rc; | ||
1408 | |||
1409 | hdr = eth_hdr(skb); | ||
1410 | |||
1411 | mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest); | ||
1412 | mgmt_route.destports = BIT(port); | ||
1413 | mgmt_route.enfport = 1; | ||
1414 | |||
1415 | rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE, | ||
1416 | slot, &mgmt_route, true); | ||
1417 | if (rc < 0) { | ||
1418 | kfree_skb(skb); | ||
1419 | return rc; | ||
1420 | } | ||
1421 | |||
1422 | /* Transfer skb to the host port. */ | ||
1423 | dsa_enqueue_skb(skb, ds->ports[port].slave); | ||
1424 | |||
1425 | /* Wait until the switch has processed the frame */ | ||
1426 | do { | ||
1427 | rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE, | ||
1428 | slot, &mgmt_route); | ||
1429 | if (rc < 0) { | ||
1430 | dev_err_ratelimited(priv->ds->dev, | ||
1431 | "failed to poll for mgmt route\n"); | ||
1432 | continue; | ||
1433 | } | ||
1434 | |||
1435 | /* UM10944: The ENFPORT flag of the respective entry is | ||
1436 | * cleared when a match is found. The host can use this | ||
1437 | * flag as an acknowledgment. | ||
1438 | */ | ||
1439 | cpu_relax(); | ||
1440 | } while (mgmt_route.enfport && --timeout); | ||
1441 | |||
1442 | if (!timeout) { | ||
1443 | /* Clean up the management route so that a follow-up | ||
1444 | * frame may not match on it by mistake. | ||
1445 | */ | ||
1446 | sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE, | ||
1447 | slot, &mgmt_route, false); | ||
1448 | dev_err_ratelimited(priv->ds->dev, "xmit timed out\n"); | ||
1449 | } | ||
1450 | |||
1451 | return NETDEV_TX_OK; | ||
1452 | } | ||
1453 | |||
1454 | /* Deferred work is unfortunately necessary because setting up the management | ||
1455 | * route cannot be done from atomic context (the SPI transfer takes a | ||
1456 | * sleepable lock on the bus). | ||
1457 | */ | ||
1458 | static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port, | ||
1459 | struct sk_buff *skb) | ||
1460 | { | ||
1461 | struct sja1105_private *priv = ds->priv; | ||
1462 | struct sja1105_port *sp = &priv->ports[port]; | ||
1463 | int slot = sp->mgmt_slot; | ||
1464 | |||
1465 | /* The tragic fact about the switch having 4x2 slots for installing | ||
1466 | * management routes is that all of them except one are actually | ||
1467 | * useless. | ||
1468 | * If 2 slots are simultaneously configured for two BPDUs sent to the | ||
1469 | * same (multicast) DMAC but on different egress ports, the switch | ||
1470 | * would confuse them and redirect the first frame it receives on the | ||
1471 | * CPU port towards the port configured in the numerically first slot | ||
1472 | * (therefore the wrong port), and the second received frame towards | ||
1473 | * the port in the second slot (also the wrong port). | ||
1474 | * So for all practical purposes, there needs to be a lock that | ||
1475 | * prevents that from happening. The slot used here is utterly useless | ||
1476 | * (it could just as well have been 0), but we keep it nonetheless, | ||
1477 | * in case a smarter idea ever comes up in the future. | ||
1478 | */ | ||
1479 | mutex_lock(&priv->mgmt_lock); | ||
1480 | |||
1481 | sja1105_mgmt_xmit(ds, port, slot, skb); | ||
1482 | |||
1483 | mutex_unlock(&priv->mgmt_lock); | ||
1484 | return NETDEV_TX_OK; | ||
1485 | } | ||
1486 | |||
1487 | /* The MAXAGE setting belongs to the L2 Lookup Parameters table, | ||
1488 | * which cannot be reconfigured at runtime. So a switch reset is required. | ||
1489 | */ | ||
1490 | static int sja1105_set_ageing_time(struct dsa_switch *ds, | ||
1491 | unsigned int ageing_time) | ||
1492 | { | ||
1493 | struct sja1105_l2_lookup_params_entry *l2_lookup_params; | ||
1494 | struct sja1105_private *priv = ds->priv; | ||
1495 | struct sja1105_table *table; | ||
1496 | unsigned int maxage; | ||
1497 | |||
1498 | table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; | ||
1499 | l2_lookup_params = table->entries; | ||
1500 | |||
1501 | maxage = SJA1105_AGEING_TIME_MS(ageing_time); | ||
1502 | |||
1503 | if (l2_lookup_params->maxage == maxage) | ||
1504 | return 0; | ||
1505 | |||
1506 | l2_lookup_params->maxage = maxage; | ||
1507 | |||
1508 | return sja1105_static_config_reload(priv); | ||
1509 | } | ||
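
The conversion itself is just a unit change. Below is a minimal sketch, assuming SJA1105_AGEING_TIME_MS() divides milliseconds by a 10 ms hardware tick; that definition is an assumption about a macro defined elsewhere in the driver and not shown in this hunk:

#include <stdio.h>

/* Assumed definition: MAXAGE counts in 10 ms hardware ticks. The real
 * macro lives in the driver headers and is not part of this hunk.
 */
#define SJA1105_AGEING_TIME_MS(ms)	((ms) / 10)

int main(void)
{
	/* The bridge layer passes ageing_time in milliseconds, e.g. the
	 * default of 300 seconds.
	 */
	unsigned int ageing_time = 300 * 1000;

	printf("maxage = %u\n", SJA1105_AGEING_TIME_MS(ageing_time)); /* 30000 */
	return 0;
}
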
1510 | |||
1511 | static const struct dsa_switch_ops sja1105_switch_ops = { | ||
1512 | .get_tag_protocol = sja1105_get_tag_protocol, | ||
1513 | .setup = sja1105_setup, | ||
1514 | .adjust_link = sja1105_adjust_link, | ||
1515 | .set_ageing_time = sja1105_set_ageing_time, | ||
1516 | .phylink_validate = sja1105_phylink_validate, | ||
1517 | .get_strings = sja1105_get_strings, | ||
1518 | .get_ethtool_stats = sja1105_get_ethtool_stats, | ||
1519 | .get_sset_count = sja1105_get_sset_count, | ||
1520 | .port_fdb_dump = sja1105_fdb_dump, | ||
1521 | .port_fdb_add = sja1105_fdb_add, | ||
1522 | .port_fdb_del = sja1105_fdb_del, | ||
1523 | .port_bridge_join = sja1105_bridge_join, | ||
1524 | .port_bridge_leave = sja1105_bridge_leave, | ||
1525 | .port_stp_state_set = sja1105_bridge_stp_state_set, | ||
1526 | .port_vlan_prepare = sja1105_vlan_prepare, | ||
1527 | .port_vlan_filtering = sja1105_vlan_filtering, | ||
1528 | .port_vlan_add = sja1105_vlan_add, | ||
1529 | .port_vlan_del = sja1105_vlan_del, | ||
1530 | .port_mdb_prepare = sja1105_mdb_prepare, | ||
1531 | .port_mdb_add = sja1105_mdb_add, | ||
1532 | .port_mdb_del = sja1105_mdb_del, | ||
1533 | .port_deferred_xmit = sja1105_port_deferred_xmit, | ||
1534 | }; | ||
1535 | |||
1536 | static int sja1105_check_device_id(struct sja1105_private *priv) | ||
1537 | { | ||
1538 | const struct sja1105_regs *regs = priv->info->regs; | ||
1539 | u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0}; | ||
1540 | struct device *dev = &priv->spidev->dev; | ||
1541 | u64 device_id; | ||
1542 | u64 part_no; | ||
1543 | int rc; | ||
1544 | |||
1545 | rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id, | ||
1546 | &device_id, SJA1105_SIZE_DEVICE_ID); | ||
1547 | if (rc < 0) | ||
1548 | return rc; | ||
1549 | |||
1550 | if (device_id != priv->info->device_id) { | ||
1551 | dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n", | ||
1552 | priv->info->device_id, device_id); | ||
1553 | return -ENODEV; | ||
1554 | } | ||
1555 | |||
1556 | rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id, | ||
1557 | prod_id, SJA1105_SIZE_DEVICE_ID); | ||
1558 | if (rc < 0) | ||
1559 | return rc; | ||
1560 | |||
1561 | sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID); | ||
1562 | |||
1563 | if (part_no != priv->info->part_no) { | ||
1564 | dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n", | ||
1565 | priv->info->part_no, part_no); | ||
1566 | return -ENODEV; | ||
1567 | } | ||
1568 | |||
1569 | return 0; | ||
1570 | } | ||
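
The part number check above boils down to extracting bits 19:4 of the 32-bit PROD_ID word, interpreted big-endian the way the packing helpers do. A small stand-alone illustration; the register value is made up for the example, not a real readout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 4-byte PROD_ID readout */
	uint8_t prod_id[4] = { 0x00, 0x01, 0x23, 0x40 };
	uint32_t word = (uint32_t)prod_id[0] << 24 | (uint32_t)prod_id[1] << 16 |
			(uint32_t)prod_id[2] << 8  | (uint32_t)prod_id[3];
	uint32_t part_no = (word >> 4) & 0xffff;   /* bits 19:4 */

	printf("part_no = 0x%04x\n", part_no);     /* 0x1234 */
	return 0;
}
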
1571 | |||
1572 | static int sja1105_probe(struct spi_device *spi) | ||
1573 | { | ||
1574 | struct device *dev = &spi->dev; | ||
1575 | struct sja1105_private *priv; | ||
1576 | struct dsa_switch *ds; | ||
1577 | int rc, i; | ||
1578 | |||
1579 | if (!dev->of_node) { | ||
1580 | dev_err(dev, "No DTS bindings for SJA1105 driver\n"); | ||
1581 | return -EINVAL; | ||
1582 | } | ||
1583 | |||
1584 | priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL); | ||
1585 | if (!priv) | ||
1586 | return -ENOMEM; | ||
1587 | |||
1588 | /* Configure the optional reset pin and bring up switch */ | ||
1589 | priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); | ||
1590 | if (IS_ERR(priv->reset_gpio)) | ||
1591 | dev_dbg(dev, "reset-gpios not defined, ignoring\n"); | ||
1592 | else | ||
1593 | sja1105_hw_reset(priv->reset_gpio, 1, 1); | ||
1594 | |||
1595 | /* Populate our driver private structure (priv) based on | ||
1596 | * the device tree node that was probed (spi) | ||
1597 | */ | ||
1598 | priv->spidev = spi; | ||
1599 | spi_set_drvdata(spi, priv); | ||
1600 | |||
1601 | /* Configure the SPI bus */ | ||
1602 | spi->bits_per_word = 8; | ||
1603 | rc = spi_setup(spi); | ||
1604 | if (rc < 0) { | ||
1605 | dev_err(dev, "Could not init SPI\n"); | ||
1606 | return rc; | ||
1607 | } | ||
1608 | |||
1609 | priv->info = of_device_get_match_data(dev); | ||
1610 | |||
1611 | /* Detect hardware device */ | ||
1612 | rc = sja1105_check_device_id(priv); | ||
1613 | if (rc < 0) { | ||
1614 | dev_err(dev, "Device ID check failed: %d\n", rc); | ||
1615 | return rc; | ||
1616 | } | ||
1617 | |||
1618 | dev_info(dev, "Probed switch chip: %s\n", priv->info->name); | ||
1619 | |||
1620 | ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS); | ||
1621 | if (!ds) | ||
1622 | return -ENOMEM; | ||
1623 | |||
1624 | ds->ops = &sja1105_switch_ops; | ||
1625 | ds->priv = priv; | ||
1626 | priv->ds = ds; | ||
1627 | |||
1628 | /* Connections between dsa_port and sja1105_port */ | ||
1629 | for (i = 0; i < SJA1105_NUM_PORTS; i++) { | ||
1630 | struct sja1105_port *sp = &priv->ports[i]; | ||
1631 | |||
1632 | ds->ports[i].priv = sp; | ||
1633 | sp->dp = &ds->ports[i]; | ||
1634 | } | ||
1635 | mutex_init(&priv->mgmt_lock); | ||
1636 | |||
1637 | return dsa_register_switch(priv->ds); | ||
1638 | } | ||
1639 | |||
1640 | static int sja1105_remove(struct spi_device *spi) | ||
1641 | { | ||
1642 | struct sja1105_private *priv = spi_get_drvdata(spi); | ||
1643 | |||
1644 | dsa_unregister_switch(priv->ds); | ||
1645 | sja1105_static_config_free(&priv->static_config); | ||
1646 | return 0; | ||
1647 | } | ||
1648 | |||
1649 | static const struct of_device_id sja1105_dt_ids[] = { | ||
1650 | { .compatible = "nxp,sja1105e", .data = &sja1105e_info }, | ||
1651 | { .compatible = "nxp,sja1105t", .data = &sja1105t_info }, | ||
1652 | { .compatible = "nxp,sja1105p", .data = &sja1105p_info }, | ||
1653 | { .compatible = "nxp,sja1105q", .data = &sja1105q_info }, | ||
1654 | { .compatible = "nxp,sja1105r", .data = &sja1105r_info }, | ||
1655 | { .compatible = "nxp,sja1105s", .data = &sja1105s_info }, | ||
1656 | { /* sentinel */ }, | ||
1657 | }; | ||
1658 | MODULE_DEVICE_TABLE(of, sja1105_dt_ids); | ||
1659 | |||
1660 | static struct spi_driver sja1105_driver = { | ||
1661 | .driver = { | ||
1662 | .name = "sja1105", | ||
1663 | .owner = THIS_MODULE, | ||
1664 | .of_match_table = of_match_ptr(sja1105_dt_ids), | ||
1665 | }, | ||
1666 | .probe = sja1105_probe, | ||
1667 | .remove = sja1105_remove, | ||
1668 | }; | ||
1669 | |||
1670 | module_spi_driver(sja1105_driver); | ||
1671 | |||
1672 | MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>"); | ||
1673 | MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>"); | ||
1674 | MODULE_DESCRIPTION("SJA1105 Driver"); | ||
1675 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c new file mode 100644 index 000000000000..244a94ccfc18 --- /dev/null +++ b/drivers/net/dsa/sja1105/sja1105_spi.c | |||
@@ -0,0 +1,590 @@ | |||
1 | // SPDX-License-Identifier: BSD-3-Clause | ||
2 | /* Copyright (c) 2016-2018, NXP Semiconductors | ||
3 | * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH | ||
4 | * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> | ||
5 | */ | ||
6 | #include <linux/spi/spi.h> | ||
7 | #include <linux/packing.h> | ||
8 | #include "sja1105.h" | ||
9 | |||
10 | #define SJA1105_SIZE_PORT_CTRL 4 | ||
11 | #define SJA1105_SIZE_RESET_CMD 4 | ||
12 | #define SJA1105_SIZE_SPI_MSG_HEADER 4 | ||
13 | #define SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4) | ||
14 | #define SJA1105_SIZE_SPI_TRANSFER_MAX \ | ||
15 | (SJA1105_SIZE_SPI_MSG_HEADER + SJA1105_SIZE_SPI_MSG_MAXLEN) | ||
16 | |||
17 | static int sja1105_spi_transfer(const struct sja1105_private *priv, | ||
18 | const void *tx, void *rx, int size) | ||
19 | { | ||
20 | struct spi_device *spi = priv->spidev; | ||
21 | struct spi_transfer transfer = { | ||
22 | .tx_buf = tx, | ||
23 | .rx_buf = rx, | ||
24 | .len = size, | ||
25 | }; | ||
26 | struct spi_message msg; | ||
27 | int rc; | ||
28 | |||
29 | if (size > SJA1105_SIZE_SPI_TRANSFER_MAX) { | ||
30 | dev_err(&spi->dev, "SPI message (%d) longer than max of %d\n", | ||
31 | size, SJA1105_SIZE_SPI_TRANSFER_MAX); | ||
32 | return -EMSGSIZE; | ||
33 | } | ||
34 | |||
35 | spi_message_init(&msg); | ||
36 | spi_message_add_tail(&transfer, &msg); | ||
37 | |||
38 | rc = spi_sync(spi, &msg); | ||
39 | if (rc < 0) { | ||
40 | dev_err(&spi->dev, "SPI transfer failed: %d\n", rc); | ||
41 | return rc; | ||
42 | } | ||
43 | |||
44 | return rc; | ||
45 | } | ||
46 | |||
47 | static void | ||
48 | sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg) | ||
49 | { | ||
50 | const int size = SJA1105_SIZE_SPI_MSG_HEADER; | ||
51 | |||
52 | memset(buf, 0, size); | ||
53 | |||
54 | sja1105_pack(buf, &msg->access, 31, 31, size); | ||
55 | sja1105_pack(buf, &msg->read_count, 30, 25, size); | ||
56 | sja1105_pack(buf, &msg->address, 24, 4, size); | ||
57 | } | ||
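
To make the header layout above concrete, here is a user-space sketch that builds the same 4-byte control word as a plain host-order integer. Two assumptions: the access bit is 1 for writes and 0 for reads, and the driver's packing helpers additionally take care of the on-wire byte order:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for sja1105_spi_message_pack() */
static uint32_t spi_msg_header(int write, unsigned int read_count,
			       uint32_t address)
{
	uint32_t hdr = 0;

	hdr |= (uint32_t)(write ? 1 : 0) << 31;   /* access:     bit 31 */
	hdr |= (read_count & 0x3f) << 25;         /* read_count: bits 30:25 */
	hdr |= (address & 0x1fffff) << 4;         /* address:    bits 24:4 */
	return hdr;
}

int main(void)
{
	/* read 1 word (4 bytes) from the device ID register at address 0 */
	printf("0x%08x\n", spi_msg_header(0, 1, 0x0));      /* 0x02000000 */
	/* write to the reset generation unit at 0x100440 */
	printf("0x%08x\n", spi_msg_header(1, 0, 0x100440)); /* 0x81004400 */
	return 0;
}
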
58 | |||
59 | /* If @rw is: | ||
60 | * - SPI_WRITE: creates and sends an SPI write message at absolute | ||
61 | * address reg_addr, taking size_bytes from *packed_buf | ||
62 | * - SPI_READ: creates and sends an SPI read message from absolute | ||
63 | * address reg_addr, writing size_bytes into *packed_buf | ||
64 | * | ||
65 | * This function should only be called if it is known beforehand that | ||
66 | * @size_bytes is smaller than SJA1105_SIZE_SPI_MSG_MAXLEN. Larger buffers | ||
67 | * are chunked into smaller pieces by sja1105_spi_send_long_packed_buf below. | ||
68 | */ | ||
69 | int sja1105_spi_send_packed_buf(const struct sja1105_private *priv, | ||
70 | sja1105_spi_rw_mode_t rw, u64 reg_addr, | ||
71 | void *packed_buf, size_t size_bytes) | ||
72 | { | ||
73 | u8 tx_buf[SJA1105_SIZE_SPI_TRANSFER_MAX] = {0}; | ||
74 | u8 rx_buf[SJA1105_SIZE_SPI_TRANSFER_MAX] = {0}; | ||
75 | const int msg_len = size_bytes + SJA1105_SIZE_SPI_MSG_HEADER; | ||
76 | struct sja1105_spi_message msg = {0}; | ||
77 | int rc; | ||
78 | |||
79 | if (msg_len > SJA1105_SIZE_SPI_TRANSFER_MAX) | ||
80 | return -ERANGE; | ||
81 | |||
82 | msg.access = rw; | ||
83 | msg.address = reg_addr; | ||
84 | if (rw == SPI_READ) | ||
85 | msg.read_count = size_bytes / 4; | ||
86 | |||
87 | sja1105_spi_message_pack(tx_buf, &msg); | ||
88 | |||
89 | if (rw == SPI_WRITE) | ||
90 | memcpy(tx_buf + SJA1105_SIZE_SPI_MSG_HEADER, | ||
91 | packed_buf, size_bytes); | ||
92 | |||
93 | rc = sja1105_spi_transfer(priv, tx_buf, rx_buf, msg_len); | ||
94 | if (rc < 0) | ||
95 | return rc; | ||
96 | |||
97 | if (rw == SPI_READ) | ||
98 | memcpy(packed_buf, rx_buf + SJA1105_SIZE_SPI_MSG_HEADER, | ||
99 | size_bytes); | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | /* If @rw is: | ||
105 | * - SPI_WRITE: creates and sends an SPI write message at absolute | ||
106 | * address reg_addr, packing size_bytes of *value into the message | ||
107 | * - SPI_READ: creates and sends an SPI read message from absolute | ||
108 | * address reg_addr, unpacking size_bytes of the reply into *value | ||
109 | * | ||
110 | * The u64 *value is unpacked, meaning that it's stored in the native | ||
111 | * CPU endianness and directly usable by software running on the core. | ||
112 | * | ||
113 | * This is a wrapper around sja1105_spi_send_packed_buf(). | ||
114 | */ | ||
115 | int sja1105_spi_send_int(const struct sja1105_private *priv, | ||
116 | sja1105_spi_rw_mode_t rw, u64 reg_addr, | ||
117 | u64 *value, u64 size_bytes) | ||
118 | { | ||
119 | u8 packed_buf[SJA1105_SIZE_SPI_MSG_MAXLEN]; | ||
120 | int rc; | ||
121 | |||
122 | if (size_bytes > SJA1105_SIZE_SPI_MSG_MAXLEN) | ||
123 | return -ERANGE; | ||
124 | |||
125 | if (rw == SPI_WRITE) | ||
126 | sja1105_pack(packed_buf, value, 8 * size_bytes - 1, 0, | ||
127 | size_bytes); | ||
128 | |||
129 | rc = sja1105_spi_send_packed_buf(priv, rw, reg_addr, packed_buf, | ||
130 | size_bytes); | ||
131 | |||
132 | if (rw == SPI_READ) | ||
133 | sja1105_unpack(packed_buf, value, 8 * size_bytes - 1, 0, | ||
134 | size_bytes); | ||
135 | |||
136 | return rc; | ||
137 | } | ||
138 | |||
139 | /* Should be used if a @packed_buf larger than SJA1105_SIZE_SPI_MSG_MAXLEN | ||
140 | * must be sent/received. Splitting the buffer into chunks and assembling | ||
141 | * those into SPI messages is done automatically by this function. | ||
142 | */ | ||
143 | int sja1105_spi_send_long_packed_buf(const struct sja1105_private *priv, | ||
144 | sja1105_spi_rw_mode_t rw, u64 base_addr, | ||
145 | void *packed_buf, u64 buf_len) | ||
146 | { | ||
147 | struct chunk { | ||
148 | void *buf_ptr; | ||
149 | int len; | ||
150 | u64 spi_address; | ||
151 | } chunk; | ||
152 | int distance_to_end; | ||
153 | int rc; | ||
154 | |||
155 | /* Initialize chunk */ | ||
156 | chunk.buf_ptr = packed_buf; | ||
157 | chunk.spi_address = base_addr; | ||
158 | chunk.len = min_t(int, buf_len, SJA1105_SIZE_SPI_MSG_MAXLEN); | ||
159 | |||
160 | while (chunk.len) { | ||
161 | rc = sja1105_spi_send_packed_buf(priv, rw, chunk.spi_address, | ||
162 | chunk.buf_ptr, chunk.len); | ||
163 | if (rc < 0) | ||
164 | return rc; | ||
165 | |||
166 | chunk.buf_ptr += chunk.len; | ||
167 | chunk.spi_address += chunk.len / 4; | ||
168 | distance_to_end = (uintptr_t)(packed_buf + buf_len - | ||
169 | chunk.buf_ptr); | ||
170 | chunk.len = min(distance_to_end, SJA1105_SIZE_SPI_MSG_MAXLEN); | ||
171 | } | ||
172 | |||
173 | return 0; | ||
174 | } | ||
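
The loop above advances the buffer pointer in bytes but the SPI address in 32-bit words. A small sketch of just that arithmetic; the buffer length is an arbitrary example, and 0x20000 is the static config area from the register maps further down:

#include <stdio.h>

#define MSG_MAXLEN (64 * 4)   /* SJA1105_SIZE_SPI_MSG_MAXLEN */

int main(void)
{
	unsigned long base_addr = 0x20000;   /* static config area */
	int buf_len = 600;                   /* arbitrary example length */
	int offset = 0;

	while (offset < buf_len) {
		int len = buf_len - offset;

		if (len > MSG_MAXLEN)
			len = MSG_MAXLEN;
		/* buffer offsets advance in bytes, SPI address in words */
		printf("chunk: buf[%3d..%3d) -> SPI addr 0x%lx\n",
		       offset, offset + len, base_addr + offset / 4);
		offset += len;
	}
	return 0;
}
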
175 | |||
176 | /* Back-ported structure from UM11040 Table 112. | ||
177 | * Reset control register (addr. 100440h) | ||
178 | * In the SJA1105 E/T, only warm_rst and cold_rst are | ||
179 | * supported (exposed in UM10944 as rst_ctrl), but the bit | ||
180 | * offsets of warm_rst and cold_rst are actually reversed. | ||
181 | */ | ||
182 | struct sja1105_reset_cmd { | ||
183 | u64 switch_rst; | ||
184 | u64 cfg_rst; | ||
185 | u64 car_rst; | ||
186 | u64 otp_rst; | ||
187 | u64 warm_rst; | ||
188 | u64 cold_rst; | ||
189 | u64 por_rst; | ||
190 | }; | ||
191 | |||
192 | static void | ||
193 | sja1105et_reset_cmd_pack(void *buf, const struct sja1105_reset_cmd *reset) | ||
194 | { | ||
195 | const int size = SJA1105_SIZE_RESET_CMD; | ||
196 | |||
197 | memset(buf, 0, size); | ||
198 | |||
199 | sja1105_pack(buf, &reset->cold_rst, 3, 3, size); | ||
200 | sja1105_pack(buf, &reset->warm_rst, 2, 2, size); | ||
201 | } | ||
202 | |||
203 | static void | ||
204 | sja1105pqrs_reset_cmd_pack(void *buf, const struct sja1105_reset_cmd *reset) | ||
205 | { | ||
206 | const int size = SJA1105_SIZE_RESET_CMD; | ||
207 | |||
208 | memset(buf, 0, size); | ||
209 | |||
210 | sja1105_pack(buf, &reset->switch_rst, 8, 8, size); | ||
211 | sja1105_pack(buf, &reset->cfg_rst, 7, 7, size); | ||
212 | sja1105_pack(buf, &reset->car_rst, 5, 5, size); | ||
213 | sja1105_pack(buf, &reset->otp_rst, 4, 4, size); | ||
214 | sja1105_pack(buf, &reset->warm_rst, 3, 3, size); | ||
215 | sja1105_pack(buf, &reset->cold_rst, 2, 2, size); | ||
216 | sja1105_pack(buf, &reset->por_rst, 1, 1, size); | ||
217 | } | ||
218 | |||
219 | static int sja1105et_reset_cmd(const void *ctx, const void *data) | ||
220 | { | ||
221 | const struct sja1105_private *priv = ctx; | ||
222 | const struct sja1105_reset_cmd *reset = data; | ||
223 | const struct sja1105_regs *regs = priv->info->regs; | ||
224 | struct device *dev = priv->ds->dev; | ||
225 | u8 packed_buf[SJA1105_SIZE_RESET_CMD]; | ||
226 | |||
227 | if (reset->switch_rst || | ||
228 | reset->cfg_rst || | ||
229 | reset->car_rst || | ||
230 | reset->otp_rst || | ||
231 | reset->por_rst) { | ||
232 | dev_err(dev, "Only warm and cold reset is supported " | ||
233 | "for SJA1105 E/T!\n"); | ||
234 | return -EINVAL; | ||
235 | } | ||
236 | |||
237 | if (reset->warm_rst) | ||
238 | dev_dbg(dev, "Warm reset requested\n"); | ||
239 | if (reset->cold_rst) | ||
240 | dev_dbg(dev, "Cold reset requested\n"); | ||
241 | |||
242 | sja1105et_reset_cmd_pack(packed_buf, reset); | ||
243 | |||
244 | return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rgu, | ||
245 | packed_buf, SJA1105_SIZE_RESET_CMD); | ||
246 | } | ||
247 | |||
248 | static int sja1105pqrs_reset_cmd(const void *ctx, const void *data) | ||
249 | { | ||
250 | const struct sja1105_private *priv = ctx; | ||
251 | const struct sja1105_reset_cmd *reset = data; | ||
252 | const struct sja1105_regs *regs = priv->info->regs; | ||
253 | struct device *dev = priv->ds->dev; | ||
254 | u8 packed_buf[SJA1105_SIZE_RESET_CMD]; | ||
255 | |||
256 | if (reset->switch_rst) | ||
257 | dev_dbg(dev, "Main reset for all functional modules requested\n"); | ||
258 | if (reset->cfg_rst) | ||
259 | dev_dbg(dev, "Chip configuration reset requested\n"); | ||
260 | if (reset->car_rst) | ||
261 | dev_dbg(dev, "Clock and reset control logic reset requested\n"); | ||
262 | if (reset->otp_rst) | ||
263 | dev_dbg(dev, "OTP read cycle for reading product " | ||
264 | "config settings requested\n"); | ||
265 | if (reset->warm_rst) | ||
266 | dev_dbg(dev, "Warm reset requested\n"); | ||
267 | if (reset->cold_rst) | ||
268 | dev_dbg(dev, "Cold reset requested\n"); | ||
269 | if (reset->por_rst) | ||
270 | dev_dbg(dev, "Power-on reset requested\n"); | ||
271 | |||
272 | sja1105pqrs_reset_cmd_pack(packed_buf, reset); | ||
273 | |||
274 | return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rgu, | ||
275 | packed_buf, SJA1105_SIZE_RESET_CMD); | ||
276 | } | ||
277 | |||
278 | static int sja1105_cold_reset(const struct sja1105_private *priv) | ||
279 | { | ||
280 | struct sja1105_reset_cmd reset = {0}; | ||
281 | |||
282 | reset.cold_rst = 1; | ||
283 | return priv->info->reset_cmd(priv, &reset); | ||
284 | } | ||
285 | |||
286 | static int sja1105_inhibit_tx(const struct sja1105_private *priv, | ||
287 | const unsigned long *port_bitmap) | ||
288 | { | ||
289 | const struct sja1105_regs *regs = priv->info->regs; | ||
290 | u64 inhibit_cmd; | ||
291 | int port, rc; | ||
292 | |||
293 | rc = sja1105_spi_send_int(priv, SPI_READ, regs->port_control, | ||
294 | &inhibit_cmd, SJA1105_SIZE_PORT_CTRL); | ||
295 | if (rc < 0) | ||
296 | return rc; | ||
297 | |||
298 | for_each_set_bit(port, port_bitmap, SJA1105_NUM_PORTS) | ||
299 | inhibit_cmd |= BIT(port); | ||
300 | |||
301 | return sja1105_spi_send_int(priv, SPI_WRITE, regs->port_control, | ||
302 | &inhibit_cmd, SJA1105_SIZE_PORT_CTRL); | ||
303 | } | ||
304 | |||
305 | struct sja1105_status { | ||
306 | u64 configs; | ||
307 | u64 crcchkl; | ||
308 | u64 ids; | ||
309 | u64 crcchkg; | ||
310 | }; | ||
311 | |||
312 | /* This is not reading the entire General Status area, which is also | ||
313 | * divergent between E/T and P/Q/R/S, but only the relevant bits for | ||
314 | * ensuring that the static config upload procedure was successful. | ||
315 | */ | ||
316 | static void sja1105_status_unpack(void *buf, struct sja1105_status *status) | ||
317 | { | ||
319 | /* So that pointer addition advances in steps of 4 bytes */ | ||
319 | u32 *p = buf; | ||
320 | |||
321 | /* device_id is missing from the buffer, but we don't | ||
322 | * want to diverge from the manual definition of the | ||
323 | * register addresses, so we'll back off one step with | ||
324 | * the register pointer, and never access p[0]. | ||
325 | */ | ||
326 | p--; | ||
327 | sja1105_unpack(p + 0x1, &status->configs, 31, 31, 4); | ||
328 | sja1105_unpack(p + 0x1, &status->crcchkl, 30, 30, 4); | ||
329 | sja1105_unpack(p + 0x1, &status->ids, 29, 29, 4); | ||
330 | sja1105_unpack(p + 0x1, &status->crcchkg, 28, 28, 4); | ||
331 | } | ||
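
Put differently, the four flags of interest all sit in the top nibble of the general status word at register 0x1. A quick illustration with a made-up register value, not a real readout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t status_word = 0x80000000;   /* hypothetical readout */

	printf("configs=%u crcchkl=%u ids=%u crcchkg=%u\n",
	       (status_word >> 31) & 1,      /* bit 31: config valid */
	       (status_word >> 30) & 1,      /* bit 30: local CRC error */
	       (status_word >> 29) & 1,      /* bit 29: device id mismatch */
	       (status_word >> 28) & 1);     /* bit 28: global CRC error */
	return 0;
}
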
332 | |||
333 | static int sja1105_status_get(struct sja1105_private *priv, | ||
334 | struct sja1105_status *status) | ||
335 | { | ||
336 | const struct sja1105_regs *regs = priv->info->regs; | ||
337 | u8 packed_buf[4]; | ||
338 | int rc; | ||
339 | |||
340 | rc = sja1105_spi_send_packed_buf(priv, SPI_READ, | ||
341 | regs->status, | ||
342 | packed_buf, 4); | ||
343 | if (rc < 0) | ||
344 | return rc; | ||
345 | |||
346 | sja1105_status_unpack(packed_buf, status); | ||
347 | |||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | /* Not const because packing priv->static_config into buffers and preparing | ||
352 | * for upload requires the recalculation of table CRCs and updating the | ||
353 | * structures with these. | ||
354 | */ | ||
355 | static int | ||
356 | static_config_buf_prepare_for_upload(struct sja1105_private *priv, | ||
357 | void *config_buf, int buf_len) | ||
358 | { | ||
359 | struct sja1105_static_config *config = &priv->static_config; | ||
360 | struct sja1105_table_header final_header; | ||
361 | sja1105_config_valid_t valid; | ||
362 | char *final_header_ptr; | ||
363 | int crc_len; | ||
364 | |||
365 | valid = sja1105_static_config_check_valid(config); | ||
366 | if (valid != SJA1105_CONFIG_OK) { | ||
367 | dev_err(&priv->spidev->dev, "%s\n", | ||
368 | sja1105_static_config_error_msg[valid]); | ||
369 | return -EINVAL; | ||
370 | } | ||
371 | |||
372 | /* Write Device ID and config tables to config_buf */ | ||
373 | sja1105_static_config_pack(config_buf, config); | ||
374 | /* Recalculate CRC of the last header (right now 0xDEADBEEF). | ||
375 | * Don't include the CRC field itself. | ||
376 | */ | ||
377 | crc_len = buf_len - 4; | ||
378 | /* Read the whole table header */ | ||
379 | final_header_ptr = config_buf + buf_len - SJA1105_SIZE_TABLE_HEADER; | ||
380 | sja1105_table_header_packing(final_header_ptr, &final_header, UNPACK); | ||
381 | /* Modify */ | ||
382 | final_header.crc = sja1105_crc32(config_buf, crc_len); | ||
383 | /* Rewrite */ | ||
384 | sja1105_table_header_packing(final_header_ptr, &final_header, PACK); | ||
385 | |||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | #define RETRIES 10 | ||
390 | |||
391 | int sja1105_static_config_upload(struct sja1105_private *priv) | ||
392 | { | ||
393 | unsigned long port_bitmap = GENMASK_ULL(SJA1105_NUM_PORTS - 1, 0); | ||
394 | struct sja1105_static_config *config = &priv->static_config; | ||
395 | const struct sja1105_regs *regs = priv->info->regs; | ||
396 | struct device *dev = &priv->spidev->dev; | ||
397 | struct sja1105_status status; | ||
398 | int rc, retries = RETRIES; | ||
399 | u8 *config_buf; | ||
400 | int buf_len; | ||
401 | |||
402 | buf_len = sja1105_static_config_get_length(config); | ||
403 | config_buf = kcalloc(buf_len, sizeof(char), GFP_KERNEL); | ||
404 | if (!config_buf) | ||
405 | return -ENOMEM; | ||
406 | |||
407 | rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len); | ||
408 | if (rc < 0) { | ||
409 | dev_err(dev, "Invalid config, cannot upload\n"); | ||
410 | goto out; | ||
411 | } | ||
412 | /* Prevent PHY jabbering during switch reset by inhibiting | ||
413 | * Tx on all ports and waiting for the current packet to drain. | ||
414 | * Otherwise, the PHY will see an unterminated Ethernet packet. | ||
415 | */ | ||
416 | rc = sja1105_inhibit_tx(priv, &port_bitmap); | ||
417 | if (rc < 0) { | ||
418 | dev_err(dev, "Failed to inhibit Tx on ports\n"); | ||
419 | goto out; | ||
420 | } | ||
421 | /* Wait for any in-flight egress packet to finish transmission | ||
422 | * (reach IFG). It is guaranteed that a second one will not | ||
423 | * follow, and that the switch cold reset is thus safe. | ||
424 | */ | ||
425 | usleep_range(500, 1000); | ||
426 | do { | ||
427 | /* Put the SJA1105 in programming mode */ | ||
428 | rc = sja1105_cold_reset(priv); | ||
429 | if (rc < 0) { | ||
430 | dev_err(dev, "Failed to reset switch, retrying...\n"); | ||
431 | continue; | ||
432 | } | ||
433 | /* Wait for the switch to come out of reset */ | ||
434 | usleep_range(1000, 5000); | ||
435 | /* Upload the static config to the device */ | ||
436 | rc = sja1105_spi_send_long_packed_buf(priv, SPI_WRITE, | ||
437 | regs->config, | ||
438 | config_buf, buf_len); | ||
439 | if (rc < 0) { | ||
440 | dev_err(dev, "Failed to upload config, retrying...\n"); | ||
441 | continue; | ||
442 | } | ||
443 | /* Check that SJA1105 responded well to the config upload */ | ||
444 | rc = sja1105_status_get(priv, &status); | ||
445 | if (rc < 0) | ||
446 | continue; | ||
447 | |||
448 | if (status.ids == 1) { | ||
449 | dev_err(dev, "Mismatch between hardware and static config " | ||
450 | "device id. Wrote 0x%llx, wants 0x%llx\n", | ||
451 | config->device_id, priv->info->device_id); | ||
452 | continue; | ||
453 | } | ||
454 | if (status.crcchkl == 1) { | ||
455 | dev_err(dev, "Switch reported invalid local CRC on " | ||
456 | "the uploaded config, retrying...\n"); | ||
457 | continue; | ||
458 | } | ||
459 | if (status.crcchkg == 1) { | ||
460 | dev_err(dev, "Switch reported invalid global CRC on " | ||
461 | "the uploaded config, retrying...\n"); | ||
462 | continue; | ||
463 | } | ||
464 | if (status.configs == 0) { | ||
465 | dev_err(dev, "Switch reported that configuration is " | ||
466 | "invalid, retrying...\n"); | ||
467 | continue; | ||
468 | } | ||
469 | } while (--retries && (status.crcchkl == 1 || status.crcchkg == 1 || | ||
470 | status.configs == 0 || status.ids == 1)); | ||
471 | |||
472 | if (!retries) { | ||
473 | rc = -EIO; | ||
474 | dev_err(dev, "Failed to upload config to device, giving up\n"); | ||
475 | goto out; | ||
476 | } else if (retries != RETRIES - 1) { | ||
477 | dev_info(dev, "Succeeded after %d tried\n", RETRIES - retries); | ||
478 | } | ||
479 | |||
480 | dev_info(dev, "Reset switch and programmed static config\n"); | ||
481 | out: | ||
482 | kfree(config_buf); | ||
483 | return rc; | ||
484 | } | ||
485 | |||
486 | struct sja1105_regs sja1105et_regs = { | ||
487 | .device_id = 0x0, | ||
488 | .prod_id = 0x100BC3, | ||
489 | .status = 0x1, | ||
490 | .port_control = 0x11, | ||
491 | .config = 0x020000, | ||
492 | .rgu = 0x100440, | ||
493 | .pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808}, | ||
494 | .rmii_pll1 = 0x10000A, | ||
495 | .cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F}, | ||
496 | /* UM10944.pdf, Table 86, ACU Register overview */ | ||
497 | .rgmii_pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808}, | ||
498 | .mac = {0x200, 0x202, 0x204, 0x206, 0x208}, | ||
499 | .mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440}, | ||
500 | .mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640}, | ||
501 | /* UM10944.pdf, Table 78, CGU Register overview */ | ||
502 | .mii_tx_clk = {0x100013, 0x10001A, 0x100021, 0x100028, 0x10002F}, | ||
503 | .mii_rx_clk = {0x100014, 0x10001B, 0x100022, 0x100029, 0x100030}, | ||
504 | .mii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034}, | ||
505 | .mii_ext_rx_clk = {0x100019, 0x100020, 0x100027, 0x10002E, 0x100035}, | ||
506 | .rgmii_tx_clk = {0x100016, 0x10001D, 0x100024, 0x10002B, 0x100032}, | ||
507 | .rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031}, | ||
508 | .rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034}, | ||
509 | }; | ||
510 | |||
511 | struct sja1105_regs sja1105pqrs_regs = { | ||
512 | .device_id = 0x0, | ||
513 | .prod_id = 0x100BC3, | ||
514 | .status = 0x1, | ||
515 | .port_control = 0x12, | ||
516 | .config = 0x020000, | ||
517 | .rgu = 0x100440, | ||
518 | .pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808}, | ||
519 | .rmii_pll1 = 0x10000A, | ||
520 | .cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F}, | ||
521 | /* UM10944.pdf, Table 86, ACU Register overview */ | ||
522 | .rgmii_pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808}, | ||
523 | .mac = {0x200, 0x202, 0x204, 0x206, 0x208}, | ||
524 | .mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440}, | ||
525 | .mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640}, | ||
526 | /* UM11040.pdf, Table 114 */ | ||
527 | .mii_tx_clk = {0x100013, 0x100019, 0x10001F, 0x100025, 0x10002B}, | ||
528 | .mii_rx_clk = {0x100014, 0x10001A, 0x100020, 0x100026, 0x10002C}, | ||
529 | .mii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F}, | ||
530 | .mii_ext_rx_clk = {0x100018, 0x10001E, 0x100024, 0x10002A, 0x100030}, | ||
531 | .rgmii_tx_clk = {0x100016, 0x10001C, 0x100022, 0x100028, 0x10002E}, | ||
532 | .rmii_ref_clk = {0x100015, 0x10001B, 0x100021, 0x100027, 0x10002D}, | ||
533 | .rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F}, | ||
534 | .qlevel = {0x604, 0x614, 0x624, 0x634, 0x644}, | ||
535 | }; | ||
536 | |||
537 | struct sja1105_info sja1105e_info = { | ||
538 | .device_id = SJA1105E_DEVICE_ID, | ||
539 | .part_no = SJA1105ET_PART_NO, | ||
540 | .static_ops = sja1105e_table_ops, | ||
541 | .dyn_ops = sja1105et_dyn_ops, | ||
542 | .reset_cmd = sja1105et_reset_cmd, | ||
543 | .regs = &sja1105et_regs, | ||
544 | .name = "SJA1105E", | ||
545 | }; | ||
546 | struct sja1105_info sja1105t_info = { | ||
547 | .device_id = SJA1105T_DEVICE_ID, | ||
548 | .part_no = SJA1105ET_PART_NO, | ||
549 | .static_ops = sja1105t_table_ops, | ||
550 | .dyn_ops = sja1105et_dyn_ops, | ||
551 | .reset_cmd = sja1105et_reset_cmd, | ||
552 | .regs = &sja1105et_regs, | ||
553 | .name = "SJA1105T", | ||
554 | }; | ||
555 | struct sja1105_info sja1105p_info = { | ||
556 | .device_id = SJA1105PR_DEVICE_ID, | ||
557 | .part_no = SJA1105P_PART_NO, | ||
558 | .static_ops = sja1105p_table_ops, | ||
559 | .dyn_ops = sja1105pqrs_dyn_ops, | ||
560 | .reset_cmd = sja1105pqrs_reset_cmd, | ||
561 | .regs = &sja1105pqrs_regs, | ||
562 | .name = "SJA1105P", | ||
563 | }; | ||
564 | struct sja1105_info sja1105q_info = { | ||
565 | .device_id = SJA1105QS_DEVICE_ID, | ||
566 | .part_no = SJA1105Q_PART_NO, | ||
567 | .static_ops = sja1105q_table_ops, | ||
568 | .dyn_ops = sja1105pqrs_dyn_ops, | ||
569 | .reset_cmd = sja1105pqrs_reset_cmd, | ||
570 | .regs = &sja1105pqrs_regs, | ||
571 | .name = "SJA1105Q", | ||
572 | }; | ||
573 | struct sja1105_info sja1105r_info = { | ||
574 | .device_id = SJA1105PR_DEVICE_ID, | ||
575 | .part_no = SJA1105R_PART_NO, | ||
576 | .static_ops = sja1105r_table_ops, | ||
577 | .dyn_ops = sja1105pqrs_dyn_ops, | ||
578 | .reset_cmd = sja1105pqrs_reset_cmd, | ||
579 | .regs = &sja1105pqrs_regs, | ||
580 | .name = "SJA1105R", | ||
581 | }; | ||
582 | struct sja1105_info sja1105s_info = { | ||
583 | .device_id = SJA1105QS_DEVICE_ID, | ||
584 | .part_no = SJA1105S_PART_NO, | ||
585 | .static_ops = sja1105s_table_ops, | ||
586 | .dyn_ops = sja1105pqrs_dyn_ops, | ||
587 | .reset_cmd = sja1105pqrs_reset_cmd, | ||
588 | .regs = &sja1105pqrs_regs, | ||
589 | .name = "SJA1105S", | ||
590 | }; | ||
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c new file mode 100644 index 000000000000..b3c992b0abb0 --- /dev/null +++ b/drivers/net/dsa/sja1105/sja1105_static_config.c | |||
@@ -0,0 +1,987 @@ | |||
1 | // SPDX-License-Identifier: BSD-3-Clause | ||
2 | /* Copyright (c) 2016-2018, NXP Semiconductors | ||
3 | * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> | ||
4 | */ | ||
5 | #include "sja1105_static_config.h" | ||
6 | #include <linux/crc32.h> | ||
7 | #include <linux/slab.h> | ||
8 | #include <linux/string.h> | ||
9 | #include <linux/errno.h> | ||
10 | |||
11 | /* Convenience wrappers over the generic packing functions. These take into | ||
12 | * account the SJA1105 memory layout quirks and provide some level of | ||
13 | * programmer protection against incorrect API use. The errors are not expected | ||
14 | * to occur during runtime, therefore printing and swallowing them here is | ||
15 | * appropriate instead of cluttering up higher-level code. | ||
16 | */ | ||
17 | void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len) | ||
18 | { | ||
19 | int rc = packing(buf, (u64 *)val, start, end, len, | ||
20 | PACK, QUIRK_LSW32_IS_FIRST); | ||
21 | |||
22 | if (likely(!rc)) | ||
23 | return; | ||
24 | |||
25 | if (rc == -EINVAL) { | ||
26 | pr_err("Start bit (%d) expected to be larger than end (%d)\n", | ||
27 | start, end); | ||
28 | } else if (rc == -ERANGE) { | ||
29 | if ((start - end + 1) > 64) | ||
30 | pr_err("Field %d-%d too large for 64 bits!\n", | ||
31 | start, end); | ||
32 | else | ||
33 | pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n", | ||
34 | *val, start, end); | ||
35 | } | ||
36 | dump_stack(); | ||
37 | } | ||
38 | |||
39 | void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len) | ||
40 | { | ||
41 | int rc = packing((void *)buf, val, start, end, len, | ||
42 | UNPACK, QUIRK_LSW32_IS_FIRST); | ||
43 | |||
44 | if (likely(!rc)) | ||
45 | return; | ||
46 | |||
47 | if (rc == -EINVAL) | ||
48 | pr_err("Start bit (%d) expected to be larger than end (%d)\n", | ||
49 | start, end); | ||
50 | else if (rc == -ERANGE) | ||
51 | pr_err("Field %d-%d too large for 64 bits!\n", | ||
52 | start, end); | ||
53 | dump_stack(); | ||
54 | } | ||
55 | |||
56 | void sja1105_packing(void *buf, u64 *val, int start, int end, | ||
57 | size_t len, enum packing_op op) | ||
58 | { | ||
59 | int rc = packing(buf, val, start, end, len, op, QUIRK_LSW32_IS_FIRST); | ||
60 | |||
61 | if (likely(!rc)) | ||
62 | return; | ||
63 | |||
64 | if (rc == -EINVAL) { | ||
65 | pr_err("Start bit (%d) expected to be larger than end (%d)\n", | ||
66 | start, end); | ||
67 | } else if (rc == -ERANGE) { | ||
68 | if ((start - end + 1) > 64) | ||
69 | pr_err("Field %d-%d too large for 64 bits!\n", | ||
70 | start, end); | ||
71 | else | ||
72 | pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n", | ||
73 | *val, start, end); | ||
74 | } | ||
75 | dump_stack(); | ||
76 | } | ||
77 | |||
78 | /* Little-endian Ethernet CRC32 of data packed as big-endian u32 words */ | ||
79 | u32 sja1105_crc32(const void *buf, size_t len) | ||
80 | { | ||
81 | unsigned int i; | ||
82 | u64 word; | ||
83 | u32 crc; | ||
84 | |||
85 | /* seed */ | ||
86 | crc = ~0; | ||
87 | for (i = 0; i < len; i += 4) { | ||
88 | sja1105_unpack((void *)buf + i, &word, 31, 0, 4); | ||
89 | crc = crc32_le(crc, (u8 *)&word, 4); | ||
90 | } | ||
91 | return ~crc; | ||
92 | } | ||
93 | |||
94 | static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr, | ||
95 | enum packing_op op) | ||
96 | { | ||
97 | const size_t size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY; | ||
98 | struct sja1105_general_params_entry *entry = entry_ptr; | ||
99 | |||
100 | sja1105_packing(buf, &entry->vllupformat, 319, 319, size, op); | ||
101 | sja1105_packing(buf, &entry->mirr_ptacu, 318, 318, size, op); | ||
102 | sja1105_packing(buf, &entry->switchid, 317, 315, size, op); | ||
103 | sja1105_packing(buf, &entry->hostprio, 314, 312, size, op); | ||
104 | sja1105_packing(buf, &entry->mac_fltres1, 311, 264, size, op); | ||
105 | sja1105_packing(buf, &entry->mac_fltres0, 263, 216, size, op); | ||
106 | sja1105_packing(buf, &entry->mac_flt1, 215, 168, size, op); | ||
107 | sja1105_packing(buf, &entry->mac_flt0, 167, 120, size, op); | ||
108 | sja1105_packing(buf, &entry->incl_srcpt1, 119, 119, size, op); | ||
109 | sja1105_packing(buf, &entry->incl_srcpt0, 118, 118, size, op); | ||
110 | sja1105_packing(buf, &entry->send_meta1, 117, 117, size, op); | ||
111 | sja1105_packing(buf, &entry->send_meta0, 116, 116, size, op); | ||
112 | sja1105_packing(buf, &entry->casc_port, 115, 113, size, op); | ||
113 | sja1105_packing(buf, &entry->host_port, 112, 110, size, op); | ||
114 | sja1105_packing(buf, &entry->mirr_port, 109, 107, size, op); | ||
115 | sja1105_packing(buf, &entry->vlmarker, 106, 75, size, op); | ||
116 | sja1105_packing(buf, &entry->vlmask, 74, 43, size, op); | ||
117 | sja1105_packing(buf, &entry->tpid, 42, 27, size, op); | ||
118 | sja1105_packing(buf, &entry->ignore2stf, 26, 26, size, op); | ||
119 | sja1105_packing(buf, &entry->tpid2, 25, 10, size, op); | ||
120 | return size; | ||
121 | } | ||
122 | |||
123 | static size_t | ||
124 | sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr, | ||
125 | enum packing_op op) | ||
126 | { | ||
127 | const size_t size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY; | ||
128 | struct sja1105_general_params_entry *entry = entry_ptr; | ||
129 | |||
130 | sja1105_packing(buf, &entry->vllupformat, 351, 351, size, op); | ||
131 | sja1105_packing(buf, &entry->mirr_ptacu, 350, 350, size, op); | ||
132 | sja1105_packing(buf, &entry->switchid, 349, 347, size, op); | ||
133 | sja1105_packing(buf, &entry->hostprio, 346, 344, size, op); | ||
134 | sja1105_packing(buf, &entry->mac_fltres1, 343, 296, size, op); | ||
135 | sja1105_packing(buf, &entry->mac_fltres0, 295, 248, size, op); | ||
136 | sja1105_packing(buf, &entry->mac_flt1, 247, 200, size, op); | ||
137 | sja1105_packing(buf, &entry->mac_flt0, 199, 152, size, op); | ||
138 | sja1105_packing(buf, &entry->incl_srcpt1, 151, 151, size, op); | ||
139 | sja1105_packing(buf, &entry->incl_srcpt0, 150, 150, size, op); | ||
140 | sja1105_packing(buf, &entry->send_meta1, 149, 149, size, op); | ||
141 | sja1105_packing(buf, &entry->send_meta0, 148, 148, size, op); | ||
142 | sja1105_packing(buf, &entry->casc_port, 147, 145, size, op); | ||
143 | sja1105_packing(buf, &entry->host_port, 144, 142, size, op); | ||
144 | sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op); | ||
145 | sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op); | ||
146 | sja1105_packing(buf, &entry->vlmask, 106, 75, size, op); | ||
147 | sja1105_packing(buf, &entry->tpid, 74, 59, size, op); | ||
148 | sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op); | ||
149 | sja1105_packing(buf, &entry->tpid2, 57, 42, size, op); | ||
150 | sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op); | ||
151 | sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op); | ||
152 | sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op); | ||
153 | sja1105_packing(buf, &entry->egrmirrdei, 25, 25, size, op); | ||
154 | sja1105_packing(buf, &entry->replay_port, 24, 22, size, op); | ||
155 | return size; | ||
156 | } | ||
157 | |||
158 | static size_t | ||
159 | sja1105_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr, | ||
160 | enum packing_op op) | ||
161 | { | ||
162 | const size_t size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY; | ||
163 | struct sja1105_l2_forwarding_params_entry *entry = entry_ptr; | ||
164 | int offset, i; | ||
165 | |||
166 | sja1105_packing(buf, &entry->max_dynp, 95, 93, size, op); | ||
167 | for (i = 0, offset = 13; i < 8; i++, offset += 10) | ||
168 | sja1105_packing(buf, &entry->part_spc[i], | ||
169 | offset + 9, offset + 0, size, op); | ||
170 | return size; | ||
171 | } | ||
172 | |||
173 | size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr, | ||
174 | enum packing_op op) | ||
175 | { | ||
176 | const size_t size = SJA1105_SIZE_L2_FORWARDING_ENTRY; | ||
177 | struct sja1105_l2_forwarding_entry *entry = entry_ptr; | ||
178 | int offset, i; | ||
179 | |||
180 | sja1105_packing(buf, &entry->bc_domain, 63, 59, size, op); | ||
181 | sja1105_packing(buf, &entry->reach_port, 58, 54, size, op); | ||
182 | sja1105_packing(buf, &entry->fl_domain, 53, 49, size, op); | ||
183 | for (i = 0, offset = 25; i < 8; i++, offset += 3) | ||
184 | sja1105_packing(buf, &entry->vlan_pmap[i], | ||
185 | offset + 2, offset + 0, size, op); | ||
186 | return size; | ||
187 | } | ||
188 | |||
189 | static size_t | ||
190 | sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr, | ||
191 | enum packing_op op) | ||
192 | { | ||
193 | const size_t size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY; | ||
194 | struct sja1105_l2_lookup_params_entry *entry = entry_ptr; | ||
195 | |||
196 | sja1105_packing(buf, &entry->maxage, 31, 17, size, op); | ||
197 | sja1105_packing(buf, &entry->dyn_tbsz, 16, 14, size, op); | ||
198 | sja1105_packing(buf, &entry->poly, 13, 6, size, op); | ||
199 | sja1105_packing(buf, &entry->shared_learn, 5, 5, size, op); | ||
200 | sja1105_packing(buf, &entry->no_enf_hostprt, 4, 4, size, op); | ||
201 | sja1105_packing(buf, &entry->no_mgmt_learn, 3, 3, size, op); | ||
202 | return size; | ||
203 | } | ||
204 | |||
205 | static size_t | ||
206 | sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr, | ||
207 | enum packing_op op) | ||
208 | { | ||
209 | const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY; | ||
210 | struct sja1105_l2_lookup_params_entry *entry = entry_ptr; | ||
211 | |||
212 | sja1105_packing(buf, &entry->maxage, 57, 43, size, op); | ||
213 | sja1105_packing(buf, &entry->shared_learn, 27, 27, size, op); | ||
214 | sja1105_packing(buf, &entry->no_enf_hostprt, 26, 26, size, op); | ||
215 | sja1105_packing(buf, &entry->no_mgmt_learn, 25, 25, size, op); | ||
216 | return size; | ||
217 | } | ||
218 | |||
219 | size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr, | ||
220 | enum packing_op op) | ||
221 | { | ||
222 | const size_t size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY; | ||
223 | struct sja1105_l2_lookup_entry *entry = entry_ptr; | ||
224 | |||
225 | sja1105_packing(buf, &entry->vlanid, 95, 84, size, op); | ||
226 | sja1105_packing(buf, &entry->macaddr, 83, 36, size, op); | ||
227 | sja1105_packing(buf, &entry->destports, 35, 31, size, op); | ||
228 | sja1105_packing(buf, &entry->enfport, 30, 30, size, op); | ||
229 | sja1105_packing(buf, &entry->index, 29, 20, size, op); | ||
230 | return size; | ||
231 | } | ||
232 | |||
233 | size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr, | ||
234 | enum packing_op op) | ||
235 | { | ||
236 | const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY; | ||
237 | struct sja1105_l2_lookup_entry *entry = entry_ptr; | ||
238 | |||
239 | /* These are static L2 lookup entries, so the structure | ||
240 | * should match UM11040 Table 16/17 definitions when | ||
241 | * LOCKEDS is 1. | ||
242 | */ | ||
243 | sja1105_packing(buf, &entry->vlanid, 81, 70, size, op); | ||
244 | sja1105_packing(buf, &entry->macaddr, 69, 22, size, op); | ||
245 | sja1105_packing(buf, &entry->destports, 21, 17, size, op); | ||
246 | sja1105_packing(buf, &entry->enfport, 16, 16, size, op); | ||
247 | sja1105_packing(buf, &entry->index, 15, 6, size, op); | ||
248 | return size; | ||
249 | } | ||
250 | |||
251 | static size_t sja1105_l2_policing_entry_packing(void *buf, void *entry_ptr, | ||
252 | enum packing_op op) | ||
253 | { | ||
254 | const size_t size = SJA1105_SIZE_L2_POLICING_ENTRY; | ||
255 | struct sja1105_l2_policing_entry *entry = entry_ptr; | ||
256 | |||
257 | sja1105_packing(buf, &entry->sharindx, 63, 58, size, op); | ||
258 | sja1105_packing(buf, &entry->smax, 57, 42, size, op); | ||
259 | sja1105_packing(buf, &entry->rate, 41, 26, size, op); | ||
260 | sja1105_packing(buf, &entry->maxlen, 25, 15, size, op); | ||
261 | sja1105_packing(buf, &entry->partition, 14, 12, size, op); | ||
262 | return size; | ||
263 | } | ||
264 | |||
265 | static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr, | ||
266 | enum packing_op op) | ||
267 | { | ||
268 | const size_t size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY; | ||
269 | struct sja1105_mac_config_entry *entry = entry_ptr; | ||
270 | int offset, i; | ||
271 | |||
272 | for (i = 0, offset = 72; i < 8; i++, offset += 19) { | ||
273 | sja1105_packing(buf, &entry->enabled[i], | ||
274 | offset + 0, offset + 0, size, op); | ||
275 | sja1105_packing(buf, &entry->base[i], | ||
276 | offset + 9, offset + 1, size, op); | ||
277 | sja1105_packing(buf, &entry->top[i], | ||
278 | offset + 18, offset + 10, size, op); | ||
279 | } | ||
280 | sja1105_packing(buf, &entry->ifg, 71, 67, size, op); | ||
281 | sja1105_packing(buf, &entry->speed, 66, 65, size, op); | ||
282 | sja1105_packing(buf, &entry->tp_delin, 64, 49, size, op); | ||
283 | sja1105_packing(buf, &entry->tp_delout, 48, 33, size, op); | ||
284 | sja1105_packing(buf, &entry->maxage, 32, 25, size, op); | ||
285 | sja1105_packing(buf, &entry->vlanprio, 24, 22, size, op); | ||
286 | sja1105_packing(buf, &entry->vlanid, 21, 10, size, op); | ||
287 | sja1105_packing(buf, &entry->ing_mirr, 9, 9, size, op); | ||
288 | sja1105_packing(buf, &entry->egr_mirr, 8, 8, size, op); | ||
289 | sja1105_packing(buf, &entry->drpnona664, 7, 7, size, op); | ||
290 | sja1105_packing(buf, &entry->drpdtag, 6, 6, size, op); | ||
291 | sja1105_packing(buf, &entry->drpuntag, 5, 5, size, op); | ||
292 | sja1105_packing(buf, &entry->retag, 4, 4, size, op); | ||
293 | sja1105_packing(buf, &entry->dyn_learn, 3, 3, size, op); | ||
294 | sja1105_packing(buf, &entry->egress, 2, 2, size, op); | ||
295 | sja1105_packing(buf, &entry->ingress, 1, 1, size, op); | ||
296 | return size; | ||
297 | } | ||
298 | |||
299 | size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr, | ||
300 | enum packing_op op) | ||
301 | { | ||
302 | const size_t size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY; | ||
303 | struct sja1105_mac_config_entry *entry = entry_ptr; | ||
304 | int offset, i; | ||
305 | |||
306 | for (i = 0, offset = 104; i < 8; i++, offset += 19) { | ||
307 | sja1105_packing(buf, &entry->enabled[i], | ||
308 | offset + 0, offset + 0, size, op); | ||
309 | sja1105_packing(buf, &entry->base[i], | ||
310 | offset + 9, offset + 1, size, op); | ||
311 | sja1105_packing(buf, &entry->top[i], | ||
312 | offset + 18, offset + 10, size, op); | ||
313 | } | ||
314 | sja1105_packing(buf, &entry->ifg, 103, 99, size, op); | ||
315 | sja1105_packing(buf, &entry->speed, 98, 97, size, op); | ||
316 | sja1105_packing(buf, &entry->tp_delin, 96, 81, size, op); | ||
317 | sja1105_packing(buf, &entry->tp_delout, 80, 65, size, op); | ||
318 | sja1105_packing(buf, &entry->maxage, 64, 57, size, op); | ||
319 | sja1105_packing(buf, &entry->vlanprio, 56, 54, size, op); | ||
320 | sja1105_packing(buf, &entry->vlanid, 53, 42, size, op); | ||
321 | sja1105_packing(buf, &entry->ing_mirr, 41, 41, size, op); | ||
322 | sja1105_packing(buf, &entry->egr_mirr, 40, 40, size, op); | ||
323 | sja1105_packing(buf, &entry->drpnona664, 39, 39, size, op); | ||
324 | sja1105_packing(buf, &entry->drpdtag, 38, 38, size, op); | ||
325 | sja1105_packing(buf, &entry->drpuntag, 35, 35, size, op); | ||
326 | sja1105_packing(buf, &entry->retag, 34, 34, size, op); | ||
327 | sja1105_packing(buf, &entry->dyn_learn, 33, 33, size, op); | ||
328 | sja1105_packing(buf, &entry->egress, 32, 32, size, op); | ||
329 | sja1105_packing(buf, &entry->ingress, 31, 31, size, op); | ||
330 | return size; | ||
331 | } | ||
332 | |||
333 | size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr, | ||
334 | enum packing_op op) | ||
335 | { | ||
336 | const size_t size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY; | ||
337 | struct sja1105_vlan_lookup_entry *entry = entry_ptr; | ||
338 | |||
339 | sja1105_packing(buf, &entry->ving_mirr, 63, 59, size, op); | ||
340 | sja1105_packing(buf, &entry->vegr_mirr, 58, 54, size, op); | ||
341 | sja1105_packing(buf, &entry->vmemb_port, 53, 49, size, op); | ||
342 | sja1105_packing(buf, &entry->vlan_bc, 48, 44, size, op); | ||
343 | sja1105_packing(buf, &entry->tag_port, 43, 39, size, op); | ||
344 | sja1105_packing(buf, &entry->vlanid, 38, 27, size, op); | ||
345 | return size; | ||
346 | } | ||
347 | |||
348 | static size_t sja1105_xmii_params_entry_packing(void *buf, void *entry_ptr, | ||
349 | enum packing_op op) | ||
350 | { | ||
351 | const size_t size = SJA1105_SIZE_XMII_PARAMS_ENTRY; | ||
352 | struct sja1105_xmii_params_entry *entry = entry_ptr; | ||
353 | int offset, i; | ||
354 | |||
355 | for (i = 0, offset = 17; i < 5; i++, offset += 3) { | ||
356 | sja1105_packing(buf, &entry->xmii_mode[i], | ||
357 | offset + 1, offset + 0, size, op); | ||
358 | sja1105_packing(buf, &entry->phy_mac[i], | ||
359 | offset + 2, offset + 2, size, op); | ||
360 | } | ||
361 | return size; | ||
362 | } | ||
363 | |||
364 | size_t sja1105_table_header_packing(void *buf, void *entry_ptr, | ||
365 | enum packing_op op) | ||
366 | { | ||
367 | const size_t size = SJA1105_SIZE_TABLE_HEADER; | ||
368 | struct sja1105_table_header *entry = entry_ptr; | ||
369 | |||
370 | sja1105_packing(buf, &entry->block_id, 31, 24, size, op); | ||
371 | sja1105_packing(buf, &entry->len, 55, 32, size, op); | ||
372 | sja1105_packing(buf, &entry->crc, 95, 64, size, op); | ||
373 | return size; | ||
374 | } | ||
375 | |||
376 | /* WARNING: the *hdr pointer is really non-const: this function | ||
377 | * modifies the header's CRC field as part of a 2-stage packing operation | ||
378 | */ | ||
379 | void | ||
380 | sja1105_table_header_pack_with_crc(void *buf, struct sja1105_table_header *hdr) | ||
381 | { | ||
382 | /* First pack the table header as-is, then calculate the CRC, and | ||
383 | * finally put the proper CRC into the packed buffer | ||
384 | */ | ||
385 | memset(buf, 0, SJA1105_SIZE_TABLE_HEADER); | ||
386 | sja1105_table_header_packing(buf, hdr, PACK); | ||
387 | hdr->crc = sja1105_crc32(buf, SJA1105_SIZE_TABLE_HEADER - 4); | ||
388 | sja1105_pack(buf + SJA1105_SIZE_TABLE_HEADER - 4, &hdr->crc, 31, 0, 4); | ||
389 | } | ||
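The warning above is easier to see from a short, standalone usage sketch (not part of the patch; the demo function name and the choice of block ID and length are made up for illustration):

static void sja1105_header_crc_demo(void)
{
	u8 buf[SJA1105_SIZE_TABLE_HEADER];
	struct sja1105_table_header hdr = {
		.block_id = BLKID_VLAN_LOOKUP,
		/* length of the entry data that will follow, in 32-bit words */
		.len = 2 * SJA1105_SIZE_VLAN_LOOKUP_ENTRY / 4,
	};

	sja1105_table_header_pack_with_crc(buf, &hdr);
	/* Bytes 0..7 of buf now hold block_id and len, and bytes 8..11 hold
	 * the CRC32 computed over bytes 0..7. hdr.crc was updated in place,
	 * which is what the non-const warning refers to.
	 */
}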
390 | |||
391 | static void sja1105_table_write_crc(u8 *table_start, u8 *crc_ptr) | ||
392 | { | ||
393 | u64 computed_crc; | ||
394 | int len_bytes; | ||
395 | |||
396 | len_bytes = (uintptr_t)(crc_ptr - table_start); | ||
397 | computed_crc = sja1105_crc32(table_start, len_bytes); | ||
398 | sja1105_pack(crc_ptr, &computed_crc, 31, 0, 4); | ||
399 | } | ||
400 | |||
401 | /* The block IDs that the switches support are unfortunately sparse, so keep a | ||
402 | * mapping table to "block indices" and translate back and forth so that we | ||
403 | * don't waste memory in struct sja1105_static_config. | ||
404 | * Also, since the block ID comes from essentially untrusted input (unpacking | ||
405 | * the static config from userspace), it has to be sanitized (range-checked) | ||
406 | * before it is used to index kernel memory via blk_idx. | ||
407 | */ | ||
408 | static u64 blk_id_map[BLK_IDX_MAX] = { | ||
409 | [BLK_IDX_L2_LOOKUP] = BLKID_L2_LOOKUP, | ||
410 | [BLK_IDX_L2_POLICING] = BLKID_L2_POLICING, | ||
411 | [BLK_IDX_VLAN_LOOKUP] = BLKID_VLAN_LOOKUP, | ||
412 | [BLK_IDX_L2_FORWARDING] = BLKID_L2_FORWARDING, | ||
413 | [BLK_IDX_MAC_CONFIG] = BLKID_MAC_CONFIG, | ||
414 | [BLK_IDX_L2_LOOKUP_PARAMS] = BLKID_L2_LOOKUP_PARAMS, | ||
415 | [BLK_IDX_L2_FORWARDING_PARAMS] = BLKID_L2_FORWARDING_PARAMS, | ||
416 | [BLK_IDX_GENERAL_PARAMS] = BLKID_GENERAL_PARAMS, | ||
417 | [BLK_IDX_XMII_PARAMS] = BLKID_XMII_PARAMS, | ||
418 | }; | ||
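The range check mentioned above could look like the following sketch. The helper name is hypothetical and not part of the patch, but it only uses blk_id_map and the enum values from sja1105_static_config.h:

/* Hypothetical reverse lookup: map an untrusted block ID back to a block
 * index, returning BLK_IDX_INVAL for unknown IDs so that callers never
 * index config->tables out of bounds.
 */
static enum sja1105_blk_idx sja1105_blk_idx_from_id(u64 block_id)
{
	enum sja1105_blk_idx i;

	for (i = 0; i < BLK_IDX_MAX; i++)
		if (blk_id_map[i] == block_id)
			return i;

	return BLK_IDX_INVAL;
}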
419 | |||
420 | const char *sja1105_static_config_error_msg[] = { | ||
421 | [SJA1105_CONFIG_OK] = "", | ||
422 | [SJA1105_MISSING_L2_POLICING_TABLE] = | ||
423 | "l2-policing-table needs to have at least one entry", | ||
424 | [SJA1105_MISSING_L2_FORWARDING_TABLE] = | ||
425 | "l2-forwarding-table is either missing or incomplete", | ||
426 | [SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE] = | ||
427 | "l2-forwarding-parameters-table is missing", | ||
428 | [SJA1105_MISSING_GENERAL_PARAMS_TABLE] = | ||
429 | "general-parameters-table is missing", | ||
430 | [SJA1105_MISSING_VLAN_TABLE] = | ||
431 | "vlan-lookup-table needs to have at least the default untagged VLAN", | ||
432 | [SJA1105_MISSING_XMII_TABLE] = | ||
433 | "xmii-table is missing", | ||
434 | [SJA1105_MISSING_MAC_TABLE] = | ||
435 | "mac-configuration-table needs to contain an entry for each port", | ||
436 | [SJA1105_OVERCOMMITTED_FRAME_MEMORY] = | ||
437 | "Not allowed to overcommit frame memory. L2 memory partitions " | ||
438 | "and VL memory partitions share the same space. The sum of all " | ||
439 | "16 memory partitions is not allowed to be larger than 929 " | ||
440 | "128-byte blocks (or 910 with retagging). Please adjust " | ||
441 | "l2-forwarding-parameters-table.part_spc and/or " | ||
442 | "vl-forwarding-parameters-table.partspc.", | ||
443 | }; | ||
444 | |||
445 | sja1105_config_valid_t | ||
446 | static_config_check_memory_size(const struct sja1105_table *tables) | ||
447 | { | ||
448 | const struct sja1105_l2_forwarding_params_entry *l2_fwd_params; | ||
449 | int i, mem = 0; | ||
450 | |||
451 | l2_fwd_params = tables[BLK_IDX_L2_FORWARDING_PARAMS].entries; | ||
452 | |||
453 | for (i = 0; i < 8; i++) | ||
454 | mem += l2_fwd_params->part_spc[i]; | ||
455 | |||
456 | if (mem > SJA1105_MAX_FRAME_MEMORY) | ||
457 | return SJA1105_OVERCOMMITTED_FRAME_MEMORY; | ||
458 | |||
459 | return SJA1105_CONFIG_OK; | ||
460 | } | ||
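To make the budget enforced here concrete, two hypothetical l2-forwarding-parameters entries (the values are illustrative, not driver defaults):

/* Passes: 929 + 0 + ... + 0 == SJA1105_MAX_FRAME_MEMORY */
static const struct sja1105_l2_forwarding_params_entry ok_params = {
	.part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
};

/* Overcommitted: 8 * 120 = 960 > 929, so a config carrying this entry
 * would fail the check above with SJA1105_OVERCOMMITTED_FRAME_MEMORY.
 */
static const struct sja1105_l2_forwarding_params_entry bad_params = {
	.part_spc = { 120, 120, 120, 120, 120, 120, 120, 120 },
};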
461 | |||
462 | sja1105_config_valid_t | ||
463 | sja1105_static_config_check_valid(const struct sja1105_static_config *config) | ||
464 | { | ||
465 | const struct sja1105_table *tables = config->tables; | ||
466 | #define IS_FULL(blk_idx) \ | ||
467 | (tables[blk_idx].entry_count == tables[blk_idx].ops->max_entry_count) | ||
468 | |||
469 | if (tables[BLK_IDX_L2_POLICING].entry_count == 0) | ||
470 | return SJA1105_MISSING_L2_POLICING_TABLE; | ||
471 | |||
472 | if (tables[BLK_IDX_VLAN_LOOKUP].entry_count == 0) | ||
473 | return SJA1105_MISSING_VLAN_TABLE; | ||
474 | |||
475 | if (!IS_FULL(BLK_IDX_L2_FORWARDING)) | ||
476 | return SJA1105_MISSING_L2_FORWARDING_TABLE; | ||
477 | |||
478 | if (!IS_FULL(BLK_IDX_MAC_CONFIG)) | ||
479 | return SJA1105_MISSING_MAC_TABLE; | ||
480 | |||
481 | if (!IS_FULL(BLK_IDX_L2_FORWARDING_PARAMS)) | ||
482 | return SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE; | ||
483 | |||
484 | if (!IS_FULL(BLK_IDX_GENERAL_PARAMS)) | ||
485 | return SJA1105_MISSING_GENERAL_PARAMS_TABLE; | ||
486 | |||
487 | if (!IS_FULL(BLK_IDX_XMII_PARAMS)) | ||
488 | return SJA1105_MISSING_XMII_TABLE; | ||
489 | |||
490 | return static_config_check_memory_size(tables); | ||
491 | #undef IS_FULL | ||
492 | } | ||
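A minimal sketch of how a caller might combine the validity check with the error-message array (the wrapper name and the struct device pointer are assumptions; dev_err() comes from <linux/device.h>):

static int sja1105_validate_config(struct device *dev,
				   const struct sja1105_static_config *config)
{
	sja1105_config_valid_t valid;

	valid = sja1105_static_config_check_valid(config);
	if (valid != SJA1105_CONFIG_OK) {
		dev_err(dev, "Invalid static config: %s\n",
			sja1105_static_config_error_msg[valid]);
		return -EINVAL;
	}

	return 0;
}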
493 | |||
494 | void | ||
495 | sja1105_static_config_pack(void *buf, struct sja1105_static_config *config) | ||
496 | { | ||
497 | struct sja1105_table_header header = {0}; | ||
498 | enum sja1105_blk_idx i; | ||
499 | char *p = buf; | ||
500 | int j; | ||
501 | |||
502 | sja1105_pack(p, &config->device_id, 31, 0, 4); | ||
503 | p += SJA1105_SIZE_DEVICE_ID; | ||
504 | |||
505 | for (i = 0; i < BLK_IDX_MAX; i++) { | ||
506 | const struct sja1105_table *table; | ||
507 | char *table_start; | ||
508 | |||
509 | table = &config->tables[i]; | ||
510 | if (!table->entry_count) | ||
511 | continue; | ||
512 | |||
513 | header.block_id = blk_id_map[i]; | ||
514 | header.len = table->entry_count * | ||
515 | table->ops->packed_entry_size / 4; | ||
516 | sja1105_table_header_pack_with_crc(p, &header); | ||
517 | p += SJA1105_SIZE_TABLE_HEADER; | ||
518 | table_start = p; | ||
519 | for (j = 0; j < table->entry_count; j++) { | ||
520 | u8 *entry_ptr = table->entries; | ||
521 | |||
522 | entry_ptr += j * table->ops->unpacked_entry_size; | ||
523 | memset(p, 0, table->ops->packed_entry_size); | ||
524 | table->ops->packing(p, entry_ptr, PACK); | ||
525 | p += table->ops->packed_entry_size; | ||
526 | } | ||
527 | sja1105_table_write_crc(table_start, p); | ||
528 | p += 4; | ||
529 | } | ||
530 | /* Final header: | ||
531 | * Block ID does not matter | ||
532 | * Length of 0 marks that header is final | ||
533 | * CRC will be replaced on-the-fly on "config upload" | ||
534 | */ | ||
535 | header.block_id = 0; | ||
536 | header.len = 0; | ||
537 | header.crc = 0xDEADBEEF; | ||
538 | memset(p, 0, SJA1105_SIZE_TABLE_HEADER); | ||
539 | sja1105_table_header_packing(p, &header, PACK); | ||
540 | } | ||
541 | |||
542 | size_t | ||
543 | sja1105_static_config_get_length(const struct sja1105_static_config *config) | ||
544 | { | ||
545 | unsigned int sum; | ||
546 | unsigned int header_count; | ||
547 | enum sja1105_blk_idx i; | ||
548 | |||
549 | /* Ending header */ | ||
550 | header_count = 1; | ||
551 | sum = SJA1105_SIZE_DEVICE_ID; | ||
552 | |||
553 | /* Tables (headers and entries) */ | ||
554 | for (i = 0; i < BLK_IDX_MAX; i++) { | ||
555 | const struct sja1105_table *table; | ||
556 | |||
557 | table = &config->tables[i]; | ||
558 | if (table->entry_count) | ||
559 | header_count++; | ||
560 | |||
561 | sum += table->ops->packed_entry_size * table->entry_count; | ||
562 | } | ||
563 | /* Headers have an additional CRC at the end */ | ||
564 | sum += header_count * (SJA1105_SIZE_TABLE_HEADER + 4); | ||
565 | /* Last header does not have an extra CRC because there is no data */ | ||
566 | sum -= 4; | ||
567 | |||
568 | return sum; | ||
569 | } | ||
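Taken together, sja1105_static_config_get_length() and sja1105_static_config_pack() are meant to be used back to back; a minimal sketch, with buffer ownership and the wrapper name being assumptions (kzalloc() comes from <linux/slab.h>):

/* Compute the exact packed size, allocate a buffer of that size and
 * pack the static config into it. The caller must have populated
 * config->tables beforehand and must kfree() the returned buffer.
 */
static void *sja1105_pack_config(struct sja1105_static_config *config,
				 size_t *buf_len)
{
	void *buf;

	*buf_len = sja1105_static_config_get_length(config);

	buf = kzalloc(*buf_len, GFP_KERNEL);
	if (!buf)
		return NULL;

	sja1105_static_config_pack(buf, config);

	return buf;
}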
570 | |||
571 | /* Compatibility matrices */ | ||
572 | |||
573 | /* SJA1105E: First generation, no TTEthernet */ | ||
574 | struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = { | ||
575 | [BLK_IDX_L2_LOOKUP] = { | ||
576 | .packing = sja1105et_l2_lookup_entry_packing, | ||
577 | .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), | ||
578 | .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY, | ||
579 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT, | ||
580 | }, | ||
581 | [BLK_IDX_L2_POLICING] = { | ||
582 | .packing = sja1105_l2_policing_entry_packing, | ||
583 | .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry), | ||
584 | .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY, | ||
585 | .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT, | ||
586 | }, | ||
587 | [BLK_IDX_VLAN_LOOKUP] = { | ||
588 | .packing = sja1105_vlan_lookup_entry_packing, | ||
589 | .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry), | ||
590 | .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY, | ||
591 | .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT, | ||
592 | }, | ||
593 | [BLK_IDX_L2_FORWARDING] = { | ||
594 | .packing = sja1105_l2_forwarding_entry_packing, | ||
595 | .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry), | ||
596 | .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY, | ||
597 | .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT, | ||
598 | }, | ||
599 | [BLK_IDX_MAC_CONFIG] = { | ||
600 | .packing = sja1105et_mac_config_entry_packing, | ||
601 | .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry), | ||
602 | .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY, | ||
603 | .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, | ||
604 | }, | ||
605 | [BLK_IDX_L2_LOOKUP_PARAMS] = { | ||
606 | .packing = sja1105et_l2_lookup_params_entry_packing, | ||
607 | .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry), | ||
608 | .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY, | ||
609 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, | ||
610 | }, | ||
611 | [BLK_IDX_L2_FORWARDING_PARAMS] = { | ||
612 | .packing = sja1105_l2_forwarding_params_entry_packing, | ||
613 | .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry), | ||
614 | .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY, | ||
615 | .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT, | ||
616 | }, | ||
617 | [BLK_IDX_GENERAL_PARAMS] = { | ||
618 | .packing = sja1105et_general_params_entry_packing, | ||
619 | .unpacked_entry_size = sizeof(struct sja1105_general_params_entry), | ||
620 | .packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY, | ||
621 | .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT, | ||
622 | }, | ||
623 | [BLK_IDX_XMII_PARAMS] = { | ||
624 | .packing = sja1105_xmii_params_entry_packing, | ||
625 | .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry), | ||
626 | .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY, | ||
627 | .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT, | ||
628 | }, | ||
629 | }; | ||
630 | |||
631 | /* SJA1105T: First generation, TTEthernet */ | ||
632 | struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = { | ||
633 | [BLK_IDX_L2_LOOKUP] = { | ||
634 | .packing = sja1105et_l2_lookup_entry_packing, | ||
635 | .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), | ||
636 | .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY, | ||
637 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT, | ||
638 | }, | ||
639 | [BLK_IDX_L2_POLICING] = { | ||
640 | .packing = sja1105_l2_policing_entry_packing, | ||
641 | .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry), | ||
642 | .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY, | ||
643 | .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT, | ||
644 | }, | ||
645 | [BLK_IDX_VLAN_LOOKUP] = { | ||
646 | .packing = sja1105_vlan_lookup_entry_packing, | ||
647 | .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry), | ||
648 | .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY, | ||
649 | .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT, | ||
650 | }, | ||
651 | [BLK_IDX_L2_FORWARDING] = { | ||
652 | .packing = sja1105_l2_forwarding_entry_packing, | ||
653 | .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry), | ||
654 | .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY, | ||
655 | .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT, | ||
656 | }, | ||
657 | [BLK_IDX_MAC_CONFIG] = { | ||
658 | .packing = sja1105et_mac_config_entry_packing, | ||
659 | .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry), | ||
660 | .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY, | ||
661 | .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, | ||
662 | }, | ||
663 | [BLK_IDX_L2_LOOKUP_PARAMS] = { | ||
664 | .packing = sja1105et_l2_lookup_params_entry_packing, | ||
665 | .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry), | ||
666 | .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY, | ||
667 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, | ||
668 | }, | ||
669 | [BLK_IDX_L2_FORWARDING_PARAMS] = { | ||
670 | .packing = sja1105_l2_forwarding_params_entry_packing, | ||
671 | .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry), | ||
672 | .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY, | ||
673 | .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT, | ||
674 | }, | ||
675 | [BLK_IDX_GENERAL_PARAMS] = { | ||
676 | .packing = sja1105et_general_params_entry_packing, | ||
677 | .unpacked_entry_size = sizeof(struct sja1105_general_params_entry), | ||
678 | .packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY, | ||
679 | .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT, | ||
680 | }, | ||
681 | [BLK_IDX_XMII_PARAMS] = { | ||
682 | .packing = sja1105_xmii_params_entry_packing, | ||
683 | .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry), | ||
684 | .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY, | ||
685 | .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT, | ||
686 | }, | ||
687 | }; | ||
688 | |||
689 | /* SJA1105P: Second generation, no TTEthernet, no SGMII */ | ||
690 | struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = { | ||
691 | [BLK_IDX_L2_LOOKUP] = { | ||
692 | .packing = sja1105pqrs_l2_lookup_entry_packing, | ||
693 | .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), | ||
694 | .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, | ||
695 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT, | ||
696 | }, | ||
697 | [BLK_IDX_L2_POLICING] = { | ||
698 | .packing = sja1105_l2_policing_entry_packing, | ||
699 | .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry), | ||
700 | .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY, | ||
701 | .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT, | ||
702 | }, | ||
703 | [BLK_IDX_VLAN_LOOKUP] = { | ||
704 | .packing = sja1105_vlan_lookup_entry_packing, | ||
705 | .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry), | ||
706 | .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY, | ||
707 | .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT, | ||
708 | }, | ||
709 | [BLK_IDX_L2_FORWARDING] = { | ||
710 | .packing = sja1105_l2_forwarding_entry_packing, | ||
711 | .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry), | ||
712 | .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY, | ||
713 | .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT, | ||
714 | }, | ||
715 | [BLK_IDX_MAC_CONFIG] = { | ||
716 | .packing = sja1105pqrs_mac_config_entry_packing, | ||
717 | .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry), | ||
718 | .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY, | ||
719 | .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, | ||
720 | }, | ||
721 | [BLK_IDX_L2_LOOKUP_PARAMS] = { | ||
722 | .packing = sja1105pqrs_l2_lookup_params_entry_packing, | ||
723 | .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry), | ||
724 | .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY, | ||
725 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, | ||
726 | }, | ||
727 | [BLK_IDX_L2_FORWARDING_PARAMS] = { | ||
728 | .packing = sja1105_l2_forwarding_params_entry_packing, | ||
729 | .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry), | ||
730 | .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY, | ||
731 | .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT, | ||
732 | }, | ||
733 | [BLK_IDX_GENERAL_PARAMS] = { | ||
734 | .packing = sja1105pqrs_general_params_entry_packing, | ||
735 | .unpacked_entry_size = sizeof(struct sja1105_general_params_entry), | ||
736 | .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY, | ||
737 | .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT, | ||
738 | }, | ||
739 | [BLK_IDX_XMII_PARAMS] = { | ||
740 | .packing = sja1105_xmii_params_entry_packing, | ||
741 | .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry), | ||
742 | .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY, | ||
743 | .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT, | ||
744 | }, | ||
745 | }; | ||
746 | |||
747 | /* SJA1105Q: Second generation, TTEthernet, no SGMII */ | ||
748 | struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = { | ||
749 | [BLK_IDX_L2_LOOKUP] = { | ||
750 | .packing = sja1105pqrs_l2_lookup_entry_packing, | ||
751 | .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), | ||
752 | .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, | ||
753 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT, | ||
754 | }, | ||
755 | [BLK_IDX_L2_POLICING] = { | ||
756 | .packing = sja1105_l2_policing_entry_packing, | ||
757 | .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry), | ||
758 | .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY, | ||
759 | .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT, | ||
760 | }, | ||
761 | [BLK_IDX_VLAN_LOOKUP] = { | ||
762 | .packing = sja1105_vlan_lookup_entry_packing, | ||
763 | .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry), | ||
764 | .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY, | ||
765 | .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT, | ||
766 | }, | ||
767 | [BLK_IDX_L2_FORWARDING] = { | ||
768 | .packing = sja1105_l2_forwarding_entry_packing, | ||
769 | .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry), | ||
770 | .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY, | ||
771 | .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT, | ||
772 | }, | ||
773 | [BLK_IDX_MAC_CONFIG] = { | ||
774 | .packing = sja1105pqrs_mac_config_entry_packing, | ||
775 | .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry), | ||
776 | .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY, | ||
777 | .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, | ||
778 | }, | ||
779 | [BLK_IDX_L2_LOOKUP_PARAMS] = { | ||
780 | .packing = sja1105pqrs_l2_lookup_params_entry_packing, | ||
781 | .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry), | ||
782 | .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY, | ||
783 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, | ||
784 | }, | ||
785 | [BLK_IDX_L2_FORWARDING_PARAMS] = { | ||
786 | .packing = sja1105_l2_forwarding_params_entry_packing, | ||
787 | .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry), | ||
788 | .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY, | ||
789 | .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT, | ||
790 | }, | ||
791 | [BLK_IDX_GENERAL_PARAMS] = { | ||
792 | .packing = sja1105pqrs_general_params_entry_packing, | ||
793 | .unpacked_entry_size = sizeof(struct sja1105_general_params_entry), | ||
794 | .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY, | ||
795 | .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT, | ||
796 | }, | ||
797 | [BLK_IDX_XMII_PARAMS] = { | ||
798 | .packing = sja1105_xmii_params_entry_packing, | ||
799 | .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry), | ||
800 | .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY, | ||
801 | .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT, | ||
802 | }, | ||
803 | }; | ||
804 | |||
805 | /* SJA1105R: Second generation, no TTEthernet, SGMII */ | ||
806 | struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = { | ||
807 | [BLK_IDX_L2_LOOKUP] = { | ||
808 | .packing = sja1105pqrs_l2_lookup_entry_packing, | ||
809 | .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), | ||
810 | .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, | ||
811 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT, | ||
812 | }, | ||
813 | [BLK_IDX_L2_POLICING] = { | ||
814 | .packing = sja1105_l2_policing_entry_packing, | ||
815 | .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry), | ||
816 | .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY, | ||
817 | .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT, | ||
818 | }, | ||
819 | [BLK_IDX_VLAN_LOOKUP] = { | ||
820 | .packing = sja1105_vlan_lookup_entry_packing, | ||
821 | .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry), | ||
822 | .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY, | ||
823 | .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT, | ||
824 | }, | ||
825 | [BLK_IDX_L2_FORWARDING] = { | ||
826 | .packing = sja1105_l2_forwarding_entry_packing, | ||
827 | .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry), | ||
828 | .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY, | ||
829 | .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT, | ||
830 | }, | ||
831 | [BLK_IDX_MAC_CONFIG] = { | ||
832 | .packing = sja1105pqrs_mac_config_entry_packing, | ||
833 | .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry), | ||
834 | .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY, | ||
835 | .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, | ||
836 | }, | ||
837 | [BLK_IDX_L2_LOOKUP_PARAMS] = { | ||
838 | .packing = sja1105pqrs_l2_lookup_params_entry_packing, | ||
839 | .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry), | ||
840 | .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY, | ||
841 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, | ||
842 | }, | ||
843 | [BLK_IDX_L2_FORWARDING_PARAMS] = { | ||
844 | .packing = sja1105_l2_forwarding_params_entry_packing, | ||
845 | .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry), | ||
846 | .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY, | ||
847 | .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT, | ||
848 | }, | ||
849 | [BLK_IDX_GENERAL_PARAMS] = { | ||
850 | .packing = sja1105pqrs_general_params_entry_packing, | ||
851 | .unpacked_entry_size = sizeof(struct sja1105_general_params_entry), | ||
852 | .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY, | ||
853 | .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT, | ||
854 | }, | ||
855 | [BLK_IDX_XMII_PARAMS] = { | ||
856 | .packing = sja1105_xmii_params_entry_packing, | ||
857 | .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry), | ||
858 | .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY, | ||
859 | .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT, | ||
860 | }, | ||
861 | }; | ||
862 | |||
863 | /* SJA1105S: Second generation, TTEthernet, SGMII */ | ||
864 | struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = { | ||
865 | [BLK_IDX_L2_LOOKUP] = { | ||
866 | .packing = sja1105pqrs_l2_lookup_entry_packing, | ||
867 | .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), | ||
868 | .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, | ||
869 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT, | ||
870 | }, | ||
871 | [BLK_IDX_L2_POLICING] = { | ||
872 | .packing = sja1105_l2_policing_entry_packing, | ||
873 | .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry), | ||
874 | .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY, | ||
875 | .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT, | ||
876 | }, | ||
877 | [BLK_IDX_VLAN_LOOKUP] = { | ||
878 | .packing = sja1105_vlan_lookup_entry_packing, | ||
879 | .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry), | ||
880 | .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY, | ||
881 | .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT, | ||
882 | }, | ||
883 | [BLK_IDX_L2_FORWARDING] = { | ||
884 | .packing = sja1105_l2_forwarding_entry_packing, | ||
885 | .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry), | ||
886 | .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY, | ||
887 | .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT, | ||
888 | }, | ||
889 | [BLK_IDX_MAC_CONFIG] = { | ||
890 | .packing = sja1105pqrs_mac_config_entry_packing, | ||
891 | .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry), | ||
892 | .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY, | ||
893 | .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, | ||
894 | }, | ||
895 | [BLK_IDX_L2_LOOKUP_PARAMS] = { | ||
896 | .packing = sja1105pqrs_l2_lookup_params_entry_packing, | ||
897 | .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry), | ||
898 | .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY, | ||
899 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, | ||
900 | }, | ||
901 | [BLK_IDX_L2_FORWARDING_PARAMS] = { | ||
902 | .packing = sja1105_l2_forwarding_params_entry_packing, | ||
903 | .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry), | ||
904 | .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY, | ||
905 | .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT, | ||
906 | }, | ||
907 | [BLK_IDX_GENERAL_PARAMS] = { | ||
908 | .packing = sja1105pqrs_general_params_entry_packing, | ||
909 | .unpacked_entry_size = sizeof(struct sja1105_general_params_entry), | ||
910 | .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY, | ||
911 | .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT, | ||
912 | }, | ||
913 | [BLK_IDX_XMII_PARAMS] = { | ||
914 | .packing = sja1105_xmii_params_entry_packing, | ||
915 | .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry), | ||
916 | .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY, | ||
917 | .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT, | ||
918 | }, | ||
919 | }; | ||
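The six compatibility matrices differ only in which per-entry packers and packed sizes they reference. A probe path could pick one by part number along the lines of this sketch (the helper is hypothetical; the real driver's device-identification logic is not part of this file):

static const struct sja1105_table_ops *
sja1105_ops_by_part_no(u64 part_no)
{
	switch (part_no) {
	case SJA1105ET_PART_NO:
		/* E and T share a part number; telling them apart
		 * (sja1105e_table_ops vs sja1105t_table_ops) requires
		 * the device ID as well.
		 */
		return sja1105e_table_ops;
	case SJA1105P_PART_NO:
		return sja1105p_table_ops;
	case SJA1105Q_PART_NO:
		return sja1105q_table_ops;
	case SJA1105R_PART_NO:
		return sja1105r_table_ops;
	case SJA1105S_PART_NO:
		return sja1105s_table_ops;
	default:
		return NULL;
	}
}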
920 | |||
921 | int sja1105_static_config_init(struct sja1105_static_config *config, | ||
922 | const struct sja1105_table_ops *static_ops, | ||
923 | u64 device_id) | ||
924 | { | ||
925 | enum sja1105_blk_idx i; | ||
926 | |||
927 | *config = (struct sja1105_static_config) {0}; | ||
928 | |||
929 | /* Point each table's ops at the corresponding entry of the caller's | ||
930 | * static_ops array, for easier access | ||
931 | */ | ||
932 | for (i = 0; i < BLK_IDX_MAX; i++) | ||
933 | config->tables[i].ops = &static_ops[i]; | ||
934 | |||
935 | config->device_id = device_id; | ||
936 | return 0; | ||
937 | } | ||
938 | |||
939 | void sja1105_static_config_free(struct sja1105_static_config *config) | ||
940 | { | ||
941 | enum sja1105_blk_idx i; | ||
942 | |||
943 | for (i = 0; i < BLK_IDX_MAX; i++) { | ||
944 | if (config->tables[i].entry_count) { | ||
945 | kfree(config->tables[i].entries); | ||
946 | config->tables[i].entry_count = 0; | ||
947 | } | ||
948 | } | ||
949 | } | ||
950 | |||
951 | int sja1105_table_delete_entry(struct sja1105_table *table, int i) | ||
952 | { | ||
953 | size_t entry_size = table->ops->unpacked_entry_size; | ||
954 | u8 *entries = table->entries; | ||
955 | |||
956 | if (i >= table->entry_count) | ||
957 | return -ERANGE; | ||
958 | |||
959 | memmove(entries + i * entry_size, entries + (i + 1) * entry_size, | ||
960 | (table->entry_count - i - 1) * entry_size); | ||
961 | |||
962 | table->entry_count--; | ||
963 | |||
964 | return 0; | ||
965 | } | ||
966 | |||
967 | /* No pointers to table->entries should be kept when this is called. */ | ||
968 | int sja1105_table_resize(struct sja1105_table *table, size_t new_count) | ||
969 | { | ||
970 | size_t entry_size = table->ops->unpacked_entry_size; | ||
971 | void *new_entries, *old_entries = table->entries; | ||
972 | |||
973 | if (new_count > table->ops->max_entry_count) | ||
974 | return -ERANGE; | ||
975 | |||
976 | new_entries = kcalloc(new_count, entry_size, GFP_KERNEL); | ||
977 | if (!new_entries) | ||
978 | return -ENOMEM; | ||
979 | |||
980 | memcpy(new_entries, old_entries, min(new_count, table->entry_count) * | ||
981 | entry_size); | ||
982 | |||
983 | table->entries = new_entries; | ||
984 | table->entry_count = new_count; | ||
985 | kfree(old_entries); | ||
986 | return 0; | ||
987 | } | ||
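Because sja1105_table_resize() reallocates the entries buffer, any pointer into it must be re-derived after the call; a sketch of safe usage (the helper name is hypothetical):

static int sja1105_add_vlan_entry(struct sja1105_static_config *config, u16 vid)
{
	struct sja1105_table *table = &config->tables[BLK_IDX_VLAN_LOOKUP];
	struct sja1105_vlan_lookup_entry *vlan;
	int rc;

	rc = sja1105_table_resize(table, table->entry_count + 1);
	if (rc < 0)
		return rc;

	/* Only now is it safe to take a pointer into table->entries */
	vlan = table->entries;
	vlan[table->entry_count - 1].vlanid = vid;

	return 0;
}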
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h new file mode 100644 index 000000000000..069ca8fd059c --- /dev/null +++ b/drivers/net/dsa/sja1105/sja1105_static_config.h | |||
@@ -0,0 +1,253 @@ | |||
1 | /* SPDX-License-Identifier: BSD-3-Clause | ||
2 | * Copyright (c) 2016-2018, NXP Semiconductors | ||
3 | * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> | ||
4 | */ | ||
5 | #ifndef _SJA1105_STATIC_CONFIG_H | ||
6 | #define _SJA1105_STATIC_CONFIG_H | ||
7 | |||
8 | #include <linux/packing.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <asm/types.h> | ||
11 | |||
12 | #define SJA1105_SIZE_DEVICE_ID 4 | ||
13 | #define SJA1105_SIZE_TABLE_HEADER 12 | ||
14 | #define SJA1105_SIZE_L2_POLICING_ENTRY 8 | ||
15 | #define SJA1105_SIZE_VLAN_LOOKUP_ENTRY 8 | ||
16 | #define SJA1105_SIZE_L2_FORWARDING_ENTRY 8 | ||
17 | #define SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY 12 | ||
18 | #define SJA1105_SIZE_XMII_PARAMS_ENTRY 4 | ||
19 | #define SJA1105ET_SIZE_L2_LOOKUP_ENTRY 12 | ||
20 | #define SJA1105ET_SIZE_MAC_CONFIG_ENTRY 28 | ||
21 | #define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY 4 | ||
22 | #define SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY 40 | ||
23 | #define SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY 20 | ||
24 | #define SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY 32 | ||
25 | #define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY 16 | ||
26 | #define SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY 44 | ||
27 | |||
28 | /* UM10944.pdf Page 11, Table 2. Configuration Blocks */ | ||
29 | enum { | ||
30 | BLKID_L2_LOOKUP = 0x05, | ||
31 | BLKID_L2_POLICING = 0x06, | ||
32 | BLKID_VLAN_LOOKUP = 0x07, | ||
33 | BLKID_L2_FORWARDING = 0x08, | ||
34 | BLKID_MAC_CONFIG = 0x09, | ||
35 | BLKID_L2_LOOKUP_PARAMS = 0x0D, | ||
36 | BLKID_L2_FORWARDING_PARAMS = 0x0E, | ||
37 | BLKID_GENERAL_PARAMS = 0x11, | ||
38 | BLKID_XMII_PARAMS = 0x4E, | ||
39 | }; | ||
40 | |||
41 | enum sja1105_blk_idx { | ||
42 | BLK_IDX_L2_LOOKUP = 0, | ||
43 | BLK_IDX_L2_POLICING, | ||
44 | BLK_IDX_VLAN_LOOKUP, | ||
45 | BLK_IDX_L2_FORWARDING, | ||
46 | BLK_IDX_MAC_CONFIG, | ||
47 | BLK_IDX_L2_LOOKUP_PARAMS, | ||
48 | BLK_IDX_L2_FORWARDING_PARAMS, | ||
49 | BLK_IDX_GENERAL_PARAMS, | ||
50 | BLK_IDX_XMII_PARAMS, | ||
51 | BLK_IDX_MAX, | ||
52 | /* Fake block indices that are only valid for dynamic access */ | ||
53 | BLK_IDX_MGMT_ROUTE, | ||
54 | BLK_IDX_MAX_DYN, | ||
55 | BLK_IDX_INVAL = -1, | ||
56 | }; | ||
57 | |||
58 | #define SJA1105_MAX_L2_LOOKUP_COUNT 1024 | ||
59 | #define SJA1105_MAX_L2_POLICING_COUNT 45 | ||
60 | #define SJA1105_MAX_VLAN_LOOKUP_COUNT 4096 | ||
61 | #define SJA1105_MAX_L2_FORWARDING_COUNT 13 | ||
62 | #define SJA1105_MAX_MAC_CONFIG_COUNT 5 | ||
63 | #define SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT 1 | ||
64 | #define SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT 1 | ||
65 | #define SJA1105_MAX_GENERAL_PARAMS_COUNT 1 | ||
66 | #define SJA1105_MAX_XMII_PARAMS_COUNT 1 | ||
67 | |||
68 | #define SJA1105_MAX_FRAME_MEMORY 929 | ||
69 | |||
70 | #define SJA1105E_DEVICE_ID 0x9C00000Cull | ||
71 | #define SJA1105T_DEVICE_ID 0x9E00030Eull | ||
72 | #define SJA1105PR_DEVICE_ID 0xAF00030Eull | ||
73 | #define SJA1105QS_DEVICE_ID 0xAE00030Eull | ||
74 | |||
75 | #define SJA1105ET_PART_NO 0x9A83 | ||
76 | #define SJA1105P_PART_NO 0x9A84 | ||
77 | #define SJA1105Q_PART_NO 0x9A85 | ||
78 | #define SJA1105R_PART_NO 0x9A86 | ||
79 | #define SJA1105S_PART_NO 0x9A87 | ||
80 | |||
81 | struct sja1105_general_params_entry { | ||
82 | u64 vllupformat; | ||
83 | u64 mirr_ptacu; | ||
84 | u64 switchid; | ||
85 | u64 hostprio; | ||
86 | u64 mac_fltres1; | ||
87 | u64 mac_fltres0; | ||
88 | u64 mac_flt1; | ||
89 | u64 mac_flt0; | ||
90 | u64 incl_srcpt1; | ||
91 | u64 incl_srcpt0; | ||
92 | u64 send_meta1; | ||
93 | u64 send_meta0; | ||
94 | u64 casc_port; | ||
95 | u64 host_port; | ||
96 | u64 mirr_port; | ||
97 | u64 vlmarker; | ||
98 | u64 vlmask; | ||
99 | u64 tpid; | ||
100 | u64 ignore2stf; | ||
101 | u64 tpid2; | ||
102 | /* P/Q/R/S only */ | ||
103 | u64 queue_ts; | ||
104 | u64 egrmirrvid; | ||
105 | u64 egrmirrpcp; | ||
106 | u64 egrmirrdei; | ||
107 | u64 replay_port; | ||
108 | }; | ||
109 | |||
110 | struct sja1105_vlan_lookup_entry { | ||
111 | u64 ving_mirr; | ||
112 | u64 vegr_mirr; | ||
113 | u64 vmemb_port; | ||
114 | u64 vlan_bc; | ||
115 | u64 tag_port; | ||
116 | u64 vlanid; | ||
117 | }; | ||
118 | |||
119 | struct sja1105_l2_lookup_entry { | ||
120 | u64 vlanid; | ||
121 | u64 macaddr; | ||
122 | u64 destports; | ||
123 | u64 enfport; | ||
124 | u64 index; | ||
125 | }; | ||
126 | |||
127 | struct sja1105_l2_lookup_params_entry { | ||
128 | u64 maxage; /* Shared */ | ||
129 | u64 dyn_tbsz; /* E/T only */ | ||
130 | u64 poly; /* E/T only */ | ||
131 | u64 shared_learn; /* Shared */ | ||
132 | u64 no_enf_hostprt; /* Shared */ | ||
133 | u64 no_mgmt_learn; /* Shared */ | ||
134 | }; | ||
135 | |||
136 | struct sja1105_l2_forwarding_entry { | ||
137 | u64 bc_domain; | ||
138 | u64 reach_port; | ||
139 | u64 fl_domain; | ||
140 | u64 vlan_pmap[8]; | ||
141 | }; | ||
142 | |||
143 | struct sja1105_l2_forwarding_params_entry { | ||
144 | u64 max_dynp; | ||
145 | u64 part_spc[8]; | ||
146 | }; | ||
147 | |||
148 | struct sja1105_l2_policing_entry { | ||
149 | u64 sharindx; | ||
150 | u64 smax; | ||
151 | u64 rate; | ||
152 | u64 maxlen; | ||
153 | u64 partition; | ||
154 | }; | ||
155 | |||
156 | struct sja1105_mac_config_entry { | ||
157 | u64 top[8]; | ||
158 | u64 base[8]; | ||
159 | u64 enabled[8]; | ||
160 | u64 ifg; | ||
161 | u64 speed; | ||
162 | u64 tp_delin; | ||
163 | u64 tp_delout; | ||
164 | u64 maxage; | ||
165 | u64 vlanprio; | ||
166 | u64 vlanid; | ||
167 | u64 ing_mirr; | ||
168 | u64 egr_mirr; | ||
169 | u64 drpnona664; | ||
170 | u64 drpdtag; | ||
171 | u64 drpuntag; | ||
172 | u64 retag; | ||
173 | u64 dyn_learn; | ||
174 | u64 egress; | ||
175 | u64 ingress; | ||
176 | }; | ||
177 | |||
178 | struct sja1105_xmii_params_entry { | ||
179 | u64 phy_mac[5]; | ||
180 | u64 xmii_mode[5]; | ||
181 | }; | ||
182 | |||
183 | struct sja1105_table_header { | ||
184 | u64 block_id; | ||
185 | u64 len; | ||
186 | u64 crc; | ||
187 | }; | ||
188 | |||
189 | struct sja1105_table_ops { | ||
190 | size_t (*packing)(void *buf, void *entry_ptr, enum packing_op op); | ||
191 | size_t unpacked_entry_size; | ||
192 | size_t packed_entry_size; | ||
193 | size_t max_entry_count; | ||
194 | }; | ||
195 | |||
196 | struct sja1105_table { | ||
197 | const struct sja1105_table_ops *ops; | ||
198 | size_t entry_count; | ||
199 | void *entries; | ||
200 | }; | ||
201 | |||
202 | struct sja1105_static_config { | ||
203 | u64 device_id; | ||
204 | struct sja1105_table tables[BLK_IDX_MAX]; | ||
205 | }; | ||
206 | |||
207 | extern struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX]; | ||
208 | extern struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX]; | ||
209 | extern struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX]; | ||
210 | extern struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX]; | ||
211 | extern struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX]; | ||
212 | extern struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX]; | ||
213 | |||
214 | size_t sja1105_table_header_packing(void *buf, void *hdr, enum packing_op op); | ||
215 | void | ||
216 | sja1105_table_header_pack_with_crc(void *buf, struct sja1105_table_header *hdr); | ||
217 | size_t | ||
218 | sja1105_static_config_get_length(const struct sja1105_static_config *config); | ||
219 | |||
220 | typedef enum { | ||
221 | SJA1105_CONFIG_OK = 0, | ||
222 | SJA1105_MISSING_L2_POLICING_TABLE, | ||
223 | SJA1105_MISSING_L2_FORWARDING_TABLE, | ||
224 | SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE, | ||
225 | SJA1105_MISSING_GENERAL_PARAMS_TABLE, | ||
226 | SJA1105_MISSING_VLAN_TABLE, | ||
227 | SJA1105_MISSING_XMII_TABLE, | ||
228 | SJA1105_MISSING_MAC_TABLE, | ||
229 | SJA1105_OVERCOMMITTED_FRAME_MEMORY, | ||
230 | } sja1105_config_valid_t; | ||
231 | |||
232 | extern const char *sja1105_static_config_error_msg[]; | ||
233 | |||
234 | sja1105_config_valid_t | ||
235 | sja1105_static_config_check_valid(const struct sja1105_static_config *config); | ||
236 | void | ||
237 | sja1105_static_config_pack(void *buf, struct sja1105_static_config *config); | ||
238 | int sja1105_static_config_init(struct sja1105_static_config *config, | ||
239 | const struct sja1105_table_ops *static_ops, | ||
240 | u64 device_id); | ||
241 | void sja1105_static_config_free(struct sja1105_static_config *config); | ||
242 | |||
243 | int sja1105_table_delete_entry(struct sja1105_table *table, int i); | ||
244 | int sja1105_table_resize(struct sja1105_table *table, size_t new_count); | ||
245 | |||
246 | u32 sja1105_crc32(const void *buf, size_t len); | ||
247 | |||
248 | void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len); | ||
249 | void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len); | ||
250 | void sja1105_packing(void *buf, u64 *val, int start, int end, | ||
251 | size_t len, enum packing_op op); | ||
252 | |||
253 | #endif | ||
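For reference, the packing helpers declared above take a (start, end) bit range, with start being the higher bit index, within a len-byte buffer, exactly as used by the *_entry_packing functions earlier in this patch; a standalone sketch (the demo function is hypothetical):

static void sja1105_packing_demo(void)
{
	u8 buf[SJA1105_SIZE_VLAN_LOOKUP_ENTRY] = {0};
	u64 vlanid = 100, readback = 0;

	/* Same field placement as sja1105_vlan_lookup_entry_packing():
	 * the 12-bit VLANID occupies bits 38..27 of the 8-byte entry.
	 */
	sja1105_pack(buf, &vlanid, 38, 27, SJA1105_SIZE_VLAN_LOOKUP_ENTRY);
	sja1105_unpack(buf, &readback, 38, 27, SJA1105_SIZE_VLAN_LOOKUP_ENTRY);
	/* readback is now 100 */
}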