author    David S. Miller <davem@davemloft.net>  2018-03-31 22:18:27 -0400
committer David S. Miller <davem@davemloft.net>  2018-03-31 22:18:27 -0400
commit    56c03cbf8c4cbd413a19e8541850b0f02958fdcb (patch)
tree      e72614d71a293466f86fb222d54c7660ffb2536e
parent    5e8b270fcf1116d47e4954704fa1b7c58f272622 (diff)
parent    37c3347eb247a79a3371d589175f646ea3a488de (diff)
Merge branch 'thunderx-DMAC-filtering'
Vadim Lomovtsev says:

====================
net: thunderx: implement DMAC filtering support

By default the CN88XX BGX accepts all incoming multicast and broadcast
packets and filtering is disabled. The nic driver does not provide a way
to change this behaviour.

This series implements DMAC filtering management for the CN88XX nic
driver, allowing the user to enable/disable filtering and to configure
specific MAC addresses to filter traffic.

Changes from v1:
 build issues:
  - update code in order to address compiler warnings;
 checkpatch.pl reported issues:
  - update code in order to fit 80 symbols length;
  - update commit descriptions in order to fit 80 symbols length;
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/ethernet/cavium/thunder/nic.h          |  29
 drivers/net/ethernet/cavium/thunder/nic_main.c     |  45
 drivers/net/ethernet/cavium/thunder/nicvf_main.c   | 110
 drivers/net/ethernet/cavium/thunder/thunder_bgx.c  | 201
 drivers/net/ethernet/cavium/thunder/thunder_bgx.h  |  19
 5 files changed, 374 insertions(+), 30 deletions(-)
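The series hangs off the existing 128-bit PF/VF mailbox: a VF cannot pass a
whole address list in one message, so it flushes the CAM filters, adds
addresses one at a time, and finally sets the RX mode. A minimal standalone
C sketch of that VF-side sequence (not part of the patch; program_rx_filters()
and send_to_pf() are made-up names, and the 0xF2-0xF4 IDs are the
NIC_MBOX_MSG_* values added to nic.h below):

/* Hedged sketch of the VF -> PF mailbox sequence used to (re)program DMAC
 * filtering.  send_to_pf() stands in for nicvf_send_msg_to_pf(); the xcast
 * message layout mirrors the one added to nic.h below.
 */
struct xcast_msg {
	unsigned char msg;			/* NIC_MBOX_MSG_* id */
	union {
		unsigned char mode;		/* BGX_XCAST_* bits */
		unsigned long long mac;		/* MAC packed into a u64 */
	} data;
};

static void program_rx_filters(const unsigned long long *macs, int count,
			       unsigned char mode,
			       void (*send_to_pf)(struct xcast_msg *))
{
	struct xcast_msg mbx = { 0 };
	int i;

	mbx.msg = 0xF2;			/* NIC_MBOX_MSG_RESET_XCAST: flush CAM */
	send_to_pf(&mbx);

	for (i = 0; i < count; i++) {	/* one mailbox message per address */
		mbx.msg = 0xF3;		/* NIC_MBOX_MSG_ADD_MCAST */
		mbx.data.mac = macs[i];
		send_to_pf(&mbx);
	}

	mbx.msg = 0xF4;			/* NIC_MBOX_MSG_SET_XCAST */
	mbx.data.mode = mode;		/* e.g. BCAST_ACCEPT | MCAST_FILTER */
	send_to_pf(&mbx);
}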
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 4cacce5d2b16..5fc46c5a4f36 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -265,6 +265,22 @@ struct nicvf_drv_stats {
 
 struct cavium_ptp;
 
+struct xcast_addr {
+	struct list_head list;
+	u64              addr;
+};
+
+struct xcast_addr_list {
+	struct list_head list;
+	int              count;
+};
+
+struct nicvf_work {
+	struct delayed_work    work;
+	u8                     mode;
+	struct xcast_addr_list *mc;
+};
+
 struct nicvf {
 	struct nicvf		*pnicvf;
 	struct net_device	*netdev;
@@ -313,6 +329,7 @@ struct nicvf {
 	struct nicvf_pfc	pfc;
 	struct tasklet_struct	qs_err_task;
 	struct work_struct	reset_task;
+	struct nicvf_work       rx_mode_work;
 
 	/* PTP timestamp */
 	struct cavium_ptp	*ptp_clock;
@@ -403,6 +420,9 @@ struct nicvf {
 #define	NIC_MBOX_MSG_PTP_CFG		0x19	/* HW packet timestamp */
 #define	NIC_MBOX_MSG_CFG_DONE		0xF0	/* VF configuration done */
 #define	NIC_MBOX_MSG_SHUTDOWN		0xF1	/* VF is being shutdown */
+#define	NIC_MBOX_MSG_RESET_XCAST	0xF2	/* Reset DCAM filtering mode */
+#define	NIC_MBOX_MSG_ADD_MCAST		0xF3	/* Add MAC to DCAM filters */
+#define	NIC_MBOX_MSG_SET_XCAST		0xF4	/* Set MCAST/BCAST RX mode */
 
 struct nic_cfg_msg {
 	u8    msg;
@@ -556,6 +576,14 @@ struct set_ptp {
 	bool   enable;
 };
 
+struct xcast {
+	u8    msg;
+	union {
+		u8    mode;
+		u64   mac;
+	} data;
+};
+
 /* 128 bit shared memory between PF and each VF */
 union nic_mbx {
 	struct { u8 msg; }	msg;
@@ -576,6 +604,7 @@ union nic_mbx {
 	struct reset_stat_cfg	reset_stat;
 	struct pfc		pfc;
 	struct set_ptp		ptp;
+	struct xcast            xcast;
 };
 
 #define NIC_NODE_ID_MASK	0x03
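The new xcast message has to fit the 128-bit mailbox that the nic_mbx comment
above describes. A standalone sanity check, assuming the usual 64-bit ABI (not
part of the patch):

#include <assert.h>
#include <stdint.h>

/* Mirrors the xcast message above: u64 alignment pushes 'data' to offset 8,
 * so the whole message is 16 bytes == 128 bits, i.e. it fits the PF/VF
 * mailbox window.  Standalone sketch, not part of the patch.
 */
struct xcast {
	uint8_t msg;
	union {
		uint8_t  mode;
		uint64_t mac;
	} data;
};

static_assert(sizeof(struct xcast) <= 16, "must fit the 128-bit mailbox");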
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 7ff66a8194e2..55af04fa03a7 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -21,6 +21,8 @@
 #define DRV_NAME	"nicpf"
 #define DRV_VERSION	"1.0"
 
+#define NIC_VF_PER_MBX_REG 64
+
 struct hw_info {
 	u8 bgx_cnt;
 	u8 chans_per_lmac;
@@ -1072,6 +1074,40 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 	case NIC_MBOX_MSG_PTP_CFG:
 		nic_config_timestamp(nic, vf, &mbx.ptp);
 		break;
+	case NIC_MBOX_MSG_RESET_XCAST:
+		if (vf >= nic->num_vf_en) {
+			ret = -1; /* NACK */
+			break;
+		}
+		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+		bgx_reset_xcast_mode(nic->node, bgx, lmac,
+				     vf < NIC_VF_PER_MBX_REG ? vf :
+				     vf - NIC_VF_PER_MBX_REG);
+		break;
+
+	case NIC_MBOX_MSG_ADD_MCAST:
+		if (vf >= nic->num_vf_en) {
+			ret = -1; /* NACK */
+			break;
+		}
+		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+		bgx_set_dmac_cam_filter(nic->node, bgx, lmac,
+					mbx.xcast.data.mac,
+					vf < NIC_VF_PER_MBX_REG ? vf :
+					vf - NIC_VF_PER_MBX_REG);
+		break;
+
+	case NIC_MBOX_MSG_SET_XCAST:
+		if (vf >= nic->num_vf_en) {
+			ret = -1; /* NACK */
+			break;
+		}
+		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+		bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode);
+		break;
 	default:
 		dev_err(&nic->pdev->dev,
 			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -1094,7 +1130,7 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
 	struct nicpf *nic = (struct nicpf *)nic_irq;
 	int mbx;
 	u64 intr;
-	u8  vf, vf_per_mbx_reg = 64;
+	u8  vf;
 
 	if (irq == pci_irq_vector(nic->pdev, NIC_PF_INTR_ID_MBOX0))
 		mbx = 0;
@@ -1103,12 +1139,13 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
 
 	intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
 	dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
-	for (vf = 0; vf < vf_per_mbx_reg; vf++) {
+	for (vf = 0; vf < NIC_VF_PER_MBX_REG; vf++) {
 		if (intr & (1ULL << vf)) {
 			dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
-				vf + (mbx * vf_per_mbx_reg));
+				vf + (mbx * NIC_VF_PER_MBX_REG));
 
-			nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
+			nic_handle_mbx_intr(nic, vf +
+					    (mbx * NIC_VF_PER_MBX_REG));
 			nic_clear_mbx_intr(nic, vf, mbx);
 		}
 	}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 73fe3881414b..1e9a31fef729 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -21,6 +21,7 @@
 #include <linux/bpf_trace.h>
 #include <linux/filter.h>
 #include <linux/net_tstamp.h>
+#include <linux/workqueue.h>
 
 #include "nic_reg.h"
 #include "nic.h"
@@ -67,6 +68,9 @@ module_param(cpi_alg, int, 0444);
 MODULE_PARM_DESC(cpi_alg,
 		 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
 
+/* workqueue for handling kernel ndo_set_rx_mode() calls */
+static struct workqueue_struct *nicvf_rx_mode_wq;
+
 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
 {
 	if (nic->sqs_mode)
@@ -1919,6 +1923,100 @@ static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
 	}
 }
 
+static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
+{
+	struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
+						  work.work);
+	struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
+	union nic_mbx mbx = {};
+	struct xcast_addr *xaddr, *next;
+
+	if (!vf_work)
+		return;
+
+	/* From inside the VM we have only 128 bits of mailbox memory
+	 * available to send a message to the host's PF, so send all mc
+	 * addresses one by one, starting with a flush command in case the
+	 * kernel requests specific MAC filtering.
+	 */
+
+	/* flush DMAC filters and reset RX mode */
+	mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
+	nicvf_send_msg_to_pf(nic, &mbx);
+
+	if (vf_work->mode & BGX_XCAST_MCAST_FILTER) {
+		/* once filtering is enabled, we need to signal the PF to add
+		 * its own LMAC to the filter so packets for it are accepted.
+		 */
+		mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
+		mbx.xcast.data.mac = 0;
+		nicvf_send_msg_to_pf(nic, &mbx);
+	}
+
+	/* check if we have any specific MACs to be added to PF DMAC filter */
+	if (vf_work->mc) {
+		/* now go through kernel list of MACs and add them one by one */
+		list_for_each_entry_safe(xaddr, next,
+					 &vf_work->mc->list, list) {
+			mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
+			mbx.xcast.data.mac = xaddr->addr;
+			nicvf_send_msg_to_pf(nic, &mbx);
+
+			/* after receiving ACK from PF release memory */
+			list_del(&xaddr->list);
+			kfree(xaddr);
+			vf_work->mc->count--;
+		}
+		kfree(vf_work->mc);
+	}
+
+	/* and finally set rx mode for PF accordingly */
+	mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
+	mbx.xcast.data.mode = vf_work->mode;
+
+	nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_set_rx_mode(struct net_device *netdev)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+	struct netdev_hw_addr *ha;
+	struct xcast_addr_list *mc_list = NULL;
+	u8 mode = 0;
+
+	if (netdev->flags & IFF_PROMISC) {
+		mode = BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT;
+	} else {
+		if (netdev->flags & IFF_BROADCAST)
+			mode |= BGX_XCAST_BCAST_ACCEPT;
+
+		if (netdev->flags & IFF_ALLMULTI) {
+			mode |= BGX_XCAST_MCAST_ACCEPT;
+		} else if (netdev->flags & IFF_MULTICAST) {
+			mode |= BGX_XCAST_MCAST_FILTER;
+			/* here we need to copy mc addrs */
+			if (netdev_mc_count(netdev)) {
+				struct xcast_addr *xaddr;
+
+				mc_list = kmalloc(sizeof(*mc_list), GFP_ATOMIC);
+				INIT_LIST_HEAD(&mc_list->list);
+				netdev_hw_addr_list_for_each(ha, &netdev->mc) {
+					xaddr = kmalloc(sizeof(*xaddr),
+							GFP_ATOMIC);
+					xaddr->addr =
+						ether_addr_to_u64(ha->addr);
+					list_add_tail(&xaddr->list,
+						      &mc_list->list);
+					mc_list->count++;
+				}
+			}
+		}
+	}
+	nic->rx_mode_work.mc = mc_list;
+	nic->rx_mode_work.mode = mode;
+	queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 2 * HZ);
+}
+
 static const struct net_device_ops nicvf_netdev_ops = {
 	.ndo_open		= nicvf_open,
 	.ndo_stop		= nicvf_stop,
@@ -1931,6 +2029,7 @@ static const struct net_device_ops nicvf_netdev_ops = {
 	.ndo_set_features       = nicvf_set_features,
 	.ndo_bpf		= nicvf_xdp,
 	.ndo_do_ioctl           = nicvf_ioctl,
+	.ndo_set_rx_mode        = nicvf_set_rx_mode,
 };
 
 static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -2071,6 +2170,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	INIT_WORK(&nic->reset_task, nicvf_reset_task);
 
+	INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
+
 	err = register_netdev(netdev);
 	if (err) {
 		dev_err(dev, "Failed to register netdevice\n");
@@ -2109,6 +2210,8 @@ static void nicvf_remove(struct pci_dev *pdev)
 	nic = netdev_priv(netdev);
 	pnetdev = nic->pnicvf->netdev;
 
+	cancel_delayed_work_sync(&nic->rx_mode_work.work);
+
 	/* Check if this Qset is assigned to different VF.
 	 * If yes, clean primary and all secondary Qsets.
 	 */
@@ -2140,12 +2243,17 @@ static struct pci_driver nicvf_driver = {
 static int __init nicvf_init_module(void)
 {
 	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
-
+	nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
+						   WQ_MEM_RECLAIM);
 	return pci_register_driver(&nicvf_driver);
 }
 
 static void __exit nicvf_cleanup_module(void)
 {
+	if (nicvf_rx_mode_wq) {
+		destroy_workqueue(nicvf_rx_mode_wq);
+		nicvf_rx_mode_wq = NULL;
+	}
 	pci_unregister_driver(&nicvf_driver);
 }
 
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 91d34ea40e2c..5d08d2aeb172 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -24,9 +24,31 @@
 #define DRV_NAME	"thunder_bgx"
 #define DRV_VERSION	"1.0"
 
+/* RX_DMAC_CTL configuration */
+enum MCAST_MODE {
+	MCAST_MODE_REJECT = 0x0,
+	MCAST_MODE_ACCEPT = 0x1,
+	MCAST_MODE_CAM_FILTER = 0x2,
+	RSVD = 0x3
+};
+
+#define BCAST_ACCEPT      BIT(0)
+#define CAM_ACCEPT        BIT(3)
+#define MCAST_MODE_MASK   0x3
+#define BGX_MCAST_MODE(x) (x << 1)
+
+struct dmac_map {
+	u64			vf_map;
+	u64			dmac;
+};
+
 struct lmac {
 	struct bgx		*bgx;
-	int			dmac;
+	/* actual number of DMACs configured */
+	u8			dmacs_cfg;
+	/* overall number of DMACs that can be configured per LMAC */
+	u8			dmacs_count;
+	struct dmac_map		*dmacs; /* DMAC:VFs tracking filter array */
 	u8			mac[ETH_ALEN];
 	u8                      lmac_type;
 	u8                      lane_to_sds;
@@ -223,6 +245,163 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
 }
 EXPORT_SYMBOL(bgx_set_lmac_mac);
 
+static void bgx_flush_dmac_cam_filter(struct bgx *bgx, int lmacid)
+{
+	struct lmac *lmac = NULL;
+	u8 idx = 0;
+
+	lmac = &bgx->lmac[lmacid];
+	/* reset CAM filters */
+	for (idx = 0; idx < lmac->dmacs_count; idx++)
+		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
+			      ((lmacid * lmac->dmacs_count) + idx) *
+			      sizeof(u64), 0);
+}
+
+static void bgx_lmac_remove_filters(struct lmac *lmac, u8 vf_id)
+{
+	int i = 0;
+
+	if (!lmac)
+		return;
+
+	/* We've got a reset filters request from one of the attached VFs,
+	 * while the others might want to keep their configuration. So
+	 * iterate over all configured filters and decrease the number of
+	 * references; if an address drops to zero refs, remove it from the
+	 * list.
+	 */
+	for (i = lmac->dmacs_cfg - 1; i >= 0; i--) {
+		lmac->dmacs[i].vf_map &= ~BIT_ULL(vf_id);
+		if (!lmac->dmacs[i].vf_map) {
+			lmac->dmacs_cfg--;
+			lmac->dmacs[i].dmac = 0;
+			lmac->dmacs[i].vf_map = 0;
+		}
+	}
+}
+
+static int bgx_lmac_save_filter(struct lmac *lmac, u64 dmac, u8 vf_id)
+{
+	u8 i = 0;
+
+	if (!lmac)
+		return -1;
+
+	/* Several VFs can be 'attached' to a particular LMAC, and each VF is
+	 * represented as a network interface to the kernel. So from the
+	 * user's perspective it should be possible to manipulate each VF's
+	 * receive mode. From the PF driver's perspective, however, we need
+	 * to keep track of the filter configurations of the different VFs
+	 * to prevent duplicate filter values.
+	 */
+	for (i = 0; i < lmac->dmacs_cfg; i++) {
+		if (lmac->dmacs[i].dmac == dmac) {
+			lmac->dmacs[i].vf_map |= BIT_ULL(vf_id);
+			return -1;
+		}
+	}
+
+	if (!(lmac->dmacs_cfg < lmac->dmacs_count))
+		return -1;
+
+	/* keep it for further tracking */
+	lmac->dmacs[lmac->dmacs_cfg].dmac = dmac;
+	lmac->dmacs[lmac->dmacs_cfg].vf_map = BIT_ULL(vf_id);
+	lmac->dmacs_cfg++;
+	return 0;
+}
+
+static int bgx_set_dmac_cam_filter_mac(struct bgx *bgx, int lmacid,
+				       u64 cam_dmac, u8 idx)
+{
+	struct lmac *lmac = NULL;
+	u64 cfg = 0;
+
+	/* skip zero addresses as meaningless */
+	if (!cam_dmac || !bgx)
+		return -1;
+
+	lmac = &bgx->lmac[lmacid];
+
+	/* configure DCAM filtering for designated LMAC */
+	cfg = RX_DMACX_CAM_LMACID(lmacid & LMAC_ID_MASK) |
+		RX_DMACX_CAM_EN | cam_dmac;
+	bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
+		      ((lmacid * lmac->dmacs_count) + idx) * sizeof(u64), cfg);
+	return 0;
+}
+
+void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid,
+			     u64 cam_dmac, u8 vf_id)
+{
+	struct bgx *bgx = get_bgx(node, bgx_idx);
+	struct lmac *lmac = NULL;
+
+	if (!bgx)
+		return;
+
+	lmac = &bgx->lmac[lmacid];
+
+	if (!cam_dmac)
+		cam_dmac = ether_addr_to_u64(lmac->mac);
+
+	/* since we might have several VFs attached to a particular LMAC and
+	 * the kernel could call mcast config for each of them with the same
+	 * MAC, check whether the requested MAC is already in the filtering
+	 * list and update/prepare the list of MACs to be applied to the HW
+	 * filters later
+	 */
+	bgx_lmac_save_filter(lmac, cam_dmac, vf_id);
+}
+EXPORT_SYMBOL(bgx_set_dmac_cam_filter);
+
+void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode)
+{
+	struct bgx *bgx = get_bgx(node, bgx_idx);
+	struct lmac *lmac = NULL;
+	u64 cfg = 0;
+	u8 i = 0;
+
+	if (!bgx)
+		return;
+
+	lmac = &bgx->lmac[lmacid];
+
+	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL);
+	if (mode & BGX_XCAST_BCAST_ACCEPT)
+		cfg |= BCAST_ACCEPT;
+	else
+		cfg &= ~BCAST_ACCEPT;
+
+	/* disable all MCASTs and DMAC filtering */
+	cfg &= ~(CAM_ACCEPT | BGX_MCAST_MODE(MCAST_MODE_MASK));
+
+	/* check requested bits and set filtering mode appropriately */
+	if (mode & (BGX_XCAST_MCAST_ACCEPT)) {
+		cfg |= (BGX_MCAST_MODE(MCAST_MODE_ACCEPT));
+	} else if (mode & BGX_XCAST_MCAST_FILTER) {
+		cfg |= (BGX_MCAST_MODE(MCAST_MODE_CAM_FILTER) | CAM_ACCEPT);
+		for (i = 0; i < lmac->dmacs_cfg; i++)
+			bgx_set_dmac_cam_filter_mac(bgx, lmacid,
+						    lmac->dmacs[i].dmac, i);
+	}
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, cfg);
+}
+EXPORT_SYMBOL(bgx_set_xcast_mode);
+
+void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf_id)
+{
+	struct bgx *bgx = get_bgx(node, bgx_idx);
+
+	if (!bgx)
+		return;
+
+	bgx_lmac_remove_filters(&bgx->lmac[lmacid], vf_id);
+	bgx_flush_dmac_cam_filter(bgx, lmacid);
+	bgx_set_xcast_mode(node, bgx_idx, lmacid,
+			   (BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT));
+}
+EXPORT_SYMBOL(bgx_reset_xcast_mode);
+
 void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
 {
 	struct bgx *bgx = get_bgx(node, bgx_idx);
@@ -468,18 +647,6 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
 }
 EXPORT_SYMBOL(bgx_get_tx_stats);
 
-static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
-{
-	u64 offset;
-
-	while (bgx->lmac[lmac].dmac > 0) {
-		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
-			 (lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
-		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
-		bgx->lmac[lmac].dmac--;
-	}
-}
-
 /* Configure BGX LMAC in internal loopback mode */
 void bgx_lmac_internal_loopback(int node, int bgx_idx,
 				int lmac_idx, bool enable)
@@ -912,6 +1079,11 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
 		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
 	}
 
+	/* actual number of filters available to this LMAC */
+	lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
+	lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
+			      GFP_KERNEL);
+
 	/* Enable lmac */
 	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
 
@@ -998,7 +1170,8 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
 	cfg &= ~CMR_EN;
 	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
 
-	bgx_flush_dmac_addrs(bgx, lmacid);
+	bgx_flush_dmac_cam_filter(bgx, lmacid);
+	kfree(lmac->dmacs);
 
 	if ((lmac->lmac_type != BGX_MODE_XFI) &&
 	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
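The BGX CAM is a single pool of RX_DMAC_COUNT (32) entries shared by all LMACs
of a BGX, so bgx_lmac_enable() above carves it into equal slices of
32 / lmac_count entries, and the filter helpers index it as
(lmacid * dmacs_count + idx). A small standalone sketch of that arithmetic
(not part of the patch; the 4-LMAC configuration is only an example):

/* Standalone sketch of the CAM slicing used above.  With a 4-LMAC BGX each
 * LMAC owns 32 / 4 = 8 CAM entries, and entry 'idx' of LMAC 'lmacid' sits at
 * BGX_CMR_RX_DMACX_CAM + ((lmacid * 8) + idx) * 8.
 */
#define RX_DMAC_COUNT		32
#define BGX_CMR_RX_DMACX_CAM	0x200

static unsigned long cam_entry_offset(int lmac_count, int lmacid, int idx)
{
	int dmacs_per_lmac = RX_DMAC_COUNT / lmac_count;	/* 8 for 4 LMACs */

	return BGX_CMR_RX_DMACX_CAM +
	       ((lmacid * dmacs_per_lmac) + idx) * sizeof(unsigned long long);
}

/* e.g. cam_entry_offset(4, 2, 3) == 0x200 + (2 * 8 + 3) * 8 == 0x298 */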
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 5a7567d31138..cbdd20b9ee6f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -30,6 +30,7 @@
 #define    DEFAULT_PAUSE_TIME			0xFFFF
 
 #define	BGX_ID_MASK				0x3
+#define	LMAC_ID_MASK				0x3
 
 #define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE	2
 
@@ -57,7 +58,7 @@
 #define BGX_CMRX_RX_FIFO_LEN		0x108
 #define BGX_CMR_RX_DMACX_CAM		0x200
 #define  RX_DMACX_CAM_EN			BIT_ULL(48)
-#define  RX_DMACX_CAM_LMACID(x)			(x << 49)
+#define  RX_DMACX_CAM_LMACID(x)			(((u64)x) << 49)
 #define RX_DMAC_COUNT			32
 #define BGX_CMR_RX_STREERING		0x300
 #define RX_TRAFFIC_STEER_RULE_COUNT	8
@@ -205,17 +206,13 @@
 #define LMAC_INTR_LINK_UP		BIT(0)
 #define LMAC_INTR_LINK_DOWN		BIT(1)
 
-/* RX_DMAC_CTL configuration*/
-enum MCAST_MODE {
-	MCAST_MODE_REJECT,
-	MCAST_MODE_ACCEPT,
-	MCAST_MODE_CAM_FILTER,
-	RSVD
-};
-
-#define BCAST_ACCEPT	1
-#define CAM_ACCEPT	1
+#define BGX_XCAST_BCAST_ACCEPT  BIT(0)
+#define BGX_XCAST_MCAST_ACCEPT  BIT(1)
+#define BGX_XCAST_MCAST_FILTER  BIT(2)
 
+void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid, u64 mac, u8 vf);
+void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf);
+void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode);
 void octeon_mdiobus_force_mod_depencency(void);
 void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
 void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);