-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/Makefile        |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h         |  53
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c  |   8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c    |  40
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c     | 241
5 files changed, 312 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 91d8a885deba..20390f6afbb4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
 
 cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
               cxgb4_uld.o srq.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
-              cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \
+              cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o cxgb4_mps.o \
               cudbg_common.o cudbg_lib.o cudbg_zlib.o
 cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
 cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index db2ec46ba6b6..1fbb640e896a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -905,10 +905,6 @@ struct mbox_list {
         struct list_head list;
 };
 
-struct mps_encap_entry {
-        atomic_t refcnt;
-};
-
 #if IS_ENABLED(CONFIG_THERMAL)
 struct ch_thermal {
         struct thermal_zone_device *tzdev;
@@ -917,6 +913,14 @@ struct ch_thermal {
 };
 #endif
 
+struct mps_entries_ref {
+        struct list_head list;
+        u8 addr[ETH_ALEN];
+        u8 mask[ETH_ALEN];
+        u16 idx;
+        refcount_t refcnt;
+};
+
 struct adapter {
         void __iomem *regs;
         void __iomem *bar2;
@@ -969,7 +973,6 @@ struct adapter {
         unsigned int rawf_start;
         unsigned int rawf_cnt;
         struct smt_data *smt;
-        struct mps_encap_entry *mps_encap;
         struct cxgb4_uld_info *uld;
         void *uld_handle[CXGB4_ULD_MAX];
         unsigned int num_uld;
@@ -977,6 +980,8 @@ struct adapter {
         struct list_head list_node;
         struct list_head rcu_node;
         struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */
+        struct list_head mps_ref;
+        spinlock_t mps_ref_lock; /* lock for syncing mps ref/def activities */
 
         void *iscsi_ppm;
 
@@ -1906,4 +1911,42 @@ int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
                         cpumask_var_t *aff_mask, int idx);
 void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask);
 
+int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
+                     int *tcam_idx, const u8 *addr,
+                     bool persistent, u8 *smt_idx);
+
+int cxgb4_alloc_mac_filt(struct adapter *adap, unsigned int viid,
+                         bool free, unsigned int naddr,
+                         const u8 **addr, u16 *idx,
+                         u64 *hash, bool sleep_ok);
+int cxgb4_free_mac_filt(struct adapter *adap, unsigned int viid,
+                        unsigned int naddr, const u8 **addr, bool sleep_ok);
+int cxgb4_init_mps_ref_entries(struct adapter *adap);
+void cxgb4_free_mps_ref_entries(struct adapter *adap);
+int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
+                               const u8 *addr, const u8 *mask,
+                               unsigned int vni, unsigned int vni_mask,
+                               u8 dip_hit, u8 lookup_type, bool sleep_ok);
+int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
+                              int idx, bool sleep_ok);
+int cxgb4_free_raw_mac_filt(struct adapter *adap,
+                            unsigned int viid,
+                            const u8 *addr,
+                            const u8 *mask,
+                            unsigned int idx,
+                            u8 lookup_type,
+                            u8 port_id,
+                            bool sleep_ok);
+int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
+                             unsigned int viid,
+                             const u8 *addr,
+                             const u8 *mask,
+                             unsigned int idx,
+                             u8 lookup_type,
+                             u8 port_id,
+                             bool sleep_ok);
+int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
+                          int *tcam_idx, const u8 *addr,
+                          bool persistent, u8 *smt_idx);
+
 #endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 6232236d7abc..43b0f8c57da7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -727,10 +727,8 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
                 cxgb4_smt_release(f->smt);
 
         if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
-                if (atomic_dec_and_test(&adap->mps_encap[f->fs.val.ovlan &
-                                                         0x1ff].refcnt))
-                        t4_free_encap_mac_filt(adap, pi->viid,
-                                               f->fs.val.ovlan & 0x1ff, 0);
+                t4_free_encap_mac_filt(adap, pi->viid,
+                                       f->fs.val.ovlan & 0x1ff, 0);
 
         if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
                 cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
@@ -1177,7 +1175,6 @@ static int cxgb4_set_hash_filter(struct net_device *dev,
                 if (ret < 0)
                         goto free_atid;
 
-                atomic_inc(&adapter->mps_encap[ret].refcnt);
                 f->fs.val.ovlan = ret;
                 f->fs.mask.ovlan = 0xffff;
                 f->fs.val.ovlan_vld = 1;
@@ -1420,7 +1417,6 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
                 if (ret < 0)
                         goto free_clip;
 
-                atomic_inc(&adapter->mps_encap[ret].refcnt);
                 f->fs.val.ovlan = ret;
                 f->fs.mask.ovlan = 0x1ff;
                 f->fs.val.ovlan_vld = 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 54908002c786..b08efc48d42f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -366,13 +366,19 @@ static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
         int ret;
         u64 mhash = 0;
         u64 uhash = 0;
+        /* idx stores the index of allocated filters,
+         * its size should be modified based on the number of
+         * MAC addresses that we allocate filters for
+         */
+
+        u16 idx[1] = {};
         bool free = false;
         bool ucast = is_unicast_ether_addr(mac_addr);
         const u8 *maclist[1] = {mac_addr};
         struct hash_mac_addr *new_entry;
 
-        ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
-                                NULL, ucast ? &uhash : &mhash, false);
+        ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
+                                   idx, ucast ? &uhash : &mhash, false);
         if (ret < 0)
                 goto out;
         /* if hash != 0, then add the addr to hash addr list
@@ -410,7 +416,7 @@ static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
                 }
         }
 
-        ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
+        ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
         return ret < 0 ? -EINVAL : 0;
 }
 
@@ -449,9 +455,9 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
  * Addresses are programmed to hash region, if tcam runs out of entries.
  *
  */
-static int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
-                            int *tcam_idx, const u8 *addr, bool persist,
-                            u8 *smt_idx)
+int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
+                     int *tcam_idx, const u8 *addr, bool persist,
+                     u8 *smt_idx)
 {
         struct adapter *adapter = pi->adapter;
         struct hash_mac_addr *entry, *new_entry;
@@ -505,8 +511,8 @@ static int link_start(struct net_device *dev)
         ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                             !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
         if (ret == 0)
-                ret = cxgb4_change_mac(pi, pi->viid, &pi->xact_addr_filt,
-                                       dev->dev_addr, true, &pi->smt_idx);
+                ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
+                                            dev->dev_addr, true, &pi->smt_idx);
         if (ret == 0)
                 ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
                                     &pi->link_cfg);
@@ -3020,8 +3026,8 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
         if (!is_valid_ether_addr(addr->sa_data))
                 return -EADDRNOTAVAIL;
 
-        ret = cxgb4_change_mac(pi, pi->viid, &pi->xact_addr_filt,
-                               addr->sa_data, true, &pi->smt_idx);
+        ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
+                                    addr->sa_data, true, &pi->smt_idx);
         if (ret < 0)
                 return ret;
 
@@ -3273,8 +3279,6 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev,
                                     i);
                         return;
                 }
-                atomic_dec(&adapter->mps_encap[adapter->rawf_start +
-                                               pi->port_id].refcnt);
         }
 }
 
@@ -3363,7 +3367,6 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev,
                         cxgb_del_udp_tunnel(netdev, ti);
                         return;
                 }
-                atomic_inc(&adapter->mps_encap[ret].refcnt);
         }
 }
 
@@ -5446,7 +5449,6 @@ static void free_some_resources(struct adapter *adapter)
 {
         unsigned int i;
 
-        kvfree(adapter->mps_encap);
         kvfree(adapter->smt);
         kvfree(adapter->l2t);
         kvfree(adapter->srq);
@@ -5972,12 +5974,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                 adapter->params.offload = 0;
         }
 
-        adapter->mps_encap = kvcalloc(adapter->params.arch.mps_tcam_size,
-                                      sizeof(struct mps_encap_entry),
-                                      GFP_KERNEL);
-        if (!adapter->mps_encap)
-                dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n");
-
 #if IS_ENABLED(CONFIG_IPV6)
         if (chip_ver <= CHELSIO_T5 &&
             (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
@@ -6053,6 +6049,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         /* check for PCI Express bandwidth capabiltites */
         pcie_print_link_status(pdev);
 
+        cxgb4_init_mps_ref_entries(adapter);
+
         err = init_rss(adapter);
         if (err)
                 goto out_free_dev;
@@ -6179,6 +6177,8 @@ static void remove_one(struct pci_dev *pdev)
 
         disable_interrupts(adapter);
 
+        cxgb4_free_mps_ref_entries(adapter);
+
         for_each_port(adapter, i)
                 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
                         unregister_netdev(adapter->port[i]);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
new file mode 100644
index 000000000000..b1a073eea60b
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Chelsio Communications, Inc. All rights reserved. */
+
+#include "cxgb4.h"
+
+static int cxgb4_mps_ref_dec_by_mac(struct adapter *adap,
+                                    const u8 *addr, const u8 *mask)
+{
+        u8 bitmask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+        struct mps_entries_ref *mps_entry, *tmp;
+        int ret = -EINVAL;
+
+        spin_lock_bh(&adap->mps_ref_lock);
+        list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
+                if (ether_addr_equal(mps_entry->addr, addr) &&
+                    ether_addr_equal(mps_entry->mask, mask ? mask : bitmask)) {
+                        if (!refcount_dec_and_test(&mps_entry->refcnt)) {
+                                spin_unlock_bh(&adap->mps_ref_lock);
+                                return -EBUSY;
+                        }
+                        list_del(&mps_entry->list);
+                        kfree(mps_entry);
+                        ret = 0;
+                        break;
+                }
+        }
+        spin_unlock_bh(&adap->mps_ref_lock);
+        return ret;
+}
+
+static int cxgb4_mps_ref_dec(struct adapter *adap, u16 idx)
+{
+        struct mps_entries_ref *mps_entry, *tmp;
+        int ret = -EINVAL;
+
+        spin_lock(&adap->mps_ref_lock);
+        list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
+                if (mps_entry->idx == idx) {
+                        if (!refcount_dec_and_test(&mps_entry->refcnt)) {
+                                spin_unlock(&adap->mps_ref_lock);
+                                return -EBUSY;
+                        }
+                        list_del(&mps_entry->list);
+                        kfree(mps_entry);
+                        ret = 0;
+                        break;
+                }
+        }
+        spin_unlock(&adap->mps_ref_lock);
+        return ret;
+}
+
+static int cxgb4_mps_ref_inc(struct adapter *adap, const u8 *mac_addr,
+                             u16 idx, const u8 *mask)
+{
+        u8 bitmask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+        struct mps_entries_ref *mps_entry;
+        int ret = 0;
+
+        spin_lock_bh(&adap->mps_ref_lock);
+        list_for_each_entry(mps_entry, &adap->mps_ref, list) {
+                if (mps_entry->idx == idx) {
+                        refcount_inc(&mps_entry->refcnt);
+                        goto unlock;
+                }
+        }
+        mps_entry = kzalloc(sizeof(*mps_entry), GFP_ATOMIC);
+        if (!mps_entry) {
+                ret = -ENOMEM;
+                goto unlock;
+        }
+        ether_addr_copy(mps_entry->mask, mask ? mask : bitmask);
+        ether_addr_copy(mps_entry->addr, mac_addr);
+        mps_entry->idx = idx;
+        refcount_set(&mps_entry->refcnt, 1);
+        list_add_tail(&mps_entry->list, &adap->mps_ref);
+unlock:
+        spin_unlock_bh(&adap->mps_ref_lock);
+        return ret;
+}
+
+int cxgb4_free_mac_filt(struct adapter *adap, unsigned int viid,
+                        unsigned int naddr, const u8 **addr, bool sleep_ok)
+{
+        int ret, i;
+
+        for (i = 0; i < naddr; i++) {
+                if (!cxgb4_mps_ref_dec_by_mac(adap, addr[i], NULL)) {
+                        ret = t4_free_mac_filt(adap, adap->mbox, viid,
+                                               1, &addr[i], sleep_ok);
+                        if (ret < 0)
+                                return ret;
+                }
+        }
+
+        /* return number of filters freed */
+        return naddr;
+}
+
+int cxgb4_alloc_mac_filt(struct adapter *adap, unsigned int viid,
+                         bool free, unsigned int naddr, const u8 **addr,
+                         u16 *idx, u64 *hash, bool sleep_ok)
+{
+        int ret, i;
+
+        ret = t4_alloc_mac_filt(adap, adap->mbox, viid, free,
+                                naddr, addr, idx, hash, sleep_ok);
+        if (ret < 0)
+                return ret;
+
+        for (i = 0; i < naddr; i++) {
+                if (idx[i] != 0xffff) {
+                        if (cxgb4_mps_ref_inc(adap, addr[i], idx[i], NULL)) {
+                                ret = -ENOMEM;
+                                goto error;
+                        }
+                }
+        }
+
+        goto out;
+error:
+        cxgb4_free_mac_filt(adap, viid, naddr, addr, sleep_ok);
+
+out:
+        /* Returns a negative error number or the number of filters allocated */
+        return ret;
+}
+
+int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
+                          int *tcam_idx, const u8 *addr,
+                          bool persistent, u8 *smt_idx)
+{
+        int ret;
+
+        ret = cxgb4_change_mac(pi, viid, tcam_idx,
+                               addr, persistent, smt_idx);
+        if (ret < 0)
+                return ret;
+
+        cxgb4_mps_ref_inc(pi->adapter, addr, *tcam_idx, NULL);
+        return ret;
+}
+
+int cxgb4_free_raw_mac_filt(struct adapter *adap,
+                            unsigned int viid,
+                            const u8 *addr,
+                            const u8 *mask,
+                            unsigned int idx,
+                            u8 lookup_type,
+                            u8 port_id,
+                            bool sleep_ok)
+{
+        int ret = 0;
+
+        if (!cxgb4_mps_ref_dec(adap, idx))
+                ret = t4_free_raw_mac_filt(adap, viid, addr,
+                                           mask, idx, lookup_type,
+                                           port_id, sleep_ok);
+
+        return ret;
+}
+
+int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
+                             unsigned int viid,
+                             const u8 *addr,
+                             const u8 *mask,
+                             unsigned int idx,
+                             u8 lookup_type,
+                             u8 port_id,
+                             bool sleep_ok)
+{
+        int ret;
+
+        ret = t4_alloc_raw_mac_filt(adap, viid, addr,
+                                    mask, idx, lookup_type,
+                                    port_id, sleep_ok);
+        if (ret < 0)
+                return ret;
+
+        if (cxgb4_mps_ref_inc(adap, addr, ret, mask)) {
+                ret = -ENOMEM;
+                t4_free_raw_mac_filt(adap, viid, addr,
+                                     mask, idx, lookup_type,
+                                     port_id, sleep_ok);
+        }
+
+        return ret;
+}
+
+int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
+                              int idx, bool sleep_ok)
+{
+        int ret = 0;
+
+        if (!cxgb4_mps_ref_dec(adap, idx))
+                ret = t4_free_encap_mac_filt(adap, viid, idx, sleep_ok);
+
+        return ret;
+}
+
+int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
+                               const u8 *addr, const u8 *mask,
+                               unsigned int vni, unsigned int vni_mask,
+                               u8 dip_hit, u8 lookup_type, bool sleep_ok)
+{
+        int ret;
+
+        ret = t4_alloc_encap_mac_filt(adap, viid, addr, mask, vni, vni_mask,
+                                      dip_hit, lookup_type, sleep_ok);
+        if (ret < 0)
+                return ret;
+
+        if (cxgb4_mps_ref_inc(adap, addr, ret, mask)) {
+                ret = -ENOMEM;
+                t4_free_encap_mac_filt(adap, viid, ret, sleep_ok);
+        }
+        return ret;
+}
+
+int cxgb4_init_mps_ref_entries(struct adapter *adap)
+{
+        spin_lock_init(&adap->mps_ref_lock);
+        INIT_LIST_HEAD(&adap->mps_ref);
+
+        return 0;
+}
+
+void cxgb4_free_mps_ref_entries(struct adapter *adap)
+{
+        struct mps_entries_ref *mps_entry, *tmp;
+
+        if (!list_empty(&adap->mps_ref))
+                return;
+
+        spin_lock(&adap->mps_ref_lock);
+        list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
+                list_del(&mps_entry->list);
+                kfree(mps_entry);
+        }
+        spin_unlock(&adap->mps_ref_lock);
+}