author     Ben Hutchings <bhutchings@solarflare.com>   2012-11-07 20:46:53 -0500
committer  Ben Hutchings <bhutchings@solarflare.com>   2013-08-22 14:25:57 -0400
commit     add7247718c003c8f7c275954083f7db85405bd9 (patch)
tree       a555da7e564bc67689cfc1bff6cf14ccd0907b45
parent     9a0a943321cc89a9efc8726e28d8473eafa73e29 (diff)
sfc: Make most filter operations NIC-type-specific
Aside from accelerated RFS, there is almost nothing that can be shared between the filter table implementations for the Falcon architecture and EF10.

Move the few shared functions into efx.c and rx.c and the rest into farch.c. Introduce efx_nic_type operations for the implementation and inline wrapper functions that call these.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
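Editor's illustration (not part of the patch): the commit message describes replacing direct calls into filter.c with per-NIC-type operations plus thin inline wrappers, and the standalone C sketch below shows that dispatch pattern in miniature. The names here (struct nic_type, farch_filter_insert, filter_insert_filter) are simplified stand-ins chosen to mirror efx_nic_type, efx_farch_filter_insert and efx_filter_insert_filter; they are not the driver's real definitions.

/* Minimal sketch of the method-table-plus-inline-wrapper pattern;
 * names are hypothetical stand-ins, not the sfc driver's own code.
 */
#include <stdio.h>

struct nic;				/* forward declaration */

struct nic_type {
	/* per-implementation filter op, cf. efx->type->filter_insert */
	int (*filter_insert)(struct nic *nic, int spec, int replace_equal);
};

struct nic {
	const struct nic_type *type;	/* chosen at probe time */
};

/* A Falcon-architecture-style implementation. */
static int farch_filter_insert(struct nic *nic, int spec, int replace_equal)
{
	(void)nic;
	(void)replace_equal;
	printf("farch filter insert: spec=%d\n", spec);
	return spec;			/* pretend the spec is the filter ID */
}

static const struct nic_type farch_nic_type = {
	.filter_insert = farch_filter_insert,
};

/* Generic inline wrapper: callers never name the implementation. */
static inline int filter_insert_filter(struct nic *nic, int spec,
				       int replace_equal)
{
	return nic->type->filter_insert(nic, spec, replace_equal);
}

int main(void)
{
	struct nic nic = { .type = &farch_nic_type };

	return filter_insert_filter(&nic, 42, 1) == 42 ? 0 : 1;
}

The benefit, as in the patch, is that generic code such as efx.c only ever calls through the operations table, so a second implementation (for example EF10) can be added by filling in another operations structure without touching the callers.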
-rw-r--r--   drivers/net/ethernet/sfc/Makefile           1
-rw-r--r--   drivers/net/ethernet/sfc/efx.c             40
-rw-r--r--   drivers/net/ethernet/sfc/efx.h            107
-rw-r--r--   drivers/net/ethernet/sfc/falcon.c          31
-rw-r--r--   drivers/net/ethernet/sfc/farch.c         1108
-rw-r--r--   drivers/net/ethernet/sfc/filter.c        1244
-rw-r--r--   drivers/net/ethernet/sfc/net_driver.h      46
-rw-r--r--   drivers/net/ethernet/sfc/nic.h             28
-rw-r--r--   drivers/net/ethernet/sfc/rx.c              94
-rw-r--r--   drivers/net/ethernet/sfc/siena.c           16
10 files changed, 1448 insertions(+), 1267 deletions(-)
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index ef7410f014d6..a61272661a73 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,5 +1,4 @@
 sfc-y			+= efx.o nic.o farch.o falcon.o siena.o tx.o rx.o \
-			   filter.o \
 			   selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
 			   tenxpress.o txc43128_phy.o falcon_boards.o \
 			   mcdi.o mcdi_port.o mcdi_mon.o ptp.o
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 49d06ca79d7d..a2daaae266d7 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -611,7 +611,7 @@ static void efx_start_datapath(struct efx_nic *efx)
 
 	/* RX filters also have scatter-enabled flags */
 	if (efx->rx_scatter != old_rx_scatter)
-		efx_filter_update_rx_scatter(efx);
+		efx->type->filter_update_rx_scatter(efx);
 
 	/* We must keep at least one descriptor in a TX ring empty.
 	 * We could avoid this when the queue size does not exactly
@@ -1499,6 +1499,44 @@ static void efx_remove_nic(struct efx_nic *efx)
 	efx->type->remove(efx);
 }
 
+static int efx_probe_filters(struct efx_nic *efx)
+{
+	int rc;
+
+	spin_lock_init(&efx->filter_lock);
+
+	rc = efx->type->filter_table_probe(efx);
+	if (rc)
+		return rc;
+
+#ifdef CONFIG_RFS_ACCEL
+	if (efx->type->offload_features & NETIF_F_NTUPLE) {
+		efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
+					   sizeof(*efx->rps_flow_id),
+					   GFP_KERNEL);
+		if (!efx->rps_flow_id) {
+			efx->type->filter_table_remove(efx);
+			return -ENOMEM;
+		}
+	}
+#endif
+
+	return 0;
+}
+
+static void efx_remove_filters(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+	kfree(efx->rps_flow_id);
+#endif
+	efx->type->filter_table_remove(efx);
+}
+
+static void efx_restore_filters(struct efx_nic *efx)
+{
+	efx->type->filter_table_restore(efx);
+}
+
 /**************************************************************************
  *
  * NIC startup/shutdown
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 45de5b9fedbe..9e3573872e57 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -68,27 +68,92 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))
 
 /* Filters */
-extern int efx_probe_filters(struct efx_nic *efx);
-extern void efx_restore_filters(struct efx_nic *efx);
-extern void efx_remove_filters(struct efx_nic *efx);
-extern void efx_filter_update_rx_scatter(struct efx_nic *efx);
-extern s32 efx_filter_insert_filter(struct efx_nic *efx,
-				    struct efx_filter_spec *spec,
-				    bool replace);
-extern int efx_filter_remove_id_safe(struct efx_nic *efx,
-				     enum efx_filter_priority priority,
-				     u32 filter_id);
-extern int efx_filter_get_filter_safe(struct efx_nic *efx,
-				      enum efx_filter_priority priority,
-				      u32 filter_id, struct efx_filter_spec *);
-extern void efx_filter_clear_rx(struct efx_nic *efx,
-				enum efx_filter_priority priority);
-extern u32 efx_filter_count_rx_used(struct efx_nic *efx,
-				    enum efx_filter_priority priority);
-extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx);
-extern s32 efx_filter_get_rx_ids(struct efx_nic *efx,
-				 enum efx_filter_priority priority,
-				 u32 *buf, u32 size);
+
+/**
+ * efx_filter_insert_filter - add or replace a filter
+ * @efx: NIC in which to insert the filter
+ * @spec: Specification for the filter
+ * @replace_equal: Flag for whether the specified filter may replace an
+ *	existing filter with equal priority
+ *
+ * On success, return the filter ID.
+ * On failure, return a negative error code.
+ *
+ * If an existing filter has equal match values to the new filter
+ * spec, then the new filter might replace it, depending on the
+ * relative priorities.  If the existing filter has lower priority, or
+ * if @replace_equal is set and it has equal priority, then it is
+ * replaced.  Otherwise the function fails, returning -%EPERM if
+ * the existing filter has higher priority or -%EEXIST if it has
+ * equal priority.
+ */
+static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
+					   struct efx_filter_spec *spec,
+					   bool replace_equal)
+{
+	return efx->type->filter_insert(efx, spec, replace_equal);
+}
+
+/**
+ * efx_filter_remove_id_safe - remove a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
+					    enum efx_filter_priority priority,
+					    u32 filter_id)
+{
+	return efx->type->filter_remove_safe(efx, priority, filter_id);
+}
+
+/**
+ * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ * @spec: Buffer in which to store filter specification
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int
+efx_filter_get_filter_safe(struct efx_nic *efx,
+			   enum efx_filter_priority priority,
+			   u32 filter_id, struct efx_filter_spec *spec)
+{
+	return efx->type->filter_get_safe(efx, priority, filter_id, spec);
+}
+
+/**
+ * efx_farch_filter_clear_rx - remove RX filters by priority
+ * @efx: NIC from which to remove the filters
+ * @priority: Maximum priority to remove
+ */
+static inline void efx_filter_clear_rx(struct efx_nic *efx,
+				       enum efx_filter_priority priority)
+{
+	return efx->type->filter_clear_rx(efx, priority);
+}
+
+static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
+					   enum efx_filter_priority priority)
+{
+	return efx->type->filter_count_rx_used(efx, priority);
+}
+static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+	return efx->type->filter_get_rx_id_limit(efx);
+}
+static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+					enum efx_filter_priority priority,
+					u32 *buf, u32 size)
+{
+	return efx->type->filter_get_rx_ids(efx, priority, buf, size);
+}
 #ifdef CONFIG_RFS_ACCEL
 extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 			  u16 rxq_index, u32 flow_id);
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 0fd8a88905a6..6ea28f8e8792 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2400,6 +2400,21 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.ev_read_ack = efx_farch_ev_read_ack,
 	.ev_test_generate = efx_farch_ev_test_generate,
 
+	/* We don't expose the filter table on Falcon A1 as it is not
+	 * mapped into function 0, but these implementations still
+	 * work with a degenerate case of all tables set to size 0.
+	 */
+	.filter_table_probe = efx_farch_filter_table_probe,
+	.filter_table_restore = efx_farch_filter_table_restore,
+	.filter_table_remove = efx_farch_filter_table_remove,
+	.filter_insert = efx_farch_filter_insert,
+	.filter_remove_safe = efx_farch_filter_remove_safe,
+	.filter_get_safe = efx_farch_filter_get_safe,
+	.filter_clear_rx = efx_farch_filter_clear_rx,
+	.filter_count_rx_used = efx_farch_filter_count_rx_used,
+	.filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+	.filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+
 	.revision = EFX_REV_FALCON_A1,
 	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
 	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
@@ -2468,6 +2483,21 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.ev_process = efx_farch_ev_process,
 	.ev_read_ack = efx_farch_ev_read_ack,
 	.ev_test_generate = efx_farch_ev_test_generate,
+	.filter_table_probe = efx_farch_filter_table_probe,
+	.filter_table_restore = efx_farch_filter_table_restore,
+	.filter_table_remove = efx_farch_filter_table_remove,
+	.filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
+	.filter_insert = efx_farch_filter_insert,
+	.filter_remove_safe = efx_farch_filter_remove_safe,
+	.filter_get_safe = efx_farch_filter_get_safe,
+	.filter_clear_rx = efx_farch_filter_clear_rx,
+	.filter_count_rx_used = efx_farch_filter_count_rx_used,
+	.filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+	.filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+	.filter_rfs_insert = efx_farch_filter_rfs_insert,
+	.filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
+#endif
 
 	.revision = EFX_REV_FALCON_B0,
 	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
@@ -2483,5 +2513,6 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
 	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
 	.mcdi_max_ver = -1,
+	.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
 };
 
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 7f50882f11ab..19418045f3e0 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1779,3 +1779,1111 @@ void efx_farch_init_common(struct efx_nic *efx)
 		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
 	}
 }
1782
1783/**************************************************************************
1784 *
1785 * Filter tables
1786 *
1787 **************************************************************************
1788 */
1789
1790/* "Fudge factors" - difference between programmed value and actual depth.
1791 * Due to pipelined implementation we need to program H/W with a value that
1792 * is larger than the hop limit we want.
1793 */
1794#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
1795#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
1796
1797/* Hard maximum search limit. Hardware will time-out beyond 200-something.
1798 * We also need to avoid infinite loops in efx_farch_filter_search() when the
1799 * table is full.
1800 */
1801#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
1802
1803/* Don't try very hard to find space for performance hints, as this is
1804 * counter-productive. */
1805#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
1806
1807enum efx_farch_filter_type {
1808 EFX_FARCH_FILTER_TCP_FULL = 0,
1809 EFX_FARCH_FILTER_TCP_WILD,
1810 EFX_FARCH_FILTER_UDP_FULL,
1811 EFX_FARCH_FILTER_UDP_WILD,
1812 EFX_FARCH_FILTER_MAC_FULL = 4,
1813 EFX_FARCH_FILTER_MAC_WILD,
1814 EFX_FARCH_FILTER_UC_DEF = 8,
1815 EFX_FARCH_FILTER_MC_DEF,
1816 EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
1817};
1818
1819enum efx_farch_filter_table_id {
1820 EFX_FARCH_FILTER_TABLE_RX_IP = 0,
1821 EFX_FARCH_FILTER_TABLE_RX_MAC,
1822 EFX_FARCH_FILTER_TABLE_RX_DEF,
1823 EFX_FARCH_FILTER_TABLE_TX_MAC,
1824 EFX_FARCH_FILTER_TABLE_COUNT,
1825};
1826
1827enum efx_farch_filter_index {
1828 EFX_FARCH_FILTER_INDEX_UC_DEF,
1829 EFX_FARCH_FILTER_INDEX_MC_DEF,
1830 EFX_FARCH_FILTER_SIZE_RX_DEF,
1831};
1832
1833struct efx_farch_filter_spec {
1834 u8 type:4;
1835 u8 priority:4;
1836 u8 flags;
1837 u16 dmaq_id;
1838 u32 data[3];
1839};
1840
1841struct efx_farch_filter_table {
1842 enum efx_farch_filter_table_id id;
1843 u32 offset; /* address of table relative to BAR */
1844 unsigned size; /* number of entries */
1845 unsigned step; /* step between entries */
1846 unsigned used; /* number currently used */
1847 unsigned long *used_bitmap;
1848 struct efx_farch_filter_spec *spec;
1849 unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
1850};
1851
1852struct efx_farch_filter_state {
1853 struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
1854};
1855
1856static void
1857efx_farch_filter_table_clear_entry(struct efx_nic *efx,
1858 struct efx_farch_filter_table *table,
1859 unsigned int filter_idx);
1860
1861/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
1862 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
1863static u16 efx_farch_filter_hash(u32 key)
1864{
1865 u16 tmp;
1866
1867 /* First 16 rounds */
1868 tmp = 0x1fff ^ key >> 16;
1869 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1870 tmp = tmp ^ tmp >> 9;
1871 /* Last 16 rounds */
1872 tmp = tmp ^ tmp << 13 ^ key;
1873 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1874 return tmp ^ tmp >> 9;
1875}
1876
1877/* To allow for hash collisions, filter search continues at these
1878 * increments from the first possible entry selected by the hash. */
1879static u16 efx_farch_filter_increment(u32 key)
1880{
1881 return key * 2 - 1;
1882}
1883
1884static enum efx_farch_filter_table_id
1885efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
1886{
1887 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1888 (EFX_FARCH_FILTER_TCP_FULL >> 2));
1889 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1890 (EFX_FARCH_FILTER_TCP_WILD >> 2));
1891 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1892 (EFX_FARCH_FILTER_UDP_FULL >> 2));
1893 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1894 (EFX_FARCH_FILTER_UDP_WILD >> 2));
1895 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
1896 (EFX_FARCH_FILTER_MAC_FULL >> 2));
1897 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
1898 (EFX_FARCH_FILTER_MAC_WILD >> 2));
1899 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
1900 EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
1901 return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
1902}
1903
1904static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
1905{
1906 struct efx_farch_filter_state *state = efx->filter_state;
1907 struct efx_farch_filter_table *table;
1908 efx_oword_t filter_ctl;
1909
1910 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1911
1912 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
1913 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
1914 table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
1915 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1916 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
1917 table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
1918 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1919 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
1920 table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
1921 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1922 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
1923 table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
1924 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1925
1926 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
1927 if (table->size) {
1928 EFX_SET_OWORD_FIELD(
1929 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
1930 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
1931 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1932 EFX_SET_OWORD_FIELD(
1933 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
1934 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
1935 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1936 }
1937
1938 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
1939 if (table->size) {
1940 EFX_SET_OWORD_FIELD(
1941 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
1942 table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
1943 EFX_SET_OWORD_FIELD(
1944 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
1945 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
1946 EFX_FILTER_FLAG_RX_RSS));
1947 EFX_SET_OWORD_FIELD(
1948 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
1949 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
1950 EFX_SET_OWORD_FIELD(
1951 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
1952 !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
1953 EFX_FILTER_FLAG_RX_RSS));
1954
1955 /* There is a single bit to enable RX scatter for all
1956 * unmatched packets. Only set it if scatter is
1957 * enabled in both filter specs.
1958 */
1959 EFX_SET_OWORD_FIELD(
1960 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1961 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
1962 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
1963 EFX_FILTER_FLAG_RX_SCATTER));
1964 } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1965 /* We don't expose 'default' filters because unmatched
1966 * packets always go to the queue number found in the
1967 * RSS table. But we still need to set the RX scatter
1968 * bit here.
1969 */
1970 EFX_SET_OWORD_FIELD(
1971 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1972 efx->rx_scatter);
1973 }
1974
1975 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1976}
1977
1978static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
1979{
1980 struct efx_farch_filter_state *state = efx->filter_state;
1981 struct efx_farch_filter_table *table;
1982 efx_oword_t tx_cfg;
1983
1984 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
1985
1986 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
1987 if (table->size) {
1988 EFX_SET_OWORD_FIELD(
1989 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
1990 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
1991 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1992 EFX_SET_OWORD_FIELD(
1993 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
1994 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
1995 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1996 }
1997
1998 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
1999}
2000
2001static int
2002efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
2003 const struct efx_filter_spec *gen_spec)
2004{
2005 bool is_full = false;
2006
2007 if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
2008 gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
2009 return -EINVAL;
2010
2011 spec->priority = gen_spec->priority;
2012 spec->flags = gen_spec->flags;
2013 spec->dmaq_id = gen_spec->dmaq_id;
2014
2015 switch (gen_spec->match_flags) {
2016 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2017 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
2018 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
2019 is_full = true;
2020 /* fall through */
2021 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2022 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
2023 __be32 rhost, host1, host2;
2024 __be16 rport, port1, port2;
2025
2026 EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
2027
2028 if (gen_spec->ether_type != htons(ETH_P_IP))
2029 return -EPROTONOSUPPORT;
2030 if (gen_spec->loc_port == 0 ||
2031 (is_full && gen_spec->rem_port == 0))
2032 return -EADDRNOTAVAIL;
2033 switch (gen_spec->ip_proto) {
2034 case IPPROTO_TCP:
2035 spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
2036 EFX_FARCH_FILTER_TCP_WILD);
2037 break;
2038 case IPPROTO_UDP:
2039 spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
2040 EFX_FARCH_FILTER_UDP_WILD);
2041 break;
2042 default:
2043 return -EPROTONOSUPPORT;
2044 }
2045
2046 /* Filter is constructed in terms of source and destination,
2047 * with the odd wrinkle that the ports are swapped in a UDP
2048 * wildcard filter. We need to convert from local and remote
2049 * (= zero for wildcard) addresses.
2050 */
2051 rhost = is_full ? gen_spec->rem_host[0] : 0;
2052 rport = is_full ? gen_spec->rem_port : 0;
2053 host1 = rhost;
2054 host2 = gen_spec->loc_host[0];
2055 if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
2056 port1 = gen_spec->loc_port;
2057 port2 = rport;
2058 } else {
2059 port1 = rport;
2060 port2 = gen_spec->loc_port;
2061 }
2062 spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
2063 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
2064 spec->data[2] = ntohl(host2);
2065
2066 break;
2067 }
2068
2069 case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
2070 is_full = true;
2071 /* fall through */
2072 case EFX_FILTER_MATCH_LOC_MAC:
2073 spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
2074 EFX_FARCH_FILTER_MAC_WILD);
2075 spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
2076 spec->data[1] = (gen_spec->loc_mac[2] << 24 |
2077 gen_spec->loc_mac[3] << 16 |
2078 gen_spec->loc_mac[4] << 8 |
2079 gen_spec->loc_mac[5]);
2080 spec->data[2] = (gen_spec->loc_mac[0] << 8 |
2081 gen_spec->loc_mac[1]);
2082 break;
2083
2084 case EFX_FILTER_MATCH_LOC_MAC_IG:
2085 spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
2086 EFX_FARCH_FILTER_MC_DEF :
2087 EFX_FARCH_FILTER_UC_DEF);
2088 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
2089 break;
2090
2091 default:
2092 return -EPROTONOSUPPORT;
2093 }
2094
2095 return 0;
2096}
2097
2098static void
2099efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
2100 const struct efx_farch_filter_spec *spec)
2101{
2102 bool is_full = false;
2103
2104 /* *gen_spec should be completely initialised, to be consistent
2105 * with efx_filter_init_{rx,tx}() and in case we want to copy
2106 * it back to userland.
2107 */
2108 memset(gen_spec, 0, sizeof(*gen_spec));
2109
2110 gen_spec->priority = spec->priority;
2111 gen_spec->flags = spec->flags;
2112 gen_spec->dmaq_id = spec->dmaq_id;
2113
2114 switch (spec->type) {
2115 case EFX_FARCH_FILTER_TCP_FULL:
2116 case EFX_FARCH_FILTER_UDP_FULL:
2117 is_full = true;
2118 /* fall through */
2119 case EFX_FARCH_FILTER_TCP_WILD:
2120 case EFX_FARCH_FILTER_UDP_WILD: {
2121 __be32 host1, host2;
2122 __be16 port1, port2;
2123
2124 gen_spec->match_flags =
2125 EFX_FILTER_MATCH_ETHER_TYPE |
2126 EFX_FILTER_MATCH_IP_PROTO |
2127 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
2128 if (is_full)
2129 gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
2130 EFX_FILTER_MATCH_REM_PORT);
2131 gen_spec->ether_type = htons(ETH_P_IP);
2132 gen_spec->ip_proto =
2133 (spec->type == EFX_FARCH_FILTER_TCP_FULL ||
2134 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
2135 IPPROTO_TCP : IPPROTO_UDP;
2136
2137 host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
2138 port1 = htons(spec->data[0]);
2139 host2 = htonl(spec->data[2]);
2140 port2 = htons(spec->data[1] >> 16);
2141 if (spec->flags & EFX_FILTER_FLAG_TX) {
2142 gen_spec->loc_host[0] = host1;
2143 gen_spec->rem_host[0] = host2;
2144 } else {
2145 gen_spec->loc_host[0] = host2;
2146 gen_spec->rem_host[0] = host1;
2147 }
2148 if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
2149 (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
2150 gen_spec->loc_port = port1;
2151 gen_spec->rem_port = port2;
2152 } else {
2153 gen_spec->loc_port = port2;
2154 gen_spec->rem_port = port1;
2155 }
2156
2157 break;
2158 }
2159
2160 case EFX_FARCH_FILTER_MAC_FULL:
2161 is_full = true;
2162 /* fall through */
2163 case EFX_FARCH_FILTER_MAC_WILD:
2164 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
2165 if (is_full)
2166 gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
2167 gen_spec->loc_mac[0] = spec->data[2] >> 8;
2168 gen_spec->loc_mac[1] = spec->data[2];
2169 gen_spec->loc_mac[2] = spec->data[1] >> 24;
2170 gen_spec->loc_mac[3] = spec->data[1] >> 16;
2171 gen_spec->loc_mac[4] = spec->data[1] >> 8;
2172 gen_spec->loc_mac[5] = spec->data[1];
2173 gen_spec->outer_vid = htons(spec->data[0]);
2174 break;
2175
2176 case EFX_FARCH_FILTER_UC_DEF:
2177 case EFX_FARCH_FILTER_MC_DEF:
2178 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
2179 gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
2180 break;
2181
2182 default:
2183 WARN_ON(1);
2184 break;
2185 }
2186}
2187
2188static void
2189efx_farch_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
2190{
2191 struct efx_farch_filter_state *state = efx->filter_state;
2192 struct efx_farch_filter_table *table =
2193 &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2194 struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
2195
2196 /* If there's only one channel then disable RSS for non VF
2197 * traffic, thereby allowing VFs to use RSS when the PF can't.
2198 */
2199 spec->type = EFX_FARCH_FILTER_UC_DEF + filter_idx;
2200 spec->priority = EFX_FILTER_PRI_MANUAL;
2201 spec->flags = (EFX_FILTER_FLAG_RX |
2202 (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
2203 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
2204 spec->dmaq_id = 0;
2205 table->used_bitmap[0] |= 1 << filter_idx;
2206}
2207
2208/* Build a filter entry and return its n-tuple key. */
2209static u32 efx_farch_filter_build(efx_oword_t *filter,
2210 struct efx_farch_filter_spec *spec)
2211{
2212 u32 data3;
2213
2214 switch (efx_farch_filter_spec_table_id(spec)) {
2215 case EFX_FARCH_FILTER_TABLE_RX_IP: {
2216 bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
2217 spec->type == EFX_FARCH_FILTER_UDP_WILD);
2218 EFX_POPULATE_OWORD_7(
2219 *filter,
2220 FRF_BZ_RSS_EN,
2221 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2222 FRF_BZ_SCATTER_EN,
2223 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2224 FRF_BZ_TCP_UDP, is_udp,
2225 FRF_BZ_RXQ_ID, spec->dmaq_id,
2226 EFX_DWORD_2, spec->data[2],
2227 EFX_DWORD_1, spec->data[1],
2228 EFX_DWORD_0, spec->data[0]);
2229 data3 = is_udp;
2230 break;
2231 }
2232
2233 case EFX_FARCH_FILTER_TABLE_RX_MAC: {
2234 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2235 EFX_POPULATE_OWORD_7(
2236 *filter,
2237 FRF_CZ_RMFT_RSS_EN,
2238 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2239 FRF_CZ_RMFT_SCATTER_EN,
2240 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2241 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
2242 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
2243 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
2244 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
2245 FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
2246 data3 = is_wild;
2247 break;
2248 }
2249
2250 case EFX_FARCH_FILTER_TABLE_TX_MAC: {
2251 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2252 EFX_POPULATE_OWORD_5(*filter,
2253 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
2254 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
2255 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
2256 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
2257 FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
2258 data3 = is_wild | spec->dmaq_id << 1;
2259 break;
2260 }
2261
2262 default:
2263 BUG();
2264 }
2265
2266 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
2267}
2268
2269static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
2270 const struct efx_farch_filter_spec *right)
2271{
2272 if (left->type != right->type ||
2273 memcmp(left->data, right->data, sizeof(left->data)))
2274 return false;
2275
2276 if (left->flags & EFX_FILTER_FLAG_TX &&
2277 left->dmaq_id != right->dmaq_id)
2278 return false;
2279
2280 return true;
2281}
2282
2283/*
2284 * Construct/deconstruct external filter IDs. At least the RX filter
2285 * IDs must be ordered by matching priority, for RX NFC semantics.
2286 *
2287 * Deconstruction needs to be robust against invalid IDs so that
2288 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
2289 * accept user-provided IDs.
2290 */
2291
2292#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5
2293
2294static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
2295 [EFX_FARCH_FILTER_TCP_FULL] = 0,
2296 [EFX_FARCH_FILTER_UDP_FULL] = 0,
2297 [EFX_FARCH_FILTER_TCP_WILD] = 1,
2298 [EFX_FARCH_FILTER_UDP_WILD] = 1,
2299 [EFX_FARCH_FILTER_MAC_FULL] = 2,
2300 [EFX_FARCH_FILTER_MAC_WILD] = 3,
2301 [EFX_FARCH_FILTER_UC_DEF] = 4,
2302 [EFX_FARCH_FILTER_MC_DEF] = 4,
2303};
2304
2305static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
2306 EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
2307 EFX_FARCH_FILTER_TABLE_RX_IP,
2308 EFX_FARCH_FILTER_TABLE_RX_MAC,
2309 EFX_FARCH_FILTER_TABLE_RX_MAC,
2310 EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
2311 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */
2312 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */
2313};
2314
2315#define EFX_FARCH_FILTER_INDEX_WIDTH 13
2316#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
2317
2318static inline u32
2319efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
2320 unsigned int index)
2321{
2322 unsigned int range;
2323
2324 range = efx_farch_filter_type_match_pri[spec->type];
2325 if (!(spec->flags & EFX_FILTER_FLAG_RX))
2326 range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
2327
2328 return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
2329}
2330
2331static inline enum efx_farch_filter_table_id
2332efx_farch_filter_id_table_id(u32 id)
2333{
2334 unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
2335
2336 if (range < ARRAY_SIZE(efx_farch_filter_range_table))
2337 return efx_farch_filter_range_table[range];
2338 else
2339 return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
2340}
2341
2342static inline unsigned int efx_farch_filter_id_index(u32 id)
2343{
2344 return id & EFX_FARCH_FILTER_INDEX_MASK;
2345}
2346
2347u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
2348{
2349 struct efx_farch_filter_state *state = efx->filter_state;
2350 unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
2351 enum efx_farch_filter_table_id table_id;
2352
2353 do {
2354 table_id = efx_farch_filter_range_table[range];
2355 if (state->table[table_id].size != 0)
2356 return range << EFX_FARCH_FILTER_INDEX_WIDTH |
2357 state->table[table_id].size;
2358 } while (range--);
2359
2360 return 0;
2361}
2362
2363s32 efx_farch_filter_insert(struct efx_nic *efx,
2364 struct efx_filter_spec *gen_spec,
2365 bool replace_equal)
2366{
2367 struct efx_farch_filter_state *state = efx->filter_state;
2368 struct efx_farch_filter_table *table;
2369 struct efx_farch_filter_spec spec;
2370 efx_oword_t filter;
2371 int rep_index, ins_index;
2372 unsigned int depth = 0;
2373 int rc;
2374
2375 rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
2376 if (rc)
2377 return rc;
2378
2379 table = &state->table[efx_farch_filter_spec_table_id(&spec)];
2380 if (table->size == 0)
2381 return -EINVAL;
2382
2383 netif_vdbg(efx, hw, efx->net_dev,
2384 "%s: type %d search_limit=%d", __func__, spec.type,
2385 table->search_limit[spec.type]);
2386
2387 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2388 /* One filter spec per type */
2389 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
2390 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
2391 EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
2392 rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
2393 ins_index = rep_index;
2394
2395 spin_lock_bh(&efx->filter_lock);
2396 } else {
2397 /* Search concurrently for
2398 * (1) a filter to be replaced (rep_index): any filter
2399 * with the same match values, up to the current
2400 * search depth for this type, and
2401 * (2) the insertion point (ins_index): (1) or any
2402 * free slot before it or up to the maximum search
2403 * depth for this priority
2404 * We fail if we cannot find (2).
2405 *
2406 * We can stop once either
2407 * (a) we find (1), in which case we have definitely
2408 * found (2) as well; or
2409 * (b) we have searched exhaustively for (1), and have
2410 * either found (2) or searched exhaustively for it
2411 */
2412 u32 key = efx_farch_filter_build(&filter, &spec);
2413 unsigned int hash = efx_farch_filter_hash(key);
2414 unsigned int incr = efx_farch_filter_increment(key);
2415 unsigned int max_rep_depth = table->search_limit[spec.type];
2416 unsigned int max_ins_depth =
2417 spec.priority <= EFX_FILTER_PRI_HINT ?
2418 EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
2419 EFX_FARCH_FILTER_CTL_SRCH_MAX;
2420 unsigned int i = hash & (table->size - 1);
2421
2422 ins_index = -1;
2423 depth = 1;
2424
2425 spin_lock_bh(&efx->filter_lock);
2426
2427 for (;;) {
2428 if (!test_bit(i, table->used_bitmap)) {
2429 if (ins_index < 0)
2430 ins_index = i;
2431 } else if (efx_farch_filter_equal(&spec,
2432 &table->spec[i])) {
2433 /* Case (a) */
2434 if (ins_index < 0)
2435 ins_index = i;
2436 rep_index = i;
2437 break;
2438 }
2439
2440 if (depth >= max_rep_depth &&
2441 (ins_index >= 0 || depth >= max_ins_depth)) {
2442 /* Case (b) */
2443 if (ins_index < 0) {
2444 rc = -EBUSY;
2445 goto out;
2446 }
2447 rep_index = -1;
2448 break;
2449 }
2450
2451 i = (i + incr) & (table->size - 1);
2452 ++depth;
2453 }
2454 }
2455
2456 /* If we found a filter to be replaced, check whether we
2457 * should do so
2458 */
2459 if (rep_index >= 0) {
2460 struct efx_farch_filter_spec *saved_spec =
2461 &table->spec[rep_index];
2462
2463 if (spec.priority == saved_spec->priority && !replace_equal) {
2464 rc = -EEXIST;
2465 goto out;
2466 }
2467 if (spec.priority < saved_spec->priority) {
2468 rc = -EPERM;
2469 goto out;
2470 }
2471 }
2472
2473 /* Insert the filter */
2474 if (ins_index != rep_index) {
2475 __set_bit(ins_index, table->used_bitmap);
2476 ++table->used;
2477 }
2478 table->spec[ins_index] = spec;
2479
2480 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2481 efx_farch_filter_push_rx_config(efx);
2482 } else {
2483 if (table->search_limit[spec.type] < depth) {
2484 table->search_limit[spec.type] = depth;
2485 if (spec.flags & EFX_FILTER_FLAG_TX)
2486 efx_farch_filter_push_tx_limits(efx);
2487 else
2488 efx_farch_filter_push_rx_config(efx);
2489 }
2490
2491 efx_writeo(efx, &filter,
2492 table->offset + table->step * ins_index);
2493
2494 /* If we were able to replace a filter by inserting
2495 * at a lower depth, clear the replaced filter
2496 */
2497 if (ins_index != rep_index && rep_index >= 0)
2498 efx_farch_filter_table_clear_entry(efx, table,
2499 rep_index);
2500 }
2501
2502 netif_vdbg(efx, hw, efx->net_dev,
2503 "%s: filter type %d index %d rxq %u set",
2504 __func__, spec.type, ins_index, spec.dmaq_id);
2505 rc = efx_farch_filter_make_id(&spec, ins_index);
2506
2507out:
2508 spin_unlock_bh(&efx->filter_lock);
2509 return rc;
2510}
2511
2512static void
2513efx_farch_filter_table_clear_entry(struct efx_nic *efx,
2514 struct efx_farch_filter_table *table,
2515 unsigned int filter_idx)
2516{
2517 static efx_oword_t filter;
2518
2519 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2520 /* RX default filters must always exist */
2521 efx_farch_filter_reset_rx_def(efx, filter_idx);
2522 efx_farch_filter_push_rx_config(efx);
2523 } else if (test_bit(filter_idx, table->used_bitmap)) {
2524 __clear_bit(filter_idx, table->used_bitmap);
2525 --table->used;
2526 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
2527
2528 efx_writeo(efx, &filter,
2529 table->offset + table->step * filter_idx);
2530
2531 /* If this filter required a greater search depth than
2532 * any other, the search limit for its type can now be
2533 * decreased. However, it is hard to determine that
2534 * unless the table has become completely empty - in
2535 * which case, all its search limits can be set to 0.
2536 */
2537 if (unlikely(table->used == 0)) {
2538 memset(table->search_limit, 0,
2539 sizeof(table->search_limit));
2540 if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
2541 efx_farch_filter_push_tx_limits(efx);
2542 else
2543 efx_farch_filter_push_rx_config(efx);
2544 }
2545 }
2546}
2547
2548int efx_farch_filter_remove_safe(struct efx_nic *efx,
2549 enum efx_filter_priority priority,
2550 u32 filter_id)
2551{
2552 struct efx_farch_filter_state *state = efx->filter_state;
2553 enum efx_farch_filter_table_id table_id;
2554 struct efx_farch_filter_table *table;
2555 unsigned int filter_idx;
2556 struct efx_farch_filter_spec *spec;
2557 int rc;
2558
2559 table_id = efx_farch_filter_id_table_id(filter_id);
2560 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2561 return -ENOENT;
2562 table = &state->table[table_id];
2563
2564 filter_idx = efx_farch_filter_id_index(filter_id);
2565 if (filter_idx >= table->size)
2566 return -ENOENT;
2567 spec = &table->spec[filter_idx];
2568
2569 spin_lock_bh(&efx->filter_lock);
2570
2571 if (test_bit(filter_idx, table->used_bitmap) &&
2572 spec->priority == priority) {
2573 efx_farch_filter_table_clear_entry(efx, table, filter_idx);
2574 rc = 0;
2575 } else {
2576 rc = -ENOENT;
2577 }
2578
2579 spin_unlock_bh(&efx->filter_lock);
2580
2581 return rc;
2582}
2583
2584int efx_farch_filter_get_safe(struct efx_nic *efx,
2585 enum efx_filter_priority priority,
2586 u32 filter_id, struct efx_filter_spec *spec_buf)
2587{
2588 struct efx_farch_filter_state *state = efx->filter_state;
2589 enum efx_farch_filter_table_id table_id;
2590 struct efx_farch_filter_table *table;
2591 struct efx_farch_filter_spec *spec;
2592 unsigned int filter_idx;
2593 int rc;
2594
2595 table_id = efx_farch_filter_id_table_id(filter_id);
2596 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2597 return -ENOENT;
2598 table = &state->table[table_id];
2599
2600 filter_idx = efx_farch_filter_id_index(filter_id);
2601 if (filter_idx >= table->size)
2602 return -ENOENT;
2603 spec = &table->spec[filter_idx];
2604
2605 spin_lock_bh(&efx->filter_lock);
2606
2607 if (test_bit(filter_idx, table->used_bitmap) &&
2608 spec->priority == priority) {
2609 efx_farch_filter_to_gen_spec(spec_buf, spec);
2610 rc = 0;
2611 } else {
2612 rc = -ENOENT;
2613 }
2614
2615 spin_unlock_bh(&efx->filter_lock);
2616
2617 return rc;
2618}
2619
2620static void
2621efx_farch_filter_table_clear(struct efx_nic *efx,
2622 enum efx_farch_filter_table_id table_id,
2623 enum efx_filter_priority priority)
2624{
2625 struct efx_farch_filter_state *state = efx->filter_state;
2626 struct efx_farch_filter_table *table = &state->table[table_id];
2627 unsigned int filter_idx;
2628
2629 spin_lock_bh(&efx->filter_lock);
2630 for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
2631 if (table->spec[filter_idx].priority <= priority)
2632 efx_farch_filter_table_clear_entry(efx, table,
2633 filter_idx);
2634 spin_unlock_bh(&efx->filter_lock);
2635}
2636
2637void efx_farch_filter_clear_rx(struct efx_nic *efx,
2638 enum efx_filter_priority priority)
2639{
2640 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
2641 priority);
2642 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
2643 priority);
2644}
2645
2646u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
2647 enum efx_filter_priority priority)
2648{
2649 struct efx_farch_filter_state *state = efx->filter_state;
2650 enum efx_farch_filter_table_id table_id;
2651 struct efx_farch_filter_table *table;
2652 unsigned int filter_idx;
2653 u32 count = 0;
2654
2655 spin_lock_bh(&efx->filter_lock);
2656
2657 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2658 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2659 table_id++) {
2660 table = &state->table[table_id];
2661 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2662 if (test_bit(filter_idx, table->used_bitmap) &&
2663 table->spec[filter_idx].priority == priority)
2664 ++count;
2665 }
2666 }
2667
2668 spin_unlock_bh(&efx->filter_lock);
2669
2670 return count;
2671}
2672
2673s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
2674 enum efx_filter_priority priority,
2675 u32 *buf, u32 size)
2676{
2677 struct efx_farch_filter_state *state = efx->filter_state;
2678 enum efx_farch_filter_table_id table_id;
2679 struct efx_farch_filter_table *table;
2680 unsigned int filter_idx;
2681 s32 count = 0;
2682
2683 spin_lock_bh(&efx->filter_lock);
2684
2685 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2686 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2687 table_id++) {
2688 table = &state->table[table_id];
2689 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2690 if (test_bit(filter_idx, table->used_bitmap) &&
2691 table->spec[filter_idx].priority == priority) {
2692 if (count == size) {
2693 count = -EMSGSIZE;
2694 goto out;
2695 }
2696 buf[count++] = efx_farch_filter_make_id(
2697 &table->spec[filter_idx], filter_idx);
2698 }
2699 }
2700 }
2701out:
2702 spin_unlock_bh(&efx->filter_lock);
2703
2704 return count;
2705}
2706
2707/* Restore filter state after reset */
2708void efx_farch_filter_table_restore(struct efx_nic *efx)
2709{
2710 struct efx_farch_filter_state *state = efx->filter_state;
2711 enum efx_farch_filter_table_id table_id;
2712 struct efx_farch_filter_table *table;
2713 efx_oword_t filter;
2714 unsigned int filter_idx;
2715
2716 spin_lock_bh(&efx->filter_lock);
2717
2718 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2719 table = &state->table[table_id];
2720
2721 /* Check whether this is a regular register table */
2722 if (table->step == 0)
2723 continue;
2724
2725 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2726 if (!test_bit(filter_idx, table->used_bitmap))
2727 continue;
2728 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2729 efx_writeo(efx, &filter,
2730 table->offset + table->step * filter_idx);
2731 }
2732 }
2733
2734 efx_farch_filter_push_rx_config(efx);
2735 efx_farch_filter_push_tx_limits(efx);
2736
2737 spin_unlock_bh(&efx->filter_lock);
2738}
2739
2740void efx_farch_filter_table_remove(struct efx_nic *efx)
2741{
2742 struct efx_farch_filter_state *state = efx->filter_state;
2743 enum efx_farch_filter_table_id table_id;
2744
2745 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2746 kfree(state->table[table_id].used_bitmap);
2747 vfree(state->table[table_id].spec);
2748 }
2749 kfree(state);
2750}
2751
2752int efx_farch_filter_table_probe(struct efx_nic *efx)
2753{
2754 struct efx_farch_filter_state *state;
2755 struct efx_farch_filter_table *table;
2756 unsigned table_id;
2757
2758 state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
2759 if (!state)
2760 return -ENOMEM;
2761 efx->filter_state = state;
2762
2763 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
2764 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2765 table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
2766 table->offset = FR_BZ_RX_FILTER_TBL0;
2767 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
2768 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
2769 }
2770
2771 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
2772 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
2773 table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
2774 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
2775 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
2776 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
2777
2778 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2779 table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
2780 table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
2781
2782 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
2783 table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
2784 table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
2785 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
2786 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
2787 }
2788
2789 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2790 table = &state->table[table_id];
2791 if (table->size == 0)
2792 continue;
2793 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
2794 sizeof(unsigned long),
2795 GFP_KERNEL);
2796 if (!table->used_bitmap)
2797 goto fail;
2798 table->spec = vzalloc(table->size * sizeof(*table->spec));
2799 if (!table->spec)
2800 goto fail;
2801 }
2802
2803 if (state->table[EFX_FARCH_FILTER_TABLE_RX_DEF].size) {
2804 /* RX default filters must always exist */
2805 unsigned i;
2806 for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++)
2807 efx_farch_filter_reset_rx_def(efx, i);
2808 }
2809
2810 efx_farch_filter_push_rx_config(efx);
2811
2812 return 0;
2813
2814fail:
2815 efx_farch_filter_table_remove(efx);
2816 return -ENOMEM;
2817}
2818
2819/* Update scatter enable flags for filters pointing to our own RX queues */
2820void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
2821{
2822 struct efx_farch_filter_state *state = efx->filter_state;
2823 enum efx_farch_filter_table_id table_id;
2824 struct efx_farch_filter_table *table;
2825 efx_oword_t filter;
2826 unsigned int filter_idx;
2827
2828 spin_lock_bh(&efx->filter_lock);
2829
2830 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2831 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2832 table_id++) {
2833 table = &state->table[table_id];
2834
2835 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2836 if (!test_bit(filter_idx, table->used_bitmap) ||
2837 table->spec[filter_idx].dmaq_id >=
2838 efx->n_rx_channels)
2839 continue;
2840
2841 if (efx->rx_scatter)
2842 table->spec[filter_idx].flags |=
2843 EFX_FILTER_FLAG_RX_SCATTER;
2844 else
2845 table->spec[filter_idx].flags &=
2846 ~EFX_FILTER_FLAG_RX_SCATTER;
2847
2848 if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
2849 /* Pushed by efx_farch_filter_push_rx_config() */
2850 continue;
2851
2852 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2853 efx_writeo(efx, &filter,
2854 table->offset + table->step * filter_idx);
2855 }
2856 }
2857
2858 efx_farch_filter_push_rx_config(efx);
2859
2860 spin_unlock_bh(&efx->filter_lock);
2861}
2862
2863#ifdef CONFIG_RFS_ACCEL
2864
2865s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
2866 struct efx_filter_spec *gen_spec)
2867{
2868 return efx_farch_filter_insert(efx, gen_spec, true);
2869}
2870
2871bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2872 unsigned int index)
2873{
2874 struct efx_farch_filter_state *state = efx->filter_state;
2875 struct efx_farch_filter_table *table =
2876 &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2877
2878 if (test_bit(index, table->used_bitmap) &&
2879 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
2880 rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
2881 flow_id, index)) {
2882 efx_farch_filter_table_clear_entry(efx, table, index);
2883 return true;
2884 }
2885
2886 return false;
2887}
2888
2889#endif /* CONFIG_RFS_ACCEL */
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
deleted file mode 100644
index ad66376e9d8c..000000000000
--- a/drivers/net/ethernet/sfc/filter.c
+++ /dev/null
@@ -1,1244 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2010 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/in.h>
11#include <net/ip.h>
12#include "efx.h"
13#include "filter.h"
14#include "io.h"
15#include "nic.h"
16#include "farch_regs.h"
17
18/* "Fudge factors" - difference between programmed value and actual depth.
19 * Due to pipelined implementation we need to program H/W with a value that
20 * is larger than the hop limit we want.
21 */
22#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
23#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
24
25/* Hard maximum search limit. Hardware will time-out beyond 200-something.
26 * We also need to avoid infinite loops in efx_farch_filter_search() when the
27 * table is full.
28 */
29#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
30
31/* Don't try very hard to find space for performance hints, as this is
32 * counter-productive. */
33#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
34
35enum efx_farch_filter_type {
36 EFX_FARCH_FILTER_TCP_FULL = 0,
37 EFX_FARCH_FILTER_TCP_WILD,
38 EFX_FARCH_FILTER_UDP_FULL,
39 EFX_FARCH_FILTER_UDP_WILD,
40 EFX_FARCH_FILTER_MAC_FULL = 4,
41 EFX_FARCH_FILTER_MAC_WILD,
42 EFX_FARCH_FILTER_UC_DEF = 8,
43 EFX_FARCH_FILTER_MC_DEF,
44 EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
45};
46
47enum efx_farch_filter_table_id {
48 EFX_FARCH_FILTER_TABLE_RX_IP = 0,
49 EFX_FARCH_FILTER_TABLE_RX_MAC,
50 EFX_FARCH_FILTER_TABLE_RX_DEF,
51 EFX_FARCH_FILTER_TABLE_TX_MAC,
52 EFX_FARCH_FILTER_TABLE_COUNT,
53};
54
55enum efx_farch_filter_index {
56 EFX_FARCH_FILTER_INDEX_UC_DEF,
57 EFX_FARCH_FILTER_INDEX_MC_DEF,
58 EFX_FARCH_FILTER_SIZE_RX_DEF,
59};
60
61struct efx_farch_filter_spec {
62 u8 type:4;
63 u8 priority:4;
64 u8 flags;
65 u16 dmaq_id;
66 u32 data[3];
67};
68
69struct efx_farch_filter_table {
70 enum efx_farch_filter_table_id id;
71 u32 offset; /* address of table relative to BAR */
72 unsigned size; /* number of entries */
73 unsigned step; /* step between entries */
74 unsigned used; /* number currently used */
75 unsigned long *used_bitmap;
76 struct efx_farch_filter_spec *spec;
77 unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
78};
79
80struct efx_farch_filter_state {
81 struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
82};
83
84static void
85efx_farch_filter_table_clear_entry(struct efx_nic *efx,
86 struct efx_farch_filter_table *table,
87 unsigned int filter_idx);
88
89/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
90 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
91static u16 efx_farch_filter_hash(u32 key)
92{
93 u16 tmp;
94
95 /* First 16 rounds */
96 tmp = 0x1fff ^ key >> 16;
97 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
98 tmp = tmp ^ tmp >> 9;
99 /* Last 16 rounds */
100 tmp = tmp ^ tmp << 13 ^ key;
101 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
102 return tmp ^ tmp >> 9;
103}
104
105/* To allow for hash collisions, filter search continues at these
106 * increments from the first possible entry selected by the hash. */
107static u16 efx_farch_filter_increment(u32 key)
108{
109 return key * 2 - 1;
110}
111
112static enum efx_farch_filter_table_id
113efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
114{
115 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
116 (EFX_FARCH_FILTER_TCP_FULL >> 2));
117 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
118 (EFX_FARCH_FILTER_TCP_WILD >> 2));
119 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
120 (EFX_FARCH_FILTER_UDP_FULL >> 2));
121 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
122 (EFX_FARCH_FILTER_UDP_WILD >> 2));
123 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
124 (EFX_FARCH_FILTER_MAC_FULL >> 2));
125 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
126 (EFX_FARCH_FILTER_MAC_WILD >> 2));
127 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
128 EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
129 return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
130}
131
132static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
133{
134 struct efx_farch_filter_state *state = efx->filter_state;
135 struct efx_farch_filter_table *table;
136 efx_oword_t filter_ctl;
137
138 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
139
140 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
141 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
142 table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
143 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
144 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
145 table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
146 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
147 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
148 table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
149 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
150 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
151 table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
152 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
153
154 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
155 if (table->size) {
156 EFX_SET_OWORD_FIELD(
157 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
158 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
159 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
160 EFX_SET_OWORD_FIELD(
161 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
162 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
163 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
164 }
165
166 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
167 if (table->size) {
168 EFX_SET_OWORD_FIELD(
169 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
170 table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
171 EFX_SET_OWORD_FIELD(
172 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
173 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
174 EFX_FILTER_FLAG_RX_RSS));
175 EFX_SET_OWORD_FIELD(
176 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
177 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
178 EFX_SET_OWORD_FIELD(
179 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
180 !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
181 EFX_FILTER_FLAG_RX_RSS));
182
183 /* There is a single bit to enable RX scatter for all
184 * unmatched packets. Only set it if scatter is
185 * enabled in both filter specs.
186 */
187 EFX_SET_OWORD_FIELD(
188 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
189 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
190 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
191 EFX_FILTER_FLAG_RX_SCATTER));
192 } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
193 /* We don't expose 'default' filters because unmatched
194 * packets always go to the queue number found in the
195 * RSS table. But we still need to set the RX scatter
196 * bit here.
197 */
198 EFX_SET_OWORD_FIELD(
199 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
200 efx->rx_scatter);
201 }
202
203 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
204}
205
206static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
207{
208 struct efx_farch_filter_state *state = efx->filter_state;
209 struct efx_farch_filter_table *table;
210 efx_oword_t tx_cfg;
211
212 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
213
214 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
215 if (table->size) {
216 EFX_SET_OWORD_FIELD(
217 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
218 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
219 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
220 EFX_SET_OWORD_FIELD(
221 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
222 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
223 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
224 }
225
226 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
227}
228
229static int
230efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
231 const struct efx_filter_spec *gen_spec)
232{
233 bool is_full = false;
234
235 if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
236 gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
237 return -EINVAL;
238
239 spec->priority = gen_spec->priority;
240 spec->flags = gen_spec->flags;
241 spec->dmaq_id = gen_spec->dmaq_id;
242
243 switch (gen_spec->match_flags) {
244 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
245 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
246 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
247 is_full = true;
248 /* fall through */
249 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
250 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
251 __be32 rhost, host1, host2;
252 __be16 rport, port1, port2;
253
254 EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
255
256 if (gen_spec->ether_type != htons(ETH_P_IP))
257 return -EPROTONOSUPPORT;
258 if (gen_spec->loc_port == 0 ||
259 (is_full && gen_spec->rem_port == 0))
260 return -EADDRNOTAVAIL;
261 switch (gen_spec->ip_proto) {
262 case IPPROTO_TCP:
263 spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
264 EFX_FARCH_FILTER_TCP_WILD);
265 break;
266 case IPPROTO_UDP:
267 spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
268 EFX_FARCH_FILTER_UDP_WILD);
269 break;
270 default:
271 return -EPROTONOSUPPORT;
272 }
273
274 /* Filter is constructed in terms of source and destination,
275 * with the odd wrinkle that the ports are swapped in a UDP
276 * wildcard filter. We need to convert from local and remote
277 * (= zero for wildcard) addresses.
278 */
279 rhost = is_full ? gen_spec->rem_host[0] : 0;
280 rport = is_full ? gen_spec->rem_port : 0;
281 host1 = rhost;
282 host2 = gen_spec->loc_host[0];
283 if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
284 port1 = gen_spec->loc_port;
285 port2 = rport;
286 } else {
287 port1 = rport;
288 port2 = gen_spec->loc_port;
289 }
290 spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
291 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
292 spec->data[2] = ntohl(host2);
293
294 break;
295 }
296
297 case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
298 is_full = true;
299 /* fall through */
300 case EFX_FILTER_MATCH_LOC_MAC:
301 spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
302 EFX_FARCH_FILTER_MAC_WILD);
303 spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
304 spec->data[1] = (gen_spec->loc_mac[2] << 24 |
305 gen_spec->loc_mac[3] << 16 |
306 gen_spec->loc_mac[4] << 8 |
307 gen_spec->loc_mac[5]);
308 spec->data[2] = (gen_spec->loc_mac[0] << 8 |
309 gen_spec->loc_mac[1]);
310 break;
311
312 case EFX_FILTER_MATCH_LOC_MAC_IG:
313 spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
314 EFX_FARCH_FILTER_MC_DEF :
315 EFX_FARCH_FILTER_UC_DEF);
316 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
317 break;
318
319 default:
320 return -EPROTONOSUPPORT;
321 }
322
323 return 0;
324}
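
For illustration only, here is a minimal userspace sketch of the data-word packing done by the RX_IP case above for a full TCP/IPv4 match (the UDP-wildcard port swap is omitted). It assumes addresses and ports are already in network byte order, as in struct efx_filter_spec, and uses arpa/inet.h in place of the kernel byte-order helpers; the function and variable names are illustrative, not part of the driver.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Sketch only: pack a full-match IPv4 4-tuple into three 32-bit words the
 * way the RX_IP case above does.  data[0] = rem_ip[15:0]:rem_port,
 * data[1] = loc_port:rem_ip[31:16], data[2] = loc_ip.  Inputs are in
 * network byte order.
 */
static void pack_ipv4_full(uint32_t rem_host, uint16_t rem_port,
			   uint32_t loc_host, uint16_t loc_port,
			   uint32_t data[3])
{
	data[0] = ntohl(rem_host) << 16 | ntohs(rem_port);
	data[1] = (uint32_t)ntohs(loc_port) << 16 | ntohl(rem_host) >> 16;
	data[2] = ntohl(loc_host);
}

int main(void)
{
	uint32_t data[3];

	/* 192.0.2.1:1234 -> 198.51.100.2:80, e.g. a TCP_FULL filter */
	pack_ipv4_full(inet_addr("192.0.2.1"), htons(1234),
		       inet_addr("198.51.100.2"), htons(80), data);
	printf("%08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n",
	       data[0], data[1], data[2]);
	return 0;
}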
325
326static void
327efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
328 const struct efx_farch_filter_spec *spec)
329{
330 bool is_full = false;
331
332 /* *gen_spec should be completely initialised, to be consistent
333 * with efx_filter_init_{rx,tx}() and in case we want to copy
334 * it back to userland.
335 */
336 memset(gen_spec, 0, sizeof(*gen_spec));
337
338 gen_spec->priority = spec->priority;
339 gen_spec->flags = spec->flags;
340 gen_spec->dmaq_id = spec->dmaq_id;
341
342 switch (spec->type) {
343 case EFX_FARCH_FILTER_TCP_FULL:
344 case EFX_FARCH_FILTER_UDP_FULL:
345 is_full = true;
346 /* fall through */
347 case EFX_FARCH_FILTER_TCP_WILD:
348 case EFX_FARCH_FILTER_UDP_WILD: {
349 __be32 host1, host2;
350 __be16 port1, port2;
351
352 gen_spec->match_flags =
353 EFX_FILTER_MATCH_ETHER_TYPE |
354 EFX_FILTER_MATCH_IP_PROTO |
355 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
356 if (is_full)
357 gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
358 EFX_FILTER_MATCH_REM_PORT);
359 gen_spec->ether_type = htons(ETH_P_IP);
360 gen_spec->ip_proto =
361 (spec->type == EFX_FARCH_FILTER_TCP_FULL ||
362 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
363 IPPROTO_TCP : IPPROTO_UDP;
364
365 host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
366 port1 = htons(spec->data[0]);
367 host2 = htonl(spec->data[2]);
368 port2 = htons(spec->data[1] >> 16);
369 if (spec->flags & EFX_FILTER_FLAG_TX) {
370 gen_spec->loc_host[0] = host1;
371 gen_spec->rem_host[0] = host2;
372 } else {
373 gen_spec->loc_host[0] = host2;
374 gen_spec->rem_host[0] = host1;
375 }
376 if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
377 (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
378 gen_spec->loc_port = port1;
379 gen_spec->rem_port = port2;
380 } else {
381 gen_spec->loc_port = port2;
382 gen_spec->rem_port = port1;
383 }
384
385 break;
386 }
387
388 case EFX_FARCH_FILTER_MAC_FULL:
389 is_full = true;
390 /* fall through */
391 case EFX_FARCH_FILTER_MAC_WILD:
392 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
393 if (is_full)
394 gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
395 gen_spec->loc_mac[0] = spec->data[2] >> 8;
396 gen_spec->loc_mac[1] = spec->data[2];
397 gen_spec->loc_mac[2] = spec->data[1] >> 24;
398 gen_spec->loc_mac[3] = spec->data[1] >> 16;
399 gen_spec->loc_mac[4] = spec->data[1] >> 8;
400 gen_spec->loc_mac[5] = spec->data[1];
401 gen_spec->outer_vid = htons(spec->data[0]);
402 break;
403
404 case EFX_FARCH_FILTER_UC_DEF:
405 case EFX_FARCH_FILTER_MC_DEF:
406 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
407 gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
408 break;
409
410 default:
411 WARN_ON(1);
412 break;
413 }
414}
415
416static void
417efx_farch_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
418{
419 struct efx_farch_filter_state *state = efx->filter_state;
420 struct efx_farch_filter_table *table =
421 &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
422 struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
423
424	/* If there's only one channel then disable RSS for non-VF
425 * traffic, thereby allowing VFs to use RSS when the PF can't.
426 */
427 spec->type = EFX_FARCH_FILTER_UC_DEF + filter_idx;
428 spec->priority = EFX_FILTER_PRI_MANUAL;
429 spec->flags = (EFX_FILTER_FLAG_RX |
430 (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
431 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
432 spec->dmaq_id = 0;
433 table->used_bitmap[0] |= 1 << filter_idx;
434}
435
436/* Build a filter entry and return its n-tuple key. */
437static u32 efx_farch_filter_build(efx_oword_t *filter,
438 struct efx_farch_filter_spec *spec)
439{
440 u32 data3;
441
442 switch (efx_farch_filter_spec_table_id(spec)) {
443 case EFX_FARCH_FILTER_TABLE_RX_IP: {
444 bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
445 spec->type == EFX_FARCH_FILTER_UDP_WILD);
446 EFX_POPULATE_OWORD_7(
447 *filter,
448 FRF_BZ_RSS_EN,
449 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
450 FRF_BZ_SCATTER_EN,
451 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
452 FRF_BZ_TCP_UDP, is_udp,
453 FRF_BZ_RXQ_ID, spec->dmaq_id,
454 EFX_DWORD_2, spec->data[2],
455 EFX_DWORD_1, spec->data[1],
456 EFX_DWORD_0, spec->data[0]);
457 data3 = is_udp;
458 break;
459 }
460
461 case EFX_FARCH_FILTER_TABLE_RX_MAC: {
462 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
463 EFX_POPULATE_OWORD_7(
464 *filter,
465 FRF_CZ_RMFT_RSS_EN,
466 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
467 FRF_CZ_RMFT_SCATTER_EN,
468 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
469 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
470 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
471 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
472 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
473 FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
474 data3 = is_wild;
475 break;
476 }
477
478 case EFX_FARCH_FILTER_TABLE_TX_MAC: {
479 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
480 EFX_POPULATE_OWORD_5(*filter,
481 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
482 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
483 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
484 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
485 FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
486 data3 = is_wild | spec->dmaq_id << 1;
487 break;
488 }
489
490 default:
491 BUG();
492 }
493
494 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
495}
496
497static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
498 const struct efx_farch_filter_spec *right)
499{
500 if (left->type != right->type ||
501 memcmp(left->data, right->data, sizeof(left->data)))
502 return false;
503
504 if (left->flags & EFX_FILTER_FLAG_TX &&
505 left->dmaq_id != right->dmaq_id)
506 return false;
507
508 return true;
509}
510
511/*
512 * Construct/deconstruct external filter IDs. At least the RX filter
513 * IDs must be ordered by matching priority, for RX NFC semantics.
514 *
515 * Deconstruction needs to be robust against invalid IDs so that
516 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
517 * accept user-provided IDs.
518 */
519
520#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5
521
522static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
523 [EFX_FARCH_FILTER_TCP_FULL] = 0,
524 [EFX_FARCH_FILTER_UDP_FULL] = 0,
525 [EFX_FARCH_FILTER_TCP_WILD] = 1,
526 [EFX_FARCH_FILTER_UDP_WILD] = 1,
527 [EFX_FARCH_FILTER_MAC_FULL] = 2,
528 [EFX_FARCH_FILTER_MAC_WILD] = 3,
529 [EFX_FARCH_FILTER_UC_DEF] = 4,
530 [EFX_FARCH_FILTER_MC_DEF] = 4,
531};
532
533static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
534 EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
535 EFX_FARCH_FILTER_TABLE_RX_IP,
536 EFX_FARCH_FILTER_TABLE_RX_MAC,
537 EFX_FARCH_FILTER_TABLE_RX_MAC,
538 EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
539 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */
540 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */
541};
542
543#define EFX_FARCH_FILTER_INDEX_WIDTH 13
544#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
545
546static inline u32
547efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
548 unsigned int index)
549{
550 unsigned int range;
551
552 range = efx_farch_filter_type_match_pri[spec->type];
553 if (!(spec->flags & EFX_FILTER_FLAG_RX))
554 range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
555
556 return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
557}
558
559static inline enum efx_farch_filter_table_id
560efx_farch_filter_id_table_id(u32 id)
561{
562 unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
563
564 if (range < ARRAY_SIZE(efx_farch_filter_range_table))
565 return efx_farch_filter_range_table[range];
566 else
567 return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
568}
569
570static inline unsigned int efx_farch_filter_id_index(u32 id)
571{
572 return id & EFX_FARCH_FILTER_INDEX_MASK;
573}
574
575u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
576{
577 struct efx_farch_filter_state *state = efx->filter_state;
578 unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
579 enum efx_farch_filter_table_id table_id;
580
581 do {
582 table_id = efx_farch_filter_range_table[range];
583 if (state->table[table_id].size != 0)
584 return range << EFX_FARCH_FILTER_INDEX_WIDTH |
585 state->table[table_id].size;
586 } while (range--);
587
588 return 0;
589}
590
591/**
592 * efx_filter_insert_filter - add or replace a filter
593 * @efx: NIC in which to insert the filter
594 * @gen_spec: Specification for the filter
595 * @replace_equal: Flag for whether the specified filter may replace an
596 * existing filter with equal priority
597 *
598 * On success, return the filter ID.
599 * On failure, return a negative error code.
600 *
601 * If an existing filter has equal match values to the new filter
602 * spec, then the new filter might replace it, depending on the
603 * relative priorities. If the existing filter has lower priority, or
604 * if @replace_equal is set and it has equal priority, then it is
605 * replaced. Otherwise the function fails, returning -%EPERM if
606 * the existing filter has higher priority or -%EEXIST if it has
607 * equal priority.
608 */
609s32 efx_filter_insert_filter(struct efx_nic *efx,
610 struct efx_filter_spec *gen_spec,
611 bool replace_equal)
612{
613 struct efx_farch_filter_state *state = efx->filter_state;
614 struct efx_farch_filter_table *table;
615 struct efx_farch_filter_spec spec;
616 efx_oword_t filter;
617 int rep_index, ins_index;
618 unsigned int depth = 0;
619 int rc;
620
621 rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
622 if (rc)
623 return rc;
624
625 table = &state->table[efx_farch_filter_spec_table_id(&spec)];
626 if (table->size == 0)
627 return -EINVAL;
628
629 netif_vdbg(efx, hw, efx->net_dev,
630 "%s: type %d search_limit=%d", __func__, spec.type,
631 table->search_limit[spec.type]);
632
633 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
634 /* One filter spec per type */
635 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
636 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
637 EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
638 rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
639 ins_index = rep_index;
640
641 spin_lock_bh(&efx->filter_lock);
642 } else {
643 /* Search concurrently for
644 * (1) a filter to be replaced (rep_index): any filter
645 * with the same match values, up to the current
646 * search depth for this type, and
647 * (2) the insertion point (ins_index): (1) or any
648 * free slot before it or up to the maximum search
649 * depth for this priority
650 * We fail if we cannot find (2).
651 *
652 * We can stop once either
653 * (a) we find (1), in which case we have definitely
654 * found (2) as well; or
655 * (b) we have searched exhaustively for (1), and have
656 * either found (2) or searched exhaustively for it
657 */
658 u32 key = efx_farch_filter_build(&filter, &spec);
659 unsigned int hash = efx_farch_filter_hash(key);
660 unsigned int incr = efx_farch_filter_increment(key);
661 unsigned int max_rep_depth = table->search_limit[spec.type];
662 unsigned int max_ins_depth =
663 spec.priority <= EFX_FILTER_PRI_HINT ?
664 EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
665 EFX_FARCH_FILTER_CTL_SRCH_MAX;
666 unsigned int i = hash & (table->size - 1);
667
668 ins_index = -1;
669 depth = 1;
670
671 spin_lock_bh(&efx->filter_lock);
672
673 for (;;) {
674 if (!test_bit(i, table->used_bitmap)) {
675 if (ins_index < 0)
676 ins_index = i;
677 } else if (efx_farch_filter_equal(&spec,
678 &table->spec[i])) {
679 /* Case (a) */
680 if (ins_index < 0)
681 ins_index = i;
682 rep_index = i;
683 break;
684 }
685
686 if (depth >= max_rep_depth &&
687 (ins_index >= 0 || depth >= max_ins_depth)) {
688 /* Case (b) */
689 if (ins_index < 0) {
690 rc = -EBUSY;
691 goto out;
692 }
693 rep_index = -1;
694 break;
695 }
696
697 i = (i + incr) & (table->size - 1);
698 ++depth;
699 }
700 }
701
702 /* If we found a filter to be replaced, check whether we
703 * should do so
704 */
705 if (rep_index >= 0) {
706 struct efx_farch_filter_spec *saved_spec =
707 &table->spec[rep_index];
708
709 if (spec.priority == saved_spec->priority && !replace_equal) {
710 rc = -EEXIST;
711 goto out;
712 }
713 if (spec.priority < saved_spec->priority) {
714 rc = -EPERM;
715 goto out;
716 }
717 }
718
719 /* Insert the filter */
720 if (ins_index != rep_index) {
721 __set_bit(ins_index, table->used_bitmap);
722 ++table->used;
723 }
724 table->spec[ins_index] = spec;
725
726 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
727 efx_farch_filter_push_rx_config(efx);
728 } else {
729 if (table->search_limit[spec.type] < depth) {
730 table->search_limit[spec.type] = depth;
731 if (spec.flags & EFX_FILTER_FLAG_TX)
732 efx_farch_filter_push_tx_limits(efx);
733 else
734 efx_farch_filter_push_rx_config(efx);
735 }
736
737 efx_writeo(efx, &filter,
738 table->offset + table->step * ins_index);
739
740 /* If we were able to replace a filter by inserting
741 * at a lower depth, clear the replaced filter
742 */
743 if (ins_index != rep_index && rep_index >= 0)
744 efx_farch_filter_table_clear_entry(efx, table,
745 rep_index);
746 }
747
748 netif_vdbg(efx, hw, efx->net_dev,
749 "%s: filter type %d index %d rxq %u set",
750 __func__, spec.type, ins_index, spec.dmaq_id);
751 rc = efx_farch_filter_make_id(&spec, ins_index);
752
753out:
754 spin_unlock_bh(&efx->filter_lock);
755 return rc;
756}
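For illustration, a minimal standalone sketch of the dual-purpose probe loop used by the insert path above: it remembers the first free slot as the insertion point and stops early on an exact match. For brevity the two search limits are collapsed into one max_depth, and a plain integer key stands in for the efx_farch_filter_hash()/efx_farch_filter_increment() pair used above; all names are illustrative.

#include <stdbool.h>
#include <stdint.h>

#define TABLE_SIZE 16	/* must be a power of two, like the hardware tables */

struct slot {
	bool used;
	uint32_t key;	/* stands in for the full match data */
};

/* Probe as the insert path above does: remember the first free slot as the
 * insertion point, stop early on an exact match (the replacement candidate),
 * and give up after max_depth steps.  Returns the insertion index or -1,
 * and sets *rep_index to the matching entry if one was found.
 */
static int probe(const struct slot *table, uint32_t key, unsigned int incr,
		 unsigned int max_depth, int *rep_index)
{
	unsigned int i = key & (TABLE_SIZE - 1);
	unsigned int depth;
	int ins_index = -1;

	*rep_index = -1;
	for (depth = 1; depth <= max_depth; depth++) {
		if (!table[i].used) {
			if (ins_index < 0)
				ins_index = i;
		} else if (table[i].key == key) {
			*rep_index = i;
			return ins_index < 0 ? (int)i : ins_index;
		}
		i = (i + incr) & (TABLE_SIZE - 1);
	}
	return ins_index;
}

int main(void)
{
	struct slot table[TABLE_SIZE] = { { false, 0 } };
	int rep, idx;

	idx = probe(table, 0x1234, 3, 8, &rep);	/* empty table: free slot */
	if (idx < 0)
		return 1;
	table[idx].used = true;
	table[idx].key = 0x1234;

	idx = probe(table, 0x1234, 3, 8, &rep);	/* now finds the match */
	return !(rep == idx);
}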
757
758static void
759efx_farch_filter_table_clear_entry(struct efx_nic *efx,
760 struct efx_farch_filter_table *table,
761 unsigned int filter_idx)
762{
763 static efx_oword_t filter;
764
765 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
766 /* RX default filters must always exist */
767 efx_farch_filter_reset_rx_def(efx, filter_idx);
768 efx_farch_filter_push_rx_config(efx);
769 } else if (test_bit(filter_idx, table->used_bitmap)) {
770 __clear_bit(filter_idx, table->used_bitmap);
771 --table->used;
772 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
773
774 efx_writeo(efx, &filter,
775 table->offset + table->step * filter_idx);
776
777 /* If this filter required a greater search depth than
778 * any other, the search limit for its type can now be
779 * decreased. However, it is hard to determine that
780 * unless the table has become completely empty - in
781 * which case, all its search limits can be set to 0.
782 */
783 if (unlikely(table->used == 0)) {
784 memset(table->search_limit, 0,
785 sizeof(table->search_limit));
786 if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
787 efx_farch_filter_push_tx_limits(efx);
788 else
789 efx_farch_filter_push_rx_config(efx);
790 }
791 }
792}
793
794/**
795 * efx_filter_remove_id_safe - remove a filter by ID, carefully
796 * @efx: NIC from which to remove the filter
797 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
798 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
799 *
800 * This function will range-check @filter_id, so it is safe to call
801 * with a value passed from userland.
802 */
803int efx_filter_remove_id_safe(struct efx_nic *efx,
804 enum efx_filter_priority priority,
805 u32 filter_id)
806{
807 struct efx_farch_filter_state *state = efx->filter_state;
808 enum efx_farch_filter_table_id table_id;
809 struct efx_farch_filter_table *table;
810 unsigned int filter_idx;
811 struct efx_farch_filter_spec *spec;
812 int rc;
813
814 table_id = efx_farch_filter_id_table_id(filter_id);
815 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
816 return -ENOENT;
817 table = &state->table[table_id];
818
819 filter_idx = efx_farch_filter_id_index(filter_id);
820 if (filter_idx >= table->size)
821 return -ENOENT;
822 spec = &table->spec[filter_idx];
823
824 spin_lock_bh(&efx->filter_lock);
825
826 if (test_bit(filter_idx, table->used_bitmap) &&
827 spec->priority == priority) {
828 efx_farch_filter_table_clear_entry(efx, table, filter_idx);
829 rc = 0;
830 } else {
831 rc = -ENOENT;
832 }
833
834 spin_unlock_bh(&efx->filter_lock);
835
836 return rc;
837}
838
839/**
840 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
841 * @efx: NIC from which to retrieve the filter
842 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
843 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
844 * @spec: Buffer in which to store filter specification
845 *
846 * This function will range-check @filter_id, so it is safe to call
847 * with a value passed from userland.
848 */
849int efx_filter_get_filter_safe(struct efx_nic *efx,
850 enum efx_filter_priority priority,
851 u32 filter_id, struct efx_filter_spec *spec_buf)
852{
853 struct efx_farch_filter_state *state = efx->filter_state;
854 enum efx_farch_filter_table_id table_id;
855 struct efx_farch_filter_table *table;
856 struct efx_farch_filter_spec *spec;
857 unsigned int filter_idx;
858 int rc;
859
860 table_id = efx_farch_filter_id_table_id(filter_id);
861 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
862 return -ENOENT;
863 table = &state->table[table_id];
864
865 filter_idx = efx_farch_filter_id_index(filter_id);
866 if (filter_idx >= table->size)
867 return -ENOENT;
868 spec = &table->spec[filter_idx];
869
870 spin_lock_bh(&efx->filter_lock);
871
872 if (test_bit(filter_idx, table->used_bitmap) &&
873 spec->priority == priority) {
874 efx_farch_filter_to_gen_spec(spec_buf, spec);
875 rc = 0;
876 } else {
877 rc = -ENOENT;
878 }
879
880 spin_unlock_bh(&efx->filter_lock);
881
882 return rc;
883}
884
885static void
886efx_farch_filter_table_clear(struct efx_nic *efx,
887 enum efx_farch_filter_table_id table_id,
888 enum efx_filter_priority priority)
889{
890 struct efx_farch_filter_state *state = efx->filter_state;
891 struct efx_farch_filter_table *table = &state->table[table_id];
892 unsigned int filter_idx;
893
894 spin_lock_bh(&efx->filter_lock);
895 for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
896 if (table->spec[filter_idx].priority <= priority)
897 efx_farch_filter_table_clear_entry(efx, table,
898 filter_idx);
899 spin_unlock_bh(&efx->filter_lock);
900}
901
902/**
903 * efx_filter_clear_rx - remove RX filters by priority
904 * @efx: NIC from which to remove the filters
905 * @priority: Maximum priority to remove
906 */
907void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
908{
909 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
910 priority);
911 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
912 priority);
913}
914
915u32 efx_filter_count_rx_used(struct efx_nic *efx,
916 enum efx_filter_priority priority)
917{
918 struct efx_farch_filter_state *state = efx->filter_state;
919 enum efx_farch_filter_table_id table_id;
920 struct efx_farch_filter_table *table;
921 unsigned int filter_idx;
922 u32 count = 0;
923
924 spin_lock_bh(&efx->filter_lock);
925
926 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
927 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
928 table_id++) {
929 table = &state->table[table_id];
930 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
931 if (test_bit(filter_idx, table->used_bitmap) &&
932 table->spec[filter_idx].priority == priority)
933 ++count;
934 }
935 }
936
937 spin_unlock_bh(&efx->filter_lock);
938
939 return count;
940}
941
942s32 efx_filter_get_rx_ids(struct efx_nic *efx,
943 enum efx_filter_priority priority,
944 u32 *buf, u32 size)
945{
946 struct efx_farch_filter_state *state = efx->filter_state;
947 enum efx_farch_filter_table_id table_id;
948 struct efx_farch_filter_table *table;
949 unsigned int filter_idx;
950 s32 count = 0;
951
952 spin_lock_bh(&efx->filter_lock);
953
954 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
955 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
956 table_id++) {
957 table = &state->table[table_id];
958 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
959 if (test_bit(filter_idx, table->used_bitmap) &&
960 table->spec[filter_idx].priority == priority) {
961 if (count == size) {
962 count = -EMSGSIZE;
963 goto out;
964 }
965 buf[count++] = efx_farch_filter_make_id(
966 &table->spec[filter_idx], filter_idx);
967 }
968 }
969 }
970out:
971 spin_unlock_bh(&efx->filter_lock);
972
973 return count;
974}
975
976 /* Restore filter state after reset */
977void efx_restore_filters(struct efx_nic *efx)
978{
979 struct efx_farch_filter_state *state = efx->filter_state;
980 enum efx_farch_filter_table_id table_id;
981 struct efx_farch_filter_table *table;
982 efx_oword_t filter;
983 unsigned int filter_idx;
984
985 spin_lock_bh(&efx->filter_lock);
986
987 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
988 table = &state->table[table_id];
989
990 /* Check whether this is a regular register table */
991 if (table->step == 0)
992 continue;
993
994 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
995 if (!test_bit(filter_idx, table->used_bitmap))
996 continue;
997 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
998 efx_writeo(efx, &filter,
999 table->offset + table->step * filter_idx);
1000 }
1001 }
1002
1003 efx_farch_filter_push_rx_config(efx);
1004 efx_farch_filter_push_tx_limits(efx);
1005
1006 spin_unlock_bh(&efx->filter_lock);
1007}
1008
1009int efx_probe_filters(struct efx_nic *efx)
1010{
1011 struct efx_farch_filter_state *state;
1012 struct efx_farch_filter_table *table;
1013 unsigned table_id;
1014
1015 state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
1016 if (!state)
1017 return -ENOMEM;
1018 efx->filter_state = state;
1019
1020 spin_lock_init(&efx->filter_lock);
1021
1022 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1023#ifdef CONFIG_RFS_ACCEL
1024 efx->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
1025 sizeof(*efx->rps_flow_id),
1026 GFP_KERNEL);
1027 if (!efx->rps_flow_id)
1028 goto fail;
1029#endif
1030 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
1031 table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
1032 table->offset = FR_BZ_RX_FILTER_TBL0;
1033 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
1034 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
1035 }
1036
1037 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1038 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
1039 table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
1040 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
1041 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
1042 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
1043
1044 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
1045 table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
1046 table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
1047
1048 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
1049 table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
1050 table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
1051 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
1052 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
1053 }
1054
1055 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
1056 table = &state->table[table_id];
1057 if (table->size == 0)
1058 continue;
1059 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
1060 sizeof(unsigned long),
1061 GFP_KERNEL);
1062 if (!table->used_bitmap)
1063 goto fail;
1064 table->spec = vzalloc(table->size * sizeof(*table->spec));
1065 if (!table->spec)
1066 goto fail;
1067 }
1068
1069 if (state->table[EFX_FARCH_FILTER_TABLE_RX_DEF].size) {
1070 /* RX default filters must always exist */
1071 unsigned i;
1072 for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++)
1073 efx_farch_filter_reset_rx_def(efx, i);
1074 }
1075
1076 efx_farch_filter_push_rx_config(efx);
1077
1078 return 0;
1079
1080fail:
1081 efx_remove_filters(efx);
1082 return -ENOMEM;
1083}
1084
1085void efx_remove_filters(struct efx_nic *efx)
1086{
1087 struct efx_farch_filter_state *state = efx->filter_state;
1088 enum efx_farch_filter_table_id table_id;
1089
1090 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
1091 kfree(state->table[table_id].used_bitmap);
1092 vfree(state->table[table_id].spec);
1093 }
1094#ifdef CONFIG_RFS_ACCEL
1095 kfree(efx->rps_flow_id);
1096#endif
1097 kfree(state);
1098}
1099
1100/* Update scatter enable flags for filters pointing to our own RX queues */
1101void efx_filter_update_rx_scatter(struct efx_nic *efx)
1102{
1103 struct efx_farch_filter_state *state = efx->filter_state;
1104 enum efx_farch_filter_table_id table_id;
1105 struct efx_farch_filter_table *table;
1106 efx_oword_t filter;
1107 unsigned int filter_idx;
1108
1109 spin_lock_bh(&efx->filter_lock);
1110
1111 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
1112 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
1113 table_id++) {
1114 table = &state->table[table_id];
1115
1116 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
1117 if (!test_bit(filter_idx, table->used_bitmap) ||
1118 table->spec[filter_idx].dmaq_id >=
1119 efx->n_rx_channels)
1120 continue;
1121
1122 if (efx->rx_scatter)
1123 table->spec[filter_idx].flags |=
1124 EFX_FILTER_FLAG_RX_SCATTER;
1125 else
1126 table->spec[filter_idx].flags &=
1127 ~EFX_FILTER_FLAG_RX_SCATTER;
1128
1129 if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
1130 /* Pushed by efx_farch_filter_push_rx_config() */
1131 continue;
1132
1133 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
1134 efx_writeo(efx, &filter,
1135 table->offset + table->step * filter_idx);
1136 }
1137 }
1138
1139 efx_farch_filter_push_rx_config(efx);
1140
1141 spin_unlock_bh(&efx->filter_lock);
1142}
1143
1144#ifdef CONFIG_RFS_ACCEL
1145
1146int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
1147 u16 rxq_index, u32 flow_id)
1148{
1149 struct efx_nic *efx = netdev_priv(net_dev);
1150 struct efx_channel *channel;
1151 struct efx_filter_spec spec;
1152 const struct iphdr *ip;
1153 const __be16 *ports;
1154 int nhoff;
1155 int rc;
1156
1157 nhoff = skb_network_offset(skb);
1158
1159 if (skb->protocol == htons(ETH_P_8021Q)) {
1160 EFX_BUG_ON_PARANOID(skb_headlen(skb) <
1161 nhoff + sizeof(struct vlan_hdr));
1162 if (((const struct vlan_hdr *)skb->data + nhoff)->
1163 h_vlan_encapsulated_proto != htons(ETH_P_IP))
1164 return -EPROTONOSUPPORT;
1165
1166 /* This is IP over 802.1q VLAN. We can't filter on the
1167 * IP 5-tuple and the vlan together, so just strip the
1168 * vlan header and filter on the IP part.
1169 */
1170 nhoff += sizeof(struct vlan_hdr);
1171 } else if (skb->protocol != htons(ETH_P_IP)) {
1172 return -EPROTONOSUPPORT;
1173 }
1174
1175 /* RFS must validate the IP header length before calling us */
1176 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
1177 ip = (const struct iphdr *)(skb->data + nhoff);
1178 if (ip_is_fragment(ip))
1179 return -EPROTONOSUPPORT;
1180 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
1181 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
1182
1183 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
1184 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
1185 rxq_index);
1186 rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
1187 ip->daddr, ports[1], ip->saddr, ports[0]);
1188 if (rc)
1189 return rc;
1190
1191 rc = efx_filter_insert_filter(efx, &spec, true);
1192 if (rc < 0)
1193 return rc;
1194
1195 /* Remember this so we can check whether to expire the filter later */
1196 efx->rps_flow_id[rc] = flow_id;
1197 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
1198 ++channel->rfs_filters_added;
1199
1200 netif_info(efx, rx_status, efx->net_dev,
1201 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
1202 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
1203 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
1204 rxq_index, flow_id, rc);
1205
1206 return rc;
1207}
1208
1209bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
1210{
1211 struct efx_farch_filter_state *state = efx->filter_state;
1212 struct efx_farch_filter_table *table =
1213 &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
1214 unsigned mask = table->size - 1;
1215 unsigned index;
1216 unsigned stop;
1217
1218 if (!spin_trylock_bh(&efx->filter_lock))
1219 return false;
1220
1221 index = efx->rps_expire_index;
1222 stop = (index + quota) & mask;
1223
1224 while (index != stop) {
1225 if (test_bit(index, table->used_bitmap) &&
1226 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
1227 rps_may_expire_flow(efx->net_dev,
1228 table->spec[index].dmaq_id,
1229 efx->rps_flow_id[index], index)) {
1230 netif_info(efx, rx_status, efx->net_dev,
1231 "expiring filter %d [flow %u]\n",
1232 index, efx->rps_flow_id[index]);
1233 efx_farch_filter_table_clear_entry(efx, table, index);
1234 }
1235 index = (index + 1) & mask;
1236 }
1237
1238 efx->rps_expire_index = stop;
1239
1240 spin_unlock_bh(&efx->filter_lock);
1241 return true;
1242}
1243
1244#endif /* CONFIG_RFS_ACCEL */
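
For illustration, the RFS expiry pass above walks a quota-sized window of the power-of-two filter table as a ring, resuming on the next pass where the previous one stopped. A minimal standalone sketch of just that window arithmetic follows, with a stub standing in for the per-entry test (used bit, priority, rps_may_expire_flow()); the names are illustrative only.

#include <stdbool.h>

#define TABLE_SIZE 8	/* must be a power of two for the mask arithmetic */

/* Stand-in for the per-entry expiry test in the driver. */
static bool may_expire(unsigned int index)
{
	return (index & 1) == 0;	/* arbitrary pattern for the sketch */
}

/* Scan at most 'quota' entries starting at *start, wrapping at TABLE_SIZE,
 * and leave *start at the next entry so the following pass resumes there.
 */
static unsigned int expire_window(unsigned int *start, unsigned int quota)
{
	unsigned int mask = TABLE_SIZE - 1;
	unsigned int index = *start;
	unsigned int stop = (index + quota) & mask;
	unsigned int expired = 0;

	while (index != stop) {
		if (may_expire(index))
			expired++;
		index = (index + 1) & mask;
	}
	*start = stop;
	return expired;
}

int main(void)
{
	unsigned int start = 0;

	expire_window(&start, 3);	/* scans 0,1,2; start becomes 3 */
	expire_window(&start, 3);	/* scans 3,4,5; start becomes 6 */
	return start != 6;
}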
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 5287a3cd639c..d35ce1410376 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -30,6 +30,7 @@
30 30
31#include "enum.h" 31#include "enum.h"
32#include "bitfield.h" 32#include "bitfield.h"
33#include "filter.h"
33 34
34/************************************************************************** 35/**************************************************************************
35 * 36 *
@@ -1025,6 +1026,24 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
1025 * @ev_process: Process events for a queue, up to the given NAPI quota 1026 * @ev_process: Process events for a queue, up to the given NAPI quota
1026 * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ 1027 * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
1027 * @ev_test_generate: Generate a test event 1028 * @ev_test_generate: Generate a test event
1029 * @filter_table_probe: Probe filter capabilities and set up filter software state
1030 * @filter_table_restore: Restore filters removed from hardware
1031 * @filter_table_remove: Remove filters from hardware and tear down software state
1032 * @filter_update_rx_scatter: Update filters after change to rx scatter setting
1033 * @filter_insert: Add or replace a filter
1034 * @filter_remove_safe: Remove a filter by ID, carefully
1035 * @filter_get_safe: Retrieve a filter by ID, carefully
1036 * @filter_clear_rx: Remove RX filters by priority
1037 * @filter_count_rx_used: Get the number of filters in use at a given priority
1038 * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
1039 * @filter_get_rx_ids: Get list of RX filters at a given priority
1040 * @filter_rfs_insert: Add or replace a filter for RFS. This must be
1041 * atomic. The hardware change may be asynchronous but should
1042 * not be delayed for long. It may fail if this can't be done
1043 * atomically.
1044 * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
1045 * This must check whether the specified table entry is used by RFS
1046 * and that rps_may_expire_flow() returns true for it.
1028 * @revision: Hardware architecture revision 1047 * @revision: Hardware architecture revision
1029 * @txd_ptr_tbl_base: TX descriptor ring base address 1048 * @txd_ptr_tbl_base: TX descriptor ring base address
1030 * @rxd_ptr_tbl_base: RX descriptor ring base address 1049 * @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1102,6 +1121,32 @@ struct efx_nic_type {
1102 int (*ev_process)(struct efx_channel *channel, int quota); 1121 int (*ev_process)(struct efx_channel *channel, int quota);
1103 void (*ev_read_ack)(struct efx_channel *channel); 1122 void (*ev_read_ack)(struct efx_channel *channel);
1104 void (*ev_test_generate)(struct efx_channel *channel); 1123 void (*ev_test_generate)(struct efx_channel *channel);
1124 int (*filter_table_probe)(struct efx_nic *efx);
1125 void (*filter_table_restore)(struct efx_nic *efx);
1126 void (*filter_table_remove)(struct efx_nic *efx);
1127 void (*filter_update_rx_scatter)(struct efx_nic *efx);
1128 s32 (*filter_insert)(struct efx_nic *efx,
1129 struct efx_filter_spec *spec, bool replace);
1130 int (*filter_remove_safe)(struct efx_nic *efx,
1131 enum efx_filter_priority priority,
1132 u32 filter_id);
1133 int (*filter_get_safe)(struct efx_nic *efx,
1134 enum efx_filter_priority priority,
1135 u32 filter_id, struct efx_filter_spec *);
1136 void (*filter_clear_rx)(struct efx_nic *efx,
1137 enum efx_filter_priority priority);
1138 u32 (*filter_count_rx_used)(struct efx_nic *efx,
1139 enum efx_filter_priority priority);
1140 u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
1141 s32 (*filter_get_rx_ids)(struct efx_nic *efx,
1142 enum efx_filter_priority priority,
1143 u32 *buf, u32 size);
1144#ifdef CONFIG_RFS_ACCEL
1145 s32 (*filter_rfs_insert)(struct efx_nic *efx,
1146 struct efx_filter_spec *spec);
1147 bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
1148 unsigned int index);
1149#endif
1105 1150
1106 int revision; 1151 int revision;
1107 unsigned int txd_ptr_tbl_base; 1152 unsigned int txd_ptr_tbl_base;
@@ -1117,6 +1162,7 @@ struct efx_nic_type {
1117 unsigned int timer_period_max; 1162 unsigned int timer_period_max;
1118 netdev_features_t offload_features; 1163 netdev_features_t offload_features;
1119 int mcdi_max_ver; 1164 int mcdi_max_ver;
1165 unsigned int max_rx_ip_filters;
1120}; 1166};
1121 1167
1122/************************************************************************** 1168/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 25e25b635798..69298c918e97 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -404,6 +404,34 @@ extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
404extern void efx_farch_ev_read_ack(struct efx_channel *channel); 404extern void efx_farch_ev_read_ack(struct efx_channel *channel);
405extern void efx_farch_ev_test_generate(struct efx_channel *channel); 405extern void efx_farch_ev_test_generate(struct efx_channel *channel);
406 406
407/* Falcon/Siena filter operations */
408extern int efx_farch_filter_table_probe(struct efx_nic *efx);
409extern void efx_farch_filter_table_restore(struct efx_nic *efx);
410extern void efx_farch_filter_table_remove(struct efx_nic *efx);
411extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
412extern s32 efx_farch_filter_insert(struct efx_nic *efx,
413 struct efx_filter_spec *spec, bool replace);
414extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
415 enum efx_filter_priority priority,
416 u32 filter_id);
417extern int efx_farch_filter_get_safe(struct efx_nic *efx,
418 enum efx_filter_priority priority,
419 u32 filter_id, struct efx_filter_spec *);
420extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
421 enum efx_filter_priority priority);
422extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
423 enum efx_filter_priority priority);
424extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
425extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
426 enum efx_filter_priority priority,
427 u32 *buf, u32 size);
428#ifdef CONFIG_RFS_ACCEL
429extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
430 struct efx_filter_spec *spec);
431extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
432 unsigned int index);
433#endif
434
407extern bool efx_nic_event_present(struct efx_channel *channel); 435extern bool efx_nic_event_present(struct efx_channel *channel);
408 436
409/* Some statistics are computed as A - B where A and B each increase 437/* Some statistics are computed as A - B where A and B each increase
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index f2b78cd2baf8..12990929e274 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -21,6 +21,7 @@
21#include <net/checksum.h> 21#include <net/checksum.h>
22#include "net_driver.h" 22#include "net_driver.h"
23#include "efx.h" 23#include "efx.h"
24#include "filter.h"
24#include "nic.h" 25#include "nic.h"
25#include "selftest.h" 26#include "selftest.h"
26#include "workarounds.h" 27#include "workarounds.h"
@@ -802,3 +803,96 @@ module_param(rx_refill_threshold, uint, 0444);
802MODULE_PARM_DESC(rx_refill_threshold, 803MODULE_PARM_DESC(rx_refill_threshold,
803 "RX descriptor ring refill threshold (%)"); 804 "RX descriptor ring refill threshold (%)");
804 805
806#ifdef CONFIG_RFS_ACCEL
807
808int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
809 u16 rxq_index, u32 flow_id)
810{
811 struct efx_nic *efx = netdev_priv(net_dev);
812 struct efx_channel *channel;
813 struct efx_filter_spec spec;
814 const struct iphdr *ip;
815 const __be16 *ports;
816 int nhoff;
817 int rc;
818
819 nhoff = skb_network_offset(skb);
820
821 if (skb->protocol == htons(ETH_P_8021Q)) {
822 EFX_BUG_ON_PARANOID(skb_headlen(skb) <
823 nhoff + sizeof(struct vlan_hdr));
824 if (((const struct vlan_hdr *)skb->data + nhoff)->
825 h_vlan_encapsulated_proto != htons(ETH_P_IP))
826 return -EPROTONOSUPPORT;
827
828 /* This is IP over 802.1q VLAN. We can't filter on the
829 * IP 5-tuple and the vlan together, so just strip the
830 * vlan header and filter on the IP part.
831 */
832 nhoff += sizeof(struct vlan_hdr);
833 } else if (skb->protocol != htons(ETH_P_IP)) {
834 return -EPROTONOSUPPORT;
835 }
836
837 /* RFS must validate the IP header length before calling us */
838 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
839 ip = (const struct iphdr *)(skb->data + nhoff);
840 if (ip_is_fragment(ip))
841 return -EPROTONOSUPPORT;
842 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
843 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
844
845 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
846 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
847 rxq_index);
848 rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
849 ip->daddr, ports[1], ip->saddr, ports[0]);
850 if (rc)
851 return rc;
852
853 rc = efx->type->filter_rfs_insert(efx, &spec);
854 if (rc < 0)
855 return rc;
856
857 /* Remember this so we can check whether to expire the filter later */
858 efx->rps_flow_id[rc] = flow_id;
859 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
860 ++channel->rfs_filters_added;
861
862 netif_info(efx, rx_status, efx->net_dev,
863 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
864 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
865 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
866 rxq_index, flow_id, rc);
867
868 return rc;
869}
870
871bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
872{
873 bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
874 unsigned int index, size;
875 u32 flow_id;
876
877 if (!spin_trylock_bh(&efx->filter_lock))
878 return false;
879
880 expire_one = efx->type->filter_rfs_expire_one;
881 index = efx->rps_expire_index;
882 size = efx->type->max_rx_ip_filters;
883 while (quota--) {
884 flow_id = efx->rps_flow_id[index];
885 if (expire_one(efx, flow_id, index))
886 netif_info(efx, rx_status, efx->net_dev,
887 "expired filter %d [flow %u]\n",
888 index, flow_id);
889 if (++index == size)
890 index = 0;
891 }
892 efx->rps_expire_index = index;
893
894 spin_unlock_bh(&efx->filter_lock);
895 return true;
896}
897
898#endif /* CONFIG_RFS_ACCEL */
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 23e573149bd6..5120cd8f706b 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -736,6 +736,21 @@ const struct efx_nic_type siena_a0_nic_type = {
736 .ev_process = efx_farch_ev_process, 736 .ev_process = efx_farch_ev_process,
737 .ev_read_ack = efx_farch_ev_read_ack, 737 .ev_read_ack = efx_farch_ev_read_ack,
738 .ev_test_generate = efx_farch_ev_test_generate, 738 .ev_test_generate = efx_farch_ev_test_generate,
739 .filter_table_probe = efx_farch_filter_table_probe,
740 .filter_table_restore = efx_farch_filter_table_restore,
741 .filter_table_remove = efx_farch_filter_table_remove,
742 .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
743 .filter_insert = efx_farch_filter_insert,
744 .filter_remove_safe = efx_farch_filter_remove_safe,
745 .filter_get_safe = efx_farch_filter_get_safe,
746 .filter_clear_rx = efx_farch_filter_clear_rx,
747 .filter_count_rx_used = efx_farch_filter_count_rx_used,
748 .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
749 .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
750#ifdef CONFIG_RFS_ACCEL
751 .filter_rfs_insert = efx_farch_filter_rfs_insert,
752 .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
753#endif
739 754
740 .revision = EFX_REV_SIENA_A0, 755 .revision = EFX_REV_SIENA_A0,
741 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, 756 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
@@ -752,4 +767,5 @@ const struct efx_nic_type siena_a0_nic_type = {
752 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 767 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
753 NETIF_F_RXHASH | NETIF_F_NTUPLE), 768 NETIF_F_RXHASH | NETIF_F_NTUPLE),
754 .mcdi_max_ver = 1, 769 .mcdi_max_ver = 1,
770 .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
755}; 771};