diff options
author | David S. Miller <davem@davemloft.net> | 2010-12-10 13:20:43 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-12-10 13:20:43 -0500 |
commit | cf78f8ee3de7d8d5b47d371c95716d0e4facf1c4 (patch) | |
tree | ffd211dfe1d4f0d91fe10396b05e261865f62b61 /drivers | |
parent | 1e13f863ca88014d9550876c05c939fdab1017d1 (diff) | |
parent | c39d35ebffeea5996a6f8fd8430fae9acfb8aeaf (diff) |
Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next-2.6
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/sfc/efx.h | 5 | ||||
-rw-r--r-- | drivers/net/sfc/ethtool.c | 99 | ||||
-rw-r--r-- | drivers/net/sfc/filter.c | 252 | ||||
-rw-r--r-- | drivers/net/sfc/filter.h | 149 | ||||
-rw-r--r-- | drivers/net/sfc/io.h | 153 | ||||
-rw-r--r-- | drivers/net/sfc/net_driver.h | 57 | ||||
-rw-r--r-- | drivers/net/sfc/nic.c | 42 | ||||
-rw-r--r-- | drivers/net/sfc/tx.c | 17 |
8 files changed, 468 insertions, 306 deletions
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h index 10a1bf40da96..003fdb35b4bb 100644 --- a/drivers/net/sfc/efx.h +++ b/drivers/net/sfc/efx.h | |||
@@ -74,9 +74,8 @@ extern int efx_filter_insert_filter(struct efx_nic *efx, | |||
74 | bool replace); | 74 | bool replace); |
75 | extern int efx_filter_remove_filter(struct efx_nic *efx, | 75 | extern int efx_filter_remove_filter(struct efx_nic *efx, |
76 | struct efx_filter_spec *spec); | 76 | struct efx_filter_spec *spec); |
77 | extern void efx_filter_table_clear(struct efx_nic *efx, | 77 | extern void efx_filter_clear_rx(struct efx_nic *efx, |
78 | enum efx_filter_table_id table_id, | 78 | enum efx_filter_priority priority); |
79 | enum efx_filter_priority priority); | ||
80 | 79 | ||
81 | /* Channels */ | 80 | /* Channels */ |
82 | extern void efx_process_channel_now(struct efx_channel *channel); | 81 | extern void efx_process_channel_now(struct efx_channel *channel); |
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index aae756bf47ee..5e50e57b0ae2 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/netdevice.h> | 11 | #include <linux/netdevice.h> |
12 | #include <linux/ethtool.h> | 12 | #include <linux/ethtool.h> |
13 | #include <linux/rtnetlink.h> | 13 | #include <linux/rtnetlink.h> |
14 | #include <linux/in.h> | ||
14 | #include "net_driver.h" | 15 | #include "net_driver.h" |
15 | #include "workarounds.h" | 16 | #include "workarounds.h" |
16 | #include "selftest.h" | 17 | #include "selftest.h" |
@@ -558,12 +559,8 @@ static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data) | |||
558 | if (rc) | 559 | if (rc) |
559 | return rc; | 560 | return rc; |
560 | 561 | ||
561 | if (!(data & ETH_FLAG_NTUPLE)) { | 562 | if (!(data & ETH_FLAG_NTUPLE)) |
562 | efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, | 563 | efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); |
563 | EFX_FILTER_PRI_MANUAL); | ||
564 | efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, | ||
565 | EFX_FILTER_PRI_MANUAL); | ||
566 | } | ||
567 | 564 | ||
568 | return 0; | 565 | return 0; |
569 | } | 566 | } |
@@ -582,6 +579,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
582 | goto fail1; | 579 | goto fail1; |
583 | } | 580 | } |
584 | 581 | ||
582 | netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", | ||
583 | (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); | ||
584 | |||
585 | /* We need rx buffers and interrupts. */ | 585 | /* We need rx buffers and interrupts. */ |
586 | already_up = (efx->net_dev->flags & IFF_UP); | 586 | already_up = (efx->net_dev->flags & IFF_UP); |
587 | if (!already_up) { | 587 | if (!already_up) { |
@@ -600,9 +600,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
600 | if (!already_up) | 600 | if (!already_up) |
601 | dev_close(efx->net_dev); | 601 | dev_close(efx->net_dev); |
602 | 602 | ||
603 | netif_dbg(efx, drv, efx->net_dev, "%s %sline self-tests\n", | 603 | netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n", |
604 | rc == 0 ? "passed" : "failed", | 604 | rc == 0 ? "passed" : "failed", |
605 | (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); | 605 | (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); |
606 | 606 | ||
607 | fail2: | 607 | fail2: |
608 | fail1: | 608 | fail1: |
@@ -921,6 +921,7 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev, | |||
921 | struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec; | 921 | struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec; |
922 | struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec; | 922 | struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec; |
923 | struct efx_filter_spec filter; | 923 | struct efx_filter_spec filter; |
924 | int rc; | ||
924 | 925 | ||
925 | /* Range-check action */ | 926 | /* Range-check action */ |
926 | if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR || | 927 | if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR || |
@@ -930,9 +931,16 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev, | |||
930 | if (~ntuple->fs.data_mask) | 931 | if (~ntuple->fs.data_mask) |
931 | return -EINVAL; | 932 | return -EINVAL; |
932 | 933 | ||
934 | efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0, | ||
935 | (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ? | ||
936 | 0xfff : ntuple->fs.action); | ||
937 | |||
933 | switch (ntuple->fs.flow_type) { | 938 | switch (ntuple->fs.flow_type) { |
934 | case TCP_V4_FLOW: | 939 | case TCP_V4_FLOW: |
935 | case UDP_V4_FLOW: | 940 | case UDP_V4_FLOW: { |
941 | u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ? | ||
942 | IPPROTO_TCP : IPPROTO_UDP); | ||
943 | |||
936 | /* Must match all of destination, */ | 944 | /* Must match all of destination, */ |
937 | if (ip_mask->ip4dst | ip_mask->pdst) | 945 | if (ip_mask->ip4dst | ip_mask->pdst) |
938 | return -EINVAL; | 946 | return -EINVAL; |
@@ -944,7 +952,22 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev, | |||
944 | /* and nothing else */ | 952 | /* and nothing else */ |
945 | if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask) | 953 | if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask) |
946 | return -EINVAL; | 954 | return -EINVAL; |
955 | |||
956 | if (!ip_mask->ip4src) | ||
957 | rc = efx_filter_set_ipv4_full(&filter, proto, | ||
958 | ip_entry->ip4dst, | ||
959 | ip_entry->pdst, | ||
960 | ip_entry->ip4src, | ||
961 | ip_entry->psrc); | ||
962 | else | ||
963 | rc = efx_filter_set_ipv4_local(&filter, proto, | ||
964 | ip_entry->ip4dst, | ||
965 | ip_entry->pdst); | ||
966 | if (rc) | ||
967 | return rc; | ||
947 | break; | 968 | break; |
969 | } | ||
970 | |||
948 | case ETHER_FLOW: | 971 | case ETHER_FLOW: |
949 | /* Must match all of destination, */ | 972 | /* Must match all of destination, */ |
950 | if (!is_zero_ether_addr(mac_mask->h_dest)) | 973 | if (!is_zero_ether_addr(mac_mask->h_dest)) |
@@ -957,58 +980,24 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev, | |||
957 | if (!is_broadcast_ether_addr(mac_mask->h_source) || | 980 | if (!is_broadcast_ether_addr(mac_mask->h_source) || |
958 | mac_mask->h_proto != htons(0xffff)) | 981 | mac_mask->h_proto != htons(0xffff)) |
959 | return -EINVAL; | 982 | return -EINVAL; |
983 | |||
984 | rc = efx_filter_set_eth_local( | ||
985 | &filter, | ||
986 | (ntuple->fs.vlan_tag_mask == 0xf000) ? | ||
987 | ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC, | ||
988 | mac_entry->h_dest); | ||
989 | if (rc) | ||
990 | return rc; | ||
960 | break; | 991 | break; |
992 | |||
961 | default: | 993 | default: |
962 | return -EINVAL; | 994 | return -EINVAL; |
963 | } | 995 | } |
964 | 996 | ||
965 | filter.priority = EFX_FILTER_PRI_MANUAL; | 997 | if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) |
966 | filter.flags = 0; | ||
967 | |||
968 | switch (ntuple->fs.flow_type) { | ||
969 | case TCP_V4_FLOW: | ||
970 | if (!ip_mask->ip4src) | ||
971 | efx_filter_set_rx_tcp_full(&filter, | ||
972 | htonl(ip_entry->ip4src), | ||
973 | htons(ip_entry->psrc), | ||
974 | htonl(ip_entry->ip4dst), | ||
975 | htons(ip_entry->pdst)); | ||
976 | else | ||
977 | efx_filter_set_rx_tcp_wild(&filter, | ||
978 | htonl(ip_entry->ip4dst), | ||
979 | htons(ip_entry->pdst)); | ||
980 | break; | ||
981 | case UDP_V4_FLOW: | ||
982 | if (!ip_mask->ip4src) | ||
983 | efx_filter_set_rx_udp_full(&filter, | ||
984 | htonl(ip_entry->ip4src), | ||
985 | htons(ip_entry->psrc), | ||
986 | htonl(ip_entry->ip4dst), | ||
987 | htons(ip_entry->pdst)); | ||
988 | else | ||
989 | efx_filter_set_rx_udp_wild(&filter, | ||
990 | htonl(ip_entry->ip4dst), | ||
991 | htons(ip_entry->pdst)); | ||
992 | break; | ||
993 | case ETHER_FLOW: | ||
994 | if (ntuple->fs.vlan_tag_mask == 0xf000) | ||
995 | efx_filter_set_rx_mac_full(&filter, | ||
996 | ntuple->fs.vlan_tag & 0xfff, | ||
997 | mac_entry->h_dest); | ||
998 | else | ||
999 | efx_filter_set_rx_mac_wild(&filter, mac_entry->h_dest); | ||
1000 | break; | ||
1001 | } | ||
1002 | |||
1003 | if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) { | ||
1004 | return efx_filter_remove_filter(efx, &filter); | 998 | return efx_filter_remove_filter(efx, &filter); |
1005 | } else { | 999 | else |
1006 | if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) | ||
1007 | filter.dmaq_id = 0xfff; | ||
1008 | else | ||
1009 | filter.dmaq_id = ntuple->fs.action; | ||
1010 | return efx_filter_insert_filter(efx, &filter, true); | 1000 | return efx_filter_insert_filter(efx, &filter, true); |
1011 | } | ||
1012 | } | 1001 | } |
1013 | 1002 | ||
1014 | static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, | 1003 | static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, |
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c index 44500b54fd5f..d4722c41c4ce 100644 --- a/drivers/net/sfc/filter.c +++ b/drivers/net/sfc/filter.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * by the Free Software Foundation, incorporated herein by reference. | 7 | * by the Free Software Foundation, incorporated herein by reference. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/in.h> | ||
10 | #include "efx.h" | 11 | #include "efx.h" |
11 | #include "filter.h" | 12 | #include "filter.h" |
12 | #include "io.h" | 13 | #include "io.h" |
@@ -26,19 +27,26 @@ | |||
26 | */ | 27 | */ |
27 | #define FILTER_CTL_SRCH_MAX 200 | 28 | #define FILTER_CTL_SRCH_MAX 200 |
28 | 29 | ||
30 | enum efx_filter_table_id { | ||
31 | EFX_FILTER_TABLE_RX_IP = 0, | ||
32 | EFX_FILTER_TABLE_RX_MAC, | ||
33 | EFX_FILTER_TABLE_COUNT, | ||
34 | }; | ||
35 | |||
29 | struct efx_filter_table { | 36 | struct efx_filter_table { |
37 | enum efx_filter_table_id id; | ||
30 | u32 offset; /* address of table relative to BAR */ | 38 | u32 offset; /* address of table relative to BAR */ |
31 | unsigned size; /* number of entries */ | 39 | unsigned size; /* number of entries */ |
32 | unsigned step; /* step between entries */ | 40 | unsigned step; /* step between entries */ |
33 | unsigned used; /* number currently used */ | 41 | unsigned used; /* number currently used */ |
34 | unsigned long *used_bitmap; | 42 | unsigned long *used_bitmap; |
35 | struct efx_filter_spec *spec; | 43 | struct efx_filter_spec *spec; |
44 | unsigned search_depth[EFX_FILTER_TYPE_COUNT]; | ||
36 | }; | 45 | }; |
37 | 46 | ||
38 | struct efx_filter_state { | 47 | struct efx_filter_state { |
39 | spinlock_t lock; | 48 | spinlock_t lock; |
40 | struct efx_filter_table table[EFX_FILTER_TABLE_COUNT]; | 49 | struct efx_filter_table table[EFX_FILTER_TABLE_COUNT]; |
41 | unsigned search_depth[EFX_FILTER_TYPE_COUNT]; | ||
42 | }; | 50 | }; |
43 | 51 | ||
44 | /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit | 52 | /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit |
@@ -65,68 +73,203 @@ static u16 efx_filter_increment(u32 key) | |||
65 | } | 73 | } |
66 | 74 | ||
67 | static enum efx_filter_table_id | 75 | static enum efx_filter_table_id |
68 | efx_filter_type_table_id(enum efx_filter_type type) | 76 | efx_filter_spec_table_id(const struct efx_filter_spec *spec) |
77 | { | ||
78 | BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2)); | ||
79 | BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2)); | ||
80 | BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2)); | ||
81 | BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2)); | ||
82 | BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2)); | ||
83 | BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2)); | ||
84 | EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC); | ||
85 | return spec->type >> 2; | ||
86 | } | ||
87 | |||
88 | static struct efx_filter_table * | ||
89 | efx_filter_spec_table(struct efx_filter_state *state, | ||
90 | const struct efx_filter_spec *spec) | ||
69 | { | 91 | { |
70 | BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_FULL >> 2)); | 92 | if (spec->type == EFX_FILTER_UNSPEC) |
71 | BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_WILD >> 2)); | 93 | return NULL; |
72 | BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_FULL >> 2)); | 94 | else |
73 | BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_WILD >> 2)); | 95 | return &state->table[efx_filter_spec_table_id(spec)]; |
74 | BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_FULL >> 2)); | ||
75 | BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_WILD >> 2)); | ||
76 | return type >> 2; | ||
77 | } | 96 | } |
78 | 97 | ||
79 | static void | 98 | static void efx_filter_table_reset_search_depth(struct efx_filter_table *table) |
80 | efx_filter_table_reset_search_depth(struct efx_filter_state *state, | ||
81 | enum efx_filter_table_id table_id) | ||
82 | { | 99 | { |
83 | memset(state->search_depth + (table_id << 2), 0, | 100 | memset(table->search_depth, 0, sizeof(table->search_depth)); |
84 | sizeof(state->search_depth[0]) << 2); | ||
85 | } | 101 | } |
86 | 102 | ||
87 | static void efx_filter_push_rx_limits(struct efx_nic *efx) | 103 | static void efx_filter_push_rx_limits(struct efx_nic *efx) |
88 | { | 104 | { |
89 | struct efx_filter_state *state = efx->filter_state; | 105 | struct efx_filter_state *state = efx->filter_state; |
106 | struct efx_filter_table *table; | ||
90 | efx_oword_t filter_ctl; | 107 | efx_oword_t filter_ctl; |
91 | 108 | ||
92 | efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); | 109 | efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); |
93 | 110 | ||
111 | table = &state->table[EFX_FILTER_TABLE_RX_IP]; | ||
94 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, | 112 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, |
95 | state->search_depth[EFX_FILTER_RX_TCP_FULL] + | 113 | table->search_depth[EFX_FILTER_TCP_FULL] + |
96 | FILTER_CTL_SRCH_FUDGE_FULL); | 114 | FILTER_CTL_SRCH_FUDGE_FULL); |
97 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, | 115 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, |
98 | state->search_depth[EFX_FILTER_RX_TCP_WILD] + | 116 | table->search_depth[EFX_FILTER_TCP_WILD] + |
99 | FILTER_CTL_SRCH_FUDGE_WILD); | 117 | FILTER_CTL_SRCH_FUDGE_WILD); |
100 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, | 118 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, |
101 | state->search_depth[EFX_FILTER_RX_UDP_FULL] + | 119 | table->search_depth[EFX_FILTER_UDP_FULL] + |
102 | FILTER_CTL_SRCH_FUDGE_FULL); | 120 | FILTER_CTL_SRCH_FUDGE_FULL); |
103 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, | 121 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, |
104 | state->search_depth[EFX_FILTER_RX_UDP_WILD] + | 122 | table->search_depth[EFX_FILTER_UDP_WILD] + |
105 | FILTER_CTL_SRCH_FUDGE_WILD); | 123 | FILTER_CTL_SRCH_FUDGE_WILD); |
106 | 124 | ||
107 | if (state->table[EFX_FILTER_TABLE_RX_MAC].size) { | 125 | table = &state->table[EFX_FILTER_TABLE_RX_MAC]; |
126 | if (table->size) { | ||
108 | EFX_SET_OWORD_FIELD( | 127 | EFX_SET_OWORD_FIELD( |
109 | filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, | 128 | filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, |
110 | state->search_depth[EFX_FILTER_RX_MAC_FULL] + | 129 | table->search_depth[EFX_FILTER_MAC_FULL] + |
111 | FILTER_CTL_SRCH_FUDGE_FULL); | 130 | FILTER_CTL_SRCH_FUDGE_FULL); |
112 | EFX_SET_OWORD_FIELD( | 131 | EFX_SET_OWORD_FIELD( |
113 | filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, | 132 | filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, |
114 | state->search_depth[EFX_FILTER_RX_MAC_WILD] + | 133 | table->search_depth[EFX_FILTER_MAC_WILD] + |
115 | FILTER_CTL_SRCH_FUDGE_WILD); | 134 | FILTER_CTL_SRCH_FUDGE_WILD); |
116 | } | 135 | } |
117 | 136 | ||
118 | efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); | 137 | efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); |
119 | } | 138 | } |
120 | 139 | ||
140 | static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec, | ||
141 | __be32 host1, __be16 port1, | ||
142 | __be32 host2, __be16 port2) | ||
143 | { | ||
144 | spec->data[0] = ntohl(host1) << 16 | ntohs(port1); | ||
145 | spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; | ||
146 | spec->data[2] = ntohl(host2); | ||
147 | } | ||
148 | |||
149 | /** | ||
150 | * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port | ||
151 | * @spec: Specification to initialise | ||
152 | * @proto: Transport layer protocol number | ||
153 | * @host: Local host address (network byte order) | ||
154 | * @port: Local port (network byte order) | ||
155 | */ | ||
156 | int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, | ||
157 | __be32 host, __be16 port) | ||
158 | { | ||
159 | __be32 host1; | ||
160 | __be16 port1; | ||
161 | |||
162 | EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX)); | ||
163 | |||
164 | /* This cannot currently be combined with other filtering */ | ||
165 | if (spec->type != EFX_FILTER_UNSPEC) | ||
166 | return -EPROTONOSUPPORT; | ||
167 | |||
168 | if (port == 0) | ||
169 | return -EINVAL; | ||
170 | |||
171 | switch (proto) { | ||
172 | case IPPROTO_TCP: | ||
173 | spec->type = EFX_FILTER_TCP_WILD; | ||
174 | break; | ||
175 | case IPPROTO_UDP: | ||
176 | spec->type = EFX_FILTER_UDP_WILD; | ||
177 | break; | ||
178 | default: | ||
179 | return -EPROTONOSUPPORT; | ||
180 | } | ||
181 | |||
182 | /* Filter is constructed in terms of source and destination, | ||
183 | * with the odd wrinkle that the ports are swapped in a UDP | ||
184 | * wildcard filter. We need to convert from local and remote | ||
185 | * (= zero for wildcard) addresses. | ||
186 | */ | ||
187 | host1 = 0; | ||
188 | if (proto != IPPROTO_UDP) { | ||
189 | port1 = 0; | ||
190 | } else { | ||
191 | port1 = port; | ||
192 | port = 0; | ||
193 | } | ||
194 | |||
195 | __efx_filter_set_ipv4(spec, host1, port1, host, port); | ||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | /** | ||
200 | * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports | ||
201 | * @spec: Specification to initialise | ||
202 | * @proto: Transport layer protocol number | ||
203 | * @host: Local host address (network byte order) | ||
204 | * @port: Local port (network byte order) | ||
205 | * @rhost: Remote host address (network byte order) | ||
206 | * @rport: Remote port (network byte order) | ||
207 | */ | ||
208 | int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, | ||
209 | __be32 host, __be16 port, | ||
210 | __be32 rhost, __be16 rport) | ||
211 | { | ||
212 | EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX)); | ||
213 | |||
214 | /* This cannot currently be combined with other filtering */ | ||
215 | if (spec->type != EFX_FILTER_UNSPEC) | ||
216 | return -EPROTONOSUPPORT; | ||
217 | |||
218 | if (port == 0 || rport == 0) | ||
219 | return -EINVAL; | ||
220 | |||
221 | switch (proto) { | ||
222 | case IPPROTO_TCP: | ||
223 | spec->type = EFX_FILTER_TCP_FULL; | ||
224 | break; | ||
225 | case IPPROTO_UDP: | ||
226 | spec->type = EFX_FILTER_UDP_FULL; | ||
227 | break; | ||
228 | default: | ||
229 | return -EPROTONOSUPPORT; | ||
230 | } | ||
231 | |||
232 | __efx_filter_set_ipv4(spec, rhost, rport, host, port); | ||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | /** | ||
237 | * efx_filter_set_eth_local - specify local Ethernet address and optional VID | ||
238 | * @spec: Specification to initialise | ||
239 | * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC | ||
240 | * @addr: Local Ethernet MAC address | ||
241 | */ | ||
242 | int efx_filter_set_eth_local(struct efx_filter_spec *spec, | ||
243 | u16 vid, const u8 *addr) | ||
244 | { | ||
245 | EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX)); | ||
246 | |||
247 | /* This cannot currently be combined with other filtering */ | ||
248 | if (spec->type != EFX_FILTER_UNSPEC) | ||
249 | return -EPROTONOSUPPORT; | ||
250 | |||
251 | if (vid == EFX_FILTER_VID_UNSPEC) { | ||
252 | spec->type = EFX_FILTER_MAC_WILD; | ||
253 | spec->data[0] = 0; | ||
254 | } else { | ||
255 | spec->type = EFX_FILTER_MAC_FULL; | ||
256 | spec->data[0] = vid; | ||
257 | } | ||
258 | |||
259 | spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5]; | ||
260 | spec->data[2] = addr[0] << 8 | addr[1]; | ||
261 | return 0; | ||
262 | } | ||
263 | |||
121 | /* Build a filter entry and return its n-tuple key. */ | 264 | /* Build a filter entry and return its n-tuple key. */ |
122 | static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec) | 265 | static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec) |
123 | { | 266 | { |
124 | u32 data3; | 267 | u32 data3; |
125 | 268 | ||
126 | switch (efx_filter_type_table_id(spec->type)) { | 269 | switch (efx_filter_spec_table_id(spec)) { |
127 | case EFX_FILTER_TABLE_RX_IP: { | 270 | case EFX_FILTER_TABLE_RX_IP: { |
128 | bool is_udp = (spec->type == EFX_FILTER_RX_UDP_FULL || | 271 | bool is_udp = (spec->type == EFX_FILTER_UDP_FULL || |
129 | spec->type == EFX_FILTER_RX_UDP_WILD); | 272 | spec->type == EFX_FILTER_UDP_WILD); |
130 | EFX_POPULATE_OWORD_7( | 273 | EFX_POPULATE_OWORD_7( |
131 | *filter, | 274 | *filter, |
132 | FRF_BZ_RSS_EN, | 275 | FRF_BZ_RSS_EN, |
@@ -143,7 +286,7 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec) | |||
143 | } | 286 | } |
144 | 287 | ||
145 | case EFX_FILTER_TABLE_RX_MAC: { | 288 | case EFX_FILTER_TABLE_RX_MAC: { |
146 | bool is_wild = spec->type == EFX_FILTER_RX_MAC_WILD; | 289 | bool is_wild = spec->type == EFX_FILTER_MAC_WILD; |
147 | EFX_POPULATE_OWORD_8( | 290 | EFX_POPULATE_OWORD_8( |
148 | *filter, | 291 | *filter, |
149 | FRF_CZ_RMFT_RSS_EN, | 292 | FRF_CZ_RMFT_RSS_EN, |
@@ -206,6 +349,14 @@ found: | |||
206 | return filter_idx; | 349 | return filter_idx; |
207 | } | 350 | } |
208 | 351 | ||
352 | /* Construct/deconstruct external filter IDs */ | ||
353 | |||
354 | static inline int | ||
355 | efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index) | ||
356 | { | ||
357 | return table_id << 16 | index; | ||
358 | } | ||
359 | |||
209 | /** | 360 | /** |
210 | * efx_filter_insert_filter - add or replace a filter | 361 | * efx_filter_insert_filter - add or replace a filter |
211 | * @efx: NIC in which to insert the filter | 362 | * @efx: NIC in which to insert the filter |
@@ -213,30 +364,28 @@ found: | |||
213 | * @replace: Flag for whether the specified filter may replace a filter | 364 | * @replace: Flag for whether the specified filter may replace a filter |
214 | * with an identical match expression and equal or lower priority | 365 | * with an identical match expression and equal or lower priority |
215 | * | 366 | * |
216 | * On success, return the filter index within its table. | 367 | * On success, return the filter ID. |
217 | * On failure, return a negative error code. | 368 | * On failure, return a negative error code. |
218 | */ | 369 | */ |
219 | int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, | 370 | int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, |
220 | bool replace) | 371 | bool replace) |
221 | { | 372 | { |
222 | struct efx_filter_state *state = efx->filter_state; | 373 | struct efx_filter_state *state = efx->filter_state; |
223 | enum efx_filter_table_id table_id = | 374 | struct efx_filter_table *table = efx_filter_spec_table(state, spec); |
224 | efx_filter_type_table_id(spec->type); | ||
225 | struct efx_filter_table *table = &state->table[table_id]; | ||
226 | struct efx_filter_spec *saved_spec; | 375 | struct efx_filter_spec *saved_spec; |
227 | efx_oword_t filter; | 376 | efx_oword_t filter; |
228 | int filter_idx, depth; | 377 | int filter_idx, depth; |
229 | u32 key; | 378 | u32 key; |
230 | int rc; | 379 | int rc; |
231 | 380 | ||
232 | if (table->size == 0) | 381 | if (!table || table->size == 0) |
233 | return -EINVAL; | 382 | return -EINVAL; |
234 | 383 | ||
235 | key = efx_filter_build(&filter, spec); | 384 | key = efx_filter_build(&filter, spec); |
236 | 385 | ||
237 | netif_vdbg(efx, hw, efx->net_dev, | 386 | netif_vdbg(efx, hw, efx->net_dev, |
238 | "%s: type %d search_depth=%d", __func__, spec->type, | 387 | "%s: type %d search_depth=%d", __func__, spec->type, |
239 | state->search_depth[spec->type]); | 388 | table->search_depth[spec->type]); |
240 | 389 | ||
241 | spin_lock_bh(&state->lock); | 390 | spin_lock_bh(&state->lock); |
242 | 391 | ||
@@ -263,8 +412,8 @@ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, | |||
263 | } | 412 | } |
264 | *saved_spec = *spec; | 413 | *saved_spec = *spec; |
265 | 414 | ||
266 | if (state->search_depth[spec->type] < depth) { | 415 | if (table->search_depth[spec->type] < depth) { |
267 | state->search_depth[spec->type] = depth; | 416 | table->search_depth[spec->type] = depth; |
268 | efx_filter_push_rx_limits(efx); | 417 | efx_filter_push_rx_limits(efx); |
269 | } | 418 | } |
270 | 419 | ||
@@ -273,6 +422,7 @@ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, | |||
273 | netif_vdbg(efx, hw, efx->net_dev, | 422 | netif_vdbg(efx, hw, efx->net_dev, |
274 | "%s: filter type %d index %d rxq %u set", | 423 | "%s: filter type %d index %d rxq %u set", |
275 | __func__, spec->type, filter_idx, spec->dmaq_id); | 424 | __func__, spec->type, filter_idx, spec->dmaq_id); |
425 | rc = efx_filter_make_id(table->id, filter_idx); | ||
276 | 426 | ||
277 | out: | 427 | out: |
278 | spin_unlock_bh(&state->lock); | 428 | spin_unlock_bh(&state->lock); |
@@ -306,15 +456,16 @@ static void efx_filter_table_clear_entry(struct efx_nic *efx, | |||
306 | int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec) | 456 | int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec) |
307 | { | 457 | { |
308 | struct efx_filter_state *state = efx->filter_state; | 458 | struct efx_filter_state *state = efx->filter_state; |
309 | enum efx_filter_table_id table_id = | 459 | struct efx_filter_table *table = efx_filter_spec_table(state, spec); |
310 | efx_filter_type_table_id(spec->type); | ||
311 | struct efx_filter_table *table = &state->table[table_id]; | ||
312 | struct efx_filter_spec *saved_spec; | 460 | struct efx_filter_spec *saved_spec; |
313 | efx_oword_t filter; | 461 | efx_oword_t filter; |
314 | int filter_idx, depth; | 462 | int filter_idx, depth; |
315 | u32 key; | 463 | u32 key; |
316 | int rc; | 464 | int rc; |
317 | 465 | ||
466 | if (!table) | ||
467 | return -EINVAL; | ||
468 | |||
318 | key = efx_filter_build(&filter, spec); | 469 | key = efx_filter_build(&filter, spec); |
319 | 470 | ||
320 | spin_lock_bh(&state->lock); | 471 | spin_lock_bh(&state->lock); |
@@ -332,7 +483,7 @@ int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec) | |||
332 | 483 | ||
333 | efx_filter_table_clear_entry(efx, table, filter_idx); | 484 | efx_filter_table_clear_entry(efx, table, filter_idx); |
334 | if (table->used == 0) | 485 | if (table->used == 0) |
335 | efx_filter_table_reset_search_depth(state, table_id); | 486 | efx_filter_table_reset_search_depth(table); |
336 | rc = 0; | 487 | rc = 0; |
337 | 488 | ||
338 | out: | 489 | out: |
@@ -340,15 +491,9 @@ out: | |||
340 | return rc; | 491 | return rc; |
341 | } | 492 | } |
342 | 493 | ||
343 | /** | 494 | static void efx_filter_table_clear(struct efx_nic *efx, |
344 | * efx_filter_table_clear - remove filters from a table by priority | 495 | enum efx_filter_table_id table_id, |
345 | * @efx: NIC from which to remove the filters | 496 | enum efx_filter_priority priority) |
346 | * @table_id: Table from which to remove the filters | ||
347 | * @priority: Maximum priority to remove | ||
348 | */ | ||
349 | void efx_filter_table_clear(struct efx_nic *efx, | ||
350 | enum efx_filter_table_id table_id, | ||
351 | enum efx_filter_priority priority) | ||
352 | { | 497 | { |
353 | struct efx_filter_state *state = efx->filter_state; | 498 | struct efx_filter_state *state = efx->filter_state; |
354 | struct efx_filter_table *table = &state->table[table_id]; | 499 | struct efx_filter_table *table = &state->table[table_id]; |
@@ -360,11 +505,22 @@ void efx_filter_table_clear(struct efx_nic *efx, | |||
360 | if (table->spec[filter_idx].priority <= priority) | 505 | if (table->spec[filter_idx].priority <= priority) |
361 | efx_filter_table_clear_entry(efx, table, filter_idx); | 506 | efx_filter_table_clear_entry(efx, table, filter_idx); |
362 | if (table->used == 0) | 507 | if (table->used == 0) |
363 | efx_filter_table_reset_search_depth(state, table_id); | 508 | efx_filter_table_reset_search_depth(table); |
364 | 509 | ||
365 | spin_unlock_bh(&state->lock); | 510 | spin_unlock_bh(&state->lock); |
366 | } | 511 | } |
367 | 512 | ||
513 | /** | ||
514 | * efx_filter_clear_rx - remove RX filters by priority | ||
515 | * @efx: NIC from which to remove the filters | ||
516 | * @priority: Maximum priority to remove | ||
517 | */ | ||
518 | void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority) | ||
519 | { | ||
520 | efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority); | ||
521 | efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority); | ||
522 | } | ||
523 | |||
368 | /* Restore filter state after reset */ | 524 | ||
369 | void efx_restore_filters(struct efx_nic *efx) | 525 | void efx_restore_filters(struct efx_nic *efx) |
370 | { | 526 | { |
@@ -407,6 +563,7 @@ int efx_probe_filters(struct efx_nic *efx) | |||
407 | 563 | ||
408 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | 564 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { |
409 | table = &state->table[EFX_FILTER_TABLE_RX_IP]; | 565 | table = &state->table[EFX_FILTER_TABLE_RX_IP]; |
566 | table->id = EFX_FILTER_TABLE_RX_IP; | ||
410 | table->offset = FR_BZ_RX_FILTER_TBL0; | 567 | table->offset = FR_BZ_RX_FILTER_TBL0; |
411 | table->size = FR_BZ_RX_FILTER_TBL0_ROWS; | 568 | table->size = FR_BZ_RX_FILTER_TBL0_ROWS; |
412 | table->step = FR_BZ_RX_FILTER_TBL0_STEP; | 569 | table->step = FR_BZ_RX_FILTER_TBL0_STEP; |
@@ -414,6 +571,7 @@ int efx_probe_filters(struct efx_nic *efx) | |||
414 | 571 | ||
415 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { | 572 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { |
416 | table = &state->table[EFX_FILTER_TABLE_RX_MAC]; | 573 | table = &state->table[EFX_FILTER_TABLE_RX_MAC]; |
574 | table->id = EFX_FILTER_TABLE_RX_MAC; | ||
417 | table->offset = FR_CZ_RX_MAC_FILTER_TBL0; | 575 | table->offset = FR_CZ_RX_MAC_FILTER_TBL0; |
418 | table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; | 576 | table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; |
419 | table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; | 577 | table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; |
diff --git a/drivers/net/sfc/filter.h b/drivers/net/sfc/filter.h index a53319ded79c..872f2132a496 100644 --- a/drivers/net/sfc/filter.h +++ b/drivers/net/sfc/filter.h | |||
@@ -12,31 +12,27 @@ | |||
12 | 12 | ||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | 14 | ||
15 | enum efx_filter_table_id { | ||
16 | EFX_FILTER_TABLE_RX_IP = 0, | ||
17 | EFX_FILTER_TABLE_RX_MAC, | ||
18 | EFX_FILTER_TABLE_COUNT, | ||
19 | }; | ||
20 | |||
21 | /** | 15 | /** |
22 | * enum efx_filter_type - type of hardware filter | 16 | * enum efx_filter_type - type of hardware filter |
23 | * @EFX_FILTER_RX_TCP_FULL: RX, matching TCP/IPv4 4-tuple | 17 | * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple |
24 | * @EFX_FILTER_RX_TCP_WILD: RX, matching TCP/IPv4 destination (host, port) | 18 | * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port) |
25 | * @EFX_FILTER_RX_UDP_FULL: RX, matching UDP/IPv4 4-tuple | 19 | * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple |
26 | * @EFX_FILTER_RX_UDP_WILD: RX, matching UDP/IPv4 destination (host, port) | 20 | * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port) |
27 | * @EFX_FILTER_RX_MAC_FULL: RX, matching Ethernet destination MAC address, VID | 21 | * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID |
28 | * @EFX_FILTER_RX_MAC_WILD: RX, matching Ethernet destination MAC address | 22 | * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address |
23 | * @EFX_FILTER_UNSPEC: Match type is unspecified | ||
29 | * | 24 | * |
30 | * Falcon NICs only support the RX TCP/IPv4 and UDP/IPv4 filter types. | 25 | * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types. |
31 | */ | 26 | */ |
32 | enum efx_filter_type { | 27 | enum efx_filter_type { |
33 | EFX_FILTER_RX_TCP_FULL = 0, | 28 | EFX_FILTER_TCP_FULL = 0, |
34 | EFX_FILTER_RX_TCP_WILD, | 29 | EFX_FILTER_TCP_WILD, |
35 | EFX_FILTER_RX_UDP_FULL, | 30 | EFX_FILTER_UDP_FULL, |
36 | EFX_FILTER_RX_UDP_WILD, | 31 | EFX_FILTER_UDP_WILD, |
37 | EFX_FILTER_RX_MAC_FULL = 4, | 32 | EFX_FILTER_MAC_FULL = 4, |
38 | EFX_FILTER_RX_MAC_WILD, | 33 | EFX_FILTER_MAC_WILD, |
39 | EFX_FILTER_TYPE_COUNT, | 34 | EFX_FILTER_TYPE_COUNT, /* number of specific types */ |
35 | EFX_FILTER_UNSPEC = 0xf, | ||
40 | }; | 36 | }; |
41 | 37 | ||
42 | /** | 38 | /** |
@@ -63,13 +59,13 @@ enum efx_filter_priority { | |||
63 | * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override | 59 | * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override |
64 | * any IP filter that matches the same packet. By default, IP | 60 | * any IP filter that matches the same packet. By default, IP |
65 | * filters take precedence. | 61 | * filters take precedence. |
66 | * | 62 | * @EFX_FILTER_FLAG_RX: Filter is for RX |
67 | * Currently, no flags are defined for TX filters. | ||
68 | */ | 63 | */ |
69 | enum efx_filter_flags { | 64 | enum efx_filter_flags { |
70 | EFX_FILTER_FLAG_RX_RSS = 0x01, | 65 | EFX_FILTER_FLAG_RX_RSS = 0x01, |
71 | EFX_FILTER_FLAG_RX_SCATTER = 0x02, | 66 | EFX_FILTER_FLAG_RX_SCATTER = 0x02, |
72 | EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04, | 67 | EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04, |
68 | EFX_FILTER_FLAG_RX = 0x08, | ||
73 | }; | 69 | }; |
74 | 70 | ||
75 | /** | 71 | /** |
@@ -91,99 +87,26 @@ struct efx_filter_spec { | |||
91 | u32 data[3]; | 87 | u32 data[3]; |
92 | }; | 88 | }; |
93 | 89 | ||
94 | /** | 90 | static inline void efx_filter_init_rx(struct efx_filter_spec *spec, |
95 | * efx_filter_set_rx_tcp_full - specify RX filter with TCP/IPv4 full match | 91 | enum efx_filter_priority priority, |
96 | * @spec: Specification to initialise | 92 | enum efx_filter_flags flags, |
97 | * @shost: Source host address (host byte order) | 93 | unsigned rxq_id) |
98 | * @sport: Source port (host byte order) | ||
99 | * @dhost: Destination host address (host byte order) | ||
100 | * @dport: Destination port (host byte order) | ||
101 | */ | ||
102 | static inline void | ||
103 | efx_filter_set_rx_tcp_full(struct efx_filter_spec *spec, | ||
104 | u32 shost, u16 sport, u32 dhost, u16 dport) | ||
105 | { | ||
106 | spec->type = EFX_FILTER_RX_TCP_FULL; | ||
107 | spec->data[0] = sport | shost << 16; | ||
108 | spec->data[1] = dport << 16 | shost >> 16; | ||
109 | spec->data[2] = dhost; | ||
110 | } | ||
111 | |||
112 | /** | ||
113 | * efx_filter_set_rx_tcp_wild - specify RX filter with TCP/IPv4 wildcard match | ||
114 | * @spec: Specification to initialise | ||
115 | * @dhost: Destination host address (host byte order) | ||
116 | * @dport: Destination port (host byte order) | ||
117 | */ | ||
118 | static inline void | ||
119 | efx_filter_set_rx_tcp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport) | ||
120 | { | ||
121 | spec->type = EFX_FILTER_RX_TCP_WILD; | ||
122 | spec->data[0] = 0; | ||
123 | spec->data[1] = dport << 16; | ||
124 | spec->data[2] = dhost; | ||
125 | } | ||
126 | |||
127 | /** | ||
128 | * efx_filter_set_rx_udp_full - specify RX filter with UDP/IPv4 full match | ||
129 | * @spec: Specification to initialise | ||
130 | * @shost: Source host address (host byte order) | ||
131 | * @sport: Source port (host byte order) | ||
132 | * @dhost: Destination host address (host byte order) | ||
133 | * @dport: Destination port (host byte order) | ||
134 | */ | ||
135 | static inline void | ||
136 | efx_filter_set_rx_udp_full(struct efx_filter_spec *spec, | ||
137 | u32 shost, u16 sport, u32 dhost, u16 dport) | ||
138 | { | ||
139 | spec->type = EFX_FILTER_RX_UDP_FULL; | ||
140 | spec->data[0] = sport | shost << 16; | ||
141 | spec->data[1] = dport << 16 | shost >> 16; | ||
142 | spec->data[2] = dhost; | ||
143 | } | ||
144 | |||
145 | /** | ||
146 | * efx_filter_set_rx_udp_wild - specify RX filter with UDP/IPv4 wildcard match | ||
147 | * @spec: Specification to initialise | ||
148 | * @dhost: Destination host address (host byte order) | ||
149 | * @dport: Destination port (host byte order) | ||
150 | */ | ||
151 | static inline void | ||
152 | efx_filter_set_rx_udp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport) | ||
153 | { | 94 | { |
154 | spec->type = EFX_FILTER_RX_UDP_WILD; | 95 | spec->type = EFX_FILTER_UNSPEC; |
155 | spec->data[0] = dport; | 96 | spec->priority = priority; |
156 | spec->data[1] = 0; | 97 | spec->flags = EFX_FILTER_FLAG_RX | flags; |
157 | spec->data[2] = dhost; | 98 | spec->dmaq_id = rxq_id; |
158 | } | 99 | } |
159 | 100 | ||
160 | /** | 101 | extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, |
161 | * efx_filter_set_rx_mac_full - specify RX filter with MAC full match | 102 | __be32 host, __be16 port); |
162 | * @spec: Specification to initialise | 103 | extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, |
163 | * @vid: VLAN ID | 104 | __be32 host, __be16 port, |
164 | * @addr: Destination MAC address | 105 | __be32 rhost, __be16 rport); |
165 | */ | 106 | extern int efx_filter_set_eth_local(struct efx_filter_spec *spec, |
166 | static inline void efx_filter_set_rx_mac_full(struct efx_filter_spec *spec, | 107 | u16 vid, const u8 *addr); |
167 | u16 vid, const u8 *addr) | 108 | enum { |
168 | { | 109 | EFX_FILTER_VID_UNSPEC = 0xffff, |
169 | spec->type = EFX_FILTER_RX_MAC_FULL; | 110 | }; |
170 | spec->data[0] = vid; | ||
171 | spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5]; | ||
172 | spec->data[2] = addr[0] << 8 | addr[1]; | ||
173 | } | ||
174 | |||
175 | /** | ||
176 | * efx_filter_set_rx_mac_full - specify RX filter with MAC wildcard match | ||
177 | * @spec: Specification to initialise | ||
178 | * @addr: Destination MAC address | ||
179 | */ | ||
180 | static inline void efx_filter_set_rx_mac_wild(struct efx_filter_spec *spec, | ||
181 | const u8 *addr) | ||
182 | { | ||
183 | spec->type = EFX_FILTER_RX_MAC_WILD; | ||
184 | spec->data[0] = 0; | ||
185 | spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5]; | ||
186 | spec->data[2] = addr[0] << 8 | addr[1]; | ||
187 | } | ||
188 | 111 | ||
189 | #endif /* EFX_FILTER_H */ | 112 | #endif /* EFX_FILTER_H */ |
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h index 85a99fe87437..6da4ae20a039 100644 --- a/drivers/net/sfc/io.h +++ b/drivers/net/sfc/io.h | |||
@@ -22,28 +22,39 @@ | |||
22 | * | 22 | * |
23 | * Notes on locking strategy: | 23 | * Notes on locking strategy: |
24 | * | 24 | * |
25 | * Most NIC registers require 16-byte (or 8-byte, for SRAM) atomic writes | 25 | * Most CSRs are 128-bit (oword) and therefore cannot be read or |
26 | * which necessitates locking. | 26 | * written atomically. Access from the host is buffered by the Bus |
27 | * Under normal operation few writes to NIC registers are made and these | 27 | * Interface Unit (BIU). Whenever the host reads from the lowest |
28 | * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special | 28 | * address of such a register, or from the address of a different such |
29 | * cased to allow 4-byte (hence lockless) accesses. | 29 | * register, the BIU latches the register's value. Subsequent reads |
30 | * from higher addresses of the same register will read the latched | ||
31 | * value. Whenever the host writes part of such a register, the BIU | ||
32 | * collects the written value and does not write to the underlying | ||
33 | * register until all 4 dwords have been written. A similar buffering | ||
34 | * scheme applies to host access to the NIC's 64-bit SRAM. | ||
30 | * | 35 | * |
31 | * It *is* safe to write to these 4-byte registers in the middle of an | 36 | * Access to different CSRs and 64-bit SRAM words must be serialised, |
32 | * access to an 8-byte or 16-byte register. We therefore use a | 37 | * since interleaved access can result in lost writes or lost |
33 | * spinlock to protect accesses to the larger registers, but no locks | 38 | * information from read-to-clear fields. We use efx_nic::biu_lock |
34 | * for the 4-byte registers. | 39 | * for this. (We could use separate locks for read and write, but |
40 | * this is not normally a performance bottleneck.) | ||
35 | * | 41 | * |
36 | * A write barrier is needed to ensure that DW3 is written after DW0/1/2 | 42 | * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are |
37 | * due to the way the 16byte registers are "collected" in the BIU. | 43 | * 128-bit but are special-cased in the BIU to avoid the need for |
44 | * locking in the host: | ||
38 | * | 45 | * |
39 | * We also lock when carrying out reads, to ensure consistency of the | 46 | * - They are write-only. |
40 | * data (made possible since the BIU reads all 128 bits into a cache). | 47 | * - The semantics of writing to these registers are such that |
41 | * Reads are very rare, so this isn't a significant performance | 48 | * replacing the low 96 bits with zero does not affect functionality. |
42 | * impact. (Most data transferred from NIC to host is DMAed directly | 49 | * - If the host writes to the last dword address of such a register |
43 | * into host memory). | 50 | * (i.e. the high 32 bits) the underlying register will always be |
44 | * | 51 | * written. If the collector does not hold values for the low 96 |
45 | * I/O BAR access uses locks for both reads and writes (but is only provided | 52 | * bits of the register, they will be written as zero. Writing to |
46 | * for testing purposes). | 53 | * the last qword does not have this effect and must not be done. |
54 | * - If the host writes to the address of any other part of such a | ||
55 | * register while the collector already holds values for some other | ||
56 | * register, the write is discarded and the collector maintains its | ||
57 | * current state. | ||
47 | */ | 58 | */ |
48 | 59 | ||
49 | #if BITS_PER_LONG == 64 | 60 | #if BITS_PER_LONG == 64 |
@@ -72,7 +83,7 @@ static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg) | |||
72 | return (__force __le32)__raw_readl(efx->membase + reg); | 83 | return (__force __le32)__raw_readl(efx->membase + reg); |
73 | } | 84 | } |
74 | 85 | ||
75 | /* Writes to a normal 16-byte Efx register, locking as appropriate. */ | 86 | /* Write a normal 128-bit CSR, locking as appropriate. */ |
76 | static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, | 87 | static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, |
77 | unsigned int reg) | 88 | unsigned int reg) |
78 | { | 89 | { |
@@ -85,21 +96,18 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, | |||
85 | spin_lock_irqsave(&efx->biu_lock, flags); | 96 | spin_lock_irqsave(&efx->biu_lock, flags); |
86 | #ifdef EFX_USE_QWORD_IO | 97 | #ifdef EFX_USE_QWORD_IO |
87 | _efx_writeq(efx, value->u64[0], reg + 0); | 98 | _efx_writeq(efx, value->u64[0], reg + 0); |
88 | wmb(); | ||
89 | _efx_writeq(efx, value->u64[1], reg + 8); | 99 | _efx_writeq(efx, value->u64[1], reg + 8); |
90 | #else | 100 | #else |
91 | _efx_writed(efx, value->u32[0], reg + 0); | 101 | _efx_writed(efx, value->u32[0], reg + 0); |
92 | _efx_writed(efx, value->u32[1], reg + 4); | 102 | _efx_writed(efx, value->u32[1], reg + 4); |
93 | _efx_writed(efx, value->u32[2], reg + 8); | 103 | _efx_writed(efx, value->u32[2], reg + 8); |
94 | wmb(); | ||
95 | _efx_writed(efx, value->u32[3], reg + 12); | 104 | _efx_writed(efx, value->u32[3], reg + 12); |
96 | #endif | 105 | #endif |
97 | mmiowb(); | 106 | mmiowb(); |
98 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 107 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
99 | } | 108 | } |
100 | 109 | ||
101 | /* Write an 8-byte NIC SRAM entry through the supplied mapping, | 110 | /* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */ |
102 | * locking as appropriate. */ | ||
103 | static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, | 111 | static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, |
104 | efx_qword_t *value, unsigned int index) | 112 | efx_qword_t *value, unsigned int index) |
105 | { | 113 | { |
@@ -115,36 +123,25 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, | |||
115 | __raw_writeq((__force u64)value->u64[0], membase + addr); | 123 | __raw_writeq((__force u64)value->u64[0], membase + addr); |
116 | #else | 124 | #else |
117 | __raw_writel((__force u32)value->u32[0], membase + addr); | 125 | __raw_writel((__force u32)value->u32[0], membase + addr); |
118 | wmb(); | ||
119 | __raw_writel((__force u32)value->u32[1], membase + addr + 4); | 126 | __raw_writel((__force u32)value->u32[1], membase + addr + 4); |
120 | #endif | 127 | #endif |
121 | mmiowb(); | 128 | mmiowb(); |
122 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 129 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
123 | } | 130 | } |
124 | 131 | ||
125 | /* Write dword to NIC register that allows partial writes | 132 | /* Write a 32-bit CSR or the last dword of a special 128-bit CSR */ |
126 | * | ||
127 | * Some registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and | ||
128 | * TX_DESC_UPD_REG) can be written to as a single dword. This allows | ||
129 | * for lockless writes. | ||
130 | */ | ||
131 | static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, | 133 | static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, |
132 | unsigned int reg) | 134 | unsigned int reg) |
133 | { | 135 | { |
134 | netif_vdbg(efx, hw, efx->net_dev, | 136 | netif_vdbg(efx, hw, efx->net_dev, |
135 | "writing partial register %x with "EFX_DWORD_FMT"\n", | 137 | "writing register %x with "EFX_DWORD_FMT"\n", |
136 | reg, EFX_DWORD_VAL(*value)); | 138 | reg, EFX_DWORD_VAL(*value)); |
137 | 139 | ||
138 | /* No lock required */ | 140 | /* No lock required */ |
139 | _efx_writed(efx, value->u32[0], reg); | 141 | _efx_writed(efx, value->u32[0], reg); |
140 | } | 142 | } |
141 | 143 | ||
142 | /* Read from a NIC register | 144 | /* Read a 128-bit CSR, locking as appropriate. */ |
143 | * | ||
144 | * This reads an entire 16-byte register in one go, locking as | ||
145 | * appropriate. It is essential to read the first dword first, as this | ||
146 | * prompts the NIC to load the current value into the shadow register. | ||
147 | */ | ||
148 | static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, | 145 | static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, |
149 | unsigned int reg) | 146 | unsigned int reg) |
150 | { | 147 | { |
@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, | |||
152 | 149 | ||
153 | spin_lock_irqsave(&efx->biu_lock, flags); | 150 | spin_lock_irqsave(&efx->biu_lock, flags); |
154 | value->u32[0] = _efx_readd(efx, reg + 0); | 151 | value->u32[0] = _efx_readd(efx, reg + 0); |
155 | rmb(); | ||
156 | value->u32[1] = _efx_readd(efx, reg + 4); | 152 | value->u32[1] = _efx_readd(efx, reg + 4); |
157 | value->u32[2] = _efx_readd(efx, reg + 8); | 153 | value->u32[2] = _efx_readd(efx, reg + 8); |
158 | value->u32[3] = _efx_readd(efx, reg + 12); | 154 | value->u32[3] = _efx_readd(efx, reg + 12); |
@@ -163,8 +159,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, | |||
163 | EFX_OWORD_VAL(*value)); | 159 | EFX_OWORD_VAL(*value)); |
164 | } | 160 | } |
165 | 161 | ||
166 | /* Read an 8-byte SRAM entry through supplied mapping, | 162 | /* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */ |
167 | * locking as appropriate. */ | ||
168 | static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, | 163 | static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, |
169 | efx_qword_t *value, unsigned int index) | 164 | efx_qword_t *value, unsigned int index) |
170 | { | 165 | { |
@@ -176,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, | |||
176 | value->u64[0] = (__force __le64)__raw_readq(membase + addr); | 171 | value->u64[0] = (__force __le64)__raw_readq(membase + addr); |
177 | #else | 172 | #else |
178 | value->u32[0] = (__force __le32)__raw_readl(membase + addr); | 173 | value->u32[0] = (__force __le32)__raw_readl(membase + addr); |
179 | rmb(); | ||
180 | value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); | 174 | value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); |
181 | #endif | 175 | #endif |
182 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 176 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
@@ -186,7 +180,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, | |||
186 | addr, EFX_QWORD_VAL(*value)); | 180 | addr, EFX_QWORD_VAL(*value)); |
187 | } | 181 | } |
188 | 182 | ||
189 | /* Read dword from register that allows partial writes (sic) */ | 183 | /* Read a 32-bit CSR or SRAM */ |
190 | static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, | 184 | static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, |
191 | unsigned int reg) | 185 | unsigned int reg) |
192 | { | 186 | { |
@@ -196,28 +190,28 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, | |||
196 | reg, EFX_DWORD_VAL(*value)); | 190 | reg, EFX_DWORD_VAL(*value)); |
197 | } | 191 | } |
198 | 192 | ||
199 | /* Write to a register forming part of a table */ | 193 | /* Write a 128-bit CSR forming part of a table */ |
200 | static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value, | 194 | static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value, |
201 | unsigned int reg, unsigned int index) | 195 | unsigned int reg, unsigned int index) |
202 | { | 196 | { |
203 | efx_writeo(efx, value, reg + index * sizeof(efx_oword_t)); | 197 | efx_writeo(efx, value, reg + index * sizeof(efx_oword_t)); |
204 | } | 198 | } |
205 | 199 | ||
206 | /* Read to a register forming part of a table */ | 200 | /* Read a 128-bit CSR forming part of a table */ |
207 | static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, | 201 | static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, |
208 | unsigned int reg, unsigned int index) | 202 | unsigned int reg, unsigned int index) |
209 | { | 203 | { |
210 | efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); | 204 | efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); |
211 | } | 205 | } |
212 | 206 | ||
213 | /* Write to a dword register forming part of a table */ | 207 | /* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */ |
214 | static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value, | 208 | static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value, |
215 | unsigned int reg, unsigned int index) | 209 | unsigned int reg, unsigned int index) |
216 | { | 210 | { |
217 | efx_writed(efx, value, reg + index * sizeof(efx_oword_t)); | 211 | efx_writed(efx, value, reg + index * sizeof(efx_oword_t)); |
218 | } | 212 | } |
219 | 213 | ||
220 | /* Read from a dword register forming part of a table */ | 214 | /* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */ |
221 | static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value, | 215 | static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value, |
222 | unsigned int reg, unsigned int index) | 216 | unsigned int reg, unsigned int index) |
223 | { | 217 | { |
@@ -231,29 +225,54 @@ static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value, | |||
231 | #define EFX_PAGED_REG(page, reg) \ | 225 | #define EFX_PAGED_REG(page, reg) \ |
232 | ((page) * EFX_PAGE_BLOCK_SIZE + (reg)) | 226 | ((page) * EFX_PAGE_BLOCK_SIZE + (reg)) |
233 | 227 | ||
234 | /* As for efx_writeo(), but for a page-mapped register. */ | 228 | /* Write the whole of RX_DESC_UPD or TX_DESC_UPD */ |
235 | static inline void efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, | 229 | static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, |
236 | unsigned int reg, unsigned int page) | 230 | unsigned int reg, unsigned int page) |
237 | { | 231 | { |
238 | efx_writeo(efx, value, EFX_PAGED_REG(page, reg)); | 232 | reg = EFX_PAGED_REG(page, reg); |
239 | } | ||
240 | 233 | ||
241 | /* As for efx_writed(), but for a page-mapped register. */ | 234 | netif_vdbg(efx, hw, efx->net_dev, |
242 | static inline void efx_writed_page(struct efx_nic *efx, efx_dword_t *value, | 235 | "writing register %x with " EFX_OWORD_FMT "\n", reg, |
243 | unsigned int reg, unsigned int page) | 236 | EFX_OWORD_VAL(*value)); |
237 | |||
238 | #ifdef EFX_USE_QWORD_IO | ||
239 | _efx_writeq(efx, value->u64[0], reg + 0); | ||
240 | #else | ||
241 | _efx_writed(efx, value->u32[0], reg + 0); | ||
242 | _efx_writed(efx, value->u32[1], reg + 4); | ||
243 | #endif | ||
244 | _efx_writed(efx, value->u32[2], reg + 8); | ||
245 | _efx_writed(efx, value->u32[3], reg + 12); | ||
246 | } | ||
247 | #define efx_writeo_page(efx, value, reg, page) \ | ||
248 | _efx_writeo_page(efx, value, \ | ||
249 | reg + \ | ||
250 | BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \ | ||
251 | page) | ||
252 | |||
253 | /* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of | ||
254 | * RX_DESC_UPD or TX_DESC_UPD) | ||
255 | */ | ||
256 | static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value, | ||
257 | unsigned int reg, unsigned int page) | ||
244 | { | 258 | { |
245 | efx_writed(efx, value, EFX_PAGED_REG(page, reg)); | 259 | efx_writed(efx, value, EFX_PAGED_REG(page, reg)); |
246 | } | 260 | } |
247 | 261 | #define efx_writed_page(efx, value, reg, page) \ | |
248 | /* Write dword to page-mapped register with an extra lock. | 262 | _efx_writed_page(efx, value, \ |
249 | * | 263 | reg + \ |
250 | * As for efx_writed_page(), but for a register that suffers from | 264 | BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \ |
251 | * SFC bug 3181. Take out a lock so the BIU collector cannot be | 265 | && (reg) != 0xa1c), \ |
252 | * confused. */ | 266 | page) |
253 | static inline void efx_writed_page_locked(struct efx_nic *efx, | 267 | |
254 | efx_dword_t *value, | 268 | /* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug |
255 | unsigned int reg, | 269 | * in the BIU means that writes to TIMER_COMMAND[0] invalidate the |
256 | unsigned int page) | 270 | * collector register. |
271 | */ | ||
272 | static inline void _efx_writed_page_locked(struct efx_nic *efx, | ||
273 | efx_dword_t *value, | ||
274 | unsigned int reg, | ||
275 | unsigned int page) | ||
257 | { | 276 | { |
258 | unsigned long flags __attribute__ ((unused)); | 277 | unsigned long flags __attribute__ ((unused)); |
259 | 278 | ||
@@ -265,5 +284,9 @@ static inline void efx_writed_page_locked(struct efx_nic *efx, | |||
265 | efx_writed(efx, value, EFX_PAGED_REG(page, reg)); | 284 | efx_writed(efx, value, EFX_PAGED_REG(page, reg)); |
266 | } | 285 | } |
267 | } | 286 | } |
287 | #define efx_writed_page_locked(efx, value, reg, page) \ | ||
288 | _efx_writed_page_locked(efx, value, \ | ||
289 | reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \ | ||
290 | page) | ||
268 | 291 | ||
269 | #endif /* EFX_IO_H */ | 292 | #endif /* EFX_IO_H */ |
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index 4c12332434b7..76f2fb197f0a 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h | |||
@@ -142,6 +142,12 @@ struct efx_tx_buffer { | |||
142 | * @flushed: Used when handling queue flushing | 142 | * @flushed: Used when handling queue flushing |
143 | * @read_count: Current read pointer. | 143 | * @read_count: Current read pointer. |
144 | * This is the number of buffers that have been removed from both rings. | 144 | * This is the number of buffers that have been removed from both rings. |
145 | * @old_write_count: The value of @write_count when last checked. | ||
146 | * This is here for performance reasons. The xmit path will | ||
147 | * only get the up-to-date value of @write_count if this | ||
148 | * variable indicates that the queue is empty. This is to | ||
149 | * avoid cache-line ping-pong between the xmit path and the | ||
150 | * completion path. | ||
145 | * @stopped: Stopped count. | 151 | * @stopped: Stopped count. |
146 | * Set if this TX queue is currently stopping its port. | 152 | * Set if this TX queue is currently stopping its port. |
147 | * @insert_count: Current insert pointer | 153 | * @insert_count: Current insert pointer |
@@ -163,6 +169,10 @@ struct efx_tx_buffer { | |||
163 | * @tso_long_headers: Number of packets with headers too long for standard | 169 | * @tso_long_headers: Number of packets with headers too long for standard |
164 | * blocks | 170 | * blocks |
165 | * @tso_packets: Number of packets via the TSO xmit path | 171 | * @tso_packets: Number of packets via the TSO xmit path |
172 | * @pushes: Number of times the TX push feature has been used | ||
173 | * @empty_read_count: If the completion path has seen the queue as empty | ||
174 | * and the transmission path has not yet checked this, the value of | ||
175 | * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0. | ||
166 | */ | 176 | */ |
167 | struct efx_tx_queue { | 177 | struct efx_tx_queue { |
168 | /* Members which don't change on the fast path */ | 178 | /* Members which don't change on the fast path */ |
@@ -177,6 +187,7 @@ struct efx_tx_queue { | |||
177 | 187 | ||
178 | /* Members used mainly on the completion path */ | 188 | /* Members used mainly on the completion path */ |
179 | unsigned int read_count ____cacheline_aligned_in_smp; | 189 | unsigned int read_count ____cacheline_aligned_in_smp; |
190 | unsigned int old_write_count; | ||
180 | int stopped; | 191 | int stopped; |
181 | 192 | ||
182 | /* Members used only on the xmit path */ | 193 | /* Members used only on the xmit path */ |
@@ -187,6 +198,11 @@ struct efx_tx_queue { | |||
187 | unsigned int tso_bursts; | 198 | unsigned int tso_bursts; |
188 | unsigned int tso_long_headers; | 199 | unsigned int tso_long_headers; |
189 | unsigned int tso_packets; | 200 | unsigned int tso_packets; |
201 | unsigned int pushes; | ||
202 | |||
203 | /* Members shared between paths and sometimes updated */ | ||
204 | unsigned int empty_read_count ____cacheline_aligned_in_smp; | ||
205 | #define EFX_EMPTY_COUNT_VALID 0x80000000 | ||
190 | }; | 206 | }; |
191 | 207 | ||
192 | /** | 208 | /** |
@@ -626,10 +642,8 @@ struct efx_filter_state; | |||
626 | * Work items do not hold and must not acquire RTNL. | 642 | * Work items do not hold and must not acquire RTNL. |
627 | * @workqueue_name: Name of workqueue | 643 | * @workqueue_name: Name of workqueue |
628 | * @reset_work: Scheduled reset workitem | 644 | * @reset_work: Scheduled reset workitem |
629 | * @monitor_work: Hardware monitor workitem | ||
630 | * @membase_phys: Memory BAR value as physical address | 645 | * @membase_phys: Memory BAR value as physical address |
631 | * @membase: Memory BAR value | 646 | * @membase: Memory BAR value |
632 | * @biu_lock: BIU (bus interface unit) lock | ||
633 | * @interrupt_mode: Interrupt mode | 647 | * @interrupt_mode: Interrupt mode |
634 | * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues | 648 | * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues |
635 | * @irq_rx_moderation: IRQ moderation time for RX event queues | 649 | * @irq_rx_moderation: IRQ moderation time for RX event queues |
@@ -653,14 +667,9 @@ struct efx_filter_state; | |||
653 | * @int_error_count: Number of internal errors seen recently | 667 | * @int_error_count: Number of internal errors seen recently |
654 | * @int_error_expire: Time at which error count will be expired | 668 | * @int_error_expire: Time at which error count will be expired |
655 | * @irq_status: Interrupt status buffer | 669 | * @irq_status: Interrupt status buffer |
656 | * @last_irq_cpu: Last CPU to handle interrupt. | ||
657 | * This register is written with the SMP processor ID whenever an | ||
658 | * interrupt is handled. It is used by efx_nic_test_interrupt() | ||
659 | * to verify that an interrupt has occurred. | ||
660 | * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 | 670 | * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 |
661 | * @fatal_irq_level: IRQ level (bit number) used for serious errors | 671 | * @fatal_irq_level: IRQ level (bit number) used for serious errors |
662 | * @mtd_list: List of MTDs attached to the NIC | 672 | * @mtd_list: List of MTDs attached to the NIC |
663 | * @n_rx_nodesc_drop_cnt: RX no descriptor drop count | ||
664 | * @nic_data: Hardware dependant state | 673 | * @nic_data: Hardware dependant state |
665 | * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, | 674 | * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, |
666 | * @port_inhibited, efx_monitor() and efx_reconfigure_port() | 675 | * @port_inhibited, efx_monitor() and efx_reconfigure_port() |
@@ -673,11 +682,7 @@ struct efx_filter_state; | |||
673 | * @port_initialized: Port initialized? | 682 | * @port_initialized: Port initialized? |
674 | * @net_dev: Operating system network device. Consider holding the rtnl lock | 683 | * @net_dev: Operating system network device. Consider holding the rtnl lock |
675 | * @rx_checksum_enabled: RX checksumming enabled | 684 | * @rx_checksum_enabled: RX checksumming enabled |
676 | * @mac_stats: MAC statistics. These include all statistics the MACs | ||
677 | * can provide. Generic code converts these into a standard | ||
678 | * &struct net_device_stats. | ||
679 | * @stats_buffer: DMA buffer for statistics | 685 | * @stats_buffer: DMA buffer for statistics |
680 | * @stats_lock: Statistics update lock. Serialises statistics fetches | ||
681 | * @mac_op: MAC interface | 686 | * @mac_op: MAC interface |
682 | * @phy_type: PHY type | 687 | * @phy_type: PHY type |
683 | * @phy_op: PHY interface | 688 | * @phy_op: PHY interface |
@@ -695,10 +700,23 @@ struct efx_filter_state; | |||
695 | * @loopback_mode: Loopback status | 700 | * @loopback_mode: Loopback status |
696 | * @loopback_modes: Supported loopback mode bitmask | 701 | * @loopback_modes: Supported loopback mode bitmask |
697 | * @loopback_selftest: Offline self-test private state | 702 | * @loopback_selftest: Offline self-test private state |
703 | * @monitor_work: Hardware monitor workitem | ||
704 | * @biu_lock: BIU (bus interface unit) lock | ||
705 | * @last_irq_cpu: Last CPU to handle interrupt. | ||
706 | * This register is written with the SMP processor ID whenever an | ||
707 | * interrupt is handled. It is used by efx_nic_test_interrupt() | ||
708 | * to verify that an interrupt has occurred. | ||
709 | * @n_rx_nodesc_drop_cnt: RX no descriptor drop count | ||
710 | * @mac_stats: MAC statistics. These include all statistics the MACs | ||
711 | * can provide. Generic code converts these into a standard | ||
712 | * &struct net_device_stats. | ||
713 | * @stats_lock: Statistics update lock. Serialises statistics fetches | ||
698 | * | 714 | * |
699 | * This is stored in the private area of the &struct net_device. | 715 | * This is stored in the private area of the &struct net_device. |
700 | */ | 716 | */ |
701 | struct efx_nic { | 717 | struct efx_nic { |
718 | /* The following fields should be written very rarely */ | ||
719 | |||
702 | char name[IFNAMSIZ]; | 720 | char name[IFNAMSIZ]; |
703 | struct pci_dev *pci_dev; | 721 | struct pci_dev *pci_dev; |
704 | const struct efx_nic_type *type; | 722 | const struct efx_nic_type *type; |
@@ -707,10 +725,9 @@ struct efx_nic { | |||
707 | struct workqueue_struct *workqueue; | 725 | struct workqueue_struct *workqueue; |
708 | char workqueue_name[16]; | 726 | char workqueue_name[16]; |
709 | struct work_struct reset_work; | 727 | struct work_struct reset_work; |
710 | struct delayed_work monitor_work; | ||
711 | resource_size_t membase_phys; | 728 | resource_size_t membase_phys; |
712 | void __iomem *membase; | 729 | void __iomem *membase; |
713 | spinlock_t biu_lock; | 730 | |
714 | enum efx_int_mode interrupt_mode; | 731 | enum efx_int_mode interrupt_mode; |
715 | bool irq_rx_adaptive; | 732 | bool irq_rx_adaptive; |
716 | unsigned int irq_rx_moderation; | 733 | unsigned int irq_rx_moderation; |
@@ -737,7 +754,6 @@ struct efx_nic { | |||
737 | unsigned long int_error_expire; | 754 | unsigned long int_error_expire; |
738 | 755 | ||
739 | struct efx_buffer irq_status; | 756 | struct efx_buffer irq_status; |
740 | volatile signed int last_irq_cpu; | ||
741 | unsigned irq_zero_count; | 757 | unsigned irq_zero_count; |
742 | unsigned fatal_irq_level; | 758 | unsigned fatal_irq_level; |
743 | 759 | ||
@@ -745,8 +761,6 @@ struct efx_nic { | |||
745 | struct list_head mtd_list; | 761 | struct list_head mtd_list; |
746 | #endif | 762 | #endif |
747 | 763 | ||
748 | unsigned n_rx_nodesc_drop_cnt; | ||
749 | |||
750 | void *nic_data; | 764 | void *nic_data; |
751 | 765 | ||
752 | struct mutex mac_lock; | 766 | struct mutex mac_lock; |
@@ -758,9 +772,7 @@ struct efx_nic { | |||
758 | struct net_device *net_dev; | 772 | struct net_device *net_dev; |
759 | bool rx_checksum_enabled; | 773 | bool rx_checksum_enabled; |
760 | 774 | ||
761 | struct efx_mac_stats mac_stats; | ||
762 | struct efx_buffer stats_buffer; | 775 | struct efx_buffer stats_buffer; |
763 | spinlock_t stats_lock; | ||
764 | 776 | ||
765 | struct efx_mac_operations *mac_op; | 777 | struct efx_mac_operations *mac_op; |
766 | 778 | ||
@@ -786,6 +798,15 @@ struct efx_nic { | |||
786 | void *loopback_selftest; | 798 | void *loopback_selftest; |
787 | 799 | ||
788 | struct efx_filter_state *filter_state; | 800 | struct efx_filter_state *filter_state; |
801 | |||
802 | /* The following fields may be written more often */ | ||
803 | |||
804 | struct delayed_work monitor_work ____cacheline_aligned_in_smp; | ||
805 | spinlock_t biu_lock; | ||
806 | volatile signed int last_irq_cpu; | ||
807 | unsigned n_rx_nodesc_drop_cnt; | ||
808 | struct efx_mac_stats mac_stats; | ||
809 | spinlock_t stats_lock; | ||
789 | }; | 810 | }; |
790 | 811 | ||
791 | static inline int efx_dev_registered(struct efx_nic *efx) | 812 | static inline int efx_dev_registered(struct efx_nic *efx) |
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index 399b12abe2fd..da386599ab68 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c | |||
@@ -362,6 +362,35 @@ static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue) | |||
362 | FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); | 362 | FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); |
363 | } | 363 | } |
364 | 364 | ||
365 | /* Write pointer and first descriptor for TX descriptor ring */ | ||
366 | static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue, | ||
367 | const efx_qword_t *txd) | ||
368 | { | ||
369 | unsigned write_ptr; | ||
370 | efx_oword_t reg; | ||
371 | |||
372 | BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); | ||
373 | BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); | ||
374 | |||
375 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | ||
376 | EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, | ||
377 | FRF_AZ_TX_DESC_WPTR, write_ptr); | ||
378 | reg.qword[0] = *txd; | ||
379 | efx_writeo_page(tx_queue->efx, ®, | ||
380 | FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); | ||
381 | } | ||
382 | |||
383 | static inline bool | ||
384 | efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) | ||
385 | { | ||
386 | unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); | ||
387 | |||
388 | if (empty_read_count == 0) | ||
389 | return false; | ||
390 | |||
391 | tx_queue->empty_read_count = 0; | ||
392 | return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; | ||
393 | } | ||
365 | 394 | ||
366 | /* For each entry inserted into the software descriptor ring, create a | 395 | /* For each entry inserted into the software descriptor ring, create a |
367 | * descriptor in the hardware TX descriptor ring (in host memory), and | 396 | * descriptor in the hardware TX descriptor ring (in host memory), and |
@@ -373,6 +402,7 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) | |||
373 | struct efx_tx_buffer *buffer; | 402 | struct efx_tx_buffer *buffer; |
374 | efx_qword_t *txd; | 403 | efx_qword_t *txd; |
375 | unsigned write_ptr; | 404 | unsigned write_ptr; |
405 | unsigned old_write_count = tx_queue->write_count; | ||
376 | 406 | ||
377 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); | 407 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); |
378 | 408 | ||
@@ -391,7 +421,15 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) | |||
391 | } while (tx_queue->write_count != tx_queue->insert_count); | 421 | } while (tx_queue->write_count != tx_queue->insert_count); |
392 | 422 | ||
393 | wmb(); /* Ensure descriptors are written before they are fetched */ | 423 | wmb(); /* Ensure descriptors are written before they are fetched */ |
394 | efx_notify_tx_desc(tx_queue); | 424 | |
425 | if (efx_may_push_tx_desc(tx_queue, old_write_count)) { | ||
426 | txd = efx_tx_desc(tx_queue, | ||
427 | old_write_count & tx_queue->ptr_mask); | ||
428 | efx_push_tx_desc(tx_queue, txd); | ||
429 | ++tx_queue->pushes; | ||
430 | } else { | ||
431 | efx_notify_tx_desc(tx_queue); | ||
432 | } | ||
395 | } | 433 | } |
396 | 434 | ||
397 | /* Allocate hardware resources for a TX queue */ | 435 | /* Allocate hardware resources for a TX queue */ |
@@ -1632,7 +1670,7 @@ void efx_nic_init_common(struct efx_nic *efx) | |||
1632 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); | 1670 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); |
1633 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); | 1671 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); |
1634 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); | 1672 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); |
1635 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0); | 1673 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); |
1636 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); | 1674 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); |
1637 | /* Enable SW_EV to inherit in char driver - assume harmless here */ | 1675 | /* Enable SW_EV to inherit in char driver - assume harmless here */ |
1638 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); | 1676 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); |
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 03194f7c0954..bdb92b4af683 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c | |||
@@ -240,8 +240,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) | |||
240 | * of read_count. */ | 240 | * of read_count. */ |
241 | smp_mb(); | 241 | smp_mb(); |
242 | tx_queue->old_read_count = | 242 | tx_queue->old_read_count = |
243 | *(volatile unsigned *) | 243 | ACCESS_ONCE(tx_queue->read_count); |
244 | &tx_queue->read_count; | ||
245 | fill_level = (tx_queue->insert_count | 244 | fill_level = (tx_queue->insert_count |
246 | - tx_queue->old_read_count); | 245 | - tx_queue->old_read_count); |
247 | q_space = efx->txq_entries - 1 - fill_level; | 246 | q_space = efx->txq_entries - 1 - fill_level; |
@@ -429,6 +428,16 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) | |||
429 | __netif_tx_unlock(queue); | 428 | __netif_tx_unlock(queue); |
430 | } | 429 | } |
431 | } | 430 | } |
431 | |||
432 | /* Check whether the hardware queue is now empty */ | ||
433 | if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { | ||
434 | tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); | ||
435 | if (tx_queue->read_count == tx_queue->old_write_count) { | ||
436 | smp_mb(); | ||
437 | tx_queue->empty_read_count = | ||
438 | tx_queue->read_count | EFX_EMPTY_COUNT_VALID; | ||
439 | } | ||
440 | } | ||
432 | } | 441 | } |
433 | 442 | ||
434 | int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) | 443 | int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) |
@@ -474,8 +483,10 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) | |||
474 | 483 | ||
475 | tx_queue->insert_count = 0; | 484 | tx_queue->insert_count = 0; |
476 | tx_queue->write_count = 0; | 485 | tx_queue->write_count = 0; |
486 | tx_queue->old_write_count = 0; | ||
477 | tx_queue->read_count = 0; | 487 | tx_queue->read_count = 0; |
478 | tx_queue->old_read_count = 0; | 488 | tx_queue->old_read_count = 0; |
489 | tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; | ||
479 | BUG_ON(tx_queue->stopped); | 490 | BUG_ON(tx_queue->stopped); |
480 | 491 | ||
481 | /* Set up TX descriptor ring */ | 492 | /* Set up TX descriptor ring */ |
@@ -764,7 +775,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, | |||
764 | * stopped from the access of read_count. */ | 775 | * stopped from the access of read_count. */ |
765 | smp_mb(); | 776 | smp_mb(); |
766 | tx_queue->old_read_count = | 777 | tx_queue->old_read_count = |
767 | *(volatile unsigned *)&tx_queue->read_count; | 778 | ACCESS_ONCE(tx_queue->read_count); |
768 | fill_level = (tx_queue->insert_count | 779 | fill_level = (tx_queue->insert_count |
769 | - tx_queue->old_read_count); | 780 | - tx_queue->old_read_count); |
770 | q_space = efx->txq_entries - 1 - fill_level; | 781 | q_space = efx->txq_entries - 1 - fill_level; |