author	Ben Hutchings <bhutchings@solarflare.com>	2013-11-21 14:02:22 -0500
committer	Ben Hutchings <bhutchings@solarflare.com>	2013-12-12 17:07:23 -0500
commit	b59e6ef87c337541c1c57742116d780813c9601c (patch)
tree	f5df3bd7f9d509e6d8ca32488a1f4ee1bbc06317
parent	7665d1abea22cb44d4f0bac99e77275eba39bbf1 (diff)
sfc: Don't refer to 'stack' in filter implementation
Change all the 'stack' naming to 'auto' (or other meaningful term); the device address list is based on more than just what the network stack wants, and the no-match filters aren't really what the stack wants at all.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
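For background on the mechanism being renamed: as the hunks below show, efx_ef10_filter_sync_rx_mode() maintains the automatic filters with a mark-and-sweep cycle. It first marks existing entries AUTO_OLD, then re-inserts filters for the current device address lists (which clears the flag on anything renewed), and finally removes entries still carrying the flag. The stand-alone C sketch below models that cycle; the table layout, helper names and sizes here are simplified illustrations, not the driver's real structures.

/* Illustrative user-space model of the AUTO_OLD mark-and-sweep cycle;
 * types and helpers are simplified stand-ins, not the sfc driver's code.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define FILTER_FLAG_AUTO_OLD	0x2UL	/* marks entries eligible for sweep */
#define TBL_ROWS		8

struct filter_entry {
	bool used;
	unsigned long flags;
	char addr[18];			/* MAC address as text, for brevity */
};

static struct filter_entry table[TBL_ROWS];

/* Insert or renew a filter for @addr; a renewed entry loses AUTO_OLD. */
static int filter_insert(const char *addr)
{
	int i, free_idx = -1;

	for (i = 0; i < TBL_ROWS; i++) {
		if (table[i].used && !strcmp(table[i].addr, addr)) {
			table[i].flags &= ~FILTER_FLAG_AUTO_OLD;
			return i;
		}
		if (!table[i].used && free_idx < 0)
			free_idx = i;
	}
	if (free_idx < 0)
		return -1;
	table[free_idx].used = true;
	table[free_idx].flags = 0;
	snprintf(table[free_idx].addr, sizeof(table[free_idx].addr), "%s", addr);
	return free_idx;
}

/* One sync pass: mark, renew from the current list, sweep the rest. */
static void sync_addr_list(const char **list, int count)
{
	int i;

	for (i = 0; i < TBL_ROWS; i++)		/* mark old entries */
		if (table[i].used)
			table[i].flags |= FILTER_FLAG_AUTO_OLD;

	for (i = 0; i < count; i++)		/* insert/renew */
		filter_insert(list[i]);

	for (i = 0; i < TBL_ROWS; i++)		/* sweep unrenewed entries */
		if (table[i].used && (table[i].flags & FILTER_FLAG_AUTO_OLD))
			table[i].used = false;
}

int main(void)
{
	const char *first[] = { "00:0f:53:00:00:01", "01:00:5e:00:00:01" };
	const char *second[] = { "00:0f:53:00:00:01" };

	sync_addr_list(first, 2);
	sync_addr_list(second, 1);		/* second entry is swept */

	for (int i = 0; i < TBL_ROWS; i++)
		if (table[i].used)
			printf("filter %d: %s\n", i, table[i].addr);
	return 0;
}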
-rw-r--r--	drivers/net/ethernet/sfc/ef10.c		104
-rw-r--r--	drivers/net/ethernet/sfc/farch.c	8
2 files changed, 56 insertions, 56 deletions
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index e5eeac9063d2..2adc8e45461b 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -53,25 +53,25 @@ struct efx_ef10_filter_table {
 
 	struct {
 		unsigned long spec;	/* pointer to spec plus flag bits */
-/* BUSY flag indicates that an update is in progress. STACK_OLD is
- * used to mark and sweep stack-owned MAC filters.
+/* BUSY flag indicates that an update is in progress. AUTO_OLD is
+ * used to mark and sweep MAC filters for the device address lists.
  */
 #define EFX_EF10_FILTER_FLAG_BUSY	1UL
-#define EFX_EF10_FILTER_FLAG_STACK_OLD	2UL
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
 #define EFX_EF10_FILTER_FLAGS		3UL
 		u64 handle;		/* firmware handle */
 	} *entry;
 	wait_queue_head_t waitq;
 /* Shadow of net_device address lists, guarded by mac_lock */
-#define EFX_EF10_FILTER_STACK_UC_MAX	32
-#define EFX_EF10_FILTER_STACK_MC_MAX	256
+#define EFX_EF10_FILTER_DEV_UC_MAX	32
+#define EFX_EF10_FILTER_DEV_MC_MAX	256
 	struct {
 		u8 addr[ETH_ALEN];
 		u16 id;
-	} stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
-	  stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
-	int stack_uc_count;		/* negative for PROMISC */
-	int stack_mc_count;		/* negative for PROMISC/ALLMULTI */
+	} dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
+	  dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+	int dev_uc_count;		/* negative for PROMISC */
+	int dev_mc_count;		/* negative for PROMISC/ALLMULTI */
 };
 
 /* An arbitrary search limit for the software hash table */
@@ -2401,7 +2401,7 @@ found:
 		if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
 			saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
 		table->entry[ins_index].spec &=
-			~EFX_EF10_FILTER_FLAG_STACK_OLD;
+			~EFX_EF10_FILTER_FLAG_AUTO_OLD;
 		rc = ins_index;
 		goto out_unlock;
 	}
@@ -2514,13 +2514,13 @@ static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
 }
 
 /* Remove a filter.
- * If !stack_requested, remove by ID
- * If stack_requested, remove by index
+ * If !by_index, remove by ID
+ * If by_index, remove by index
  * Filter ID may come from userland and must be range-checked.
  */
 static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
 					   enum efx_filter_priority priority,
-					   u32 filter_id, bool stack_requested)
+					   u32 filter_id, bool by_index)
 {
 	unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
 	struct efx_ef10_filter_table *table = efx->filter_state;
@@ -2547,7 +2547,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
 
 	spec = efx_ef10_filter_entry_spec(table, filter_idx);
 	if (!spec ||
-	    (!stack_requested &&
+	    (!by_index &&
 	     efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
 	     filter_id / HUNT_FILTER_TBL_ROWS)) {
 		rc = -ENOENT;
@@ -2558,7 +2558,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
 	    priority == EFX_FILTER_PRI_AUTO) {
 		/* Just remove flags */
 		spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
-		table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_STACK_OLD;
+		table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
 		rc = 0;
 		goto out_unlock;
 	}
@@ -2572,7 +2572,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
 	spin_unlock_bh(&efx->filter_lock);
 
 	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
-		/* Reset steering of a stack-owned filter */
+		/* Reset to an automatic filter */
 
 		struct efx_filter_spec new_spec = *spec;
 
@@ -3086,15 +3086,15 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
 
 	/* Mark old filters that may need to be removed */
 	spin_lock_bh(&efx->filter_lock);
-	n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
+	n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
 	for (i = 0; i < n; i++) {
-		filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
-		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+		filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
+		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
 	}
-	n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
+	n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
 	for (i = 0; i < n; i++) {
-		filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
-		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+		filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
+		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
 	}
 	spin_unlock_bh(&efx->filter_lock);
 
@@ -3103,28 +3103,28 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
 	 */
 	netif_addr_lock_bh(net_dev);
 	if (net_dev->flags & IFF_PROMISC ||
-	    netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
-		table->stack_uc_count = -1;
+	    netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
+		table->dev_uc_count = -1;
 	} else {
-		table->stack_uc_count = 1 + netdev_uc_count(net_dev);
-		memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
+		table->dev_uc_count = 1 + netdev_uc_count(net_dev);
+		memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr,
 		       ETH_ALEN);
 		i = 1;
 		netdev_for_each_uc_addr(uc, net_dev) {
-			memcpy(table->stack_uc_list[i].addr,
+			memcpy(table->dev_uc_list[i].addr,
 			       uc->addr, ETH_ALEN);
 			i++;
 		}
 	}
 	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
-	    netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
-		table->stack_mc_count = -1;
+	    netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
+		table->dev_mc_count = -1;
 	} else {
-		table->stack_mc_count = 1 + netdev_mc_count(net_dev);
-		eth_broadcast_addr(table->stack_mc_list[0].addr);
+		table->dev_mc_count = 1 + netdev_mc_count(net_dev);
+		eth_broadcast_addr(table->dev_mc_list[0].addr);
 		i = 1;
 		netdev_for_each_mc_addr(mc, net_dev) {
-			memcpy(table->stack_mc_list[i].addr,
+			memcpy(table->dev_mc_list[i].addr,
 			       mc->addr, ETH_ALEN);
 			i++;
 		}
@@ -3132,27 +3132,27 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
 	netif_addr_unlock_bh(net_dev);
 
 	/* Insert/renew unicast filters */
-	if (table->stack_uc_count >= 0) {
-		for (i = 0; i < table->stack_uc_count; i++) {
+	if (table->dev_uc_count >= 0) {
+		for (i = 0; i < table->dev_uc_count; i++) {
 			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
 					   EFX_FILTER_FLAG_RX_RSS,
 					   0);
 			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
-						 table->stack_uc_list[i].addr);
+						 table->dev_uc_list[i].addr);
 			rc = efx_ef10_filter_insert(efx, &spec, true);
 			if (rc < 0) {
 				/* Fall back to unicast-promisc */
 				while (i--)
 					efx_ef10_filter_remove_safe(
 						efx, EFX_FILTER_PRI_AUTO,
-						table->stack_uc_list[i].id);
-				table->stack_uc_count = -1;
+						table->dev_uc_list[i].id);
+				table->dev_uc_count = -1;
 				break;
 			}
-			table->stack_uc_list[i].id = rc;
+			table->dev_uc_list[i].id = rc;
 		}
 	}
-	if (table->stack_uc_count < 0) {
+	if (table->dev_uc_count < 0) {
 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
 				   EFX_FILTER_FLAG_RX_RSS,
 				   0);
@@ -3160,34 +3160,34 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
 		rc = efx_ef10_filter_insert(efx, &spec, true);
 		if (rc < 0) {
 			WARN_ON(1);
-			table->stack_uc_count = 0;
+			table->dev_uc_count = 0;
 		} else {
-			table->stack_uc_list[0].id = rc;
+			table->dev_uc_list[0].id = rc;
 		}
 	}
 
 	/* Insert/renew multicast filters */
-	if (table->stack_mc_count >= 0) {
-		for (i = 0; i < table->stack_mc_count; i++) {
+	if (table->dev_mc_count >= 0) {
+		for (i = 0; i < table->dev_mc_count; i++) {
 			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
 					   EFX_FILTER_FLAG_RX_RSS,
 					   0);
 			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
-						 table->stack_mc_list[i].addr);
+						 table->dev_mc_list[i].addr);
 			rc = efx_ef10_filter_insert(efx, &spec, true);
 			if (rc < 0) {
 				/* Fall back to multicast-promisc */
 				while (i--)
 					efx_ef10_filter_remove_safe(
 						efx, EFX_FILTER_PRI_AUTO,
-						table->stack_mc_list[i].id);
-				table->stack_mc_count = -1;
+						table->dev_mc_list[i].id);
+				table->dev_mc_count = -1;
 				break;
 			}
-			table->stack_mc_list[i].id = rc;
+			table->dev_mc_list[i].id = rc;
 		}
 	}
-	if (table->stack_mc_count < 0) {
+	if (table->dev_mc_count < 0) {
 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
 				   EFX_FILTER_FLAG_RX_RSS,
 				   0);
@@ -3195,20 +3195,20 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
 		rc = efx_ef10_filter_insert(efx, &spec, true);
 		if (rc < 0) {
 			WARN_ON(1);
-			table->stack_mc_count = 0;
+			table->dev_mc_count = 0;
 		} else {
-			table->stack_mc_list[0].id = rc;
+			table->dev_mc_list[0].id = rc;
 		}
 	}
 
 	/* Remove filters that weren't renewed. Since nothing else
-	 * changes the STACK_OLD flag or removes these filters, we
+	 * changes the AUTO_OLD flag or removes these filters, we
 	 * don't need to hold the filter_lock while scanning for
 	 * these filters.
 	 */
 	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
 		if (ACCESS_ONCE(table->entry[i].spec) &
-		    EFX_EF10_FILTER_FLAG_STACK_OLD) {
+		    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
 			if (efx_ef10_filter_remove_internal(
 				    efx, EFX_FILTER_PRI_AUTO, i, true) < 0)
 				remove_failed = true;
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index fbd923ddf546..378d6b968c4c 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2184,8 +2184,8 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
 }
 
 static void
-efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
-				   struct efx_farch_filter_spec *spec)
+efx_farch_filter_init_rx_auto(struct efx_nic *efx,
+			      struct efx_farch_filter_spec *spec)
 {
 	/* If there's only one channel then disable RSS for non VF
 	 * traffic, thereby allowing VFs to use RSS when the PF can't.
@@ -2547,7 +2547,7 @@ static int efx_farch_filter_remove(struct efx_nic *efx,
 		return -ENOENT;
 
 	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
-		efx_farch_filter_init_rx_for_stack(efx, spec);
+		efx_farch_filter_init_rx_auto(efx, spec);
 		efx_farch_filter_push_rx_config(efx);
 	} else {
 		efx_farch_filter_table_clear_entry(efx, table, filter_idx);
@@ -2815,7 +2815,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
 	for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
 		spec = &table->spec[i];
 		spec->type = EFX_FARCH_FILTER_UC_DEF + i;
-		efx_farch_filter_init_rx_for_stack(efx, spec);
+		efx_farch_filter_init_rx_auto(efx, spec);
 		__set_bit(i, table->used_bitmap);
 	}
 }