 drivers/net/ethernet/cisco/enic/enic.h      |  41
 drivers/net/ethernet/cisco/enic/enic_clsf.c | 213
 drivers/net/ethernet/cisco/enic/enic_clsf.h |   9
 drivers/net/ethernet/cisco/enic/enic_main.c |  13
 drivers/net/ethernet/cisco/enic/enic_res.c  |   1
 drivers/net/ethernet/cisco/enic/vnic_enet.h |   2
 6 files changed, 279 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 14f465f239d6..b9b9178e174e 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -99,6 +99,44 @@ struct enic_port_profile {
 	u8 mac_addr[ETH_ALEN];
 };
 
+#ifdef CONFIG_RFS_ACCEL
+/* enic_rfs_fltr_node - rfs filter node in hash table
+ * @keys: IPv4 5 tuple
+ * @flow_id: flow_id of clsf filter provided by kernel
+ * @fltr_id: filter id of clsf filter returned by adaptor
+ * @rq_id: desired rq index
+ * @node: hlist_node
+ */
+struct enic_rfs_fltr_node {
+	struct flow_keys keys;
+	u32 flow_id;
+	u16 fltr_id;
+	u16 rq_id;
+	struct hlist_node node;
+};
+
+/* enic_rfs_flw_tbl - rfs flow table
+ * @max: Maximum number of filters vNIC supports
+ * @free: Number of free filters available
+ * @toclean: hash table index to clean next
+ * @ht_head: hash table list head
+ * @lock: spin lock
+ * @rfs_may_expire: timer function for enic_rps_may_expire_flow
+ */
+struct enic_rfs_flw_tbl {
+	u16 max;
+	int free;
+
+#define ENIC_RFS_FLW_BITSHIFT (10)
+#define ENIC_RFS_FLW_MASK ((1 << ENIC_RFS_FLW_BITSHIFT) - 1)
+	u16 toclean:ENIC_RFS_FLW_BITSHIFT;
+	struct hlist_head ht_head[1 << ENIC_RFS_FLW_BITSHIFT];
+	spinlock_t lock;
+	struct timer_list rfs_may_expire;
+};
+
+#endif /* CONFIG_RFS_ACCEL */
+
 /* Per-instance private data structure */
 struct enic {
 	struct net_device *netdev;
@@ -150,6 +188,9 @@ struct enic {
 	/* completion queue cache line section */
 	____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
 	unsigned int cq_count;
+#ifdef CONFIG_RFS_ACCEL
+	struct enic_rfs_flw_tbl rfs_h;
+#endif
 };
 
 static inline struct device *enic_get_dev(struct enic *enic)
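
A note on the sizing above: ENIC_RFS_FLW_BITSHIFT of 10 gives a 1024-bucket hash table, ENIC_RFS_FLW_MASK folds a raw skb hash into that range, and because toclean is declared as a 10-bit bitfield the post-increment used by the cleanup path wraps back to bucket 0 by itself. The stand-alone sketch below is an illustration only, not driver code; the stdio/stdint scaffolding and the toy_tbl name are mine.

#include <stdio.h>
#include <stdint.h>

#define ENIC_RFS_FLW_BITSHIFT	(10)
#define ENIC_RFS_FLW_MASK	((1 << ENIC_RFS_FLW_BITSHIFT) - 1)

/* same 10-bit bitfield trick as enic_rfs_flw_tbl.toclean */
struct toy_tbl {
	unsigned int toclean:ENIC_RFS_FLW_BITSHIFT;
};

int main(void)
{
	uint32_t flow_hash = 0xdeadbeef;	/* stand-in for skb_get_hash_raw(skb) */
	struct toy_tbl t = { .toclean = ENIC_RFS_FLW_MASK };	/* last bucket, 1023 */

	/* 1 << 10 = 1024 buckets; the mask folds any hash into [0, 1023] */
	printf("buckets = %d, bucket(0x%08x) = %u\n",
	       1 << ENIC_RFS_FLW_BITSHIFT, (unsigned int)flow_hash,
	       (unsigned int)(flow_hash & ENIC_RFS_FLW_MASK));

	/* incrementing past the last bucket wraps to 0, so the expiry timer
	 * can keep walking ht_head[toclean++] without an explicit modulo */
	t.toclean++;
	printf("toclean after wrap = %u\n", (unsigned int)t.toclean);	/* prints 0 */

	return 0;
}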
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index f6703c4f76a9..7f27a4c7fbfd 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -64,3 +64,216 @@ int enic_delfltr(struct enic *enic, u16 filter_id)
 
 	return ret;
 }
+
+#ifdef CONFIG_RFS_ACCEL
+void enic_flow_may_expire(unsigned long data)
+{
+	struct enic *enic = (struct enic *)data;
+	bool res;
+	int j;
+
+	spin_lock(&enic->rfs_h.lock);
+	for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
+		struct hlist_head *hhead;
+		struct hlist_node *tmp;
+		struct enic_rfs_fltr_node *n;
+
+		hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++];
+		hlist_for_each_entry_safe(n, tmp, hhead, node) {
+			res = rps_may_expire_flow(enic->netdev, n->rq_id,
+						  n->flow_id, n->fltr_id);
+			if (res) {
+				res = enic_delfltr(enic, n->fltr_id);
+				if (unlikely(res))
+					continue;
+				hlist_del(&n->node);
+				kfree(n);
+				enic->rfs_h.free++;
+			}
+		}
+	}
+	spin_unlock(&enic->rfs_h.lock);
+	mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
+}
+
+/* enic_rfs_flw_tbl_init - initialize enic->rfs_h members
+ * @enic: enic data
+ */
+void enic_rfs_flw_tbl_init(struct enic *enic)
+{
+	int i;
+
+	spin_lock_init(&enic->rfs_h.lock);
+	for (i = 0; i <= ENIC_RFS_FLW_MASK; i++)
+		INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]);
+	enic->rfs_h.max = enic->config.num_arfs;
+	enic->rfs_h.free = enic->rfs_h.max;
+	enic->rfs_h.toclean = 0;
+	init_timer(&enic->rfs_h.rfs_may_expire);
+	enic->rfs_h.rfs_may_expire.function = enic_flow_may_expire;
+	enic->rfs_h.rfs_may_expire.data = (unsigned long)enic;
+	mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
+}
+
+void enic_rfs_flw_tbl_free(struct enic *enic)
+{
+	int i, res;
+
+	del_timer_sync(&enic->rfs_h.rfs_may_expire);
+	spin_lock(&enic->rfs_h.lock);
+	enic->rfs_h.free = 0;
+	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
+		struct hlist_head *hhead;
+		struct hlist_node *tmp;
+		struct enic_rfs_fltr_node *n;
+
+		hhead = &enic->rfs_h.ht_head[i];
+		hlist_for_each_entry_safe(n, tmp, hhead, node) {
+			enic_delfltr(enic, n->fltr_id);
+			hlist_del(&n->node);
+			kfree(n);
+		}
+	}
+	spin_unlock(&enic->rfs_h.lock);
+}
+
+static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
+						  struct flow_keys *k)
+{
+	struct enic_rfs_fltr_node *tpos;
+
+	hlist_for_each_entry(tpos, h, node)
+		if (tpos->keys.src == k->src &&
+		    tpos->keys.dst == k->dst &&
+		    tpos->keys.ports == k->ports &&
+		    tpos->keys.ip_proto == k->ip_proto &&
+		    tpos->keys.n_proto == k->n_proto)
+			return tpos;
+	return NULL;
+}
+
+int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+		       u16 rxq_index, u32 flow_id)
+{
+	struct flow_keys keys;
+	struct enic_rfs_fltr_node *n;
+	struct enic *enic;
+	u16 tbl_idx;
+	int res, i;
+
+	enic = netdev_priv(dev);
+	res = skb_flow_dissect(skb, &keys);
+	if (!res || keys.n_proto != htons(ETH_P_IP) ||
+	    (keys.ip_proto != IPPROTO_TCP && keys.ip_proto != IPPROTO_UDP))
+		return -EPROTONOSUPPORT;
+
+	tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
+	spin_lock(&enic->rfs_h.lock);
+	n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);
+
+	if (n) { /* entry already present */
+		if (rxq_index == n->rq_id) {
+			res = -EEXIST;
+			goto ret_unlock;
+		}
+
+		/* desired rq changed for the flow, we need to delete
+		 * old fltr and add new one
+		 *
+		 * The moment we delete the fltr, the upcoming pkts
+		 * are put in default rq based on rss. When we add
+		 * new filter, upcoming pkts are put in desired queue.
+		 * This could cause ooo pkts.
+		 *
+		 * Lets 1st try adding new fltr and then del old one.
+		 */
+		i = --enic->rfs_h.free;
+		/* clsf tbl is full, we have to del old fltr first */
+		if (unlikely(i < 0)) {
+			enic->rfs_h.free++;
+			res = enic_delfltr(enic, n->fltr_id);
+			if (unlikely(res < 0))
+				goto ret_unlock;
+			res = enic_addfltr_5t(enic, &keys, rxq_index);
+			if (res < 0) {
+				hlist_del(&n->node);
+				enic->rfs_h.free++;
+				goto ret_unlock;
+			}
+		/* add new fltr 1st then del old fltr */
+		} else {
+			int ret;
+
+			res = enic_addfltr_5t(enic, &keys, rxq_index);
+			if (res < 0) {
+				enic->rfs_h.free++;
+				goto ret_unlock;
+			}
+			ret = enic_delfltr(enic, n->fltr_id);
+			/* deleting old fltr failed. Add old fltr to list.
+			 * enic_flow_may_expire() will try to delete it later.
+			 */
+			if (unlikely(ret < 0)) {
+				struct enic_rfs_fltr_node *d;
+				struct hlist_head *head;
+
+				head = &enic->rfs_h.ht_head[tbl_idx];
+				d = kmalloc(sizeof(*d), GFP_ATOMIC);
+				if (d) {
+					d->fltr_id = n->fltr_id;
+					INIT_HLIST_NODE(&d->node);
+					hlist_add_head(&d->node, head);
+				}
+			} else {
+				enic->rfs_h.free++;
+			}
+		}
+		n->rq_id = rxq_index;
+		n->fltr_id = res;
+		n->flow_id = flow_id;
+	/* entry not present */
+	} else {
+		i = --enic->rfs_h.free;
+		if (i <= 0) {
+			enic->rfs_h.free++;
+			res = -EBUSY;
+			goto ret_unlock;
+		}
+
+		n = kmalloc(sizeof(*n), GFP_ATOMIC);
+		if (!n) {
+			res = -ENOMEM;
+			enic->rfs_h.free++;
+			goto ret_unlock;
+		}
+
+		res = enic_addfltr_5t(enic, &keys, rxq_index);
+		if (res < 0) {
+			kfree(n);
+			enic->rfs_h.free++;
+			goto ret_unlock;
+		}
+		n->rq_id = rxq_index;
+		n->fltr_id = res;
+		n->flow_id = flow_id;
+		n->keys = keys;
+		INIT_HLIST_NODE(&n->node);
+		hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]);
+	}
+
+ret_unlock:
+	spin_unlock(&enic->rfs_h.lock);
+	return res;
+}
+
+#else
+
+void enic_rfs_flw_tbl_init(struct enic *enic)
+{
+}
+
+void enic_rfs_flw_tbl_free(struct enic *enic)
+{
+}
+
+#endif /* CONFIG_RFS_ACCEL */
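
For readers without the kernel headers handy, the sketch below is a simplified user-space model of the bucket-plus-5-tuple-match scheme that htbl_key_search() and enic_rx_flow_steer() implement: hash the flow into one of 1024 buckets, then walk that bucket comparing the full 5-tuple. Everything named toy_* is invented for illustration, the values in main() are arbitrary, and the real code uses struct flow_keys, hlist_head/hlist_node and GFP_ATOMIC allocations under rfs_h.lock.

#include <stdio.h>
#include <stdint.h>

#define FLW_BITSHIFT	10
#define FLW_MASK	((1 << FLW_BITSHIFT) - 1)

/* simplified stand-ins for struct flow_keys / struct enic_rfs_fltr_node */
struct toy_keys {
	uint32_t src, dst;	/* IPv4 source/destination address */
	uint32_t ports;		/* source and destination port packed together */
	uint16_t n_proto;	/* ETH_P_IP */
	uint8_t  ip_proto;	/* IPPROTO_TCP or IPPROTO_UDP */
};

struct toy_node {
	struct toy_keys keys;
	uint16_t fltr_id;	/* id the adaptor returned for this filter */
	struct toy_node *next;	/* stand-in for hlist_node */
};

static struct toy_node *buckets[1 << FLW_BITSHIFT];

/* same shape as htbl_key_search(): walk one bucket, match the full 5-tuple */
static struct toy_node *toy_search(uint32_t hash, const struct toy_keys *k)
{
	struct toy_node *n;

	for (n = buckets[hash & FLW_MASK]; n; n = n->next)
		if (n->keys.src == k->src && n->keys.dst == k->dst &&
		    n->keys.ports == k->ports &&
		    n->keys.ip_proto == k->ip_proto &&
		    n->keys.n_proto == k->n_proto)
			return n;
	return NULL;	/* miss: the caller would program a new filter */
}

int main(void)
{
	static struct toy_node node = {
		.keys = { .src = 0x0a000001, .dst = 0x0a000002,
			  .ports = 0xc3501f90, .n_proto = 0x0800, .ip_proto = 6 },
		.fltr_id = 42,
	};
	uint32_t hash = 0x00012345;	/* stand-in for skb_get_hash_raw(skb) */

	/* insert at the head of its bucket, like hlist_add_head() */
	node.next = buckets[hash & FLW_MASK];
	buckets[hash & FLW_MASK] = &node;

	printf("lookup: %s\n", toy_search(hash, &node.keys) ? "hit" : "miss");
	return 0;
}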
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.h b/drivers/net/ethernet/cisco/enic/enic_clsf.h
index b6925b368b77..76a85bb0bb73 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.h
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.h
@@ -4,7 +4,16 @@
 #include "vnic_dev.h"
 #include "enic.h"
 
+#define ENIC_CLSF_EXPIRE_COUNT 128
+
 int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq);
 int enic_delfltr(struct enic *enic, u16 filter_id);
 
+#ifdef CONFIG_RFS_ACCEL
+void enic_rfs_flw_tbl_init(struct enic *enic);
+void enic_rfs_flw_tbl_free(struct enic *enic);
+int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+		       u16 rxq_index, u32 flow_id);
+#endif /* CONFIG_RFS_ACCEL */
+
 #endif /* _ENIC_CLSF_H_ */
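
One consequence of ENIC_CLSF_EXPIRE_COUNT is worth spelling out: enic_flow_may_expire() visits 128 buckets per run and re-arms itself at jiffies + HZ/4, so the 1024-bucket table is swept roughly every two seconds. A trivial sketch of that arithmetic (illustration only, not driver code):

#include <stdio.h>

#define ENIC_RFS_FLW_BITSHIFT	10	/* from enic.h */
#define ENIC_CLSF_EXPIRE_COUNT	128	/* from enic_clsf.h */

int main(void)
{
	unsigned int buckets = 1 << ENIC_RFS_FLW_BITSHIFT;	/* 1024 */
	unsigned int runs = buckets / ENIC_CLSF_EXPIRE_COUNT;	/* 8 timer runs per full sweep */
	unsigned int period_ms = 1000 / 4;			/* mod_timer(..., jiffies + HZ/4) */

	printf("full table sweep roughly every %u ms\n", runs * period_ms);	/* 2000 ms */
	return 0;
}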
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 151b375337a9..a302f1b3e8ff 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -52,6 +52,7 @@
 #include "enic.h"
 #include "enic_dev.h"
 #include "enic_pp.h"
+#include "enic_clsf.h"
 
 #define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
 #define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
@@ -1546,6 +1547,7 @@ static int enic_open(struct net_device *netdev)
 		vnic_intr_unmask(&enic->intr[i]);
 
 	enic_notify_timer_start(enic);
+	enic_rfs_flw_tbl_init(enic);
 
 	return 0;
 
@@ -1572,6 +1574,7 @@ static int enic_stop(struct net_device *netdev)
 	enic_synchronize_irqs(enic);
 
 	del_timer_sync(&enic->notify_timer);
+	enic_rfs_flw_tbl_free(enic);
 
 	enic_dev_disable(enic);
 
@@ -2064,6 +2067,9 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= enic_poll_controller,
 #endif
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer	= enic_rx_flow_steer,
+#endif
 };
 
 static const struct net_device_ops enic_netdev_ops = {
@@ -2084,6 +2090,9 @@ static const struct net_device_ops enic_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= enic_poll_controller,
 #endif
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer	= enic_rx_flow_steer,
+#endif
 };
 
 static void enic_dev_deinit(struct enic *enic)
@@ -2429,6 +2438,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	netdev->features |= netdev->hw_features;
 
+#ifdef CONFIG_RFS_ACCEL
+	netdev->hw_features |= NETIF_F_NTUPLE;
+#endif
+
 	if (using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 31d658880c3c..9c96911fb2c8 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -71,6 +71,7 @@ int enic_get_vnic_config(struct enic *enic)
 	GET_CONFIG(intr_mode);
 	GET_CONFIG(intr_timer_usec);
 	GET_CONFIG(loop_tag);
+	GET_CONFIG(num_arfs);
 
 	c->wq_desc_count =
 		min_t(u32, ENIC_MAX_WQ_DESCS,
diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h
index 609542848e02..75aced2de869 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_enet.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h
@@ -32,6 +32,8 @@ struct vnic_enet_config {
 	char devname[16];
 	u32 intr_timer_usec;
 	u16 loop_tag;
+	u16 vf_rq_count;
+	u16 num_arfs;
 };
 
 #define VENETF_TSO		0x1	/* TSO enabled */