author     David S. Miller <davem@davemloft.net>    2014-06-23 17:40:29 -0400
committer  David S. Miller <davem@davemloft.net>    2014-06-23 17:40:29 -0400
commit     bb05ea7e59f26ebd7808bb39deb91b7f19f589b7 (patch)
tree       c3f002369ac3d8c478efb797561e9069f7c35f98
parent     eef929622b0ce86fcdaf6bd9ab6d7502470728d5 (diff)
parent     4cfe878537cec0e9c0f84b93cc6aa9526f6942b5 (diff)
Merge branch 'enic'
Govindarajulu Varadarajan says:
====================
enic updates
This series fixes minor bugs and adds new features: Accelerated RFS,
busy_poll support, and tx clean-up in napi poll.
v3:
* While doing tx clean-up in napi, ignore the budget and clean up as many descriptors as possible.
v2:
* Fix #ifdef coding style issues in '[PATCH 4/8] enic: alloc/free rx_cpu_rmap'
  and '[PATCH 5/8] enic: Add Accelerated RFS support'
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
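
For orientation before the per-file diffs: the two new receive-path features are exposed to the networking core through net_device_ops callbacks. The sketch below is only an illustration condensed from the enic_main.c hunks further down (the real ops tables there contain many more callbacks); it is not an excerpt of the patch itself.

	/* Illustrative sketch only -- condensed from the enic_main.c changes below. */
	static const struct net_device_ops enic_netdev_ops_sketch = {
		/* ... existing open/stop/xmit/etc. callbacks ... */
	#ifdef CONFIG_RFS_ACCEL
		/* Accelerated RFS: the RFS core asks the driver to steer a flow to an RQ */
		.ndo_rx_flow_steer	= enic_rx_flow_steer,
	#endif
	#ifdef CONFIG_NET_RX_BUSY_POLL
		/* Low-latency sockets: poll an RQ directly from the socket layer */
		.ndo_busy_poll		= enic_busy_poll,
	#endif
	};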
-rw-r--r--  drivers/net/ethernet/cisco/enic/Makefile      |   2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h        |  43
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_api.c    |   4
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_clsf.c   | 279
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_clsf.h   |  19
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_dev.c    |  80
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_dev.h    |   4
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c   | 245
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_res.c    |   1
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.c    |  65
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.h    |   2
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_devcmd.h |   5
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_enet.h   |   2
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.h     | 122
-rw-r--r--  include/net/flow_keys.h                       |  14
-rw-r--r--  include/net/sch_generic.h                     |   2
-rw-r--r--  net/core/flow_dissector.c                     |   1
17 files changed, 786 insertions, 104 deletions
diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
index 239e1e46545d..aadcaf7876ce 100644
--- a/drivers/net/ethernet/cisco/enic/Makefile
+++ b/drivers/net/ethernet/cisco/enic/Makefile
@@ -2,5 +2,5 @@ obj-$(CONFIG_ENIC) := enic.o
2 | 2 | ||
3 | enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ | 3 | enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ |
4 | enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \ | 4 | enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \ |
5 | enic_ethtool.o enic_api.o | 5 | enic_ethtool.o enic_api.o enic_clsf.o |
6 | 6 | ||
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 14f465f239d6..c8aa9fb81d3c 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -99,6 +99,44 @@ struct enic_port_profile {
99 | u8 mac_addr[ETH_ALEN]; | 99 | u8 mac_addr[ETH_ALEN]; |
100 | }; | 100 | }; |
101 | 101 | ||
102 | #ifdef CONFIG_RFS_ACCEL | ||
103 | /* enic_rfs_fltr_node - rfs filter node in hash table | ||
104 | * @keys: IPv4 5 tuple | ||
105 | * @flow_id: flow_id of clsf filter provided by kernel | ||
106 | * @fltr_id: filter id of clsf filter returned by adaptor | ||
107 | * @rq_id: desired rq index | ||
108 | * @node: hlist_node | ||
109 | */ | ||
110 | struct enic_rfs_fltr_node { | ||
111 | struct flow_keys keys; | ||
112 | u32 flow_id; | ||
113 | u16 fltr_id; | ||
114 | u16 rq_id; | ||
115 | struct hlist_node node; | ||
116 | }; | ||
117 | |||
118 | /* enic_rfs_flw_tbl - rfs flow table | ||
119 | * @max: Maximum number of filters vNIC supports | ||
120 | * @free: Number of free filters available | ||
121 | * @toclean: hash table index to clean next | ||
122 | * @ht_head: hash table list head | ||
123 | * @lock: spin lock | ||
124 | * @rfs_may_expire: timer function for expiring aRFS flows (enic_flow_may_expire) | ||
125 | */ | ||
126 | struct enic_rfs_flw_tbl { | ||
127 | u16 max; | ||
128 | int free; | ||
129 | |||
130 | #define ENIC_RFS_FLW_BITSHIFT (10) | ||
131 | #define ENIC_RFS_FLW_MASK ((1 << ENIC_RFS_FLW_BITSHIFT) - 1) | ||
132 | u16 toclean:ENIC_RFS_FLW_BITSHIFT; | ||
133 | struct hlist_head ht_head[1 << ENIC_RFS_FLW_BITSHIFT]; | ||
134 | spinlock_t lock; | ||
135 | struct timer_list rfs_may_expire; | ||
136 | }; | ||
137 | |||
138 | #endif /* CONFIG_RFS_ACCEL */ | ||
139 | |||
102 | /* Per-instance private data structure */ | 140 | /* Per-instance private data structure */ |
103 | struct enic { | 141 | struct enic { |
104 | struct net_device *netdev; | 142 | struct net_device *netdev; |
@@ -140,7 +178,7 @@ struct enic { | |||
140 | unsigned int rq_count; | 178 | unsigned int rq_count; |
141 | u64 rq_truncated_pkts; | 179 | u64 rq_truncated_pkts; |
142 | u64 rq_bad_fcs; | 180 | u64 rq_bad_fcs; |
143 | struct napi_struct napi[ENIC_RQ_MAX]; | 181 | struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX]; |
144 | 182 | ||
145 | /* interrupt resource cache line section */ | 183 | /* interrupt resource cache line section */ |
146 | ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX]; | 184 | ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX]; |
@@ -150,6 +188,9 @@ struct enic { | |||
150 | /* completion queue cache line section */ | 188 | /* completion queue cache line section */ |
151 | ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX]; | 189 | ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX]; |
152 | unsigned int cq_count; | 190 | unsigned int cq_count; |
191 | #ifdef CONFIG_RFS_ACCEL | ||
192 | struct enic_rfs_flw_tbl rfs_h; | ||
193 | #endif | ||
153 | }; | 194 | }; |
154 | 195 | ||
155 | static inline struct device *enic_get_dev(struct enic *enic) | 196 | static inline struct device *enic_get_dev(struct enic *enic) |
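
A small aside on the enic_rfs_flw_tbl layout added above: the hash table has 1 << ENIC_RFS_FLW_BITSHIFT = 1024 buckets, and because toclean is declared as a 10-bit bitfield ("u16 toclean:ENIC_RFS_FLW_BITSHIFT") it wraps around the table automatically when enic_flow_may_expire() in enic_clsf.c below does "toclean++". The standalone snippet here only demonstrates that wrap-around behaviour (using a plain unsigned int bitfield for portability); it is not part of the patch.

	#include <stdio.h>

	#define ENIC_RFS_FLW_BITSHIFT	10
	#define ENIC_RFS_FLW_MASK	((1 << ENIC_RFS_FLW_BITSHIFT) - 1)

	struct demo {
		/* same idea as "u16 toclean:ENIC_RFS_FLW_BITSHIFT" in struct enic_rfs_flw_tbl */
		unsigned int toclean:ENIC_RFS_FLW_BITSHIFT;
	};

	int main(void)
	{
		struct demo d = { .toclean = ENIC_RFS_FLW_MASK };	/* last bucket, 1023 */

		d.toclean++;	/* 10-bit field wraps back to bucket 0, no explicit masking needed */
		printf("buckets=%d wrapped=%u\n", 1 << ENIC_RFS_FLW_BITSHIFT, d.toclean);
		return 0;
	}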
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
index e13efbdaa2ed..b161f24522b8 100644
--- a/drivers/net/ethernet/cisco/enic/enic_api.c
+++ b/drivers/net/ethernet/cisco/enic/enic_api.c
@@ -34,13 +34,13 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
34 | struct vnic_dev *vdev = enic->vdev; | 34 | struct vnic_dev *vdev = enic->vdev; |
35 | 35 | ||
36 | spin_lock(&enic->enic_api_lock); | 36 | spin_lock(&enic->enic_api_lock); |
37 | spin_lock(&enic->devcmd_lock); | 37 | spin_lock_bh(&enic->devcmd_lock); |
38 | 38 | ||
39 | vnic_dev_cmd_proxy_by_index_start(vdev, vf); | 39 | vnic_dev_cmd_proxy_by_index_start(vdev, vf); |
40 | err = vnic_dev_cmd(vdev, cmd, a0, a1, wait); | 40 | err = vnic_dev_cmd(vdev, cmd, a0, a1, wait); |
41 | vnic_dev_cmd_proxy_end(vdev); | 41 | vnic_dev_cmd_proxy_end(vdev); |
42 | 42 | ||
43 | spin_unlock(&enic->devcmd_lock); | 43 | spin_unlock_bh(&enic->devcmd_lock); |
44 | spin_unlock(&enic->enic_api_lock); | 44 | spin_unlock(&enic->enic_api_lock); |
45 | 45 | ||
46 | return err; | 46 | return err; |
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
new file mode 100644
index 000000000000..7f27a4c7fbfd
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -0,0 +1,279 @@
1 | #include <linux/if.h> | ||
2 | #include <linux/if_ether.h> | ||
3 | #include <linux/if_link.h> | ||
4 | #include <linux/netdevice.h> | ||
5 | #include <linux/in.h> | ||
6 | #include <linux/types.h> | ||
7 | #include <linux/skbuff.h> | ||
8 | #include <net/flow_keys.h> | ||
9 | #include "enic_res.h" | ||
10 | #include "enic_clsf.h" | ||
11 | |||
12 | /* enic_addfltr_5t - Add ipv4 5tuple filter | ||
13 | * @enic: enic struct of vnic | ||
14 | * @keys: flow_keys of ipv4 5tuple | ||
15 | * @rq: rq number to steer to | ||
16 | * | ||
17 | * This function returns filter_id(hardware_id) of the filter | ||
18 | * added. In case of error it returns a negative number. | ||
19 | */ | ||
20 | int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq) | ||
21 | { | ||
22 | int res; | ||
23 | struct filter data; | ||
24 | |||
25 | switch (keys->ip_proto) { | ||
26 | case IPPROTO_TCP: | ||
27 | data.u.ipv4.protocol = PROTO_TCP; | ||
28 | break; | ||
29 | case IPPROTO_UDP: | ||
30 | data.u.ipv4.protocol = PROTO_UDP; | ||
31 | break; | ||
32 | default: | ||
33 | return -EPROTONOSUPPORT; | ||
34 | }; | ||
35 | data.type = FILTER_IPV4_5TUPLE; | ||
36 | data.u.ipv4.src_addr = ntohl(keys->src); | ||
37 | data.u.ipv4.dst_addr = ntohl(keys->dst); | ||
38 | data.u.ipv4.src_port = ntohs(keys->port16[0]); | ||
39 | data.u.ipv4.dst_port = ntohs(keys->port16[1]); | ||
40 | data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE; | ||
41 | |||
42 | spin_lock_bh(&enic->devcmd_lock); | ||
43 | res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data); | ||
44 | spin_unlock_bh(&enic->devcmd_lock); | ||
45 | res = (res == 0) ? rq : res; | ||
46 | |||
47 | return res; | ||
48 | } | ||
49 | |||
50 | /* enic_delfltr - Delete clsf filter | ||
51 | * @enic: enic struct of vnic | ||
52 | * @filter_id: filter_id (hardware id) of the filter to be deleted | ||
53 | * | ||
54 | * This function returns zero in case of success, a negative number in case of | ||
55 | * error. | ||
56 | */ | ||
57 | int enic_delfltr(struct enic *enic, u16 filter_id) | ||
58 | { | ||
59 | int ret; | ||
60 | |||
61 | spin_lock_bh(&enic->devcmd_lock); | ||
62 | ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL); | ||
63 | spin_unlock_bh(&enic->devcmd_lock); | ||
64 | |||
65 | return ret; | ||
66 | } | ||
67 | |||
68 | #ifdef CONFIG_RFS_ACCEL | ||
69 | void enic_flow_may_expire(unsigned long data) | ||
70 | { | ||
71 | struct enic *enic = (struct enic *)data; | ||
72 | bool res; | ||
73 | int j; | ||
74 | |||
75 | spin_lock(&enic->rfs_h.lock); | ||
76 | for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) { | ||
77 | struct hlist_head *hhead; | ||
78 | struct hlist_node *tmp; | ||
79 | struct enic_rfs_fltr_node *n; | ||
80 | |||
81 | hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++]; | ||
82 | hlist_for_each_entry_safe(n, tmp, hhead, node) { | ||
83 | res = rps_may_expire_flow(enic->netdev, n->rq_id, | ||
84 | n->flow_id, n->fltr_id); | ||
85 | if (res) { | ||
86 | res = enic_delfltr(enic, n->fltr_id); | ||
87 | if (unlikely(res)) | ||
88 | continue; | ||
89 | hlist_del(&n->node); | ||
90 | kfree(n); | ||
91 | enic->rfs_h.free++; | ||
92 | } | ||
93 | } | ||
94 | } | ||
95 | spin_unlock(&enic->rfs_h.lock); | ||
96 | mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4); | ||
97 | } | ||
98 | |||
99 | /* enic_rfs_flw_tbl_init - initialize enic->rfs_h members | ||
100 | * @enic: enic data | ||
101 | */ | ||
102 | void enic_rfs_flw_tbl_init(struct enic *enic) | ||
103 | { | ||
104 | int i; | ||
105 | |||
106 | spin_lock_init(&enic->rfs_h.lock); | ||
107 | for (i = 0; i <= ENIC_RFS_FLW_MASK; i++) | ||
108 | INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]); | ||
109 | enic->rfs_h.max = enic->config.num_arfs; | ||
110 | enic->rfs_h.free = enic->rfs_h.max; | ||
111 | enic->rfs_h.toclean = 0; | ||
112 | init_timer(&enic->rfs_h.rfs_may_expire); | ||
113 | enic->rfs_h.rfs_may_expire.function = enic_flow_may_expire; | ||
114 | enic->rfs_h.rfs_may_expire.data = (unsigned long)enic; | ||
115 | mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4); | ||
116 | } | ||
117 | |||
118 | void enic_rfs_flw_tbl_free(struct enic *enic) | ||
119 | { | ||
120 | int i, res; | ||
121 | |||
122 | del_timer_sync(&enic->rfs_h.rfs_may_expire); | ||
123 | spin_lock(&enic->rfs_h.lock); | ||
124 | enic->rfs_h.free = 0; | ||
125 | for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) { | ||
126 | struct hlist_head *hhead; | ||
127 | struct hlist_node *tmp; | ||
128 | struct enic_rfs_fltr_node *n; | ||
129 | |||
130 | hhead = &enic->rfs_h.ht_head[i]; | ||
131 | hlist_for_each_entry_safe(n, tmp, hhead, node) { | ||
132 | enic_delfltr(enic, n->fltr_id); | ||
133 | hlist_del(&n->node); | ||
134 | kfree(n); | ||
135 | } | ||
136 | } | ||
137 | spin_unlock(&enic->rfs_h.lock); | ||
138 | } | ||
139 | |||
140 | static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h, | ||
141 | struct flow_keys *k) | ||
142 | { | ||
143 | struct enic_rfs_fltr_node *tpos; | ||
144 | |||
145 | hlist_for_each_entry(tpos, h, node) | ||
146 | if (tpos->keys.src == k->src && | ||
147 | tpos->keys.dst == k->dst && | ||
148 | tpos->keys.ports == k->ports && | ||
149 | tpos->keys.ip_proto == k->ip_proto && | ||
150 | tpos->keys.n_proto == k->n_proto) | ||
151 | return tpos; | ||
152 | return NULL; | ||
153 | } | ||
154 | |||
155 | int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, | ||
156 | u16 rxq_index, u32 flow_id) | ||
157 | { | ||
158 | struct flow_keys keys; | ||
159 | struct enic_rfs_fltr_node *n; | ||
160 | struct enic *enic; | ||
161 | u16 tbl_idx; | ||
162 | int res, i; | ||
163 | |||
164 | enic = netdev_priv(dev); | ||
165 | res = skb_flow_dissect(skb, &keys); | ||
166 | if (!res || keys.n_proto != htons(ETH_P_IP) || | ||
167 | (keys.ip_proto != IPPROTO_TCP && keys.ip_proto != IPPROTO_UDP)) | ||
168 | return -EPROTONOSUPPORT; | ||
169 | |||
170 | tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK; | ||
171 | spin_lock(&enic->rfs_h.lock); | ||
172 | n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys); | ||
173 | |||
174 | if (n) { /* entry already present */ | ||
175 | if (rxq_index == n->rq_id) { | ||
176 | res = -EEXIST; | ||
177 | goto ret_unlock; | ||
178 | } | ||
179 | |||
180 | /* desired rq changed for the flow, we need to delete | ||
181 | * old fltr and add new one | ||
182 | * | ||
183 | * The moment we delete the fltr, the upcoming pkts | ||
184 | * are put in the default rq based on rss. When we add | ||
185 | * new filter, upcoming pkts are put in desired queue. | ||
186 | * This could cause ooo pkts. | ||
187 | * | ||
188 | * Let's first try adding the new fltr and then del the old one. | ||
189 | */ | ||
190 | i = --enic->rfs_h.free; | ||
191 | /* clsf tbl is full, we have to del old fltr first*/ | ||
192 | if (unlikely(i < 0)) { | ||
193 | enic->rfs_h.free++; | ||
194 | res = enic_delfltr(enic, n->fltr_id); | ||
195 | if (unlikely(res < 0)) | ||
196 | goto ret_unlock; | ||
197 | res = enic_addfltr_5t(enic, &keys, rxq_index); | ||
198 | if (res < 0) { | ||
199 | hlist_del(&n->node); | ||
200 | enic->rfs_h.free++; | ||
201 | goto ret_unlock; | ||
202 | } | ||
203 | /* add new fltr 1st then del old fltr */ | ||
204 | } else { | ||
205 | int ret; | ||
206 | |||
207 | res = enic_addfltr_5t(enic, &keys, rxq_index); | ||
208 | if (res < 0) { | ||
209 | enic->rfs_h.free++; | ||
210 | goto ret_unlock; | ||
211 | } | ||
212 | ret = enic_delfltr(enic, n->fltr_id); | ||
213 | /* deleting old fltr failed. Add old fltr to list. | ||
214 | * enic_flow_may_expire() will try to delete it later. | ||
215 | */ | ||
216 | if (unlikely(ret < 0)) { | ||
217 | struct enic_rfs_fltr_node *d; | ||
218 | struct hlist_head *head; | ||
219 | |||
220 | head = &enic->rfs_h.ht_head[tbl_idx]; | ||
221 | d = kmalloc(sizeof(*d), GFP_ATOMIC); | ||
222 | if (d) { | ||
223 | d->fltr_id = n->fltr_id; | ||
224 | INIT_HLIST_NODE(&d->node); | ||
225 | hlist_add_head(&d->node, head); | ||
226 | } | ||
227 | } else { | ||
228 | enic->rfs_h.free++; | ||
229 | } | ||
230 | } | ||
231 | n->rq_id = rxq_index; | ||
232 | n->fltr_id = res; | ||
233 | n->flow_id = flow_id; | ||
234 | /* entry not present */ | ||
235 | } else { | ||
236 | i = --enic->rfs_h.free; | ||
237 | if (i <= 0) { | ||
238 | enic->rfs_h.free++; | ||
239 | res = -EBUSY; | ||
240 | goto ret_unlock; | ||
241 | } | ||
242 | |||
243 | n = kmalloc(sizeof(*n), GFP_ATOMIC); | ||
244 | if (!n) { | ||
245 | res = -ENOMEM; | ||
246 | enic->rfs_h.free++; | ||
247 | goto ret_unlock; | ||
248 | } | ||
249 | |||
250 | res = enic_addfltr_5t(enic, &keys, rxq_index); | ||
251 | if (res < 0) { | ||
252 | kfree(n); | ||
253 | enic->rfs_h.free++; | ||
254 | goto ret_unlock; | ||
255 | } | ||
256 | n->rq_id = rxq_index; | ||
257 | n->fltr_id = res; | ||
258 | n->flow_id = flow_id; | ||
259 | n->keys = keys; | ||
260 | INIT_HLIST_NODE(&n->node); | ||
261 | hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]); | ||
262 | } | ||
263 | |||
264 | ret_unlock: | ||
265 | spin_unlock(&enic->rfs_h.lock); | ||
266 | return res; | ||
267 | } | ||
268 | |||
269 | #else | ||
270 | |||
271 | void enic_rfs_flw_tbl_init(struct enic *enic) | ||
272 | { | ||
273 | } | ||
274 | |||
275 | void enic_rfs_flw_tbl_free(struct enic *enic) | ||
276 | { | ||
277 | } | ||
278 | |||
279 | #endif /* CONFIG_RFS_ACCEL */ | ||
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.h b/drivers/net/ethernet/cisco/enic/enic_clsf.h
new file mode 100644
index 000000000000..76a85bb0bb73
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.h
@@ -0,0 +1,19 @@
1 | #ifndef _ENIC_CLSF_H_ | ||
2 | #define _ENIC_CLSF_H_ | ||
3 | |||
4 | #include "vnic_dev.h" | ||
5 | #include "enic.h" | ||
6 | |||
7 | #define ENIC_CLSF_EXPIRE_COUNT 128 | ||
8 | |||
9 | int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq); | ||
10 | int enic_delfltr(struct enic *enic, u16 filter_id); | ||
11 | |||
12 | #ifdef CONFIG_RFS_ACCEL | ||
13 | void enic_rfs_flw_tbl_init(struct enic *enic); | ||
14 | void enic_rfs_flw_tbl_free(struct enic *enic); | ||
15 | int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, | ||
16 | u16 rxq_index, u32 flow_id); | ||
17 | #endif /* CONFIG_RFS_ACCEL */ | ||
18 | |||
19 | #endif /* _ENIC_CLSF_H_ */ | ||
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index 3e27df522847..87ddc44b590e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -29,9 +29,9 @@ int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
29 | { | 29 | { |
30 | int err; | 30 | int err; |
31 | 31 | ||
32 | spin_lock(&enic->devcmd_lock); | 32 | spin_lock_bh(&enic->devcmd_lock); |
33 | err = vnic_dev_fw_info(enic->vdev, fw_info); | 33 | err = vnic_dev_fw_info(enic->vdev, fw_info); |
34 | spin_unlock(&enic->devcmd_lock); | 34 | spin_unlock_bh(&enic->devcmd_lock); |
35 | 35 | ||
36 | return err; | 36 | return err; |
37 | } | 37 | } |
@@ -40,9 +40,9 @@ int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats) | |||
40 | { | 40 | { |
41 | int err; | 41 | int err; |
42 | 42 | ||
43 | spin_lock(&enic->devcmd_lock); | 43 | spin_lock_bh(&enic->devcmd_lock); |
44 | err = vnic_dev_stats_dump(enic->vdev, vstats); | 44 | err = vnic_dev_stats_dump(enic->vdev, vstats); |
45 | spin_unlock(&enic->devcmd_lock); | 45 | spin_unlock_bh(&enic->devcmd_lock); |
46 | 46 | ||
47 | return err; | 47 | return err; |
48 | } | 48 | } |
@@ -54,9 +54,9 @@ int enic_dev_add_station_addr(struct enic *enic) | |||
54 | if (!is_valid_ether_addr(enic->netdev->dev_addr)) | 54 | if (!is_valid_ether_addr(enic->netdev->dev_addr)) |
55 | return -EADDRNOTAVAIL; | 55 | return -EADDRNOTAVAIL; |
56 | 56 | ||
57 | spin_lock(&enic->devcmd_lock); | 57 | spin_lock_bh(&enic->devcmd_lock); |
58 | err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr); | 58 | err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr); |
59 | spin_unlock(&enic->devcmd_lock); | 59 | spin_unlock_bh(&enic->devcmd_lock); |
60 | 60 | ||
61 | return err; | 61 | return err; |
62 | } | 62 | } |
@@ -68,9 +68,9 @@ int enic_dev_del_station_addr(struct enic *enic) | |||
68 | if (!is_valid_ether_addr(enic->netdev->dev_addr)) | 68 | if (!is_valid_ether_addr(enic->netdev->dev_addr)) |
69 | return -EADDRNOTAVAIL; | 69 | return -EADDRNOTAVAIL; |
70 | 70 | ||
71 | spin_lock(&enic->devcmd_lock); | 71 | spin_lock_bh(&enic->devcmd_lock); |
72 | err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr); | 72 | err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr); |
73 | spin_unlock(&enic->devcmd_lock); | 73 | spin_unlock_bh(&enic->devcmd_lock); |
74 | 74 | ||
75 | return err; | 75 | return err; |
76 | } | 76 | } |
@@ -80,10 +80,10 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast, | |||
80 | { | 80 | { |
81 | int err; | 81 | int err; |
82 | 82 | ||
83 | spin_lock(&enic->devcmd_lock); | 83 | spin_lock_bh(&enic->devcmd_lock); |
84 | err = vnic_dev_packet_filter(enic->vdev, directed, | 84 | err = vnic_dev_packet_filter(enic->vdev, directed, |
85 | multicast, broadcast, promisc, allmulti); | 85 | multicast, broadcast, promisc, allmulti); |
86 | spin_unlock(&enic->devcmd_lock); | 86 | spin_unlock_bh(&enic->devcmd_lock); |
87 | 87 | ||
88 | return err; | 88 | return err; |
89 | } | 89 | } |
@@ -92,9 +92,9 @@ int enic_dev_add_addr(struct enic *enic, const u8 *addr) | |||
92 | { | 92 | { |
93 | int err; | 93 | int err; |
94 | 94 | ||
95 | spin_lock(&enic->devcmd_lock); | 95 | spin_lock_bh(&enic->devcmd_lock); |
96 | err = vnic_dev_add_addr(enic->vdev, addr); | 96 | err = vnic_dev_add_addr(enic->vdev, addr); |
97 | spin_unlock(&enic->devcmd_lock); | 97 | spin_unlock_bh(&enic->devcmd_lock); |
98 | 98 | ||
99 | return err; | 99 | return err; |
100 | } | 100 | } |
@@ -103,9 +103,9 @@ int enic_dev_del_addr(struct enic *enic, const u8 *addr) | |||
103 | { | 103 | { |
104 | int err; | 104 | int err; |
105 | 105 | ||
106 | spin_lock(&enic->devcmd_lock); | 106 | spin_lock_bh(&enic->devcmd_lock); |
107 | err = vnic_dev_del_addr(enic->vdev, addr); | 107 | err = vnic_dev_del_addr(enic->vdev, addr); |
108 | spin_unlock(&enic->devcmd_lock); | 108 | spin_unlock_bh(&enic->devcmd_lock); |
109 | 109 | ||
110 | return err; | 110 | return err; |
111 | } | 111 | } |
@@ -114,9 +114,9 @@ int enic_dev_notify_unset(struct enic *enic) | |||
114 | { | 114 | { |
115 | int err; | 115 | int err; |
116 | 116 | ||
117 | spin_lock(&enic->devcmd_lock); | 117 | spin_lock_bh(&enic->devcmd_lock); |
118 | err = vnic_dev_notify_unset(enic->vdev); | 118 | err = vnic_dev_notify_unset(enic->vdev); |
119 | spin_unlock(&enic->devcmd_lock); | 119 | spin_unlock_bh(&enic->devcmd_lock); |
120 | 120 | ||
121 | return err; | 121 | return err; |
122 | } | 122 | } |
@@ -125,9 +125,9 @@ int enic_dev_hang_notify(struct enic *enic) | |||
125 | { | 125 | { |
126 | int err; | 126 | int err; |
127 | 127 | ||
128 | spin_lock(&enic->devcmd_lock); | 128 | spin_lock_bh(&enic->devcmd_lock); |
129 | err = vnic_dev_hang_notify(enic->vdev); | 129 | err = vnic_dev_hang_notify(enic->vdev); |
130 | spin_unlock(&enic->devcmd_lock); | 130 | spin_unlock_bh(&enic->devcmd_lock); |
131 | 131 | ||
132 | return err; | 132 | return err; |
133 | } | 133 | } |
@@ -136,10 +136,10 @@ int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic) | |||
136 | { | 136 | { |
137 | int err; | 137 | int err; |
138 | 138 | ||
139 | spin_lock(&enic->devcmd_lock); | 139 | spin_lock_bh(&enic->devcmd_lock); |
140 | err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, | 140 | err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, |
141 | IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN); | 141 | IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN); |
142 | spin_unlock(&enic->devcmd_lock); | 142 | spin_unlock_bh(&enic->devcmd_lock); |
143 | 143 | ||
144 | return err; | 144 | return err; |
145 | } | 145 | } |
@@ -148,9 +148,9 @@ int enic_dev_enable(struct enic *enic) | |||
148 | { | 148 | { |
149 | int err; | 149 | int err; |
150 | 150 | ||
151 | spin_lock(&enic->devcmd_lock); | 151 | spin_lock_bh(&enic->devcmd_lock); |
152 | err = vnic_dev_enable_wait(enic->vdev); | 152 | err = vnic_dev_enable_wait(enic->vdev); |
153 | spin_unlock(&enic->devcmd_lock); | 153 | spin_unlock_bh(&enic->devcmd_lock); |
154 | 154 | ||
155 | return err; | 155 | return err; |
156 | } | 156 | } |
@@ -159,9 +159,9 @@ int enic_dev_disable(struct enic *enic) | |||
159 | { | 159 | { |
160 | int err; | 160 | int err; |
161 | 161 | ||
162 | spin_lock(&enic->devcmd_lock); | 162 | spin_lock_bh(&enic->devcmd_lock); |
163 | err = vnic_dev_disable(enic->vdev); | 163 | err = vnic_dev_disable(enic->vdev); |
164 | spin_unlock(&enic->devcmd_lock); | 164 | spin_unlock_bh(&enic->devcmd_lock); |
165 | 165 | ||
166 | return err; | 166 | return err; |
167 | } | 167 | } |
@@ -170,9 +170,9 @@ int enic_dev_intr_coal_timer_info(struct enic *enic) | |||
170 | { | 170 | { |
171 | int err; | 171 | int err; |
172 | 172 | ||
173 | spin_lock(&enic->devcmd_lock); | 173 | spin_lock_bh(&enic->devcmd_lock); |
174 | err = vnic_dev_intr_coal_timer_info(enic->vdev); | 174 | err = vnic_dev_intr_coal_timer_info(enic->vdev); |
175 | spin_unlock(&enic->devcmd_lock); | 175 | spin_unlock_bh(&enic->devcmd_lock); |
176 | 176 | ||
177 | return err; | 177 | return err; |
178 | } | 178 | } |
@@ -181,9 +181,9 @@ int enic_vnic_dev_deinit(struct enic *enic) | |||
181 | { | 181 | { |
182 | int err; | 182 | int err; |
183 | 183 | ||
184 | spin_lock(&enic->devcmd_lock); | 184 | spin_lock_bh(&enic->devcmd_lock); |
185 | err = vnic_dev_deinit(enic->vdev); | 185 | err = vnic_dev_deinit(enic->vdev); |
186 | spin_unlock(&enic->devcmd_lock); | 186 | spin_unlock_bh(&enic->devcmd_lock); |
187 | 187 | ||
188 | return err; | 188 | return err; |
189 | } | 189 | } |
@@ -192,10 +192,10 @@ int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp) | |||
192 | { | 192 | { |
193 | int err; | 193 | int err; |
194 | 194 | ||
195 | spin_lock(&enic->devcmd_lock); | 195 | spin_lock_bh(&enic->devcmd_lock); |
196 | err = vnic_dev_init_prov2(enic->vdev, | 196 | err = vnic_dev_init_prov2(enic->vdev, |
197 | (u8 *)vp, vic_provinfo_size(vp)); | 197 | (u8 *)vp, vic_provinfo_size(vp)); |
198 | spin_unlock(&enic->devcmd_lock); | 198 | spin_unlock_bh(&enic->devcmd_lock); |
199 | 199 | ||
200 | return err; | 200 | return err; |
201 | } | 201 | } |
@@ -204,9 +204,9 @@ int enic_dev_deinit_done(struct enic *enic, int *status) | |||
204 | { | 204 | { |
205 | int err; | 205 | int err; |
206 | 206 | ||
207 | spin_lock(&enic->devcmd_lock); | 207 | spin_lock_bh(&enic->devcmd_lock); |
208 | err = vnic_dev_deinit_done(enic->vdev, status); | 208 | err = vnic_dev_deinit_done(enic->vdev, status); |
209 | spin_unlock(&enic->devcmd_lock); | 209 | spin_unlock_bh(&enic->devcmd_lock); |
210 | 210 | ||
211 | return err; | 211 | return err; |
212 | } | 212 | } |
@@ -217,9 +217,9 @@ int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) | |||
217 | struct enic *enic = netdev_priv(netdev); | 217 | struct enic *enic = netdev_priv(netdev); |
218 | int err; | 218 | int err; |
219 | 219 | ||
220 | spin_lock(&enic->devcmd_lock); | 220 | spin_lock_bh(&enic->devcmd_lock); |
221 | err = enic_add_vlan(enic, vid); | 221 | err = enic_add_vlan(enic, vid); |
222 | spin_unlock(&enic->devcmd_lock); | 222 | spin_unlock_bh(&enic->devcmd_lock); |
223 | 223 | ||
224 | return err; | 224 | return err; |
225 | } | 225 | } |
@@ -230,9 +230,9 @@ int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) | |||
230 | struct enic *enic = netdev_priv(netdev); | 230 | struct enic *enic = netdev_priv(netdev); |
231 | int err; | 231 | int err; |
232 | 232 | ||
233 | spin_lock(&enic->devcmd_lock); | 233 | spin_lock_bh(&enic->devcmd_lock); |
234 | err = enic_del_vlan(enic, vid); | 234 | err = enic_del_vlan(enic, vid); |
235 | spin_unlock(&enic->devcmd_lock); | 235 | spin_unlock_bh(&enic->devcmd_lock); |
236 | 236 | ||
237 | return err; | 237 | return err; |
238 | } | 238 | } |
@@ -241,9 +241,9 @@ int enic_dev_enable2(struct enic *enic, int active) | |||
241 | { | 241 | { |
242 | int err; | 242 | int err; |
243 | 243 | ||
244 | spin_lock(&enic->devcmd_lock); | 244 | spin_lock_bh(&enic->devcmd_lock); |
245 | err = vnic_dev_enable2(enic->vdev, active); | 245 | err = vnic_dev_enable2(enic->vdev, active); |
246 | spin_unlock(&enic->devcmd_lock); | 246 | spin_unlock_bh(&enic->devcmd_lock); |
247 | 247 | ||
248 | return err; | 248 | return err; |
249 | } | 249 | } |
@@ -252,9 +252,9 @@ int enic_dev_enable2_done(struct enic *enic, int *status) | |||
252 | { | 252 | { |
253 | int err; | 253 | int err; |
254 | 254 | ||
255 | spin_lock(&enic->devcmd_lock); | 255 | spin_lock_bh(&enic->devcmd_lock); |
256 | err = vnic_dev_enable2_done(enic->vdev, status); | 256 | err = vnic_dev_enable2_done(enic->vdev, status); |
257 | spin_unlock(&enic->devcmd_lock); | 257 | spin_unlock_bh(&enic->devcmd_lock); |
258 | 258 | ||
259 | return err; | 259 | return err; |
260 | } | 260 | } |
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 36ea1ab25f6a..10bb970b2f35 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -28,7 +28,7 @@
28 | */ | 28 | */ |
29 | #define ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnicdevcmdfn, ...) \ | 29 | #define ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnicdevcmdfn, ...) \ |
30 | do { \ | 30 | do { \ |
31 | spin_lock(&enic->devcmd_lock); \ | 31 | spin_lock_bh(&enic->devcmd_lock); \ |
32 | if (enic_is_valid_vf(enic, vf)) { \ | 32 | if (enic_is_valid_vf(enic, vf)) { \ |
33 | vnic_dev_cmd_proxy_by_index_start(enic->vdev, vf); \ | 33 | vnic_dev_cmd_proxy_by_index_start(enic->vdev, vf); \ |
34 | err = vnicdevcmdfn(enic->vdev, ##__VA_ARGS__); \ | 34 | err = vnicdevcmdfn(enic->vdev, ##__VA_ARGS__); \ |
@@ -36,7 +36,7 @@ | |||
36 | } else { \ | 36 | } else { \ |
37 | err = vnicdevcmdfn(enic->vdev, ##__VA_ARGS__); \ | 37 | err = vnicdevcmdfn(enic->vdev, ##__VA_ARGS__); \ |
38 | } \ | 38 | } \ |
39 | spin_unlock(&enic->devcmd_lock); \ | 39 | spin_unlock_bh(&enic->devcmd_lock); \ |
40 | } while (0) | 40 | } while (0) |
41 | 41 | ||
42 | int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info); | 42 | int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info); |
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index f32f828b7f3d..9348febc0743 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -39,6 +39,12 @@
39 | #include <linux/prefetch.h> | 39 | #include <linux/prefetch.h> |
40 | #include <net/ip6_checksum.h> | 40 | #include <net/ip6_checksum.h> |
41 | #include <linux/ktime.h> | 41 | #include <linux/ktime.h> |
42 | #ifdef CONFIG_RFS_ACCEL | ||
43 | #include <linux/cpu_rmap.h> | ||
44 | #endif | ||
45 | #ifdef CONFIG_NET_RX_BUSY_POLL | ||
46 | #include <net/busy_poll.h> | ||
47 | #endif | ||
42 | 48 | ||
43 | #include "cq_enet_desc.h" | 49 | #include "cq_enet_desc.h" |
44 | #include "vnic_dev.h" | 50 | #include "vnic_dev.h" |
@@ -49,6 +55,7 @@ | |||
49 | #include "enic.h" | 55 | #include "enic.h" |
50 | #include "enic_dev.h" | 56 | #include "enic_dev.h" |
51 | #include "enic_pp.h" | 57 | #include "enic_pp.h" |
58 | #include "enic_clsf.h" | ||
52 | 59 | ||
53 | #define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) | 60 | #define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) |
54 | #define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) | 61 | #define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) |
@@ -309,40 +316,15 @@ static irqreturn_t enic_isr_msi(int irq, void *data) | |||
309 | return IRQ_HANDLED; | 316 | return IRQ_HANDLED; |
310 | } | 317 | } |
311 | 318 | ||
312 | static irqreturn_t enic_isr_msix_rq(int irq, void *data) | 319 | static irqreturn_t enic_isr_msix(int irq, void *data) |
313 | { | 320 | { |
314 | struct napi_struct *napi = data; | 321 | struct napi_struct *napi = data; |
315 | 322 | ||
316 | /* schedule NAPI polling for RQ cleanup */ | ||
317 | napi_schedule(napi); | 323 | napi_schedule(napi); |
318 | 324 | ||
319 | return IRQ_HANDLED; | 325 | return IRQ_HANDLED; |
320 | } | 326 | } |
321 | 327 | ||
322 | static irqreturn_t enic_isr_msix_wq(int irq, void *data) | ||
323 | { | ||
324 | struct enic *enic = data; | ||
325 | unsigned int cq; | ||
326 | unsigned int intr; | ||
327 | unsigned int wq_work_to_do = -1; /* no limit */ | ||
328 | unsigned int wq_work_done; | ||
329 | unsigned int wq_irq; | ||
330 | |||
331 | wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector; | ||
332 | cq = enic_cq_wq(enic, wq_irq); | ||
333 | intr = enic_msix_wq_intr(enic, wq_irq); | ||
334 | |||
335 | wq_work_done = vnic_cq_service(&enic->cq[cq], | ||
336 | wq_work_to_do, enic_wq_service, NULL); | ||
337 | |||
338 | vnic_intr_return_credits(&enic->intr[intr], | ||
339 | wq_work_done, | ||
340 | 1 /* unmask intr */, | ||
341 | 1 /* reset intr timer */); | ||
342 | |||
343 | return IRQ_HANDLED; | ||
344 | } | ||
345 | |||
346 | static irqreturn_t enic_isr_msix_err(int irq, void *data) | 328 | static irqreturn_t enic_isr_msix_err(int irq, void *data) |
347 | { | 329 | { |
348 | struct enic *enic = data; | 330 | struct enic *enic = data; |
@@ -1049,10 +1031,12 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, | |||
1049 | if (vlan_stripped) | 1031 | if (vlan_stripped) |
1050 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); | 1032 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); |
1051 | 1033 | ||
1052 | if (netdev->features & NETIF_F_GRO) | 1034 | skb_mark_napi_id(skb, &enic->napi[rq->index]); |
1053 | napi_gro_receive(&enic->napi[q_number], skb); | 1035 | if (enic_poll_busy_polling(rq) || |
1054 | else | 1036 | !(netdev->features & NETIF_F_GRO)) |
1055 | netif_receive_skb(skb); | 1037 | netif_receive_skb(skb); |
1038 | else | ||
1039 | napi_gro_receive(&enic->napi[q_number], skb); | ||
1056 | if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) | 1040 | if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) |
1057 | enic_intr_update_pkt_size(&cq->pkt_size_counter, | 1041 | enic_intr_update_pkt_size(&cq->pkt_size_counter, |
1058 | bytes_written); | 1042 | bytes_written); |
@@ -1089,16 +1073,22 @@ static int enic_poll(struct napi_struct *napi, int budget) | |||
1089 | unsigned int work_done, rq_work_done = 0, wq_work_done; | 1073 | unsigned int work_done, rq_work_done = 0, wq_work_done; |
1090 | int err; | 1074 | int err; |
1091 | 1075 | ||
1092 | /* Service RQ (first) and WQ | 1076 | wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do, |
1093 | */ | 1077 | enic_wq_service, NULL); |
1078 | |||
1079 | if (!enic_poll_lock_napi(&enic->rq[cq_rq])) { | ||
1080 | if (wq_work_done > 0) | ||
1081 | vnic_intr_return_credits(&enic->intr[intr], | ||
1082 | wq_work_done, | ||
1083 | 0 /* dont unmask intr */, | ||
1084 | 0 /* dont reset intr timer */); | ||
1085 | return rq_work_done; | ||
1086 | } | ||
1094 | 1087 | ||
1095 | if (budget > 0) | 1088 | if (budget > 0) |
1096 | rq_work_done = vnic_cq_service(&enic->cq[cq_rq], | 1089 | rq_work_done = vnic_cq_service(&enic->cq[cq_rq], |
1097 | rq_work_to_do, enic_rq_service, NULL); | 1090 | rq_work_to_do, enic_rq_service, NULL); |
1098 | 1091 | ||
1099 | wq_work_done = vnic_cq_service(&enic->cq[cq_wq], | ||
1100 | wq_work_to_do, enic_wq_service, NULL); | ||
1101 | |||
1102 | /* Accumulate intr event credits for this polling | 1092 | /* Accumulate intr event credits for this polling |
1103 | * cycle. An intr event is the completion of a | 1093 | * cycle. An intr event is the completion of a |
1104 | * a WQ or RQ packet. | 1094 | * a WQ or RQ packet. |
@@ -1130,6 +1120,7 @@ static int enic_poll(struct napi_struct *napi, int budget) | |||
1130 | napi_complete(napi); | 1120 | napi_complete(napi); |
1131 | vnic_intr_unmask(&enic->intr[intr]); | 1121 | vnic_intr_unmask(&enic->intr[intr]); |
1132 | } | 1122 | } |
1123 | enic_poll_unlock_napi(&enic->rq[cq_rq]); | ||
1133 | 1124 | ||
1134 | return rq_work_done; | 1125 | return rq_work_done; |
1135 | } | 1126 | } |
@@ -1192,7 +1183,102 @@ static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq) | |||
1192 | pkt_size_counter->small_pkt_bytes_cnt = 0; | 1183 | pkt_size_counter->small_pkt_bytes_cnt = 0; |
1193 | } | 1184 | } |
1194 | 1185 | ||
1195 | static int enic_poll_msix(struct napi_struct *napi, int budget) | 1186 | #ifdef CONFIG_RFS_ACCEL |
1187 | static void enic_free_rx_cpu_rmap(struct enic *enic) | ||
1188 | { | ||
1189 | free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap); | ||
1190 | enic->netdev->rx_cpu_rmap = NULL; | ||
1191 | } | ||
1192 | |||
1193 | static void enic_set_rx_cpu_rmap(struct enic *enic) | ||
1194 | { | ||
1195 | int i, res; | ||
1196 | |||
1197 | if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) { | ||
1198 | enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count); | ||
1199 | if (unlikely(!enic->netdev->rx_cpu_rmap)) | ||
1200 | return; | ||
1201 | for (i = 0; i < enic->rq_count; i++) { | ||
1202 | res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap, | ||
1203 | enic->msix_entry[i].vector); | ||
1204 | if (unlikely(res)) { | ||
1205 | enic_free_rx_cpu_rmap(enic); | ||
1206 | return; | ||
1207 | } | ||
1208 | } | ||
1209 | } | ||
1210 | } | ||
1211 | |||
1212 | #else | ||
1213 | |||
1214 | static void enic_free_rx_cpu_rmap(struct enic *enic) | ||
1215 | { | ||
1216 | } | ||
1217 | |||
1218 | static void enic_set_rx_cpu_rmap(struct enic *enic) | ||
1219 | { | ||
1220 | } | ||
1221 | |||
1222 | #endif /* CONFIG_RFS_ACCEL */ | ||
1223 | |||
1224 | #ifdef CONFIG_NET_RX_BUSY_POLL | ||
1225 | int enic_busy_poll(struct napi_struct *napi) | ||
1226 | { | ||
1227 | struct net_device *netdev = napi->dev; | ||
1228 | struct enic *enic = netdev_priv(netdev); | ||
1229 | unsigned int rq = (napi - &enic->napi[0]); | ||
1230 | unsigned int cq = enic_cq_rq(enic, rq); | ||
1231 | unsigned int intr = enic_msix_rq_intr(enic, rq); | ||
1232 | unsigned int work_to_do = -1; /* clean all pkts possible */ | ||
1233 | unsigned int work_done; | ||
1234 | |||
1235 | if (!enic_poll_lock_poll(&enic->rq[rq])) | ||
1236 | return LL_FLUSH_BUSY; | ||
1237 | work_done = vnic_cq_service(&enic->cq[cq], work_to_do, | ||
1238 | enic_rq_service, NULL); | ||
1239 | |||
1240 | if (work_done > 0) | ||
1241 | vnic_intr_return_credits(&enic->intr[intr], | ||
1242 | work_done, 0, 0); | ||
1243 | vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); | ||
1244 | if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) | ||
1245 | enic_calc_int_moderation(enic, &enic->rq[rq]); | ||
1246 | enic_poll_unlock_poll(&enic->rq[rq]); | ||
1247 | |||
1248 | return work_done; | ||
1249 | } | ||
1250 | #endif /* CONFIG_NET_RX_BUSY_POLL */ | ||
1251 | |||
1252 | static int enic_poll_msix_wq(struct napi_struct *napi, int budget) | ||
1253 | { | ||
1254 | struct net_device *netdev = napi->dev; | ||
1255 | struct enic *enic = netdev_priv(netdev); | ||
1256 | unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count; | ||
1257 | struct vnic_wq *wq = &enic->wq[wq_index]; | ||
1258 | unsigned int cq; | ||
1259 | unsigned int intr; | ||
1260 | unsigned int wq_work_to_do = -1; /* clean all desc possible */ | ||
1261 | unsigned int wq_work_done; | ||
1262 | unsigned int wq_irq; | ||
1263 | |||
1264 | wq_irq = wq->index; | ||
1265 | cq = enic_cq_wq(enic, wq_irq); | ||
1266 | intr = enic_msix_wq_intr(enic, wq_irq); | ||
1267 | wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do, | ||
1268 | enic_wq_service, NULL); | ||
1269 | |||
1270 | vnic_intr_return_credits(&enic->intr[intr], wq_work_done, | ||
1271 | 0 /* don't unmask intr */, | ||
1272 | 1 /* reset intr timer */); | ||
1273 | if (!wq_work_done) { | ||
1274 | napi_complete(napi); | ||
1275 | vnic_intr_unmask(&enic->intr[intr]); | ||
1276 | } | ||
1277 | |||
1278 | return 0; | ||
1279 | } | ||
1280 | |||
1281 | static int enic_poll_msix_rq(struct napi_struct *napi, int budget) | ||
1196 | { | 1282 | { |
1197 | struct net_device *netdev = napi->dev; | 1283 | struct net_device *netdev = napi->dev; |
1198 | struct enic *enic = netdev_priv(netdev); | 1284 | struct enic *enic = netdev_priv(netdev); |
@@ -1203,6 +1289,8 @@ static int enic_poll_msix(struct napi_struct *napi, int budget) | |||
1203 | unsigned int work_done = 0; | 1289 | unsigned int work_done = 0; |
1204 | int err; | 1290 | int err; |
1205 | 1291 | ||
1292 | if (!enic_poll_lock_napi(&enic->rq[rq])) | ||
1293 | return work_done; | ||
1206 | /* Service RQ | 1294 | /* Service RQ |
1207 | */ | 1295 | */ |
1208 | 1296 | ||
@@ -1248,6 +1336,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget) | |||
1248 | enic_set_int_moderation(enic, &enic->rq[rq]); | 1336 | enic_set_int_moderation(enic, &enic->rq[rq]); |
1249 | vnic_intr_unmask(&enic->intr[intr]); | 1337 | vnic_intr_unmask(&enic->intr[intr]); |
1250 | } | 1338 | } |
1339 | enic_poll_unlock_napi(&enic->rq[rq]); | ||
1251 | 1340 | ||
1252 | return work_done; | 1341 | return work_done; |
1253 | } | 1342 | } |
@@ -1267,6 +1356,7 @@ static void enic_free_intr(struct enic *enic) | |||
1267 | struct net_device *netdev = enic->netdev; | 1356 | struct net_device *netdev = enic->netdev; |
1268 | unsigned int i; | 1357 | unsigned int i; |
1269 | 1358 | ||
1359 | enic_free_rx_cpu_rmap(enic); | ||
1270 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | 1360 | switch (vnic_dev_get_intr_mode(enic->vdev)) { |
1271 | case VNIC_DEV_INTR_MODE_INTX: | 1361 | case VNIC_DEV_INTR_MODE_INTX: |
1272 | free_irq(enic->pdev->irq, netdev); | 1362 | free_irq(enic->pdev->irq, netdev); |
@@ -1291,6 +1381,7 @@ static int enic_request_intr(struct enic *enic) | |||
1291 | unsigned int i, intr; | 1381 | unsigned int i, intr; |
1292 | int err = 0; | 1382 | int err = 0; |
1293 | 1383 | ||
1384 | enic_set_rx_cpu_rmap(enic); | ||
1294 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | 1385 | switch (vnic_dev_get_intr_mode(enic->vdev)) { |
1295 | 1386 | ||
1296 | case VNIC_DEV_INTR_MODE_INTX: | 1387 | case VNIC_DEV_INTR_MODE_INTX: |
@@ -1312,17 +1403,19 @@ static int enic_request_intr(struct enic *enic) | |||
1312 | snprintf(enic->msix[intr].devname, | 1403 | snprintf(enic->msix[intr].devname, |
1313 | sizeof(enic->msix[intr].devname), | 1404 | sizeof(enic->msix[intr].devname), |
1314 | "%.11s-rx-%d", netdev->name, i); | 1405 | "%.11s-rx-%d", netdev->name, i); |
1315 | enic->msix[intr].isr = enic_isr_msix_rq; | 1406 | enic->msix[intr].isr = enic_isr_msix; |
1316 | enic->msix[intr].devid = &enic->napi[i]; | 1407 | enic->msix[intr].devid = &enic->napi[i]; |
1317 | } | 1408 | } |
1318 | 1409 | ||
1319 | for (i = 0; i < enic->wq_count; i++) { | 1410 | for (i = 0; i < enic->wq_count; i++) { |
1411 | int wq = enic_cq_wq(enic, i); | ||
1412 | |||
1320 | intr = enic_msix_wq_intr(enic, i); | 1413 | intr = enic_msix_wq_intr(enic, i); |
1321 | snprintf(enic->msix[intr].devname, | 1414 | snprintf(enic->msix[intr].devname, |
1322 | sizeof(enic->msix[intr].devname), | 1415 | sizeof(enic->msix[intr].devname), |
1323 | "%.11s-tx-%d", netdev->name, i); | 1416 | "%.11s-tx-%d", netdev->name, i); |
1324 | enic->msix[intr].isr = enic_isr_msix_wq; | 1417 | enic->msix[intr].isr = enic_isr_msix; |
1325 | enic->msix[intr].devid = enic; | 1418 | enic->msix[intr].devid = &enic->napi[wq]; |
1326 | } | 1419 | } |
1327 | 1420 | ||
1328 | intr = enic_msix_err_intr(enic); | 1421 | intr = enic_msix_err_intr(enic); |
@@ -1421,7 +1514,7 @@ static int enic_dev_notify_set(struct enic *enic) | |||
1421 | { | 1514 | { |
1422 | int err; | 1515 | int err; |
1423 | 1516 | ||
1424 | spin_lock(&enic->devcmd_lock); | 1517 | spin_lock_bh(&enic->devcmd_lock); |
1425 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | 1518 | switch (vnic_dev_get_intr_mode(enic->vdev)) { |
1426 | case VNIC_DEV_INTR_MODE_INTX: | 1519 | case VNIC_DEV_INTR_MODE_INTX: |
1427 | err = vnic_dev_notify_set(enic->vdev, | 1520 | err = vnic_dev_notify_set(enic->vdev, |
@@ -1435,7 +1528,7 @@ static int enic_dev_notify_set(struct enic *enic) | |||
1435 | err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); | 1528 | err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); |
1436 | break; | 1529 | break; |
1437 | } | 1530 | } |
1438 | spin_unlock(&enic->devcmd_lock); | 1531 | spin_unlock_bh(&enic->devcmd_lock); |
1439 | 1532 | ||
1440 | return err; | 1533 | return err; |
1441 | } | 1534 | } |
@@ -1494,15 +1587,20 @@ static int enic_open(struct net_device *netdev) | |||
1494 | 1587 | ||
1495 | netif_tx_wake_all_queues(netdev); | 1588 | netif_tx_wake_all_queues(netdev); |
1496 | 1589 | ||
1497 | for (i = 0; i < enic->rq_count; i++) | 1590 | for (i = 0; i < enic->rq_count; i++) { |
1591 | enic_busy_poll_init_lock(&enic->rq[i]); | ||
1498 | napi_enable(&enic->napi[i]); | 1592 | napi_enable(&enic->napi[i]); |
1499 | 1593 | } | |
1594 | if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) | ||
1595 | for (i = 0; i < enic->wq_count; i++) | ||
1596 | napi_enable(&enic->napi[enic_cq_wq(enic, i)]); | ||
1500 | enic_dev_enable(enic); | 1597 | enic_dev_enable(enic); |
1501 | 1598 | ||
1502 | for (i = 0; i < enic->intr_count; i++) | 1599 | for (i = 0; i < enic->intr_count; i++) |
1503 | vnic_intr_unmask(&enic->intr[i]); | 1600 | vnic_intr_unmask(&enic->intr[i]); |
1504 | 1601 | ||
1505 | enic_notify_timer_start(enic); | 1602 | enic_notify_timer_start(enic); |
1603 | enic_rfs_flw_tbl_init(enic); | ||
1506 | 1604 | ||
1507 | return 0; | 1605 | return 0; |
1508 | 1606 | ||
@@ -1529,14 +1627,23 @@ static int enic_stop(struct net_device *netdev) | |||
1529 | enic_synchronize_irqs(enic); | 1627 | enic_synchronize_irqs(enic); |
1530 | 1628 | ||
1531 | del_timer_sync(&enic->notify_timer); | 1629 | del_timer_sync(&enic->notify_timer); |
1630 | enic_rfs_flw_tbl_free(enic); | ||
1532 | 1631 | ||
1533 | enic_dev_disable(enic); | 1632 | enic_dev_disable(enic); |
1534 | 1633 | ||
1535 | for (i = 0; i < enic->rq_count; i++) | 1634 | local_bh_disable(); |
1635 | for (i = 0; i < enic->rq_count; i++) { | ||
1536 | napi_disable(&enic->napi[i]); | 1636 | napi_disable(&enic->napi[i]); |
1637 | while (!enic_poll_lock_napi(&enic->rq[i])) | ||
1638 | mdelay(1); | ||
1639 | } | ||
1640 | local_bh_enable(); | ||
1537 | 1641 | ||
1538 | netif_carrier_off(netdev); | 1642 | netif_carrier_off(netdev); |
1539 | netif_tx_disable(netdev); | 1643 | netif_tx_disable(netdev); |
1644 | if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) | ||
1645 | for (i = 0; i < enic->wq_count; i++) | ||
1646 | napi_disable(&enic->napi[enic_cq_wq(enic, i)]); | ||
1540 | 1647 | ||
1541 | if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) | 1648 | if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) |
1542 | enic_dev_del_station_addr(enic); | 1649 | enic_dev_del_station_addr(enic); |
@@ -1656,13 +1763,14 @@ static void enic_poll_controller(struct net_device *netdev) | |||
1656 | case VNIC_DEV_INTR_MODE_MSIX: | 1763 | case VNIC_DEV_INTR_MODE_MSIX: |
1657 | for (i = 0; i < enic->rq_count; i++) { | 1764 | for (i = 0; i < enic->rq_count; i++) { |
1658 | intr = enic_msix_rq_intr(enic, i); | 1765 | intr = enic_msix_rq_intr(enic, i); |
1659 | enic_isr_msix_rq(enic->msix_entry[intr].vector, | 1766 | enic_isr_msix(enic->msix_entry[intr].vector, |
1660 | &enic->napi[i]); | 1767 | &enic->napi[i]); |
1661 | } | 1768 | } |
1662 | 1769 | ||
1663 | for (i = 0; i < enic->wq_count; i++) { | 1770 | for (i = 0; i < enic->wq_count; i++) { |
1664 | intr = enic_msix_wq_intr(enic, i); | 1771 | intr = enic_msix_wq_intr(enic, i); |
1665 | enic_isr_msix_wq(enic->msix_entry[intr].vector, enic); | 1772 | enic_isr_msix(enic->msix_entry[intr].vector, |
1773 | &enic->napi[enic_cq_wq(enic, i)]); | ||
1666 | } | 1774 | } |
1667 | 1775 | ||
1668 | break; | 1776 | break; |
@@ -1758,11 +1866,11 @@ static int enic_set_rsskey(struct enic *enic) | |||
1758 | 1866 | ||
1759 | memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key)); | 1867 | memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key)); |
1760 | 1868 | ||
1761 | spin_lock(&enic->devcmd_lock); | 1869 | spin_lock_bh(&enic->devcmd_lock); |
1762 | err = enic_set_rss_key(enic, | 1870 | err = enic_set_rss_key(enic, |
1763 | rss_key_buf_pa, | 1871 | rss_key_buf_pa, |
1764 | sizeof(union vnic_rss_key)); | 1872 | sizeof(union vnic_rss_key)); |
1765 | spin_unlock(&enic->devcmd_lock); | 1873 | spin_unlock_bh(&enic->devcmd_lock); |
1766 | 1874 | ||
1767 | pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key), | 1875 | pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key), |
1768 | rss_key_buf_va, rss_key_buf_pa); | 1876 | rss_key_buf_va, rss_key_buf_pa); |
@@ -1785,11 +1893,11 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) | |||
1785 | for (i = 0; i < (1 << rss_hash_bits); i++) | 1893 | for (i = 0; i < (1 << rss_hash_bits); i++) |
1786 | (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count; | 1894 | (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count; |
1787 | 1895 | ||
1788 | spin_lock(&enic->devcmd_lock); | 1896 | spin_lock_bh(&enic->devcmd_lock); |
1789 | err = enic_set_rss_cpu(enic, | 1897 | err = enic_set_rss_cpu(enic, |
1790 | rss_cpu_buf_pa, | 1898 | rss_cpu_buf_pa, |
1791 | sizeof(union vnic_rss_cpu)); | 1899 | sizeof(union vnic_rss_cpu)); |
1792 | spin_unlock(&enic->devcmd_lock); | 1900 | spin_unlock_bh(&enic->devcmd_lock); |
1793 | 1901 | ||
1794 | pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu), | 1902 | pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu), |
1795 | rss_cpu_buf_va, rss_cpu_buf_pa); | 1903 | rss_cpu_buf_va, rss_cpu_buf_pa); |
@@ -1807,13 +1915,13 @@ static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, | |||
1807 | /* Enable VLAN tag stripping. | 1915 | /* Enable VLAN tag stripping. |
1808 | */ | 1916 | */ |
1809 | 1917 | ||
1810 | spin_lock(&enic->devcmd_lock); | 1918 | spin_lock_bh(&enic->devcmd_lock); |
1811 | err = enic_set_nic_cfg(enic, | 1919 | err = enic_set_nic_cfg(enic, |
1812 | rss_default_cpu, rss_hash_type, | 1920 | rss_default_cpu, rss_hash_type, |
1813 | rss_hash_bits, rss_base_cpu, | 1921 | rss_hash_bits, rss_base_cpu, |
1814 | rss_enable, tso_ipid_split_en, | 1922 | rss_enable, tso_ipid_split_en, |
1815 | ig_vlan_strip_en); | 1923 | ig_vlan_strip_en); |
1816 | spin_unlock(&enic->devcmd_lock); | 1924 | spin_unlock_bh(&enic->devcmd_lock); |
1817 | 1925 | ||
1818 | return err; | 1926 | return err; |
1819 | } | 1927 | } |
@@ -2021,6 +2129,12 @@ static const struct net_device_ops enic_netdev_dynamic_ops = { | |||
2021 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2129 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2022 | .ndo_poll_controller = enic_poll_controller, | 2130 | .ndo_poll_controller = enic_poll_controller, |
2023 | #endif | 2131 | #endif |
2132 | #ifdef CONFIG_RFS_ACCEL | ||
2133 | .ndo_rx_flow_steer = enic_rx_flow_steer, | ||
2134 | #endif | ||
2135 | #ifdef CONFIG_NET_RX_BUSY_POLL | ||
2136 | .ndo_busy_poll = enic_busy_poll, | ||
2137 | #endif | ||
2024 | }; | 2138 | }; |
2025 | 2139 | ||
2026 | static const struct net_device_ops enic_netdev_ops = { | 2140 | static const struct net_device_ops enic_netdev_ops = { |
@@ -2041,14 +2155,25 @@ static const struct net_device_ops enic_netdev_ops = { | |||
2041 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2155 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2042 | .ndo_poll_controller = enic_poll_controller, | 2156 | .ndo_poll_controller = enic_poll_controller, |
2043 | #endif | 2157 | #endif |
2158 | #ifdef CONFIG_RFS_ACCEL | ||
2159 | .ndo_rx_flow_steer = enic_rx_flow_steer, | ||
2160 | #endif | ||
2161 | #ifdef CONFIG_NET_RX_BUSY_POLL | ||
2162 | .ndo_busy_poll = enic_busy_poll, | ||
2163 | #endif | ||
2044 | }; | 2164 | }; |
2045 | 2165 | ||
2046 | static void enic_dev_deinit(struct enic *enic) | 2166 | static void enic_dev_deinit(struct enic *enic) |
2047 | { | 2167 | { |
2048 | unsigned int i; | 2168 | unsigned int i; |
2049 | 2169 | ||
2050 | for (i = 0; i < enic->rq_count; i++) | 2170 | for (i = 0; i < enic->rq_count; i++) { |
2171 | napi_hash_del(&enic->napi[i]); | ||
2051 | netif_napi_del(&enic->napi[i]); | 2172 | netif_napi_del(&enic->napi[i]); |
2173 | } | ||
2174 | if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) | ||
2175 | for (i = 0; i < enic->wq_count; i++) | ||
2176 | netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]); | ||
2052 | 2177 | ||
2053 | enic_free_vnic_resources(enic); | 2178 | enic_free_vnic_resources(enic); |
2054 | enic_clear_intr_mode(enic); | 2179 | enic_clear_intr_mode(enic); |
@@ -2114,11 +2239,17 @@ static int enic_dev_init(struct enic *enic) | |||
2114 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | 2239 | switch (vnic_dev_get_intr_mode(enic->vdev)) { |
2115 | default: | 2240 | default: |
2116 | netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); | 2241 | netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); |
2242 | napi_hash_add(&enic->napi[0]); | ||
2117 | break; | 2243 | break; |
2118 | case VNIC_DEV_INTR_MODE_MSIX: | 2244 | case VNIC_DEV_INTR_MODE_MSIX: |
2119 | for (i = 0; i < enic->rq_count; i++) | 2245 | for (i = 0; i < enic->rq_count; i++) { |
2120 | netif_napi_add(netdev, &enic->napi[i], | 2246 | netif_napi_add(netdev, &enic->napi[i], |
2121 | enic_poll_msix, 64); | 2247 | enic_poll_msix_rq, NAPI_POLL_WEIGHT); |
2248 | napi_hash_add(&enic->napi[i]); | ||
2249 | } | ||
2250 | for (i = 0; i < enic->wq_count; i++) | ||
2251 | netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)], | ||
2252 | enic_poll_msix_wq, NAPI_POLL_WEIGHT); | ||
2122 | break; | 2253 | break; |
2123 | } | 2254 | } |
2124 | 2255 | ||
@@ -2386,6 +2517,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2386 | 2517 | ||
2387 | netdev->features |= netdev->hw_features; | 2518 | netdev->features |= netdev->hw_features; |
2388 | 2519 | ||
2520 | #ifdef CONFIG_RFS_ACCEL | ||
2521 | netdev->hw_features |= NETIF_F_NTUPLE; | ||
2522 | #endif | ||
2523 | |||
2389 | if (using_dac) | 2524 | if (using_dac) |
2390 | netdev->features |= NETIF_F_HIGHDMA; | 2525 | netdev->features |= NETIF_F_HIGHDMA; |
2391 | 2526 | ||
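
One detail worth calling out from the enic_main.c changes above: with napi[] grown to ENIC_RQ_MAX + ENIC_WQ_MAX, the RQ NAPI contexts occupy the first rq_count slots and the WQ NAPI contexts follow them, indexed via enic_cq_wq(). The comment block below restates that index math as a reading aid; the exact enic_cq_wq() formula is an assumption inferred from how enic_poll_msix_wq() recovers its WQ index, not something quoted from this patch.

	/*
	 * Assumed layout (based on enic_poll_msix_wq() above):
	 *
	 *   enic->napi[0 .. rq_count-1]                 -> RQ polling (enic_poll_msix_rq)
	 *   enic->napi[rq_count .. rq_count+wq_count-1] -> WQ clean-up (enic_poll_msix_wq)
	 *
	 * so enic_cq_wq(enic, i) is effectively rq_count + i, and the WQ handler can
	 * recover its index with:
	 *
	 *   wq_index = (napi - &enic->napi[0]) - enic->rq_count;
	 */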
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 31d658880c3c..9c96911fb2c8 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -71,6 +71,7 @@ int enic_get_vnic_config(struct enic *enic)
71 | GET_CONFIG(intr_mode); | 71 | GET_CONFIG(intr_mode); |
72 | GET_CONFIG(intr_timer_usec); | 72 | GET_CONFIG(intr_timer_usec); |
73 | GET_CONFIG(loop_tag); | 73 | GET_CONFIG(loop_tag); |
74 | GET_CONFIG(num_arfs); | ||
74 | 75 | ||
75 | c->wq_desc_count = | 76 | c->wq_desc_count = |
76 | min_t(u32, ENIC_MAX_WQ_DESCS, | 77 | min_t(u32, ENIC_MAX_WQ_DESCS, |
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index e86a45cb9e68..5abc496bcf29 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -312,12 +312,12 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
312 | err = (int)readq(&devcmd->args[0]); | 312 | err = (int)readq(&devcmd->args[0]); |
313 | if (err == ERR_EINVAL && | 313 | if (err == ERR_EINVAL && |
314 | cmd == CMD_CAPABILITY) | 314 | cmd == CMD_CAPABILITY) |
315 | return err; | 315 | return -err; |
316 | if (err != ERR_ECMDUNKNOWN || | 316 | if (err != ERR_ECMDUNKNOWN || |
317 | cmd != CMD_CAPABILITY) | 317 | cmd != CMD_CAPABILITY) |
318 | pr_err("Error %d devcmd %d\n", | 318 | pr_err("Error %d devcmd %d\n", |
319 | err, _CMD_N(cmd)); | 319 | err, _CMD_N(cmd)); |
320 | return err; | 320 | return -err; |
321 | } | 321 | } |
322 | 322 | ||
323 | if (_CMD_DIR(cmd) & _CMD_DIR_READ) { | 323 | if (_CMD_DIR(cmd) & _CMD_DIR_READ) { |
@@ -1048,3 +1048,64 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) | |||
1048 | 1048 | ||
1049 | return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait); | 1049 | return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait); |
1050 | } | 1050 | } |
1051 | |||
1052 | /* vnic_dev_classifier: Add/Delete classifier entries | ||
1053 | * @vdev: vdev of the device | ||
1054 | * @cmd: CLSF_ADD for Add filter | ||
1055 | * CLSF_DEL for Delete filter | ||
1056 | * @entry: In case of ADD filter, the caller passes the RQ number in this | ||
1057 | * variable. | ||
1058 | * | ||
1059 | * This function stores the filter_id returned by the firmware in the | ||
1060 | * same variable before returning. | ||
1061 | * | ||
1062 | * In case of DEL filter, the caller passes the filter ID of the | ||
1063 | * filter to be deleted. Return value is irrelevant. | ||
1064 | * @data: filter data | ||
1065 | */ | ||
1066 | int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, | ||
1067 | struct filter *data) | ||
1068 | { | ||
1069 | u64 a0, a1; | ||
1070 | int wait = 1000; | ||
1071 | dma_addr_t tlv_pa; | ||
1072 | int ret = -EINVAL; | ||
1073 | struct filter_tlv *tlv, *tlv_va; | ||
1074 | struct filter_action *action; | ||
1075 | u64 tlv_size; | ||
1076 | |||
1077 | if (cmd == CLSF_ADD) { | ||
1078 | tlv_size = sizeof(struct filter) + | ||
1079 | sizeof(struct filter_action) + | ||
1080 | 2 * sizeof(struct filter_tlv); | ||
1081 | tlv_va = pci_alloc_consistent(vdev->pdev, tlv_size, &tlv_pa); | ||
1082 | if (!tlv_va) | ||
1083 | return -ENOMEM; | ||
1084 | tlv = tlv_va; | ||
1085 | a0 = tlv_pa; | ||
1086 | a1 = tlv_size; | ||
1087 | memset(tlv, 0, tlv_size); | ||
1088 | tlv->type = CLSF_TLV_FILTER; | ||
1089 | tlv->length = sizeof(struct filter); | ||
1090 | *(struct filter *)&tlv->val = *data; | ||
1091 | |||
1092 | tlv = (struct filter_tlv *)((char *)tlv + | ||
1093 | sizeof(struct filter_tlv) + | ||
1094 | sizeof(struct filter)); | ||
1095 | |||
1096 | tlv->type = CLSF_TLV_ACTION; | ||
1097 | tlv->length = sizeof(struct filter_action); | ||
1098 | action = (struct filter_action *)&tlv->val; | ||
1099 | action->type = FILTER_ACTION_RQ_STEERING; | ||
1100 | action->u.rq_idx = *entry; | ||
1101 | |||
1102 | ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait); | ||
1103 | *entry = (u16)a0; | ||
1104 | pci_free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa); | ||
1105 | } else if (cmd == CLSF_DEL) { | ||
1106 | a0 = *entry; | ||
1107 | ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait); | ||
1108 | } | ||
1109 | |||
1110 | return ret; | ||
1111 | } | ||
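To illustrate the calling convention documented above, here is a hedged add-then-delete sketch; the helper name is hypothetical, and the 5-tuple layout and constants (FILTER_IPV4_5TUPLE, PROTO_TCP, FILTER_FIELDS_IPV4_5TUPLE) are assumed to come from vnic_devcmd.h as used by enic_clsf.c:

	/* Hypothetical helper: steer an IPv4/TCP 5-tuple to rq_index, then
	 * remove the filter using the id the firmware wrote back into 'entry'.
	 */
	static int enic_example_steer(struct vnic_dev *vdev, u16 rq_index,
				      __be32 src, __be32 dst,
				      __be16 sport, __be16 dport)
	{
		struct filter data = {
			.type = FILTER_IPV4_5TUPLE,
			.u.ipv4 = {
				.protocol = PROTO_TCP,
				.src_addr = ntohl(src),
				.dst_addr = ntohl(dst),
				.src_port = ntohs(sport),
				.dst_port = ntohs(dport),
				.flags = FILTER_FIELDS_IPV4_5TUPLE,
			},
		};
		u16 entry = rq_index;	/* in: RQ number, out: filter id */
		int ret;

		ret = vnic_dev_classifier(vdev, CLSF_ADD, &entry, &data);
		if (ret)
			return ret;

		/* ... later, tear the filter down again ... */
		return vnic_dev_classifier(vdev, CLSF_DEL, &entry, NULL);
	}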
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h index 1f3b301f8225..1fb214efceba 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.h +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h | |||
@@ -133,5 +133,7 @@ int vnic_dev_enable2(struct vnic_dev *vdev, int active); | |||
133 | int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status); | 133 | int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status); |
134 | int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status); | 134 | int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status); |
135 | int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); | 135 | int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); |
136 | int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, | ||
137 | struct filter *data); | ||
136 | 138 | ||
137 | #endif /* _VNIC_DEV_H_ */ | 139 | #endif /* _VNIC_DEV_H_ */ |
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h index b9a0d78fd639..435d0cd96c22 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h +++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h | |||
@@ -603,6 +603,11 @@ struct filter_tlv { | |||
603 | u_int32_t val[0]; | 603 | u_int32_t val[0]; |
604 | }; | 604 | }; |
605 | 605 | ||
606 | enum { | ||
607 | CLSF_ADD = 0, | ||
608 | CLSF_DEL = 1, | ||
609 | }; | ||
610 | |||
606 | /* | 611 | /* |
607 | * Writing cmd register causes STAT_BUSY to get set in status register. | 612 | * Writing cmd register causes STAT_BUSY to get set in status register. |
608 | * When cmd completes, STAT_BUSY will be cleared. | 613 | * When cmd completes, STAT_BUSY will be cleared. |
diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h index 609542848e02..75aced2de869 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_enet.h +++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h | |||
@@ -32,6 +32,8 @@ struct vnic_enet_config { | |||
32 | char devname[16]; | 32 | char devname[16]; |
33 | u32 intr_timer_usec; | 33 | u32 intr_timer_usec; |
34 | u16 loop_tag; | 34 | u16 loop_tag; |
35 | u16 vf_rq_count; | ||
36 | u16 num_arfs; | ||
35 | }; | 37 | }; |
36 | 38 | ||
37 | #define VENETF_TSO 0x1 /* TSO enabled */ | 39 | #define VENETF_TSO 0x1 /* TSO enabled */ |
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h index ee7bc95af278..8111d5202df2 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h | |||
@@ -85,6 +85,21 @@ struct vnic_rq { | |||
85 | struct vnic_rq_buf *to_clean; | 85 | struct vnic_rq_buf *to_clean; |
86 | void *os_buf_head; | 86 | void *os_buf_head; |
87 | unsigned int pkts_outstanding; | 87 | unsigned int pkts_outstanding; |
88 | #ifdef CONFIG_NET_RX_BUSY_POLL | ||
89 | #define ENIC_POLL_STATE_IDLE 0 | ||
90 | #define ENIC_POLL_STATE_NAPI (1 << 0) /* NAPI owns this poll */ | ||
91 | #define ENIC_POLL_STATE_POLL (1 << 1) /* poll owns this poll */ | ||
92 | #define ENIC_POLL_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this poll */ | ||
93 | #define ENIC_POLL_STATE_POLL_YIELD (1 << 3) /* poll yielded this poll */ | ||
94 | #define ENIC_POLL_YIELD (ENIC_POLL_STATE_NAPI_YIELD | \ | ||
95 | ENIC_POLL_STATE_POLL_YIELD) | ||
96 | #define ENIC_POLL_LOCKED (ENIC_POLL_STATE_NAPI | \ | ||
97 | ENIC_POLL_STATE_POLL) | ||
98 | #define ENIC_POLL_USER_PEND (ENIC_POLL_STATE_POLL | \ | ||
99 | ENIC_POLL_STATE_POLL_YIELD) | ||
100 | unsigned int bpoll_state; | ||
101 | spinlock_t bpoll_lock; | ||
102 | #endif /* CONFIG_NET_RX_BUSY_POLL */ | ||
88 | }; | 103 | }; |
89 | 104 | ||
90 | static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) | 105 | static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) |
@@ -197,6 +212,113 @@ static inline int vnic_rq_fill(struct vnic_rq *rq, | |||
197 | return 0; | 212 | return 0; |
198 | } | 213 | } |
199 | 214 | ||
215 | #ifdef CONFIG_NET_RX_BUSY_POLL | ||
216 | static inline void enic_busy_poll_init_lock(struct vnic_rq *rq) | ||
217 | { | ||
218 | spin_lock_init(&rq->bpoll_lock); | ||
219 | rq->bpoll_state = ENIC_POLL_STATE_IDLE; | ||
220 | } | ||
221 | |||
222 | static inline bool enic_poll_lock_napi(struct vnic_rq *rq) | ||
223 | { | ||
224 | bool rc = true; | ||
225 | |||
226 | spin_lock(&rq->bpoll_lock); | ||
227 | if (rq->bpoll_state & ENIC_POLL_LOCKED) { | ||
228 | WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI); | ||
229 | rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD; | ||
230 | rc = false; | ||
231 | } else { | ||
232 | rq->bpoll_state = ENIC_POLL_STATE_NAPI; | ||
233 | } | ||
234 | spin_unlock(&rq->bpoll_lock); | ||
235 | |||
236 | return rc; | ||
237 | } | ||
238 | |||
239 | static inline bool enic_poll_unlock_napi(struct vnic_rq *rq) | ||
240 | { | ||
241 | bool rc = false; | ||
242 | |||
243 | spin_lock(&rq->bpoll_lock); | ||
244 | WARN_ON(rq->bpoll_state & | ||
245 | (ENIC_POLL_STATE_POLL | ENIC_POLL_STATE_NAPI_YIELD)); | ||
246 | if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD) | ||
247 | rc = true; | ||
248 | rq->bpoll_state = ENIC_POLL_STATE_IDLE; | ||
249 | spin_unlock(&rq->bpoll_lock); | ||
250 | |||
251 | return rc; | ||
252 | } | ||
253 | |||
254 | static inline bool enic_poll_lock_poll(struct vnic_rq *rq) | ||
255 | { | ||
256 | bool rc = true; | ||
257 | |||
258 | spin_lock_bh(&rq->bpoll_lock); | ||
259 | if (rq->bpoll_state & ENIC_POLL_LOCKED) { | ||
260 | rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD; | ||
261 | rc = false; | ||
262 | } else { | ||
263 | rq->bpoll_state |= ENIC_POLL_STATE_POLL; | ||
264 | } | ||
265 | spin_unlock_bh(&rq->bpoll_lock); | ||
266 | |||
267 | return rc; | ||
268 | } | ||
269 | |||
270 | static inline bool enic_poll_unlock_poll(struct vnic_rq *rq) | ||
271 | { | ||
272 | bool rc = false; | ||
273 | |||
274 | spin_lock_bh(&rq->bpoll_lock); | ||
275 | WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI); | ||
276 | if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD) | ||
277 | rc = true; | ||
278 | rq->bpoll_state = ENIC_POLL_STATE_IDLE; | ||
279 | spin_unlock_bh(&rq->bpoll_lock); | ||
280 | |||
281 | return rc; | ||
282 | } | ||
283 | |||
284 | static inline bool enic_poll_busy_polling(struct vnic_rq *rq) | ||
285 | { | ||
286 | WARN_ON(!(rq->bpoll_state & ENIC_POLL_LOCKED)); | ||
287 | return rq->bpoll_state & ENIC_POLL_USER_PEND; | ||
288 | } | ||
289 | |||
290 | #else | ||
291 | |||
292 | static inline void enic_busy_poll_init_lock(struct vnic_rq *rq) | ||
293 | { | ||
294 | } | ||
295 | |||
296 | static inline bool enic_poll_lock_napi(struct vnic_rq *rq) | ||
297 | { | ||
298 | return true; | ||
299 | } | ||
300 | |||
301 | static inline bool enic_poll_unlock_napi(struct vnic_rq *rq) | ||
302 | { | ||
303 | return false; | ||
304 | } | ||
305 | |||
306 | static inline bool enic_poll_lock_poll(struct vnic_rq *rq) | ||
307 | { | ||
308 | return false; | ||
309 | } | ||
310 | |||
311 | static inline bool enic_poll_unlock_poll(struct vnic_rq *rq) | ||
312 | { | ||
313 | return false; | ||
314 | } | ||
315 | |||
316 | static inline bool enic_poll_busy_polling(struct vnic_rq *rq) | ||
317 | { | ||
318 | return false; | ||
319 | } | ||
320 | #endif /* CONFIG_NET_RX_BUSY_POLL */ | ||
321 | |||
200 | void vnic_rq_free(struct vnic_rq *rq); | 322 | void vnic_rq_free(struct vnic_rq *rq); |
201 | int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, | 323 | int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, |
202 | unsigned int desc_count, unsigned int desc_size); | 324 | unsigned int desc_count, unsigned int desc_size); |
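The helpers above let the NAPI handler and a busy-polling socket arbitrate ownership of an RQ without blocking each other. A hedged sketch of the intended pattern on the NAPI side (the function name and surrounding details are hypothetical; the real logic lives in enic_main.c):

	/* Hypothetical NAPI poll body showing the lock/unlock pairing. */
	static int enic_example_napi_poll(struct vnic_rq *rq,
					  struct napi_struct *napi, int budget)
	{
		int work_done = 0;

		if (!enic_poll_lock_napi(rq))
			return budget;	/* a busy-poll user owns the RQ */

		/* ... service up to 'budget' RX descriptors on rq ... */

		if (work_done < budget)
			napi_complete(napi);	/* the real driver also re-arms the interrupt */
		enic_poll_unlock_napi(rq);

		return work_done;
	}

The busy-poll side would mirror this with enic_poll_lock_poll()/enic_poll_unlock_poll(), which take the lock with BHs disabled since they run from process context.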
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h index 7e64bd8bbda9..fbefdca5e283 100644 --- a/include/net/flow_keys.h +++ b/include/net/flow_keys.h | |||
@@ -1,6 +1,19 @@ | |||
1 | #ifndef _NET_FLOW_KEYS_H | 1 | #ifndef _NET_FLOW_KEYS_H |
2 | #define _NET_FLOW_KEYS_H | 2 | #define _NET_FLOW_KEYS_H |
3 | 3 | ||
4 | /* struct flow_keys: | ||
5 | * @src: source ip address in case of IPv4 | ||
6 | * For IPv6 it contains 32bit hash of src address | ||
7 | * @dst: destination ip address in case of IPv4 | ||
8 | * For IPv6 it contains 32bit hash of dst address | ||
9 | * @ports: port numbers of Transport header | ||
10 | * port16[0]: src port number | ||
11 | * port16[1]: dst port number | ||
12 | * @thoff: Transport header offset | ||
13 | * @n_proto: Network header protocol (eg. IPv4/IPv6) | ||
14 | * @ip_proto: Transport header protocol (eg. TCP/UDP) | ||
15 | * All the members, except thoff, are in network byte order. | ||
16 | */ | ||
4 | struct flow_keys { | 17 | struct flow_keys { |
5 | /* (src,dst) must be grouped, in the same way than in IP header */ | 18 | /* (src,dst) must be grouped, in the same way than in IP header */ |
6 | __be32 src; | 19 | __be32 src; |
@@ -10,6 +23,7 @@ struct flow_keys { | |||
10 | __be16 port16[2]; | 23 | __be16 port16[2]; |
11 | }; | 24 | }; |
12 | u16 thoff; | 25 | u16 thoff; |
26 | u16 n_proto; | ||
13 | u8 ip_proto; | 27 | u8 ip_proto; |
14 | }; | 28 | }; |
15 | 29 | ||
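The new n_proto member records the network-layer EtherType alongside the dissected 5-tuple, which lets a consumer distinguish IPv4 from IPv6 flows. A hedged sketch of such a consumer (the function name is hypothetical; skb_flow_dissect() is the dissector entry point of this era):

	#include <linux/if_ether.h>
	#include <linux/in.h>
	#include <net/flow_keys.h>

	/* Hypothetical check for an aRFS-style path: only IPv4 TCP/UDP flows
	 * are considered for hardware steering.
	 */
	static bool example_flow_steerable(const struct sk_buff *skb,
					   struct flow_keys *keys)
	{
		if (!skb_flow_dissect(skb, keys))
			return false;

		if (keys->n_proto != htons(ETH_P_IP))	/* new field added above */
			return false;

		return keys->ip_proto == IPPROTO_TCP ||
		       keys->ip_proto == IPPROTO_UDP;
	}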
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 624f9857c83e..a3cfb8ebeb53 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -231,7 +231,7 @@ struct qdisc_skb_cb { | |||
231 | unsigned int pkt_len; | 231 | unsigned int pkt_len; |
232 | u16 slave_dev_queue_mapping; | 232 | u16 slave_dev_queue_mapping; |
233 | u16 _pad; | 233 | u16 _pad; |
234 | unsigned char data[20]; | 234 | unsigned char data[24]; |
235 | }; | 235 | }; |
236 | 236 | ||
237 | static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) | 237 | static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) |
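The private cb area grows from 20 to 24 bytes because struct flow_keys gained the n_proto member above, and qdisc users that embed flow_keys in their per-skb cb (sch_choke is the in-tree case this sketch is modelled on) would otherwise overflow it. qdisc_cb_private_validate() build-checks that the private state still fits:

	/* Modelled on sch_choke's per-skb state; the names are illustrative. */
	struct example_skb_cb {
		u16 classid;
		u8 keys_valid;
		struct flow_keys keys;	/* 4 bytes larger now, still fits in data[24] */
	};

	static inline struct example_skb_cb *example_skb_cb(const struct sk_buff *skb)
	{
		qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
		return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
	}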
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 107ed12a5323..c2b53c1b21d2 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
@@ -175,6 +175,7 @@ ipv6: | |||
175 | break; | 175 | break; |
176 | } | 176 | } |
177 | 177 | ||
178 | flow->n_proto = proto; | ||
178 | flow->ip_proto = ip_proto; | 179 | flow->ip_proto = ip_proto; |
179 | flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto); | 180 | flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto); |
180 | flow->thoff = (u16) nhoff; | 181 | flow->thoff = (u16) nhoff; |