author		Sony Chacko <sony.chacko@qlogic.com>	2012-11-27 23:34:26 -0500
committer	David S. Miller <davem@davemloft.net>	2012-11-28 11:07:43 -0500
commit		c70001a952e561775222d28a9e2f2a0075af51f3 (patch)
tree		8698785c3426bc26eeb98af21f06a19d6d687e1c /drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
parent		3a858a86922f3aeb85a352f3d1ce44c2a99b69a6 (diff)
qlcnic: create file qlcnic_io.c for datapath routines
Physical refactoring of the 82xx adapter data path routines:
move the data path code into the new file qlcnic_io.c.
The existing data path code has coding style issues; the code is
moved to the new file without fixing them. A separate patch
fixes the style issues in qlcnic_io.c.
Signed-off-by: Sony Chacko <sony.chacko@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c')
-rw-r--r--	drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c	1346
1 file changed, 1346 insertions, 0 deletions
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
new file mode 100644
index 000000000000..25e6ffc8b9ac
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -0,0 +1,1346 @@
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>

#include "qlcnic.h"

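/* Fold bits 16-18 and 40-42 of the MAC address into a 6-bit hash index */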
#define QLCNIC_MAC_HASH(MAC)\
	((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))

#define TX_ETHER_PKT	0x01
#define TX_TCP_PKT	0x02
#define TX_UDP_PKT	0x03
#define TX_IP_PKT	0x04
#define TX_TCP_LSO	0x05
#define TX_TCP_LSO6	0x06
#define TX_TCPV6_PKT	0x0b
#define TX_UDPV6_PKT	0x0c
#define FLAGS_VLAN_TAGGED	0x10
#define FLAGS_VLAN_OOB		0x40

#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
	(cmd_desc)->vlan_TCI = cpu_to_le16(v);
#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

#define qlcnic_set_tx_port(_desc, _port) \
	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))

#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
	((_desc)->flags_opcode |= \
	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
	((_desc)->nfrags__length = \
	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))

/* owner bits of status_desc */
#define STATUS_OWNER_HOST	(0x1ULL << 56)
#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)

/* Status descriptor:
   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
   53-55 desc_cnt, 56-57 owner, 58-63 opcode
 */
#define qlcnic_get_sts_port(sts_data)	\
	((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data)	\
	(((sts_data) >> 4) & 0x0F)
#define qlcnic_get_sts_type(sts_data)	\
	(((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)	\
	(((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)	\
	(((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)	\
	(((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)	\
	(((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)	\
	(((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data)	\
	(((sts_data) >> 58) & 0x03F)

#define qlcnic_get_lro_sts_refhandle(sts_data)	\
	((sts_data) & 0x0FFFF)
#define qlcnic_get_lro_sts_length(sts_data)	\
	(((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
	(((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
	(((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data)	\
	(((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data)	\
	(((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data)	\
	(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data)	\
	((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1)	\
	((sts_data1 >> 32) & 0x0FFFF)

/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD	0x03
#define QLCNIC_RXPKT_DESC	0x04
#define QLCNIC_OLD_RXPKT_DESC	0x3f
#define QLCNIC_RESPONSE_DESC	0x05
#define QLCNIC_LRO_DESC		0x12

/* for status field in status_desc */
#define STATUS_CKSUM_LOOP	0
#define STATUS_CKSUM_OK		2
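
/* Post a MAC/VLAN filter-add request descriptor on the Tx ring so that
 * the firmware (re)learns this source address.
 */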
static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
		u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
{
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	u32 producer;
	u64 word;

	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];

	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);

	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = vlan_id;

	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
	smp_mb();
}

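/* Source-MAC learning for the loopback filter table: refresh an existing
 * entry, or allocate a new one and program it into the firmware.
 */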
static void
qlcnic_send_filter(struct qlcnic_adapter *adapter,
		struct qlcnic_host_tx_ring *tx_ring,
		struct cmd_desc_type0 *first_desc,
		struct sk_buff *skb)
{
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_node *tmp_hnode, *n;
	struct hlist_head *head;
	u64 src_addr = 0;
	__le16 vlan_id = 0;
	u8 hindex;

	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
		return;

	if (adapter->fhash.fnum >= adapter->fhash.fmax)
		return;

	/* Only NPAR capable devices support vlan based learning */
	if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
		vlan_id = first_desc->vlan_TCI;
	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
	head = &(adapter->fhash.fhead[hindex]);

	hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
		    tmp_fil->vlan_id == vlan_id) {

			if (jiffies >
			    (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
				qlcnic_change_filter(adapter, src_addr, vlan_id,
						     tx_ring);
			tmp_fil->ftime = jiffies;
			return;
		}
	}

	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	if (!fil)
		return;

	qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);

	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);
	spin_lock(&adapter->mac_learn_lock);
	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;
	spin_unlock(&adapter->mac_learn_lock);
}

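/* Fill in the protocol-specific parts of the first Tx descriptor: VLAN
 * tagging, the checksum-offload opcode and, for LSO, a copy of the packet
 * headers into the descriptor ring for the firmware.
 */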
static int
qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
		struct cmd_desc_type0 *first_desc,
		struct sk_buff *skb)
{
	u8 opcode = 0, hdr_len = 0;
	u16 flags = 0, vlan_tci = 0;
	int copied, offset, copy_len;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	u16 protocol = ntohs(skb->protocol);
	u32 producer = tx_ring->producer;

	if (protocol == ETH_P_8021Q) {
		vh = (struct vlan_ethhdr *)skb->data;
		flags = FLAGS_VLAN_TAGGED;
		vlan_tci = ntohs(vh->h_vlan_TCI);
		protocol = ntohs(vh->h_vlan_encapsulated_proto);
	} else if (vlan_tx_tag_present(skb)) {
		flags = FLAGS_VLAN_OOB;
		vlan_tci = vlan_tx_tag_get(skb);
	}
	if (unlikely(adapter->pvid)) {
		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
			return -EIO;
		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
			goto set_flags;

		flags = FLAGS_VLAN_OOB;
		vlan_tci = adapter->pvid;
	}
set_flags:
	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	if (*(skb->data) & BIT_0) {
		flags |= BIT_0;
		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
	}
	opcode = TX_ETHER_PKT;
	if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
	    skb_shinfo(skb)->gso_size > 0) {

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->total_hdr_length = hdr_len;

		opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;

		/* For LSO, we need to copy the MAC/IP/TCP headers into
		 * the descriptor ring */
		copied = 0;
		offset = 2;

		if (flags & FLAGS_VLAN_OOB) {
			first_desc->total_hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;
			/* Only in case of TSO on vlan device */
			flags |= FLAGS_VLAN_TAGGED;

			/* Create a TSO vlan header template for firmware */

			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;

			copy_len = min((int)sizeof(struct cmd_desc_type0) -
				       offset, hdr_len + VLAN_HLEN);

			vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
			skb_copy_from_linear_data(skb, vh, 12);
			vh->h_vlan_proto = htons(ETH_P_8021Q);
			vh->h_vlan_TCI = htons(vlan_tci);

			skb_copy_from_linear_data_offset(skb, 12,
					(char *)vh + 16, copy_len - 16);

			copied = copy_len - VLAN_HLEN;
			offset = 0;

			producer = get_next_index(producer, tx_ring->num_desc);
		}

		while (copied < hdr_len) {

			copy_len = min((int)sizeof(struct cmd_desc_type0) -
				       offset, (hdr_len - copied));

			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;

			skb_copy_from_linear_data_offset(skb, copied,
					(char *) hwdesc + offset, copy_len);

			copied += copy_len;
			offset = 0;

			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;
		smp_mb();
		adapter->stats.lso_frames++;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4proto;

		if (protocol == ETH_P_IP) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDP_PKT;
		} else if (protocol == ETH_P_IPV6) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDPV6_PKT;
		}
	}
	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}

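/* DMA-map the skb head and every page fragment, unwinding the mappings
 * already made if any of them fails.
 */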
static int
qlcnic_map_tx_skb(struct pci_dev *pdev,
		struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf;
	struct skb_frag_struct *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data,
			skb_headlen(skb), PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];

		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, map))
			goto unwind;

		nf->dma = map;
		nf->length = skb_frag_size(frag);
	}

	return 0;

unwind:
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
	return -ENOMEM;
}

static void
qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
	struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;

	for (i = 0; i < nr_frags; i++) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
	pbuf->skb = NULL;
}

static inline void
qlcnic_clear_cmddesc(u64 *desc)
{
	desc[0] = 0ULL;
	desc[2] = 0ULL;
	desc[7] = 0ULL;
}

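/* Main transmit entry point (ndo_start_xmit): packets with too many
 * fragments are partially linearized, the skb is DMA-mapped, and up to
 * four buffer addresses are written per command descriptor.
 */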
netdev_tx_t
qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	struct ethhdr *phdr;
	int delta = 0;
	int i, k;

	u32 producer;
	int frag_count;
	u32 num_txd = tx_ring->num_desc;

	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	if (adapter->flags & QLCNIC_MACSPOOF) {
		phdr = (struct ethhdr *)skb->data;
		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
			goto drop_packet;
	}

	frag_count = skb_shinfo(skb)->nr_frags + 1;
	/* 14 frags supported for normal packet and
	 * 32 frags supported for TSO packet
	 */
	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {

		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (!__pskb_pull_tail(skb, delta))
			goto drop_packet;

		frag_count = 1 + skb_shinfo(skb)->nr_frags;
	}

	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_stop_queue(netdev);
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
			netif_start_queue(netdev);
		else {
			adapter->stats.xmit_off++;
			return NETDEV_TX_BUSY;
		}
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];

	pdev = adapter->pdev;

	first_desc = hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
		adapter->stats.tx_dma_map_error++;
		goto drop_packet;
	}

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

	for (i = 0; i < frag_count; i++) {

		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc.*/
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];

		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);
	smp_mb();

	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
		goto unwind_buff;

	if (adapter->mac_learn)
		qlcnic_send_filter(adapter, tx_ring, first_desc, skb);

	adapter->stats.txbytes += skb->len;
	adapter->stats.xmitcalled++;

	qlcnic_update_cmd_producer(tx_ring);

	return NETDEV_TX_OK;

unwind_buff:
	qlcnic_unmap_buffers(pdev, skb, pbuf);
drop_packet:
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ahw->linkup && !linkup) {
		netdev_info(netdev, "NIC Link is down\n");
		adapter->ahw->linkup = 0;
		if (netif_running(netdev)) {
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	} else if (!adapter->ahw->linkup && linkup) {
		netdev_info(netdev, "NIC Link is up\n");
		adapter->ahw->linkup = 1;
		if (netif_running(netdev)) {
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	}
}

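/* Allocate and DMA-map a fresh receive skb for an rds ring buffer slot. */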
static int
qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
	struct qlcnic_host_rds_ring *rds_ring,
	struct qlcnic_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
	if (!skb) {
		adapter->stats.skb_alloc_failure++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_IP_ALIGN);

	dma = pci_map_single(pdev, skb->data,
			rds_ring->dma_size, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		adapter->stats.rx_dma_map_error++;
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->dma = dma;

	return 0;
}

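/* Refill the rds ring from the Rx path; spin_trylock() keeps this from
 * blocking when another context is already posting buffers.
 */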
static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
		struct qlcnic_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	uint32_t producer;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;

	head = &rds_ring->free_list;
	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);

		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
	spin_unlock(&rds_ring->lock);
}

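/* Reclaim Tx buffers that the firmware has consumed (up to hw_consumer)
 * and restart the queue once enough descriptors are free again.
 */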
static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
{
	u32 sw_consumer, hw_consumer;
	int count = 0, i;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;
	int done;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;

	if (!spin_trylock(&adapter->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}

			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= MAX_STATUS_HANDLE)
			break;
	}

	if (count && netif_running(netdev)) {
		tx_ring->sw_consumer = sw_consumer;

		smp_mb();

		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_wake_queue(netdev);
				adapter->stats.xmit_on++;
			}
		}
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);
	spin_unlock(&adapter->tx_clean_lock);

	return done;
}

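/* NAPI poll for the sds ring that also reaps Tx completions. */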
static int qlcnic_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring =
		container_of(napi, struct qlcnic_host_sds_ring, napi);

	struct qlcnic_adapter *adapter = sds_ring->adapter;

	int tx_complete;
	int work_done;

	tx_complete = qlcnic_process_cmd_ring(adapter);

	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
	}

	return work_done;
}

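/* NAPI poll for Rx-only sds rings. */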
static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring =
		container_of(napi, struct qlcnic_host_sds_ring, napi);

	struct qlcnic_adapter *adapter = sds_ring->adapter;
	int work_done;

	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
	}

	return work_done;
}

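/* Decode a firmware link-event message and propagate the new link state. */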
static void
qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
	struct qlcnic_fw_msg *msg)
{
	u32 cable_OUI;
	u16 cable_len;
	u16 link_speed;
	u8 link_status, module, duplex, autoneg;
	u8 lb_status = 0;
	struct net_device *netdev = adapter->netdev;

	adapter->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;
	lb_status = (msg->body[2] >> 32) & 0x3;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
			 "length %d\n", cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
			 cable_len);

	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
	    lb_status == QLCNIC_ELB_MODE))
		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->link_duplex = DUPLEX_FULL;
	else
		adapter->link_duplex = DUPLEX_HALF;

	adapter->module_type = module;
	adapter->link_autoneg = autoneg;

	if (link_status) {
		adapter->link_speed = link_speed;
	} else {
		adapter->link_speed = SPEED_UNKNOWN;
		adapter->link_duplex = DUPLEX_UNKNOWN;
	}
}

static void
qlcnic_handle_fw_message(int desc_cnt, int index,
	struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	struct qlcnic_adapter *adapter;
	struct device *dev;
	int i = 0, opcode, ret;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	adapter = sds_ring->adapter;
	dev = &adapter->pdev->dev;
	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);

	switch (opcode) {
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(adapter, &msg);
		break;
	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
		ret = (u32)(msg.body[1]);
		switch (ret) {
		case 0:
			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
			break;
		case 1:
			dev_info(dev, "loopback already in progress\n");
			adapter->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
			break;
		case 2:
			dev_info(dev, "loopback cable is not connected\n");
			adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
			break;
		default:
			dev_info(dev, "loopback configure request failed,"
				 " ret %x\n", ret);
			adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
			break;
		}
		break;
	default:
		break;
	}
}

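/* Detach the skb from an Rx buffer: unmap it and set the checksum state
 * reported by the status descriptor.
 */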
static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
		struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
{
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &rds_ring->rx_buf_arr[index];

	if (unlikely(buffer->skb == NULL)) {
		WARN_ON(1);
		return NULL;
	}

	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
			 PCI_DMA_FROMDEVICE);

	skb = buffer->skb;

	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
	    (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	buffer->skb = NULL;

	return skb;
}

static inline int
qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
	u16 *vlan_tag)
{
	struct ethhdr *eth_hdr;

	if (!__vlan_get_tag(skb, vlan_tag)) {
		eth_hdr = (struct ethhdr *) skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
	}
	if (!adapter->pvid)
		return 0;

	if (*vlan_tag == adapter->pvid) {
		/* Outer vlan tag. Packet should follow non-vlan path */
		*vlan_tag = 0xffff;
		return 0;
	}
	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
		return 0;

	return -EINVAL;
}

static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		struct qlcnic_host_sds_ring *sds_ring,
		int ring, u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;
	u16 vid = 0xffff;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	length = qlcnic_get_sts_totallength(sts_data0);
	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define QLC_TCP_HDR_SIZE	20
#define QLC_TCP_TS_OPTION_SIZE	12
#define QLC_TCP_TS_HDR_SIZE	(QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)

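/* Reassemble a firmware-aggregated LRO frame: rebuild the IP length and
 * checksum, and restore the TCP PSH flag and sequence number.
 */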
static struct qlcnic_rx_buffer *
qlcnic_process_lro(struct qlcnic_adapter *adapter,
		int ring, u64 sts_data0, u64 sts_data1)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct tcphdr *th;
	bool push, timestamp;
	int l2_hdr_offset, l4_hdr_offset;
	int index;
	u16 lro_length, length, data_offset;
	u32 seq_number;
	u16 vid = 0xffff;

	if (unlikely(ring > adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index > rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
	lro_length = qlcnic_get_lro_sts_length(sts_data0);
	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
	push = qlcnic_get_lro_sts_push_flag(sts_data0);
	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (timestamp)
		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);

	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	iph = (struct iphdr *)skb->data;
	th = (struct tcphdr *)(skb->data + (iph->ihl << 2));

	length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
	iph->tot_len = htons(length);
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	th->psh = push;
	th->seq = htonl(seq_number);

	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, vid);
	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;

	return buffer;
}

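/* Main Rx completion loop: walk the status ring, dispatch each descriptor
 * by opcode, then recycle the consumed buffers back to the rds rings.
 */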
int
qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	u64 sts_data0, sts_data1;

	int count = 0;
	int opcode, ring, desc_cnt;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);

		switch (opcode) {
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring,
					ring, sts_data0);
			break;
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
					sts_data1);
			break;
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		default:
			goto skip;
		}

		WARN_ON(desc_cnt > 1);

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;

skip:
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] =
				cpu_to_le64(STATUS_OWNER_PHANTOM);
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		struct qlcnic_host_rds_ring *rds_ring =
			&adapter->recv_ctx->rds_rings[ring];

		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur,
						struct qlcnic_rx_buffer, list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}

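/* Refill an rds ring with fresh buffers; unlike the _nodb variant above,
 * no ring lock is taken here.
 */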
void
qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
		struct qlcnic_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	u32 producer;
	struct list_head *head;

	producer = rds_ring->producer;

	head = &rds_ring->free_list;
	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);

		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer-1) & (rds_ring->num_desc-1),
		       rds_ring->crb_rcv_producer);
	}
}

static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
{
	int i;
	unsigned char *data = skb->data;

	printk(KERN_INFO "\n");
	for (i = 0; i < skb->len; i++) {
		QLCDB(adapter, DRV, "%02x ", data[i]);
		if ((i & 0x0f) == 8)
			printk(KERN_INFO "\n");
	}
}

static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
		u64 sts_data0)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	length = qlcnic_get_sts_totallength(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return;
}

void
qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data0;
	int ring, opcode, desc_cnt;

	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

	if (!(sts_data0 & STATUS_OWNER_HOST))
		return;

	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
	opcode = qlcnic_get_sts_opcode(sts_data0);
	switch (opcode) {
	case QLCNIC_RESPONSE_DESC:
		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		break;
	default:
		ring = qlcnic_get_sts_type(sts_data0);
		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
		break;
	}

	for (; desc_cnt > 0; desc_cnt--) {
		desc = &sds_ring->desc_head[consumer];
		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
		consumer = get_next_index(consumer, sds_ring->num_desc);
	}

	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}

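/* Assemble a MAC address from two 32-bit register words, optionally using
 * the alternate-MAC register layout.
 */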
void qlcnic_fetch_mac(u32 off1, u32 off2, u8 alt_mac, u8 *mac)
{
	u32 mac_low, mac_high;
	int i;

	mac_low = off1;
	mac_high = off2;

	if (alt_mac) {
		mac_low |= (mac_low >> 16) | (mac_high << 16);
		mac_high >>= 16;
	}

	for (i = 0; i < 2; i++)
		mac[i] = (u8)(mac_high >> ((1 - i) * 8));
	for (i = 2; i < 6; i++)
		mac[i] = (u8)(mac_low >> ((5 - i) * 8));
}

int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (ring == adapter->max_sds_rings - 1)
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
				QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
		else
			netif_napi_add(netdev, &sds_ring->napi,
				qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
	}

	return 0;
}

void qlcnic_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);
}

void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_int(sds_ring);
	}
}

void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_int(sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}
}