diff options
author | Sony Chacko <sony.chacko@qlogic.com> | 2012-11-27 23:34:27 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2012-11-28 11:07:43 -0500 |
commit | d17dd0d9dfb235952c62703ffd34c5bf05257e4d (patch) | |
tree | c8f9d409b1634f86efc64d0b7cc87019646ba3e3 /drivers/net/ethernet/qlogic | |
parent | c70001a952e561775222d28a9e2f2a0075af51f3 (diff) |
qlcnic: fix coding style issues in qlcnic_io.c
Fix coding style issues in qlcnic_io.c
Signed-off-by: Sony Chacko <sony.chacko@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/qlogic')
-rw-r--r-- | drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 263 |
1 file changed, 113 insertions, 150 deletions
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 25e6ffc8b9ac..ba352c18c358 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
@@ -94,8 +94,10 @@ | |||
94 | /* for status field in status_desc */ | 94 | /* for status field in status_desc */ |
95 | #define STATUS_CKSUM_LOOP 0 | 95 | #define STATUS_CKSUM_LOOP 0 |
96 | #define STATUS_CKSUM_OK 2 | 96 | #define STATUS_CKSUM_OK 2 |
97 | |||
97 | static void qlcnic_change_filter(struct qlcnic_adapter *adapter, | 98 | static void qlcnic_change_filter(struct qlcnic_adapter *adapter, |
98 | u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring) | 99 | u64 uaddr, __le16 vlan_id, |
100 | struct qlcnic_host_tx_ring *tx_ring) | ||
99 | { | 101 | { |
100 | struct cmd_desc_type0 *hwdesc; | 102 | struct cmd_desc_type0 *hwdesc; |
101 | struct qlcnic_nic_req *req; | 103 | struct qlcnic_nic_req *req; |
@@ -125,11 +127,10 @@ static void qlcnic_change_filter(struct qlcnic_adapter *adapter, | |||
125 | smp_mb(); | 127 | smp_mb(); |
126 | } | 128 | } |
127 | 129 | ||
128 | static void | 130 | static void qlcnic_send_filter(struct qlcnic_adapter *adapter, |
129 | qlcnic_send_filter(struct qlcnic_adapter *adapter, | 131 | struct qlcnic_host_tx_ring *tx_ring, |
130 | struct qlcnic_host_tx_ring *tx_ring, | 132 | struct cmd_desc_type0 *first_desc, |
131 | struct cmd_desc_type0 *first_desc, | 133 | struct sk_buff *skb) |
132 | struct sk_buff *skb) | ||
133 | { | 134 | { |
134 | struct ethhdr *phdr = (struct ethhdr *)(skb->data); | 135 | struct ethhdr *phdr = (struct ethhdr *)(skb->data); |
135 | struct qlcnic_filter *fil, *tmp_fil; | 136 | struct qlcnic_filter *fil, *tmp_fil; |
@@ -156,10 +157,9 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter, | |||
156 | if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && | 157 | if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && |
157 | tmp_fil->vlan_id == vlan_id) { | 158 | tmp_fil->vlan_id == vlan_id) { |
158 | 159 | ||
159 | if (jiffies > | 160 | if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) |
160 | (QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) | ||
161 | qlcnic_change_filter(adapter, src_addr, vlan_id, | 161 | qlcnic_change_filter(adapter, src_addr, vlan_id, |
162 | tx_ring); | 162 | tx_ring); |
163 | tmp_fil->ftime = jiffies; | 163 | tmp_fil->ftime = jiffies; |
164 | return; | 164 | return; |
165 | } | 165 | } |
@@ -174,20 +174,21 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter, | |||
174 | fil->ftime = jiffies; | 174 | fil->ftime = jiffies; |
175 | fil->vlan_id = vlan_id; | 175 | fil->vlan_id = vlan_id; |
176 | memcpy(fil->faddr, &src_addr, ETH_ALEN); | 176 | memcpy(fil->faddr, &src_addr, ETH_ALEN); |
177 | |||
177 | spin_lock(&adapter->mac_learn_lock); | 178 | spin_lock(&adapter->mac_learn_lock); |
179 | |||
178 | hlist_add_head(&(fil->fnode), head); | 180 | hlist_add_head(&(fil->fnode), head); |
179 | adapter->fhash.fnum++; | 181 | adapter->fhash.fnum++; |
182 | |||
180 | spin_unlock(&adapter->mac_learn_lock); | 183 | spin_unlock(&adapter->mac_learn_lock); |
181 | } | 184 | } |
182 | 185 | ||
183 | static int | 186 | static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, |
184 | qlcnic_tx_pkt(struct qlcnic_adapter *adapter, | 187 | struct cmd_desc_type0 *first_desc, struct sk_buff *skb) |
185 | struct cmd_desc_type0 *first_desc, | ||
186 | struct sk_buff *skb) | ||
187 | { | 188 | { |
188 | u8 opcode = 0, hdr_len = 0; | 189 | u8 l4proto, opcode = 0, hdr_len = 0; |
189 | u16 flags = 0, vlan_tci = 0; | 190 | u16 flags = 0, vlan_tci = 0; |
190 | int copied, offset, copy_len; | 191 | int copied, offset, copy_len, size; |
191 | struct cmd_desc_type0 *hwdesc; | 192 | struct cmd_desc_type0 *hwdesc; |
192 | struct vlan_ethhdr *vh; | 193 | struct vlan_ethhdr *vh; |
193 | struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; | 194 | struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; |
@@ -222,13 +223,10 @@ set_flags: | |||
222 | } | 223 | } |
223 | opcode = TX_ETHER_PKT; | 224 | opcode = TX_ETHER_PKT; |
224 | if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && | 225 | if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && |
225 | skb_shinfo(skb)->gso_size > 0) { | 226 | skb_shinfo(skb)->gso_size > 0) { |
226 | |||
227 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | 227 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
228 | |||
229 | first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | 228 | first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); |
230 | first_desc->total_hdr_length = hdr_len; | 229 | first_desc->total_hdr_length = hdr_len; |
231 | |||
232 | opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO; | 230 | opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO; |
233 | 231 | ||
234 | /* For LSO, we need to copy the MAC/IP/TCP headers into | 232 | /* For LSO, we need to copy the MAC/IP/TCP headers into |
@@ -240,16 +238,16 @@ set_flags: | |||
240 | first_desc->total_hdr_length += VLAN_HLEN; | 238 | first_desc->total_hdr_length += VLAN_HLEN; |
241 | first_desc->tcp_hdr_offset = VLAN_HLEN; | 239 | first_desc->tcp_hdr_offset = VLAN_HLEN; |
242 | first_desc->ip_hdr_offset = VLAN_HLEN; | 240 | first_desc->ip_hdr_offset = VLAN_HLEN; |
241 | |||
243 | /* Only in case of TSO on vlan device */ | 242 | /* Only in case of TSO on vlan device */ |
244 | flags |= FLAGS_VLAN_TAGGED; | 243 | flags |= FLAGS_VLAN_TAGGED; |
245 | 244 | ||
246 | /* Create a TSO vlan header template for firmware */ | 245 | /* Create a TSO vlan header template for firmware */ |
247 | |||
248 | hwdesc = &tx_ring->desc_head[producer]; | 246 | hwdesc = &tx_ring->desc_head[producer]; |
249 | tx_ring->cmd_buf_arr[producer].skb = NULL; | 247 | tx_ring->cmd_buf_arr[producer].skb = NULL; |
250 | 248 | ||
251 | copy_len = min((int)sizeof(struct cmd_desc_type0) - | 249 | copy_len = min((int)sizeof(struct cmd_desc_type0) - |
252 | offset, hdr_len + VLAN_HLEN); | 250 | offset, hdr_len + VLAN_HLEN); |
253 | 251 | ||
254 | vh = (struct vlan_ethhdr *)((char *) hwdesc + 2); | 252 | vh = (struct vlan_ethhdr *)((char *) hwdesc + 2); |
255 | skb_copy_from_linear_data(skb, vh, 12); | 253 | skb_copy_from_linear_data(skb, vh, 12); |
@@ -257,28 +255,23 @@ set_flags: | |||
257 | vh->h_vlan_TCI = htons(vlan_tci); | 255 | vh->h_vlan_TCI = htons(vlan_tci); |
258 | 256 | ||
259 | skb_copy_from_linear_data_offset(skb, 12, | 257 | skb_copy_from_linear_data_offset(skb, 12, |
260 | (char *)vh + 16, copy_len - 16); | 258 | (char *)vh + 16, |
261 | 259 | copy_len - 16); | |
262 | copied = copy_len - VLAN_HLEN; | 260 | copied = copy_len - VLAN_HLEN; |
263 | offset = 0; | 261 | offset = 0; |
264 | |||
265 | producer = get_next_index(producer, tx_ring->num_desc); | 262 | producer = get_next_index(producer, tx_ring->num_desc); |
266 | } | 263 | } |
267 | 264 | ||
268 | while (copied < hdr_len) { | 265 | while (copied < hdr_len) { |
269 | 266 | size = (int)sizeof(struct cmd_desc_type0) - offset; | |
270 | copy_len = min((int)sizeof(struct cmd_desc_type0) - | 267 | copy_len = min(size, (hdr_len - copied)); |
271 | offset, (hdr_len - copied)); | ||
272 | |||
273 | hwdesc = &tx_ring->desc_head[producer]; | 268 | hwdesc = &tx_ring->desc_head[producer]; |
274 | tx_ring->cmd_buf_arr[producer].skb = NULL; | 269 | tx_ring->cmd_buf_arr[producer].skb = NULL; |
275 | |||
276 | skb_copy_from_linear_data_offset(skb, copied, | 270 | skb_copy_from_linear_data_offset(skb, copied, |
277 | (char *) hwdesc + offset, copy_len); | 271 | (char *)hwdesc + |
278 | 272 | offset, copy_len); | |
279 | copied += copy_len; | 273 | copied += copy_len; |
280 | offset = 0; | 274 | offset = 0; |
281 | |||
282 | producer = get_next_index(producer, tx_ring->num_desc); | 275 | producer = get_next_index(producer, tx_ring->num_desc); |
283 | } | 276 | } |
284 | 277 | ||
@@ -287,8 +280,6 @@ set_flags: | |||
287 | adapter->stats.lso_frames++; | 280 | adapter->stats.lso_frames++; |
288 | 281 | ||
289 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { | 282 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
290 | u8 l4proto; | ||
291 | |||
292 | if (protocol == ETH_P_IP) { | 283 | if (protocol == ETH_P_IP) { |
293 | l4proto = ip_hdr(skb)->protocol; | 284 | l4proto = ip_hdr(skb)->protocol; |
294 | 285 | ||
@@ -312,9 +303,8 @@ set_flags: | |||
312 | return 0; | 303 | return 0; |
313 | } | 304 | } |
314 | 305 | ||
315 | static int | 306 | static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, |
316 | qlcnic_map_tx_skb(struct pci_dev *pdev, | 307 | struct qlcnic_cmd_buffer *pbuf) |
317 | struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf) | ||
318 | { | 308 | { |
319 | struct qlcnic_skb_frag *nf; | 309 | struct qlcnic_skb_frag *nf; |
320 | struct skb_frag_struct *frag; | 310 | struct skb_frag_struct *frag; |
@@ -324,8 +314,8 @@ qlcnic_map_tx_skb(struct pci_dev *pdev, | |||
324 | nr_frags = skb_shinfo(skb)->nr_frags; | 314 | nr_frags = skb_shinfo(skb)->nr_frags; |
325 | nf = &pbuf->frag_array[0]; | 315 | nf = &pbuf->frag_array[0]; |
326 | 316 | ||
327 | map = pci_map_single(pdev, skb->data, | 317 | map = pci_map_single(pdev, skb->data, skb_headlen(skb), |
328 | skb_headlen(skb), PCI_DMA_TODEVICE); | 318 | PCI_DMA_TODEVICE); |
329 | if (pci_dma_mapping_error(pdev, map)) | 319 | if (pci_dma_mapping_error(pdev, map)) |
330 | goto out_err; | 320 | goto out_err; |
331 | 321 | ||
@@ -335,7 +325,6 @@ qlcnic_map_tx_skb(struct pci_dev *pdev, | |||
335 | for (i = 0; i < nr_frags; i++) { | 325 | for (i = 0; i < nr_frags; i++) { |
336 | frag = &skb_shinfo(skb)->frags[i]; | 326 | frag = &skb_shinfo(skb)->frags[i]; |
337 | nf = &pbuf->frag_array[i+1]; | 327 | nf = &pbuf->frag_array[i+1]; |
338 | |||
339 | map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), | 328 | map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), |
340 | DMA_TO_DEVICE); | 329 | DMA_TO_DEVICE); |
341 | if (dma_mapping_error(&pdev->dev, map)) | 330 | if (dma_mapping_error(&pdev->dev, map)) |
@@ -360,13 +349,11 @@ out_err: | |||
360 | return -ENOMEM; | 349 | return -ENOMEM; |
361 | } | 350 | } |
362 | 351 | ||
363 | static void | 352 | static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb, |
364 | qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb, | 353 | struct qlcnic_cmd_buffer *pbuf) |
365 | struct qlcnic_cmd_buffer *pbuf) | ||
366 | { | 354 | { |
367 | struct qlcnic_skb_frag *nf = &pbuf->frag_array[0]; | 355 | struct qlcnic_skb_frag *nf = &pbuf->frag_array[0]; |
368 | int nr_frags = skb_shinfo(skb)->nr_frags; | 356 | int i, nr_frags = skb_shinfo(skb)->nr_frags; |
369 | int i; | ||
370 | 357 | ||
371 | for (i = 0; i < nr_frags; i++) { | 358 | for (i = 0; i < nr_frags; i++) { |
372 | nf = &pbuf->frag_array[i+1]; | 359 | nf = &pbuf->frag_array[i+1]; |
@@ -378,16 +365,14 @@ qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb, | |||
378 | pbuf->skb = NULL; | 365 | pbuf->skb = NULL; |
379 | } | 366 | } |
380 | 367 | ||
381 | static inline void | 368 | static inline void qlcnic_clear_cmddesc(u64 *desc) |
382 | qlcnic_clear_cmddesc(u64 *desc) | ||
383 | { | 369 | { |
384 | desc[0] = 0ULL; | 370 | desc[0] = 0ULL; |
385 | desc[2] = 0ULL; | 371 | desc[2] = 0ULL; |
386 | desc[7] = 0ULL; | 372 | desc[7] = 0ULL; |
387 | } | 373 | } |
388 | 374 | ||
389 | netdev_tx_t | 375 | netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) |
390 | qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | ||
391 | { | 376 | { |
392 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 377 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
393 | struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; | 378 | struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; |
@@ -396,12 +381,10 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
396 | struct cmd_desc_type0 *hwdesc, *first_desc; | 381 | struct cmd_desc_type0 *hwdesc, *first_desc; |
397 | struct pci_dev *pdev; | 382 | struct pci_dev *pdev; |
398 | struct ethhdr *phdr; | 383 | struct ethhdr *phdr; |
399 | int delta = 0; | 384 | int i, k, frag_count, delta = 0; |
400 | int i, k; | 385 | u32 producer, num_txd; |
401 | 386 | ||
402 | u32 producer; | 387 | num_txd = tx_ring->num_desc; |
403 | int frag_count; | ||
404 | u32 num_txd = tx_ring->num_desc; | ||
405 | 388 | ||
406 | if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { | 389 | if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { |
407 | netif_stop_queue(netdev); | 390 | netif_stop_queue(netdev); |
@@ -419,7 +402,6 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
419 | * 32 frags supported for TSO packet | 402 | * 32 frags supported for TSO packet |
420 | */ | 403 | */ |
421 | if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) { | 404 | if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) { |
422 | |||
423 | for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++) | 405 | for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++) |
424 | delta += skb_frag_size(&skb_shinfo(skb)->frags[i]); | 406 | delta += skb_frag_size(&skb_shinfo(skb)->frags[i]); |
425 | 407 | ||
@@ -431,9 +413,9 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
431 | 413 | ||
432 | if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) { | 414 | if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) { |
433 | netif_stop_queue(netdev); | 415 | netif_stop_queue(netdev); |
434 | if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) | 416 | if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { |
435 | netif_start_queue(netdev); | 417 | netif_start_queue(netdev); |
436 | else { | 418 | } else { |
437 | adapter->stats.xmit_off++; | 419 | adapter->stats.xmit_off++; |
438 | return NETDEV_TX_BUSY; | 420 | return NETDEV_TX_BUSY; |
439 | } | 421 | } |
@@ -441,10 +423,9 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
441 | 423 | ||
442 | producer = tx_ring->producer; | 424 | producer = tx_ring->producer; |
443 | pbuf = &tx_ring->cmd_buf_arr[producer]; | 425 | pbuf = &tx_ring->cmd_buf_arr[producer]; |
444 | |||
445 | pdev = adapter->pdev; | 426 | pdev = adapter->pdev; |
446 | 427 | first_desc = &tx_ring->desc_head[producer]; | |
447 | first_desc = hwdesc = &tx_ring->desc_head[producer]; | 428 | hwdesc = &tx_ring->desc_head[producer]; |
448 | qlcnic_clear_cmddesc((u64 *)hwdesc); | 429 | qlcnic_clear_cmddesc((u64 *)hwdesc); |
449 | 430 | ||
450 | if (qlcnic_map_tx_skb(pdev, skb, pbuf)) { | 431 | if (qlcnic_map_tx_skb(pdev, skb, pbuf)) { |
@@ -459,7 +440,6 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
459 | qlcnic_set_tx_port(first_desc, adapter->portnum); | 440 | qlcnic_set_tx_port(first_desc, adapter->portnum); |
460 | 441 | ||
461 | for (i = 0; i < frag_count; i++) { | 442 | for (i = 0; i < frag_count; i++) { |
462 | |||
463 | k = i % 4; | 443 | k = i % 4; |
464 | 444 | ||
465 | if ((k == 0) && (i > 0)) { | 445 | if ((k == 0) && (i > 0)) { |
@@ -471,7 +451,6 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
471 | } | 451 | } |
472 | 452 | ||
473 | buffrag = &pbuf->frag_array[i]; | 453 | buffrag = &pbuf->frag_array[i]; |
474 | |||
475 | hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length); | 454 | hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length); |
476 | switch (k) { | 455 | switch (k) { |
477 | case 0: | 456 | case 0: |
@@ -534,10 +513,9 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) | |||
534 | } | 513 | } |
535 | } | 514 | } |
536 | 515 | ||
537 | static int | 516 | static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, |
538 | qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, | 517 | struct qlcnic_host_rds_ring *rds_ring, |
539 | struct qlcnic_host_rds_ring *rds_ring, | 518 | struct qlcnic_rx_buffer *buffer) |
540 | struct qlcnic_rx_buffer *buffer) | ||
541 | { | 519 | { |
542 | struct sk_buff *skb; | 520 | struct sk_buff *skb; |
543 | dma_addr_t dma; | 521 | dma_addr_t dma; |
@@ -550,9 +528,8 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, | |||
550 | } | 528 | } |
551 | 529 | ||
552 | skb_reserve(skb, NET_IP_ALIGN); | 530 | skb_reserve(skb, NET_IP_ALIGN); |
553 | 531 | dma = pci_map_single(pdev, skb->data, rds_ring->dma_size, | |
554 | dma = pci_map_single(pdev, skb->data, | 532 | PCI_DMA_FROMDEVICE); |
555 | rds_ring->dma_size, PCI_DMA_FROMDEVICE); | ||
556 | 533 | ||
557 | if (pci_dma_mapping_error(pdev, dma)) { | 534 | if (pci_dma_mapping_error(pdev, dma)) { |
558 | adapter->stats.rx_dma_map_error++; | 535 | adapter->stats.rx_dma_map_error++; |
@@ -579,10 +556,9 @@ static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, | |||
579 | return; | 556 | return; |
580 | 557 | ||
581 | producer = rds_ring->producer; | 558 | producer = rds_ring->producer; |
582 | |||
583 | head = &rds_ring->free_list; | 559 | head = &rds_ring->free_list; |
584 | while (!list_empty(head)) { | ||
585 | 560 | ||
561 | while (!list_empty(head)) { | ||
586 | buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); | 562 | buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); |
587 | 563 | ||
588 | if (!buffer->skb) { | 564 | if (!buffer->skb) { |
@@ -598,27 +574,26 @@ static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, | |||
598 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); | 574 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); |
599 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); | 575 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); |
600 | pdesc->addr_buffer = cpu_to_le64(buffer->dma); | 576 | pdesc->addr_buffer = cpu_to_le64(buffer->dma); |
601 | |||
602 | producer = get_next_index(producer, rds_ring->num_desc); | 577 | producer = get_next_index(producer, rds_ring->num_desc); |
603 | } | 578 | } |
604 | 579 | ||
605 | if (count) { | 580 | if (count) { |
606 | rds_ring->producer = producer; | 581 | rds_ring->producer = producer; |
607 | writel((producer - 1) & (rds_ring->num_desc - 1), | 582 | writel((producer - 1) & (rds_ring->num_desc - 1), |
608 | rds_ring->crb_rcv_producer); | 583 | rds_ring->crb_rcv_producer); |
609 | } | 584 | } |
585 | |||
610 | spin_unlock(&rds_ring->lock); | 586 | spin_unlock(&rds_ring->lock); |
611 | } | 587 | } |
612 | 588 | ||
613 | static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter) | 589 | static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter) |
614 | { | 590 | { |
615 | u32 sw_consumer, hw_consumer; | 591 | u32 sw_consumer, hw_consumer; |
616 | int count = 0, i; | 592 | int i, done, count = 0; |
617 | struct qlcnic_cmd_buffer *buffer; | 593 | struct qlcnic_cmd_buffer *buffer; |
618 | struct pci_dev *pdev = adapter->pdev; | 594 | struct pci_dev *pdev = adapter->pdev; |
619 | struct net_device *netdev = adapter->netdev; | 595 | struct net_device *netdev = adapter->netdev; |
620 | struct qlcnic_skb_frag *frag; | 596 | struct qlcnic_skb_frag *frag; |
621 | int done; | ||
622 | struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; | 597 | struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; |
623 | 598 | ||
624 | if (!spin_trylock(&adapter->tx_clean_lock)) | 599 | if (!spin_trylock(&adapter->tx_clean_lock)) |
@@ -679,6 +654,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter) | |||
679 | */ | 654 | */ |
680 | hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); | 655 | hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); |
681 | done = (sw_consumer == hw_consumer); | 656 | done = (sw_consumer == hw_consumer); |
657 | |||
682 | spin_unlock(&adapter->tx_clean_lock); | 658 | spin_unlock(&adapter->tx_clean_lock); |
683 | 659 | ||
684 | return done; | 660 | return done; |
@@ -686,16 +662,14 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter) | |||
686 | 662 | ||
687 | static int qlcnic_poll(struct napi_struct *napi, int budget) | 663 | static int qlcnic_poll(struct napi_struct *napi, int budget) |
688 | { | 664 | { |
689 | struct qlcnic_host_sds_ring *sds_ring = | 665 | struct qlcnic_host_sds_ring *sds_ring; |
690 | container_of(napi, struct qlcnic_host_sds_ring, napi); | 666 | struct qlcnic_adapter *adapter; |
691 | 667 | int tx_complete, work_done; | |
692 | struct qlcnic_adapter *adapter = sds_ring->adapter; | ||
693 | 668 | ||
694 | int tx_complete; | 669 | sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); |
695 | int work_done; | 670 | adapter = sds_ring->adapter; |
696 | 671 | ||
697 | tx_complete = qlcnic_process_cmd_ring(adapter); | 672 | tx_complete = qlcnic_process_cmd_ring(adapter); |
698 | |||
699 | work_done = qlcnic_process_rcv_ring(sds_ring, budget); | 673 | work_done = qlcnic_process_rcv_ring(sds_ring, budget); |
700 | 674 | ||
701 | if ((work_done < budget) && tx_complete) { | 675 | if ((work_done < budget) && tx_complete) { |
@@ -709,12 +683,13 @@ static int qlcnic_poll(struct napi_struct *napi, int budget) | |||
709 | 683 | ||
710 | static int qlcnic_rx_poll(struct napi_struct *napi, int budget) | 684 | static int qlcnic_rx_poll(struct napi_struct *napi, int budget) |
711 | { | 685 | { |
712 | struct qlcnic_host_sds_ring *sds_ring = | 686 | struct qlcnic_host_sds_ring *sds_ring; |
713 | container_of(napi, struct qlcnic_host_sds_ring, napi); | 687 | struct qlcnic_adapter *adapter; |
714 | |||
715 | struct qlcnic_adapter *adapter = sds_ring->adapter; | ||
716 | int work_done; | 688 | int work_done; |
717 | 689 | ||
690 | sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); | ||
691 | adapter = sds_ring->adapter; | ||
692 | |||
718 | work_done = qlcnic_process_rcv_ring(sds_ring, budget); | 693 | work_done = qlcnic_process_rcv_ring(sds_ring, budget); |
719 | 694 | ||
720 | if (work_done < budget) { | 695 | if (work_done < budget) { |
@@ -726,15 +701,12 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget) | |||
726 | return work_done; | 701 | return work_done; |
727 | } | 702 | } |
728 | 703 | ||
729 | static void | 704 | static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter, |
730 | qlcnic_handle_linkevent(struct qlcnic_adapter *adapter, | 705 | struct qlcnic_fw_msg *msg) |
731 | struct qlcnic_fw_msg *msg) | ||
732 | { | 706 | { |
733 | u32 cable_OUI; | 707 | u32 cable_OUI; |
734 | u16 cable_len; | 708 | u16 cable_len, link_speed; |
735 | u16 link_speed; | 709 | u8 link_status, module, duplex, autoneg, lb_status = 0; |
736 | u8 link_status, module, duplex, autoneg; | ||
737 | u8 lb_status = 0; | ||
738 | struct net_device *netdev = adapter->netdev; | 710 | struct net_device *netdev = adapter->netdev; |
739 | 711 | ||
740 | adapter->has_link_events = 1; | 712 | adapter->has_link_events = 1; |
@@ -750,11 +722,12 @@ qlcnic_handle_linkevent(struct qlcnic_adapter *adapter, | |||
750 | 722 | ||
751 | module = (msg->body[2] >> 8) & 0xff; | 723 | module = (msg->body[2] >> 8) & 0xff; |
752 | if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) | 724 | if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) |
753 | dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, " | 725 | dev_info(&netdev->dev, |
754 | "length %d\n", cable_OUI, cable_len); | 726 | "unsupported cable: OUI 0x%x, length %d\n", |
727 | cable_OUI, cable_len); | ||
755 | else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) | 728 | else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) |
756 | dev_info(&netdev->dev, "unsupported cable length %d\n", | 729 | dev_info(&netdev->dev, "unsupported cable length %d\n", |
757 | cable_len); | 730 | cable_len); |
758 | 731 | ||
759 | if (!link_status && (lb_status == QLCNIC_ILB_MODE || | 732 | if (!link_status && (lb_status == QLCNIC_ILB_MODE || |
760 | lb_status == QLCNIC_ELB_MODE)) | 733 | lb_status == QLCNIC_ELB_MODE)) |
@@ -778,9 +751,8 @@ qlcnic_handle_linkevent(struct qlcnic_adapter *adapter, | |||
778 | } | 751 | } |
779 | } | 752 | } |
780 | 753 | ||
781 | static void | 754 | static void qlcnic_handle_fw_message(int desc_cnt, int index, |
782 | qlcnic_handle_fw_message(int desc_cnt, int index, | 755 | struct qlcnic_host_sds_ring *sds_ring) |
783 | struct qlcnic_host_sds_ring *sds_ring) | ||
784 | { | 756 | { |
785 | struct qlcnic_fw_msg msg; | 757 | struct qlcnic_fw_msg msg; |
786 | struct status_desc *desc; | 758 | struct status_desc *desc; |
@@ -820,8 +792,9 @@ qlcnic_handle_fw_message(int desc_cnt, int index, | |||
820 | adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN; | 792 | adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN; |
821 | break; | 793 | break; |
822 | default: | 794 | default: |
823 | dev_info(dev, "loopback configure request failed," | 795 | dev_info(dev, |
824 | " ret %x\n", ret); | 796 | "loopback configure request failed, err %x\n", |
797 | ret); | ||
825 | adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR; | 798 | adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR; |
826 | break; | 799 | break; |
827 | } | 800 | } |
@@ -831,8 +804,10 @@ qlcnic_handle_fw_message(int desc_cnt, int index, | |||
831 | } | 804 | } |
832 | } | 805 | } |
833 | 806 | ||
834 | static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, | 807 | static struct sk_buff * |
835 | struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum) | 808 | qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, |
809 | struct qlcnic_host_rds_ring *rds_ring, u16 index, | ||
810 | u16 cksum) | ||
836 | { | 811 | { |
837 | struct qlcnic_rx_buffer *buffer; | 812 | struct qlcnic_rx_buffer *buffer; |
838 | struct sk_buff *skb; | 813 | struct sk_buff *skb; |
@@ -845,12 +820,12 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, | |||
845 | } | 820 | } |
846 | 821 | ||
847 | pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, | 822 | pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, |
848 | PCI_DMA_FROMDEVICE); | 823 | PCI_DMA_FROMDEVICE); |
849 | 824 | ||
850 | skb = buffer->skb; | 825 | skb = buffer->skb; |
851 | 826 | ||
852 | if (likely((adapter->netdev->features & NETIF_F_RXCSUM) && | 827 | if (likely((adapter->netdev->features & NETIF_F_RXCSUM) && |
853 | (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) { | 828 | (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) { |
854 | adapter->stats.csummed++; | 829 | adapter->stats.csummed++; |
855 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 830 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
856 | } else { | 831 | } else { |
@@ -862,14 +837,13 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, | |||
862 | return skb; | 837 | return skb; |
863 | } | 838 | } |
864 | 839 | ||
865 | static inline int | 840 | static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, |
866 | qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb, | 841 | struct sk_buff *skb, u16 *vlan_tag) |
867 | u16 *vlan_tag) | ||
868 | { | 842 | { |
869 | struct ethhdr *eth_hdr; | 843 | struct ethhdr *eth_hdr; |
870 | 844 | ||
871 | if (!__vlan_get_tag(skb, vlan_tag)) { | 845 | if (!__vlan_get_tag(skb, vlan_tag)) { |
872 | eth_hdr = (struct ethhdr *) skb->data; | 846 | eth_hdr = (struct ethhdr *)skb->data; |
873 | memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); | 847 | memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); |
874 | skb_pull(skb, VLAN_HLEN); | 848 | skb_pull(skb, VLAN_HLEN); |
875 | } | 849 | } |
@@ -889,8 +863,8 @@ qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb, | |||
889 | 863 | ||
890 | static struct qlcnic_rx_buffer * | 864 | static struct qlcnic_rx_buffer * |
891 | qlcnic_process_rcv(struct qlcnic_adapter *adapter, | 865 | qlcnic_process_rcv(struct qlcnic_adapter *adapter, |
892 | struct qlcnic_host_sds_ring *sds_ring, | 866 | struct qlcnic_host_sds_ring *sds_ring, int ring, |
893 | int ring, u64 sts_data0) | 867 | u64 sts_data0) |
894 | { | 868 | { |
895 | struct net_device *netdev = adapter->netdev; | 869 | struct net_device *netdev = adapter->netdev; |
896 | struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; | 870 | struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; |
@@ -910,7 +884,6 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter, | |||
910 | return NULL; | 884 | return NULL; |
911 | 885 | ||
912 | buffer = &rds_ring->rx_buf_arr[index]; | 886 | buffer = &rds_ring->rx_buf_arr[index]; |
913 | |||
914 | length = qlcnic_get_sts_totallength(sts_data0); | 887 | length = qlcnic_get_sts_totallength(sts_data0); |
915 | cksum = qlcnic_get_sts_status(sts_data0); | 888 | cksum = qlcnic_get_sts_status(sts_data0); |
916 | pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); | 889 | pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); |
@@ -952,7 +925,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter, | |||
952 | 925 | ||
953 | static struct qlcnic_rx_buffer * | 926 | static struct qlcnic_rx_buffer * |
954 | qlcnic_process_lro(struct qlcnic_adapter *adapter, | 927 | qlcnic_process_lro(struct qlcnic_adapter *adapter, |
955 | int ring, u64 sts_data0, u64 sts_data1) | 928 | int ring, u64 sts_data0, u64 sts_data1) |
956 | { | 929 | { |
957 | struct net_device *netdev = adapter->netdev; | 930 | struct net_device *netdev = adapter->netdev; |
958 | struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; | 931 | struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; |
@@ -962,11 +935,9 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter, | |||
962 | struct iphdr *iph; | 935 | struct iphdr *iph; |
963 | struct tcphdr *th; | 936 | struct tcphdr *th; |
964 | bool push, timestamp; | 937 | bool push, timestamp; |
965 | int l2_hdr_offset, l4_hdr_offset; | 938 | int index, l2_hdr_offset, l4_hdr_offset; |
966 | int index; | 939 | u16 lro_length, length, data_offset, vid = 0xffff; |
967 | u16 lro_length, length, data_offset; | ||
968 | u32 seq_number; | 940 | u32 seq_number; |
969 | u16 vid = 0xffff; | ||
970 | 941 | ||
971 | if (unlikely(ring > adapter->max_rds_rings)) | 942 | if (unlikely(ring > adapter->max_rds_rings)) |
972 | return NULL; | 943 | return NULL; |
@@ -996,7 +967,6 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter, | |||
996 | data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE; | 967 | data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE; |
997 | 968 | ||
998 | skb_put(skb, lro_length + data_offset); | 969 | skb_put(skb, lro_length + data_offset); |
999 | |||
1000 | skb_pull(skb, l2_hdr_offset); | 970 | skb_pull(skb, l2_hdr_offset); |
1001 | 971 | ||
1002 | if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { | 972 | if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { |
@@ -1006,17 +976,14 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter, | |||
1006 | } | 976 | } |
1007 | 977 | ||
1008 | skb->protocol = eth_type_trans(skb, netdev); | 978 | skb->protocol = eth_type_trans(skb, netdev); |
1009 | |||
1010 | iph = (struct iphdr *)skb->data; | 979 | iph = (struct iphdr *)skb->data; |
1011 | th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); | 980 | th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); |
1012 | |||
1013 | length = (iph->ihl << 2) + (th->doff << 2) + lro_length; | 981 | length = (iph->ihl << 2) + (th->doff << 2) + lro_length; |
1014 | iph->tot_len = htons(length); | 982 | iph->tot_len = htons(length); |
1015 | iph->check = 0; | 983 | iph->check = 0; |
1016 | iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); | 984 | iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); |
1017 | th->psh = push; | 985 | th->psh = push; |
1018 | th->seq = htonl(seq_number); | 986 | th->seq = htonl(seq_number); |
1019 | |||
1020 | length = skb->len; | 987 | length = skb->len; |
1021 | 988 | ||
1022 | if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) | 989 | if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) |
@@ -1032,17 +999,16 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter, | |||
1032 | return buffer; | 999 | return buffer; |
1033 | } | 1000 | } |
1034 | 1001 | ||
1035 | int | 1002 | int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max) |
1036 | qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max) | ||
1037 | { | 1003 | { |
1004 | struct qlcnic_host_rds_ring *rds_ring; | ||
1038 | struct qlcnic_adapter *adapter = sds_ring->adapter; | 1005 | struct qlcnic_adapter *adapter = sds_ring->adapter; |
1039 | struct list_head *cur; | 1006 | struct list_head *cur; |
1040 | struct status_desc *desc; | 1007 | struct status_desc *desc; |
1041 | struct qlcnic_rx_buffer *rxbuf; | 1008 | struct qlcnic_rx_buffer *rxbuf; |
1042 | u64 sts_data0, sts_data1; | 1009 | u64 sts_data0, sts_data1; |
1043 | 1010 | __le64 owner_phantom = cpu_to_le64(STATUS_OWNER_PHANTOM); | |
1044 | int count = 0; | 1011 | int opcode, ring, desc_cnt, count = 0; |
1045 | int opcode, ring, desc_cnt; | ||
1046 | u32 consumer = sds_ring->consumer; | 1012 | u32 consumer = sds_ring->consumer; |
1047 | 1013 | ||
1048 | while (count < max) { | 1014 | while (count < max) { |
@@ -1060,8 +1026,8 @@ qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max) | |||
1060 | case QLCNIC_OLD_RXPKT_DESC: | 1026 | case QLCNIC_OLD_RXPKT_DESC: |
1061 | case QLCNIC_SYN_OFFLOAD: | 1027 | case QLCNIC_SYN_OFFLOAD: |
1062 | ring = qlcnic_get_sts_type(sts_data0); | 1028 | ring = qlcnic_get_sts_type(sts_data0); |
1063 | rxbuf = qlcnic_process_rcv(adapter, sds_ring, | 1029 | rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring, |
1064 | ring, sts_data0); | 1030 | sts_data0); |
1065 | break; | 1031 | break; |
1066 | case QLCNIC_LRO_DESC: | 1032 | case QLCNIC_LRO_DESC: |
1067 | ring = qlcnic_get_lro_sts_type(sts_data0); | 1033 | ring = qlcnic_get_lro_sts_type(sts_data0); |
@@ -1085,26 +1051,24 @@ qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max) | |||
1085 | skip: | 1051 | skip: |
1086 | for (; desc_cnt > 0; desc_cnt--) { | 1052 | for (; desc_cnt > 0; desc_cnt--) { |
1087 | desc = &sds_ring->desc_head[consumer]; | 1053 | desc = &sds_ring->desc_head[consumer]; |
1088 | desc->status_desc_data[0] = | 1054 | desc->status_desc_data[0] = owner_phantom; |
1089 | cpu_to_le64(STATUS_OWNER_PHANTOM); | ||
1090 | consumer = get_next_index(consumer, sds_ring->num_desc); | 1055 | consumer = get_next_index(consumer, sds_ring->num_desc); |
1091 | } | 1056 | } |
1092 | count++; | 1057 | count++; |
1093 | } | 1058 | } |
1094 | 1059 | ||
1095 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { | 1060 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { |
1096 | struct qlcnic_host_rds_ring *rds_ring = | 1061 | rds_ring = &adapter->recv_ctx->rds_rings[ring]; |
1097 | &adapter->recv_ctx->rds_rings[ring]; | ||
1098 | 1062 | ||
1099 | if (!list_empty(&sds_ring->free_list[ring])) { | 1063 | if (!list_empty(&sds_ring->free_list[ring])) { |
1100 | list_for_each(cur, &sds_ring->free_list[ring]) { | 1064 | list_for_each(cur, &sds_ring->free_list[ring]) { |
1101 | rxbuf = list_entry(cur, | 1065 | rxbuf = list_entry(cur, struct qlcnic_rx_buffer, |
1102 | struct qlcnic_rx_buffer, list); | 1066 | list); |
1103 | qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf); | 1067 | qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf); |
1104 | } | 1068 | } |
1105 | spin_lock(&rds_ring->lock); | 1069 | spin_lock(&rds_ring->lock); |
1106 | list_splice_tail_init(&sds_ring->free_list[ring], | 1070 | list_splice_tail_init(&sds_ring->free_list[ring], |
1107 | &rds_ring->free_list); | 1071 | &rds_ring->free_list); |
1108 | spin_unlock(&rds_ring->lock); | 1072 | spin_unlock(&rds_ring->lock); |
1109 | } | 1073 | } |
1110 | 1074 | ||
@@ -1119,9 +1083,8 @@ skip: | |||
1119 | return count; | 1083 | return count; |
1120 | } | 1084 | } |
1121 | 1085 | ||
1122 | void | 1086 | void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, |
1123 | qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, | 1087 | struct qlcnic_host_rds_ring *rds_ring) |
1124 | struct qlcnic_host_rds_ring *rds_ring) | ||
1125 | { | 1088 | { |
1126 | struct rcv_desc *pdesc; | 1089 | struct rcv_desc *pdesc; |
1127 | struct qlcnic_rx_buffer *buffer; | 1090 | struct qlcnic_rx_buffer *buffer; |
@@ -1130,8 +1093,8 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, | |||
1130 | struct list_head *head; | 1093 | struct list_head *head; |
1131 | 1094 | ||
1132 | producer = rds_ring->producer; | 1095 | producer = rds_ring->producer; |
1133 | |||
1134 | head = &rds_ring->free_list; | 1096 | head = &rds_ring->free_list; |
1097 | |||
1135 | while (!list_empty(head)) { | 1098 | while (!list_empty(head)) { |
1136 | 1099 | ||
1137 | buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); | 1100 | buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); |
@@ -1149,14 +1112,13 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, | |||
1149 | pdesc->addr_buffer = cpu_to_le64(buffer->dma); | 1112 | pdesc->addr_buffer = cpu_to_le64(buffer->dma); |
1150 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); | 1113 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); |
1151 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); | 1114 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); |
1152 | |||
1153 | producer = get_next_index(producer, rds_ring->num_desc); | 1115 | producer = get_next_index(producer, rds_ring->num_desc); |
1154 | } | 1116 | } |
1155 | 1117 | ||
1156 | if (count) { | 1118 | if (count) { |
1157 | rds_ring->producer = producer; | 1119 | rds_ring->producer = producer; |
1158 | writel((producer-1) & (rds_ring->num_desc-1), | 1120 | writel((producer-1) & (rds_ring->num_desc-1), |
1159 | rds_ring->crb_rcv_producer); | 1121 | rds_ring->crb_rcv_producer); |
1160 | } | 1122 | } |
1161 | } | 1123 | } |
1162 | 1124 | ||
@@ -1165,11 +1127,11 @@ static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter) | |||
1165 | int i; | 1127 | int i; |
1166 | unsigned char *data = skb->data; | 1128 | unsigned char *data = skb->data; |
1167 | 1129 | ||
1168 | printk(KERN_INFO "\n"); | 1130 | pr_info("\n");
1169 | for (i = 0; i < skb->len; i++) { | 1131 | for (i = 0; i < skb->len; i++) { |
1170 | QLCDB(adapter, DRV, "%02x ", data[i]); | 1132 | QLCDB(adapter, DRV, "%02x ", data[i]); |
1171 | if ((i & 0x0f) == 8) | 1133 | if ((i & 0x0f) == 8) |
1172 | printk(KERN_INFO "\n"); | 1134 | pr_info("\n");
1173 | } | 1135 | } |
1174 | } | 1136 | } |
1175 | 1137 | ||
@@ -1218,8 +1180,7 @@ static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring, | |||
1218 | return; | 1180 | return; |
1219 | } | 1181 | } |
1220 | 1182 | ||
1221 | void | 1183 | void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) |
1222 | qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) | ||
1223 | { | 1184 | { |
1224 | struct qlcnic_adapter *adapter = sds_ring->adapter; | 1185 | struct qlcnic_adapter *adapter = sds_ring->adapter; |
1225 | struct status_desc *desc; | 1186 | struct status_desc *desc; |
@@ -1277,22 +1238,24 @@ void qlcnic_fetch_mac(u32 off1, u32 off2, u8 alt_mac, u8 *mac) | |||
1277 | 1238 | ||
1278 | int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) | 1239 | int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) |
1279 | { | 1240 | { |
1280 | int ring; | 1241 | int ring, max_sds_rings; |
1281 | struct qlcnic_host_sds_ring *sds_ring; | 1242 | struct qlcnic_host_sds_ring *sds_ring; |
1282 | struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; | 1243 | struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; |
1283 | 1244 | ||
1284 | if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) | 1245 | if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) |
1285 | return -ENOMEM; | 1246 | return -ENOMEM; |
1286 | 1247 | ||
1248 | max_sds_rings = adapter->max_sds_rings; | ||
1249 | |||
1287 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { | 1250 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { |
1288 | sds_ring = &recv_ctx->sds_rings[ring]; | 1251 | sds_ring = &recv_ctx->sds_rings[ring]; |
1289 | 1252 | ||
1290 | if (ring == adapter->max_sds_rings - 1) | 1253 | if (ring == max_sds_rings - 1) |
1291 | netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll, | 1254 | netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll, |
1292 | QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings); | 1255 | QLCNIC_NETDEV_WEIGHT / max_sds_rings); |
1293 | else | 1256 | else |
1294 | netif_napi_add(netdev, &sds_ring->napi, | 1257 | netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll, |
1295 | qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2); | 1258 | QLCNIC_NETDEV_WEIGHT*2); |
1296 | } | 1259 | } |
1297 | 1260 | ||
1298 | return 0; | 1261 | return 0; |