author     David S. Miller <davem@davemloft.net>   2017-09-26 16:44:32 -0400
committer  David S. Miller <davem@davemloft.net>   2017-09-26 16:44:32 -0400
commit     2760f5a34421bba937dafa29a9decdbad11d2718
tree       e2aea687a71fb5df82235f822c4facd51b84cad6
parent     62b982eeb4589b2e6d7c01a90590e3a4c2b2ca19
parent     c7545689244b50c562b1fbbc71905fba224c8a05
Merge branch 'aquantia-fixes'
Igor Russkikh says:
====================
aquantia: Atlantic driver bugfixes and improvements
This series contains bug fixes for the aQuantia Atlantic driver.
Changes in v2:
Review comments applied:
- the min_mtu assignment was removed
- the extra MTU range check was removed
- error code handling was improved
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
8 files changed, 129 insertions, 98 deletions
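The common thread in the series is TX flow control: the driver-private aq_nic_ndev_queue_start()/aq_nic_ndev_queue_stop() helpers are replaced with the stock netif subqueue calls, gated by a stop/wake hysteresis built around the new AQ_CFG_RESTART_DESC_THRES. The standalone model below is only a sketch of that hysteresis (the struct, helper name and stand-in flag are hypothetical; the real logic is aq_ring_update_queue_state() in the aq_ring.c hunk further down):

#include <stdbool.h>

#define SKB_FRAGS_MAX      32U                   /* mirrors AQ_CFG_SKB_FRAGS_MAX */
#define RESTART_DESC_THRES (SKB_FRAGS_MAX * 2U)  /* mirrors AQ_CFG_RESTART_DESC_THRES */

struct tx_queue_model {
	unsigned int avail_descs;   /* free descriptors left in the TX ring */
	bool stopped;               /* stands in for __netif_subqueue_stopped() */
};

static void update_queue_state(struct tx_queue_model *q)
{
	/* Stop once a worst-case (maximally fragmented) skb no longer fits. */
	if (q->avail_descs <= SKB_FRAGS_MAX)
		q->stopped = true;
	/* Wake only after twice that many descriptors have been reclaimed,
	 * so the queue does not flap on every completed frame.
	 */
	else if (q->avail_descs > RESTART_DESC_THRES)
		q->stopped = false;
}

In the driver itself the check runs in two places: aq_nic_xmit() refreshes the state before mapping an skb and returns NETDEV_TX_BUSY if the queue ended up stopped, and the NAPI TX-clean path in aq_vec_poll() refreshes it again so a stopped queue is woken once enough descriptors are free.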
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 214986436ece..0fdaaa643073 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -51,6 +51,10 @@
 
 #define AQ_CFG_SKB_FRAGS_MAX 32U
 
+/* Number of descriptors available in one ring to resume this ring queue
+ */
+#define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2)
+
 #define AQ_CFG_NAPI_WEIGHT 64U
 
 #define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 6ac9e2602d6d..0a5bb4114eb4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -119,6 +119,35 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
 	return 0;
 }
 
+static int aq_nic_update_link_status(struct aq_nic_s *self)
+{
+	int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
+
+	if (err)
+		return err;
+
+	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps)
+		pr_info("%s: link change old %d new %d\n",
+			AQ_CFG_DRV_NAME, self->link_status.mbps,
+			self->aq_hw->aq_link_status.mbps);
+
+	self->link_status = self->aq_hw->aq_link_status;
+	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
+		aq_utils_obj_set(&self->header.flags,
+				 AQ_NIC_FLAG_STARTED);
+		aq_utils_obj_clear(&self->header.flags,
+				   AQ_NIC_LINK_DOWN);
+		netif_carrier_on(self->ndev);
+		netif_tx_wake_all_queues(self->ndev);
+	}
+	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
+		netif_carrier_off(self->ndev);
+		netif_tx_disable(self->ndev);
+		aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
+	}
+	return 0;
+}
+
 static void aq_nic_service_timer_cb(unsigned long param)
 {
 	struct aq_nic_s *self = (struct aq_nic_s *)param;
@@ -131,26 +160,13 @@ static void aq_nic_service_timer_cb(unsigned long param)
 	if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
 		goto err_exit;
 
-	err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
-	if (err < 0)
+	err = aq_nic_update_link_status(self);
+	if (err)
 		goto err_exit;
 
-	self->link_status = self->aq_hw->aq_link_status;
-
 	self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
 		    self->aq_nic_cfg.is_interrupt_moderation);
 
-	if (self->link_status.mbps) {
-		aq_utils_obj_set(&self->header.flags,
-				 AQ_NIC_FLAG_STARTED);
-		aq_utils_obj_clear(&self->header.flags,
-				   AQ_NIC_LINK_DOWN);
-		netif_carrier_on(self->ndev);
-	} else {
-		netif_carrier_off(self->ndev);
-		aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
-	}
-
 	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
 	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
 	for (i = AQ_DIMOF(self->aq_vec); i--;) {
@@ -214,7 +230,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
 	SET_NETDEV_DEV(ndev, dev);
 
 	ndev->if_port = port;
-	ndev->min_mtu = ETH_MIN_MTU;
 	self->ndev = ndev;
 
 	self->aq_pci_func = aq_pci_func;
@@ -241,7 +256,6 @@ err_exit:
 int aq_nic_ndev_register(struct aq_nic_s *self)
 {
 	int err = 0;
-	unsigned int i = 0U;
 
 	if (!self->ndev) {
 		err = -EINVAL;
@@ -263,8 +277,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
 
 	netif_carrier_off(self->ndev);
 
-	for (i = AQ_CFG_VECS_MAX; i--;)
-		aq_nic_ndev_queue_stop(self, i);
+	netif_tx_disable(self->ndev);
 
 	err = register_netdev(self->ndev);
 	if (err < 0)
@@ -283,6 +296,7 @@ int aq_nic_ndev_init(struct aq_nic_s *self)
 	self->ndev->features = aq_hw_caps->hw_features;
 	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
 	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
+	self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;
 
 	return 0;
 }
@@ -318,12 +332,8 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
 		err = -EINVAL;
 		goto err_exit;
 	}
-	if (netif_running(ndev)) {
-		unsigned int i;
-
-		for (i = AQ_CFG_VECS_MAX; i--;)
-			netif_stop_subqueue(ndev, i);
-	}
+	if (netif_running(ndev))
+		netif_tx_disable(ndev);
 
 	for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
 	     self->aq_vecs++) {
@@ -383,16 +393,6 @@ err_exit:
 	return err;
 }
 
-void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
-{
-	netif_start_subqueue(self->ndev, idx);
-}
-
-void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
-{
-	netif_stop_subqueue(self->ndev, idx);
-}
-
 int aq_nic_start(struct aq_nic_s *self)
 {
 	struct aq_vec_s *aq_vec = NULL;
@@ -451,10 +451,6 @@ int aq_nic_start(struct aq_nic_s *self)
 		goto err_exit;
 	}
 
-	for (i = 0U, aq_vec = self->aq_vec[0];
-	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
-		aq_nic_ndev_queue_start(self, i);
-
 	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
 	if (err < 0)
 		goto err_exit;
@@ -463,6 +459,8 @@ int aq_nic_start(struct aq_nic_s *self)
 	if (err < 0)
 		goto err_exit;
 
+	netif_tx_start_all_queues(self->ndev);
+
 err_exit:
 	return err;
 }
@@ -475,6 +473,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int frag_count = 0U;
 	unsigned int dx = ring->sw_tail;
+	struct aq_ring_buff_s *first = NULL;
 	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
 
 	if (unlikely(skb_is_gso(skb))) {
@@ -485,6 +484,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
 		dx_buff->len_l4 = tcp_hdrlen(skb);
 		dx_buff->mss = skb_shinfo(skb)->gso_size;
 		dx_buff->is_txc = 1U;
+		dx_buff->eop_index = 0xffffU;
 
 		dx_buff->is_ipv6 =
 			(ip_hdr(skb)->version == 6) ? 1U : 0U;
@@ -504,6 +504,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
 	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
 		goto exit;
 
+	first = dx_buff;
 	dx_buff->len_pkt = skb->len;
 	dx_buff->is_sop = 1U;
 	dx_buff->is_mapped = 1U;
@@ -532,40 +533,46 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
 
 	for (; nr_frags--; ++frag_count) {
 		unsigned int frag_len = 0U;
+		unsigned int buff_offset = 0U;
+		unsigned int buff_size = 0U;
 		dma_addr_t frag_pa;
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
 
 		frag_len = skb_frag_size(frag);
-		frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
-					   frag_len, DMA_TO_DEVICE);
 
-		if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa)))
-			goto mapping_error;
+		while (frag_len) {
+			if (frag_len > AQ_CFG_TX_FRAME_MAX)
+				buff_size = AQ_CFG_TX_FRAME_MAX;
+			else
+				buff_size = frag_len;
+
+			frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
+						   frag,
+						   buff_offset,
+						   buff_size,
+						   DMA_TO_DEVICE);
+
+			if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
+						       frag_pa)))
+				goto mapping_error;
 
-		while (frag_len > AQ_CFG_TX_FRAME_MAX) {
 			dx = aq_ring_next_dx(ring, dx);
 			dx_buff = &ring->buff_ring[dx];
 
 			dx_buff->flags = 0U;
-			dx_buff->len = AQ_CFG_TX_FRAME_MAX;
+			dx_buff->len = buff_size;
 			dx_buff->pa = frag_pa;
 			dx_buff->is_mapped = 1U;
+			dx_buff->eop_index = 0xffffU;
+
+			frag_len -= buff_size;
+			buff_offset += buff_size;
 
-			frag_len -= AQ_CFG_TX_FRAME_MAX;
-			frag_pa += AQ_CFG_TX_FRAME_MAX;
 			++ret;
 		}
-
-		dx = aq_ring_next_dx(ring, dx);
-		dx_buff = &ring->buff_ring[dx];
-
-		dx_buff->flags = 0U;
-		dx_buff->len = frag_len;
-		dx_buff->pa = frag_pa;
-		dx_buff->is_mapped = 1U;
-		++ret;
 	}
 
+	first->eop_index = dx;
 	dx_buff->is_eop = 1U;
 	dx_buff->skb = skb;
 	goto exit;
@@ -602,7 +609,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
 	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
 	unsigned int tc = 0U;
 	int err = NETDEV_TX_OK;
-	bool is_nic_in_bad_state;
 
 	frags = skb_shinfo(skb)->nr_frags + 1;
 
@@ -613,13 +619,10 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
 		goto err_exit;
 	}
 
-	is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags,
-						AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
-			      (aq_ring_avail_dx(ring) <
-			       AQ_CFG_SKB_FRAGS_MAX);
+	aq_ring_update_queue_state(ring);
 
-	if (is_nic_in_bad_state) {
-		aq_nic_ndev_queue_stop(self, ring->idx);
+	/* Above status update may stop the queue. Check this. */
+	if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
 		err = NETDEV_TX_BUSY;
 		goto err_exit;
 	}
@@ -631,9 +634,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
 				      ring,
 				      frags);
 	if (err >= 0) {
-		if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
-			aq_nic_ndev_queue_stop(self, ring->idx);
-
 		++ring->stats.tx.packets;
 		ring->stats.tx.bytes += skb->len;
 	}
@@ -693,16 +693,9 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
 
 int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
 {
-	int err = 0;
-
-	if (new_mtu > self->aq_hw_caps.mtu) {
-		err = -EINVAL;
-		goto err_exit;
-	}
 	self->aq_nic_cfg.mtu = new_mtu;
 
-err_exit:
-	return err;
+	return 0;
 }
 
 int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
@@ -905,9 +898,7 @@ int aq_nic_stop(struct aq_nic_s *self)
 	struct aq_vec_s *aq_vec = NULL;
 	unsigned int i = 0U;
 
-	for (i = 0U, aq_vec = self->aq_vec[0];
-	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
-		aq_nic_ndev_queue_stop(self, i);
+	netif_tx_disable(self->ndev);
 
 	del_timer_sync(&self->service_timer);
 
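One note on the aq_nic_map_skb() hunk above: fragments larger than AQ_CFG_TX_FRAME_MAX are now DMA-mapped piece by piece at increasing offsets, one mapping and one descriptor per piece, instead of being mapped once in full and advanced by raw address arithmetic. Below is a standalone sketch of the chunking arithmetic only; map_fragment() and the 16 KiB limit are illustrative stand-ins, not driver code:

#include <stdio.h>

static void map_fragment(unsigned int frag_len, unsigned int chunk_max)
{
	unsigned int offset = 0U;

	while (frag_len) {
		/* Each pass maps at most chunk_max bytes at the current
		 * offset and consumes one TX descriptor.
		 */
		unsigned int size = frag_len > chunk_max ? chunk_max : frag_len;

		printf("descriptor: offset=%u len=%u\n", offset, size);

		frag_len -= size;
		offset += size;
	}
}

int main(void)
{
	/* A 40 KiB fragment with a 16 KiB per-descriptor limit becomes
	 * three descriptors: 16 KiB + 16 KiB + 8 KiB.
	 */
	map_fragment(40U * 1024U, 16U * 1024U);
	return 0;
}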
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 7fc2a5ecb2b7..0ddd556ff901 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -83,8 +83,6 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
 int aq_nic_init(struct aq_nic_s *self);
 int aq_nic_cfg_start(struct aq_nic_s *self);
 int aq_nic_ndev_register(struct aq_nic_s *self);
-void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx);
-void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx);
 void aq_nic_ndev_free(struct aq_nic_s *self);
 int aq_nic_start(struct aq_nic_s *self);
 int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 4eee1996a825..0654e0c76bc2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -104,6 +104,38 @@ int aq_ring_init(struct aq_ring_s *self)
 	return 0;
 }
 
+static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
+				       unsigned int t)
+{
+	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
+}
+
+void aq_ring_update_queue_state(struct aq_ring_s *ring)
+{
+	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
+		aq_ring_queue_stop(ring);
+	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
+		aq_ring_queue_wake(ring);
+}
+
+void aq_ring_queue_wake(struct aq_ring_s *ring)
+{
+	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
+
+	if (__netif_subqueue_stopped(ndev, ring->idx)) {
+		netif_wake_subqueue(ndev, ring->idx);
+		ring->stats.tx.queue_restarts++;
+	}
+}
+
+void aq_ring_queue_stop(struct aq_ring_s *ring)
+{
+	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
+
+	if (!__netif_subqueue_stopped(ndev, ring->idx))
+		netif_stop_subqueue(ndev, ring->idx);
+}
+
 void aq_ring_tx_clean(struct aq_ring_s *self)
 {
 	struct device *dev = aq_nic_get_dev(self->aq_nic);
@@ -113,23 +145,28 @@ void aq_ring_tx_clean(struct aq_ring_s *self)
 		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
 
 		if (likely(buff->is_mapped)) {
-			if (unlikely(buff->is_sop))
+			if (unlikely(buff->is_sop)) {
+				if (!buff->is_eop &&
+				    buff->eop_index != 0xffffU &&
+				    (!aq_ring_dx_in_range(self->sw_head,
+							  buff->eop_index,
+							  self->hw_head)))
+					break;
+
 				dma_unmap_single(dev, buff->pa, buff->len,
 						 DMA_TO_DEVICE);
-			else
+			} else {
 				dma_unmap_page(dev, buff->pa, buff->len,
 					       DMA_TO_DEVICE);
+			}
 		}
 
 		if (unlikely(buff->is_eop))
 			dev_kfree_skb_any(buff->skb);
-	}
-}
 
-static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i,
-					       unsigned int t)
-{
-	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
+		buff->pa = 0U;
+		buff->eop_index = 0xffffU;
+	}
 }
 
 #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
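The guard added to aq_ring_tx_clean() above relies on aq_ring_dx_in_range(), a wrap-aware "is index i strictly between head h and tail t" test on the circular ring, so a packet's first descriptor is not unmapped until the hardware head has passed its recorded eop_index. A standalone illustration of just that predicate (the predicate is copied from the hunk above; the assert cases are added examples):

#include <assert.h>
#include <stdbool.h>

static bool dx_in_range(unsigned int h, unsigned int i, unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

int main(void)
{
	/* No wrap: index 3 lies between 2 and 5. */
	assert(dx_in_range(2, 3, 5));
	/* Wrapped ring (t < h): index 1 lies "after" 6 once the ring wraps. */
	assert(dx_in_range(6, 1, 4));
	/* Endpoints are excluded. */
	assert(!dx_in_range(2, 2, 5));
	return 0;
}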
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 782176c5f4f8..5844078764bd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -65,7 +65,7 @@ struct __packed aq_ring_buff_s {
 		};
 		union {
 			struct {
-				u32 len:16;
+				u16 len;
 				u32 is_ip_cso:1;
 				u32 is_udp_cso:1;
 				u32 is_tcp_cso:1;
@@ -77,8 +77,10 @@ struct __packed aq_ring_buff_s {
 				u32 is_cleaned:1;
 				u32 is_error:1;
 				u32 rsvd3:6;
+				u16 eop_index;
+				u16 rsvd4;
 			};
-			u32 flags;
+			u64 flags;
 		};
 	};
 
@@ -94,6 +96,7 @@ struct aq_ring_stats_tx_s {
 	u64 errors;
 	u64 packets;
 	u64 bytes;
+	u64 queue_restarts;
 };
 
 union aq_ring_stats_s {
@@ -147,6 +150,9 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
 int aq_ring_init(struct aq_ring_s *self);
 void aq_ring_rx_deinit(struct aq_ring_s *self);
 void aq_ring_free(struct aq_ring_s *self);
+void aq_ring_update_queue_state(struct aq_ring_s *ring);
+void aq_ring_queue_wake(struct aq_ring_s *ring);
+void aq_ring_queue_stop(struct aq_ring_s *ring);
 void aq_ring_tx_clean(struct aq_ring_s *self);
 int aq_ring_rx_clean(struct aq_ring_s *self,
 		     struct napi_struct *napi,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index ebf588004c46..305ff8ffac2c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -59,12 +59,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
 		if (ring[AQ_VEC_TX_ID].sw_head !=
 		    ring[AQ_VEC_TX_ID].hw_head) {
 			aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
-
-			if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) >
-			    AQ_CFG_SKB_FRAGS_MAX) {
-				aq_nic_ndev_queue_start(self->aq_nic,
-					ring[AQ_VEC_TX_ID].idx);
-			}
+			aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
 			was_tx_cleaned = true;
 		}
 
@@ -364,6 +359,7 @@ void aq_vec_add_stats(struct aq_vec_s *self,
 		stats_tx->packets += tx->packets;
 		stats_tx->bytes += tx->bytes;
 		stats_tx->errors += tx->errors;
+		stats_tx->queue_restarts += tx->queue_restarts;
 	}
 }
 
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index f3957e930340..fcf89e25a773 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -16,7 +16,7 @@
 
 #include "../aq_common.h"
 
-#define HW_ATL_B0_MTU_JUMBO (16000U)
+#define HW_ATL_B0_MTU_JUMBO 16352U
 #define HW_ATL_B0_MTU 1514U
 
 #define HW_ATL_B0_TX_RINGS 4U
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 4f5ec9a0fbfb..bf734b32e44b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -351,8 +351,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
 			break;
 
 		default:
-			link_status->mbps = 0U;
-			break;
+			return -EBUSY;
 		}
 	}
 