diff options
author | David S. Miller <davem@davemloft.net> | 2017-02-20 17:11:11 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2017-02-20 17:11:11 -0500 |
commit | 8676ea8f3f9b7dc93544477821f107004c0366cf (patch) | |
tree | 969cd6df84a1250f2a4f55c9ae6cac67aa04cd20 | |
parent | 45ee2440a33f1c8768cd37a365154343eb5589e3 (diff) | |
parent | e399553d233678687ce4b149c822194d17e07675 (diff) |
Merge branch 'aquantia-next'
Pavel Belous says:
====================
net: ethernet: aquantia: improvements and fixes
The following patchset contains improvements and fixes for aQuantia
AQtion ethernet driver from net-next tree.
Most fixes are based on the comments from Lino Sanfilippo.
Sanity testing was performed on real HW. No regression found.
v1->v2: 1)Removed buffer copying.
2)Fixed dma error handling.
v2->v3: 1)Fixes for aq_ndev_change_mtu:
-Use core MTU checking for min_mtu.
-Removed extra new_mtu assignment.
2)Reverse XMAS tree in aq_ring_rx_fill.
v3->v4: 1)Use ndev->reg_state instead of the "is_ndev_registered" flag.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
8 files changed, 121 insertions, 162 deletions
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index c17c70adef0d..dad63623be6a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c | |||
@@ -87,33 +87,17 @@ err_exit: | |||
87 | static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev) | 87 | static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
88 | { | 88 | { |
89 | struct aq_nic_s *aq_nic = netdev_priv(ndev); | 89 | struct aq_nic_s *aq_nic = netdev_priv(ndev); |
90 | int err = 0; | ||
91 | |||
92 | err = aq_nic_xmit(aq_nic, skb); | ||
93 | if (err < 0) | ||
94 | goto err_exit; | ||
95 | 90 | ||
96 | err_exit: | 91 | return aq_nic_xmit(aq_nic, skb); |
97 | return err; | ||
98 | } | 92 | } |
99 | 93 | ||
100 | static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) | 94 | static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) |
101 | { | 95 | { |
102 | struct aq_nic_s *aq_nic = netdev_priv(ndev); | 96 | struct aq_nic_s *aq_nic = netdev_priv(ndev); |
103 | int err = 0; | 97 | int err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN); |
104 | 98 | ||
105 | if (new_mtu == ndev->mtu) { | ||
106 | err = 0; | ||
107 | goto err_exit; | ||
108 | } | ||
109 | if (new_mtu < 68) { | ||
110 | err = -EINVAL; | ||
111 | goto err_exit; | ||
112 | } | ||
113 | err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN); | ||
114 | if (err < 0) | 99 | if (err < 0) |
115 | goto err_exit; | 100 | goto err_exit; |
116 | ndev->mtu = new_mtu; | ||
117 | 101 | ||
118 | if (netif_running(ndev)) { | 102 | if (netif_running(ndev)) { |
119 | aq_ndev_close(ndev); | 103 | aq_ndev_close(ndev); |
@@ -252,22 +236,4 @@ static struct pci_driver aq_pci_ops = { | |||
252 | .resume = aq_pci_resume, | 236 | .resume = aq_pci_resume, |
253 | }; | 237 | }; |
254 | 238 | ||
255 | static int __init aq_module_init(void) | 239 | module_pci_driver(aq_pci_ops); |
256 | { | ||
257 | int err = 0; | ||
258 | |||
259 | err = pci_register_driver(&aq_pci_ops); | ||
260 | if (err < 0) | ||
261 | goto err_exit; | ||
262 | |||
263 | err_exit: | ||
264 | return err; | ||
265 | } | ||
266 | |||
267 | static void __exit aq_module_exit(void) | ||
268 | { | ||
269 | pci_unregister_driver(&aq_pci_ops); | ||
270 | } | ||
271 | |||
272 | module_init(aq_module_init); | ||
273 | module_exit(aq_module_exit); | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index aa22a7ce710b..ee78444bfb88 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c | |||
@@ -122,14 +122,11 @@ static void aq_nic_service_timer_cb(unsigned long param) | |||
122 | struct aq_nic_s *self = (struct aq_nic_s *)param; | 122 | struct aq_nic_s *self = (struct aq_nic_s *)param; |
123 | struct net_device *ndev = aq_nic_get_ndev(self); | 123 | struct net_device *ndev = aq_nic_get_ndev(self); |
124 | int err = 0; | 124 | int err = 0; |
125 | bool is_busy = false; | ||
126 | unsigned int i = 0U; | 125 | unsigned int i = 0U; |
127 | struct aq_hw_link_status_s link_status; | 126 | struct aq_hw_link_status_s link_status; |
128 | struct aq_ring_stats_rx_s stats_rx; | 127 | struct aq_ring_stats_rx_s stats_rx; |
129 | struct aq_ring_stats_tx_s stats_tx; | 128 | struct aq_ring_stats_tx_s stats_tx; |
130 | 129 | ||
131 | atomic_inc(&self->header.busy_count); | ||
132 | is_busy = true; | ||
133 | if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) | 130 | if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) |
134 | goto err_exit; | 131 | goto err_exit; |
135 | 132 | ||
@@ -170,8 +167,6 @@ static void aq_nic_service_timer_cb(unsigned long param) | |||
170 | ndev->stats.tx_errors = stats_tx.errors; | 167 | ndev->stats.tx_errors = stats_tx.errors; |
171 | 168 | ||
172 | err_exit: | 169 | err_exit: |
173 | if (is_busy) | ||
174 | atomic_dec(&self->header.busy_count); | ||
175 | mod_timer(&self->service_timer, | 170 | mod_timer(&self->service_timer, |
176 | jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL); | 171 | jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL); |
177 | } | 172 | } |
@@ -207,18 +202,20 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, | |||
207 | int err = 0; | 202 | int err = 0; |
208 | 203 | ||
209 | ndev = aq_nic_ndev_alloc(); | 204 | ndev = aq_nic_ndev_alloc(); |
210 | self = netdev_priv(ndev); | 205 | if (!ndev) { |
211 | if (!self) { | 206 | err = -ENOMEM; |
212 | err = -EINVAL; | ||
213 | goto err_exit; | 207 | goto err_exit; |
214 | } | 208 | } |
215 | 209 | ||
210 | self = netdev_priv(ndev); | ||
211 | |||
216 | ndev->netdev_ops = ndev_ops; | 212 | ndev->netdev_ops = ndev_ops; |
217 | ndev->ethtool_ops = et_ops; | 213 | ndev->ethtool_ops = et_ops; |
218 | 214 | ||
219 | SET_NETDEV_DEV(ndev, dev); | 215 | SET_NETDEV_DEV(ndev, dev); |
220 | 216 | ||
221 | ndev->if_port = port; | 217 | ndev->if_port = port; |
218 | ndev->min_mtu = ETH_MIN_MTU; | ||
222 | self->ndev = ndev; | 219 | self->ndev = ndev; |
223 | 220 | ||
224 | self->aq_pci_func = aq_pci_func; | 221 | self->aq_pci_func = aq_pci_func; |
@@ -264,16 +261,16 @@ int aq_nic_ndev_register(struct aq_nic_s *self) | |||
264 | ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent); | 261 | ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent); |
265 | } | 262 | } |
266 | #endif | 263 | #endif |
267 | err = register_netdev(self->ndev); | ||
268 | if (err < 0) | ||
269 | goto err_exit; | ||
270 | 264 | ||
271 | self->is_ndev_registered = true; | ||
272 | netif_carrier_off(self->ndev); | 265 | netif_carrier_off(self->ndev); |
273 | 266 | ||
274 | for (i = AQ_CFG_VECS_MAX; i--;) | 267 | for (i = AQ_CFG_VECS_MAX; i--;) |
275 | aq_nic_ndev_queue_stop(self, i); | 268 | aq_nic_ndev_queue_stop(self, i); |
276 | 269 | ||
270 | err = register_netdev(self->ndev); | ||
271 | if (err < 0) | ||
272 | goto err_exit; | ||
273 | |||
277 | err_exit: | 274 | err_exit: |
278 | return err; | 275 | return err; |
279 | } | 276 | } |
@@ -296,7 +293,7 @@ void aq_nic_ndev_free(struct aq_nic_s *self) | |||
296 | if (!self->ndev) | 293 | if (!self->ndev) |
297 | goto err_exit; | 294 | goto err_exit; |
298 | 295 | ||
299 | if (self->is_ndev_registered) | 296 | if (self->ndev->reg_state == NETREG_REGISTERED) |
300 | unregister_netdev(self->ndev); | 297 | unregister_netdev(self->ndev); |
301 | 298 | ||
302 | if (self->aq_hw) | 299 | if (self->aq_hw) |
@@ -471,95 +468,116 @@ err_exit: | |||
471 | return err; | 468 | return err; |
472 | } | 469 | } |
473 | 470 | ||
474 | static unsigned int aq_nic_map_skb_frag(struct aq_nic_s *self, | 471 | static unsigned int aq_nic_map_skb(struct aq_nic_s *self, |
475 | struct sk_buff *skb, | 472 | struct sk_buff *skb, |
476 | struct aq_ring_buff_s *dx) | 473 | struct aq_ring_s *ring) |
477 | { | 474 | { |
478 | unsigned int ret = 0U; | 475 | unsigned int ret = 0U; |
479 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; | 476 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; |
480 | unsigned int frag_count = 0U; | 477 | unsigned int frag_count = 0U; |
478 | unsigned int dx = ring->sw_tail; | ||
479 | struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx]; | ||
481 | 480 | ||
482 | dx->flags = 0U; | 481 | if (unlikely(skb_is_gso(skb))) { |
483 | dx->len = skb_headlen(skb); | 482 | dx_buff->flags = 0U; |
484 | dx->pa = dma_map_single(aq_nic_get_dev(self), skb->data, dx->len, | 483 | dx_buff->len_pkt = skb->len; |
485 | DMA_TO_DEVICE); | 484 | dx_buff->len_l2 = ETH_HLEN; |
486 | dx->len_pkt = skb->len; | 485 | dx_buff->len_l3 = ip_hdrlen(skb); |
487 | dx->is_sop = 1U; | 486 | dx_buff->len_l4 = tcp_hdrlen(skb); |
488 | dx->is_mapped = 1U; | 487 | dx_buff->mss = skb_shinfo(skb)->gso_size; |
488 | dx_buff->is_txc = 1U; | ||
489 | |||
490 | dx = aq_ring_next_dx(ring, dx); | ||
491 | dx_buff = &ring->buff_ring[dx]; | ||
492 | ++ret; | ||
493 | } | ||
489 | 494 | ||
495 | dx_buff->flags = 0U; | ||
496 | dx_buff->len = skb_headlen(skb); | ||
497 | dx_buff->pa = dma_map_single(aq_nic_get_dev(self), | ||
498 | skb->data, | ||
499 | dx_buff->len, | ||
500 | DMA_TO_DEVICE); | ||
501 | |||
502 | if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) | ||
503 | goto exit; | ||
504 | |||
505 | dx_buff->len_pkt = skb->len; | ||
506 | dx_buff->is_sop = 1U; | ||
507 | dx_buff->is_mapped = 1U; | ||
490 | ++ret; | 508 | ++ret; |
491 | 509 | ||
492 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 510 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
493 | dx->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 1U : 0U; | 511 | dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? |
494 | dx->is_tcp_cso = | 512 | 1U : 0U; |
513 | dx_buff->is_tcp_cso = | ||
495 | (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U; | 514 | (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U; |
496 | dx->is_udp_cso = | 515 | dx_buff->is_udp_cso = |
497 | (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U; | 516 | (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U; |
498 | } | 517 | } |
499 | 518 | ||
500 | for (; nr_frags--; ++frag_count) { | 519 | for (; nr_frags--; ++frag_count) { |
501 | unsigned int frag_len; | 520 | unsigned int frag_len = 0U; |
502 | dma_addr_t frag_pa; | 521 | dma_addr_t frag_pa; |
503 | skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; | 522 | skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; |
504 | 523 | ||
505 | frag_len = skb_frag_size(frag); | 524 | frag_len = skb_frag_size(frag); |
506 | |||
507 | frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0, | 525 | frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0, |
508 | frag_len, DMA_TO_DEVICE); | 526 | frag_len, DMA_TO_DEVICE); |
509 | 527 | ||
528 | if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa))) | ||
529 | goto mapping_error; | ||
530 | |||
510 | while (frag_len > AQ_CFG_TX_FRAME_MAX) { | 531 | while (frag_len > AQ_CFG_TX_FRAME_MAX) { |
511 | ++dx; | 532 | dx = aq_ring_next_dx(ring, dx); |
512 | ++ret; | 533 | dx_buff = &ring->buff_ring[dx]; |
513 | dx->flags = 0U; | 534 | |
514 | dx->len = AQ_CFG_TX_FRAME_MAX; | 535 | dx_buff->flags = 0U; |
515 | dx->pa = frag_pa; | 536 | dx_buff->len = AQ_CFG_TX_FRAME_MAX; |
516 | dx->is_mapped = 1U; | 537 | dx_buff->pa = frag_pa; |
538 | dx_buff->is_mapped = 1U; | ||
517 | 539 | ||
518 | frag_len -= AQ_CFG_TX_FRAME_MAX; | 540 | frag_len -= AQ_CFG_TX_FRAME_MAX; |
519 | frag_pa += AQ_CFG_TX_FRAME_MAX; | 541 | frag_pa += AQ_CFG_TX_FRAME_MAX; |
542 | ++ret; | ||
520 | } | 543 | } |
521 | 544 | ||
522 | ++dx; | 545 | dx = aq_ring_next_dx(ring, dx); |
523 | ++ret; | 546 | dx_buff = &ring->buff_ring[dx]; |
524 | 547 | ||
525 | dx->flags = 0U; | 548 | dx_buff->flags = 0U; |
526 | dx->len = frag_len; | 549 | dx_buff->len = frag_len; |
527 | dx->pa = frag_pa; | 550 | dx_buff->pa = frag_pa; |
528 | dx->is_mapped = 1U; | 551 | dx_buff->is_mapped = 1U; |
552 | ++ret; | ||
529 | } | 553 | } |
530 | 554 | ||
531 | dx->is_eop = 1U; | 555 | dx_buff->is_eop = 1U; |
532 | dx->skb = skb; | 556 | dx_buff->skb = skb; |
533 | 557 | goto exit; | |
534 | return ret; | 558 | |
535 | } | 559 | mapping_error: |
536 | 560 | for (dx = ring->sw_tail; | |
537 | static unsigned int aq_nic_map_skb_lso(struct aq_nic_s *self, | 561 | ret > 0; |
538 | struct sk_buff *skb, | 562 | --ret, dx = aq_ring_next_dx(ring, dx)) { |
539 | struct aq_ring_buff_s *dx) | 563 | dx_buff = &ring->buff_ring[dx]; |
540 | { | 564 | |
541 | dx->flags = 0U; | 565 | if (!dx_buff->is_txc && dx_buff->pa) { |
542 | dx->len_pkt = skb->len; | 566 | if (unlikely(dx_buff->is_sop)) { |
543 | dx->len_l2 = ETH_HLEN; | 567 | dma_unmap_single(aq_nic_get_dev(self), |
544 | dx->len_l3 = ip_hdrlen(skb); | 568 | dx_buff->pa, |
545 | dx->len_l4 = tcp_hdrlen(skb); | 569 | dx_buff->len, |
546 | dx->mss = skb_shinfo(skb)->gso_size; | 570 | DMA_TO_DEVICE); |
547 | dx->is_txc = 1U; | 571 | } else { |
548 | return 1U; | 572 | dma_unmap_page(aq_nic_get_dev(self), |
549 | } | 573 | dx_buff->pa, |
550 | 574 | dx_buff->len, | |
551 | static unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, | 575 | DMA_TO_DEVICE); |
552 | struct aq_ring_buff_s *dx) | 576 | } |
553 | { | 577 | } |
554 | unsigned int ret = 0U; | ||
555 | |||
556 | if (unlikely(skb_is_gso(skb))) { | ||
557 | ret = aq_nic_map_skb_lso(self, skb, dx); | ||
558 | ++dx; | ||
559 | } | 578 | } |
560 | 579 | ||
561 | ret += aq_nic_map_skb_frag(self, skb, dx); | 580 | exit: |
562 | |||
563 | return ret; | 581 | return ret; |
564 | } | 582 | } |
565 | 583 | ||
@@ -572,18 +590,13 @@ __acquires(&ring->lock) | |||
572 | unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; | 590 | unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; |
573 | unsigned int tc = 0U; | 591 | unsigned int tc = 0U; |
574 | unsigned int trys = AQ_CFG_LOCK_TRYS; | 592 | unsigned int trys = AQ_CFG_LOCK_TRYS; |
575 | int err = 0; | 593 | int err = NETDEV_TX_OK; |
576 | bool is_nic_in_bad_state; | 594 | bool is_nic_in_bad_state; |
577 | bool is_busy = false; | ||
578 | struct aq_ring_buff_s buffers[AQ_CFG_SKB_FRAGS_MAX]; | ||
579 | 595 | ||
580 | frags = skb_shinfo(skb)->nr_frags + 1; | 596 | frags = skb_shinfo(skb)->nr_frags + 1; |
581 | 597 | ||
582 | ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)]; | 598 | ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)]; |
583 | 599 | ||
584 | atomic_inc(&self->header.busy_count); | ||
585 | is_busy = true; | ||
586 | |||
587 | if (frags > AQ_CFG_SKB_FRAGS_MAX) { | 600 | if (frags > AQ_CFG_SKB_FRAGS_MAX) { |
588 | dev_kfree_skb_any(skb); | 601 | dev_kfree_skb_any(skb); |
589 | goto err_exit; | 602 | goto err_exit; |
@@ -602,23 +615,27 @@ __acquires(&ring->lock) | |||
602 | 615 | ||
603 | do { | 616 | do { |
604 | if (spin_trylock(&ring->header.lock)) { | 617 | if (spin_trylock(&ring->header.lock)) { |
605 | frags = aq_nic_map_skb(self, skb, &buffers[0]); | 618 | frags = aq_nic_map_skb(self, skb, ring); |
606 | 619 | ||
607 | aq_ring_tx_append_buffs(ring, &buffers[0], frags); | 620 | if (likely(frags)) { |
608 | 621 | err = self->aq_hw_ops.hw_ring_tx_xmit( | |
609 | err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw, | 622 | self->aq_hw, |
610 | ring, frags); | 623 | ring, frags); |
611 | if (err >= 0) { | 624 | if (err >= 0) { |
612 | if (aq_ring_avail_dx(ring) < | 625 | if (aq_ring_avail_dx(ring) < |
613 | AQ_CFG_SKB_FRAGS_MAX + 1) | 626 | AQ_CFG_SKB_FRAGS_MAX + 1) |
614 | aq_nic_ndev_queue_stop(self, ring->idx); | 627 | aq_nic_ndev_queue_stop( |
628 | self, | ||
629 | ring->idx); | ||
630 | |||
631 | ++ring->stats.tx.packets; | ||
632 | ring->stats.tx.bytes += skb->len; | ||
633 | } | ||
634 | } else { | ||
635 | err = NETDEV_TX_BUSY; | ||
615 | } | 636 | } |
616 | spin_unlock(&ring->header.lock); | ||
617 | 637 | ||
618 | if (err >= 0) { | 638 | spin_unlock(&ring->header.lock); |
619 | ++ring->stats.tx.packets; | ||
620 | ring->stats.tx.bytes += skb->len; | ||
621 | } | ||
622 | break; | 639 | break; |
623 | } | 640 | } |
624 | } while (--trys); | 641 | } while (--trys); |
@@ -629,8 +646,6 @@ __acquires(&ring->lock) | |||
629 | } | 646 | } |
630 | 647 | ||
631 | err_exit: | 648 | err_exit: |
632 | if (is_busy) | ||
633 | atomic_dec(&self->header.busy_count); | ||
634 | return err; | 649 | return err; |
635 | } | 650 | } |
636 | 651 | ||
@@ -942,7 +957,7 @@ int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg) | |||
942 | 957 | ||
943 | if (!netif_running(self->ndev)) { | 958 | if (!netif_running(self->ndev)) { |
944 | err = 0; | 959 | err = 0; |
945 | goto err_exit; | 960 | goto out; |
946 | } | 961 | } |
947 | rtnl_lock(); | 962 | rtnl_lock(); |
948 | if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) { | 963 | if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) { |
@@ -967,8 +982,9 @@ int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg) | |||
967 | netif_device_attach(self->ndev); | 982 | netif_device_attach(self->ndev); |
968 | netif_tx_start_all_queues(self->ndev); | 983 | netif_tx_start_all_queues(self->ndev); |
969 | } | 984 | } |
970 | rtnl_unlock(); | ||
971 | 985 | ||
972 | err_exit: | 986 | err_exit: |
987 | rtnl_unlock(); | ||
988 | out: | ||
973 | return err; | 989 | return err; |
974 | } | 990 | } |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h index f81738a71c42..e7d2711dc165 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h | |||
@@ -22,7 +22,6 @@ struct aq_nic_s { | |||
22 | unsigned int aq_vecs; | 22 | unsigned int aq_vecs; |
23 | unsigned int packet_filter; | 23 | unsigned int packet_filter; |
24 | unsigned int power_state; | 24 | unsigned int power_state; |
25 | bool is_ndev_registered; | ||
26 | u8 port; | 25 | u8 port; |
27 | struct aq_hw_ops aq_hw_ops; | 26 | struct aq_hw_ops aq_hw_ops; |
28 | struct aq_hw_caps_s aq_hw_caps; | 27 | struct aq_hw_caps_s aq_hw_caps; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index fed6ac51559f..0358e6072d45 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c | |||
@@ -104,25 +104,6 @@ int aq_ring_init(struct aq_ring_s *self) | |||
104 | return 0; | 104 | return 0; |
105 | } | 105 | } |
106 | 106 | ||
107 | void aq_ring_tx_append_buffs(struct aq_ring_s *self, | ||
108 | struct aq_ring_buff_s *buffer, | ||
109 | unsigned int buffers) | ||
110 | { | ||
111 | if (likely(self->sw_tail + buffers < self->size)) { | ||
112 | memcpy(&self->buff_ring[self->sw_tail], buffer, | ||
113 | sizeof(buffer[0]) * buffers); | ||
114 | } else { | ||
115 | unsigned int first_part = self->size - self->sw_tail; | ||
116 | unsigned int second_part = buffers - first_part; | ||
117 | |||
118 | memcpy(&self->buff_ring[self->sw_tail], buffer, | ||
119 | sizeof(buffer[0]) * first_part); | ||
120 | |||
121 | memcpy(&self->buff_ring[0], &buffer[first_part], | ||
122 | sizeof(buffer[0]) * second_part); | ||
123 | } | ||
124 | } | ||
125 | |||
126 | void aq_ring_tx_clean(struct aq_ring_s *self) | 107 | void aq_ring_tx_clean(struct aq_ring_s *self) |
127 | { | 108 | { |
128 | struct device *dev = aq_nic_get_dev(self->aq_nic); | 109 | struct device *dev = aq_nic_get_dev(self->aq_nic); |
@@ -209,7 +190,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget) | |||
209 | goto err_exit; | 190 | goto err_exit; |
210 | } | 191 | } |
211 | 192 | ||
212 | skb->dev = ndev; | ||
213 | skb_put(skb, buff->len); | 193 | skb_put(skb, buff->len); |
214 | } else { | 194 | } else { |
215 | skb = netdev_alloc_skb(ndev, ETH_HLEN); | 195 | skb = netdev_alloc_skb(ndev, ETH_HLEN); |
@@ -271,6 +251,8 @@ err_exit: | |||
271 | 251 | ||
272 | int aq_ring_rx_fill(struct aq_ring_s *self) | 252 | int aq_ring_rx_fill(struct aq_ring_s *self) |
273 | { | 253 | { |
254 | unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE + | ||
255 | (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1; | ||
274 | struct aq_ring_buff_s *buff = NULL; | 256 | struct aq_ring_buff_s *buff = NULL; |
275 | int err = 0; | 257 | int err = 0; |
276 | int i = 0; | 258 | int i = 0; |
@@ -283,7 +265,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self) | |||
283 | buff->len = AQ_CFG_RX_FRAME_MAX; | 265 | buff->len = AQ_CFG_RX_FRAME_MAX; |
284 | 266 | ||
285 | buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD | | 267 | buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD | |
286 | __GFP_COMP, 0); | 268 | __GFP_COMP, pages_order); |
287 | if (!buff->page) { | 269 | if (!buff->page) { |
288 | err = -ENOMEM; | 270 | err = -ENOMEM; |
289 | goto err_exit; | 271 | goto err_exit; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index fb296b3fa7fd..257254645068 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h | |||
@@ -146,9 +146,6 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, | |||
146 | int aq_ring_init(struct aq_ring_s *self); | 146 | int aq_ring_init(struct aq_ring_s *self); |
147 | void aq_ring_rx_deinit(struct aq_ring_s *self); | 147 | void aq_ring_rx_deinit(struct aq_ring_s *self); |
148 | void aq_ring_free(struct aq_ring_s *self); | 148 | void aq_ring_free(struct aq_ring_s *self); |
149 | void aq_ring_tx_append_buffs(struct aq_ring_s *ring, | ||
150 | struct aq_ring_buff_s *buffer, | ||
151 | unsigned int buffers); | ||
152 | void aq_ring_tx_clean(struct aq_ring_s *self); | 149 | void aq_ring_tx_clean(struct aq_ring_s *self); |
153 | int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget); | 150 | int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget); |
154 | int aq_ring_rx_fill(struct aq_ring_s *self); | 151 | int aq_ring_rx_fill(struct aq_ring_s *self); |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h index 4446bd90fd86..f6012b34abe6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h | |||
@@ -19,7 +19,6 @@ | |||
19 | struct aq_obj_s { | 19 | struct aq_obj_s { |
20 | spinlock_t lock; /* spinlock for nic/rings processing */ | 20 | spinlock_t lock; /* spinlock for nic/rings processing */ |
21 | atomic_t flags; | 21 | atomic_t flags; |
22 | atomic_t busy_count; | ||
23 | }; | 22 | }; |
24 | 23 | ||
25 | static inline void aq_utils_obj_set(atomic_t *flags, u32 mask) | 24 | static inline void aq_utils_obj_set(atomic_t *flags, u32 mask) |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 1f388054a6c7..a2b746a2dd50 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | |||
@@ -659,8 +659,8 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, | |||
659 | } | 659 | } |
660 | 660 | ||
661 | if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) { | 661 | if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) { |
662 | buff->len = (rxd_wb->pkt_len & | 662 | buff->len = rxd_wb->pkt_len % |
663 | (AQ_CFG_RX_FRAME_MAX - 1U)); | 663 | AQ_CFG_RX_FRAME_MAX; |
664 | buff->len = buff->len ? | 664 | buff->len = buff->len ? |
665 | buff->len : AQ_CFG_RX_FRAME_MAX; | 665 | buff->len : AQ_CFG_RX_FRAME_MAX; |
666 | buff->next = 0U; | 666 | buff->next = 0U; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index e7e694f693bd..cab2931dab9a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | |||
@@ -673,8 +673,8 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, | |||
673 | } | 673 | } |
674 | 674 | ||
675 | if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { | 675 | if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { |
676 | buff->len = (rxd_wb->pkt_len & | 676 | buff->len = rxd_wb->pkt_len % |
677 | (AQ_CFG_RX_FRAME_MAX - 1U)); | 677 | AQ_CFG_RX_FRAME_MAX; |
678 | buff->len = buff->len ? | 678 | buff->len = buff->len ? |
679 | buff->len : AQ_CFG_RX_FRAME_MAX; | 679 | buff->len : AQ_CFG_RX_FRAME_MAX; |
680 | buff->next = 0U; | 680 | buff->next = 0U; |