62 files changed, 656 insertions(+), 352 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index 3fc360523bc9..41b3f3f864e8 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -19,6 +19,12 @@ The following properties are common to the Ethernet controllers:
 - phy: the same as "phy-handle" property, not recommended for new bindings.
 - phy-device: the same as "phy-handle" property, not recommended for new
   bindings.
+- rx-fifo-depth: the size of the controller's receive fifo in bytes. This
+  is used for components that can have configurable receive fifo sizes,
+  and is useful for determining certain configuration settings such as
+  flow control thresholds.
+- tx-fifo-depth: the size of the controller's transmit fifo in bytes. This
+  is used for components that can have configurable fifo sizes.
 
 Child nodes of the Ethernet controller are typically the individual PHY devices
 connected via the MDIO bus (sometimes the MDIO bus controller is separate).
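How a driver might consume the new binding -- a minimal sketch, not part of this series; of_property_read_u32() is the standard OF helper, and the fallback values here are invented:

/* probe-time lookup; np is the controller's device_node */
u32 tx_fifo_depth = 2048;	/* assumed hardware default */
u32 rx_fifo_depth = 2048;	/* assumed hardware default */

/* of_property_read_u32() returns non-zero and leaves the output
 * untouched when the property is absent, so the defaults survive
 */
of_property_read_u32(np, "tx-fifo-depth", &tx_fifo_depth);
of_property_read_u32(np, "rx-fifo-depth", &rx_fifo_depth);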
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 29aca8591b16..f34fc3c81a75 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -45,6 +45,8 @@ Optional properties:
   If not passed then the system clock will be used and this is fine on some
   platforms.
 - snps,burst_len: The AXI burst lenth value of the AXI BUS MODE register.
+- tx-fifo-depth: See ethernet.txt file in the same directory
+- rx-fifo-depth: See ethernet.txt file in the same directory
 
 Examples:
 
@@ -59,6 +61,8 @@ Examples:
 		phy-mode = "gmii";
 		snps,multicast-filter-bins = <256>;
 		snps,perfect-filter-entries = <128>;
+		rx-fifo-depth = <16384>;
+		tx-fifo-depth = <16384>;
 		clocks = <&clock>;
 		clock-names = "stmmaceth";
 	};
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 6791fd16272c..3ef0cf9f5c44 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -73,7 +73,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 	c4iw_init_wr_wait(&wr_wait);
 	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
 
-	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 657b89b1d291..915ad04a827e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -846,6 +846,11 @@ static int ipoib_get_iflink(const struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
+	/* parent interface */
+	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+		return dev->ifindex;
+
+	/* child/vlan interface */
 	return priv->parent->ifindex;
 }
 
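The effect is visible from user space: sysfs exposes ndo_get_iflink() through the iflink attribute, so a parent device should now report iflink == ifindex while a child reports its parent's index. A small standalone check (ib0 is only an example name):

#include <stdio.h>

static long read_long(const char *path)
{
	FILE *f = fopen(path, "r");
	long v = -1;

	if (f) {
		if (fscanf(f, "%ld", &v) != 1)
			v = -1;
		fclose(f);
	}
	return v;
}

int main(void)
{
	long ifindex = read_long("/sys/class/net/ib0/ifindex");
	long iflink  = read_long("/sys/class/net/ib0/iflink");

	printf("ifindex=%ld iflink=%ld (%s)\n", ifindex, iflink,
	       ifindex == iflink ? "parent device" : "child interface");
	return 0;
}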
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 4dd1313056a4..fca1a882de27 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -58,6 +58,7 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
 	/* MTU will be reset when mcast join happens */
 	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
 	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+	priv->parent = ppriv->dev;
 	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
 
 	result = ipoib_set_dev_features(priv, ppriv->ca);
@@ -84,8 +85,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
 		goto register_failed;
 	}
 
-	priv->parent = ppriv->dev;
-
 	ipoib_create_debug_files(priv->dev);
 
 	/* RTNL childs don't need proprietary sysfs entries */
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index fc8d3b6ffe8e..9f0c2b9d58ae 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -602,8 +602,6 @@ static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
 	u32 high = 0;
 
 	if (s->reg >= 0x100) {
-		int ret;
-
 		ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
 					 s->reg - 0x100);
 		if (ret < 0)
@@ -902,14 +900,16 @@ static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
 static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int reg, ret;
+	int reg, ret = 0;
 	u8 oldstate;
 
 	mutex_lock(&ps->smi_mutex);
 
 	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
-	if (reg < 0)
+	if (reg < 0) {
+		ret = reg;
 		goto abort;
+	}
 
 	oldstate = reg & PORT_CONTROL_STATE_MASK;
 	if (oldstate != state) {
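A standalone toy of the error-propagation fix above (names invented): the negative value from the failed register read is now copied into ret before jumping to the unlock path, so callers no longer see an uninitialized return value:

#include <errno.h>
#include <stdio.h>

static int reg_read(void) { return -EIO; }	/* pretend the read fails */

static int set_port_state(void)
{
	int reg, ret = 0;	/* initialized, as in the patch */

	/* lock(); */
	reg = reg_read();
	if (reg < 0) {
		ret = reg;	/* propagate the error to the caller */
		goto abort;
	}
	/* ... program the new port state ... */
abort:
	/* unlock(); */
	return ret;
}

int main(void)
{
	printf("set_port_state() = %d\n", set_port_state());
	return 0;
}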
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 79ea35869e1e..90a76306ad0f 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -376,8 +376,13 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
 	u16 pktlength;
 	u16 pktstatus;
 
-	while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
-	       (count < limit)) {
+	/* Check for count < limit first as get_rx_status is changing
+	 * the response-fifo so we must process the next packet
+	 * after calling get_rx_status if a response is pending.
+	 * (reading the last byte of the response pops the value from the fifo.)
+	 */
+	while ((count < limit) &&
+	       ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
 		pktstatus = rxstatus >> 16;
 		pktlength = rxstatus & 0xffff;
 
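Why the operand order matters: && short-circuits left to right, so testing the budget first keeps the side-effecting status read from popping a response that would then be dropped. A standalone toy of the same shape (values invented):

#include <stdio.h>

static int fifo[] = { 3, 2, 1, 0 };
static int pos;

static int get_rx_status(void)
{
	return fifo[pos++];	/* reading pops one entry, like the hardware fifo */
}

int main(void)
{
	int count = 0, limit = 2, rxstatus;

	/* budget checked first: get_rx_status() is never called for a
	 * packet we are not going to process, so no response is lost
	 */
	while ((count < limit) && ((rxstatus = get_rx_status()) != 0)) {
		printf("processing status %d\n", rxstatus);
		count++;
	}
	printf("entries left in fifo: %d\n", 3 - pos);
	return 0;
}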
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4085c4b31047..355d5fea5be9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -531,20 +531,8 @@ struct bnx2x_fastpath {
 	struct napi_struct	napi;
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-	unsigned int state;
-#define BNX2X_FP_STATE_IDLE		      0
-#define BNX2X_FP_STATE_NAPI		(1 << 0)    /* NAPI owns this FP */
-#define BNX2X_FP_STATE_POLL		(1 << 1)    /* poll owns this FP */
-#define BNX2X_FP_STATE_DISABLED		(1 << 2)
-#define BNX2X_FP_STATE_NAPI_YIELD	(1 << 3)    /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD	(1 << 4)    /* poll yielded this FP */
-#define BNX2X_FP_OWNED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
-#define BNX2X_FP_YIELD	(BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED	(BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
-#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
-	/* protect state */
-	spinlock_t lock;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+	unsigned long		busy_poll_state;
+#endif
 
 	union host_hc_status_block	status_blk;
 	/* chip independent shortcuts into sb structure */
@@ -619,104 +607,83 @@ struct bnx2x_fastpath {
 #define bnx2x_fp_qstats(bp, fp)	(&((bp)->fp_stats[(fp)->index].eth_q_stats))
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+
+enum bnx2x_fp_state {
+	BNX2X_STATE_FP_NAPI	= BIT(0), /* NAPI handler owns the queue */
+
+	BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */
+	BNX2X_STATE_FP_NAPI_REQ = BIT(1),
+
+	BNX2X_STATE_FP_POLL_BIT = 2,
+	BNX2X_STATE_FP_POLL     = BIT(2), /* busy_poll owns the queue */
+
+	BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */
+};
+
+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
 {
-	spin_lock_init(&fp->lock);
-	fp->state = BNX2X_FP_STATE_IDLE;
+	WRITE_ONCE(fp->busy_poll_state, 0);
 }
 
 /* called from the device poll routine to get ownership of a FP */
 static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 {
-	bool rc = true;
-
-	spin_lock_bh(&fp->lock);
-	if (fp->state & BNX2X_FP_LOCKED) {
-		WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
-		fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
-		rc = false;
-	} else {
-		/* we don't care if someone yielded */
-		fp->state = BNX2X_FP_STATE_NAPI;
+	unsigned long prev, old = READ_ONCE(fp->busy_poll_state);
+
+	while (1) {
+		switch (old) {
+		case BNX2X_STATE_FP_POLL:
+			/* make sure bnx2x_fp_lock_poll() wont starve us */
+			set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT,
+				&fp->busy_poll_state);
+			/* fallthrough */
+		case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ:
+			return false;
+		default:
+			break;
+		}
+		prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI);
+		if (unlikely(prev != old)) {
+			old = prev;
+			continue;
+		}
+		return true;
 	}
-	spin_unlock_bh(&fp->lock);
-	return rc;
 }
 
-/* returns true is someone tried to get the FP while napi had it */
-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
-	bool rc = false;
-
-	spin_lock_bh(&fp->lock);
-	WARN_ON(fp->state &
-		(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
-
-	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
-		rc = true;
-
-	/* state ==> idle, unless currently disabled */
-	fp->state &= BNX2X_FP_STATE_DISABLED;
-	spin_unlock_bh(&fp->lock);
-	return rc;
+	smp_wmb();
+	fp->busy_poll_state = 0;
 }
 
 /* called from bnx2x_low_latency_poll() */
 static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
 {
-	bool rc = true;
-
-	spin_lock_bh(&fp->lock);
-	if ((fp->state & BNX2X_FP_LOCKED)) {
-		fp->state |= BNX2X_FP_STATE_POLL_YIELD;
-		rc = false;
-	} else {
-		/* preserve yield marks */
-		fp->state |= BNX2X_FP_STATE_POLL;
-	}
-	spin_unlock_bh(&fp->lock);
-	return rc;
+	return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0;
 }
 
-/* returns true if someone tried to get the FP while it was locked */
-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 {
-	bool rc = false;
-
-	spin_lock_bh(&fp->lock);
-	WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
-
-	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
-		rc = true;
-
-	/* state ==> idle, unless currently disabled */
-	fp->state &= BNX2X_FP_STATE_DISABLED;
-	spin_unlock_bh(&fp->lock);
-	return rc;
+	smp_mb__before_atomic();
+	clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state);
 }
 
-/* true if a socket is polling, even if it did not get the lock */
+/* true if a socket is polling */
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
-	WARN_ON(!(fp->state & BNX2X_FP_OWNED));
-	return fp->state & BNX2X_FP_USER_PEND;
+	return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL;
}
 
 /* false if fp is currently owned */
 static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
 {
-	int rc = true;
-
-	spin_lock_bh(&fp->lock);
-	if (fp->state & BNX2X_FP_OWNED)
-		rc = false;
-	fp->state |= BNX2X_FP_STATE_DISABLED;
-	spin_unlock_bh(&fp->lock);
+	set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state);
+	return !bnx2x_fp_ll_polling(fp);
 
-	return rc;
 }
 #else
-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
 {
 }
 
@@ -725,9 +692,8 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 	return true;
 }
 
-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
-	return false;
 }
 
 static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
@@ -735,9 +701,8 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
 	return false;
 }
 
-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 {
-	return false;
 }
 
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
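The rewrite replaces the per-queue spinlock with a single atomic word whose transitions go through cmpxchg(). A standalone C11 sketch of the core idea -- ownership is taken only when the word is observed idle (states simplified from the enum above, not the driver's full protocol):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { STATE_IDLE = 0, STATE_NAPI = 1 << 0, STATE_POLL = 1 << 2 };

static _Atomic unsigned long state = STATE_IDLE;

/* busy-poll path: succeeds only when nobody owns the queue */
static bool lock_poll(void)
{
	unsigned long expected = STATE_IDLE;

	return atomic_compare_exchange_strong(&state, &expected, STATE_POLL);
}

static void unlock_poll(void)
{
	atomic_fetch_and(&state, ~(unsigned long)STATE_POLL);
}

int main(void)
{
	printf("first lock_poll:  %d\n", lock_poll());	/* 1: got ownership */
	printf("second lock_poll: %d\n", lock_poll());	/* 0: already owned */
	unlock_poll();
	printf("after unlock:     %d\n", lock_poll());	/* 1: free again */
	return 0;
}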
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 0a9faa134a9a..2f63467bce46 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
 	int i;
 
 	for_each_rx_queue_cnic(bp, i) {
-		bnx2x_fp_init_lock(&bp->fp[i]);
+		bnx2x_fp_busy_poll_init(&bp->fp[i]);
 		napi_enable(&bnx2x_fp(bp, i, napi));
 	}
 }
@@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
 	int i;
 
 	for_each_eth_queue(bp, i) {
-		bnx2x_fp_init_lock(&bp->fp[i]);
+		bnx2x_fp_busy_poll_init(&bp->fp[i]);
 		napi_enable(&bnx2x_fp(bp, i, napi));
 	}
 }
@@ -3191,9 +3191,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 		}
 	}
 
+	bnx2x_fp_unlock_napi(fp);
+
 	/* Fall out from the NAPI loop if needed */
-	if (!bnx2x_fp_unlock_napi(fp) &&
-	    !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+	if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
 
 		/* No need to update SB for FCoE L2 ring as long as
 		 * it's connected to the default SB and the SB
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6de054404156..803d91beec6f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1140,6 +1140,10 @@ static int set_filter_wr(struct adapter *adapter, int fidx)
 	struct fw_filter_wr *fwr;
 	unsigned int ftid;
 
+	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
 	/* If the new filter requires loopback Destination MAC and/or VLAN
 	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
 	 * the filter.
@@ -1147,19 +1151,21 @@ static int set_filter_wr(struct adapter *adapter, int fidx)
 	if (f->fs.newdmac || f->fs.newvlan) {
 		/* allocate L2T entry for new filter */
 		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
-		if (f->l2t == NULL)
+		if (f->l2t == NULL) {
+			kfree_skb(skb);
 			return -EAGAIN;
+		}
 		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
 					 f->fs.eport, f->fs.dmac)) {
 			cxgb4_l2t_release(f->l2t);
 			f->l2t = NULL;
+			kfree_skb(skb);
 			return -ENOMEM;
 		}
 	}
 
 	ftid = adapter->tids.ftid_base + fidx;
 
-	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
 	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
 	memset(fwr, 0, sizeof(*fwr));
 
@@ -1257,7 +1263,10 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
 	len = sizeof(*fwr);
 	ftid = adapter->tids.ftid_base + fidx;
 
-	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
 	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
 	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
 
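Both cxgb4 hunks follow the same shape: allocate the work-request buffer up front with a failable GFP_KERNEL and free it on every later error path, instead of leaning on __GFP_NOFAIL. A standalone toy of that shape (all names invented):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct request { char payload[64]; };

static int send_request(int resource_alloc_fails)
{
	struct request *req = malloc(sizeof(*req));

	if (!req)
		return -ENOMEM;

	if (resource_alloc_fails) {
		free(req);	/* every later error path releases it */
		return -EAGAIN;
	}

	/* ... fill req and hand it off; ownership transfers on success ... */
	free(req);		/* stand-in for the hand-off */
	return 0;
}

int main(void)
{
	printf("failure path: %d\n", send_request(1));	/* -EAGAIN, no leak */
	printf("success path: %d\n", send_request(0));	/* 0 */
	return 0;
}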
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index b72d238695d7..3b39fdddeb57 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -413,6 +413,15 @@ out:
 	return count;
 }
 
+static void hip04_start_tx_timer(struct hip04_priv *priv)
+{
+	unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;
+
+	/* allow timer to fire after half the time at the earliest */
+	hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
+			       ns, HRTIMER_MODE_REL);
+}
+
 static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct hip04_priv *priv = netdev_priv(ndev);
@@ -466,8 +475,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		}
 	} else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
 		/* cleanup not pending yet, start a new timer */
-		hrtimer_start_expires(&priv->tx_coalesce_timer,
-				      HRTIMER_MODE_REL);
+		hip04_start_tx_timer(priv);
 	}
 
 	return NETDEV_TX_OK;
@@ -549,7 +557,7 @@ done:
 	/* clean up tx descriptors and start a new timer if necessary */
 	tx_remaining = hip04_tx_reclaim(ndev, false);
 	if (rx < budget && tx_remaining)
-		hrtimer_start_expires(&priv->tx_coalesce_timer, HRTIMER_MODE_REL);
+		hip04_start_tx_timer(priv);
 
 	return rx;
 }
@@ -809,7 +817,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
 	struct hip04_priv *priv;
 	struct resource *res;
 	unsigned int irq;
-	ktime_t txtime;
 	int ret;
 
 	ndev = alloc_etherdev(sizeof(struct hip04_priv));
@@ -846,9 +853,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
 	 */
 	priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
 	priv->tx_coalesce_usecs = 200;
-	/* allow timer to fire after half the time at the earliest */
-	txtime = ktime_set(0, priv->tx_coalesce_usecs * NSEC_PER_USEC / 2);
-	hrtimer_set_expires_range(&priv->tx_coalesce_timer, txtime, txtime);
 	priv->tx_coalesce_timer.function = tx_done;
 
 	priv->map = syscon_node_to_regmap(arg.np);
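The helper now computes a fresh relative expiry on every arm instead of reusing a range stored once at probe. With the driver's default of tx_coalesce_usecs = 200, hrtimer_start_range_ns() is given an expiry of half the coalesce period plus an equal slack, so the timer may fire anywhere between half and the full period. A standalone check of that arithmetic:

#include <stdio.h>

#define NSEC_PER_USEC 1000UL

int main(void)
{
	unsigned long tx_coalesce_usecs = 200;	/* driver default from probe */
	unsigned long earliest = tx_coalesce_usecs * NSEC_PER_USEC / 2;
	unsigned long latest = earliest + earliest;	/* expiry + slack */

	printf("timer window: %lu-%lu ns\n", earliest, latest);
	return 0;
}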
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index d596f6624025..0bae22da014d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -2397,6 +2397,7 @@ i40e_aq_erase_nvm_exit:
 #define I40E_DEV_FUNC_CAP_LED		0x61
 #define I40E_DEV_FUNC_CAP_SDP		0x62
 #define I40E_DEV_FUNC_CAP_MDIO		0x63
+#define I40E_DEV_FUNC_CAP_WR_CSR_PROT	0x64
 
 /**
  * i40e_parse_discover_capabilities
@@ -2541,11 +2542,18 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
 			p->fd_filters_guaranteed = number;
 			p->fd_filters_best_effort = logical_id;
 			break;
+		case I40E_DEV_FUNC_CAP_WR_CSR_PROT:
+			p->wr_csr_prot = (u64)number;
+			p->wr_csr_prot |= (u64)logical_id << 32;
+			break;
 		default:
 			break;
 		}
 	}
 
+	if (p->fcoe)
+		i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
+
 	/* Software override ensuring FCoE is disabled if npar or mfp
 	 * mode because it is not supported in these modes.
 	 */
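The new write-protect capability arrives split across two 32-bit fields of the capability element; a standalone illustration of the reassembly done in the hunk above, with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* example halves as reported in the capability element */
	uint32_t number = 0x00000001, logical_id = 0x00000002;
	uint64_t wr_csr_prot = (uint64_t)number | ((uint64_t)logical_id << 32);

	printf("wr_csr_prot = 0x%016llx\n", (unsigned long long)wr_csr_prot);
	return 0;
}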
@@ -3503,6 +3511,63 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
 }
 
 /**
+ * i40e_aq_debug_dump
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table id within cluster
+ * @start_index: index of line in the block to read
+ * @buff_size: dump buffer size
+ * @buff: dump buffer
+ * @ret_buff_size: actual buffer size returned
+ * @ret_next_table: next block to read
+ * @ret_next_index: next index to read
+ *
+ * Dump internal FW/HW data for debug purposes.
+ *
+ **/
+i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+			       u8 table_id, u32 start_index, u16 buff_size,
+			       void *buff, u16 *ret_buff_size,
+			       u8 *ret_next_table, u32 *ret_next_index,
+			       struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_debug_dump_internals *cmd =
+		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+	struct i40e_aqc_debug_dump_internals *resp =
+		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+	i40e_status status;
+
+	if (buff_size == 0 || !buff)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_debug_dump_internals);
+	/* Indirect Command */
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	if (buff_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+	cmd->cluster_id = cluster_id;
+	cmd->table_id = table_id;
+	cmd->idx = cpu_to_le32(start_index);
+
+	desc.datalen = cpu_to_le16(buff_size);
+
+	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+	if (!status) {
+		if (ret_buff_size)
+			*ret_buff_size = le16_to_cpu(desc.datalen);
+		if (ret_next_table)
+			*ret_next_table = resp->table_id;
+		if (ret_next_index)
+			*ret_next_index = le32_to_cpu(resp->idx);
+	}
+
+	return status;
+}
+
+/**
  * i40e_read_bw_from_alt_ram
  * @hw: pointer to the hardware structure
  * @max_bw: pointer for max_bw read
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 6e1466756760..2547aa21b2ca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -419,7 +419,7 @@ static void i40e_cee_to_dcb_v1_config(
 {
 	u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status);
 	u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
-	u8 i, tc, err, sync, oper;
+	u8 i, tc, err;
 
 	/* CEE PG data to ETS config */
 	dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
@@ -456,9 +456,7 @@ static void i40e_cee_to_dcb_v1_config(
 	status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
 		  I40E_AQC_CEE_APP_STATUS_SHIFT;
 	err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
-	sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
-	oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
-	/* Add APPs if Error is False and Oper/Sync is True */
+	/* Add APPs if Error is False */
 	if (!err) {
 		/* CEE operating configuration supports FCoE/iSCSI/FIP only */
 		dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index daa88263af66..34170eabca7d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1388,6 +1388,50 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 					 r_cfg->app[i].selector,
 					 r_cfg->app[i].protocolid);
 			}
+		} else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
+			int cluster_id, table_id;
+			int index, ret;
+			u16 buff_len = 4096;
+			u32 next_index;
+			u8 next_table;
+			u8 *buff;
+			u16 rlen;
+
+			cnt = sscanf(&cmd_buf[18], "%i %i %i",
+				     &cluster_id, &table_id, &index);
+			if (cnt != 3) {
+				dev_info(&pf->pdev->dev,
+					 "dump debug fwdata <cluster_id> <table_id> <index>\n");
+				goto command_write_done;
+			}
+
+			dev_info(&pf->pdev->dev,
+				 "AQ debug dump fwdata params %x %x %x %x\n",
+				 cluster_id, table_id, index, buff_len);
+			buff = kzalloc(buff_len, GFP_KERNEL);
+			if (!buff)
+				goto command_write_done;
+
+			ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
+						 index, buff_len, buff, &rlen,
+						 &next_table, &next_index,
+						 NULL);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					 "debug dump fwdata AQ Failed %d 0x%x\n",
+					 ret, pf->hw.aq.asq_last_status);
+				kfree(buff);
+				buff = NULL;
+				goto command_write_done;
+			}
+			dev_info(&pf->pdev->dev,
+				 "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
+				 rlen, next_table, next_index);
+			print_hex_dump(KERN_INFO, "AQ buffer WB: ",
+				       DUMP_PREFIX_OFFSET, 16, 1,
+				       buff, rlen, true);
+			kfree(buff);
+			buff = NULL;
 		} else {
 			dev_info(&pf->pdev->dev,
 				 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
@@ -1903,6 +1947,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		dev_info(&pf->pdev->dev, "  dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
 		dev_info(&pf->pdev->dev, "  dump desc aq\n");
 		dev_info(&pf->pdev->dev, "  dump reset stats\n");
+		dev_info(&pf->pdev->dev, "  dump debug fwdata <cluster_id> <table_id> <index>\n");
 		dev_info(&pf->pdev->dev, "  msg_enable [level]\n");
 		dev_info(&pf->pdev->dev, "  read <reg>\n");
 		dev_info(&pf->pdev->dev, "  write <reg> <value>\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index c848b1862512..4cbaaeb902c4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -356,8 +356,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
 	/* Set speed and duplex */
 	switch (link_speed) {
 	case I40E_LINK_SPEED_40GB:
-		/* need a SPEED_40000 in ethtool.h */
-		ethtool_cmd_speed_set(ecmd, 40000);
+		ethtool_cmd_speed_set(ecmd, SPEED_40000);
 		break;
 	case I40E_LINK_SPEED_20GB:
 		ethtool_cmd_speed_set(ecmd, SPEED_20000);
@@ -1914,6 +1913,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
 	else
 		fsp->ring_cookie = rule->q_index;
 
+	if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
+		struct i40e_vsi *vsi;
+
+		vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
+		if (vsi && vsi->type == I40E_VSI_SRIOV) {
+			fsp->h_ext.data[1] = htonl(vsi->vf_id);
+			fsp->m_ext.data[1] = htonl(0x1);
+		}
+	}
+
 	return 0;
 }
 
@@ -2207,6 +2216,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 	struct i40e_fdir_filter *input;
 	struct i40e_pf *pf;
 	int ret = -EINVAL;
+	u16 vf_id;
 
 	if (!vsi)
 		return -EINVAL;
@@ -2267,7 +2277,22 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 	input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
 	input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
 
+	if (ntohl(fsp->m_ext.data[1])) {
+		if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) {
+			netif_info(pf, drv, vsi->netdev, "Invalid VF id\n");
+			goto free_input;
+		}
+		vf_id = ntohl(fsp->h_ext.data[1]);
+		/* Find vsi id from vf id and override dest vsi */
+		input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
+		if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
+			netif_info(pf, drv, vsi->netdev, "Invalid queue id\n");
+			goto free_input;
+		}
+	}
+
 	ret = i40e_add_del_fdir(vsi, input, true);
+free_input:
 	if (ret)
 		kfree(input);
 	else
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 63de3f4b7a94..24481cd7e59a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 1
+#define DRV_VERSION_BUILD 2
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -7301,7 +7301,7 @@ err_out:
  * i40e_init_interrupt_scheme - Determine proper interrupt scheme
  * @pf: board private structure to initialize
  **/
-static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
+static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
 {
 	int vectors = 0;
 	ssize_t size;
@@ -7343,11 +7343,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 	/* set up vector assignment tracking */
 	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
 	pf->irq_pile = kzalloc(size, GFP_KERNEL);
+	if (!pf->irq_pile) {
+		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
+		return -ENOMEM;
+	}
 	pf->irq_pile->num_entries = vectors;
 	pf->irq_pile->search_hint = 0;
 
-	/* track first vector for misc interrupts */
+	/* track first vector for misc interrupts, ignore return */
 	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
+
+	return 0;
 }
 
 /**
@@ -9827,7 +9833,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* set up the main switch operations */
 	i40e_determine_queue_usage(pf);
-	i40e_init_interrupt_scheme(pf);
+	err = i40e_init_interrupt_scheme(pf);
+	if (err)
+		goto err_switch_setup;
 
 	/* The number of VSIs reported by the FW is the minimum guaranteed
 	 * to us; HW supports far more and we share the remaining pool with
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index e49acd2accd3..554e49d02683 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -821,13 +821,12 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
 						 int *errno)
 {
 	enum i40e_nvmupd_cmd upd_cmd;
-	u8 transaction, module;
+	u8 transaction;
 
 	/* anything that doesn't match a recognized case is an error */
 	upd_cmd = I40E_NVMUPD_INVALID;
 
 	transaction = i40e_nvmupd_get_transaction(cmd->config);
-	module = i40e_nvmupd_get_module(cmd->config);
 
 	/* limits on data size */
 	if ((cmd->data_size < 1) ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index fea0d37ecc72..7b34f1e660ea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -303,4 +303,9 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
 		u16 vsi_seid, u16 queue, bool is_add,
 		struct i40e_control_filter_stats *stats,
 		struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+			       u8 table_id, u32 start_index, u16 buff_size,
+			       void *buff, u16 *ret_buff_size,
+			       u8 *ret_next_table, u32 *ret_next_index,
+			       struct i40e_asq_cmd_details *cmd_details);
 #endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 67c7bc9e9c21..568e855da0f3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -242,6 +242,7 @@ struct i40e_hw_capabilities {
 	u8 rx_buf_chain_len;
 	u32 enabled_tcmap;
 	u32 maxtc;
+	u64 wr_csr_prot;
 };
 
 struct i40e_mac_info {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4d69e1f04901..78d1c4ff565e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -26,6 +26,129 @@
 
 #include "i40e.h"
 
+/*********************notification routines***********************/
+
+/**
+ * i40e_vc_vf_broadcast
+ * @pf: pointer to the PF structure
+ * @opcode: operation code
+ * @retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send a message to all VFs on a given PF
+ **/
+static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
+				 enum i40e_virtchnl_ops v_opcode,
+				 i40e_status v_retval, u8 *msg,
+				 u16 msglen)
+{
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vf *vf = pf->vf;
+	int i;
+
+	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+		/* Not all vfs are enabled so skip the ones that are not */
+		if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+		    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+			continue;
+
+		/* Ignore return value on purpose - a given VF may fail, but
+		 * we need to keep going and send to all of them
+		 */
+		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
+				       msg, msglen, NULL);
+	}
+}
+
+/**
+ * i40e_vc_notify_link_state
+ * @vf: pointer to the VF structure
+ *
+ * send a link status message to a single VF
+ **/
+static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
+{
+	struct i40e_virtchnl_pf_event pfe;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_link_status *ls = &pf->hw.phy.link_info;
+	int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+	if (vf->link_forced) {
+		pfe.event_data.link_event.link_status = vf->link_up;
+		pfe.event_data.link_event.link_speed =
+			(vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+	} else {
+		pfe.event_data.link_event.link_status =
+			ls->link_info & I40E_AQ_LINK_UP;
+		pfe.event_data.link_event.link_speed = ls->link_speed;
+	}
+	i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+			       0, (u8 *)&pfe, sizeof(pfe), NULL);
+}
+
+/**
+ * i40e_vc_notify_link_state
+ * @pf: pointer to the PF structure
+ *
+ * send a link status message to all VFs on a given PF
+ **/
+void i40e_vc_notify_link_state(struct i40e_pf *pf)
+{
+	int i;
+
+	for (i = 0; i < pf->num_alloc_vfs; i++)
+		i40e_vc_notify_vf_link_state(&pf->vf[i]);
+}
+
+/**
+ * i40e_vc_notify_reset
+ * @pf: pointer to the PF structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ **/
+void i40e_vc_notify_reset(struct i40e_pf *pf)
+{
+	struct i40e_virtchnl_pf_event pfe;
+
+	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0,
+			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+}
+
+/**
+ * i40e_vc_notify_vf_reset
+ * @vf: pointer to the VF structure
+ *
+ * indicate a pending reset to the given VF
+ **/
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
+{
+	struct i40e_virtchnl_pf_event pfe;
+	int abs_vf_id;
+
+	/* validate the request */
+	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+		return;
+
+	/* verify if the VF is in either init or active before proceeding */
+	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+	    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+		return;
+
+	abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+
+	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+			       0, (u8 *)&pfe,
+			       sizeof(struct i40e_virtchnl_pf_event), NULL);
+}
 /***********************misc routines*****************************/
 
 /**
@@ -689,6 +812,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 		}
 	}
 
+	if (flr)
+		usleep_range(10000, 20000);
+
 	if (!rsd)
 		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
 			vf->vf_id);
@@ -733,6 +859,11 @@ void i40e_free_vfs(struct i40e_pf *pf)
 	while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
 		usleep_range(1000, 2000);
 
+	for (i = 0; i < pf->num_alloc_vfs; i++)
+		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+			i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
+					       false);
+
 	/* Disable IOV before freeing resources. This lets any VF drivers
 	 * running in the host get themselves cleaned up before we yank
 	 * the carpet out from underneath their feet.
@@ -1762,6 +1893,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
 		break;
 	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
 		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
+		i40e_vc_notify_vf_link_state(vf);
 		break;
 	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
 		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
@@ -1835,118 +1967,6 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
 }
 
 /**
- * i40e_vc_vf_broadcast
- * @pf: pointer to the PF structure
- * @opcode: operation code
- * @retval: return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * send a message to all VFs on a given PF
- **/
-static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
-				 enum i40e_virtchnl_ops v_opcode,
-				 i40e_status v_retval, u8 *msg,
-				 u16 msglen)
-{
-	struct i40e_hw *hw = &pf->hw;
-	struct i40e_vf *vf = pf->vf;
-	int i;
-
-	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
-		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-		/* Not all VFs are enabled so skip the ones that are not */
-		if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
-		    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
-			continue;
-
-		/* Ignore return value on purpose - a given VF may fail, but
-		 * we need to keep going and send to all of them
-		 */
-		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
-				       msg, msglen, NULL);
-	}
-}
-
-/**
- * i40e_vc_notify_link_state
- * @pf: pointer to the PF structure
- *
- * send a link status message to all VFs on a given PF
- **/
-void i40e_vc_notify_link_state(struct i40e_pf *pf)
-{
-	struct i40e_virtchnl_pf_event pfe;
-	struct i40e_hw *hw = &pf->hw;
-	struct i40e_vf *vf = pf->vf;
-	struct i40e_link_status *ls = &pf->hw.phy.link_info;
-	int i;
-
-	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
-	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
-	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
-		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-		if (vf->link_forced) {
-			pfe.event_data.link_event.link_status = vf->link_up;
-			pfe.event_data.link_event.link_speed =
-				(vf->link_up ? I40E_LINK_SPEED_40GB : 0);
-		} else {
-			pfe.event_data.link_event.link_status =
-				ls->link_info & I40E_AQ_LINK_UP;
-			pfe.event_data.link_event.link_speed = ls->link_speed;
-		}
-		i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
-				       0, (u8 *)&pfe, sizeof(pfe),
-				       NULL);
-	}
-}
-
-/**
- * i40e_vc_notify_reset
- * @pf: pointer to the PF structure
- *
- * indicate a pending reset to all VFs on a given PF
- **/
-void i40e_vc_notify_reset(struct i40e_pf *pf)
-{
-	struct i40e_virtchnl_pf_event pfe;
-
-	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
-	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
-	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
-			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
-}
-
-/**
- * i40e_vc_notify_vf_reset
- * @vf: pointer to the VF structure
- *
- * indicate a pending reset to the given VF
- **/
-void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
-{
-	struct i40e_virtchnl_pf_event pfe;
-	int abs_vf_id;
-
-	/* validate the request */
-	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
-		return;
-
-	/* verify if the VF is in either init or active before proceeding */
-	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
-	    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
-		return;
-
-	abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
-
-	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
-	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
-	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
-			       I40E_SUCCESS, (u8 *)&pfe,
-			       sizeof(struct i40e_virtchnl_pf_event), NULL);
-}
-
-/**
  * i40e_ndo_set_vf_mac
  * @netdev: network interface device structure
  * @vf_id: VF identifier
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 9c79cb6abb2b..ec9d83a93379 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -242,6 +242,7 @@ struct i40e_hw_capabilities {
 	u8 rx_buf_chain_len;
 	u32 enabled_tcmap;
 	u32 maxtc;
+	u64 wr_csr_prot;
 };
 
 struct i40e_mac_info {
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 34c8565031f6..1b98c25b3092 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h | |||
@@ -225,7 +225,6 @@ struct i40evf_adapter { | |||
225 | #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED | 225 | #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED |
226 | /* flags for admin queue service task */ | 226 | /* flags for admin queue service task */ |
227 | u32 aq_required; | 227 | u32 aq_required; |
228 | u32 aq_pending; | ||
229 | #define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1) | 228 | #define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1) |
230 | #define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) | 229 | #define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) |
231 | #define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2) | 230 | #define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2) |
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 6d5f3b21c68a..7c53aca4b5a6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c | |||
@@ -1008,7 +1008,6 @@ void i40evf_down(struct i40evf_adapter *adapter) | |||
1008 | adapter->state != __I40EVF_RESETTING) { | 1008 | adapter->state != __I40EVF_RESETTING) { |
1009 | /* cancel any current operation */ | 1009 | /* cancel any current operation */ |
1010 | adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; | 1010 | adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; |
1011 | adapter->aq_pending = 0; | ||
1012 | /* Schedule operations to close down the HW. Don't wait | 1011 | /* Schedule operations to close down the HW. Don't wait |
1013 | * here for this to complete. The watchdog is still running | 1012 | * here for this to complete. The watchdog is still running |
1014 | * and it will take care of this. | 1013 | * and it will take care of this. |
@@ -1335,7 +1334,6 @@ static void i40evf_watchdog_task(struct work_struct *work) | |||
1335 | */ | 1334 | */ |
1336 | return; | 1335 | return; |
1337 | } | 1336 | } |
1338 | adapter->aq_pending = 0; | ||
1339 | adapter->aq_required = 0; | 1337 | adapter->aq_required = 0; |
1340 | adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; | 1338 | adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; |
1341 | goto watchdog_done; | 1339 | goto watchdog_done; |
@@ -1355,7 +1353,6 @@ static void i40evf_watchdog_task(struct work_struct *work) | |||
1355 | adapter->flags |= I40EVF_FLAG_RESET_PENDING; | 1353 | adapter->flags |= I40EVF_FLAG_RESET_PENDING; |
1356 | dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); | 1354 | dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); |
1357 | schedule_work(&adapter->reset_task); | 1355 | schedule_work(&adapter->reset_task); |
1358 | adapter->aq_pending = 0; | ||
1359 | adapter->aq_required = 0; | 1356 | adapter->aq_required = 0; |
1360 | adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; | 1357 | adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; |
1361 | goto watchdog_done; | 1358 | goto watchdog_done; |
@@ -1364,7 +1361,7 @@ static void i40evf_watchdog_task(struct work_struct *work) | |||
1364 | /* Process admin queue tasks. After init, everything gets done | 1361 | /* Process admin queue tasks. After init, everything gets done |
1365 | * here so we don't race on the admin queue. | 1362 | * here so we don't race on the admin queue. |
1366 | */ | 1363 | */ |
1367 | if (adapter->aq_pending) { | 1364 | if (adapter->current_op) { |
1368 | if (!i40evf_asq_done(hw)) { | 1365 | if (!i40evf_asq_done(hw)) { |
1369 | dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n"); | 1366 | dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n"); |
1370 | i40evf_send_api_ver(adapter); | 1367 | i40evf_send_api_ver(adapter); |
@@ -2029,7 +2026,7 @@ static void i40evf_init_task(struct work_struct *work) | |||
2029 | if (err) { | 2026 | if (err) { |
2030 | dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", | 2027 | dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", |
2031 | err); | 2028 | err); |
2032 | goto err; | 2029 | goto err; |
2033 | } | 2030 | } |
2034 | err = i40evf_check_reset_complete(hw); | 2031 | err = i40evf_check_reset_complete(hw); |
2035 | if (err) { | 2032 | if (err) { |
@@ -2249,7 +2246,6 @@ static void i40evf_shutdown(struct pci_dev *pdev) | |||
2249 | /* Prevent the watchdog from running. */ | 2246 | /* Prevent the watchdog from running. */ |
2250 | adapter->state = __I40EVF_REMOVE; | 2247 | adapter->state = __I40EVF_REMOVE; |
2251 | adapter->aq_required = 0; | 2248 | adapter->aq_required = 0; |
2252 | adapter->aq_pending = 0; | ||
2253 | 2249 | ||
2254 | #ifdef CONFIG_PM | 2250 | #ifdef CONFIG_PM |
2255 | pci_save_state(pdev); | 2251 | pci_save_state(pdev); |
@@ -2467,7 +2463,6 @@ static void i40evf_remove(struct pci_dev *pdev) | |||
2467 | /* Shut down all the garbage mashers on the detention level */ | 2463 | /* Shut down all the garbage mashers on the detention level */ |
2468 | adapter->state = __I40EVF_REMOVE; | 2464 | adapter->state = __I40EVF_REMOVE; |
2469 | adapter->aq_required = 0; | 2465 | adapter->aq_required = 0; |
2470 | adapter->aq_pending = 0; | ||
2471 | i40evf_request_reset(adapter); | 2466 | i40evf_request_reset(adapter); |
2472 | msleep(20); | 2467 | msleep(20); |
2473 | /* If the FW isn't responding, kick it once, but only once. */ | 2468 | /* If the FW isn't responding, kick it once, but only once. */ |
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 4240a496dc50..61e090558f31 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | |||
@@ -250,7 +250,6 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) | |||
250 | vqpi++; | 250 | vqpi++; |
251 | } | 251 | } |
252 | 252 | ||
253 | adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; | ||
254 | adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES; | 253 | adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES; |
255 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, | 254 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, |
256 | (u8 *)vqci, len); | 255 | (u8 *)vqci, len); |
@@ -277,7 +276,6 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter) | |||
277 | vqs.vsi_id = adapter->vsi_res->vsi_id; | 276 | vqs.vsi_id = adapter->vsi_res->vsi_id; |
278 | vqs.tx_queues = (1 << adapter->num_active_queues) - 1; | 277 | vqs.tx_queues = (1 << adapter->num_active_queues) - 1; |
279 | vqs.rx_queues = vqs.tx_queues; | 278 | vqs.rx_queues = vqs.tx_queues; |
280 | adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES; | ||
281 | adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; | 279 | adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; |
282 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES, | 280 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES, |
283 | (u8 *)&vqs, sizeof(vqs)); | 281 | (u8 *)&vqs, sizeof(vqs)); |
@@ -303,7 +301,6 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) | |||
303 | vqs.vsi_id = adapter->vsi_res->vsi_id; | 301 | vqs.vsi_id = adapter->vsi_res->vsi_id; |
304 | vqs.tx_queues = (1 << adapter->num_active_queues) - 1; | 302 | vqs.tx_queues = (1 << adapter->num_active_queues) - 1; |
305 | vqs.rx_queues = vqs.tx_queues; | 303 | vqs.rx_queues = vqs.tx_queues; |
306 | adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES; | ||
307 | adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; | 304 | adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; |
308 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES, | 305 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES, |
309 | (u8 *)&vqs, sizeof(vqs)); | 306 | (u8 *)&vqs, sizeof(vqs)); |
@@ -354,7 +351,6 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) | |||
354 | vimi->vecmap[v_idx].txq_map = 0; | 351 | vimi->vecmap[v_idx].txq_map = 0; |
355 | vimi->vecmap[v_idx].rxq_map = 0; | 352 | vimi->vecmap[v_idx].rxq_map = 0; |
356 | 353 | ||
357 | adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS; | ||
358 | adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS; | 354 | adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS; |
359 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, | 355 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, |
360 | (u8 *)vimi, len); | 356 | (u8 *)vimi, len); |
@@ -415,7 +411,6 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) | |||
415 | f->add = false; | 411 | f->add = false; |
416 | } | 412 | } |
417 | } | 413 | } |
418 | adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; | ||
419 | adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; | 414 | adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; |
420 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, | 415 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, |
421 | (u8 *)veal, len); | 416 | (u8 *)veal, len); |
@@ -476,7 +471,6 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) | |||
476 | kfree(f); | 471 | kfree(f); |
477 | } | 472 | } |
478 | } | 473 | } |
479 | adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; | ||
480 | adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; | 474 | adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; |
481 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, | 475 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, |
482 | (u8 *)veal, len); | 476 | (u8 *)veal, len); |
@@ -537,7 +531,6 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) | |||
537 | f->add = false; | 531 | f->add = false; |
538 | } | 532 | } |
539 | } | 533 | } |
540 | adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; | ||
541 | adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; | 534 | adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; |
542 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); | 535 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); |
543 | kfree(vvfl); | 536 | kfree(vvfl); |
@@ -598,7 +591,6 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) | |||
598 | kfree(f); | 591 | kfree(f); |
599 | } | 592 | } |
600 | } | 593 | } |
601 | adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; | ||
602 | adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; | 594 | adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; |
603 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); | 595 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); |
604 | kfree(vvfl); | 596 | kfree(vvfl); |
@@ -720,9 +712,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, | |||
720 | __func__, v_retval, v_opcode); | 712 | __func__, v_retval, v_opcode); |
721 | } | 713 | } |
722 | switch (v_opcode) { | 714 | switch (v_opcode) { |
723 | case I40E_VIRTCHNL_OP_VERSION: | ||
724 | /* no action, but also not an error */ | ||
725 | break; | ||
726 | case I40E_VIRTCHNL_OP_GET_STATS: { | 715 | case I40E_VIRTCHNL_OP_GET_STATS: { |
727 | struct i40e_eth_stats *stats = | 716 | struct i40e_eth_stats *stats = |
728 | (struct i40e_eth_stats *)msg; | 717 | (struct i40e_eth_stats *)msg; |
@@ -740,39 +729,30 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, | |||
740 | adapter->current_stats = *stats; | 729 | adapter->current_stats = *stats; |
741 | } | 730 | } |
742 | break; | 731 | break; |
743 | case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: | ||
744 | adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER); | ||
745 | break; | ||
746 | case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: | ||
747 | adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER); | ||
748 | break; | ||
749 | case I40E_VIRTCHNL_OP_ADD_VLAN: | ||
750 | adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER); | ||
751 | break; | ||
752 | case I40E_VIRTCHNL_OP_DEL_VLAN: | ||
753 | adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER); | ||
754 | break; | ||
755 | case I40E_VIRTCHNL_OP_ENABLE_QUEUES: | 732 | case I40E_VIRTCHNL_OP_ENABLE_QUEUES: |
756 | adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES); | ||
757 | /* enable transmits */ | 733 | /* enable transmits */ |
758 | i40evf_irq_enable(adapter, true); | 734 | i40evf_irq_enable(adapter, true); |
759 | netif_tx_start_all_queues(adapter->netdev); | 735 | netif_tx_start_all_queues(adapter->netdev); |
760 | netif_carrier_on(adapter->netdev); | 736 | netif_carrier_on(adapter->netdev); |
761 | break; | 737 | break; |
762 | case I40E_VIRTCHNL_OP_DISABLE_QUEUES: | 738 | case I40E_VIRTCHNL_OP_DISABLE_QUEUES: |
763 | adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES); | ||
764 | i40evf_free_all_tx_resources(adapter); | 739 | i40evf_free_all_tx_resources(adapter); |
765 | i40evf_free_all_rx_resources(adapter); | 740 | i40evf_free_all_rx_resources(adapter); |
766 | break; | 741 | break; |
767 | case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: | 742 | case I40E_VIRTCHNL_OP_VERSION: |
768 | adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES); | 743 | case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: |
769 | break; | ||
770 | case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: | 744 | case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: |
771 | adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS); | 745 | /* Don't display an error if we get these out of sequence. |
746 | * If the firmware needed to get kicked, we'll get these and | ||
747 | * it's no problem. | ||
748 | */ | ||
749 | if (v_opcode != adapter->current_op) | ||
750 | return; | ||
772 | break; | 751 | break; |
773 | default: | 752 | default: |
774 | dev_info(&adapter->pdev->dev, "Received unexpected message %d from PF\n", | 753 | if (v_opcode != adapter->current_op) |
775 | v_opcode); | 754 | dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", |
755 | adapter->current_op, v_opcode); | ||
776 | break; | 756 | break; |
777 | } /* switch v_opcode */ | 757 | } /* switch v_opcode */ |
778 | adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; | 758 | adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; |
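
Editor's note: with aq_pending gone, the driver serializes admin-queue traffic on current_op alone. Each request path records its opcode, clears its aq_required bit, and sends; the completion handler above resets current_op to I40E_VIRTCHNL_OP_UNKNOWN once the PF answers (and ignores stale VERSION/RESOURCES/IRQ_MAP replies). A hedged sketch of the request side of that contract — the guard is paraphrased, not quoted from the driver:

/* Sketch: one admin-queue operation in flight at a time. */
void example_enable_queues(struct i40evf_adapter *adapter)
{
	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
		return;	/* previous request not completed; retry later */

	adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
	/* build and send the message, e.g. via i40evf_send_pf_msg() */
}
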
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 4a42e960d331..f66641d961e3 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #include <linux/skbuff.h> | 41 | #include <linux/skbuff.h> |
42 | #include <linux/spi/spi.h> | 42 | #include <linux/spi/spi.h> |
43 | #include <linux/types.h> | 43 | #include <linux/types.h> |
44 | #include <linux/version.h> | ||
45 | 44 | ||
46 | #include "qca_7k.h" | 45 | #include "qca_7k.h" |
47 | #include "qca_debug.h" | 46 | #include "qca_debug.h" |
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index a87b177bd723..a570a60533be 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c | |||
@@ -4759,6 +4759,7 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
4759 | 4759 | ||
4760 | if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { | 4760 | if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { |
4761 | dev_err(&pdev->dev, "invalid PCI region size\n"); | 4761 | dev_err(&pdev->dev, "invalid PCI region size\n"); |
4762 | err = -EINVAL; | ||
4762 | goto err_pci_resource_len_check; | 4763 | goto err_pci_resource_len_check; |
4763 | } | 4764 | } |
4764 | 4765 | ||
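
Editor's note: the one-line rocker change fixes the classic goto-unwind bug — the size check jumped to the error label while err still held the 0 from the previous successful call, so probe unwound but reported success. A minimal sketch of the pattern, with hypothetical names (example_probe, EXAMPLE_BAR0_SIZE):

static int example_probe(struct pci_dev *pdev)
{
	int err = pci_enable_device(pdev);	/* 0 on success */

	if (err)
		return err;

	if (pci_resource_len(pdev, 0) < EXAMPLE_BAR0_SIZE) {
		err = -EINVAL;		/* without this, we would return 0 */
		goto err_resource_len;
	}
	return 0;

err_resource_len:
	pci_disable_device(pdev);
	return err;
}
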
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index 10b6173d557d..b605dfd5c7bc 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c | |||
@@ -46,7 +46,7 @@ struct efx_loopback_payload { | |||
46 | struct iphdr ip; | 46 | struct iphdr ip; |
47 | struct udphdr udp; | 47 | struct udphdr udp; |
48 | __be16 iteration; | 48 | __be16 iteration; |
49 | const char msg[64]; | 49 | char msg[64]; |
50 | } __packed; | 50 | } __packed; |
51 | 51 | ||
52 | /* Loopback test source MAC address */ | 52 | /* Loopback test source MAC address */ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index cd77289c3cfe..623c6ed8764a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h | |||
@@ -150,7 +150,7 @@ struct stmmac_extra_stats { | |||
150 | #define MAC_CSR_H_FRQ_MASK 0x20 | 150 | #define MAC_CSR_H_FRQ_MASK 0x20 |
151 | 151 | ||
152 | #define HASH_TABLE_SIZE 64 | 152 | #define HASH_TABLE_SIZE 64 |
153 | #define PAUSE_TIME 0x200 | 153 | #define PAUSE_TIME 0xffff |
154 | 154 | ||
155 | /* Flow Control defines */ | 155 | /* Flow Control defines */ |
156 | #define FLOW_OFF 0 | 156 | #define FLOW_OFF 0 |
@@ -357,7 +357,8 @@ struct stmmac_dma_ops { | |||
357 | void (*dump_regs) (void __iomem *ioaddr); | 357 | void (*dump_regs) (void __iomem *ioaddr); |
358 | /* Set tx/rx threshold in the csr6 register | 358 | /* Set tx/rx threshold in the csr6 register |
359 | * An invalid value enables the store-and-forward mode */ | 359 | * An invalid value enables the store-and-forward mode */ |
360 | void (*dma_mode) (void __iomem *ioaddr, int txmode, int rxmode); | 360 | void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, |
361 | int rxfifosz); | ||
361 | /* To track extra statistic (if supported) */ | 362 | /* To track extra statistic (if supported) */ |
362 | void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, | 363 | void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, |
363 | void __iomem *ioaddr); | 364 | void __iomem *ioaddr); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 64d8f56a9c17..b3fe0575ff6b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | |||
@@ -172,6 +172,7 @@ enum inter_frame_gap { | |||
172 | /* GMAC FLOW CTRL defines */ | 172 | /* GMAC FLOW CTRL defines */ |
173 | #define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ | 173 | #define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ |
174 | #define GMAC_FLOW_CTRL_PT_SHIFT 16 | 174 | #define GMAC_FLOW_CTRL_PT_SHIFT 16 |
175 | #define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */ | ||
175 | #define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ | 176 | #define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ |
176 | #define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ | 177 | #define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ |
177 | #define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ | 178 | #define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ |
@@ -246,6 +247,56 @@ enum ttc_control { | |||
246 | #define DMA_CONTROL_FEF 0x00000080 | 247 | #define DMA_CONTROL_FEF 0x00000080 |
247 | #define DMA_CONTROL_FUF 0x00000040 | 248 | #define DMA_CONTROL_FUF 0x00000040 |
248 | 249 | ||
250 | /* Receive flow control activation field | ||
251 | * RFA field in DMA control register, bits 23,10:9 | ||
252 | */ | ||
253 | #define DMA_CONTROL_RFA_MASK 0x00800600 | ||
254 | |||
255 | /* Receive flow control deactivation field | ||
256 | * RFD field in DMA control register, bits 22,12:11 | ||
257 | */ | ||
258 | #define DMA_CONTROL_RFD_MASK 0x00401800 | ||
259 | |||
260 | /* RFD and RFA fields are encoded as follows | ||
261 | * | ||
262 | * Bit Field | ||
263 | * 0,00 - Full minus 1KB (only valid when rxfifo >= 4KB and EFC enabled) | ||
264 | * 0,01 - Full minus 2KB (only valid when rxfifo >= 4KB and EFC enabled) | ||
265 | * 0,10 - Full minus 3KB (only valid when rxfifo >= 4KB and EFC enabled) | ||
266 | * 0,11 - Full minus 4KB (only valid when rxfifo > 4KB and EFC enabled) | ||
267 | * 1,00 - Full minus 5KB (only valid when rxfifo > 8KB and EFC enabled) | ||
268 | * 1,01 - Full minus 6KB (only valid when rxfifo > 8KB and EFC enabled) | ||
269 | * 1,10 - Full minus 7KB (only valid when rxfifo > 8KB and EFC enabled) | ||
270 | * 1,11 - Reserved | ||
271 | * | ||
272 | * RFD should always be > RFA for a given FIFO size. RFD == RFA may work, | ||
273 | * but packet throughput performance may not be as expected. | ||
274 | * | ||
275 | * Be sure that bit 3 in GMAC Register 6 is set for Unicast Pause frame | ||
276 | * detection (IEEE Specification Requirement, Annex 31B, 31B.1, Pause | ||
277 | * Description). | ||
278 | * | ||
279 | * Be sure that DZPA (bit 7 in Flow Control Register, GMAC Register 6), | ||
280 | * is set to 0. This allows pause frames with a quanta of 0 to be sent | ||
281 | * as an XOFF message to the link peer. | ||
282 | */ | ||
283 | |||
284 | #define RFA_FULL_MINUS_1K 0x00000000 | ||
285 | #define RFA_FULL_MINUS_2K 0x00000200 | ||
286 | #define RFA_FULL_MINUS_3K 0x00000400 | ||
287 | #define RFA_FULL_MINUS_4K 0x00000600 | ||
288 | #define RFA_FULL_MINUS_5K 0x00800000 | ||
289 | #define RFA_FULL_MINUS_6K 0x00800200 | ||
290 | #define RFA_FULL_MINUS_7K 0x00800400 | ||
291 | |||
292 | #define RFD_FULL_MINUS_1K 0x00000000 | ||
293 | #define RFD_FULL_MINUS_2K 0x00000800 | ||
294 | #define RFD_FULL_MINUS_3K 0x00001000 | ||
295 | #define RFD_FULL_MINUS_4K 0x00001800 | ||
296 | #define RFD_FULL_MINUS_5K 0x00400000 | ||
297 | #define RFD_FULL_MINUS_6K 0x00400800 | ||
298 | #define RFD_FULL_MINUS_7K 0x00401000 | ||
299 | |||
249 | enum rtc_control { | 300 | enum rtc_control { |
250 | DMA_CONTROL_RTC_64 = 0x00000000, | 301 | DMA_CONTROL_RTC_64 = 0x00000000, |
251 | DMA_CONTROL_RTC_32 = 0x00000008, | 302 | DMA_CONTROL_RTC_32 = 0x00000008, |
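
Editor's note: the macros encode the table's split fields pre-shifted into place. As a worked example, RFA_FULL_MINUS_5K (0x00800000) is bit 23 set with bits 10:9 clear, the "1,00" row; RFD_FULL_MINUS_2K (0x00000800) is bits 12:11 = 01 with bit 22 clear, the "0,01" row. A small sketch that decodes an RFA threshold back to "KB below full", assuming only the definitions above:

/* Sketch: decode the RFA field of a csr6 value per the table above. */
static int example_rfa_kb_below_full(u32 csr6)
{
	u32 rfa = csr6 & DMA_CONTROL_RFA_MASK;
	u32 low = (rfa >> 9) & 0x3;	/* bits 10:9 */
	u32 high = (rfa >> 23) & 0x1;	/* bit 23 */
	u32 code = (high << 2) | low;	/* 0..7, rows of the table */

	return code == 7 ? -1 : (int)code + 1;	/* 1,11 is reserved */
}
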
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 0adcf73cf722..371a669d69fd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | |||
@@ -201,7 +201,10 @@ static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, | |||
201 | unsigned int fc, unsigned int pause_time) | 201 | unsigned int fc, unsigned int pause_time) |
202 | { | 202 | { |
203 | void __iomem *ioaddr = hw->pcsr; | 203 | void __iomem *ioaddr = hw->pcsr; |
204 | unsigned int flow = 0; | 204 | /* Set flow such that DZPQ in Mac Register 6 is 0, |
205 | * and unicast pause detect is enabled. | ||
206 | */ | ||
207 | unsigned int flow = GMAC_FLOW_CTRL_UP; | ||
205 | 208 | ||
206 | pr_debug("GMAC Flow-Control:\n"); | 209 | pr_debug("GMAC Flow-Control:\n"); |
207 | if (fc & FLOW_RX) { | 210 | if (fc & FLOW_RX) { |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 59d92e811750..0e8937c1184a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | |||
@@ -106,8 +106,29 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, | |||
106 | return 0; | 106 | return 0; |
107 | } | 107 | } |
108 | 108 | ||
109 | static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz) | ||
110 | { | ||
111 | csr6 &= ~DMA_CONTROL_RFA_MASK; | ||
112 | csr6 &= ~DMA_CONTROL_RFD_MASK; | ||
113 | |||
114 | /* Leave flow control disabled if the receive fifo size is unknown | ||
115 | * (zero) or less than 4K. Otherwise, send XOFF when the fifo is 1K | ||
116 | * less than full, and send XON when it is 2K less than full. | ||
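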
117 | */ | ||
118 | if (rxfifosz < 4096) { | ||
119 | csr6 &= ~DMA_CONTROL_EFC; | ||
120 | pr_debug("GMAC: disabling flow control, rxfifo too small (%d)\n", | ||
121 | rxfifosz); | ||
122 | } else { | ||
123 | csr6 |= DMA_CONTROL_EFC; | ||
124 | csr6 |= RFA_FULL_MINUS_1K; | ||
125 | csr6 |= RFD_FULL_MINUS_2K; | ||
126 | } | ||
127 | return csr6; | ||
128 | } | ||
129 | |||
109 | static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, | 130 | static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, |
110 | int rxmode) | 131 | int rxmode, int rxfifosz) |
111 | { | 132 | { |
112 | u32 csr6 = readl(ioaddr + DMA_CONTROL); | 133 | u32 csr6 = readl(ioaddr + DMA_CONTROL); |
113 | 134 | ||
@@ -153,6 +174,9 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, | |||
153 | csr6 |= DMA_CONTROL_RTC_128; | 174 | csr6 |= DMA_CONTROL_RTC_128; |
154 | } | 175 | } |
155 | 176 | ||
177 | /* Configure flow control based on rx fifo size */ | ||
178 | csr6 = dwmac1000_configure_fc(csr6, rxfifosz); | ||
179 | |||
156 | writel(csr6, ioaddr + DMA_CONTROL); | 180 | writel(csr6, ioaddr + DMA_CONTROL); |
157 | } | 181 | } |
158 | 182 | ||
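
Editor's note: putting the two hunks together — for, say, a 16 KB receive FIFO, dwmac1000_configure_fc() turns on DMA_CONTROL_EFC and programs XOFF at 1 KB below full (RFA) and XON at 2 KB below full (RFD); below 4 KB it clears EFC instead. A sketch of the read-modify-write it slots into:

/* Sketch: how the helper composes into the csr6 update. */
static void example_apply_fc(void __iomem *ioaddr, int rxfifosz)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);

	/* rxfifosz = 16384: EFC | RFA_FULL_MINUS_1K | RFD_FULL_MINUS_2K
	 * rxfifosz < 4096:  EFC cleared, flow control stays off
	 */
	csr6 = dwmac1000_configure_fc(csr6, rxfifosz);
	writel(csr6, ioaddr + DMA_CONTROL);
}
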
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index 7d1dce9e7ffc..9d0971c1c2ee 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | |||
@@ -72,7 +72,7 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, | |||
72 | * control register. | 72 | * control register. |
73 | */ | 73 | */ |
74 | static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode, | 74 | static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode, |
75 | int rxmode) | 75 | int rxmode, int rxfifosz) |
76 | { | 76 | { |
77 | u32 csr6 = readl(ioaddr + DMA_CONTROL); | 77 | u32 csr6 = readl(ioaddr + DMA_CONTROL); |
78 | 78 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 06103cad7c77..05c146f718a3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -1277,8 +1277,10 @@ static void free_dma_desc_resources(struct stmmac_priv *priv) | |||
1277 | */ | 1277 | */ |
1278 | static void stmmac_dma_operation_mode(struct stmmac_priv *priv) | 1278 | static void stmmac_dma_operation_mode(struct stmmac_priv *priv) |
1279 | { | 1279 | { |
1280 | int rxfifosz = priv->plat->rx_fifo_size; | ||
1281 | |||
1280 | if (priv->plat->force_thresh_dma_mode) | 1282 | if (priv->plat->force_thresh_dma_mode) |
1281 | priv->hw->dma->dma_mode(priv->ioaddr, tc, tc); | 1283 | priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz); |
1282 | else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { | 1284 | else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { |
1283 | /* | 1285 | /* |
1284 | * In case of GMAC, SF mode can be enabled | 1286 | * In case of GMAC, SF mode can be enabled |
@@ -1287,10 +1289,12 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) | |||
1287 | * 2) There is no bugged Jumbo frame support | 1289 | * 2) There is no bugged Jumbo frame support |
1288 | * that needs to not insert csum in the TDES. | 1290 | * that needs to not insert csum in the TDES. |
1289 | */ | 1291 | */ |
1290 | priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE); | 1292 | priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE, |
1293 | rxfifosz); | ||
1291 | priv->xstats.threshold = SF_DMA_MODE; | 1294 | priv->xstats.threshold = SF_DMA_MODE; |
1292 | } else | 1295 | } else |
1293 | priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE); | 1296 | priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE, |
1297 | rxfifosz); | ||
1294 | } | 1298 | } |
1295 | 1299 | ||
1296 | /** | 1300 | /** |
@@ -1442,6 +1446,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv) | |||
1442 | static void stmmac_dma_interrupt(struct stmmac_priv *priv) | 1446 | static void stmmac_dma_interrupt(struct stmmac_priv *priv) |
1443 | { | 1447 | { |
1444 | int status; | 1448 | int status; |
1449 | int rxfifosz = priv->plat->rx_fifo_size; | ||
1445 | 1450 | ||
1446 | status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats); | 1451 | status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats); |
1447 | if (likely((status & handle_rx)) || (status & handle_tx)) { | 1452 | if (likely((status & handle_rx)) || (status & handle_tx)) { |
@@ -1456,10 +1461,11 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) | |||
1456 | (tc <= 256)) { | 1461 | (tc <= 256)) { |
1457 | tc += 64; | 1462 | tc += 64; |
1458 | if (priv->plat->force_thresh_dma_mode) | 1463 | if (priv->plat->force_thresh_dma_mode) |
1459 | priv->hw->dma->dma_mode(priv->ioaddr, tc, tc); | 1464 | priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, |
1465 | rxfifosz); | ||
1460 | else | 1466 | else |
1461 | priv->hw->dma->dma_mode(priv->ioaddr, tc, | 1467 | priv->hw->dma->dma_mode(priv->ioaddr, tc, |
1462 | SF_DMA_MODE); | 1468 | SF_DMA_MODE, rxfifosz); |
1463 | priv->xstats.threshold = tc; | 1469 | priv->xstats.threshold = tc; |
1464 | } | 1470 | } |
1465 | } else if (unlikely(status == tx_hard_error)) | 1471 | } else if (unlikely(status == tx_hard_error)) |
@@ -2970,15 +2976,15 @@ int stmmac_dvr_remove(struct net_device *ndev) | |||
2970 | priv->hw->dma->stop_tx(priv->ioaddr); | 2976 | priv->hw->dma->stop_tx(priv->ioaddr); |
2971 | 2977 | ||
2972 | stmmac_set_mac(priv->ioaddr, false); | 2978 | stmmac_set_mac(priv->ioaddr, false); |
2973 | if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && | ||
2974 | priv->pcs != STMMAC_PCS_RTBI) | ||
2975 | stmmac_mdio_unregister(ndev); | ||
2976 | netif_carrier_off(ndev); | 2979 | netif_carrier_off(ndev); |
2977 | unregister_netdev(ndev); | 2980 | unregister_netdev(ndev); |
2978 | if (priv->stmmac_rst) | 2981 | if (priv->stmmac_rst) |
2979 | reset_control_assert(priv->stmmac_rst); | 2982 | reset_control_assert(priv->stmmac_rst); |
2980 | clk_disable_unprepare(priv->pclk); | 2983 | clk_disable_unprepare(priv->pclk); |
2981 | clk_disable_unprepare(priv->stmmac_clk); | 2984 | clk_disable_unprepare(priv->stmmac_clk); |
2985 | if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && | ||
2986 | priv->pcs != STMMAC_PCS_RTBI) | ||
2987 | stmmac_mdio_unregister(ndev); | ||
2982 | free_netdev(ndev); | 2988 | free_netdev(ndev); |
2983 | 2989 | ||
2984 | return 0; | 2990 | return 0; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index f9b42f11950f..705bbdf93940 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -181,6 +181,10 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, | |||
181 | sizeof(struct stmmac_mdio_bus_data), | 181 | sizeof(struct stmmac_mdio_bus_data), |
182 | GFP_KERNEL); | 182 | GFP_KERNEL); |
183 | 183 | ||
184 | of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); | ||
185 | |||
186 | of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size); | ||
187 | |||
184 | plat->force_sf_dma_mode = | 188 | plat->force_sf_dma_mode = |
185 | of_property_read_bool(np, "snps,force_sf_dma_mode"); | 189 | of_property_read_bool(np, "snps,force_sf_dma_mode"); |
186 | 190 | ||
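
Editor's note: of_property_read_u32() leaves its output untouched on failure, so a board that omits the new properties keeps the kzalloc'd zero in plat->rx_fifo_size, and dwmac1000_configure_fc() then falls back to leaving flow control off. A hedged sketch of the same read with that default spelled out:

/* Sketch: equivalent read with the fallback made explicit. */
u32 depth = 0;	/* 0 == "FIFO size unknown", keeps flow control off */

if (of_property_read_u32(np, "rx-fifo-depth", &depth))
	dev_dbg(&pdev->dev, "rx-fifo-depth not given, using %u\n", depth);
plat->rx_fifo_size = depth;
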
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 34f846b4bd05..94570aace241 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c | |||
@@ -105,7 +105,7 @@ static void ri_tasklet(unsigned long dev) | |||
105 | if (from & AT_EGRESS) { | 105 | if (from & AT_EGRESS) { |
106 | dev_queue_xmit(skb); | 106 | dev_queue_xmit(skb); |
107 | } else if (from & AT_INGRESS) { | 107 | } else if (from & AT_INGRESS) { |
108 | skb_pull(skb, skb->dev->hard_header_len); | 108 | skb_pull(skb, skb->mac_len); |
109 | netif_receive_skb(skb); | 109 | netif_receive_skb(skb); |
110 | } else | 110 | } else |
111 | BUG(); | 111 | BUG(); |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 9c91ff872485..8c350c5d54ad 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -313,7 +313,7 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb) | |||
313 | */ | 313 | */ |
314 | if (q->flags & IFF_VNET_HDR) | 314 | if (q->flags & IFF_VNET_HDR) |
315 | features |= vlan->tap_features; | 315 | features |= vlan->tap_features; |
316 | if (netif_needs_gso(dev, skb, features)) { | 316 | if (netif_needs_gso(skb, features)) { |
317 | struct sk_buff *segs = __skb_gso_segment(skb, features, false); | 317 | struct sk_buff *segs = __skb_gso_segment(skb, features, false); |
318 | 318 | ||
319 | if (IS_ERR(segs)) | 319 | if (IS_ERR(segs)) |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 4c08f98f4484..3f45afd4382e 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -560,7 +560,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
560 | 560 | ||
561 | if (unlikely(!netif_carrier_ok(dev) || | 561 | if (unlikely(!netif_carrier_ok(dev) || |
562 | (slots > 1 && !xennet_can_sg(dev)) || | 562 | (slots > 1 && !xennet_can_sg(dev)) || |
563 | netif_needs_gso(dev, skb, netif_skb_features(skb)))) { | 563 | netif_needs_gso(skb, netif_skb_features(skb)))) { |
564 | spin_unlock_irqrestore(&queue->tx_lock, flags); | 564 | spin_unlock_irqrestore(&queue->tx_lock, flags); |
565 | goto drop; | 565 | goto drop; |
566 | } | 566 | } |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b5679aed660b..bcbde799ec69 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -3713,7 +3713,7 @@ static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) | |||
3713 | (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); | 3713 | (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); |
3714 | } | 3714 | } |
3715 | 3715 | ||
3716 | static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb, | 3716 | static inline bool netif_needs_gso(struct sk_buff *skb, |
3717 | netdev_features_t features) | 3717 | netdev_features_t features) |
3718 | { | 3718 | { |
3719 | return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || | 3719 | return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || |
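
Editor's note: none of the remaining logic consults the device, only the skb and the already-computed feature mask, so the dev parameter is dropped and every caller shrinks by one argument. The updated call pattern in a transmit path, sketched (drop is a hypothetical label in the caller):

netdev_features_t features = netif_skb_features(skb);

if (netif_needs_gso(skb, features)) {
	struct sk_buff *segs = skb_gso_segment(skb, features);

	if (IS_ERR(segs))
		goto drop;
	/* transmit segs instead of the original skb */
}
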
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index cd63851b57f2..7f484a239f53 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h | |||
@@ -114,6 +114,8 @@ struct plat_stmmacenet_data { | |||
114 | int maxmtu; | 114 | int maxmtu; |
115 | int multicast_filter_bins; | 115 | int multicast_filter_bins; |
116 | int unicast_filter_entries; | 116 | int unicast_filter_entries; |
117 | int tx_fifo_size; | ||
118 | int rx_fifo_size; | ||
117 | void (*fix_mac_speed)(void *priv, unsigned int speed); | 119 | void (*fix_mac_speed)(void *priv, unsigned int speed); |
118 | void (*bus_setup)(void __iomem *ioaddr); | 120 | void (*bus_setup)(void __iomem *ioaddr); |
119 | void *(*setup)(struct platform_device *pdev); | 121 | void *(*setup)(struct platform_device *pdev); |
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h index 0931618c0f7f..70e158551704 100644 --- a/include/net/netns/generic.h +++ b/include/net/netns/generic.h | |||
@@ -38,11 +38,9 @@ static inline void *net_generic(const struct net *net, int id) | |||
38 | 38 | ||
39 | rcu_read_lock(); | 39 | rcu_read_lock(); |
40 | ng = rcu_dereference(net->gen); | 40 | ng = rcu_dereference(net->gen); |
41 | BUG_ON(id == 0 || id > ng->len); | ||
42 | ptr = ng->ptr[id - 1]; | 41 | ptr = ng->ptr[id - 1]; |
43 | rcu_read_unlock(); | 42 | rcu_read_unlock(); |
44 | 43 | ||
45 | BUG_ON(!ptr); | ||
46 | return ptr; | 44 | return ptr; |
47 | } | 45 | } |
48 | #endif | 46 | #endif |
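
Editor's note: the ids handed to net_generic() come from pernet registration at init time, and the slot is populated before the subsystem can run, so the two BUG_ONs guarded conditions that cannot occur for correct callers while taxing a hot path. Typical usage, for reference (all example_* names are illustrative):

static int example_net_id __read_mostly;  /* set by register_pernet_subsys() */

struct example_net {
	u32 counter;
};

static inline struct example_net *example_net(const struct net *net)
{
	return net_generic(net, example_net_id);	/* no validation needed */
}
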
diff --git a/include/net/tcp.h b/include/net/tcp.h index 9598871485ce..051dc5c2802d 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -829,7 +829,7 @@ struct tcp_congestion_ops { | |||
829 | /* hook for packet ack accounting (optional) */ | 829 | /* hook for packet ack accounting (optional) */ |
830 | void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us); | 830 | void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us); |
831 | /* get info for inet_diag (optional) */ | 831 | /* get info for inet_diag (optional) */ |
832 | void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb); | 832 | int (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb); |
833 | 833 | ||
834 | char name[TCP_CA_NAME_MAX]; | 834 | char name[TCP_CA_NAME_MAX]; |
835 | struct module *owner; | 835 | struct module *owner; |
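
Editor's note: returning int lets a full skb propagate nla_put()'s -EMSGSIZE out of the dump instead of it being silently swallowed; the in-tree users below are converted accordingly. A hedged template for a module following the new contract (the ca field is hypothetical):

static int example_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
{
	const struct example_ca *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcpvegas_info info = {
			.tcpv_enabled = 1,
			.tcpv_rtt = ca->srtt_us,	/* hypothetical field */
		};

		return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
	}
	return 0;	/* nothing to report is not an error */
}
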
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 5c1cee11f777..a9ebdf5701e8 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h | |||
@@ -177,7 +177,7 @@ enum bpf_func_id { | |||
177 | /** | 177 | /** |
178 | * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet | 178 | * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet |
179 | * @skb: pointer to skb | 179 | * @skb: pointer to skb |
180 | * @offset: offset within packet from skb->data | 180 | * @offset: offset within packet from skb->mac_header |
181 | * @from: pointer where to copy bytes from | 181 | * @from: pointer where to copy bytes from |
182 | * @len: number of bytes to store into packet | 182 | * @len: number of bytes to store into packet |
183 | * @flags: bit 0 - if true, recompute skb->csum | 183 | * @flags: bit 0 - if true, recompute skb->csum |
diff --git a/include/uapi/linux/filter.h b/include/uapi/linux/filter.h index 34c7936ca114..c97340e43dd6 100644 --- a/include/uapi/linux/filter.h +++ b/include/uapi/linux/filter.h | |||
@@ -79,8 +79,11 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */ | |||
79 | #define SKF_AD_RANDOM 56 | 79 | #define SKF_AD_RANDOM 56 |
80 | #define SKF_AD_VLAN_TPID 60 | 80 | #define SKF_AD_VLAN_TPID 60 |
81 | #define SKF_AD_MAX 64 | 81 | #define SKF_AD_MAX 64 |
82 | #define SKF_NET_OFF (-0x100000) | ||
83 | #define SKF_LL_OFF (-0x200000) | ||
84 | 82 | ||
83 | #define SKF_NET_OFF (-0x100000) | ||
84 | #define SKF_LL_OFF (-0x200000) | ||
85 | |||
86 | #define BPF_NET_OFF SKF_NET_OFF | ||
87 | #define BPF_LL_OFF SKF_LL_OFF | ||
85 | 88 | ||
86 | #endif /* _UAPI__LINUX_FILTER_H__ */ | 89 | #endif /* _UAPI__LINUX_FILTER_H__ */ |
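
Editor's note: SKF_NET_OFF and SKF_LL_OFF are the classic-BPF magic negative offsets — a load at SKF_NET_OFF + k reads k bytes past the network header regardless of where skb->data points — and the new BPF_NET_OFF/BPF_LL_OFF names expose the same constants under the eBPF-era spelling. A hedged userspace sketch that keys on the IPv4 protocol field via the network-header base:

#include <linux/filter.h>
#include <netinet/in.h>

/* Sketch: classic BPF socket filter, accept only TCP. */
struct sock_filter insns[] = {
	/* A = byte at network header + 9 (the IPv4 protocol field) */
	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, IPPROTO_TCP, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* TCP: keep the packet */
	BPF_STMT(BPF_RET | BPF_K, 0),		/* else: drop */
};
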
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 630a7bac1e51..47dcd3aa6e23 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -1397,7 +1397,8 @@ peek_stack: | |||
1397 | /* tell verifier to check for equivalent states | 1397 | /* tell verifier to check for equivalent states |
1398 | * after every call and jump | 1398 | * after every call and jump |
1399 | */ | 1399 | */ |
1400 | env->explored_states[t + 1] = STATE_LIST_MARK; | 1400 | if (t + 1 < insn_cnt) |
1401 | env->explored_states[t + 1] = STATE_LIST_MARK; | ||
1401 | } else { | 1402 | } else { |
1402 | /* conditional jump with two edges */ | 1403 | /* conditional jump with two edges */ |
1403 | ret = push_insn(t, t + 1, FALLTHROUGH, env); | 1404 | ret = push_insn(t, t + 1, FALLTHROUGH, env); |
@@ -1636,6 +1637,8 @@ static int do_check(struct verifier_env *env) | |||
1636 | if (err) | 1637 | if (err) |
1637 | return err; | 1638 | return err; |
1638 | 1639 | ||
1640 | src_reg_type = regs[insn->src_reg].type; | ||
1641 | |||
1639 | /* check that memory (src_reg + off) is readable, | 1642 | /* check that memory (src_reg + off) is readable, |
1640 | * the state of dst_reg will be updated by this func | 1643 | * the state of dst_reg will be updated by this func |
1641 | */ | 1644 | */ |
@@ -1645,9 +1648,12 @@ static int do_check(struct verifier_env *env) | |||
1645 | if (err) | 1648 | if (err) |
1646 | return err; | 1649 | return err; |
1647 | 1650 | ||
1648 | src_reg_type = regs[insn->src_reg].type; | 1651 | if (BPF_SIZE(insn->code) != BPF_W) { |
1652 | insn_idx++; | ||
1653 | continue; | ||
1654 | } | ||
1649 | 1655 | ||
1650 | if (insn->imm == 0 && BPF_SIZE(insn->code) == BPF_W) { | 1656 | if (insn->imm == 0) { |
1651 | /* saw a valid insn | 1657 | /* saw a valid insn |
1652 | * dst_reg = *(u32 *)(src_reg + off) | 1658 | * dst_reg = *(u32 *)(src_reg + off) |
1653 | * use reserved 'imm' field to mark this insn | 1659 | * use reserved 'imm' field to mark this insn |
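
Editor's note: the reordering matters when a load uses the same register as source and destination — check_mem_access() rewrites the destination register's state, so reading regs[insn->src_reg].type afterwards could observe the loaded value's type rather than the pointer's. Sketched, under that reading of the hunk:

/* Sketch: for "r1 = *(u32 *)(r1 + 0)" src and dst are both r1, so the
 * pointer type must be sampled before check_mem_access() clobbers it.
 */
src_reg_type = regs[insn->src_reg].type;

err = check_mem_access(env, insn->src_reg, insn->off,
		       BPF_SIZE(insn->code), BPF_READ, insn->dst_reg);
if (err)
	return err;
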
diff --git a/net/core/dev.c b/net/core/dev.c index af4a1b0adc10..1796cef55ab5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2713,7 +2713,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device | |||
2713 | if (unlikely(!skb)) | 2713 | if (unlikely(!skb)) |
2714 | goto out_null; | 2714 | goto out_null; |
2715 | 2715 | ||
2716 | if (netif_needs_gso(dev, skb, features)) { | 2716 | if (netif_needs_gso(skb, features)) { |
2717 | struct sk_buff *segs; | 2717 | struct sk_buff *segs; |
2718 | 2718 | ||
2719 | segs = skb_gso_segment(skb, features); | 2719 | segs = skb_gso_segment(skb, features); |
diff --git a/net/core/filter.c b/net/core/filter.c index b669e75d2b36..bf831a85c315 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -1175,12 +1175,27 @@ int sk_attach_bpf(u32 ufd, struct sock *sk) | |||
1175 | return 0; | 1175 | return 0; |
1176 | } | 1176 | } |
1177 | 1177 | ||
1178 | /** | ||
1179 | * bpf_skb_clone_unwritable - is the header of a clone not writable | ||
1180 | * @skb: buffer to check | ||
1181 | * @len: length up to which to write, can be negative | ||
1182 | * | ||
1183 | * Returns true if modifying the header part of the cloned buffer | ||
1184 | * requires the data to be copied, i.e. this version also handles the | ||
1185 | * negative lengths needed by the eBPF case. | ||
1186 | */ | ||
1187 | static bool bpf_skb_clone_unwritable(const struct sk_buff *skb, int len) | ||
1188 | { | ||
1189 | return skb_header_cloned(skb) || | ||
1190 | (int) skb_headroom(skb) + len > skb->hdr_len; | ||
1191 | } | ||
1192 | |||
1178 | #define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1) | 1193 | #define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1) |
1179 | 1194 | ||
1180 | static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) | 1195 | static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) |
1181 | { | 1196 | { |
1182 | struct sk_buff *skb = (struct sk_buff *) (long) r1; | 1197 | struct sk_buff *skb = (struct sk_buff *) (long) r1; |
1183 | unsigned int offset = (unsigned int) r2; | 1198 | int offset = (int) r2; |
1184 | void *from = (void *) (long) r3; | 1199 | void *from = (void *) (long) r3; |
1185 | unsigned int len = (unsigned int) r4; | 1200 | unsigned int len = (unsigned int) r4; |
1186 | char buf[16]; | 1201 | char buf[16]; |
@@ -1194,10 +1209,12 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) | |||
1194 | * | 1209 | * |
1195 | * so check for invalid 'offset' and too large 'len' | 1210 | * so check for invalid 'offset' and too large 'len' |
1196 | */ | 1211 | */ |
1197 | if (unlikely(offset > 0xffff || len > sizeof(buf))) | 1212 | if (unlikely((u32) offset > 0xffff || len > sizeof(buf))) |
1198 | return -EFAULT; | 1213 | return -EFAULT; |
1199 | 1214 | ||
1200 | if (skb_cloned(skb) && !skb_clone_writable(skb, offset + len)) | 1215 | offset -= skb->data - skb_mac_header(skb); |
1216 | if (unlikely(skb_cloned(skb) && | ||
1217 | bpf_skb_clone_unwritable(skb, offset + len))) | ||
1201 | return -EFAULT; | 1218 | return -EFAULT; |
1202 | 1219 | ||
1203 | ptr = skb_header_pointer(skb, offset, len, buf); | 1220 | ptr = skb_header_pointer(skb, offset, len, buf); |
@@ -1232,15 +1249,18 @@ const struct bpf_func_proto bpf_skb_store_bytes_proto = { | |||
1232 | #define BPF_HEADER_FIELD_SIZE(flags) ((flags) & 0x0f) | 1249 | #define BPF_HEADER_FIELD_SIZE(flags) ((flags) & 0x0f) |
1233 | #define BPF_IS_PSEUDO_HEADER(flags) ((flags) & 0x10) | 1250 | #define BPF_IS_PSEUDO_HEADER(flags) ((flags) & 0x10) |
1234 | 1251 | ||
1235 | static u64 bpf_l3_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags) | 1252 | static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) |
1236 | { | 1253 | { |
1237 | struct sk_buff *skb = (struct sk_buff *) (long) r1; | 1254 | struct sk_buff *skb = (struct sk_buff *) (long) r1; |
1255 | int offset = (int) r2; | ||
1238 | __sum16 sum, *ptr; | 1256 | __sum16 sum, *ptr; |
1239 | 1257 | ||
1240 | if (unlikely(offset > 0xffff)) | 1258 | if (unlikely((u32) offset > 0xffff)) |
1241 | return -EFAULT; | 1259 | return -EFAULT; |
1242 | 1260 | ||
1243 | if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum))) | 1261 | offset -= skb->data - skb_mac_header(skb); |
1262 | if (unlikely(skb_cloned(skb) && | ||
1263 | bpf_skb_clone_unwritable(skb, offset + sizeof(sum)))) | ||
1244 | return -EFAULT; | 1264 | return -EFAULT; |
1245 | 1265 | ||
1246 | ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); | 1266 | ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); |
@@ -1276,16 +1296,19 @@ const struct bpf_func_proto bpf_l3_csum_replace_proto = { | |||
1276 | .arg5_type = ARG_ANYTHING, | 1296 | .arg5_type = ARG_ANYTHING, |
1277 | }; | 1297 | }; |
1278 | 1298 | ||
1279 | static u64 bpf_l4_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags) | 1299 | static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) |
1280 | { | 1300 | { |
1281 | struct sk_buff *skb = (struct sk_buff *) (long) r1; | 1301 | struct sk_buff *skb = (struct sk_buff *) (long) r1; |
1282 | u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags); | 1302 | u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags); |
1303 | int offset = (int) r2; | ||
1283 | __sum16 sum, *ptr; | 1304 | __sum16 sum, *ptr; |
1284 | 1305 | ||
1285 | if (unlikely(offset > 0xffff)) | 1306 | if (unlikely((u32) offset > 0xffff)) |
1286 | return -EFAULT; | 1307 | return -EFAULT; |
1287 | 1308 | ||
1288 | if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum))) | 1309 | offset -= skb->data - skb_mac_header(skb); |
1310 | if (unlikely(skb_cloned(skb) && | ||
1311 | bpf_skb_clone_unwritable(skb, offset + sizeof(sum)))) | ||
1289 | return -EFAULT; | 1312 | return -EFAULT; |
1290 | 1313 | ||
1291 | ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); | 1314 | ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); |
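
Editor's note: the rebasing line converts the program-visible, mac-header-relative offset into one relative to skb->data, which at tc ingress sits just past the 14-byte Ethernet header; the result can legitimately be negative for writes inside the MAC header, which is why offset became a signed int and the clone check grew the signed-length variant above. Worked through for an untagged frame:

/* Sketch: untagged Ethernet at ingress, skb->data == mac header + 14. */
int offset = 23;	/* program asks for mac header + 23 = IPv4 proto */

offset -= skb->data - skb_mac_header(skb);	/* 23 - 14 = 9 */
/* skb_header_pointer(skb, 9, ...) now lands on ip->protocol; asking
 * for offset 6 (the source MAC) would yield -8, inside the MAC header.
 */
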
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index a3abb719221f..78fc04ad36fc 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/export.h> | 16 | #include <linux/export.h> |
17 | #include <linux/user_namespace.h> | 17 | #include <linux/user_namespace.h> |
18 | #include <linux/net_namespace.h> | 18 | #include <linux/net_namespace.h> |
19 | #include <linux/rtnetlink.h> | ||
20 | #include <net/sock.h> | 19 | #include <net/sock.h> |
21 | #include <net/netlink.h> | 20 | #include <net/netlink.h> |
22 | #include <net/net_namespace.h> | 21 | #include <net/net_namespace.h> |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 3b6e5830256e..d1967dab9cc6 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -4124,19 +4124,21 @@ EXPORT_SYMBOL(skb_try_coalesce); | |||
4124 | */ | 4124 | */ |
4125 | void skb_scrub_packet(struct sk_buff *skb, bool xnet) | 4125 | void skb_scrub_packet(struct sk_buff *skb, bool xnet) |
4126 | { | 4126 | { |
4127 | if (xnet) | ||
4128 | skb_orphan(skb); | ||
4129 | skb->tstamp.tv64 = 0; | 4127 | skb->tstamp.tv64 = 0; |
4130 | skb->pkt_type = PACKET_HOST; | 4128 | skb->pkt_type = PACKET_HOST; |
4131 | skb->skb_iif = 0; | 4129 | skb->skb_iif = 0; |
4132 | skb->ignore_df = 0; | 4130 | skb->ignore_df = 0; |
4133 | skb_dst_drop(skb); | 4131 | skb_dst_drop(skb); |
4134 | skb->mark = 0; | ||
4135 | skb_sender_cpu_clear(skb); | 4132 | skb_sender_cpu_clear(skb); |
4136 | skb_init_secmark(skb); | ||
4137 | secpath_reset(skb); | 4133 | secpath_reset(skb); |
4138 | nf_reset(skb); | 4134 | nf_reset(skb); |
4139 | nf_reset_trace(skb); | 4135 | nf_reset_trace(skb); |
4136 | |||
4137 | if (!xnet) | ||
4138 | return; | ||
4139 | |||
4140 | skb_orphan(skb); | ||
4141 | skb->mark = 0; | ||
4140 | } | 4142 | } |
4141 | EXPORT_SYMBOL_GPL(skb_scrub_packet); | 4143 | EXPORT_SYMBOL_GPL(skb_scrub_packet); |
4142 | 4144 | ||
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 5eaadabe23a1..079a224471e7 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c | |||
@@ -124,7 +124,7 @@ static ssize_t temp1_max_store(struct device *dev, | |||
124 | 124 | ||
125 | return count; | 125 | return count; |
126 | } | 126 | } |
127 | static DEVICE_ATTR(temp1_max, S_IRUGO, temp1_max_show, temp1_max_store); | 127 | static DEVICE_ATTR_RW(temp1_max); |
128 | 128 | ||
129 | static ssize_t temp1_max_alarm_show(struct device *dev, | 129 | static ssize_t temp1_max_alarm_show(struct device *dev, |
130 | struct device_attribute *attr, char *buf) | 130 | struct device_attribute *attr, char *buf) |
@@ -159,8 +159,8 @@ static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj, | |||
159 | if (index == 1) { | 159 | if (index == 1) { |
160 | if (!drv->get_temp_limit) | 160 | if (!drv->get_temp_limit) |
161 | mode = 0; | 161 | mode = 0; |
162 | else if (drv->set_temp_limit) | 162 | else if (!drv->set_temp_limit) |
163 | mode |= S_IWUSR; | 163 | mode &= ~S_IWUSR; |
164 | } else if (index == 2 && !drv->get_temp_alarm) { | 164 | } else if (index == 2 && !drv->get_temp_alarm) { |
165 | mode = 0; | 165 | mode = 0; |
166 | } | 166 | } |
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index af150b43b214..34968cd5c146 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c | |||
@@ -711,11 +711,10 @@ static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
711 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 711 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
712 | skb, FOU_CMD_GET); | 712 | skb, FOU_CMD_GET); |
713 | if (ret) | 713 | if (ret) |
714 | goto done; | 714 | break; |
715 | } | 715 | } |
716 | mutex_unlock(&fn->fou_lock); | 716 | mutex_unlock(&fn->fou_lock); |
717 | 717 | ||
718 | done: | ||
719 | cb->args[0] = idx; | 718 | cb->args[0] = idx; |
720 | return skb->len; | 719 | return skb->len; |
721 | } | 720 | } |
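
Editor's note: the fou dump fix is a lock leak — "goto done" jumped over mutex_unlock(), so a dump that filled its skb mid-walk returned with fn->fou_lock still held. Breaking out of the loop funnels every exit through the unlock; the general shape, with example_fill_one as a hypothetical helper:

mutex_lock(&fn->fou_lock);
list_for_each_entry(fou, &fn->fou_list, list) {
	if (example_fill_one(skb, fou))	/* skb full: stop filling */
		break;			/* ...but never skip the unlock */
}
mutex_unlock(&fn->fou_lock);

cb->args[0] = idx;
return skb->len;
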
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 70e8b3c308ec..bb77ebdae3b3 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -111,6 +111,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, | |||
111 | const struct nlmsghdr *unlh) | 111 | const struct nlmsghdr *unlh) |
112 | { | 112 | { |
113 | const struct inet_sock *inet = inet_sk(sk); | 113 | const struct inet_sock *inet = inet_sk(sk); |
114 | const struct tcp_congestion_ops *ca_ops; | ||
114 | const struct inet_diag_handler *handler; | 115 | const struct inet_diag_handler *handler; |
115 | int ext = req->idiag_ext; | 116 | int ext = req->idiag_ext; |
116 | struct inet_diag_msg *r; | 117 | struct inet_diag_msg *r; |
@@ -208,16 +209,31 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, | |||
208 | info = nla_data(attr); | 209 | info = nla_data(attr); |
209 | } | 210 | } |
210 | 211 | ||
211 | if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) | 212 | if (ext & (1 << (INET_DIAG_CONG - 1))) { |
212 | if (nla_put_string(skb, INET_DIAG_CONG, | 213 | int err = 0; |
213 | icsk->icsk_ca_ops->name) < 0) | 214 | |
215 | rcu_read_lock(); | ||
216 | ca_ops = READ_ONCE(icsk->icsk_ca_ops); | ||
217 | if (ca_ops) | ||
218 | err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name); | ||
219 | rcu_read_unlock(); | ||
220 | if (err < 0) | ||
214 | goto errout; | 221 | goto errout; |
222 | } | ||
215 | 223 | ||
216 | handler->idiag_get_info(sk, r, info); | 224 | handler->idiag_get_info(sk, r, info); |
217 | 225 | ||
218 | if (sk->sk_state < TCP_TIME_WAIT && | 226 | if (sk->sk_state < TCP_TIME_WAIT) { |
219 | icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info) | 227 | int err = 0; |
220 | icsk->icsk_ca_ops->get_info(sk, ext, skb); | 228 | |
229 | rcu_read_lock(); | ||
230 | ca_ops = READ_ONCE(icsk->icsk_ca_ops); | ||
231 | if (ca_ops && ca_ops->get_info) | ||
232 | err = ca_ops->get_info(sk, ext, skb); | ||
233 | rcu_read_unlock(); | ||
234 | if (err < 0) | ||
235 | goto errout; | ||
236 | } | ||
221 | 237 | ||
222 | out: | 238 | out: |
223 | nlmsg_end(skb, nlh); | 239 | nlmsg_end(skb, nlh); |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 18e3a12eb1b2..59c8a027721b 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2595,6 +2595,7 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info) | |||
2595 | const struct tcp_sock *tp = tcp_sk(sk); | 2595 | const struct tcp_sock *tp = tcp_sk(sk); |
2596 | const struct inet_connection_sock *icsk = inet_csk(sk); | 2596 | const struct inet_connection_sock *icsk = inet_csk(sk); |
2597 | u32 now = tcp_time_stamp; | 2597 | u32 now = tcp_time_stamp; |
2598 | u32 rate; | ||
2598 | 2599 | ||
2599 | memset(info, 0, sizeof(*info)); | 2600 | memset(info, 0, sizeof(*info)); |
2600 | 2601 | ||
@@ -2655,10 +2656,11 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info) | |||
2655 | 2656 | ||
2656 | info->tcpi_total_retrans = tp->total_retrans; | 2657 | info->tcpi_total_retrans = tp->total_retrans; |
2657 | 2658 | ||
2658 | info->tcpi_pacing_rate = sk->sk_pacing_rate != ~0U ? | 2659 | rate = READ_ONCE(sk->sk_pacing_rate); |
2659 | sk->sk_pacing_rate : ~0ULL; | 2660 | info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL; |
2660 | info->tcpi_max_pacing_rate = sk->sk_max_pacing_rate != ~0U ? | 2661 | |
2661 | sk->sk_max_pacing_rate : ~0ULL; | 2662 | rate = READ_ONCE(sk->sk_max_pacing_rate); |
2663 | info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL; | ||
2662 | } | 2664 | } |
2663 | EXPORT_SYMBOL_GPL(tcp_get_info); | 2665 | EXPORT_SYMBOL_GPL(tcp_get_info); |
2664 | 2666 | ||
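
Editor's note: sk_pacing_rate is a u32 whose all-ones value means "unlimited", while the tcp_info fields are u64; tcp_get_info() runs locklessly, so the value is sampled once with READ_ONCE() — the test and the assignment cannot see two different values under a concurrent update — and the sentinel is widened explicitly. The conversion, annotated:

u32 rate = READ_ONCE(sk->sk_pacing_rate);	/* one racy-but-stable sample */

/* ~0U is the "unlimited" sentinel; widen it to ~0ULL so it does not
 * masquerade as a real ~4.29 GB/s pacing rate in the 64-bit field.
 */
info->tcpi_pacing_rate = (rate != ~0U) ? rate : ~0ULL;
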
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index b504371af742..4376016f7fa5 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c | |||
@@ -277,7 +277,7 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) | |||
277 | } | 277 | } |
278 | } | 278 | } |
279 | 279 | ||
280 | static void dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) | 280 | static int dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) |
281 | { | 281 | { |
282 | const struct dctcp *ca = inet_csk_ca(sk); | 282 | const struct dctcp *ca = inet_csk_ca(sk); |
283 | 283 | ||
@@ -297,8 +297,9 @@ static void dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) | |||
297 | info.dctcp_ab_tot = ca->acked_bytes_total; | 297 | info.dctcp_ab_tot = ca->acked_bytes_total; |
298 | } | 298 | } |
299 | 299 | ||
300 | nla_put(skb, INET_DIAG_DCTCPINFO, sizeof(info), &info); | 300 | return nla_put(skb, INET_DIAG_DCTCPINFO, sizeof(info), &info); |
301 | } | 301 | } |
302 | return 0; | ||
302 | } | 303 | } |
303 | 304 | ||
304 | static struct tcp_congestion_ops dctcp __read_mostly = { | 305 | static struct tcp_congestion_ops dctcp __read_mostly = { |
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index 1d5a30a90adf..67476f085e48 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c | |||
@@ -300,8 +300,7 @@ static u32 tcp_illinois_ssthresh(struct sock *sk) | |||
300 | } | 300 | } |
301 | 301 | ||
302 | /* Extract info for Tcp socket info provided via netlink. */ | 302 | /* Extract info for Tcp socket info provided via netlink. */ |
303 | static void tcp_illinois_info(struct sock *sk, u32 ext, | 303 | static int tcp_illinois_info(struct sock *sk, u32 ext, struct sk_buff *skb) |
304 | struct sk_buff *skb) | ||
305 | { | 304 | { |
306 | const struct illinois *ca = inet_csk_ca(sk); | 305 | const struct illinois *ca = inet_csk_ca(sk); |
307 | 306 | ||
@@ -318,8 +317,9 @@ static void tcp_illinois_info(struct sock *sk, u32 ext, | |||
318 | do_div(t, info.tcpv_rttcnt); | 317 | do_div(t, info.tcpv_rttcnt); |
319 | info.tcpv_rtt = t; | 318 | info.tcpv_rtt = t; |
320 | } | 319 | } |
321 | nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); | 320 | return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); |
322 | } | 321 | } |
322 | return 0; | ||
323 | } | 323 | } |
324 | 324 | ||
325 | static struct tcp_congestion_ops tcp_illinois __read_mostly = { | 325 | static struct tcp_congestion_ops tcp_illinois __read_mostly = { |
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index a6afde666ab1..c71a1b8f7bde 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c | |||
@@ -286,7 +286,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) | |||
286 | } | 286 | } |
287 | 287 | ||
288 | /* Extract info for Tcp socket info provided via netlink. */ | 288 | /* Extract info for Tcp socket info provided via netlink. */ |
289 | void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) | 289 | int tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) |
290 | { | 290 | { |
291 | const struct vegas *ca = inet_csk_ca(sk); | 291 | const struct vegas *ca = inet_csk_ca(sk); |
292 | if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { | 292 | if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { |
@@ -297,8 +297,9 @@ void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) | |||
297 | .tcpv_minrtt = ca->minRTT, | 297 | .tcpv_minrtt = ca->minRTT, |
298 | }; | 298 | }; |
299 | 299 | ||
300 | nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); | 300 | return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); |
301 | } | 301 | } |
302 | return 0; | ||
302 | } | 303 | } |
303 | EXPORT_SYMBOL_GPL(tcp_vegas_get_info); | 304 | EXPORT_SYMBOL_GPL(tcp_vegas_get_info); |
304 | 305 | ||
diff --git a/net/ipv4/tcp_vegas.h b/net/ipv4/tcp_vegas.h index 0531b99d8637..e8a6b33cc61d 100644 --- a/net/ipv4/tcp_vegas.h +++ b/net/ipv4/tcp_vegas.h | |||
@@ -19,6 +19,6 @@ void tcp_vegas_init(struct sock *sk); | |||
19 | void tcp_vegas_state(struct sock *sk, u8 ca_state); | 19 | void tcp_vegas_state(struct sock *sk, u8 ca_state); |
20 | void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us); | 20 | void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us); |
21 | void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event); | 21 | void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event); |
22 | void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb); | 22 | int tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb); |
23 | 23 | ||
24 | #endif /* __TCP_VEGAS_H */ | 24 | #endif /* __TCP_VEGAS_H */ |
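tcp_vegas_get_info() is declared in tcp_vegas.h and exported because other congestion modules reuse the Vegas helpers wholesale (tcp_yeah, if memory serves, is one such consumer), so the prototype change ripples through here. A sketch of such a consumer, with placeholder names for everything not taken from the header:

    #include "tcp_vegas.h"

    static struct tcp_congestion_ops tcp_myca __read_mostly = {
            .init           = tcp_vegas_init,
            .ssthresh       = tcp_reno_ssthresh,
            .cong_avoid     = tcp_reno_cong_avoid,
            .pkts_acked     = tcp_vegas_pkts_acked,
            .get_info       = tcp_vegas_get_info,   /* now returns int */
            .owner          = THIS_MODULE,
            .name           = "myca",               /* hypothetical module */
    };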
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c index bb63fba47d47..b3c57cceb990 100644 --- a/net/ipv4/tcp_westwood.c +++ b/net/ipv4/tcp_westwood.c | |||
@@ -256,8 +256,7 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event) | |||
256 | } | 256 | } |
257 | 257 | ||
258 | /* Extract info for Tcp socket info provided via netlink. */ | 258 | /* Extract info for Tcp socket info provided via netlink. */ |
259 | static void tcp_westwood_info(struct sock *sk, u32 ext, | 259 | static int tcp_westwood_info(struct sock *sk, u32 ext, struct sk_buff *skb) |
260 | struct sk_buff *skb) | ||
261 | { | 260 | { |
262 | const struct westwood *ca = inet_csk_ca(sk); | 261 | const struct westwood *ca = inet_csk_ca(sk); |
263 | 262 | ||
@@ -268,8 +267,9 @@ static void tcp_westwood_info(struct sock *sk, u32 ext, | |||
268 | .tcpv_minrtt = jiffies_to_usecs(ca->rtt_min), | 267 | .tcpv_minrtt = jiffies_to_usecs(ca->rtt_min), |
269 | }; | 268 | }; |
270 | 269 | ||
271 | nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); | 270 | return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); |
272 | } | 271 | } |
272 | return 0; | ||
273 | } | 273 | } |
274 | 274 | ||
275 | static struct tcp_congestion_ops tcp_westwood __read_mostly = { | 275 | static struct tcp_congestion_ops tcp_westwood __read_mostly = { |
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 4d2cede17468..dc6a2d324bd8 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c | |||
@@ -38,6 +38,9 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act, | |||
38 | struct tcf_bpf *prog = act->priv; | 38 | struct tcf_bpf *prog = act->priv; |
39 | int action, filter_res; | 39 | int action, filter_res; |
40 | 40 | ||
41 | if (unlikely(!skb_mac_header_was_set(skb))) | ||
42 | return TC_ACT_UNSPEC; | ||
43 | |||
41 | spin_lock(&prog->tcf_lock); | 44 | spin_lock(&prog->tcf_lock); |
42 | 45 | ||
43 | prog->tcf_tm.lastuse = jiffies; | 46 | prog->tcf_tm.lastuse = jiffies; |
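The new guard bails out before the eBPF program runs on an skb whose mac header was never set, since the program's link-layer-relative loads would otherwise be computed from a garbage offset. For reference, skb_mac_header_was_set() is (paraphrasing include/linux/skbuff.h) just a check against the "unset" sentinel:

    /* Paraphrased sketch of the helper: an unset mac header offset is
     * encoded as the all-ones value.
     */
    static inline int skb_mac_header_was_set(const struct sk_buff *skb)
    {
            return skb->mac_header != (typeof(skb->mac_header))~0U;
    }

Returning TC_ACT_UNSPEC rather than a drop code lets the rest of the action chain proceed as if this action had not matched.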
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 5953517ec059..3f63ceac8e01 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -157,7 +157,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, | |||
157 | 157 | ||
158 | if (!(at & AT_EGRESS)) { | 158 | if (!(at & AT_EGRESS)) { |
159 | if (m->tcfm_ok_push) | 159 | if (m->tcfm_ok_push) |
160 | skb_push(skb2, skb2->dev->hard_header_len); | 160 | skb_push(skb2, skb->mac_len); |
161 | } | 161 | } |
162 | 162 | ||
163 | /* mirror is always swallowed */ | 163 | /* mirror is always swallowed */ |
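On the ingress path the link-layer header has already been pulled, and skb->mac_len records how many bytes that actually was; dev->hard_header_len is only the device's nominal header size and disagrees with the on-wire header for, e.g., VLAN-tagged frames. A hedged illustration of why the old expression under-pushed:

    /* For a VLAN frame received on an Ethernet device (illustrative
     * values, assuming standard 802.1Q tagging):
     *
     *   dev->hard_header_len == ETH_HLEN             == 14
     *   skb->mac_len         == ETH_HLEN + VLAN_HLEN == 18
     *
     * so pushing hard_header_len left the redirected clone 4 bytes
     * short of its real link-layer header.
     */
    skb_push(skb2, skb->mac_len);   /* restore exactly what ingress pulled */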
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 5c4171c5d2bd..91bd9c19471d 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c | |||
@@ -66,6 +66,9 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, | |||
66 | struct cls_bpf_prog *prog; | 66 | struct cls_bpf_prog *prog; |
67 | int ret = -1; | 67 | int ret = -1; |
68 | 68 | ||
69 | if (unlikely(!skb_mac_header_was_set(skb))) | ||
70 | return -1; | ||
71 | |||
69 | /* Needed here for accessing maps. */ | 72 | /* Needed here for accessing maps. */ |
70 | rcu_read_lock(); | 73 | rcu_read_lock(); |
71 | list_for_each_entry_rcu(prog, &head->plist, link) { | 74 | list_for_each_entry_rcu(prog, &head->plist, link) { |
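Same guard as in act_bpf above, with the bail-out value adjusted to each hook's convention:

    /* Bail-out conventions for an skb without a mac header, per hook:
     *
     *   act_bpf (action)     -> TC_ACT_UNSPEC  (continue the action chain)
     *   cls_bpf (classifier) -> -1             (no match, try next classifier)
     */
    if (unlikely(!skb_mac_header_was_set(skb)))
            return -1;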
diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c index 7cf3f42a6e39..7c27710f8296 100644 --- a/samples/bpf/tcbpf1_kern.c +++ b/samples/bpf/tcbpf1_kern.c | |||
@@ -4,6 +4,8 @@ | |||
4 | #include <uapi/linux/ip.h> | 4 | #include <uapi/linux/ip.h> |
5 | #include <uapi/linux/in.h> | 5 | #include <uapi/linux/in.h> |
6 | #include <uapi/linux/tcp.h> | 6 | #include <uapi/linux/tcp.h> |
7 | #include <uapi/linux/filter.h> | ||
8 | |||
7 | #include "bpf_helpers.h" | 9 | #include "bpf_helpers.h" |
8 | 10 | ||
9 | /* compiler workaround */ | 11 | /* compiler workaround */ |
@@ -14,18 +16,12 @@ static inline void set_dst_mac(struct __sk_buff *skb, char *mac) | |||
14 | bpf_skb_store_bytes(skb, 0, mac, ETH_ALEN, 1); | 16 | bpf_skb_store_bytes(skb, 0, mac, ETH_ALEN, 1); |
15 | } | 17 | } |
16 | 18 | ||
17 | /* use 1 below for ingress qdisc and 0 for egress */ | ||
18 | #if 0 | ||
19 | #undef ETH_HLEN | ||
20 | #define ETH_HLEN 0 | ||
21 | #endif | ||
22 | |||
23 | #define IP_CSUM_OFF (ETH_HLEN + offsetof(struct iphdr, check)) | 19 | #define IP_CSUM_OFF (ETH_HLEN + offsetof(struct iphdr, check)) |
24 | #define TOS_OFF (ETH_HLEN + offsetof(struct iphdr, tos)) | 20 | #define TOS_OFF (ETH_HLEN + offsetof(struct iphdr, tos)) |
25 | 21 | ||
26 | static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos) | 22 | static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos) |
27 | { | 23 | { |
28 | __u8 old_tos = load_byte(skb, TOS_OFF); | 24 | __u8 old_tos = load_byte(skb, BPF_LL_OFF + TOS_OFF); |
29 | 25 | ||
30 | bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2); | 26 | bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2); |
31 | bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0); | 27 | bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0); |
@@ -38,7 +34,7 @@ static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos) | |||
38 | 34 | ||
39 | static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip) | 35 | static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip) |
40 | { | 36 | { |
41 | __u32 old_ip = _htonl(load_word(skb, IP_SRC_OFF)); | 37 | __u32 old_ip = _htonl(load_word(skb, BPF_LL_OFF + IP_SRC_OFF)); |
42 | 38 | ||
43 | bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip)); | 39 | bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip)); |
44 | bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip)); | 40 | bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip)); |
@@ -48,7 +44,7 @@ static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip) | |||
48 | #define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest)) | 44 | #define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest)) |
49 | static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port) | 45 | static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port) |
50 | { | 46 | { |
51 | __u16 old_port = htons(load_half(skb, TCP_DPORT_OFF)); | 47 | __u16 old_port = htons(load_half(skb, BPF_LL_OFF + TCP_DPORT_OFF)); |
52 | 48 | ||
53 | bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port)); | 49 | bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port)); |
54 | bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0); | 50 | bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0); |
@@ -57,7 +53,7 @@ static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port) | |||
57 | SEC("classifier") | 53 | SEC("classifier") |
58 | int bpf_prog1(struct __sk_buff *skb) | 54 | int bpf_prog1(struct __sk_buff *skb) |
59 | { | 55 | { |
60 | __u8 proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)); | 56 | __u8 proto = load_byte(skb, BPF_LL_OFF + ETH_HLEN + offsetof(struct iphdr, protocol)); |
61 | long *value; | 57 | long *value; |
62 | 58 | ||
63 | if (proto == IPPROTO_TCP) { | 59 | if (proto == IPPROTO_TCP) { |
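Every load_byte/load_half/load_word in the sample is now offset by BPF_LL_OFF, which (judging by the new uapi/linux/filter.h include) aliases the classic SKF_LL_OFF magic offset, -0x200000: it anchors the load at the link-layer header regardless of where skb->data currently points. That makes the deleted "#if 0 / redefine ETH_HLEN to 0 for ingress" hack obsolete, since one program body now works in both qdisc directions:

    /* With the link-layer anchor, the same expression is correct on
     * ingress (skb->data past the mac header) and egress (skb->data
     * at the mac header).
     */
    __u8 proto = load_byte(skb, BPF_LL_OFF + ETH_HLEN +
                                offsetof(struct iphdr, protocol));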
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c index 9ab645698ffb..12f3780af73f 100644 --- a/samples/bpf/test_verifier.c +++ b/samples/bpf/test_verifier.c | |||
@@ -721,6 +721,28 @@ static struct bpf_test tests[] = { | |||
721 | .errstr = "different pointers", | 721 | .errstr = "different pointers", |
722 | .result = REJECT, | 722 | .result = REJECT, |
723 | }, | 723 | }, |
724 | { | ||
725 | "access skb fields bad4", | ||
726 | .insns = { | ||
727 | BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3), | ||
728 | BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, | ||
729 | offsetof(struct __sk_buff, len)), | ||
730 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
731 | BPF_EXIT_INSN(), | ||
732 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
733 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
734 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
735 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
736 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), | ||
737 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
738 | BPF_EXIT_INSN(), | ||
739 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), | ||
740 | BPF_JMP_IMM(BPF_JA, 0, 0, -13), | ||
741 | }, | ||
742 | .fixup = {7}, | ||
743 | .errstr = "different pointers", | ||
744 | .result = REJECT, | ||
745 | }, | ||
724 | }; | 746 | }; |
725 | 747 | ||
726 | static int probe_filter_length(struct bpf_insn *fp) | 748 | static int probe_filter_length(struct bpf_insn *fp) |
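The new "access skb fields bad4" test funnels two pointer types into the same load site: one path reaches the BPF_LDX of skb->len with r1 still holding the context pointer (the JGE fall-through), while the other reaches it via the backward BPF_JA with r1 holding a map value pointer from bpf_map_lookup_elem(). Plain C has no objection to that shape, which is exactly what the verifier's "different pointers" rejection is for. A hedged analogue in ordinary C, for illustration only:

    #include <stdint.h>

    struct ctx { uint32_t len; };

    /* One pointer variable, two pointee types at the same load site.
     * A C compiler accepts this; the eBPF verifier must reject the
     * equivalent instruction stream.
     */
    uint32_t prog(struct ctx *skb, uint32_t *map_val)
    {
            void *p = skb;
            uint32_t v = 0;

            for (int pass = 0; pass < 2; pass++) {
                    v = ((struct ctx *)p)->len;     /* shared load site */
                    p = map_val;                    /* pointer type changes */
            }
            return v;
    }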