Diffstat (limited to 'drivers/net')
119 files changed, 1394 insertions, 765 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index f0f5eab0fab1..798ae69fb63c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -175,7 +175,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to " | |||
175 | "the same MAC; 0 for none (default), " | 175 | "the same MAC; 0 for none (default), " |
176 | "1 for active, 2 for follow"); | 176 | "1 for active, 2 for follow"); |
177 | module_param(all_slaves_active, int, 0); | 177 | module_param(all_slaves_active, int, 0); |
178 | MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" | 178 | MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface " |
179 | "by setting active flag for all slaves; " | 179 | "by setting active flag for all slaves; " |
180 | "0 for never (default), 1 for always."); | 180 | "0 for never (default), 1 for always."); |
181 | module_param(resend_igmp, int, 0); | 181 | module_param(resend_igmp, int, 0); |
@@ -3659,8 +3659,14 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev | |||
3659 | else | 3659 | else |
3660 | bond_xmit_slave_id(bond, skb, 0); | 3660 | bond_xmit_slave_id(bond, skb, 0); |
3661 | } else { | 3661 | } else { |
3662 | slave_id = bond_rr_gen_slave_id(bond); | 3662 | int slave_cnt = ACCESS_ONCE(bond->slave_cnt); |
3663 | bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt); | 3663 | |
3664 | if (likely(slave_cnt)) { | ||
3665 | slave_id = bond_rr_gen_slave_id(bond); | ||
3666 | bond_xmit_slave_id(bond, skb, slave_id % slave_cnt); | ||
3667 | } else { | ||
3668 | dev_kfree_skb_any(skb); | ||
3669 | } | ||
3664 | } | 3670 | } |
3665 | 3671 | ||
3666 | return NETDEV_TX_OK; | 3672 | return NETDEV_TX_OK; |
@@ -3691,8 +3697,13 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d | |||
3691 | static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) | 3697 | static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) |
3692 | { | 3698 | { |
3693 | struct bonding *bond = netdev_priv(bond_dev); | 3699 | struct bonding *bond = netdev_priv(bond_dev); |
3700 | int slave_cnt = ACCESS_ONCE(bond->slave_cnt); | ||
3694 | 3701 | ||
3695 | bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt); | 3702 | if (likely(slave_cnt)) |
3703 | bond_xmit_slave_id(bond, skb, | ||
3704 | bond_xmit_hash(bond, skb) % slave_cnt); | ||
3705 | else | ||
3706 | dev_kfree_skb_any(skb); | ||
3696 | 3707 | ||
3697 | return NETDEV_TX_OK; | 3708 | return NETDEV_TX_OK; |
3698 | } | 3709 | } |
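The bond_main.c hunks above guard the round-robin and XOR transmit paths against a slave being removed concurrently: bond->slave_cnt is read once through ACCESS_ONCE() and the modulo is only taken when that snapshot is non-zero, otherwise the skb is dropped. A minimal, self-contained sketch of the same snapshot-and-check pattern (plain C11 atomics stand in for ACCESS_ONCE(); the names below are illustrative, not the driver's):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int slave_cnt;            /* stand-in for bond->slave_cnt */

    static void xmit_or_drop(unsigned int hash)
    {
            /* read the shared counter exactly once */
            int cnt = atomic_load_explicit(&slave_cnt, memory_order_relaxed);

            if (cnt > 0)                    /* guard the modulo against zero */
                    printf("tx on slave %u\n", hash % (unsigned int)cnt);
            else
                    printf("no slaves, drop\n");    /* mirrors dev_kfree_skb_any() */
    }

    int main(void)
    {
            atomic_store(&slave_cnt, 3);
            xmit_or_drop(7);                /* tx on slave 1 */
            atomic_store(&slave_cnt, 0);    /* last slave just left the bond */
            xmit_or_drop(7);                /* dropped instead of dividing by zero */
            return 0;
    }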
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index f07fa89b5fd5..05e1aa090add 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c | |||
@@ -1123,7 +1123,9 @@ static int at91_open(struct net_device *dev) | |||
1123 | struct at91_priv *priv = netdev_priv(dev); | 1123 | struct at91_priv *priv = netdev_priv(dev); |
1124 | int err; | 1124 | int err; |
1125 | 1125 | ||
1126 | clk_enable(priv->clk); | 1126 | err = clk_prepare_enable(priv->clk); |
1127 | if (err) | ||
1128 | return err; | ||
1127 | 1129 | ||
1128 | /* check or determine and set bittime */ | 1130 | /* check or determine and set bittime */ |
1129 | err = open_candev(dev); | 1131 | err = open_candev(dev); |
@@ -1149,7 +1151,7 @@ static int at91_open(struct net_device *dev) | |||
1149 | out_close: | 1151 | out_close: |
1150 | close_candev(dev); | 1152 | close_candev(dev); |
1151 | out: | 1153 | out: |
1152 | clk_disable(priv->clk); | 1154 | clk_disable_unprepare(priv->clk); |
1153 | 1155 | ||
1154 | return err; | 1156 | return err; |
1155 | } | 1157 | } |
@@ -1166,7 +1168,7 @@ static int at91_close(struct net_device *dev) | |||
1166 | at91_chip_stop(dev, CAN_STATE_STOPPED); | 1168 | at91_chip_stop(dev, CAN_STATE_STOPPED); |
1167 | 1169 | ||
1168 | free_irq(dev->irq, dev); | 1170 | free_irq(dev->irq, dev); |
1169 | clk_disable(priv->clk); | 1171 | clk_disable_unprepare(priv->clk); |
1170 | 1172 | ||
1171 | close_candev(dev); | 1173 | close_candev(dev); |
1172 | 1174 | ||
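In the at91_can hunks above, at91_open() switches from the void clk_enable() to clk_prepare_enable(), whose return value is checked, and both teardown paths switch to the matching clk_disable_unprepare(). A hedged, kernel-style sketch of that pairing (the function name, priv layout and labels are illustrative, not the driver's exact code):

    static int example_open(struct net_device *dev)
    {
            struct example_priv *priv = netdev_priv(dev);   /* illustrative priv type */
            int err;

            err = clk_prepare_enable(priv->clk);    /* can fail, so check it */
            if (err)
                    return err;

            err = open_candev(dev);                 /* check/set bittiming, etc. */
            if (err)
                    goto out_clk;

            /* ... request the IRQ and start the controller ... */
            return 0;

    out_clk:
            clk_disable_unprepare(priv->clk);       /* undo in reverse order */
            return err;
    }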
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 109cb44291f5..fb279d6ae484 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c | |||
@@ -97,14 +97,14 @@ static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable) | |||
97 | ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); | 97 | ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); |
98 | writel(ctrl, priv->raminit_ctrlreg); | 98 | writel(ctrl, priv->raminit_ctrlreg); |
99 | ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance); | 99 | ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance); |
100 | c_can_hw_raminit_wait_ti(priv, ctrl, mask); | 100 | c_can_hw_raminit_wait_ti(priv, mask, ctrl); |
101 | 101 | ||
102 | if (enable) { | 102 | if (enable) { |
103 | /* Set start bit and wait for the done bit. */ | 103 | /* Set start bit and wait for the done bit. */ |
104 | ctrl |= CAN_RAMINIT_START_MASK(priv->instance); | 104 | ctrl |= CAN_RAMINIT_START_MASK(priv->instance); |
105 | writel(ctrl, priv->raminit_ctrlreg); | 105 | writel(ctrl, priv->raminit_ctrlreg); |
106 | ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); | 106 | ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); |
107 | c_can_hw_raminit_wait_ti(priv, ctrl, mask); | 107 | c_can_hw_raminit_wait_ti(priv, mask, ctrl); |
108 | } | 108 | } |
109 | spin_unlock(&raminit_lock); | 109 | spin_unlock(&raminit_lock); |
110 | } | 110 | } |
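The c_can_platform.c hunk above only swaps the last two arguments of c_can_hw_raminit_wait_ti() so that the mask and the expected value are passed in the order the helper takes them. A self-contained sketch of why the order matters for a poll-until-match helper (the helper below is illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    /* wait until (reg & mask) == expected; returns 0 on success */
    static int poll_masked(volatile const uint32_t *reg, uint32_t mask,
                           uint32_t expected)
    {
            for (int i = 0; i < 1000; i++)
                    if ((*reg & mask) == expected)
                            return 0;
            return -1;                              /* timed out */
    }

    int main(void)
    {
            volatile uint32_t status = 0x2;

            /* correct order: mask first, expected value second */
            printf("%d\n", poll_masked(&status, 0x3, 0x2));   /* 0 */
            /* swapped arguments silently wait for the wrong condition */
            printf("%d\n", poll_masked(&status, 0x2, 0x3));   /* -1 */
            return 0;
    }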
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 944aa5d3af6e..6586309329e6 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
@@ -62,7 +62,7 @@ | |||
62 | #define FLEXCAN_MCR_BCC BIT(16) | 62 | #define FLEXCAN_MCR_BCC BIT(16) |
63 | #define FLEXCAN_MCR_LPRIO_EN BIT(13) | 63 | #define FLEXCAN_MCR_LPRIO_EN BIT(13) |
64 | #define FLEXCAN_MCR_AEN BIT(12) | 64 | #define FLEXCAN_MCR_AEN BIT(12) |
65 | #define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f) | 65 | #define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f) |
66 | #define FLEXCAN_MCR_IDAM_A (0 << 8) | 66 | #define FLEXCAN_MCR_IDAM_A (0 << 8) |
67 | #define FLEXCAN_MCR_IDAM_B (1 << 8) | 67 | #define FLEXCAN_MCR_IDAM_B (1 << 8) |
68 | #define FLEXCAN_MCR_IDAM_C (2 << 8) | 68 | #define FLEXCAN_MCR_IDAM_C (2 << 8) |
@@ -125,7 +125,9 @@ | |||
125 | FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT) | 125 | FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT) |
126 | 126 | ||
127 | /* FLEXCAN interrupt flag register (IFLAG) bits */ | 127 | /* FLEXCAN interrupt flag register (IFLAG) bits */ |
128 | #define FLEXCAN_TX_BUF_ID 8 | 128 | /* Errata ERR005829 step7: Reserve first valid MB */ |
129 | #define FLEXCAN_TX_BUF_RESERVED 8 | ||
130 | #define FLEXCAN_TX_BUF_ID 9 | ||
129 | #define FLEXCAN_IFLAG_BUF(x) BIT(x) | 131 | #define FLEXCAN_IFLAG_BUF(x) BIT(x) |
130 | #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) | 132 | #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) |
131 | #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) | 133 | #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) |
@@ -136,6 +138,17 @@ | |||
136 | 138 | ||
137 | /* FLEXCAN message buffers */ | 139 | /* FLEXCAN message buffers */ |
138 | #define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24) | 140 | #define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24) |
141 | #define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24) | ||
142 | #define FLEXCAN_MB_CODE_RX_EMPTY (0x4 << 24) | ||
143 | #define FLEXCAN_MB_CODE_RX_FULL (0x2 << 24) | ||
144 | #define FLEXCAN_MB_CODE_RX_OVERRRUN (0x6 << 24) | ||
145 | #define FLEXCAN_MB_CODE_RX_RANSWER (0xa << 24) | ||
146 | |||
147 | #define FLEXCAN_MB_CODE_TX_INACTIVE (0x8 << 24) | ||
148 | #define FLEXCAN_MB_CODE_TX_ABORT (0x9 << 24) | ||
149 | #define FLEXCAN_MB_CODE_TX_DATA (0xc << 24) | ||
150 | #define FLEXCAN_MB_CODE_TX_TANSWER (0xe << 24) | ||
151 | |||
139 | #define FLEXCAN_MB_CNT_SRR BIT(22) | 152 | #define FLEXCAN_MB_CNT_SRR BIT(22) |
140 | #define FLEXCAN_MB_CNT_IDE BIT(21) | 153 | #define FLEXCAN_MB_CNT_IDE BIT(21) |
141 | #define FLEXCAN_MB_CNT_RTR BIT(20) | 154 | #define FLEXCAN_MB_CNT_RTR BIT(20) |
@@ -298,7 +311,7 @@ static int flexcan_chip_enable(struct flexcan_priv *priv) | |||
298 | flexcan_write(reg, &regs->mcr); | 311 | flexcan_write(reg, &regs->mcr); |
299 | 312 | ||
300 | while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) | 313 | while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) |
301 | usleep_range(10, 20); | 314 | udelay(10); |
302 | 315 | ||
303 | if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK) | 316 | if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK) |
304 | return -ETIMEDOUT; | 317 | return -ETIMEDOUT; |
@@ -317,7 +330,7 @@ static int flexcan_chip_disable(struct flexcan_priv *priv) | |||
317 | flexcan_write(reg, &regs->mcr); | 330 | flexcan_write(reg, &regs->mcr); |
318 | 331 | ||
319 | while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) | 332 | while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) |
320 | usleep_range(10, 20); | 333 | udelay(10); |
321 | 334 | ||
322 | if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) | 335 | if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) |
323 | return -ETIMEDOUT; | 336 | return -ETIMEDOUT; |
@@ -336,7 +349,7 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv) | |||
336 | flexcan_write(reg, &regs->mcr); | 349 | flexcan_write(reg, &regs->mcr); |
337 | 350 | ||
338 | while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) | 351 | while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) |
339 | usleep_range(100, 200); | 352 | udelay(100); |
340 | 353 | ||
341 | if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) | 354 | if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) |
342 | return -ETIMEDOUT; | 355 | return -ETIMEDOUT; |
@@ -355,7 +368,7 @@ static int flexcan_chip_unfreeze(struct flexcan_priv *priv) | |||
355 | flexcan_write(reg, &regs->mcr); | 368 | flexcan_write(reg, &regs->mcr); |
356 | 369 | ||
357 | while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) | 370 | while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) |
358 | usleep_range(10, 20); | 371 | udelay(10); |
359 | 372 | ||
360 | if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK) | 373 | if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK) |
361 | return -ETIMEDOUT; | 374 | return -ETIMEDOUT; |
@@ -370,7 +383,7 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv) | |||
370 | 383 | ||
371 | flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr); | 384 | flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr); |
372 | while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)) | 385 | while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)) |
373 | usleep_range(10, 20); | 386 | udelay(10); |
374 | 387 | ||
375 | if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST) | 388 | if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST) |
376 | return -ETIMEDOUT; | 389 | return -ETIMEDOUT; |
@@ -428,6 +441,14 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
428 | flexcan_write(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id); | 441 | flexcan_write(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id); |
429 | flexcan_write(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); | 442 | flexcan_write(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); |
430 | 443 | ||
444 | /* Errata ERR005829 step8: | ||
445 | * Write twice INACTIVE(0x8) code to first MB. | ||
446 | */ | ||
447 | flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, | ||
448 | &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); | ||
449 | flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, | ||
450 | &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); | ||
451 | |||
431 | return NETDEV_TX_OK; | 452 | return NETDEV_TX_OK; |
432 | } | 453 | } |
433 | 454 | ||
@@ -744,6 +765,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) | |||
744 | stats->tx_bytes += can_get_echo_skb(dev, 0); | 765 | stats->tx_bytes += can_get_echo_skb(dev, 0); |
745 | stats->tx_packets++; | 766 | stats->tx_packets++; |
746 | can_led_event(dev, CAN_LED_EVENT_TX); | 767 | can_led_event(dev, CAN_LED_EVENT_TX); |
768 | /* after sending a RTR frame mailbox is in RX mode */ | ||
769 | flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, | ||
770 | &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); | ||
747 | flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1); | 771 | flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1); |
748 | netif_wake_queue(dev); | 772 | netif_wake_queue(dev); |
749 | } | 773 | } |
@@ -801,6 +825,7 @@ static int flexcan_chip_start(struct net_device *dev) | |||
801 | struct flexcan_regs __iomem *regs = priv->base; | 825 | struct flexcan_regs __iomem *regs = priv->base; |
802 | int err; | 826 | int err; |
803 | u32 reg_mcr, reg_ctrl; | 827 | u32 reg_mcr, reg_ctrl; |
828 | int i; | ||
804 | 829 | ||
805 | /* enable module */ | 830 | /* enable module */ |
806 | err = flexcan_chip_enable(priv); | 831 | err = flexcan_chip_enable(priv); |
@@ -867,8 +892,18 @@ static int flexcan_chip_start(struct net_device *dev) | |||
867 | netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); | 892 | netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); |
868 | flexcan_write(reg_ctrl, &regs->ctrl); | 893 | flexcan_write(reg_ctrl, &regs->ctrl); |
869 | 894 | ||
870 | /* Abort any pending TX, mark Mailbox as INACTIVE */ | 895 | /* clear and invalidate all mailboxes first */ |
871 | flexcan_write(FLEXCAN_MB_CNT_CODE(0x4), | 896 | for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->cantxfg); i++) { |
897 | flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE, | ||
898 | &regs->cantxfg[i].can_ctrl); | ||
899 | } | ||
900 | |||
901 | /* Errata ERR005829: mark first TX mailbox as INACTIVE */ | ||
902 | flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, | ||
903 | &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); | ||
904 | |||
905 | /* mark TX mailbox as INACTIVE */ | ||
906 | flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, | ||
872 | &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); | 907 | &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); |
873 | 908 | ||
874 | /* acceptance mask/acceptance code (accept everything) */ | 909 | /* acceptance mask/acceptance code (accept everything) */ |
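The flexcan.c changes above name the mailbox CODE values, reserve the first valid mailbox (FLEXCAN_TX_BUF_RESERVED) and, per errata ERR005829, write the TX_INACTIVE code to that reserved mailbox twice right after a frame is queued; the TX mailbox is also put back to TX_INACTIVE after an RTR frame, since transmitting an RTR leaves the mailbox in receive mode. A self-contained sketch of how the 4-bit CODE field in a mailbox control word is updated (the shift, masks and two-mailbox layout below are illustrative, not the register map):

    #include <stdint.h>
    #include <stdio.h>

    #define MB_CODE_SHIFT           24
    #define MB_CODE_MASK            (0xfu << MB_CODE_SHIFT)
    #define MB_CODE_TX_INACTIVE     (0x8u << MB_CODE_SHIFT)
    #define MB_CODE_TX_DATA         (0xcu << MB_CODE_SHIFT)

    /* fake can_ctrl words for the reserved and the real TX mailbox */
    static uint32_t reserved_mb, tx_mb;

    static void mb_set_code(uint32_t *mb, uint32_t code)
    {
            *mb = (*mb & ~MB_CODE_MASK) | code;
    }

    int main(void)
    {
            mb_set_code(&tx_mb, MB_CODE_TX_DATA);           /* queue a frame */

            /* errata step 8: write TX_INACTIVE twice to the reserved mailbox */
            mb_set_code(&reserved_mb, MB_CODE_TX_INACTIVE);
            mb_set_code(&reserved_mb, MB_CODE_TX_INACTIVE);

            printf("tx code 0x%x, reserved code 0x%x\n",
                   (unsigned)((tx_mb & MB_CODE_MASK) >> MB_CODE_SHIFT),
                   (unsigned)((reserved_mb & MB_CODE_MASK) >> MB_CODE_SHIFT));
            return 0;
    }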
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 7a85590fefb9..e5fac368068a 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c | |||
@@ -70,6 +70,8 @@ struct peak_pci_chan { | |||
70 | #define PEAK_PC_104P_DEVICE_ID 0x0006 /* PCAN-PC/104+ cards */ | 70 | #define PEAK_PC_104P_DEVICE_ID 0x0006 /* PCAN-PC/104+ cards */ |
71 | #define PEAK_PCI_104E_DEVICE_ID 0x0007 /* PCAN-PCI/104 Express cards */ | 71 | #define PEAK_PCI_104E_DEVICE_ID 0x0007 /* PCAN-PCI/104 Express cards */ |
72 | #define PEAK_MPCIE_DEVICE_ID 0x0008 /* The miniPCIe slot cards */ | 72 | #define PEAK_MPCIE_DEVICE_ID 0x0008 /* The miniPCIe slot cards */ |
73 | #define PEAK_PCIE_OEM_ID 0x0009 /* PCAN-PCI Express OEM */ | ||
74 | #define PEAK_PCIEC34_DEVICE_ID 0x000A /* PCAN-PCI Express 34 (one channel) */ | ||
73 | 75 | ||
74 | #define PEAK_PCI_CHAN_MAX 4 | 76 | #define PEAK_PCI_CHAN_MAX 4 |
75 | 77 | ||
@@ -87,6 +89,7 @@ static const struct pci_device_id peak_pci_tbl[] = { | |||
87 | {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, | 89 | {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, |
88 | #ifdef CONFIG_CAN_PEAK_PCIEC | 90 | #ifdef CONFIG_CAN_PEAK_PCIEC |
89 | {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, | 91 | {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, |
92 | {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, | ||
90 | #endif | 93 | #endif |
91 | {0,} | 94 | {0,} |
92 | }; | 95 | }; |
@@ -653,7 +656,8 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
653 | * This must be done *before* register_sja1000dev() but | 656 | * This must be done *before* register_sja1000dev() but |
654 | * *after* devices linkage | 657 | * *after* devices linkage |
655 | */ | 658 | */ |
656 | if (pdev->device == PEAK_PCIEC_DEVICE_ID) { | 659 | if (pdev->device == PEAK_PCIEC_DEVICE_ID || |
660 | pdev->device == PEAK_PCIEC34_DEVICE_ID) { | ||
657 | err = peak_pciec_probe(pdev, dev); | 661 | err = peak_pciec_probe(pdev, dev); |
658 | if (err) { | 662 | if (err) { |
659 | dev_err(&pdev->dev, | 663 | dev_err(&pdev->dev, |
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 059c7414e303..8ca49f04acec 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c | |||
@@ -2129,6 +2129,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2129 | int entry = vp->cur_tx % TX_RING_SIZE; | 2129 | int entry = vp->cur_tx % TX_RING_SIZE; |
2130 | struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; | 2130 | struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; |
2131 | unsigned long flags; | 2131 | unsigned long flags; |
2132 | dma_addr_t dma_addr; | ||
2132 | 2133 | ||
2133 | if (vortex_debug > 6) { | 2134 | if (vortex_debug > 6) { |
2134 | pr_debug("boomerang_start_xmit()\n"); | 2135 | pr_debug("boomerang_start_xmit()\n"); |
@@ -2163,24 +2164,48 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2163 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); | 2164 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); |
2164 | 2165 | ||
2165 | if (!skb_shinfo(skb)->nr_frags) { | 2166 | if (!skb_shinfo(skb)->nr_frags) { |
2166 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, | 2167 | dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, |
2167 | skb->len, PCI_DMA_TODEVICE)); | 2168 | PCI_DMA_TODEVICE); |
2169 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) | ||
2170 | goto out_dma_err; | ||
2171 | |||
2172 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); | ||
2168 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); | 2173 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); |
2169 | } else { | 2174 | } else { |
2170 | int i; | 2175 | int i; |
2171 | 2176 | ||
2172 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, | 2177 | dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, |
2173 | skb_headlen(skb), PCI_DMA_TODEVICE)); | 2178 | skb_headlen(skb), PCI_DMA_TODEVICE); |
2179 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) | ||
2180 | goto out_dma_err; | ||
2181 | |||
2182 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); | ||
2174 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb)); | 2183 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb)); |
2175 | 2184 | ||
2176 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 2185 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
2177 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 2186 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2178 | 2187 | ||
2188 | dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag, | ||
2189 | 0, | ||
2190 | frag->size, | ||
2191 | DMA_TO_DEVICE); | ||
2192 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) { | ||
2193 | for(i = i-1; i >= 0; i--) | ||
2194 | dma_unmap_page(&VORTEX_PCI(vp)->dev, | ||
2195 | le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr), | ||
2196 | le32_to_cpu(vp->tx_ring[entry].frag[i+1].length), | ||
2197 | DMA_TO_DEVICE); | ||
2198 | |||
2199 | pci_unmap_single(VORTEX_PCI(vp), | ||
2200 | le32_to_cpu(vp->tx_ring[entry].frag[0].addr), | ||
2201 | le32_to_cpu(vp->tx_ring[entry].frag[0].length), | ||
2202 | PCI_DMA_TODEVICE); | ||
2203 | |||
2204 | goto out_dma_err; | ||
2205 | } | ||
2206 | |||
2179 | vp->tx_ring[entry].frag[i+1].addr = | 2207 | vp->tx_ring[entry].frag[i+1].addr = |
2180 | cpu_to_le32(pci_map_single( | 2208 | cpu_to_le32(dma_addr); |
2181 | VORTEX_PCI(vp), | ||
2182 | (void *)skb_frag_address(frag), | ||
2183 | skb_frag_size(frag), PCI_DMA_TODEVICE)); | ||
2184 | 2209 | ||
2185 | if (i == skb_shinfo(skb)->nr_frags-1) | 2210 | if (i == skb_shinfo(skb)->nr_frags-1) |
2186 | vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); | 2211 | vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); |
@@ -2189,7 +2214,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2189 | } | 2214 | } |
2190 | } | 2215 | } |
2191 | #else | 2216 | #else |
2192 | vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); | 2217 | dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); |
2218 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) | ||
2219 | goto out_dma_err; | ||
2220 | vp->tx_ring[entry].addr = cpu_to_le32(dma_addr); | ||
2193 | vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); | 2221 | vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); |
2194 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); | 2222 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); |
2195 | #endif | 2223 | #endif |
@@ -2217,7 +2245,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2217 | skb_tx_timestamp(skb); | 2245 | skb_tx_timestamp(skb); |
2218 | iowrite16(DownUnstall, ioaddr + EL3_CMD); | 2246 | iowrite16(DownUnstall, ioaddr + EL3_CMD); |
2219 | spin_unlock_irqrestore(&vp->lock, flags); | 2247 | spin_unlock_irqrestore(&vp->lock, flags); |
2248 | out: | ||
2220 | return NETDEV_TX_OK; | 2249 | return NETDEV_TX_OK; |
2250 | out_dma_err: | ||
2251 | dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n"); | ||
2252 | goto out; | ||
2221 | } | 2253 | } |
2222 | 2254 | ||
2223 | /* The interrupt handler does all of the Rx thread work and cleans up | 2255 | /* The interrupt handler does all of the Rx thread work and cleans up |
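The 3c59x hunks above make boomerang_start_xmit() check every DMA mapping with dma_mapping_error() and, when mapping one of the page fragments fails, unmap the fragments already mapped (and the linear head) before dropping the packet. A self-contained sketch of that unwind-on-failure pattern (the map/unmap helpers are stand-ins, not the DMA API):

    #include <stdbool.h>
    #include <stdio.h>

    #define NFRAGS 4

    /* stand-ins for the mapping calls; frag 2 pretends to fail */
    static bool map_frag(int i)   { return i != 2; }
    static void unmap_frag(int i) { printf("unmap frag %d\n", i); }

    static int xmit(void)
    {
            int i;

            for (i = 0; i < NFRAGS; i++) {
                    if (!map_frag(i))
                            goto err_unwind;
            }
            return 0;                       /* all fragments mapped, hand to HW */

    err_unwind:
            while (--i >= 0)                /* release only what was mapped */
                    unmap_frag(i);
            return -1;                      /* caller frees the skb */
    }

    int main(void)
    {
            return xmit() ? 1 : 0;
    }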
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 23578dfee249..3005155e412b 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c | |||
@@ -123,6 +123,12 @@ static inline void greth_enable_tx(struct greth_private *greth) | |||
123 | GRETH_REGORIN(greth->regs->control, GRETH_TXEN); | 123 | GRETH_REGORIN(greth->regs->control, GRETH_TXEN); |
124 | } | 124 | } |
125 | 125 | ||
126 | static inline void greth_enable_tx_and_irq(struct greth_private *greth) | ||
127 | { | ||
128 | wmb(); /* BDs must been written to memory before enabling TX */ | ||
129 | GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI); | ||
130 | } | ||
131 | |||
126 | static inline void greth_disable_tx(struct greth_private *greth) | 132 | static inline void greth_disable_tx(struct greth_private *greth) |
127 | { | 133 | { |
128 | GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN); | 134 | GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN); |
@@ -447,29 +453,30 @@ out: | |||
447 | return err; | 453 | return err; |
448 | } | 454 | } |
449 | 455 | ||
456 | static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next) | ||
457 | { | ||
458 | if (tx_next < tx_last) | ||
459 | return (tx_last - tx_next) - 1; | ||
460 | else | ||
461 | return GRETH_TXBD_NUM - (tx_next - tx_last) - 1; | ||
462 | } | ||
450 | 463 | ||
451 | static netdev_tx_t | 464 | static netdev_tx_t |
452 | greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | 465 | greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) |
453 | { | 466 | { |
454 | struct greth_private *greth = netdev_priv(dev); | 467 | struct greth_private *greth = netdev_priv(dev); |
455 | struct greth_bd *bdp; | 468 | struct greth_bd *bdp; |
456 | u32 status = 0, dma_addr, ctrl; | 469 | u32 status, dma_addr; |
457 | int curr_tx, nr_frags, i, err = NETDEV_TX_OK; | 470 | int curr_tx, nr_frags, i, err = NETDEV_TX_OK; |
458 | unsigned long flags; | 471 | unsigned long flags; |
472 | u16 tx_last; | ||
459 | 473 | ||
460 | nr_frags = skb_shinfo(skb)->nr_frags; | 474 | nr_frags = skb_shinfo(skb)->nr_frags; |
475 | tx_last = greth->tx_last; | ||
476 | rmb(); /* tx_last is updated by the poll task */ | ||
461 | 477 | ||
462 | /* Clean TX Ring */ | 478 | if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) { |
463 | greth_clean_tx_gbit(dev); | ||
464 | |||
465 | if (greth->tx_free < nr_frags + 1) { | ||
466 | spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/ | ||
467 | ctrl = GRETH_REGLOAD(greth->regs->control); | ||
468 | /* Enable TX IRQ only if not already in poll() routine */ | ||
469 | if (ctrl & GRETH_RXI) | ||
470 | GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI); | ||
471 | netif_stop_queue(dev); | 479 | netif_stop_queue(dev); |
472 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
473 | err = NETDEV_TX_BUSY; | 480 | err = NETDEV_TX_BUSY; |
474 | goto out; | 481 | goto out; |
475 | } | 482 | } |
@@ -488,6 +495,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
488 | /* Linear buf */ | 495 | /* Linear buf */ |
489 | if (nr_frags != 0) | 496 | if (nr_frags != 0) |
490 | status = GRETH_TXBD_MORE; | 497 | status = GRETH_TXBD_MORE; |
498 | else | ||
499 | status = GRETH_BD_IE; | ||
491 | 500 | ||
492 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 501 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
493 | status |= GRETH_TXBD_CSALL; | 502 | status |= GRETH_TXBD_CSALL; |
@@ -545,14 +554,12 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
545 | 554 | ||
546 | /* Enable the descriptor chain by enabling the first descriptor */ | 555 | /* Enable the descriptor chain by enabling the first descriptor */ |
547 | bdp = greth->tx_bd_base + greth->tx_next; | 556 | bdp = greth->tx_bd_base + greth->tx_next; |
548 | greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); | 557 | greth_write_bd(&bdp->stat, |
549 | greth->tx_next = curr_tx; | 558 | greth_read_bd(&bdp->stat) | GRETH_BD_EN); |
550 | greth->tx_free -= nr_frags + 1; | ||
551 | |||
552 | wmb(); | ||
553 | 559 | ||
554 | spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ | 560 | spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ |
555 | greth_enable_tx(greth); | 561 | greth->tx_next = curr_tx; |
562 | greth_enable_tx_and_irq(greth); | ||
556 | spin_unlock_irqrestore(&greth->devlock, flags); | 563 | spin_unlock_irqrestore(&greth->devlock, flags); |
557 | 564 | ||
558 | return NETDEV_TX_OK; | 565 | return NETDEV_TX_OK; |
@@ -648,7 +655,6 @@ static void greth_clean_tx(struct net_device *dev) | |||
648 | if (greth->tx_free > 0) { | 655 | if (greth->tx_free > 0) { |
649 | netif_wake_queue(dev); | 656 | netif_wake_queue(dev); |
650 | } | 657 | } |
651 | |||
652 | } | 658 | } |
653 | 659 | ||
654 | static inline void greth_update_tx_stats(struct net_device *dev, u32 stat) | 660 | static inline void greth_update_tx_stats(struct net_device *dev, u32 stat) |
@@ -670,20 +676,22 @@ static void greth_clean_tx_gbit(struct net_device *dev) | |||
670 | { | 676 | { |
671 | struct greth_private *greth; | 677 | struct greth_private *greth; |
672 | struct greth_bd *bdp, *bdp_last_frag; | 678 | struct greth_bd *bdp, *bdp_last_frag; |
673 | struct sk_buff *skb; | 679 | struct sk_buff *skb = NULL; |
674 | u32 stat; | 680 | u32 stat; |
675 | int nr_frags, i; | 681 | int nr_frags, i; |
682 | u16 tx_last; | ||
676 | 683 | ||
677 | greth = netdev_priv(dev); | 684 | greth = netdev_priv(dev); |
685 | tx_last = greth->tx_last; | ||
678 | 686 | ||
679 | while (greth->tx_free < GRETH_TXBD_NUM) { | 687 | while (tx_last != greth->tx_next) { |
680 | 688 | ||
681 | skb = greth->tx_skbuff[greth->tx_last]; | 689 | skb = greth->tx_skbuff[tx_last]; |
682 | 690 | ||
683 | nr_frags = skb_shinfo(skb)->nr_frags; | 691 | nr_frags = skb_shinfo(skb)->nr_frags; |
684 | 692 | ||
685 | /* We only clean fully completed SKBs */ | 693 | /* We only clean fully completed SKBs */ |
686 | bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags); | 694 | bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags); |
687 | 695 | ||
688 | GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); | 696 | GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); |
689 | mb(); | 697 | mb(); |
@@ -692,14 +700,14 @@ static void greth_clean_tx_gbit(struct net_device *dev) | |||
692 | if (stat & GRETH_BD_EN) | 700 | if (stat & GRETH_BD_EN) |
693 | break; | 701 | break; |
694 | 702 | ||
695 | greth->tx_skbuff[greth->tx_last] = NULL; | 703 | greth->tx_skbuff[tx_last] = NULL; |
696 | 704 | ||
697 | greth_update_tx_stats(dev, stat); | 705 | greth_update_tx_stats(dev, stat); |
698 | dev->stats.tx_bytes += skb->len; | 706 | dev->stats.tx_bytes += skb->len; |
699 | 707 | ||
700 | bdp = greth->tx_bd_base + greth->tx_last; | 708 | bdp = greth->tx_bd_base + tx_last; |
701 | 709 | ||
702 | greth->tx_last = NEXT_TX(greth->tx_last); | 710 | tx_last = NEXT_TX(tx_last); |
703 | 711 | ||
704 | dma_unmap_single(greth->dev, | 712 | dma_unmap_single(greth->dev, |
705 | greth_read_bd(&bdp->addr), | 713 | greth_read_bd(&bdp->addr), |
@@ -708,21 +716,26 @@ static void greth_clean_tx_gbit(struct net_device *dev) | |||
708 | 716 | ||
709 | for (i = 0; i < nr_frags; i++) { | 717 | for (i = 0; i < nr_frags; i++) { |
710 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 718 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
711 | bdp = greth->tx_bd_base + greth->tx_last; | 719 | bdp = greth->tx_bd_base + tx_last; |
712 | 720 | ||
713 | dma_unmap_page(greth->dev, | 721 | dma_unmap_page(greth->dev, |
714 | greth_read_bd(&bdp->addr), | 722 | greth_read_bd(&bdp->addr), |
715 | skb_frag_size(frag), | 723 | skb_frag_size(frag), |
716 | DMA_TO_DEVICE); | 724 | DMA_TO_DEVICE); |
717 | 725 | ||
718 | greth->tx_last = NEXT_TX(greth->tx_last); | 726 | tx_last = NEXT_TX(tx_last); |
719 | } | 727 | } |
720 | greth->tx_free += nr_frags+1; | ||
721 | dev_kfree_skb(skb); | 728 | dev_kfree_skb(skb); |
722 | } | 729 | } |
730 | if (skb) { /* skb is set only if the above while loop was entered */ | ||
731 | wmb(); | ||
732 | greth->tx_last = tx_last; | ||
723 | 733 | ||
724 | if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1))) | 734 | if (netif_queue_stopped(dev) && |
725 | netif_wake_queue(dev); | 735 | (greth_num_free_bds(tx_last, greth->tx_next) > |
736 | (MAX_SKB_FRAGS+1))) | ||
737 | netif_wake_queue(dev); | ||
738 | } | ||
726 | } | 739 | } |
727 | 740 | ||
728 | static int greth_rx(struct net_device *dev, int limit) | 741 | static int greth_rx(struct net_device *dev, int limit) |
@@ -965,16 +978,12 @@ static int greth_poll(struct napi_struct *napi, int budget) | |||
965 | greth = container_of(napi, struct greth_private, napi); | 978 | greth = container_of(napi, struct greth_private, napi); |
966 | 979 | ||
967 | restart_txrx_poll: | 980 | restart_txrx_poll: |
968 | if (netif_queue_stopped(greth->netdev)) { | ||
969 | if (greth->gbit_mac) | ||
970 | greth_clean_tx_gbit(greth->netdev); | ||
971 | else | ||
972 | greth_clean_tx(greth->netdev); | ||
973 | } | ||
974 | |||
975 | if (greth->gbit_mac) { | 981 | if (greth->gbit_mac) { |
982 | greth_clean_tx_gbit(greth->netdev); | ||
976 | work_done += greth_rx_gbit(greth->netdev, budget - work_done); | 983 | work_done += greth_rx_gbit(greth->netdev, budget - work_done); |
977 | } else { | 984 | } else { |
985 | if (netif_queue_stopped(greth->netdev)) | ||
986 | greth_clean_tx(greth->netdev); | ||
978 | work_done += greth_rx(greth->netdev, budget - work_done); | 987 | work_done += greth_rx(greth->netdev, budget - work_done); |
979 | } | 988 | } |
980 | 989 | ||
@@ -983,7 +992,8 @@ restart_txrx_poll: | |||
983 | spin_lock_irqsave(&greth->devlock, flags); | 992 | spin_lock_irqsave(&greth->devlock, flags); |
984 | 993 | ||
985 | ctrl = GRETH_REGLOAD(greth->regs->control); | 994 | ctrl = GRETH_REGLOAD(greth->regs->control); |
986 | if (netif_queue_stopped(greth->netdev)) { | 995 | if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) || |
996 | (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) { | ||
987 | GRETH_REGSAVE(greth->regs->control, | 997 | GRETH_REGSAVE(greth->regs->control, |
988 | ctrl | GRETH_TXI | GRETH_RXI); | 998 | ctrl | GRETH_TXI | GRETH_RXI); |
989 | mask = GRETH_INT_RX | GRETH_INT_RE | | 999 | mask = GRETH_INT_RX | GRETH_INT_RE | |
diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h index 232a622a85b7..ae16ac94daf8 100644 --- a/drivers/net/ethernet/aeroflex/greth.h +++ b/drivers/net/ethernet/aeroflex/greth.h | |||
@@ -107,7 +107,7 @@ struct greth_private { | |||
107 | 107 | ||
108 | u16 tx_next; | 108 | u16 tx_next; |
109 | u16 tx_last; | 109 | u16 tx_last; |
110 | u16 tx_free; | 110 | u16 tx_free; /* only used on 10/100Mbit */ |
111 | u16 rx_cur; | 111 | u16 rx_cur; |
112 | 112 | ||
113 | struct greth_regs *regs; /* Address of controller registers. */ | 113 | struct greth_regs *regs; /* Address of controller registers. */ |
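The greth changes above drop the shared tx_free counter on the gigabit path and instead compute the number of free descriptors from the producer index (tx_next, written by xmit) and the consumer index (tx_last, written by the poll/cleanup path), with barriers around the index updates. A self-contained sketch of the ring arithmetic used by greth_num_free_bds(), which keeps one slot unused so a full ring and an empty ring stay distinguishable (the ring size below is illustrative):

    #include <assert.h>
    #include <stdint.h>

    #define GRETH_TXBD_NUM 128              /* ring size used for this sketch */

    static uint16_t num_free_bds(uint16_t tx_last, uint16_t tx_next)
    {
            if (tx_next < tx_last)
                    return (tx_last - tx_next) - 1;
            return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
    }

    int main(void)
    {
            assert(num_free_bds(0, 0) == GRETH_TXBD_NUM - 1);  /* empty ring */
            assert(num_free_bds(10, 9) == 0);                  /* ring full */
            assert(num_free_bds(5, 120) == 12);                /* wrapped producer */
            return 0;
    }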
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 346592dca33c..a3c11355a34d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | |||
@@ -272,8 +272,8 @@ static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer, | |||
272 | struct xgbe_prv_data *pdata = filp->private_data; | 272 | struct xgbe_prv_data *pdata = filp->private_data; |
273 | unsigned int value; | 273 | unsigned int value; |
274 | 274 | ||
275 | value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd, | 275 | value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd, |
276 | pdata->debugfs_xpcs_reg); | 276 | pdata->debugfs_xpcs_reg); |
277 | 277 | ||
278 | return xgbe_common_read(buffer, count, ppos, value); | 278 | return xgbe_common_read(buffer, count, ppos, value); |
279 | } | 279 | } |
@@ -290,8 +290,8 @@ static ssize_t xpcs_reg_value_write(struct file *filp, | |||
290 | if (len < 0) | 290 | if (len < 0) |
291 | return len; | 291 | return len; |
292 | 292 | ||
293 | pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd, | 293 | XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg, |
294 | pdata->debugfs_xpcs_reg, value); | 294 | value); |
295 | 295 | ||
296 | return len; | 296 | return len; |
297 | } | 297 | } |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index edaca4496264..ea273836d999 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c | |||
@@ -348,7 +348,7 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) | |||
348 | 348 | ||
349 | /* Clear MAC flow control */ | 349 | /* Clear MAC flow control */ |
350 | max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; | 350 | max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; |
351 | q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count); | 351 | q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); |
352 | reg = MAC_Q0TFCR; | 352 | reg = MAC_Q0TFCR; |
353 | for (i = 0; i < q_count; i++) { | 353 | for (i = 0; i < q_count; i++) { |
354 | reg_val = XGMAC_IOREAD(pdata, reg); | 354 | reg_val = XGMAC_IOREAD(pdata, reg); |
@@ -373,7 +373,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) | |||
373 | 373 | ||
374 | /* Set MAC flow control */ | 374 | /* Set MAC flow control */ |
375 | max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; | 375 | max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; |
376 | q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count); | 376 | q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); |
377 | reg = MAC_Q0TFCR; | 377 | reg = MAC_Q0TFCR; |
378 | for (i = 0; i < q_count; i++) { | 378 | for (i = 0; i < q_count; i++) { |
379 | reg_val = XGMAC_IOREAD(pdata, reg); | 379 | reg_val = XGMAC_IOREAD(pdata, reg); |
@@ -509,8 +509,8 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) | |||
509 | XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); | 509 | XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); |
510 | 510 | ||
511 | /* Enable all counter interrupts */ | 511 | /* Enable all counter interrupts */ |
512 | XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff); | 512 | XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff); |
513 | XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff); | 513 | XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff); |
514 | } | 514 | } |
515 | 515 | ||
516 | static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata) | 516 | static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata) |
@@ -1633,6 +1633,9 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) | |||
1633 | { | 1633 | { |
1634 | unsigned int i, count; | 1634 | unsigned int i, count; |
1635 | 1635 | ||
1636 | if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21) | ||
1637 | return 0; | ||
1638 | |||
1636 | for (i = 0; i < pdata->tx_q_count; i++) | 1639 | for (i = 0; i < pdata->tx_q_count; i++) |
1637 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); | 1640 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); |
1638 | 1641 | ||
@@ -1703,8 +1706,8 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) | |||
1703 | XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); | 1706 | XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); |
1704 | } | 1707 | } |
1705 | 1708 | ||
1706 | static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size, | 1709 | static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size, |
1707 | unsigned char queue_count) | 1710 | unsigned int queue_count) |
1708 | { | 1711 | { |
1709 | unsigned int q_fifo_size = 0; | 1712 | unsigned int q_fifo_size = 0; |
1710 | enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256; | 1713 | enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256; |
@@ -1748,6 +1751,10 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size, | |||
1748 | q_fifo_size = XGBE_FIFO_SIZE_KB(256); | 1751 | q_fifo_size = XGBE_FIFO_SIZE_KB(256); |
1749 | break; | 1752 | break; |
1750 | } | 1753 | } |
1754 | |||
1755 | /* The configured value is not the actual amount of fifo RAM */ | ||
1756 | q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size); | ||
1757 | |||
1751 | q_fifo_size = q_fifo_size / queue_count; | 1758 | q_fifo_size = q_fifo_size / queue_count; |
1752 | 1759 | ||
1753 | /* Set the queue fifo size programmable value */ | 1760 | /* Set the queue fifo size programmable value */ |
@@ -1947,6 +1954,32 @@ static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata) | |||
1947 | xgbe_disable_rx_vlan_stripping(pdata); | 1954 | xgbe_disable_rx_vlan_stripping(pdata); |
1948 | } | 1955 | } |
1949 | 1956 | ||
1957 | static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo) | ||
1958 | { | ||
1959 | bool read_hi; | ||
1960 | u64 val; | ||
1961 | |||
1962 | switch (reg_lo) { | ||
1963 | /* These registers are always 64 bit */ | ||
1964 | case MMC_TXOCTETCOUNT_GB_LO: | ||
1965 | case MMC_TXOCTETCOUNT_G_LO: | ||
1966 | case MMC_RXOCTETCOUNT_GB_LO: | ||
1967 | case MMC_RXOCTETCOUNT_G_LO: | ||
1968 | read_hi = true; | ||
1969 | break; | ||
1970 | |||
1971 | default: | ||
1972 | read_hi = false; | ||
1973 | }; | ||
1974 | |||
1975 | val = XGMAC_IOREAD(pdata, reg_lo); | ||
1976 | |||
1977 | if (read_hi) | ||
1978 | val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32); | ||
1979 | |||
1980 | return val; | ||
1981 | } | ||
1982 | |||
1950 | static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) | 1983 | static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) |
1951 | { | 1984 | { |
1952 | struct xgbe_mmc_stats *stats = &pdata->mmc_stats; | 1985 | struct xgbe_mmc_stats *stats = &pdata->mmc_stats; |
@@ -1954,75 +1987,75 @@ static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) | |||
1954 | 1987 | ||
1955 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) | 1988 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) |
1956 | stats->txoctetcount_gb += | 1989 | stats->txoctetcount_gb += |
1957 | XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO); | 1990 | xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); |
1958 | 1991 | ||
1959 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) | 1992 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) |
1960 | stats->txframecount_gb += | 1993 | stats->txframecount_gb += |
1961 | XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO); | 1994 | xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); |
1962 | 1995 | ||
1963 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) | 1996 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) |
1964 | stats->txbroadcastframes_g += | 1997 | stats->txbroadcastframes_g += |
1965 | XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO); | 1998 | xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); |
1966 | 1999 | ||
1967 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) | 2000 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) |
1968 | stats->txmulticastframes_g += | 2001 | stats->txmulticastframes_g += |
1969 | XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO); | 2002 | xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); |
1970 | 2003 | ||
1971 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) | 2004 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) |
1972 | stats->tx64octets_gb += | 2005 | stats->tx64octets_gb += |
1973 | XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO); | 2006 | xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); |
1974 | 2007 | ||
1975 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) | 2008 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) |
1976 | stats->tx65to127octets_gb += | 2009 | stats->tx65to127octets_gb += |
1977 | XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO); | 2010 | xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); |
1978 | 2011 | ||
1979 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) | 2012 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) |
1980 | stats->tx128to255octets_gb += | 2013 | stats->tx128to255octets_gb += |
1981 | XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO); | 2014 | xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); |
1982 | 2015 | ||
1983 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) | 2016 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) |
1984 | stats->tx256to511octets_gb += | 2017 | stats->tx256to511octets_gb += |
1985 | XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO); | 2018 | xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); |
1986 | 2019 | ||
1987 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) | 2020 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) |
1988 | stats->tx512to1023octets_gb += | 2021 | stats->tx512to1023octets_gb += |
1989 | XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO); | 2022 | xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); |
1990 | 2023 | ||
1991 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) | 2024 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) |
1992 | stats->tx1024tomaxoctets_gb += | 2025 | stats->tx1024tomaxoctets_gb += |
1993 | XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); | 2026 | xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); |
1994 | 2027 | ||
1995 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) | 2028 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) |
1996 | stats->txunicastframes_gb += | 2029 | stats->txunicastframes_gb += |
1997 | XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO); | 2030 | xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); |
1998 | 2031 | ||
1999 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) | 2032 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) |
2000 | stats->txmulticastframes_gb += | 2033 | stats->txmulticastframes_gb += |
2001 | XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO); | 2034 | xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); |
2002 | 2035 | ||
2003 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) | 2036 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) |
2004 | stats->txbroadcastframes_g += | 2037 | stats->txbroadcastframes_g += |
2005 | XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO); | 2038 | xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); |
2006 | 2039 | ||
2007 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) | 2040 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) |
2008 | stats->txunderflowerror += | 2041 | stats->txunderflowerror += |
2009 | XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO); | 2042 | xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); |
2010 | 2043 | ||
2011 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) | 2044 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) |
2012 | stats->txoctetcount_g += | 2045 | stats->txoctetcount_g += |
2013 | XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO); | 2046 | xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); |
2014 | 2047 | ||
2015 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) | 2048 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) |
2016 | stats->txframecount_g += | 2049 | stats->txframecount_g += |
2017 | XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO); | 2050 | xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); |
2018 | 2051 | ||
2019 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) | 2052 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) |
2020 | stats->txpauseframes += | 2053 | stats->txpauseframes += |
2021 | XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO); | 2054 | xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); |
2022 | 2055 | ||
2023 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) | 2056 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) |
2024 | stats->txvlanframes_g += | 2057 | stats->txvlanframes_g += |
2025 | XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO); | 2058 | xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); |
2026 | } | 2059 | } |
2027 | 2060 | ||
2028 | static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) | 2061 | static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) |
@@ -2032,95 +2065,95 @@ static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) | |||
2032 | 2065 | ||
2033 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) | 2066 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) |
2034 | stats->rxframecount_gb += | 2067 | stats->rxframecount_gb += |
2035 | XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO); | 2068 | xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); |
2036 | 2069 | ||
2037 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) | 2070 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) |
2038 | stats->rxoctetcount_gb += | 2071 | stats->rxoctetcount_gb += |
2039 | XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO); | 2072 | xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); |
2040 | 2073 | ||
2041 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) | 2074 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) |
2042 | stats->rxoctetcount_g += | 2075 | stats->rxoctetcount_g += |
2043 | XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO); | 2076 | xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); |
2044 | 2077 | ||
2045 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) | 2078 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) |
2046 | stats->rxbroadcastframes_g += | 2079 | stats->rxbroadcastframes_g += |
2047 | XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO); | 2080 | xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); |
2048 | 2081 | ||
2049 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) | 2082 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) |
2050 | stats->rxmulticastframes_g += | 2083 | stats->rxmulticastframes_g += |
2051 | XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO); | 2084 | xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); |
2052 | 2085 | ||
2053 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) | 2086 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) |
2054 | stats->rxcrcerror += | 2087 | stats->rxcrcerror += |
2055 | XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO); | 2088 | xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); |
2056 | 2089 | ||
2057 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) | 2090 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) |
2058 | stats->rxrunterror += | 2091 | stats->rxrunterror += |
2059 | XGMAC_IOREAD(pdata, MMC_RXRUNTERROR); | 2092 | xgbe_mmc_read(pdata, MMC_RXRUNTERROR); |
2060 | 2093 | ||
2061 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) | 2094 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) |
2062 | stats->rxjabbererror += | 2095 | stats->rxjabbererror += |
2063 | XGMAC_IOREAD(pdata, MMC_RXJABBERERROR); | 2096 | xgbe_mmc_read(pdata, MMC_RXJABBERERROR); |
2064 | 2097 | ||
2065 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) | 2098 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) |
2066 | stats->rxundersize_g += | 2099 | stats->rxundersize_g += |
2067 | XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G); | 2100 | xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); |
2068 | 2101 | ||
2069 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) | 2102 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) |
2070 | stats->rxoversize_g += | 2103 | stats->rxoversize_g += |
2071 | XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G); | 2104 | xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); |
2072 | 2105 | ||
2073 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) | 2106 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) |
2074 | stats->rx64octets_gb += | 2107 | stats->rx64octets_gb += |
2075 | XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO); | 2108 | xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); |
2076 | 2109 | ||
2077 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) | 2110 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) |
2078 | stats->rx65to127octets_gb += | 2111 | stats->rx65to127octets_gb += |
2079 | XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO); | 2112 | xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); |
2080 | 2113 | ||
2081 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) | 2114 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) |
2082 | stats->rx128to255octets_gb += | 2115 | stats->rx128to255octets_gb += |
2083 | XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO); | 2116 | xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); |
2084 | 2117 | ||
2085 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB)) | 2118 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB)) |
2086 | stats->rx256to511octets_gb += | 2119 | stats->rx256to511octets_gb += |
2087 | XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO); | 2120 | xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); |
2088 | 2121 | ||
2089 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB)) | 2122 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB)) |
2090 | stats->rx512to1023octets_gb += | 2123 | stats->rx512to1023octets_gb += |
2091 | XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO); | 2124 | xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); |
2092 | 2125 | ||
2093 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) | 2126 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) |
2094 | stats->rx1024tomaxoctets_gb += | 2127 | stats->rx1024tomaxoctets_gb += |
2095 | XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); | 2128 | xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); |
2096 | 2129 | ||
2097 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) | 2130 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) |
2098 | stats->rxunicastframes_g += | 2131 | stats->rxunicastframes_g += |
2099 | XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO); | 2132 | xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); |
2100 | 2133 | ||
2101 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) | 2134 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) |
2102 | stats->rxlengtherror += | 2135 | stats->rxlengtherror += |
2103 | XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO); | 2136 | xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); |
2104 | 2137 | ||
2105 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) | 2138 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) |
2106 | stats->rxoutofrangetype += | 2139 | stats->rxoutofrangetype += |
2107 | XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO); | 2140 | xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); |
2108 | 2141 | ||
2109 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) | 2142 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) |
2110 | stats->rxpauseframes += | 2143 | stats->rxpauseframes += |
2111 | XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO); | 2144 | xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); |
2112 | 2145 | ||
2113 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) | 2146 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) |
2114 | stats->rxfifooverflow += | 2147 | stats->rxfifooverflow += |
2115 | XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO); | 2148 | xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); |
2116 | 2149 | ||
2117 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) | 2150 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) |
2118 | stats->rxvlanframes_gb += | 2151 | stats->rxvlanframes_gb += |
2119 | XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO); | 2152 | xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); |
2120 | 2153 | ||
2121 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) | 2154 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) |
2122 | stats->rxwatchdogerror += | 2155 | stats->rxwatchdogerror += |
2123 | XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR); | 2156 | xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); |
2124 | } | 2157 | } |
2125 | 2158 | ||
2126 | static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) | 2159 | static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) |
@@ -2131,127 +2164,127 @@ static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) | |||
2131 | XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); | 2164 | XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); |
2132 | 2165 | ||
2133 | stats->txoctetcount_gb += | 2166 | stats->txoctetcount_gb += |
2134 | XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO); | 2167 | xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); |
2135 | 2168 | ||
2136 | stats->txframecount_gb += | 2169 | stats->txframecount_gb += |
2137 | XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO); | 2170 | xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); |
2138 | 2171 | ||
2139 | stats->txbroadcastframes_g += | 2172 | stats->txbroadcastframes_g += |
2140 | XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO); | 2173 | xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); |
2141 | 2174 | ||
2142 | stats->txmulticastframes_g += | 2175 | stats->txmulticastframes_g += |
2143 | XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO); | 2176 | xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); |
2144 | 2177 | ||
2145 | stats->tx64octets_gb += | 2178 | stats->tx64octets_gb += |
2146 | XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO); | 2179 | xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); |
2147 | 2180 | ||
2148 | stats->tx65to127octets_gb += | 2181 | stats->tx65to127octets_gb += |
2149 | XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO); | 2182 | xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); |
2150 | 2183 | ||
2151 | stats->tx128to255octets_gb += | 2184 | stats->tx128to255octets_gb += |
2152 | XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO); | 2185 | xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); |
2153 | 2186 | ||
2154 | stats->tx256to511octets_gb += | 2187 | stats->tx256to511octets_gb += |
2155 | XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO); | 2188 | xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); |
2156 | 2189 | ||
2157 | stats->tx512to1023octets_gb += | 2190 | stats->tx512to1023octets_gb += |
2158 | XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO); | 2191 | xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); |
2159 | 2192 | ||
2160 | stats->tx1024tomaxoctets_gb += | 2193 | stats->tx1024tomaxoctets_gb += |
2161 | XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); | 2194 | xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); |
2162 | 2195 | ||
2163 | stats->txunicastframes_gb += | 2196 | stats->txunicastframes_gb += |
2164 | XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO); | 2197 | xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); |
2165 | 2198 | ||
2166 | stats->txmulticastframes_gb += | 2199 | stats->txmulticastframes_gb += |
2167 | XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO); | 2200 | xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); |
2168 | 2201 | ||
2169 | stats->txbroadcastframes_g += | 2202 | stats->txbroadcastframes_g += |
2170 | XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO); | 2203 | xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); |
2171 | 2204 | ||
2172 | stats->txunderflowerror += | 2205 | stats->txunderflowerror += |
2173 | XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO); | 2206 | xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); |
2174 | 2207 | ||
2175 | stats->txoctetcount_g += | 2208 | stats->txoctetcount_g += |
2176 | XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO); | 2209 | xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); |
2177 | 2210 | ||
2178 | stats->txframecount_g += | 2211 | stats->txframecount_g += |
2179 | XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO); | 2212 | xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); |
2180 | 2213 | ||
2181 | stats->txpauseframes += | 2214 | stats->txpauseframes += |
2182 | XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO); | 2215 | xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); |
2183 | 2216 | ||
2184 | stats->txvlanframes_g += | 2217 | stats->txvlanframes_g += |
2185 | XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO); | 2218 | xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); |
2186 | 2219 | ||
2187 | stats->rxframecount_gb += | 2220 | stats->rxframecount_gb += |
2188 | XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO); | 2221 | xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); |
2189 | 2222 | ||
2190 | stats->rxoctetcount_gb += | 2223 | stats->rxoctetcount_gb += |
2191 | XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO); | 2224 | xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); |
2192 | 2225 | ||
2193 | stats->rxoctetcount_g += | 2226 | stats->rxoctetcount_g += |
2194 | XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO); | 2227 | xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); |
2195 | 2228 | ||
2196 | stats->rxbroadcastframes_g += | 2229 | stats->rxbroadcastframes_g += |
2197 | XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO); | 2230 | xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); |
2198 | 2231 | ||
2199 | stats->rxmulticastframes_g += | 2232 | stats->rxmulticastframes_g += |
2200 | XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO); | 2233 | xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); |
2201 | 2234 | ||
2202 | stats->rxcrcerror += | 2235 | stats->rxcrcerror += |
2203 | XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO); | 2236 | xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); |
2204 | 2237 | ||
2205 | stats->rxrunterror += | 2238 | stats->rxrunterror += |
2206 | XGMAC_IOREAD(pdata, MMC_RXRUNTERROR); | 2239 | xgbe_mmc_read(pdata, MMC_RXRUNTERROR); |
2207 | 2240 | ||
2208 | stats->rxjabbererror += | 2241 | stats->rxjabbererror += |
2209 | XGMAC_IOREAD(pdata, MMC_RXJABBERERROR); | 2242 | xgbe_mmc_read(pdata, MMC_RXJABBERERROR); |
2210 | 2243 | ||
2211 | stats->rxundersize_g += | 2244 | stats->rxundersize_g += |
2212 | XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G); | 2245 | xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); |
2213 | 2246 | ||
2214 | stats->rxoversize_g += | 2247 | stats->rxoversize_g += |
2215 | XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G); | 2248 | xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); |
2216 | 2249 | ||
2217 | stats->rx64octets_gb += | 2250 | stats->rx64octets_gb += |
2218 | XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO); | 2251 | xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); |
2219 | 2252 | ||
2220 | stats->rx65to127octets_gb += | 2253 | stats->rx65to127octets_gb += |
2221 | XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO); | 2254 | xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); |
2222 | 2255 | ||
2223 | stats->rx128to255octets_gb += | 2256 | stats->rx128to255octets_gb += |
2224 | XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO); | 2257 | xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); |
2225 | 2258 | ||
2226 | stats->rx256to511octets_gb += | 2259 | stats->rx256to511octets_gb += |
2227 | XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO); | 2260 | xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); |
2228 | 2261 | ||
2229 | stats->rx512to1023octets_gb += | 2262 | stats->rx512to1023octets_gb += |
2230 | XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO); | 2263 | xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); |
2231 | 2264 | ||
2232 | stats->rx1024tomaxoctets_gb += | 2265 | stats->rx1024tomaxoctets_gb += |
2233 | XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); | 2266 | xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); |
2234 | 2267 | ||
2235 | stats->rxunicastframes_g += | 2268 | stats->rxunicastframes_g += |
2236 | XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO); | 2269 | xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); |
2237 | 2270 | ||
2238 | stats->rxlengtherror += | 2271 | stats->rxlengtherror += |
2239 | XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO); | 2272 | xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); |
2240 | 2273 | ||
2241 | stats->rxoutofrangetype += | 2274 | stats->rxoutofrangetype += |
2242 | XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO); | 2275 | xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); |
2243 | 2276 | ||
2244 | stats->rxpauseframes += | 2277 | stats->rxpauseframes += |
2245 | XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO); | 2278 | xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); |
2246 | 2279 | ||
2247 | stats->rxfifooverflow += | 2280 | stats->rxfifooverflow += |
2248 | XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO); | 2281 | xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); |
2249 | 2282 | ||
2250 | stats->rxvlanframes_gb += | 2283 | stats->rxvlanframes_gb += |
2251 | XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO); | 2284 | xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); |
2252 | 2285 | ||
2253 | stats->rxwatchdogerror += | 2286 | stats->rxwatchdogerror += |
2254 | XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR); | 2287 | xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); |
2255 | 2288 | ||
2256 | /* Un-freeze counters */ | 2289 | /* Un-freeze counters */ |
2257 | XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); | 2290 | XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index dc84f7193c2d..b26d75856553 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c | |||
@@ -361,6 +361,8 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) | |||
361 | 361 | ||
362 | memset(hw_feat, 0, sizeof(*hw_feat)); | 362 | memset(hw_feat, 0, sizeof(*hw_feat)); |
363 | 363 | ||
364 | hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR); | ||
365 | |||
364 | /* Hardware feature register 0 */ | 366 | /* Hardware feature register 0 */ |
365 | hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); | 367 | hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); |
366 | hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); | 368 | hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index a076aca138a1..46f613028e9c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | |||
@@ -361,15 +361,16 @@ static void xgbe_get_drvinfo(struct net_device *netdev, | |||
361 | struct ethtool_drvinfo *drvinfo) | 361 | struct ethtool_drvinfo *drvinfo) |
362 | { | 362 | { |
363 | struct xgbe_prv_data *pdata = netdev_priv(netdev); | 363 | struct xgbe_prv_data *pdata = netdev_priv(netdev); |
364 | struct xgbe_hw_features *hw_feat = &pdata->hw_feat; | ||
364 | 365 | ||
365 | strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver)); | 366 | strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver)); |
366 | strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version)); | 367 | strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version)); |
367 | strlcpy(drvinfo->bus_info, dev_name(pdata->dev), | 368 | strlcpy(drvinfo->bus_info, dev_name(pdata->dev), |
368 | sizeof(drvinfo->bus_info)); | 369 | sizeof(drvinfo->bus_info)); |
369 | snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d", | 370 | snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d", |
370 | XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER), | 371 | XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER), |
371 | XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID), | 372 | XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID), |
372 | XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER)); | 373 | XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER)); |
373 | drvinfo->n_stats = XGBE_STATS_COUNT; | 374 | drvinfo->n_stats = XGBE_STATS_COUNT; |
374 | } | 375 | } |
375 | 376 | ||
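The xgbe-ethtool change above stops reading MAC_VR from the hardware on every ethtool query and instead decodes a copy cached in hw_feat->version at feature-discovery time. A minimal userspace sketch of the same mask-and-shift decoding follows; FIELD_GET and the 24/8/0 field offsets are invented for illustration and do not reflect the real XGMAC register layout.

/* Illustrative sketch: decoding a cached 32-bit version register with
 * mask/shift accessors, in the spirit of XGMAC_GET_BITS(). The field
 * offsets below are made up for the example, not the XGMAC layout.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_GET(reg, shift, width) \
	(((reg) >> (shift)) & ((1u << (width)) - 1u))

struct hw_features {
	uint32_t version;	/* snapshot of the MAC version register */
};

static void fill_fw_version(char *buf, size_t len, const struct hw_features *hw)
{
	/* Hypothetical field layout: user[31:24], device[15:8], snps[7:0] */
	snprintf(buf, len, "%u.%u.%u",
		 FIELD_GET(hw->version, 24, 8),
		 FIELD_GET(hw->version, 8, 8),
		 FIELD_GET(hw->version, 0, 8));
}

int main(void)
{
	struct hw_features hw = { .version = 0x21003042 };	/* cached once at probe */
	char buf[32];

	fill_fw_version(buf, sizeof(buf), &hw);
	printf("fw_version: %s\n", buf);
	return 0;
}

Caching the register once also means get_drvinfo() keeps working even when the device clock is gated, which is the usual motivation for this kind of snapshot.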
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 8aa6a9353f7b..bdf9cfa70e88 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c | |||
@@ -172,7 +172,7 @@ static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata) | |||
172 | } | 172 | } |
173 | 173 | ||
174 | if (i < pdata->rx_ring_count) { | 174 | if (i < pdata->rx_ring_count) { |
175 | spin_lock_init(&tx_ring->lock); | 175 | spin_lock_init(&rx_ring->lock); |
176 | channel->rx_ring = rx_ring++; | 176 | channel->rx_ring = rx_ring++; |
177 | } | 177 | } |
178 | 178 | ||
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 07bf70a82908..e9fe6e6ddcc3 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h | |||
@@ -183,6 +183,7 @@ | |||
183 | #define XGMAC_DRIVER_CONTEXT 1 | 183 | #define XGMAC_DRIVER_CONTEXT 1 |
184 | #define XGMAC_IOCTL_CONTEXT 2 | 184 | #define XGMAC_IOCTL_CONTEXT 2 |
185 | 185 | ||
186 | #define XGBE_FIFO_MAX 81920 | ||
186 | #define XGBE_FIFO_SIZE_B(x) (x) | 187 | #define XGBE_FIFO_SIZE_B(x) (x) |
187 | #define XGBE_FIFO_SIZE_KB(x) (x * 1024) | 188 | #define XGBE_FIFO_SIZE_KB(x) (x * 1024) |
188 | 189 | ||
@@ -526,6 +527,9 @@ struct xgbe_desc_if { | |||
526 | * or configurations are present in the device. | 527 | * or configurations are present in the device. |
527 | */ | 528 | */ |
528 | struct xgbe_hw_features { | 529 | struct xgbe_hw_features { |
530 | /* HW Version */ | ||
531 | unsigned int version; | ||
532 | |||
529 | /* HW Feature Register0 */ | 533 | /* HW Feature Register0 */ |
530 | unsigned int gmii; /* 1000 Mbps support */ | 534 | unsigned int gmii; /* 1000 Mbps support */ |
531 | unsigned int vlhash; /* VLAN Hash Filter */ | 535 | unsigned int vlhash; /* VLAN Hash Filter */ |
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig index 616dff6d3f5f..f4054d242f3c 100644 --- a/drivers/net/ethernet/apm/xgene/Kconfig +++ b/drivers/net/ethernet/apm/xgene/Kconfig | |||
@@ -1,5 +1,6 @@ | |||
1 | config NET_XGENE | 1 | config NET_XGENE |
2 | tristate "APM X-Gene SoC Ethernet Driver" | 2 | tristate "APM X-Gene SoC Ethernet Driver" |
3 | depends on HAS_DMA | ||
3 | select PHYLIB | 4 | select PHYLIB |
4 | help | 5 | help |
5 | This is the Ethernet driver for the on-chip ethernet interface on the | 6 | This is the Ethernet driver for the on-chip ethernet interface on the |
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index fe5cfeace6e3..5919394d9f58 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c | |||
@@ -30,6 +30,17 @@ | |||
30 | #define DRV_VERSION "1.0" | 30 | #define DRV_VERSION "1.0" |
31 | 31 | ||
32 | /** | 32 | /** |
33 | * arc_emac_tx_avail - Return the number of available slots in the tx ring. | ||
34 | * @priv: Pointer to ARC EMAC private data structure. | ||
35 | * | ||
36 | * returns: the number of slots available for transmission in the tx ring. | ||
37 | */ | ||
38 | static inline int arc_emac_tx_avail(struct arc_emac_priv *priv) | ||
39 | { | ||
40 | return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM; | ||
41 | } | ||
42 | |||
43 | /** | ||
33 | * arc_emac_adjust_link - Adjust the PHY link duplex. | 44 | * arc_emac_adjust_link - Adjust the PHY link duplex. |
34 | * @ndev: Pointer to the net_device structure. | 45 | * @ndev: Pointer to the net_device structure. |
35 | * | 46 | * |
@@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev) | |||
180 | txbd->info = 0; | 191 | txbd->info = 0; |
181 | 192 | ||
182 | *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; | 193 | *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; |
183 | |||
184 | if (netif_queue_stopped(ndev)) | ||
185 | netif_wake_queue(ndev); | ||
186 | } | 194 | } |
195 | |||
196 | /* Ensure that txbd_dirty is visible to tx() before checking | ||
197 | * for queue stopped. | ||
198 | */ | ||
199 | smp_mb(); | ||
200 | |||
201 | if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv)) | ||
202 | netif_wake_queue(ndev); | ||
187 | } | 203 | } |
188 | 204 | ||
189 | /** | 205 | /** |
@@ -298,7 +314,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget) | |||
298 | work_done = arc_emac_rx(ndev, budget); | 314 | work_done = arc_emac_rx(ndev, budget); |
299 | if (work_done < budget) { | 315 | if (work_done < budget) { |
300 | napi_complete(napi); | 316 | napi_complete(napi); |
301 | arc_reg_or(priv, R_ENABLE, RXINT_MASK); | 317 | arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); |
302 | } | 318 | } |
303 | 319 | ||
304 | return work_done; | 320 | return work_done; |
@@ -327,9 +343,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance) | |||
327 | /* Reset all flags except "MDIO complete" */ | 343 | /* Reset all flags except "MDIO complete" */ |
328 | arc_reg_set(priv, R_STATUS, status); | 344 | arc_reg_set(priv, R_STATUS, status); |
329 | 345 | ||
330 | if (status & RXINT_MASK) { | 346 | if (status & (RXINT_MASK | TXINT_MASK)) { |
331 | if (likely(napi_schedule_prep(&priv->napi))) { | 347 | if (likely(napi_schedule_prep(&priv->napi))) { |
332 | arc_reg_clr(priv, R_ENABLE, RXINT_MASK); | 348 | arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); |
333 | __napi_schedule(&priv->napi); | 349 | __napi_schedule(&priv->napi); |
334 | } | 350 | } |
335 | } | 351 | } |
@@ -440,7 +456,7 @@ static int arc_emac_open(struct net_device *ndev) | |||
440 | arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma); | 456 | arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma); |
441 | 457 | ||
442 | /* Enable interrupts */ | 458 | /* Enable interrupts */ |
443 | arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK); | 459 | arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); |
444 | 460 | ||
445 | /* Set CONTROL */ | 461 | /* Set CONTROL */ |
446 | arc_reg_set(priv, R_CTRL, | 462 | arc_reg_set(priv, R_CTRL, |
@@ -511,7 +527,7 @@ static int arc_emac_stop(struct net_device *ndev) | |||
511 | netif_stop_queue(ndev); | 527 | netif_stop_queue(ndev); |
512 | 528 | ||
513 | /* Disable interrupts */ | 529 | /* Disable interrupts */ |
514 | arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK); | 530 | arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); |
515 | 531 | ||
516 | /* Disable EMAC */ | 532 | /* Disable EMAC */ |
517 | arc_reg_clr(priv, R_CTRL, EN_MASK); | 533 | arc_reg_clr(priv, R_CTRL, EN_MASK); |
@@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) | |||
574 | 590 | ||
575 | len = max_t(unsigned int, ETH_ZLEN, skb->len); | 591 | len = max_t(unsigned int, ETH_ZLEN, skb->len); |
576 | 592 | ||
577 | /* EMAC still holds this buffer in its possession. | 593 | if (unlikely(!arc_emac_tx_avail(priv))) { |
578 | * CPU must not modify this buffer descriptor | ||
579 | */ | ||
580 | if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) { | ||
581 | netif_stop_queue(ndev); | 594 | netif_stop_queue(ndev); |
595 | netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n"); | ||
582 | return NETDEV_TX_BUSY; | 596 | return NETDEV_TX_BUSY; |
583 | } | 597 | } |
584 | 598 | ||
@@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) | |||
607 | /* Increment index to point to the next BD */ | 621 | /* Increment index to point to the next BD */ |
608 | *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; | 622 | *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; |
609 | 623 | ||
610 | /* Get "info" of the next BD */ | 624 | /* Ensure that tx_clean() sees the new txbd_curr before |
611 | info = &priv->txbd[*txbd_curr].info; | 625 | * checking the queue status. This prevents an unneeded wake |
626 | * of the queue in tx_clean(). | ||
627 | */ | ||
628 | smp_mb(); | ||
612 | 629 | ||
613 | /* Check if if Tx BD ring is full - next BD is still owned by EMAC */ | 630 | if (!arc_emac_tx_avail(priv)) { |
614 | if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) | ||
615 | netif_stop_queue(ndev); | 631 | netif_stop_queue(ndev); |
632 | /* Refresh tx_dirty */ | ||
633 | smp_mb(); | ||
634 | if (arc_emac_tx_avail(priv)) | ||
635 | netif_start_queue(ndev); | ||
636 | } | ||
616 | 637 | ||
617 | arc_reg_set(priv, R_STATUS, TXPL_MASK); | 638 | arc_reg_set(priv, R_STATUS, TXPL_MASK); |
618 | 639 | ||
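The arc_emac patch above replaces the OWN-bit test with arc_emac_tx_avail() and pairs smp_mb() barriers in tx() and tx_clean() so the queue is stopped and woken from a consistent view of txbd_curr and txbd_dirty. The sketch below models only the ring arithmetic in plain userspace C (struct ring, tx_avail and the TX_BD_NUM value are illustrative); it reserves one slot so a full ring is distinguishable from an empty one, which is why the driver subtracts 1 in the modulo expression.

/* Illustrative sketch (not the driver itself): the tx-ring accounting
 * used above. curr and dirty mirror txbd_curr/txbd_dirty; one slot is
 * kept unused so that curr == dirty always means "empty".
 */
#include <assert.h>
#include <stdio.h>

#define TX_BD_NUM 128		/* ring size used for the example */

struct ring {
	unsigned int curr;	/* next descriptor the CPU will fill */
	unsigned int dirty;	/* next descriptor to be reclaimed   */
};

static int tx_avail(const struct ring *r)
{
	/* Free slots between producer and consumer, minus the reserved one. */
	return (r->dirty + TX_BD_NUM - r->curr - 1) % TX_BD_NUM;
}

int main(void)
{
	struct ring r = { .curr = 0, .dirty = 0 };

	assert(tx_avail(&r) == TX_BD_NUM - 1);		/* empty ring */

	r.curr = (r.dirty + TX_BD_NUM - 1) % TX_BD_NUM;
	assert(tx_avail(&r) == 0);			/* full ring  */

	printf("ring accounting ok\n");
	return 0;
}

In the real driver the producer and the reclaim path each execute smp_mb() between updating their own index and testing the other side's, so a stopped queue cannot miss the wake-up; the sketch deliberately leaves that ordering out.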
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 7dcfb19a31c8..d8d07a818b89 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig | |||
@@ -84,7 +84,7 @@ config BNX2 | |||
84 | 84 | ||
85 | config CNIC | 85 | config CNIC |
86 | tristate "QLogic CNIC support" | 86 | tristate "QLogic CNIC support" |
87 | depends on PCI | 87 | depends on PCI && (IPV6 || IPV6=n) |
88 | select BNX2 | 88 | select BNX2 |
89 | select UIO | 89 | select UIO |
90 | ---help--- | 90 | ---help--- |
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 4a7028d65912..d588136b23b9 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c | |||
@@ -1697,7 +1697,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev, | |||
1697 | hwstat->tx_underruns + | 1697 | hwstat->tx_underruns + |
1698 | hwstat->tx_excessive_cols + | 1698 | hwstat->tx_excessive_cols + |
1699 | hwstat->tx_late_cols); | 1699 | hwstat->tx_late_cols); |
1700 | nstat->multicast = hwstat->tx_multicast_pkts; | 1700 | nstat->multicast = hwstat->rx_multicast_pkts; |
1701 | nstat->collisions = hwstat->tx_total_cols; | 1701 | nstat->collisions = hwstat->tx_total_cols; |
1702 | 1702 | ||
1703 | nstat->rx_length_errors = (hwstat->rx_oversize_pkts + | 1703 | nstat->rx_length_errors = (hwstat->rx_oversize_pkts + |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 6f4e18644bd4..d9b9170ed2fc 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
@@ -534,6 +534,25 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, | |||
534 | while ((processed < to_process) && (processed < budget)) { | 534 | while ((processed < to_process) && (processed < budget)) { |
535 | cb = &priv->rx_cbs[priv->rx_read_ptr]; | 535 | cb = &priv->rx_cbs[priv->rx_read_ptr]; |
536 | skb = cb->skb; | 536 | skb = cb->skb; |
537 | |||
538 | processed++; | ||
539 | priv->rx_read_ptr++; | ||
540 | |||
541 | if (priv->rx_read_ptr == priv->num_rx_bds) | ||
542 | priv->rx_read_ptr = 0; | ||
543 | |||
544 | /* We do not have a backing SKB, so we do not have a corresponding | ||
545 | * DMA mapping for this incoming packet since | ||
546 | * bcm_sysport_rx_refill always either has both skb and mapping | ||
547 | * or none. | ||
548 | */ | ||
549 | if (unlikely(!skb)) { | ||
550 | netif_err(priv, rx_err, ndev, "out of memory!\n"); | ||
551 | ndev->stats.rx_dropped++; | ||
552 | ndev->stats.rx_errors++; | ||
553 | goto refill; | ||
554 | } | ||
555 | |||
537 | dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), | 556 | dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), |
538 | RX_BUF_LENGTH, DMA_FROM_DEVICE); | 557 | RX_BUF_LENGTH, DMA_FROM_DEVICE); |
539 | 558 | ||
@@ -543,23 +562,11 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, | |||
543 | status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & | 562 | status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & |
544 | DESC_STATUS_MASK; | 563 | DESC_STATUS_MASK; |
545 | 564 | ||
546 | processed++; | ||
547 | priv->rx_read_ptr++; | ||
548 | if (priv->rx_read_ptr == priv->num_rx_bds) | ||
549 | priv->rx_read_ptr = 0; | ||
550 | |||
551 | netif_dbg(priv, rx_status, ndev, | 565 | netif_dbg(priv, rx_status, ndev, |
552 | "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", | 566 | "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", |
553 | p_index, priv->rx_c_index, priv->rx_read_ptr, | 567 | p_index, priv->rx_c_index, priv->rx_read_ptr, |
554 | len, status); | 568 | len, status); |
555 | 569 | ||
556 | if (unlikely(!skb)) { | ||
557 | netif_err(priv, rx_err, ndev, "out of memory!\n"); | ||
558 | ndev->stats.rx_dropped++; | ||
559 | ndev->stats.rx_errors++; | ||
560 | goto refill; | ||
561 | } | ||
562 | |||
563 | if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { | 570 | if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { |
564 | netif_err(priv, rx_status, ndev, "fragmented packet!\n"); | 571 | netif_err(priv, rx_status, ndev, "fragmented packet!\n"); |
565 | ndev->stats.rx_dropped++; | 572 | ndev->stats.rx_dropped++; |
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 2fee73b878c2..823d01c5684c 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c | |||
@@ -3236,8 +3236,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) | |||
3236 | 3236 | ||
3237 | skb->protocol = eth_type_trans(skb, bp->dev); | 3237 | skb->protocol = eth_type_trans(skb, bp->dev); |
3238 | 3238 | ||
3239 | if ((len > (bp->dev->mtu + ETH_HLEN)) && | 3239 | if (len > (bp->dev->mtu + ETH_HLEN) && |
3240 | (ntohs(skb->protocol) != 0x8100)) { | 3240 | skb->protocol != htons(0x8100) && |
3241 | skb->protocol != htons(ETH_P_8021AD)) { | ||
3241 | 3242 | ||
3242 | dev_kfree_skb(skb); | 3243 | dev_kfree_skb(skb); |
3243 | goto next_rx; | 3244 | goto next_rx; |
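The bnx2 hunk above rewrites the VLAN check to compare skb->protocol, which is stored in network byte order, directly against htons() constants, and to accept 802.1ad (QinQ) as well as 802.1Q. A small sketch of that comparison style using plain BSD socket headers rather than kernel types; is_vlan_tagged() and the ETHERTYPE_* names are chosen for the example.

/* Illustrative sketch: comparing a network-byte-order EtherType against
 * constants. Converting the constant with htons() once is cheaper and
 * less error-prone than converting the packet field with ntohs() on
 * every comparison; 0x88A8 is the 802.1ad (QinQ) EtherType.
 */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETHERTYPE_8021Q  0x8100
#define ETHERTYPE_8021AD 0x88A8

static bool is_vlan_tagged(uint16_t proto_be)	/* network byte order */
{
	return proto_be == htons(ETHERTYPE_8021Q) ||
	       proto_be == htons(ETHERTYPE_8021AD);
}

int main(void)
{
	uint16_t proto = htons(ETHERTYPE_8021AD);	/* as read from the frame */

	printf("vlan tagged: %s\n", is_vlan_tagged(proto) ? "yes" : "no");
	return 0;
}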
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 5ba8af50c84f..c4daa068f1db 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | |||
@@ -2233,7 +2233,12 @@ struct shmem2_region { | |||
2233 | u32 reserved3; /* Offset 0x14C */ | 2233 | u32 reserved3; /* Offset 0x14C */ |
2234 | u32 reserved4; /* Offset 0x150 */ | 2234 | u32 reserved4; /* Offset 0x150 */ |
2235 | u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ | 2235 | u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ |
2236 | #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0) | 2236 | #define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001 |
2237 | #define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00 | ||
2238 | #define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8 | ||
2239 | #define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000 | ||
2240 | #define LINK_SFP_EEPROM_COMP_CODE_LR 0x00002000 | ||
2241 | #define LINK_SFP_EEPROM_COMP_CODE_LRM 0x00004000 | ||
2237 | 2242 | ||
2238 | u32 reserved5[2]; | 2243 | u32 reserved5[2]; |
2239 | u32 reserved6[PORT_MAX]; | 2244 | u32 reserved6[PORT_MAX]; |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 53fb4fa61b40..549549eaf580 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | |||
@@ -154,15 +154,22 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy, | |||
154 | LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) | 154 | LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) |
155 | 155 | ||
156 | #define SFP_EEPROM_CON_TYPE_ADDR 0x2 | 156 | #define SFP_EEPROM_CON_TYPE_ADDR 0x2 |
157 | #define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN 0x0 | ||
157 | #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 | 158 | #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 |
158 | #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 | 159 | #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 |
159 | #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22 | 160 | #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22 |
160 | 161 | ||
161 | 162 | ||
162 | #define SFP_EEPROM_COMP_CODE_ADDR 0x3 | 163 | #define SFP_EEPROM_10G_COMP_CODE_ADDR 0x3 |
163 | #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4) | 164 | #define SFP_EEPROM_10G_COMP_CODE_SR_MASK (1<<4) |
164 | #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5) | 165 | #define SFP_EEPROM_10G_COMP_CODE_LR_MASK (1<<5) |
165 | #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6) | 166 | #define SFP_EEPROM_10G_COMP_CODE_LRM_MASK (1<<6) |
167 | |||
168 | #define SFP_EEPROM_1G_COMP_CODE_ADDR 0x6 | ||
169 | #define SFP_EEPROM_1G_COMP_CODE_SX (1<<0) | ||
170 | #define SFP_EEPROM_1G_COMP_CODE_LX (1<<1) | ||
171 | #define SFP_EEPROM_1G_COMP_CODE_CX (1<<2) | ||
172 | #define SFP_EEPROM_1G_COMP_CODE_BASE_T (1<<3) | ||
166 | 173 | ||
167 | #define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 | 174 | #define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 |
168 | #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 | 175 | #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 |
@@ -3633,8 +3640,8 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy, | |||
3633 | reg_set[i].val); | 3640 | reg_set[i].val); |
3634 | 3641 | ||
3635 | /* Start KR2 work-around timer which handles BCM8073 link-partner */ | 3642 | /* Start KR2 work-around timer which handles BCM8073 link-partner */ |
3636 | vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE; | 3643 | params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE; |
3637 | bnx2x_update_link_attr(params, vars->link_attr_sync); | 3644 | bnx2x_update_link_attr(params, params->link_attr_sync); |
3638 | } | 3645 | } |
3639 | 3646 | ||
3640 | static void bnx2x_disable_kr2(struct link_params *params, | 3647 | static void bnx2x_disable_kr2(struct link_params *params, |
@@ -3666,8 +3673,8 @@ static void bnx2x_disable_kr2(struct link_params *params, | |||
3666 | for (i = 0; i < ARRAY_SIZE(reg_set); i++) | 3673 | for (i = 0; i < ARRAY_SIZE(reg_set); i++) |
3667 | bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, | 3674 | bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, |
3668 | reg_set[i].val); | 3675 | reg_set[i].val); |
3669 | vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; | 3676 | params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; |
3670 | bnx2x_update_link_attr(params, vars->link_attr_sync); | 3677 | bnx2x_update_link_attr(params, params->link_attr_sync); |
3671 | 3678 | ||
3672 | vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; | 3679 | vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; |
3673 | } | 3680 | } |
@@ -4810,7 +4817,7 @@ void bnx2x_link_status_update(struct link_params *params, | |||
4810 | ~FEATURE_CONFIG_PFC_ENABLED; | 4817 | ~FEATURE_CONFIG_PFC_ENABLED; |
4811 | 4818 | ||
4812 | if (SHMEM2_HAS(bp, link_attr_sync)) | 4819 | if (SHMEM2_HAS(bp, link_attr_sync)) |
4813 | vars->link_attr_sync = SHMEM2_RD(bp, | 4820 | params->link_attr_sync = SHMEM2_RD(bp, |
4814 | link_attr_sync[params->port]); | 4821 | link_attr_sync[params->port]); |
4815 | 4822 | ||
4816 | DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n", | 4823 | DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n", |
@@ -8057,21 +8064,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
8057 | { | 8064 | { |
8058 | struct bnx2x *bp = params->bp; | 8065 | struct bnx2x *bp = params->bp; |
8059 | u32 sync_offset = 0, phy_idx, media_types; | 8066 | u32 sync_offset = 0, phy_idx, media_types; |
8060 | u8 gport, val[2], check_limiting_mode = 0; | 8067 | u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1], check_limiting_mode = 0; |
8061 | *edc_mode = EDC_MODE_LIMITING; | 8068 | *edc_mode = EDC_MODE_LIMITING; |
8062 | phy->media_type = ETH_PHY_UNSPECIFIED; | 8069 | phy->media_type = ETH_PHY_UNSPECIFIED; |
8063 | /* First check for copper cable */ | 8070 | /* First check for copper cable */ |
8064 | if (bnx2x_read_sfp_module_eeprom(phy, | 8071 | if (bnx2x_read_sfp_module_eeprom(phy, |
8065 | params, | 8072 | params, |
8066 | I2C_DEV_ADDR_A0, | 8073 | I2C_DEV_ADDR_A0, |
8067 | SFP_EEPROM_CON_TYPE_ADDR, | 8074 | 0, |
8068 | 2, | 8075 | SFP_EEPROM_FC_TX_TECH_ADDR + 1, |
8069 | (u8 *)val) != 0) { | 8076 | (u8 *)val) != 0) { |
8070 | DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); | 8077 | DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); |
8071 | return -EINVAL; | 8078 | return -EINVAL; |
8072 | } | 8079 | } |
8073 | 8080 | params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK; | |
8074 | switch (val[0]) { | 8081 | params->link_attr_sync |= val[SFP_EEPROM_10G_COMP_CODE_ADDR] << |
8082 | LINK_SFP_EEPROM_COMP_CODE_SHIFT; | ||
8083 | bnx2x_update_link_attr(params, params->link_attr_sync); | ||
8084 | switch (val[SFP_EEPROM_CON_TYPE_ADDR]) { | ||
8075 | case SFP_EEPROM_CON_TYPE_VAL_COPPER: | 8085 | case SFP_EEPROM_CON_TYPE_VAL_COPPER: |
8076 | { | 8086 | { |
8077 | u8 copper_module_type; | 8087 | u8 copper_module_type; |
@@ -8079,17 +8089,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
8079 | /* Check if it's an active cable (includes SFP+ module) | 8089 | /* Check if it's an active cable (includes SFP+ module) |
8080 | * or a passive cable | 8090 | * or a passive cable |
8081 | */ | 8091 | */ |
8082 | if (bnx2x_read_sfp_module_eeprom(phy, | 8092 | copper_module_type = val[SFP_EEPROM_FC_TX_TECH_ADDR]; |
8083 | params, | ||
8084 | I2C_DEV_ADDR_A0, | ||
8085 | SFP_EEPROM_FC_TX_TECH_ADDR, | ||
8086 | 1, | ||
8087 | &copper_module_type) != 0) { | ||
8088 | DP(NETIF_MSG_LINK, | ||
8089 | "Failed to read copper-cable-type" | ||
8090 | " from SFP+ EEPROM\n"); | ||
8091 | return -EINVAL; | ||
8092 | } | ||
8093 | 8093 | ||
8094 | if (copper_module_type & | 8094 | if (copper_module_type & |
8095 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { | 8095 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { |
@@ -8115,16 +8115,18 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
8115 | } | 8115 | } |
8116 | break; | 8116 | break; |
8117 | } | 8117 | } |
8118 | case SFP_EEPROM_CON_TYPE_VAL_UNKNOWN: | ||
8118 | case SFP_EEPROM_CON_TYPE_VAL_LC: | 8119 | case SFP_EEPROM_CON_TYPE_VAL_LC: |
8119 | case SFP_EEPROM_CON_TYPE_VAL_RJ45: | 8120 | case SFP_EEPROM_CON_TYPE_VAL_RJ45: |
8120 | check_limiting_mode = 1; | 8121 | check_limiting_mode = 1; |
8121 | if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK | | 8122 | if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] & |
8122 | SFP_EEPROM_COMP_CODE_LR_MASK | | 8123 | (SFP_EEPROM_10G_COMP_CODE_SR_MASK | |
8123 | SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) { | 8124 | SFP_EEPROM_10G_COMP_CODE_LR_MASK | |
8125 | SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) { | ||
8124 | DP(NETIF_MSG_LINK, "1G SFP module detected\n"); | 8126 | DP(NETIF_MSG_LINK, "1G SFP module detected\n"); |
8125 | gport = params->port; | ||
8126 | phy->media_type = ETH_PHY_SFP_1G_FIBER; | 8127 | phy->media_type = ETH_PHY_SFP_1G_FIBER; |
8127 | if (phy->req_line_speed != SPEED_1000) { | 8128 | if (phy->req_line_speed != SPEED_1000) { |
8129 | u8 gport = params->port; | ||
8128 | phy->req_line_speed = SPEED_1000; | 8130 | phy->req_line_speed = SPEED_1000; |
8129 | if (!CHIP_IS_E1x(bp)) { | 8131 | if (!CHIP_IS_E1x(bp)) { |
8130 | gport = BP_PATH(bp) + | 8132 | gport = BP_PATH(bp) + |
@@ -8134,6 +8136,12 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
8134 | "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n", | 8136 | "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n", |
8135 | gport); | 8137 | gport); |
8136 | } | 8138 | } |
8139 | if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] & | ||
8140 | SFP_EEPROM_1G_COMP_CODE_BASE_T) { | ||
8141 | bnx2x_sfp_set_transmitter(params, phy, 0); | ||
8142 | msleep(40); | ||
8143 | bnx2x_sfp_set_transmitter(params, phy, 1); | ||
8144 | } | ||
8137 | } else { | 8145 | } else { |
8138 | int idx, cfg_idx = 0; | 8146 | int idx, cfg_idx = 0; |
8139 | DP(NETIF_MSG_LINK, "10G Optic module detected\n"); | 8147 | DP(NETIF_MSG_LINK, "10G Optic module detected\n"); |
@@ -8149,7 +8157,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
8149 | break; | 8157 | break; |
8150 | default: | 8158 | default: |
8151 | DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", | 8159 | DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", |
8152 | val[0]); | 8160 | val[SFP_EEPROM_CON_TYPE_ADDR]); |
8153 | return -EINVAL; | 8161 | return -EINVAL; |
8154 | } | 8162 | } |
8155 | sync_offset = params->shmem_base + | 8163 | sync_offset = params->shmem_base + |
@@ -13507,7 +13515,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13507 | 13515 | ||
13508 | sigdet = bnx2x_warpcore_get_sigdet(phy, params); | 13516 | sigdet = bnx2x_warpcore_get_sigdet(phy, params); |
13509 | if (!sigdet) { | 13517 | if (!sigdet) { |
13510 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { | 13518 | if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { |
13511 | bnx2x_kr2_recovery(params, vars, phy); | 13519 | bnx2x_kr2_recovery(params, vars, phy); |
13512 | DP(NETIF_MSG_LINK, "No sigdet\n"); | 13520 | DP(NETIF_MSG_LINK, "No sigdet\n"); |
13513 | } | 13521 | } |
@@ -13525,7 +13533,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13525 | 13533 | ||
13526 | /* CL73 has not begun yet */ | 13534 | /* CL73 has not begun yet */ |
13527 | if (base_page == 0) { | 13535 | if (base_page == 0) { |
13528 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { | 13536 | if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { |
13529 | bnx2x_kr2_recovery(params, vars, phy); | 13537 | bnx2x_kr2_recovery(params, vars, phy); |
13530 | DP(NETIF_MSG_LINK, "No BP\n"); | 13538 | DP(NETIF_MSG_LINK, "No BP\n"); |
13531 | } | 13539 | } |
@@ -13541,7 +13549,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13541 | ((next_page & 0xe0) == 0x20)))); | 13549 | ((next_page & 0xe0) == 0x20)))); |
13542 | 13550 | ||
13543 | /* In case KR2 is already disabled, check if we need to re-enable it */ | 13551 | /* In case KR2 is already disabled, check if we need to re-enable it */ |
13544 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { | 13552 | if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { |
13545 | if (!not_kr2_device) { | 13553 | if (!not_kr2_device) { |
13546 | DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, | 13554 | DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, |
13547 | next_page); | 13555 | next_page); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index 389f5f8cb0a3..d9cce4c3899b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | |||
@@ -323,6 +323,9 @@ struct link_params { | |||
323 | #define LINK_FLAGS_INT_DISABLED (1<<0) | 323 | #define LINK_FLAGS_INT_DISABLED (1<<0) |
324 | #define PHY_INITIALIZED (1<<1) | 324 | #define PHY_INITIALIZED (1<<1) |
325 | u32 lfa_base; | 325 | u32 lfa_base; |
326 | |||
327 | /* The same definitions as the shmem2 parameter */ | ||
328 | u32 link_attr_sync; | ||
326 | }; | 329 | }; |
327 | 330 | ||
328 | /* Output parameters */ | 331 | /* Output parameters */ |
@@ -364,8 +367,6 @@ struct link_vars { | |||
364 | u8 rx_tx_asic_rst; | 367 | u8 rx_tx_asic_rst; |
365 | u8 turn_to_run_wc_rt; | 368 | u8 turn_to_run_wc_rt; |
366 | u16 rsrv2; | 369 | u16 rsrv2; |
367 | /* The same definitions as the shmem2 parameter */ | ||
368 | u32 link_attr_sync; | ||
369 | }; | 370 | }; |
370 | 371 | ||
371 | /***********************************************************/ | 372 | /***********************************************************/ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 900cab420810..d1c093dcb054 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -6849,6 +6849,37 @@ static void bnx2x__common_init_phy(struct bnx2x *bp) | |||
6849 | bnx2x_release_phy_lock(bp); | 6849 | bnx2x_release_phy_lock(bp); |
6850 | } | 6850 | } |
6851 | 6851 | ||
6852 | static void bnx2x_config_endianity(struct bnx2x *bp, u32 val) | ||
6853 | { | ||
6854 | REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val); | ||
6855 | REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val); | ||
6856 | REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val); | ||
6857 | REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val); | ||
6858 | REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val); | ||
6859 | |||
6860 | /* make sure this value is 0 */ | ||
6861 | REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); | ||
6862 | |||
6863 | REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val); | ||
6864 | REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val); | ||
6865 | REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val); | ||
6866 | REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val); | ||
6867 | } | ||
6868 | |||
6869 | static void bnx2x_set_endianity(struct bnx2x *bp) | ||
6870 | { | ||
6871 | #ifdef __BIG_ENDIAN | ||
6872 | bnx2x_config_endianity(bp, 1); | ||
6873 | #else | ||
6874 | bnx2x_config_endianity(bp, 0); | ||
6875 | #endif | ||
6876 | } | ||
6877 | |||
6878 | static void bnx2x_reset_endianity(struct bnx2x *bp) | ||
6879 | { | ||
6880 | bnx2x_config_endianity(bp, 0); | ||
6881 | } | ||
6882 | |||
6852 | /** | 6883 | /** |
6853 | * bnx2x_init_hw_common - initialize the HW at the COMMON phase. | 6884 | * bnx2x_init_hw_common - initialize the HW at the COMMON phase. |
6854 | * | 6885 | * |
@@ -6915,23 +6946,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) | |||
6915 | 6946 | ||
6916 | bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); | 6947 | bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); |
6917 | bnx2x_init_pxp(bp); | 6948 | bnx2x_init_pxp(bp); |
6918 | 6949 | bnx2x_set_endianity(bp); | |
6919 | #ifdef __BIG_ENDIAN | ||
6920 | REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); | ||
6921 | REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); | ||
6922 | REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); | ||
6923 | REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); | ||
6924 | REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); | ||
6925 | /* make sure this value is 0 */ | ||
6926 | REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); | ||
6927 | |||
6928 | /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ | ||
6929 | REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); | ||
6930 | REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); | ||
6931 | REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); | ||
6932 | REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); | ||
6933 | #endif | ||
6934 | |||
6935 | bnx2x_ilt_init_page_size(bp, INITOP_SET); | 6950 | bnx2x_ilt_init_page_size(bp, INITOP_SET); |
6936 | 6951 | ||
6937 | if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) | 6952 | if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) |
@@ -13169,9 +13184,15 @@ static void __bnx2x_remove(struct pci_dev *pdev, | |||
13169 | bnx2x_iov_remove_one(bp); | 13184 | bnx2x_iov_remove_one(bp); |
13170 | 13185 | ||
13171 | /* Power on: we can't let PCI layer write to us while we are in D3 */ | 13186 | /* Power on: we can't let PCI layer write to us while we are in D3 */ |
13172 | if (IS_PF(bp)) | 13187 | if (IS_PF(bp)) { |
13173 | bnx2x_set_power_state(bp, PCI_D0); | 13188 | bnx2x_set_power_state(bp, PCI_D0); |
13174 | 13189 | ||
13190 | /* Set endianity registers to reset values in case the next driver | ||
13191 | * boots in a different endianity environment. | ||
13192 | */ | ||
13193 | bnx2x_reset_endianity(bp); | ||
13194 | } | ||
13195 | |||
13175 | /* Disable MSI/MSI-X */ | 13196 | /* Disable MSI/MSI-X */ |
13176 | bnx2x_disable_msi(bp); | 13197 | bnx2x_disable_msi(bp); |
13177 | 13198 | ||
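The bnx2x_main.c hunks above fold a block of #ifdef __BIG_ENDIAN register writes into bnx2x_config_endianity() with set/reset wrappers, so the remove path can restore little-endian defaults for whatever driver binds next. A compressed sketch of that shape follows; write_swap_reg() and the two register names stand in for the real REG_WR() calls, and note that in the kernel __BIG_ENDIAN comes from the architecture configuration while in this standalone sketch it is simply undefined unless the toolchain provides it.

/* Illustrative sketch of the refactoring pattern above: one helper that
 * takes the swap value, a set function that picks the value from the
 * build-time endianness, and a reset function that restores the default.
 */
#include <stdio.h>

static void write_swap_reg(const char *name, unsigned int val)
{
	printf("%s <- %u\n", name, val);	/* placeholder for a register write */
}

static void config_endianness(unsigned int val)
{
	/* All byte-swap related registers take the same value. */
	write_swap_reg("RQ_QM_ENDIAN_M", val);
	write_swap_reg("RD_QM_SWAP_MODE", val);
}

static void set_endianness(void)
{
#ifdef __BIG_ENDIAN
	config_endianness(1);			/* swap for big-endian hosts */
#else
	config_endianness(0);
#endif
}

static void reset_endianness(void)
{
	config_endianness(0);			/* hardware default */
}

int main(void)
{
	set_endianness();
	reset_endianness();
	return 0;
}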
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 27861a6c7ca5..a6a9f284c8dd 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #include <linux/if_vlan.h> | 31 | #include <linux/if_vlan.h> |
32 | #include <linux/prefetch.h> | 32 | #include <linux/prefetch.h> |
33 | #include <linux/random.h> | 33 | #include <linux/random.h> |
34 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 34 | #if IS_ENABLED(CONFIG_VLAN_8021Q) |
35 | #define BCM_VLAN 1 | 35 | #define BCM_VLAN 1 |
36 | #endif | 36 | #endif |
37 | #include <net/ip.h> | 37 | #include <net/ip.h> |
@@ -3685,7 +3685,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr, | |||
3685 | static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, | 3685 | static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, |
3686 | struct dst_entry **dst) | 3686 | struct dst_entry **dst) |
3687 | { | 3687 | { |
3688 | #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) | 3688 | #if IS_ENABLED(CONFIG_IPV6) |
3689 | struct flowi6 fl6; | 3689 | struct flowi6 fl6; |
3690 | 3690 | ||
3691 | memset(&fl6, 0, sizeof(fl6)); | 3691 | memset(&fl6, 0, sizeof(fl6)); |
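The cnic changes above swap open-coded defined(CONFIG_X) || defined(CONFIG_X_MODULE) tests for IS_ENABLED(), which evaluates true when an option is built in or modular. The sketch below re-creates a simplified version of the preprocessor trick behind it so the expansion can be followed; the real definition lives in include/linux/kconfig.h and differs in detail, and CONFIG_VLAN_8021Q is defined here by hand only to stand in for Kconfig output.

/* Illustrative sketch: a simplified IS_ENABLED() in the spirit of
 * include/linux/kconfig.h. It relies on enabled options being defined
 * to 1 (as Kconfig does for =y) or carrying a _MODULE suffix (for =m).
 */
#include <stdio.h>

#define CONFIG_VLAN_8021Q 1		/* pretend this came from Kconfig */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(a, b, ...) b
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)

#define IS_ENABLED(option) \
	(__is_defined(option) || __is_defined(option##_MODULE))

int main(void)
{
	printf("VLAN support: %d\n", IS_ENABLED(CONFIG_VLAN_8021Q));	/* 1 */
	printf("IPV6 support: %d\n", IS_ENABLED(CONFIG_IPV6));		/* 0 */
	return 0;
}

Unlike a bare #ifdef, this form can be used inside ordinary C conditions, which lets the compiler still type-check the code that is compiled out.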
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 3f9d4de8173c..5cc9cae21ed5 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c | |||
@@ -875,6 +875,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
875 | int last_tx_cn, last_c_index, num_tx_bds; | 875 | int last_tx_cn, last_c_index, num_tx_bds; |
876 | struct enet_cb *tx_cb_ptr; | 876 | struct enet_cb *tx_cb_ptr; |
877 | struct netdev_queue *txq; | 877 | struct netdev_queue *txq; |
878 | unsigned int bds_compl; | ||
878 | unsigned int c_index; | 879 | unsigned int c_index; |
879 | 880 | ||
880 | /* Compute how many buffers are transmitted since last xmit call */ | 881 | /* Compute how many buffers are transmitted since last xmit call */ |
@@ -899,7 +900,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
899 | /* Reclaim transmitted buffers */ | 900 | /* Reclaim transmitted buffers */ |
900 | while (last_tx_cn-- > 0) { | 901 | while (last_tx_cn-- > 0) { |
901 | tx_cb_ptr = ring->cbs + last_c_index; | 902 | tx_cb_ptr = ring->cbs + last_c_index; |
903 | bds_compl = 0; | ||
902 | if (tx_cb_ptr->skb) { | 904 | if (tx_cb_ptr->skb) { |
905 | bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; | ||
903 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; | 906 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; |
904 | dma_unmap_single(&dev->dev, | 907 | dma_unmap_single(&dev->dev, |
905 | dma_unmap_addr(tx_cb_ptr, dma_addr), | 908 | dma_unmap_addr(tx_cb_ptr, dma_addr), |
@@ -916,7 +919,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
916 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); | 919 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); |
917 | } | 920 | } |
918 | dev->stats.tx_packets++; | 921 | dev->stats.tx_packets++; |
919 | ring->free_bds += 1; | 922 | ring->free_bds += bds_compl; |
920 | 923 | ||
921 | last_c_index++; | 924 | last_c_index++; |
922 | last_c_index &= (num_tx_bds - 1); | 925 | last_c_index &= (num_tx_bds - 1); |
@@ -1274,12 +1277,29 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |||
1274 | 1277 | ||
1275 | while ((rxpktprocessed < rxpkttoprocess) && | 1278 | while ((rxpktprocessed < rxpkttoprocess) && |
1276 | (rxpktprocessed < budget)) { | 1279 | (rxpktprocessed < budget)) { |
1280 | cb = &priv->rx_cbs[priv->rx_read_ptr]; | ||
1281 | skb = cb->skb; | ||
1282 | |||
1283 | rxpktprocessed++; | ||
1284 | |||
1285 | priv->rx_read_ptr++; | ||
1286 | priv->rx_read_ptr &= (priv->num_rx_bds - 1); | ||
1287 | |||
1288 | /* We do not have a backing SKB, so we do not have a | ||
1289 | * corresponding DMA mapping for this incoming packet since | ||
1290 | * bcmgenet_rx_refill always either has both skb and mapping or | ||
1291 | * none. | ||
1292 | */ | ||
1293 | if (unlikely(!skb)) { | ||
1294 | dev->stats.rx_dropped++; | ||
1295 | dev->stats.rx_errors++; | ||
1296 | goto refill; | ||
1297 | } | ||
1298 | |||
1277 | /* Unmap the packet contents such that we can use the | 1299 | /* Unmap the packet contents such that we can use the |
1278 | * RSV from the 64 bytes descriptor when enabled and save | 1300 | * RSV from the 64 bytes descriptor when enabled and save |
1279 | * a 32-bits register read | 1301 | * a 32-bits register read |
1280 | */ | 1302 | */ |
1281 | cb = &priv->rx_cbs[priv->rx_read_ptr]; | ||
1282 | skb = cb->skb; | ||
1283 | dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), | 1303 | dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), |
1284 | priv->rx_buf_len, DMA_FROM_DEVICE); | 1304 | priv->rx_buf_len, DMA_FROM_DEVICE); |
1285 | 1305 | ||
@@ -1307,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |||
1307 | __func__, p_index, priv->rx_c_index, | 1327 | __func__, p_index, priv->rx_c_index, |
1308 | priv->rx_read_ptr, dma_length_status); | 1328 | priv->rx_read_ptr, dma_length_status); |
1309 | 1329 | ||
1310 | rxpktprocessed++; | ||
1311 | |||
1312 | priv->rx_read_ptr++; | ||
1313 | priv->rx_read_ptr &= (priv->num_rx_bds - 1); | ||
1314 | |||
1315 | /* out of memory, just drop packets at the hardware level */ | ||
1316 | if (unlikely(!skb)) { | ||
1317 | dev->stats.rx_dropped++; | ||
1318 | dev->stats.rx_errors++; | ||
1319 | goto refill; | ||
1320 | } | ||
1321 | |||
1322 | if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { | 1330 | if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { |
1323 | netif_err(priv, rx_status, dev, | 1331 | netif_err(priv, rx_status, dev, |
1324 | "dropping fragmented packet!\n"); | 1332 | "dropping fragmented packet!\n"); |
@@ -1736,13 +1744,63 @@ static void bcmgenet_init_multiq(struct net_device *dev) | |||
1736 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | 1744 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); |
1737 | } | 1745 | } |
1738 | 1746 | ||
1747 | static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) | ||
1748 | { | ||
1749 | int ret = 0; | ||
1750 | int timeout = 0; | ||
1751 | u32 reg; | ||
1752 | |||
1753 | /* Disable TDMA to stop add more frames in TX DMA */ | ||
1754 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | ||
1755 | reg &= ~DMA_EN; | ||
1756 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | ||
1757 | |||
1758 | /* Check TDMA status register to confirm TDMA is disabled */ | ||
1759 | while (timeout++ < DMA_TIMEOUT_VAL) { | ||
1760 | reg = bcmgenet_tdma_readl(priv, DMA_STATUS); | ||
1761 | if (reg & DMA_DISABLED) | ||
1762 | break; | ||
1763 | |||
1764 | udelay(1); | ||
1765 | } | ||
1766 | |||
1767 | if (timeout == DMA_TIMEOUT_VAL) { | ||
1768 | netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); | ||
1769 | ret = -ETIMEDOUT; | ||
1770 | } | ||
1771 | |||
1772 | /* Wait 10ms for packet drain in both tx and rx dma */ | ||
1773 | usleep_range(10000, 20000); | ||
1774 | |||
1775 | /* Disable RDMA */ | ||
1776 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | ||
1777 | reg &= ~DMA_EN; | ||
1778 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | ||
1779 | |||
1780 | timeout = 0; | ||
1781 | /* Check RDMA status register to confirm RDMA is disabled */ | ||
1782 | while (timeout++ < DMA_TIMEOUT_VAL) { | ||
1783 | reg = bcmgenet_rdma_readl(priv, DMA_STATUS); | ||
1784 | if (reg & DMA_DISABLED) | ||
1785 | break; | ||
1786 | |||
1787 | udelay(1); | ||
1788 | } | ||
1789 | |||
1790 | if (timeout == DMA_TIMEOUT_VAL) { | ||
1791 | netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); | ||
1792 | ret = -ETIMEDOUT; | ||
1793 | } | ||
1794 | |||
1795 | return ret; | ||
1796 | } | ||
1797 | |||
1739 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) | 1798 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) |
1740 | { | 1799 | { |
1741 | int i; | 1800 | int i; |
1742 | 1801 | ||
1743 | /* disable DMA */ | 1802 | /* disable DMA */ |
1744 | bcmgenet_rdma_writel(priv, 0, DMA_CTRL); | 1803 | bcmgenet_dma_teardown(priv); |
1745 | bcmgenet_tdma_writel(priv, 0, DMA_CTRL); | ||
1746 | 1804 | ||
1747 | for (i = 0; i < priv->num_tx_bds; i++) { | 1805 | for (i = 0; i < priv->num_tx_bds; i++) { |
1748 | if (priv->tx_cbs[i].skb != NULL) { | 1806 | if (priv->tx_cbs[i].skb != NULL) { |
@@ -2101,57 +2159,6 @@ err_clk_disable: | |||
2101 | return ret; | 2159 | return ret; |
2102 | } | 2160 | } |
2103 | 2161 | ||
2104 | static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) | ||
2105 | { | ||
2106 | int ret = 0; | ||
2107 | int timeout = 0; | ||
2108 | u32 reg; | ||
2109 | |||
2110 | /* Disable TDMA to stop add more frames in TX DMA */ | ||
2111 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | ||
2112 | reg &= ~DMA_EN; | ||
2113 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | ||
2114 | |||
2115 | /* Check TDMA status register to confirm TDMA is disabled */ | ||
2116 | while (timeout++ < DMA_TIMEOUT_VAL) { | ||
2117 | reg = bcmgenet_tdma_readl(priv, DMA_STATUS); | ||
2118 | if (reg & DMA_DISABLED) | ||
2119 | break; | ||
2120 | |||
2121 | udelay(1); | ||
2122 | } | ||
2123 | |||
2124 | if (timeout == DMA_TIMEOUT_VAL) { | ||
2125 | netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); | ||
2126 | ret = -ETIMEDOUT; | ||
2127 | } | ||
2128 | |||
2129 | /* Wait 10ms for packet drain in both tx and rx dma */ | ||
2130 | usleep_range(10000, 20000); | ||
2131 | |||
2132 | /* Disable RDMA */ | ||
2133 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | ||
2134 | reg &= ~DMA_EN; | ||
2135 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | ||
2136 | |||
2137 | timeout = 0; | ||
2138 | /* Check RDMA status register to confirm RDMA is disabled */ | ||
2139 | while (timeout++ < DMA_TIMEOUT_VAL) { | ||
2140 | reg = bcmgenet_rdma_readl(priv, DMA_STATUS); | ||
2141 | if (reg & DMA_DISABLED) | ||
2142 | break; | ||
2143 | |||
2144 | udelay(1); | ||
2145 | } | ||
2146 | |||
2147 | if (timeout == DMA_TIMEOUT_VAL) { | ||
2148 | netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); | ||
2149 | ret = -ETIMEDOUT; | ||
2150 | } | ||
2151 | |||
2152 | return ret; | ||
2153 | } | ||
2154 | |||
2155 | static void bcmgenet_netif_stop(struct net_device *dev) | 2162 | static void bcmgenet_netif_stop(struct net_device *dev) |
2156 | { | 2163 | { |
2157 | struct bcmgenet_priv *priv = netdev_priv(dev); | 2164 | struct bcmgenet_priv *priv = netdev_priv(dev); |
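bcmgenet_dma_teardown(), relocated above so bcmgenet_fini_dma() can use it, is a textbook clear-enable-then-poll-status loop bounded by DMA_TIMEOUT_VAL. The self-contained sketch below models that loop; ctrl_reg, status_reg and fake_hw_step() are stand-ins for the real register accessors and hardware.

/* Illustrative sketch of the poll-with-timeout pattern used in
 * bcmgenet_dma_teardown(): clear the enable bit, then poll a status
 * register until the DISABLED bit appears or a bounded number of
 * short waits has elapsed.
 */
#include <stdio.h>

#define DMA_EN		(1u << 0)
#define DMA_DISABLED	(1u << 1)
#define DMA_TIMEOUT_VAL	5000		/* iterations of ~1 us each */

static unsigned int ctrl_reg, status_reg;

static void fake_hw_step(void)
{
	/* Model the hardware: once EN is cleared, report DISABLED. */
	if (!(ctrl_reg & DMA_EN))
		status_reg |= DMA_DISABLED;
}

static int dma_disable_and_wait(void)
{
	int timeout = 0;

	ctrl_reg &= ~DMA_EN;		/* stop accepting new work */

	while (timeout++ < DMA_TIMEOUT_VAL) {
		fake_hw_step();
		if (status_reg & DMA_DISABLED)
			return 0;
		/* the driver uses udelay(1) here */
	}
	return -1;			/* -ETIMEDOUT in the driver */
}

int main(void)
{
	ctrl_reg = DMA_EN;
	printf("teardown: %s\n", dma_disable_and_wait() ? "timed out" : "ok");
	return 0;
}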
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3ac5d23454a8..ba499489969a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -6918,7 +6918,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
6918 | skb->protocol = eth_type_trans(skb, tp->dev); | 6918 | skb->protocol = eth_type_trans(skb, tp->dev); |
6919 | 6919 | ||
6920 | if (len > (tp->dev->mtu + ETH_HLEN) && | 6920 | if (len > (tp->dev->mtu + ETH_HLEN) && |
6921 | skb->protocol != htons(ETH_P_8021Q)) { | 6921 | skb->protocol != htons(ETH_P_8021Q) && |
6922 | skb->protocol != htons(ETH_P_8021AD)) { | ||
6922 | dev_kfree_skb_any(skb); | 6923 | dev_kfree_skb_any(skb); |
6923 | goto drop_it_no_recycle; | 6924 | goto drop_it_no_recycle; |
6924 | } | 6925 | } |
@@ -7914,8 +7915,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
7914 | 7915 | ||
7915 | entry = tnapi->tx_prod; | 7916 | entry = tnapi->tx_prod; |
7916 | base_flags = 0; | 7917 | base_flags = 0; |
7917 | if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
7918 | base_flags |= TXD_FLAG_TCPUDP_CSUM; | ||
7919 | 7918 | ||
7920 | mss = skb_shinfo(skb)->gso_size; | 7919 | mss = skb_shinfo(skb)->gso_size; |
7921 | if (mss) { | 7920 | if (mss) { |
@@ -7929,6 +7928,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
7929 | 7928 | ||
7930 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; | 7929 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; |
7931 | 7930 | ||
7931 | /* HW/FW cannot correctly segment packets that have been | ||
7932 | * VLAN encapsulated. | ||
7933 | */ | ||
7934 | if (skb->protocol == htons(ETH_P_8021Q) || | ||
7935 | skb->protocol == htons(ETH_P_8021AD)) | ||
7936 | return tg3_tso_bug(tp, tnapi, txq, skb); | ||
7937 | |||
7932 | if (!skb_is_gso_v6(skb)) { | 7938 | if (!skb_is_gso_v6(skb)) { |
7933 | if (unlikely((ETH_HLEN + hdr_len) > 80) && | 7939 | if (unlikely((ETH_HLEN + hdr_len) > 80) && |
7934 | tg3_flag(tp, TSO_BUG)) | 7940 | tg3_flag(tp, TSO_BUG)) |
@@ -7979,6 +7985,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
7979 | base_flags |= tsflags << 12; | 7985 | base_flags |= tsflags << 12; |
7980 | } | 7986 | } |
7981 | } | 7987 | } |
7988 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
7989 | /* HW/FW cannot correctly checksum packets that have been | ||
7990 | * VLAN encapsulated. | ||
7991 | */ | ||
7992 | if (skb->protocol == htons(ETH_P_8021Q) || | ||
7993 | skb->protocol == htons(ETH_P_8021AD)) { | ||
7994 | if (skb_checksum_help(skb)) | ||
7995 | goto drop; | ||
7996 | } else { | ||
7997 | base_flags |= TXD_FLAG_TCPUDP_CSUM; | ||
7998 | } | ||
7982 | } | 7999 | } |
7983 | 8000 | ||
7984 | if (tg3_flag(tp, USE_JUMBO_BDFLAG) && | 8001 | if (tg3_flag(tp, USE_JUMBO_BDFLAG) && |
@@ -11617,6 +11634,12 @@ static int tg3_open(struct net_device *dev) | |||
11617 | struct tg3 *tp = netdev_priv(dev); | 11634 | struct tg3 *tp = netdev_priv(dev); |
11618 | int err; | 11635 | int err; |
11619 | 11636 | ||
11637 | if (tp->pcierr_recovery) { | ||
11638 | netdev_err(dev, "Failed to open device. PCI error recovery " | ||
11639 | "in progress\n"); | ||
11640 | return -EAGAIN; | ||
11641 | } | ||
11642 | |||
11620 | if (tp->fw_needed) { | 11643 | if (tp->fw_needed) { |
11621 | err = tg3_request_firmware(tp); | 11644 | err = tg3_request_firmware(tp); |
11622 | if (tg3_asic_rev(tp) == ASIC_REV_57766) { | 11645 | if (tg3_asic_rev(tp) == ASIC_REV_57766) { |
@@ -11674,6 +11697,12 @@ static int tg3_close(struct net_device *dev) | |||
11674 | { | 11697 | { |
11675 | struct tg3 *tp = netdev_priv(dev); | 11698 | struct tg3 *tp = netdev_priv(dev); |
11676 | 11699 | ||
11700 | if (tp->pcierr_recovery) { | ||
11701 | netdev_err(dev, "Failed to close device. PCI error recovery " | ||
11702 | "in progress\n"); | ||
11703 | return -EAGAIN; | ||
11704 | } | ||
11705 | |||
11677 | tg3_ptp_fini(tp); | 11706 | tg3_ptp_fini(tp); |
11678 | 11707 | ||
11679 | tg3_stop(tp); | 11708 | tg3_stop(tp); |
@@ -17561,6 +17590,7 @@ static int tg3_init_one(struct pci_dev *pdev, | |||
17561 | tp->rx_mode = TG3_DEF_RX_MODE; | 17590 | tp->rx_mode = TG3_DEF_RX_MODE; |
17562 | tp->tx_mode = TG3_DEF_TX_MODE; | 17591 | tp->tx_mode = TG3_DEF_TX_MODE; |
17563 | tp->irq_sync = 1; | 17592 | tp->irq_sync = 1; |
17593 | tp->pcierr_recovery = false; | ||
17564 | 17594 | ||
17565 | if (tg3_debug > 0) | 17595 | if (tg3_debug > 0) |
17566 | tp->msg_enable = tg3_debug; | 17596 | tp->msg_enable = tg3_debug; |
@@ -18071,6 +18101,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, | |||
18071 | 18101 | ||
18072 | rtnl_lock(); | 18102 | rtnl_lock(); |
18073 | 18103 | ||
18104 | tp->pcierr_recovery = true; | ||
18105 | |||
18074 | /* We probably don't have netdev yet */ | 18106 | /* We probably don't have netdev yet */ |
18075 | if (!netdev || !netif_running(netdev)) | 18107 | if (!netdev || !netif_running(netdev)) |
18076 | goto done; | 18108 | goto done; |
@@ -18195,6 +18227,7 @@ static void tg3_io_resume(struct pci_dev *pdev) | |||
18195 | tg3_phy_start(tp); | 18227 | tg3_phy_start(tp); |
18196 | 18228 | ||
18197 | done: | 18229 | done: |
18230 | tp->pcierr_recovery = false; | ||
18198 | rtnl_unlock(); | 18231 | rtnl_unlock(); |
18199 | } | 18232 | } |
18200 | 18233 | ||
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 461accaf0aa4..31c9f8295953 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h | |||
@@ -3407,6 +3407,7 @@ struct tg3 { | |||
3407 | 3407 | ||
3408 | struct device *hwmon_dev; | 3408 | struct device *hwmon_dev; |
3409 | bool link_up; | 3409 | bool link_up; |
3410 | bool pcierr_recovery; | ||
3410 | }; | 3411 | }; |
3411 | 3412 | ||
3412 | /* Accessor macros for chip and asic attributes | 3413 | /* Accessor macros for chip and asic attributes |
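The tg3 changes above add a pcierr_recovery flag that is raised in tg3_io_error_detected() and cleared in tg3_io_resume(), making tg3_open()/tg3_close() return -EAGAIN instead of racing with recovery. A minimal sketch of that guard follows; struct nic and the nic_* functions are invented for the example and all locking and net_device plumbing is omitted.

/* Illustrative sketch of the pcierr_recovery guard: a flag set while PCI
 * error recovery is in flight makes open/close bail out early.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct nic {
	bool pcierr_recovery;
};

static int nic_open(struct nic *n)
{
	if (n->pcierr_recovery)
		return -EAGAIN;		/* try again once recovery completes */
	/* ... normal bring-up ... */
	return 0;
}

static void nic_error_detected(struct nic *n)
{
	n->pcierr_recovery = true;	/* entered from the PCI error handler */
}

static void nic_io_resume(struct nic *n)
{
	/* ... re-init hardware ... */
	n->pcierr_recovery = false;	/* recovery done, open/close allowed again */
}

int main(void)
{
	struct nic n = { .pcierr_recovery = false };

	nic_error_detected(&n);
	printf("open during recovery: %d\n", nic_open(&n));	/* -EAGAIN */
	nic_io_resume(&n);
	printf("open after recovery:  %d\n", nic_open(&n));	/* 0 */
	return 0;
}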
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index ff8cae5e2535..ffc92a41d75b 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c | |||
@@ -2506,7 +2506,7 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) | |||
2506 | * For TSO, the TCP checksum field is seeded with pseudo-header sum | 2506 | * For TSO, the TCP checksum field is seeded with pseudo-header sum |
2507 | * excluding the length field. | 2507 | * excluding the length field. |
2508 | */ | 2508 | */ |
2509 | if (skb->protocol == htons(ETH_P_IP)) { | 2509 | if (vlan_get_protocol(skb) == htons(ETH_P_IP)) { |
2510 | struct iphdr *iph = ip_hdr(skb); | 2510 | struct iphdr *iph = ip_hdr(skb); |
2511 | 2511 | ||
2512 | /* Do we really need these? */ | 2512 | /* Do we really need these? */ |
@@ -2870,12 +2870,13 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, | |||
2870 | } | 2870 | } |
2871 | 2871 | ||
2872 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2872 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2873 | __be16 net_proto = vlan_get_protocol(skb); | ||
2873 | u8 proto = 0; | 2874 | u8 proto = 0; |
2874 | 2875 | ||
2875 | if (skb->protocol == htons(ETH_P_IP)) | 2876 | if (net_proto == htons(ETH_P_IP)) |
2876 | proto = ip_hdr(skb)->protocol; | 2877 | proto = ip_hdr(skb)->protocol; |
2877 | #ifdef NETIF_F_IPV6_CSUM | 2878 | #ifdef NETIF_F_IPV6_CSUM |
2878 | else if (skb->protocol == htons(ETH_P_IPV6)) { | 2879 | else if (net_proto == htons(ETH_P_IPV6)) { |
2879 | /* nexthdr may not be TCP immediately. */ | 2880 | /* nexthdr may not be TCP immediately. */ |
2880 | proto = ipv6_hdr(skb)->nexthdr; | 2881 | proto = ipv6_hdr(skb)->nexthdr; |
2881 | } | 2882 | } |
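The bnad hunks above switch from skb->protocol to vlan_get_protocol(), which returns the encapsulated EtherType when the frame carries an 802.1Q tag (skb->protocol would be the VLAN EtherType there). A hedged kernel-style sketch of the lookup the checksum path needs, mirroring the bnad logic rather than copying it:

#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

/* Return the L4 protocol number for IPv4/IPv6 frames, 0 otherwise. */
static u8 example_l4_proto(const struct sk_buff *skb)
{
	__be16 l3_proto = vlan_get_protocol(skb);

	if (l3_proto == htons(ETH_P_IP))
		return ip_hdr(skb)->protocol;
	if (l3_proto == htons(ETH_P_IPV6))
		return ipv6_hdr(skb)->nexthdr;	/* may still need an ext-header walk */
	return 0;
}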
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ca5d7798b265..e1e02fba4fcc 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/of_device.h> | 30 | #include <linux/of_device.h> |
31 | #include <linux/of_mdio.h> | 31 | #include <linux/of_mdio.h> |
32 | #include <linux/of_net.h> | 32 | #include <linux/of_net.h> |
33 | #include <linux/pinctrl/consumer.h> | ||
34 | 33 | ||
35 | #include "macb.h" | 34 | #include "macb.h" |
36 | 35 | ||
@@ -2071,7 +2070,6 @@ static int __init macb_probe(struct platform_device *pdev) | |||
2071 | struct phy_device *phydev; | 2070 | struct phy_device *phydev; |
2072 | u32 config; | 2071 | u32 config; |
2073 | int err = -ENXIO; | 2072 | int err = -ENXIO; |
2074 | struct pinctrl *pinctrl; | ||
2075 | const char *mac; | 2073 | const char *mac; |
2076 | 2074 | ||
2077 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2075 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -2080,15 +2078,6 @@ static int __init macb_probe(struct platform_device *pdev) | |||
2080 | goto err_out; | 2078 | goto err_out; |
2081 | } | 2079 | } |
2082 | 2080 | ||
2083 | pinctrl = devm_pinctrl_get_select_default(&pdev->dev); | ||
2084 | if (IS_ERR(pinctrl)) { | ||
2085 | err = PTR_ERR(pinctrl); | ||
2086 | if (err == -EPROBE_DEFER) | ||
2087 | goto err_out; | ||
2088 | |||
2089 | dev_warn(&pdev->dev, "No pinctrl provided\n"); | ||
2090 | } | ||
2091 | |||
2092 | err = -ENOMEM; | 2081 | err = -ENOMEM; |
2093 | dev = alloc_etherdev(sizeof(*bp)); | 2082 | dev = alloc_etherdev(sizeof(*bp)); |
2094 | if (!dev) | 2083 | if (!dev) |
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig index 184a063bed5f..07d2201530d2 100644 --- a/drivers/net/ethernet/calxeda/Kconfig +++ b/drivers/net/ethernet/calxeda/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config NET_CALXEDA_XGMAC | 1 | config NET_CALXEDA_XGMAC |
2 | tristate "Calxeda 1G/10G XGMAC Ethernet driver" | 2 | tristate "Calxeda 1G/10G XGMAC Ethernet driver" |
3 | depends on HAS_IOMEM && HAS_DMA | 3 | depends on HAS_IOMEM && HAS_DMA |
4 | depends on ARCH_HIGHBANK || COMPILE_TEST | ||
4 | select CRC32 | 5 | select CRC32 |
5 | help | 6 | help |
6 | This is the driver for the XGMAC Ethernet IP block found on Calxeda | 7 | This is the driver for the XGMAC Ethernet IP block found on Calxeda |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 18fb9c61d7ba..e5be511a3c38 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -1253,7 +1253,9 @@ freeout: t4_free_sge_resources(adap); | |||
1253 | goto freeout; | 1253 | goto freeout; |
1254 | } | 1254 | } |
1255 | 1255 | ||
1256 | t4_write_reg(adap, MPS_TRC_RSS_CONTROL, | 1256 | t4_write_reg(adap, is_t4(adap->params.chip) ? |
1257 | MPS_TRC_RSS_CONTROL : | ||
1258 | MPS_T5_TRC_RSS_CONTROL, | ||
1257 | RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | | 1259 | RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | |
1258 | QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); | 1260 | QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); |
1259 | return 0; | 1261 | return 0; |
@@ -1761,7 +1763,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
1761 | 0xd004, 0xd03c, | 1763 | 0xd004, 0xd03c, |
1762 | 0xdfc0, 0xdfe0, | 1764 | 0xdfc0, 0xdfe0, |
1763 | 0xe000, 0xea7c, | 1765 | 0xe000, 0xea7c, |
1764 | 0xf000, 0x11190, | 1766 | 0xf000, 0x11110, |
1767 | 0x11118, 0x11190, | ||
1765 | 0x19040, 0x1906c, | 1768 | 0x19040, 0x1906c, |
1766 | 0x19078, 0x19080, | 1769 | 0x19078, 0x19080, |
1767 | 0x1908c, 0x19124, | 1770 | 0x1908c, 0x19124, |
@@ -1968,7 +1971,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
1968 | 0xd004, 0xd03c, | 1971 | 0xd004, 0xd03c, |
1969 | 0xdfc0, 0xdfe0, | 1972 | 0xdfc0, 0xdfe0, |
1970 | 0xe000, 0x11088, | 1973 | 0xe000, 0x11088, |
1971 | 0x1109c, 0x1117c, | 1974 | 0x1109c, 0x11110, |
1975 | 0x11118, 0x1117c, | ||
1972 | 0x11190, 0x11204, | 1976 | 0x11190, 0x11204, |
1973 | 0x19040, 0x1906c, | 1977 | 0x19040, 0x1906c, |
1974 | 0x19078, 0x19080, | 1978 | 0x19078, 0x19080, |
@@ -5955,7 +5959,8 @@ static int adap_init0(struct adapter *adap) | |||
5955 | params[3] = FW_PARAM_PFVF(CQ_END); | 5959 | params[3] = FW_PARAM_PFVF(CQ_END); |
5956 | params[4] = FW_PARAM_PFVF(OCQ_START); | 5960 | params[4] = FW_PARAM_PFVF(OCQ_START); |
5957 | params[5] = FW_PARAM_PFVF(OCQ_END); | 5961 | params[5] = FW_PARAM_PFVF(OCQ_END); |
5958 | ret = t4_query_params(adap, 0, 0, 0, 6, params, val); | 5962 | ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, |
5963 | val); | ||
5959 | if (ret < 0) | 5964 | if (ret < 0) |
5960 | goto bye; | 5965 | goto bye; |
5961 | adap->vres.qp.start = val[0]; | 5966 | adap->vres.qp.start = val[0]; |
@@ -5967,7 +5972,8 @@ static int adap_init0(struct adapter *adap) | |||
5967 | 5972 | ||
5968 | params[0] = FW_PARAM_DEV(MAXORDIRD_QP); | 5973 | params[0] = FW_PARAM_DEV(MAXORDIRD_QP); |
5969 | params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER); | 5974 | params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER); |
5970 | ret = t4_query_params(adap, 0, 0, 0, 2, params, val); | 5975 | ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, |
5976 | val); | ||
5971 | if (ret < 0) { | 5977 | if (ret < 0) { |
5972 | adap->params.max_ordird_qp = 8; | 5978 | adap->params.max_ordird_qp = 8; |
5973 | adap->params.max_ird_adapter = 32 * adap->tids.ntids; | 5979 | adap->params.max_ird_adapter = 32 * adap->tids.ntids; |
@@ -6472,6 +6478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6472 | struct port_info *pi; | 6478 | struct port_info *pi; |
6473 | bool highdma = false; | 6479 | bool highdma = false; |
6474 | struct adapter *adapter = NULL; | 6480 | struct adapter *adapter = NULL; |
6481 | void __iomem *regs; | ||
6475 | 6482 | ||
6476 | printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); | 6483 | printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); |
6477 | 6484 | ||
@@ -6488,19 +6495,35 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6488 | goto out_release_regions; | 6495 | goto out_release_regions; |
6489 | } | 6496 | } |
6490 | 6497 | ||
6498 | regs = pci_ioremap_bar(pdev, 0); | ||
6499 | if (!regs) { | ||
6500 | dev_err(&pdev->dev, "cannot map device registers\n"); | ||
6501 | err = -ENOMEM; | ||
6502 | goto out_disable_device; | ||
6503 | } | ||
6504 | |||
6505 | /* We control everything through one PF */ | ||
6506 | func = SOURCEPF_GET(readl(regs + PL_WHOAMI)); | ||
6507 | if (func != ent->driver_data) { | ||
6508 | iounmap(regs); | ||
6509 | pci_disable_device(pdev); | ||
6510 | pci_save_state(pdev); /* to restore SR-IOV later */ | ||
6511 | goto sriov; | ||
6512 | } | ||
6513 | |||
6491 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | 6514 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { |
6492 | highdma = true; | 6515 | highdma = true; |
6493 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 6516 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
6494 | if (err) { | 6517 | if (err) { |
6495 | dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " | 6518 | dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " |
6496 | "coherent allocations\n"); | 6519 | "coherent allocations\n"); |
6497 | goto out_disable_device; | 6520 | goto out_unmap_bar0; |
6498 | } | 6521 | } |
6499 | } else { | 6522 | } else { |
6500 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 6523 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
6501 | if (err) { | 6524 | if (err) { |
6502 | dev_err(&pdev->dev, "no usable DMA configuration\n"); | 6525 | dev_err(&pdev->dev, "no usable DMA configuration\n"); |
6503 | goto out_disable_device; | 6526 | goto out_unmap_bar0; |
6504 | } | 6527 | } |
6505 | } | 6528 | } |
6506 | 6529 | ||
@@ -6512,7 +6535,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6512 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); | 6535 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); |
6513 | if (!adapter) { | 6536 | if (!adapter) { |
6514 | err = -ENOMEM; | 6537 | err = -ENOMEM; |
6515 | goto out_disable_device; | 6538 | goto out_unmap_bar0; |
6516 | } | 6539 | } |
6517 | 6540 | ||
6518 | adapter->workq = create_singlethread_workqueue("cxgb4"); | 6541 | adapter->workq = create_singlethread_workqueue("cxgb4"); |
@@ -6524,20 +6547,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6524 | /* PCI device has been enabled */ | 6547 | /* PCI device has been enabled */ |
6525 | adapter->flags |= DEV_ENABLED; | 6548 | adapter->flags |= DEV_ENABLED; |
6526 | 6549 | ||
6527 | adapter->regs = pci_ioremap_bar(pdev, 0); | 6550 | adapter->regs = regs; |
6528 | if (!adapter->regs) { | ||
6529 | dev_err(&pdev->dev, "cannot map device registers\n"); | ||
6530 | err = -ENOMEM; | ||
6531 | goto out_free_adapter; | ||
6532 | } | ||
6533 | |||
6534 | /* We control everything through one PF */ | ||
6535 | func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI)); | ||
6536 | if (func != ent->driver_data) { | ||
6537 | pci_save_state(pdev); /* to restore SR-IOV later */ | ||
6538 | goto sriov; | ||
6539 | } | ||
6540 | |||
6541 | adapter->pdev = pdev; | 6551 | adapter->pdev = pdev; |
6542 | adapter->pdev_dev = &pdev->dev; | 6552 | adapter->pdev_dev = &pdev->dev; |
6543 | adapter->mbox = func; | 6553 | adapter->mbox = func; |
@@ -6554,7 +6564,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6554 | 6564 | ||
6555 | err = t4_prep_adapter(adapter); | 6565 | err = t4_prep_adapter(adapter); |
6556 | if (err) | 6566 | if (err) |
6557 | goto out_unmap_bar0; | 6567 | goto out_free_adapter; |
6568 | |||
6558 | 6569 | ||
6559 | if (!is_t4(adapter->params.chip)) { | 6570 | if (!is_t4(adapter->params.chip)) { |
6560 | s_qpp = QUEUESPERPAGEPF1 * adapter->fn; | 6571 | s_qpp = QUEUESPERPAGEPF1 * adapter->fn; |
@@ -6571,14 +6582,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6571 | dev_err(&pdev->dev, | 6582 | dev_err(&pdev->dev, |
6572 | "Incorrect number of egress queues per page\n"); | 6583 | "Incorrect number of egress queues per page\n"); |
6573 | err = -EINVAL; | 6584 | err = -EINVAL; |
6574 | goto out_unmap_bar0; | 6585 | goto out_free_adapter; |
6575 | } | 6586 | } |
6576 | adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), | 6587 | adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), |
6577 | pci_resource_len(pdev, 2)); | 6588 | pci_resource_len(pdev, 2)); |
6578 | if (!adapter->bar2) { | 6589 | if (!adapter->bar2) { |
6579 | dev_err(&pdev->dev, "cannot map device bar2 region\n"); | 6590 | dev_err(&pdev->dev, "cannot map device bar2 region\n"); |
6580 | err = -ENOMEM; | 6591 | err = -ENOMEM; |
6581 | goto out_unmap_bar0; | 6592 | goto out_free_adapter; |
6582 | } | 6593 | } |
6583 | } | 6594 | } |
6584 | 6595 | ||
@@ -6716,13 +6727,13 @@ sriov: | |||
6716 | out_unmap_bar: | 6727 | out_unmap_bar: |
6717 | if (!is_t4(adapter->params.chip)) | 6728 | if (!is_t4(adapter->params.chip)) |
6718 | iounmap(adapter->bar2); | 6729 | iounmap(adapter->bar2); |
6719 | out_unmap_bar0: | ||
6720 | iounmap(adapter->regs); | ||
6721 | out_free_adapter: | 6730 | out_free_adapter: |
6722 | if (adapter->workq) | 6731 | if (adapter->workq) |
6723 | destroy_workqueue(adapter->workq); | 6732 | destroy_workqueue(adapter->workq); |
6724 | 6733 | ||
6725 | kfree(adapter); | 6734 | kfree(adapter); |
6735 | out_unmap_bar0: | ||
6736 | iounmap(regs); | ||
6726 | out_disable_device: | 6737 | out_disable_device: |
6727 | pci_disable_pcie_error_reporting(pdev); | 6738 | pci_disable_pcie_error_reporting(pdev); |
6728 | pci_disable_device(pdev); | 6739 | pci_disable_device(pdev); |
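The init_one() rework above maps BAR0 and reads PL_WHOAMI before the adapter is allocated, so the error labels move accordingly: out_unmap_bar0 now sits after out_free_adapter, keeping releases in the reverse order of acquisition. A standalone sketch of that ordering rule, with stubs standing in for the real PCI/ioremap calls:

#include <stdio.h>

static int  map_regs(void)      { puts("map regs");      return 0; }
static void unmap_regs(void)    { puts("unmap regs"); }
static int  alloc_adapter(void) { puts("alloc adapter"); return 0; }
static void free_adapter(void)  { puts("free adapter"); }
static int  map_bar2(void)      { puts("map bar2");      return -1; /* fail */ }

static int example_probe(void)
{
	int err;

	err = map_regs();		/* acquired first ... */
	if (err)
		goto out;
	err = alloc_adapter();
	if (err)
		goto out_unmap_regs;
	err = map_bar2();
	if (err)
		goto out_free_adapter;
	return 0;

out_free_adapter:
	free_adapter();
out_unmap_regs:
	unmap_regs();			/* ... released last */
out:
	return err;
}

int main(void) { return example_probe() ? 1 : 0; }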
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index a853133d8db8..41d04462b72e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -168,6 +168,34 @@ void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val) | |||
168 | } | 168 | } |
169 | 169 | ||
170 | /* | 170 | /* |
171 | * t4_report_fw_error - report firmware error | ||
172 | * @adap: the adapter | ||
173 | * | ||
174 | * The adapter firmware can indicate error conditions to the host. | ||
175 | * If the firmware has indicated an error, print out the reason for | ||
176 | * the firmware error. | ||
177 | */ | ||
178 | static void t4_report_fw_error(struct adapter *adap) | ||
179 | { | ||
180 | static const char *const reason[] = { | ||
181 | "Crash", /* PCIE_FW_EVAL_CRASH */ | ||
182 | "During Device Preparation", /* PCIE_FW_EVAL_PREP */ | ||
183 | "During Device Configuration", /* PCIE_FW_EVAL_CONF */ | ||
184 | "During Device Initialization", /* PCIE_FW_EVAL_INIT */ | ||
185 | "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ | ||
186 | "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ | ||
187 | "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ | ||
188 | "Reserved", /* reserved */ | ||
189 | }; | ||
190 | u32 pcie_fw; | ||
191 | |||
192 | pcie_fw = t4_read_reg(adap, MA_PCIE_FW); | ||
193 | if (pcie_fw & FW_PCIE_FW_ERR) | ||
194 | dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n", | ||
195 | reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]); | ||
196 | } | ||
197 | |||
198 | /* | ||
171 | * Get the reply to a mailbox command and store it in @rpl in big-endian order. | 199 | * Get the reply to a mailbox command and store it in @rpl in big-endian order. |
172 | */ | 200 | */ |
173 | static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, | 201 | static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, |
@@ -300,6 +328,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, | |||
300 | dump_mbox(adap, mbox, data_reg); | 328 | dump_mbox(adap, mbox, data_reg); |
301 | dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", | 329 | dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", |
302 | *(const u8 *)cmd, mbox); | 330 | *(const u8 *)cmd, mbox); |
331 | t4_report_fw_error(adap); | ||
303 | return -ETIMEDOUT; | 332 | return -ETIMEDOUT; |
304 | } | 333 | } |
305 | 334 | ||
@@ -566,6 +595,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, | |||
566 | #define VPD_BASE 0x400 | 595 | #define VPD_BASE 0x400 |
567 | #define VPD_BASE_OLD 0 | 596 | #define VPD_BASE_OLD 0 |
568 | #define VPD_LEN 1024 | 597 | #define VPD_LEN 1024 |
598 | #define CHELSIO_VPD_UNIQUE_ID 0x82 | ||
569 | 599 | ||
570 | /** | 600 | /** |
571 | * t4_seeprom_wp - enable/disable EEPROM write protection | 601 | * t4_seeprom_wp - enable/disable EEPROM write protection |
@@ -603,7 +633,14 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) | |||
603 | ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); | 633 | ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); |
604 | if (ret < 0) | 634 | if (ret < 0) |
605 | goto out; | 635 | goto out; |
606 | addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; | 636 | |
637 | /* The VPD shall have a unique identifier specified by the PCI SIG. | ||
638 | * For chelsio adapters, the identifier is 0x82. The first byte of a VPD | ||
639 | * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software | ||
640 | * is expected to automatically put this entry at the | ||
641 | * beginning of the VPD. | ||
642 | */ | ||
643 | addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD; | ||
607 | 644 | ||
608 | ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd); | 645 | ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd); |
609 | if (ret < 0) | 646 | if (ret < 0) |
@@ -667,6 +704,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) | |||
667 | i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); | 704 | i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); |
668 | memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); | 705 | memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); |
669 | strim(p->sn); | 706 | strim(p->sn); |
707 | i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE); | ||
670 | memcpy(p->pn, vpd + pn, min(i, PN_LEN)); | 708 | memcpy(p->pn, vpd + pn, min(i, PN_LEN)); |
671 | strim(p->pn); | 709 | strim(p->pn); |
672 | 710 | ||
@@ -1394,15 +1432,18 @@ static void pcie_intr_handler(struct adapter *adapter) | |||
1394 | 1432 | ||
1395 | int fat; | 1433 | int fat; |
1396 | 1434 | ||
1397 | fat = t4_handle_intr_status(adapter, | 1435 | if (is_t4(adapter->params.chip)) |
1398 | PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, | 1436 | fat = t4_handle_intr_status(adapter, |
1399 | sysbus_intr_info) + | 1437 | PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, |
1400 | t4_handle_intr_status(adapter, | 1438 | sysbus_intr_info) + |
1401 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, | 1439 | t4_handle_intr_status(adapter, |
1402 | pcie_port_intr_info) + | 1440 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, |
1403 | t4_handle_intr_status(adapter, PCIE_INT_CAUSE, | 1441 | pcie_port_intr_info) + |
1404 | is_t4(adapter->params.chip) ? | 1442 | t4_handle_intr_status(adapter, PCIE_INT_CAUSE, |
1405 | pcie_intr_info : t5_pcie_intr_info); | 1443 | pcie_intr_info); |
1444 | else | ||
1445 | fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE, | ||
1446 | t5_pcie_intr_info); | ||
1406 | 1447 | ||
1407 | if (fat) | 1448 | if (fat) |
1408 | t4_fatal_err(adapter); | 1449 | t4_fatal_err(adapter); |
@@ -1521,6 +1562,9 @@ static void cim_intr_handler(struct adapter *adapter) | |||
1521 | 1562 | ||
1522 | int fat; | 1563 | int fat; |
1523 | 1564 | ||
1565 | if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR) | ||
1566 | t4_report_fw_error(adapter); | ||
1567 | |||
1524 | fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, | 1568 | fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, |
1525 | cim_intr_info) + | 1569 | cim_intr_info) + |
1526 | t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, | 1570 | t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, |
@@ -1768,10 +1812,16 @@ static void ma_intr_handler(struct adapter *adap) | |||
1768 | { | 1812 | { |
1769 | u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); | 1813 | u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); |
1770 | 1814 | ||
1771 | if (status & MEM_PERR_INT_CAUSE) | 1815 | if (status & MEM_PERR_INT_CAUSE) { |
1772 | dev_alert(adap->pdev_dev, | 1816 | dev_alert(adap->pdev_dev, |
1773 | "MA parity error, parity status %#x\n", | 1817 | "MA parity error, parity status %#x\n", |
1774 | t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); | 1818 | t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); |
1819 | if (is_t5(adap->params.chip)) | ||
1820 | dev_alert(adap->pdev_dev, | ||
1821 | "MA parity error, parity status %#x\n", | ||
1822 | t4_read_reg(adap, | ||
1823 | MA_PARITY_ERROR_STATUS2)); | ||
1824 | } | ||
1775 | if (status & MEM_WRAP_INT_CAUSE) { | 1825 | if (status & MEM_WRAP_INT_CAUSE) { |
1776 | v = t4_read_reg(adap, MA_INT_WRAP_STATUS); | 1826 | v = t4_read_reg(adap, MA_INT_WRAP_STATUS); |
1777 | dev_alert(adap->pdev_dev, "MA address wrap-around error by " | 1827 | dev_alert(adap->pdev_dev, "MA address wrap-around error by " |
@@ -2733,12 +2783,16 @@ retry: | |||
2733 | /* | 2783 | /* |
2734 | * Issue the HELLO command to the firmware. If it's not successful | 2784 | * Issue the HELLO command to the firmware. If it's not successful |
2735 | * but indicates that we got a "busy" or "timeout" condition, retry | 2785 | * but indicates that we got a "busy" or "timeout" condition, retry |
2736 | * the HELLO until we exhaust our retry limit. | 2786 | * the HELLO until we exhaust our retry limit. If we do exceed our |
2787 | * retry limit, check to see if the firmware left us any error | ||
2788 | * information and report that if so. | ||
2737 | */ | 2789 | */ |
2738 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | 2790 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); |
2739 | if (ret < 0) { | 2791 | if (ret < 0) { |
2740 | if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) | 2792 | if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) |
2741 | goto retry; | 2793 | goto retry; |
2794 | if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR) | ||
2795 | t4_report_fw_error(adap); | ||
2742 | return ret; | 2796 | return ret; |
2743 | } | 2797 | } |
2744 | 2798 | ||
@@ -3742,6 +3796,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) | |||
3742 | lc->link_ok = link_ok; | 3796 | lc->link_ok = link_ok; |
3743 | lc->speed = speed; | 3797 | lc->speed = speed; |
3744 | lc->fc = fc; | 3798 | lc->fc = fc; |
3799 | lc->supported = be16_to_cpu(p->u.info.pcap); | ||
3745 | t4_os_link_changed(adap, port, link_ok); | 3800 | t4_os_link_changed(adap, port, link_ok); |
3746 | } | 3801 | } |
3747 | if (mod != pi->mod_type) { | 3802 | if (mod != pi->mod_type) { |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index e3146e83df20..39fb325474f7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | |||
@@ -511,6 +511,7 @@ | |||
511 | #define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) | 511 | #define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) |
512 | #define MA_PCIE_FW 0x30b8 | 512 | #define MA_PCIE_FW 0x30b8 |
513 | #define MA_PARITY_ERROR_STATUS 0x77f4 | 513 | #define MA_PARITY_ERROR_STATUS 0x77f4 |
514 | #define MA_PARITY_ERROR_STATUS2 0x7804 | ||
514 | 515 | ||
515 | #define MA_EXT_MEMORY1_BAR 0x7808 | 516 | #define MA_EXT_MEMORY1_BAR 0x7808 |
516 | #define EDC_0_BASE_ADDR 0x7900 | 517 | #define EDC_0_BASE_ADDR 0x7900 |
@@ -959,6 +960,7 @@ | |||
959 | #define TRCMULTIFILTER 0x00000001U | 960 | #define TRCMULTIFILTER 0x00000001U |
960 | 961 | ||
961 | #define MPS_TRC_RSS_CONTROL 0x9808 | 962 | #define MPS_TRC_RSS_CONTROL 0x9808 |
963 | #define MPS_T5_TRC_RSS_CONTROL 0xa00c | ||
962 | #define RSSCONTROL_MASK 0x00ff0000U | 964 | #define RSSCONTROL_MASK 0x00ff0000U |
963 | #define RSSCONTROL_SHIFT 16 | 965 | #define RSSCONTROL_SHIFT 16 |
964 | #define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT) | 966 | #define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 5f2729ebadbe..3409756a85b9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | |||
@@ -2228,6 +2228,10 @@ struct fw_debug_cmd { | |||
2228 | #define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT) | 2228 | #define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT) |
2229 | #define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \ | 2229 | #define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \ |
2230 | FW_PCIE_FW_MASTER_MASK) | 2230 | FW_PCIE_FW_MASTER_MASK) |
2231 | #define FW_PCIE_FW_EVAL_MASK 0x7 | ||
2232 | #define FW_PCIE_FW_EVAL_SHIFT 24 | ||
2233 | #define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \ | ||
2234 | FW_PCIE_FW_EVAL_MASK) | ||
2231 | 2235 | ||
2232 | struct fw_hdr { | 2236 | struct fw_hdr { |
2233 | u8 ver; | 2237 | u8 ver; |
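The FW_PCIE_FW_EVAL_* macros just added are what t4_report_fw_error() (in the t4_hw.c hunk above) uses to decode the firmware status word into a reason string, after first checking FW_PCIE_FW_ERR. A standalone sketch of that decode; the shift/mask values come from the hunk above, the example register value is made up:

#include <stdio.h>
#include <stdint.h>

#define EVAL_SHIFT	24
#define EVAL_MASK	0x7u
#define eval_get(x)	(((x) >> EVAL_SHIFT) & EVAL_MASK)

static const char *const reason[] = {
	"Crash",			/* 0 */
	"During Device Preparation",	/* 1 */
	"During Device Configuration",	/* 2 */
	"During Device Initialization",	/* 3 */
	"Unexpected Event",		/* 4 */
	"Insufficient Airflow",		/* 5 */
	"Device Shutdown",		/* 6 */
	"Reserved",			/* 7 */
};

int main(void)
{
	uint32_t pcie_fw = 0x3u << EVAL_SHIFT;	/* eval field = 3 */

	printf("Firmware reports adapter error: %s\n", reason[eval_get(pcie_fw)]);
	return 0;
}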
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 9b33057a9477..70089c29d307 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c | |||
@@ -1399,7 +1399,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) | |||
1399 | const void *mac_addr; | 1399 | const void *mac_addr; |
1400 | 1400 | ||
1401 | if (!IS_ENABLED(CONFIG_OF) || !np) | 1401 | if (!IS_ENABLED(CONFIG_OF) || !np) |
1402 | return NULL; | 1402 | return ERR_PTR(-ENXIO); |
1403 | 1403 | ||
1404 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); | 1404 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); |
1405 | if (!pdata) | 1405 | if (!pdata) |
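The dm9000 change above makes the DT-parse helper return ERR_PTR(-ENXIO) rather than NULL when there is nothing to parse, following the kernel's ERR_PTR convention for pointer-returning functions. A hedged kernel-style sketch of that convention (names here are illustrative, not the driver's):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example_pdata { int dummy; };

/* Returns a valid pointer on success, or an errno encoded with ERR_PTR(). */
static struct example_pdata *example_parse_dt(struct device *dev)
{
	struct example_pdata *pdata;

	if (!dev->of_node)
		return ERR_PTR(-ENXIO);		/* no DT node to parse */

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);
	/* ... fill pdata from DT properties ... */
	return pdata;
}

/* Caller side:
 *	pdata = example_parse_dt(dev);
 *	if (IS_ERR(pdata))
 *		return PTR_ERR(pdata);
 */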
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index a0b418e007a0..566b17db135a 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c | |||
@@ -1994,7 +1994,7 @@ static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe) | |||
1994 | { | 1994 | { |
1995 | swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC; | 1995 | swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC; |
1996 | 1996 | ||
1997 | if (skb->protocol != htons(ETH_P_IP)) | 1997 | if (vlan_get_protocol(skb) != htons(ETH_P_IP)) |
1998 | return; | 1998 | return; |
1999 | 1999 | ||
2000 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 2000 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index cbc330b301cd..ad3d5d12173f 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c | |||
@@ -2674,7 +2674,8 @@ set_itr_now: | |||
2674 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 | 2674 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 |
2675 | 2675 | ||
2676 | static int e1000_tso(struct e1000_adapter *adapter, | 2676 | static int e1000_tso(struct e1000_adapter *adapter, |
2677 | struct e1000_tx_ring *tx_ring, struct sk_buff *skb) | 2677 | struct e1000_tx_ring *tx_ring, struct sk_buff *skb, |
2678 | __be16 protocol) | ||
2678 | { | 2679 | { |
2679 | struct e1000_context_desc *context_desc; | 2680 | struct e1000_context_desc *context_desc; |
2680 | struct e1000_buffer *buffer_info; | 2681 | struct e1000_buffer *buffer_info; |
@@ -2692,7 +2693,7 @@ static int e1000_tso(struct e1000_adapter *adapter, | |||
2692 | 2693 | ||
2693 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | 2694 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
2694 | mss = skb_shinfo(skb)->gso_size; | 2695 | mss = skb_shinfo(skb)->gso_size; |
2695 | if (skb->protocol == htons(ETH_P_IP)) { | 2696 | if (protocol == htons(ETH_P_IP)) { |
2696 | struct iphdr *iph = ip_hdr(skb); | 2697 | struct iphdr *iph = ip_hdr(skb); |
2697 | iph->tot_len = 0; | 2698 | iph->tot_len = 0; |
2698 | iph->check = 0; | 2699 | iph->check = 0; |
@@ -2702,7 +2703,7 @@ static int e1000_tso(struct e1000_adapter *adapter, | |||
2702 | 0); | 2703 | 0); |
2703 | cmd_length = E1000_TXD_CMD_IP; | 2704 | cmd_length = E1000_TXD_CMD_IP; |
2704 | ipcse = skb_transport_offset(skb) - 1; | 2705 | ipcse = skb_transport_offset(skb) - 1; |
2705 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | 2706 | } else if (skb_is_gso_v6(skb)) { |
2706 | ipv6_hdr(skb)->payload_len = 0; | 2707 | ipv6_hdr(skb)->payload_len = 0; |
2707 | tcp_hdr(skb)->check = | 2708 | tcp_hdr(skb)->check = |
2708 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 2709 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
@@ -2745,7 +2746,8 @@ static int e1000_tso(struct e1000_adapter *adapter, | |||
2745 | } | 2746 | } |
2746 | 2747 | ||
2747 | static bool e1000_tx_csum(struct e1000_adapter *adapter, | 2748 | static bool e1000_tx_csum(struct e1000_adapter *adapter, |
2748 | struct e1000_tx_ring *tx_ring, struct sk_buff *skb) | 2749 | struct e1000_tx_ring *tx_ring, struct sk_buff *skb, |
2750 | __be16 protocol) | ||
2749 | { | 2751 | { |
2750 | struct e1000_context_desc *context_desc; | 2752 | struct e1000_context_desc *context_desc; |
2751 | struct e1000_buffer *buffer_info; | 2753 | struct e1000_buffer *buffer_info; |
@@ -2756,7 +2758,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, | |||
2756 | if (skb->ip_summed != CHECKSUM_PARTIAL) | 2758 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
2757 | return false; | 2759 | return false; |
2758 | 2760 | ||
2759 | switch (skb->protocol) { | 2761 | switch (protocol) { |
2760 | case cpu_to_be16(ETH_P_IP): | 2762 | case cpu_to_be16(ETH_P_IP): |
2761 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | 2763 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) |
2762 | cmd_len |= E1000_TXD_CMD_TCP; | 2764 | cmd_len |= E1000_TXD_CMD_TCP; |
@@ -3097,6 +3099,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
3097 | int count = 0; | 3099 | int count = 0; |
3098 | int tso; | 3100 | int tso; |
3099 | unsigned int f; | 3101 | unsigned int f; |
3102 | __be16 protocol = vlan_get_protocol(skb); | ||
3100 | 3103 | ||
3101 | /* This goes back to the question of how to logically map a Tx queue | 3104 | /* This goes back to the question of how to logically map a Tx queue |
3102 | * to a flow. Right now, performance is impacted slightly negatively | 3105 | * to a flow. Right now, performance is impacted slightly negatively |
@@ -3210,7 +3213,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
3210 | 3213 | ||
3211 | first = tx_ring->next_to_use; | 3214 | first = tx_ring->next_to_use; |
3212 | 3215 | ||
3213 | tso = e1000_tso(adapter, tx_ring, skb); | 3216 | tso = e1000_tso(adapter, tx_ring, skb, protocol); |
3214 | if (tso < 0) { | 3217 | if (tso < 0) { |
3215 | dev_kfree_skb_any(skb); | 3218 | dev_kfree_skb_any(skb); |
3216 | return NETDEV_TX_OK; | 3219 | return NETDEV_TX_OK; |
@@ -3220,10 +3223,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
3220 | if (likely(hw->mac_type != e1000_82544)) | 3223 | if (likely(hw->mac_type != e1000_82544)) |
3221 | tx_ring->last_tx_tso = true; | 3224 | tx_ring->last_tx_tso = true; |
3222 | tx_flags |= E1000_TX_FLAGS_TSO; | 3225 | tx_flags |= E1000_TX_FLAGS_TSO; |
3223 | } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) | 3226 | } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) |
3224 | tx_flags |= E1000_TX_FLAGS_CSUM; | 3227 | tx_flags |= E1000_TX_FLAGS_CSUM; |
3225 | 3228 | ||
3226 | if (likely(skb->protocol == htons(ETH_P_IP))) | 3229 | if (protocol == htons(ETH_P_IP)) |
3227 | tx_flags |= E1000_TX_FLAGS_IPV4; | 3230 | tx_flags |= E1000_TX_FLAGS_IPV4; |
3228 | 3231 | ||
3229 | if (unlikely(skb->no_fcs)) | 3232 | if (unlikely(skb->no_fcs)) |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 65c3aef2bd36..247335d2c7ec 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -5164,7 +5164,8 @@ link_up: | |||
5164 | #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 | 5164 | #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 |
5165 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 | 5165 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 |
5166 | 5166 | ||
5167 | static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) | 5167 | static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb, |
5168 | __be16 protocol) | ||
5168 | { | 5169 | { |
5169 | struct e1000_context_desc *context_desc; | 5170 | struct e1000_context_desc *context_desc; |
5170 | struct e1000_buffer *buffer_info; | 5171 | struct e1000_buffer *buffer_info; |
@@ -5183,7 +5184,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) | |||
5183 | 5184 | ||
5184 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | 5185 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
5185 | mss = skb_shinfo(skb)->gso_size; | 5186 | mss = skb_shinfo(skb)->gso_size; |
5186 | if (skb->protocol == htons(ETH_P_IP)) { | 5187 | if (protocol == htons(ETH_P_IP)) { |
5187 | struct iphdr *iph = ip_hdr(skb); | 5188 | struct iphdr *iph = ip_hdr(skb); |
5188 | iph->tot_len = 0; | 5189 | iph->tot_len = 0; |
5189 | iph->check = 0; | 5190 | iph->check = 0; |
@@ -5231,7 +5232,8 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) | |||
5231 | return 1; | 5232 | return 1; |
5232 | } | 5233 | } |
5233 | 5234 | ||
5234 | static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) | 5235 | static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb, |
5236 | __be16 protocol) | ||
5235 | { | 5237 | { |
5236 | struct e1000_adapter *adapter = tx_ring->adapter; | 5238 | struct e1000_adapter *adapter = tx_ring->adapter; |
5237 | struct e1000_context_desc *context_desc; | 5239 | struct e1000_context_desc *context_desc; |
@@ -5239,16 +5241,10 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) | |||
5239 | unsigned int i; | 5241 | unsigned int i; |
5240 | u8 css; | 5242 | u8 css; |
5241 | u32 cmd_len = E1000_TXD_CMD_DEXT; | 5243 | u32 cmd_len = E1000_TXD_CMD_DEXT; |
5242 | __be16 protocol; | ||
5243 | 5244 | ||
5244 | if (skb->ip_summed != CHECKSUM_PARTIAL) | 5245 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
5245 | return false; | 5246 | return false; |
5246 | 5247 | ||
5247 | if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) | ||
5248 | protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; | ||
5249 | else | ||
5250 | protocol = skb->protocol; | ||
5251 | |||
5252 | switch (protocol) { | 5248 | switch (protocol) { |
5253 | case cpu_to_be16(ETH_P_IP): | 5249 | case cpu_to_be16(ETH_P_IP): |
5254 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | 5250 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) |
@@ -5546,6 +5542,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
5546 | int count = 0; | 5542 | int count = 0; |
5547 | int tso; | 5543 | int tso; |
5548 | unsigned int f; | 5544 | unsigned int f; |
5545 | __be16 protocol = vlan_get_protocol(skb); | ||
5549 | 5546 | ||
5550 | if (test_bit(__E1000_DOWN, &adapter->state)) { | 5547 | if (test_bit(__E1000_DOWN, &adapter->state)) { |
5551 | dev_kfree_skb_any(skb); | 5548 | dev_kfree_skb_any(skb); |
@@ -5620,7 +5617,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
5620 | 5617 | ||
5621 | first = tx_ring->next_to_use; | 5618 | first = tx_ring->next_to_use; |
5622 | 5619 | ||
5623 | tso = e1000_tso(tx_ring, skb); | 5620 | tso = e1000_tso(tx_ring, skb, protocol); |
5624 | if (tso < 0) { | 5621 | if (tso < 0) { |
5625 | dev_kfree_skb_any(skb); | 5622 | dev_kfree_skb_any(skb); |
5626 | return NETDEV_TX_OK; | 5623 | return NETDEV_TX_OK; |
@@ -5628,14 +5625,14 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
5628 | 5625 | ||
5629 | if (tso) | 5626 | if (tso) |
5630 | tx_flags |= E1000_TX_FLAGS_TSO; | 5627 | tx_flags |= E1000_TX_FLAGS_TSO; |
5631 | else if (e1000_tx_csum(tx_ring, skb)) | 5628 | else if (e1000_tx_csum(tx_ring, skb, protocol)) |
5632 | tx_flags |= E1000_TX_FLAGS_CSUM; | 5629 | tx_flags |= E1000_TX_FLAGS_CSUM; |
5633 | 5630 | ||
5634 | /* Old method was to assume IPv4 packet by default if TSO was enabled. | 5631 | /* Old method was to assume IPv4 packet by default if TSO was enabled. |
5635 | * 82571 hardware supports TSO capabilities for IPv6 as well... | 5632 | * 82571 hardware supports TSO capabilities for IPv6 as well... |
5636 | * no longer assume, we must. | 5633 | * no longer assume, we must. |
5637 | */ | 5634 | */ |
5638 | if (skb->protocol == htons(ETH_P_IP)) | 5635 | if (protocol == htons(ETH_P_IP)) |
5639 | tx_flags |= E1000_TX_FLAGS_IPV4; | 5636 | tx_flags |= E1000_TX_FLAGS_IPV4; |
5640 | 5637 | ||
5641 | if (unlikely(skb->no_fcs)) | 5638 | if (unlikely(skb->no_fcs)) |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index a51aa37b7b5a..369848e107f8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
@@ -2295,7 +2295,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, | |||
2295 | goto out_drop; | 2295 | goto out_drop; |
2296 | 2296 | ||
2297 | /* obtain protocol of skb */ | 2297 | /* obtain protocol of skb */ |
2298 | protocol = skb->protocol; | 2298 | protocol = vlan_get_protocol(skb); |
2299 | 2299 | ||
2300 | /* record the location of the first descriptor for this packet */ | 2300 | /* record the location of the first descriptor for this packet */ |
2301 | first = &tx_ring->tx_bi[tx_ring->next_to_use]; | 2301 | first = &tx_ring->tx_bi[tx_ring->next_to_use]; |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 79bf96ca6489..95a3ec236b49 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c | |||
@@ -1597,7 +1597,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, | |||
1597 | goto out_drop; | 1597 | goto out_drop; |
1598 | 1598 | ||
1599 | /* obtain protocol of skb */ | 1599 | /* obtain protocol of skb */ |
1600 | protocol = skb->protocol; | 1600 | protocol = vlan_get_protocol(skb); |
1601 | 1601 | ||
1602 | /* record the location of the first descriptor for this packet */ | 1602 | /* record the location of the first descriptor for this packet */ |
1603 | first = &tx_ring->tx_bi[tx_ring->next_to_use]; | 1603 | first = &tx_ring->tx_bi[tx_ring->next_to_use]; |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index c9f1d1b7ef37..ade067de1689 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/mbus.h> | 20 | #include <linux/mbus.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/if_vlan.h> | ||
23 | #include <net/ip.h> | 24 | #include <net/ip.h> |
24 | #include <net/ipv6.h> | 25 | #include <net/ipv6.h> |
25 | #include <linux/io.h> | 26 | #include <linux/io.h> |
@@ -1371,15 +1372,16 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) | |||
1371 | { | 1372 | { |
1372 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1373 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1373 | int ip_hdr_len = 0; | 1374 | int ip_hdr_len = 0; |
1375 | __be16 l3_proto = vlan_get_protocol(skb); | ||
1374 | u8 l4_proto; | 1376 | u8 l4_proto; |
1375 | 1377 | ||
1376 | if (skb->protocol == htons(ETH_P_IP)) { | 1378 | if (l3_proto == htons(ETH_P_IP)) { |
1377 | struct iphdr *ip4h = ip_hdr(skb); | 1379 | struct iphdr *ip4h = ip_hdr(skb); |
1378 | 1380 | ||
1379 | /* Calculate IPv4 checksum and L4 checksum */ | 1381 | /* Calculate IPv4 checksum and L4 checksum */ |
1380 | ip_hdr_len = ip4h->ihl; | 1382 | ip_hdr_len = ip4h->ihl; |
1381 | l4_proto = ip4h->protocol; | 1383 | l4_proto = ip4h->protocol; |
1382 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | 1384 | } else if (l3_proto == htons(ETH_P_IPV6)) { |
1383 | struct ipv6hdr *ip6h = ipv6_hdr(skb); | 1385 | struct ipv6hdr *ip6h = ipv6_hdr(skb); |
1384 | 1386 | ||
1385 | /* Read l4_protocol from one of IPv6 extra headers */ | 1387 | /* Read l4_protocol from one of IPv6 extra headers */ |
@@ -1390,7 +1392,7 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) | |||
1390 | return MVNETA_TX_L4_CSUM_NOT; | 1392 | return MVNETA_TX_L4_CSUM_NOT; |
1391 | 1393 | ||
1392 | return mvneta_txq_desc_csum(skb_network_offset(skb), | 1394 | return mvneta_txq_desc_csum(skb_network_offset(skb), |
1393 | skb->protocol, ip_hdr_len, l4_proto); | 1395 | l3_proto, ip_hdr_len, l4_proto); |
1394 | } | 1396 | } |
1395 | 1397 | ||
1396 | return MVNETA_TX_L4_CSUM_NOT; | 1398 | return MVNETA_TX_L4_CSUM_NOT; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 65a4a0f88ea0..02a2e90d581a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
@@ -2389,6 +2389,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv( | |||
2389 | } | 2389 | } |
2390 | EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv); | 2390 | EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv); |
2391 | 2391 | ||
2392 | static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port) | ||
2393 | { | ||
2394 | struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); | ||
2395 | int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports) | ||
2396 | + 1; | ||
2397 | int max_port = min_port + | ||
2398 | bitmap_weight(actv_ports.ports, dev->caps.num_ports); | ||
2399 | |||
2400 | if (port < min_port) | ||
2401 | port = min_port; | ||
2402 | else if (port >= max_port) | ||
2403 | port = max_port - 1; | ||
2404 | |||
2405 | return port; | ||
2406 | } | ||
2407 | |||
2392 | int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) | 2408 | int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) |
2393 | { | 2409 | { |
2394 | struct mlx4_priv *priv = mlx4_priv(dev); | 2410 | struct mlx4_priv *priv = mlx4_priv(dev); |
@@ -2402,6 +2418,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) | |||
2402 | if (slave < 0) | 2418 | if (slave < 0) |
2403 | return -EINVAL; | 2419 | return -EINVAL; |
2404 | 2420 | ||
2421 | port = mlx4_slaves_closest_port(dev, slave, port); | ||
2405 | s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; | 2422 | s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; |
2406 | s_info->mac = mac; | 2423 | s_info->mac = mac; |
2407 | mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n", | 2424 | mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n", |
@@ -2428,6 +2445,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) | |||
2428 | if (slave < 0) | 2445 | if (slave < 0) |
2429 | return -EINVAL; | 2446 | return -EINVAL; |
2430 | 2447 | ||
2448 | port = mlx4_slaves_closest_port(dev, slave, port); | ||
2431 | vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; | 2449 | vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; |
2432 | 2450 | ||
2433 | if ((0 == vlan) && (0 == qos)) | 2451 | if ((0 == vlan) && (0 == qos)) |
@@ -2455,6 +2473,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, | |||
2455 | struct mlx4_priv *priv; | 2473 | struct mlx4_priv *priv; |
2456 | 2474 | ||
2457 | priv = mlx4_priv(dev); | 2475 | priv = mlx4_priv(dev); |
2476 | port = mlx4_slaves_closest_port(dev, slave, port); | ||
2458 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; | 2477 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; |
2459 | 2478 | ||
2460 | if (MLX4_VGT != vp_oper->state.default_vlan) { | 2479 | if (MLX4_VGT != vp_oper->state.default_vlan) { |
@@ -2482,6 +2501,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) | |||
2482 | if (slave < 0) | 2501 | if (slave < 0) |
2483 | return -EINVAL; | 2502 | return -EINVAL; |
2484 | 2503 | ||
2504 | port = mlx4_slaves_closest_port(dev, slave, port); | ||
2485 | s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; | 2505 | s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; |
2486 | s_info->spoofchk = setting; | 2506 | s_info->spoofchk = setting; |
2487 | 2507 | ||
@@ -2535,6 +2555,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat | |||
2535 | if (slave < 0) | 2555 | if (slave < 0) |
2536 | return -EINVAL; | 2556 | return -EINVAL; |
2537 | 2557 | ||
2558 | port = mlx4_slaves_closest_port(dev, slave, port); | ||
2538 | switch (link_state) { | 2559 | switch (link_state) { |
2539 | case IFLA_VF_LINK_STATE_AUTO: | 2560 | case IFLA_VF_LINK_STATE_AUTO: |
2540 | /* get current link state */ | 2561 | /* get current link state */ |
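mlx4_slaves_closest_port() above pins the requested port to the contiguous range of ports a VF actually owns before the per-VF admin state is indexed. A standalone sketch of the clamp, with the bitmap arithmetic reduced to plain integers:

#include <stdio.h>

static int clamp_port(int port, int min_port, int max_port)
{
	if (port < min_port)
		return min_port;
	if (port >= max_port)
		return max_port - 1;	/* max_port itself is one past the end */
	return port;
}

int main(void)
{
	/* VF owns ports 2..3, i.e. min_port = 2, max_port = 4 */
	printf("%d %d %d\n", clamp_port(1, 2, 4), clamp_port(3, 2, 4),
	       clamp_port(9, 2, 4));	/* prints: 2 3 3 */
	return 0;
}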
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index e22f24f784fc..35ff2925110a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
@@ -487,6 +487,9 @@ static int mlx4_en_set_pauseparam(struct net_device *dev, | |||
487 | struct mlx4_en_dev *mdev = priv->mdev; | 487 | struct mlx4_en_dev *mdev = priv->mdev; |
488 | int err; | 488 | int err; |
489 | 489 | ||
490 | if (pause->autoneg) | ||
491 | return -EINVAL; | ||
492 | |||
490 | priv->prof->tx_pause = pause->tx_pause != 0; | 493 | priv->prof->tx_pause = pause->tx_pause != 0; |
491 | priv->prof->rx_pause = pause->rx_pause != 0; | 494 | priv->prof->rx_pause = pause->rx_pause != 0; |
492 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | 495 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index bb536aa613f4..abddcf8c40aa 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -474,39 +474,12 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad | |||
474 | int qpn, u64 *reg_id) | 474 | int qpn, u64 *reg_id) |
475 | { | 475 | { |
476 | int err; | 476 | int err; |
477 | struct mlx4_spec_list spec_eth_outer = { {NULL} }; | ||
478 | struct mlx4_spec_list spec_vxlan = { {NULL} }; | ||
479 | struct mlx4_spec_list spec_eth_inner = { {NULL} }; | ||
480 | |||
481 | struct mlx4_net_trans_rule rule = { | ||
482 | .queue_mode = MLX4_NET_TRANS_Q_FIFO, | ||
483 | .exclusive = 0, | ||
484 | .allow_loopback = 1, | ||
485 | .promisc_mode = MLX4_FS_REGULAR, | ||
486 | .priority = MLX4_DOMAIN_NIC, | ||
487 | }; | ||
488 | |||
489 | __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); | ||
490 | 477 | ||
491 | if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) | 478 | if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) |
492 | return 0; /* do nothing */ | 479 | return 0; /* do nothing */ |
493 | 480 | ||
494 | rule.port = priv->port; | 481 | err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, |
495 | rule.qpn = qpn; | 482 | MLX4_DOMAIN_NIC, reg_id); |
496 | INIT_LIST_HEAD(&rule.list); | ||
497 | |||
498 | spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH; | ||
499 | memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN); | ||
500 | memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN); | ||
501 | |||
502 | spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */ | ||
503 | spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */ | ||
504 | |||
505 | list_add_tail(&spec_eth_outer.list, &rule.list); | ||
506 | list_add_tail(&spec_vxlan.list, &rule.list); | ||
507 | list_add_tail(&spec_eth_inner.list, &rule.list); | ||
508 | |||
509 | err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id); | ||
510 | if (err) { | 483 | if (err) { |
511 | en_err(priv, "failed to add vxlan steering rule, err %d\n", err); | 484 | en_err(priv, "failed to add vxlan steering rule, err %d\n", err); |
512 | return err; | 485 | return err; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 7e2d5d57c598..871e3a5bda38 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -78,13 +78,13 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); | |||
78 | #endif /* CONFIG_PCI_MSI */ | 78 | #endif /* CONFIG_PCI_MSI */ |
79 | 79 | ||
80 | static uint8_t num_vfs[3] = {0, 0, 0}; | 80 | static uint8_t num_vfs[3] = {0, 0, 0}; |
81 | static int num_vfs_argc = 3; | 81 | static int num_vfs_argc; |
82 | module_param_array(num_vfs, byte , &num_vfs_argc, 0444); | 82 | module_param_array(num_vfs, byte , &num_vfs_argc, 0444); |
83 | MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" | 83 | MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" |
84 | "num_vfs=port1,port2,port1+2"); | 84 | "num_vfs=port1,port2,port1+2"); |
85 | 85 | ||
86 | static uint8_t probe_vf[3] = {0, 0, 0}; | 86 | static uint8_t probe_vf[3] = {0, 0, 0}; |
87 | static int probe_vfs_argc = 3; | 87 | static int probe_vfs_argc; |
88 | module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); | 88 | module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); |
89 | MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" | 89 | MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" |
90 | "probe_vf=port1,port2,port1+2"); | 90 | "probe_vf=port1,port2,port1+2"); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index d80e7a6fac74..ca0f98c95105 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c | |||
@@ -1020,6 +1020,44 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) | |||
1020 | } | 1020 | } |
1021 | EXPORT_SYMBOL_GPL(mlx4_flow_detach); | 1021 | EXPORT_SYMBOL_GPL(mlx4_flow_detach); |
1022 | 1022 | ||
1023 | int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr, | ||
1024 | int port, int qpn, u16 prio, u64 *reg_id) | ||
1025 | { | ||
1026 | int err; | ||
1027 | struct mlx4_spec_list spec_eth_outer = { {NULL} }; | ||
1028 | struct mlx4_spec_list spec_vxlan = { {NULL} }; | ||
1029 | struct mlx4_spec_list spec_eth_inner = { {NULL} }; | ||
1030 | |||
1031 | struct mlx4_net_trans_rule rule = { | ||
1032 | .queue_mode = MLX4_NET_TRANS_Q_FIFO, | ||
1033 | .exclusive = 0, | ||
1034 | .allow_loopback = 1, | ||
1035 | .promisc_mode = MLX4_FS_REGULAR, | ||
1036 | }; | ||
1037 | |||
1038 | __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); | ||
1039 | |||
1040 | rule.port = port; | ||
1041 | rule.qpn = qpn; | ||
1042 | rule.priority = prio; | ||
1043 | INIT_LIST_HEAD(&rule.list); | ||
1044 | |||
1045 | spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH; | ||
1046 | memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN); | ||
1047 | memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN); | ||
1048 | |||
1049 | spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */ | ||
1050 | spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */ | ||
1051 | |||
1052 | list_add_tail(&spec_eth_outer.list, &rule.list); | ||
1053 | list_add_tail(&spec_vxlan.list, &rule.list); | ||
1054 | list_add_tail(&spec_eth_inner.list, &rule.list); | ||
1055 | |||
1056 | err = mlx4_flow_attach(dev, &rule, reg_id); | ||
1057 | return err; | ||
1058 | } | ||
1059 | EXPORT_SYMBOL(mlx4_tunnel_steer_add); | ||
1060 | |||
1023 | int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, | 1061 | int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, |
1024 | u32 max_range_qpn) | 1062 | u32 max_range_qpn) |
1025 | { | 1063 | { |
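The new mlx4_tunnel_steer_add() helper bundles the VXLAN rule/spec-list setup that en_netdev.c used to open-code (see the en_netdev.c hunk earlier), so a caller only supplies the MAC, port, QP number, priority and a reg_id to keep for a later mlx4_flow_detach(). A hedged sketch of a call site; the wrapper name and the assumption that the prototype is exported via the mlx4 headers (the include/ side is outside this drivers/net diff) are mine:

/* Prototype assumed to come from the mlx4 public headers. */
static int example_add_vxlan_rule(struct mlx4_dev *dev, unsigned char *addr,
				  int port, int qpn, u64 *reg_id)
{
	/* MLX4_DOMAIN_NIC is the priority the Ethernet driver passes above */
	return mlx4_tunnel_steer_add(dev, addr, port, qpn,
				     MLX4_DOMAIN_NIC, reg_id);
}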
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 7d717eccb7b0..193a6adb5d04 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c | |||
@@ -298,6 +298,7 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox | |||
298 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); | 298 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); |
299 | } | 299 | } |
300 | 300 | ||
301 | /* Must protect against concurrent access */ | ||
301 | int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | 302 | int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, |
302 | struct mlx4_mpt_entry ***mpt_entry) | 303 | struct mlx4_mpt_entry ***mpt_entry) |
303 | { | 304 | { |
@@ -305,13 +306,10 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | |||
305 | int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1); | 306 | int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1); |
306 | struct mlx4_cmd_mailbox *mailbox = NULL; | 307 | struct mlx4_cmd_mailbox *mailbox = NULL; |
307 | 308 | ||
308 | /* Make sure that at this point we have single-threaded access only */ | ||
309 | |||
310 | if (mmr->enabled != MLX4_MPT_EN_HW) | 309 | if (mmr->enabled != MLX4_MPT_EN_HW) |
311 | return -EINVAL; | 310 | return -EINVAL; |
312 | 311 | ||
313 | err = mlx4_HW2SW_MPT(dev, NULL, key); | 312 | err = mlx4_HW2SW_MPT(dev, NULL, key); |
314 | |||
315 | if (err) { | 313 | if (err) { |
316 | mlx4_warn(dev, "HW2SW_MPT failed (%d).", err); | 314 | mlx4_warn(dev, "HW2SW_MPT failed (%d).", err); |
317 | mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n"); | 315 | mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n"); |
@@ -333,7 +331,6 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | |||
333 | 0, MLX4_CMD_QUERY_MPT, | 331 | 0, MLX4_CMD_QUERY_MPT, |
334 | MLX4_CMD_TIME_CLASS_B, | 332 | MLX4_CMD_TIME_CLASS_B, |
335 | MLX4_CMD_WRAPPED); | 333 | MLX4_CMD_WRAPPED); |
336 | |||
337 | if (err) | 334 | if (err) |
338 | goto free_mailbox; | 335 | goto free_mailbox; |
339 | 336 | ||
@@ -378,9 +375,10 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, | |||
378 | err = mlx4_SW2HW_MPT(dev, mailbox, key); | 375 | err = mlx4_SW2HW_MPT(dev, mailbox, key); |
379 | } | 376 | } |
380 | 377 | ||
381 | mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK; | 378 | if (!err) { |
382 | if (!err) | 379 | mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK; |
383 | mmr->enabled = MLX4_MPT_EN_HW; | 380 | mmr->enabled = MLX4_MPT_EN_HW; |
381 | } | ||
384 | return err; | 382 | return err; |
385 | } | 383 | } |
386 | EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt); | 384 | EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt); |
@@ -400,11 +398,12 @@ EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt); | |||
400 | int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry, | 398 | int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry, |
401 | u32 pdn) | 399 | u32 pdn) |
402 | { | 400 | { |
403 | u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags); | 401 | u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK; |
404 | /* The wrapper function will put the slave's id here */ | 402 | /* The wrapper function will put the slave's id here */ |
405 | if (mlx4_is_mfunc(dev)) | 403 | if (mlx4_is_mfunc(dev)) |
406 | pd_flags &= ~MLX4_MPT_PD_VF_MASK; | 404 | pd_flags &= ~MLX4_MPT_PD_VF_MASK; |
407 | mpt_entry->pd_flags = cpu_to_be32((pd_flags & ~MLX4_MPT_PD_MASK) | | 405 | |
406 | mpt_entry->pd_flags = cpu_to_be32(pd_flags | | ||
408 | (pdn & MLX4_MPT_PD_MASK) | 407 | (pdn & MLX4_MPT_PD_MASK) |
409 | | MLX4_MPT_PD_FLAG_EN_INV); | 408 | | MLX4_MPT_PD_FLAG_EN_INV); |
410 | return 0; | 409 | return 0; |
@@ -600,14 +599,18 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, | |||
600 | { | 599 | { |
601 | int err; | 600 | int err; |
602 | 601 | ||
603 | mpt_entry->start = cpu_to_be64(mr->iova); | 602 | mpt_entry->start = cpu_to_be64(iova); |
604 | mpt_entry->length = cpu_to_be64(mr->size); | 603 | mpt_entry->length = cpu_to_be64(size); |
605 | mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); | 604 | mpt_entry->entity_size = cpu_to_be32(page_shift); |
606 | 605 | ||
607 | err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); | 606 | err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); |
608 | if (err) | 607 | if (err) |
609 | return err; | 608 | return err; |
610 | 609 | ||
610 | mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | | ||
611 | MLX4_MPT_PD_FLAG_EN_INV); | ||
612 | mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE | | ||
613 | MLX4_MPT_FLAG_SW_OWNS); | ||
611 | if (mr->mtt.order < 0) { | 614 | if (mr->mtt.order < 0) { |
612 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); | 615 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); |
613 | mpt_entry->mtt_addr = 0; | 616 | mpt_entry->mtt_addr = 0; |
@@ -617,6 +620,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, | |||
617 | if (mr->mtt.page_shift == 0) | 620 | if (mr->mtt.page_shift == 0) |
618 | mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); | 621 | mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); |
619 | } | 622 | } |
623 | if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { | ||
624 | /* fast register MR in free state */ | ||
625 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); | ||
626 | mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | | ||
627 | MLX4_MPT_PD_FLAG_RAE); | ||
628 | } else { | ||
629 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); | ||
630 | } | ||
620 | mr->enabled = MLX4_MPT_EN_SW; | 631 | mr->enabled = MLX4_MPT_EN_SW; |
621 | 632 | ||
622 | return 0; | 633 | return 0; |
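The mr.c hunks above first strip mpt_entry down to the bits that may legally survive a re-registration (the PD number plus EN_INV, and the FREE/SW_OWNS state bits) and then re-arm it: a fast-register MR (mtt.order >= 0 with page_shift == 0) is left in the free state with FAST_REG and RAE set, anything else becomes software-owned. A minimal sketch of that decision, assuming the MLX4_MPT_* constants and struct mlx4_mpt_entry from the driver-private mlx4.h; the helper name is made up for illustration.

#include <linux/mlx4/device.h>
#include "mlx4.h"               /* MLX4_MPT_* flag and mask definitions */

/* Hypothetical helper mirroring the rereg path: reset the MPT flag
 * words, then mark the entry either as a free fast-register MR or as
 * a plain software-owned region.
 */
static void demo_mpt_reset_flags(struct mlx4_mpt_entry *mpt_entry,
                                 int mtt_order, int mtt_page_shift)
{
        /* keep only the PD number and the EN_INV bit */
        mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
                                           MLX4_MPT_PD_FLAG_EN_INV);
        /* keep only the FREE / SW_OWNS state bits */
        mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
                                        MLX4_MPT_FLAG_SW_OWNS);

        if (mtt_order >= 0 && mtt_page_shift == 0) {
                /* fast register MR stays in the free state */
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
                mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
                                                   MLX4_MPT_PD_FLAG_RAE);
        } else {
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
        }
}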
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 9ba0c1ca10d5..94eeb2c7d7e4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c | |||
@@ -103,7 +103,8 @@ static int find_index(struct mlx4_dev *dev, | |||
103 | int i; | 103 | int i; |
104 | 104 | ||
105 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { | 105 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { |
106 | if ((mac & MLX4_MAC_MASK) == | 106 | if (table->refs[i] && |
107 | (MLX4_MAC_MASK & mac) == | ||
107 | (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) | 108 | (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) |
108 | return i; | 109 | return i; |
109 | } | 110 | } |
@@ -165,12 +166,14 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) | |||
165 | 166 | ||
166 | mutex_lock(&table->mutex); | 167 | mutex_lock(&table->mutex); |
167 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { | 168 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { |
168 | if (free < 0 && !table->entries[i]) { | 169 | if (!table->refs[i]) { |
169 | free = i; | 170 | if (free < 0) |
171 | free = i; | ||
170 | continue; | 172 | continue; |
171 | } | 173 | } |
172 | 174 | ||
173 | if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { | 175 | if ((MLX4_MAC_MASK & mac) == |
176 | (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { | ||
174 | /* MAC already registered, increment ref count */ | 177 | /* MAC already registered, increment ref count */ |
175 | err = i; | 178 | err = i; |
176 | ++table->refs[i]; | 179 | ++table->refs[i]; |
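The port.c hunks make both find_index() and __mlx4_register_mac() key off table->refs[] instead of the raw entries, so a slot that still holds a stale address but has dropped to a zero refcount is treated as free, and a duplicate registration simply bumps the count. A stripped-down sketch of that ref-counted table, with generic names and sizes standing in for the driver's MLX4_MAX_MAC_NUM table:

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_MAC_MASK   0xffffffffffffULL
#define DEMO_MAC_NUM    128

struct demo_mac_table {
        __be64 entries[DEMO_MAC_NUM];   /* MAC addresses, big endian */
        int    refs[DEMO_MAC_NUM];      /* 0 == slot is free */
};

/* Return the slot holding @mac (bumping its refcount), reuse the first
 * unreferenced slot otherwise, or -ENOSPC when the table is full.
 */
static int demo_register_mac(struct demo_mac_table *table, u64 mac)
{
        int free = -1, i;

        for (i = 0; i < DEMO_MAC_NUM; i++) {
                if (!table->refs[i]) {          /* stale entries count as free */
                        if (free < 0)
                                free = i;
                        continue;
                }
                if ((mac & DEMO_MAC_MASK) ==
                    (DEMO_MAC_MASK & be64_to_cpu(table->entries[i]))) {
                        ++table->refs[i];       /* already registered */
                        return i;
                }
        }
        if (free < 0)
                return -ENOSPC;

        table->entries[free] = cpu_to_be64(mac);
        table->refs[free] = 1;
        return free;
}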
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 0dc31d85fc3b..2301365c79c7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c | |||
@@ -390,13 +390,14 @@ err_icm: | |||
390 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); | 390 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); |
391 | 391 | ||
392 | #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC | 392 | #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC |
393 | int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, | 393 | int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, |
394 | enum mlx4_update_qp_attr attr, | 394 | enum mlx4_update_qp_attr attr, |
395 | struct mlx4_update_qp_params *params) | 395 | struct mlx4_update_qp_params *params) |
396 | { | 396 | { |
397 | struct mlx4_cmd_mailbox *mailbox; | 397 | struct mlx4_cmd_mailbox *mailbox; |
398 | struct mlx4_update_qp_context *cmd; | 398 | struct mlx4_update_qp_context *cmd; |
399 | u64 pri_addr_path_mask = 0; | 399 | u64 pri_addr_path_mask = 0; |
400 | u64 qp_mask = 0; | ||
400 | int err = 0; | 401 | int err = 0; |
401 | 402 | ||
402 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 403 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
@@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, | |||
413 | cmd->qp_context.pri_path.grh_mylmc = params->smac_index; | 414 | cmd->qp_context.pri_path.grh_mylmc = params->smac_index; |
414 | } | 415 | } |
415 | 416 | ||
417 | if (attr & MLX4_UPDATE_QP_VSD) { | ||
418 | qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD; | ||
419 | if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE) | ||
420 | cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN); | ||
421 | } | ||
422 | |||
416 | cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); | 423 | cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); |
424 | cmd->qp_mask = cpu_to_be64(qp_mask); | ||
417 | 425 | ||
418 | err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0, | 426 | err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0, |
419 | MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, | 427 | MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, |
420 | MLX4_CMD_NATIVE); | 428 | MLX4_CMD_NATIVE); |
421 | 429 | ||
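With the qp.c change above, mlx4_update_qp() is addressed by QP number and understands a VSD (VLAN stripping disable) attribute: the attribute bit selects the field in qp_mask, and the params flag decides whether the context bit gets set. A minimal usage sketch built only from the names visible in the hunks (the wrapper function itself is hypothetical); as the hunks suggest, an empty flags word leaves the VSD bit clear so the hardware strips VLAN tags, while MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE keeps tags on receive.

#include <linux/mlx4/device.h>
#include "mlx4.h"       /* mlx4_update_qp(), MLX4_UPDATE_QP_VSD, ... */

/* Hypothetical wrapper: (re)program VLAN stripping for one QP. */
static int demo_set_qp_vlan_strip(struct mlx4_dev *dev, u32 qpn, bool strip)
{
        struct mlx4_update_qp_params params = { .flags = 0 };

        if (!strip)
                params.flags |= MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE;

        return mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
}

Switching the argument from struct mlx4_qp * to a plain qpn also lets callers such as the resource tracker below reach QPs they never allocated a driver object for.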
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1089367fed22..5d2498dcf536 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
@@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev, | |||
702 | struct mlx4_qp_context *qpc = inbox->buf + 8; | 702 | struct mlx4_qp_context *qpc = inbox->buf + 8; |
703 | struct mlx4_vport_oper_state *vp_oper; | 703 | struct mlx4_vport_oper_state *vp_oper; |
704 | struct mlx4_priv *priv; | 704 | struct mlx4_priv *priv; |
705 | u32 qp_type; | ||
705 | int port; | 706 | int port; |
706 | 707 | ||
707 | port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; | 708 | port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; |
708 | priv = mlx4_priv(dev); | 709 | priv = mlx4_priv(dev); |
709 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; | 710 | vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; |
711 | qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; | ||
710 | 712 | ||
711 | if (MLX4_VGT != vp_oper->state.default_vlan) { | 713 | if (MLX4_VGT != vp_oper->state.default_vlan) { |
712 | /* the reserved QPs (special, proxy, tunnel) | 714 | /* the reserved QPs (special, proxy, tunnel) |
@@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev, | |||
715 | if (mlx4_is_qp_reserved(dev, qpn)) | 717 | if (mlx4_is_qp_reserved(dev, qpn)) |
716 | return 0; | 718 | return 0; |
717 | 719 | ||
718 | /* force strip vlan by clear vsd */ | 720 | /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */ |
719 | qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); | 721 | if (qp_type == MLX4_QP_ST_UD || |
722 | (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) { | ||
723 | if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) { | ||
724 | *(__be32 *)inbox->buf = | ||
725 | cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) | | ||
726 | MLX4_QP_OPTPAR_VLAN_STRIPPING); | ||
727 | qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); | ||
728 | } else { | ||
729 | struct mlx4_update_qp_params params = {.flags = 0}; | ||
730 | |||
731 | mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms); | ||
732 | } | ||
733 | } | ||
720 | 734 | ||
721 | if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE && | 735 | if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE && |
722 | dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { | 736 | dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { |
@@ -3998,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, | |||
3998 | } | 4012 | } |
3999 | 4013 | ||
4000 | port = (rqp->sched_queue >> 6 & 1) + 1; | 4014 | port = (rqp->sched_queue >> 6 & 1) + 1; |
4001 | smac_index = cmd->qp_context.pri_path.grh_mylmc; | 4015 | |
4002 | err = mac_find_smac_ix_in_slave(dev, slave, port, | 4016 | if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) { |
4003 | smac_index, &mac); | 4017 | smac_index = cmd->qp_context.pri_path.grh_mylmc; |
4004 | if (err) { | 4018 | err = mac_find_smac_ix_in_slave(dev, slave, port, |
4005 | mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n", | 4019 | smac_index, &mac); |
4006 | qpn, smac_index); | 4020 | |
4007 | goto err_mac; | 4021 | if (err) { |
4022 | mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n", | ||
4023 | qpn, smac_index); | ||
4024 | goto err_mac; | ||
4025 | } | ||
4008 | } | 4026 | } |
4009 | 4027 | ||
4010 | err = mlx4_cmd(dev, inbox->dma, | 4028 | err = mlx4_cmd(dev, inbox->dma, |
@@ -4818,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) | |||
4818 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; | 4836 | MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; |
4819 | 4837 | ||
4820 | upd_context = mailbox->buf; | 4838 | upd_context = mailbox->buf; |
4821 | upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD); | 4839 | upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD); |
4822 | 4840 | ||
4823 | spin_lock_irq(mlx4_tlock(dev)); | 4841 | spin_lock_irq(mlx4_tlock(dev)); |
4824 | list_for_each_entry_safe(qp, tmp, qp_list, com.list) { | 4842 | list_for_each_entry_safe(qp, tmp, qp_list, com.list) { |
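The resource_tracker hunk chooses between two ways of forcing VLAN stripping for a VF's UD or Raw Ethernet QP: devices that advertise MLX4_BMME_FLAG_VSD_INIT2RTR get the VLAN-stripping optional-parameter bit patched straight into the INIT2RTR mailbox, older devices fall back to a separate UPDATE_QP command. A condensed sketch of that branch, assuming the mlx4 types and constants named in the hunk; the helper itself is illustrative.

#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
#include "mlx4.h"

/* optpar points at the first dword of the INIT2RTR mailbox. */
static void demo_force_vlan_strip(struct mlx4_dev *dev, u32 qpn,
                                  __be32 *optpar, struct mlx4_qp_context *qpc)
{
        if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
                /* firmware honours VSD during INIT2RTR: flag the optional
                 * parameter and clear the VSD bit in the context
                 */
                *optpar = cpu_to_be32(be32_to_cpu(*optpar) |
                                      MLX4_QP_OPTPAR_VLAN_STRIPPING);
                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
        } else {
                /* older firmware: issue an explicit UPDATE_QP instead */
                struct mlx4_update_qp_params params = { .flags = 0 };

                mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
        }
}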
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 5020fd47825d..2f12c88c66ab 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c | |||
@@ -206,7 +206,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) | |||
206 | int rx_head = priv->rx_head; | 206 | int rx_head = priv->rx_head; |
207 | int rx = 0; | 207 | int rx = 0; |
208 | 208 | ||
209 | while (1) { | 209 | while (rx < budget) { |
210 | desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head); | 210 | desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head); |
211 | desc0 = readl(desc + RX_REG_OFFSET_DESC0); | 211 | desc0 = readl(desc + RX_REG_OFFSET_DESC0); |
212 | 212 | ||
@@ -218,7 +218,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) | |||
218 | net_dbg_ratelimited("packet error\n"); | 218 | net_dbg_ratelimited("packet error\n"); |
219 | priv->stats.rx_dropped++; | 219 | priv->stats.rx_dropped++; |
220 | priv->stats.rx_errors++; | 220 | priv->stats.rx_errors++; |
221 | continue; | 221 | goto rx_next; |
222 | } | 222 | } |
223 | 223 | ||
224 | len = desc0 & RX_DESC0_FRAME_LEN_MASK; | 224 | len = desc0 & RX_DESC0_FRAME_LEN_MASK; |
@@ -226,13 +226,19 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) | |||
226 | if (len > RX_BUF_SIZE) | 226 | if (len > RX_BUF_SIZE) |
227 | len = RX_BUF_SIZE; | 227 | len = RX_BUF_SIZE; |
228 | 228 | ||
229 | skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size); | 229 | dma_sync_single_for_cpu(&ndev->dev, |
230 | priv->rx_mapping[rx_head], | ||
231 | priv->rx_buf_size, DMA_FROM_DEVICE); | ||
232 | skb = netdev_alloc_skb_ip_align(ndev, len); | ||
233 | |||
230 | if (unlikely(!skb)) { | 234 | if (unlikely(!skb)) { |
231 | net_dbg_ratelimited("build_skb failed\n"); | 235 | net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n"); |
232 | priv->stats.rx_dropped++; | 236 | priv->stats.rx_dropped++; |
233 | priv->stats.rx_errors++; | 237 | priv->stats.rx_errors++; |
238 | goto rx_next; | ||
234 | } | 239 | } |
235 | 240 | ||
241 | memcpy(skb->data, priv->rx_buf[rx_head], len); | ||
236 | skb_put(skb, len); | 242 | skb_put(skb, len); |
237 | skb->protocol = eth_type_trans(skb, ndev); | 243 | skb->protocol = eth_type_trans(skb, ndev); |
238 | napi_gro_receive(&priv->napi, skb); | 244 | napi_gro_receive(&priv->napi, skb); |
@@ -244,18 +250,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) | |||
244 | if (desc0 & RX_DESC0_MULTICAST) | 250 | if (desc0 & RX_DESC0_MULTICAST) |
245 | priv->stats.multicast++; | 251 | priv->stats.multicast++; |
246 | 252 | ||
253 | rx_next: | ||
247 | writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); | 254 | writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); |
248 | 255 | ||
249 | rx_head = RX_NEXT(rx_head); | 256 | rx_head = RX_NEXT(rx_head); |
250 | priv->rx_head = rx_head; | 257 | priv->rx_head = rx_head; |
251 | |||
252 | if (rx >= budget) | ||
253 | break; | ||
254 | } | 258 | } |
255 | 259 | ||
256 | if (rx < budget) { | 260 | if (rx < budget) { |
257 | napi_gro_flush(napi, false); | 261 | napi_complete(napi); |
258 | __napi_complete(napi); | ||
259 | } | 262 | } |
260 | 263 | ||
261 | priv->reg_imr |= RPKT_FINISH_M; | 264 | priv->reg_imr |= RPKT_FINISH_M; |
@@ -346,10 +349,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
346 | len = ETH_ZLEN; | 349 | len = ETH_ZLEN; |
347 | } | 350 | } |
348 | 351 | ||
349 | txdes1 = readl(desc + TX_REG_OFFSET_DESC1); | 352 | dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head], |
350 | txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS; | 353 | priv->tx_buf_size, DMA_TO_DEVICE); |
351 | txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE); | 354 | |
352 | txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK); | 355 | txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK); |
356 | if (tx_head == TX_DESC_NUM_MASK) | ||
357 | txdes1 |= TX_DESC1_END; | ||
353 | writel(txdes1, desc + TX_REG_OFFSET_DESC1); | 358 | writel(txdes1, desc + TX_REG_OFFSET_DESC1); |
354 | writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); | 359 | writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); |
355 | 360 | ||
@@ -465,8 +470,7 @@ static int moxart_mac_probe(struct platform_device *pdev) | |||
465 | spin_lock_init(&priv->txlock); | 470 | spin_lock_init(&priv->txlock); |
466 | 471 | ||
467 | priv->tx_buf_size = TX_BUF_SIZE; | 472 | priv->tx_buf_size = TX_BUF_SIZE; |
468 | priv->rx_buf_size = RX_BUF_SIZE + | 473 | priv->rx_buf_size = RX_BUF_SIZE; |
469 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
470 | 474 | ||
471 | priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * | 475 | priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * |
472 | TX_DESC_NUM, &priv->tx_base, | 476 | TX_DESC_NUM, &priv->tx_base, |
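The moxart_ether.c changes above reshape the receive path around the usual NAPI contract: never handle more than budget descriptors per poll, funnel error cases through a common rx_next label so the descriptor is always handed back to the DMA engine, and complete NAPI only when the ring ran dry before the budget was spent. A skeleton of that loop shape, with placeholder demo_* helpers standing in for the driver's descriptor handling:

#include <linux/kernel.h>
#include <linux/netdevice.h>

struct demo_priv {
        struct napi_struct napi;
        /* ... ring state ... */
};

static bool demo_rx_desc_ready(struct demo_priv *priv);  /* HW gave us a frame */
static bool demo_rx_one(struct demo_priv *priv);         /* deliver it, true on success */
static void demo_rx_rearm(struct demo_priv *priv);       /* return desc to the HW */
static void demo_enable_rx_irq(struct demo_priv *priv);

static int demo_rx_poll(struct napi_struct *napi, int budget)
{
        struct demo_priv *priv = container_of(napi, struct demo_priv, napi);
        int rx = 0;

        while (rx < budget) {
                if (!demo_rx_desc_ready(priv))
                        break;                  /* ring is empty */

                if (demo_rx_one(priv))
                        rx++;
                /* error paths land here too, so the descriptor is never lost */
                demo_rx_rearm(priv);
        }

        if (rx < budget) {
                napi_complete(napi);
                demo_enable_rx_irq(priv);
        }

        return rx;
}

The same file also stops handing its page buffers to build_skb(): it syncs the DMA buffer for the CPU, allocates a regular skb with netdev_alloc_skb_ip_align() and memcpy()s the frame, which is why rx_buf_size no longer needs the skb_shared_info headroom.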
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 8706c0dbd0c3..a44a03c45014 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c | |||
@@ -1220,6 +1220,9 @@ static int lpc_eth_open(struct net_device *ndev) | |||
1220 | 1220 | ||
1221 | __lpc_eth_clock_enable(pldat, true); | 1221 | __lpc_eth_clock_enable(pldat, true); |
1222 | 1222 | ||
1223 | /* Suspended PHY makes LPC ethernet core block, so resume now */ | ||
1224 | phy_resume(pldat->phy_dev); | ||
1225 | |||
1223 | /* Reset and initialize */ | 1226 | /* Reset and initialize */ |
1224 | __lpc_eth_reset(pldat); | 1227 | __lpc_eth_reset(pldat); |
1225 | __lpc_eth_init(pldat); | 1228 | __lpc_eth_init(pldat); |
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index 979c6980639f..a42293092ea4 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c | |||
@@ -290,9 +290,11 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) | |||
290 | /* Read the hardware TX timestamp if one was recorded */ | 290 | /* Read the hardware TX timestamp if one was recorded */ |
291 | if (unlikely(re.s.tstamp)) { | 291 | if (unlikely(re.s.tstamp)) { |
292 | struct skb_shared_hwtstamps ts; | 292 | struct skb_shared_hwtstamps ts; |
293 | u64 ns; | ||
294 | |||
293 | memset(&ts, 0, sizeof(ts)); | 295 | memset(&ts, 0, sizeof(ts)); |
294 | /* Read the timestamp */ | 296 | /* Read the timestamp */ |
295 | u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); | 297 | ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); |
296 | /* Remove the timestamp from the FIFO */ | 298 | /* Remove the timestamp from the FIFO */ |
297 | cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); | 299 | cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); |
298 | /* Tell the kernel about the timestamp */ | 300 | /* Tell the kernel about the timestamp */ |
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig index 44c8be1c6805..5f7a35212796 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | |||
@@ -7,6 +7,7 @@ config PCH_GBE | |||
7 | depends on PCI && (X86_32 || COMPILE_TEST) | 7 | depends on PCI && (X86_32 || COMPILE_TEST) |
8 | select MII | 8 | select MII |
9 | select PTP_1588_CLOCK_PCH | 9 | select PTP_1588_CLOCK_PCH |
10 | select NET_PTP_CLASSIFY | ||
10 | ---help--- | 11 | ---help--- |
11 | This is a gigabit ethernet driver for EG20T PCH. | 12 | This is a gigabit ethernet driver for EG20T PCH. |
12 | EG20T PCH is the platform controller hub that is used in Intel's | 13 | EG20T PCH is the platform controller hub that is used in Intel's |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 32058614151a..5c4068353f66 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | |||
@@ -135,6 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) | |||
135 | int i, j; | 135 | int i, j; |
136 | struct nx_host_tx_ring *tx_ring = adapter->tx_ring; | 136 | struct nx_host_tx_ring *tx_ring = adapter->tx_ring; |
137 | 137 | ||
138 | spin_lock(&adapter->tx_clean_lock); | ||
138 | cmd_buf = tx_ring->cmd_buf_arr; | 139 | cmd_buf = tx_ring->cmd_buf_arr; |
139 | for (i = 0; i < tx_ring->num_desc; i++) { | 140 | for (i = 0; i < tx_ring->num_desc; i++) { |
140 | buffrag = cmd_buf->frag_array; | 141 | buffrag = cmd_buf->frag_array; |
@@ -158,6 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) | |||
158 | } | 159 | } |
159 | cmd_buf++; | 160 | cmd_buf++; |
160 | } | 161 | } |
162 | spin_unlock(&adapter->tx_clean_lock); | ||
161 | } | 163 | } |
162 | 164 | ||
163 | void netxen_free_sw_resources(struct netxen_adapter *adapter) | 165 | void netxen_free_sw_resources(struct netxen_adapter *adapter) |
@@ -1792,9 +1794,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) | |||
1792 | break; | 1794 | break; |
1793 | } | 1795 | } |
1794 | 1796 | ||
1795 | if (count && netif_running(netdev)) { | 1797 | tx_ring->sw_consumer = sw_consumer; |
1796 | tx_ring->sw_consumer = sw_consumer; | ||
1797 | 1798 | ||
1799 | if (count && netif_running(netdev)) { | ||
1798 | smp_mb(); | 1800 | smp_mb(); |
1799 | 1801 | ||
1800 | if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) | 1802 | if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 1159031f885b..5ec5a2b0e989 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | |||
@@ -1186,7 +1186,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) | |||
1186 | return; | 1186 | return; |
1187 | 1187 | ||
1188 | smp_mb(); | 1188 | smp_mb(); |
1189 | spin_lock(&adapter->tx_clean_lock); | ||
1190 | netif_carrier_off(netdev); | 1189 | netif_carrier_off(netdev); |
1191 | netif_tx_disable(netdev); | 1190 | netif_tx_disable(netdev); |
1192 | 1191 | ||
@@ -1204,7 +1203,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) | |||
1204 | netxen_napi_disable(adapter); | 1203 | netxen_napi_disable(adapter); |
1205 | 1204 | ||
1206 | netxen_release_tx_buffers(adapter); | 1205 | netxen_release_tx_buffers(adapter); |
1207 | spin_unlock(&adapter->tx_clean_lock); | ||
1208 | } | 1206 | } |
1209 | 1207 | ||
1210 | /* Usage: During suspend and firmware recovery module */ | 1208 | /* Usage: During suspend and firmware recovery module */ |
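The two netxen hunks move tx_clean_lock out of __netxen_nic_down() and into netxen_release_tx_buffers() itself, so the buffer-release loop is serialized against the TX completion path no matter which caller reaches it. A minimal sketch of the pattern of locking inside the helper that owns the shared ring rather than at every call site; names are generic:

#include <linux/spinlock.h>

struct demo_adapter {
        spinlock_t tx_clean_lock;
        /* ... TX ring state ... */
};

/* Every caller (down path, suspend, firmware recovery) gets the same
 * protection because the helper takes the lock itself.
 */
static void demo_release_tx_buffers(struct demo_adapter *adapter)
{
        spin_lock(&adapter->tx_clean_lock);
        /* walk the descriptors, unmap buffers and free pending skbs */
        spin_unlock(&adapter->tx_clean_lock);
}

The companion change in netxen_process_cmd_ring() updates sw_consumer unconditionally before the netif_running() test, so the consumer index no longer goes stale when the interface is brought down mid-cleanup.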
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 86783e1afcf7..3172cdf591fe 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | |||
@@ -1177,9 +1177,8 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter) | |||
1177 | { | 1177 | { |
1178 | u32 idc_params, val; | 1178 | u32 idc_params, val; |
1179 | 1179 | ||
1180 | if (qlcnic_83xx_lockless_flash_read32(adapter, | 1180 | if (qlcnic_83xx_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR, |
1181 | QLC_83XX_IDC_FLASH_PARAM_ADDR, | 1181 | (u8 *)&idc_params, 1)) { |
1182 | (u8 *)&idc_params, 1)) { | ||
1183 | dev_info(&adapter->pdev->dev, | 1182 | dev_info(&adapter->pdev->dev, |
1184 | "%s:failed to get IDC params from flash\n", __func__); | 1183 | "%s:failed to get IDC params from flash\n", __func__); |
1185 | adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS; | 1184 | adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 141f116eb868..494e8105adee 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | |||
@@ -1333,21 +1333,21 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev, | |||
1333 | struct qlcnic_host_tx_ring *tx_ring; | 1333 | struct qlcnic_host_tx_ring *tx_ring; |
1334 | struct qlcnic_esw_statistics port_stats; | 1334 | struct qlcnic_esw_statistics port_stats; |
1335 | struct qlcnic_mac_statistics mac_stats; | 1335 | struct qlcnic_mac_statistics mac_stats; |
1336 | int index, ret, length, size, tx_size, ring; | 1336 | int index, ret, length, size, ring; |
1337 | char *p; | 1337 | char *p; |
1338 | 1338 | ||
1339 | tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN; | 1339 | memset(data, 0, stats->n_stats * sizeof(u64)); |
1340 | 1340 | ||
1341 | memset(data, 0, tx_size * sizeof(u64)); | ||
1342 | for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) { | 1341 | for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) { |
1343 | if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { | 1342 | if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) { |
1344 | tx_ring = &adapter->tx_ring[ring]; | 1343 | tx_ring = &adapter->tx_ring[ring]; |
1345 | data = qlcnic_fill_tx_queue_stats(data, tx_ring); | 1344 | data = qlcnic_fill_tx_queue_stats(data, tx_ring); |
1346 | qlcnic_update_stats(adapter); | 1345 | qlcnic_update_stats(adapter); |
1346 | } else { | ||
1347 | data += QLCNIC_TX_STATS_LEN; | ||
1347 | } | 1348 | } |
1348 | } | 1349 | } |
1349 | 1350 | ||
1350 | memset(data, 0, stats->n_stats * sizeof(u64)); | ||
1351 | length = QLCNIC_STATS_LEN; | 1351 | length = QLCNIC_STATS_LEN; |
1352 | for (index = 0; index < length; index++) { | 1352 | for (index = 0; index < length; index++) { |
1353 | p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset; | 1353 | p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset; |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 188626e2a861..3e96f269150d 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
@@ -2556,6 +2556,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) | |||
2556 | 2556 | ||
2557 | if (skb_is_gso(skb)) { | 2557 | if (skb_is_gso(skb)) { |
2558 | int err; | 2558 | int err; |
2559 | __be16 l3_proto = vlan_get_protocol(skb); | ||
2559 | 2560 | ||
2560 | err = skb_cow_head(skb, 0); | 2561 | err = skb_cow_head(skb, 0); |
2561 | if (err < 0) | 2562 | if (err < 0) |
@@ -2572,7 +2573,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) | |||
2572 | << OB_MAC_TRANSPORT_HDR_SHIFT); | 2573 | << OB_MAC_TRANSPORT_HDR_SHIFT); |
2573 | mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | 2574 | mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); |
2574 | mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO; | 2575 | mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO; |
2575 | if (likely(skb->protocol == htons(ETH_P_IP))) { | 2576 | if (likely(l3_proto == htons(ETH_P_IP))) { |
2576 | struct iphdr *iph = ip_hdr(skb); | 2577 | struct iphdr *iph = ip_hdr(skb); |
2577 | iph->check = 0; | 2578 | iph->check = 0; |
2578 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; | 2579 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; |
@@ -2580,7 +2581,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) | |||
2580 | iph->daddr, 0, | 2581 | iph->daddr, 0, |
2581 | IPPROTO_TCP, | 2582 | IPPROTO_TCP, |
2582 | 0); | 2583 | 0); |
2583 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | 2584 | } else if (l3_proto == htons(ETH_P_IPV6)) { |
2584 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6; | 2585 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6; |
2585 | tcp_hdr(skb)->check = | 2586 | tcp_hdr(skb)->check = |
2586 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 2587 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
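For VLAN-tagged traffic skb->protocol carries ETH_P_8021Q rather than the L3 type, so the qlge TSO setup above switches to vlan_get_protocol() before choosing between the IPv4 and IPv6 pseudo-header checksums. A minimal sketch of that classification step, using only core networking helpers:

#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/ip6_checksum.h>

/* Seed the TCP checksum for a TSO skb, looking through any VLAN tag. */
static void demo_tso_csum(struct sk_buff *skb)
{
        __be16 l3_proto = vlan_get_protocol(skb);       /* real L3 type */

        if (l3_proto == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);

                iph->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                         iph->daddr, 0,
                                                         IPPROTO_TCP, 0);
        } else if (l3_proto == htons(ETH_P_IPV6)) {
                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                       &ipv6_hdr(skb)->daddr,
                                                       0, IPPROTO_TCP, 0);
        }
}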
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 91652e7235e4..0921302553c6 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -1783,33 +1783,31 @@ static void __rtl8169_set_features(struct net_device *dev, | |||
1783 | netdev_features_t features) | 1783 | netdev_features_t features) |
1784 | { | 1784 | { |
1785 | struct rtl8169_private *tp = netdev_priv(dev); | 1785 | struct rtl8169_private *tp = netdev_priv(dev); |
1786 | netdev_features_t changed = features ^ dev->features; | ||
1787 | void __iomem *ioaddr = tp->mmio_addr; | 1786 | void __iomem *ioaddr = tp->mmio_addr; |
1787 | u32 rx_config; | ||
1788 | 1788 | ||
1789 | if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | | 1789 | rx_config = RTL_R32(RxConfig); |
1790 | NETIF_F_HW_VLAN_CTAG_RX))) | 1790 | if (features & NETIF_F_RXALL) |
1791 | return; | 1791 | rx_config |= (AcceptErr | AcceptRunt); |
1792 | else | ||
1793 | rx_config &= ~(AcceptErr | AcceptRunt); | ||
1792 | 1794 | ||
1793 | if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) { | 1795 | RTL_W32(RxConfig, rx_config); |
1794 | if (features & NETIF_F_RXCSUM) | ||
1795 | tp->cp_cmd |= RxChkSum; | ||
1796 | else | ||
1797 | tp->cp_cmd &= ~RxChkSum; | ||
1798 | 1796 | ||
1799 | if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) | 1797 | if (features & NETIF_F_RXCSUM) |
1800 | tp->cp_cmd |= RxVlan; | 1798 | tp->cp_cmd |= RxChkSum; |
1801 | else | 1799 | else |
1802 | tp->cp_cmd &= ~RxVlan; | 1800 | tp->cp_cmd &= ~RxChkSum; |
1803 | 1801 | ||
1804 | RTL_W16(CPlusCmd, tp->cp_cmd); | 1802 | if (features & NETIF_F_HW_VLAN_CTAG_RX) |
1805 | RTL_R16(CPlusCmd); | 1803 | tp->cp_cmd |= RxVlan; |
1806 | } | 1804 | else |
1807 | if (changed & NETIF_F_RXALL) { | 1805 | tp->cp_cmd &= ~RxVlan; |
1808 | int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt)); | 1806 | |
1809 | if (features & NETIF_F_RXALL) | 1807 | tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum); |
1810 | tmp |= (AcceptErr | AcceptRunt); | 1808 | |
1811 | RTL_W32(RxConfig, tmp); | 1809 | RTL_W16(CPlusCmd, tp->cp_cmd); |
1812 | } | 1810 | RTL_R16(CPlusCmd); |
1813 | } | 1811 | } |
1814 | 1812 | ||
1815 | static int rtl8169_set_features(struct net_device *dev, | 1813 | static int rtl8169_set_features(struct net_device *dev, |
@@ -1817,8 +1815,11 @@ static int rtl8169_set_features(struct net_device *dev, | |||
1817 | { | 1815 | { |
1818 | struct rtl8169_private *tp = netdev_priv(dev); | 1816 | struct rtl8169_private *tp = netdev_priv(dev); |
1819 | 1817 | ||
1818 | features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX; | ||
1819 | |||
1820 | rtl_lock_work(tp); | 1820 | rtl_lock_work(tp); |
1821 | __rtl8169_set_features(dev, features); | 1821 | if (features ^ dev->features) |
1822 | __rtl8169_set_features(dev, features); | ||
1822 | rtl_unlock_work(tp); | 1823 | rtl_unlock_work(tp); |
1823 | 1824 | ||
1824 | return 0; | 1825 | return 0; |
@@ -7118,8 +7119,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) | |||
7118 | } | 7119 | } |
7119 | } | 7120 | } |
7120 | 7121 | ||
7121 | static int | 7122 | static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
7122 | rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
7123 | { | 7123 | { |
7124 | const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; | 7124 | const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; |
7125 | const unsigned int region = cfg->region; | 7125 | const unsigned int region = cfg->region; |
@@ -7194,7 +7194,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
7194 | goto err_out_mwi_2; | 7194 | goto err_out_mwi_2; |
7195 | } | 7195 | } |
7196 | 7196 | ||
7197 | tp->cp_cmd = RxChkSum; | 7197 | tp->cp_cmd = 0; |
7198 | 7198 | ||
7199 | if ((sizeof(dma_addr_t) > 4) && | 7199 | if ((sizeof(dma_addr_t) > 4) && |
7200 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { | 7200 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { |
@@ -7235,13 +7235,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
7235 | 7235 | ||
7236 | pci_set_master(pdev); | 7236 | pci_set_master(pdev); |
7237 | 7237 | ||
7238 | /* | ||
7239 | * Pretend we are using VLANs; This bypasses a nasty bug where | ||
7240 | * Interrupts stop flowing on high load on 8110SCd controllers. | ||
7241 | */ | ||
7242 | if (tp->mac_version == RTL_GIGA_MAC_VER_05) | ||
7243 | tp->cp_cmd |= RxVlan; | ||
7244 | |||
7245 | rtl_init_mdio_ops(tp); | 7238 | rtl_init_mdio_ops(tp); |
7246 | rtl_init_pll_power_ops(tp); | 7239 | rtl_init_pll_power_ops(tp); |
7247 | rtl_init_jumbo_ops(tp); | 7240 | rtl_init_jumbo_ops(tp); |
@@ -7302,8 +7295,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
7302 | dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | | 7295 | dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | |
7303 | NETIF_F_HIGHDMA; | 7296 | NETIF_F_HIGHDMA; |
7304 | 7297 | ||
7298 | tp->cp_cmd |= RxChkSum | RxVlan; | ||
7299 | |||
7300 | /* | ||
7301 | * Pretend we are using VLANs; This bypasses a nasty bug where | ||
7302 | * Interrupts stop flowing on high load on 8110SCd controllers. | ||
7303 | */ | ||
7305 | if (tp->mac_version == RTL_GIGA_MAC_VER_05) | 7304 | if (tp->mac_version == RTL_GIGA_MAC_VER_05) |
7306 | /* 8110SCd requires hardware Rx VLAN - disallow toggling */ | 7305 | /* Disallow toggling */ |
7307 | dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; | 7306 | dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; |
7308 | 7307 | ||
7309 | if (tp->txd_version == RTL_TD_0) | 7308 | if (tp->txd_version == RTL_TD_0) |
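rtl8169_set_features() now masks the request down to the three feature bits the helper actually knows about and only takes the register path when the feature set changed, while __rtl8169_set_features() programs RxConfig and CPlusCmd from the requested state instead of from a computed delta. A minimal caller-side sketch of the same idea with generic names; the hardware-programming helper is left empty on purpose:

#include <linux/netdevice.h>

#define DEMO_HANDLED_FEATURES \
        (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)

static void __demo_apply_features(struct net_device *dev,
                                  netdev_features_t features)
{
        /* program the NIC from the full requested state here */
}

/* .ndo_set_features: ignore bits we do not handle and skip the
 * hardware access when the effective set is unchanged.
 */
static int demo_set_features(struct net_device *dev,
                             netdev_features_t features)
{
        features &= DEMO_HANDLED_FEATURES;

        if (features ^ (dev->features & DEMO_HANDLED_FEATURES))
                __demo_apply_features(dev, features);

        return 0;
}

Moving the RxChkSum|RxVlan defaults and the 8110SCd workaround next to the feature registration later in rtl_init_one() keeps cp_cmd consistent with whatever dev->features advertises at probe time.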
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 9e757c792d84..196e98a2d93b 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig | |||
@@ -5,6 +5,7 @@ | |||
5 | config SH_ETH | 5 | config SH_ETH |
6 | tristate "Renesas SuperH Ethernet support" | 6 | tristate "Renesas SuperH Ethernet support" |
7 | depends on HAS_DMA | 7 | depends on HAS_DMA |
8 | depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST | ||
8 | select CRC32 | 9 | select CRC32 |
9 | select MII | 10 | select MII |
10 | select MDIO_BITBANG | 11 | select MDIO_BITBANG |
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 0537381cd2f6..6859437b59fb 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c | |||
@@ -2933,6 +2933,9 @@ void efx_farch_filter_sync_rx_mode(struct efx_nic *efx) | |||
2933 | u32 crc; | 2933 | u32 crc; |
2934 | int bit; | 2934 | int bit; |
2935 | 2935 | ||
2936 | if (!efx_dev_registered(efx)) | ||
2937 | return; | ||
2938 | |||
2936 | netif_addr_lock_bh(net_dev); | 2939 | netif_addr_lock_bh(net_dev); |
2937 | 2940 | ||
2938 | efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); | 2941 | efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index c553f6b5a913..cf28daba4346 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c | |||
@@ -28,7 +28,7 @@ | |||
28 | 28 | ||
29 | #include "stmmac.h" | 29 | #include "stmmac.h" |
30 | 30 | ||
31 | static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | 31 | static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) |
32 | { | 32 | { |
33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; | 33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; |
34 | unsigned int txsize = priv->dma_tx_size; | 34 | unsigned int txsize = priv->dma_tx_size; |
@@ -47,7 +47,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
47 | 47 | ||
48 | desc->des2 = dma_map_single(priv->device, skb->data, | 48 | desc->des2 = dma_map_single(priv->device, skb->data, |
49 | bmax, DMA_TO_DEVICE); | 49 | bmax, DMA_TO_DEVICE); |
50 | priv->tx_skbuff_dma[entry] = desc->des2; | 50 | if (dma_mapping_error(priv->device, desc->des2)) |
51 | return -1; | ||
52 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
51 | priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); | 53 | priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); |
52 | 54 | ||
53 | while (len != 0) { | 55 | while (len != 0) { |
@@ -59,7 +61,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
59 | desc->des2 = dma_map_single(priv->device, | 61 | desc->des2 = dma_map_single(priv->device, |
60 | (skb->data + bmax * i), | 62 | (skb->data + bmax * i), |
61 | bmax, DMA_TO_DEVICE); | 63 | bmax, DMA_TO_DEVICE); |
62 | priv->tx_skbuff_dma[entry] = desc->des2; | 64 | if (dma_mapping_error(priv->device, desc->des2)) |
65 | return -1; | ||
66 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
63 | priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, | 67 | priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, |
64 | STMMAC_CHAIN_MODE); | 68 | STMMAC_CHAIN_MODE); |
65 | priv->hw->desc->set_tx_owner(desc); | 69 | priv->hw->desc->set_tx_owner(desc); |
@@ -69,7 +73,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
69 | desc->des2 = dma_map_single(priv->device, | 73 | desc->des2 = dma_map_single(priv->device, |
70 | (skb->data + bmax * i), len, | 74 | (skb->data + bmax * i), len, |
71 | DMA_TO_DEVICE); | 75 | DMA_TO_DEVICE); |
72 | priv->tx_skbuff_dma[entry] = desc->des2; | 76 | if (dma_mapping_error(priv->device, desc->des2)) |
77 | return -1; | ||
78 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
73 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, | 79 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, |
74 | STMMAC_CHAIN_MODE); | 80 | STMMAC_CHAIN_MODE); |
75 | priv->hw->desc->set_tx_owner(desc); | 81 | priv->hw->desc->set_tx_owner(desc); |
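Every dma_map_single() in the chain-mode jumbo path above is now followed by a dma_mapping_error() check, and a failure is reported to the caller instead of handing a bogus bus address to the DMA engine (the ring-mode and stmmac_main.c hunks further down do the same). A minimal sketch of the check-and-bail pattern with generic names:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map the linear part of an skb for TX; return 0 on success or
 * -ENOMEM so the caller can drop the packet cleanly.
 */
static int demo_map_tx_skb(struct device *dev, struct sk_buff *skb,
                           dma_addr_t *dma)
{
        *dma = dma_map_single(dev, skb->data, skb_headlen(skb),
                              DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *dma))
                return -ENOMEM;         /* never give this address to the HW */

        return 0;
}

In stmmac_xmit() a failure then funnels into a single dma_map_err label that frees the skb, increments tx_dropped and still returns NETDEV_TX_OK, which is the conventional way to drop a packet from ndo_start_xmit.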
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index de507c32036c..593e6c4144a7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h | |||
@@ -220,10 +220,10 @@ enum dma_irq_status { | |||
220 | handle_tx = 0x8, | 220 | handle_tx = 0x8, |
221 | }; | 221 | }; |
222 | 222 | ||
223 | #define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1) | 223 | #define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0) |
224 | #define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2) | 224 | #define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1) |
225 | #define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3) | 225 | #define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) |
226 | #define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4) | 226 | #define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3) |
227 | 227 | ||
228 | #define CORE_PCS_ANE_COMPLETE (1 << 5) | 228 | #define CORE_PCS_ANE_COMPLETE (1 << 5) |
229 | #define CORE_PCS_LINK_STATUS (1 << 6) | 229 | #define CORE_PCS_LINK_STATUS (1 << 6) |
@@ -287,7 +287,7 @@ struct dma_features { | |||
287 | 287 | ||
288 | /* Default LPI timers */ | 288 | /* Default LPI timers */ |
289 | #define STMMAC_DEFAULT_LIT_LS 0x3E8 | 289 | #define STMMAC_DEFAULT_LIT_LS 0x3E8 |
290 | #define STMMAC_DEFAULT_TWT_LS 0x0 | 290 | #define STMMAC_DEFAULT_TWT_LS 0x1E |
291 | 291 | ||
292 | #define STMMAC_CHAIN_MODE 0x1 | 292 | #define STMMAC_CHAIN_MODE 0x1 |
293 | #define STMMAC_RING_MODE 0x2 | 293 | #define STMMAC_RING_MODE 0x2 |
@@ -425,7 +425,7 @@ struct stmmac_mode_ops { | |||
425 | void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, | 425 | void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, |
426 | unsigned int extend_desc); | 426 | unsigned int extend_desc); |
427 | unsigned int (*is_jumbo_frm) (int len, int ehn_desc); | 427 | unsigned int (*is_jumbo_frm) (int len, int ehn_desc); |
428 | unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); | 428 | int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum); |
429 | int (*set_16kib_bfsize)(int mtu); | 429 | int (*set_16kib_bfsize)(int mtu); |
430 | void (*init_desc3)(struct dma_desc *p); | 430 | void (*init_desc3)(struct dma_desc *p); |
431 | void (*refill_desc3) (void *priv, struct dma_desc *p); | 431 | void (*refill_desc3) (void *priv, struct dma_desc *p); |
@@ -445,6 +445,7 @@ struct mac_device_info { | |||
445 | int multicast_filter_bins; | 445 | int multicast_filter_bins; |
446 | int unicast_filter_entries; | 446 | int unicast_filter_entries; |
447 | int mcast_bits_log2; | 447 | int mcast_bits_log2; |
448 | unsigned int rx_csum; | ||
448 | }; | 449 | }; |
449 | 450 | ||
450 | struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, | 451 | struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 71b5419256c1..64d8f56a9c17 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | |||
@@ -153,7 +153,7 @@ enum inter_frame_gap { | |||
153 | #define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ | 153 | #define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ |
154 | 154 | ||
155 | #define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \ | 155 | #define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \ |
156 | GMAC_CONTROL_BE) | 156 | GMAC_CONTROL_BE | GMAC_CONTROL_DCRS) |
157 | 157 | ||
158 | /* GMAC Frame Filter defines */ | 158 | /* GMAC Frame Filter defines */ |
159 | #define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ | 159 | #define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index d8ef18786a1c..5efe60ea6526 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | |||
@@ -58,7 +58,11 @@ static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw) | |||
58 | void __iomem *ioaddr = hw->pcsr; | 58 | void __iomem *ioaddr = hw->pcsr; |
59 | u32 value = readl(ioaddr + GMAC_CONTROL); | 59 | u32 value = readl(ioaddr + GMAC_CONTROL); |
60 | 60 | ||
61 | value |= GMAC_CONTROL_IPC; | 61 | if (hw->rx_csum) |
62 | value |= GMAC_CONTROL_IPC; | ||
63 | else | ||
64 | value &= ~GMAC_CONTROL_IPC; | ||
65 | |||
62 | writel(value, ioaddr + GMAC_CONTROL); | 66 | writel(value, ioaddr + GMAC_CONTROL); |
63 | 67 | ||
64 | value = readl(ioaddr + GMAC_CONTROL); | 68 | value = readl(ioaddr + GMAC_CONTROL); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index 8607488cbcfc..192c2491330b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h | |||
@@ -68,7 +68,7 @@ struct stmmac_counters { | |||
68 | unsigned int mmc_rx_octetcount_g; | 68 | unsigned int mmc_rx_octetcount_g; |
69 | unsigned int mmc_rx_broadcastframe_g; | 69 | unsigned int mmc_rx_broadcastframe_g; |
70 | unsigned int mmc_rx_multicastframe_g; | 70 | unsigned int mmc_rx_multicastframe_g; |
71 | unsigned int mmc_rx_crc_errror; | 71 | unsigned int mmc_rx_crc_error; |
72 | unsigned int mmc_rx_align_error; | 72 | unsigned int mmc_rx_align_error; |
73 | unsigned int mmc_rx_run_error; | 73 | unsigned int mmc_rx_run_error; |
74 | unsigned int mmc_rx_jabber_error; | 74 | unsigned int mmc_rx_jabber_error; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index 50617c5a0bdb..08c483bd2ec7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c | |||
@@ -196,7 +196,7 @@ void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc) | |||
196 | mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); | 196 | mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); |
197 | mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); | 197 | mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); |
198 | mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); | 198 | mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); |
199 | mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR); | 199 | mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR); |
200 | mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); | 200 | mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); |
201 | mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); | 201 | mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); |
202 | mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR); | 202 | mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 650a4be6bce5..5dd50c6cda5b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c | |||
@@ -28,7 +28,7 @@ | |||
28 | 28 | ||
29 | #include "stmmac.h" | 29 | #include "stmmac.h" |
30 | 30 | ||
31 | static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | 31 | static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) |
32 | { | 32 | { |
33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; | 33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; |
34 | unsigned int txsize = priv->dma_tx_size; | 34 | unsigned int txsize = priv->dma_tx_size; |
@@ -53,7 +53,10 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
53 | 53 | ||
54 | desc->des2 = dma_map_single(priv->device, skb->data, | 54 | desc->des2 = dma_map_single(priv->device, skb->data, |
55 | bmax, DMA_TO_DEVICE); | 55 | bmax, DMA_TO_DEVICE); |
56 | priv->tx_skbuff_dma[entry] = desc->des2; | 56 | if (dma_mapping_error(priv->device, desc->des2)) |
57 | return -1; | ||
58 | |||
59 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
57 | desc->des3 = desc->des2 + BUF_SIZE_4KiB; | 60 | desc->des3 = desc->des2 + BUF_SIZE_4KiB; |
58 | priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, | 61 | priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, |
59 | STMMAC_RING_MODE); | 62 | STMMAC_RING_MODE); |
@@ -68,7 +71,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
68 | 71 | ||
69 | desc->des2 = dma_map_single(priv->device, skb->data + bmax, | 72 | desc->des2 = dma_map_single(priv->device, skb->data + bmax, |
70 | len, DMA_TO_DEVICE); | 73 | len, DMA_TO_DEVICE); |
71 | priv->tx_skbuff_dma[entry] = desc->des2; | 74 | if (dma_mapping_error(priv->device, desc->des2)) |
75 | return -1; | ||
76 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
72 | desc->des3 = desc->des2 + BUF_SIZE_4KiB; | 77 | desc->des3 = desc->des2 + BUF_SIZE_4KiB; |
73 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, | 78 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, |
74 | STMMAC_RING_MODE); | 79 | STMMAC_RING_MODE); |
@@ -77,7 +82,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
77 | } else { | 82 | } else { |
78 | desc->des2 = dma_map_single(priv->device, skb->data, | 83 | desc->des2 = dma_map_single(priv->device, skb->data, |
79 | nopaged_len, DMA_TO_DEVICE); | 84 | nopaged_len, DMA_TO_DEVICE); |
80 | priv->tx_skbuff_dma[entry] = desc->des2; | 85 | if (dma_mapping_error(priv->device, desc->des2)) |
86 | return -1; | ||
87 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
81 | desc->des3 = desc->des2 + BUF_SIZE_4KiB; | 88 | desc->des3 = desc->des2 + BUF_SIZE_4KiB; |
82 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, | 89 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, |
83 | STMMAC_RING_MODE); | 90 | STMMAC_RING_MODE); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index ca01035634a7..58097c0e2ad5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
@@ -34,6 +34,11 @@ | |||
34 | #include <linux/ptp_clock_kernel.h> | 34 | #include <linux/ptp_clock_kernel.h> |
35 | #include <linux/reset.h> | 35 | #include <linux/reset.h> |
36 | 36 | ||
37 | struct stmmac_tx_info { | ||
38 | dma_addr_t buf; | ||
39 | bool map_as_page; | ||
40 | }; | ||
41 | |||
37 | struct stmmac_priv { | 42 | struct stmmac_priv { |
38 | /* Frequently used values are kept adjacent for cache effect */ | 43 | /* Frequently used values are kept adjacent for cache effect */ |
39 | struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; | 44 | struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; |
@@ -45,7 +50,7 @@ struct stmmac_priv { | |||
45 | u32 tx_count_frames; | 50 | u32 tx_count_frames; |
46 | u32 tx_coal_frames; | 51 | u32 tx_coal_frames; |
47 | u32 tx_coal_timer; | 52 | u32 tx_coal_timer; |
48 | dma_addr_t *tx_skbuff_dma; | 53 | struct stmmac_tx_info *tx_skbuff_dma; |
49 | dma_addr_t dma_tx_phy; | 54 | dma_addr_t dma_tx_phy; |
50 | int tx_coalesce; | 55 | int tx_coalesce; |
51 | int hwts_tx_en; | 56 | int hwts_tx_en; |
@@ -105,6 +110,8 @@ struct stmmac_priv { | |||
105 | struct ptp_clock *ptp_clock; | 110 | struct ptp_clock *ptp_clock; |
106 | struct ptp_clock_info ptp_clock_ops; | 111 | struct ptp_clock_info ptp_clock_ops; |
107 | unsigned int default_addend; | 112 | unsigned int default_addend; |
113 | struct clk *clk_ptp_ref; | ||
114 | unsigned int clk_ptp_rate; | ||
108 | u32 adv_ts; | 115 | u32 adv_ts; |
109 | int use_riwt; | 116 | int use_riwt; |
110 | int irq_wake; | 117 | int irq_wake; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 9af50bae4dde..cf4f38db1c0a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | |||
@@ -175,7 +175,7 @@ static const struct stmmac_stats stmmac_mmc[] = { | |||
175 | STMMAC_MMC_STAT(mmc_rx_octetcount_g), | 175 | STMMAC_MMC_STAT(mmc_rx_octetcount_g), |
176 | STMMAC_MMC_STAT(mmc_rx_broadcastframe_g), | 176 | STMMAC_MMC_STAT(mmc_rx_broadcastframe_g), |
177 | STMMAC_MMC_STAT(mmc_rx_multicastframe_g), | 177 | STMMAC_MMC_STAT(mmc_rx_multicastframe_g), |
178 | STMMAC_MMC_STAT(mmc_rx_crc_errror), | 178 | STMMAC_MMC_STAT(mmc_rx_crc_error), |
179 | STMMAC_MMC_STAT(mmc_rx_align_error), | 179 | STMMAC_MMC_STAT(mmc_rx_align_error), |
180 | STMMAC_MMC_STAT(mmc_rx_run_error), | 180 | STMMAC_MMC_STAT(mmc_rx_run_error), |
181 | STMMAC_MMC_STAT(mmc_rx_jabber_error), | 181 | STMMAC_MMC_STAT(mmc_rx_jabber_error), |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 08addd653728..b0c1521e08a3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -275,6 +275,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg) | |||
275 | */ | 275 | */ |
276 | bool stmmac_eee_init(struct stmmac_priv *priv) | 276 | bool stmmac_eee_init(struct stmmac_priv *priv) |
277 | { | 277 | { |
278 | char *phy_bus_name = priv->plat->phy_bus_name; | ||
278 | bool ret = false; | 279 | bool ret = false; |
279 | 280 | ||
280 | /* Using PCS we cannot deal with the phy registers at this stage | 281 |
@@ -284,6 +285,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
284 | (priv->pcs == STMMAC_PCS_RTBI)) | 285 | (priv->pcs == STMMAC_PCS_RTBI)) |
285 | goto out; | 286 | goto out; |
286 | 287 | ||
288 | /* Never init EEE in case a switch is attached */ | ||
289 | if (phy_bus_name && (!strcmp(phy_bus_name, "fixed"))) | ||
290 | goto out; | ||
291 | |||
287 | /* MAC core supports the EEE feature. */ | 292 | /* MAC core supports the EEE feature. */ |
288 | if (priv->dma_cap.eee) { | 293 | if (priv->dma_cap.eee) { |
289 | int tx_lpi_timer = priv->tx_lpi_timer; | 294 | int tx_lpi_timer = priv->tx_lpi_timer; |
@@ -316,10 +321,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
316 | priv->hw->mac->set_eee_timer(priv->hw, | 321 | priv->hw->mac->set_eee_timer(priv->hw, |
317 | STMMAC_DEFAULT_LIT_LS, | 322 | STMMAC_DEFAULT_LIT_LS, |
318 | tx_lpi_timer); | 323 | tx_lpi_timer); |
319 | } else | 324 | } |
320 | /* Set HW EEE according to the speed */ | 325 | /* Set HW EEE according to the speed */ |
321 | priv->hw->mac->set_eee_pls(priv->hw, | 326 | priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); |
322 | priv->phydev->link); | ||
323 | 327 | ||
324 | pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); | 328 | pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); |
325 | 329 | ||
@@ -603,16 +607,16 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
603 | /* calculate default added value: | 607 | /* calculate default added value: |
604 | * formula is : | 608 | * formula is : |
605 | * addend = (2^32)/freq_div_ratio; | 609 | * addend = (2^32)/freq_div_ratio; |
606 | * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz | 610 | * where, freq_div_ratio = clk_ptp_ref_i/50MHz |
607 | * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK; | 611 | * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i; |
608 | * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to | 612 | * NOTE: clk_ptp_ref_i should be >= 50MHz to |
609 | * achieve 20ns accuracy. | 613 | * achieve 20ns accuracy. |
610 | * | 614 | * |
611 | * 2^x * y == (y << x), hence | 615 | * 2^x * y == (y << x), hence |
612 | * 2^32 * 50000000 ==> (50000000 << 32) | 616 | * 2^32 * 50000000 ==> (50000000 << 32) |
613 | */ | 617 | */ |
614 | temp = (u64) (50000000ULL << 32); | 618 | temp = (u64) (50000000ULL << 32); |
615 | priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK); | 619 | priv->default_addend = div_u64(temp, priv->clk_ptp_rate); |
616 | priv->hw->ptp->config_addend(priv->ioaddr, | 620 | priv->hw->ptp->config_addend(priv->ioaddr, |
617 | priv->default_addend); | 621 | priv->default_addend); |
618 | 622 | ||
@@ -638,6 +642,16 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) | |||
638 | if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) | 642 | if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) |
639 | return -EOPNOTSUPP; | 643 | return -EOPNOTSUPP; |
640 | 644 | ||
645 | /* Fall back to the main clock in case no PTP ref clock is passed */ | ||
646 | priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref"); | ||
647 | if (IS_ERR(priv->clk_ptp_ref)) { | ||
648 | priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk); | ||
649 | priv->clk_ptp_ref = NULL; | ||
650 | } else { | ||
651 | clk_prepare_enable(priv->clk_ptp_ref); | ||
652 | priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref); | ||
653 | } | ||
654 | |||
641 | priv->adv_ts = 0; | 655 | priv->adv_ts = 0; |
642 | if (priv->dma_cap.atime_stamp && priv->extend_desc) | 656 | if (priv->dma_cap.atime_stamp && priv->extend_desc) |
643 | priv->adv_ts = 1; | 657 | priv->adv_ts = 1; |
@@ -657,6 +671,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) | |||
657 | 671 | ||
658 | static void stmmac_release_ptp(struct stmmac_priv *priv) | 672 | static void stmmac_release_ptp(struct stmmac_priv *priv) |
659 | { | 673 | { |
674 | if (priv->clk_ptp_ref) | ||
675 | clk_disable_unprepare(priv->clk_ptp_ref); | ||
660 | stmmac_ptp_unregister(priv); | 676 | stmmac_ptp_unregister(priv); |
661 | } | 677 | } |
662 | 678 | ||
@@ -1061,7 +1077,8 @@ static int init_dma_desc_rings(struct net_device *dev) | |||
1061 | else | 1077 | else |
1062 | p = priv->dma_tx + i; | 1078 | p = priv->dma_tx + i; |
1063 | p->des2 = 0; | 1079 | p->des2 = 0; |
1064 | priv->tx_skbuff_dma[i] = 0; | 1080 | priv->tx_skbuff_dma[i].buf = 0; |
1081 | priv->tx_skbuff_dma[i].map_as_page = false; | ||
1065 | priv->tx_skbuff[i] = NULL; | 1082 | priv->tx_skbuff[i] = NULL; |
1066 | } | 1083 | } |
1067 | 1084 | ||
@@ -1100,17 +1117,24 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) | |||
1100 | else | 1117 | else |
1101 | p = priv->dma_tx + i; | 1118 | p = priv->dma_tx + i; |
1102 | 1119 | ||
1103 | if (priv->tx_skbuff_dma[i]) { | 1120 | if (priv->tx_skbuff_dma[i].buf) { |
1104 | dma_unmap_single(priv->device, | 1121 | if (priv->tx_skbuff_dma[i].map_as_page) |
1105 | priv->tx_skbuff_dma[i], | 1122 | dma_unmap_page(priv->device, |
1106 | priv->hw->desc->get_tx_len(p), | 1123 | priv->tx_skbuff_dma[i].buf, |
1107 | DMA_TO_DEVICE); | 1124 | priv->hw->desc->get_tx_len(p), |
1108 | priv->tx_skbuff_dma[i] = 0; | 1125 | DMA_TO_DEVICE); |
1126 | else | ||
1127 | dma_unmap_single(priv->device, | ||
1128 | priv->tx_skbuff_dma[i].buf, | ||
1129 | priv->hw->desc->get_tx_len(p), | ||
1130 | DMA_TO_DEVICE); | ||
1109 | } | 1131 | } |
1110 | 1132 | ||
1111 | if (priv->tx_skbuff[i] != NULL) { | 1133 | if (priv->tx_skbuff[i] != NULL) { |
1112 | dev_kfree_skb_any(priv->tx_skbuff[i]); | 1134 | dev_kfree_skb_any(priv->tx_skbuff[i]); |
1113 | priv->tx_skbuff[i] = NULL; | 1135 | priv->tx_skbuff[i] = NULL; |
1136 | priv->tx_skbuff_dma[i].buf = 0; | ||
1137 | priv->tx_skbuff_dma[i].map_as_page = false; | ||
1114 | } | 1138 | } |
1115 | } | 1139 | } |
1116 | } | 1140 | } |
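Turning tx_skbuff_dma into an array of struct stmmac_tx_info lets the free and clean paths above pick dma_unmap_page() for fragments mapped with skb_frag_dma_map() and dma_unmap_single() for linear data, then reset both fields so a slot cannot be unmapped twice. A minimal sketch of that unmap helper with generic names:

#include <linux/dma-mapping.h>

struct demo_tx_info {
        dma_addr_t buf;
        bool map_as_page;       /* set when mapped via skb_frag_dma_map() */
};

/* Undo a TX mapping with the API that matches how it was created. */
static void demo_unmap_tx(struct device *dev, struct demo_tx_info *ti,
                          size_t len)
{
        if (!ti->buf)
                return;

        if (ti->map_as_page)
                dma_unmap_page(dev, ti->buf, len, DMA_TO_DEVICE);
        else
                dma_unmap_single(dev, ti->buf, len, DMA_TO_DEVICE);

        ti->buf = 0;
        ti->map_as_page = false;
}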
@@ -1131,7 +1155,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv) | |||
1131 | if (!priv->rx_skbuff) | 1155 | if (!priv->rx_skbuff) |
1132 | goto err_rx_skbuff; | 1156 | goto err_rx_skbuff; |
1133 | 1157 | ||
1134 | priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), | 1158 | priv->tx_skbuff_dma = kmalloc_array(txsize, |
1159 | sizeof(*priv->tx_skbuff_dma), | ||
1135 | GFP_KERNEL); | 1160 | GFP_KERNEL); |
1136 | if (!priv->tx_skbuff_dma) | 1161 | if (!priv->tx_skbuff_dma) |
1137 | goto err_tx_skbuff_dma; | 1162 | goto err_tx_skbuff_dma; |
@@ -1293,12 +1318,19 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) | |||
1293 | pr_debug("%s: curr %d, dirty %d\n", __func__, | 1318 | pr_debug("%s: curr %d, dirty %d\n", __func__, |
1294 | priv->cur_tx, priv->dirty_tx); | 1319 | priv->cur_tx, priv->dirty_tx); |
1295 | 1320 | ||
1296 | if (likely(priv->tx_skbuff_dma[entry])) { | 1321 | if (likely(priv->tx_skbuff_dma[entry].buf)) { |
1297 | dma_unmap_single(priv->device, | 1322 | if (priv->tx_skbuff_dma[entry].map_as_page) |
1298 | priv->tx_skbuff_dma[entry], | 1323 | dma_unmap_page(priv->device, |
1299 | priv->hw->desc->get_tx_len(p), | 1324 | priv->tx_skbuff_dma[entry].buf, |
1300 | DMA_TO_DEVICE); | 1325 | priv->hw->desc->get_tx_len(p), |
1301 | priv->tx_skbuff_dma[entry] = 0; | 1326 | DMA_TO_DEVICE); |
1327 | else | ||
1328 | dma_unmap_single(priv->device, | ||
1329 | priv->tx_skbuff_dma[entry].buf, | ||
1330 | priv->hw->desc->get_tx_len(p), | ||
1331 | DMA_TO_DEVICE); | ||
1332 | priv->tx_skbuff_dma[entry].buf = 0; | ||
1333 | priv->tx_skbuff_dma[entry].map_as_page = false; | ||
1302 | } | 1334 | } |
1303 | priv->hw->mode->clean_desc3(priv, p); | 1335 | priv->hw->mode->clean_desc3(priv, p); |
1304 | 1336 | ||
@@ -1637,6 +1669,13 @@ static int stmmac_hw_setup(struct net_device *dev) | |||
1637 | /* Initialize the MAC Core */ | 1669 | /* Initialize the MAC Core */ |
1638 | priv->hw->mac->core_init(priv->hw, dev->mtu); | 1670 | priv->hw->mac->core_init(priv->hw, dev->mtu); |
1639 | 1671 | ||
1672 | ret = priv->hw->mac->rx_ipc(priv->hw); | ||
1673 | if (!ret) { | ||
1674 | pr_warn(" RX IPC Checksum Offload disabled\n"); | ||
1675 | priv->plat->rx_coe = STMMAC_RX_COE_NONE; | ||
1676 | priv->hw->rx_csum = 0; | ||
1677 | } | ||
1678 | |||
1640 | /* Enable the MAC Rx/Tx */ | 1679 | /* Enable the MAC Rx/Tx */ |
1641 | stmmac_set_mac(priv->ioaddr, true); | 1680 | stmmac_set_mac(priv->ioaddr, true); |
1642 | 1681 | ||
@@ -1887,12 +1926,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1887 | if (likely(!is_jumbo)) { | 1926 | if (likely(!is_jumbo)) { |
1888 | desc->des2 = dma_map_single(priv->device, skb->data, | 1927 | desc->des2 = dma_map_single(priv->device, skb->data, |
1889 | nopaged_len, DMA_TO_DEVICE); | 1928 | nopaged_len, DMA_TO_DEVICE); |
1890 | priv->tx_skbuff_dma[entry] = desc->des2; | 1929 | if (dma_mapping_error(priv->device, desc->des2)) |
1930 | goto dma_map_err; | ||
1931 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
1891 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, | 1932 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, |
1892 | csum_insertion, priv->mode); | 1933 | csum_insertion, priv->mode); |
1893 | } else { | 1934 | } else { |
1894 | desc = first; | 1935 | desc = first; |
1895 | entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); | 1936 | entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); |
1937 | if (unlikely(entry < 0)) | ||
1938 | goto dma_map_err; | ||
1896 | } | 1939 | } |
1897 | 1940 | ||
1898 | for (i = 0; i < nfrags; i++) { | 1941 | for (i = 0; i < nfrags; i++) { |
@@ -1908,7 +1951,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1908 | 1951 | ||
1909 | desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, | 1952 | desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, |
1910 | DMA_TO_DEVICE); | 1953 | DMA_TO_DEVICE); |
1911 | priv->tx_skbuff_dma[entry] = desc->des2; | 1954 | if (dma_mapping_error(priv->device, desc->des2)) |
1955 | goto dma_map_err; /* should reuse desc w/o issues */ | ||
1956 | |||
1957 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
1958 | priv->tx_skbuff_dma[entry].map_as_page = true; | ||
1912 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, | 1959 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, |
1913 | priv->mode); | 1960 | priv->mode); |
1914 | wmb(); | 1961 | wmb(); |
@@ -1975,7 +2022,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1975 | priv->hw->dma->enable_dma_transmission(priv->ioaddr); | 2022 | priv->hw->dma->enable_dma_transmission(priv->ioaddr); |
1976 | 2023 | ||
1977 | spin_unlock(&priv->tx_lock); | 2024 | spin_unlock(&priv->tx_lock); |
2025 | return NETDEV_TX_OK; | ||
1978 | 2026 | ||
2027 | dma_map_err: | ||
2028 | dev_err(priv->device, "Tx dma map failed\n"); | ||
2029 | dev_kfree_skb(skb); | ||
2030 | priv->dev->stats.tx_dropped++; | ||
1979 | return NETDEV_TX_OK; | 2031 | return NETDEV_TX_OK; |
1980 | } | 2032 | } |
1981 | 2033 | ||
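The dma_map_err path added above follows the standard streaming-DMA pattern on transmit: every dma_map_single()/skb_frag_dma_map() result must be validated with dma_mapping_error() before it is written into a descriptor, and on failure the skb is dropped and counted rather than handed to the hardware. A generic sketch of the pattern, with made-up names (example_priv, example_xmit):

	static netdev_tx_t example_xmit(struct example_priv *priv, struct sk_buff *skb)
	{
		dma_addr_t addr;

		addr = dma_map_single(priv->device, skb->data, skb_headlen(skb),
				      DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, addr))
			goto drop;

		/* ... program the descriptor, map fragments, start the DMA ... */
		return NETDEV_TX_OK;

	drop:
		dev_kfree_skb_any(skb);
		priv->dev->stats.tx_dropped++;
		return NETDEV_TX_OK;	/* the packet is consumed either way */
	}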
@@ -2028,7 +2080,12 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) | |||
2028 | priv->rx_skbuff_dma[entry] = | 2080 | priv->rx_skbuff_dma[entry] = |
2029 | dma_map_single(priv->device, skb->data, bfsize, | 2081 | dma_map_single(priv->device, skb->data, bfsize, |
2030 | DMA_FROM_DEVICE); | 2082 | DMA_FROM_DEVICE); |
2031 | 2083 | if (dma_mapping_error(priv->device, | |
2084 | priv->rx_skbuff_dma[entry])) { | ||
2085 | dev_err(priv->device, "Rx dma map failed\n"); | ||
2086 | dev_kfree_skb(skb); | ||
2087 | break; | ||
2088 | } | ||
2032 | p->des2 = priv->rx_skbuff_dma[entry]; | 2089 | p->des2 = priv->rx_skbuff_dma[entry]; |
2033 | 2090 | ||
2034 | priv->hw->mode->refill_desc3(priv, p); | 2091 | priv->hw->mode->refill_desc3(priv, p); |
@@ -2055,7 +2112,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) | |||
2055 | unsigned int entry = priv->cur_rx % rxsize; | 2112 | unsigned int entry = priv->cur_rx % rxsize; |
2056 | unsigned int next_entry; | 2113 | unsigned int next_entry; |
2057 | unsigned int count = 0; | 2114 | unsigned int count = 0; |
2058 | int coe = priv->plat->rx_coe; | 2115 | int coe = priv->hw->rx_csum; |
2059 | 2116 | ||
2060 | if (netif_msg_rx_status(priv)) { | 2117 | if (netif_msg_rx_status(priv)) { |
2061 | pr_debug("%s: descriptor ring:\n", __func__); | 2118 | pr_debug("%s: descriptor ring:\n", __func__); |
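With this hunk the RX loop reads the checksum-offload state from priv->hw->rx_csum, which stmmac_set_features() (added below) updates at runtime, instead of the immutable platform value. In drivers of this kind the flag typically decides whether the descriptor's checksum status is trusted, roughly as in this illustrative fragment (descriptor_csum_ok is a placeholder, not a real stmmac symbol):

	/* Per-packet, after the descriptor status has been parsed. */
	if (likely(coe) && descriptor_csum_ok)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);	/* let the stack verify it */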
@@ -2276,8 +2333,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, | |||
2276 | 2333 | ||
2277 | if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) | 2334 | if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) |
2278 | features &= ~NETIF_F_RXCSUM; | 2335 | features &= ~NETIF_F_RXCSUM; |
2279 | else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1) | 2336 | |
2280 | features &= ~NETIF_F_IPV6_CSUM; | ||
2281 | if (!priv->plat->tx_coe) | 2337 | if (!priv->plat->tx_coe) |
2282 | features &= ~NETIF_F_ALL_CSUM; | 2338 | features &= ~NETIF_F_ALL_CSUM; |
2283 | 2339 | ||
@@ -2292,6 +2348,24 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, | |||
2292 | return features; | 2348 | return features; |
2293 | } | 2349 | } |
2294 | 2350 | ||
2351 | static int stmmac_set_features(struct net_device *netdev, | ||
2352 | netdev_features_t features) | ||
2353 | { | ||
2354 | struct stmmac_priv *priv = netdev_priv(netdev); | ||
2355 | |||
2356 | /* Keep the COE type when checksum offload is supported */ | ||
2357 | if (features & NETIF_F_RXCSUM) | ||
2358 | priv->hw->rx_csum = priv->plat->rx_coe; | ||
2359 | else | ||
2360 | priv->hw->rx_csum = 0; | ||
2361 | /* No check needed because rx_coe has been set before and will be | ||
2362 | * corrected by rx_ipc() if there is an issue. | ||
2363 | */ | ||
2364 | priv->hw->mac->rx_ipc(priv->hw); | ||
2365 | |||
2366 | return 0; | ||
2367 | } | ||
2368 | |||
2295 | /** | 2369 | /** |
2296 | * stmmac_interrupt - main ISR | 2370 | * stmmac_interrupt - main ISR |
2297 | * @irq: interrupt number. | 2371 | * @irq: interrupt number. |
@@ -2572,6 +2646,7 @@ static const struct net_device_ops stmmac_netdev_ops = { | |||
2572 | .ndo_stop = stmmac_release, | 2646 | .ndo_stop = stmmac_release, |
2573 | .ndo_change_mtu = stmmac_change_mtu, | 2647 | .ndo_change_mtu = stmmac_change_mtu, |
2574 | .ndo_fix_features = stmmac_fix_features, | 2648 | .ndo_fix_features = stmmac_fix_features, |
2649 | .ndo_set_features = stmmac_set_features, | ||
2575 | .ndo_set_rx_mode = stmmac_set_rx_mode, | 2650 | .ndo_set_rx_mode = stmmac_set_rx_mode, |
2576 | .ndo_tx_timeout = stmmac_tx_timeout, | 2651 | .ndo_tx_timeout = stmmac_tx_timeout, |
2577 | .ndo_do_ioctl = stmmac_ioctl, | 2652 | .ndo_do_ioctl = stmmac_ioctl, |
@@ -2592,7 +2667,6 @@ static const struct net_device_ops stmmac_netdev_ops = { | |||
2592 | */ | 2667 | */ |
2593 | static int stmmac_hw_init(struct stmmac_priv *priv) | 2668 | static int stmmac_hw_init(struct stmmac_priv *priv) |
2594 | { | 2669 | { |
2595 | int ret; | ||
2596 | struct mac_device_info *mac; | 2670 | struct mac_device_info *mac; |
2597 | 2671 | ||
2598 | /* Identify the MAC HW device */ | 2672 | /* Identify the MAC HW device */ |
@@ -2649,15 +2723,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv) | |||
2649 | /* To use alternate (extended) or normal descriptor structures */ | 2723 | /* To use alternate (extended) or normal descriptor structures */ |
2650 | stmmac_selec_desc_mode(priv); | 2724 | stmmac_selec_desc_mode(priv); |
2651 | 2725 | ||
2652 | ret = priv->hw->mac->rx_ipc(priv->hw); | 2726 | if (priv->plat->rx_coe) { |
2653 | if (!ret) { | 2727 | priv->hw->rx_csum = priv->plat->rx_coe; |
2654 | pr_warn(" RX IPC Checksum Offload not configured.\n"); | ||
2655 | priv->plat->rx_coe = STMMAC_RX_COE_NONE; | ||
2656 | } | ||
2657 | |||
2658 | if (priv->plat->rx_coe) | ||
2659 | pr_info(" RX Checksum Offload Engine supported (type %d)\n", | 2728 | pr_info(" RX Checksum Offload Engine supported (type %d)\n", |
2660 | priv->plat->rx_coe); | 2729 | priv->plat->rx_coe); |
2730 | } | ||
2661 | if (priv->plat->tx_coe) | 2731 | if (priv->plat->tx_coe) |
2662 | pr_info(" TX Checksum insertion supported\n"); | 2732 | pr_info(" TX Checksum insertion supported\n"); |
2663 | 2733 | ||
@@ -2716,8 +2786,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, | |||
2716 | if (IS_ERR(priv->stmmac_clk)) { | 2786 | if (IS_ERR(priv->stmmac_clk)) { |
2717 | dev_warn(priv->device, "%s: warning: cannot get CSR clock\n", | 2787 | dev_warn(priv->device, "%s: warning: cannot get CSR clock\n", |
2718 | __func__); | 2788 | __func__); |
2719 | ret = PTR_ERR(priv->stmmac_clk); | 2789 | /* If we failed to obtain stmmac_clk and a specific clk_csr value |
2720 | goto error_clk_get; | 2790 | * is NOT passed from the platform, the probe fails. |
2791 | */ | ||
2792 | if (!priv->plat->clk_csr) { | ||
2793 | ret = PTR_ERR(priv->stmmac_clk); | ||
2794 | goto error_clk_get; | ||
2795 | } else { | ||
2796 | priv->stmmac_clk = NULL; | ||
2797 | } | ||
2721 | } | 2798 | } |
2722 | clk_prepare_enable(priv->stmmac_clk); | 2799 | clk_prepare_enable(priv->stmmac_clk); |
2723 | 2800 | ||
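The probe change above makes the CSR clock optional when the platform already supplies a fixed clk_csr divider: the handle is replaced with NULL and probing continues. This relies on the common clock framework accepting a NULL clk as a no-op, so the clk_prepare_enable() call that follows is harmless. A hedged sketch of the idiom (the lookup call and variable names are illustrative, not the exact driver code):

	struct clk *clk = devm_clk_get(dev, NULL);

	if (IS_ERR(clk)) {
		if (!plat->clk_csr)		/* no fallback divider: hard error */
			return PTR_ERR(clk);
		clk = NULL;			/* treat the clock as optional */
	}
	clk_prepare_enable(clk);		/* a NULL clk is silently accepted */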
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index b7ad3565566c..c5ee79d8a8c5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | |||
@@ -206,6 +206,7 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv) | |||
206 | { | 206 | { |
207 | if (priv->ptp_clock) { | 207 | if (priv->ptp_clock) { |
208 | ptp_clock_unregister(priv->ptp_clock); | 208 | ptp_clock_unregister(priv->ptp_clock); |
209 | priv->ptp_clock = NULL; | ||
209 | pr_debug("Removed PTP HW clock successfully on %s\n", | 210 | pr_debug("Removed PTP HW clock successfully on %s\n", |
210 | priv->dev->name); | 211 | priv->dev->name); |
211 | } | 212 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 3dbc047622fa..4535df37c227 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | |||
@@ -25,8 +25,6 @@ | |||
25 | #ifndef __STMMAC_PTP_H__ | 25 | #ifndef __STMMAC_PTP_H__ |
26 | #define __STMMAC_PTP_H__ | 26 | #define __STMMAC_PTP_H__ |
27 | 27 | ||
28 | #define STMMAC_SYSCLOCK 62500000 | ||
29 | |||
30 | /* IEEE 1588 PTP register offsets */ | 28 | /* IEEE 1588 PTP register offsets */ |
31 | #define PTP_TCR 0x0700 /* Timestamp Control Reg */ | 29 | #define PTP_TCR 0x0700 /* Timestamp Control Reg */ |
32 | #define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */ | 30 | #define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */ |
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 23c89ab5a6ad..f67539650c38 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c | |||
@@ -350,14 +350,17 @@ static int vnet_walk_rx_one(struct vnet_port *port, | |||
350 | if (IS_ERR(desc)) | 350 | if (IS_ERR(desc)) |
351 | return PTR_ERR(desc); | 351 | return PTR_ERR(desc); |
352 | 352 | ||
353 | if (desc->hdr.state != VIO_DESC_READY) | ||
354 | return 1; | ||
355 | |||
356 | rmb(); | ||
357 | |||
353 | viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", | 358 | viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", |
354 | desc->hdr.state, desc->hdr.ack, | 359 | desc->hdr.state, desc->hdr.ack, |
355 | desc->size, desc->ncookies, | 360 | desc->size, desc->ncookies, |
356 | desc->cookies[0].cookie_addr, | 361 | desc->cookies[0].cookie_addr, |
357 | desc->cookies[0].cookie_size); | 362 | desc->cookies[0].cookie_size); |
358 | 363 | ||
359 | if (desc->hdr.state != VIO_DESC_READY) | ||
360 | return 1; | ||
361 | err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies); | 364 | err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies); |
362 | if (err == -ECONNRESET) | 365 | if (err == -ECONNRESET) |
363 | return err; | 366 | return err; |
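The sunvnet change above is the consumer side of the classic descriptor-ring ordering rule: first check that the producer marked the slot VIO_DESC_READY, then issue a read barrier, and only after that read the payload fields the producer filled in; otherwise the size/cookie reads may be satisfied before the state check. A generic sketch of the idiom (consume() is a placeholder):

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;			/* nothing to consume yet */

	/* Pairs with the producer's write barrier between filling the
	 * descriptor and setting state to READY: no payload read may be
	 * reordered before the state check above.
	 */
	rmb();

	consume(desc->size, desc->cookies, desc->ncookies);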
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 999fb72688d2..e2a00287f8eb 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -699,6 +699,28 @@ static void cpsw_rx_handler(void *token, int len, int status) | |||
699 | cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); | 699 | cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); |
700 | 700 | ||
701 | if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { | 701 | if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { |
702 | bool ndev_status = false; | ||
703 | struct cpsw_slave *slave = priv->slaves; | ||
704 | int n; | ||
705 | |||
706 | if (priv->data.dual_emac) { | ||
707 | /* In dual emac mode check for all interfaces */ | ||
708 | for (n = priv->data.slaves; n; n--, slave++) | ||
709 | if (netif_running(slave->ndev)) | ||
710 | ndev_status = true; | ||
711 | } | ||
712 | |||
713 | if (ndev_status && (status >= 0)) { | ||
714 | /* The packet received is for the interface which | ||
715 | * is already down and the other interface is up | ||
716 | * and running, instead of freeing it (which would | ||
717 | * reduce the number of rx descriptors in the | ||
718 | * DMA engine), requeue the skb back to cpdma. | ||
719 | */ | ||
720 | new_skb = skb; | ||
721 | goto requeue; | ||
722 | } | ||
723 | |||
702 | /* the interface is going down, skbs are purged */ | 724 | /* the interface is going down, skbs are purged */ |
703 | dev_kfree_skb_any(skb); | 725 | dev_kfree_skb_any(skb); |
704 | return; | 726 | return; |
@@ -717,6 +739,7 @@ static void cpsw_rx_handler(void *token, int len, int status) | |||
717 | new_skb = skb; | 739 | new_skb = skb; |
718 | } | 740 | } |
719 | 741 | ||
742 | requeue: | ||
720 | ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, | 743 | ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, |
721 | skb_tailroom(new_skb), 0); | 744 | skb_tailroom(new_skb), 0); |
722 | if (WARN_ON(ret < 0)) | 745 | if (WARN_ON(ret < 0)) |
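The RX-handler change above keeps the CPDMA ring full in dual-EMAC mode: when a packet arrives for a slave interface that is down while the other slave is still up, the buffer is requeued to the DMA channel instead of being freed, so the hardware does not slowly lose RX descriptors. The decision reduces to roughly the following sketch (not the literal driver code):

	bool any_slave_running = false;
	int n;

	if (priv->data.dual_emac)
		for (n = 0; n < priv->data.slaves; n++)
			if (netif_running(priv->slaves[n].ndev))
				any_slave_running = true;

	if (status < 0 || !any_slave_running) {
		dev_kfree_skb_any(skb);		/* interface(s) really going down */
		return;
	}
	new_skb = skb;				/* hand the same buffer back to CPDMA */
	goto requeue;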
@@ -2311,10 +2334,19 @@ static int cpsw_suspend(struct device *dev) | |||
2311 | struct net_device *ndev = platform_get_drvdata(pdev); | 2334 | struct net_device *ndev = platform_get_drvdata(pdev); |
2312 | struct cpsw_priv *priv = netdev_priv(ndev); | 2335 | struct cpsw_priv *priv = netdev_priv(ndev); |
2313 | 2336 | ||
2314 | if (netif_running(ndev)) | 2337 | if (priv->data.dual_emac) { |
2315 | cpsw_ndo_stop(ndev); | 2338 | int i; |
2316 | 2339 | ||
2317 | for_each_slave(priv, soft_reset_slave); | 2340 | for (i = 0; i < priv->data.slaves; i++) { |
2341 | if (netif_running(priv->slaves[i].ndev)) | ||
2342 | cpsw_ndo_stop(priv->slaves[i].ndev); | ||
2343 | soft_reset_slave(priv->slaves + i); | ||
2344 | } | ||
2345 | } else { | ||
2346 | if (netif_running(ndev)) | ||
2347 | cpsw_ndo_stop(ndev); | ||
2348 | for_each_slave(priv, soft_reset_slave); | ||
2349 | } | ||
2318 | 2350 | ||
2319 | pm_runtime_put_sync(&pdev->dev); | 2351 | pm_runtime_put_sync(&pdev->dev); |
2320 | 2352 | ||
@@ -2328,14 +2360,24 @@ static int cpsw_resume(struct device *dev) | |||
2328 | { | 2360 | { |
2329 | struct platform_device *pdev = to_platform_device(dev); | 2361 | struct platform_device *pdev = to_platform_device(dev); |
2330 | struct net_device *ndev = platform_get_drvdata(pdev); | 2362 | struct net_device *ndev = platform_get_drvdata(pdev); |
2363 | struct cpsw_priv *priv = netdev_priv(ndev); | ||
2331 | 2364 | ||
2332 | pm_runtime_get_sync(&pdev->dev); | 2365 | pm_runtime_get_sync(&pdev->dev); |
2333 | 2366 | ||
2334 | /* Select default pin state */ | 2367 | /* Select default pin state */ |
2335 | pinctrl_pm_select_default_state(&pdev->dev); | 2368 | pinctrl_pm_select_default_state(&pdev->dev); |
2336 | 2369 | ||
2337 | if (netif_running(ndev)) | 2370 | if (priv->data.dual_emac) { |
2338 | cpsw_ndo_open(ndev); | 2371 | int i; |
2372 | |||
2373 | for (i = 0; i < priv->data.slaves; i++) { | ||
2374 | if (netif_running(priv->slaves[i].ndev)) | ||
2375 | cpsw_ndo_open(priv->slaves[i].ndev); | ||
2376 | } | ||
2377 | } else { | ||
2378 | if (netif_running(ndev)) | ||
2379 | cpsw_ndo_open(ndev); | ||
2380 | } | ||
2339 | return 0; | 2381 | return 0; |
2340 | } | 2382 | } |
2341 | 2383 | ||
diff --git a/drivers/net/fddi/skfp/h/skfbi.h b/drivers/net/fddi/skfp/h/skfbi.h index c1ba26c06d73..3de2f0d15fe2 100644 --- a/drivers/net/fddi/skfp/h/skfbi.h +++ b/drivers/net/fddi/skfp/h/skfbi.h | |||
@@ -147,11 +147,6 @@ | |||
147 | #define PCI_MEM64BIT (2<<1) /* Base addr anywhere in 64 Bit range */ | 147 | #define PCI_MEM64BIT (2<<1) /* Base addr anywhere in 64 Bit range */ |
148 | #define PCI_MEMSPACE 0x00000001L /* Bit 0: Memory Space Indic. */ | 148 | #define PCI_MEMSPACE 0x00000001L /* Bit 0: Memory Space Indic. */ |
149 | 149 | ||
150 | /* PCI_BASE_2ND 32 bit 2nd Base address */ | ||
151 | #define PCI_IOBASE 0xffffff00L /* Bit 31..8: I/O Base address */ | ||
152 | #define PCI_IOSIZE 0x000000fcL /* Bit 7..2: I/O Size Requirements */ | ||
153 | #define PCI_IOSPACE 0x00000001L /* Bit 0: I/O Space Indicator */ | ||
154 | |||
155 | /* PCI_SUB_VID 16 bit Subsystem Vendor ID */ | 150 | /* PCI_SUB_VID 16 bit Subsystem Vendor ID */ |
156 | /* PCI_SUB_ID 16 bit Subsystem ID */ | 151 | /* PCI_SUB_ID 16 bit Subsystem ID */ |
157 | 152 | ||
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index a9c5eaadc426..0fcb5e7eb073 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -387,6 +387,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) | |||
387 | int hdr_offset; | 387 | int hdr_offset; |
388 | u32 net_trans_info; | 388 | u32 net_trans_info; |
389 | u32 hash; | 389 | u32 hash; |
390 | u32 skb_length = skb->len; | ||
390 | 391 | ||
391 | 392 | ||
392 | /* We will at most need two pages to describe the rndis | 393 | /* We will at most need two pages to describe the rndis |
@@ -562,7 +563,7 @@ do_send: | |||
562 | 563 | ||
563 | drop: | 564 | drop: |
564 | if (ret == 0) { | 565 | if (ret == 0) { |
565 | net->stats.tx_bytes += skb->len; | 566 | net->stats.tx_bytes += skb_length; |
566 | net->stats.tx_packets++; | 567 | net->stats.tx_packets++; |
567 | } else { | 568 | } else { |
568 | kfree(packet); | 569 | kfree(packet); |
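The netvsc fix above snapshots skb->len before the send call because on success the skb may already have been queued for completion (and possibly freed) by the time the statistics are updated; dereferencing it afterwards would be a use-after-free. The general pattern, with a hypothetical example_send():

	static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *net)
	{
		u32 skb_length = skb->len;	/* snapshot before the skb may be consumed */
		int ret;

		ret = example_send(net, skb);	/* may queue or free the skb */
		if (ret == 0) {
			net->stats.tx_bytes += skb_length;	/* never skb->len here */
			net->stats.tx_packets++;
		}
		return NETDEV_TX_OK;
	}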
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index a96955597755..726edabff26b 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/netpoll.h> | 36 | #include <linux/netpoll.h> |
37 | 37 | ||
38 | #define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) | 38 | #define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) |
39 | #define MACVLAN_BC_QUEUE_LEN 1000 | ||
39 | 40 | ||
40 | struct macvlan_port { | 41 | struct macvlan_port { |
41 | struct net_device *dev; | 42 | struct net_device *dev; |
@@ -248,7 +249,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, | |||
248 | goto err; | 249 | goto err; |
249 | 250 | ||
250 | spin_lock(&port->bc_queue.lock); | 251 | spin_lock(&port->bc_queue.lock); |
251 | if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) { | 252 | if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) { |
252 | __skb_queue_tail(&port->bc_queue, nskb); | 253 | __skb_queue_tail(&port->bc_queue, nskb); |
253 | err = 0; | 254 | err = 0; |
254 | } | 255 | } |
@@ -806,6 +807,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, | |||
806 | features, | 807 | features, |
807 | mask); | 808 | mask); |
808 | features |= ALWAYS_ON_FEATURES; | 809 | features |= ALWAYS_ON_FEATURES; |
810 | features &= ~NETIF_F_NETNS_LOCAL; | ||
809 | 811 | ||
810 | return features; | 812 | return features; |
811 | } | 813 | } |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 3381c4f91a8c..0c6adaaf898c 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -112,17 +112,15 @@ out: | |||
112 | return err; | 112 | return err; |
113 | } | 113 | } |
114 | 114 | ||
115 | /* Requires RTNL */ | ||
115 | static int macvtap_set_queue(struct net_device *dev, struct file *file, | 116 | static int macvtap_set_queue(struct net_device *dev, struct file *file, |
116 | struct macvtap_queue *q) | 117 | struct macvtap_queue *q) |
117 | { | 118 | { |
118 | struct macvlan_dev *vlan = netdev_priv(dev); | 119 | struct macvlan_dev *vlan = netdev_priv(dev); |
119 | int err = -EBUSY; | ||
120 | 120 | ||
121 | rtnl_lock(); | ||
122 | if (vlan->numqueues == MAX_MACVTAP_QUEUES) | 121 | if (vlan->numqueues == MAX_MACVTAP_QUEUES) |
123 | goto out; | 122 | return -EBUSY; |
124 | 123 | ||
125 | err = 0; | ||
126 | rcu_assign_pointer(q->vlan, vlan); | 124 | rcu_assign_pointer(q->vlan, vlan); |
127 | rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); | 125 | rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); |
128 | sock_hold(&q->sk); | 126 | sock_hold(&q->sk); |
@@ -136,9 +134,7 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file, | |||
136 | vlan->numvtaps++; | 134 | vlan->numvtaps++; |
137 | vlan->numqueues++; | 135 | vlan->numqueues++; |
138 | 136 | ||
139 | out: | 137 | return 0; |
140 | rtnl_unlock(); | ||
141 | return err; | ||
142 | } | 138 | } |
143 | 139 | ||
144 | static int macvtap_disable_queue(struct macvtap_queue *q) | 140 | static int macvtap_disable_queue(struct macvtap_queue *q) |
@@ -454,11 +450,12 @@ static void macvtap_sock_destruct(struct sock *sk) | |||
454 | static int macvtap_open(struct inode *inode, struct file *file) | 450 | static int macvtap_open(struct inode *inode, struct file *file) |
455 | { | 451 | { |
456 | struct net *net = current->nsproxy->net_ns; | 452 | struct net *net = current->nsproxy->net_ns; |
457 | struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode)); | 453 | struct net_device *dev; |
458 | struct macvtap_queue *q; | 454 | struct macvtap_queue *q; |
459 | int err; | 455 | int err = -ENODEV; |
460 | 456 | ||
461 | err = -ENODEV; | 457 | rtnl_lock(); |
458 | dev = dev_get_by_macvtap_minor(iminor(inode)); | ||
462 | if (!dev) | 459 | if (!dev) |
463 | goto out; | 460 | goto out; |
464 | 461 | ||
@@ -498,6 +495,7 @@ out: | |||
498 | if (dev) | 495 | if (dev) |
499 | dev_put(dev); | 496 | dev_put(dev); |
500 | 497 | ||
498 | rtnl_unlock(); | ||
501 | return err; | 499 | return err; |
502 | } | 500 | } |
503 | 501 | ||
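The macvtap change above widens the RTNL-protected region: rather than macvtap_set_queue() taking and dropping rtnl_lock internally, macvtap_open() now holds RTNL across both the device lookup and the queue attach, so the device cannot go away or change its queue accounting between the two steps. The resulting shape of the open path, roughly (error handling trimmed, names as in the diff):

	static int example_open(struct inode *inode, struct file *file)
	{
		struct net_device *dev;
		struct macvtap_queue *q;
		int err = -ENODEV;

		rtnl_lock();				/* covers lookup + attach */
		dev = dev_get_by_macvtap_minor(iminor(inode));
		if (!dev)
			goto out;

		/* ... allocate and initialise q ... */

		err = macvtap_set_queue(dev, file, q);	/* now documented: requires RTNL */
	out:
		if (dev)
			dev_put(dev);
		rtnl_unlock();
		return err;
	}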
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index fd0ea7c50ee6..011dbda2b2f1 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
@@ -592,8 +592,7 @@ static struct phy_driver ksphy_driver[] = { | |||
592 | .phy_id = PHY_ID_KSZ9031, | 592 | .phy_id = PHY_ID_KSZ9031, |
593 | .phy_id_mask = 0x00fffff0, | 593 | .phy_id_mask = 0x00fffff0, |
594 | .name = "Micrel KSZ9031 Gigabit PHY", | 594 | .name = "Micrel KSZ9031 Gigabit PHY", |
595 | .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause | 595 | .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause), |
596 | | SUPPORTED_Asym_Pause), | ||
597 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, | 596 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, |
598 | .config_init = ksz9031_config_init, | 597 | .config_init = ksz9031_config_init, |
599 | .config_aneg = genphy_config_aneg, | 598 | .config_aneg = genphy_config_aneg, |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index c94e2a27446a..a854d38c231d 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -1036,31 +1036,31 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) | |||
1036 | /* First check if the EEE ability is supported */ | 1036 | /* First check if the EEE ability is supported */ |
1037 | eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, | 1037 | eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, |
1038 | MDIO_MMD_PCS, phydev->addr); | 1038 | MDIO_MMD_PCS, phydev->addr); |
1039 | if (eee_cap < 0) | 1039 | if (eee_cap <= 0) |
1040 | return eee_cap; | 1040 | goto eee_exit_err; |
1041 | 1041 | ||
1042 | cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap); | 1042 | cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap); |
1043 | if (!cap) | 1043 | if (!cap) |
1044 | return -EPROTONOSUPPORT; | 1044 | goto eee_exit_err; |
1045 | 1045 | ||
1046 | /* Check which link settings negotiated and verify it in | 1046 | /* Check which link settings negotiated and verify it in |
1047 | * the EEE advertising registers. | 1047 | * the EEE advertising registers. |
1048 | */ | 1048 | */ |
1049 | eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, | 1049 | eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, |
1050 | MDIO_MMD_AN, phydev->addr); | 1050 | MDIO_MMD_AN, phydev->addr); |
1051 | if (eee_lp < 0) | 1051 | if (eee_lp <= 0) |
1052 | return eee_lp; | 1052 | goto eee_exit_err; |
1053 | 1053 | ||
1054 | eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, | 1054 | eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, |
1055 | MDIO_MMD_AN, phydev->addr); | 1055 | MDIO_MMD_AN, phydev->addr); |
1056 | if (eee_adv < 0) | 1056 | if (eee_adv <= 0) |
1057 | return eee_adv; | 1057 | goto eee_exit_err; |
1058 | 1058 | ||
1059 | adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); | 1059 | adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); |
1060 | lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); | 1060 | lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); |
1061 | idx = phy_find_setting(phydev->speed, phydev->duplex); | 1061 | idx = phy_find_setting(phydev->speed, phydev->duplex); |
1062 | if (!(lp & adv & settings[idx].setting)) | 1062 | if (!(lp & adv & settings[idx].setting)) |
1063 | return -EPROTONOSUPPORT; | 1063 | goto eee_exit_err; |
1064 | 1064 | ||
1065 | if (clk_stop_enable) { | 1065 | if (clk_stop_enable) { |
1066 | /* Configure the PHY to stop receiving xMII | 1066 | /* Configure the PHY to stop receiving xMII |
@@ -1080,7 +1080,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) | |||
1080 | 1080 | ||
1081 | return 0; /* EEE supported */ | 1081 | return 0; /* EEE supported */ |
1082 | } | 1082 | } |
1083 | 1083 | eee_exit_err: | |
1084 | return -EPROTONOSUPPORT; | 1084 | return -EPROTONOSUPPORT; |
1085 | } | 1085 | } |
1086 | EXPORT_SYMBOL(phy_init_eee); | 1086 | EXPORT_SYMBOL(phy_init_eee); |
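The phy_init_eee() rework above treats a zero register value (nothing advertised) the same way as a read error and funnels every failure through one exit label, the usual single-exit pattern for "capability not usable" checks:

	int val;

	val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
				    MDIO_MMD_PCS, phydev->addr);
	if (val <= 0)			/* read error, or no EEE ability at all */
		goto eee_exit_err;

	/* ... remaining checks, each jumping to eee_exit_err on failure ... */

	return 0;			/* EEE supported */

eee_exit_err:
	return -EPROTONOSUPPORT;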
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 87f710476217..604ef210a4de 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <net/ip6_checksum.h> | 24 | #include <net/ip6_checksum.h> |
25 | 25 | ||
26 | /* Version Information */ | 26 | /* Version Information */ |
27 | #define DRIVER_VERSION "v1.06.0 (2014/03/03)" | 27 | #define DRIVER_VERSION "v1.06.1 (2014/10/01)" |
28 | #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" | 28 | #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" |
29 | #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" | 29 | #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" |
30 | #define MODULENAME "r8152" | 30 | #define MODULENAME "r8152" |
@@ -1949,10 +1949,34 @@ static void rxdy_gated_en(struct r8152 *tp, bool enable) | |||
1949 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); | 1949 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); |
1950 | } | 1950 | } |
1951 | 1951 | ||
1952 | static int rtl_start_rx(struct r8152 *tp) | ||
1953 | { | ||
1954 | int i, ret = 0; | ||
1955 | |||
1956 | INIT_LIST_HEAD(&tp->rx_done); | ||
1957 | for (i = 0; i < RTL8152_MAX_RX; i++) { | ||
1958 | INIT_LIST_HEAD(&tp->rx_info[i].list); | ||
1959 | ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL); | ||
1960 | if (ret) | ||
1961 | break; | ||
1962 | } | ||
1963 | |||
1964 | return ret; | ||
1965 | } | ||
1966 | |||
1967 | static int rtl_stop_rx(struct r8152 *tp) | ||
1968 | { | ||
1969 | int i; | ||
1970 | |||
1971 | for (i = 0; i < RTL8152_MAX_RX; i++) | ||
1972 | usb_kill_urb(tp->rx_info[i].urb); | ||
1973 | |||
1974 | return 0; | ||
1975 | } | ||
1976 | |||
1952 | static int rtl_enable(struct r8152 *tp) | 1977 | static int rtl_enable(struct r8152 *tp) |
1953 | { | 1978 | { |
1954 | u32 ocp_data; | 1979 | u32 ocp_data; |
1955 | int i, ret; | ||
1956 | 1980 | ||
1957 | r8152b_reset_packet_filter(tp); | 1981 | r8152b_reset_packet_filter(tp); |
1958 | 1982 | ||
@@ -1962,14 +1986,7 @@ static int rtl_enable(struct r8152 *tp) | |||
1962 | 1986 | ||
1963 | rxdy_gated_en(tp, false); | 1987 | rxdy_gated_en(tp, false); |
1964 | 1988 | ||
1965 | INIT_LIST_HEAD(&tp->rx_done); | 1989 | return rtl_start_rx(tp); |
1966 | ret = 0; | ||
1967 | for (i = 0; i < RTL8152_MAX_RX; i++) { | ||
1968 | INIT_LIST_HEAD(&tp->rx_info[i].list); | ||
1969 | ret |= r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL); | ||
1970 | } | ||
1971 | |||
1972 | return ret; | ||
1973 | } | 1990 | } |
1974 | 1991 | ||
1975 | static int rtl8152_enable(struct r8152 *tp) | 1992 | static int rtl8152_enable(struct r8152 *tp) |
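Factoring RX URB submission into rtl_start_rx()/rtl_stop_rx() above lets the selective-suspend paths park and refill reception without a full disable/enable cycle, and the loop now returns the first real errno instead of OR-ing all results together. The helpers are then used later in this patch roughly like this:

	/* Runtime (selective) suspend: keep the MAC configured, park RX only. */
	if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
		rtl_stop_rx(tp);		/* kill the RX URBs */
		rtl_runtime_suspend_enable(tp, true);
	}

	/* ... and on resume, refill the RX URBs if the link was up. */
	if (tp->speed & LINK_STATUS)
		rtl_start_rx(tp);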
@@ -2019,7 +2036,7 @@ static int rtl8153_enable(struct r8152 *tp) | |||
2019 | return rtl_enable(tp); | 2036 | return rtl_enable(tp); |
2020 | } | 2037 | } |
2021 | 2038 | ||
2022 | static void rtl8152_disable(struct r8152 *tp) | 2039 | static void rtl_disable(struct r8152 *tp) |
2023 | { | 2040 | { |
2024 | u32 ocp_data; | 2041 | u32 ocp_data; |
2025 | int i; | 2042 | int i; |
@@ -2053,8 +2070,7 @@ static void rtl8152_disable(struct r8152 *tp) | |||
2053 | mdelay(1); | 2070 | mdelay(1); |
2054 | } | 2071 | } |
2055 | 2072 | ||
2056 | for (i = 0; i < RTL8152_MAX_RX; i++) | 2073 | rtl_stop_rx(tp); |
2057 | usb_kill_urb(tp->rx_info[i].urb); | ||
2058 | 2074 | ||
2059 | rtl8152_nic_reset(tp); | 2075 | rtl8152_nic_reset(tp); |
2060 | } | 2076 | } |
@@ -2185,28 +2201,6 @@ static void rtl_phy_reset(struct r8152 *tp) | |||
2185 | } | 2201 | } |
2186 | } | 2202 | } |
2187 | 2203 | ||
2188 | static void rtl_clear_bp(struct r8152 *tp) | ||
2189 | { | ||
2190 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0); | ||
2191 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0); | ||
2192 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0); | ||
2193 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0); | ||
2194 | ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0); | ||
2195 | ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0); | ||
2196 | ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0); | ||
2197 | ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0); | ||
2198 | mdelay(3); | ||
2199 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0); | ||
2200 | ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0); | ||
2201 | } | ||
2202 | |||
2203 | static void r8153_clear_bp(struct r8152 *tp) | ||
2204 | { | ||
2205 | ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0); | ||
2206 | ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0); | ||
2207 | rtl_clear_bp(tp); | ||
2208 | } | ||
2209 | |||
2210 | static void r8153_teredo_off(struct r8152 *tp) | 2204 | static void r8153_teredo_off(struct r8152 *tp) |
2211 | { | 2205 | { |
2212 | u32 ocp_data; | 2206 | u32 ocp_data; |
@@ -2232,6 +2226,13 @@ static inline void r8152b_enable_aldps(struct r8152 *tp) | |||
2232 | LINKENA | DIS_SDSAVE); | 2226 | LINKENA | DIS_SDSAVE); |
2233 | } | 2227 | } |
2234 | 2228 | ||
2229 | static void rtl8152_disable(struct r8152 *tp) | ||
2230 | { | ||
2231 | r8152b_disable_aldps(tp); | ||
2232 | rtl_disable(tp); | ||
2233 | r8152b_enable_aldps(tp); | ||
2234 | } | ||
2235 | |||
2235 | static void r8152b_hw_phy_cfg(struct r8152 *tp) | 2236 | static void r8152b_hw_phy_cfg(struct r8152 *tp) |
2236 | { | 2237 | { |
2237 | u16 data; | 2238 | u16 data; |
@@ -2242,11 +2243,6 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp) | |||
2242 | r8152_mdio_write(tp, MII_BMCR, data); | 2243 | r8152_mdio_write(tp, MII_BMCR, data); |
2243 | } | 2244 | } |
2244 | 2245 | ||
2245 | r8152b_disable_aldps(tp); | ||
2246 | |||
2247 | rtl_clear_bp(tp); | ||
2248 | |||
2249 | r8152b_enable_aldps(tp); | ||
2250 | set_bit(PHY_RESET, &tp->flags); | 2246 | set_bit(PHY_RESET, &tp->flags); |
2251 | } | 2247 | } |
2252 | 2248 | ||
@@ -2255,9 +2251,6 @@ static void r8152b_exit_oob(struct r8152 *tp) | |||
2255 | u32 ocp_data; | 2251 | u32 ocp_data; |
2256 | int i; | 2252 | int i; |
2257 | 2253 | ||
2258 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | ||
2259 | return; | ||
2260 | |||
2261 | ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); | 2254 | ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); |
2262 | ocp_data &= ~RCR_ACPT_ALL; | 2255 | ocp_data &= ~RCR_ACPT_ALL; |
2263 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); | 2256 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); |
@@ -2347,7 +2340,7 @@ static void r8152b_enter_oob(struct r8152 *tp) | |||
2347 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); | 2340 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); |
2348 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); | 2341 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); |
2349 | 2342 | ||
2350 | rtl8152_disable(tp); | 2343 | rtl_disable(tp); |
2351 | 2344 | ||
2352 | for (i = 0; i < 1000; i++) { | 2345 | for (i = 0; i < 1000; i++) { |
2353 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); | 2346 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); |
@@ -2400,8 +2393,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) | |||
2400 | r8152_mdio_write(tp, MII_BMCR, data); | 2393 | r8152_mdio_write(tp, MII_BMCR, data); |
2401 | } | 2394 | } |
2402 | 2395 | ||
2403 | r8153_clear_bp(tp); | ||
2404 | |||
2405 | if (tp->version == RTL_VER_03) { | 2396 | if (tp->version == RTL_VER_03) { |
2406 | data = ocp_reg_read(tp, OCP_EEE_CFG); | 2397 | data = ocp_reg_read(tp, OCP_EEE_CFG); |
2407 | data &= ~CTAP_SHORT_EN; | 2398 | data &= ~CTAP_SHORT_EN; |
@@ -2485,9 +2476,6 @@ static void r8153_first_init(struct r8152 *tp) | |||
2485 | u32 ocp_data; | 2476 | u32 ocp_data; |
2486 | int i; | 2477 | int i; |
2487 | 2478 | ||
2488 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | ||
2489 | return; | ||
2490 | |||
2491 | rxdy_gated_en(tp, true); | 2479 | rxdy_gated_en(tp, true); |
2492 | r8153_teredo_off(tp); | 2480 | r8153_teredo_off(tp); |
2493 | 2481 | ||
@@ -2560,7 +2548,7 @@ static void r8153_enter_oob(struct r8152 *tp) | |||
2560 | ocp_data &= ~NOW_IS_OOB; | 2548 | ocp_data &= ~NOW_IS_OOB; |
2561 | ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); | 2549 | ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); |
2562 | 2550 | ||
2563 | rtl8152_disable(tp); | 2551 | rtl_disable(tp); |
2564 | 2552 | ||
2565 | for (i = 0; i < 1000; i++) { | 2553 | for (i = 0; i < 1000; i++) { |
2566 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); | 2554 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); |
@@ -2624,6 +2612,13 @@ static void r8153_enable_aldps(struct r8152 *tp) | |||
2624 | ocp_reg_write(tp, OCP_POWER_CFG, data); | 2612 | ocp_reg_write(tp, OCP_POWER_CFG, data); |
2625 | } | 2613 | } |
2626 | 2614 | ||
2615 | static void rtl8153_disable(struct r8152 *tp) | ||
2616 | { | ||
2617 | r8153_disable_aldps(tp); | ||
2618 | rtl_disable(tp); | ||
2619 | r8153_enable_aldps(tp); | ||
2620 | } | ||
2621 | |||
2627 | static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) | 2622 | static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) |
2628 | { | 2623 | { |
2629 | u16 bmcr, anar, gbcr; | 2624 | u16 bmcr, anar, gbcr; |
@@ -2714,6 +2709,16 @@ out: | |||
2714 | return ret; | 2709 | return ret; |
2715 | } | 2710 | } |
2716 | 2711 | ||
2712 | static void rtl8152_up(struct r8152 *tp) | ||
2713 | { | ||
2714 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | ||
2715 | return; | ||
2716 | |||
2717 | r8152b_disable_aldps(tp); | ||
2718 | r8152b_exit_oob(tp); | ||
2719 | r8152b_enable_aldps(tp); | ||
2720 | } | ||
2721 | |||
2717 | static void rtl8152_down(struct r8152 *tp) | 2722 | static void rtl8152_down(struct r8152 *tp) |
2718 | { | 2723 | { |
2719 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) { | 2724 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) { |
@@ -2727,6 +2732,16 @@ static void rtl8152_down(struct r8152 *tp) | |||
2727 | r8152b_enable_aldps(tp); | 2732 | r8152b_enable_aldps(tp); |
2728 | } | 2733 | } |
2729 | 2734 | ||
2735 | static void rtl8153_up(struct r8152 *tp) | ||
2736 | { | ||
2737 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | ||
2738 | return; | ||
2739 | |||
2740 | r8153_disable_aldps(tp); | ||
2741 | r8153_first_init(tp); | ||
2742 | r8153_enable_aldps(tp); | ||
2743 | } | ||
2744 | |||
2730 | static void rtl8153_down(struct r8152 *tp) | 2745 | static void rtl8153_down(struct r8152 *tp) |
2731 | { | 2746 | { |
2732 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) { | 2747 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) { |
@@ -2946,6 +2961,8 @@ static void r8152b_init(struct r8152 *tp) | |||
2946 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | 2961 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) |
2947 | return; | 2962 | return; |
2948 | 2963 | ||
2964 | r8152b_disable_aldps(tp); | ||
2965 | |||
2949 | if (tp->version == RTL_VER_01) { | 2966 | if (tp->version == RTL_VER_01) { |
2950 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); | 2967 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); |
2951 | ocp_data &= ~LED_MODE_MASK; | 2968 | ocp_data &= ~LED_MODE_MASK; |
@@ -2984,6 +3001,7 @@ static void r8153_init(struct r8152 *tp) | |||
2984 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | 3001 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) |
2985 | return; | 3002 | return; |
2986 | 3003 | ||
3004 | r8153_disable_aldps(tp); | ||
2987 | r8153_u1u2en(tp, false); | 3005 | r8153_u1u2en(tp, false); |
2988 | 3006 | ||
2989 | for (i = 0; i < 500; i++) { | 3007 | for (i = 0; i < 500; i++) { |
@@ -3055,13 +3073,14 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) | |||
3055 | clear_bit(WORK_ENABLE, &tp->flags); | 3073 | clear_bit(WORK_ENABLE, &tp->flags); |
3056 | usb_kill_urb(tp->intr_urb); | 3074 | usb_kill_urb(tp->intr_urb); |
3057 | cancel_delayed_work_sync(&tp->schedule); | 3075 | cancel_delayed_work_sync(&tp->schedule); |
3076 | tasklet_disable(&tp->tl); | ||
3058 | if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { | 3077 | if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { |
3078 | rtl_stop_rx(tp); | ||
3059 | rtl_runtime_suspend_enable(tp, true); | 3079 | rtl_runtime_suspend_enable(tp, true); |
3060 | } else { | 3080 | } else { |
3061 | tasklet_disable(&tp->tl); | ||
3062 | tp->rtl_ops.down(tp); | 3081 | tp->rtl_ops.down(tp); |
3063 | tasklet_enable(&tp->tl); | ||
3064 | } | 3082 | } |
3083 | tasklet_enable(&tp->tl); | ||
3065 | } | 3084 | } |
3066 | 3085 | ||
3067 | return 0; | 3086 | return 0; |
@@ -3080,17 +3099,18 @@ static int rtl8152_resume(struct usb_interface *intf) | |||
3080 | if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { | 3099 | if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { |
3081 | rtl_runtime_suspend_enable(tp, false); | 3100 | rtl_runtime_suspend_enable(tp, false); |
3082 | clear_bit(SELECTIVE_SUSPEND, &tp->flags); | 3101 | clear_bit(SELECTIVE_SUSPEND, &tp->flags); |
3102 | set_bit(WORK_ENABLE, &tp->flags); | ||
3083 | if (tp->speed & LINK_STATUS) | 3103 | if (tp->speed & LINK_STATUS) |
3084 | tp->rtl_ops.disable(tp); | 3104 | rtl_start_rx(tp); |
3085 | } else { | 3105 | } else { |
3086 | tp->rtl_ops.up(tp); | 3106 | tp->rtl_ops.up(tp); |
3087 | rtl8152_set_speed(tp, AUTONEG_ENABLE, | 3107 | rtl8152_set_speed(tp, AUTONEG_ENABLE, |
3088 | tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, | 3108 | tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, |
3089 | DUPLEX_FULL); | 3109 | DUPLEX_FULL); |
3110 | tp->speed = 0; | ||
3111 | netif_carrier_off(tp->netdev); | ||
3112 | set_bit(WORK_ENABLE, &tp->flags); | ||
3090 | } | 3113 | } |
3091 | tp->speed = 0; | ||
3092 | netif_carrier_off(tp->netdev); | ||
3093 | set_bit(WORK_ENABLE, &tp->flags); | ||
3094 | usb_submit_urb(tp->intr_urb, GFP_KERNEL); | 3114 | usb_submit_urb(tp->intr_urb, GFP_KERNEL); |
3095 | } | 3115 | } |
3096 | 3116 | ||
@@ -3377,7 +3397,7 @@ static void rtl8153_unload(struct r8152 *tp) | |||
3377 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | 3397 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) |
3378 | return; | 3398 | return; |
3379 | 3399 | ||
3380 | r8153_power_cut_en(tp, true); | 3400 | r8153_power_cut_en(tp, false); |
3381 | } | 3401 | } |
3382 | 3402 | ||
3383 | static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) | 3403 | static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) |
@@ -3392,7 +3412,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) | |||
3392 | ops->init = r8152b_init; | 3412 | ops->init = r8152b_init; |
3393 | ops->enable = rtl8152_enable; | 3413 | ops->enable = rtl8152_enable; |
3394 | ops->disable = rtl8152_disable; | 3414 | ops->disable = rtl8152_disable; |
3395 | ops->up = r8152b_exit_oob; | 3415 | ops->up = rtl8152_up; |
3396 | ops->down = rtl8152_down; | 3416 | ops->down = rtl8152_down; |
3397 | ops->unload = rtl8152_unload; | 3417 | ops->unload = rtl8152_unload; |
3398 | ret = 0; | 3418 | ret = 0; |
@@ -3400,8 +3420,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) | |||
3400 | case PRODUCT_ID_RTL8153: | 3420 | case PRODUCT_ID_RTL8153: |
3401 | ops->init = r8153_init; | 3421 | ops->init = r8153_init; |
3402 | ops->enable = rtl8153_enable; | 3422 | ops->enable = rtl8153_enable; |
3403 | ops->disable = rtl8152_disable; | 3423 | ops->disable = rtl8153_disable; |
3404 | ops->up = r8153_first_init; | 3424 | ops->up = rtl8153_up; |
3405 | ops->down = rtl8153_down; | 3425 | ops->down = rtl8153_down; |
3406 | ops->unload = rtl8153_unload; | 3426 | ops->unload = rtl8153_unload; |
3407 | ret = 0; | 3427 | ret = 0; |
@@ -3416,8 +3436,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) | |||
3416 | case PRODUCT_ID_SAMSUNG: | 3436 | case PRODUCT_ID_SAMSUNG: |
3417 | ops->init = r8153_init; | 3437 | ops->init = r8153_init; |
3418 | ops->enable = rtl8153_enable; | 3438 | ops->enable = rtl8153_enable; |
3419 | ops->disable = rtl8152_disable; | 3439 | ops->disable = rtl8153_disable; |
3420 | ops->up = r8153_first_init; | 3440 | ops->up = rtl8153_up; |
3421 | ops->down = rtl8153_down; | 3441 | ops->down = rtl8153_down; |
3422 | ops->unload = rtl8153_unload; | 3442 | ops->unload = rtl8153_unload; |
3423 | ret = 0; | 3443 | ret = 0; |
@@ -3530,7 +3550,11 @@ static void rtl8152_disconnect(struct usb_interface *intf) | |||
3530 | 3550 | ||
3531 | usb_set_intfdata(intf, NULL); | 3551 | usb_set_intfdata(intf, NULL); |
3532 | if (tp) { | 3552 | if (tp) { |
3533 | set_bit(RTL8152_UNPLUG, &tp->flags); | 3553 | struct usb_device *udev = tp->udev; |
3554 | |||
3555 | if (udev->state == USB_STATE_NOTATTACHED) | ||
3556 | set_bit(RTL8152_UNPLUG, &tp->flags); | ||
3557 | |||
3534 | tasklet_kill(&tp->tl); | 3558 | tasklet_kill(&tp->tl); |
3535 | unregister_netdev(tp->netdev); | 3559 | unregister_netdev(tp->netdev); |
3536 | tp->rtl_ops.unload(tp); | 3560 | tp->rtl_ops.unload(tp); |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index d6e90c72c257..6dfcbf523936 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -2056,7 +2056,6 @@ vmxnet3_set_mc(struct net_device *netdev) | |||
2056 | if (!netdev_mc_empty(netdev)) { | 2056 | if (!netdev_mc_empty(netdev)) { |
2057 | new_table = vmxnet3_copy_mc(netdev); | 2057 | new_table = vmxnet3_copy_mc(netdev); |
2058 | if (new_table) { | 2058 | if (new_table) { |
2059 | new_mode |= VMXNET3_RXM_MCAST; | ||
2060 | rxConf->mfTableLen = cpu_to_le16( | 2059 | rxConf->mfTableLen = cpu_to_le16( |
2061 | netdev_mc_count(netdev) * ETH_ALEN); | 2060 | netdev_mc_count(netdev) * ETH_ALEN); |
2062 | new_table_pa = dma_map_single( | 2061 | new_table_pa = dma_map_single( |
@@ -2064,15 +2063,18 @@ vmxnet3_set_mc(struct net_device *netdev) | |||
2064 | new_table, | 2063 | new_table, |
2065 | rxConf->mfTableLen, | 2064 | rxConf->mfTableLen, |
2066 | PCI_DMA_TODEVICE); | 2065 | PCI_DMA_TODEVICE); |
2066 | } | ||
2067 | |||
2068 | if (new_table_pa) { | ||
2069 | new_mode |= VMXNET3_RXM_MCAST; | ||
2067 | rxConf->mfTablePA = cpu_to_le64(new_table_pa); | 2070 | rxConf->mfTablePA = cpu_to_le64(new_table_pa); |
2068 | } else { | 2071 | } else { |
2069 | netdev_info(netdev, "failed to copy mcast list" | 2072 | netdev_info(netdev, |
2070 | ", setting ALL_MULTI\n"); | 2073 | "failed to copy mcast list, setting ALL_MULTI\n"); |
2071 | new_mode |= VMXNET3_RXM_ALL_MULTI; | 2074 | new_mode |= VMXNET3_RXM_ALL_MULTI; |
2072 | } | 2075 | } |
2073 | } | 2076 | } |
2074 | 2077 | ||
2075 | |||
2076 | if (!(new_mode & VMXNET3_RXM_MCAST)) { | 2078 | if (!(new_mode & VMXNET3_RXM_MCAST)) { |
2077 | rxConf->mfTableLen = 0; | 2079 | rxConf->mfTableLen = 0; |
2078 | rxConf->mfTablePA = 0; | 2080 | rxConf->mfTablePA = 0; |
@@ -2091,11 +2093,10 @@ vmxnet3_set_mc(struct net_device *netdev) | |||
2091 | VMXNET3_CMD_UPDATE_MAC_FILTERS); | 2093 | VMXNET3_CMD_UPDATE_MAC_FILTERS); |
2092 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | 2094 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); |
2093 | 2095 | ||
2094 | if (new_table) { | 2096 | if (new_table_pa) |
2095 | dma_unmap_single(&adapter->pdev->dev, new_table_pa, | 2097 | dma_unmap_single(&adapter->pdev->dev, new_table_pa, |
2096 | rxConf->mfTableLen, PCI_DMA_TODEVICE); | 2098 | rxConf->mfTableLen, PCI_DMA_TODEVICE); |
2097 | kfree(new_table); | 2099 | kfree(new_table); |
2098 | } | ||
2099 | } | 2100 | } |
2100 | 2101 | ||
2101 | void | 2102 | void |
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 29ee77f2c97f..3759479f959a 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
@@ -69,10 +69,10 @@ | |||
69 | /* | 69 | /* |
70 | * Version numbers | 70 | * Version numbers |
71 | */ | 71 | */ |
72 | #define VMXNET3_DRIVER_VERSION_STRING "1.2.0.0-k" | 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.2.1.0-k" |
73 | 73 | ||
74 | /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ | 74 | /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ |
75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01020000 | 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01020100 |
76 | 76 | ||
77 | #if defined(CONFIG_PCI_MSI) | 77 | #if defined(CONFIG_PCI_MSI) |
78 | /* RSS only makes sense if MSI-X is supported. */ | 78 | /* RSS only makes sense if MSI-X is supported. */ |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 1fb7b37d1402..beb377b2d4b7 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -1327,7 +1327,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) | |||
1327 | } else if (vxlan->flags & VXLAN_F_L3MISS) { | 1327 | } else if (vxlan->flags & VXLAN_F_L3MISS) { |
1328 | union vxlan_addr ipa = { | 1328 | union vxlan_addr ipa = { |
1329 | .sin.sin_addr.s_addr = tip, | 1329 | .sin.sin_addr.s_addr = tip, |
1330 | .sa.sa_family = AF_INET, | 1330 | .sin.sin_family = AF_INET, |
1331 | }; | 1331 | }; |
1332 | 1332 | ||
1333 | vxlan_ip_miss(dev, &ipa); | 1333 | vxlan_ip_miss(dev, &ipa); |
@@ -1488,7 +1488,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) | |||
1488 | } else if (vxlan->flags & VXLAN_F_L3MISS) { | 1488 | } else if (vxlan->flags & VXLAN_F_L3MISS) { |
1489 | union vxlan_addr ipa = { | 1489 | union vxlan_addr ipa = { |
1490 | .sin6.sin6_addr = msg->target, | 1490 | .sin6.sin6_addr = msg->target, |
1491 | .sa.sa_family = AF_INET6, | 1491 | .sin6.sin6_family = AF_INET6, |
1492 | }; | 1492 | }; |
1493 | 1493 | ||
1494 | vxlan_ip_miss(dev, &ipa); | 1494 | vxlan_ip_miss(dev, &ipa); |
@@ -1521,7 +1521,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) | |||
1521 | if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { | 1521 | if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { |
1522 | union vxlan_addr ipa = { | 1522 | union vxlan_addr ipa = { |
1523 | .sin.sin_addr.s_addr = pip->daddr, | 1523 | .sin.sin_addr.s_addr = pip->daddr, |
1524 | .sa.sa_family = AF_INET, | 1524 | .sin.sin_family = AF_INET, |
1525 | }; | 1525 | }; |
1526 | 1526 | ||
1527 | vxlan_ip_miss(dev, &ipa); | 1527 | vxlan_ip_miss(dev, &ipa); |
@@ -1542,7 +1542,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) | |||
1542 | if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { | 1542 | if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { |
1543 | union vxlan_addr ipa = { | 1543 | union vxlan_addr ipa = { |
1544 | .sin6.sin6_addr = pip6->daddr, | 1544 | .sin6.sin6_addr = pip6->daddr, |
1545 | .sa.sa_family = AF_INET6, | 1545 | .sin6.sin6_family = AF_INET6, |
1546 | }; | 1546 | }; |
1547 | 1547 | ||
1548 | vxlan_ip_miss(dev, &ipa); | 1548 | vxlan_ip_miss(dev, &ipa); |
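The vxlan hunks above set the address family through the same union member whose address field is being initialized (.sin / .sin6) instead of through the overlapping .sa view. In a designated initializer that names two different members of one union, the later initializer can override the whole union (with at least some compilers the earlier store is zeroed), so the family would survive but the address would not; keeping both designators inside one member avoids that, and sin_family/sin6_family alias sa_family anyway. A standalone illustration (not driver code):

	union addr_example {
		struct sockaddr		sa;
		struct sockaddr_in	sin;
		struct sockaddr_in6	sin6;
	};

	/* Risky: two different union members named in one initializer; the
	 * second one may wipe what the first one set.
	 */
	union addr_example a = {
		.sin.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
		.sa.sa_family		= AF_INET,
	};

	/* Safe: stay inside a single member; sin_family aliases sa_family. */
	union addr_example b = {
		.sin.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
		.sin.sin_family		= AF_INET,
	};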
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index 334c2ece855a..da92bfa76b7c 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c | |||
@@ -2423,8 +2423,6 @@ static void at76_delete_device(struct at76_priv *priv) | |||
2423 | 2423 | ||
2424 | kfree_skb(priv->rx_skb); | 2424 | kfree_skb(priv->rx_skb); |
2425 | 2425 | ||
2426 | usb_put_dev(priv->udev); | ||
2427 | |||
2428 | at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw", | 2426 | at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw", |
2429 | __func__); | 2427 | __func__); |
2430 | ieee80211_free_hw(priv->hw); | 2428 | ieee80211_free_hw(priv->hw); |
@@ -2558,6 +2556,7 @@ static void at76_disconnect(struct usb_interface *interface) | |||
2558 | 2556 | ||
2559 | wiphy_info(priv->hw->wiphy, "disconnecting\n"); | 2557 | wiphy_info(priv->hw->wiphy, "disconnecting\n"); |
2560 | at76_delete_device(priv); | 2558 | at76_delete_device(priv); |
2559 | usb_put_dev(priv->udev); | ||
2561 | dev_info(&interface->dev, "disconnected\n"); | 2560 | dev_info(&interface->dev, "disconnected\n"); |
2562 | } | 2561 | } |
2563 | 2562 | ||
diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.c b/drivers/net/wireless/ath/ath9k/common-beacon.c index 733be5178481..6ad44470d0f2 100644 --- a/drivers/net/wireless/ath/ath9k/common-beacon.c +++ b/drivers/net/wireless/ath/ath9k/common-beacon.c | |||
@@ -57,7 +57,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah, | |||
57 | struct ath9k_beacon_state *bs) | 57 | struct ath9k_beacon_state *bs) |
58 | { | 58 | { |
59 | struct ath_common *common = ath9k_hw_common(ah); | 59 | struct ath_common *common = ath9k_hw_common(ah); |
60 | int dtim_intval, sleepduration; | 60 | int dtim_intval; |
61 | u64 tsf; | 61 | u64 tsf; |
62 | 62 | ||
63 | /* No need to configure beacon if we are not associated */ | 63 | /* No need to configure beacon if we are not associated */ |
@@ -75,7 +75,6 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah, | |||
75 | * last beacon we received (which may be none). | 75 | * last beacon we received (which may be none). |
76 | */ | 76 | */ |
77 | dtim_intval = conf->intval * conf->dtim_period; | 77 | dtim_intval = conf->intval * conf->dtim_period; |
78 | sleepduration = ah->hw->conf.listen_interval * conf->intval; | ||
79 | 78 | ||
80 | /* | 79 | /* |
81 | * Pull nexttbtt forward to reflect the current | 80 | * Pull nexttbtt forward to reflect the current |
@@ -113,7 +112,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah, | |||
113 | */ | 112 | */ |
114 | 113 | ||
115 | bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100), | 114 | bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100), |
116 | sleepduration)); | 115 | conf->intval)); |
117 | if (bs->bs_sleepduration > bs->bs_dtimperiod) | 116 | if (bs->bs_sleepduration > bs->bs_dtimperiod) |
118 | bs->bs_sleepduration = bs->bs_dtimperiod; | 117 | bs->bs_sleepduration = bs->bs_dtimperiod; |
119 | 118 | ||
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index bb86eb2ffc95..f0484b1b617e 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | |||
@@ -978,7 +978,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, | |||
978 | struct ath_hw *ah = common->ah; | 978 | struct ath_hw *ah = common->ah; |
979 | struct ath_htc_rx_status *rxstatus; | 979 | struct ath_htc_rx_status *rxstatus; |
980 | struct ath_rx_status rx_stats; | 980 | struct ath_rx_status rx_stats; |
981 | bool decrypt_error; | 981 | bool decrypt_error = false; |
982 | 982 | ||
983 | if (skb->len < HTC_RX_FRAME_HEADER_SIZE) { | 983 | if (skb->len < HTC_RX_FRAME_HEADER_SIZE) { |
984 | ath_err(common, "Corrupted RX frame, dropping (len: %d)\n", | 984 | ath_err(common, "Corrupted RX frame, dropping (len: %d)\n", |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index e6ac8d2e610c..4b148bbb2bf6 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -513,7 +513,7 @@ irqreturn_t ath_isr(int irq, void *dev) | |||
513 | * touch anything. Note this can happen early | 513 | * touch anything. Note this can happen early |
514 | * on if the IRQ is shared. | 514 | * on if the IRQ is shared. |
515 | */ | 515 | */ |
516 | if (test_bit(ATH_OP_INVALID, &common->op_flags)) | 516 | if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) |
517 | return IRQ_NONE; | 517 | return IRQ_NONE; |
518 | 518 | ||
519 | /* shared irq, not for us */ | 519 | /* shared irq, not for us */ |
diff --git a/drivers/net/wireless/ath/ath9k/spectral.c b/drivers/net/wireless/ath/ath9k/spectral.c index 5fe29b9f8fa2..8f68426ca653 100644 --- a/drivers/net/wireless/ath/ath9k/spectral.c +++ b/drivers/net/wireless/ath/ath9k/spectral.c | |||
@@ -253,7 +253,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file, | |||
253 | 253 | ||
254 | if (strncmp("trigger", buf, 7) == 0) { | 254 | if (strncmp("trigger", buf, 7) == 0) { |
255 | ath9k_spectral_scan_trigger(sc->hw); | 255 | ath9k_spectral_scan_trigger(sc->hw); |
256 | } else if (strncmp("background", buf, 9) == 0) { | 256 | } else if (strncmp("background", buf, 10) == 0) { |
257 | ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND); | 257 | ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND); |
258 | ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n"); | 258 | ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n"); |
259 | } else if (strncmp("chanscan", buf, 8) == 0) { | 259 | } else if (strncmp("chanscan", buf, 8) == 0) { |
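The spectral_scan fix above matters because strncmp("background", buf, 9) compares only the first nine characters ("backgroun"), so any input beginning with that prefix, e.g. "backgrounX", would be accepted; an exact keyword match needs the length of the whole literal, as the neighbouring "trigger"/"chanscan" checks already use. For example:

	/* "background" is 10 characters; comparing only 9 also matches
	 * "background", "backgrounX", ... so pass the full length.
	 */
	if (strncmp("background", buf, 10) == 0)
		enable_background_mode();	/* placeholder action */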
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig index b8e2561ea645..fe3dc126b149 100644 --- a/drivers/net/wireless/brcm80211/Kconfig +++ b/drivers/net/wireless/brcm80211/Kconfig | |||
@@ -27,10 +27,17 @@ config BRCMFMAC | |||
27 | one of the bus interface support. If you choose to build a module, | 27 | one of the bus interface support. If you choose to build a module, |
28 | it'll be called brcmfmac.ko. | 28 | it'll be called brcmfmac.ko. |
29 | 29 | ||
30 | config BRCMFMAC_PROTO_BCDC | ||
31 | bool | ||
32 | |||
33 | config BRCMFMAC_PROTO_MSGBUF | ||
34 | bool | ||
35 | |||
30 | config BRCMFMAC_SDIO | 36 | config BRCMFMAC_SDIO |
31 | bool "SDIO bus interface support for FullMAC driver" | 37 | bool "SDIO bus interface support for FullMAC driver" |
32 | depends on (MMC = y || MMC = BRCMFMAC) | 38 | depends on (MMC = y || MMC = BRCMFMAC) |
33 | depends on BRCMFMAC | 39 | depends on BRCMFMAC |
40 | select BRCMFMAC_PROTO_BCDC | ||
34 | select FW_LOADER | 41 | select FW_LOADER |
35 | default y | 42 | default y |
36 | ---help--- | 43 | ---help--- |
@@ -42,6 +49,7 @@ config BRCMFMAC_USB | |||
42 | bool "USB bus interface support for FullMAC driver" | 49 | bool "USB bus interface support for FullMAC driver" |
43 | depends on (USB = y || USB = BRCMFMAC) | 50 | depends on (USB = y || USB = BRCMFMAC) |
44 | depends on BRCMFMAC | 51 | depends on BRCMFMAC |
52 | select BRCMFMAC_PROTO_BCDC | ||
45 | select FW_LOADER | 53 | select FW_LOADER |
46 | ---help--- | 54 | ---help--- |
47 | This option enables the USB bus interface support for Broadcom | 55 | This option enables the USB bus interface support for Broadcom |
@@ -52,6 +60,8 @@ config BRCMFMAC_PCIE | |||
52 | bool "PCIE bus interface support for FullMAC driver" | 60 | bool "PCIE bus interface support for FullMAC driver" |
53 | depends on BRCMFMAC | 61 | depends on BRCMFMAC |
54 | depends on PCI | 62 | depends on PCI |
63 | depends on HAS_DMA | ||
64 | select BRCMFMAC_PROTO_MSGBUF | ||
55 | select FW_LOADER | 65 | select FW_LOADER |
56 | ---help--- | 66 | ---help--- |
57 | This option enables the PCIE bus interface support for Broadcom | 67 | This option enables the PCIE bus interface support for Broadcom |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile index c35adf4bc70b..90a977fe9a64 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile +++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile | |||
@@ -30,16 +30,18 @@ brcmfmac-objs += \ | |||
30 | fwsignal.o \ | 30 | fwsignal.o \ |
31 | p2p.o \ | 31 | p2p.o \ |
32 | proto.o \ | 32 | proto.o \ |
33 | bcdc.o \ | ||
34 | commonring.o \ | ||
35 | flowring.o \ | ||
36 | msgbuf.o \ | ||
37 | dhd_common.o \ | 33 | dhd_common.o \ |
38 | dhd_linux.o \ | 34 | dhd_linux.o \ |
39 | firmware.o \ | 35 | firmware.o \ |
40 | feature.o \ | 36 | feature.o \ |
41 | btcoex.o \ | 37 | btcoex.o \ |
42 | vendor.o | 38 | vendor.o |
39 | brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \ | ||
40 | bcdc.o | ||
41 | brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \ | ||
42 | commonring.o \ | ||
43 | flowring.o \ | ||
44 | msgbuf.o | ||
43 | brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ | 45 | brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ |
44 | dhd_sdio.o \ | 46 | dhd_sdio.o \ |
45 | bcmsdh.o | 47 | bcmsdh.o |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h index 17e8c039ff32..6003179c0ceb 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h | |||
@@ -16,9 +16,12 @@ | |||
16 | #ifndef BRCMFMAC_BCDC_H | 16 | #ifndef BRCMFMAC_BCDC_H |
17 | #define BRCMFMAC_BCDC_H | 17 | #define BRCMFMAC_BCDC_H |
18 | 18 | ||
19 | 19 | #ifdef CONFIG_BRCMFMAC_PROTO_BCDC | |
20 | int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr); | 20 | int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr); |
21 | void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr); | 21 | void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr); |
22 | 22 | #else | |
23 | static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; } | ||
24 | static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {} | ||
25 | #endif | ||
23 | 26 | ||
24 | #endif /* BRCMFMAC_BCDC_H */ | 27 | #endif /* BRCMFMAC_BCDC_H */ |
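The bcdc.h change above, like the msgbuf.h change further down and the matching Kconfig/Makefile hunks, follows the kernel's conditional-stub header pattern: when the protocol is configured out, the header supplies empty static inline stubs so common code compiles and links without sprinkling #ifdefs over every call site. A minimal, self-contained sketch of that pattern, using hypothetical FEATURE_X / feature_attach() names rather than the driver's own symbols:

/* Conditional-stub header pattern, illustrative names only.
 * When FEATURE_X is defined, the real feature_attach()/feature_detach()
 * would be provided by a separately compiled object; when it is not,
 * the stubs below keep callers building unchanged. */
#include <stdio.h>

#ifdef FEATURE_X
int feature_attach(void);
void feature_detach(void);
#else
static inline int feature_attach(void) { return 0; }
static inline void feature_detach(void) {}
#endif

int main(void)
{
	if (feature_attach())
		fprintf(stderr, "attach failed\n");
	feature_detach();
	return 0;
}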
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c index 4f1daabc551b..44fc85f68f7a 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c | |||
@@ -185,7 +185,13 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, | |||
185 | ifevent->action, ifevent->ifidx, ifevent->bssidx, | 185 | ifevent->action, ifevent->ifidx, ifevent->bssidx, |
186 | ifevent->flags, ifevent->role); | 186 | ifevent->flags, ifevent->role); |
187 | 187 | ||
188 | if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) { | 188 | /* The P2P Device interface event must not be ignored |
189 | * contrary to what firmware tells us. The only way to | ||
190 | * distinguish the P2P Device is by looking at the ifidx | ||
191 | * and bssidx received. | ||
192 | */ | ||
193 | if (!(ifevent->ifidx == 0 && ifevent->bssidx == 1) && | ||
194 | (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) { | ||
189 | brcmf_dbg(EVENT, "event can be ignored\n"); | 195 | brcmf_dbg(EVENT, "event can be ignored\n"); |
190 | return; | 196 | return; |
191 | } | 197 | } |
@@ -210,12 +216,12 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, | |||
210 | return; | 216 | return; |
211 | } | 217 | } |
212 | 218 | ||
213 | if (ifevent->action == BRCMF_E_IF_CHANGE) | 219 | if (ifp && ifevent->action == BRCMF_E_IF_CHANGE) |
214 | brcmf_fws_reset_interface(ifp); | 220 | brcmf_fws_reset_interface(ifp); |
215 | 221 | ||
216 | err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data); | 222 | err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data); |
217 | 223 | ||
218 | if (ifevent->action == BRCMF_E_IF_DEL) { | 224 | if (ifp && ifevent->action == BRCMF_E_IF_DEL) { |
219 | brcmf_fws_del_interface(ifp); | 225 | brcmf_fws_del_interface(ifp); |
220 | brcmf_del_if(drvr, ifevent->bssidx); | 226 | brcmf_del_if(drvr, ifevent->bssidx); |
221 | } | 227 | } |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h index dd20b1862d44..cbf033f59109 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h | |||
@@ -172,6 +172,8 @@ enum brcmf_fweh_event_code { | |||
172 | #define BRCMF_E_IF_ROLE_STA 0 | 172 | #define BRCMF_E_IF_ROLE_STA 0 |
173 | #define BRCMF_E_IF_ROLE_AP 1 | 173 | #define BRCMF_E_IF_ROLE_AP 1 |
174 | #define BRCMF_E_IF_ROLE_WDS 2 | 174 | #define BRCMF_E_IF_ROLE_WDS 2 |
175 | #define BRCMF_E_IF_ROLE_P2P_GO 3 | ||
176 | #define BRCMF_E_IF_ROLE_P2P_CLIENT 4 | ||
175 | 177 | ||
176 | /** | 178 | /** |
177 | * definitions for event packet validation. | 179 | * definitions for event packet validation. |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h index f901ae52bf2b..77a51b8c1e12 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #ifndef BRCMFMAC_MSGBUF_H | 15 | #ifndef BRCMFMAC_MSGBUF_H |
16 | #define BRCMFMAC_MSGBUF_H | 16 | #define BRCMFMAC_MSGBUF_H |
17 | 17 | ||
18 | #ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF | ||
18 | 19 | ||
19 | #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 20 | 20 | #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 20 |
20 | #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 256 | 21 | #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 256 |
@@ -32,9 +33,15 @@ | |||
32 | 33 | ||
33 | 34 | ||
34 | int brcmf_proto_msgbuf_rx_trigger(struct device *dev); | 35 | int brcmf_proto_msgbuf_rx_trigger(struct device *dev); |
36 | void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid); | ||
35 | int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr); | 37 | int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr); |
36 | void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr); | 38 | void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr); |
37 | void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid); | 39 | #else |
38 | 40 | static inline int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) | |
41 | { | ||
42 | return 0; | ||
43 | } | ||
44 | static inline void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) {} | ||
45 | #endif | ||
39 | 46 | ||
40 | #endif /* BRCMFMAC_MSGBUF_H */ | 47 | #endif /* BRCMFMAC_MSGBUF_H */ |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 02fe706fc9ec..16a246bfc343 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | |||
@@ -497,8 +497,11 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable) | |||
497 | static void | 497 | static void |
498 | brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev) | 498 | brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev) |
499 | { | 499 | { |
500 | struct net_device *ndev = wdev->netdev; | 500 | struct brcmf_cfg80211_vif *vif; |
501 | struct brcmf_if *ifp = netdev_priv(ndev); | 501 | struct brcmf_if *ifp; |
502 | |||
503 | vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); | ||
504 | ifp = vif->ifp; | ||
502 | 505 | ||
503 | if ((wdev->iftype == NL80211_IFTYPE_ADHOC) || | 506 | if ((wdev->iftype == NL80211_IFTYPE_ADHOC) || |
504 | (wdev->iftype == NL80211_IFTYPE_AP) || | 507 | (wdev->iftype == NL80211_IFTYPE_AP) || |
@@ -4918,7 +4921,7 @@ static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg, | |||
4918 | struct brcmu_chan ch; | 4921 | struct brcmu_chan ch; |
4919 | int i; | 4922 | int i; |
4920 | 4923 | ||
4921 | for (i = 0; i <= total; i++) { | 4924 | for (i = 0; i < total; i++) { |
4922 | ch.chspec = (u16)le32_to_cpu(chlist->element[i]); | 4925 | ch.chspec = (u16)le32_to_cpu(chlist->element[i]); |
4923 | cfg->d11inf.decchspec(&ch); | 4926 | cfg->d11inf.decchspec(&ch); |
4924 | 4927 | ||
@@ -5143,6 +5146,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) | |||
5143 | 5146 | ||
5144 | ch.band = BRCMU_CHAN_BAND_2G; | 5147 | ch.band = BRCMU_CHAN_BAND_2G; |
5145 | ch.bw = BRCMU_CHAN_BW_40; | 5148 | ch.bw = BRCMU_CHAN_BW_40; |
5149 | ch.sb = BRCMU_CHAN_SB_NONE; | ||
5146 | ch.chnum = 0; | 5150 | ch.chnum = 0; |
5147 | cfg->d11inf.encchspec(&ch); | 5151 | cfg->d11inf.encchspec(&ch); |
5148 | 5152 | ||
@@ -5176,6 +5180,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) | |||
5176 | 5180 | ||
5177 | brcmf_update_bw40_channel_flag(&band->channels[j], &ch); | 5181 | brcmf_update_bw40_channel_flag(&band->channels[j], &ch); |
5178 | } | 5182 | } |
5183 | kfree(pbuf); | ||
5179 | } | 5184 | } |
5180 | return err; | 5185 | return err; |
5181 | } | 5186 | } |
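The brcmf_cfg80211_update_proto_addr_mode() fix above stops using netdev_priv(wdev->netdev), which dereferences a NULL pointer when the wireless_dev has no net_device (the P2P Device case), and instead recovers the enclosing vif with container_of. A self-contained sketch of that step, with simplified stand-in structs rather than the driver's real ones:

/* container_of: recover the enclosing object from a pointer to an
 * embedded member. Only the member's offset is needed, so it works
 * even when fields the container points at (here netdev) are NULL. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct wireless_dev { void *netdev; };            /* may be NULL (P2P Device) */
struct vif { int ifidx; struct wireless_dev wdev; };

int main(void)
{
	struct vif v = { .ifidx = 2, .wdev = { .netdev = NULL } };
	struct wireless_dev *wdev = &v.wdev;

	/* netdev_priv(wdev->netdev) would crash here; container_of does not. */
	struct vif *owner = container_of(wdev, struct vif, wdev);
	printf("ifidx=%d\n", owner->ifidx);
	return 0;
}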
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index 6451d2b6abcf..824f5e287783 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig | |||
@@ -51,7 +51,6 @@ config IWLWIFI_LEDS | |||
51 | 51 | ||
52 | config IWLDVM | 52 | config IWLDVM |
53 | tristate "Intel Wireless WiFi DVM Firmware support" | 53 | tristate "Intel Wireless WiFi DVM Firmware support" |
54 | depends on m | ||
55 | default IWLWIFI | 54 | default IWLWIFI |
56 | help | 55 | help |
57 | This is the driver that supports the DVM firmware which is | 56 | This is the driver that supports the DVM firmware which is |
@@ -60,7 +59,6 @@ config IWLDVM | |||
60 | 59 | ||
61 | config IWLMVM | 60 | config IWLMVM |
62 | tristate "Intel Wireless WiFi MVM Firmware support" | 61 | tristate "Intel Wireless WiFi MVM Firmware support" |
63 | depends on m | ||
64 | help | 62 | help |
65 | This is the driver that supports the MVM firmware which is | 63 | This is the driver that supports the MVM firmware which is |
66 | currently only available for 7260 and 3160 devices. | 64 | currently only available for 7260 and 3160 devices. |
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c index 760c45c34ef3..1513dbc79c14 100644 --- a/drivers/net/wireless/iwlwifi/dvm/power.c +++ b/drivers/net/wireless/iwlwifi/dvm/power.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #include "commands.h" | 40 | #include "commands.h" |
41 | #include "power.h" | 41 | #include "power.h" |
42 | 42 | ||
43 | static bool force_cam; | 43 | static bool force_cam = true; |
44 | module_param(force_cam, bool, 0644); | 44 | module_param(force_cam, bool, 0644); |
45 | MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)"); | 45 | MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)"); |
46 | 46 | ||
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index 6dc5dd3ced44..ed50de6362ed 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c | |||
@@ -1068,6 +1068,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) | |||
1068 | /* recalculate basic rates */ | 1068 | /* recalculate basic rates */ |
1069 | iwl_calc_basic_rates(priv, ctx); | 1069 | iwl_calc_basic_rates(priv, ctx); |
1070 | 1070 | ||
1071 | /* | ||
1072 | * force CTS-to-self frames protection if RTS-CTS is not preferred | ||
1073 | * one aggregation protection method | ||
1074 | */ | ||
1075 | if (!priv->hw_params.use_rts_for_aggregation) | ||
1076 | ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; | ||
1077 | |||
1071 | if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || | 1078 | if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || |
1072 | !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) | 1079 | !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) |
1073 | ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; | 1080 | ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; |
@@ -1473,6 +1480,11 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, | |||
1473 | else | 1480 | else |
1474 | ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; | 1481 | ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; |
1475 | 1482 | ||
1483 | if (bss_conf->use_cts_prot) | ||
1484 | ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; | ||
1485 | else | ||
1486 | ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; | ||
1487 | |||
1476 | memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); | 1488 | memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); |
1477 | 1489 | ||
1478 | if (vif->type == NL80211_IFTYPE_AP || | 1490 | if (vif->type == NL80211_IFTYPE_AP || |
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 48730064da73..d53adc245497 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c | |||
@@ -67,8 +67,8 @@ | |||
67 | #include "iwl-agn-hw.h" | 67 | #include "iwl-agn-hw.h" |
68 | 68 | ||
69 | /* Highest firmware API version supported */ | 69 | /* Highest firmware API version supported */ |
70 | #define IWL7260_UCODE_API_MAX 9 | 70 | #define IWL7260_UCODE_API_MAX 10 |
71 | #define IWL3160_UCODE_API_MAX 9 | 71 | #define IWL3160_UCODE_API_MAX 10 |
72 | 72 | ||
73 | /* Oldest version we won't warn about */ | 73 | /* Oldest version we won't warn about */ |
74 | #define IWL7260_UCODE_API_OK 9 | 74 | #define IWL7260_UCODE_API_OK 9 |
@@ -83,6 +83,8 @@ | |||
83 | #define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */ | 83 | #define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */ |
84 | #define IWL3160_NVM_VERSION 0x709 | 84 | #define IWL3160_NVM_VERSION 0x709 |
85 | #define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ | 85 | #define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ |
86 | #define IWL3165_NVM_VERSION 0x709 | ||
87 | #define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */ | ||
86 | #define IWL7265_NVM_VERSION 0x0a1d | 88 | #define IWL7265_NVM_VERSION 0x0a1d |
87 | #define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ | 89 | #define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ |
88 | 90 | ||
@@ -92,6 +94,9 @@ | |||
92 | #define IWL3160_FW_PRE "iwlwifi-3160-" | 94 | #define IWL3160_FW_PRE "iwlwifi-3160-" |
93 | #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" | 95 | #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" |
94 | 96 | ||
97 | #define IWL3165_FW_PRE "iwlwifi-3165-" | ||
98 | #define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode" | ||
99 | |||
95 | #define IWL7265_FW_PRE "iwlwifi-7265-" | 100 | #define IWL7265_FW_PRE "iwlwifi-7265-" |
96 | #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" | 101 | #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" |
97 | 102 | ||
@@ -213,6 +218,16 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = { | |||
213 | {0}, | 218 | {0}, |
214 | }; | 219 | }; |
215 | 220 | ||
221 | const struct iwl_cfg iwl3165_2ac_cfg = { | ||
222 | .name = "Intel(R) Dual Band Wireless AC 3165", | ||
223 | .fw_name_pre = IWL3165_FW_PRE, | ||
224 | IWL_DEVICE_7000, | ||
225 | .ht_params = &iwl7000_ht_params, | ||
226 | .nvm_ver = IWL3165_NVM_VERSION, | ||
227 | .nvm_calib_ver = IWL3165_TX_POWER_VERSION, | ||
228 | .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, | ||
229 | }; | ||
230 | |||
216 | const struct iwl_cfg iwl7265_2ac_cfg = { | 231 | const struct iwl_cfg iwl7265_2ac_cfg = { |
217 | .name = "Intel(R) Dual Band Wireless AC 7265", | 232 | .name = "Intel(R) Dual Band Wireless AC 7265", |
218 | .fw_name_pre = IWL7265_FW_PRE, | 233 | .fw_name_pre = IWL7265_FW_PRE, |
@@ -245,4 +260,5 @@ const struct iwl_cfg iwl7265_n_cfg = { | |||
245 | 260 | ||
246 | MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); | 261 | MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); |
247 | MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); | 262 | MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); |
263 | MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); | ||
248 | MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); | 264 | MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index 44b19e015102..e93c6972290b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c | |||
@@ -67,7 +67,7 @@ | |||
67 | #include "iwl-agn-hw.h" | 67 | #include "iwl-agn-hw.h" |
68 | 68 | ||
69 | /* Highest firmware API version supported */ | 69 | /* Highest firmware API version supported */ |
70 | #define IWL8000_UCODE_API_MAX 9 | 70 | #define IWL8000_UCODE_API_MAX 10 |
71 | 71 | ||
72 | /* Oldest version we won't warn about */ | 72 | /* Oldest version we won't warn about */ |
73 | #define IWL8000_UCODE_API_OK 8 | 73 | #define IWL8000_UCODE_API_OK 8 |
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 8da596db9abe..3d7cc37420ae 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h | |||
@@ -120,6 +120,8 @@ enum iwl_led_mode { | |||
120 | #define IWL_LONG_WD_TIMEOUT 10000 | 120 | #define IWL_LONG_WD_TIMEOUT 10000 |
121 | #define IWL_MAX_WD_TIMEOUT 120000 | 121 | #define IWL_MAX_WD_TIMEOUT 120000 |
122 | 122 | ||
123 | #define IWL_DEFAULT_MAX_TX_POWER 22 | ||
124 | |||
123 | /* Antenna presence definitions */ | 125 | /* Antenna presence definitions */ |
124 | #define ANT_NONE 0x0 | 126 | #define ANT_NONE 0x0 |
125 | #define ANT_A BIT(0) | 127 | #define ANT_A BIT(0) |
@@ -335,6 +337,7 @@ extern const struct iwl_cfg iwl7260_n_cfg; | |||
335 | extern const struct iwl_cfg iwl3160_2ac_cfg; | 337 | extern const struct iwl_cfg iwl3160_2ac_cfg; |
336 | extern const struct iwl_cfg iwl3160_2n_cfg; | 338 | extern const struct iwl_cfg iwl3160_2n_cfg; |
337 | extern const struct iwl_cfg iwl3160_n_cfg; | 339 | extern const struct iwl_cfg iwl3160_n_cfg; |
340 | extern const struct iwl_cfg iwl3165_2ac_cfg; | ||
338 | extern const struct iwl_cfg iwl7265_2ac_cfg; | 341 | extern const struct iwl_cfg iwl7265_2ac_cfg; |
339 | extern const struct iwl_cfg iwl7265_2n_cfg; | 342 | extern const struct iwl_cfg iwl7265_2n_cfg; |
340 | extern const struct iwl_cfg iwl7265_n_cfg; | 343 | extern const struct iwl_cfg iwl7265_n_cfg; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 018af2957d3b..354255f08754 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | |||
@@ -146,8 +146,6 @@ static const u8 iwl_nvm_channels_family_8000[] = { | |||
146 | #define LAST_2GHZ_HT_PLUS 9 | 146 | #define LAST_2GHZ_HT_PLUS 9 |
147 | #define LAST_5GHZ_HT 161 | 147 | #define LAST_5GHZ_HT 161 |
148 | 148 | ||
149 | #define DEFAULT_MAX_TX_POWER 16 | ||
150 | |||
151 | /* rate data (static) */ | 149 | /* rate data (static) */ |
152 | static struct ieee80211_rate iwl_cfg80211_rates[] = { | 150 | static struct ieee80211_rate iwl_cfg80211_rates[] = { |
153 | { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, | 151 | { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, |
@@ -295,7 +293,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, | |||
295 | * Default value - highest tx power value. max_power | 293 | * Default value - highest tx power value. max_power |
296 | * is not used in mvm, and is used for backwards compatibility | 294 | * is not used in mvm, and is used for backwards compatibility |
297 | */ | 295 | */ |
298 | channel->max_power = DEFAULT_MAX_TX_POWER; | 296 | channel->max_power = IWL_DEFAULT_MAX_TX_POWER; |
299 | is_5ghz = channel->band == IEEE80211_BAND_5GHZ; | 297 | is_5ghz = channel->band == IEEE80211_BAND_5GHZ; |
300 | IWL_DEBUG_EEPROM(dev, | 298 | IWL_DEBUG_EEPROM(dev, |
301 | "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", | 299 | "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", |
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 2291bbcaaeab..ce71625f497f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c | |||
@@ -585,8 +585,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) | |||
585 | lockdep_assert_held(&mvm->mutex); | 585 | lockdep_assert_held(&mvm->mutex); |
586 | 586 | ||
587 | if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { | 587 | if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { |
588 | u32 mode; | ||
589 | |||
590 | switch (mvm->bt_force_ant_mode) { | 588 | switch (mvm->bt_force_ant_mode) { |
591 | case BT_FORCE_ANT_BT: | 589 | case BT_FORCE_ANT_BT: |
592 | mode = BT_COEX_BT; | 590 | mode = BT_COEX_BT; |
@@ -756,7 +754,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, | |||
756 | struct iwl_bt_iterator_data *data = _data; | 754 | struct iwl_bt_iterator_data *data = _data; |
757 | struct iwl_mvm *mvm = data->mvm; | 755 | struct iwl_mvm *mvm = data->mvm; |
758 | struct ieee80211_chanctx_conf *chanctx_conf; | 756 | struct ieee80211_chanctx_conf *chanctx_conf; |
759 | enum ieee80211_smps_mode smps_mode; | 757 | /* default smps_mode is AUTOMATIC - only used for client modes */ |
758 | enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; | ||
760 | u32 bt_activity_grading; | 759 | u32 bt_activity_grading; |
761 | int ave_rssi; | 760 | int ave_rssi; |
762 | 761 | ||
@@ -764,8 +763,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, | |||
764 | 763 | ||
765 | switch (vif->type) { | 764 | switch (vif->type) { |
766 | case NL80211_IFTYPE_STATION: | 765 | case NL80211_IFTYPE_STATION: |
767 | /* default smps_mode for BSS / P2P client is AUTOMATIC */ | ||
768 | smps_mode = IEEE80211_SMPS_AUTOMATIC; | ||
769 | break; | 766 | break; |
770 | case NL80211_IFTYPE_AP: | 767 | case NL80211_IFTYPE_AP: |
771 | if (!mvmvif->ap_ibss_active) | 768 | if (!mvmvif->ap_ibss_active) |
@@ -797,7 +794,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, | |||
797 | else if (bt_activity_grading >= BT_LOW_TRAFFIC) | 794 | else if (bt_activity_grading >= BT_LOW_TRAFFIC) |
798 | smps_mode = IEEE80211_SMPS_DYNAMIC; | 795 | smps_mode = IEEE80211_SMPS_DYNAMIC; |
799 | 796 | ||
800 | /* relax SMPS contraints for next association */ | 797 | /* relax SMPS constraints for next association */ |
801 | if (!vif->bss_conf.assoc) | 798 | if (!vif->bss_conf.assoc) |
802 | smps_mode = IEEE80211_SMPS_AUTOMATIC; | 799 | smps_mode = IEEE80211_SMPS_AUTOMATIC; |
803 | 800 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c index 2e90ff795c13..87e517bffedc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c | |||
@@ -74,8 +74,7 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, | |||
74 | 74 | ||
75 | switch (param) { | 75 | switch (param) { |
76 | case MVM_DEBUGFS_PM_KEEP_ALIVE: { | 76 | case MVM_DEBUGFS_PM_KEEP_ALIVE: { |
77 | struct ieee80211_hw *hw = mvm->hw; | 77 | int dtimper = vif->bss_conf.dtim_period ?: 1; |
78 | int dtimper = hw->conf.ps_dtim_period ?: 1; | ||
79 | int dtimper_msec = dtimper * vif->bss_conf.beacon_int; | 78 | int dtimper_msec = dtimper * vif->bss_conf.beacon_int; |
80 | 79 | ||
81 | IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val); | 80 | IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 95f5b3274efb..9a922f3bd16b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h | |||
@@ -1563,14 +1563,14 @@ enum iwl_sf_scenario { | |||
1563 | 1563 | ||
1564 | /** | 1564 | /** |
1565 | * Smart Fifo configuration command. | 1565 | * Smart Fifo configuration command. |
1566 | * @state: smart fifo state, types listed in iwl_sf_sate. | 1566 | * @state: smart fifo state, types listed in enum %iwl_sf_sate. |
1567 | * @watermark: Minimum allowed availabe free space in RXF for transient state. | 1567 | * @watermark: Minimum allowed availabe free space in RXF for transient state. |
1568 | * @long_delay_timeouts: aging and idle timer values for each scenario | 1568 | * @long_delay_timeouts: aging and idle timer values for each scenario |
1569 | * in long delay state. | 1569 | * in long delay state. |
1570 | * @full_on_timeouts: timer values for each scenario in full on state. | 1570 | * @full_on_timeouts: timer values for each scenario in full on state. |
1571 | */ | 1571 | */ |
1572 | struct iwl_sf_cfg_cmd { | 1572 | struct iwl_sf_cfg_cmd { |
1573 | enum iwl_sf_state state; | 1573 | __le32 state; |
1574 | __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; | 1574 | __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; |
1575 | __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; | 1575 | __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; |
1576 | __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; | 1576 | __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 0e523e28cabf..8242e689ddb1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | |||
@@ -721,11 +721,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, | |||
721 | !force_assoc_off) { | 721 | !force_assoc_off) { |
722 | u32 dtim_offs; | 722 | u32 dtim_offs; |
723 | 723 | ||
724 | /* Allow beacons to pass through as long as we are not | ||
725 | * associated, or we do not have dtim period information. | ||
726 | */ | ||
727 | cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); | ||
728 | |||
729 | /* | 724 | /* |
730 | * The DTIM count counts down, so when it is N that means N | 725 | * The DTIM count counts down, so when it is N that means N |
731 | * more beacon intervals happen until the DTIM TBTT. Therefore | 726 | * more beacon intervals happen until the DTIM TBTT. Therefore |
@@ -759,6 +754,11 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, | |||
759 | ctxt_sta->is_assoc = cpu_to_le32(1); | 754 | ctxt_sta->is_assoc = cpu_to_le32(1); |
760 | } else { | 755 | } else { |
761 | ctxt_sta->is_assoc = cpu_to_le32(0); | 756 | ctxt_sta->is_assoc = cpu_to_le32(0); |
757 | |||
758 | /* Allow beacons to pass through as long as we are not | ||
759 | * associated, or we do not have dtim period information. | ||
760 | */ | ||
761 | cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); | ||
762 | } | 762 | } |
763 | 763 | ||
764 | ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int); | 764 | ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 7c8796584c25..cdc272d776e7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c | |||
@@ -396,12 +396,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) | |||
396 | else | 396 | else |
397 | hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; | 397 | hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; |
398 | 398 | ||
399 | /* TODO: enable that only for firmwares that don't crash */ | 399 | if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) { |
400 | /* hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; */ | 400 | hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; |
401 | hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; | 401 | hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; |
402 | hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; | 402 | hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; |
403 | /* we create the 802.11 header and zero length SSID IE. */ | 403 | /* we create the 802.11 header and zero length SSID IE. */ |
404 | hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; | 404 | hw->wiphy->max_sched_scan_ie_len = |
405 | SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; | ||
406 | } | ||
405 | 407 | ||
406 | hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | | 408 | hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | |
407 | NL80211_FEATURE_LOW_PRIORITY_SCAN | | 409 | NL80211_FEATURE_LOW_PRIORITY_SCAN | |
@@ -1524,11 +1526,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, | |||
1524 | */ | 1526 | */ |
1525 | iwl_mvm_remove_time_event(mvm, mvmvif, | 1527 | iwl_mvm_remove_time_event(mvm, mvmvif, |
1526 | &mvmvif->time_event_data); | 1528 | &mvmvif->time_event_data); |
1527 | } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | | ||
1528 | BSS_CHANGED_QOS)) { | ||
1529 | ret = iwl_mvm_power_update_mac(mvm); | ||
1530 | if (ret) | ||
1531 | IWL_ERR(mvm, "failed to update power mode\n"); | ||
1532 | } | 1529 | } |
1533 | 1530 | ||
1534 | if (changes & BSS_CHANGED_BEACON_INFO) { | 1531 | if (changes & BSS_CHANGED_BEACON_INFO) { |
@@ -1536,6 +1533,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, | |||
1536 | WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); | 1533 | WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); |
1537 | } | 1534 | } |
1538 | 1535 | ||
1536 | if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) { | ||
1537 | ret = iwl_mvm_power_update_mac(mvm); | ||
1538 | if (ret) | ||
1539 | IWL_ERR(mvm, "failed to update power mode\n"); | ||
1540 | } | ||
1541 | |||
1539 | if (changes & BSS_CHANGED_TXPOWER) { | 1542 | if (changes & BSS_CHANGED_TXPOWER) { |
1540 | IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", | 1543 | IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", |
1541 | bss_conf->txpower); | 1544 | bss_conf->txpower); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index 2b2d10800a55..d9769a23c68b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c | |||
@@ -281,7 +281,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, | |||
281 | struct ieee80211_vif *vif, | 281 | struct ieee80211_vif *vif, |
282 | struct iwl_mac_power_cmd *cmd) | 282 | struct iwl_mac_power_cmd *cmd) |
283 | { | 283 | { |
284 | struct ieee80211_hw *hw = mvm->hw; | ||
285 | struct ieee80211_chanctx_conf *chanctx_conf; | 284 | struct ieee80211_chanctx_conf *chanctx_conf; |
286 | struct ieee80211_channel *chan; | 285 | struct ieee80211_channel *chan; |
287 | int dtimper, dtimper_msec; | 286 | int dtimper, dtimper_msec; |
@@ -292,7 +291,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, | |||
292 | 291 | ||
293 | cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, | 292 | cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, |
294 | mvmvif->color)); | 293 | mvmvif->color)); |
295 | dtimper = hw->conf.ps_dtim_period ?: 1; | 294 | dtimper = vif->bss_conf.dtim_period; |
296 | 295 | ||
297 | /* | 296 | /* |
298 | * Regardless of power management state the driver must set | 297 | * Regardless of power management state the driver must set |
@@ -885,7 +884,7 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, | |||
885 | iwl_mvm_power_build_cmd(mvm, vif, &cmd); | 884 | iwl_mvm_power_build_cmd(mvm, vif, &cmd); |
886 | if (enable) { | 885 | if (enable) { |
887 | /* configure skip over dtim up to 300 msec */ | 886 | /* configure skip over dtim up to 300 msec */ |
888 | int dtimper = mvm->hw->conf.ps_dtim_period ?: 1; | 887 | int dtimper = vif->bss_conf.dtim_period ?: 1; |
889 | int dtimper_msec = dtimper * vif->bss_conf.beacon_int; | 888 | int dtimper_msec = dtimper * vif->bss_conf.beacon_int; |
890 | 889 | ||
891 | if (WARN_ON(!dtimper_msec)) | 890 | if (WARN_ON(!dtimper_msec)) |
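Several of the power-management hunks above use the "dtimper ?: 1" form; this is GNU C's conditional operator with the middle operand omitted, which yields the first operand when it is nonzero (evaluating it only once) and the fallback otherwise. A standalone illustration, assuming a gcc or clang build where the extension is available:

/* GNU C "a ?: b" shorthand for "a ? a : b" with a evaluated once. */
#include <stdio.h>

int main(void)
{
	int dtim_period = 0;            /* not yet learned from the AP */
	int dtimper = dtim_period ?: 1; /* fall back to 1 beacon interval */

	printf("dtimper=%d\n", dtimper);
	return 0;
}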
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 4b98987fc413..bf5cd8c8b0f7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c | |||
@@ -149,13 +149,13 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, | |||
149 | le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]); | 149 | le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]); |
150 | energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> | 150 | energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> |
151 | IWL_RX_INFO_ENERGY_ANT_A_POS; | 151 | IWL_RX_INFO_ENERGY_ANT_A_POS; |
152 | energy_a = energy_a ? -energy_a : -256; | 152 | energy_a = energy_a ? -energy_a : S8_MIN; |
153 | energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> | 153 | energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> |
154 | IWL_RX_INFO_ENERGY_ANT_B_POS; | 154 | IWL_RX_INFO_ENERGY_ANT_B_POS; |
155 | energy_b = energy_b ? -energy_b : -256; | 155 | energy_b = energy_b ? -energy_b : S8_MIN; |
156 | energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> | 156 | energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> |
157 | IWL_RX_INFO_ENERGY_ANT_C_POS; | 157 | IWL_RX_INFO_ENERGY_ANT_C_POS; |
158 | energy_c = energy_c ? -energy_c : -256; | 158 | energy_c = energy_c ? -energy_c : S8_MIN; |
159 | max_energy = max(energy_a, energy_b); | 159 | max_energy = max(energy_a, energy_b); |
160 | max_energy = max(max_energy, energy_c); | 160 | max_energy = max(max_energy, energy_c); |
161 | 161 | ||
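The rx.c change replaces the -256 floor with S8_MIN because the energy ends up in a signed 8-bit signal field: on the usual two's-complement targets, -256 truncated to s8 wraps to 0 (which reads as full strength), while -128 remains a sensible minimum. A standalone demonstration in plain C, not driver code:

/* Show why -256 is a bad sentinel for a value stored in a signed 8-bit
 * field: the truncation wraps to 0 on two's-complement machines. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int8_t from_minus_256 = (int8_t)-256;     /* wraps to 0 */
	int8_t from_s8_min    = (int8_t)INT8_MIN; /* stays -128 */

	printf("-256 stored as s8:   %d\n", from_minus_256);
	printf("S8_MIN stored as s8: %d\n", from_s8_min);
	return 0;
}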
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c index 7edfd15efc9d..e843b67f2201 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/iwlwifi/mvm/sf.c | |||
@@ -172,7 +172,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, | |||
172 | enum iwl_sf_state new_state) | 172 | enum iwl_sf_state new_state) |
173 | { | 173 | { |
174 | struct iwl_sf_cfg_cmd sf_cmd = { | 174 | struct iwl_sf_cfg_cmd sf_cmd = { |
175 | .state = new_state, | 175 | .state = cpu_to_le32(new_state), |
176 | }; | 176 | }; |
177 | struct ieee80211_sta *sta; | 177 | struct ieee80211_sta *sta; |
178 | int ret = 0; | 178 | int ret = 0; |
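The fw-api.h and sf.c hunks together change the Smart Fifo command's state field from a raw enum to an explicit __le32 filled via cpu_to_le32(): neither enum width nor host byte order is fixed, while the firmware expects a 32-bit little-endian value on the wire. A hedged userspace sketch of the same idea, using glibc's htole32() in place of the kernel helper and simplified names:

/* Fixed-width, explicitly little-endian field in a host-to-firmware
 * command structure. htole32() stands in for cpu_to_le32(). */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

enum sf_state { SF_LONG_DELAY_ON = 0, SF_FULL_ON = 1, SF_UNINIT = 2 };

struct sf_cfg_cmd {
	uint32_t state;   /* little-endian on the wire */
};

int main(void)
{
	struct sf_cfg_cmd cmd = { .state = htole32(SF_FULL_ON) };
	const uint8_t *b = (const uint8_t *)&cmd.state;

	printf("wire bytes: %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
	return 0;
}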
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index dbc870713882..9ee410bf6da2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c | |||
@@ -168,10 +168,14 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, | |||
168 | 168 | ||
169 | /* | 169 | /* |
170 | * for data packets, rate info comes from the table inside the fw. This | 170 | * for data packets, rate info comes from the table inside the fw. This |
171 | * table is controlled by LINK_QUALITY commands | 171 | * table is controlled by LINK_QUALITY commands. Exclude ctrl port |
172 | * frames like EAPOLs which should be treated as mgmt frames. This | ||
173 | * avoids them being sent initially in high rates which increases the | ||
174 | * chances for completion of the 4-Way handshake. | ||
172 | */ | 175 | */ |
173 | 176 | ||
174 | if (ieee80211_is_data(fc) && sta) { | 177 | if (ieee80211_is_data(fc) && sta && |
178 | !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) { | ||
175 | tx_cmd->initial_rate_index = 0; | 179 | tx_cmd->initial_rate_index = 0; |
176 | tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); | 180 | tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); |
177 | return; | 181 | return; |
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index f0e722ced080..073a68b97a72 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
@@ -352,11 +352,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
352 | {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, | 352 | {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, |
353 | {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, | 353 | {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, |
354 | {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, | 354 | {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, |
355 | {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)}, | ||
356 | {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)}, | ||
355 | {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, | 357 | {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, |
356 | {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, | 358 | {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, |
357 | {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, | 359 | {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, |
358 | {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, | 360 | {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, |
359 | 361 | ||
362 | /* 3165 Series */ | ||
363 | {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, | ||
364 | {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)}, | ||
365 | |||
360 | /* 7265 Series */ | 366 | /* 7265 Series */ |
361 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, | 367 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, |
362 | {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, | 368 | {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, |
@@ -378,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
378 | {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, | 384 | {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, |
379 | {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, | 385 | {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, |
380 | {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, | 386 | {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, |
387 | {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)}, | ||
381 | {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, | 388 | {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, |
382 | {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, | 389 | {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, |
383 | {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, | 390 | {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, |
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c index 33da3dfcfa4f..d4bd550f505c 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c | |||
@@ -101,7 +101,7 @@ static bool halbtc_legacy(struct rtl_priv *adapter) | |||
101 | 101 | ||
102 | bool is_legacy = false; | 102 | bool is_legacy = false; |
103 | 103 | ||
104 | if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_B)) | 104 | if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G)) |
105 | is_legacy = true; | 105 | is_legacy = true; |
106 | 106 | ||
107 | return is_legacy; | 107 | return is_legacy; |
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 361435f8608a..1ac6383e7947 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | |||
@@ -317,6 +317,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { | |||
317 | {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ | 317 | {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ |
318 | {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ | 318 | {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ |
319 | {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ | 319 | {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ |
320 | {RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */ | ||
320 | {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/ | 321 | {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/ |
321 | {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ | 322 | {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ |
322 | {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/ | 323 | {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/ |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index e29e15dca86e..f379689dde30 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -576,6 +576,9 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, | |||
576 | init_waitqueue_head(&queue->dealloc_wq); | 576 | init_waitqueue_head(&queue->dealloc_wq); |
577 | atomic_set(&queue->inflight_packets, 0); | 577 | atomic_set(&queue->inflight_packets, 0); |
578 | 578 | ||
579 | netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll, | ||
580 | XENVIF_NAPI_WEIGHT); | ||
581 | |||
579 | if (tx_evtchn == rx_evtchn) { | 582 | if (tx_evtchn == rx_evtchn) { |
580 | /* feature-split-event-channels == 0 */ | 583 | /* feature-split-event-channels == 0 */ |
581 | err = bind_interdomain_evtchn_to_irqhandler( | 584 | err = bind_interdomain_evtchn_to_irqhandler( |
@@ -629,9 +632,6 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, | |||
629 | wake_up_process(queue->task); | 632 | wake_up_process(queue->task); |
630 | wake_up_process(queue->dealloc_task); | 633 | wake_up_process(queue->dealloc_task); |
631 | 634 | ||
632 | netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll, | ||
633 | XENVIF_NAPI_WEIGHT); | ||
634 | |||
635 | return 0; | 635 | return 0; |
636 | 636 | ||
637 | err_rx_unbind: | 637 | err_rx_unbind: |
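The xen-netback hunk moves netif_napi_add() ahead of binding the event-channel IRQs, so the NAPI context already exists by the time an interrupt can try to schedule it. The general rule is to register the consumer before enabling the event source. A contrived standalone model of that ordering, with a thread standing in for the interrupt (illustrative names only; build with -pthread):

/* Register the handler, then enable the event source that can fire it. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int poll_registered;

static void *irq_thread(void *arg)
{
	(void)arg;
	/* An "interrupt" that may fire as soon as it is enabled. */
	if (!atomic_load(&poll_registered))
		fprintf(stderr, "event arrived before the poll handler existed\n");
	else
		printf("event handled\n");
	return NULL;
}

int main(void)
{
	pthread_t irq;

	atomic_store(&poll_registered, 1);            /* register first...   */
	pthread_create(&irq, NULL, irq_thread, NULL); /* ...then enable IRQs */
	pthread_join(irq, NULL);
	return 0;
}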