Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c                      |  50
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c                   |  53
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c                    |   2
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c             |  31
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c         | 143
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c                    |  20
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c        |  49
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c                  |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c               |  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c        |   3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c                |  12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c  |  38
-rw-r--r--  drivers/net/ethernet/octeon/octeon_mgmt.c              |   4
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/Kconfig          |   1
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c                   |  67
-rw-r--r--  drivers/net/ethernet/sfc/farch.c                       |   3
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c                     |   7
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c                         |  52
18 files changed, 373 insertions, 185 deletions
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 2b92d712f212..86e621142d5b 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2128,6 +2128,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int entry = vp->cur_tx % TX_RING_SIZE;
 	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
 	unsigned long flags;
+	dma_addr_t dma_addr;
 
 	if (vortex_debug > 6) {
 		pr_debug("boomerang_start_xmit()\n");
@@ -2162,24 +2163,48 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
 
 	if (!skb_shinfo(skb)->nr_frags) {
-		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
-								skb->len, PCI_DMA_TODEVICE));
+		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
+					  PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+			goto out_dma_err;
+
+		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
 	} else {
 		int i;
 
-		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
-								skb_headlen(skb), PCI_DMA_TODEVICE));
+		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
+					  skb_headlen(skb), PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+			goto out_dma_err;
+
+		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
 
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
+			dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+						    0,
+						    frag->size,
+						    DMA_TO_DEVICE);
+			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+				for(i = i-1; i >= 0; i--)
+					dma_unmap_page(&VORTEX_PCI(vp)->dev,
+						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
+						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
+						       DMA_TO_DEVICE);
+
+				pci_unmap_single(VORTEX_PCI(vp),
+						 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
+						 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+						 PCI_DMA_TODEVICE);
+
+				goto out_dma_err;
+			}
+
 			vp->tx_ring[entry].frag[i+1].addr =
-					cpu_to_le32(skb_frag_dma_map(
-						&VORTEX_PCI(vp)->dev,
-						frag,
-						frag->page_offset, frag->size, DMA_TO_DEVICE));
+					cpu_to_le32(dma_addr);
 
 			if (i == skb_shinfo(skb)->nr_frags-1)
 				vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
@@ -2188,7 +2213,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 #else
-	vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+	dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+	if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+		goto out_dma_err;
+	vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
 	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
 	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
 #endif
@@ -2216,7 +2244,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_tx_timestamp(skb);
 	iowrite16(DownUnstall, ioaddr + EL3_CMD);
 	spin_unlock_irqrestore(&vp->lock, flags);
+out:
 	return NETDEV_TX_OK;
+out_dma_err:
+	dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+	goto out;
 }
 
 /* The interrupt handler does all of the Rx thread work and cleans up
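The pattern these hunks introduce is the standard one for mapping transmit buffers: every address returned by pci_map_single()/skb_frag_dma_map() is checked with dma_mapping_error() before it is written into a descriptor, and a failure in the middle of the fragment loop unwinds every mapping made so far in reverse order. A minimal standalone sketch of the same pattern (the ring layout and names here are illustrative, not 3c59x's):

    /* Sketch only: "struct tx_ring" and its desc[] layout are hypothetical. */
    static int map_tx_frags(struct device *dev, struct tx_ring *ring,
                            struct sk_buff *skb)
    {
            int i;

            for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                    skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                    dma_addr_t addr = skb_frag_dma_map(dev, frag, 0,
                                                       skb_frag_size(frag),
                                                       DMA_TO_DEVICE);

                    if (dma_mapping_error(dev, addr))
                            goto unwind;    /* never hand a bad address to HW */
                    ring->desc[i].addr = addr;
                    ring->desc[i].len = skb_frag_size(frag);
            }
            return 0;

    unwind:
            while (--i >= 0)                /* undo completed mappings, newest first */
                    dma_unmap_page(dev, ring->desc[i].addr,
                                   ring->desc[i].len, DMA_TO_DEVICE);
            return -ENOMEM;
    }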
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index dbea8472bfb4..abe1eabc0171 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -28,6 +28,17 @@
 
 
 /**
+ * arc_emac_tx_avail - Return the number of available slots in the tx ring.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * returns: the number of slots available for transmission in tx the ring.
+ */
+static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
+{
+	return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
+}
+
+/**
  * arc_emac_adjust_link - Adjust the PHY link duplex.
  * @ndev:	Pointer to the net_device structure.
  *
@@ -182,10 +193,15 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 		txbd->info = 0;
 
 		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
-		if (netif_queue_stopped(ndev))
-			netif_wake_queue(ndev);
 	}
+
+	/* Ensure that txbd_dirty is visible to tx() before checking
+	 * for queue stopped.
+	 */
+	smp_mb();
+
+	if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
+		netif_wake_queue(ndev);
 }
 
 /**
@@ -300,7 +316,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
 	work_done = arc_emac_rx(ndev, budget);
 	if (work_done < budget) {
 		napi_complete(napi);
-		arc_reg_or(priv, R_ENABLE, RXINT_MASK);
+		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
 	}
 
 	return work_done;
@@ -329,9 +345,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
 	/* Reset all flags except "MDIO complete" */
 	arc_reg_set(priv, R_STATUS, status);
 
-	if (status & RXINT_MASK) {
+	if (status & (RXINT_MASK | TXINT_MASK)) {
 		if (likely(napi_schedule_prep(&priv->napi))) {
-			arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
+			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
 			__napi_schedule(&priv->napi);
 		}
 	}
@@ -442,7 +458,7 @@ static int arc_emac_open(struct net_device *ndev)
 	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);
 
 	/* Enable interrupts */
-	arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
 	/* Set CONTROL */
 	arc_reg_set(priv, R_CTRL,
@@ -513,7 +529,7 @@ static int arc_emac_stop(struct net_device *ndev)
 	netif_stop_queue(ndev);
 
 	/* Disable interrupts */
-	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
 	/* Disable EMAC */
 	arc_reg_clr(priv, R_CTRL, EN_MASK);
@@ -576,11 +592,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
 	len = max_t(unsigned int, ETH_ZLEN, skb->len);
 
-	/* EMAC still holds this buffer in its possession.
-	 * CPU must not modify this buffer descriptor
-	 */
-	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
+	if (unlikely(!arc_emac_tx_avail(priv))) {
 		netif_stop_queue(ndev);
+		netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
 		return NETDEV_TX_BUSY;
 	}
 
@@ -609,12 +623,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	/* Increment index to point to the next BD */
 	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
 
-	/* Get "info" of the next BD */
-	info = &priv->txbd[*txbd_curr].info;
+	/* Ensure that tx_clean() sees the new txbd_curr before
+	 * checking the queue status. This prevents an unneeded wake
+	 * of the queue in tx_clean().
+	 */
+	smp_mb();
 
-	/* Check if if Tx BD ring is full - next BD is still owned by EMAC */
-	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
+	if (!arc_emac_tx_avail(priv)) {
 		netif_stop_queue(ndev);
+		/* Refresh tx_dirty */
+		smp_mb();
+		if (arc_emac_tx_avail(priv))
+			netif_start_queue(ndev);
+	}
 
 	arc_reg_set(priv, R_STATUS, TXPL_MASK);
 
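The two smp_mb() calls pair up across the producer (arc_emac_tx) and the consumer (arc_emac_tx_clean): the producer publishes its txbd_curr update before testing ring fullness, and the consumer publishes txbd_dirty before testing netif_queue_stopped(). Without the pairing, each side can act on the other's stale index and the queue can stop forever against a drained ring; the producer's second barrier plus re-check closes the window where tx_clean() ran between the fullness test and netif_stop_queue(). Condensed from the hunks above:

    /* Producer (xmit path): */
    priv->txbd_curr = next;
    smp_mb();                           /* publish txbd_curr before the test */
    if (!arc_emac_tx_avail(priv)) {
            netif_stop_queue(ndev);
            smp_mb();                   /* re-read txbd_dirty after stopping */
            if (arc_emac_tx_avail(priv))
                    netif_start_queue(ndev);   /* lost the race: undo the stop */
    }

    /* Consumer (completion path): */
    priv->txbd_dirty = next;
    smp_mb();                           /* publish txbd_dirty before the test */
    if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
            netif_wake_queue(ndev);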
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 56fadbd4258a..416620fa8fac 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1697,7 +1697,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
 			   hwstat->tx_underruns +
 			   hwstat->tx_excessive_cols +
 			   hwstat->tx_late_cols);
-	nstat->multicast = hwstat->tx_multicast_pkts;
+	nstat->multicast = hwstat->rx_multicast_pkts;
 	nstat->collisions = hwstat->tx_total_cols;
 
 	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 662cf2222873..77f1ff7396ac 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -543,6 +543,25 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 	while ((processed < to_process) && (processed < budget)) {
 		cb = &priv->rx_cbs[priv->rx_read_ptr];
 		skb = cb->skb;
+
+		processed++;
+		priv->rx_read_ptr++;
+
+		if (priv->rx_read_ptr == priv->num_rx_bds)
+			priv->rx_read_ptr = 0;
+
+		/* We do not have a backing SKB, so we do not a corresponding
+		 * DMA mapping for this incoming packet since
+		 * bcm_sysport_rx_refill always either has both skb and mapping
+		 * or none.
+		 */
+		if (unlikely(!skb)) {
+			netif_err(priv, rx_err, ndev, "out of memory!\n");
+			ndev->stats.rx_dropped++;
+			ndev->stats.rx_errors++;
+			goto refill;
+		}
+
 		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
 				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
 
@@ -552,23 +571,11 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
 			  DESC_STATUS_MASK;
 
-		processed++;
-		priv->rx_read_ptr++;
-		if (priv->rx_read_ptr == priv->num_rx_bds)
-			priv->rx_read_ptr = 0;
-
 		netif_dbg(priv, rx_status, ndev,
 			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
 			  p_index, priv->rx_c_index, priv->rx_read_ptr,
 			  len, status);
 
-		if (unlikely(!skb)) {
-			netif_err(priv, rx_err, ndev, "out of memory!\n");
-			ndev->stats.rx_dropped++;
-			ndev->stats.rx_errors++;
-			goto refill;
-		}
-
 		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
 			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
 			ndev->stats.rx_dropped++;
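Note what actually moved: the processed count and rx_read_ptr now advance before the NULL-skb test, so a slot whose refill previously failed is still consumed and re-armed instead of being re-read forever, and dma_unmap_single() is only reached once an skb (and therefore a mapping) is known to exist. Condensed, using the driver's own names:

    cb = &priv->rx_cbs[priv->rx_read_ptr];
    skb = cb->skb;

    processed++;                                /* slot is consumed... */
    if (++priv->rx_read_ptr == priv->num_rx_bds)
            priv->rx_read_ptr = 0;

    if (unlikely(!skb))                         /* ...even without a backing skb */
            goto refill;                        /* re-arm the slot and move on */

    /* only now is it safe to unmap and process the packet */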
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a12c65604f9d..e8c601d26c64 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -875,6 +875,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 	int last_tx_cn, last_c_index, num_tx_bds;
 	struct enet_cb *tx_cb_ptr;
 	struct netdev_queue *txq;
+	unsigned int bds_compl;
 	unsigned int c_index;
 
 	/* Compute how many buffers are transmitted since last xmit call */
@@ -899,7 +900,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 	/* Reclaim transmitted buffers */
 	while (last_tx_cn-- > 0) {
 		tx_cb_ptr = ring->cbs + last_c_index;
+		bds_compl = 0;
 		if (tx_cb_ptr->skb) {
+			bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
 			dma_unmap_single(&dev->dev,
 					 dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -916,7 +919,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
 		}
 		dev->stats.tx_packets++;
-		ring->free_bds += 1;
+		ring->free_bds += bds_compl;
 
 		last_c_index++;
 		last_c_index &= (num_tx_bds - 1);
@@ -1274,12 +1277,29 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 
 	while ((rxpktprocessed < rxpkttoprocess) &&
 	       (rxpktprocessed < budget)) {
+		cb = &priv->rx_cbs[priv->rx_read_ptr];
+		skb = cb->skb;
+
+		rxpktprocessed++;
+
+		priv->rx_read_ptr++;
+		priv->rx_read_ptr &= (priv->num_rx_bds - 1);
+
+		/* We do not have a backing SKB, so we do not have a
+		 * corresponding DMA mapping for this incoming packet since
+		 * bcmgenet_rx_refill always either has both skb and mapping or
+		 * none.
+		 */
+		if (unlikely(!skb)) {
+			dev->stats.rx_dropped++;
+			dev->stats.rx_errors++;
+			goto refill;
+		}
+
 		/* Unmap the packet contents such that we can use the
 		 * RSV from the 64 bytes descriptor when enabled and save
 		 * a 32-bits register read
 		 */
-		cb = &priv->rx_cbs[priv->rx_read_ptr];
-		skb = cb->skb;
 		dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
 				 priv->rx_buf_len, DMA_FROM_DEVICE);
 
@@ -1307,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 			  __func__, p_index, priv->rx_c_index,
 			  priv->rx_read_ptr, dma_length_status);
 
-		rxpktprocessed++;
-
-		priv->rx_read_ptr++;
-		priv->rx_read_ptr &= (priv->num_rx_bds - 1);
-
-		/* out of memory, just drop packets at the hardware level */
-		if (unlikely(!skb)) {
-			dev->stats.rx_dropped++;
-			dev->stats.rx_errors++;
-			goto refill;
-		}
-
 		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
 			netif_err(priv, rx_status, dev,
 				  "dropping fragmented packet!\n");
@@ -1736,13 +1744,63 @@ static void bcmgenet_init_multiq(struct net_device *dev)
 	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
 }
 
+static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+{
+	int ret = 0;
+	int timeout = 0;
+	u32 reg;
+
+	/* Disable TDMA to stop add more frames in TX DMA */
+	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+	reg &= ~DMA_EN;
+	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+	/* Check TDMA status register to confirm TDMA is disabled */
+	while (timeout++ < DMA_TIMEOUT_VAL) {
+		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+		if (reg & DMA_DISABLED)
+			break;
+
+		udelay(1);
+	}
+
+	if (timeout == DMA_TIMEOUT_VAL) {
+		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
+		ret = -ETIMEDOUT;
+	}
+
+	/* Wait 10ms for packet drain in both tx and rx dma */
+	usleep_range(10000, 20000);
+
+	/* Disable RDMA */
+	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+	reg &= ~DMA_EN;
+	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+	timeout = 0;
+	/* Check RDMA status register to confirm RDMA is disabled */
+	while (timeout++ < DMA_TIMEOUT_VAL) {
+		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+		if (reg & DMA_DISABLED)
+			break;
+
+		udelay(1);
+	}
+
+	if (timeout == DMA_TIMEOUT_VAL) {
+		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
+		ret = -ETIMEDOUT;
+	}
+
+	return ret;
+}
+
 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
 	int i;
 
 	/* disable DMA */
-	bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
-	bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
+	bcmgenet_dma_teardown(priv);
 
 	for (i = 0; i < priv->num_tx_bds; i++) {
 		if (priv->tx_cbs[i].skb != NULL) {
@@ -2101,57 +2159,6 @@ err_clk_disable:
 	return ret;
 }
 
-static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
-{
-	int ret = 0;
-	int timeout = 0;
-	u32 reg;
-
-	/* Disable TDMA to stop add more frames in TX DMA */
-	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
-	reg &= ~DMA_EN;
-	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
-	/* Check TDMA status register to confirm TDMA is disabled */
-	while (timeout++ < DMA_TIMEOUT_VAL) {
-		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
-		if (reg & DMA_DISABLED)
-			break;
-
-		udelay(1);
-	}
-
-	if (timeout == DMA_TIMEOUT_VAL) {
-		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
-		ret = -ETIMEDOUT;
-	}
-
-	/* Wait 10ms for packet drain in both tx and rx dma */
-	usleep_range(10000, 20000);
-
-	/* Disable RDMA */
-	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
-	reg &= ~DMA_EN;
-	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
-	timeout = 0;
-	/* Check RDMA status register to confirm RDMA is disabled */
-	while (timeout++ < DMA_TIMEOUT_VAL) {
-		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
-		if (reg & DMA_DISABLED)
-			break;
-
-		udelay(1);
-	}
-
-	if (timeout == DMA_TIMEOUT_VAL) {
-		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
-		ret = -ETIMEDOUT;
-	}
-
-	return ret;
-}
-
 static void bcmgenet_netif_stop(struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
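bcmgenet_dma_teardown() itself is unchanged; it is hoisted above bcmgenet_fini_dma() so that fini can call it and wait for the engines to report DMA_DISABLED before buffers are unmapped, rather than just zeroing DMA_CTRL and hoping. The disable-then-poll idiom it uses is worth isolating (CTRL/STATUS/ENABLE/DISABLED here are illustrative names, not GENET's):

    /* Sketch of the disable-and-poll idiom. */
    u32 reg = readl(base + CTRL);

    writel(reg & ~ENABLE, base + CTRL);         /* request the engine to stop */

    for (timeout = 0; timeout < DMA_TIMEOUT_VAL; timeout++) {
            if (readl(base + STATUS) & DISABLED)
                    break;                      /* engine confirmed idle */
            udelay(1);
    }
    if (timeout == DMA_TIMEOUT_VAL)
            ret = -ETIMEDOUT;                   /* warn, but keep tearing down */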
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index cb77ae93d89a..e7d3a620d96a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7914,8 +7914,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	entry = tnapi->tx_prod;
 	base_flags = 0;
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		base_flags |= TXD_FLAG_TCPUDP_CSUM;
 
 	mss = skb_shinfo(skb)->gso_size;
 	if (mss) {
@@ -7929,6 +7927,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
 
+		/* HW/FW can not correctly segment packets that have been
+		 * vlan encapsulated.
+		 */
+		if (skb->protocol == htons(ETH_P_8021Q) ||
+		    skb->protocol == htons(ETH_P_8021AD))
+			return tg3_tso_bug(tp, tnapi, txq, skb);
+
 		if (!skb_is_gso_v6(skb)) {
 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
 			    tg3_flag(tp, TSO_BUG))
@@ -7979,6 +7984,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				base_flags |= tsflags << 12;
 			}
 		}
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		/* HW/FW can not correctly checksum packets that have been
+		 * vlan encapsulated.
+		 */
+		if (skb->protocol == htons(ETH_P_8021Q) ||
+		    skb->protocol == htons(ETH_P_8021AD)) {
+			if (skb_checksum_help(skb))
+				goto drop;
+		} else {
+			base_flags |= TXD_FLAG_TCPUDP_CSUM;
+		}
 	}
 
 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
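Both tg3 hunks implement the same policy: when the frame is VLAN-encapsulated (ETH_P_8021Q/ETH_P_8021AD) the hardware offload is not trusted, so GSO frames are segmented in software via tg3_tso_bug() and CHECKSUM_PARTIAL frames get their checksum finished in software; only otherwise is TXD_FLAG_TCPUDP_CSUM set, which is why the unconditional assignment at the top of tg3_start_xmit() had to go. The generic shape of the checksum fallback (hw_cannot_csum() and HW_CSUM_FLAG are placeholders, not tg3 symbols):

    if (skb->ip_summed == CHECKSUM_PARTIAL) {
            if (hw_cannot_csum(skb)) {              /* e.g. VLAN-tagged */
                    if (skb_checksum_help(skb))     /* compute csum in SW */
                            goto drop;              /* helper failed */
            } else {
                    desc_flags |= HW_CSUM_FLAG;     /* offload as usual */
            }
    }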
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index f56b95af3fcf..eeec49540233 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6478,6 +6478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct port_info *pi;
 	bool highdma = false;
 	struct adapter *adapter = NULL;
+	void __iomem *regs;
 
 	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
 
@@ -6494,19 +6495,35 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_release_regions;
 	}
 
+	regs = pci_ioremap_bar(pdev, 0);
+	if (!regs) {
+		dev_err(&pdev->dev, "cannot map device registers\n");
+		err = -ENOMEM;
+		goto out_disable_device;
+	}
+
+	/* We control everything through one PF */
+	func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
+	if (func != ent->driver_data) {
+		iounmap(regs);
+		pci_disable_device(pdev);
+		pci_save_state(pdev);	/* to restore SR-IOV later */
+		goto sriov;
+	}
+
 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 		highdma = true;
 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 		if (err) {
 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
 				"coherent allocations\n");
-			goto out_disable_device;
+			goto out_unmap_bar0;
 		}
 	} else {
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
 			dev_err(&pdev->dev, "no usable DMA configuration\n");
-			goto out_disable_device;
+			goto out_unmap_bar0;
 		}
 	}
 
@@ -6518,7 +6535,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
 	if (!adapter) {
 		err = -ENOMEM;
-		goto out_disable_device;
+		goto out_unmap_bar0;
 	}
 
 	adapter->workq = create_singlethread_workqueue("cxgb4");
@@ -6530,20 +6547,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* PCI device has been enabled */
 	adapter->flags |= DEV_ENABLED;
 
-	adapter->regs = pci_ioremap_bar(pdev, 0);
-	if (!adapter->regs) {
-		dev_err(&pdev->dev, "cannot map device registers\n");
-		err = -ENOMEM;
-		goto out_free_adapter;
-	}
-
-	/* We control everything through one PF */
-	func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI));
-	if (func != ent->driver_data) {
-		pci_save_state(pdev);	/* to restore SR-IOV later */
-		goto sriov;
-	}
-
+	adapter->regs = regs;
 	adapter->pdev = pdev;
 	adapter->pdev_dev = &pdev->dev;
 	adapter->mbox = func;
@@ -6560,7 +6564,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	err = t4_prep_adapter(adapter);
 	if (err)
-		goto out_unmap_bar0;
+		goto out_free_adapter;
+
 
 	if (!is_t4(adapter->params.chip)) {
 		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
@@ -6577,14 +6582,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			dev_err(&pdev->dev,
 				"Incorrect number of egress queues per page\n");
 			err = -EINVAL;
-			goto out_unmap_bar0;
+			goto out_free_adapter;
 		}
 		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
 			pci_resource_len(pdev, 2));
 		if (!adapter->bar2) {
 			dev_err(&pdev->dev, "cannot map device bar2 region\n");
 			err = -ENOMEM;
-			goto out_unmap_bar0;
+			goto out_free_adapter;
 		}
 	}
 
@@ -6722,13 +6727,13 @@ sriov:
 out_unmap_bar:
 	if (!is_t4(adapter->params.chip))
 		iounmap(adapter->bar2);
-out_unmap_bar0:
-	iounmap(adapter->regs);
 out_free_adapter:
 	if (adapter->workq)
 		destroy_workqueue(adapter->workq);
 
 	kfree(adapter);
+out_unmap_bar0:
+	iounmap(regs);
 out_disable_device:
 	pci_disable_pcie_error_reporting(pdev);
 	pci_disable_device(pdev);
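The probe reordering maps BAR0 and reads PL_WHOAMI before the adapter structure is allocated, so a probe on the wrong physical function can release what little it touched and jump to the SR-IOV path early; the error labels are then rearranged so teardown runs in exact reverse order of acquisition (bar2 -> adapter -> bar0 -> device). A stripped-down sketch of that label discipline (struct adapter stands in for any per-device state; not the full cxgb4 probe):

    static int probe_sketch(struct pci_dev *pdev)
    {
            void __iomem *regs;
            struct adapter *adapter;
            int err;

            err = pci_enable_device(pdev);          /* acquired 1st */
            if (err)
                    return err;

            regs = pci_ioremap_bar(pdev, 0);        /* acquired 2nd */
            if (!regs) {
                    err = -ENOMEM;
                    goto out_disable_device;
            }

            adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); /* acquired 3rd */
            if (!adapter) {
                    err = -ENOMEM;
                    goto out_unmap_bar0;
            }

            return 0;

    out_unmap_bar0:                                 /* released in reverse */
            iounmap(regs);
    out_disable_device:
            pci_disable_device(pdev);
            return err;
    }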
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 9b33057a9477..70089c29d307 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1399,7 +1399,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
 	const void *mac_addr;
 
 	if (!IS_ENABLED(CONFIG_OF) || !np)
-		return NULL;
+		return ERR_PTR(-ENXIO);
 
 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
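Returning ERR_PTR(-ENXIO) instead of NULL lets callers distinguish "this is not a DT-configured device" from an allocation failure, following the usual ERR_PTR()/IS_ERR()/PTR_ERR() convention. A caller might consume it like this (a sketch; the board-file fallback shown is illustrative, not quoted from dm9000_probe):

    pdata = dm9000_parse_dt(&pdev->dev);
    if (IS_ERR(pdata)) {
            int err = PTR_ERR(pdata);               /* e.g. -ENXIO: no DT node */

            pdata = dev_get_platdata(&pdev->dev);   /* legacy board data */
            if (!pdata)
                    return err;                     /* no configuration at all */
    }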
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 65a4a0f88ea0..02a2e90d581a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2389,6 +2389,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
 }
 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
 
+static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
+{
+	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
+			+ 1;
+	int max_port = min_port +
+		bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+
+	if (port < min_port)
+		port = min_port;
+	else if (port >= max_port)
+		port = max_port - 1;
+
+	return port;
+}
+
 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2402,6 +2418,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
 	if (slave < 0)
 		return -EINVAL;
 
+	port = mlx4_slaves_closest_port(dev, slave, port);
 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
 	s_info->mac = mac;
 	mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
@@ -2428,6 +2445,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
 	if (slave < 0)
 		return -EINVAL;
 
+	port = mlx4_slaves_closest_port(dev, slave, port);
 	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
 
 	if ((0 == vlan) && (0 == qos))
@@ -2455,6 +2473,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
 	struct mlx4_priv *priv;
 
 	priv = mlx4_priv(dev);
+	port = mlx4_slaves_closest_port(dev, slave, port);
 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 
 	if (MLX4_VGT != vp_oper->state.default_vlan) {
@@ -2482,6 +2501,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
 	if (slave < 0)
 		return -EINVAL;
 
+	port = mlx4_slaves_closest_port(dev, slave, port);
 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
 	s_info->spoofchk = setting;
 
@@ -2535,6 +2555,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
 	if (slave < 0)
 		return -EINVAL;
 
+	port = mlx4_slaves_closest_port(dev, slave, port);
 	switch (link_state) {
 	case IFLA_VF_LINK_STATE_AUTO:
 		/* get current link state */
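mlx4_slaves_closest_port() makes every VF-configuration entry point tolerate a port number outside the slave's active-port range by clamping it to the nearest valid port. A worked example of the arithmetic, with illustrative values:

    /* dev->caps.num_ports = 2, actv_ports.ports = 0b10 (port 2 only):
     *   min_port = find_first_bit(0b10) + 1       = 2
     *   max_port = min_port + bitmap_weight(0b10) = 3
     * port 1 -> raised to 2; port 2 -> unchanged; port 3 -> 3 >= 3 -> 2.
     */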
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index e22f24f784fc..35ff2925110a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -487,6 +487,9 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
 
+	if (pause->autoneg)
+		return -EINVAL;
+
 	priv->prof->tx_pause = pause->tx_pause != 0;
 	priv->prof->rx_pause = pause->rx_pause != 0;
 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 0dc31d85fc3b..2301365c79c7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -390,13 +390,14 @@ err_icm:
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
 #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
 		   enum mlx4_update_qp_attr attr,
 		   struct mlx4_update_qp_params *params)
 {
 	struct mlx4_cmd_mailbox *mailbox;
 	struct mlx4_update_qp_context *cmd;
 	u64 pri_addr_path_mask = 0;
+	u64 qp_mask = 0;
 	int err = 0;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
 		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
 	}
 
+	if (attr & MLX4_UPDATE_QP_VSD) {
+		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
+		if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
+			cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
+	}
+
 	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+	cmd->qp_mask = cpu_to_be64(qp_mask);
 
-	err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+	err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
 		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
 		       MLX4_CMD_NATIVE);
 
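Two things change contract here: mlx4_update_qp() now takes a raw qpn, so callers such as the resource tracker below can use it without holding a struct mlx4_qp, and the new MLX4_UPDATE_QP_VSD attribute is advertised to firmware through qp_mask. Note the mask is built as 1ULL << MLX4_UPD_QP_MASK_VSD because qp_mask is bit-indexed by the enum rather than holding the enum value; the matching one-line fix in mlx4_vf_immed_vlan_work_handler() below corrects a spot that had written the raw value. Illustratively (the enum value 2 is made up):

    /* if MLX4_UPD_QP_MASK_VSD == 2 (illustrative):                        */
    cpu_to_be64(MLX4_UPD_QP_MASK_VSD);         /* sets bit 1  - wrong bit  */
    cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD); /* sets bit 2  - intended   */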
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1089367fed22..5d2498dcf536 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
 	struct mlx4_qp_context *qpc = inbox->buf + 8;
 	struct mlx4_vport_oper_state *vp_oper;
 	struct mlx4_priv *priv;
+	u32 qp_type;
 	int port;
 
 	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
 	priv = mlx4_priv(dev);
 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+	qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
 
 	if (MLX4_VGT != vp_oper->state.default_vlan) {
 		/* the reserved QPs (special, proxy, tunnel)
@@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
 		if (mlx4_is_qp_reserved(dev, qpn))
 			return 0;
 
-		/* force strip vlan by clear vsd */
-		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
+		if (qp_type == MLX4_QP_ST_UD ||
+		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
+			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
+				*(__be32 *)inbox->buf =
+					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
+					MLX4_QP_OPTPAR_VLAN_STRIPPING);
+				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+			} else {
+				struct mlx4_update_qp_params params = {.flags = 0};
+
+				mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+			}
+		}
 
 		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
 		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
@@ -3998,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
 	}
 
 	port = (rqp->sched_queue >> 6 & 1) + 1;
-	smac_index = cmd->qp_context.pri_path.grh_mylmc;
-	err = mac_find_smac_ix_in_slave(dev, slave, port,
-					smac_index, &mac);
-	if (err) {
-		mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
-			 qpn, smac_index);
-		goto err_mac;
+
+	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
+		smac_index = cmd->qp_context.pri_path.grh_mylmc;
+		err = mac_find_smac_ix_in_slave(dev, slave, port,
+						smac_index, &mac);
+
+		if (err) {
+			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+				 qpn, smac_index);
+			goto err_mac;
+		}
 	}
 
 	err = mlx4_cmd(dev, inbox->dma,
@@ -4818,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
 
 	upd_context = mailbox->buf;
-	upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
+	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
 
 	spin_lock_irq(mlx4_tlock(dev));
 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 979c6980639f..a42293092ea4 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -290,9 +290,11 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
 		/* Read the hardware TX timestamp if one was recorded */
 		if (unlikely(re.s.tstamp)) {
 			struct skb_shared_hwtstamps ts;
+			u64 ns;
+
 			memset(&ts, 0, sizeof(ts));
 			/* Read the timestamp */
-			u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
+			ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
 			/* Remove the timestamp from the FIFO */
 			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
 			/* Tell the kernel about the timestamp */
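This is purely a build fix: the kernel compiles with -Wdeclaration-after-statement, so a declaration such as u64 ns may not follow the memset() statement inside the block. The mechanical transformation, with hypothetical foo()/bar():

    /* rejected under -Wdeclaration-after-statement: */
    foo();
    u64 ns = bar();     /* declaration after a statement */

    /* accepted: declaration hoisted, initialization stays in place */
    u64 ns;

    foo();
    ns = bar();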
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 44c8be1c6805..5f7a35212796 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -7,6 +7,7 @@ config PCH_GBE
 	depends on PCI && (X86_32 || COMPILE_TEST)
 	select MII
 	select PTP_1588_CLOCK_PCH
+	select NET_PTP_CLASSIFY
 	---help---
 	  This is a gigabit ethernet driver for EG20T PCH.
 	  EG20T PCH is the platform controller hub that is used in Intel's
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 02dd92ac1764..1d81238fcb93 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1847,33 +1847,31 @@ static void __rtl8169_set_features(struct net_device *dev,
 				   netdev_features_t features)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	netdev_features_t changed = features ^ dev->features;
 	void __iomem *ioaddr = tp->mmio_addr;
+	u32 rx_config;
 
-	if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
-			 NETIF_F_HW_VLAN_CTAG_RX)))
-		return;
+	rx_config = RTL_R32(RxConfig);
+	if (features & NETIF_F_RXALL)
+		rx_config |= (AcceptErr | AcceptRunt);
+	else
+		rx_config &= ~(AcceptErr | AcceptRunt);
 
-	if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
-		if (features & NETIF_F_RXCSUM)
-			tp->cp_cmd |= RxChkSum;
-		else
-			tp->cp_cmd &= ~RxChkSum;
+	RTL_W32(RxConfig, rx_config);
 
-		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
-			tp->cp_cmd |= RxVlan;
-		else
-			tp->cp_cmd &= ~RxVlan;
+	if (features & NETIF_F_RXCSUM)
+		tp->cp_cmd |= RxChkSum;
+	else
+		tp->cp_cmd &= ~RxChkSum;
 
-		RTL_W16(CPlusCmd, tp->cp_cmd);
-		RTL_R16(CPlusCmd);
-	}
-	if (changed & NETIF_F_RXALL) {
-		int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
-		if (features & NETIF_F_RXALL)
-			tmp |= (AcceptErr | AcceptRunt);
-		RTL_W32(RxConfig, tmp);
-	}
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		tp->cp_cmd |= RxVlan;
+	else
+		tp->cp_cmd &= ~RxVlan;
+
+	tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum);
+
+	RTL_W16(CPlusCmd, tp->cp_cmd);
+	RTL_R16(CPlusCmd);
 }
 
 static int rtl8169_set_features(struct net_device *dev,
@@ -1881,8 +1879,11 @@ static int rtl8169_set_features(struct net_device *dev,
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 
+	features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
+
 	rtl_lock_work(tp);
-	__rtl8169_set_features(dev, features);
+	if (features ^ dev->features)
+		__rtl8169_set_features(dev, features);
 	rtl_unlock_work(tp);
 
 	return 0;
@@ -7531,8 +7532,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
 	}
 }
 
-static int
-rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
 	const unsigned int region = cfg->region;
@@ -7607,7 +7607,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_mwi_2;
 	}
 
-	tp->cp_cmd = RxChkSum;
+	tp->cp_cmd = 0;
 
 	if ((sizeof(dma_addr_t) > 4) &&
 	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -7648,13 +7648,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_master(pdev);
 
-	/*
-	 * Pretend we are using VLANs; This bypasses a nasty bug where
-	 * Interrupts stop flowing on high load on 8110SCd controllers.
-	 */
-	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-		tp->cp_cmd |= RxVlan;
-
 	rtl_init_mdio_ops(tp);
 	rtl_init_pll_power_ops(tp);
 	rtl_init_jumbo_ops(tp);
@@ -7738,8 +7731,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
 		NETIF_F_HIGHDMA;
 
+	tp->cp_cmd |= RxChkSum | RxVlan;
+
+	/*
+	 * Pretend we are using VLANs; This bypasses a nasty bug where
+	 * Interrupts stop flowing on high load on 8110SCd controllers.
+	 */
 	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-		/* 8110SCd requires hardware Rx VLAN - disallow toggling */
+		/* Disallow toggling */
 		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
 
 	if (tp->txd_version == RTL_TD_0)
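The rework replaces per-bit "did it change?" logic with a single unconditional programming pass over the three features this path owns; rtl8169_set_features() masks the request down to those bits and skips the hardware writes entirely when nothing changed. Incidentally this also retires the old branch that tested dev->features (the pre-change set) where it meant features. The resulting ndo_set_features shape, generically (HANDLED_FEATURES and program_hw() are placeholders):

    static int sketch_set_features(struct net_device *dev,
                                   netdev_features_t features)
    {
            features &= HANDLED_FEATURES;       /* only bits we program */

            if (features ^ dev->features)       /* anything to do? */
                    program_hw(dev, features);  /* write the full state once */

            return 0;
    }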
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 0537381cd2f6..6859437b59fb 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2933,6 +2933,9 @@ void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
 	u32 crc;
 	int bit;
 
+	if (!efx_dev_registered(efx))
+		return;
+
 	netif_addr_lock_bh(net_dev);
 
 	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 763cdfc228be..edb860947da4 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -360,14 +360,17 @@ static int vnet_walk_rx_one(struct vnet_port *port,
 	if (IS_ERR(desc))
 		return PTR_ERR(desc);
 
+	if (desc->hdr.state != VIO_DESC_READY)
+		return 1;
+
+	rmb();
+
 	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
 	       desc->hdr.state, desc->hdr.ack,
 	       desc->size, desc->ncookies,
 	       desc->cookies[0].cookie_addr,
 	       desc->cookies[0].cookie_size);
 
-	if (desc->hdr.state != VIO_DESC_READY)
-		return 1;
 	err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
 	if (err == -ECONNRESET)
 		return err;
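The descriptor-state test is hoisted above the viodbg() dump and followed by an rmb(): once hdr.state is observed as VIO_DESC_READY, the read barrier prevents the CPU from using descriptor payload fields (size, cookies) that it speculatively loaded before the device published them. The consumer-side shape, with process() as a placeholder:

    if (desc->hdr.state != VIO_DESC_READY)
            return 1;               /* nothing published yet */

    rmb();                          /* payload loads must not be satisfied
                                     * ahead of the state load that saw READY */

    process(desc->size, desc->cookies, desc->ncookies);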
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 5c3f1f3ad16f..45ba50e4eaec 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -701,6 +701,28 @@ static void cpsw_rx_handler(void *token, int len, int status)
 	cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
 
 	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
+		bool ndev_status = false;
+		struct cpsw_slave *slave = priv->slaves;
+		int n;
+
+		if (priv->data.dual_emac) {
+			/* In dual emac mode check for all interfaces */
+			for (n = priv->data.slaves; n; n--, slave++)
+				if (netif_running(slave->ndev))
+					ndev_status = true;
+		}
+
+		if (ndev_status && (status >= 0)) {
+			/* The packet received is for the interface which
+			 * is already down and the other interface is up
+			 * and running, intead of freeing which results
+			 * in reducing of the number of rx descriptor in
+			 * DMA engine, requeue skb back to cpdma.
+			 */
+			new_skb = skb;
+			goto requeue;
+		}
+
 		/* the interface is going down, skbs are purged */
 		dev_kfree_skb_any(skb);
 		return;
@@ -719,6 +741,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
 		new_skb = skb;
 	}
 
+requeue:
 	ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
 				skb_tailroom(new_skb), 0);
 	if (WARN_ON(ret < 0))
@@ -2354,10 +2377,19 @@ static int cpsw_suspend(struct device *dev)
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct cpsw_priv *priv = netdev_priv(ndev);
 
-	if (netif_running(ndev))
-		cpsw_ndo_stop(ndev);
+	if (priv->data.dual_emac) {
+		int i;
 
-	for_each_slave(priv, soft_reset_slave);
+		for (i = 0; i < priv->data.slaves; i++) {
+			if (netif_running(priv->slaves[i].ndev))
+				cpsw_ndo_stop(priv->slaves[i].ndev);
+			soft_reset_slave(priv->slaves + i);
+		}
+	} else {
+		if (netif_running(ndev))
+			cpsw_ndo_stop(ndev);
+		for_each_slave(priv, soft_reset_slave);
+	}
 
 	pm_runtime_put_sync(&pdev->dev);
 
@@ -2371,14 +2403,24 @@ static int cpsw_resume(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct cpsw_priv *priv = netdev_priv(ndev);
 
 	pm_runtime_get_sync(&pdev->dev);
 
 	/* Select default pin state */
 	pinctrl_pm_select_default_state(&pdev->dev);
 
-	if (netif_running(ndev))
-		cpsw_ndo_open(ndev);
+	if (priv->data.dual_emac) {
+		int i;
+
+		for (i = 0; i < priv->data.slaves; i++) {
+			if (netif_running(priv->slaves[i].ndev))
+				cpsw_ndo_open(priv->slaves[i].ndev);
+		}
+	} else {
+		if (netif_running(ndev))
+			cpsw_ndo_open(ndev);
+	}
 	return 0;
 }
 