diff options
Diffstat (limited to 'drivers/net/ethernet/broadcom')
26 files changed, 1544 insertions, 567 deletions
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 3e488094b073..7dcfb19a31c8 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig | |||
| @@ -72,23 +72,23 @@ config BCMGENET | |||
| 72 | Broadcom BCM7xxx Set Top Box family chipset. | 72 | Broadcom BCM7xxx Set Top Box family chipset. |
| 73 | 73 | ||
| 74 | config BNX2 | 74 | config BNX2 |
| 75 | tristate "Broadcom NetXtremeII support" | 75 | tristate "QLogic NetXtremeII support" |
| 76 | depends on PCI | 76 | depends on PCI |
| 77 | select CRC32 | 77 | select CRC32 |
| 78 | select FW_LOADER | 78 | select FW_LOADER |
| 79 | ---help--- | 79 | ---help--- |
| 80 | This driver supports Broadcom NetXtremeII gigabit Ethernet cards. | 80 | This driver supports QLogic NetXtremeII gigabit Ethernet cards. |
| 81 | 81 | ||
| 82 | To compile this driver as a module, choose M here: the module | 82 | To compile this driver as a module, choose M here: the module |
| 83 | will be called bnx2. This is recommended. | 83 | will be called bnx2. This is recommended. |
| 84 | 84 | ||
| 85 | config CNIC | 85 | config CNIC |
| 86 | tristate "Broadcom CNIC support" | 86 | tristate "QLogic CNIC support" |
| 87 | depends on PCI | 87 | depends on PCI |
| 88 | select BNX2 | 88 | select BNX2 |
| 89 | select UIO | 89 | select UIO |
| 90 | ---help--- | 90 | ---help--- |
| 91 | This driver supports offload features of Broadcom NetXtremeII | 91 | This driver supports offload features of QLogic NetXtremeII |
| 92 | gigabit Ethernet cards. | 92 | gigabit Ethernet cards. |
| 93 | 93 | ||
| 94 | To compile this driver as a module, choose M here: the module | 94 | To compile this driver as a module, choose M here: the module |
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index ca5a20a48b14..4a7028d65912 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c | |||
| @@ -105,7 +105,7 @@ MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value"); | |||
| 105 | 105 | ||
| 106 | 106 | ||
| 107 | #ifdef CONFIG_B44_PCI | 107 | #ifdef CONFIG_B44_PCI |
| 108 | static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = { | 108 | static const struct pci_device_id b44_pci_tbl[] = { |
| 109 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) }, | 109 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) }, |
| 110 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) }, | 110 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) }, |
| 111 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) }, | 111 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) }, |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 5776e503e4c5..6f4e18644bd4 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
| @@ -81,14 +81,14 @@ static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv, | |||
| 81 | { | 81 | { |
| 82 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | 82 | #ifdef CONFIG_PHYS_ADDR_T_64BIT |
| 83 | __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK, | 83 | __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK, |
| 84 | d + DESC_ADDR_HI_STATUS_LEN); | 84 | d + DESC_ADDR_HI_STATUS_LEN); |
| 85 | #endif | 85 | #endif |
| 86 | __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO); | 86 | __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO); |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, | 89 | static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, |
| 90 | struct dma_desc *desc, | 90 | struct dma_desc *desc, |
| 91 | unsigned int port) | 91 | unsigned int port) |
| 92 | { | 92 | { |
| 93 | /* Ports are latched, so write upper address first */ | 93 | /* Ports are latched, so write upper address first */ |
| 94 | tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port)); | 94 | tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port)); |
| @@ -108,7 +108,7 @@ static int bcm_sysport_set_settings(struct net_device *dev, | |||
| 108 | } | 108 | } |
| 109 | 109 | ||
| 110 | static int bcm_sysport_get_settings(struct net_device *dev, | 110 | static int bcm_sysport_get_settings(struct net_device *dev, |
| 111 | struct ethtool_cmd *cmd) | 111 | struct ethtool_cmd *cmd) |
| 112 | { | 112 | { |
| 113 | struct bcm_sysport_priv *priv = netdev_priv(dev); | 113 | struct bcm_sysport_priv *priv = netdev_priv(dev); |
| 114 | 114 | ||
| @@ -119,14 +119,14 @@ static int bcm_sysport_get_settings(struct net_device *dev, | |||
| 119 | } | 119 | } |
| 120 | 120 | ||
| 121 | static int bcm_sysport_set_rx_csum(struct net_device *dev, | 121 | static int bcm_sysport_set_rx_csum(struct net_device *dev, |
| 122 | netdev_features_t wanted) | 122 | netdev_features_t wanted) |
| 123 | { | 123 | { |
| 124 | struct bcm_sysport_priv *priv = netdev_priv(dev); | 124 | struct bcm_sysport_priv *priv = netdev_priv(dev); |
| 125 | u32 reg; | 125 | u32 reg; |
| 126 | 126 | ||
| 127 | priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM); | 127 | priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); |
| 128 | reg = rxchk_readl(priv, RXCHK_CONTROL); | 128 | reg = rxchk_readl(priv, RXCHK_CONTROL); |
| 129 | if (priv->rx_csum_en) | 129 | if (priv->rx_chk_en) |
| 130 | reg |= RXCHK_EN; | 130 | reg |= RXCHK_EN; |
| 131 | else | 131 | else |
| 132 | reg &= ~RXCHK_EN; | 132 | reg &= ~RXCHK_EN; |
| @@ -134,7 +134,7 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev, | |||
| 134 | /* If UniMAC forwards CRC, we need to skip over it to get | 134 | /* If UniMAC forwards CRC, we need to skip over it to get |
| 135 | * a valid CHK bit to be set in the per-packet status word | 135 | * a valid CHK bit to be set in the per-packet status word |
| 136 | */ | 136 | */ |
| 137 | if (priv->rx_csum_en && priv->crc_fwd) | 137 | if (priv->rx_chk_en && priv->crc_fwd) |
| 138 | reg |= RXCHK_SKIP_FCS; | 138 | reg |= RXCHK_SKIP_FCS; |
| 139 | else | 139 | else |
| 140 | reg &= ~RXCHK_SKIP_FCS; | 140 | reg &= ~RXCHK_SKIP_FCS; |
| @@ -145,7 +145,7 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev, | |||
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | static int bcm_sysport_set_tx_csum(struct net_device *dev, | 147 | static int bcm_sysport_set_tx_csum(struct net_device *dev, |
| 148 | netdev_features_t wanted) | 148 | netdev_features_t wanted) |
| 149 | { | 149 | { |
| 150 | struct bcm_sysport_priv *priv = netdev_priv(dev); | 150 | struct bcm_sysport_priv *priv = netdev_priv(dev); |
| 151 | u32 reg; | 151 | u32 reg; |
| @@ -165,7 +165,7 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev, | |||
| 165 | } | 165 | } |
| 166 | 166 | ||
| 167 | static int bcm_sysport_set_features(struct net_device *dev, | 167 | static int bcm_sysport_set_features(struct net_device *dev, |
| 168 | netdev_features_t features) | 168 | netdev_features_t features) |
| 169 | { | 169 | { |
| 170 | netdev_features_t changed = features ^ dev->features; | 170 | netdev_features_t changed = features ^ dev->features; |
| 171 | netdev_features_t wanted = dev->wanted_features; | 171 | netdev_features_t wanted = dev->wanted_features; |
| @@ -261,7 +261,7 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { | |||
| 261 | /* RXCHK misc statistics */ | 261 | /* RXCHK misc statistics */ |
| 262 | STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR), | 262 | STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR), |
| 263 | STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc, | 263 | STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc, |
| 264 | RXCHK_OTHER_DISC_CNTR), | 264 | RXCHK_OTHER_DISC_CNTR), |
| 265 | /* RBUF misc statistics */ | 265 | /* RBUF misc statistics */ |
| 266 | STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), | 266 | STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), |
| 267 | STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), | 267 | STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), |
| @@ -270,7 +270,7 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { | |||
| 270 | #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) | 270 | #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) |
| 271 | 271 | ||
| 272 | static void bcm_sysport_get_drvinfo(struct net_device *dev, | 272 | static void bcm_sysport_get_drvinfo(struct net_device *dev, |
| 273 | struct ethtool_drvinfo *info) | 273 | struct ethtool_drvinfo *info) |
| 274 | { | 274 | { |
| 275 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); | 275 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); |
| 276 | strlcpy(info->version, "0.1", sizeof(info->version)); | 276 | strlcpy(info->version, "0.1", sizeof(info->version)); |
| @@ -303,7 +303,7 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) | |||
| 303 | } | 303 | } |
| 304 | 304 | ||
| 305 | static void bcm_sysport_get_strings(struct net_device *dev, | 305 | static void bcm_sysport_get_strings(struct net_device *dev, |
| 306 | u32 stringset, u8 *data) | 306 | u32 stringset, u8 *data) |
| 307 | { | 307 | { |
| 308 | int i; | 308 | int i; |
| 309 | 309 | ||
| @@ -311,8 +311,8 @@ static void bcm_sysport_get_strings(struct net_device *dev, | |||
| 311 | case ETH_SS_STATS: | 311 | case ETH_SS_STATS: |
| 312 | for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { | 312 | for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { |
| 313 | memcpy(data + i * ETH_GSTRING_LEN, | 313 | memcpy(data + i * ETH_GSTRING_LEN, |
| 314 | bcm_sysport_gstrings_stats[i].stat_string, | 314 | bcm_sysport_gstrings_stats[i].stat_string, |
| 315 | ETH_GSTRING_LEN); | 315 | ETH_GSTRING_LEN); |
| 316 | } | 316 | } |
| 317 | break; | 317 | break; |
| 318 | default: | 318 | default: |
| @@ -362,7 +362,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) | |||
| 362 | } | 362 | } |
| 363 | 363 | ||
| 364 | static void bcm_sysport_get_stats(struct net_device *dev, | 364 | static void bcm_sysport_get_stats(struct net_device *dev, |
| 365 | struct ethtool_stats *stats, u64 *data) | 365 | struct ethtool_stats *stats, u64 *data) |
| 366 | { | 366 | { |
| 367 | struct bcm_sysport_priv *priv = netdev_priv(dev); | 367 | struct bcm_sysport_priv *priv = netdev_priv(dev); |
| 368 | int i; | 368 | int i; |
| @@ -384,6 +384,64 @@ static void bcm_sysport_get_stats(struct net_device *dev, | |||
| 384 | } | 384 | } |
| 385 | } | 385 | } |
| 386 | 386 | ||
| 387 | static void bcm_sysport_get_wol(struct net_device *dev, | ||
| 388 | struct ethtool_wolinfo *wol) | ||
| 389 | { | ||
| 390 | struct bcm_sysport_priv *priv = netdev_priv(dev); | ||
| 391 | u32 reg; | ||
| 392 | |||
| 393 | wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE; | ||
| 394 | wol->wolopts = priv->wolopts; | ||
| 395 | |||
| 396 | if (!(priv->wolopts & WAKE_MAGICSECURE)) | ||
| 397 | return; | ||
| 398 | |||
| 399 | /* Return the programmed SecureOn password */ | ||
| 400 | reg = umac_readl(priv, UMAC_PSW_MS); | ||
| 401 | put_unaligned_be16(reg, &wol->sopass[0]); | ||
| 402 | reg = umac_readl(priv, UMAC_PSW_LS); | ||
| 403 | put_unaligned_be32(reg, &wol->sopass[2]); | ||
| 404 | } | ||
| 405 | |||
| 406 | static int bcm_sysport_set_wol(struct net_device *dev, | ||
| 407 | struct ethtool_wolinfo *wol) | ||
| 408 | { | ||
| 409 | struct bcm_sysport_priv *priv = netdev_priv(dev); | ||
| 410 | struct device *kdev = &priv->pdev->dev; | ||
| 411 | u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE; | ||
| 412 | |||
| 413 | if (!device_can_wakeup(kdev)) | ||
| 414 | return -ENOTSUPP; | ||
| 415 | |||
| 416 | if (wol->wolopts & ~supported) | ||
| 417 | return -EINVAL; | ||
| 418 | |||
| 419 | /* Program the SecureOn password */ | ||
| 420 | if (wol->wolopts & WAKE_MAGICSECURE) { | ||
| 421 | umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), | ||
| 422 | UMAC_PSW_MS); | ||
| 423 | umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), | ||
| 424 | UMAC_PSW_LS); | ||
| 425 | } | ||
| 426 | |||
| 427 | /* Flag the device and relevant IRQ as wakeup capable */ | ||
| 428 | if (wol->wolopts) { | ||
| 429 | device_set_wakeup_enable(kdev, 1); | ||
| 430 | enable_irq_wake(priv->wol_irq); | ||
| 431 | priv->wol_irq_disabled = 0; | ||
| 432 | } else { | ||
| 433 | device_set_wakeup_enable(kdev, 0); | ||
| 434 | /* Avoid unbalanced disable_irq_wake calls */ | ||
| 435 | if (!priv->wol_irq_disabled) | ||
| 436 | disable_irq_wake(priv->wol_irq); | ||
| 437 | priv->wol_irq_disabled = 1; | ||
| 438 | } | ||
| 439 | |||
| 440 | priv->wolopts = wol->wolopts; | ||
| 441 | |||
| 442 | return 0; | ||
| 443 | } | ||
| 444 | |||
| 387 | static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb) | 445 | static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb) |
| 388 | { | 446 | { |
| 389 | dev_kfree_skb_any(cb->skb); | 447 | dev_kfree_skb_any(cb->skb); |
| @@ -406,7 +464,7 @@ static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, | |||
| 406 | } | 464 | } |
| 407 | 465 | ||
| 408 | mapping = dma_map_single(kdev, cb->skb->data, | 466 | mapping = dma_map_single(kdev, cb->skb->data, |
| 409 | RX_BUF_LENGTH, DMA_FROM_DEVICE); | 467 | RX_BUF_LENGTH, DMA_FROM_DEVICE); |
| 410 | ret = dma_mapping_error(kdev, mapping); | 468 | ret = dma_mapping_error(kdev, mapping); |
| 411 | if (ret) { | 469 | if (ret) { |
| 412 | bcm_sysport_free_cb(cb); | 470 | bcm_sysport_free_cb(cb); |
| @@ -470,22 +528,20 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, | |||
| 470 | to_process = p_index - priv->rx_c_index; | 528 | to_process = p_index - priv->rx_c_index; |
| 471 | 529 | ||
| 472 | netif_dbg(priv, rx_status, ndev, | 530 | netif_dbg(priv, rx_status, ndev, |
| 473 | "p_index=%d rx_c_index=%d to_process=%d\n", | 531 | "p_index=%d rx_c_index=%d to_process=%d\n", |
| 474 | p_index, priv->rx_c_index, to_process); | 532 | p_index, priv->rx_c_index, to_process); |
| 475 | |||
| 476 | while ((processed < to_process) && | ||
| 477 | (processed < budget)) { | ||
| 478 | 533 | ||
| 534 | while ((processed < to_process) && (processed < budget)) { | ||
| 479 | cb = &priv->rx_cbs[priv->rx_read_ptr]; | 535 | cb = &priv->rx_cbs[priv->rx_read_ptr]; |
| 480 | skb = cb->skb; | 536 | skb = cb->skb; |
| 481 | dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), | 537 | dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), |
| 482 | RX_BUF_LENGTH, DMA_FROM_DEVICE); | 538 | RX_BUF_LENGTH, DMA_FROM_DEVICE); |
| 483 | 539 | ||
| 484 | /* Extract the Receive Status Block prepended */ | 540 | /* Extract the Receive Status Block prepended */ |
| 485 | rsb = (struct bcm_rsb *)skb->data; | 541 | rsb = (struct bcm_rsb *)skb->data; |
| 486 | len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK; | 542 | len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK; |
| 487 | status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & | 543 | status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & |
| 488 | DESC_STATUS_MASK; | 544 | DESC_STATUS_MASK; |
| 489 | 545 | ||
| 490 | processed++; | 546 | processed++; |
| 491 | priv->rx_read_ptr++; | 547 | priv->rx_read_ptr++; |
| @@ -493,9 +549,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, | |||
| 493 | priv->rx_read_ptr = 0; | 549 | priv->rx_read_ptr = 0; |
| 494 | 550 | ||
| 495 | netif_dbg(priv, rx_status, ndev, | 551 | netif_dbg(priv, rx_status, ndev, |
| 496 | "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", | 552 | "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", |
| 497 | p_index, priv->rx_c_index, priv->rx_read_ptr, | 553 | p_index, priv->rx_c_index, priv->rx_read_ptr, |
| 498 | len, status); | 554 | len, status); |
| 499 | 555 | ||
| 500 | if (unlikely(!skb)) { | 556 | if (unlikely(!skb)) { |
| 501 | netif_err(priv, rx_err, ndev, "out of memory!\n"); | 557 | netif_err(priv, rx_err, ndev, "out of memory!\n"); |
| @@ -554,9 +610,9 @@ refill: | |||
| 554 | } | 610 | } |
| 555 | 611 | ||
| 556 | static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv, | 612 | static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv, |
| 557 | struct bcm_sysport_cb *cb, | 613 | struct bcm_sysport_cb *cb, |
| 558 | unsigned int *bytes_compl, | 614 | unsigned int *bytes_compl, |
| 559 | unsigned int *pkts_compl) | 615 | unsigned int *pkts_compl) |
| 560 | { | 616 | { |
| 561 | struct device *kdev = &priv->pdev->dev; | 617 | struct device *kdev = &priv->pdev->dev; |
| 562 | struct net_device *ndev = priv->netdev; | 618 | struct net_device *ndev = priv->netdev; |
| @@ -565,8 +621,8 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv, | |||
| 565 | ndev->stats.tx_bytes += cb->skb->len; | 621 | ndev->stats.tx_bytes += cb->skb->len; |
| 566 | *bytes_compl += cb->skb->len; | 622 | *bytes_compl += cb->skb->len; |
| 567 | dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), | 623 | dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), |
| 568 | dma_unmap_len(cb, dma_len), | 624 | dma_unmap_len(cb, dma_len), |
| 569 | DMA_TO_DEVICE); | 625 | DMA_TO_DEVICE); |
| 570 | ndev->stats.tx_packets++; | 626 | ndev->stats.tx_packets++; |
| 571 | (*pkts_compl)++; | 627 | (*pkts_compl)++; |
| 572 | bcm_sysport_free_cb(cb); | 628 | bcm_sysport_free_cb(cb); |
| @@ -574,7 +630,7 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv, | |||
| 574 | } else if (dma_unmap_addr(cb, dma_addr)) { | 630 | } else if (dma_unmap_addr(cb, dma_addr)) { |
| 575 | ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len); | 631 | ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len); |
| 576 | dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr), | 632 | dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr), |
| 577 | dma_unmap_len(cb, dma_len), DMA_TO_DEVICE); | 633 | dma_unmap_len(cb, dma_len), DMA_TO_DEVICE); |
| 578 | dma_unmap_addr_set(cb, dma_addr, 0); | 634 | dma_unmap_addr_set(cb, dma_addr, 0); |
| 579 | } | 635 | } |
| 580 | } | 636 | } |
| @@ -608,8 +664,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, | |||
| 608 | last_tx_cn = num_tx_cbs - last_c_index + c_index; | 664 | last_tx_cn = num_tx_cbs - last_c_index + c_index; |
| 609 | 665 | ||
| 610 | netif_dbg(priv, tx_done, ndev, | 666 | netif_dbg(priv, tx_done, ndev, |
| 611 | "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", | 667 | "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", |
| 612 | ring->index, c_index, last_tx_cn, last_c_index); | 668 | ring->index, c_index, last_tx_cn, last_c_index); |
| 613 | 669 | ||
| 614 | while (last_tx_cn-- > 0) { | 670 | while (last_tx_cn-- > 0) { |
| 615 | cb = ring->cbs + last_c_index; | 671 | cb = ring->cbs + last_c_index; |
| @@ -626,8 +682,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, | |||
| 626 | netif_tx_wake_queue(txq); | 682 | netif_tx_wake_queue(txq); |
| 627 | 683 | ||
| 628 | netif_dbg(priv, tx_done, ndev, | 684 | netif_dbg(priv, tx_done, ndev, |
| 629 | "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n", | 685 | "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n", |
| 630 | ring->index, ring->c_index, pkts_compl, bytes_compl); | 686 | ring->index, ring->c_index, pkts_compl, bytes_compl); |
| 631 | 687 | ||
| 632 | return pkts_compl; | 688 | return pkts_compl; |
| 633 | } | 689 | } |
| @@ -692,6 +748,20 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget) | |||
| 692 | return work_done; | 748 | return work_done; |
| 693 | } | 749 | } |
| 694 | 750 | ||
| 751 | static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) | ||
| 752 | { | ||
| 753 | u32 reg; | ||
| 754 | |||
| 755 | /* Stop monitoring MPD interrupt */ | ||
| 756 | intrl2_0_mask_set(priv, INTRL2_0_MPD); | ||
| 757 | |||
| 758 | /* Clear the MagicPacket detection logic */ | ||
| 759 | reg = umac_readl(priv, UMAC_MPD_CTRL); | ||
| 760 | reg &= ~MPD_EN; | ||
| 761 | umac_writel(priv, reg, UMAC_MPD_CTRL); | ||
| 762 | |||
| 763 | netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n"); | ||
| 764 | } | ||
| 695 | 765 | ||
| 696 | /* RX and misc interrupt routine */ | 766 | /* RX and misc interrupt routine */ |
| 697 | static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) | 767 | static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) |
| @@ -722,6 +792,11 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) | |||
| 722 | if (priv->irq0_stat & INTRL2_0_TX_RING_FULL) | 792 | if (priv->irq0_stat & INTRL2_0_TX_RING_FULL) |
| 723 | bcm_sysport_tx_reclaim_all(priv); | 793 | bcm_sysport_tx_reclaim_all(priv); |
| 724 | 794 | ||
| 795 | if (priv->irq0_stat & INTRL2_0_MPD) { | ||
| 796 | netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n"); | ||
| 797 | bcm_sysport_resume_from_wol(priv); | ||
| 798 | } | ||
| 799 | |||
| 725 | return IRQ_HANDLED; | 800 | return IRQ_HANDLED; |
| 726 | } | 801 | } |
| 727 | 802 | ||
| @@ -757,6 +832,15 @@ static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id) | |||
| 757 | return IRQ_HANDLED; | 832 | return IRQ_HANDLED; |
| 758 | } | 833 | } |
| 759 | 834 | ||
| 835 | static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id) | ||
| 836 | { | ||
| 837 | struct bcm_sysport_priv *priv = dev_id; | ||
| 838 | |||
| 839 | pm_wakeup_event(&priv->pdev->dev, 0); | ||
| 840 | |||
| 841 | return IRQ_HANDLED; | ||
| 842 | } | ||
| 843 | |||
| 760 | static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) | 844 | static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) |
| 761 | { | 845 | { |
| 762 | struct sk_buff *nskb; | 846 | struct sk_buff *nskb; |
| @@ -804,8 +888,9 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) | |||
| 804 | csum_info |= L4_LENGTH_VALID; | 888 | csum_info |= L4_LENGTH_VALID; |
| 805 | if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) | 889 | if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) |
| 806 | csum_info |= L4_UDP; | 890 | csum_info |= L4_UDP; |
| 807 | } else | 891 | } else { |
| 808 | csum_info = 0; | 892 | csum_info = 0; |
| 893 | } | ||
| 809 | 894 | ||
| 810 | tsb->l4_ptr_dest_map = csum_info; | 895 | tsb->l4_ptr_dest_map = csum_info; |
| 811 | } | 896 | } |
| @@ -869,7 +954,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, | |||
| 869 | mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); | 954 | mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); |
| 870 | if (dma_mapping_error(kdev, mapping)) { | 955 | if (dma_mapping_error(kdev, mapping)) { |
| 871 | netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n", | 956 | netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n", |
| 872 | skb->data, skb_len); | 957 | skb->data, skb_len); |
| 873 | ret = NETDEV_TX_OK; | 958 | ret = NETDEV_TX_OK; |
| 874 | goto out; | 959 | goto out; |
| 875 | } | 960 | } |
| @@ -887,7 +972,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, | |||
| 887 | len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK; | 972 | len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK; |
| 888 | len_status |= (skb_len << DESC_LEN_SHIFT); | 973 | len_status |= (skb_len << DESC_LEN_SHIFT); |
| 889 | len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) << | 974 | len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) << |
| 890 | DESC_STATUS_SHIFT; | 975 | DESC_STATUS_SHIFT; |
| 891 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 976 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
| 892 | len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT); | 977 | len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT); |
| 893 | 978 | ||
| @@ -912,7 +997,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, | |||
| 912 | netif_tx_stop_queue(txq); | 997 | netif_tx_stop_queue(txq); |
| 913 | 998 | ||
| 914 | netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n", | 999 | netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n", |
| 915 | ring->index, ring->desc_count, ring->curr_desc); | 1000 | ring->index, ring->desc_count, ring->curr_desc); |
| 916 | 1001 | ||
| 917 | ret = NETDEV_TX_OK; | 1002 | ret = NETDEV_TX_OK; |
| 918 | out: | 1003 | out: |
| @@ -1010,7 +1095,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, | |||
| 1010 | return -ENOMEM; | 1095 | return -ENOMEM; |
| 1011 | } | 1096 | } |
| 1012 | 1097 | ||
| 1013 | ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL); | 1098 | ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL); |
| 1014 | if (!ring->cbs) { | 1099 | if (!ring->cbs) { |
| 1015 | netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); | 1100 | netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); |
| 1016 | return -ENOMEM; | 1101 | return -ENOMEM; |
| @@ -1050,14 +1135,14 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, | |||
| 1050 | napi_enable(&ring->napi); | 1135 | napi_enable(&ring->napi); |
| 1051 | 1136 | ||
| 1052 | netif_dbg(priv, hw, priv->netdev, | 1137 | netif_dbg(priv, hw, priv->netdev, |
| 1053 | "TDMA cfg, size=%d, desc_cpu=%p\n", | 1138 | "TDMA cfg, size=%d, desc_cpu=%p\n", |
| 1054 | ring->size, ring->desc_cpu); | 1139 | ring->size, ring->desc_cpu); |
| 1055 | 1140 | ||
| 1056 | return 0; | 1141 | return 0; |
| 1057 | } | 1142 | } |
| 1058 | 1143 | ||
| 1059 | static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, | 1144 | static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, |
| 1060 | unsigned int index) | 1145 | unsigned int index) |
| 1061 | { | 1146 | { |
| 1062 | struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; | 1147 | struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; |
| 1063 | struct device *kdev = &priv->pdev->dev; | 1148 | struct device *kdev = &priv->pdev->dev; |
| @@ -1088,7 +1173,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, | |||
| 1088 | 1173 | ||
| 1089 | /* RDMA helper */ | 1174 | /* RDMA helper */ |
| 1090 | static inline int rdma_enable_set(struct bcm_sysport_priv *priv, | 1175 | static inline int rdma_enable_set(struct bcm_sysport_priv *priv, |
| 1091 | unsigned int enable) | 1176 | unsigned int enable) |
| 1092 | { | 1177 | { |
| 1093 | unsigned int timeout = 1000; | 1178 | unsigned int timeout = 1000; |
| 1094 | u32 reg; | 1179 | u32 reg; |
| @@ -1115,7 +1200,7 @@ static inline int rdma_enable_set(struct bcm_sysport_priv *priv, | |||
| 1115 | 1200 | ||
| 1116 | /* TDMA helper */ | 1201 | /* TDMA helper */ |
| 1117 | static inline int tdma_enable_set(struct bcm_sysport_priv *priv, | 1202 | static inline int tdma_enable_set(struct bcm_sysport_priv *priv, |
| 1118 | unsigned int enable) | 1203 | unsigned int enable) |
| 1119 | { | 1204 | { |
| 1120 | unsigned int timeout = 1000; | 1205 | unsigned int timeout = 1000; |
| 1121 | u32 reg; | 1206 | u32 reg; |
| @@ -1153,8 +1238,8 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) | |||
| 1153 | priv->rx_bd_assign_index = 0; | 1238 | priv->rx_bd_assign_index = 0; |
| 1154 | priv->rx_c_index = 0; | 1239 | priv->rx_c_index = 0; |
| 1155 | priv->rx_read_ptr = 0; | 1240 | priv->rx_read_ptr = 0; |
| 1156 | priv->rx_cbs = kzalloc(priv->num_rx_bds * | 1241 | priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb), |
| 1157 | sizeof(struct bcm_sysport_cb), GFP_KERNEL); | 1242 | GFP_KERNEL); |
| 1158 | if (!priv->rx_cbs) { | 1243 | if (!priv->rx_cbs) { |
| 1159 | netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); | 1244 | netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); |
| 1160 | return -ENOMEM; | 1245 | return -ENOMEM; |
| @@ -1186,8 +1271,8 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) | |||
| 1186 | rdma_writel(priv, 1, RDMA_MBDONE_INTR); | 1271 | rdma_writel(priv, 1, RDMA_MBDONE_INTR); |
| 1187 | 1272 | ||
| 1188 | netif_dbg(priv, hw, priv->netdev, | 1273 | netif_dbg(priv, hw, priv->netdev, |
| 1189 | "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n", | 1274 | "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n", |
| 1190 | priv->num_rx_bds, priv->rx_bds); | 1275 | priv->num_rx_bds, priv->rx_bds); |
| 1191 | 1276 | ||
| 1192 | return 0; | 1277 | return 0; |
| 1193 | } | 1278 | } |
| @@ -1207,8 +1292,8 @@ static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv) | |||
| 1207 | cb = &priv->rx_cbs[i]; | 1292 | cb = &priv->rx_cbs[i]; |
| 1208 | if (dma_unmap_addr(cb, dma_addr)) | 1293 | if (dma_unmap_addr(cb, dma_addr)) |
| 1209 | dma_unmap_single(&priv->pdev->dev, | 1294 | dma_unmap_single(&priv->pdev->dev, |
| 1210 | dma_unmap_addr(cb, dma_addr), | 1295 | dma_unmap_addr(cb, dma_addr), |
| 1211 | RX_BUF_LENGTH, DMA_FROM_DEVICE); | 1296 | RX_BUF_LENGTH, DMA_FROM_DEVICE); |
| 1212 | bcm_sysport_free_cb(cb); | 1297 | bcm_sysport_free_cb(cb); |
| 1213 | } | 1298 | } |
| 1214 | 1299 | ||
| @@ -1236,15 +1321,15 @@ static void bcm_sysport_set_rx_mode(struct net_device *dev) | |||
| 1236 | } | 1321 | } |
| 1237 | 1322 | ||
| 1238 | static inline void umac_enable_set(struct bcm_sysport_priv *priv, | 1323 | static inline void umac_enable_set(struct bcm_sysport_priv *priv, |
| 1239 | unsigned int enable) | 1324 | u32 mask, unsigned int enable) |
| 1240 | { | 1325 | { |
| 1241 | u32 reg; | 1326 | u32 reg; |
| 1242 | 1327 | ||
| 1243 | reg = umac_readl(priv, UMAC_CMD); | 1328 | reg = umac_readl(priv, UMAC_CMD); |
| 1244 | if (enable) | 1329 | if (enable) |
| 1245 | reg |= CMD_RX_EN | CMD_TX_EN; | 1330 | reg |= mask; |
| 1246 | else | 1331 | else |
| 1247 | reg &= ~(CMD_RX_EN | CMD_TX_EN); | 1332 | reg &= ~mask; |
| 1248 | umac_writel(priv, reg, UMAC_CMD); | 1333 | umac_writel(priv, reg, UMAC_CMD); |
| 1249 | 1334 | ||
| 1250 | /* UniMAC stops on a packet boundary, wait for a full-sized packet | 1335 | /* UniMAC stops on a packet boundary, wait for a full-sized packet |
| @@ -1268,7 +1353,7 @@ static inline void umac_reset(struct bcm_sysport_priv *priv) | |||
| 1268 | } | 1353 | } |
| 1269 | 1354 | ||
| 1270 | static void umac_set_hw_addr(struct bcm_sysport_priv *priv, | 1355 | static void umac_set_hw_addr(struct bcm_sysport_priv *priv, |
| 1271 | unsigned char *addr) | 1356 | unsigned char *addr) |
| 1272 | { | 1357 | { |
| 1273 | umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | | 1358 | umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | |
| 1274 | (addr[2] << 8) | addr[3], UMAC_MAC0); | 1359 | (addr[2] << 8) | addr[3], UMAC_MAC0); |
| @@ -1284,11 +1369,35 @@ static void topctrl_flush(struct bcm_sysport_priv *priv) | |||
| 1284 | topctrl_writel(priv, 0, TX_FLUSH_CNTL); | 1369 | topctrl_writel(priv, 0, TX_FLUSH_CNTL); |
| 1285 | } | 1370 | } |
| 1286 | 1371 | ||
| 1372 | static void bcm_sysport_netif_start(struct net_device *dev) | ||
| 1373 | { | ||
| 1374 | struct bcm_sysport_priv *priv = netdev_priv(dev); | ||
| 1375 | |||
| 1376 | /* Enable NAPI */ | ||
| 1377 | napi_enable(&priv->napi); | ||
| 1378 | |||
| 1379 | phy_start(priv->phydev); | ||
| 1380 | |||
| 1381 | /* Enable TX interrupts for the 32 TXQs */ | ||
| 1382 | intrl2_1_mask_clear(priv, 0xffffffff); | ||
| 1383 | |||
| 1384 | /* Last call before we start the real business */ | ||
| 1385 | netif_tx_start_all_queues(dev); | ||
| 1386 | } | ||
| 1387 | |||
| 1388 | static void rbuf_init(struct bcm_sysport_priv *priv) | ||
| 1389 | { | ||
| 1390 | u32 reg; | ||
| 1391 | |||
| 1392 | reg = rbuf_readl(priv, RBUF_CONTROL); | ||
| 1393 | reg |= RBUF_4B_ALGN | RBUF_RSB_EN; | ||
| 1394 | rbuf_writel(priv, reg, RBUF_CONTROL); | ||
| 1395 | } | ||
| 1396 | |||
| 1287 | static int bcm_sysport_open(struct net_device *dev) | 1397 | static int bcm_sysport_open(struct net_device *dev) |
| 1288 | { | 1398 | { |
| 1289 | struct bcm_sysport_priv *priv = netdev_priv(dev); | 1399 | struct bcm_sysport_priv *priv = netdev_priv(dev); |
| 1290 | unsigned int i; | 1400 | unsigned int i; |
| 1291 | u32 reg; | ||
| 1292 | int ret; | 1401 | int ret; |
| 1293 | 1402 | ||
| 1294 | /* Reset UniMAC */ | 1403 | /* Reset UniMAC */ |
| @@ -1298,12 +1407,10 @@ static int bcm_sysport_open(struct net_device *dev) | |||
| 1298 | topctrl_flush(priv); | 1407 | topctrl_flush(priv); |
| 1299 | 1408 | ||
| 1300 | /* Disable the UniMAC RX/TX */ | 1409 | /* Disable the UniMAC RX/TX */ |
| 1301 | umac_enable_set(priv, 0); | 1410 | umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0); |
| 1302 | 1411 | ||
| 1303 | /* Enable RBUF 2bytes alignment and Receive Status Block */ | 1412 | /* Enable RBUF 2bytes alignment and Receive Status Block */ |
| 1304 | reg = rbuf_readl(priv, RBUF_CONTROL); | 1413 | rbuf_init(priv); |
| 1305 | reg |= RBUF_4B_ALGN | RBUF_RSB_EN; | ||
| 1306 | rbuf_writel(priv, reg, RBUF_CONTROL); | ||
| 1307 | 1414 | ||
| 1308 | /* Set maximum frame length */ | 1415 | /* Set maximum frame length */ |
| 1309 | umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); | 1416 | umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); |
| @@ -1351,7 +1458,7 @@ static int bcm_sysport_open(struct net_device *dev) | |||
| 1351 | ret = bcm_sysport_init_tx_ring(priv, i); | 1458 | ret = bcm_sysport_init_tx_ring(priv, i); |
| 1352 | if (ret) { | 1459 | if (ret) { |
| 1353 | netdev_err(dev, "failed to initialize TX ring %d\n", | 1460 | netdev_err(dev, "failed to initialize TX ring %d\n", |
| 1354 | i); | 1461 | i); |
| 1355 | goto out_free_tx_ring; | 1462 | goto out_free_tx_ring; |
| 1356 | } | 1463 | } |
| 1357 | } | 1464 | } |
| @@ -1379,19 +1486,10 @@ static int bcm_sysport_open(struct net_device *dev) | |||
| 1379 | if (ret) | 1486 | if (ret) |
| 1380 | goto out_clear_rx_int; | 1487 | goto out_clear_rx_int; |
| 1381 | 1488 | ||
| 1382 | /* Enable NAPI */ | ||
| 1383 | napi_enable(&priv->napi); | ||
| 1384 | |||
| 1385 | /* Turn on UniMAC TX/RX */ | 1489 | /* Turn on UniMAC TX/RX */ |
| 1386 | umac_enable_set(priv, 1); | 1490 | umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1); |
| 1387 | 1491 | ||
| 1388 | phy_start(priv->phydev); | 1492 | bcm_sysport_netif_start(dev); |
| 1389 | |||
| 1390 | /* Enable TX interrupts for the 32 TXQs */ | ||
| 1391 | intrl2_1_mask_clear(priv, 0xffffffff); | ||
| 1392 | |||
| 1393 | /* Last call before we start the real business */ | ||
| 1394 | netif_tx_start_all_queues(dev); | ||
| 1395 | 1493 | ||
| 1396 | return 0; | 1494 | return 0; |
| 1397 | 1495 | ||
| @@ -1410,12 +1508,9 @@ out_phy_disconnect: | |||
| 1410 | return ret; | 1508 | return ret; |
| 1411 | } | 1509 | } |
| 1412 | 1510 | ||
| 1413 | static int bcm_sysport_stop(struct net_device *dev) | 1511 | static void bcm_sysport_netif_stop(struct net_device *dev) |
| 1414 | { | 1512 | { |
| 1415 | struct bcm_sysport_priv *priv = netdev_priv(dev); | 1513 | struct bcm_sysport_priv *priv = netdev_priv(dev); |
| 1416 | unsigned int i; | ||
| 1417 | u32 reg; | ||
| 1418 | int ret; | ||
| 1419 | 1514 | ||
| 1420 | /* stop all software from updating hardware */ | 1515 | /* stop all software from updating hardware */ |
| 1421 | netif_tx_stop_all_queues(dev); | 1516 | netif_tx_stop_all_queues(dev); |
| @@ -1427,11 +1522,18 @@ static int bcm_sysport_stop(struct net_device *dev) | |||
| 1427 | intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); | 1522 | intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); |
| 1428 | intrl2_1_mask_set(priv, 0xffffffff); | 1523 | intrl2_1_mask_set(priv, 0xffffffff); |
| 1429 | intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); | 1524 | intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); |
| 1525 | } | ||
| 1526 | |||
| 1527 | static int bcm_sysport_stop(struct net_device *dev) | ||
| 1528 | { | ||
| 1529 | struct bcm_sysport_priv *priv = netdev_priv(dev); | ||
| 1530 | unsigned int i; | ||
| 1531 | int ret; | ||
| 1532 | |||
| 1533 | bcm_sysport_netif_stop(dev); | ||
| 1430 | 1534 | ||
| 1431 | /* Disable UniMAC RX */ | 1535 | /* Disable UniMAC RX */ |
| 1432 | reg = umac_readl(priv, UMAC_CMD); | 1536 | umac_enable_set(priv, CMD_RX_EN, 0); |
| 1433 | reg &= ~CMD_RX_EN; | ||
| 1434 | umac_writel(priv, reg, UMAC_CMD); | ||
| 1435 | 1537 | ||
| 1436 | ret = tdma_enable_set(priv, 0); | 1538 | ret = tdma_enable_set(priv, 0); |
| 1437 | if (ret) { | 1539 | if (ret) { |
| @@ -1449,9 +1551,7 @@ static int bcm_sysport_stop(struct net_device *dev) | |||
| 1449 | } | 1551 | } |
| 1450 | 1552 | ||
| 1451 | /* Disable UniMAC TX */ | 1553 | /* Disable UniMAC TX */ |
| 1452 | reg = umac_readl(priv, UMAC_CMD); | 1554 | umac_enable_set(priv, CMD_TX_EN, 0); |
| 1453 | reg &= ~CMD_TX_EN; | ||
| 1454 | umac_writel(priv, reg, UMAC_CMD); | ||
| 1455 | 1555 | ||
| 1456 | /* Free RX/TX rings SW structures */ | 1556 | /* Free RX/TX rings SW structures */ |
| 1457 | for (i = 0; i < dev->num_tx_queues; i++) | 1557 | for (i = 0; i < dev->num_tx_queues; i++) |
| @@ -1477,6 +1577,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = { | |||
| 1477 | .get_strings = bcm_sysport_get_strings, | 1577 | .get_strings = bcm_sysport_get_strings, |
| 1478 | .get_ethtool_stats = bcm_sysport_get_stats, | 1578 | .get_ethtool_stats = bcm_sysport_get_stats, |
| 1479 | .get_sset_count = bcm_sysport_get_sset_count, | 1579 | .get_sset_count = bcm_sysport_get_sset_count, |
| 1580 | .get_wol = bcm_sysport_get_wol, | ||
| 1581 | .set_wol = bcm_sysport_set_wol, | ||
| 1480 | }; | 1582 | }; |
| 1481 | 1583 | ||
| 1482 | static const struct net_device_ops bcm_sysport_netdev_ops = { | 1584 | static const struct net_device_ops bcm_sysport_netdev_ops = { |
| @@ -1518,6 +1620,7 @@ static int bcm_sysport_probe(struct platform_device *pdev) | |||
| 1518 | 1620 | ||
| 1519 | priv->irq0 = platform_get_irq(pdev, 0); | 1621 | priv->irq0 = platform_get_irq(pdev, 0); |
| 1520 | priv->irq1 = platform_get_irq(pdev, 1); | 1622 | priv->irq1 = platform_get_irq(pdev, 1); |
| 1623 | priv->wol_irq = platform_get_irq(pdev, 2); | ||
| 1521 | if (priv->irq0 <= 0 || priv->irq1 <= 0) { | 1624 | if (priv->irq0 <= 0 || priv->irq1 <= 0) { |
| 1522 | dev_err(&pdev->dev, "invalid interrupts\n"); | 1625 | dev_err(&pdev->dev, "invalid interrupts\n"); |
| 1523 | ret = -EINVAL; | 1626 | ret = -EINVAL; |
| @@ -1570,6 +1673,13 @@ static int bcm_sysport_probe(struct platform_device *pdev) | |||
| 1570 | dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA | | 1673 | dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA | |
| 1571 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | 1674 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; |
| 1572 | 1675 | ||
| 1676 | /* Request the WOL interrupt and advertise suspend if available */ | ||
| 1677 | priv->wol_irq_disabled = 1; | ||
| 1678 | ret = devm_request_irq(&pdev->dev, priv->wol_irq, | ||
| 1679 | bcm_sysport_wol_isr, 0, dev->name, priv); | ||
| 1680 | if (!ret) | ||
| 1681 | device_set_wakeup_capable(&pdev->dev, 1); | ||
| 1682 | |||
| 1573 | /* Set the needed headroom once and for all */ | 1683 | /* Set the needed headroom once and for all */ |
| 1574 | BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); | 1684 | BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); |
| 1575 | dev->needed_headroom += sizeof(struct bcm_tsb); | 1685 | dev->needed_headroom += sizeof(struct bcm_tsb); |
| @@ -1585,10 +1695,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) | |||
| 1585 | 1695 | ||
| 1586 | priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; | 1696 | priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; |
| 1587 | dev_info(&pdev->dev, | 1697 | dev_info(&pdev->dev, |
| 1588 | "Broadcom SYSTEMPORT" REV_FMT | 1698 | "Broadcom SYSTEMPORT" REV_FMT |
| 1589 | " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n", | 1699 | " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n", |
| 1590 | (priv->rev >> 8) & 0xff, priv->rev & 0xff, | 1700 | (priv->rev >> 8) & 0xff, priv->rev & 0xff, |
| 1591 | priv->base, priv->irq0, priv->irq1, txq, rxq); | 1701 | priv->base, priv->irq0, priv->irq1, txq, rxq); |
| 1592 | 1702 | ||
| 1593 | return 0; | 1703 | return 0; |
| 1594 | err: | 1704 | err: |
| @@ -1610,6 +1720,208 @@ static int bcm_sysport_remove(struct platform_device *pdev) | |||
| 1610 | return 0; | 1720 | return 0; |
| 1611 | } | 1721 | } |
| 1612 | 1722 | ||
| 1723 | #ifdef CONFIG_PM_SLEEP | ||
| 1724 | static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) | ||
| 1725 | { | ||
| 1726 | struct net_device *ndev = priv->netdev; | ||
| 1727 | unsigned int timeout = 1000; | ||
| 1728 | u32 reg; | ||
| 1729 | |||
| 1730 | /* Password has already been programmed */ | ||
| 1731 | reg = umac_readl(priv, UMAC_MPD_CTRL); | ||
| 1732 | reg |= MPD_EN; | ||
| 1733 | reg &= ~PSW_EN; | ||
| 1734 | if (priv->wolopts & WAKE_MAGICSECURE) | ||
| 1735 | reg |= PSW_EN; | ||
| 1736 | umac_writel(priv, reg, UMAC_MPD_CTRL); | ||
| 1737 | |||
| 1738 | /* Make sure RBUF entered WoL mode as result */ | ||
| 1739 | do { | ||
| 1740 | reg = rbuf_readl(priv, RBUF_STATUS); | ||
| 1741 | if (reg & RBUF_WOL_MODE) | ||
| 1742 | break; | ||
| 1743 | |||
| 1744 | udelay(10); | ||
| 1745 | } while (timeout-- > 0); | ||
| 1746 | |||
| 1747 | /* Do not leave the UniMAC RBUF matching only MPD packets */ | ||
| 1748 | if (!timeout) { | ||
| 1749 | reg = umac_readl(priv, UMAC_MPD_CTRL); | ||
| 1750 | reg &= ~MPD_EN; | ||
| 1751 | umac_writel(priv, reg, UMAC_MPD_CTRL); | ||
| 1752 | netif_err(priv, wol, ndev, "failed to enter WOL mode\n"); | ||
| 1753 | return -ETIMEDOUT; | ||
| 1754 | } | ||
| 1755 | |||
| 1756 | /* UniMAC receive needs to be turned on */ | ||
| 1757 | umac_enable_set(priv, CMD_RX_EN, 1); | ||
| 1758 | |||
| 1759 | /* Enable the interrupt wake-up source */ | ||
| 1760 | intrl2_0_mask_clear(priv, INTRL2_0_MPD); | ||
| 1761 | |||
| 1762 | netif_dbg(priv, wol, ndev, "entered WOL mode\n"); | ||
| 1763 | |||
| 1764 | return 0; | ||
| 1765 | } | ||
| 1766 | |||
| 1767 | static int bcm_sysport_suspend(struct device *d) | ||
| 1768 | { | ||
| 1769 | struct net_device *dev = dev_get_drvdata(d); | ||
| 1770 | struct bcm_sysport_priv *priv = netdev_priv(dev); | ||
| 1771 | unsigned int i; | ||
| 1772 | int ret = 0; | ||
| 1773 | u32 reg; | ||
| 1774 | |||
| 1775 | if (!netif_running(dev)) | ||
| 1776 | return 0; | ||
| 1777 | |||
| 1778 | bcm_sysport_netif_stop(dev); | ||
| 1779 | |||
| 1780 | phy_suspend(priv->phydev); | ||
| 1781 | |||
| 1782 | netif_device_detach(dev); | ||
| 1783 | |||
| 1784 | /* Disable UniMAC RX */ | ||
| 1785 | umac_enable_set(priv, CMD_RX_EN, 0); | ||
| 1786 | |||
| 1787 | ret = rdma_enable_set(priv, 0); | ||
| 1788 | if (ret) { | ||
| 1789 | netdev_err(dev, "RDMA timeout!\n"); | ||
| 1790 | return ret; | ||
| 1791 | } | ||
| 1792 | |||
| 1793 | /* Disable RXCHK if enabled */ | ||
| 1794 | if (priv->rx_chk_en) { | ||
| 1795 | reg = rxchk_readl(priv, RXCHK_CONTROL); | ||
| 1796 | reg &= ~RXCHK_EN; | ||
| 1797 | rxchk_writel(priv, reg, RXCHK_CONTROL); | ||
| 1798 | } | ||
| 1799 | |||
| 1800 | /* Flush RX pipe */ | ||
| 1801 | if (!priv->wolopts) | ||
| 1802 | topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL); | ||
| 1803 | |||
| 1804 | ret = tdma_enable_set(priv, 0); | ||
| 1805 | if (ret) { | ||
| 1806 | netdev_err(dev, "TDMA timeout!\n"); | ||
| 1807 | return ret; | ||
| 1808 | } | ||
| 1809 | |||
| 1810 | /* Wait for a packet boundary */ | ||
| 1811 | usleep_range(2000, 3000); | ||
| 1812 | |||
| 1813 | umac_enable_set(priv, CMD_TX_EN, 0); | ||
| 1814 | |||
| 1815 | topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL); | ||
| 1816 | |||
| 1817 | /* Free RX/TX rings SW structures */ | ||
| 1818 | for (i = 0; i < dev->num_tx_queues; i++) | ||
| 1819 | bcm_sysport_fini_tx_ring(priv, i); | ||
| 1820 | bcm_sysport_fini_rx_ring(priv); | ||
| 1821 | |||
| 1822 | /* Get prepared for Wake-on-LAN */ | ||
| 1823 | if (device_may_wakeup(d) && priv->wolopts) | ||
| 1824 | ret = bcm_sysport_suspend_to_wol(priv); | ||
| 1825 | |||
| 1826 | return ret; | ||
| 1827 | } | ||
| 1828 | |||
| 1829 | static int bcm_sysport_resume(struct device *d) | ||
| 1830 | { | ||
| 1831 | struct net_device *dev = dev_get_drvdata(d); | ||
| 1832 | struct bcm_sysport_priv *priv = netdev_priv(dev); | ||
| 1833 | unsigned int i; | ||
| 1834 | u32 reg; | ||
| 1835 | int ret; | ||
| 1836 | |||
| 1837 | if (!netif_running(dev)) | ||
| 1838 | return 0; | ||
| 1839 | |||
| 1840 | /* We may have been suspended and never received a WOL event that | ||
| 1841 | * would turn off MPD detection, take care of that now | ||
| 1842 | */ | ||
| 1843 | bcm_sysport_resume_from_wol(priv); | ||
| 1844 | |||
| 1845 | /* Initialize both hardware and software ring */ | ||
| 1846 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
| 1847 | ret = bcm_sysport_init_tx_ring(priv, i); | ||
| 1848 | if (ret) { | ||
| 1849 | netdev_err(dev, "failed to initialize TX ring %d\n", | ||
| 1850 | i); | ||
| 1851 | goto out_free_tx_rings; | ||
| 1852 | } | ||
| 1853 | } | ||
| 1854 | |||
| 1855 | /* Initialize linked-list */ | ||
| 1856 | tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS); | ||
| 1857 | |||
| 1858 | /* Initialize RX ring */ | ||
| 1859 | ret = bcm_sysport_init_rx_ring(priv); | ||
| 1860 | if (ret) { | ||
| 1861 | netdev_err(dev, "failed to initialize RX ring\n"); | ||
| 1862 | goto out_free_rx_ring; | ||
| 1863 | } | ||
| 1864 | |||
| 1865 | netif_device_attach(dev); | ||
| 1866 | |||
| 1867 | /* Enable RX interrupt and TX ring full interrupt */ | ||
| 1868 | intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); | ||
| 1869 | |||
| 1870 | /* RX pipe enable */ | ||
| 1871 | topctrl_writel(priv, 0, RX_FLUSH_CNTL); | ||
| 1872 | |||
| 1873 | ret = rdma_enable_set(priv, 1); | ||
| 1874 | if (ret) { | ||
| 1875 | netdev_err(dev, "failed to enable RDMA\n"); | ||
| 1876 | goto out_free_rx_ring; | ||
| 1877 | } | ||
| 1878 | |||
| 1879 | /* Enable rxhck */ | ||
| 1880 | if (priv->rx_chk_en) { | ||
| 1881 | reg = rxchk_readl(priv, RXCHK_CONTROL); | ||
| 1882 | reg |= RXCHK_EN; | ||
| 1883 | rxchk_writel(priv, reg, RXCHK_CONTROL); | ||
| 1884 | } | ||
| 1885 | |||
| 1886 | rbuf_init(priv); | ||
| 1887 | |||
| 1888 | /* Set maximum frame length */ | ||
| 1889 | umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); | ||
| 1890 | |||
| 1891 | /* Set MAC address */ | ||
| 1892 | umac_set_hw_addr(priv, dev->dev_addr); | ||
| 1893 | |||
| 1894 | umac_enable_set(priv, CMD_RX_EN, 1); | ||
| 1895 | |||
| 1896 | /* TX pipe enable */ | ||
| 1897 | topctrl_writel(priv, 0, TX_FLUSH_CNTL); | ||
| 1898 | |||
| 1899 | umac_enable_set(priv, CMD_TX_EN, 1); | ||
| 1900 | |||
| 1901 | ret = tdma_enable_set(priv, 1); | ||
| 1902 | if (ret) { | ||
| 1903 | netdev_err(dev, "TDMA timeout!\n"); | ||
| 1904 | goto out_free_rx_ring; | ||
| 1905 | } | ||
| 1906 | |||
| 1907 | phy_resume(priv->phydev); | ||
| 1908 | |||
| 1909 | bcm_sysport_netif_start(dev); | ||
| 1910 | |||
| 1911 | return 0; | ||
| 1912 | |||
| 1913 | out_free_rx_ring: | ||
| 1914 | bcm_sysport_fini_rx_ring(priv); | ||
| 1915 | out_free_tx_rings: | ||
| 1916 | for (i = 0; i < dev->num_tx_queues; i++) | ||
| 1917 | bcm_sysport_fini_tx_ring(priv, i); | ||
| 1918 | return ret; | ||
| 1919 | } | ||
| 1920 | #endif | ||
| 1921 | |||
| 1922 | static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops, | ||
| 1923 | bcm_sysport_suspend, bcm_sysport_resume); | ||
| 1924 | |||
| 1613 | static const struct of_device_id bcm_sysport_of_match[] = { | 1925 | static const struct of_device_id bcm_sysport_of_match[] = { |
| 1614 | { .compatible = "brcm,systemport-v1.00" }, | 1926 | { .compatible = "brcm,systemport-v1.00" }, |
| 1615 | { .compatible = "brcm,systemport" }, | 1927 | { .compatible = "brcm,systemport" }, |
| @@ -1623,6 +1935,7 @@ static struct platform_driver bcm_sysport_driver = { | |||
| 1623 | .name = "brcm-systemport", | 1935 | .name = "brcm-systemport", |
| 1624 | .owner = THIS_MODULE, | 1936 | .owner = THIS_MODULE, |
| 1625 | .of_match_table = bcm_sysport_of_match, | 1937 | .of_match_table = bcm_sysport_of_match, |
| 1938 | .pm = &bcm_sysport_pm_ops, | ||
| 1626 | }, | 1939 | }, |
| 1627 | }; | 1940 | }; |
| 1628 | module_platform_driver(bcm_sysport_driver); | 1941 | module_platform_driver(bcm_sysport_driver); |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 281c08246037..b08dab828101 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h | |||
| @@ -246,6 +246,15 @@ struct bcm_rsb { | |||
| 246 | #define MIB_RX_CNT_RST (1 << 0) | 246 | #define MIB_RX_CNT_RST (1 << 0) |
| 247 | #define MIB_RUNT_CNT_RST (1 << 1) | 247 | #define MIB_RUNT_CNT_RST (1 << 1) |
| 248 | #define MIB_TX_CNT_RST (1 << 2) | 248 | #define MIB_TX_CNT_RST (1 << 2) |
| 249 | |||
| 250 | #define UMAC_MPD_CTRL 0x620 | ||
| 251 | #define MPD_EN (1 << 0) | ||
| 252 | #define MSEQ_LEN_SHIFT 16 | ||
| 253 | #define MSEQ_LEN_MASK 0xff | ||
| 254 | #define PSW_EN (1 << 27) | ||
| 255 | |||
| 256 | #define UMAC_PSW_MS 0x624 | ||
| 257 | #define UMAC_PSW_LS 0x628 | ||
| 249 | #define UMAC_MDF_CTRL 0x650 | 258 | #define UMAC_MDF_CTRL 0x650 |
| 250 | #define UMAC_MDF_ADDR 0x654 | 259 | #define UMAC_MDF_ADDR 0x654 |
| 251 | 260 | ||
| @@ -642,6 +651,7 @@ struct bcm_sysport_priv { | |||
| 642 | struct platform_device *pdev; | 651 | struct platform_device *pdev; |
| 643 | int irq0; | 652 | int irq0; |
| 644 | int irq1; | 653 | int irq1; |
| 654 | int wol_irq; | ||
| 645 | 655 | ||
| 646 | /* Transmit rings */ | 656 | /* Transmit rings */ |
| 647 | struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS]; | 657 | struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS]; |
| @@ -664,10 +674,12 @@ struct bcm_sysport_priv { | |||
| 664 | int old_duplex; | 674 | int old_duplex; |
| 665 | 675 | ||
| 666 | /* Misc fields */ | 676 | /* Misc fields */ |
| 667 | unsigned int rx_csum_en:1; | 677 | unsigned int rx_chk_en:1; |
| 668 | unsigned int tsb_en:1; | 678 | unsigned int tsb_en:1; |
| 669 | unsigned int crc_fwd:1; | 679 | unsigned int crc_fwd:1; |
| 670 | u16 rev; | 680 | u16 rev; |
| 681 | u32 wolopts; | ||
| 682 | unsigned int wol_irq_disabled:1; | ||
| 671 | 683 | ||
| 672 | /* MIB related fields */ | 684 | /* MIB related fields */ |
| 673 | struct bcm_sysport_mib mib; | 685 | struct bcm_sysport_mib mib; |
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 67d2b0047371..2fee73b878c2 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | /* bnx2.c: Broadcom NX2 network driver. | 1 | /* bnx2.c: QLogic NX2 network driver. |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 2004-2013 Broadcom Corporation | 3 | * Copyright (c) 2004-2014 Broadcom Corporation |
| 4 | * Copyright (c) 2014 QLogic Corporation | ||
| 4 | * | 5 | * |
| 5 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
| @@ -71,10 +72,10 @@ | |||
| 71 | #define TX_TIMEOUT (5*HZ) | 72 | #define TX_TIMEOUT (5*HZ) |
| 72 | 73 | ||
| 73 | static char version[] = | 74 | static char version[] = |
| 74 | "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | 75 | "QLogic NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; |
| 75 | 76 | ||
| 76 | MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>"); | 77 | MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>"); |
| 77 | MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver"); | 78 | MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/5716 Driver"); |
| 78 | MODULE_LICENSE("GPL"); | 79 | MODULE_LICENSE("GPL"); |
| 79 | MODULE_VERSION(DRV_MODULE_VERSION); | 80 | MODULE_VERSION(DRV_MODULE_VERSION); |
| 80 | MODULE_FIRMWARE(FW_MIPS_FILE_06); | 81 | MODULE_FIRMWARE(FW_MIPS_FILE_06); |
| @@ -119,7 +120,7 @@ static struct { | |||
| 119 | { "Broadcom NetXtreme II BCM5716 1000Base-SX" }, | 120 | { "Broadcom NetXtreme II BCM5716 1000Base-SX" }, |
| 120 | }; | 121 | }; |
| 121 | 122 | ||
| 122 | static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = { | 123 | static const struct pci_device_id bnx2_pci_tbl[] = { |
| 123 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, | 124 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, |
| 124 | PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T }, | 125 | PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T }, |
| 125 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, | 126 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, |
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h index e341bc366fa5..28df35d35893 100644 --- a/drivers/net/ethernet/broadcom/bnx2.h +++ b/drivers/net/ethernet/broadcom/bnx2.h | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | /* bnx2.h: Broadcom NX2 network driver. | 1 | /* bnx2.h: QLogic NX2 network driver. |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 2004-2013 Broadcom Corporation | 3 | * Copyright (c) 2004-2014 Broadcom Corporation |
| 4 | * Copyright (c) 2014 QLogic Corporation | ||
| 4 | * | 5 | * |
| 5 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/net/ethernet/broadcom/bnx2_fw.h b/drivers/net/ethernet/broadcom/bnx2_fw.h index 940eb91f209d..7db79c28b5ff 100644 --- a/drivers/net/ethernet/broadcom/bnx2_fw.h +++ b/drivers/net/ethernet/broadcom/bnx2_fw.h | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | /* bnx2_fw.h: Broadcom NX2 network driver. | 1 | /* bnx2_fw.h: QLogic NX2 network driver. |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation | 3 | * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation |
| 4 | * Copyright (c) 2014 QLogic Corporation | ||
| 4 | * | 5 | * |
| 5 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 4cab09d3f807..d777fae86988 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
| @@ -346,6 +346,7 @@ struct sw_tx_bd { | |||
| 346 | u8 flags; | 346 | u8 flags; |
| 347 | /* Set on the first BD descriptor when there is a split BD */ | 347 | /* Set on the first BD descriptor when there is a split BD */ |
| 348 | #define BNX2X_TSO_SPLIT_BD (1<<0) | 348 | #define BNX2X_TSO_SPLIT_BD (1<<0) |
| 349 | #define BNX2X_HAS_SECOND_PBD (1<<1) | ||
| 349 | }; | 350 | }; |
| 350 | 351 | ||
| 351 | struct sw_rx_page { | 352 | struct sw_rx_page { |
| @@ -1482,6 +1483,7 @@ struct bnx2x { | |||
| 1482 | union pf_vf_bulletin *pf2vf_bulletin; | 1483 | union pf_vf_bulletin *pf2vf_bulletin; |
| 1483 | dma_addr_t pf2vf_bulletin_mapping; | 1484 | dma_addr_t pf2vf_bulletin_mapping; |
| 1484 | 1485 | ||
| 1486 | union pf_vf_bulletin shadow_bulletin; | ||
| 1485 | struct pf_vf_bulletin_content old_bulletin; | 1487 | struct pf_vf_bulletin_content old_bulletin; |
| 1486 | 1488 | ||
| 1487 | u16 requested_nr_virtfn; | 1489 | u16 requested_nr_virtfn; |
| @@ -1507,8 +1509,10 @@ struct bnx2x { | |||
| 1507 | /* TCP with Timestamp Option (32) + IPv6 (40) */ | 1509 | /* TCP with Timestamp Option (32) + IPv6 (40) */ |
| 1508 | #define ETH_MAX_TPA_HEADER_SIZE 72 | 1510 | #define ETH_MAX_TPA_HEADER_SIZE 72 |
| 1509 | 1511 | ||
| 1510 | /* Max supported alignment is 256 (8 shift) */ | 1512 | /* Max supported alignment is 256 (8 shift) |
| 1511 | #define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT) | 1513 | * minimal alignment shift 6 is optimal for 57xxx HW performance |
| 1514 | */ | ||
| 1515 | #define BNX2X_RX_ALIGN_SHIFT max(6, min(8, L1_CACHE_SHIFT)) | ||
| 1512 | 1516 | ||
| 1513 | /* FW uses 2 Cache lines Alignment for start packet and size | 1517 | /* FW uses 2 Cache lines Alignment for start packet and size |
| 1514 | * | 1518 | * |
| @@ -1928,6 +1932,8 @@ struct bnx2x { | |||
| 1928 | struct semaphore stats_sema; | 1932 | struct semaphore stats_sema; |
| 1929 | 1933 | ||
| 1930 | u8 phys_port_id[ETH_ALEN]; | 1934 | u8 phys_port_id[ETH_ALEN]; |
| 1935 | |||
| 1936 | struct bnx2x_link_report_data vf_link_vars; | ||
| 1931 | }; | 1937 | }; |
| 1932 | 1938 | ||
| 1933 | /* Tx queues may be less or equal to Rx queues */ | 1939 | /* Tx queues may be less or equal to Rx queues */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4b875da1c7ed..4ccc806b1150 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
| @@ -227,6 +227,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, | |||
| 227 | --nbd; | 227 | --nbd; |
| 228 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | 228 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); |
| 229 | 229 | ||
| 230 | if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) { | ||
| 231 | /* Skip second parse bd... */ | ||
| 232 | --nbd; | ||
| 233 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | ||
| 234 | } | ||
| 235 | |||
| 230 | /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */ | 236 | /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */ |
| 231 | if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { | 237 | if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { |
| 232 | tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; | 238 | tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; |
| @@ -477,11 +483,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, | |||
| 477 | 483 | ||
| 478 | #ifdef BNX2X_STOP_ON_ERROR | 484 | #ifdef BNX2X_STOP_ON_ERROR |
| 479 | fp->tpa_queue_used |= (1 << queue); | 485 | fp->tpa_queue_used |= (1 << queue); |
| 480 | #ifdef _ASM_GENERIC_INT_L64_H | ||
| 481 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", | ||
| 482 | #else | ||
| 483 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", | 486 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", |
| 484 | #endif | ||
| 485 | fp->tpa_queue_used); | 487 | fp->tpa_queue_used); |
| 486 | #endif | 488 | #endif |
| 487 | } | 489 | } |
| @@ -1186,29 +1188,38 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp) | |||
| 1186 | static void bnx2x_fill_report_data(struct bnx2x *bp, | 1188 | static void bnx2x_fill_report_data(struct bnx2x *bp, |
| 1187 | struct bnx2x_link_report_data *data) | 1189 | struct bnx2x_link_report_data *data) |
| 1188 | { | 1190 | { |
| 1189 | u16 line_speed = bnx2x_get_mf_speed(bp); | ||
| 1190 | |||
| 1191 | memset(data, 0, sizeof(*data)); | 1191 | memset(data, 0, sizeof(*data)); |
| 1192 | 1192 | ||
| 1193 | /* Fill the report data: effective line speed */ | 1193 | if (IS_PF(bp)) { |
| 1194 | data->line_speed = line_speed; | 1194 | /* Fill the report data: effective line speed */ |
| 1195 | 1195 | data->line_speed = bnx2x_get_mf_speed(bp); | |
| 1196 | /* Link is down */ | 1196 | |
| 1197 | if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS)) | 1197 | /* Link is down */ |
| 1198 | __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, | 1198 | if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS)) |
| 1199 | &data->link_report_flags); | 1199 | __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, |
| 1200 | 1200 | &data->link_report_flags); | |
| 1201 | /* Full DUPLEX */ | 1201 | |
| 1202 | if (bp->link_vars.duplex == DUPLEX_FULL) | 1202 | if (!BNX2X_NUM_ETH_QUEUES(bp)) |
| 1203 | __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags); | 1203 | __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, |
| 1204 | 1204 | &data->link_report_flags); | |
| 1205 | /* Rx Flow Control is ON */ | 1205 | |
| 1206 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) | 1206 | /* Full DUPLEX */ |
| 1207 | __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags); | 1207 | if (bp->link_vars.duplex == DUPLEX_FULL) |
| 1208 | 1208 | __set_bit(BNX2X_LINK_REPORT_FD, | |
| 1209 | /* Tx Flow Control is ON */ | 1209 | &data->link_report_flags); |
| 1210 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) | 1210 | |
| 1211 | __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags); | 1211 | /* Rx Flow Control is ON */ |
| 1212 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) | ||
| 1213 | __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, | ||
| 1214 | &data->link_report_flags); | ||
| 1215 | |||
| 1216 | /* Tx Flow Control is ON */ | ||
| 1217 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) | ||
| 1218 | __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, | ||
| 1219 | &data->link_report_flags); | ||
| 1220 | } else { /* VF */ | ||
| 1221 | *data = bp->vf_link_vars; | ||
| 1222 | } | ||
| 1212 | } | 1223 | } |
| 1213 | 1224 | ||
| 1214 | /** | 1225 | /** |
| @@ -1262,6 +1273,10 @@ void __bnx2x_link_report(struct bnx2x *bp) | |||
| 1262 | */ | 1273 | */ |
| 1263 | memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data)); | 1274 | memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data)); |
| 1264 | 1275 | ||
| 1276 | /* propagate status to VFs */ | ||
| 1277 | if (IS_PF(bp)) | ||
| 1278 | bnx2x_iov_link_update(bp); | ||
| 1279 | |||
| 1265 | if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, | 1280 | if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, |
| 1266 | &cur_data.link_report_flags)) { | 1281 | &cur_data.link_report_flags)) { |
| 1267 | netif_carrier_off(bp->dev); | 1282 | netif_carrier_off(bp->dev); |
| @@ -3889,6 +3904,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3889 | /* set encapsulation flag in start BD */ | 3904 | /* set encapsulation flag in start BD */ |
| 3890 | SET_FLAG(tx_start_bd->general_data, | 3905 | SET_FLAG(tx_start_bd->general_data, |
| 3891 | ETH_TX_START_BD_TUNNEL_EXIST, 1); | 3906 | ETH_TX_START_BD_TUNNEL_EXIST, 1); |
| 3907 | |||
| 3908 | tx_buf->flags |= BNX2X_HAS_SECOND_PBD; | ||
| 3909 | |||
| 3892 | nbd++; | 3910 | nbd++; |
| 3893 | } else if (xmit_type & XMIT_CSUM) { | 3911 | } else if (xmit_type & XMIT_CSUM) { |
| 3894 | /* Set PBD in checksum offload case w/o encapsulation */ | 3912 | /* Set PBD in checksum offload case w/o encapsulation */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 51a952c51cb1..fb26bc4c42a1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | |||
| @@ -2303,8 +2303,8 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up) | |||
| 2303 | return 0; | 2303 | return 0; |
| 2304 | } | 2304 | } |
| 2305 | 2305 | ||
| 2306 | static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype, | 2306 | static int bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype, |
| 2307 | u16 idval, u8 up) | 2307 | u16 idval, u8 up) |
| 2308 | { | 2308 | { |
| 2309 | struct bnx2x *bp = netdev_priv(netdev); | 2309 | struct bnx2x *bp = netdev_priv(netdev); |
| 2310 | 2310 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index bd0600cf7266..92fee842f954 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | |||
| @@ -216,6 +216,43 @@ static int bnx2x_get_port_type(struct bnx2x *bp) | |||
| 216 | return port_type; | 216 | return port_type; |
| 217 | } | 217 | } |
| 218 | 218 | ||
| 219 | static int bnx2x_get_vf_settings(struct net_device *dev, | ||
| 220 | struct ethtool_cmd *cmd) | ||
| 221 | { | ||
| 222 | struct bnx2x *bp = netdev_priv(dev); | ||
| 223 | |||
| 224 | if (bp->state == BNX2X_STATE_OPEN) { | ||
| 225 | if (test_bit(BNX2X_LINK_REPORT_FD, | ||
| 226 | &bp->vf_link_vars.link_report_flags)) | ||
| 227 | cmd->duplex = DUPLEX_FULL; | ||
| 228 | else | ||
| 229 | cmd->duplex = DUPLEX_HALF; | ||
| 230 | |||
| 231 | ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed); | ||
| 232 | } else { | ||
| 233 | cmd->duplex = DUPLEX_UNKNOWN; | ||
| 234 | ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); | ||
| 235 | } | ||
| 236 | |||
| 237 | cmd->port = PORT_OTHER; | ||
| 238 | cmd->phy_address = 0; | ||
| 239 | cmd->transceiver = XCVR_INTERNAL; | ||
| 240 | cmd->autoneg = AUTONEG_DISABLE; | ||
| 241 | cmd->maxtxpkt = 0; | ||
| 242 | cmd->maxrxpkt = 0; | ||
| 243 | |||
| 244 | DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" | ||
| 245 | " supported 0x%x advertising 0x%x speed %u\n" | ||
| 246 | " duplex %d port %d phy_address %d transceiver %d\n" | ||
| 247 | " autoneg %d maxtxpkt %d maxrxpkt %d\n", | ||
| 248 | cmd->cmd, cmd->supported, cmd->advertising, | ||
| 249 | ethtool_cmd_speed(cmd), | ||
| 250 | cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, | ||
| 251 | cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); | ||
| 252 | |||
| 253 | return 0; | ||
| 254 | } | ||
| 255 | |||
| 219 | static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 256 | static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
| 220 | { | 257 | { |
| 221 | struct bnx2x *bp = netdev_priv(dev); | 258 | struct bnx2x *bp = netdev_priv(dev); |
| @@ -379,6 +416,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
| 379 | break; | 416 | break; |
| 380 | case PORT_FIBRE: | 417 | case PORT_FIBRE: |
| 381 | case PORT_DA: | 418 | case PORT_DA: |
| 419 | case PORT_NONE: | ||
| 382 | if (!(bp->port.supported[0] & SUPPORTED_FIBRE || | 420 | if (!(bp->port.supported[0] & SUPPORTED_FIBRE || |
| 383 | bp->port.supported[1] & SUPPORTED_FIBRE)) { | 421 | bp->port.supported[1] & SUPPORTED_FIBRE)) { |
| 384 | DP(BNX2X_MSG_ETHTOOL, | 422 | DP(BNX2X_MSG_ETHTOOL, |
| @@ -1110,6 +1148,10 @@ static u32 bnx2x_get_link(struct net_device *dev) | |||
| 1110 | if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN)) | 1148 | if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN)) |
| 1111 | return 0; | 1149 | return 0; |
| 1112 | 1150 | ||
| 1151 | if (IS_VF(bp)) | ||
| 1152 | return !test_bit(BNX2X_LINK_REPORT_LINK_DOWN, | ||
| 1153 | &bp->vf_link_vars.link_report_flags); | ||
| 1154 | |||
| 1113 | return bp->link_vars.link_up; | 1155 | return bp->link_vars.link_up; |
| 1114 | } | 1156 | } |
| 1115 | 1157 | ||
| @@ -3484,8 +3526,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { | |||
| 3484 | }; | 3526 | }; |
| 3485 | 3527 | ||
| 3486 | static const struct ethtool_ops bnx2x_vf_ethtool_ops = { | 3528 | static const struct ethtool_ops bnx2x_vf_ethtool_ops = { |
| 3487 | .get_settings = bnx2x_get_settings, | 3529 | .get_settings = bnx2x_get_vf_settings, |
| 3488 | .set_settings = bnx2x_set_settings, | ||
| 3489 | .get_drvinfo = bnx2x_get_drvinfo, | 3530 | .get_drvinfo = bnx2x_get_drvinfo, |
| 3490 | .get_msglevel = bnx2x_get_msglevel, | 3531 | .get_msglevel = bnx2x_get_msglevel, |
| 3491 | .set_msglevel = bnx2x_set_msglevel, | 3532 | .set_msglevel = bnx2x_set_msglevel, |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 6a8b1453a1b9..900cab420810 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -249,7 +249,7 @@ static struct { | |||
| 249 | #define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF | 249 | #define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF |
| 250 | #endif | 250 | #endif |
| 251 | 251 | ||
| 252 | static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { | 252 | static const struct pci_device_id bnx2x_pci_tbl[] = { |
| 253 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, | 253 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, |
| 254 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, | 254 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, |
| 255 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, | 255 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, |
| @@ -2698,6 +2698,14 @@ void bnx2x__link_status_update(struct bnx2x *bp) | |||
| 2698 | bp->link_vars.duplex = DUPLEX_FULL; | 2698 | bp->link_vars.duplex = DUPLEX_FULL; |
| 2699 | bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE; | 2699 | bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE; |
| 2700 | __bnx2x_link_report(bp); | 2700 | __bnx2x_link_report(bp); |
| 2701 | |||
| 2702 | bnx2x_sample_bulletin(bp); | ||
| 2703 | |||
| 2704 | /* if bulletin board did not have an update for link status | ||
| 2705 | * __bnx2x_link_report will report current status | ||
| 2706 | * but it will NOT duplicate report in case of already reported | ||
| 2707 | * during sampling bulletin board. | ||
| 2708 | */ | ||
| 2701 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); | 2709 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); |
| 2702 | } | 2710 | } |
| 2703 | } | 2711 | } |
| @@ -10044,6 +10052,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, | |||
| 10044 | } | 10052 | } |
| 10045 | 10053 | ||
| 10046 | #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) | 10054 | #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) |
| 10055 | #define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \ | ||
| 10056 | 0x1848 + ((f) << 4)) | ||
| 10047 | #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) | 10057 | #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) |
| 10048 | #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) | 10058 | #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) |
| 10049 | #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) | 10059 | #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) |
| @@ -10051,8 +10061,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, | |||
| 10051 | #define BCM_5710_UNDI_FW_MF_MAJOR (0x07) | 10061 | #define BCM_5710_UNDI_FW_MF_MAJOR (0x07) |
| 10052 | #define BCM_5710_UNDI_FW_MF_MINOR (0x08) | 10062 | #define BCM_5710_UNDI_FW_MF_MINOR (0x08) |
| 10053 | #define BCM_5710_UNDI_FW_MF_VERS (0x05) | 10063 | #define BCM_5710_UNDI_FW_MF_VERS (0x05) |
| 10054 | #define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4)) | ||
| 10055 | #define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4)) | ||
| 10056 | 10064 | ||
| 10057 | static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) | 10065 | static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) |
| 10058 | { | 10066 | { |
| @@ -10071,72 +10079,25 @@ static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) | |||
| 10071 | return false; | 10079 | return false; |
| 10072 | } | 10080 | } |
| 10073 | 10081 | ||
| 10074 | static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) | 10082 | static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc) |
| 10075 | { | ||
| 10076 | u8 major, minor, version; | ||
| 10077 | u32 fw; | ||
| 10078 | |||
| 10079 | /* Must check that FW is loaded */ | ||
| 10080 | if (!(REG_RD(bp, MISC_REG_RESET_REG_1) & | ||
| 10081 | MISC_REGISTERS_RESET_REG_1_RST_XSEM)) { | ||
| 10082 | BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n"); | ||
| 10083 | return false; | ||
| 10084 | } | ||
| 10085 | |||
| 10086 | /* Read Currently loaded FW version */ | ||
| 10087 | fw = REG_RD(bp, XSEM_REG_PRAM); | ||
| 10088 | major = fw & 0xff; | ||
| 10089 | minor = (fw >> 0x8) & 0xff; | ||
| 10090 | version = (fw >> 0x10) & 0xff; | ||
| 10091 | BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n", | ||
| 10092 | fw, major, minor, version); | ||
| 10093 | |||
| 10094 | if (major > BCM_5710_UNDI_FW_MF_MAJOR) | ||
| 10095 | return true; | ||
| 10096 | |||
| 10097 | if ((major == BCM_5710_UNDI_FW_MF_MAJOR) && | ||
| 10098 | (minor > BCM_5710_UNDI_FW_MF_MINOR)) | ||
| 10099 | return true; | ||
| 10100 | |||
| 10101 | if ((major == BCM_5710_UNDI_FW_MF_MAJOR) && | ||
| 10102 | (minor == BCM_5710_UNDI_FW_MF_MINOR) && | ||
| 10103 | (version >= BCM_5710_UNDI_FW_MF_VERS)) | ||
| 10104 | return true; | ||
| 10105 | |||
| 10106 | return false; | ||
| 10107 | } | ||
| 10108 | |||
| 10109 | static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp) | ||
| 10110 | { | ||
| 10111 | int i; | ||
| 10112 | |||
| 10113 | /* Due to legacy (FW) code, the first function on each engine has a | ||
| 10114 | * different offset macro from the rest of the functions. | ||
| 10115 | * Setting this for all 8 functions is harmless regardless of whether | ||
| 10116 | * this is actually a multi-function device. | ||
| 10117 | */ | ||
| 10118 | for (i = 0; i < 2; i++) | ||
| 10119 | REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1); | ||
| 10120 | |||
| 10121 | for (i = 2; i < 8; i++) | ||
| 10122 | REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1); | ||
| 10123 | |||
| 10124 | BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n"); | ||
| 10125 | } | ||
| 10126 | |||
| 10127 | static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc) | ||
| 10128 | { | 10083 | { |
| 10129 | u16 rcq, bd; | 10084 | u16 rcq, bd; |
| 10130 | u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port)); | 10085 | u32 addr, tmp_reg; |
| 10086 | |||
| 10087 | if (BP_FUNC(bp) < 2) | ||
| 10088 | addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp)); | ||
| 10089 | else | ||
| 10090 | addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2); | ||
| 10131 | 10091 | ||
| 10092 | tmp_reg = REG_RD(bp, addr); | ||
| 10132 | rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; | 10093 | rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; |
| 10133 | bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; | 10094 | bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; |
| 10134 | 10095 | ||
| 10135 | tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); | 10096 | tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); |
| 10136 | REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg); | 10097 | REG_WR(bp, addr, tmp_reg); |
| 10137 | 10098 | ||
| 10138 | BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", | 10099 | BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n", |
| 10139 | port, bd, rcq); | 10100 | BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq); |
| 10140 | } | 10101 | } |
| 10141 | 10102 | ||
| 10142 | static int bnx2x_prev_mcp_done(struct bnx2x *bp) | 10103 | static int bnx2x_prev_mcp_done(struct bnx2x *bp) |
| @@ -10375,7 +10336,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
| 10375 | /* Reset should be performed after BRB is emptied */ | 10336 | /* Reset should be performed after BRB is emptied */ |
| 10376 | if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { | 10337 | if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { |
| 10377 | u32 timer_count = 1000; | 10338 | u32 timer_count = 1000; |
| 10378 | bool need_write = true; | ||
| 10379 | 10339 | ||
| 10380 | /* Close the MAC Rx to prevent BRB from filling up */ | 10340 | /* Close the MAC Rx to prevent BRB from filling up */ |
| 10381 | bnx2x_prev_unload_close_mac(bp, &mac_vals); | 10341 | bnx2x_prev_unload_close_mac(bp, &mac_vals); |
| @@ -10412,20 +10372,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
| 10412 | else | 10372 | else |
| 10413 | timer_count--; | 10373 | timer_count--; |
| 10414 | 10374 | ||
| 10415 | /* New UNDI FW supports MF and contains better | 10375 | /* If UNDI resides in memory, manually increment it */ |
| 10416 | * cleaning methods - might be redundant but harmless. | 10376 | if (prev_undi) |
| 10417 | */ | 10377 | bnx2x_prev_unload_undi_inc(bp, 1); |
| 10418 | if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { | 10378 | |
| 10419 | if (need_write) { | ||
| 10420 | bnx2x_prev_unload_undi_mf(bp); | ||
| 10421 | need_write = false; | ||
| 10422 | } | ||
| 10423 | } else if (prev_undi) { | ||
| 10424 | /* If UNDI resides in memory, | ||
| 10425 | * manually increment it | ||
| 10426 | */ | ||
| 10427 | bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1); | ||
| 10428 | } | ||
| 10429 | udelay(10); | 10379 | udelay(10); |
| 10430 | } | 10380 | } |
| 10431 | 10381 | ||
| @@ -12424,6 +12374,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { | |||
| 12424 | .ndo_busy_poll = bnx2x_low_latency_recv, | 12374 | .ndo_busy_poll = bnx2x_low_latency_recv, |
| 12425 | #endif | 12375 | #endif |
| 12426 | .ndo_get_phys_port_id = bnx2x_get_phys_port_id, | 12376 | .ndo_get_phys_port_id = bnx2x_get_phys_port_id, |
| 12377 | .ndo_set_vf_link_state = bnx2x_set_vf_link_state, | ||
| 12427 | }; | 12378 | }; |
| 12428 | 12379 | ||
| 12429 | static int bnx2x_set_coherency_mask(struct bnx2x *bp) | 12380 | static int bnx2x_set_coherency_mask(struct bnx2x *bp) |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index eda8583f6fc0..662310c5f4e9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
| @@ -24,6 +24,11 @@ | |||
| 24 | #include <linux/crc32.h> | 24 | #include <linux/crc32.h> |
| 25 | #include <linux/if_vlan.h> | 25 | #include <linux/if_vlan.h> |
| 26 | 26 | ||
| 27 | static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx, | ||
| 28 | struct bnx2x_virtf **vf, | ||
| 29 | struct pf_vf_bulletin_content **bulletin, | ||
| 30 | bool test_queue); | ||
| 31 | |||
| 27 | /* General service functions */ | 32 | /* General service functions */ |
| 28 | static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, | 33 | static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, |
| 29 | u16 pf_id) | 34 | u16 pf_id) |
| @@ -597,8 +602,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
| 597 | rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); | 602 | rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); |
| 598 | if (rc) { | 603 | if (rc) { |
| 599 | BNX2X_ERR("Failed to remove multicasts\n"); | 604 | BNX2X_ERR("Failed to remove multicasts\n"); |
| 600 | if (mc) | 605 | kfree(mc); |
| 601 | kfree(mc); | ||
| 602 | return rc; | 606 | return rc; |
| 603 | } | 607 | } |
| 604 | 608 | ||
| @@ -1328,6 +1332,8 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, | |||
| 1328 | /* Prepare the VFs event synchronization mechanism */ | 1332 | /* Prepare the VFs event synchronization mechanism */ |
| 1329 | mutex_init(&bp->vfdb->event_mutex); | 1333 | mutex_init(&bp->vfdb->event_mutex); |
| 1330 | 1334 | ||
| 1335 | mutex_init(&bp->vfdb->bulletin_mutex); | ||
| 1336 | |||
| 1331 | return 0; | 1337 | return 0; |
| 1332 | failed: | 1338 | failed: |
| 1333 | DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); | 1339 | DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); |
| @@ -1473,6 +1479,107 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
| 1473 | vf->abs_vfid, q->sp_obj.func_id, q->cid); | 1479 | vf->abs_vfid, q->sp_obj.func_id, q->cid); |
| 1474 | } | 1480 | } |
| 1475 | 1481 | ||
| 1482 | static int bnx2x_max_speed_cap(struct bnx2x *bp) | ||
| 1483 | { | ||
| 1484 | u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)]; | ||
| 1485 | |||
| 1486 | if (supported & | ||
| 1487 | (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full)) | ||
| 1488 | return 20000; | ||
| 1489 | |||
| 1490 | return 10000; /* assume lowest supported speed is 10G */ | ||
| 1491 | } | ||
| 1492 | |||
| 1493 | int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) | ||
| 1494 | { | ||
| 1495 | struct bnx2x_link_report_data *state = &bp->last_reported_link; | ||
| 1496 | struct pf_vf_bulletin_content *bulletin; | ||
| 1497 | struct bnx2x_virtf *vf; | ||
| 1498 | bool update = true; | ||
| 1499 | int rc = 0; | ||
| 1500 | |||
| 1501 | /* sanity and init */ | ||
| 1502 | rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false); | ||
| 1503 | if (rc) | ||
| 1504 | return rc; | ||
| 1505 | |||
| 1506 | mutex_lock(&bp->vfdb->bulletin_mutex); | ||
| 1507 | |||
| 1508 | if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) { | ||
| 1509 | bulletin->valid_bitmap |= 1 << LINK_VALID; | ||
| 1510 | |||
| 1511 | bulletin->link_speed = state->line_speed; | ||
| 1512 | bulletin->link_flags = 0; | ||
| 1513 | if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, | ||
| 1514 | &state->link_report_flags)) | ||
| 1515 | bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN; | ||
| 1516 | if (test_bit(BNX2X_LINK_REPORT_FD, | ||
| 1517 | &state->link_report_flags)) | ||
| 1518 | bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX; | ||
| 1519 | if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON, | ||
| 1520 | &state->link_report_flags)) | ||
| 1521 | bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON; | ||
| 1522 | if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON, | ||
| 1523 | &state->link_report_flags)) | ||
| 1524 | bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON; | ||
| 1525 | } else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE && | ||
| 1526 | !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) { | ||
| 1527 | bulletin->valid_bitmap |= 1 << LINK_VALID; | ||
| 1528 | bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN; | ||
| 1529 | } else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE && | ||
| 1530 | (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) { | ||
| 1531 | bulletin->valid_bitmap |= 1 << LINK_VALID; | ||
| 1532 | bulletin->link_speed = bnx2x_max_speed_cap(bp); | ||
| 1533 | bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN; | ||
| 1534 | } else { | ||
| 1535 | update = false; | ||
| 1536 | } | ||
| 1537 | |||
| 1538 | if (update) { | ||
| 1539 | DP(NETIF_MSG_LINK | BNX2X_MSG_IOV, | ||
| 1540 | "vf %d mode %u speed %d flags %x\n", idx, | ||
| 1541 | vf->link_cfg, bulletin->link_speed, bulletin->link_flags); | ||
| 1542 | |||
| 1543 | /* Post update on VF's bulletin board */ | ||
| 1544 | rc = bnx2x_post_vf_bulletin(bp, idx); | ||
| 1545 | if (rc) { | ||
| 1546 | BNX2X_ERR("failed to update VF[%d] bulletin\n", idx); | ||
| 1547 | goto out; | ||
| 1548 | } | ||
| 1549 | } | ||
| 1550 | |||
| 1551 | out: | ||
| 1552 | mutex_unlock(&bp->vfdb->bulletin_mutex); | ||
| 1553 | return rc; | ||
| 1554 | } | ||
| 1555 | |||
| 1556 | int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state) | ||
| 1557 | { | ||
| 1558 | struct bnx2x *bp = netdev_priv(dev); | ||
| 1559 | struct bnx2x_virtf *vf = BP_VF(bp, idx); | ||
| 1560 | |||
| 1561 | if (!vf) | ||
| 1562 | return -EINVAL; | ||
| 1563 | |||
| 1564 | if (vf->link_cfg == link_state) | ||
| 1565 | return 0; /* nothing todo */ | ||
| 1566 | |||
| 1567 | vf->link_cfg = link_state; | ||
| 1568 | |||
| 1569 | return bnx2x_iov_link_update_vf(bp, idx); | ||
| 1570 | } | ||
| 1571 | |||
| 1572 | void bnx2x_iov_link_update(struct bnx2x *bp) | ||
| 1573 | { | ||
| 1574 | int vfid; | ||
| 1575 | |||
| 1576 | if (!IS_SRIOV(bp)) | ||
| 1577 | return; | ||
| 1578 | |||
| 1579 | for_each_vf(bp, vfid) | ||
| 1580 | bnx2x_iov_link_update_vf(bp, vfid); | ||
| 1581 | } | ||
| 1582 | |||
| 1476 | /* called by bnx2x_nic_load */ | 1583 | /* called by bnx2x_nic_load */ |
| 1477 | int bnx2x_iov_nic_init(struct bnx2x *bp) | 1584 | int bnx2x_iov_nic_init(struct bnx2x *bp) |
| 1478 | { | 1585 | { |
| @@ -2510,22 +2617,23 @@ void bnx2x_disable_sriov(struct bnx2x *bp) | |||
| 2510 | pci_disable_sriov(bp->pdev); | 2617 | pci_disable_sriov(bp->pdev); |
| 2511 | } | 2618 | } |
| 2512 | 2619 | ||
| 2513 | static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, | 2620 | static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx, |
| 2514 | struct bnx2x_virtf **vf, | 2621 | struct bnx2x_virtf **vf, |
| 2515 | struct pf_vf_bulletin_content **bulletin) | 2622 | struct pf_vf_bulletin_content **bulletin, |
| 2623 | bool test_queue) | ||
| 2516 | { | 2624 | { |
| 2517 | if (bp->state != BNX2X_STATE_OPEN) { | 2625 | if (bp->state != BNX2X_STATE_OPEN) { |
| 2518 | BNX2X_ERR("vf ndo called though PF is down\n"); | 2626 | BNX2X_ERR("PF is down - can't utilize iov-related functionality\n"); |
| 2519 | return -EINVAL; | 2627 | return -EINVAL; |
| 2520 | } | 2628 | } |
| 2521 | 2629 | ||
| 2522 | if (!IS_SRIOV(bp)) { | 2630 | if (!IS_SRIOV(bp)) { |
| 2523 | BNX2X_ERR("vf ndo called though sriov is disabled\n"); | 2631 | BNX2X_ERR("sriov is disabled - can't utilize iov-realted functionality\n"); |
| 2524 | return -EINVAL; | 2632 | return -EINVAL; |
| 2525 | } | 2633 | } |
| 2526 | 2634 | ||
| 2527 | if (vfidx >= BNX2X_NR_VIRTFN(bp)) { | 2635 | if (vfidx >= BNX2X_NR_VIRTFN(bp)) { |
| 2528 | BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n", | 2636 | BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n", |
| 2529 | vfidx, BNX2X_NR_VIRTFN(bp)); | 2637 | vfidx, BNX2X_NR_VIRTFN(bp)); |
| 2530 | return -EINVAL; | 2638 | return -EINVAL; |
| 2531 | } | 2639 | } |
| @@ -2535,19 +2643,18 @@ static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, | |||
| 2535 | *bulletin = BP_VF_BULLETIN(bp, vfidx); | 2643 | *bulletin = BP_VF_BULLETIN(bp, vfidx); |
| 2536 | 2644 | ||
| 2537 | if (!*vf) { | 2645 | if (!*vf) { |
| 2538 | BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n", | 2646 | BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx); |
| 2539 | vfidx); | ||
| 2540 | return -EINVAL; | 2647 | return -EINVAL; |
| 2541 | } | 2648 | } |
| 2542 | 2649 | ||
| 2543 | if (!(*vf)->vfqs) { | 2650 | if (test_queue && !(*vf)->vfqs) { |
| 2544 | BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n", | 2651 | BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n", |
| 2545 | vfidx); | 2652 | vfidx); |
| 2546 | return -EINVAL; | 2653 | return -EINVAL; |
| 2547 | } | 2654 | } |
| 2548 | 2655 | ||
| 2549 | if (!*bulletin) { | 2656 | if (!*bulletin) { |
| 2550 | BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n", | 2657 | BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n", |
| 2551 | vfidx); | 2658 | vfidx); |
| 2552 | return -EINVAL; | 2659 | return -EINVAL; |
| 2553 | } | 2660 | } |
| @@ -2566,9 +2673,10 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, | |||
| 2566 | int rc; | 2673 | int rc; |
| 2567 | 2674 | ||
| 2568 | /* sanity and init */ | 2675 | /* sanity and init */ |
| 2569 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); | 2676 | rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); |
| 2570 | if (rc) | 2677 | if (rc) |
| 2571 | return rc; | 2678 | return rc; |
| 2679 | |||
| 2572 | mac_obj = &bnx2x_leading_vfq(vf, mac_obj); | 2680 | mac_obj = &bnx2x_leading_vfq(vf, mac_obj); |
| 2573 | vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); | 2681 | vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); |
| 2574 | if (!mac_obj || !vlan_obj) { | 2682 | if (!mac_obj || !vlan_obj) { |
| @@ -2591,6 +2699,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, | |||
| 2591 | VLAN_HLEN); | 2699 | VLAN_HLEN); |
| 2592 | } | 2700 | } |
| 2593 | } else { | 2701 | } else { |
| 2702 | mutex_lock(&bp->vfdb->bulletin_mutex); | ||
| 2594 | /* mac */ | 2703 | /* mac */ |
| 2595 | if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) | 2704 | if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) |
| 2596 | /* mac configured by ndo so its in bulletin board */ | 2705 | /* mac configured by ndo so its in bulletin board */ |
| @@ -2606,6 +2715,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, | |||
| 2606 | else | 2715 | else |
| 2607 | /* function has not been loaded yet. Show vlans as 0s */ | 2716 | /* function has not been loaded yet. Show vlans as 0s */ |
| 2608 | memset(&ivi->vlan, 0, VLAN_HLEN); | 2717 | memset(&ivi->vlan, 0, VLAN_HLEN); |
| 2718 | |||
| 2719 | mutex_unlock(&bp->vfdb->bulletin_mutex); | ||
| 2609 | } | 2720 | } |
| 2610 | 2721 | ||
| 2611 | return 0; | 2722 | return 0; |
| @@ -2635,15 +2746,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) | |||
| 2635 | struct bnx2x_virtf *vf = NULL; | 2746 | struct bnx2x_virtf *vf = NULL; |
| 2636 | struct pf_vf_bulletin_content *bulletin = NULL; | 2747 | struct pf_vf_bulletin_content *bulletin = NULL; |
| 2637 | 2748 | ||
| 2638 | /* sanity and init */ | ||
| 2639 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); | ||
| 2640 | if (rc) | ||
| 2641 | return rc; | ||
| 2642 | if (!is_valid_ether_addr(mac)) { | 2749 | if (!is_valid_ether_addr(mac)) { |
| 2643 | BNX2X_ERR("mac address invalid\n"); | 2750 | BNX2X_ERR("mac address invalid\n"); |
| 2644 | return -EINVAL; | 2751 | return -EINVAL; |
| 2645 | } | 2752 | } |
| 2646 | 2753 | ||
| 2754 | /* sanity and init */ | ||
| 2755 | rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); | ||
| 2756 | if (rc) | ||
| 2757 | return rc; | ||
| 2758 | |||
| 2759 | mutex_lock(&bp->vfdb->bulletin_mutex); | ||
| 2760 | |||
| 2647 | /* update PF's copy of the VF's bulletin. Will no longer accept mac | 2761 | /* update PF's copy of the VF's bulletin. Will no longer accept mac |
| 2648 | * configuration requests from vf unless match this mac | 2762 | * configuration requests from vf unless match this mac |
| 2649 | */ | 2763 | */ |
| @@ -2652,6 +2766,10 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) | |||
| 2652 | 2766 | ||
| 2653 | /* Post update on VF's bulletin board */ | 2767 | /* Post update on VF's bulletin board */ |
| 2654 | rc = bnx2x_post_vf_bulletin(bp, vfidx); | 2768 | rc = bnx2x_post_vf_bulletin(bp, vfidx); |
| 2769 | |||
| 2770 | /* release lock before checking return code */ | ||
| 2771 | mutex_unlock(&bp->vfdb->bulletin_mutex); | ||
| 2772 | |||
| 2655 | if (rc) { | 2773 | if (rc) { |
| 2656 | BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); | 2774 | BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); |
| 2657 | return rc; | 2775 | return rc; |
| @@ -2716,11 +2834,6 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) | |||
| 2716 | unsigned long accept_flags; | 2834 | unsigned long accept_flags; |
| 2717 | int rc; | 2835 | int rc; |
| 2718 | 2836 | ||
| 2719 | /* sanity and init */ | ||
| 2720 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); | ||
| 2721 | if (rc) | ||
| 2722 | return rc; | ||
| 2723 | |||
| 2724 | if (vlan > 4095) { | 2837 | if (vlan > 4095) { |
| 2725 | BNX2X_ERR("illegal vlan value %d\n", vlan); | 2838 | BNX2X_ERR("illegal vlan value %d\n", vlan); |
| 2726 | return -EINVAL; | 2839 | return -EINVAL; |
| @@ -2729,18 +2842,27 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) | |||
| 2729 | DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n", | 2842 | DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n", |
| 2730 | vfidx, vlan, 0); | 2843 | vfidx, vlan, 0); |
| 2731 | 2844 | ||
| 2845 | /* sanity and init */ | ||
| 2846 | rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); | ||
| 2847 | if (rc) | ||
| 2848 | return rc; | ||
| 2849 | |||
| 2732 | /* update PF's copy of the VF's bulletin. No point in posting the vlan | 2850 | /* update PF's copy of the VF's bulletin. No point in posting the vlan |
| 2733 | * to the VF since it doesn't have anything to do with it. But it useful | 2851 | * to the VF since it doesn't have anything to do with it. But it useful |
| 2734 | * to store it here in case the VF is not up yet and we can only | 2852 | * to store it here in case the VF is not up yet and we can only |
| 2735 | * configure the vlan later when it does. Treat vlan id 0 as remove the | 2853 | * configure the vlan later when it does. Treat vlan id 0 as remove the |
| 2736 | * Host tag. | 2854 | * Host tag. |
| 2737 | */ | 2855 | */ |
| 2856 | mutex_lock(&bp->vfdb->bulletin_mutex); | ||
| 2857 | |||
| 2738 | if (vlan > 0) | 2858 | if (vlan > 0) |
| 2739 | bulletin->valid_bitmap |= 1 << VLAN_VALID; | 2859 | bulletin->valid_bitmap |= 1 << VLAN_VALID; |
| 2740 | else | 2860 | else |
| 2741 | bulletin->valid_bitmap &= ~(1 << VLAN_VALID); | 2861 | bulletin->valid_bitmap &= ~(1 << VLAN_VALID); |
| 2742 | bulletin->vlan = vlan; | 2862 | bulletin->vlan = vlan; |
| 2743 | 2863 | ||
| 2864 | mutex_unlock(&bp->vfdb->bulletin_mutex); | ||
| 2865 | |||
| 2744 | /* is vf initialized and queue set up? */ | 2866 | /* is vf initialized and queue set up? */ |
| 2745 | if (vf->state != VF_ENABLED || | 2867 | if (vf->state != VF_ENABLED || |
| 2746 | bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != | 2868 | bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != |
| @@ -2850,10 +2972,9 @@ out: | |||
| 2850 | * entire bulletin board excluding the crc field itself. Use the length field | 2972 | * entire bulletin board excluding the crc field itself. Use the length field |
| 2851 | * as the Bulletin Board was posted by a PF with possibly a different version | 2973 | * as the Bulletin Board was posted by a PF with possibly a different version |
| 2852 | * from the vf which will sample it. Therefore, the length is computed by the | 2974 | * from the vf which will sample it. Therefore, the length is computed by the |
| 2853 | * PF and the used blindly by the VF. | 2975 | * PF and then used blindly by the VF. |
| 2854 | */ | 2976 | */ |
| 2855 | u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, | 2977 | u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin) |
| 2856 | struct pf_vf_bulletin_content *bulletin) | ||
| 2857 | { | 2978 | { |
| 2858 | return crc32(BULLETIN_CRC_SEED, | 2979 | return crc32(BULLETIN_CRC_SEED, |
| 2859 | ((u8 *)bulletin) + sizeof(bulletin->crc), | 2980 | ((u8 *)bulletin) + sizeof(bulletin->crc), |
| @@ -2863,47 +2984,74 @@ u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, | |||
| 2863 | /* Check for new posts on the bulletin board */ | 2984 | /* Check for new posts on the bulletin board */ |
| 2864 | enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) | 2985 | enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) |
| 2865 | { | 2986 | { |
| 2866 | struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content; | 2987 | struct pf_vf_bulletin_content *bulletin; |
| 2867 | int attempts; | 2988 | int attempts; |
| 2868 | 2989 | ||
| 2869 | /* bulletin board hasn't changed since last sample */ | 2990 | /* sampling structure in mid post may result with corrupted data |
| 2870 | if (bp->old_bulletin.version == bulletin.version) | 2991 | * validate crc to ensure coherency. |
| 2871 | return PFVF_BULLETIN_UNCHANGED; | 2992 | */ |
| 2993 | for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) { | ||
| 2994 | u32 crc; | ||
| 2872 | 2995 | ||
| 2873 | /* validate crc of new bulletin board */ | 2996 | /* sample the bulletin board */ |
| 2874 | if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) { | 2997 | memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin, |
| 2875 | /* sampling structure in mid post may result with corrupted data | 2998 | sizeof(union pf_vf_bulletin)); |
| 2876 | * validate crc to ensure coherency. | 2999 | |
| 2877 | */ | 3000 | crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content); |
| 2878 | for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) { | 3001 | |
| 2879 | bulletin = bp->pf2vf_bulletin->content; | 3002 | if (bp->shadow_bulletin.content.crc == crc) |
| 2880 | if (bulletin.crc == bnx2x_crc_vf_bulletin(bp, | 3003 | break; |
| 2881 | &bulletin)) | 3004 | |
| 2882 | break; | 3005 | BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n", |
| 2883 | BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n", | 3006 | bp->shadow_bulletin.content.crc, crc); |
| 2884 | bulletin.crc, | 3007 | } |
| 2885 | bnx2x_crc_vf_bulletin(bp, &bulletin)); | 3008 | |
| 2886 | } | 3009 | if (attempts >= BULLETIN_ATTEMPTS) { |
| 2887 | if (attempts >= BULLETIN_ATTEMPTS) { | 3010 | BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n", |
| 2888 | BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n", | 3011 | attempts); |
| 2889 | attempts); | 3012 | return PFVF_BULLETIN_CRC_ERR; |
| 2890 | return PFVF_BULLETIN_CRC_ERR; | ||
| 2891 | } | ||
| 2892 | } | 3013 | } |
| 3014 | bulletin = &bp->shadow_bulletin.content; | ||
| 3015 | |||
| 3016 | /* bulletin board hasn't changed since last sample */ | ||
| 3017 | if (bp->old_bulletin.version == bulletin->version) | ||
| 3018 | return PFVF_BULLETIN_UNCHANGED; | ||
| 2893 | 3019 | ||
| 2894 | /* the mac address in bulletin board is valid and is new */ | 3020 | /* the mac address in bulletin board is valid and is new */ |
| 2895 | if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID && | 3021 | if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID && |
| 2896 | !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) { | 3022 | !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) { |
| 2897 | /* update new mac to net device */ | 3023 | /* update new mac to net device */ |
| 2898 | memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); | 3024 | memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN); |
| 3025 | } | ||
| 3026 | |||
| 3027 | if (bulletin->valid_bitmap & (1 << LINK_VALID)) { | ||
| 3028 | DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n", | ||
| 3029 | bulletin->link_speed, bulletin->link_flags); | ||
| 3030 | |||
| 3031 | bp->vf_link_vars.line_speed = bulletin->link_speed; | ||
| 3032 | bp->vf_link_vars.link_report_flags = 0; | ||
| 3033 | /* Link is down */ | ||
| 3034 | if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN) | ||
| 3035 | __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, | ||
| 3036 | &bp->vf_link_vars.link_report_flags); | ||
| 3037 | /* Full DUPLEX */ | ||
| 3038 | if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX) | ||
| 3039 | __set_bit(BNX2X_LINK_REPORT_FD, | ||
| 3040 | &bp->vf_link_vars.link_report_flags); | ||
| 3041 | /* Rx Flow Control is ON */ | ||
| 3042 | if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON) | ||
| 3043 | __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, | ||
| 3044 | &bp->vf_link_vars.link_report_flags); | ||
| 3045 | /* Tx Flow Control is ON */ | ||
| 3046 | if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON) | ||
| 3047 | __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, | ||
| 3048 | &bp->vf_link_vars.link_report_flags); | ||
| 3049 | __bnx2x_link_report(bp); | ||
| 2899 | } | 3050 | } |
| 2900 | 3051 | ||
| 2901 | /* the vlan in bulletin board is valid and is new */ | ||
| 2902 | if (bulletin.valid_bitmap & 1 << VLAN_VALID) | ||
| 2903 | memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN); | ||
| 2904 | |||
| 2905 | /* copy new bulletin board to bp */ | 3052 | /* copy new bulletin board to bp */ |
| 2906 | bp->old_bulletin = bulletin; | 3053 | memcpy(&bp->old_bulletin, bulletin, |
| 3054 | sizeof(struct pf_vf_bulletin_content)); | ||
| 2907 | 3055 | ||
| 2908 | return PFVF_BULLETIN_UPDATED; | 3056 | return PFVF_BULLETIN_UPDATED; |
| 2909 | } | 3057 | } |
| @@ -2948,6 +3096,8 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp) | |||
| 2948 | if (!bp->pf2vf_bulletin) | 3096 | if (!bp->pf2vf_bulletin) |
| 2949 | goto alloc_mem_err; | 3097 | goto alloc_mem_err; |
| 2950 | 3098 | ||
| 3099 | bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true); | ||
| 3100 | |||
| 2951 | return 0; | 3101 | return 0; |
| 2952 | 3102 | ||
| 2953 | alloc_mem_err: | 3103 | alloc_mem_err: |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 96c575e147a5..ca1055f3d8af 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | |||
| @@ -126,7 +126,11 @@ struct bnx2x_virtf { | |||
| 126 | #define VF_CACHE_LINE 0x0010 | 126 | #define VF_CACHE_LINE 0x0010 |
| 127 | #define VF_CFG_VLAN 0x0020 | 127 | #define VF_CFG_VLAN 0x0020 |
| 128 | #define VF_CFG_STATS_COALESCE 0x0040 | 128 | #define VF_CFG_STATS_COALESCE 0x0040 |
| 129 | 129 | #define VF_CFG_EXT_BULLETIN 0x0080 | |
| 130 | u8 link_cfg; /* IFLA_VF_LINK_STATE_AUTO | ||
| 131 | * IFLA_VF_LINK_STATE_ENABLE | ||
| 132 | * IFLA_VF_LINK_STATE_DISABLE | ||
| 133 | */ | ||
| 130 | u8 state; | 134 | u8 state; |
| 131 | #define VF_FREE 0 /* VF ready to be acquired holds no resc */ | 135 | #define VF_FREE 0 /* VF ready to be acquired holds no resc */ |
| 132 | #define VF_ACQUIRED 1 /* VF acquired, but not initialized */ | 136 | #define VF_ACQUIRED 1 /* VF acquired, but not initialized */ |
| @@ -295,22 +299,22 @@ struct bnx2x_vfdb { | |||
| 295 | #define BP_VFDB(bp) ((bp)->vfdb) | 299 | #define BP_VFDB(bp) ((bp)->vfdb) |
| 296 | /* vf array */ | 300 | /* vf array */ |
| 297 | struct bnx2x_virtf *vfs; | 301 | struct bnx2x_virtf *vfs; |
| 298 | #define BP_VF(bp, idx) (&((bp)->vfdb->vfs[(idx)])) | 302 | #define BP_VF(bp, idx) (&((bp)->vfdb->vfs[idx])) |
| 299 | #define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[(idx)].var) | 303 | #define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[idx].var) |
| 300 | 304 | ||
| 301 | /* queue array - for all vfs */ | 305 | /* queue array - for all vfs */ |
| 302 | struct bnx2x_vf_queue *vfqs; | 306 | struct bnx2x_vf_queue *vfqs; |
| 303 | 307 | ||
| 304 | /* vf HW contexts */ | 308 | /* vf HW contexts */ |
| 305 | struct hw_dma context[BNX2X_VF_CIDS/ILT_PAGE_CIDS]; | 309 | struct hw_dma context[BNX2X_VF_CIDS/ILT_PAGE_CIDS]; |
| 306 | #define BP_VF_CXT_PAGE(bp, i) (&(bp)->vfdb->context[(i)]) | 310 | #define BP_VF_CXT_PAGE(bp, i) (&(bp)->vfdb->context[i]) |
| 307 | 311 | ||
| 308 | /* SR-IOV information */ | 312 | /* SR-IOV information */ |
| 309 | struct bnx2x_sriov sriov; | 313 | struct bnx2x_sriov sriov; |
| 310 | struct hw_dma mbx_dma; | 314 | struct hw_dma mbx_dma; |
| 311 | #define BP_VF_MBX_DMA(bp) (&((bp)->vfdb->mbx_dma)) | 315 | #define BP_VF_MBX_DMA(bp) (&((bp)->vfdb->mbx_dma)) |
| 312 | struct bnx2x_vf_mbx mbxs[BNX2X_MAX_NUM_OF_VFS]; | 316 | struct bnx2x_vf_mbx mbxs[BNX2X_MAX_NUM_OF_VFS]; |
| 313 | #define BP_VF_MBX(bp, vfid) (&((bp)->vfdb->mbxs[(vfid)])) | 317 | #define BP_VF_MBX(bp, vfid) (&((bp)->vfdb->mbxs[vfid])) |
| 314 | 318 | ||
| 315 | struct hw_dma bulletin_dma; | 319 | struct hw_dma bulletin_dma; |
| 316 | #define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma)) | 320 | #define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma)) |
| @@ -336,6 +340,9 @@ struct bnx2x_vfdb { | |||
| 336 | /* sp_rtnl synchronization */ | 340 | /* sp_rtnl synchronization */ |
| 337 | struct mutex event_mutex; | 341 | struct mutex event_mutex; |
| 338 | u64 event_occur; | 342 | u64 event_occur; |
| 343 | |||
| 344 | /* bulletin board update synchronization */ | ||
| 345 | struct mutex bulletin_mutex; | ||
| 339 | }; | 346 | }; |
| 340 | 347 | ||
| 341 | /* queue access */ | 348 | /* queue access */ |
| @@ -467,9 +474,10 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp); | |||
| 467 | 474 | ||
| 468 | bool bnx2x_tlv_supported(u16 tlvtype); | 475 | bool bnx2x_tlv_supported(u16 tlvtype); |
| 469 | 476 | ||
| 470 | u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, | 477 | u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin); |
| 471 | struct pf_vf_bulletin_content *bulletin); | ||
| 472 | int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf); | 478 | int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf); |
| 479 | void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin, | ||
| 480 | bool support_long); | ||
| 473 | 481 | ||
| 474 | enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); | 482 | enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); |
| 475 | 483 | ||
| @@ -520,6 +528,11 @@ void bnx2x_iov_task(struct work_struct *work); | |||
| 520 | 528 | ||
| 521 | void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag); | 529 | void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag); |
| 522 | 530 | ||
| 531 | void bnx2x_iov_link_update(struct bnx2x *bp); | ||
| 532 | int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx); | ||
| 533 | |||
| 534 | int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state); | ||
| 535 | |||
| 523 | #else /* CONFIG_BNX2X_SRIOV */ | 536 | #else /* CONFIG_BNX2X_SRIOV */ |
| 524 | 537 | ||
| 525 | static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, | 538 | static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, |
| @@ -579,6 +592,14 @@ static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} | |||
| 579 | 592 | ||
| 580 | static inline void bnx2x_iov_task(struct work_struct *work) {} | 593 | static inline void bnx2x_iov_task(struct work_struct *work) {} |
| 581 | static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {} | 594 | static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {} |
| 595 | static inline void bnx2x_iov_link_update(struct bnx2x *bp) {} | ||
| 596 | static inline int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) {return 0; } | ||
| 597 | |||
| 598 | static inline int bnx2x_set_vf_link_state(struct net_device *dev, int vf, | ||
| 599 | int link_state) {return 0; } | ||
| 600 | struct pf_vf_bulletin_content; | ||
| 601 | static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin, | ||
| 602 | bool support_long) {} | ||
| 582 | 603 | ||
| 583 | #endif /* CONFIG_BNX2X_SRIOV */ | 604 | #endif /* CONFIG_BNX2X_SRIOV */ |
| 584 | #endif /* bnx2x_sriov.h */ | 605 | #endif /* bnx2x_sriov.h */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index d712d0ddd719..54e0427a9ee6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | |||
| @@ -251,6 +251,9 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) | |||
| 251 | bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, | 251 | bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, |
| 252 | CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv)); | 252 | CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv)); |
| 253 | 253 | ||
| 254 | /* Bulletin support for bulletin board with length > legacy length */ | ||
| 255 | req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN; | ||
| 256 | |||
| 254 | /* add list termination tlv */ | 257 | /* add list termination tlv */ |
| 255 | bnx2x_add_tlv(bp, req, | 258 | bnx2x_add_tlv(bp, req, |
| 256 | req->first_tlv.tl.length + sizeof(struct channel_tlv), | 259 | req->first_tlv.tl.length + sizeof(struct channel_tlv), |
| @@ -1232,6 +1235,41 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
| 1232 | bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status); | 1235 | bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status); |
| 1233 | } | 1236 | } |
| 1234 | 1237 | ||
| 1238 | static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp, | ||
| 1239 | struct vfpf_acquire_tlv *acquire) | ||
| 1240 | { | ||
| 1241 | /* Windows driver does one of three things: | ||
| 1242 | * 1. Old driver doesn't have bulletin board address set. | ||
| 1243 | * 2. 'Middle' driver sends mc_num == 32. | ||
| 1244 | * 3. New driver sets the OS field. | ||
| 1245 | */ | ||
| 1246 | if (!acquire->bulletin_addr || | ||
| 1247 | acquire->resc_request.num_mc_filters == 32 || | ||
| 1248 | ((acquire->vfdev_info.vf_os & VF_OS_MASK) == | ||
| 1249 | VF_OS_WINDOWS)) | ||
| 1250 | return true; | ||
| 1251 | |||
| 1252 | return false; | ||
| 1253 | } | ||
| 1254 | |||
| 1255 | static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp, | ||
| 1256 | struct bnx2x_virtf *vf, | ||
| 1257 | struct bnx2x_vf_mbx *mbx) | ||
| 1258 | { | ||
| 1259 | /* Linux drivers which correctly set the doorbell size also | ||
| 1260 | * send a physical port request | ||
| 1261 | */ | ||
| 1262 | if (bnx2x_search_tlv_list(bp, &mbx->msg->req, | ||
| 1263 | CHANNEL_TLV_PHYS_PORT_ID)) | ||
| 1264 | return 0; | ||
| 1265 | |||
| 1266 | /* Issue does not exist in windows VMs */ | ||
| 1267 | if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire)) | ||
| 1268 | return 0; | ||
| 1269 | |||
| 1270 | return -EOPNOTSUPP; | ||
| 1271 | } | ||
| 1272 | |||
| 1235 | static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, | 1273 | static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, |
| 1236 | struct bnx2x_vf_mbx *mbx) | 1274 | struct bnx2x_vf_mbx *mbx) |
| 1237 | { | 1275 | { |
| @@ -1247,12 +1285,32 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
| 1247 | acquire->resc_request.num_vlan_filters, | 1285 | acquire->resc_request.num_vlan_filters, |
| 1248 | acquire->resc_request.num_mc_filters); | 1286 | acquire->resc_request.num_mc_filters); |
| 1249 | 1287 | ||
| 1288 | /* Prevent VFs with old drivers from loading, since they calculate | ||
| 1289 | * CIDs incorrectly requiring a VF-flr [VM reboot] in order to recover | ||
| 1290 | * while being upgraded. | ||
| 1291 | */ | ||
| 1292 | rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx); | ||
| 1293 | if (rc) { | ||
| 1294 | DP(BNX2X_MSG_IOV, | ||
| 1295 | "VF [%d] - Can't support acquire request due to doorbell mismatch. Please update VM driver\n", | ||
| 1296 | vf->abs_vfid); | ||
| 1297 | goto out; | ||
| 1298 | } | ||
| 1299 | |||
| 1250 | /* acquire the resources */ | 1300 | /* acquire the resources */ |
| 1251 | rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request); | 1301 | rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request); |
| 1252 | 1302 | ||
| 1253 | /* store address of vf's bulletin board */ | 1303 | /* store address of vf's bulletin board */ |
| 1254 | vf->bulletin_map = acquire->bulletin_addr; | 1304 | vf->bulletin_map = acquire->bulletin_addr; |
| 1305 | if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_EXT_BULLETIN) { | ||
| 1306 | DP(BNX2X_MSG_IOV, "VF[%d] supports long bulletin boards\n", | ||
| 1307 | vf->abs_vfid); | ||
| 1308 | vf->cfg_flags |= VF_CFG_EXT_BULLETIN; | ||
| 1309 | } else { | ||
| 1310 | vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN; | ||
| 1311 | } | ||
| 1255 | 1312 | ||
| 1313 | out: | ||
| 1256 | /* response */ | 1314 | /* response */ |
| 1257 | bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc); | 1315 | bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc); |
| 1258 | } | 1316 | } |
| @@ -1273,6 +1331,10 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
| 1273 | if (init->flags & VFPF_INIT_FLG_STATS_COALESCE) | 1331 | if (init->flags & VFPF_INIT_FLG_STATS_COALESCE) |
| 1274 | vf->cfg_flags |= VF_CFG_STATS_COALESCE; | 1332 | vf->cfg_flags |= VF_CFG_STATS_COALESCE; |
| 1275 | 1333 | ||
| 1334 | /* Update VF's view of link state */ | ||
| 1335 | if (vf->cfg_flags & VF_CFG_EXT_BULLETIN) | ||
| 1336 | bnx2x_iov_link_update_vf(bp, vf->index); | ||
| 1337 | |||
| 1276 | /* response */ | 1338 | /* response */ |
| 1277 | bnx2x_vf_mbx_resp(bp, vf, rc); | 1339 | bnx2x_vf_mbx_resp(bp, vf, rc); |
| 1278 | } | 1340 | } |
| @@ -2007,6 +2069,17 @@ void bnx2x_vf_mbx(struct bnx2x *bp) | |||
| 2007 | } | 2069 | } |
| 2008 | } | 2070 | } |
| 2009 | 2071 | ||
| 2072 | void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin, | ||
| 2073 | bool support_long) | ||
| 2074 | { | ||
| 2075 | /* Older VFs contain a bug where they can't check CRC for bulletin | ||
| 2076 | * boards of length greater than legacy size. | ||
| 2077 | */ | ||
| 2078 | bulletin->length = support_long ? BULLETIN_CONTENT_SIZE : | ||
| 2079 | BULLETIN_CONTENT_LEGACY_SIZE; | ||
| 2080 | bulletin->crc = bnx2x_crc_vf_bulletin(bulletin); | ||
| 2081 | } | ||
| 2082 | |||
| 2010 | /* propagate local bulletin board to vf */ | 2083 | /* propagate local bulletin board to vf */ |
| 2011 | int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf) | 2084 | int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf) |
| 2012 | { | 2085 | { |
| @@ -2023,8 +2096,9 @@ int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf) | |||
| 2023 | 2096 | ||
| 2024 | /* increment bulletin board version and compute crc */ | 2097 | /* increment bulletin board version and compute crc */ |
| 2025 | bulletin->version++; | 2098 | bulletin->version++; |
| 2026 | bulletin->length = BULLETIN_CONTENT_SIZE; | 2099 | bnx2x_vf_bulletin_finalize(bulletin, |
| 2027 | bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin); | 2100 | (bnx2x_vf(bp, vf, cfg_flags) & |
| 2101 | VF_CFG_EXT_BULLETIN) ? true : false); | ||
| 2028 | 2102 | ||
| 2029 | /* propagate bulletin board via dmae to vm memory */ | 2103 | /* propagate bulletin board via dmae to vm memory */ |
| 2030 | rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, | 2104 | rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index e21e706762c9..15670c499a20 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | |||
| @@ -65,6 +65,7 @@ struct hw_sb_info { | |||
| 65 | #define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008 | 65 | #define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008 |
| 66 | #define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010 | 66 | #define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010 |
| 67 | #define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content)) | 67 | #define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content)) |
| 68 | #define BULLETIN_CONTENT_LEGACY_SIZE (32) | ||
| 68 | #define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */ | 69 | #define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */ |
| 69 | #define BULLETIN_CRC_SEED 0 | 70 | #define BULLETIN_CRC_SEED 0 |
| 70 | 71 | ||
| @@ -117,7 +118,15 @@ struct vfpf_acquire_tlv { | |||
| 117 | /* the following fields are for debug purposes */ | 118 | /* the following fields are for debug purposes */ |
| 118 | u8 vf_id; /* ME register value */ | 119 | u8 vf_id; /* ME register value */ |
| 119 | u8 vf_os; /* e.g. Linux, W2K8 */ | 120 | u8 vf_os; /* e.g. Linux, W2K8 */ |
| 120 | u8 padding[2]; | 121 | #define VF_OS_SUBVERSION_MASK (0x1f) |
| 122 | #define VF_OS_MASK (0xe0) | ||
| 123 | #define VF_OS_SHIFT (5) | ||
| 124 | #define VF_OS_UNDEFINED (0 << VF_OS_SHIFT) | ||
| 125 | #define VF_OS_WINDOWS (1 << VF_OS_SHIFT) | ||
| 126 | |||
| 127 | u8 padding; | ||
| 128 | u8 caps; | ||
| 129 | #define VF_CAP_SUPPORT_EXT_BULLETIN (1 << 0) | ||
| 121 | } vfdev_info; | 130 | } vfdev_info; |
| 122 | 131 | ||
| 123 | struct vf_pf_resc_request resc_request; | 132 | struct vf_pf_resc_request resc_request; |
| @@ -393,11 +402,23 @@ struct pf_vf_bulletin_content { | |||
| 393 | * to attempt to send messages on the | 402 | * to attempt to send messages on the |
| 394 | * channel after this bit is set | 403 | * channel after this bit is set |
| 395 | */ | 404 | */ |
| 405 | #define LINK_VALID 3 /* alert the VF that a new link status | ||
| 406 | * update is available for it | ||
| 407 | */ | ||
| 396 | u8 mac[ETH_ALEN]; | 408 | u8 mac[ETH_ALEN]; |
| 397 | u8 mac_padding[2]; | 409 | u8 mac_padding[2]; |
| 398 | 410 | ||
| 399 | u16 vlan; | 411 | u16 vlan; |
| 400 | u8 vlan_padding[6]; | 412 | u8 vlan_padding[6]; |
| 413 | |||
| 414 | u16 link_speed; /* Effective line speed */ | ||
| 415 | u8 link_speed_padding[6]; | ||
| 416 | u32 link_flags; /* VFPF_LINK_REPORT_XXX flags */ | ||
| 417 | #define VFPF_LINK_REPORT_LINK_DOWN (1 << 0) | ||
| 418 | #define VFPF_LINK_REPORT_FULL_DUPLEX (1 << 1) | ||
| 419 | #define VFPF_LINK_REPORT_RX_FC_ON (1 << 2) | ||
| 420 | #define VFPF_LINK_REPORT_TX_FC_ON (1 << 3) | ||
| 421 | u8 link_flags_padding[4]; | ||
| 401 | }; | 422 | }; |
| 402 | 423 | ||
| 403 | union pf_vf_bulletin { | 424 | union pf_vf_bulletin { |
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 8244e2b14bb4..27861a6c7ca5 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c | |||
| @@ -1,13 +1,15 @@ | |||
| 1 | /* cnic.c: Broadcom CNIC core network driver. | 1 | /* cnic.c: QLogic CNIC core network driver. |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 2006-2014 Broadcom Corporation | 3 | * Copyright (c) 2006-2014 Broadcom Corporation |
| 4 | * Copyright (c) 2014 QLogic Corporation | ||
| 4 | * | 5 | * |
| 5 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
| 7 | * the Free Software Foundation. | 8 | * the Free Software Foundation. |
| 8 | * | 9 | * |
| 9 | * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com) | 10 | * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com) |
| 10 | * Modified and maintained by: Michael Chan <mchan@broadcom.com> | 11 | * Previously modified and maintained by: Michael Chan <mchan@broadcom.com> |
| 12 | * Maintained By: Dept-HSGLinuxNICDev@qlogic.com | ||
| 11 | */ | 13 | */ |
| 12 | 14 | ||
| 13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| @@ -56,11 +58,11 @@ | |||
| 56 | #define CNIC_MODULE_NAME "cnic" | 58 | #define CNIC_MODULE_NAME "cnic" |
| 57 | 59 | ||
| 58 | static char version[] = | 60 | static char version[] = |
| 59 | "Broadcom NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; | 61 | "QLogic NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; |
| 60 | 62 | ||
| 61 | MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) " | 63 | MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) " |
| 62 | "Chen (zongxi@broadcom.com"); | 64 | "Chen (zongxi@broadcom.com"); |
| 63 | MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver"); | 65 | MODULE_DESCRIPTION("QLogic NetXtreme II CNIC Driver"); |
| 64 | MODULE_LICENSE("GPL"); | 66 | MODULE_LICENSE("GPL"); |
| 65 | MODULE_VERSION(CNIC_MODULE_VERSION); | 67 | MODULE_VERSION(CNIC_MODULE_VERSION); |
| 66 | 68 | ||
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index d535ae4228b4..4baea81bae7a 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | /* cnic.h: Broadcom CNIC core network driver. | 1 | /* cnic.h: QLogic CNIC core network driver. |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 2006-2014 Broadcom Corporation | 3 | * Copyright (c) 2006-2014 Broadcom Corporation |
| 4 | * Copyright (c) 2014 QLogic Corporation | ||
| 4 | * | 5 | * |
| 5 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h index dcbca6997e8f..b38499774071 100644 --- a/drivers/net/ethernet/broadcom/cnic_defs.h +++ b/drivers/net/ethernet/broadcom/cnic_defs.h | |||
| @@ -1,7 +1,8 @@ | |||
| 1 | 1 | ||
| 2 | /* cnic.c: Broadcom CNIC core network driver. | 2 | /* cnic.c: QLogic CNIC core network driver. |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2006-2014 Broadcom Corporation | 4 | * Copyright (c) 2006-2014 Broadcom Corporation |
| 5 | * Copyright (c) 2014 QLogic Corporation | ||
| 5 | * | 6 | * |
| 6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 5f4d5573a73d..8bb36c1c4d68 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | /* cnic_if.h: Broadcom CNIC core network driver. | 1 | /* cnic_if.h: QLogic CNIC core network driver. |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 2006-2014 Broadcom Corporation | 3 | * Copyright (c) 2006-2014 Broadcom Corporation |
| 4 | * Copyright (c) 2014 QLogic Corporation | ||
| 4 | * | 5 | * |
| 5 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/net/ethernet/broadcom/genet/Makefile b/drivers/net/ethernet/broadcom/genet/Makefile index 31f55a90a197..9b6885efa9e7 100644 --- a/drivers/net/ethernet/broadcom/genet/Makefile +++ b/drivers/net/ethernet/broadcom/genet/Makefile | |||
| @@ -1,2 +1,2 @@ | |||
| 1 | obj-$(CONFIG_BCMGENET) += genet.o | 1 | obj-$(CONFIG_BCMGENET) += genet.o |
| 2 | genet-objs := bcmgenet.o bcmmii.o | 2 | genet-objs := bcmgenet.o bcmmii.o bcmgenet_wol.o |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 16281ad2da12..3f9d4de8173c 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c | |||
| @@ -6,15 +6,6 @@ | |||
| 6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
| 8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program; if not, write to the Free Software | ||
| 17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 18 | */ | 9 | */ |
| 19 | 10 | ||
| 20 | #define pr_fmt(fmt) "bcmgenet: " fmt | 11 | #define pr_fmt(fmt) "bcmgenet: " fmt |
| @@ -79,13 +70,13 @@ | |||
| 79 | TOTAL_DESC * DMA_DESC_SIZE) | 70 | TOTAL_DESC * DMA_DESC_SIZE) |
| 80 | 71 | ||
| 81 | static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, | 72 | static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, |
| 82 | void __iomem *d, u32 value) | 73 | void __iomem *d, u32 value) |
| 83 | { | 74 | { |
| 84 | __raw_writel(value, d + DMA_DESC_LENGTH_STATUS); | 75 | __raw_writel(value, d + DMA_DESC_LENGTH_STATUS); |
| 85 | } | 76 | } |
| 86 | 77 | ||
| 87 | static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv, | 78 | static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv, |
| 88 | void __iomem *d) | 79 | void __iomem *d) |
| 89 | { | 80 | { |
| 90 | return __raw_readl(d + DMA_DESC_LENGTH_STATUS); | 81 | return __raw_readl(d + DMA_DESC_LENGTH_STATUS); |
| 91 | } | 82 | } |
| @@ -98,7 +89,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, | |||
| 98 | 89 | ||
| 99 | /* Register writes to GISB bus can take couple hundred nanoseconds | 90 | /* Register writes to GISB bus can take couple hundred nanoseconds |
| 100 | * and are done for each packet, save these expensive writes unless | 91 | * and are done for each packet, save these expensive writes unless |
| 101 | * the platform is explicitely configured for 64-bits/LPAE. | 92 | * the platform is explicitly configured for 64-bits/LPAE. |
| 102 | */ | 93 | */ |
| 103 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | 94 | #ifdef CONFIG_PHYS_ADDR_T_64BIT |
| 104 | if (priv->hw_params->flags & GENET_HAS_40BITS) | 95 | if (priv->hw_params->flags & GENET_HAS_40BITS) |
| @@ -108,7 +99,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, | |||
| 108 | 99 | ||
| 109 | /* Combined address + length/status setter */ | 100 | /* Combined address + length/status setter */ |
| 110 | static inline void dmadesc_set(struct bcmgenet_priv *priv, | 101 | static inline void dmadesc_set(struct bcmgenet_priv *priv, |
| 111 | void __iomem *d, dma_addr_t addr, u32 val) | 102 | void __iomem *d, dma_addr_t addr, u32 val) |
| 112 | { | 103 | { |
| 113 | dmadesc_set_length_status(priv, d, val); | 104 | dmadesc_set_length_status(priv, d, val); |
| 114 | dmadesc_set_addr(priv, d, addr); | 105 | dmadesc_set_addr(priv, d, addr); |
| @@ -123,7 +114,7 @@ static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, | |||
| 123 | 114 | ||
| 124 | /* Register writes to GISB bus can take couple hundred nanoseconds | 115 | /* Register writes to GISB bus can take couple hundred nanoseconds |
| 125 | * and are done for each packet, save these expensive writes unless | 116 | * and are done for each packet, save these expensive writes unless |
| 126 | * the platform is explicitely configured for 64-bits/LPAE. | 117 | * the platform is explicitly configured for 64-bits/LPAE. |
| 127 | */ | 118 | */ |
| 128 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | 119 | #ifdef CONFIG_PHYS_ADDR_T_64BIT |
| 129 | if (priv->hw_params->flags & GENET_HAS_40BITS) | 120 | if (priv->hw_params->flags & GENET_HAS_40BITS) |
| @@ -242,7 +233,7 @@ static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) | |||
| 242 | } | 233 | } |
| 243 | 234 | ||
| 244 | static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, | 235 | static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, |
| 245 | enum dma_reg r) | 236 | enum dma_reg r) |
| 246 | { | 237 | { |
| 247 | return __raw_readl(priv->base + GENET_TDMA_REG_OFF + | 238 | return __raw_readl(priv->base + GENET_TDMA_REG_OFF + |
| 248 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); | 239 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); |
| @@ -256,7 +247,7 @@ static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, | |||
| 256 | } | 247 | } |
| 257 | 248 | ||
| 258 | static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, | 249 | static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, |
| 259 | enum dma_reg r) | 250 | enum dma_reg r) |
| 260 | { | 251 | { |
| 261 | return __raw_readl(priv->base + GENET_RDMA_REG_OFF + | 252 | return __raw_readl(priv->base + GENET_RDMA_REG_OFF + |
| 262 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); | 253 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); |
| @@ -333,8 +324,8 @@ static const u8 genet_dma_ring_regs_v123[] = { | |||
| 333 | static const u8 *genet_dma_ring_regs; | 324 | static const u8 *genet_dma_ring_regs; |
| 334 | 325 | ||
| 335 | static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, | 326 | static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, |
| 336 | unsigned int ring, | 327 | unsigned int ring, |
| 337 | enum dma_ring_reg r) | 328 | enum dma_ring_reg r) |
| 338 | { | 329 | { |
| 339 | return __raw_readl(priv->base + GENET_TDMA_REG_OFF + | 330 | return __raw_readl(priv->base + GENET_TDMA_REG_OFF + |
| 340 | (DMA_RING_SIZE * ring) + | 331 | (DMA_RING_SIZE * ring) + |
| @@ -342,9 +333,8 @@ static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, | |||
| 342 | } | 333 | } |
| 343 | 334 | ||
| 344 | static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, | 335 | static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, |
| 345 | unsigned int ring, | 336 | unsigned int ring, u32 val, |
| 346 | u32 val, | 337 | enum dma_ring_reg r) |
| 347 | enum dma_ring_reg r) | ||
| 348 | { | 338 | { |
| 349 | __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + | 339 | __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + |
| 350 | (DMA_RING_SIZE * ring) + | 340 | (DMA_RING_SIZE * ring) + |
| @@ -352,8 +342,8 @@ static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, | |||
| 352 | } | 342 | } |
| 353 | 343 | ||
| 354 | static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, | 344 | static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, |
| 355 | unsigned int ring, | 345 | unsigned int ring, |
| 356 | enum dma_ring_reg r) | 346 | enum dma_ring_reg r) |
| 357 | { | 347 | { |
| 358 | return __raw_readl(priv->base + GENET_RDMA_REG_OFF + | 348 | return __raw_readl(priv->base + GENET_RDMA_REG_OFF + |
| 359 | (DMA_RING_SIZE * ring) + | 349 | (DMA_RING_SIZE * ring) + |
| @@ -361,9 +351,8 @@ static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, | |||
| 361 | } | 351 | } |
| 362 | 352 | ||
| 363 | static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, | 353 | static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, |
| 364 | unsigned int ring, | 354 | unsigned int ring, u32 val, |
| 365 | u32 val, | 355 | enum dma_ring_reg r) |
| 366 | enum dma_ring_reg r) | ||
| 367 | { | 356 | { |
| 368 | __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + | 357 | __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + |
| 369 | (DMA_RING_SIZE * ring) + | 358 | (DMA_RING_SIZE * ring) + |
| @@ -371,7 +360,7 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, | |||
| 371 | } | 360 | } |
| 372 | 361 | ||
| 373 | static int bcmgenet_get_settings(struct net_device *dev, | 362 | static int bcmgenet_get_settings(struct net_device *dev, |
| 374 | struct ethtool_cmd *cmd) | 363 | struct ethtool_cmd *cmd) |
| 375 | { | 364 | { |
| 376 | struct bcmgenet_priv *priv = netdev_priv(dev); | 365 | struct bcmgenet_priv *priv = netdev_priv(dev); |
| 377 | 366 | ||
| @@ -385,7 +374,7 @@ static int bcmgenet_get_settings(struct net_device *dev, | |||
| 385 | } | 374 | } |
| 386 | 375 | ||
| 387 | static int bcmgenet_set_settings(struct net_device *dev, | 376 | static int bcmgenet_set_settings(struct net_device *dev, |
| 388 | struct ethtool_cmd *cmd) | 377 | struct ethtool_cmd *cmd) |
| 389 | { | 378 | { |
| 390 | struct bcmgenet_priv *priv = netdev_priv(dev); | 379 | struct bcmgenet_priv *priv = netdev_priv(dev); |
| 391 | 380 | ||
| @@ -458,7 +447,7 @@ static int bcmgenet_set_tx_csum(struct net_device *dev, | |||
| 458 | } | 447 | } |
| 459 | 448 | ||
| 460 | static int bcmgenet_set_features(struct net_device *dev, | 449 | static int bcmgenet_set_features(struct net_device *dev, |
| 461 | netdev_features_t features) | 450 | netdev_features_t features) |
| 462 | { | 451 | { |
| 463 | netdev_features_t changed = features ^ dev->features; | 452 | netdev_features_t changed = features ^ dev->features; |
| 464 | netdev_features_t wanted = dev->wanted_features; | 453 | netdev_features_t wanted = dev->wanted_features; |
| @@ -625,12 +614,11 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { | |||
| 625 | #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) | 614 | #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) |
| 626 | 615 | ||
| 627 | static void bcmgenet_get_drvinfo(struct net_device *dev, | 616 | static void bcmgenet_get_drvinfo(struct net_device *dev, |
| 628 | struct ethtool_drvinfo *info) | 617 | struct ethtool_drvinfo *info) |
| 629 | { | 618 | { |
| 630 | strlcpy(info->driver, "bcmgenet", sizeof(info->driver)); | 619 | strlcpy(info->driver, "bcmgenet", sizeof(info->driver)); |
| 631 | strlcpy(info->version, "v2.0", sizeof(info->version)); | 620 | strlcpy(info->version, "v2.0", sizeof(info->version)); |
| 632 | info->n_stats = BCMGENET_STATS_LEN; | 621 | info->n_stats = BCMGENET_STATS_LEN; |
| 633 | |||
| 634 | } | 622 | } |
| 635 | 623 | ||
| 636 | static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) | 624 | static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) |
| @@ -643,8 +631,8 @@ static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) | |||
| 643 | } | 631 | } |
| 644 | } | 632 | } |
| 645 | 633 | ||
| 646 | static void bcmgenet_get_strings(struct net_device *dev, | 634 | static void bcmgenet_get_strings(struct net_device *dev, u32 stringset, |
| 647 | u32 stringset, u8 *data) | 635 | u8 *data) |
| 648 | { | 636 | { |
| 649 | int i; | 637 | int i; |
| 650 | 638 | ||
| @@ -652,8 +640,8 @@ static void bcmgenet_get_strings(struct net_device *dev, | |||
| 652 | case ETH_SS_STATS: | 640 | case ETH_SS_STATS: |
| 653 | for (i = 0; i < BCMGENET_STATS_LEN; i++) { | 641 | for (i = 0; i < BCMGENET_STATS_LEN; i++) { |
| 654 | memcpy(data + i * ETH_GSTRING_LEN, | 642 | memcpy(data + i * ETH_GSTRING_LEN, |
| 655 | bcmgenet_gstrings_stats[i].stat_string, | 643 | bcmgenet_gstrings_stats[i].stat_string, |
| 656 | ETH_GSTRING_LEN); | 644 | ETH_GSTRING_LEN); |
| 657 | } | 645 | } |
| 658 | break; | 646 | break; |
| 659 | } | 647 | } |
| @@ -678,8 +666,8 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) | |||
| 678 | case BCMGENET_STAT_RUNT: | 666 | case BCMGENET_STAT_RUNT: |
| 679 | if (s->type != BCMGENET_STAT_MIB_RX) | 667 | if (s->type != BCMGENET_STAT_MIB_RX) |
| 680 | offset = BCMGENET_STAT_OFFSET; | 668 | offset = BCMGENET_STAT_OFFSET; |
| 681 | val = bcmgenet_umac_readl(priv, UMAC_MIB_START + | 669 | val = bcmgenet_umac_readl(priv, |
| 682 | j + offset); | 670 | UMAC_MIB_START + j + offset); |
| 683 | break; | 671 | break; |
| 684 | case BCMGENET_STAT_MISC: | 672 | case BCMGENET_STAT_MISC: |
| 685 | val = bcmgenet_umac_readl(priv, s->reg_offset); | 673 | val = bcmgenet_umac_readl(priv, s->reg_offset); |
| @@ -696,8 +684,8 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) | |||
| 696 | } | 684 | } |
| 697 | 685 | ||
| 698 | static void bcmgenet_get_ethtool_stats(struct net_device *dev, | 686 | static void bcmgenet_get_ethtool_stats(struct net_device *dev, |
| 699 | struct ethtool_stats *stats, | 687 | struct ethtool_stats *stats, |
| 700 | u64 *data) | 688 | u64 *data) |
| 701 | { | 689 | { |
| 702 | struct bcmgenet_priv *priv = netdev_priv(dev); | 690 | struct bcmgenet_priv *priv = netdev_priv(dev); |
| 703 | int i; | 691 | int i; |
| @@ -730,6 +718,8 @@ static struct ethtool_ops bcmgenet_ethtool_ops = { | |||
| 730 | .get_link = ethtool_op_get_link, | 718 | .get_link = ethtool_op_get_link, |
| 731 | .get_msglevel = bcmgenet_get_msglevel, | 719 | .get_msglevel = bcmgenet_get_msglevel, |
| 732 | .set_msglevel = bcmgenet_set_msglevel, | 720 | .set_msglevel = bcmgenet_set_msglevel, |
| 721 | .get_wol = bcmgenet_get_wol, | ||
| 722 | .set_wol = bcmgenet_set_wol, | ||
| 733 | }; | 723 | }; |
| 734 | 724 | ||
| 735 | /* Power down the unimac, based on mode. */ | 725 | /* Power down the unimac, based on mode. */ |
| @@ -743,9 +733,12 @@ static void bcmgenet_power_down(struct bcmgenet_priv *priv, | |||
| 743 | phy_detach(priv->phydev); | 733 | phy_detach(priv->phydev); |
| 744 | break; | 734 | break; |
| 745 | 735 | ||
| 736 | case GENET_POWER_WOL_MAGIC: | ||
| 737 | bcmgenet_wol_power_down_cfg(priv, mode); | ||
| 738 | break; | ||
| 739 | |||
| 746 | case GENET_POWER_PASSIVE: | 740 | case GENET_POWER_PASSIVE: |
| 747 | /* Power down LED */ | 741 | /* Power down LED */ |
| 748 | bcmgenet_mii_reset(priv->dev); | ||
| 749 | if (priv->hw_params->flags & GENET_HAS_EXT) { | 742 | if (priv->hw_params->flags & GENET_HAS_EXT) { |
| 750 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | 743 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); |
| 751 | reg |= (EXT_PWR_DOWN_PHY | | 744 | reg |= (EXT_PWR_DOWN_PHY | |
| @@ -759,7 +752,7 @@ static void bcmgenet_power_down(struct bcmgenet_priv *priv, | |||
| 759 | } | 752 | } |
| 760 | 753 | ||
| 761 | static void bcmgenet_power_up(struct bcmgenet_priv *priv, | 754 | static void bcmgenet_power_up(struct bcmgenet_priv *priv, |
| 762 | enum bcmgenet_power_mode mode) | 755 | enum bcmgenet_power_mode mode) |
| 763 | { | 756 | { |
| 764 | u32 reg; | 757 | u32 reg; |
| 765 | 758 | ||
| @@ -777,12 +770,17 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, | |||
| 777 | /* enable APD */ | 770 | /* enable APD */ |
| 778 | reg |= EXT_PWR_DN_EN_LD; | 771 | reg |= EXT_PWR_DN_EN_LD; |
| 779 | break; | 772 | break; |
| 773 | case GENET_POWER_WOL_MAGIC: | ||
| 774 | bcmgenet_wol_power_up_cfg(priv, mode); | ||
| 775 | return; | ||
| 780 | default: | 776 | default: |
| 781 | break; | 777 | break; |
| 782 | } | 778 | } |
| 783 | 779 | ||
| 784 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | 780 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); |
| 785 | bcmgenet_mii_reset(priv->dev); | 781 | |
| 782 | if (mode == GENET_POWER_PASSIVE) | ||
| 783 | bcmgenet_mii_reset(priv->dev); | ||
| 786 | } | 784 | } |
| 787 | 785 | ||
| 788 | /* ioctl handle special commands that are not present in ethtool. */ | 786 | /* ioctl handle special commands that are not present in ethtool. */ |
| @@ -841,37 +839,37 @@ static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv, | |||
| 841 | struct bcmgenet_tx_ring *ring) | 839 | struct bcmgenet_tx_ring *ring) |
| 842 | { | 840 | { |
| 843 | bcmgenet_intrl2_0_writel(priv, | 841 | bcmgenet_intrl2_0_writel(priv, |
| 844 | UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, | 842 | UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, |
| 845 | INTRL2_CPU_MASK_SET); | 843 | INTRL2_CPU_MASK_SET); |
| 846 | } | 844 | } |
| 847 | 845 | ||
| 848 | static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv, | 846 | static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv, |
| 849 | struct bcmgenet_tx_ring *ring) | 847 | struct bcmgenet_tx_ring *ring) |
| 850 | { | 848 | { |
| 851 | bcmgenet_intrl2_0_writel(priv, | 849 | bcmgenet_intrl2_0_writel(priv, |
| 852 | UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, | 850 | UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, |
| 853 | INTRL2_CPU_MASK_CLEAR); | 851 | INTRL2_CPU_MASK_CLEAR); |
| 854 | } | 852 | } |
| 855 | 853 | ||
| 856 | static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv, | 854 | static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv, |
| 857 | struct bcmgenet_tx_ring *ring) | 855 | struct bcmgenet_tx_ring *ring) |
| 858 | { | 856 | { |
| 859 | bcmgenet_intrl2_1_writel(priv, | 857 | bcmgenet_intrl2_1_writel(priv, (1 << ring->index), |
| 860 | (1 << ring->index), INTRL2_CPU_MASK_CLEAR); | 858 | INTRL2_CPU_MASK_CLEAR); |
| 861 | priv->int1_mask &= ~(1 << ring->index); | 859 | priv->int1_mask &= ~(1 << ring->index); |
| 862 | } | 860 | } |
| 863 | 861 | ||
| 864 | static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv, | 862 | static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv, |
| 865 | struct bcmgenet_tx_ring *ring) | 863 | struct bcmgenet_tx_ring *ring) |
| 866 | { | 864 | { |
| 867 | bcmgenet_intrl2_1_writel(priv, | 865 | bcmgenet_intrl2_1_writel(priv, (1 << ring->index), |
| 868 | (1 << ring->index), INTRL2_CPU_MASK_SET); | 866 | INTRL2_CPU_MASK_SET); |
| 869 | priv->int1_mask |= (1 << ring->index); | 867 | priv->int1_mask |= (1 << ring->index); |
| 870 | } | 868 | } |
| 871 | 869 | ||
| 872 | /* Unlocked version of the reclaim routine */ | 870 | /* Unlocked version of the reclaim routine */ |
| 873 | static void __bcmgenet_tx_reclaim(struct net_device *dev, | 871 | static void __bcmgenet_tx_reclaim(struct net_device *dev, |
| 874 | struct bcmgenet_tx_ring *ring) | 872 | struct bcmgenet_tx_ring *ring) |
| 875 | { | 873 | { |
| 876 | struct bcmgenet_priv *priv = netdev_priv(dev); | 874 | struct bcmgenet_priv *priv = netdev_priv(dev); |
| 877 | int last_tx_cn, last_c_index, num_tx_bds; | 875 | int last_tx_cn, last_c_index, num_tx_bds; |
| @@ -879,7 +877,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
| 879 | struct netdev_queue *txq; | 877 | struct netdev_queue *txq; |
| 880 | unsigned int c_index; | 878 | unsigned int c_index; |
| 881 | 879 | ||
| 882 | /* Compute how many buffers are transmited since last xmit call */ | 880 | /* Compute how many buffers are transmitted since last xmit call */ |
| 883 | c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); | 881 | c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); |
| 884 | txq = netdev_get_tx_queue(dev, ring->queue); | 882 | txq = netdev_get_tx_queue(dev, ring->queue); |
| 885 | 883 | ||
| @@ -894,9 +892,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
| 894 | last_tx_cn = num_tx_bds - last_c_index + c_index; | 892 | last_tx_cn = num_tx_bds - last_c_index + c_index; |
| 895 | 893 | ||
| 896 | netif_dbg(priv, tx_done, dev, | 894 | netif_dbg(priv, tx_done, dev, |
| 897 | "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n", | 895 | "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n", |
| 898 | __func__, ring->index, | 896 | __func__, ring->index, |
| 899 | c_index, last_tx_cn, last_c_index); | 897 | c_index, last_tx_cn, last_c_index); |
| 900 | 898 | ||
| 901 | /* Reclaim transmitted buffers */ | 899 | /* Reclaim transmitted buffers */ |
| 902 | while (last_tx_cn-- > 0) { | 900 | while (last_tx_cn-- > 0) { |
| @@ -904,17 +902,17 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
| 904 | if (tx_cb_ptr->skb) { | 902 | if (tx_cb_ptr->skb) { |
| 905 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; | 903 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; |
| 906 | dma_unmap_single(&dev->dev, | 904 | dma_unmap_single(&dev->dev, |
| 907 | dma_unmap_addr(tx_cb_ptr, dma_addr), | 905 | dma_unmap_addr(tx_cb_ptr, dma_addr), |
| 908 | tx_cb_ptr->skb->len, | 906 | tx_cb_ptr->skb->len, |
| 909 | DMA_TO_DEVICE); | 907 | DMA_TO_DEVICE); |
| 910 | bcmgenet_free_cb(tx_cb_ptr); | 908 | bcmgenet_free_cb(tx_cb_ptr); |
| 911 | } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { | 909 | } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { |
| 912 | dev->stats.tx_bytes += | 910 | dev->stats.tx_bytes += |
| 913 | dma_unmap_len(tx_cb_ptr, dma_len); | 911 | dma_unmap_len(tx_cb_ptr, dma_len); |
| 914 | dma_unmap_page(&dev->dev, | 912 | dma_unmap_page(&dev->dev, |
| 915 | dma_unmap_addr(tx_cb_ptr, dma_addr), | 913 | dma_unmap_addr(tx_cb_ptr, dma_addr), |
| 916 | dma_unmap_len(tx_cb_ptr, dma_len), | 914 | dma_unmap_len(tx_cb_ptr, dma_len), |
| 917 | DMA_TO_DEVICE); | 915 | DMA_TO_DEVICE); |
| 918 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); | 916 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); |
| 919 | } | 917 | } |
| 920 | dev->stats.tx_packets++; | 918 | dev->stats.tx_packets++; |
| @@ -934,7 +932,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
| 934 | } | 932 | } |
| 935 | 933 | ||
| 936 | static void bcmgenet_tx_reclaim(struct net_device *dev, | 934 | static void bcmgenet_tx_reclaim(struct net_device *dev, |
| 937 | struct bcmgenet_tx_ring *ring) | 935 | struct bcmgenet_tx_ring *ring) |
| 938 | { | 936 | { |
| 939 | unsigned long flags; | 937 | unsigned long flags; |
| 940 | 938 | ||
| @@ -1008,11 +1006,11 @@ static int bcmgenet_xmit_single(struct net_device *dev, | |||
| 1008 | return 0; | 1006 | return 0; |
| 1009 | } | 1007 | } |
| 1010 | 1008 | ||
| 1011 | /* Transmit a SKB fragement */ | 1009 | /* Transmit a SKB fragment */ |
| 1012 | static int bcmgenet_xmit_frag(struct net_device *dev, | 1010 | static int bcmgenet_xmit_frag(struct net_device *dev, |
| 1013 | skb_frag_t *frag, | 1011 | skb_frag_t *frag, |
| 1014 | u16 dma_desc_flags, | 1012 | u16 dma_desc_flags, |
| 1015 | struct bcmgenet_tx_ring *ring) | 1013 | struct bcmgenet_tx_ring *ring) |
| 1016 | { | 1014 | { |
| 1017 | struct bcmgenet_priv *priv = netdev_priv(dev); | 1015 | struct bcmgenet_priv *priv = netdev_priv(dev); |
| 1018 | struct device *kdev = &priv->pdev->dev; | 1016 | struct device *kdev = &priv->pdev->dev; |
| @@ -1027,11 +1025,11 @@ static int bcmgenet_xmit_frag(struct net_device *dev, | |||
| 1027 | tx_cb_ptr->skb = NULL; | 1025 | tx_cb_ptr->skb = NULL; |
| 1028 | 1026 | ||
| 1029 | mapping = skb_frag_dma_map(kdev, frag, 0, | 1027 | mapping = skb_frag_dma_map(kdev, frag, 0, |
| 1030 | skb_frag_size(frag), DMA_TO_DEVICE); | 1028 | skb_frag_size(frag), DMA_TO_DEVICE); |
| 1031 | ret = dma_mapping_error(kdev, mapping); | 1029 | ret = dma_mapping_error(kdev, mapping); |
| 1032 | if (ret) { | 1030 | if (ret) { |
| 1033 | netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n", | 1031 | netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n", |
| 1034 | __func__); | 1032 | __func__); |
| 1035 | return ret; | 1033 | return ret; |
| 1036 | } | 1034 | } |
| 1037 | 1035 | ||
| @@ -1039,8 +1037,8 @@ static int bcmgenet_xmit_frag(struct net_device *dev, | |||
| 1039 | dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size); | 1037 | dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size); |
| 1040 | 1038 | ||
| 1041 | dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, | 1039 | dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, |
| 1042 | (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | | 1040 | (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | |
| 1043 | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); | 1041 | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); |
| 1044 | 1042 | ||
| 1045 | 1043 | ||
| 1046 | ring->free_bds -= 1; | 1044 | ring->free_bds -= 1; |
| @@ -1103,8 +1101,9 @@ static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb) | |||
| 1103 | tx_csum_info |= STATUS_TX_CSUM_LV; | 1101 | tx_csum_info |= STATUS_TX_CSUM_LV; |
| 1104 | if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) | 1102 | if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) |
| 1105 | tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; | 1103 | tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; |
| 1106 | } else | 1104 | } else { |
| 1107 | tx_csum_info = 0; | 1105 | tx_csum_info = 0; |
| 1106 | } | ||
| 1108 | 1107 | ||
| 1109 | status->tx_csum_info = tx_csum_info; | 1108 | status->tx_csum_info = tx_csum_info; |
| 1110 | } | 1109 | } |
| @@ -1144,11 +1143,16 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1144 | if (ring->free_bds <= nr_frags + 1) { | 1143 | if (ring->free_bds <= nr_frags + 1) { |
| 1145 | netif_tx_stop_queue(txq); | 1144 | netif_tx_stop_queue(txq); |
| 1146 | netdev_err(dev, "%s: tx ring %d full when queue %d awake\n", | 1145 | netdev_err(dev, "%s: tx ring %d full when queue %d awake\n", |
| 1147 | __func__, index, ring->queue); | 1146 | __func__, index, ring->queue); |
| 1148 | ret = NETDEV_TX_BUSY; | 1147 | ret = NETDEV_TX_BUSY; |
| 1149 | goto out; | 1148 | goto out; |
| 1150 | } | 1149 | } |
| 1151 | 1150 | ||
| 1151 | if (skb_padto(skb, ETH_ZLEN)) { | ||
| 1152 | ret = NETDEV_TX_OK; | ||
| 1153 | goto out; | ||
| 1154 | } | ||
| 1155 | |||
| 1152 | /* set the SKB transmit checksum */ | 1156 | /* set the SKB transmit checksum */ |
| 1153 | if (priv->desc_64b_en) { | 1157 | if (priv->desc_64b_en) { |
| 1154 | ret = bcmgenet_put_tx_csum(dev, skb); | 1158 | ret = bcmgenet_put_tx_csum(dev, skb); |
| @@ -1172,8 +1176,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1172 | /* xmit fragment */ | 1176 | /* xmit fragment */ |
| 1173 | for (i = 0; i < nr_frags; i++) { | 1177 | for (i = 0; i < nr_frags; i++) { |
| 1174 | ret = bcmgenet_xmit_frag(dev, | 1178 | ret = bcmgenet_xmit_frag(dev, |
| 1175 | &skb_shinfo(skb)->frags[i], | 1179 | &skb_shinfo(skb)->frags[i], |
| 1176 | (i == nr_frags - 1) ? DMA_EOP : 0, ring); | 1180 | (i == nr_frags - 1) ? DMA_EOP : 0, |
| 1181 | ring); | ||
| 1177 | if (ret) { | 1182 | if (ret) { |
| 1178 | ret = NETDEV_TX_OK; | 1183 | ret = NETDEV_TX_OK; |
| 1179 | goto out; | 1184 | goto out; |
| @@ -1186,7 +1191,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1186 | * producer index, now write it down to the hardware | 1191 | * producer index, now write it down to the hardware |
| 1187 | */ | 1192 | */ |
| 1188 | bcmgenet_tdma_ring_writel(priv, ring->index, | 1193 | bcmgenet_tdma_ring_writel(priv, ring->index, |
| 1189 | ring->prod_index, TDMA_PROD_INDEX); | 1194 | ring->prod_index, TDMA_PROD_INDEX); |
| 1190 | 1195 | ||
| 1191 | if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { | 1196 | if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { |
| 1192 | netif_tx_stop_queue(txq); | 1197 | netif_tx_stop_queue(txq); |
| @@ -1200,16 +1205,14 @@ out: | |||
| 1200 | } | 1205 | } |
| 1201 | 1206 | ||
| 1202 | 1207 | ||
| 1203 | static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, | 1208 | static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb) |
| 1204 | struct enet_cb *cb) | ||
| 1205 | { | 1209 | { |
| 1206 | struct device *kdev = &priv->pdev->dev; | 1210 | struct device *kdev = &priv->pdev->dev; |
| 1207 | struct sk_buff *skb; | 1211 | struct sk_buff *skb; |
| 1208 | dma_addr_t mapping; | 1212 | dma_addr_t mapping; |
| 1209 | int ret; | 1213 | int ret; |
| 1210 | 1214 | ||
| 1211 | skb = netdev_alloc_skb(priv->dev, | 1215 | skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT); |
| 1212 | priv->rx_buf_len + SKB_ALIGNMENT); | ||
| 1213 | if (!skb) | 1216 | if (!skb) |
| 1214 | return -ENOMEM; | 1217 | return -ENOMEM; |
| 1215 | 1218 | ||
| @@ -1217,12 +1220,12 @@ static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, | |||
| 1217 | WARN_ON(cb->skb != NULL); | 1220 | WARN_ON(cb->skb != NULL); |
| 1218 | cb->skb = skb; | 1221 | cb->skb = skb; |
| 1219 | mapping = dma_map_single(kdev, skb->data, | 1222 | mapping = dma_map_single(kdev, skb->data, |
| 1220 | priv->rx_buf_len, DMA_FROM_DEVICE); | 1223 | priv->rx_buf_len, DMA_FROM_DEVICE); |
| 1221 | ret = dma_mapping_error(kdev, mapping); | 1224 | ret = dma_mapping_error(kdev, mapping); |
| 1222 | if (ret) { | 1225 | if (ret) { |
| 1223 | bcmgenet_free_cb(cb); | 1226 | bcmgenet_free_cb(cb); |
| 1224 | netif_err(priv, rx_err, priv->dev, | 1227 | netif_err(priv, rx_err, priv->dev, |
| 1225 | "%s DMA map failed\n", __func__); | 1228 | "%s DMA map failed\n", __func__); |
| 1226 | return ret; | 1229 | return ret; |
| 1227 | } | 1230 | } |
| 1228 | 1231 | ||
| @@ -1257,8 +1260,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |||
| 1257 | unsigned int p_index; | 1260 | unsigned int p_index; |
| 1258 | unsigned int chksum_ok = 0; | 1261 | unsigned int chksum_ok = 0; |
| 1259 | 1262 | ||
| 1260 | p_index = bcmgenet_rdma_ring_readl(priv, | 1263 | p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX); |
| 1261 | DESC_INDEX, RDMA_PROD_INDEX); | ||
| 1262 | p_index &= DMA_P_INDEX_MASK; | 1264 | p_index &= DMA_P_INDEX_MASK; |
| 1263 | 1265 | ||
| 1264 | if (p_index < priv->rx_c_index) | 1266 | if (p_index < priv->rx_c_index) |
| @@ -1268,11 +1270,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |||
| 1268 | rxpkttoprocess = p_index - priv->rx_c_index; | 1270 | rxpkttoprocess = p_index - priv->rx_c_index; |
| 1269 | 1271 | ||
| 1270 | netif_dbg(priv, rx_status, dev, | 1272 | netif_dbg(priv, rx_status, dev, |
| 1271 | "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); | 1273 | "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); |
| 1272 | 1274 | ||
| 1273 | while ((rxpktprocessed < rxpkttoprocess) && | 1275 | while ((rxpktprocessed < rxpkttoprocess) && |
| 1274 | (rxpktprocessed < budget)) { | 1276 | (rxpktprocessed < budget)) { |
| 1275 | |||
| 1276 | /* Unmap the packet contents such that we can use the | 1277 | /* Unmap the packet contents such that we can use the |
| 1277 | * RSV from the 64 bytes descriptor when enabled and save | 1278 | * RSV from the 64 bytes descriptor when enabled and save |
| 1278 | * a 32-bits register read | 1279 | * a 32-bits register read |
| @@ -1280,15 +1281,17 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |||
| 1280 | cb = &priv->rx_cbs[priv->rx_read_ptr]; | 1281 | cb = &priv->rx_cbs[priv->rx_read_ptr]; |
| 1281 | skb = cb->skb; | 1282 | skb = cb->skb; |
| 1282 | dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), | 1283 | dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), |
| 1283 | priv->rx_buf_len, DMA_FROM_DEVICE); | 1284 | priv->rx_buf_len, DMA_FROM_DEVICE); |
| 1284 | 1285 | ||
| 1285 | if (!priv->desc_64b_en) { | 1286 | if (!priv->desc_64b_en) { |
| 1286 | dma_length_status = dmadesc_get_length_status(priv, | 1287 | dma_length_status = |
| 1287 | priv->rx_bds + | 1288 | dmadesc_get_length_status(priv, |
| 1288 | (priv->rx_read_ptr * | 1289 | priv->rx_bds + |
| 1289 | DMA_DESC_SIZE)); | 1290 | (priv->rx_read_ptr * |
| 1291 | DMA_DESC_SIZE)); | ||
| 1290 | } else { | 1292 | } else { |
| 1291 | struct status_64 *status; | 1293 | struct status_64 *status; |
| 1294 | |||
| 1292 | status = (struct status_64 *)skb->data; | 1295 | status = (struct status_64 *)skb->data; |
| 1293 | dma_length_status = status->length_status; | 1296 | dma_length_status = status->length_status; |
| 1294 | } | 1297 | } |
| @@ -1300,9 +1303,9 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |||
| 1300 | len = dma_length_status >> DMA_BUFLENGTH_SHIFT; | 1303 | len = dma_length_status >> DMA_BUFLENGTH_SHIFT; |
| 1301 | 1304 | ||
| 1302 | netif_dbg(priv, rx_status, dev, | 1305 | netif_dbg(priv, rx_status, dev, |
| 1303 | "%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", | 1306 | "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", |
| 1304 | __func__, p_index, priv->rx_c_index, priv->rx_read_ptr, | 1307 | __func__, p_index, priv->rx_c_index, |
| 1305 | dma_length_status); | 1308 | priv->rx_read_ptr, dma_length_status); |
| 1306 | 1309 | ||
| 1307 | rxpktprocessed++; | 1310 | rxpktprocessed++; |
| 1308 | 1311 | ||
| @@ -1318,7 +1321,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |||
| 1318 | 1321 | ||
| 1319 | if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { | 1322 | if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { |
| 1320 | netif_err(priv, rx_status, dev, | 1323 | netif_err(priv, rx_status, dev, |
| 1321 | "Droping fragmented packet!\n"); | 1324 | "dropping fragmented packet!\n"); |
| 1322 | dev->stats.rx_dropped++; | 1325 | dev->stats.rx_dropped++; |
| 1323 | dev->stats.rx_errors++; | 1326 | dev->stats.rx_errors++; |
| 1324 | dev_kfree_skb_any(cb->skb); | 1327 | dev_kfree_skb_any(cb->skb); |
| @@ -1332,7 +1335,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |||
| 1332 | DMA_RX_LG | | 1335 | DMA_RX_LG | |
| 1333 | DMA_RX_RXER))) { | 1336 | DMA_RX_RXER))) { |
| 1334 | netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", | 1337 | netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", |
| 1335 | (unsigned int)dma_flag); | 1338 | (unsigned int)dma_flag); |
| 1336 | if (dma_flag & DMA_RX_CRC_ERROR) | 1339 | if (dma_flag & DMA_RX_CRC_ERROR) |
| 1337 | dev->stats.rx_crc_errors++; | 1340 | dev->stats.rx_crc_errors++; |
| 1338 | if (dma_flag & DMA_RX_OV) | 1341 | if (dma_flag & DMA_RX_OV) |
| @@ -1351,7 +1354,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |||
| 1351 | } /* error packet */ | 1354 | } /* error packet */ |
| 1352 | 1355 | ||
| 1353 | chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && | 1356 | chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && |
| 1354 | priv->desc_rxchk_en; | 1357 | priv->desc_rxchk_en; |
| 1355 | 1358 | ||
| 1356 | skb_put(skb, len); | 1359 | skb_put(skb, len); |
| 1357 | if (priv->desc_64b_en) { | 1360 | if (priv->desc_64b_en) { |
| @@ -1411,7 +1414,6 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv) | |||
| 1411 | ret = bcmgenet_rx_refill(priv, cb); | 1414 | ret = bcmgenet_rx_refill(priv, cb); |
| 1412 | if (ret) | 1415 | if (ret) |
| 1413 | break; | 1416 | break; |
| 1414 | |||
| 1415 | } | 1417 | } |
| 1416 | 1418 | ||
| 1417 | return ret; | 1419 | return ret; |
| @@ -1427,8 +1429,8 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) | |||
| 1427 | 1429 | ||
| 1428 | if (dma_unmap_addr(cb, dma_addr)) { | 1430 | if (dma_unmap_addr(cb, dma_addr)) { |
| 1429 | dma_unmap_single(&priv->dev->dev, | 1431 | dma_unmap_single(&priv->dev->dev, |
| 1430 | dma_unmap_addr(cb, dma_addr), | 1432 | dma_unmap_addr(cb, dma_addr), |
| 1431 | priv->rx_buf_len, DMA_FROM_DEVICE); | 1433 | priv->rx_buf_len, DMA_FROM_DEVICE); |
| 1432 | dma_unmap_addr_set(cb, dma_addr, 0); | 1434 | dma_unmap_addr_set(cb, dma_addr, 0); |
| 1433 | } | 1435 | } |
| 1434 | 1436 | ||
| @@ -1437,6 +1439,24 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) | |||
| 1437 | } | 1439 | } |
| 1438 | } | 1440 | } |
| 1439 | 1441 | ||
| 1442 | static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) | ||
| 1443 | { | ||
| 1444 | u32 reg; | ||
| 1445 | |||
| 1446 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | ||
| 1447 | if (enable) | ||
| 1448 | reg |= mask; | ||
| 1449 | else | ||
| 1450 | reg &= ~mask; | ||
| 1451 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | ||
| 1452 | |||
| 1453 | /* UniMAC stops on a packet boundary, wait for a full-size packet | ||
| 1454 | * to be processed | ||
| 1455 | */ | ||
| 1456 | if (enable == 0) | ||
| 1457 | usleep_range(1000, 2000); | ||
| 1458 | } | ||
| 1459 | |||
| 1440 | static int reset_umac(struct bcmgenet_priv *priv) | 1460 | static int reset_umac(struct bcmgenet_priv *priv) |
| 1441 | { | 1461 | { |
| 1442 | struct device *kdev = &priv->pdev->dev; | 1462 | struct device *kdev = &priv->pdev->dev; |
| @@ -1462,13 +1482,24 @@ static int reset_umac(struct bcmgenet_priv *priv) | |||
| 1462 | 1482 | ||
| 1463 | if (timeout == 1000) { | 1483 | if (timeout == 1000) { |
| 1464 | dev_err(kdev, | 1484 | dev_err(kdev, |
| 1465 | "timeout waiting for MAC to come out of resetn\n"); | 1485 | "timeout waiting for MAC to come out of reset\n"); |
| 1466 | return -ETIMEDOUT; | 1486 | return -ETIMEDOUT; |
| 1467 | } | 1487 | } |
| 1468 | 1488 | ||
| 1469 | return 0; | 1489 | return 0; |
| 1470 | } | 1490 | } |
| 1471 | 1491 | ||
| 1492 | static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) | ||
| 1493 | { | ||
| 1494 | /* Mask all interrupts.*/ | ||
| 1495 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); | ||
| 1496 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); | ||
| 1497 | bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); | ||
| 1498 | bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); | ||
| 1499 | bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); | ||
| 1500 | bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); | ||
| 1501 | } | ||
| 1502 | |||
| 1472 | static int init_umac(struct bcmgenet_priv *priv) | 1503 | static int init_umac(struct bcmgenet_priv *priv) |
| 1473 | { | 1504 | { |
| 1474 | struct device *kdev = &priv->pdev->dev; | 1505 | struct device *kdev = &priv->pdev->dev; |
| @@ -1484,7 +1515,8 @@ static int init_umac(struct bcmgenet_priv *priv) | |||
| 1484 | bcmgenet_umac_writel(priv, 0, UMAC_CMD); | 1515 | bcmgenet_umac_writel(priv, 0, UMAC_CMD); |
| 1485 | /* clear tx/rx counter */ | 1516 | /* clear tx/rx counter */ |
| 1486 | bcmgenet_umac_writel(priv, | 1517 | bcmgenet_umac_writel(priv, |
| 1487 | MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL); | 1518 | MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, |
| 1519 | UMAC_MIB_CTRL); | ||
| 1488 | bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); | 1520 | bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); |
| 1489 | 1521 | ||
| 1490 | bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); | 1522 | bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); |
| @@ -1497,21 +1529,18 @@ static int init_umac(struct bcmgenet_priv *priv) | |||
| 1497 | if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) | 1529 | if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) |
| 1498 | bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); | 1530 | bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); |
| 1499 | 1531 | ||
| 1500 | /* Mask all interrupts.*/ | 1532 | bcmgenet_intr_disable(priv); |
| 1501 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); | ||
| 1502 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); | ||
| 1503 | bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); | ||
| 1504 | 1533 | ||
| 1505 | cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; | 1534 | cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; |
| 1506 | 1535 | ||
| 1507 | dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); | 1536 | dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); |
| 1508 | 1537 | ||
| 1509 | /* Monitor cable plug/unpluged event for internal PHY */ | 1538 | /* Monitor cable plug/unplugged event for internal PHY */ |
| 1510 | if (phy_is_internal(priv->phydev)) | 1539 | if (phy_is_internal(priv->phydev)) { |
| 1511 | cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); | 1540 | cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); |
| 1512 | else if (priv->ext_phy) | 1541 | } else if (priv->ext_phy) { |
| 1513 | cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); | 1542 | cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); |
| 1514 | else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { | 1543 | } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { |
| 1515 | reg = bcmgenet_bp_mc_get(priv); | 1544 | reg = bcmgenet_bp_mc_get(priv); |
| 1516 | reg |= BIT(priv->hw_params->bp_in_en_shift); | 1545 | reg |= BIT(priv->hw_params->bp_in_en_shift); |
| 1517 | 1546 | ||
| @@ -1527,8 +1556,7 @@ static int init_umac(struct bcmgenet_priv *priv) | |||
| 1527 | if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) | 1556 | if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) |
| 1528 | cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR; | 1557 | cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR; |
| 1529 | 1558 | ||
| 1530 | bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, | 1559 | bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); |
| 1531 | INTRL2_CPU_MASK_CLEAR); | ||
| 1532 | 1560 | ||
| 1533 | /* Enable rx/tx engine.*/ | 1561 | /* Enable rx/tx engine.*/ |
| 1534 | dev_dbg(kdev, "done init umac\n"); | 1562 | dev_dbg(kdev, "done init umac\n"); |
| @@ -1577,28 +1605,28 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, | |||
| 1577 | bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); | 1605 | bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); |
| 1578 | /* Disable rate control for now */ | 1606 | /* Disable rate control for now */ |
| 1579 | bcmgenet_tdma_ring_writel(priv, index, flow_period_val, | 1607 | bcmgenet_tdma_ring_writel(priv, index, flow_period_val, |
| 1580 | TDMA_FLOW_PERIOD); | 1608 | TDMA_FLOW_PERIOD); |
| 1581 | /* Unclassified traffic goes to ring 16 */ | 1609 | /* Unclassified traffic goes to ring 16 */ |
| 1582 | bcmgenet_tdma_ring_writel(priv, index, | 1610 | bcmgenet_tdma_ring_writel(priv, index, |
| 1583 | ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), | 1611 | ((size << DMA_RING_SIZE_SHIFT) | |
| 1584 | DMA_RING_BUF_SIZE); | 1612 | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); |
| 1585 | 1613 | ||
| 1586 | first_bd = write_ptr; | 1614 | first_bd = write_ptr; |
| 1587 | 1615 | ||
| 1588 | /* Set start and end address, read and write pointers */ | 1616 | /* Set start and end address, read and write pointers */ |
| 1589 | bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, | 1617 | bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, |
| 1590 | DMA_START_ADDR); | 1618 | DMA_START_ADDR); |
| 1591 | bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, | 1619 | bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, |
| 1592 | TDMA_READ_PTR); | 1620 | TDMA_READ_PTR); |
| 1593 | bcmgenet_tdma_ring_writel(priv, index, first_bd, | 1621 | bcmgenet_tdma_ring_writel(priv, index, first_bd, |
| 1594 | TDMA_WRITE_PTR); | 1622 | TDMA_WRITE_PTR); |
| 1595 | bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, | 1623 | bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, |
| 1596 | DMA_END_ADDR); | 1624 | DMA_END_ADDR); |
| 1597 | } | 1625 | } |
| 1598 | 1626 | ||
| 1599 | /* Initialize a RDMA ring */ | 1627 | /* Initialize a RDMA ring */ |
| 1600 | static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, | 1628 | static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, |
| 1601 | unsigned int index, unsigned int size) | 1629 | unsigned int index, unsigned int size) |
| 1602 | { | 1630 | { |
| 1603 | u32 words_per_bd = WORDS_PER_BD(priv); | 1631 | u32 words_per_bd = WORDS_PER_BD(priv); |
| 1604 | int ret; | 1632 | int ret; |
| @@ -1609,8 +1637,8 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, | |||
| 1609 | priv->rx_bd_assign_index = 0; | 1637 | priv->rx_bd_assign_index = 0; |
| 1610 | priv->rx_c_index = 0; | 1638 | priv->rx_c_index = 0; |
| 1611 | priv->rx_read_ptr = 0; | 1639 | priv->rx_read_ptr = 0; |
| 1612 | priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb), | 1640 | priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), |
| 1613 | GFP_KERNEL); | 1641 | GFP_KERNEL); |
| 1614 | if (!priv->rx_cbs) | 1642 | if (!priv->rx_cbs) |
| 1615 | return -ENOMEM; | 1643 | return -ENOMEM; |
| 1616 | 1644 | ||
| @@ -1624,14 +1652,15 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, | |||
| 1624 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); | 1652 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); |
| 1625 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); | 1653 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); |
| 1626 | bcmgenet_rdma_ring_writel(priv, index, | 1654 | bcmgenet_rdma_ring_writel(priv, index, |
| 1627 | ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), | 1655 | ((size << DMA_RING_SIZE_SHIFT) | |
| 1628 | DMA_RING_BUF_SIZE); | 1656 | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); |
| 1629 | bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR); | 1657 | bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR); |
| 1630 | bcmgenet_rdma_ring_writel(priv, index, | 1658 | bcmgenet_rdma_ring_writel(priv, index, |
| 1631 | words_per_bd * size - 1, DMA_END_ADDR); | 1659 | words_per_bd * size - 1, DMA_END_ADDR); |
| 1632 | bcmgenet_rdma_ring_writel(priv, index, | 1660 | bcmgenet_rdma_ring_writel(priv, index, |
| 1633 | (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) | | 1661 | (DMA_FC_THRESH_LO << |
| 1634 | DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); | 1662 | DMA_XOFF_THRESHOLD_SHIFT) | |
| 1663 | DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); | ||
| 1635 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR); | 1664 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR); |
| 1636 | 1665 | ||
| 1637 | return ret; | 1666 | return ret; |
| @@ -1678,10 +1707,10 @@ static void bcmgenet_init_multiq(struct net_device *dev) | |||
| 1678 | * (ring 16) | 1707 | * (ring 16) |
| 1679 | */ | 1708 | */ |
| 1680 | bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt, | 1709 | bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt, |
| 1681 | i * priv->hw_params->bds_cnt, | 1710 | i * priv->hw_params->bds_cnt, |
| 1682 | (i + 1) * priv->hw_params->bds_cnt); | 1711 | (i + 1) * priv->hw_params->bds_cnt); |
| 1683 | 1712 | ||
| 1684 | /* Configure ring as decriptor ring and setup priority */ | 1713 | /* Configure ring as descriptor ring and setup priority */ |
| 1685 | ring_cfg |= 1 << i; | 1714 | ring_cfg |= 1 << i; |
| 1686 | dma_priority |= ((GENET_Q0_PRIORITY + i) << | 1715 | dma_priority |= ((GENET_Q0_PRIORITY + i) << |
| 1687 | (GENET_MAX_MQ_CNT + 1) * i); | 1716 | (GENET_MAX_MQ_CNT + 1) * i); |
| @@ -1747,11 +1776,11 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) | |||
| 1747 | /* Init tDma */ | 1776 | /* Init tDma */ |
| 1748 | bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); | 1777 | bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); |
| 1749 | 1778 | ||
| 1750 | /* Initialize commont TX ring structures */ | 1779 | /* Initialize common TX ring structures */ |
| 1751 | priv->tx_bds = priv->base + priv->hw_params->tdma_offset; | 1780 | priv->tx_bds = priv->base + priv->hw_params->tdma_offset; |
| 1752 | priv->num_tx_bds = TOTAL_DESC; | 1781 | priv->num_tx_bds = TOTAL_DESC; |
| 1753 | priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb), | 1782 | priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), |
| 1754 | GFP_KERNEL); | 1783 | GFP_KERNEL); |
| 1755 | if (!priv->tx_cbs) { | 1784 | if (!priv->tx_cbs) { |
| 1756 | bcmgenet_fini_dma(priv); | 1785 | bcmgenet_fini_dma(priv); |
| 1757 | return -ENOMEM; | 1786 | return -ENOMEM; |
| @@ -1762,8 +1791,9 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) | |||
| 1762 | 1791 | ||
| 1763 | /* initialize special ring 16 */ | 1792 | /* initialize special ring 16 */ |
| 1764 | bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT, | 1793 | bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT, |
| 1765 | priv->hw_params->tx_queues * priv->hw_params->bds_cnt, | 1794 | priv->hw_params->tx_queues * |
| 1766 | TOTAL_DESC); | 1795 | priv->hw_params->bds_cnt, |
| 1796 | TOTAL_DESC); | ||
| 1767 | 1797 | ||
| 1768 | return 0; | 1798 | return 0; |
| 1769 | } | 1799 | } |
| @@ -1784,11 +1814,11 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget) | |||
| 1784 | priv->rx_c_index += work_done; | 1814 | priv->rx_c_index += work_done; |
| 1785 | priv->rx_c_index &= DMA_C_INDEX_MASK; | 1815 | priv->rx_c_index &= DMA_C_INDEX_MASK; |
| 1786 | bcmgenet_rdma_ring_writel(priv, DESC_INDEX, | 1816 | bcmgenet_rdma_ring_writel(priv, DESC_INDEX, |
| 1787 | priv->rx_c_index, RDMA_CONS_INDEX); | 1817 | priv->rx_c_index, RDMA_CONS_INDEX); |
| 1788 | if (work_done < budget) { | 1818 | if (work_done < budget) { |
| 1789 | napi_complete(napi); | 1819 | napi_complete(napi); |
| 1790 | bcmgenet_intrl2_0_writel(priv, | 1820 | bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE, |
| 1791 | UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR); | 1821 | INTRL2_CPU_MASK_CLEAR); |
| 1792 | } | 1822 | } |
| 1793 | 1823 | ||
| 1794 | return work_done; | 1824 | return work_done; |
| @@ -1802,11 +1832,18 @@ static void bcmgenet_irq_task(struct work_struct *work) | |||
| 1802 | 1832 | ||
| 1803 | netif_dbg(priv, intr, priv->dev, "%s\n", __func__); | 1833 | netif_dbg(priv, intr, priv->dev, "%s\n", __func__); |
| 1804 | 1834 | ||
| 1835 | if (priv->irq0_stat & UMAC_IRQ_MPD_R) { | ||
| 1836 | priv->irq0_stat &= ~UMAC_IRQ_MPD_R; | ||
| 1837 | netif_dbg(priv, wol, priv->dev, | ||
| 1838 | "magic packet detected, waking up\n"); | ||
| 1839 | bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); | ||
| 1840 | } | ||
| 1841 | |||
| 1805 | /* Link UP/DOWN event */ | 1842 | /* Link UP/DOWN event */ |
| 1806 | if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && | 1843 | if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && |
| 1807 | (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) { | 1844 | (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) { |
| 1808 | phy_mac_interrupt(priv->phydev, | 1845 | phy_mac_interrupt(priv->phydev, |
| 1809 | priv->irq0_stat & UMAC_IRQ_LINK_UP); | 1846 | priv->irq0_stat & UMAC_IRQ_LINK_UP); |
| 1810 | priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN); | 1847 | priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN); |
| 1811 | } | 1848 | } |
| 1812 | } | 1849 | } |
| @@ -1821,11 +1858,11 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) | |||
| 1821 | priv->irq1_stat = | 1858 | priv->irq1_stat = |
| 1822 | bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & | 1859 | bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & |
| 1823 | ~priv->int1_mask; | 1860 | ~priv->int1_mask; |
| 1824 | /* clear inerrupts*/ | 1861 | /* clear interrupts */ |
| 1825 | bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); | 1862 | bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); |
| 1826 | 1863 | ||
| 1827 | netif_dbg(priv, intr, priv->dev, | 1864 | netif_dbg(priv, intr, priv->dev, |
| 1828 | "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); | 1865 | "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); |
| 1829 | /* Check the MBDONE interrupts. | 1866 | /* Check the MBDONE interrupts. |
| 1830 | * packet is done, reclaim descriptors | 1867 | * packet is done, reclaim descriptors |
| 1831 | */ | 1868 | */ |
| @@ -1834,7 +1871,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) | |||
| 1834 | for (index = 0; index < 16; index++) { | 1871 | for (index = 0; index < 16; index++) { |
| 1835 | if (priv->irq1_stat & (1 << index)) | 1872 | if (priv->irq1_stat & (1 << index)) |
| 1836 | bcmgenet_tx_reclaim(priv->dev, | 1873 | bcmgenet_tx_reclaim(priv->dev, |
| 1837 | &priv->tx_rings[index]); | 1874 | &priv->tx_rings[index]); |
| 1838 | } | 1875 | } |
| 1839 | } | 1876 | } |
| 1840 | return IRQ_HANDLED; | 1877 | return IRQ_HANDLED; |
| @@ -1849,11 +1886,11 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) | |||
| 1849 | priv->irq0_stat = | 1886 | priv->irq0_stat = |
| 1850 | bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & | 1887 | bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & |
| 1851 | ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); | 1888 | ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); |
| 1852 | /* clear inerrupts*/ | 1889 | /* clear interrupts */ |
| 1853 | bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); | 1890 | bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); |
| 1854 | 1891 | ||
| 1855 | netif_dbg(priv, intr, priv->dev, | 1892 | netif_dbg(priv, intr, priv->dev, |
| 1856 | "IRQ=0x%x\n", priv->irq0_stat); | 1893 | "IRQ=0x%x\n", priv->irq0_stat); |
| 1857 | 1894 | ||
| 1858 | if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) { | 1895 | if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) { |
| 1859 | /* We use NAPI(software interrupt throttling, if | 1896 | /* We use NAPI(software interrupt throttling, if |
| @@ -1861,8 +1898,8 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) | |||
| 1861 | * Disable interrupt, will be enabled in the poll method. | 1898 | * Disable interrupt, will be enabled in the poll method. |
| 1862 | */ | 1899 | */ |
| 1863 | if (likely(napi_schedule_prep(&priv->napi))) { | 1900 | if (likely(napi_schedule_prep(&priv->napi))) { |
| 1864 | bcmgenet_intrl2_0_writel(priv, | 1901 | bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE, |
| 1865 | UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET); | 1902 | INTRL2_CPU_MASK_SET); |
| 1866 | __napi_schedule(&priv->napi); | 1903 | __napi_schedule(&priv->napi); |
| 1867 | } | 1904 | } |
| 1868 | } | 1905 | } |
| @@ -1883,7 +1920,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) | |||
| 1883 | } | 1920 | } |
| 1884 | 1921 | ||
| 1885 | if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && | 1922 | if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && |
| 1886 | priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { | 1923 | priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { |
| 1887 | priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); | 1924 | priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); |
| 1888 | wake_up(&priv->wq); | 1925 | wake_up(&priv->wq); |
| 1889 | } | 1926 | } |
| @@ -1891,6 +1928,15 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) | |||
| 1891 | return IRQ_HANDLED; | 1928 | return IRQ_HANDLED; |
| 1892 | } | 1929 | } |
| 1893 | 1930 | ||
| 1931 | static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) | ||
| 1932 | { | ||
| 1933 | struct bcmgenet_priv *priv = dev_id; | ||
| 1934 | |||
| 1935 | pm_wakeup_event(&priv->pdev->dev, 0); | ||
| 1936 | |||
| 1937 | return IRQ_HANDLED; | ||
| 1938 | } | ||
| 1939 | |||
| 1894 | static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) | 1940 | static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) |
| 1895 | { | 1941 | { |
| 1896 | u32 reg; | 1942 | u32 reg; |
| @@ -1906,7 +1952,7 @@ static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) | |||
| 1906 | } | 1952 | } |
| 1907 | 1953 | ||
| 1908 | static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, | 1954 | static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, |
| 1909 | unsigned char *addr) | 1955 | unsigned char *addr) |
| 1910 | { | 1956 | { |
| 1911 | bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | | 1957 | bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | |
| 1912 | (addr[2] << 8) | addr[3], UMAC_MAC0); | 1958 | (addr[2] << 8) | addr[3], UMAC_MAC0); |
| @@ -1915,14 +1961,9 @@ static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, | |||
| 1915 | 1961 | ||
| 1916 | static int bcmgenet_wol_resume(struct bcmgenet_priv *priv) | 1962 | static int bcmgenet_wol_resume(struct bcmgenet_priv *priv) |
| 1917 | { | 1963 | { |
| 1918 | int ret; | ||
| 1919 | |||
| 1920 | /* From WOL-enabled suspend, switch to regular clock */ | 1964 | /* From WOL-enabled suspend, switch to regular clock */ |
| 1921 | clk_disable(priv->clk_wol); | 1965 | if (priv->wolopts) |
| 1922 | /* init umac registers to synchronize s/w with h/w */ | 1966 | clk_disable_unprepare(priv->clk_wol); |
| 1923 | ret = init_umac(priv); | ||
| 1924 | if (ret) | ||
| 1925 | return ret; | ||
| 1926 | 1967 | ||
| 1927 | phy_init_hw(priv->phydev); | 1968 | phy_init_hw(priv->phydev); |
| 1928 | /* Speed settings must be restored */ | 1969 | /* Speed settings must be restored */ |
| @@ -1967,6 +2008,23 @@ static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) | |||
| 1967 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | 2008 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); |
| 1968 | } | 2009 | } |
| 1969 | 2010 | ||
| 2011 | static void bcmgenet_netif_start(struct net_device *dev) | ||
| 2012 | { | ||
| 2013 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
| 2014 | |||
| 2015 | /* Start the network engine */ | ||
| 2016 | napi_enable(&priv->napi); | ||
| 2017 | |||
| 2018 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); | ||
| 2019 | |||
| 2020 | if (phy_is_internal(priv->phydev)) | ||
| 2021 | bcmgenet_power_up(priv, GENET_POWER_PASSIVE); | ||
| 2022 | |||
| 2023 | netif_tx_start_all_queues(dev); | ||
| 2024 | |||
| 2025 | phy_start(priv->phydev); | ||
| 2026 | } | ||
| 2027 | |||
| 1970 | static int bcmgenet_open(struct net_device *dev) | 2028 | static int bcmgenet_open(struct net_device *dev) |
| 1971 | { | 2029 | { |
| 1972 | struct bcmgenet_priv *priv = netdev_priv(dev); | 2030 | struct bcmgenet_priv *priv = netdev_priv(dev); |
| @@ -1988,18 +2046,14 @@ static int bcmgenet_open(struct net_device *dev) | |||
| 1988 | goto err_clk_disable; | 2046 | goto err_clk_disable; |
| 1989 | 2047 | ||
| 1990 | /* disable ethernet MAC while updating its registers */ | 2048 | /* disable ethernet MAC while updating its registers */ |
| 2049 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); | ||
| 2050 | |||
| 2051 | /* Make sure we reflect the value of CRC_CMD_FWD */ | ||
| 1991 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | 2052 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); |
| 1992 | reg &= ~(CMD_TX_EN | CMD_RX_EN); | 2053 | priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); |
| 1993 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | ||
| 1994 | 2054 | ||
| 1995 | bcmgenet_set_hw_addr(priv, dev->dev_addr); | 2055 | bcmgenet_set_hw_addr(priv, dev->dev_addr); |
| 1996 | 2056 | ||
| 1997 | if (priv->wol_enabled) { | ||
| 1998 | ret = bcmgenet_wol_resume(priv); | ||
| 1999 | if (ret) | ||
| 2000 | return ret; | ||
| 2001 | } | ||
| 2002 | |||
| 2003 | if (phy_is_internal(priv->phydev)) { | 2057 | if (phy_is_internal(priv->phydev)) { |
| 2004 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | 2058 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); |
| 2005 | reg |= EXT_ENERGY_DET_MASK; | 2059 | reg |= EXT_ENERGY_DET_MASK; |
| @@ -2020,37 +2074,20 @@ static int bcmgenet_open(struct net_device *dev) | |||
| 2020 | bcmgenet_enable_dma(priv, dma_ctrl); | 2074 | bcmgenet_enable_dma(priv, dma_ctrl); |
| 2021 | 2075 | ||
| 2022 | ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, | 2076 | ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, |
| 2023 | dev->name, priv); | 2077 | dev->name, priv); |
| 2024 | if (ret < 0) { | 2078 | if (ret < 0) { |
| 2025 | netdev_err(dev, "can't request IRQ %d\n", priv->irq0); | 2079 | netdev_err(dev, "can't request IRQ %d\n", priv->irq0); |
| 2026 | goto err_fini_dma; | 2080 | goto err_fini_dma; |
| 2027 | } | 2081 | } |
| 2028 | 2082 | ||
| 2029 | ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, | 2083 | ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, |
| 2030 | dev->name, priv); | 2084 | dev->name, priv); |
| 2031 | if (ret < 0) { | 2085 | if (ret < 0) { |
| 2032 | netdev_err(dev, "can't request IRQ %d\n", priv->irq1); | 2086 | netdev_err(dev, "can't request IRQ %d\n", priv->irq1); |
| 2033 | goto err_irq0; | 2087 | goto err_irq0; |
| 2034 | } | 2088 | } |
| 2035 | 2089 | ||
| 2036 | /* Start the network engine */ | 2090 | bcmgenet_netif_start(dev); |
| 2037 | napi_enable(&priv->napi); | ||
| 2038 | |||
| 2039 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | ||
| 2040 | reg |= (CMD_TX_EN | CMD_RX_EN); | ||
| 2041 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | ||
| 2042 | |||
| 2043 | /* Make sure we reflect the value of CRC_CMD_FWD */ | ||
| 2044 | priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); | ||
| 2045 | |||
| 2046 | device_set_wakeup_capable(&dev->dev, 1); | ||
| 2047 | |||
| 2048 | if (phy_is_internal(priv->phydev)) | ||
| 2049 | bcmgenet_power_up(priv, GENET_POWER_PASSIVE); | ||
| 2050 | |||
| 2051 | netif_tx_start_all_queues(dev); | ||
| 2052 | |||
| 2053 | phy_start(priv->phydev); | ||
| 2054 | 2091 | ||
| 2055 | return 0; | 2092 | return 0; |
| 2056 | 2093 | ||
| @@ -2085,8 +2122,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) | |||
| 2085 | } | 2122 | } |
| 2086 | 2123 | ||
| 2087 | if (timeout == DMA_TIMEOUT_VAL) { | 2124 | if (timeout == DMA_TIMEOUT_VAL) { |
| 2088 | netdev_warn(priv->dev, | 2125 | netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); |
| 2089 | "Timed out while disabling TX DMA\n"); | ||
| 2090 | ret = -ETIMEDOUT; | 2126 | ret = -ETIMEDOUT; |
| 2091 | } | 2127 | } |
| 2092 | 2128 | ||
| @@ -2109,41 +2145,51 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) | |||
| 2109 | } | 2145 | } |
| 2110 | 2146 | ||
| 2111 | if (timeout == DMA_TIMEOUT_VAL) { | 2147 | if (timeout == DMA_TIMEOUT_VAL) { |
| 2112 | netdev_warn(priv->dev, | 2148 | netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); |
| 2113 | "Timed out while disabling RX DMA\n"); | 2149 | ret = -ETIMEDOUT; |
| 2114 | ret = -ETIMEDOUT; | ||
| 2115 | } | 2150 | } |
| 2116 | 2151 | ||
| 2117 | return ret; | 2152 | return ret; |
| 2118 | } | 2153 | } |
| 2119 | 2154 | ||
| 2155 | static void bcmgenet_netif_stop(struct net_device *dev) | ||
| 2156 | { | ||
| 2157 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
| 2158 | |||
| 2159 | netif_tx_stop_all_queues(dev); | ||
| 2160 | napi_disable(&priv->napi); | ||
| 2161 | phy_stop(priv->phydev); | ||
| 2162 | |||
| 2163 | bcmgenet_intr_disable(priv); | ||
| 2164 | |||
| 2165 | /* Wait for pending work items to complete. Since interrupts are | ||
| 2166 | * disabled no new work will be scheduled. | ||
| 2167 | */ | ||
| 2168 | cancel_work_sync(&priv->bcmgenet_irq_work); | ||
| 2169 | |||
| 2170 | priv->old_pause = -1; | ||
| 2171 | priv->old_link = -1; | ||
| 2172 | priv->old_duplex = -1; | ||
| 2173 | } | ||
| 2174 | |||
| 2120 | static int bcmgenet_close(struct net_device *dev) | 2175 | static int bcmgenet_close(struct net_device *dev) |
| 2121 | { | 2176 | { |
| 2122 | struct bcmgenet_priv *priv = netdev_priv(dev); | 2177 | struct bcmgenet_priv *priv = netdev_priv(dev); |
| 2123 | int ret; | 2178 | int ret; |
| 2124 | u32 reg; | ||
| 2125 | 2179 | ||
| 2126 | netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); | 2180 | netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); |
| 2127 | 2181 | ||
| 2128 | phy_stop(priv->phydev); | 2182 | bcmgenet_netif_stop(dev); |
| 2129 | 2183 | ||
| 2130 | /* Disable MAC receive */ | 2184 | /* Disable MAC receive */ |
| 2131 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | 2185 | umac_enable_set(priv, CMD_RX_EN, false); |
| 2132 | reg &= ~CMD_RX_EN; | ||
| 2133 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | ||
| 2134 | |||
| 2135 | netif_tx_stop_all_queues(dev); | ||
| 2136 | 2186 | ||
| 2137 | ret = bcmgenet_dma_teardown(priv); | 2187 | ret = bcmgenet_dma_teardown(priv); |
| 2138 | if (ret) | 2188 | if (ret) |
| 2139 | return ret; | 2189 | return ret; |
| 2140 | 2190 | ||
| 2141 | /* Disable MAC transmit. TX DMA disabled have to done before this */ | 2191 | /* Disable MAC transmit. TX DMA disabled have to done before this */ |
| 2142 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | 2192 | umac_enable_set(priv, CMD_TX_EN, false); |
| 2143 | reg &= ~CMD_TX_EN; | ||
| 2144 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | ||
| 2145 | |||
| 2146 | napi_disable(&priv->napi); | ||
| 2147 | 2193 | ||
| 2148 | /* tx reclaim */ | 2194 | /* tx reclaim */ |
| 2149 | bcmgenet_tx_reclaim_all(dev); | 2195 | bcmgenet_tx_reclaim_all(dev); |
| @@ -2152,18 +2198,9 @@ static int bcmgenet_close(struct net_device *dev) | |||
| 2152 | free_irq(priv->irq0, priv); | 2198 | free_irq(priv->irq0, priv); |
| 2153 | free_irq(priv->irq1, priv); | 2199 | free_irq(priv->irq1, priv); |
| 2154 | 2200 | ||
| 2155 | /* Wait for pending work items to complete - we are stopping | ||
| 2156 | * the clock now. Since interrupts are disabled, no new work | ||
| 2157 | * will be scheduled. | ||
| 2158 | */ | ||
| 2159 | cancel_work_sync(&priv->bcmgenet_irq_work); | ||
| 2160 | |||
| 2161 | if (phy_is_internal(priv->phydev)) | 2201 | if (phy_is_internal(priv->phydev)) |
| 2162 | bcmgenet_power_down(priv, GENET_POWER_PASSIVE); | 2202 | bcmgenet_power_down(priv, GENET_POWER_PASSIVE); |
| 2163 | 2203 | ||
| 2164 | if (priv->wol_enabled) | ||
| 2165 | clk_enable(priv->clk_wol); | ||
| 2166 | |||
| 2167 | if (!IS_ERR(priv->clk)) | 2204 | if (!IS_ERR(priv->clk)) |
| 2168 | clk_disable_unprepare(priv->clk); | 2205 | clk_disable_unprepare(priv->clk); |
| 2169 | 2206 | ||
| @@ -2192,12 +2229,11 @@ static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, | |||
| 2192 | { | 2229 | { |
| 2193 | u32 reg; | 2230 | u32 reg; |
| 2194 | 2231 | ||
| 2195 | bcmgenet_umac_writel(priv, | 2232 | bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], |
| 2196 | addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4)); | 2233 | UMAC_MDF_ADDR + (*i * 4)); |
| 2197 | bcmgenet_umac_writel(priv, | 2234 | bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | |
| 2198 | addr[2] << 24 | addr[3] << 16 | | 2235 | addr[4] << 8 | addr[5], |
| 2199 | addr[4] << 8 | addr[5], | 2236 | UMAC_MDF_ADDR + ((*i + 1) * 4)); |
| 2200 | UMAC_MDF_ADDR + ((*i + 1) * 4)); | ||
| 2201 | reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL); | 2237 | reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL); |
| 2202 | reg |= (1 << (MAX_MC_COUNT - *mc)); | 2238 | reg |= (1 << (MAX_MC_COUNT - *mc)); |
| 2203 | bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); | 2239 | bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); |
| @@ -2214,7 +2250,7 @@ static void bcmgenet_set_rx_mode(struct net_device *dev) | |||
| 2214 | 2250 | ||
| 2215 | netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); | 2251 | netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); |
| 2216 | 2252 | ||
| 2217 | /* Promiscous mode */ | 2253 | /* Promiscuous mode */ |
| 2218 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | 2254 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); |
| 2219 | if (dev->flags & IFF_PROMISC) { | 2255 | if (dev->flags & IFF_PROMISC) { |
| 2220 | reg |= CMD_PROMISC; | 2256 | reg |= CMD_PROMISC; |
| @@ -2394,7 +2430,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) | |||
| 2394 | 2430 | ||
| 2395 | /* Print the GENET core version */ | 2431 | /* Print the GENET core version */ |
| 2396 | dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, | 2432 | dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, |
| 2397 | major, (reg >> 16) & 0x0f, reg & 0xffff); | 2433 | major, (reg >> 16) & 0x0f, reg & 0xffff); |
| 2398 | 2434 | ||
| 2399 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | 2435 | #ifdef CONFIG_PHYS_ADDR_T_64BIT |
| 2400 | if (!(params->flags & GENET_HAS_40BITS)) | 2436 | if (!(params->flags & GENET_HAS_40BITS)) |
| @@ -2450,6 +2486,7 @@ static int bcmgenet_probe(struct platform_device *pdev) | |||
| 2450 | priv = netdev_priv(dev); | 2486 | priv = netdev_priv(dev); |
| 2451 | priv->irq0 = platform_get_irq(pdev, 0); | 2487 | priv->irq0 = platform_get_irq(pdev, 0); |
| 2452 | priv->irq1 = platform_get_irq(pdev, 1); | 2488 | priv->irq1 = platform_get_irq(pdev, 1); |
| 2489 | priv->wol_irq = platform_get_irq(pdev, 2); | ||
| 2453 | if (!priv->irq0 || !priv->irq1) { | 2490 | if (!priv->irq0 || !priv->irq1) { |
| 2454 | dev_err(&pdev->dev, "can't find IRQs\n"); | 2491 | dev_err(&pdev->dev, "can't find IRQs\n"); |
| 2455 | err = -EINVAL; | 2492 | err = -EINVAL; |
| @@ -2484,6 +2521,13 @@ static int bcmgenet_probe(struct platform_device *pdev) | |||
| 2484 | dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | | 2521 | dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | |
| 2485 | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; | 2522 | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; |
| 2486 | 2523 | ||
| 2524 | /* Request the WOL interrupt and advertise suspend if available */ | ||
| 2525 | priv->wol_irq_disabled = true; | ||
| 2526 | err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0, | ||
| 2527 | dev->name, priv); | ||
| 2528 | if (!err) | ||
| 2529 | device_set_wakeup_capable(&pdev->dev, 1); | ||
| 2530 | |||
| 2487 | /* Set the needed headroom to account for any possible | 2531 | /* Set the needed headroom to account for any possible |
| 2488 | * features enabling/disabling at runtime | 2532 | * features enabling/disabling at runtime |
| 2489 | */ | 2533 | */ |
| @@ -2495,6 +2539,13 @@ static int bcmgenet_probe(struct platform_device *pdev) | |||
| 2495 | priv->pdev = pdev; | 2539 | priv->pdev = pdev; |
| 2496 | priv->version = (enum bcmgenet_version)of_id->data; | 2540 | priv->version = (enum bcmgenet_version)of_id->data; |
| 2497 | 2541 | ||
| 2542 | priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); | ||
| 2543 | if (IS_ERR(priv->clk)) | ||
| 2544 | dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); | ||
| 2545 | |||
| 2546 | if (!IS_ERR(priv->clk)) | ||
| 2547 | clk_prepare_enable(priv->clk); | ||
| 2548 | |||
| 2498 | bcmgenet_set_hw_params(priv); | 2549 | bcmgenet_set_hw_params(priv); |
| 2499 | 2550 | ||
| 2500 | /* Mii wait queue */ | 2551 | /* Mii wait queue */ |
| @@ -2503,17 +2554,10 @@ static int bcmgenet_probe(struct platform_device *pdev) | |||
| 2503 | priv->rx_buf_len = RX_BUF_LENGTH; | 2554 | priv->rx_buf_len = RX_BUF_LENGTH; |
| 2504 | INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); | 2555 | INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); |
| 2505 | 2556 | ||
| 2506 | priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); | ||
| 2507 | if (IS_ERR(priv->clk)) | ||
| 2508 | dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); | ||
| 2509 | |||
| 2510 | priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); | 2557 | priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); |
| 2511 | if (IS_ERR(priv->clk_wol)) | 2558 | if (IS_ERR(priv->clk_wol)) |
| 2512 | dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); | 2559 | dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); |
| 2513 | 2560 | ||
| 2514 | if (!IS_ERR(priv->clk)) | ||
| 2515 | clk_prepare_enable(priv->clk); | ||
| 2516 | |||
| 2517 | err = reset_umac(priv); | 2561 | err = reset_umac(priv); |
| 2518 | if (err) | 2562 | if (err) |
| 2519 | goto err_clk_disable; | 2563 | goto err_clk_disable; |
| @@ -2561,6 +2605,116 @@ static int bcmgenet_remove(struct platform_device *pdev) | |||
| 2561 | return 0; | 2605 | return 0; |
| 2562 | } | 2606 | } |
| 2563 | 2607 | ||
| 2608 | #ifdef CONFIG_PM_SLEEP | ||
| 2609 | static int bcmgenet_suspend(struct device *d) | ||
| 2610 | { | ||
| 2611 | struct net_device *dev = dev_get_drvdata(d); | ||
| 2612 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
| 2613 | int ret; | ||
| 2614 | |||
| 2615 | if (!netif_running(dev)) | ||
| 2616 | return 0; | ||
| 2617 | |||
| 2618 | bcmgenet_netif_stop(dev); | ||
| 2619 | |||
| 2620 | phy_suspend(priv->phydev); | ||
| 2621 | |||
| 2622 | netif_device_detach(dev); | ||
| 2623 | |||
| 2624 | /* Disable MAC receive */ | ||
| 2625 | umac_enable_set(priv, CMD_RX_EN, false); | ||
| 2626 | |||
| 2627 | ret = bcmgenet_dma_teardown(priv); | ||
| 2628 | if (ret) | ||
| 2629 | return ret; | ||
| 2630 | |||
| 2631 | /* Disable MAC transmit. TX DMA disabled have to done before this */ | ||
| 2632 | umac_enable_set(priv, CMD_TX_EN, false); | ||
| 2633 | |||
| 2634 | /* tx reclaim */ | ||
| 2635 | bcmgenet_tx_reclaim_all(dev); | ||
| 2636 | bcmgenet_fini_dma(priv); | ||
| 2637 | |||
| 2638 | /* Prepare the device for Wake-on-LAN and switch to the slow clock */ | ||
| 2639 | if (device_may_wakeup(d) && priv->wolopts) { | ||
| 2640 | bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); | ||
| 2641 | clk_prepare_enable(priv->clk_wol); | ||
| 2642 | } | ||
| 2643 | |||
| 2644 | /* Turn off the clocks */ | ||
| 2645 | clk_disable_unprepare(priv->clk); | ||
| 2646 | |||
| 2647 | return 0; | ||
| 2648 | } | ||
| 2649 | |||
| 2650 | static int bcmgenet_resume(struct device *d) | ||
| 2651 | { | ||
| 2652 | struct net_device *dev = dev_get_drvdata(d); | ||
| 2653 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
| 2654 | unsigned long dma_ctrl; | ||
| 2655 | int ret; | ||
| 2656 | u32 reg; | ||
| 2657 | |||
| 2658 | if (!netif_running(dev)) | ||
| 2659 | return 0; | ||
| 2660 | |||
| 2661 | /* Turn on the clock */ | ||
| 2662 | ret = clk_prepare_enable(priv->clk); | ||
| 2663 | if (ret) | ||
| 2664 | return ret; | ||
| 2665 | |||
| 2666 | bcmgenet_umac_reset(priv); | ||
| 2667 | |||
| 2668 | ret = init_umac(priv); | ||
| 2669 | if (ret) | ||
| 2670 | goto out_clk_disable; | ||
| 2671 | |||
| 2672 | ret = bcmgenet_wol_resume(priv); | ||
| 2673 | if (ret) | ||
| 2674 | goto out_clk_disable; | ||
| 2675 | |||
| 2676 | /* disable ethernet MAC while updating its registers */ | ||
| 2677 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); | ||
| 2678 | |||
| 2679 | bcmgenet_set_hw_addr(priv, dev->dev_addr); | ||
| 2680 | |||
| 2681 | if (phy_is_internal(priv->phydev)) { | ||
| 2682 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | ||
| 2683 | reg |= EXT_ENERGY_DET_MASK; | ||
| 2684 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | ||
| 2685 | } | ||
| 2686 | |||
| 2687 | if (priv->wolopts) | ||
| 2688 | bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); | ||
| 2689 | |||
| 2690 | /* Disable RX/TX DMA and flush TX queues */ | ||
| 2691 | dma_ctrl = bcmgenet_dma_disable(priv); | ||
| 2692 | |||
| 2693 | /* Reinitialize TDMA and RDMA and SW housekeeping */ | ||
| 2694 | ret = bcmgenet_init_dma(priv); | ||
| 2695 | if (ret) { | ||
| 2696 | netdev_err(dev, "failed to initialize DMA\n"); | ||
| 2697 | goto out_clk_disable; | ||
| 2698 | } | ||
| 2699 | |||
| 2700 | /* Always enable ring 16 - descriptor ring */ | ||
| 2701 | bcmgenet_enable_dma(priv, dma_ctrl); | ||
| 2702 | |||
| 2703 | netif_device_attach(dev); | ||
| 2704 | |||
| 2705 | phy_resume(priv->phydev); | ||
| 2706 | |||
| 2707 | bcmgenet_netif_start(dev); | ||
| 2708 | |||
| 2709 | return 0; | ||
| 2710 | |||
| 2711 | out_clk_disable: | ||
| 2712 | clk_disable_unprepare(priv->clk); | ||
| 2713 | return ret; | ||
| 2714 | } | ||
| 2715 | #endif /* CONFIG_PM_SLEEP */ | ||
| 2716 | |||
| 2717 | static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume); | ||
| 2564 | 2718 | ||
| 2565 | static struct platform_driver bcmgenet_driver = { | 2719 | static struct platform_driver bcmgenet_driver = { |
| 2566 | .probe = bcmgenet_probe, | 2720 | .probe = bcmgenet_probe, |
| @@ -2569,6 +2723,7 @@ static struct platform_driver bcmgenet_driver = { | |||
| 2569 | .name = "bcmgenet", | 2723 | .name = "bcmgenet", |
| 2570 | .owner = THIS_MODULE, | 2724 | .owner = THIS_MODULE, |
| 2571 | .of_match_table = bcmgenet_match, | 2725 | .of_match_table = bcmgenet_match, |
| 2726 | .pm = &bcmgenet_pm_ops, | ||
| 2572 | }, | 2727 | }, |
| 2573 | }; | 2728 | }; |
| 2574 | module_platform_driver(bcmgenet_driver); | 2729 | module_platform_driver(bcmgenet_driver); |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index e23c993b1362..c862d0666771 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h | |||
| @@ -4,18 +4,8 @@ | |||
| 4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
| 6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
| 7 | * | 7 | */ |
| 8 | * This program is distributed in the hope that it will be useful, | 8 | |
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License | ||
| 14 | * along with this program; if not, write to the Free Software | ||
| 15 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 16 | * | ||
| 17 | * | ||
| 18 | */ | ||
| 19 | #ifndef __BCMGENET_H__ | 9 | #ifndef __BCMGENET_H__ |
| 20 | #define __BCMGENET_H__ | 10 | #define __BCMGENET_H__ |
| 21 | 11 | ||
| @@ -456,6 +446,7 @@ struct enet_cb { | |||
| 456 | enum bcmgenet_power_mode { | 446 | enum bcmgenet_power_mode { |
| 457 | GENET_POWER_CABLE_SENSE = 0, | 447 | GENET_POWER_CABLE_SENSE = 0, |
| 458 | GENET_POWER_PASSIVE, | 448 | GENET_POWER_PASSIVE, |
| 449 | GENET_POWER_WOL_MAGIC, | ||
| 459 | }; | 450 | }; |
| 460 | 451 | ||
| 461 | struct bcmgenet_priv; | 452 | struct bcmgenet_priv; |
| @@ -513,9 +504,9 @@ struct bcmgenet_tx_ring { | |||
| 513 | unsigned int cb_ptr; /* Tx ring initial CB ptr */ | 504 | unsigned int cb_ptr; /* Tx ring initial CB ptr */ |
| 514 | unsigned int end_ptr; /* Tx ring end CB ptr */ | 505 | unsigned int end_ptr; /* Tx ring end CB ptr */ |
| 515 | void (*int_enable)(struct bcmgenet_priv *priv, | 506 | void (*int_enable)(struct bcmgenet_priv *priv, |
| 516 | struct bcmgenet_tx_ring *); | 507 | struct bcmgenet_tx_ring *); |
| 517 | void (*int_disable)(struct bcmgenet_priv *priv, | 508 | void (*int_disable)(struct bcmgenet_priv *priv, |
| 518 | struct bcmgenet_tx_ring *); | 509 | struct bcmgenet_tx_ring *); |
| 519 | }; | 510 | }; |
| 520 | 511 | ||
| 521 | /* device context */ | 512 | /* device context */ |
| @@ -569,6 +560,8 @@ struct bcmgenet_priv { | |||
| 569 | int irq1; | 560 | int irq1; |
| 570 | unsigned int irq0_stat; | 561 | unsigned int irq0_stat; |
| 571 | unsigned int irq1_stat; | 562 | unsigned int irq1_stat; |
| 563 | int wol_irq; | ||
| 564 | bool wol_irq_disabled; | ||
| 572 | 565 | ||
| 573 | /* HW descriptors/checksum variables */ | 566 | /* HW descriptors/checksum variables */ |
| 574 | bool desc_64b_en; | 567 | bool desc_64b_en; |
| @@ -583,7 +576,6 @@ struct bcmgenet_priv { | |||
| 583 | struct platform_device *pdev; | 576 | struct platform_device *pdev; |
| 584 | 577 | ||
| 585 | /* WOL */ | 578 | /* WOL */ |
| 586 | unsigned long wol_enabled; | ||
| 587 | struct clk *clk_wol; | 579 | struct clk *clk_wol; |
| 588 | u32 wolopts; | 580 | u32 wolopts; |
| 589 | 581 | ||
| @@ -625,4 +617,12 @@ int bcmgenet_mii_config(struct net_device *dev); | |||
| 625 | void bcmgenet_mii_exit(struct net_device *dev); | 617 | void bcmgenet_mii_exit(struct net_device *dev); |
| 626 | void bcmgenet_mii_reset(struct net_device *dev); | 618 | void bcmgenet_mii_reset(struct net_device *dev); |
| 627 | 619 | ||
| 620 | /* Wake-on-LAN routines */ | ||
| 621 | void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol); | ||
| 622 | int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol); | ||
| 623 | int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, | ||
| 624 | enum bcmgenet_power_mode mode); | ||
| 625 | void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, | ||
| 626 | enum bcmgenet_power_mode mode); | ||
| 627 | |||
| 628 | #endif /* __BCMGENET_H__ */ | 628 | #endif /* __BCMGENET_H__ */ |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c new file mode 100644 index 000000000000..b82b7e4e06b2 --- /dev/null +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | |||
| @@ -0,0 +1,206 @@ | |||
| 1 | /* | ||
| 2 | * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support | ||
| 3 | * | ||
| 4 | * Copyright (c) 2014 Broadcom Corporation | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #define pr_fmt(fmt) "bcmgenet_wol: " fmt | ||
| 12 | |||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/sched.h> | ||
| 16 | #include <linux/types.h> | ||
| 17 | #include <linux/interrupt.h> | ||
| 18 | #include <linux/string.h> | ||
| 19 | #include <linux/init.h> | ||
| 20 | #include <linux/errno.h> | ||
| 21 | #include <linux/delay.h> | ||
| 22 | #include <linux/pm.h> | ||
| 23 | #include <linux/clk.h> | ||
| 24 | #include <linux/version.h> | ||
| 25 | #include <linux/platform_device.h> | ||
| 26 | #include <net/arp.h> | ||
| 27 | |||
| 28 | #include <linux/mii.h> | ||
| 29 | #include <linux/ethtool.h> | ||
| 30 | #include <linux/netdevice.h> | ||
| 31 | #include <linux/inetdevice.h> | ||
| 32 | #include <linux/etherdevice.h> | ||
| 33 | #include <linux/skbuff.h> | ||
| 34 | #include <linux/in.h> | ||
| 35 | #include <linux/ip.h> | ||
| 36 | #include <linux/ipv6.h> | ||
| 37 | #include <linux/phy.h> | ||
| 38 | |||
| 39 | #include "bcmgenet.h" | ||
| 40 | |||
| 41 | /* ethtool function - get WOL (Wake on LAN) settings, Only Magic Packet | ||
| 42 | * Detection is supported through ethtool | ||
| 43 | */ | ||
| 44 | void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
| 45 | { | ||
| 46 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
| 47 | u32 reg; | ||
| 48 | |||
| 49 | wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE; | ||
| 50 | wol->wolopts = priv->wolopts; | ||
| 51 | memset(wol->sopass, 0, sizeof(wol->sopass)); | ||
| 52 | |||
| 53 | if (wol->wolopts & WAKE_MAGICSECURE) { | ||
| 54 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_MS); | ||
| 55 | put_unaligned_be16(reg, &wol->sopass[0]); | ||
| 56 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_LS); | ||
| 57 | put_unaligned_be32(reg, &wol->sopass[2]); | ||
| 58 | } | ||
| 59 | } | ||
| 60 | |||
| 61 | /* ethtool function - set WOL (Wake on LAN) settings. | ||
| 62 | * Only for magic packet detection mode. | ||
| 63 | */ | ||
| 64 | int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
| 65 | { | ||
| 66 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
| 67 | struct device *kdev = &priv->pdev->dev; | ||
| 68 | u32 reg; | ||
| 69 | |||
| 70 | if (!device_can_wakeup(kdev)) | ||
| 71 | return -ENOTSUPP; | ||
| 72 | |||
| 73 | if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) | ||
| 74 | return -EINVAL; | ||
| 75 | |||
| 76 | if (wol->wolopts & WAKE_MAGICSECURE) { | ||
| 77 | bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), | ||
| 78 | UMAC_MPD_PW_MS); | ||
| 79 | bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), | ||
| 80 | UMAC_MPD_PW_LS); | ||
| 81 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); | ||
| 82 | reg |= MPD_PW_EN; | ||
| 83 | bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); | ||
| 84 | } | ||
| 85 | |||
| 86 | /* Flag the device and relevant IRQ as wakeup capable */ | ||
| 87 | if (wol->wolopts) { | ||
| 88 | device_set_wakeup_enable(kdev, 1); | ||
| 89 | enable_irq_wake(priv->wol_irq); | ||
| 90 | priv->wol_irq_disabled = false; | ||
| 91 | } else { | ||
| 92 | device_set_wakeup_enable(kdev, 0); | ||
| 93 | /* Avoid unbalanced disable_irq_wake calls */ | ||
| 94 | if (!priv->wol_irq_disabled) | ||
| 95 | disable_irq_wake(priv->wol_irq); | ||
| 96 | priv->wol_irq_disabled = true; | ||
| 97 | } | ||
| 98 | |||
| 99 | priv->wolopts = wol->wolopts; | ||
| 100 | |||
| 101 | return 0; | ||
| 102 | } | ||
| 103 | |||
| 104 | static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv) | ||
| 105 | { | ||
| 106 | struct net_device *dev = priv->dev; | ||
| 107 | int retries = 0; | ||
| 108 | |||
| 109 | while (!(bcmgenet_rbuf_readl(priv, RBUF_STATUS) | ||
| 110 | & RBUF_STATUS_WOL)) { | ||
| 111 | retries++; | ||
| 112 | if (retries > 5) { | ||
| 113 | netdev_crit(dev, "polling wol mode timeout\n"); | ||
| 114 | return -ETIMEDOUT; | ||
| 115 | } | ||
| 116 | mdelay(1); | ||
| 117 | } | ||
| 118 | |||
| 119 | return retries; | ||
| 120 | } | ||
| 121 | |||
| 122 | int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, | ||
| 123 | enum bcmgenet_power_mode mode) | ||
| 124 | { | ||
| 125 | struct net_device *dev = priv->dev; | ||
| 126 | u32 cpu_mask_clear; | ||
| 127 | int retries = 0; | ||
| 128 | u32 reg; | ||
| 129 | |||
| 130 | if (mode != GENET_POWER_WOL_MAGIC) { | ||
| 131 | netif_err(priv, wol, dev, "unsupported mode: %d\n", mode); | ||
| 132 | return -EINVAL; | ||
| 133 | } | ||
| 134 | |||
| 135 | /* disable RX */ | ||
| 136 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | ||
| 137 | reg &= ~CMD_RX_EN; | ||
| 138 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | ||
| 139 | mdelay(10); | ||
| 140 | |||
| 141 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); | ||
| 142 | reg |= MPD_EN; | ||
| 143 | bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); | ||
| 144 | |||
| 145 | /* Do not leave UniMAC in MPD mode only */ | ||
| 146 | retries = bcmgenet_poll_wol_status(priv); | ||
| 147 | if (retries < 0) { | ||
| 148 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); | ||
| 149 | reg &= ~MPD_EN; | ||
| 150 | bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); | ||
| 151 | return retries; | ||
| 152 | } | ||
| 153 | |||
| 154 | netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n", | ||
| 155 | retries); | ||
| 156 | |||
| 157 | /* Enable CRC forward */ | ||
| 158 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | ||
| 159 | priv->crc_fwd_en = 1; | ||
| 160 | reg |= CMD_CRC_FWD; | ||
| 161 | |||
| 162 | /* Receiver must be enabled for WOL MP detection */ | ||
| 163 | reg |= CMD_RX_EN; | ||
| 164 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | ||
| 165 | |||
| 166 | if (priv->hw_params->flags & GENET_HAS_EXT) { | ||
| 167 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | ||
| 168 | reg &= ~EXT_ENERGY_DET_MASK; | ||
| 169 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | ||
| 170 | } | ||
| 171 | |||
| 172 | /* Enable the MPD interrupt */ | ||
| 173 | cpu_mask_clear = UMAC_IRQ_MPD_R; | ||
| 174 | |||
| 175 | bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); | ||
| 176 | |||
| 177 | return 0; | ||
| 178 | } | ||
| 179 | |||
| 180 | void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, | ||
| 181 | enum bcmgenet_power_mode mode) | ||
| 182 | { | ||
| 183 | u32 cpu_mask_set; | ||
| 184 | u32 reg; | ||
| 185 | |||
| 186 | if (mode != GENET_POWER_WOL_MAGIC) { | ||
| 187 | netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode); | ||
| 188 | return; | ||
| 189 | } | ||
| 190 | |||
| 191 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); | ||
| 192 | reg &= ~MPD_EN; | ||
| 193 | bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); | ||
| 194 | |||
| 195 | /* Disable CRC Forward */ | ||
| 196 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | ||
| 197 | reg &= ~CMD_CRC_FWD; | ||
| 198 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | ||
| 199 | priv->crc_fwd_en = 0; | ||
| 200 | |||
| 201 | /* Stop monitoring magic packet IRQ */ | ||
| 202 | cpu_mask_set = UMAC_IRQ_MPD_R; | ||
| 203 | |||
| 204 | /* Stop monitoring magic packet IRQ */ | ||
| 205 | bcmgenet_intrl2_0_writel(priv, cpu_mask_set, INTRL2_CPU_MASK_SET); | ||
| 206 | } | ||
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index add8d8596084..c88f7ae99636 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c | |||
| @@ -6,15 +6,6 @@ | |||
| 6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
| 8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program; if not, write to the Free Software | ||
| 17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 18 | */ | 9 | */ |
| 19 | 10 | ||
| 20 | 11 | ||
| @@ -44,15 +35,15 @@ static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location) | |||
| 44 | u32 reg; | 35 | u32 reg; |
| 45 | 36 | ||
| 46 | bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | | 37 | bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | |
| 47 | (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD); | 38 | (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD); |
| 48 | /* Start MDIO transaction*/ | 39 | /* Start MDIO transaction*/ |
| 49 | reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); | 40 | reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); |
| 50 | reg |= MDIO_START_BUSY; | 41 | reg |= MDIO_START_BUSY; |
| 51 | bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); | 42 | bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); |
| 52 | wait_event_timeout(priv->wq, | 43 | wait_event_timeout(priv->wq, |
| 53 | !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) | 44 | !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) |
| 54 | & MDIO_START_BUSY), | 45 | & MDIO_START_BUSY), |
| 55 | HZ / 100); | 46 | HZ / 100); |
| 56 | ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); | 47 | ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); |
| 57 | 48 | ||
| 58 | if (ret & MDIO_READ_FAIL) | 49 | if (ret & MDIO_READ_FAIL) |
| @@ -63,22 +54,22 @@ static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location) | |||
| 63 | 54 | ||
| 64 | /* write a value to the MII */ | 55 | /* write a value to the MII */ |
| 65 | static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id, | 56 | static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id, |
| 66 | int location, u16 val) | 57 | int location, u16 val) |
| 67 | { | 58 | { |
| 68 | struct net_device *dev = bus->priv; | 59 | struct net_device *dev = bus->priv; |
| 69 | struct bcmgenet_priv *priv = netdev_priv(dev); | 60 | struct bcmgenet_priv *priv = netdev_priv(dev); |
| 70 | u32 reg; | 61 | u32 reg; |
| 71 | 62 | ||
| 72 | bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) | | 63 | bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) | |
| 73 | (location << MDIO_REG_SHIFT) | (0xffff & val)), | 64 | (location << MDIO_REG_SHIFT) | (0xffff & val)), |
| 74 | UMAC_MDIO_CMD); | 65 | UMAC_MDIO_CMD); |
| 75 | reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); | 66 | reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); |
| 76 | reg |= MDIO_START_BUSY; | 67 | reg |= MDIO_START_BUSY; |
| 77 | bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); | 68 | bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); |
| 78 | wait_event_timeout(priv->wq, | 69 | wait_event_timeout(priv->wq, |
| 79 | !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) & | 70 | !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) & |
| 80 | MDIO_START_BUSY), | 71 | MDIO_START_BUSY), |
| 81 | HZ / 100); | 72 | HZ / 100); |
| 82 | 73 | ||
| 83 | return 0; | 74 | return 0; |
| 84 | } | 75 | } |
| @@ -136,17 +127,22 @@ static void bcmgenet_mii_setup(struct net_device *dev) | |||
| 136 | /* pause capability */ | 127 | /* pause capability */ |
| 137 | if (!phydev->pause) | 128 | if (!phydev->pause) |
| 138 | cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; | 129 | cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; |
| 130 | } | ||
| 139 | 131 | ||
| 132 | if (!status_changed) | ||
| 133 | return; | ||
| 134 | |||
| 135 | if (phydev->link) { | ||
| 140 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | 136 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); |
| 141 | reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | | 137 | reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | |
| 142 | CMD_HD_EN | | 138 | CMD_HD_EN | |
| 143 | CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); | 139 | CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); |
| 144 | reg |= cmd_bits; | 140 | reg |= cmd_bits; |
| 145 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | 141 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); |
| 142 | |||
| 146 | } | 143 | } |
| 147 | 144 | ||
| 148 | if (status_changed) | 145 | phy_print_status(phydev); |
| 149 | phy_print_status(phydev); | ||
| 150 | } | 146 | } |
| 151 | 147 | ||
| 152 | void bcmgenet_mii_reset(struct net_device *dev) | 148 | void bcmgenet_mii_reset(struct net_device *dev) |
| @@ -247,7 +243,7 @@ int bcmgenet_mii_config(struct net_device *dev) | |||
| 247 | phy_name = "external MII"; | 243 | phy_name = "external MII"; |
| 248 | phydev->supported &= PHY_BASIC_FEATURES; | 244 | phydev->supported &= PHY_BASIC_FEATURES; |
| 249 | bcmgenet_sys_writel(priv, | 245 | bcmgenet_sys_writel(priv, |
| 250 | PORT_MODE_EXT_EPHY, SYS_PORT_CTRL); | 246 | PORT_MODE_EXT_EPHY, SYS_PORT_CTRL); |
| 251 | break; | 247 | break; |
| 252 | 248 | ||
| 253 | case PHY_INTERFACE_MODE_REVMII: | 249 | case PHY_INTERFACE_MODE_REVMII: |
| @@ -283,7 +279,7 @@ int bcmgenet_mii_config(struct net_device *dev) | |||
| 283 | reg |= RGMII_MODE_EN | id_mode_dis; | 279 | reg |= RGMII_MODE_EN | id_mode_dis; |
| 284 | bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); | 280 | bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); |
| 285 | bcmgenet_sys_writel(priv, | 281 | bcmgenet_sys_writel(priv, |
| 286 | PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); | 282 | PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); |
| 287 | break; | 283 | break; |
| 288 | default: | 284 | default: |
| 289 | dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface); | 285 | dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface); |
| @@ -311,12 +307,12 @@ static int bcmgenet_mii_probe(struct net_device *dev) | |||
| 311 | /* In the case of a fixed PHY, the DT node associated | 307 | /* In the case of a fixed PHY, the DT node associated |
| 312 | * to the PHY is the Ethernet MAC DT node. | 308 | * to the PHY is the Ethernet MAC DT node. |
| 313 | */ | 309 | */ |
| 314 | if (of_phy_is_fixed_link(dn)) { | 310 | if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { |
| 315 | ret = of_phy_register_fixed_link(dn); | 311 | ret = of_phy_register_fixed_link(dn); |
| 316 | if (ret) | 312 | if (ret) |
| 317 | return ret; | 313 | return ret; |
| 318 | 314 | ||
| 319 | priv->phy_dn = dn; | 315 | priv->phy_dn = of_node_get(dn); |
| 320 | } | 316 | } |
| 321 | 317 | ||
| 322 | phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, 0, | 318 | phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, 0, |
| @@ -362,7 +358,7 @@ static int bcmgenet_mii_probe(struct net_device *dev) | |||
| 362 | priv->mii_bus->irq[phydev->addr] = PHY_POLL; | 358 | priv->mii_bus->irq[phydev->addr] = PHY_POLL; |
| 363 | 359 | ||
| 364 | pr_info("attached PHY at address %d [%s]\n", | 360 | pr_info("attached PHY at address %d [%s]\n", |
| 365 | phydev->addr, phydev->drv->name); | 361 | phydev->addr, phydev->drv->name); |
| 366 | 362 | ||
| 367 | return 0; | 363 | return 0; |
| 368 | } | 364 | } |
| @@ -387,9 +383,9 @@ static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv) | |||
| 387 | bus->read = bcmgenet_mii_read; | 383 | bus->read = bcmgenet_mii_read; |
| 388 | bus->write = bcmgenet_mii_write; | 384 | bus->write = bcmgenet_mii_write; |
| 389 | snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", | 385 | snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", |
| 390 | priv->pdev->name, priv->pdev->id); | 386 | priv->pdev->name, priv->pdev->id); |
| 391 | 387 | ||
| 392 | bus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); | 388 | bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); |
| 393 | if (!bus->irq) { | 389 | if (!bus->irq) { |
| 394 | mdiobus_free(priv->mii_bus); | 390 | mdiobus_free(priv->mii_bus); |
| 395 | return -ENOMEM; | 391 | return -ENOMEM; |
| @@ -452,6 +448,7 @@ int bcmgenet_mii_init(struct net_device *dev) | |||
| 452 | return 0; | 448 | return 0; |
| 453 | 449 | ||
| 454 | out: | 450 | out: |
| 451 | of_node_put(priv->phy_dn); | ||
| 455 | mdiobus_unregister(priv->mii_bus); | 452 | mdiobus_unregister(priv->mii_bus); |
| 456 | out_free: | 453 | out_free: |
| 457 | kfree(priv->mii_bus->irq); | 454 | kfree(priv->mii_bus->irq); |
| @@ -463,6 +460,7 @@ void bcmgenet_mii_exit(struct net_device *dev) | |||
| 463 | { | 460 | { |
| 464 | struct bcmgenet_priv *priv = netdev_priv(dev); | 461 | struct bcmgenet_priv *priv = netdev_priv(dev); |
| 465 | 462 | ||
| 463 | of_node_put(priv->phy_dn); | ||
| 466 | mdiobus_unregister(priv->mii_bus); | 464 | mdiobus_unregister(priv->mii_bus); |
| 467 | kfree(priv->mii_bus->irq); | 465 | kfree(priv->mii_bus->irq); |
| 468 | mdiobus_free(priv->mii_bus); | 466 | mdiobus_free(priv->mii_bus); |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 8afa579e7c40..3ac5d23454a8 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
| @@ -237,7 +237,7 @@ MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); | |||
| 237 | #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001 | 237 | #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001 |
| 238 | #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002 | 238 | #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002 |
| 239 | 239 | ||
| 240 | static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { | 240 | static const struct pci_device_id tg3_pci_tbl[] = { |
| 241 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, | 241 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, |
| 242 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, | 242 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, |
| 243 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, | 243 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, |
| @@ -7830,17 +7830,18 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, | |||
| 7830 | 7830 | ||
| 7831 | static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); | 7831 | static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); |
| 7832 | 7832 | ||
| 7833 | /* Use GSO to workaround a rare TSO bug that may be triggered when the | 7833 | /* Use GSO to workaround all TSO packets that meet HW bug conditions |
| 7834 | * TSO header is greater than 80 bytes. | 7834 | * indicated in tg3_tx_frag_set() |
| 7835 | */ | 7835 | */ |
| 7836 | static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) | 7836 | static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, |
| 7837 | struct netdev_queue *txq, struct sk_buff *skb) | ||
| 7837 | { | 7838 | { |
| 7838 | struct sk_buff *segs, *nskb; | 7839 | struct sk_buff *segs, *nskb; |
| 7839 | u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; | 7840 | u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; |
| 7840 | 7841 | ||
| 7841 | /* Estimate the number of fragments in the worst case */ | 7842 | /* Estimate the number of fragments in the worst case */ |
| 7842 | if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) { | 7843 | if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) { |
| 7843 | netif_stop_queue(tp->dev); | 7844 | netif_tx_stop_queue(txq); |
| 7844 | 7845 | ||
| 7845 | /* netif_tx_stop_queue() must be done before checking | 7846 | /* netif_tx_stop_queue() must be done before checking |
| 7846 | * checking tx index in tg3_tx_avail() below, because in | 7847 | * checking tx index in tg3_tx_avail() below, because in |
| @@ -7848,13 +7849,14 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) | |||
| 7848 | * netif_tx_queue_stopped(). | 7849 | * netif_tx_queue_stopped(). |
| 7849 | */ | 7850 | */ |
| 7850 | smp_mb(); | 7851 | smp_mb(); |
| 7851 | if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est) | 7852 | if (tg3_tx_avail(tnapi) <= frag_cnt_est) |
| 7852 | return NETDEV_TX_BUSY; | 7853 | return NETDEV_TX_BUSY; |
| 7853 | 7854 | ||
| 7854 | netif_wake_queue(tp->dev); | 7855 | netif_tx_wake_queue(txq); |
| 7855 | } | 7856 | } |
| 7856 | 7857 | ||
| 7857 | segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6)); | 7858 | segs = skb_gso_segment(skb, tp->dev->features & |
| 7859 | ~(NETIF_F_TSO | NETIF_F_TSO6)); | ||
| 7858 | if (IS_ERR(segs) || !segs) | 7860 | if (IS_ERR(segs) || !segs) |
| 7859 | goto tg3_tso_bug_end; | 7861 | goto tg3_tso_bug_end; |
| 7860 | 7862 | ||
| @@ -7930,7 +7932,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 7930 | if (!skb_is_gso_v6(skb)) { | 7932 | if (!skb_is_gso_v6(skb)) { |
| 7931 | if (unlikely((ETH_HLEN + hdr_len) > 80) && | 7933 | if (unlikely((ETH_HLEN + hdr_len) > 80) && |
| 7932 | tg3_flag(tp, TSO_BUG)) | 7934 | tg3_flag(tp, TSO_BUG)) |
| 7933 | return tg3_tso_bug(tp, skb); | 7935 | return tg3_tso_bug(tp, tnapi, txq, skb); |
| 7934 | 7936 | ||
| 7935 | ip_csum = iph->check; | 7937 | ip_csum = iph->check; |
| 7936 | ip_tot_len = iph->tot_len; | 7938 | ip_tot_len = iph->tot_len; |
| @@ -8061,7 +8063,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 8061 | iph->tot_len = ip_tot_len; | 8063 | iph->tot_len = ip_tot_len; |
| 8062 | } | 8064 | } |
| 8063 | tcph->check = tcp_csum; | 8065 | tcph->check = tcp_csum; |
| 8064 | return tg3_tso_bug(tp, skb); | 8066 | return tg3_tso_bug(tp, tnapi, txq, skb); |
| 8065 | } | 8067 | } |
| 8066 | 8068 | ||
| 8067 | /* If the workaround fails due to memory/mapping | 8069 | /* If the workaround fails due to memory/mapping |
| @@ -14091,8 +14093,9 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, | |||
| 14091 | 14093 | ||
| 14092 | spin_lock_bh(&tp->lock); | 14094 | spin_lock_bh(&tp->lock); |
| 14093 | if (!tp->hw_stats) { | 14095 | if (!tp->hw_stats) { |
| 14096 | *stats = tp->net_stats_prev; | ||
| 14094 | spin_unlock_bh(&tp->lock); | 14097 | spin_unlock_bh(&tp->lock); |
| 14095 | return &tp->net_stats_prev; | 14098 | return stats; |
| 14096 | } | 14099 | } |
| 14097 | 14100 | ||
| 14098 | tg3_get_nstats(tp, stats); | 14101 | tg3_get_nstats(tp, stats); |
| @@ -15924,7 +15927,7 @@ static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) | |||
| 15924 | return TG3_RX_RET_MAX_SIZE_5705; | 15927 | return TG3_RX_RET_MAX_SIZE_5705; |
| 15925 | } | 15928 | } |
| 15926 | 15929 | ||
| 15927 | static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = { | 15930 | static const struct pci_device_id tg3_write_reorder_chipsets[] = { |
| 15928 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, | 15931 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, |
| 15929 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, | 15932 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, |
| 15930 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, | 15933 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, |
| @@ -17183,7 +17186,7 @@ static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, | |||
| 17183 | 17186 | ||
| 17184 | #define TEST_BUFFER_SIZE 0x2000 | 17187 | #define TEST_BUFFER_SIZE 0x2000 |
| 17185 | 17188 | ||
| 17186 | static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = { | 17189 | static const struct pci_device_id tg3_dma_wait_state_chipsets[] = { |
| 17187 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, | 17190 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, |
| 17188 | { }, | 17191 | { }, |
| 17189 | }; | 17192 | }; |
