diff options
author		Thomas Gleixner <tglx@linutronix.de>	2016-01-12 05:01:12 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2016-01-12 05:01:12 -0500
commit		1f16f116b01c110db20ab808562c8b8bc3ee3d6e (patch)
tree		44db563f64cf5f8d62af8f99a61e2b248c44ea3a /drivers/net/ethernet
parent		03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2 (diff)
parent		f9eccf24615672896dc13251410c3f2f33a14f95 (diff)
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent
Pull in fixes from Daniel Lezcano:
- Fix the vt8500 timer locking up the system when handed too small a
  delta (Roman Volkov); a sketch of the usual guard against this follows
  the list
- Select CLKSRC_MMIO when the fsl_ftm_timer is enabled with COMPILE_TEST
  (Daniel Lezcano)
- Prevent compiling timers that use the 'iomem' API when the architecture
  does not have HAS_IOMEM set (Richard Weinberger)
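For context, the vt8500 fix follows a common clockevents pattern: when the
requested delta is so small that the free-running counter has already passed
the programmed match value by the time the write lands, the handler must
return -ETIME so the core can retry with a larger delta, instead of hanging
until the counter wraps. A minimal kernel-style sketch of that guard, using
hypothetical TIMER_COUNT/TIMER_MATCH offsets rather than the real vt8500
register layout:

	#include <linux/clockchips.h>
	#include <linux/io.h>

	/* Hypothetical register offsets; the real vt8500 names differ. */
	#define TIMER_COUNT	0x00
	#define TIMER_MATCH	0x04

	static void __iomem *timer_base;	/* assumed mapped during probe */

	static int sketch_set_next_event(unsigned long cycles,
					 struct clock_event_device *evt)
	{
		u32 alarm = readl(timer_base + TIMER_COUNT) + cycles;

		writel(alarm, timer_base + TIMER_MATCH);

		/* If the counter has already passed the match value, the
		 * interrupt will not fire until a full wraparound; report
		 * -ETIME so the clockevents core retries.
		 */
		if ((s32)(alarm - readl(timer_base + TIMER_COUNT)) <= 0)
			return -ETIME;

		return 0;
	}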
Diffstat (limited to 'drivers/net/ethernet')
71 files changed, 2490 insertions(+), 387 deletions(-)
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 955d06b9cdba..31c5e476fd64 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -29,6 +29,7 @@ source "drivers/net/ethernet/apm/Kconfig"
29 | source "drivers/net/ethernet/apple/Kconfig" | 29 | source "drivers/net/ethernet/apple/Kconfig" |
30 | source "drivers/net/ethernet/arc/Kconfig" | 30 | source "drivers/net/ethernet/arc/Kconfig" |
31 | source "drivers/net/ethernet/atheros/Kconfig" | 31 | source "drivers/net/ethernet/atheros/Kconfig" |
32 | source "drivers/net/ethernet/aurora/Kconfig" | ||
32 | source "drivers/net/ethernet/cadence/Kconfig" | 33 | source "drivers/net/ethernet/cadence/Kconfig" |
33 | source "drivers/net/ethernet/adi/Kconfig" | 34 | source "drivers/net/ethernet/adi/Kconfig" |
34 | source "drivers/net/ethernet/broadcom/Kconfig" | 35 | source "drivers/net/ethernet/broadcom/Kconfig" |
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 4a2ee98738f0..071f84eb6f3f 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_XGENE) += apm/
15 | obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ | 15 | obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ |
16 | obj-$(CONFIG_NET_VENDOR_ARC) += arc/ | 16 | obj-$(CONFIG_NET_VENDOR_ARC) += arc/ |
17 | obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ | 17 | obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ |
18 | obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/ | ||
18 | obj-$(CONFIG_NET_CADENCE) += cadence/ | 19 | obj-$(CONFIG_NET_CADENCE) += cadence/ |
19 | obj-$(CONFIG_NET_BFIN) += adi/ | 20 | obj-$(CONFIG_NET_BFIN) += adi/ |
20 | obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ | 21 | obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 970781a9e677..f6a7161e3b85 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1849,7 +1849,7 @@ static int xgbe_exit(struct xgbe_prv_data *pdata)
1849 | usleep_range(10, 15); | 1849 | usleep_range(10, 15); |
1850 | 1850 | ||
1851 | /* Poll Until Poll Condition */ | 1851 | /* Poll Until Poll Condition */ |
1852 | while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) | 1852 | while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) |
1853 | usleep_range(500, 600); | 1853 | usleep_range(500, 600); |
1854 | 1854 | ||
1855 | if (!count) | 1855 | if (!count) |
@@ -1873,7 +1873,7 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
1873 | /* Poll Until Poll Condition */ | 1873 | /* Poll Until Poll Condition */ |
1874 | for (i = 0; i < pdata->tx_q_count; i++) { | 1874 | for (i = 0; i < pdata->tx_q_count; i++) { |
1875 | count = 2000; | 1875 | count = 2000; |
1876 | while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i, | 1876 | while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i, |
1877 | MTL_Q_TQOMR, FTQ)) | 1877 | MTL_Q_TQOMR, FTQ)) |
1878 | usleep_range(500, 600); | 1878 | usleep_range(500, 600); |
1879 | 1879 | ||
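The post- to pre-decrement change above is not cosmetic: both loops are
followed by an "if (!count)" timeout check, and "while (count-- && ...)"
exits the timed-out path with count == -1, so the timeout is never
reported. A standalone C demonstration of the difference (busy() stands
in for a poll condition that never clears):

	#include <stdio.h>

	static int busy(void)
	{
		return 1;	/* condition never clears: the timeout path */
	}

	int main(void)
	{
		int count = 3;

		while (count-- && busy())
			;
		/* prints -1: "!count" would be false, timeout missed */
		printf("post-decrement leaves count = %d\n", count);

		count = 3;
		while (--count && busy())
			;
		/* prints 0: "!count" correctly reports the timeout */
		printf("pre-decrement leaves count = %d\n", count);

		return 0;
	}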
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 991412ce6f48..d0ae1a6cc212 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -289,6 +289,7 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
289 | struct sk_buff *skb) | 289 | struct sk_buff *skb) |
290 | { | 290 | { |
291 | struct device *dev = ndev_to_dev(tx_ring->ndev); | 291 | struct device *dev = ndev_to_dev(tx_ring->ndev); |
292 | struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev); | ||
292 | struct xgene_enet_raw_desc *raw_desc; | 293 | struct xgene_enet_raw_desc *raw_desc; |
293 | __le64 *exp_desc = NULL, *exp_bufs = NULL; | 294 | __le64 *exp_desc = NULL, *exp_bufs = NULL; |
294 | dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr; | 295 | dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr; |
@@ -419,6 +420,7 @@ out:
419 | raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) | | 420 | raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) | |
420 | SET_VAL(USERINFO, tx_ring->tail)); | 421 | SET_VAL(USERINFO, tx_ring->tail)); |
421 | tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb; | 422 | tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb; |
423 | pdata->tx_level += count; | ||
422 | tx_ring->tail = tail; | 424 | tx_ring->tail = tail; |
423 | 425 | ||
424 | return count; | 426 | return count; |
@@ -429,14 +431,13 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
429 | { | 431 | { |
430 | struct xgene_enet_pdata *pdata = netdev_priv(ndev); | 432 | struct xgene_enet_pdata *pdata = netdev_priv(ndev); |
431 | struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; | 433 | struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; |
432 | struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring; | 434 | u32 tx_level = pdata->tx_level; |
433 | u32 tx_level, cq_level; | ||
434 | int count; | 435 | int count; |
435 | 436 | ||
436 | tx_level = pdata->ring_ops->len(tx_ring); | 437 | if (tx_level < pdata->txc_level) |
437 | cq_level = pdata->ring_ops->len(cp_ring); | 438 | tx_level += ((typeof(pdata->tx_level))~0U); |
438 | if (unlikely(tx_level > pdata->tx_qcnt_hi || | 439 | |
439 | cq_level > pdata->cp_qcnt_hi)) { | 440 | if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) { |
440 | netif_stop_queue(ndev); | 441 | netif_stop_queue(ndev); |
441 | return NETDEV_TX_BUSY; | 442 | return NETDEV_TX_BUSY; |
442 | } | 443 | } |
@@ -450,12 +451,12 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
450 | return NETDEV_TX_OK; | 451 | return NETDEV_TX_OK; |
451 | } | 452 | } |
452 | 453 | ||
453 | pdata->ring_ops->wr_cmd(tx_ring, count); | ||
454 | skb_tx_timestamp(skb); | 454 | skb_tx_timestamp(skb); |
455 | 455 | ||
456 | pdata->stats.tx_packets++; | 456 | pdata->stats.tx_packets++; |
457 | pdata->stats.tx_bytes += skb->len; | 457 | pdata->stats.tx_bytes += skb->len; |
458 | 458 | ||
459 | pdata->ring_ops->wr_cmd(tx_ring, count); | ||
459 | return NETDEV_TX_OK; | 460 | return NETDEV_TX_OK; |
460 | } | 461 | } |
461 | 462 | ||
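The tx_level/txc_level bookkeeping introduced above replaces ring-length
queries with two free-running u16 counters: tx_level counts descriptors the
driver has queued, txc_level counts completions, and their difference is the
number of descriptors still in flight. Because the counters are allowed to
wrap, the patch widens tx_level to 32 bits and adds a bias when it trails
txc_level; the underlying idea is plain modular arithmetic, as in this
standalone sketch (values chosen only for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t tx_level = 10;		/* producer wrapped past 65535 */
		uint16_t txc_level = 65530;	/* consumer has not wrapped yet */

		/* unsigned 16-bit subtraction is wraparound-safe */
		uint16_t pending = (uint16_t)(tx_level - txc_level);

		printf("descriptors in flight = %u\n", pending);	/* 16 */
		return 0;
	}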
@@ -539,10 +540,13 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
539 | struct xgene_enet_raw_desc *raw_desc, *exp_desc; | 540 | struct xgene_enet_raw_desc *raw_desc, *exp_desc; |
540 | u16 head = ring->head; | 541 | u16 head = ring->head; |
541 | u16 slots = ring->slots - 1; | 542 | u16 slots = ring->slots - 1; |
542 | int ret, count = 0, processed = 0; | 543 | int ret, desc_count, count = 0, processed = 0; |
544 | bool is_completion; | ||
543 | 545 | ||
544 | do { | 546 | do { |
545 | raw_desc = &ring->raw_desc[head]; | 547 | raw_desc = &ring->raw_desc[head]; |
548 | desc_count = 0; | ||
549 | is_completion = false; | ||
546 | exp_desc = NULL; | 550 | exp_desc = NULL; |
547 | if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) | 551 | if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) |
548 | break; | 552 | break; |
@@ -559,18 +563,24 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
559 | } | 563 | } |
560 | dma_rmb(); | 564 | dma_rmb(); |
561 | count++; | 565 | count++; |
566 | desc_count++; | ||
562 | } | 567 | } |
563 | if (is_rx_desc(raw_desc)) | 568 | if (is_rx_desc(raw_desc)) { |
564 | ret = xgene_enet_rx_frame(ring, raw_desc); | 569 | ret = xgene_enet_rx_frame(ring, raw_desc); |
565 | else | 570 | } else { |
566 | ret = xgene_enet_tx_completion(ring, raw_desc); | 571 | ret = xgene_enet_tx_completion(ring, raw_desc); |
572 | is_completion = true; | ||
573 | } | ||
567 | xgene_enet_mark_desc_slot_empty(raw_desc); | 574 | xgene_enet_mark_desc_slot_empty(raw_desc); |
568 | if (exp_desc) | 575 | if (exp_desc) |
569 | xgene_enet_mark_desc_slot_empty(exp_desc); | 576 | xgene_enet_mark_desc_slot_empty(exp_desc); |
570 | 577 | ||
571 | head = (head + 1) & slots; | 578 | head = (head + 1) & slots; |
572 | count++; | 579 | count++; |
580 | desc_count++; | ||
573 | processed++; | 581 | processed++; |
582 | if (is_completion) | ||
583 | pdata->txc_level += desc_count; | ||
574 | 584 | ||
575 | if (ret) | 585 | if (ret) |
576 | break; | 586 | break; |
@@ -580,10 +590,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
580 | pdata->ring_ops->wr_cmd(ring, -count); | 590 | pdata->ring_ops->wr_cmd(ring, -count); |
581 | ring->head = head; | 591 | ring->head = head; |
582 | 592 | ||
583 | if (netif_queue_stopped(ring->ndev)) { | 593 | if (netif_queue_stopped(ring->ndev)) |
584 | if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low) | 594 | netif_start_queue(ring->ndev); |
585 | netif_wake_queue(ring->ndev); | ||
586 | } | ||
587 | } | 595 | } |
588 | 596 | ||
589 | return processed; | 597 | return processed; |
@@ -688,10 +696,10 @@ static int xgene_enet_open(struct net_device *ndev)
688 | mac_ops->tx_enable(pdata); | 696 | mac_ops->tx_enable(pdata); |
689 | mac_ops->rx_enable(pdata); | 697 | mac_ops->rx_enable(pdata); |
690 | 698 | ||
699 | xgene_enet_napi_enable(pdata); | ||
691 | ret = xgene_enet_register_irq(ndev); | 700 | ret = xgene_enet_register_irq(ndev); |
692 | if (ret) | 701 | if (ret) |
693 | return ret; | 702 | return ret; |
694 | xgene_enet_napi_enable(pdata); | ||
695 | 703 | ||
696 | if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) | 704 | if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) |
697 | phy_start(pdata->phy_dev); | 705 | phy_start(pdata->phy_dev); |
@@ -715,13 +723,13 @@ static int xgene_enet_close(struct net_device *ndev)
715 | else | 723 | else |
716 | cancel_delayed_work_sync(&pdata->link_work); | 724 | cancel_delayed_work_sync(&pdata->link_work); |
717 | 725 | ||
718 | xgene_enet_napi_disable(pdata); | ||
719 | xgene_enet_free_irq(ndev); | ||
720 | xgene_enet_process_ring(pdata->rx_ring, -1); | ||
721 | |||
722 | mac_ops->tx_disable(pdata); | 726 | mac_ops->tx_disable(pdata); |
723 | mac_ops->rx_disable(pdata); | 727 | mac_ops->rx_disable(pdata); |
724 | 728 | ||
729 | xgene_enet_free_irq(ndev); | ||
730 | xgene_enet_napi_disable(pdata); | ||
731 | xgene_enet_process_ring(pdata->rx_ring, -1); | ||
732 | |||
725 | return 0; | 733 | return 0; |
726 | } | 734 | } |
727 | 735 | ||
@@ -1033,9 +1041,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
1033 | pdata->tx_ring->cp_ring = cp_ring; | 1041 | pdata->tx_ring->cp_ring = cp_ring; |
1034 | pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); | 1042 | pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); |
1035 | 1043 | ||
1036 | pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2; | 1044 | pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128; |
1037 | pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2; | ||
1038 | pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2; | ||
1039 | 1045 | ||
1040 | return 0; | 1046 | return 0; |
1041 | 1047 | ||
@@ -1474,15 +1480,15 @@ static int xgene_enet_probe(struct platform_device *pdev)
1474 | } | 1480 | } |
1475 | ndev->hw_features = ndev->features; | 1481 | ndev->hw_features = ndev->features; |
1476 | 1482 | ||
1477 | ret = register_netdev(ndev); | 1483 | ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); |
1478 | if (ret) { | 1484 | if (ret) { |
1479 | netdev_err(ndev, "Failed to register netdev\n"); | 1485 | netdev_err(ndev, "No usable DMA configuration\n"); |
1480 | goto err; | 1486 | goto err; |
1481 | } | 1487 | } |
1482 | 1488 | ||
1483 | ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); | 1489 | ret = register_netdev(ndev); |
1484 | if (ret) { | 1490 | if (ret) { |
1485 | netdev_err(ndev, "No usable DMA configuration\n"); | 1491 | netdev_err(ndev, "Failed to register netdev\n"); |
1486 | goto err; | 1492 | goto err; |
1487 | } | 1493 | } |
1488 | 1494 | ||
@@ -1490,14 +1496,17 @@ static int xgene_enet_probe(struct platform_device *pdev)
1490 | if (ret) | 1496 | if (ret) |
1491 | goto err; | 1497 | goto err; |
1492 | 1498 | ||
1493 | xgene_enet_napi_add(pdata); | ||
1494 | mac_ops = pdata->mac_ops; | 1499 | mac_ops = pdata->mac_ops; |
1495 | if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) | 1500 | if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { |
1496 | ret = xgene_enet_mdio_config(pdata); | 1501 | ret = xgene_enet_mdio_config(pdata); |
1497 | else | 1502 | if (ret) |
1503 | goto err; | ||
1504 | } else { | ||
1498 | INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); | 1505 | INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); |
1506 | } | ||
1499 | 1507 | ||
1500 | return ret; | 1508 | xgene_enet_napi_add(pdata); |
1509 | return 0; | ||
1501 | err: | 1510 | err: |
1502 | unregister_netdev(ndev); | 1511 | unregister_netdev(ndev); |
1503 | free_netdev(ndev); | 1512 | free_netdev(ndev); |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index a6e56b88c0a0..1aa72c787f8d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -155,11 +155,11 @@ struct xgene_enet_pdata {
155 | enum xgene_enet_id enet_id; | 155 | enum xgene_enet_id enet_id; |
156 | struct xgene_enet_desc_ring *tx_ring; | 156 | struct xgene_enet_desc_ring *tx_ring; |
157 | struct xgene_enet_desc_ring *rx_ring; | 157 | struct xgene_enet_desc_ring *rx_ring; |
158 | u16 tx_level; | ||
159 | u16 txc_level; | ||
158 | char *dev_name; | 160 | char *dev_name; |
159 | u32 rx_buff_cnt; | 161 | u32 rx_buff_cnt; |
160 | u32 tx_qcnt_hi; | 162 | u32 tx_qcnt_hi; |
161 | u32 cp_qcnt_hi; | ||
162 | u32 cp_qcnt_low; | ||
163 | u32 rx_irq; | 163 | u32 rx_irq; |
164 | u32 txc_irq; | 164 | u32 txc_irq; |
165 | u8 cq_cnt; | 165 | u8 cq_cnt; |
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c8af3ce3ea38..bd377a6b067d 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1534,6 +1534,8 @@ static const struct pci_device_id alx_pci_tbl[] = {
1534 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, | 1534 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, |
1535 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200), | 1535 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200), |
1536 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, | 1536 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, |
1537 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400), | ||
1538 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, | ||
1537 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), | 1539 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), |
1538 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, | 1540 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, |
1539 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, | 1541 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, |
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
index af006b44b2a6..0959e6824cb6 100644
--- a/drivers/net/ethernet/atheros/alx/reg.h
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -37,6 +37,7 @@
37 | 37 | ||
38 | #define ALX_DEV_ID_AR8161 0x1091 | 38 | #define ALX_DEV_ID_AR8161 0x1091 |
39 | #define ALX_DEV_ID_E2200 0xe091 | 39 | #define ALX_DEV_ID_E2200 0xe091 |
40 | #define ALX_DEV_ID_E2400 0xe0a1 | ||
40 | #define ALX_DEV_ID_AR8162 0x1090 | 41 | #define ALX_DEV_ID_AR8162 0x1090 |
41 | #define ALX_DEV_ID_AR8171 0x10A1 | 42 | #define ALX_DEV_ID_AR8171 0x10A1 |
42 | #define ALX_DEV_ID_AR8172 0x10A0 | 43 | #define ALX_DEV_ID_AR8172 0x10A0 |
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 2795d6db10e1..8b5988e210d5 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1016,13 +1016,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
1016 | sizeof(struct atl1c_recv_ret_status) * rx_desc_count + | 1016 | sizeof(struct atl1c_recv_ret_status) * rx_desc_count + |
1017 | 8 * 4; | 1017 | 8 * 4; |
1018 | 1018 | ||
1019 | ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, | 1019 | ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size, |
1020 | &ring_header->dma); | 1020 | &ring_header->dma, GFP_KERNEL); |
1021 | if (unlikely(!ring_header->desc)) { | 1021 | if (unlikely(!ring_header->desc)) { |
1022 | dev_err(&pdev->dev, "pci_alloc_consistend failed\n"); | 1022 | dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); |
1023 | goto err_nomem; | 1023 | goto err_nomem; |
1024 | } | 1024 | } |
1025 | memset(ring_header->desc, 0, ring_header->size); | ||
1026 | /* init TPD ring */ | 1025 | /* init TPD ring */ |
1027 | 1026 | ||
1028 | tpd_ring[0].dma = roundup(ring_header->dma, 8); | 1027 | tpd_ring[0].dma = roundup(ring_header->dma, 8); |
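The atl1c hunk above folds an allocate-then-memset() pair into a single
call to the zeroing coherent allocator. A hedged kernel-context sketch of
the before/after pattern (the helper names here are illustrative, not
atl1c's):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>
	#include <linux/string.h>

	/* Old style: legacy PCI wrapper, caller zeroes the buffer */
	static void *ring_alloc_old(struct pci_dev *pdev, size_t size,
				    dma_addr_t *dma)
	{
		void *desc = pci_alloc_consistent(pdev, size, dma);

		if (desc)
			memset(desc, 0, size);
		return desc;
	}

	/* New style: zeroed coherent memory in one call, with explicit
	 * GFP flags and the generic struct device, so the same code
	 * also works for non-PCI buses.
	 */
	static void *ring_alloc_new(struct pci_dev *pdev, size_t size,
				    dma_addr_t *dma)
	{
		return dma_zalloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
	}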
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
new file mode 100644
index 000000000000..8ba7f8ff3434
--- /dev/null
+++ b/drivers/net/ethernet/aurora/Kconfig
@@ -0,0 +1,21 @@
1 | config NET_VENDOR_AURORA | ||
2 | bool "Aurora VLSI devices" | ||
3 | help | ||
4 | If you have a network (Ethernet) device belonging to this class, | ||
5 | say Y. | ||
6 | |||
7 | Note that the answer to this question doesn't directly affect the | ||
8 | kernel: saying N will just cause the configurator to skip all | ||
9 | questions about Aurora devices. If you say Y, you will be asked | ||
10 | for your specific device in the following questions. | ||
11 | |||
12 | if NET_VENDOR_AURORA | ||
13 | |||
14 | config AURORA_NB8800 | ||
15 | tristate "Aurora AU-NB8800 support" | ||
16 | depends on HAS_DMA | ||
17 | select PHYLIB | ||
18 | help | ||
19 | Support for the AU-NB8800 gigabit Ethernet controller. | ||
20 | |||
21 | endif | ||
diff --git a/drivers/net/ethernet/aurora/Makefile b/drivers/net/ethernet/aurora/Makefile
new file mode 100644
index 000000000000..6cb528a2fc26
--- /dev/null
+++ b/drivers/net/ethernet/aurora/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_AURORA_NB8800) += nb8800.o
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
new file mode 100644
index 000000000000..ecc4a334c507
--- /dev/null
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -0,0 +1,1552 @@
1 | /* | ||
2 | * Copyright (C) 2015 Mans Rullgard <mans@mansr.com> | ||
3 | * | ||
4 | * Mostly rewritten, based on driver from Sigma Designs. Original | ||
5 | * copyright notice below. | ||
6 | * | ||
7 | * | ||
8 | * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac. | ||
9 | * | ||
10 | * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | */ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/etherdevice.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/ethtool.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/platform_device.h> | ||
29 | #include <linux/of_device.h> | ||
30 | #include <linux/of_mdio.h> | ||
31 | #include <linux/of_net.h> | ||
32 | #include <linux/dma-mapping.h> | ||
33 | #include <linux/phy.h> | ||
34 | #include <linux/cache.h> | ||
35 | #include <linux/jiffies.h> | ||
36 | #include <linux/io.h> | ||
37 | #include <linux/iopoll.h> | ||
38 | #include <asm/barrier.h> | ||
39 | |||
40 | #include "nb8800.h" | ||
41 | |||
42 | static void nb8800_tx_done(struct net_device *dev); | ||
43 | static int nb8800_dma_stop(struct net_device *dev); | ||
44 | |||
45 | static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg) | ||
46 | { | ||
47 | return readb_relaxed(priv->base + reg); | ||
48 | } | ||
49 | |||
50 | static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg) | ||
51 | { | ||
52 | return readl_relaxed(priv->base + reg); | ||
53 | } | ||
54 | |||
55 | static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val) | ||
56 | { | ||
57 | writeb_relaxed(val, priv->base + reg); | ||
58 | } | ||
59 | |||
60 | static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val) | ||
61 | { | ||
62 | writew_relaxed(val, priv->base + reg); | ||
63 | } | ||
64 | |||
65 | static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val) | ||
66 | { | ||
67 | writel_relaxed(val, priv->base + reg); | ||
68 | } | ||
69 | |||
70 | static inline void nb8800_maskb(struct nb8800_priv *priv, int reg, | ||
71 | u32 mask, u32 val) | ||
72 | { | ||
73 | u32 old = nb8800_readb(priv, reg); | ||
74 | u32 new = (old & ~mask) | (val & mask); | ||
75 | |||
76 | if (new != old) | ||
77 | nb8800_writeb(priv, reg, new); | ||
78 | } | ||
79 | |||
80 | static inline void nb8800_maskl(struct nb8800_priv *priv, int reg, | ||
81 | u32 mask, u32 val) | ||
82 | { | ||
83 | u32 old = nb8800_readl(priv, reg); | ||
84 | u32 new = (old & ~mask) | (val & mask); | ||
85 | |||
86 | if (new != old) | ||
87 | nb8800_writel(priv, reg, new); | ||
88 | } | ||
89 | |||
90 | static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits, | ||
91 | bool set) | ||
92 | { | ||
93 | nb8800_maskb(priv, reg, bits, set ? bits : 0); | ||
94 | } | ||
95 | |||
96 | static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits) | ||
97 | { | ||
98 | nb8800_maskb(priv, reg, bits, bits); | ||
99 | } | ||
100 | |||
101 | static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits) | ||
102 | { | ||
103 | nb8800_maskb(priv, reg, bits, 0); | ||
104 | } | ||
105 | |||
106 | static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits, | ||
107 | bool set) | ||
108 | { | ||
109 | nb8800_maskl(priv, reg, bits, set ? bits : 0); | ||
110 | } | ||
111 | |||
112 | static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits) | ||
113 | { | ||
114 | nb8800_maskl(priv, reg, bits, bits); | ||
115 | } | ||
116 | |||
117 | static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits) | ||
118 | { | ||
119 | nb8800_maskl(priv, reg, bits, 0); | ||
120 | } | ||
121 | |||
122 | static int nb8800_mdio_wait(struct mii_bus *bus) | ||
123 | { | ||
124 | struct nb8800_priv *priv = bus->priv; | ||
125 | u32 val; | ||
126 | |||
127 | return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD, | ||
128 | val, !(val & MDIO_CMD_GO), 1, 1000); | ||
129 | } | ||
130 | |||
131 | static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd) | ||
132 | { | ||
133 | struct nb8800_priv *priv = bus->priv; | ||
134 | int err; | ||
135 | |||
136 | err = nb8800_mdio_wait(bus); | ||
137 | if (err) | ||
138 | return err; | ||
139 | |||
140 | nb8800_writel(priv, NB8800_MDIO_CMD, cmd); | ||
141 | udelay(10); | ||
142 | nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO); | ||
143 | |||
144 | return nb8800_mdio_wait(bus); | ||
145 | } | ||
146 | |||
147 | static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg) | ||
148 | { | ||
149 | struct nb8800_priv *priv = bus->priv; | ||
150 | u32 val; | ||
151 | int err; | ||
152 | |||
153 | err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg)); | ||
154 | if (err) | ||
155 | return err; | ||
156 | |||
157 | val = nb8800_readl(priv, NB8800_MDIO_STS); | ||
158 | if (val & MDIO_STS_ERR) | ||
159 | return 0xffff; | ||
160 | |||
161 | return val & 0xffff; | ||
162 | } | ||
163 | |||
164 | static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) | ||
165 | { | ||
166 | u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) | | ||
167 | MDIO_CMD_DATA(val) | MDIO_CMD_WR; | ||
168 | |||
169 | return nb8800_mdio_cmd(bus, cmd); | ||
170 | } | ||
171 | |||
172 | static void nb8800_mac_tx(struct net_device *dev, bool enable) | ||
173 | { | ||
174 | struct nb8800_priv *priv = netdev_priv(dev); | ||
175 | |||
176 | while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN) | ||
177 | cpu_relax(); | ||
178 | |||
179 | nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable); | ||
180 | } | ||
181 | |||
182 | static void nb8800_mac_rx(struct net_device *dev, bool enable) | ||
183 | { | ||
184 | nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable); | ||
185 | } | ||
186 | |||
187 | static void nb8800_mac_af(struct net_device *dev, bool enable) | ||
188 | { | ||
189 | nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable); | ||
190 | } | ||
191 | |||
192 | static void nb8800_start_rx(struct net_device *dev) | ||
193 | { | ||
194 | nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN); | ||
195 | } | ||
196 | |||
197 | static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi) | ||
198 | { | ||
199 | struct nb8800_priv *priv = netdev_priv(dev); | ||
200 | struct nb8800_rx_desc *rxd = &priv->rx_descs[i]; | ||
201 | struct nb8800_rx_buf *rxb = &priv->rx_bufs[i]; | ||
202 | int size = L1_CACHE_ALIGN(RX_BUF_SIZE); | ||
203 | dma_addr_t dma_addr; | ||
204 | struct page *page; | ||
205 | unsigned long offset; | ||
206 | void *data; | ||
207 | |||
208 | data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size); | ||
209 | if (!data) | ||
210 | return -ENOMEM; | ||
211 | |||
212 | page = virt_to_head_page(data); | ||
213 | offset = data - page_address(page); | ||
214 | |||
215 | dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE, | ||
216 | DMA_FROM_DEVICE); | ||
217 | |||
218 | if (dma_mapping_error(&dev->dev, dma_addr)) { | ||
219 | skb_free_frag(data); | ||
220 | return -ENOMEM; | ||
221 | } | ||
222 | |||
223 | rxb->page = page; | ||
224 | rxb->offset = offset; | ||
225 | rxd->desc.s_addr = dma_addr; | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | static void nb8800_receive(struct net_device *dev, unsigned int i, | ||
231 | unsigned int len) | ||
232 | { | ||
233 | struct nb8800_priv *priv = netdev_priv(dev); | ||
234 | struct nb8800_rx_desc *rxd = &priv->rx_descs[i]; | ||
235 | struct page *page = priv->rx_bufs[i].page; | ||
236 | int offset = priv->rx_bufs[i].offset; | ||
237 | void *data = page_address(page) + offset; | ||
238 | dma_addr_t dma = rxd->desc.s_addr; | ||
239 | struct sk_buff *skb; | ||
240 | unsigned int size; | ||
241 | int err; | ||
242 | |||
243 | size = len <= RX_COPYBREAK ? len : RX_COPYHDR; | ||
244 | |||
245 | skb = napi_alloc_skb(&priv->napi, size); | ||
246 | if (!skb) { | ||
247 | netdev_err(dev, "rx skb allocation failed\n"); | ||
248 | dev->stats.rx_dropped++; | ||
249 | return; | ||
250 | } | ||
251 | |||
252 | if (len <= RX_COPYBREAK) { | ||
253 | dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE); | ||
254 | memcpy(skb_put(skb, len), data, len); | ||
255 | dma_sync_single_for_device(&dev->dev, dma, len, | ||
256 | DMA_FROM_DEVICE); | ||
257 | } else { | ||
258 | err = nb8800_alloc_rx(dev, i, true); | ||
259 | if (err) { | ||
260 | netdev_err(dev, "rx buffer allocation failed\n"); | ||
261 | dev->stats.rx_dropped++; | ||
262 | return; | ||
263 | } | ||
264 | |||
265 | dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE); | ||
266 | memcpy(skb_put(skb, RX_COPYHDR), data, RX_COPYHDR); | ||
267 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, | ||
268 | offset + RX_COPYHDR, len - RX_COPYHDR, | ||
269 | RX_BUF_SIZE); | ||
270 | } | ||
271 | |||
272 | skb->protocol = eth_type_trans(skb, dev); | ||
273 | napi_gro_receive(&priv->napi, skb); | ||
274 | } | ||
275 | |||
276 | static void nb8800_rx_error(struct net_device *dev, u32 report) | ||
277 | { | ||
278 | if (report & RX_LENGTH_ERR) | ||
279 | dev->stats.rx_length_errors++; | ||
280 | |||
281 | if (report & RX_FCS_ERR) | ||
282 | dev->stats.rx_crc_errors++; | ||
283 | |||
284 | if (report & RX_FIFO_OVERRUN) | ||
285 | dev->stats.rx_fifo_errors++; | ||
286 | |||
287 | if (report & RX_ALIGNMENT_ERROR) | ||
288 | dev->stats.rx_frame_errors++; | ||
289 | |||
290 | dev->stats.rx_errors++; | ||
291 | } | ||
292 | |||
293 | static int nb8800_poll(struct napi_struct *napi, int budget) | ||
294 | { | ||
295 | struct net_device *dev = napi->dev; | ||
296 | struct nb8800_priv *priv = netdev_priv(dev); | ||
297 | struct nb8800_rx_desc *rxd; | ||
298 | unsigned int last = priv->rx_eoc; | ||
299 | unsigned int next; | ||
300 | int work = 0; | ||
301 | |||
302 | nb8800_tx_done(dev); | ||
303 | |||
304 | again: | ||
305 | while (work < budget) { | ||
306 | struct nb8800_rx_buf *rxb; | ||
307 | unsigned int len; | ||
308 | |||
309 | next = (last + 1) % RX_DESC_COUNT; | ||
310 | |||
311 | rxb = &priv->rx_bufs[next]; | ||
312 | rxd = &priv->rx_descs[next]; | ||
313 | |||
314 | if (!rxd->report) | ||
315 | break; | ||
316 | |||
317 | len = RX_BYTES_TRANSFERRED(rxd->report); | ||
318 | |||
319 | if (IS_RX_ERROR(rxd->report)) | ||
320 | nb8800_rx_error(dev, rxd->report); | ||
321 | else | ||
322 | nb8800_receive(dev, next, len); | ||
323 | |||
324 | dev->stats.rx_packets++; | ||
325 | dev->stats.rx_bytes += len; | ||
326 | |||
327 | if (rxd->report & RX_MULTICAST_PKT) | ||
328 | dev->stats.multicast++; | ||
329 | |||
330 | rxd->report = 0; | ||
331 | last = next; | ||
332 | work++; | ||
333 | } | ||
334 | |||
335 | if (work) { | ||
336 | priv->rx_descs[last].desc.config |= DESC_EOC; | ||
337 | wmb(); /* ensure new EOC is written before clearing old */ | ||
338 | priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC; | ||
339 | priv->rx_eoc = last; | ||
340 | nb8800_start_rx(dev); | ||
341 | } | ||
342 | |||
343 | if (work < budget) { | ||
344 | nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq); | ||
345 | |||
346 | /* If a packet arrived after we last checked but | ||
347 | * before writing RX_ITR, the interrupt will be | ||
348 | * delayed, so we retrieve it now. | ||
349 | */ | ||
350 | if (priv->rx_descs[next].report) | ||
351 | goto again; | ||
352 | |||
353 | napi_complete_done(napi, work); | ||
354 | } | ||
355 | |||
356 | return work; | ||
357 | } | ||
358 | |||
359 | static void __nb8800_tx_dma_start(struct net_device *dev) | ||
360 | { | ||
361 | struct nb8800_priv *priv = netdev_priv(dev); | ||
362 | struct nb8800_tx_buf *txb; | ||
363 | u32 txc_cr; | ||
364 | |||
365 | txb = &priv->tx_bufs[priv->tx_queue]; | ||
366 | if (!txb->ready) | ||
367 | return; | ||
368 | |||
369 | txc_cr = nb8800_readl(priv, NB8800_TXC_CR); | ||
370 | if (txc_cr & TCR_EN) | ||
371 | return; | ||
372 | |||
373 | nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc); | ||
374 | wmb(); /* ensure desc addr is written before starting DMA */ | ||
375 | nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN); | ||
376 | |||
377 | priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT; | ||
378 | } | ||
379 | |||
380 | static void nb8800_tx_dma_start(struct net_device *dev) | ||
381 | { | ||
382 | struct nb8800_priv *priv = netdev_priv(dev); | ||
383 | |||
384 | spin_lock_irq(&priv->tx_lock); | ||
385 | __nb8800_tx_dma_start(dev); | ||
386 | spin_unlock_irq(&priv->tx_lock); | ||
387 | } | ||
388 | |||
389 | static void nb8800_tx_dma_start_irq(struct net_device *dev) | ||
390 | { | ||
391 | struct nb8800_priv *priv = netdev_priv(dev); | ||
392 | |||
393 | spin_lock(&priv->tx_lock); | ||
394 | __nb8800_tx_dma_start(dev); | ||
395 | spin_unlock(&priv->tx_lock); | ||
396 | } | ||
397 | |||
398 | static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev) | ||
399 | { | ||
400 | struct nb8800_priv *priv = netdev_priv(dev); | ||
401 | struct nb8800_tx_desc *txd; | ||
402 | struct nb8800_tx_buf *txb; | ||
403 | struct nb8800_dma_desc *desc; | ||
404 | dma_addr_t dma_addr; | ||
405 | unsigned int dma_len; | ||
406 | unsigned int align; | ||
407 | unsigned int next; | ||
408 | |||
409 | if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) { | ||
410 | netif_stop_queue(dev); | ||
411 | return NETDEV_TX_BUSY; | ||
412 | } | ||
413 | |||
414 | align = (8 - (uintptr_t)skb->data) & 7; | ||
415 | |||
416 | dma_len = skb->len - align; | ||
417 | dma_addr = dma_map_single(&dev->dev, skb->data + align, | ||
418 | dma_len, DMA_TO_DEVICE); | ||
419 | |||
420 | if (dma_mapping_error(&dev->dev, dma_addr)) { | ||
421 | netdev_err(dev, "tx dma mapping error\n"); | ||
422 | kfree_skb(skb); | ||
423 | dev->stats.tx_dropped++; | ||
424 | return NETDEV_TX_OK; | ||
425 | } | ||
426 | |||
427 | if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) { | ||
428 | netif_stop_queue(dev); | ||
429 | skb->xmit_more = 0; | ||
430 | } | ||
431 | |||
432 | next = priv->tx_next; | ||
433 | txb = &priv->tx_bufs[next]; | ||
434 | txd = &priv->tx_descs[next]; | ||
435 | desc = &txd->desc[0]; | ||
436 | |||
437 | next = (next + 1) % TX_DESC_COUNT; | ||
438 | |||
439 | if (align) { | ||
440 | memcpy(txd->buf, skb->data, align); | ||
441 | |||
442 | desc->s_addr = | ||
443 | txb->dma_desc + offsetof(struct nb8800_tx_desc, buf); | ||
444 | desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]); | ||
445 | desc->config = DESC_BTS(2) | DESC_DS | align; | ||
446 | |||
447 | desc++; | ||
448 | } | ||
449 | |||
450 | desc->s_addr = dma_addr; | ||
451 | desc->n_addr = priv->tx_bufs[next].dma_desc; | ||
452 | desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len; | ||
453 | |||
454 | if (!skb->xmit_more) | ||
455 | desc->config |= DESC_EOC; | ||
456 | |||
457 | txb->skb = skb; | ||
458 | txb->dma_addr = dma_addr; | ||
459 | txb->dma_len = dma_len; | ||
460 | |||
461 | if (!priv->tx_chain) { | ||
462 | txb->chain_len = 1; | ||
463 | priv->tx_chain = txb; | ||
464 | } else { | ||
465 | priv->tx_chain->chain_len++; | ||
466 | } | ||
467 | |||
468 | netdev_sent_queue(dev, skb->len); | ||
469 | |||
470 | priv->tx_next = next; | ||
471 | |||
472 | if (!skb->xmit_more) { | ||
473 | smp_wmb(); | ||
474 | priv->tx_chain->ready = true; | ||
475 | priv->tx_chain = NULL; | ||
476 | nb8800_tx_dma_start(dev); | ||
477 | } | ||
478 | |||
479 | return NETDEV_TX_OK; | ||
480 | } | ||
481 | |||
482 | static void nb8800_tx_error(struct net_device *dev, u32 report) | ||
483 | { | ||
484 | if (report & TX_LATE_COLLISION) | ||
485 | dev->stats.collisions++; | ||
486 | |||
487 | if (report & TX_PACKET_DROPPED) | ||
488 | dev->stats.tx_dropped++; | ||
489 | |||
490 | if (report & TX_FIFO_UNDERRUN) | ||
491 | dev->stats.tx_fifo_errors++; | ||
492 | |||
493 | dev->stats.tx_errors++; | ||
494 | } | ||
495 | |||
496 | static void nb8800_tx_done(struct net_device *dev) | ||
497 | { | ||
498 | struct nb8800_priv *priv = netdev_priv(dev); | ||
499 | unsigned int limit = priv->tx_next; | ||
500 | unsigned int done = priv->tx_done; | ||
501 | unsigned int packets = 0; | ||
502 | unsigned int len = 0; | ||
503 | |||
504 | while (done != limit) { | ||
505 | struct nb8800_tx_desc *txd = &priv->tx_descs[done]; | ||
506 | struct nb8800_tx_buf *txb = &priv->tx_bufs[done]; | ||
507 | struct sk_buff *skb; | ||
508 | |||
509 | if (!txd->report) | ||
510 | break; | ||
511 | |||
512 | skb = txb->skb; | ||
513 | len += skb->len; | ||
514 | |||
515 | dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len, | ||
516 | DMA_TO_DEVICE); | ||
517 | |||
518 | if (IS_TX_ERROR(txd->report)) { | ||
519 | nb8800_tx_error(dev, txd->report); | ||
520 | kfree_skb(skb); | ||
521 | } else { | ||
522 | consume_skb(skb); | ||
523 | } | ||
524 | |||
525 | dev->stats.tx_packets++; | ||
526 | dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report); | ||
527 | dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report); | ||
528 | |||
529 | txb->skb = NULL; | ||
530 | txb->ready = false; | ||
531 | txd->report = 0; | ||
532 | |||
533 | done = (done + 1) % TX_DESC_COUNT; | ||
534 | packets++; | ||
535 | } | ||
536 | |||
537 | if (packets) { | ||
538 | smp_mb__before_atomic(); | ||
539 | atomic_add(packets, &priv->tx_free); | ||
540 | netdev_completed_queue(dev, packets, len); | ||
541 | netif_wake_queue(dev); | ||
542 | priv->tx_done = done; | ||
543 | } | ||
544 | } | ||
545 | |||
546 | static irqreturn_t nb8800_irq(int irq, void *dev_id) | ||
547 | { | ||
548 | struct net_device *dev = dev_id; | ||
549 | struct nb8800_priv *priv = netdev_priv(dev); | ||
550 | irqreturn_t ret = IRQ_NONE; | ||
551 | u32 val; | ||
552 | |||
553 | /* tx interrupt */ | ||
554 | val = nb8800_readl(priv, NB8800_TXC_SR); | ||
555 | if (val) { | ||
556 | nb8800_writel(priv, NB8800_TXC_SR, val); | ||
557 | |||
558 | if (val & TSR_DI) | ||
559 | nb8800_tx_dma_start_irq(dev); | ||
560 | |||
561 | if (val & TSR_TI) | ||
562 | napi_schedule_irqoff(&priv->napi); | ||
563 | |||
564 | if (unlikely(val & TSR_DE)) | ||
565 | netdev_err(dev, "TX DMA error\n"); | ||
566 | |||
567 | /* should never happen with automatic status retrieval */ | ||
568 | if (unlikely(val & TSR_TO)) | ||
569 | netdev_err(dev, "TX Status FIFO overflow\n"); | ||
570 | |||
571 | ret = IRQ_HANDLED; | ||
572 | } | ||
573 | |||
574 | /* rx interrupt */ | ||
575 | val = nb8800_readl(priv, NB8800_RXC_SR); | ||
576 | if (val) { | ||
577 | nb8800_writel(priv, NB8800_RXC_SR, val); | ||
578 | |||
579 | if (likely(val & (RSR_RI | RSR_DI))) { | ||
580 | nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll); | ||
581 | napi_schedule_irqoff(&priv->napi); | ||
582 | } | ||
583 | |||
584 | if (unlikely(val & RSR_DE)) | ||
585 | netdev_err(dev, "RX DMA error\n"); | ||
586 | |||
587 | /* should never happen with automatic status retrieval */ | ||
588 | if (unlikely(val & RSR_RO)) | ||
589 | netdev_err(dev, "RX Status FIFO overflow\n"); | ||
590 | |||
591 | ret = IRQ_HANDLED; | ||
592 | } | ||
593 | |||
594 | return ret; | ||
595 | } | ||
596 | |||
597 | static void nb8800_mac_config(struct net_device *dev) | ||
598 | { | ||
599 | struct nb8800_priv *priv = netdev_priv(dev); | ||
600 | bool gigabit = priv->speed == SPEED_1000; | ||
601 | u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE; | ||
602 | u32 mac_mode = 0; | ||
603 | u32 slot_time; | ||
604 | u32 phy_clk; | ||
605 | u32 ict; | ||
606 | |||
607 | if (!priv->duplex) | ||
608 | mac_mode |= HALF_DUPLEX; | ||
609 | |||
610 | if (gigabit) { | ||
611 | if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII) | ||
612 | mac_mode |= RGMII_MODE; | ||
613 | |||
614 | mac_mode |= GMAC_MODE; | ||
615 | phy_clk = 125000000; | ||
616 | |||
617 | /* Should be 512 but register is only 8 bits */ | ||
618 | slot_time = 255; | ||
619 | } else { | ||
620 | phy_clk = 25000000; | ||
621 | slot_time = 128; | ||
622 | } | ||
623 | |||
624 | ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk)); | ||
625 | |||
626 | nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict); | ||
627 | nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time); | ||
628 | nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode); | ||
629 | } | ||
630 | |||
631 | static void nb8800_pause_config(struct net_device *dev) | ||
632 | { | ||
633 | struct nb8800_priv *priv = netdev_priv(dev); | ||
634 | struct phy_device *phydev = priv->phydev; | ||
635 | u32 rxcr; | ||
636 | |||
637 | if (priv->pause_aneg) { | ||
638 | if (!phydev || !phydev->link) | ||
639 | return; | ||
640 | |||
641 | priv->pause_rx = phydev->pause; | ||
642 | priv->pause_tx = phydev->pause ^ phydev->asym_pause; | ||
643 | } | ||
644 | |||
645 | nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx); | ||
646 | |||
647 | rxcr = nb8800_readl(priv, NB8800_RXC_CR); | ||
648 | if (!!(rxcr & RCR_FL) == priv->pause_tx) | ||
649 | return; | ||
650 | |||
651 | if (netif_running(dev)) { | ||
652 | napi_disable(&priv->napi); | ||
653 | netif_tx_lock_bh(dev); | ||
654 | nb8800_dma_stop(dev); | ||
655 | nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx); | ||
656 | nb8800_start_rx(dev); | ||
657 | netif_tx_unlock_bh(dev); | ||
658 | napi_enable(&priv->napi); | ||
659 | } else { | ||
660 | nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx); | ||
661 | } | ||
662 | } | ||
663 | |||
664 | static void nb8800_link_reconfigure(struct net_device *dev) | ||
665 | { | ||
666 | struct nb8800_priv *priv = netdev_priv(dev); | ||
667 | struct phy_device *phydev = priv->phydev; | ||
668 | int change = 0; | ||
669 | |||
670 | if (phydev->link) { | ||
671 | if (phydev->speed != priv->speed) { | ||
672 | priv->speed = phydev->speed; | ||
673 | change = 1; | ||
674 | } | ||
675 | |||
676 | if (phydev->duplex != priv->duplex) { | ||
677 | priv->duplex = phydev->duplex; | ||
678 | change = 1; | ||
679 | } | ||
680 | |||
681 | if (change) | ||
682 | nb8800_mac_config(dev); | ||
683 | |||
684 | nb8800_pause_config(dev); | ||
685 | } | ||
686 | |||
687 | if (phydev->link != priv->link) { | ||
688 | priv->link = phydev->link; | ||
689 | change = 1; | ||
690 | } | ||
691 | |||
692 | if (change) | ||
693 | phy_print_status(priv->phydev); | ||
694 | } | ||
695 | |||
696 | static void nb8800_update_mac_addr(struct net_device *dev) | ||
697 | { | ||
698 | struct nb8800_priv *priv = netdev_priv(dev); | ||
699 | int i; | ||
700 | |||
701 | for (i = 0; i < ETH_ALEN; i++) | ||
702 | nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]); | ||
703 | |||
704 | for (i = 0; i < ETH_ALEN; i++) | ||
705 | nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]); | ||
706 | } | ||
707 | |||
708 | static int nb8800_set_mac_address(struct net_device *dev, void *addr) | ||
709 | { | ||
710 | struct sockaddr *sock = addr; | ||
711 | |||
712 | if (netif_running(dev)) | ||
713 | return -EBUSY; | ||
714 | |||
715 | ether_addr_copy(dev->dev_addr, sock->sa_data); | ||
716 | nb8800_update_mac_addr(dev); | ||
717 | |||
718 | return 0; | ||
719 | } | ||
720 | |||
721 | static void nb8800_mc_init(struct net_device *dev, int val) | ||
722 | { | ||
723 | struct nb8800_priv *priv = netdev_priv(dev); | ||
724 | |||
725 | nb8800_writeb(priv, NB8800_MC_INIT, val); | ||
726 | readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val, | ||
727 | 1, 1000); | ||
728 | } | ||
729 | |||
730 | static void nb8800_set_rx_mode(struct net_device *dev) | ||
731 | { | ||
732 | struct nb8800_priv *priv = netdev_priv(dev); | ||
733 | struct netdev_hw_addr *ha; | ||
734 | int i; | ||
735 | |||
736 | if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { | ||
737 | nb8800_mac_af(dev, false); | ||
738 | return; | ||
739 | } | ||
740 | |||
741 | nb8800_mac_af(dev, true); | ||
742 | nb8800_mc_init(dev, 0); | ||
743 | |||
744 | netdev_for_each_mc_addr(ha, dev) { | ||
745 | for (i = 0; i < ETH_ALEN; i++) | ||
746 | nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]); | ||
747 | |||
748 | nb8800_mc_init(dev, 0xff); | ||
749 | } | ||
750 | } | ||
751 | |||
752 | #define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc)) | ||
753 | #define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc)) | ||
754 | |||
755 | static void nb8800_dma_free(struct net_device *dev) | ||
756 | { | ||
757 | struct nb8800_priv *priv = netdev_priv(dev); | ||
758 | unsigned int i; | ||
759 | |||
760 | if (priv->rx_bufs) { | ||
761 | for (i = 0; i < RX_DESC_COUNT; i++) | ||
762 | if (priv->rx_bufs[i].page) | ||
763 | put_page(priv->rx_bufs[i].page); | ||
764 | |||
765 | kfree(priv->rx_bufs); | ||
766 | priv->rx_bufs = NULL; | ||
767 | } | ||
768 | |||
769 | if (priv->tx_bufs) { | ||
770 | for (i = 0; i < TX_DESC_COUNT; i++) | ||
771 | kfree_skb(priv->tx_bufs[i].skb); | ||
772 | |||
773 | kfree(priv->tx_bufs); | ||
774 | priv->tx_bufs = NULL; | ||
775 | } | ||
776 | |||
777 | if (priv->rx_descs) { | ||
778 | dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs, | ||
779 | priv->rx_desc_dma); | ||
780 | priv->rx_descs = NULL; | ||
781 | } | ||
782 | |||
783 | if (priv->tx_descs) { | ||
784 | dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs, | ||
785 | priv->tx_desc_dma); | ||
786 | priv->tx_descs = NULL; | ||
787 | } | ||
788 | } | ||
789 | |||
790 | static void nb8800_dma_reset(struct net_device *dev) | ||
791 | { | ||
792 | struct nb8800_priv *priv = netdev_priv(dev); | ||
793 | struct nb8800_rx_desc *rxd; | ||
794 | struct nb8800_tx_desc *txd; | ||
795 | unsigned int i; | ||
796 | |||
797 | for (i = 0; i < RX_DESC_COUNT; i++) { | ||
798 | dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd); | ||
799 | |||
800 | rxd = &priv->rx_descs[i]; | ||
801 | rxd->desc.n_addr = rx_dma + sizeof(*rxd); | ||
802 | rxd->desc.r_addr = | ||
803 | rx_dma + offsetof(struct nb8800_rx_desc, report); | ||
804 | rxd->desc.config = priv->rx_dma_config; | ||
805 | rxd->report = 0; | ||
806 | } | ||
807 | |||
808 | rxd->desc.n_addr = priv->rx_desc_dma; | ||
809 | rxd->desc.config |= DESC_EOC; | ||
810 | |||
811 | priv->rx_eoc = RX_DESC_COUNT - 1; | ||
812 | |||
813 | for (i = 0; i < TX_DESC_COUNT; i++) { | ||
814 | struct nb8800_tx_buf *txb = &priv->tx_bufs[i]; | ||
815 | dma_addr_t r_dma = txb->dma_desc + | ||
816 | offsetof(struct nb8800_tx_desc, report); | ||
817 | |||
818 | txd = &priv->tx_descs[i]; | ||
819 | txd->desc[0].r_addr = r_dma; | ||
820 | txd->desc[1].r_addr = r_dma; | ||
821 | txd->report = 0; | ||
822 | } | ||
823 | |||
824 | priv->tx_next = 0; | ||
825 | priv->tx_queue = 0; | ||
826 | priv->tx_done = 0; | ||
827 | atomic_set(&priv->tx_free, TX_DESC_COUNT); | ||
828 | |||
829 | nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma); | ||
830 | |||
831 | wmb(); /* ensure all setup is written before starting */ | ||
832 | } | ||
833 | |||
834 | static int nb8800_dma_init(struct net_device *dev) | ||
835 | { | ||
836 | struct nb8800_priv *priv = netdev_priv(dev); | ||
837 | unsigned int n_rx = RX_DESC_COUNT; | ||
838 | unsigned int n_tx = TX_DESC_COUNT; | ||
839 | unsigned int i; | ||
840 | int err; | ||
841 | |||
842 | priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE, | ||
843 | &priv->rx_desc_dma, GFP_KERNEL); | ||
844 | if (!priv->rx_descs) | ||
845 | goto err_out; | ||
846 | |||
847 | priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL); | ||
848 | if (!priv->rx_bufs) | ||
849 | goto err_out; | ||
850 | |||
851 | for (i = 0; i < n_rx; i++) { | ||
852 | err = nb8800_alloc_rx(dev, i, false); | ||
853 | if (err) | ||
854 | goto err_out; | ||
855 | } | ||
856 | |||
857 | priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE, | ||
858 | &priv->tx_desc_dma, GFP_KERNEL); | ||
859 | if (!priv->tx_descs) | ||
860 | goto err_out; | ||
861 | |||
862 | priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL); | ||
863 | if (!priv->tx_bufs) | ||
864 | goto err_out; | ||
865 | |||
866 | for (i = 0; i < n_tx; i++) | ||
867 | priv->tx_bufs[i].dma_desc = | ||
868 | priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc); | ||
869 | |||
870 | nb8800_dma_reset(dev); | ||
871 | |||
872 | return 0; | ||
873 | |||
874 | err_out: | ||
875 | nb8800_dma_free(dev); | ||
876 | |||
877 | return -ENOMEM; | ||
878 | } | ||
879 | |||
880 | static int nb8800_dma_stop(struct net_device *dev) | ||
881 | { | ||
882 | struct nb8800_priv *priv = netdev_priv(dev); | ||
883 | struct nb8800_tx_buf *txb = &priv->tx_bufs[0]; | ||
884 | struct nb8800_tx_desc *txd = &priv->tx_descs[0]; | ||
885 | int retry = 5; | ||
886 | u32 txcr; | ||
887 | u32 rxcr; | ||
888 | int err; | ||
889 | unsigned int i; | ||
890 | |||
891 | /* wait for tx to finish */ | ||
892 | err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr, | ||
893 | !(txcr & TCR_EN) && | ||
894 | priv->tx_done == priv->tx_next, | ||
895 | 1000, 1000000); | ||
896 | if (err) | ||
897 | return err; | ||
898 | |||
899 | /* The rx DMA only stops if it reaches the end of chain. | ||
900 | * To make this happen, we set the EOC flag on all rx | ||
901 | * descriptors, put the device in loopback mode, and send | ||
902 | * a few dummy frames. The interrupt handler will ignore | ||
903 | * these since NAPI is disabled and no real frames are in | ||
904 | * the tx queue. | ||
905 | */ | ||
906 | |||
907 | for (i = 0; i < RX_DESC_COUNT; i++) | ||
908 | priv->rx_descs[i].desc.config |= DESC_EOC; | ||
909 | |||
910 | txd->desc[0].s_addr = | ||
911 | txb->dma_desc + offsetof(struct nb8800_tx_desc, buf); | ||
912 | txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8; | ||
913 | memset(txd->buf, 0, sizeof(txd->buf)); | ||
914 | |||
915 | nb8800_mac_af(dev, false); | ||
916 | nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN); | ||
917 | |||
918 | do { | ||
919 | nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc); | ||
920 | wmb(); | ||
921 | nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN); | ||
922 | |||
923 | err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR, | ||
924 | rxcr, !(rxcr & RCR_EN), | ||
925 | 1000, 100000); | ||
926 | } while (err && --retry); | ||
927 | |||
928 | nb8800_mac_af(dev, true); | ||
929 | nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN); | ||
930 | nb8800_dma_reset(dev); | ||
931 | |||
932 | return retry ? 0 : -ETIMEDOUT; | ||
933 | } | ||
934 | |||
935 | static void nb8800_pause_adv(struct net_device *dev) | ||
936 | { | ||
937 | struct nb8800_priv *priv = netdev_priv(dev); | ||
938 | u32 adv = 0; | ||
939 | |||
940 | if (!priv->phydev) | ||
941 | return; | ||
942 | |||
943 | if (priv->pause_rx) | ||
944 | adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; | ||
945 | if (priv->pause_tx) | ||
946 | adv ^= ADVERTISED_Asym_Pause; | ||
947 | |||
948 | priv->phydev->supported |= adv; | ||
949 | priv->phydev->advertising |= adv; | ||
950 | } | ||
951 | |||
952 | static int nb8800_open(struct net_device *dev) | ||
953 | { | ||
954 | struct nb8800_priv *priv = netdev_priv(dev); | ||
955 | int err; | ||
956 | |||
957 | /* clear any pending interrupts */ | ||
958 | nb8800_writel(priv, NB8800_RXC_SR, 0xf); | ||
959 | nb8800_writel(priv, NB8800_TXC_SR, 0xf); | ||
960 | |||
961 | err = nb8800_dma_init(dev); | ||
962 | if (err) | ||
963 | return err; | ||
964 | |||
965 | err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev); | ||
966 | if (err) | ||
967 | goto err_free_dma; | ||
968 | |||
969 | nb8800_mac_rx(dev, true); | ||
970 | nb8800_mac_tx(dev, true); | ||
971 | |||
972 | priv->phydev = of_phy_connect(dev, priv->phy_node, | ||
973 | nb8800_link_reconfigure, 0, | ||
974 | priv->phy_mode); | ||
975 | if (!priv->phydev) | ||
976 | goto err_free_irq; | ||
977 | |||
978 | nb8800_pause_adv(dev); | ||
979 | |||
980 | netdev_reset_queue(dev); | ||
981 | napi_enable(&priv->napi); | ||
982 | netif_start_queue(dev); | ||
983 | |||
984 | nb8800_start_rx(dev); | ||
985 | phy_start(priv->phydev); | ||
986 | |||
987 | return 0; | ||
988 | |||
989 | err_free_irq: | ||
990 | free_irq(dev->irq, dev); | ||
991 | err_free_dma: | ||
992 | nb8800_dma_free(dev); | ||
993 | |||
994 | return err; | ||
995 | } | ||
996 | |||
997 | static int nb8800_stop(struct net_device *dev) | ||
998 | { | ||
999 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1000 | |||
1001 | phy_stop(priv->phydev); | ||
1002 | |||
1003 | netif_stop_queue(dev); | ||
1004 | napi_disable(&priv->napi); | ||
1005 | |||
1006 | nb8800_dma_stop(dev); | ||
1007 | nb8800_mac_rx(dev, false); | ||
1008 | nb8800_mac_tx(dev, false); | ||
1009 | |||
1010 | phy_disconnect(priv->phydev); | ||
1011 | priv->phydev = NULL; | ||
1012 | |||
1013 | free_irq(dev->irq, dev); | ||
1014 | |||
1015 | nb8800_dma_free(dev); | ||
1016 | |||
1017 | return 0; | ||
1018 | } | ||
1019 | |||
1020 | static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
1021 | { | ||
1022 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1023 | |||
1024 | return phy_mii_ioctl(priv->phydev, rq, cmd); | ||
1025 | } | ||
1026 | |||
1027 | static const struct net_device_ops nb8800_netdev_ops = { | ||
1028 | .ndo_open = nb8800_open, | ||
1029 | .ndo_stop = nb8800_stop, | ||
1030 | .ndo_start_xmit = nb8800_xmit, | ||
1031 | .ndo_set_mac_address = nb8800_set_mac_address, | ||
1032 | .ndo_set_rx_mode = nb8800_set_rx_mode, | ||
1033 | .ndo_do_ioctl = nb8800_ioctl, | ||
1034 | .ndo_change_mtu = eth_change_mtu, | ||
1035 | .ndo_validate_addr = eth_validate_addr, | ||
1036 | }; | ||
1037 | |||
1038 | static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1039 | { | ||
1040 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1041 | |||
1042 | if (!priv->phydev) | ||
1043 | return -ENODEV; | ||
1044 | |||
1045 | return phy_ethtool_gset(priv->phydev, cmd); | ||
1046 | } | ||
1047 | |||
1048 | static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1049 | { | ||
1050 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1051 | |||
1052 | if (!priv->phydev) | ||
1053 | return -ENODEV; | ||
1054 | |||
1055 | return phy_ethtool_sset(priv->phydev, cmd); | ||
1056 | } | ||
1057 | |||
1058 | static int nb8800_nway_reset(struct net_device *dev) | ||
1059 | { | ||
1060 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1061 | |||
1062 | if (!priv->phydev) | ||
1063 | return -ENODEV; | ||
1064 | |||
1065 | return genphy_restart_aneg(priv->phydev); | ||
1066 | } | ||
1067 | |||
1068 | static void nb8800_get_pauseparam(struct net_device *dev, | ||
1069 | struct ethtool_pauseparam *pp) | ||
1070 | { | ||
1071 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1072 | |||
1073 | pp->autoneg = priv->pause_aneg; | ||
1074 | pp->rx_pause = priv->pause_rx; | ||
1075 | pp->tx_pause = priv->pause_tx; | ||
1076 | } | ||
1077 | |||
1078 | static int nb8800_set_pauseparam(struct net_device *dev, | ||
1079 | struct ethtool_pauseparam *pp) | ||
1080 | { | ||
1081 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1082 | |||
1083 | priv->pause_aneg = pp->autoneg; | ||
1084 | priv->pause_rx = pp->rx_pause; | ||
1085 | priv->pause_tx = pp->tx_pause; | ||
1086 | |||
1087 | nb8800_pause_adv(dev); | ||
1088 | |||
1089 | if (!priv->pause_aneg) | ||
1090 | nb8800_pause_config(dev); | ||
1091 | else if (priv->phydev) | ||
1092 | phy_start_aneg(priv->phydev); | ||
1093 | |||
1094 | return 0; | ||
1095 | } | ||
1096 | |||
1097 | static const char nb8800_stats_names[][ETH_GSTRING_LEN] = { | ||
1098 | "rx_bytes_ok", | ||
1099 | "rx_frames_ok", | ||
1100 | "rx_undersize_frames", | ||
1101 | "rx_fragment_frames", | ||
1102 | "rx_64_byte_frames", | ||
1103 | "rx_127_byte_frames", | ||
1104 | "rx_255_byte_frames", | ||
1105 | "rx_511_byte_frames", | ||
1106 | "rx_1023_byte_frames", | ||
1107 | "rx_max_size_frames", | ||
1108 | "rx_oversize_frames", | ||
1109 | "rx_bad_fcs_frames", | ||
1110 | "rx_broadcast_frames", | ||
1111 | "rx_multicast_frames", | ||
1112 | "rx_control_frames", | ||
1113 | "rx_pause_frames", | ||
1114 | "rx_unsup_control_frames", | ||
1115 | "rx_align_error_frames", | ||
1116 | "rx_overrun_frames", | ||
1117 | "rx_jabber_frames", | ||
1118 | "rx_bytes", | ||
1119 | "rx_frames", | ||
1120 | |||
1121 | "tx_bytes_ok", | ||
1122 | "tx_frames_ok", | ||
1123 | "tx_64_byte_frames", | ||
1124 | "tx_127_byte_frames", | ||
1125 | "tx_255_byte_frames", | ||
1126 | "tx_511_byte_frames", | ||
1127 | "tx_1023_byte_frames", | ||
1128 | "tx_max_size_frames", | ||
1129 | "tx_oversize_frames", | ||
1130 | "tx_broadcast_frames", | ||
1131 | "tx_multicast_frames", | ||
1132 | "tx_control_frames", | ||
1133 | "tx_pause_frames", | ||
1134 | "tx_underrun_frames", | ||
1135 | "tx_single_collision_frames", | ||
1136 | "tx_multi_collision_frames", | ||
1137 | "tx_deferred_collision_frames", | ||
1138 | "tx_late_collision_frames", | ||
1139 | "tx_excessive_collision_frames", | ||
1140 | "tx_bytes", | ||
1141 | "tx_frames", | ||
1142 | "tx_collisions", | ||
1143 | }; | ||
1144 | |||
1145 | #define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names) | ||
1146 | |||
1147 | static int nb8800_get_sset_count(struct net_device *dev, int sset) | ||
1148 | { | ||
1149 | if (sset == ETH_SS_STATS) | ||
1150 | return NB8800_NUM_STATS; | ||
1151 | |||
1152 | return -EOPNOTSUPP; | ||
1153 | } | ||
1154 | |||
1155 | static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf) | ||
1156 | { | ||
1157 | if (sset == ETH_SS_STATS) | ||
1158 | memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names)); | ||
1159 | } | ||
1160 | |||
1161 | static u32 nb8800_read_stat(struct net_device *dev, int index) | ||
1162 | { | ||
1163 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1164 | |||
1165 | nb8800_writeb(priv, NB8800_STAT_INDEX, index); | ||
1166 | |||
1167 | return nb8800_readl(priv, NB8800_STAT_DATA); | ||
1168 | } | ||
1169 | |||
1170 | static void nb8800_get_ethtool_stats(struct net_device *dev, | ||
1171 | struct ethtool_stats *estats, u64 *st) | ||
1172 | { | ||
1173 | unsigned int i; | ||
1174 | u32 rx, tx; | ||
1175 | |||
1176 | for (i = 0; i < NB8800_NUM_STATS / 2; i++) { | ||
1177 | rx = nb8800_read_stat(dev, i); | ||
1178 | tx = nb8800_read_stat(dev, i | 0x80); | ||
1179 | st[i] = rx; | ||
1180 | st[i + NB8800_NUM_STATS / 2] = tx; | ||
1181 | } | ||
1182 | } | ||
1183 | |||
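For reference, the layout produced above: the first NB8800_NUM_STATS/2 slots of st[] are the rx counters and the second half the tx counters, in the same order as nb8800_stats_names, with the tx bank selected by setting bit 7 of the hardware index. A minimal user-space sketch of the index math (the 44-entry count mirrors the name table above; the hardware read is a stand-in):

    #include <stdio.h>

    #define NUM_STATS 44	/* ARRAY_SIZE(nb8800_stats_names): 22 rx + 22 tx */

    /* stand-in for nb8800_read_stat(): write STAT_INDEX, read STAT_DATA */
    static unsigned int fake_read_stat(int index)
    {
            return index;	/* placeholder counter value */
    }

    int main(void)
    {
            unsigned long long st[NUM_STATS];
            int i;

            for (i = 0; i < NUM_STATS / 2; i++) {
                    st[i] = fake_read_stat(i);				/* rx bank */
                    st[i + NUM_STATS / 2] = fake_read_stat(i | 0x80);	/* tx bank */
            }
            printf("rx in st[0..%d], tx in st[%d..%d], st[22]=%llu\n",
                   NUM_STATS / 2 - 1, NUM_STATS / 2, NUM_STATS - 1, st[22]);
            return 0;
    }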
1184 | static const struct ethtool_ops nb8800_ethtool_ops = { | ||
1185 | .get_settings = nb8800_get_settings, | ||
1186 | .set_settings = nb8800_set_settings, | ||
1187 | .nway_reset = nb8800_nway_reset, | ||
1188 | .get_link = ethtool_op_get_link, | ||
1189 | .get_pauseparam = nb8800_get_pauseparam, | ||
1190 | .set_pauseparam = nb8800_set_pauseparam, | ||
1191 | .get_sset_count = nb8800_get_sset_count, | ||
1192 | .get_strings = nb8800_get_strings, | ||
1193 | .get_ethtool_stats = nb8800_get_ethtool_stats, | ||
1194 | }; | ||
1195 | |||
1196 | static int nb8800_hw_init(struct net_device *dev) | ||
1197 | { | ||
1198 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1199 | u32 val; | ||
1200 | |||
1201 | val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS; | ||
1202 | nb8800_writeb(priv, NB8800_TX_CTL1, val); | ||
1203 | |||
1204 | /* Collision retry count */ | ||
1205 | nb8800_writeb(priv, NB8800_TX_CTL2, 5); | ||
1206 | |||
1207 | val = RX_PAD_STRIP | RX_AF_EN; | ||
1208 | nb8800_writeb(priv, NB8800_RX_CTL, val); | ||
1209 | |||
1210 | /* Chosen by fair dice roll */ | ||
1211 | nb8800_writeb(priv, NB8800_RANDOM_SEED, 4); | ||
1212 | |||
1213 | /* TX cycles per deferral period */ | ||
1214 | nb8800_writeb(priv, NB8800_TX_SDP, 12); | ||
1215 | |||
1216 | /* The following three threshold values have been | ||
1217 | * experimentally determined for good results. | ||
1218 | */ | ||
1219 | |||
1220 | /* RX/TX FIFO threshold for partial empty (64-bit entries) */ | ||
1221 | nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0); | ||
1222 | |||
1223 | /* RX/TX FIFO threshold for partial full (64-bit entries) */ | ||
1224 | nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255); | ||
1225 | |||
1226 | /* Buffer size for transmit (64-bit entries) */ | ||
1227 | nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64); | ||
1228 | |||
1229 | /* Configure tx DMA */ | ||
1230 | |||
1231 | val = nb8800_readl(priv, NB8800_TXC_CR); | ||
1232 | val &= TCR_LE; /* keep endian setting */ | ||
1233 | val |= TCR_DM; /* DMA descriptor mode */ | ||
1234 | val |= TCR_RS; /* automatically store tx status */ | ||
1235 | val |= TCR_DIE; /* interrupt on DMA chain completion */ | ||
1236 | val |= TCR_TFI(7); /* interrupt after 7 frames transmitted */ | ||
1237 | val |= TCR_BTS(2); /* 32-byte bus transaction size */ | ||
1238 | nb8800_writel(priv, NB8800_TXC_CR, val); | ||
1239 | |||
1240 | /* TX complete interrupt after 10 ms or 7 frames (see above) */ | ||
1241 | val = clk_get_rate(priv->clk) / 100; | ||
1242 | nb8800_writel(priv, NB8800_TX_ITR, val); | ||
1243 | |||
1244 | /* Configure rx DMA */ | ||
1245 | |||
1246 | val = nb8800_readl(priv, NB8800_RXC_CR); | ||
1247 | val &= RCR_LE; /* keep endian setting */ | ||
1248 | val |= RCR_DM; /* DMA descriptor mode */ | ||
1249 | val |= RCR_RS; /* automatically store rx status */ | ||
1250 | val |= RCR_DIE; /* interrupt at end of DMA chain */ | ||
1251 | val |= RCR_RFI(7); /* interrupt after 7 frames received */ | ||
1252 | val |= RCR_BTS(2); /* 32-byte bus transaction size */ | ||
1253 | nb8800_writel(priv, NB8800_RXC_CR, val); | ||
1254 | |||
1255 | /* The rx interrupt can fire before the DMA has completed | ||
1256 | * unless a small delay is added. 50 us is hopefully enough. | ||
1257 | */ | ||
1258 | priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000; | ||
1259 | |||
1260 | /* In NAPI poll mode we want to disable interrupts, but the | ||
1261 | * hardware does not permit this. Delay 10 ms instead. | ||
1262 | */ | ||
1263 | priv->rx_itr_poll = clk_get_rate(priv->clk) / 100; | ||
1264 | |||
1265 | nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq); | ||
1266 | |||
1267 | priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF; | ||
1268 | |||
1269 | /* Flow control settings */ | ||
1270 | |||
1271 | /* Pause time of 0.1 ms */ | ||
1272 | val = 100000 / 512; | ||
1273 | nb8800_writeb(priv, NB8800_PQ1, val >> 8); | ||
1274 | nb8800_writeb(priv, NB8800_PQ2, val & 0xff); | ||
1275 | |||
1276 | /* Auto-negotiate by default */ | ||
1277 | priv->pause_aneg = true; | ||
1278 | priv->pause_rx = true; | ||
1279 | priv->pause_tx = true; | ||
1280 | |||
1281 | nb8800_mc_init(dev, 0); | ||
1282 | |||
1283 | return 0; | ||
1284 | } | ||
1285 | |||
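The divisors above are plain clock arithmetic: with the bus clock in Hz, rate / 100 is the number of ticks in 10 ms and rate / 20000 the number in 50 us, and the pause value is 0.1 ms expressed in pause quanta of 512 bit times (512 ns at gigabit speed). A runnable sketch of the same sums, assuming a hypothetical 250 MHz clock in place of clk_get_rate():

    #include <stdio.h>

    int main(void)
    {
            unsigned long rate = 250000000;	/* assumed clk_get_rate() value */

            printf("TX_ITR, 10 ms:      %lu ticks\n", rate / 100);
            printf("rx_itr_irq, 50 us:  %lu ticks\n", rate / 20000);
            printf("rx_itr_poll, 10 ms: %lu ticks\n", rate / 100);

            /* 0.1 ms = 100000 ns; one pause quantum = 512 bit times,
             * i.e. 512 ns at 1 Gb/s, hence the register value below.
             */
            printf("pause quanta:       %d\n", 100000 / 512);
            return 0;
    }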
1286 | static int nb8800_tangox_init(struct net_device *dev) | ||
1287 | { | ||
1288 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1289 | u32 pad_mode = PAD_MODE_MII; | ||
1290 | |||
1291 | switch (priv->phy_mode) { | ||
1292 | case PHY_INTERFACE_MODE_MII: | ||
1293 | case PHY_INTERFACE_MODE_GMII: | ||
1294 | pad_mode = PAD_MODE_MII; | ||
1295 | break; | ||
1296 | |||
1297 | case PHY_INTERFACE_MODE_RGMII: | ||
1298 | pad_mode = PAD_MODE_RGMII; | ||
1299 | break; | ||
1300 | |||
1301 | case PHY_INTERFACE_MODE_RGMII_TXID: | ||
1302 | pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY; | ||
1303 | break; | ||
1304 | |||
1305 | default: | ||
1306 | dev_err(dev->dev.parent, "unsupported phy mode %s\n", | ||
1307 | phy_modes(priv->phy_mode)); | ||
1308 | return -EINVAL; | ||
1309 | } | ||
1310 | |||
1311 | nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode); | ||
1312 | |||
1313 | return 0; | ||
1314 | } | ||
1315 | |||
1316 | static int nb8800_tangox_reset(struct net_device *dev) | ||
1317 | { | ||
1318 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1319 | int clk_div; | ||
1320 | |||
1321 | nb8800_writeb(priv, NB8800_TANGOX_RESET, 0); | ||
1322 | usleep_range(1000, 10000); | ||
1323 | nb8800_writeb(priv, NB8800_TANGOX_RESET, 1); | ||
1324 | |||
1325 | wmb(); /* ensure reset is cleared before proceeding */ | ||
1326 | |||
1327 | clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK); | ||
1328 | nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div); | ||
1329 | |||
1330 | return 0; | ||
1331 | } | ||
1332 | |||
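DIV_ROUND_UP(rate, 2 * MAX_MDC_CLOCK) above picks the smallest divider that keeps the management clock at or below the 2.5 MHz MAX_MDC_CLOCK from nb8800.h, on the assumption that the hardware derives MDC as rate / (2 * clk_div). A runnable check with the same hypothetical 250 MHz clock:

    #include <stdio.h>

    #define MAX_MDC_CLOCK 2500000
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long rate = 250000000;	/* assumed clk_get_rate() value */
            unsigned long clk_div = DIV_ROUND_UP(rate, 2 * MAX_MDC_CLOCK);

            printf("clk_div = %lu -> MDC = %lu Hz (limit %d)\n",
                   clk_div, rate / (2 * clk_div), MAX_MDC_CLOCK);
            return 0;
    }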
1333 | static const struct nb8800_ops nb8800_tangox_ops = { | ||
1334 | .init = nb8800_tangox_init, | ||
1335 | .reset = nb8800_tangox_reset, | ||
1336 | }; | ||
1337 | |||
1338 | static int nb8800_tango4_init(struct net_device *dev) | ||
1339 | { | ||
1340 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1341 | int err; | ||
1342 | |||
1343 | err = nb8800_tangox_init(dev); | ||
1344 | if (err) | ||
1345 | return err; | ||
1346 | |||
1347 | /* On tango4, interrupt on DMA completion per frame works and gives | ||
1348 | * better performance despite generating more rx interrupts. | ||
1349 | */ | ||
1350 | |||
1351 | /* Disable unnecessary interrupt on rx completion */ | ||
1352 | nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7)); | ||
1353 | |||
1354 | /* Request interrupt on descriptor DMA completion */ | ||
1355 | priv->rx_dma_config |= DESC_ID; | ||
1356 | |||
1357 | return 0; | ||
1358 | } | ||
1359 | |||
1360 | static const struct nb8800_ops nb8800_tango4_ops = { | ||
1361 | .init = nb8800_tango4_init, | ||
1362 | .reset = nb8800_tangox_reset, | ||
1363 | }; | ||
1364 | |||
1365 | static const struct of_device_id nb8800_dt_ids[] = { | ||
1366 | { | ||
1367 | .compatible = "aurora,nb8800", | ||
1368 | }, | ||
1369 | { | ||
1370 | .compatible = "sigma,smp8642-ethernet", | ||
1371 | .data = &nb8800_tangox_ops, | ||
1372 | }, | ||
1373 | { | ||
1374 | .compatible = "sigma,smp8734-ethernet", | ||
1375 | .data = &nb8800_tango4_ops, | ||
1376 | }, | ||
1377 | { } | ||
1378 | }; | ||
1379 | |||
1380 | static int nb8800_probe(struct platform_device *pdev) | ||
1381 | { | ||
1382 | const struct of_device_id *match; | ||
1383 | const struct nb8800_ops *ops = NULL; | ||
1384 | struct nb8800_priv *priv; | ||
1385 | struct resource *res; | ||
1386 | struct net_device *dev; | ||
1387 | struct mii_bus *bus; | ||
1388 | const unsigned char *mac; | ||
1389 | void __iomem *base; | ||
1390 | int irq; | ||
1391 | int ret; | ||
1392 | |||
1393 | match = of_match_device(nb8800_dt_ids, &pdev->dev); | ||
1394 | if (match) | ||
1395 | ops = match->data; | ||
1396 | |||
1397 | irq = platform_get_irq(pdev, 0); | ||
1398 | if (irq <= 0) { | ||
1399 | dev_err(&pdev->dev, "No IRQ\n"); | ||
1400 | return -EINVAL; | ||
1401 | } | ||
1402 | |||
1403 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1404 | base = devm_ioremap_resource(&pdev->dev, res); | ||
1405 | if (IS_ERR(base)) | ||
1406 | return PTR_ERR(base); | ||
1407 | |||
1408 | dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start); | ||
1409 | |||
1410 | dev = alloc_etherdev(sizeof(*priv)); | ||
1411 | if (!dev) | ||
1412 | return -ENOMEM; | ||
1413 | |||
1414 | platform_set_drvdata(pdev, dev); | ||
1415 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
1416 | |||
1417 | priv = netdev_priv(dev); | ||
1418 | priv->base = base; | ||
1419 | |||
1420 | priv->phy_mode = of_get_phy_mode(pdev->dev.of_node); | ||
1421 | if (priv->phy_mode < 0) | ||
1422 | priv->phy_mode = PHY_INTERFACE_MODE_RGMII; | ||
1423 | |||
1424 | priv->clk = devm_clk_get(&pdev->dev, NULL); | ||
1425 | if (IS_ERR(priv->clk)) { | ||
1426 | dev_err(&pdev->dev, "failed to get clock\n"); | ||
1427 | ret = PTR_ERR(priv->clk); | ||
1428 | goto err_free_dev; | ||
1429 | } | ||
1430 | |||
1431 | ret = clk_prepare_enable(priv->clk); | ||
1432 | if (ret) | ||
1433 | goto err_free_dev; | ||
1434 | |||
1435 | spin_lock_init(&priv->tx_lock); | ||
1436 | |||
1437 | if (ops && ops->reset) { | ||
1438 | ret = ops->reset(dev); | ||
1439 | if (ret) | ||
1440 | goto err_disable_clk; | ||
1441 | } | ||
1442 | |||
1443 | bus = devm_mdiobus_alloc(&pdev->dev); | ||
1444 | if (!bus) { | ||
1445 | ret = -ENOMEM; | ||
1446 | goto err_disable_clk; | ||
1447 | } | ||
1448 | |||
1449 | bus->name = "nb8800-mii"; | ||
1450 | bus->read = nb8800_mdio_read; | ||
1451 | bus->write = nb8800_mdio_write; | ||
1452 | bus->parent = &pdev->dev; | ||
1453 | snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii", | ||
1454 | (unsigned long)res->start); | ||
1455 | bus->priv = priv; | ||
1456 | |||
1457 | ret = of_mdiobus_register(bus, pdev->dev.of_node); | ||
1458 | if (ret) { | ||
1459 | dev_err(&pdev->dev, "failed to register MII bus\n"); | ||
1460 | goto err_disable_clk; | ||
1461 | } | ||
1462 | |||
1463 | priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); | ||
1464 | if (!priv->phy_node) { | ||
1465 | dev_err(&pdev->dev, "no PHY specified\n"); | ||
1466 | ret = -ENODEV; | ||
1467 | goto err_free_bus; | ||
1468 | } | ||
1469 | |||
1470 | priv->mii_bus = bus; | ||
1471 | |||
1472 | ret = nb8800_hw_init(dev); | ||
1473 | if (ret) | ||
1474 | goto err_free_bus; | ||
1475 | |||
1476 | if (ops && ops->init) { | ||
1477 | ret = ops->init(dev); | ||
1478 | if (ret) | ||
1479 | goto err_free_bus; | ||
1480 | } | ||
1481 | |||
1482 | dev->netdev_ops = &nb8800_netdev_ops; | ||
1483 | dev->ethtool_ops = &nb8800_ethtool_ops; | ||
1484 | dev->flags |= IFF_MULTICAST; | ||
1485 | dev->irq = irq; | ||
1486 | |||
1487 | mac = of_get_mac_address(pdev->dev.of_node); | ||
1488 | if (mac) | ||
1489 | ether_addr_copy(dev->dev_addr, mac); | ||
1490 | |||
1491 | if (!is_valid_ether_addr(dev->dev_addr)) | ||
1492 | eth_hw_addr_random(dev); | ||
1493 | |||
1494 | nb8800_update_mac_addr(dev); | ||
1495 | |||
1496 | netif_carrier_off(dev); | ||
1497 | |||
1498 | ret = register_netdev(dev); | ||
1499 | if (ret) { | ||
1500 | netdev_err(dev, "failed to register netdev\n"); | ||
1501 | goto err_free_dma; | ||
1502 | } | ||
1503 | |||
1504 | netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT); | ||
1505 | |||
1506 | netdev_info(dev, "MAC address %pM\n", dev->dev_addr); | ||
1507 | |||
1508 | return 0; | ||
1509 | |||
1510 | err_free_dma: | ||
1511 | nb8800_dma_free(dev); | ||
1512 | err_free_bus: | ||
1513 | mdiobus_unregister(bus); | ||
1514 | err_disable_clk: | ||
1515 | clk_disable_unprepare(priv->clk); | ||
1516 | err_free_dev: | ||
1517 | free_netdev(dev); | ||
1518 | |||
1519 | return ret; | ||
1520 | } | ||
1521 | |||
1522 | static int nb8800_remove(struct platform_device *pdev) | ||
1523 | { | ||
1524 | struct net_device *ndev = platform_get_drvdata(pdev); | ||
1525 | struct nb8800_priv *priv = netdev_priv(ndev); | ||
1526 | |||
1527 | unregister_netdev(ndev); | ||
1528 | |||
1529 | mdiobus_unregister(priv->mii_bus); | ||
1530 | |||
1531 | clk_disable_unprepare(priv->clk); | ||
1532 | |||
1533 | nb8800_dma_free(ndev); | ||
1534 | free_netdev(ndev); | ||
1535 | |||
1536 | return 0; | ||
1537 | } | ||
1538 | |||
1539 | static struct platform_driver nb8800_driver = { | ||
1540 | .driver = { | ||
1541 | .name = "nb8800", | ||
1542 | .of_match_table = nb8800_dt_ids, | ||
1543 | }, | ||
1544 | .probe = nb8800_probe, | ||
1545 | .remove = nb8800_remove, | ||
1546 | }; | ||
1547 | |||
1548 | module_platform_driver(nb8800_driver); | ||
1549 | |||
1550 | MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver"); | ||
1551 | MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>"); | ||
1552 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h new file mode 100644 index 000000000000..e5adbc2aac9f --- /dev/null +++ b/drivers/net/ethernet/aurora/nb8800.h | |||
@@ -0,0 +1,316 @@ | |||
1 | #ifndef _NB8800_H_ | ||
2 | #define _NB8800_H_ | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/skbuff.h> | ||
6 | #include <linux/phy.h> | ||
7 | #include <linux/clk.h> | ||
8 | #include <linux/bitops.h> | ||
9 | |||
10 | #define RX_DESC_COUNT 256 | ||
11 | #define TX_DESC_COUNT 256 | ||
12 | |||
13 | #define NB8800_DESC_LOW 4 | ||
14 | |||
15 | #define RX_BUF_SIZE 1552 | ||
16 | |||
17 | #define RX_COPYBREAK 256 | ||
18 | #define RX_COPYHDR 128 | ||
19 | |||
20 | #define MAX_MDC_CLOCK 2500000 | ||
21 | |||
22 | /* Stargate Solutions SSN8800 core registers */ | ||
23 | #define NB8800_TX_CTL1 0x000 | ||
24 | #define TX_TPD BIT(5) | ||
25 | #define TX_APPEND_FCS BIT(4) | ||
26 | #define TX_PAD_EN BIT(3) | ||
27 | #define TX_RETRY_EN BIT(2) | ||
28 | #define TX_EN BIT(0) | ||
29 | |||
30 | #define NB8800_TX_CTL2 0x001 | ||
31 | |||
32 | #define NB8800_RX_CTL 0x004 | ||
33 | #define RX_BC_DISABLE BIT(7) | ||
34 | #define RX_RUNT BIT(6) | ||
35 | #define RX_AF_EN BIT(5) | ||
36 | #define RX_PAUSE_EN BIT(3) | ||
37 | #define RX_SEND_CRC BIT(2) | ||
38 | #define RX_PAD_STRIP BIT(1) | ||
39 | #define RX_EN BIT(0) | ||
40 | |||
41 | #define NB8800_RANDOM_SEED 0x008 | ||
42 | #define NB8800_TX_SDP 0x14 | ||
43 | #define NB8800_TX_TPDP1 0x18 | ||
44 | #define NB8800_TX_TPDP2 0x19 | ||
45 | #define NB8800_SLOT_TIME 0x1c | ||
46 | |||
47 | #define NB8800_MDIO_CMD 0x020 | ||
48 | #define MDIO_CMD_GO BIT(31) | ||
49 | #define MDIO_CMD_WR BIT(26) | ||
50 | #define MDIO_CMD_ADDR(x) ((x) << 21) | ||
51 | #define MDIO_CMD_REG(x) ((x) << 16) | ||
52 | #define MDIO_CMD_DATA(x) ((x) << 0) | ||
53 | |||
54 | #define NB8800_MDIO_STS 0x024 | ||
55 | #define MDIO_STS_ERR BIT(31) | ||
56 | |||
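These fields pack a whole MDIO transaction into one 32-bit command word: GO in bit 31, WR for write cycles, the PHY address from bit 21, the register number from bit 16, and write data in the low bits. A small runnable sketch of the composition only; how the driver then polls GO and checks MDIO_STS_ERR is assumed, not shown:

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n)            (1u << (n))
    #define MDIO_CMD_GO       BIT(31)
    #define MDIO_CMD_WR       BIT(26)
    #define MDIO_CMD_ADDR(x)  ((x) << 21)
    #define MDIO_CMD_REG(x)   ((x) << 16)
    #define MDIO_CMD_DATA(x)  ((x) << 0)

    int main(void)
    {
            uint32_t rd = MDIO_CMD_GO | MDIO_CMD_ADDR(1) | MDIO_CMD_REG(2);
            uint32_t wr = rd | MDIO_CMD_WR | MDIO_CMD_DATA(0x1234);

            printf("read  cmd = 0x%08x\n", rd);	/* PHY 1, register 2 */
            printf("write cmd = 0x%08x\n", wr);
            return 0;
    }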
57 | #define NB8800_MC_ADDR(i) (0x028 + (i)) | ||
58 | #define NB8800_MC_INIT 0x02e | ||
59 | #define NB8800_UC_ADDR(i) (0x03c + (i)) | ||
60 | |||
61 | #define NB8800_MAC_MODE 0x044 | ||
62 | #define RGMII_MODE BIT(7) | ||
63 | #define HALF_DUPLEX BIT(4) | ||
64 | #define BURST_EN BIT(3) | ||
65 | #define LOOPBACK_EN BIT(2) | ||
66 | #define GMAC_MODE BIT(0) | ||
67 | |||
68 | #define NB8800_IC_THRESHOLD 0x050 | ||
69 | #define NB8800_PE_THRESHOLD 0x051 | ||
70 | #define NB8800_PF_THRESHOLD 0x052 | ||
71 | #define NB8800_TX_BUFSIZE 0x054 | ||
72 | #define NB8800_FIFO_CTL 0x056 | ||
73 | #define NB8800_PQ1 0x060 | ||
74 | #define NB8800_PQ2 0x061 | ||
75 | #define NB8800_SRC_ADDR(i) (0x06a + (i)) | ||
76 | #define NB8800_STAT_DATA 0x078 | ||
77 | #define NB8800_STAT_INDEX 0x07c | ||
78 | #define NB8800_STAT_CLEAR 0x07d | ||
79 | |||
80 | #define NB8800_SLEEP_MODE 0x07e | ||
81 | #define SLEEP_MODE BIT(0) | ||
82 | |||
83 | #define NB8800_WAKEUP 0x07f | ||
84 | #define WAKEUP BIT(0) | ||
85 | |||
86 | /* Aurora NB8800 host interface registers */ | ||
87 | #define NB8800_TXC_CR 0x100 | ||
88 | #define TCR_LK BIT(12) | ||
89 | #define TCR_DS BIT(11) | ||
90 | #define TCR_BTS(x) (((x) & 0x7) << 8) | ||
91 | #define TCR_DIE BIT(7) | ||
92 | #define TCR_TFI(x) (((x) & 0x7) << 4) | ||
93 | #define TCR_LE BIT(3) | ||
94 | #define TCR_RS BIT(2) | ||
95 | #define TCR_DM BIT(1) | ||
96 | #define TCR_EN BIT(0) | ||
97 | |||
98 | #define NB8800_TXC_SR 0x104 | ||
99 | #define TSR_DE BIT(3) | ||
100 | #define TSR_DI BIT(2) | ||
101 | #define TSR_TO BIT(1) | ||
102 | #define TSR_TI BIT(0) | ||
103 | |||
104 | #define NB8800_TX_SAR 0x108 | ||
105 | #define NB8800_TX_DESC_ADDR 0x10c | ||
106 | |||
107 | #define NB8800_TX_REPORT_ADDR 0x110 | ||
108 | #define TX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xffff) | ||
109 | #define TX_FIRST_DEFERRAL BIT(7) | ||
110 | #define TX_EARLY_COLLISIONS(x) (((x) >> 3) & 0xf) | ||
111 | #define TX_LATE_COLLISION BIT(2) | ||
112 | #define TX_PACKET_DROPPED BIT(1) | ||
113 | #define TX_FIFO_UNDERRUN BIT(0) | ||
114 | #define IS_TX_ERROR(r) ((r) & 0x07) | ||
115 | |||
116 | #define NB8800_TX_FIFO_SR 0x114 | ||
117 | #define NB8800_TX_ITR 0x118 | ||
118 | |||
119 | #define NB8800_RXC_CR 0x200 | ||
120 | #define RCR_FL BIT(13) | ||
121 | #define RCR_LK BIT(12) | ||
122 | #define RCR_DS BIT(11) | ||
123 | #define RCR_BTS(x) (((x) & 7) << 8) | ||
124 | #define RCR_DIE BIT(7) | ||
125 | #define RCR_RFI(x) (((x) & 7) << 4) | ||
126 | #define RCR_LE BIT(3) | ||
127 | #define RCR_RS BIT(2) | ||
128 | #define RCR_DM BIT(1) | ||
129 | #define RCR_EN BIT(0) | ||
130 | |||
131 | #define NB8800_RXC_SR 0x204 | ||
132 | #define RSR_DE BIT(3) | ||
133 | #define RSR_DI BIT(2) | ||
134 | #define RSR_RO BIT(1) | ||
135 | #define RSR_RI BIT(0) | ||
136 | |||
137 | #define NB8800_RX_SAR 0x208 | ||
138 | #define NB8800_RX_DESC_ADDR 0x20c | ||
139 | |||
140 | #define NB8800_RX_REPORT_ADDR 0x210 | ||
141 | #define RX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xFFFF) | ||
142 | #define RX_MULTICAST_PKT BIT(9) | ||
143 | #define RX_BROADCAST_PKT BIT(8) | ||
144 | #define RX_LENGTH_ERR BIT(7) | ||
145 | #define RX_FCS_ERR BIT(6) | ||
146 | #define RX_RUNT_PKT BIT(5) | ||
147 | #define RX_FIFO_OVERRUN BIT(4) | ||
148 | #define RX_LATE_COLLISION BIT(3) | ||
149 | #define RX_ALIGNMENT_ERROR BIT(2) | ||
150 | #define RX_ERROR_MASK 0xfc | ||
151 | #define IS_RX_ERROR(r) ((r) & RX_ERROR_MASK) | ||
152 | |||
153 | #define NB8800_RX_FIFO_SR 0x214 | ||
154 | #define NB8800_RX_ITR 0x218 | ||
155 | |||
156 | /* Sigma Designs SMP86xx additional registers */ | ||
157 | #define NB8800_TANGOX_PAD_MODE 0x400 | ||
158 | #define PAD_MODE_MASK 0x7 | ||
159 | #define PAD_MODE_MII 0x0 | ||
160 | #define PAD_MODE_RGMII 0x1 | ||
161 | #define PAD_MODE_GTX_CLK_INV BIT(3) | ||
162 | #define PAD_MODE_GTX_CLK_DELAY BIT(4) | ||
163 | |||
164 | #define NB8800_TANGOX_MDIO_CLKDIV 0x420 | ||
165 | #define NB8800_TANGOX_RESET 0x424 | ||
166 | |||
167 | /* Hardware DMA descriptor */ | ||
168 | struct nb8800_dma_desc { | ||
169 | u32 s_addr; /* start address */ | ||
170 | u32 n_addr; /* next descriptor address */ | ||
171 | u32 r_addr; /* report address */ | ||
172 | u32 config; | ||
173 | } __aligned(8); | ||
174 | |||
175 | #define DESC_ID BIT(23) | ||
176 | #define DESC_EOC BIT(22) | ||
177 | #define DESC_EOF BIT(21) | ||
178 | #define DESC_LK BIT(20) | ||
179 | #define DESC_DS BIT(19) | ||
180 | #define DESC_BTS(x) (((x) & 0x7) << 16) | ||
181 | |||
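The hardware follows descriptors through n_addr, so a ring is built by pointing each descriptor's n_addr at the bus address of the next one, with DESC_EOC presumably telling the DMA engine where to stop (the driver's rx_eoc below tracks that slot). A hedged user-space sketch with a fake bus address:

    #include <stdio.h>
    #include <stdint.h>

    struct nb8800_dma_desc {
            uint32_t s_addr;	/* start address */
            uint32_t n_addr;	/* next descriptor address */
            uint32_t r_addr;	/* report address */
            uint32_t config;
    };

    #define DESC_EOC (1u << 22)

    int main(void)
    {
            struct nb8800_dma_desc ring[4];
            uint32_t ring_bus = 0x10000000;	/* fake DMA address of ring[0] */
            unsigned int i, eoc = 3;

            for (i = 0; i < 4; i++) {
                    ring[i].n_addr = ring_bus + ((i + 1) % 4) * sizeof(ring[0]);
                    ring[i].config = (i == eoc) ? DESC_EOC : 0;
            }
            for (i = 0; i < 4; i++)
                    printf("desc %u: next=0x%08x eoc=%d\n", i,
                           (unsigned)ring[i].n_addr,
                           !!(ring[i].config & DESC_EOC));
            return 0;
    }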
182 | /* DMA descriptor and associated data for rx. | ||
183 | * Allocated from coherent memory. | ||
184 | */ | ||
185 | struct nb8800_rx_desc { | ||
186 | /* DMA descriptor */ | ||
187 | struct nb8800_dma_desc desc; | ||
188 | |||
189 | /* Status report filled in by hardware */ | ||
190 | u32 report; | ||
191 | }; | ||
192 | |||
193 | /* Address of buffer on rx ring */ | ||
194 | struct nb8800_rx_buf { | ||
195 | struct page *page; | ||
196 | unsigned long offset; | ||
197 | }; | ||
198 | |||
199 | /* DMA descriptors and associated data for tx. | ||
200 | * Allocated from coherent memory. | ||
201 | */ | ||
202 | struct nb8800_tx_desc { | ||
203 | /* DMA descriptor. The second descriptor is used if packet | ||
204 | * data is unaligned. | ||
205 | */ | ||
206 | struct nb8800_dma_desc desc[2]; | ||
207 | |||
208 | /* Status report filled in by hardware */ | ||
209 | u32 report; | ||
210 | |||
211 | /* Bounce buffer for initial unaligned part of packet */ | ||
212 | u8 buf[8] __aligned(8); | ||
213 | }; | ||
214 | |||
215 | /* Packet in tx queue */ | ||
216 | struct nb8800_tx_buf { | ||
217 | /* Currently queued skb */ | ||
218 | struct sk_buff *skb; | ||
219 | |||
220 | /* DMA address of the first descriptor */ | ||
221 | dma_addr_t dma_desc; | ||
222 | |||
223 | /* DMA address of packet data */ | ||
224 | dma_addr_t dma_addr; | ||
225 | |||
226 | /* Length of DMA mapping, less than skb->len if alignment | ||
227 | * buffer is used. | ||
228 | */ | ||
229 | unsigned int dma_len; | ||
230 | |||
231 | /* Number of packets in chain starting here */ | ||
232 | unsigned int chain_len; | ||
233 | |||
234 | /* Packet chain ready to be submitted to hardware */ | ||
235 | bool ready; | ||
236 | }; | ||
237 | |||
238 | struct nb8800_priv { | ||
239 | struct napi_struct napi; | ||
240 | |||
241 | void __iomem *base; | ||
242 | |||
243 | /* RX DMA descriptors */ | ||
244 | struct nb8800_rx_desc *rx_descs; | ||
245 | |||
246 | /* RX buffers referenced by DMA descriptors */ | ||
247 | struct nb8800_rx_buf *rx_bufs; | ||
248 | |||
249 | /* Current end of chain */ | ||
250 | u32 rx_eoc; | ||
251 | |||
252 | /* Value for rx interrupt time register in NAPI interrupt mode */ | ||
253 | u32 rx_itr_irq; | ||
254 | |||
255 | /* Value for rx interrupt time register in NAPI poll mode */ | ||
256 | u32 rx_itr_poll; | ||
257 | |||
258 | /* Value for config field of rx DMA descriptors */ | ||
259 | u32 rx_dma_config; | ||
260 | |||
261 | /* TX DMA descriptors */ | ||
262 | struct nb8800_tx_desc *tx_descs; | ||
263 | |||
264 | /* TX packet queue */ | ||
265 | struct nb8800_tx_buf *tx_bufs; | ||
266 | |||
267 | /* Number of free tx queue entries */ | ||
268 | atomic_t tx_free; | ||
269 | |||
270 | /* First free tx queue entry */ | ||
271 | u32 tx_next; | ||
272 | |||
273 | /* Next buffer to transmit */ | ||
274 | u32 tx_queue; | ||
275 | |||
276 | /* Start of current packet chain */ | ||
277 | struct nb8800_tx_buf *tx_chain; | ||
278 | |||
279 | /* Next buffer to reclaim */ | ||
280 | u32 tx_done; | ||
281 | |||
282 | /* Lock for DMA activation */ | ||
283 | spinlock_t tx_lock; | ||
284 | |||
285 | struct mii_bus *mii_bus; | ||
286 | struct device_node *phy_node; | ||
287 | struct phy_device *phydev; | ||
288 | |||
289 | /* PHY connection type from DT */ | ||
290 | int phy_mode; | ||
291 | |||
292 | /* Current link status */ | ||
293 | int speed; | ||
294 | int duplex; | ||
295 | int link; | ||
296 | |||
297 | /* Pause settings */ | ||
298 | bool pause_aneg; | ||
299 | bool pause_rx; | ||
300 | bool pause_tx; | ||
301 | |||
302 | /* DMA base address of rx descriptors, see rx_descs above */ | ||
303 | dma_addr_t rx_desc_dma; | ||
304 | |||
305 | /* DMA base address of tx descriptors, see tx_descs above */ | ||
306 | dma_addr_t tx_desc_dma; | ||
307 | |||
308 | struct clk *clk; | ||
309 | }; | ||
310 | |||
311 | struct nb8800_ops { | ||
312 | int (*init)(struct net_device *dev); | ||
313 | int (*reset)(struct net_device *dev); | ||
314 | }; | ||
315 | |||
316 | #endif /* _NB8800_H_ */ | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c9b036789184..2e611dc5f162 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -10139,8 +10139,8 @@ static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port) | |||
10139 | DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); | 10139 | DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); |
10140 | return; | 10140 | return; |
10141 | } | 10141 | } |
10142 | bp->vxlan_dst_port--; | 10142 | bp->vxlan_dst_port_count--; |
10143 | if (bp->vxlan_dst_port) | 10143 | if (bp->vxlan_dst_port_count) |
10144 | return; | 10144 | return; |
10145 | 10145 | ||
10146 | if (netif_running(bp->dev)) { | 10146 | if (netif_running(bp->dev)) { |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index db15c5ee09c5..07f5f239cb65 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -2693,17 +2693,16 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) | |||
2693 | req.ver_upd = DRV_VER_UPD; | 2693 | req.ver_upd = DRV_VER_UPD; |
2694 | 2694 | ||
2695 | if (BNXT_PF(bp)) { | 2695 | if (BNXT_PF(bp)) { |
2696 | unsigned long vf_req_snif_bmap[4]; | 2696 | DECLARE_BITMAP(vf_req_snif_bmap, 256); |
2697 | u32 *data = (u32 *)vf_req_snif_bmap; | 2697 | u32 *data = (u32 *)vf_req_snif_bmap; |
2698 | 2698 | ||
2699 | memset(vf_req_snif_bmap, 0, 32); | 2699 | memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap)); |
2700 | for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) | 2700 | for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) |
2701 | __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap); | 2701 | __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap); |
2702 | 2702 | ||
2703 | for (i = 0; i < 8; i++) { | 2703 | for (i = 0; i < 8; i++) |
2704 | req.vf_req_fwd[i] = cpu_to_le32(*data); | 2704 | req.vf_req_fwd[i] = cpu_to_le32(data[i]); |
2705 | data++; | 2705 | |
2706 | } | ||
2707 | req.enables |= | 2706 | req.enables |= |
2708 | cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); | 2707 | cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); |
2709 | } | 2708 | } |
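The bnxt hunk above is a word-size fix: unsigned long vf_req_snif_bmap[4] holds 256 bits only on 64-bit builds (128 on 32-bit), whereas DECLARE_BITMAP(name, 256) always reserves 256 bits; indexing data[i] instead of walking a pointer is a tidy-up on top. A runnable illustration, with the kernel's DECLARE_BITMAP replicated locally:

    #include <stdio.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
    #define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
    #define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

    int main(void)
    {
            unsigned long old_map[4];
            DECLARE_BITMAP(new_map, 256);

            printf("unsigned long[4]:    %zu bits\n", sizeof(old_map) * CHAR_BIT);
            printf("DECLARE_BITMAP(256): %zu bits\n", sizeof(new_map) * CHAR_BIT);
            return 0;
    }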
@@ -3625,6 +3624,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) | |||
3625 | pf->fw_fid = le16_to_cpu(resp->fid); | 3624 | pf->fw_fid = le16_to_cpu(resp->fid); |
3626 | pf->port_id = le16_to_cpu(resp->port_id); | 3625 | pf->port_id = le16_to_cpu(resp->port_id); |
3627 | memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN); | 3626 | memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN); |
3627 | memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); | ||
3628 | pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); | 3628 | pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); |
3629 | pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); | 3629 | pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); |
3630 | pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); | 3630 | pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); |
@@ -3648,8 +3648,11 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) | |||
3648 | 3648 | ||
3649 | vf->fw_fid = le16_to_cpu(resp->fid); | 3649 | vf->fw_fid = le16_to_cpu(resp->fid); |
3650 | memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN); | 3650 | memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN); |
3651 | if (!is_valid_ether_addr(vf->mac_addr)) | 3651 | if (is_valid_ether_addr(vf->mac_addr)) |
3652 | random_ether_addr(vf->mac_addr); | 3652 | /* overwrite netdev dev_addr with admin VF MAC */ |
3653 | memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); | ||
3654 | else | ||
3655 | random_ether_addr(bp->dev->dev_addr); | ||
3653 | 3656 | ||
3654 | vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); | 3657 | vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); |
3655 | vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); | 3658 | vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); |
@@ -3880,6 +3883,8 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) | |||
3880 | #endif | 3883 | #endif |
3881 | } | 3884 | } |
3882 | 3885 | ||
3886 | static int bnxt_cfg_rx_mode(struct bnxt *); | ||
3887 | |||
3883 | static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) | 3888 | static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) |
3884 | { | 3889 | { |
3885 | int rc = 0; | 3890 | int rc = 0; |
@@ -3946,11 +3951,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) | |||
3946 | bp->vnic_info[0].rx_mask |= | 3951 | bp->vnic_info[0].rx_mask |= |
3947 | CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; | 3952 | CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; |
3948 | 3953 | ||
3949 | rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); | 3954 | rc = bnxt_cfg_rx_mode(bp); |
3950 | if (rc) { | 3955 | if (rc) |
3951 | netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc); | ||
3952 | goto err_out; | 3956 | goto err_out; |
3953 | } | ||
3954 | 3957 | ||
3955 | rc = bnxt_hwrm_set_coal(bp); | 3958 | rc = bnxt_hwrm_set_coal(bp); |
3956 | if (rc) | 3959 | if (rc) |
@@ -4599,7 +4602,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) | |||
4599 | bp->nge_port_cnt = 1; | 4602 | bp->nge_port_cnt = 1; |
4600 | } | 4603 | } |
4601 | 4604 | ||
4602 | bp->state = BNXT_STATE_OPEN; | 4605 | set_bit(BNXT_STATE_OPEN, &bp->state); |
4603 | bnxt_enable_int(bp); | 4606 | bnxt_enable_int(bp); |
4604 | /* Enable TX queues */ | 4607 | /* Enable TX queues */ |
4605 | bnxt_tx_enable(bp); | 4608 | bnxt_tx_enable(bp); |
@@ -4675,8 +4678,10 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) | |||
4675 | /* Change device state to avoid TX queue wake-ups */ | 4678 | /* Change device state to avoid TX queue wake-ups */ |
4676 | bnxt_tx_disable(bp); | 4679 | bnxt_tx_disable(bp); |
4677 | 4680 | ||
4678 | bp->state = BNXT_STATE_CLOSED; | 4681 | clear_bit(BNXT_STATE_OPEN, &bp->state); |
4679 | cancel_work_sync(&bp->sp_task); | 4682 | smp_mb__after_atomic(); |
4683 | while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) | ||
4684 | msleep(20); | ||
4680 | 4685 | ||
4681 | /* Flush rings before disabling interrupts */ | 4686 | /* Flush rings before disabling interrupts */ |
4682 | bnxt_shutdown_nic(bp, irq_re_init); | 4687 | bnxt_shutdown_nic(bp, irq_re_init); |
@@ -4865,7 +4870,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) | |||
4865 | } | 4870 | } |
4866 | } | 4871 | } |
4867 | 4872 | ||
4868 | static void bnxt_cfg_rx_mode(struct bnxt *bp) | 4873 | static int bnxt_cfg_rx_mode(struct bnxt *bp) |
4869 | { | 4874 | { |
4870 | struct net_device *dev = bp->dev; | 4875 | struct net_device *dev = bp->dev; |
4871 | struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; | 4876 | struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; |
@@ -4914,6 +4919,7 @@ static void bnxt_cfg_rx_mode(struct bnxt *bp) | |||
4914 | netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", | 4919 | netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", |
4915 | rc); | 4920 | rc); |
4916 | vnic->uc_filter_count = i; | 4921 | vnic->uc_filter_count = i; |
4922 | return rc; | ||
4917 | } | 4923 | } |
4918 | } | 4924 | } |
4919 | 4925 | ||
@@ -4922,6 +4928,8 @@ skip_uc: | |||
4922 | if (rc) | 4928 | if (rc) |
4923 | netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", | 4929 | netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", |
4924 | rc); | 4930 | rc); |
4931 | |||
4932 | return rc; | ||
4925 | } | 4933 | } |
4926 | 4934 | ||
4927 | static netdev_features_t bnxt_fix_features(struct net_device *dev, | 4935 | static netdev_features_t bnxt_fix_features(struct net_device *dev, |
@@ -5023,8 +5031,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp) | |||
5023 | static void bnxt_reset_task(struct bnxt *bp) | 5031 | static void bnxt_reset_task(struct bnxt *bp) |
5024 | { | 5032 | { |
5025 | bnxt_dbg_dump_states(bp); | 5033 | bnxt_dbg_dump_states(bp); |
5026 | if (netif_running(bp->dev)) | 5034 | if (netif_running(bp->dev)) { |
5027 | bnxt_tx_disable(bp); /* prevent tx timout again */ | 5035 | bnxt_close_nic(bp, false, false); |
5036 | bnxt_open_nic(bp, false, false); | ||
5037 | } | ||
5028 | } | 5038 | } |
5029 | 5039 | ||
5030 | static void bnxt_tx_timeout(struct net_device *dev) | 5040 | static void bnxt_tx_timeout(struct net_device *dev) |
@@ -5074,8 +5084,12 @@ static void bnxt_sp_task(struct work_struct *work) | |||
5074 | struct bnxt *bp = container_of(work, struct bnxt, sp_task); | 5084 | struct bnxt *bp = container_of(work, struct bnxt, sp_task); |
5075 | int rc; | 5085 | int rc; |
5076 | 5086 | ||
5077 | if (bp->state != BNXT_STATE_OPEN) | 5087 | set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); |
5088 | smp_mb__after_atomic(); | ||
5089 | if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { | ||
5090 | clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | ||
5078 | return; | 5091 | return; |
5092 | } | ||
5079 | 5093 | ||
5080 | if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) | 5094 | if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) |
5081 | bnxt_cfg_rx_mode(bp); | 5095 | bnxt_cfg_rx_mode(bp); |
@@ -5099,8 +5113,19 @@ static void bnxt_sp_task(struct work_struct *work) | |||
5099 | bnxt_hwrm_tunnel_dst_port_free( | 5113 | bnxt_hwrm_tunnel_dst_port_free( |
5100 | bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); | 5114 | bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); |
5101 | } | 5115 | } |
5102 | if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) | 5116 | if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) { |
5117 | /* bnxt_reset_task() calls bnxt_close_nic() which waits | ||
5118 | * for BNXT_STATE_IN_SP_TASK to clear. | ||
5119 | */ | ||
5120 | clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | ||
5121 | rtnl_lock(); | ||
5103 | bnxt_reset_task(bp); | 5122 | bnxt_reset_task(bp); |
5123 | set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | ||
5124 | rtnl_unlock(); | ||
5125 | } | ||
5126 | |||
5127 | smp_mb__before_atomic(); | ||
5128 | clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | ||
5104 | } | 5129 | } |
5105 | 5130 | ||
5106 | static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) | 5131 | static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) |
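The state rework running through these bnxt hunks is a small handshake: the slow-path worker sets BNXT_STATE_IN_SP_TASK before re-checking BNXT_STATE_OPEN, and bnxt_close_nic() clears OPEN and then waits for IN_SP_TASK to drop, so the worker can never run against a half-torn-down device. A rough user-space analogue using C11 atomics (the kernel uses set_bit/clear_bit/test_bit with explicit smp_mb barriers; compile with -pthread):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <pthread.h>
    #include <unistd.h>

    #define ST_OPEN		(1u << 0)
    #define ST_IN_SP_TASK	(1u << 1)

    static _Atomic unsigned int state = ST_OPEN;

    static void *sp_task(void *arg)
    {
            (void)arg;
            atomic_fetch_or(&state, ST_IN_SP_TASK);	/* announce entry */
            if (atomic_load(&state) & ST_OPEN)
                    puts("sp_task: device open, doing work");
            else
                    puts("sp_task: device closing, bailing out");
            atomic_fetch_and(&state, ~ST_IN_SP_TASK);	/* announce exit */
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, sp_task, NULL);
            atomic_fetch_and(&state, ~ST_OPEN);		/* close_nic side */
            while (atomic_load(&state) & ST_IN_SP_TASK)	/* wait for worker */
                    usleep(1000);			/* kernel: msleep(20) */
            puts("close_nic: safe to tear down rings");
            pthread_join(t, NULL);
            return 0;
    }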
@@ -5179,7 +5204,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) | |||
5179 | bp->timer.function = bnxt_timer; | 5204 | bp->timer.function = bnxt_timer; |
5180 | bp->current_interval = BNXT_TIMER_INTERVAL; | 5205 | bp->current_interval = BNXT_TIMER_INTERVAL; |
5181 | 5206 | ||
5182 | bp->state = BNXT_STATE_CLOSED; | 5207 | clear_bit(BNXT_STATE_OPEN, &bp->state); |
5183 | 5208 | ||
5184 | return 0; | 5209 | return 0; |
5185 | 5210 | ||
@@ -5212,13 +5237,27 @@ init_err: | |||
5212 | static int bnxt_change_mac_addr(struct net_device *dev, void *p) | 5237 | static int bnxt_change_mac_addr(struct net_device *dev, void *p) |
5213 | { | 5238 | { |
5214 | struct sockaddr *addr = p; | 5239 | struct sockaddr *addr = p; |
5240 | struct bnxt *bp = netdev_priv(dev); | ||
5241 | int rc = 0; | ||
5215 | 5242 | ||
5216 | if (!is_valid_ether_addr(addr->sa_data)) | 5243 | if (!is_valid_ether_addr(addr->sa_data)) |
5217 | return -EADDRNOTAVAIL; | 5244 | return -EADDRNOTAVAIL; |
5218 | 5245 | ||
5246 | #ifdef CONFIG_BNXT_SRIOV | ||
5247 | if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr)) | ||
5248 | return -EADDRNOTAVAIL; | ||
5249 | #endif | ||
5250 | |||
5251 | if (ether_addr_equal(addr->sa_data, dev->dev_addr)) | ||
5252 | return 0; | ||
5253 | |||
5219 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 5254 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
5255 | if (netif_running(dev)) { | ||
5256 | bnxt_close_nic(bp, false, false); | ||
5257 | rc = bnxt_open_nic(bp, false, false); | ||
5258 | } | ||
5220 | 5259 | ||
5221 | return 0; | 5260 | return rc; |
5222 | } | 5261 | } |
5223 | 5262 | ||
5224 | /* rtnl_lock held */ | 5263 | /* rtnl_lock held */ |
@@ -5686,15 +5725,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
5686 | bnxt_set_tpa_flags(bp); | 5725 | bnxt_set_tpa_flags(bp); |
5687 | bnxt_set_ring_params(bp); | 5726 | bnxt_set_ring_params(bp); |
5688 | dflt_rings = netif_get_num_default_rss_queues(); | 5727 | dflt_rings = netif_get_num_default_rss_queues(); |
5689 | if (BNXT_PF(bp)) { | 5728 | if (BNXT_PF(bp)) |
5690 | memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN); | ||
5691 | bp->pf.max_irqs = max_irqs; | 5729 | bp->pf.max_irqs = max_irqs; |
5692 | } else { | ||
5693 | #if defined(CONFIG_BNXT_SRIOV) | 5730 | #if defined(CONFIG_BNXT_SRIOV) |
5694 | memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); | 5731 | else |
5695 | bp->vf.max_irqs = max_irqs; | 5732 | bp->vf.max_irqs = max_irqs; |
5696 | #endif | 5733 | #endif |
5697 | } | ||
5698 | bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); | 5734 | bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); |
5699 | bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); | 5735 | bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); |
5700 | bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); | 5736 | bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 674bc5159b91..f199f4cc8ffe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
@@ -925,9 +925,9 @@ struct bnxt { | |||
925 | 925 | ||
926 | struct timer_list timer; | 926 | struct timer_list timer; |
927 | 927 | ||
928 | int state; | 928 | unsigned long state; |
929 | #define BNXT_STATE_CLOSED 0 | 929 | #define BNXT_STATE_OPEN 0 |
930 | #define BNXT_STATE_OPEN 1 | 930 | #define BNXT_STATE_IN_SP_TASK 1 |
931 | 931 | ||
932 | struct bnxt_irq *irq_tbl; | 932 | struct bnxt_irq *irq_tbl; |
933 | u8 mac_addr[ETH_ALEN]; | 933 | u8 mac_addr[ETH_ALEN]; |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index f4cf68861069..ea044bbcd384 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #ifdef CONFIG_BNXT_SRIOV | 21 | #ifdef CONFIG_BNXT_SRIOV |
22 | static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) | 22 | static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) |
23 | { | 23 | { |
24 | if (bp->state != BNXT_STATE_OPEN) { | 24 | if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { |
25 | netdev_err(bp->dev, "vf ndo called though PF is down\n"); | 25 | netdev_err(bp->dev, "vf ndo called though PF is down\n"); |
26 | return -EINVAL; | 26 | return -EINVAL; |
27 | } | 27 | } |
@@ -804,10 +804,9 @@ void bnxt_update_vf_mac(struct bnxt *bp) | |||
804 | if (!is_valid_ether_addr(resp->perm_mac_address)) | 804 | if (!is_valid_ether_addr(resp->perm_mac_address)) |
805 | goto update_vf_mac_exit; | 805 | goto update_vf_mac_exit; |
806 | 806 | ||
807 | if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) | 807 | if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) |
808 | goto update_vf_mac_exit; | 808 | memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); |
809 | 809 | /* overwrite netdev dev_addr with admin VF MAC */ |
810 | memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); | ||
811 | memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); | 810 | memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); |
812 | update_vf_mac_exit: | 811 | update_vf_mac_exit: |
813 | mutex_unlock(&bp->hwrm_cmd_lock); | 812 | mutex_unlock(&bp->hwrm_cmd_lock); |
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 88c1e1a834f8..169059c92f80 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -1682,6 +1682,8 @@ static void macb_init_hw(struct macb *bp) | |||
1682 | macb_set_hwaddr(bp); | 1682 | macb_set_hwaddr(bp); |
1683 | 1683 | ||
1684 | config = macb_mdc_clk_div(bp); | 1684 | config = macb_mdc_clk_div(bp); |
1685 | if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) | ||
1686 | config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); | ||
1685 | config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ | 1687 | config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ |
1686 | config |= MACB_BIT(PAE); /* PAuse Enable */ | 1688 | config |= MACB_BIT(PAE); /* PAuse Enable */ |
1687 | config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ | 1689 | config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ |
@@ -2416,6 +2418,8 @@ static int macb_init(struct platform_device *pdev) | |||
2416 | /* Set MII management clock divider */ | 2418 | /* Set MII management clock divider */ |
2417 | val = macb_mdc_clk_div(bp); | 2419 | val = macb_mdc_clk_div(bp); |
2418 | val |= macb_dbw(bp); | 2420 | val |= macb_dbw(bp); |
2421 | if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) | ||
2422 | val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); | ||
2419 | macb_writel(bp, NCFGR, val); | 2423 | macb_writel(bp, NCFGR, val); |
2420 | 2424 | ||
2421 | return 0; | 2425 | return 0; |
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 6e1faea00ca8..d83b0db77821 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
@@ -215,12 +215,17 @@ | |||
215 | /* GEM specific NCFGR bitfields. */ | 215 | /* GEM specific NCFGR bitfields. */ |
216 | #define GEM_GBE_OFFSET 10 /* Gigabit mode enable */ | 216 | #define GEM_GBE_OFFSET 10 /* Gigabit mode enable */ |
217 | #define GEM_GBE_SIZE 1 | 217 | #define GEM_GBE_SIZE 1 |
218 | #define GEM_PCSSEL_OFFSET 11 | ||
219 | #define GEM_PCSSEL_SIZE 1 | ||
218 | #define GEM_CLK_OFFSET 18 /* MDC clock division */ | 220 | #define GEM_CLK_OFFSET 18 /* MDC clock division */ |
219 | #define GEM_CLK_SIZE 3 | 221 | #define GEM_CLK_SIZE 3 |
220 | #define GEM_DBW_OFFSET 21 /* Data bus width */ | 222 | #define GEM_DBW_OFFSET 21 /* Data bus width */ |
221 | #define GEM_DBW_SIZE 2 | 223 | #define GEM_DBW_SIZE 2 |
222 | #define GEM_RXCOEN_OFFSET 24 | 224 | #define GEM_RXCOEN_OFFSET 24 |
223 | #define GEM_RXCOEN_SIZE 1 | 225 | #define GEM_RXCOEN_SIZE 1 |
226 | #define GEM_SGMIIEN_OFFSET 27 | ||
227 | #define GEM_SGMIIEN_SIZE 1 | ||
228 | |||
224 | 229 | ||
225 | /* Constants for data bus width. */ | 230 | /* Constants for data bus width. */ |
226 | #define GEM_DBW32 0 /* 32 bit AMBA AHB data bus width */ | 231 | #define GEM_DBW32 0 /* 32 bit AMBA AHB data bus width */ |
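The macb hunks build the new NCFGR bits from these offsets with macb's GEM_BIT() helper, which token-pastes the name into its _OFFSET constant. A quick runnable check of the value the SGMII init code ORs in (GEM_BIT is replicated here from macb.h):

    #include <stdio.h>

    #define GEM_PCSSEL_OFFSET  11
    #define GEM_SGMIIEN_OFFSET 27
    #define GEM_BIT(name)      (1 << GEM_##name##_OFFSET)

    int main(void)
    {
            printf("SGMIIEN | PCSSEL = 0x%08x\n",
                   GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));	/* 0x08000800 */
            return 0;
    }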
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index d3950b20feb9..39ca6744a4e6 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h | |||
@@ -120,10 +120,9 @@ | |||
120 | * Calculated for SCLK of 700Mhz | 120 | * Calculated for SCLK of 700Mhz |
121 | * value written should be a 1/16th of what is expected | 121 | * value written should be a 1/16th of what is expected |
122 | * | 122 | * |
123 | * 1 tick per 0.05usec = value of 2.2 | 123 | * 1 tick per 0.025usec |
124 | * This 10% would be covered in CQ timer thresh value | ||
125 | */ | 124 | */ |
126 | #define NICPF_CLK_PER_INT_TICK 2 | 125 | #define NICPF_CLK_PER_INT_TICK 1 |
127 | 126 | ||
128 | /* Time to wait before we decide that a SQ is stuck. | 127 | /* Time to wait before we decide that a SQ is stuck. |
129 | * | 128 | * |
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index c561fdcb79a7..5f24d11cb16a 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c | |||
@@ -615,6 +615,21 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk) | |||
615 | return 0; | 615 | return 0; |
616 | } | 616 | } |
617 | 617 | ||
618 | static void nic_enable_vf(struct nicpf *nic, int vf, bool enable) | ||
619 | { | ||
620 | int bgx, lmac; | ||
621 | |||
622 | nic->vf_enabled[vf] = enable; | ||
623 | |||
624 | if (vf >= nic->num_vf_en) | ||
625 | return; | ||
626 | |||
627 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
628 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
629 | |||
630 | bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable); | ||
631 | } | ||
632 | |||
618 | /* Interrupt handler to handle mailbox messages from VFs */ | 633 | /* Interrupt handler to handle mailbox messages from VFs */ |
619 | static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | 634 | static void nic_handle_mbx_intr(struct nicpf *nic, int vf) |
620 | { | 635 | { |
@@ -714,14 +729,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | |||
714 | break; | 729 | break; |
715 | case NIC_MBOX_MSG_CFG_DONE: | 730 | case NIC_MBOX_MSG_CFG_DONE: |
716 | /* Last message of VF config msg sequence */ | 731 | /* Last message of VF config msg sequence */ |
717 | nic->vf_enabled[vf] = true; | 732 | nic_enable_vf(nic, vf, true); |
718 | goto unlock; | 733 | goto unlock; |
719 | case NIC_MBOX_MSG_SHUTDOWN: | 734 | case NIC_MBOX_MSG_SHUTDOWN: |
720 | /* First msg in VF teardown sequence */ | 735 | /* First msg in VF teardown sequence */ |
721 | nic->vf_enabled[vf] = false; | ||
722 | if (vf >= nic->num_vf_en) | 736 | if (vf >= nic->num_vf_en) |
723 | nic->sqs_used[vf - nic->num_vf_en] = false; | 737 | nic->sqs_used[vf - nic->num_vf_en] = false; |
724 | nic->pqs_vf[vf] = 0; | 738 | nic->pqs_vf[vf] = 0; |
739 | nic_enable_vf(nic, vf, false); | ||
725 | break; | 740 | break; |
726 | case NIC_MBOX_MSG_ALLOC_SQS: | 741 | case NIC_MBOX_MSG_ALLOC_SQS: |
727 | nic_alloc_sqs(nic, &mbx.sqs_alloc); | 742 | nic_alloc_sqs(nic, &mbx.sqs_alloc); |
@@ -1074,8 +1089,7 @@ static void nic_remove(struct pci_dev *pdev) | |||
1074 | 1089 | ||
1075 | if (nic->check_link) { | 1090 | if (nic->check_link) { |
1076 | /* Destroy work Queue */ | 1091 | /* Destroy work Queue */ |
1077 | cancel_delayed_work(&nic->dwork); | 1092 | cancel_delayed_work_sync(&nic->dwork); |
1078 | flush_workqueue(nic->check_link); | ||
1079 | destroy_workqueue(nic->check_link); | 1093 | destroy_workqueue(nic->check_link); |
1080 | } | 1094 | } |
1081 | 1095 | ||
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index af54c10945c2..a12b2e38cf61 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | |||
@@ -112,6 +112,13 @@ static int nicvf_get_settings(struct net_device *netdev, | |||
112 | 112 | ||
113 | cmd->supported = 0; | 113 | cmd->supported = 0; |
114 | cmd->transceiver = XCVR_EXTERNAL; | 114 | cmd->transceiver = XCVR_EXTERNAL; |
115 | |||
116 | if (!nic->link_up) { | ||
117 | cmd->duplex = DUPLEX_UNKNOWN; | ||
118 | ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); | ||
119 | return 0; | ||
120 | } | ||
121 | |||
115 | if (nic->speed <= 1000) { | 122 | if (nic->speed <= 1000) { |
116 | cmd->port = PORT_MII; | 123 | cmd->port = PORT_MII; |
117 | cmd->autoneg = AUTONEG_ENABLE; | 124 | cmd->autoneg = AUTONEG_ENABLE; |
@@ -125,6 +132,13 @@ static int nicvf_get_settings(struct net_device *netdev, | |||
125 | return 0; | 132 | return 0; |
126 | } | 133 | } |
127 | 134 | ||
135 | static u32 nicvf_get_link(struct net_device *netdev) | ||
136 | { | ||
137 | struct nicvf *nic = netdev_priv(netdev); | ||
138 | |||
139 | return nic->link_up; | ||
140 | } | ||
141 | |||
128 | static void nicvf_get_drvinfo(struct net_device *netdev, | 142 | static void nicvf_get_drvinfo(struct net_device *netdev, |
129 | struct ethtool_drvinfo *info) | 143 | struct ethtool_drvinfo *info) |
130 | { | 144 | { |
@@ -660,7 +674,7 @@ static int nicvf_set_channels(struct net_device *dev, | |||
660 | 674 | ||
661 | static const struct ethtool_ops nicvf_ethtool_ops = { | 675 | static const struct ethtool_ops nicvf_ethtool_ops = { |
662 | .get_settings = nicvf_get_settings, | 676 | .get_settings = nicvf_get_settings, |
663 | .get_link = ethtool_op_get_link, | 677 | .get_link = nicvf_get_link, |
664 | .get_drvinfo = nicvf_get_drvinfo, | 678 | .get_drvinfo = nicvf_get_drvinfo, |
665 | .get_msglevel = nicvf_get_msglevel, | 679 | .get_msglevel = nicvf_get_msglevel, |
666 | .set_msglevel = nicvf_set_msglevel, | 680 | .set_msglevel = nicvf_set_msglevel, |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 7f709cbdcd87..dde8dc720cd3 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
@@ -1057,6 +1057,7 @@ int nicvf_stop(struct net_device *netdev) | |||
1057 | 1057 | ||
1058 | netif_carrier_off(netdev); | 1058 | netif_carrier_off(netdev); |
1059 | netif_tx_stop_all_queues(nic->netdev); | 1059 | netif_tx_stop_all_queues(nic->netdev); |
1060 | nic->link_up = false; | ||
1060 | 1061 | ||
1061 | /* Teardown secondary qsets first */ | 1062 | /* Teardown secondary qsets first */ |
1062 | if (!nic->sqs_mode) { | 1063 | if (!nic->sqs_mode) { |
@@ -1211,9 +1212,6 @@ int nicvf_open(struct net_device *netdev) | |||
1211 | nic->drv_stats.txq_stop = 0; | 1212 | nic->drv_stats.txq_stop = 0; |
1212 | nic->drv_stats.txq_wake = 0; | 1213 | nic->drv_stats.txq_wake = 0; |
1213 | 1214 | ||
1214 | netif_carrier_on(netdev); | ||
1215 | netif_tx_start_all_queues(netdev); | ||
1216 | |||
1217 | return 0; | 1215 | return 0; |
1218 | cleanup: | 1216 | cleanup: |
1219 | nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); | 1217 | nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index e404ea837727..206b6a71a545 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c | |||
@@ -592,7 +592,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, | |||
592 | /* Set threshold value for interrupt generation */ | 592 | /* Set threshold value for interrupt generation */ |
593 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); | 593 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); |
594 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, | 594 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, |
595 | qidx, nic->cq_coalesce_usecs); | 595 | qidx, CMP_QUEUE_TIMER_THRESH); |
596 | } | 596 | } |
597 | 597 | ||
598 | /* Configures transmit queue */ | 598 | /* Configures transmit queue */ |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index fb4957d09914..033e8306e91c 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h | |||
@@ -76,7 +76,7 @@ | |||
76 | #define CMP_QSIZE CMP_QUEUE_SIZE2 | 76 | #define CMP_QSIZE CMP_QUEUE_SIZE2 |
77 | #define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) | 77 | #define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) |
78 | #define CMP_QUEUE_CQE_THRESH 0 | 78 | #define CMP_QUEUE_CQE_THRESH 0 |
79 | #define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ | 79 | #define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */ |
80 | 80 | ||
81 | #define RBDR_SIZE RBDR_SIZE0 | 81 | #define RBDR_SIZE RBDR_SIZE0 |
82 | #define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) | 82 | #define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) |
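The two thunder timer hunks are consistent with each other: the value written is 1/16th of SCLK, so at the 700 MHz named in the comment one tick is 16 / 700 MHz, about 0.023 us (rounded to 0.025 in the comment), which makes NICPF_CLK_PER_INT_TICK 1 and puts 80 CQ timer ticks near 2 us. A runnable check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            double sclk = 700e6;			/* SCLK from the comment */
            double tick_us = 16.0 / sclk * 1e6;	/* value written is 1/16th */

            printf("tick = %.4f us\n", tick_us);		/* ~0.023 us */
            printf("80 ticks = %.2f us\n", 80 * tick_us);	/* ~1.8 us   */
            return 0;
    }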
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 180aa9fabf48..9df26c2263bc 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c | |||
@@ -186,6 +186,23 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac) | |||
186 | } | 186 | } |
187 | EXPORT_SYMBOL(bgx_set_lmac_mac); | 187 | EXPORT_SYMBOL(bgx_set_lmac_mac); |
188 | 188 | ||
189 | void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) | ||
190 | { | ||
191 | struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; | ||
192 | u64 cfg; | ||
193 | |||
194 | if (!bgx) | ||
195 | return; | ||
196 | |||
197 | cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); | ||
198 | if (enable) | ||
199 | cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN; | ||
200 | else | ||
201 | cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); | ||
202 | bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); | ||
203 | } | ||
204 | EXPORT_SYMBOL(bgx_lmac_rx_tx_enable); | ||
205 | |||
189 | static void bgx_sgmii_change_link_state(struct lmac *lmac) | 206 | static void bgx_sgmii_change_link_state(struct lmac *lmac) |
190 | { | 207 | { |
191 | struct bgx *bgx = lmac->bgx; | 208 | struct bgx *bgx = lmac->bgx; |
@@ -612,6 +629,8 @@ static void bgx_poll_for_link(struct work_struct *work) | |||
612 | lmac->last_duplex = 1; | 629 | lmac->last_duplex = 1; |
613 | } else { | 630 | } else { |
614 | lmac->link_up = 0; | 631 | lmac->link_up = 0; |
632 | lmac->last_speed = SPEED_UNKNOWN; | ||
633 | lmac->last_duplex = DUPLEX_UNKNOWN; | ||
615 | } | 634 | } |
616 | 635 | ||
617 | if (lmac->last_link != lmac->link_up) { | 636 | if (lmac->last_link != lmac->link_up) { |
@@ -654,8 +673,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) | |||
654 | } | 673 | } |
655 | 674 | ||
656 | /* Enable lmac */ | 675 | /* Enable lmac */ |
657 | bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, | 676 | bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); |
658 | CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN); | ||
659 | 677 | ||
660 | /* Restore default cfg, in case low-level firmware changed it */ | 678 |
661 | bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03); | 679 | bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03); |
@@ -695,8 +713,7 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) | |||
695 | lmac = &bgx->lmac[lmacid]; | 713 | lmac = &bgx->lmac[lmacid]; |
696 | if (lmac->check_link) { | 714 | if (lmac->check_link) { |
697 | /* Destroy work queue */ | 715 | /* Destroy work queue */ |
698 | cancel_delayed_work(&lmac->dwork); | 716 | cancel_delayed_work_sync(&lmac->dwork); |
699 | flush_workqueue(lmac->check_link); | ||
700 | destroy_workqueue(lmac->check_link); | 717 | destroy_workqueue(lmac->check_link); |
701 | } | 718 | } |
702 | 719 | ||
@@ -1009,6 +1026,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1009 | struct bgx *bgx = NULL; | 1026 | struct bgx *bgx = NULL; |
1010 | u8 lmac; | 1027 | u8 lmac; |
1011 | 1028 | ||
1029 | /* Load octeon mdio driver */ | ||
1030 | octeon_mdiobus_force_mod_depencency(); | ||
1031 | |||
1012 | bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); | 1032 | bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); |
1013 | if (!bgx) | 1033 | if (!bgx) |
1014 | return -ENOMEM; | 1034 | return -ENOMEM; |
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index 07b7ec66c60d..149e179363a1 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h | |||
@@ -182,6 +182,8 @@ enum MCAST_MODE { | |||
182 | #define BCAST_ACCEPT 1 | 182 | #define BCAST_ACCEPT 1 |
183 | #define CAM_ACCEPT 1 | 183 | #define CAM_ACCEPT 1 |
184 | 184 | ||
185 | void octeon_mdiobus_force_mod_depencency(void); | ||
186 | void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable); | ||
185 | void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac); | 187 | void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac); |
186 | unsigned bgx_get_map(int node); | 188 | unsigned bgx_get_map(int node); |
187 | int bgx_get_lmac_count(int node, int bgx); | 189 | int bgx_get_lmac_count(int node, int bgx); |
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index ed41559bae77..b553409e04ad 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c | |||
@@ -98,8 +98,7 @@ static int csr0 = 0x01A00000 | 0x4800; | |||
98 | #elif defined(__mips__) | 98 | #elif defined(__mips__) |
99 | static int csr0 = 0x00200000 | 0x4000; | 99 | static int csr0 = 0x00200000 | 0x4000; |
100 | #else | 100 | #else |
101 | #warning Processor architecture undefined! | 101 | static int csr0; |
102 | static int csr0 = 0x00A00000 | 0x4800; | ||
103 | #endif | 102 | #endif |
104 | 103 | ||
105 | /* Operational parameters that usually are not changed. */ | 104 | /* Operational parameters that usually are not changed. */ |
@@ -1982,6 +1981,12 @@ static int __init tulip_init (void) | |||
1982 | pr_info("%s", version); | 1981 | pr_info("%s", version); |
1983 | #endif | 1982 | #endif |
1984 | 1983 | ||
1984 | if (!csr0) { | ||
1985 | pr_warn("tulip: unknown CPU architecture, using default csr0\n"); | ||
1986 | /* default to 8 longword cache line alignment */ | ||
1987 | csr0 = 0x00A00000 | 0x4800; | ||
1988 | } | ||
1989 | |||
1985 | /* copy module parms into globals */ | 1990 | /* copy module parms into globals */ |
1986 | tulip_rx_copybreak = rx_copybreak; | 1991 | tulip_rx_copybreak = rx_copybreak; |
1987 | tulip_max_interrupt_work = max_interrupt_work; | 1992 | tulip_max_interrupt_work = max_interrupt_work; |
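Editor's note: the change trades the build-breaking `#warning` for a runtime fallback — `csr0` stays 0 on unknown architectures and `tulip_init()` applies a safe default. Since `csr0` also appears to be exposed as a module parameter in this driver, the zero test only fires when neither the build nor the user chose a value. A self-contained sketch of the pattern, with illustrative names:

```c
#include <linux/module.h>

static int csr0;		/* 0 = no per-arch value chosen at build time */
module_param(csr0, int, 0444);	/* user override, as tulip seems to allow */

static int __init example_init(void)
{
	if (!csr0) {
		pr_warn("example: unknown CPU architecture, using default csr0\n");
		csr0 = 0x00A00000 | 0x4800;	/* 8-longword cache alignment */
	}
	return 0;
}
module_init(example_init);
```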
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 9beb3d34d4ba..3c0e4d5c5fef 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c | |||
@@ -907,7 +907,7 @@ static void init_registers(struct net_device *dev) | |||
907 | #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM) | 907 | #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM) |
908 | i |= 0x4800; | 908 | i |= 0x4800; |
909 | #else | 909 | #else |
910 | #warning Processor architecture undefined | 910 | dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n"); |
911 | i |= 0x4800; | 911 | i |= 0x4800; |
912 | #endif | 912 | #endif |
913 | iowrite32(i, ioaddr + PCIBusCfg); | 913 | iowrite32(i, ioaddr + PCIBusCfg); |
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 63c2bcf8031a..b1026689b78f 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c | |||
@@ -48,21 +48,15 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev, | |||
48 | *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); | 48 | *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); |
49 | else { /* !dst_is_aligned */ | 49 | else { /* !dst_is_aligned */ |
50 | for (i = 0; i < len; i++, reg++) { | 50 | for (i = 0; i < len; i++, reg++) { |
51 | u32 buf = | 51 | u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); |
52 | nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); | 52 | put_unaligned(buf, reg); |
53 | |||
54 | /* to accommodate word-unaligned address of "reg" | ||
55 | * we have to do memcpy_toio() instead of simple "=". | ||
56 | */ | ||
57 | memcpy_toio((void __iomem *)reg, &buf, sizeof(buf)); | ||
58 | } | 53 | } |
59 | } | 54 | } |
60 | 55 | ||
61 | /* copy last bytes (if any) */ | 56 | /* copy last bytes (if any) */ |
62 | if (last) { | 57 | if (last) { |
63 | u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); | 58 | u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); |
64 | 59 | memcpy((u8*)reg, &buf, last); | |
65 | memcpy_toio((void __iomem *)reg, &buf, last); | ||
66 | } | 60 | } |
67 | } | 61 | } |
68 | 62 | ||
@@ -367,7 +361,7 @@ static void nps_enet_send_frame(struct net_device *ndev, | |||
367 | struct nps_enet_tx_ctl tx_ctrl; | 361 | struct nps_enet_tx_ctl tx_ctrl; |
368 | short length = skb->len; | 362 | short length = skb->len; |
369 | u32 i, len = DIV_ROUND_UP(length, sizeof(u32)); | 363 | u32 i, len = DIV_ROUND_UP(length, sizeof(u32)); |
370 | u32 *src = (u32 *)virt_to_phys(skb->data); | 364 | u32 *src = (void *)skb->data; |
371 | bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32)); | 365 | bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32)); |
372 | 366 | ||
373 | tx_ctrl.value = 0; | 367 | tx_ctrl.value = 0; |
@@ -375,17 +369,11 @@ static void nps_enet_send_frame(struct net_device *ndev, | |||
375 | if (src_is_aligned) | 369 | if (src_is_aligned) |
376 | for (i = 0; i < len; i++, src++) | 370 | for (i = 0; i < len; i++, src++) |
377 | nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src); | 371 | nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src); |
378 | else { /* !src_is_aligned */ | 372 | else /* !src_is_aligned */ |
379 | for (i = 0; i < len; i++, src++) { | 373 | for (i = 0; i < len; i++, src++) |
380 | u32 buf; | 374 | nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, |
381 | 375 | get_unaligned(src)); | |
382 | /* to accommodate word-unaligned address of "src" | 376 | |
383 | * we have to do memcpy_fromio() instead of simple "=" | ||
384 | */ | ||
385 | memcpy_fromio(&buf, (void __iomem *)src, sizeof(buf)); | ||
386 | nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, buf); | ||
387 | } | ||
388 | } | ||
389 | /* Write the length of the Frame */ | 377 | /* Write the length of the Frame */ |
390 | tx_ctrl.nt = length; | 378 | tx_ctrl.nt = length; |
391 | 379 | ||
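Editor's note: the buffers in these loops are ordinary kernel memory, not MMIO, so the `memcpy_toio()`/`memcpy_fromio()` casts were wrong; `get_unaligned()`/`put_unaligned()` compile to plain loads and stores on architectures with hardware unaligned access and to byte-wise accesses elsewhere. A standalone sketch of the TX-side shape — the accessor callback is a stand-in for the driver's register write:

```c
#include <linux/kernel.h>	/* IS_ALIGNED */
#include <asm/unaligned.h>	/* get_unaligned */

/* Push 'words' u32 values from a possibly unaligned source into a
 * FIFO register; reg_write() stands in for the real accessor.
 */
static void fifo_write_words(const u32 *src, unsigned int words,
			     void (*reg_write)(u32 val))
{
	unsigned int i;

	if (IS_ALIGNED((unsigned long)src, sizeof(u32)))
		for (i = 0; i < words; i++)
			reg_write(src[i]);	/* fast path: direct loads */
	else
		for (i = 0; i < words; i++)
			reg_write(get_unaligned(src + i));
}
```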
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index ff76d4e9dc1b..bee32a9d9876 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig | |||
@@ -7,7 +7,8 @@ config NET_VENDOR_FREESCALE | |||
7 | default y | 7 | default y |
8 | depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ | 8 | depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ |
9 | M523x || M527x || M5272 || M528x || M520x || M532x || \ | 9 | M523x || M527x || M5272 || M528x || M520x || M532x || \ |
10 | ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) | 10 | ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \ |
11 | ARCH_LAYERSCAPE | ||
11 | ---help--- | 12 | ---help--- |
12 | If you have a network (Ethernet) card belonging to this class, say Y. | 13 | If you have a network (Ethernet) card belonging to this class, say Y. |
13 | 14 | ||
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index 08f5b911d96b..52e0091b4fb2 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | |||
@@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev) | |||
552 | cbd_t __iomem *prev_bd; | 552 | cbd_t __iomem *prev_bd; |
553 | cbd_t __iomem *last_tx_bd; | 553 | cbd_t __iomem *last_tx_bd; |
554 | 554 | ||
555 | last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t)); | 555 | last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t)); |
556 | 556 | ||
557 | /* get the current bd held in TBPTR and scan back from this point */ | 557 | /* get the current bd held in TBPTR and scan back from this point */ |
558 | recheck_bd = curr_tbptr = (cbd_t __iomem *) | 558 | recheck_bd = curr_tbptr = (cbd_t __iomem *) |
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 55c36230e176..40071dad1c57 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c | |||
@@ -464,7 +464,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) | |||
464 | * address). Print error message but continue anyway. | 464 | * address). Print error message but continue anyway. |
465 | */ | 465 | */ |
466 | if ((void *)tbipa > priv->map + resource_size(&res) - 4) | 466 | if ((void *)tbipa > priv->map + resource_size(&res) - 4) |
467 | dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n", | 467 | dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n", |
468 | ((void *)tbipa - priv->map) + 4); | 468 | ((void *)tbipa - priv->map) + 4); |
469 | 469 | ||
470 | iowrite32be(be32_to_cpup(prop), tbipa); | 470 | iowrite32be(be32_to_cpup(prop), tbipa); |
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 3e6b9b437497..3e233d924cce 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -647,9 +647,9 @@ static int gfar_parse_group(struct device_node *np, | |||
647 | if (model && strcasecmp(model, "FEC")) { | 647 | if (model && strcasecmp(model, "FEC")) { |
648 | gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); | 648 | gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); |
649 | gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); | 649 | gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); |
650 | if (gfar_irq(grp, TX)->irq == NO_IRQ || | 650 | if (!gfar_irq(grp, TX)->irq || |
651 | gfar_irq(grp, RX)->irq == NO_IRQ || | 651 | !gfar_irq(grp, RX)->irq || |
652 | gfar_irq(grp, ER)->irq == NO_IRQ) | 652 | !gfar_irq(grp, ER)->irq) |
653 | return -EINVAL; | 653 | return -EINVAL; |
654 | } | 654 | } |
655 | 655 | ||
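Editor's note: `irq_of_parse_and_map()` reports failure by returning 0, and `NO_IRQ` is not defined (or not 0) on every architecture, so testing the return value directly is the portable check. Minimal sketch:

```c
#include <linux/of_irq.h>

static int example_get_irq(struct device_node *np)
{
	unsigned int irq = irq_of_parse_and_map(np, 0);

	if (!irq)	/* 0 means "no mapping"; NO_IRQ plays no part */
		return -EINVAL;
	return irq;
}
```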
@@ -894,7 +894,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | |||
894 | FSL_GIANFAR_DEV_HAS_VLAN | | 894 | FSL_GIANFAR_DEV_HAS_VLAN | |
895 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | | 895 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | |
896 | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | | 896 | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | |
897 | FSL_GIANFAR_DEV_HAS_TIMER; | 897 | FSL_GIANFAR_DEV_HAS_TIMER | |
898 | FSL_GIANFAR_DEV_HAS_RX_FILER; | ||
898 | 899 | ||
899 | err = of_property_read_string(np, "phy-connection-type", &ctype); | 900 | err = of_property_read_string(np, "phy-connection-type", &ctype); |
900 | 901 | ||
@@ -1396,8 +1397,9 @@ static int gfar_probe(struct platform_device *ofdev) | |||
1396 | priv->rx_queue[i]->rxic = DEFAULT_RXIC; | 1397 | priv->rx_queue[i]->rxic = DEFAULT_RXIC; |
1397 | } | 1398 | } |
1398 | 1399 | ||
1399 | /* always enable rx filer */ | 1400 | /* Always enable rx filer if available */ |
1400 | priv->rx_filer_enable = 1; | 1401 | priv->rx_filer_enable = |
1402 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0; | ||
1401 | /* Enable most messages by default */ | 1403 | /* Enable most messages by default */ |
1402 | priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; | 1404 | priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; |
1403 | /* use priority h/w tx queue scheduling for single queue devices */ | 1405 | /* use priority h/w tx queue scheduling for single queue devices */ |
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index f266b20f9ef5..cb77667971a7 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h | |||
@@ -923,6 +923,7 @@ struct gfar { | |||
923 | #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 | 923 | #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 |
924 | #define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800 | 924 | #define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800 |
925 | #define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000 | 925 | #define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000 |
926 | #define FSL_GIANFAR_DEV_HAS_RX_FILER 0x00002000 | ||
926 | 927 | ||
927 | #if (MAXGROUPS == 2) | 928 | #if (MAXGROUPS == 2) |
928 | #define DEFAULT_MAPPING 0xAA | 929 | #define DEFAULT_MAPPING 0xAA |
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 664d0c261269..b40fba929d65 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c | |||
@@ -467,7 +467,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) | |||
467 | 467 | ||
468 | etsects->irq = platform_get_irq(dev, 0); | 468 | etsects->irq = platform_get_irq(dev, 0); |
469 | 469 | ||
470 | if (etsects->irq == NO_IRQ) { | 470 | if (etsects->irq < 0) { |
471 | pr_err("irq not in device tree\n"); | 471 | pr_err("irq not in device tree\n"); |
472 | goto no_node; | 472 | goto no_node; |
473 | } | 473 | } |
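Editor's note: `platform_get_irq()` signals failure with a negative errno, which a comparison against `NO_IRQ` never catches. The portable shape:

```c
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagate -ENXIO, -EPROBE_DEFER, ... */

	/* ... request_irq(irq, ...) ... */
	return 0;
}
```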
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 2a98eba660c0..b674414a4d72 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | |||
@@ -1259,12 +1259,8 @@ int hns_dsaf_set_mac_uc_entry( | |||
1259 | if (MAC_IS_ALL_ZEROS(mac_entry->addr) || | 1259 | if (MAC_IS_ALL_ZEROS(mac_entry->addr) || |
1260 | MAC_IS_BROADCAST(mac_entry->addr) || | 1260 | MAC_IS_BROADCAST(mac_entry->addr) || |
1261 | MAC_IS_MULTICAST(mac_entry->addr)) { | 1261 | MAC_IS_MULTICAST(mac_entry->addr)) { |
1262 | dev_err(dsaf_dev->dev, | 1262 | dev_err(dsaf_dev->dev, "set_uc %s Mac %pM err!\n", |
1263 | "set_uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n", | 1263 | dsaf_dev->ae_dev.name, mac_entry->addr); |
1264 | dsaf_dev->ae_dev.name, mac_entry->addr[0], | ||
1265 | mac_entry->addr[1], mac_entry->addr[2], | ||
1266 | mac_entry->addr[3], mac_entry->addr[4], | ||
1267 | mac_entry->addr[5]); | ||
1268 | return -EINVAL; | 1264 | return -EINVAL; |
1269 | } | 1265 | } |
1270 | 1266 | ||
@@ -1331,12 +1327,8 @@ int hns_dsaf_set_mac_mc_entry( | |||
1331 | 1327 | ||
1332 | /* mac addr check */ | 1328 | /* mac addr check */ |
1333 | if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { | 1329 | if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { |
1334 | dev_err(dsaf_dev->dev, | 1330 | dev_err(dsaf_dev->dev, "set uc %s Mac %pM err!\n", |
1335 | "set uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n", | 1331 | dsaf_dev->ae_dev.name, mac_entry->addr); |
1336 | dsaf_dev->ae_dev.name, mac_entry->addr[0], | ||
1337 | mac_entry->addr[1], mac_entry->addr[2], | ||
1338 | mac_entry->addr[3], | ||
1339 | mac_entry->addr[4], mac_entry->addr[5]); | ||
1340 | return -EINVAL; | 1332 | return -EINVAL; |
1341 | } | 1333 | } |
1342 | 1334 | ||
@@ -1410,11 +1402,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
1410 | 1402 | ||
1411 | /* check mac addr */ | 1403 | /* check mac addr */ |
1412 | if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { | 1404 | if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { |
1413 | dev_err(dsaf_dev->dev, | 1405 | dev_err(dsaf_dev->dev, "set_entry failed,addr %pM!\n", |
1414 | "set_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n", | 1406 | mac_entry->addr); |
1415 | mac_entry->addr[0], mac_entry->addr[1], | ||
1416 | mac_entry->addr[2], mac_entry->addr[3], | ||
1417 | mac_entry->addr[4], mac_entry->addr[5]); | ||
1418 | return -EINVAL; | 1407 | return -EINVAL; |
1419 | } | 1408 | } |
1420 | 1409 | ||
@@ -1497,9 +1486,8 @@ int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id, | |||
1497 | 1486 | ||
1498 | /*check mac addr */ | 1487 | /*check mac addr */ |
1499 | if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) { | 1488 | if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) { |
1500 | dev_err(dsaf_dev->dev, | 1489 | dev_err(dsaf_dev->dev, "del_entry failed,addr %pM!\n", |
1501 | "del_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n", | 1490 | addr); |
1502 | addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); | ||
1503 | return -EINVAL; | 1491 | return -EINVAL; |
1504 | } | 1492 | } |
1505 | 1493 | ||
@@ -1563,11 +1551,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
1563 | 1551 | ||
1564 | /*check mac addr */ | 1552 | /*check mac addr */ |
1565 | if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { | 1553 | if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { |
1566 | dev_err(dsaf_dev->dev, | 1554 | dev_err(dsaf_dev->dev, "del_port failed, addr %pM!\n", |
1567 | "del_port failed, addr %02x:%02x:%02x:%02x:%02x:%02x!\n", | 1555 | mac_entry->addr); |
1568 | mac_entry->addr[0], mac_entry->addr[1], | ||
1569 | mac_entry->addr[2], mac_entry->addr[3], | ||
1570 | mac_entry->addr[4], mac_entry->addr[5]); | ||
1571 | return -EINVAL; | 1556 | return -EINVAL; |
1572 | } | 1557 | } |
1573 | 1558 | ||
@@ -1644,11 +1629,8 @@ int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev, | |||
1644 | /* check macaddr */ | 1629 | /* check macaddr */ |
1645 | if (MAC_IS_ALL_ZEROS(mac_entry->addr) || | 1630 | if (MAC_IS_ALL_ZEROS(mac_entry->addr) || |
1646 | MAC_IS_BROADCAST(mac_entry->addr)) { | 1631 | MAC_IS_BROADCAST(mac_entry->addr)) { |
1647 | dev_err(dsaf_dev->dev, | 1632 | dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n", |
1648 | "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n", | 1633 | mac_entry->addr); |
1649 | mac_entry->addr[0], mac_entry->addr[1], | ||
1650 | mac_entry->addr[2], mac_entry->addr[3], | ||
1651 | mac_entry->addr[4], mac_entry->addr[5]); | ||
1652 | return -EINVAL; | 1634 | return -EINVAL; |
1653 | } | 1635 | } |
1654 | 1636 | ||
@@ -1695,11 +1677,8 @@ int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev, | |||
1695 | /*check mac addr */ | 1677 | /*check mac addr */ |
1696 | if (MAC_IS_ALL_ZEROS(mac_entry->addr) || | 1678 | if (MAC_IS_ALL_ZEROS(mac_entry->addr) || |
1697 | MAC_IS_BROADCAST(mac_entry->addr)) { | 1679 | MAC_IS_BROADCAST(mac_entry->addr)) { |
1698 | dev_err(dsaf_dev->dev, | 1680 | dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n", |
1699 | "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n", | 1681 | mac_entry->addr); |
1700 | mac_entry->addr[0], mac_entry->addr[1], | ||
1701 | mac_entry->addr[2], mac_entry->addr[3], | ||
1702 | mac_entry->addr[4], mac_entry->addr[5]); | ||
1703 | return -EINVAL; | 1682 | return -EINVAL; |
1704 | } | 1683 | } |
1705 | 1684 | ||
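Editor's note: all of these hns hunks collapse six `%02x` conversions into the kernel's `%pM` printk extension, which takes a single pointer to a 6-byte MAC address and prints it colon-separated. Minimal illustration:

```c
#include <linux/if_ether.h>	/* ETH_ALEN */
#include <linux/printk.h>

static void print_mac_example(void)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	pr_info("mac: %pM\n", mac);	/* "mac: 00:11:22:33:44:55" */
}
```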
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index b475e1bf2e6f..bdbd80423b17 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | |||
@@ -898,7 +898,7 @@ | |||
898 | #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 | 898 | #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 |
899 | #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 | 899 | #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 |
900 | 900 | ||
901 | static inline void dsaf_write_reg(void *base, u32 reg, u32 value) | 901 | static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) |
902 | { | 902 | { |
903 | u8 __iomem *reg_addr = ACCESS_ONCE(base); | 903 | u8 __iomem *reg_addr = ACCESS_ONCE(base); |
904 | 904 | ||
@@ -908,7 +908,7 @@ static inline void dsaf_write_reg(void *base, u32 reg, u32 value) | |||
908 | #define dsaf_write_dev(a, reg, value) \ | 908 | #define dsaf_write_dev(a, reg, value) \ |
909 | dsaf_write_reg((a)->io_base, (reg), (value)) | 909 | dsaf_write_reg((a)->io_base, (reg), (value)) |
910 | 910 | ||
911 | static inline u32 dsaf_read_reg(u8 *base, u32 reg) | 911 | static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg) |
912 | { | 912 | { |
913 | u8 __iomem *reg_addr = ACCESS_ONCE(base); | 913 | u8 __iomem *reg_addr = ACCESS_ONCE(base); |
914 | 914 | ||
@@ -927,8 +927,8 @@ static inline u32 dsaf_read_reg(u8 *base, u32 reg) | |||
927 | #define dsaf_set_bit(origin, shift, val) \ | 927 | #define dsaf_set_bit(origin, shift, val) \ |
928 | dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) | 928 | dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) |
929 | 929 | ||
930 | static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, | 930 | static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, |
931 | u32 val) | 931 | u32 shift, u32 val) |
932 | { | 932 | { |
933 | u32 origin = dsaf_read_reg(base, reg); | 933 | u32 origin = dsaf_read_reg(base, reg); |
934 | 934 | ||
@@ -947,7 +947,8 @@ static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, | |||
947 | #define dsaf_get_bit(origin, shift) \ | 947 | #define dsaf_get_bit(origin, shift) \ |
948 | dsaf_get_field((origin), (1ull << (shift)), (shift)) | 948 | dsaf_get_field((origin), (1ull << (shift)), (shift)) |
949 | 949 | ||
950 | static inline u32 dsaf_get_reg_field(void *base, u32 reg, u32 mask, u32 shift) | 950 | static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, |
951 | u32 shift) | ||
951 | { | 952 | { |
952 | u32 origin; | 953 | u32 origin; |
953 | 954 | ||
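Editor's note: adding `__iomem` to the accessor parameters lets sparse (`make C=1`) verify that MMIO pointers are never mixed with ordinary kernel pointers; the annotation changes no generated code. Sketch of an annotated accessor pair (illustrative names):

```c
#include <linux/io.h>

static inline void chip_write(void __iomem *base, u32 reg, u32 val)
{
	writel(val, base + reg);	/* base must come from ioremap() */
}

static inline u32 chip_read(void __iomem *base, u32 reg)
{
	return readl(base + reg);
}
```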
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 639263d5e833..7781e80896a6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | |||
@@ -627,8 +627,10 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
627 | 627 | ||
628 | /* verify the skb head is not shared */ | 628 | /* verify the skb head is not shared */ |
629 | err = skb_cow_head(skb, 0); | 629 | err = skb_cow_head(skb, 0); |
630 | if (err) | 630 | if (err) { |
631 | dev_kfree_skb(skb); | ||
631 | return NETDEV_TX_OK; | 632 | return NETDEV_TX_OK; |
633 | } | ||
632 | 634 | ||
633 | /* locate vlan header */ | 635 | /* locate vlan header */ |
634 | vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); | 636 | vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); |
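Editor's note: returning `NETDEV_TX_OK` tells the stack the driver consumed the skb, so any early-exit path must free it itself or the buffer leaks — which is exactly what the added `dev_kfree_skb()` fixes. The general shape:

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb_cow_head(skb, 0)) {
		/* We report the skb as consumed, so we must free it. */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	/* ... map and queue the frame ... */
	return NETDEV_TX_OK;
}
```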
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 0ff8f01e57ee..1fd5ea82a9bc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c | |||
@@ -567,10 +567,6 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) | |||
567 | goto init_adminq_exit; | 567 | goto init_adminq_exit; |
568 | } | 568 | } |
569 | 569 | ||
570 | /* initialize locks */ | ||
571 | mutex_init(&hw->aq.asq_mutex); | ||
572 | mutex_init(&hw->aq.arq_mutex); | ||
573 | |||
574 | /* Set up register offsets */ | 570 | /* Set up register offsets */ |
575 | i40e_adminq_init_regs(hw); | 571 | i40e_adminq_init_regs(hw); |
576 | 572 | ||
@@ -664,8 +660,6 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw) | |||
664 | i40e_shutdown_asq(hw); | 660 | i40e_shutdown_asq(hw); |
665 | i40e_shutdown_arq(hw); | 661 | i40e_shutdown_arq(hw); |
666 | 662 | ||
667 | /* destroy the locks */ | ||
668 | |||
669 | if (hw->nvm_buff.va) | 663 | if (hw->nvm_buff.va) |
670 | i40e_free_virt_mem(hw, &hw->nvm_buff); | 664 | i40e_free_virt_mem(hw, &hw->nvm_buff); |
671 | 665 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b825f978d441..4a9873ec28c7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -10295,6 +10295,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
10295 | /* set up a default setting for link flow control */ | 10295 | /* set up a default setting for link flow control */ |
10296 | pf->hw.fc.requested_mode = I40E_FC_NONE; | 10296 | pf->hw.fc.requested_mode = I40E_FC_NONE; |
10297 | 10297 | ||
10298 | /* set up the locks for the AQ, do this only once in probe | ||
10299 | * and destroy them only once in remove | ||
10300 | */ | ||
10301 | mutex_init(&hw->aq.asq_mutex); | ||
10302 | mutex_init(&hw->aq.arq_mutex); | ||
10303 | |||
10298 | err = i40e_init_adminq(hw); | 10304 | err = i40e_init_adminq(hw); |
10299 | 10305 | ||
10300 | /* provide nvm, fw, api versions */ | 10306 | /* provide nvm, fw, api versions */ |
@@ -10697,7 +10703,6 @@ static void i40e_remove(struct pci_dev *pdev) | |||
10697 | set_bit(__I40E_DOWN, &pf->state); | 10703 | set_bit(__I40E_DOWN, &pf->state); |
10698 | del_timer_sync(&pf->service_timer); | 10704 | del_timer_sync(&pf->service_timer); |
10699 | cancel_work_sync(&pf->service_task); | 10705 | cancel_work_sync(&pf->service_task); |
10700 | i40e_fdir_teardown(pf); | ||
10701 | 10706 | ||
10702 | if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { | 10707 | if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { |
10703 | i40e_free_vfs(pf); | 10708 | i40e_free_vfs(pf); |
@@ -10740,6 +10745,10 @@ static void i40e_remove(struct pci_dev *pdev) | |||
10740 | "Failed to destroy the Admin Queue resources: %d\n", | 10745 | "Failed to destroy the Admin Queue resources: %d\n", |
10741 | ret_code); | 10746 | ret_code); |
10742 | 10747 | ||
10748 | /* destroy the locks only once, here */ | ||
10749 | mutex_destroy(&hw->aq.arq_mutex); | ||
10750 | mutex_destroy(&hw->aq.asq_mutex); | ||
10751 | |||
10743 | /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ | 10752 | /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ |
10744 | i40e_clear_interrupt_scheme(pf); | 10753 | i40e_clear_interrupt_scheme(pf); |
10745 | for (i = 0; i < pf->num_alloc_vsi; i++) { | 10754 | for (i = 0; i < pf->num_alloc_vsi; i++) { |
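Editor's note: `i40e_init_adminq()` also runs on reset paths, so initializing the mutexes there could re-init a lock that is held. Moving `mutex_init()` to probe and `mutex_destroy()` to remove ties lock lifetime to the device rather than to the re-runnable queue setup (the i40evf hunks below apply the same fix). Self-contained sketch of the lifecycle, with illustrative names:

```c
#include <linux/mutex.h>

struct adminq {
	struct mutex asq_mutex;
	struct mutex arq_mutex;
};

/* probe: initialize exactly once for the device's lifetime */
static void adminq_locks_init(struct adminq *aq)
{
	mutex_init(&aq->asq_mutex);
	mutex_init(&aq->arq_mutex);
}

/* remove: destroy exactly once, after the last user is gone */
static void adminq_locks_destroy(struct adminq *aq)
{
	mutex_destroy(&aq->arq_mutex);
	mutex_destroy(&aq->asq_mutex);
}
```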
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index fd123ca60761..3f65e39b3fe4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c | |||
@@ -551,10 +551,6 @@ i40e_status i40evf_init_adminq(struct i40e_hw *hw) | |||
551 | goto init_adminq_exit; | 551 | goto init_adminq_exit; |
552 | } | 552 | } |
553 | 553 | ||
554 | /* initialize locks */ | ||
555 | mutex_init(&hw->aq.asq_mutex); | ||
556 | mutex_init(&hw->aq.arq_mutex); | ||
557 | |||
558 | /* Set up register offsets */ | 554 | /* Set up register offsets */ |
559 | i40e_adminq_init_regs(hw); | 555 | i40e_adminq_init_regs(hw); |
560 | 556 | ||
@@ -596,8 +592,6 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw) | |||
596 | i40e_shutdown_asq(hw); | 592 | i40e_shutdown_asq(hw); |
597 | i40e_shutdown_arq(hw); | 593 | i40e_shutdown_arq(hw); |
598 | 594 | ||
599 | /* destroy the locks */ | ||
600 | |||
601 | if (hw->nvm_buff.va) | 595 | if (hw->nvm_buff.va) |
602 | i40e_free_virt_mem(hw, &hw->nvm_buff); | 596 | i40e_free_virt_mem(hw, &hw->nvm_buff); |
603 | 597 | ||
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index d962164dfb0f..99d2cffae0cd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c | |||
@@ -2476,6 +2476,12 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2476 | hw->bus.device = PCI_SLOT(pdev->devfn); | 2476 | hw->bus.device = PCI_SLOT(pdev->devfn); |
2477 | hw->bus.func = PCI_FUNC(pdev->devfn); | 2477 | hw->bus.func = PCI_FUNC(pdev->devfn); |
2478 | 2478 | ||
2479 | /* set up the locks for the AQ, do this only once in probe | ||
2480 | * and destroy them only once in remove | ||
2481 | */ | ||
2482 | mutex_init(&hw->aq.asq_mutex); | ||
2483 | mutex_init(&hw->aq.arq_mutex); | ||
2484 | |||
2479 | INIT_LIST_HEAD(&adapter->mac_filter_list); | 2485 | INIT_LIST_HEAD(&adapter->mac_filter_list); |
2480 | INIT_LIST_HEAD(&adapter->vlan_filter_list); | 2486 | INIT_LIST_HEAD(&adapter->vlan_filter_list); |
2481 | 2487 | ||
@@ -2629,6 +2635,10 @@ static void i40evf_remove(struct pci_dev *pdev) | |||
2629 | if (hw->aq.asq.count) | 2635 | if (hw->aq.asq.count) |
2630 | i40evf_shutdown_adminq(hw); | 2636 | i40evf_shutdown_adminq(hw); |
2631 | 2637 | ||
2638 | /* destroy the locks only once, here */ | ||
2639 | mutex_destroy(&hw->aq.arq_mutex); | ||
2640 | mutex_destroy(&hw->aq.asq_mutex); | ||
2641 | |||
2632 | iounmap(hw->hw_addr); | 2642 | iounmap(hw->hw_addr); |
2633 | pci_release_regions(pdev); | 2643 | pci_release_regions(pdev); |
2634 | 2644 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 47395ff5d908..aed8d029b23d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -7920,6 +7920,9 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) | |||
7920 | */ | 7920 | */ |
7921 | if (netif_running(dev)) | 7921 | if (netif_running(dev)) |
7922 | ixgbe_close(dev); | 7922 | ixgbe_close(dev); |
7923 | else | ||
7924 | ixgbe_reset(adapter); | ||
7925 | |||
7923 | ixgbe_clear_interrupt_scheme(adapter); | 7926 | ixgbe_clear_interrupt_scheme(adapter); |
7924 | 7927 | ||
7925 | #ifdef CONFIG_IXGBE_DCB | 7928 | #ifdef CONFIG_IXGBE_DCB |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index e84c7f2634d3..ed622fa29dfa 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -36,7 +36,7 @@ | |||
36 | 36 | ||
37 | /* Registers */ | 37 | /* Registers */ |
38 | #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) | 38 | #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) |
39 | #define MVNETA_RXQ_HW_BUF_ALLOC BIT(1) | 39 | #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0) |
40 | #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) | 40 | #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) |
41 | #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) | 41 | #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) |
42 | #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) | 42 | #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) |
@@ -62,6 +62,7 @@ | |||
62 | #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3)) | 62 | #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3)) |
63 | #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) | 63 | #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) |
64 | #define MVNETA_BASE_ADDR_ENABLE 0x2290 | 64 | #define MVNETA_BASE_ADDR_ENABLE 0x2290 |
65 | #define MVNETA_ACCESS_PROTECT_ENABLE 0x2294 | ||
65 | #define MVNETA_PORT_CONFIG 0x2400 | 66 | #define MVNETA_PORT_CONFIG 0x2400 |
66 | #define MVNETA_UNI_PROMISC_MODE BIT(0) | 67 | #define MVNETA_UNI_PROMISC_MODE BIT(0) |
67 | #define MVNETA_DEF_RXQ(q) ((q) << 1) | 68 | #define MVNETA_DEF_RXQ(q) ((q) << 1) |
@@ -159,7 +160,7 @@ | |||
159 | 160 | ||
160 | #define MVNETA_INTR_ENABLE 0x25b8 | 161 | #define MVNETA_INTR_ENABLE 0x25b8 |
161 | #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 | 162 | #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 |
162 | #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF | 163 | #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff |
163 | 164 | ||
164 | #define MVNETA_RXQ_CMD 0x2680 | 165 | #define MVNETA_RXQ_CMD 0x2680 |
165 | #define MVNETA_RXQ_DISABLE_SHIFT 8 | 166 | #define MVNETA_RXQ_DISABLE_SHIFT 8 |
@@ -242,6 +243,7 @@ | |||
242 | #define MVNETA_VLAN_TAG_LEN 4 | 243 | #define MVNETA_VLAN_TAG_LEN 4 |
243 | 244 | ||
244 | #define MVNETA_CPU_D_CACHE_LINE_SIZE 32 | 245 | #define MVNETA_CPU_D_CACHE_LINE_SIZE 32 |
246 | #define MVNETA_TX_CSUM_DEF_SIZE 1600 | ||
245 | #define MVNETA_TX_CSUM_MAX_SIZE 9800 | 247 | #define MVNETA_TX_CSUM_MAX_SIZE 9800 |
246 | #define MVNETA_ACC_MODE_EXT 1 | 248 | #define MVNETA_ACC_MODE_EXT 1 |
247 | 249 | ||
@@ -1579,12 +1581,16 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, | |||
1579 | } | 1581 | } |
1580 | 1582 | ||
1581 | skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); | 1583 | skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); |
1582 | if (!skb) | ||
1583 | goto err_drop_frame; | ||
1584 | 1584 | ||
1585 | /* After refill, the old buffer has to be unmapped regardless | ||
1586 | * of whether the skb was successfully built or not. | ||
1587 | */ | ||
1585 | dma_unmap_single(dev->dev.parent, phys_addr, | 1588 | dma_unmap_single(dev->dev.parent, phys_addr, |
1586 | MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); | 1589 | MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); |
1587 | 1590 | ||
1591 | if (!skb) | ||
1592 | goto err_drop_frame; | ||
1593 | |||
1588 | rcvd_pkts++; | 1594 | rcvd_pkts++; |
1589 | rcvd_bytes += rx_bytes; | 1595 | rcvd_bytes += rx_bytes; |
1590 | 1596 | ||
@@ -3191,6 +3197,7 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp, | |||
3191 | } | 3197 | } |
3192 | 3198 | ||
3193 | mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); | 3199 | mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); |
3200 | mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); | ||
3194 | } | 3201 | } |
3195 | 3202 | ||
3196 | /* Power up the port */ | 3203 | /* Power up the port */ |
@@ -3250,6 +3257,7 @@ static int mvneta_probe(struct platform_device *pdev) | |||
3250 | char hw_mac_addr[ETH_ALEN]; | 3257 | char hw_mac_addr[ETH_ALEN]; |
3251 | const char *mac_from; | 3258 | const char *mac_from; |
3252 | const char *managed; | 3259 | const char *managed; |
3260 | int tx_csum_limit; | ||
3253 | int phy_mode; | 3261 | int phy_mode; |
3254 | int err; | 3262 | int err; |
3255 | int cpu; | 3263 | int cpu; |
@@ -3350,8 +3358,21 @@ static int mvneta_probe(struct platform_device *pdev) | |||
3350 | } | 3358 | } |
3351 | } | 3359 | } |
3352 | 3360 | ||
3353 | if (of_device_is_compatible(dn, "marvell,armada-370-neta")) | 3361 | if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) { |
3354 | pp->tx_csum_limit = 1600; | 3362 | if (tx_csum_limit < 0 || |
3363 | tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) { | ||
3364 | tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; | ||
3365 | dev_info(&pdev->dev, | ||
3366 | "Wrong TX csum limit in DT, set to %dB\n", | ||
3367 | MVNETA_TX_CSUM_DEF_SIZE); | ||
3368 | } | ||
3369 | } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) { | ||
3370 | tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; | ||
3371 | } else { | ||
3372 | tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE; | ||
3373 | } | ||
3374 | |||
3375 | pp->tx_csum_limit = tx_csum_limit; | ||
3355 | 3376 | ||
3356 | pp->tx_ring_size = MVNETA_MAX_TXD; | 3377 | pp->tx_ring_size = MVNETA_MAX_TXD; |
3357 | pp->rx_ring_size = MVNETA_MAX_RXD; | 3378 | pp->rx_ring_size = MVNETA_MAX_RXD; |
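Editor's note: the new logic reads the optional `tx-csum-limit` DT property, clamps out-of-range values to the 1600-byte default, and only falls back to the compatible-string heuristic when the property is absent. A sketch of read-with-validation — the macros match the hunk, the surrounding names are illustrative:

```c
#include <linux/of.h>

#define MVNETA_TX_CSUM_DEF_SIZE	1600
#define MVNETA_TX_CSUM_MAX_SIZE	9800

static u32 example_tx_csum_limit(struct device *dev, struct device_node *np)
{
	u32 limit;

	if (!of_property_read_u32(np, "tx-csum-limit", &limit)) {
		if (limit > MVNETA_TX_CSUM_MAX_SIZE) {	/* property present: validate */
			limit = MVNETA_TX_CSUM_DEF_SIZE;
			dev_info(dev, "Wrong TX csum limit in DT, set to %dB\n",
				 MVNETA_TX_CSUM_DEF_SIZE);
		}
	} else if (of_device_is_compatible(np, "marvell,armada-370-neta")) {
		limit = MVNETA_TX_CSUM_DEF_SIZE;	/* known-limited SoC */
	} else {
		limit = MVNETA_TX_CSUM_MAX_SIZE;
	}
	return limit;
}
```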
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index d9884fd15b45..a4beccf1fd46 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
@@ -3413,16 +3413,23 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, | |||
3413 | } | 3413 | } |
3414 | 3414 | ||
3415 | /* Free all buffers from the pool */ | 3415 | /* Free all buffers from the pool */ |
3416 | static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) | 3416 | static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, |
3417 | struct mvpp2_bm_pool *bm_pool) | ||
3417 | { | 3418 | { |
3418 | int i; | 3419 | int i; |
3419 | 3420 | ||
3420 | for (i = 0; i < bm_pool->buf_num; i++) { | 3421 | for (i = 0; i < bm_pool->buf_num; i++) { |
3422 | dma_addr_t buf_phys_addr; | ||
3421 | u32 vaddr; | 3423 | u32 vaddr; |
3422 | 3424 | ||
3423 | /* Get buffer virtual address (indirect access) */ | 3425 | /* Get buffer virtual address (indirect access) */ |
3424 | mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); | 3426 | buf_phys_addr = mvpp2_read(priv, |
3427 | MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); | ||
3425 | vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG); | 3428 | vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG); |
3429 | |||
3430 | dma_unmap_single(dev, buf_phys_addr, | ||
3431 | bm_pool->buf_size, DMA_FROM_DEVICE); | ||
3432 | |||
3426 | if (!vaddr) | 3433 | if (!vaddr) |
3427 | break; | 3434 | break; |
3428 | dev_kfree_skb_any((struct sk_buff *)vaddr); | 3435 | dev_kfree_skb_any((struct sk_buff *)vaddr); |
@@ -3439,7 +3446,7 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev, | |||
3439 | { | 3446 | { |
3440 | u32 val; | 3447 | u32 val; |
3441 | 3448 | ||
3442 | mvpp2_bm_bufs_free(priv, bm_pool); | 3449 | mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool); |
3443 | if (bm_pool->buf_num) { | 3450 | if (bm_pool->buf_num) { |
3444 | WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); | 3451 | WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); |
3445 | return 0; | 3452 | return 0; |
@@ -3692,7 +3699,8 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, | |||
3692 | MVPP2_BM_LONG_BUF_NUM : | 3699 | MVPP2_BM_LONG_BUF_NUM : |
3693 | MVPP2_BM_SHORT_BUF_NUM; | 3700 | MVPP2_BM_SHORT_BUF_NUM; |
3694 | else | 3701 | else |
3695 | mvpp2_bm_bufs_free(port->priv, new_pool); | 3702 | mvpp2_bm_bufs_free(port->dev->dev.parent, |
3703 | port->priv, new_pool); | ||
3696 | 3704 | ||
3697 | new_pool->pkt_size = pkt_size; | 3705 | new_pool->pkt_size = pkt_size; |
3698 | 3706 | ||
@@ -3756,7 +3764,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) | |||
3756 | int pkt_size = MVPP2_RX_PKT_SIZE(mtu); | 3764 | int pkt_size = MVPP2_RX_PKT_SIZE(mtu); |
3757 | 3765 | ||
3758 | /* Update BM pool with new buffer size */ | 3766 | /* Update BM pool with new buffer size */ |
3759 | mvpp2_bm_bufs_free(port->priv, port_pool); | 3767 | mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool); |
3760 | if (port_pool->buf_num) { | 3768 | if (port_pool->buf_num) { |
3761 | WARN(1, "cannot free all buffers in pool %d\n", port_pool->id); | 3769 | WARN(1, "cannot free all buffers in pool %d\n", port_pool->id); |
3762 | return -EIO; | 3770 | return -EIO; |
@@ -4401,11 +4409,10 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, | |||
4401 | 4409 | ||
4402 | mvpp2_txq_inc_get(txq_pcpu); | 4410 | mvpp2_txq_inc_get(txq_pcpu); |
4403 | 4411 | ||
4404 | if (!skb) | ||
4405 | continue; | ||
4406 | |||
4407 | dma_unmap_single(port->dev->dev.parent, buf_phys_addr, | 4412 | dma_unmap_single(port->dev->dev.parent, buf_phys_addr, |
4408 | skb_headlen(skb), DMA_TO_DEVICE); | 4413 | skb_headlen(skb), DMA_TO_DEVICE); |
4414 | if (!skb) | ||
4415 | continue; | ||
4409 | dev_kfree_skb_any(skb); | 4416 | dev_kfree_skb_any(skb); |
4410 | } | 4417 | } |
4411 | } | 4418 | } |
@@ -5092,7 +5099,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
5092 | struct mvpp2_rx_queue *rxq) | 5099 | struct mvpp2_rx_queue *rxq) |
5093 | { | 5100 | { |
5094 | struct net_device *dev = port->dev; | 5101 | struct net_device *dev = port->dev; |
5095 | int rx_received, rx_filled, i; | 5102 | int rx_received; |
5103 | int rx_done = 0; | ||
5096 | u32 rcvd_pkts = 0; | 5104 | u32 rcvd_pkts = 0; |
5097 | u32 rcvd_bytes = 0; | 5105 | u32 rcvd_bytes = 0; |
5098 | 5106 | ||
@@ -5101,17 +5109,18 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
5101 | if (rx_todo > rx_received) | 5109 | if (rx_todo > rx_received) |
5102 | rx_todo = rx_received; | 5110 | rx_todo = rx_received; |
5103 | 5111 | ||
5104 | rx_filled = 0; | 5112 | while (rx_done < rx_todo) { |
5105 | for (i = 0; i < rx_todo; i++) { | ||
5106 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); | 5113 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); |
5107 | struct mvpp2_bm_pool *bm_pool; | 5114 | struct mvpp2_bm_pool *bm_pool; |
5108 | struct sk_buff *skb; | 5115 | struct sk_buff *skb; |
5116 | dma_addr_t phys_addr; | ||
5109 | u32 bm, rx_status; | 5117 | u32 bm, rx_status; |
5110 | int pool, rx_bytes, err; | 5118 | int pool, rx_bytes, err; |
5111 | 5119 | ||
5112 | rx_filled++; | 5120 | rx_done++; |
5113 | rx_status = rx_desc->status; | 5121 | rx_status = rx_desc->status; |
5114 | rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE; | 5122 | rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE; |
5123 | phys_addr = rx_desc->buf_phys_addr; | ||
5115 | 5124 | ||
5116 | bm = mvpp2_bm_cookie_build(rx_desc); | 5125 | bm = mvpp2_bm_cookie_build(rx_desc); |
5117 | pool = mvpp2_bm_cookie_pool_get(bm); | 5126 | pool = mvpp2_bm_cookie_pool_get(bm); |
@@ -5128,8 +5137,10 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
5128 | * comprised by the RX descriptor. | 5137 | * comprised by the RX descriptor. |
5129 | */ | 5138 | */ |
5130 | if (rx_status & MVPP2_RXD_ERR_SUMMARY) { | 5139 | if (rx_status & MVPP2_RXD_ERR_SUMMARY) { |
5140 | err_drop_frame: | ||
5131 | dev->stats.rx_errors++; | 5141 | dev->stats.rx_errors++; |
5132 | mvpp2_rx_error(port, rx_desc); | 5142 | mvpp2_rx_error(port, rx_desc); |
5143 | /* Return the buffer to the pool */ | ||
5133 | mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, | 5144 | mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, |
5134 | rx_desc->buf_cookie); | 5145 | rx_desc->buf_cookie); |
5135 | continue; | 5146 | continue; |
@@ -5137,6 +5148,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
5137 | 5148 | ||
5138 | skb = (struct sk_buff *)rx_desc->buf_cookie; | 5149 | skb = (struct sk_buff *)rx_desc->buf_cookie; |
5139 | 5150 | ||
5151 | err = mvpp2_rx_refill(port, bm_pool, bm, 0); | ||
5152 | if (err) { | ||
5153 | netdev_err(port->dev, "failed to refill BM pools\n"); | ||
5154 | goto err_drop_frame; | ||
5155 | } | ||
5156 | |||
5157 | dma_unmap_single(dev->dev.parent, phys_addr, | ||
5158 | bm_pool->buf_size, DMA_FROM_DEVICE); | ||
5159 | |||
5140 | rcvd_pkts++; | 5160 | rcvd_pkts++; |
5141 | rcvd_bytes += rx_bytes; | 5161 | rcvd_bytes += rx_bytes; |
5142 | atomic_inc(&bm_pool->in_use); | 5162 | atomic_inc(&bm_pool->in_use); |
@@ -5147,12 +5167,6 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
5147 | mvpp2_rx_csum(port, rx_status, skb); | 5167 | mvpp2_rx_csum(port, rx_status, skb); |
5148 | 5168 | ||
5149 | napi_gro_receive(&port->napi, skb); | 5169 | napi_gro_receive(&port->napi, skb); |
5150 | |||
5151 | err = mvpp2_rx_refill(port, bm_pool, bm, 0); | ||
5152 | if (err) { | ||
5153 | netdev_err(port->dev, "failed to refill BM pools\n"); | ||
5154 | rx_filled--; | ||
5155 | } | ||
5156 | } | 5170 | } |
5157 | 5171 | ||
5158 | if (rcvd_pkts) { | 5172 | if (rcvd_pkts) { |
@@ -5166,7 +5180,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
5166 | 5180 | ||
5167 | /* Update Rx queue management counters */ | 5181 | /* Update Rx queue management counters */ |
5168 | wmb(); | 5182 | wmb(); |
5169 | mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled); | 5183 | mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); |
5170 | 5184 | ||
5171 | return rx_todo; | 5185 | return rx_todo; |
5172 | } | 5186 | } |
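Editor's note: the reordering makes the refill the commit point — a received buffer is detached from the pool (and DMA-unmapped) only after its replacement is in place, so a refill failure can recycle the old buffer through the existing error path instead of slowly draining the ring, as the old `rx_filled--` accounting did. A toy model of that ordering; every helper here is a stub, not driver API:

```c
struct buf;

extern int  refill(void);		/* stub: allocate a replacement */
extern void recycle(struct buf *b);	/* stub: return buffer to the pool */
extern void unmap_and_receive(struct buf *b);

static void rx_one(struct buf *rx_buf)
{
	if (refill()) {			/* no replacement available */
		recycle(rx_buf);	/* ring stays full; frame dropped */
		return;
	}
	unmap_and_receive(rx_buf);	/* safe: pool already refilled */
}
```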
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 2177e56ed0be..d48d5793407d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
@@ -1010,7 +1010,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, | |||
1010 | if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && | 1010 | if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && |
1011 | smp->method == IB_MGMT_METHOD_GET) || network_view) { | 1011 | smp->method == IB_MGMT_METHOD_GET) || network_view) { |
1012 | mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n", | 1012 | mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n", |
1013 | slave, smp->method, smp->mgmt_class, | 1013 | slave, smp->mgmt_class, smp->method, |
1014 | network_view ? "Network" : "Host", | 1014 | network_view ? "Network" : "Host", |
1015 | be16_to_cpu(smp->attr_id)); | 1015 | be16_to_cpu(smp->attr_id)); |
1016 | return -EPERM; | 1016 | return -EPERM; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 6fec3e993d02..cad6c44df91c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
@@ -4306,9 +4306,10 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, | |||
4306 | return -EOPNOTSUPP; | 4306 | return -EOPNOTSUPP; |
4307 | 4307 | ||
4308 | ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; | 4308 | ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; |
4309 | ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port); | 4309 | err = mlx4_slave_convert_port(dev, slave, ctrl->port); |
4310 | if (ctrl->port <= 0) | 4310 | if (err <= 0) |
4311 | return -EINVAL; | 4311 | return -EINVAL; |
4312 | ctrl->port = err; | ||
4312 | qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; | 4313 | qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; |
4313 | err = get_res(dev, slave, qpn, RES_QP, &rqp); | 4314 | err = get_res(dev, slave, qpn, RES_QP, &rqp); |
4314 | if (err) { | 4315 | if (err) { |
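Editor's note: `ctrl->port` lives in a hardware-layout struct as a narrow unsigned field, so assigning the (possibly negative) return of `mlx4_slave_convert_port()` into it before checking loses the sign and the old `<= 0` test could never fire. The fix checks the full-width return first and writes it back only on success. Generic shape, with stub names:

```c
#include <linux/types.h>

struct rule_ctrl { u8 port; /* hardware layout: unsigned byte */ };

extern int convert_port(int slave, u8 port);	/* stub: <= 0 on error */

static int fixup_port(int slave, struct rule_ctrl *ctrl)
{
	int ret = convert_port(slave, ctrl->port);

	if (ret <= 0)		/* test the int before it is narrowed */
		return -EINVAL;
	ctrl->port = ret;	/* safe: known-positive port number */
	return 0;
}
```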
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index b159ef8303cc..057665180f13 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c | |||
@@ -1326,7 +1326,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) | |||
1326 | /* Get platform resources */ | 1326 | /* Get platform resources */ |
1327 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1327 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1328 | irq = platform_get_irq(pdev, 0); | 1328 | irq = platform_get_irq(pdev, 0); |
1329 | if ((!res) || (irq < 0) || (irq >= NR_IRQS)) { | 1329 | if (!res || irq < 0) { |
1330 | dev_err(&pdev->dev, "error getting resources.\n"); | 1330 | dev_err(&pdev->dev, "error getting resources.\n"); |
1331 | ret = -ENXIO; | 1331 | ret = -ENXIO; |
1332 | goto err_exit; | 1332 | goto err_exit; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index ac17d8669b1a..1292c360390c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h | |||
@@ -299,6 +299,7 @@ struct qed_hwfn { | |||
299 | 299 | ||
300 | /* Flag indicating whether interrupts are enabled or not*/ | 300 | /* Flag indicating whether interrupts are enabled or not*/ |
301 | bool b_int_enabled; | 301 | bool b_int_enabled; |
302 | bool b_int_requested; | ||
302 | 303 | ||
303 | struct qed_mcp_info *mcp_info; | 304 | struct qed_mcp_info *mcp_info; |
304 | 305 | ||
@@ -491,6 +492,8 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn, | |||
491 | u32 input_len, u8 *input_buf, | 492 | u32 input_len, u8 *input_buf, |
492 | u32 max_size, u8 *unzip_buf); | 493 | u32 max_size, u8 *unzip_buf); |
493 | 494 | ||
495 | int qed_slowpath_irq_req(struct qed_hwfn *hwfn); | ||
496 | |||
494 | #define QED_ETH_INTERFACE_VERSION 300 | 497 | #define QED_ETH_INTERFACE_VERSION 300 |
495 | 498 | ||
496 | #endif /* _QED_H */ | 499 | #endif /* _QED_H */ |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 803b190ccada..817bbd5476ff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c | |||
@@ -1385,52 +1385,63 @@ err0: | |||
1385 | return rc; | 1385 | return rc; |
1386 | } | 1386 | } |
1387 | 1387 | ||
1388 | static u32 qed_hw_bar_size(struct qed_dev *cdev, | 1388 | static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, |
1389 | u8 bar_id) | 1389 | u8 bar_id) |
1390 | { | 1390 | { |
1391 | u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0); | 1391 | u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE |
1392 | : PGLUE_B_REG_PF_BAR1_SIZE); | ||
1393 | u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); | ||
1392 | 1394 | ||
1393 | return size / cdev->num_hwfns; | 1395 | /* Get the BAR size (in KB) from the hardware-given val */ |
1396 | return 1 << (val + 15); | ||
1394 | } | 1397 | } |
1395 | 1398 | ||
1396 | int qed_hw_prepare(struct qed_dev *cdev, | 1399 | int qed_hw_prepare(struct qed_dev *cdev, |
1397 | int personality) | 1400 | int personality) |
1398 | { | 1401 | { |
1399 | int rc, i; | 1402 | struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); |
1403 | int rc; | ||
1400 | 1404 | ||
1401 | /* Store the precompiled init data ptrs */ | 1405 | /* Store the precompiled init data ptrs */ |
1402 | qed_init_iro_array(cdev); | 1406 | qed_init_iro_array(cdev); |
1403 | 1407 | ||
1404 | /* Initialize the first hwfn - will learn number of hwfns */ | 1408 | /* Initialize the first hwfn - will learn number of hwfns */ |
1405 | rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview, | 1409 | rc = qed_hw_prepare_single(p_hwfn, |
1410 | cdev->regview, | ||
1406 | cdev->doorbells, personality); | 1411 | cdev->doorbells, personality); |
1407 | if (rc) | 1412 | if (rc) |
1408 | return rc; | 1413 | return rc; |
1409 | 1414 | ||
1410 | personality = cdev->hwfns[0].hw_info.personality; | 1415 | personality = p_hwfn->hw_info.personality; |
1411 | 1416 | ||
1412 | /* Initialize the rest of the hwfns */ | 1417 | /* Initialize the rest of the hwfns */ |
1413 | for (i = 1; i < cdev->num_hwfns; i++) { | 1418 | if (cdev->num_hwfns > 1) { |
1414 | void __iomem *p_regview, *p_doorbell; | 1419 | void __iomem *p_regview, *p_doorbell; |
1420 | u8 __iomem *addr; | ||
1421 | |||
1422 | /* adjust bar offset for second engine */ | ||
1423 | addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2; | ||
1424 | p_regview = addr; | ||
1415 | 1425 | ||
1416 | p_regview = cdev->regview + | 1426 | /* adjust doorbell bar offset for second engine */ |
1417 | i * qed_hw_bar_size(cdev, 0); | 1427 | addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2; |
1418 | p_doorbell = cdev->doorbells + | 1428 | p_doorbell = addr; |
1419 | i * qed_hw_bar_size(cdev, 1); | 1429 | |
1420 | rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview, | 1430 | /* prepare second hw function */ |
1431 | rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview, | ||
1421 | p_doorbell, personality); | 1432 | p_doorbell, personality); |
1433 | |||
1434 | /* in case of error, need to free the previously | ||
1435 | * initialized hwfn 0. | ||
1436 | */ | ||
1422 | if (rc) { | 1437 | if (rc) { |
1423 | /* Cleanup previously initialized hwfns */ | 1438 | qed_init_free(p_hwfn); |
1424 | while (--i >= 0) { | 1439 | qed_mcp_free(p_hwfn); |
1425 | qed_init_free(&cdev->hwfns[i]); | 1440 | qed_hw_hwfn_free(p_hwfn); |
1426 | qed_mcp_free(&cdev->hwfns[i]); | ||
1427 | qed_hw_hwfn_free(&cdev->hwfns[i]); | ||
1428 | } | ||
1429 | return rc; | ||
1430 | } | 1441 | } |
1431 | } | 1442 | } |
1432 | 1443 | ||
1433 | return 0; | 1444 | return rc; |
1434 | } | 1445 | } |
1435 | 1446 | ||
1436 | void qed_hw_remove(struct qed_dev *cdev) | 1447 | void qed_hw_remove(struct qed_dev *cdev) |
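Editor's note: `qed_hw_bar_size()` now decodes the per-function BAR size from a PGLUE_B register instead of halving `pci_resource_len()`, which mis-sized the window once two hw-functions shared one PCI function. The register value encodes the size as a power of two: `1 << (val + 15)` gives 32768 for `val = 0` and doubles per increment. Checking the arithmetic in isolation:

```c
/* val = 0 -> 1 << 15 = 32768
 * val = 1 -> 1 << 16 = 65536
 * val = 9 -> 1 << 24 = 16777216
 */
static u32 bar_size_from_reg(u32 val)
{
	return 1 << (val + 15);
}
```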
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index de50e84902af..9cc9d62c1fec 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c | |||
@@ -783,22 +783,16 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, | |||
783 | qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); | 783 | qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); |
784 | } | 784 | } |
785 | 785 | ||
786 | void qed_int_igu_enable(struct qed_hwfn *p_hwfn, | 786 | int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
787 | struct qed_ptt *p_ptt, | 787 | enum qed_int_mode int_mode) |
788 | enum qed_int_mode int_mode) | ||
789 | { | 788 | { |
790 | int i; | 789 | int rc, i; |
791 | |||
792 | p_hwfn->b_int_enabled = 1; | ||
793 | 790 | ||
794 | /* Mask non-link attentions */ | 791 | /* Mask non-link attentions */ |
795 | for (i = 0; i < 9; i++) | 792 | for (i = 0; i < 9; i++) |
796 | qed_wr(p_hwfn, p_ptt, | 793 | qed_wr(p_hwfn, p_ptt, |
797 | MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0); | 794 | MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0); |
798 | 795 | ||
799 | /* Enable interrupt Generation */ | ||
800 | qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); | ||
801 | |||
802 | /* Configure AEU signal change to produce attentions for link */ | 796 | /* Configure AEU signal change to produce attentions for link */ |
803 | qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); | 797 | qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); |
804 | qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); | 798 | qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); |
@@ -808,6 +802,19 @@ void qed_int_igu_enable(struct qed_hwfn *p_hwfn, | |||
808 | 802 | ||
809 | /* Unmask AEU signals toward IGU */ | 803 | /* Unmask AEU signals toward IGU */ |
810 | qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); | 804 | qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); |
805 | if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { | ||
806 | rc = qed_slowpath_irq_req(p_hwfn); | ||
807 | if (rc != 0) { | ||
808 | DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); | ||
809 | return -EINVAL; | ||
810 | } | ||
811 | p_hwfn->b_int_requested = true; | ||
812 | } | ||
813 | /* Enable interrupt Generation */ | ||
814 | qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); | ||
815 | p_hwfn->b_int_enabled = 1; | ||
816 | |||
817 | return rc; | ||
811 | } | 818 | } |
812 | 819 | ||
813 | void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, | 820 | void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, |
@@ -1127,3 +1134,11 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, | |||
1127 | 1134 | ||
1128 | return info->igu_sb_cnt; | 1135 | return info->igu_sb_cnt; |
1129 | } | 1136 | } |
1137 | |||
1138 | void qed_int_disable_post_isr_release(struct qed_dev *cdev) | ||
1139 | { | ||
1140 | int i; | ||
1141 | |||
1142 | for_each_hwfn(cdev, i) | ||
1143 | cdev->hwfns[i].b_int_requested = false; | ||
1144 | } | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 16b57518e706..51e0b09a7f47 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h | |||
@@ -169,10 +169,14 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, | |||
169 | int *p_iov_blks); | 169 | int *p_iov_blks); |
170 | 170 | ||
171 | /** | 171 | /** |
172 | * @file | 172 | * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR |
173 | * release. The API need to be called after releasing all slowpath IRQs | ||
174 | * of the device. | ||
175 | * | ||
176 | * @param cdev | ||
173 | * | 177 | * |
174 | * @brief Interrupt handler | ||
175 | */ | 178 | */ |
179 | void qed_int_disable_post_isr_release(struct qed_dev *cdev); | ||
176 | 180 | ||
177 | #define QED_CAU_DEF_RX_TIMER_RES 0 | 181 | #define QED_CAU_DEF_RX_TIMER_RES 0 |
178 | #define QED_CAU_DEF_TX_TIMER_RES 0 | 182 | #define QED_CAU_DEF_TX_TIMER_RES 0 |
@@ -366,10 +370,11 @@ void qed_int_setup(struct qed_hwfn *p_hwfn, | |||
366 | * @param p_hwfn | 370 | * @param p_hwfn |
367 | * @param p_ptt | 371 | * @param p_ptt |
368 | * @param int_mode | 372 | * @param int_mode |
373 | * | ||
374 | * @return int | ||
369 | */ | 375 | */ |
370 | void qed_int_igu_enable(struct qed_hwfn *p_hwfn, | 376 | int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
371 | struct qed_ptt *p_ptt, | 377 | enum qed_int_mode int_mode); |
372 | enum qed_int_mode int_mode); | ||
373 | 378 | ||
374 | /** | 379 | /** |
375 | * @brief - Initialize CAU status block entry | 380 | * @brief - Initialize CAU status block entry |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 947c7af72b25..174f7341c5c3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
@@ -476,41 +476,22 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance) | |||
476 | return rc; | 476 | return rc; |
477 | } | 477 | } |
478 | 478 | ||
479 | static int qed_slowpath_irq_req(struct qed_dev *cdev) | 479 | int qed_slowpath_irq_req(struct qed_hwfn *hwfn) |
480 | { | 480 | { |
481 | int i = 0, rc = 0; | 481 | struct qed_dev *cdev = hwfn->cdev; |
482 | int rc = 0; | ||
483 | u8 id; | ||
482 | 484 | ||
483 | if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { | 485 | if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { |
484 | /* Request all the slowpath MSI-X vectors */ | 486 | id = hwfn->my_id; |
485 | for (i = 0; i < cdev->num_hwfns; i++) { | 487 | snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", |
486 | snprintf(cdev->hwfns[i].name, NAME_SIZE, | 488 | id, cdev->pdev->bus->number, |
487 | "sp-%d-%02x:%02x.%02x", | 489 | PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); |
488 | i, cdev->pdev->bus->number, | 490 | rc = request_irq(cdev->int_params.msix_table[id].vector, |
489 | PCI_SLOT(cdev->pdev->devfn), | 491 | qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc); |
490 | cdev->hwfns[i].abs_pf_id); | 492 | if (!rc) |
491 | 493 | DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), | |
492 | rc = request_irq(cdev->int_params.msix_table[i].vector, | ||
493 | qed_msix_sp_int, 0, | ||
494 | cdev->hwfns[i].name, | ||
495 | cdev->hwfns[i].sp_dpc); | ||
496 | if (rc) | ||
497 | break; | ||
498 | |||
499 | DP_VERBOSE(&cdev->hwfns[i], | ||
500 | (NETIF_MSG_INTR | QED_MSG_SP), | ||
501 | "Requested slowpath MSI-X\n"); | 494 | "Requested slowpath MSI-X\n"); |
502 | } | ||
503 | |||
504 | if (i != cdev->num_hwfns) { | ||
505 | /* Free already request MSI-X vectors */ | ||
506 | for (i--; i >= 0; i--) { | ||
507 | unsigned int vec = | ||
508 | cdev->int_params.msix_table[i].vector; | ||
509 | synchronize_irq(vec); | ||
510 | free_irq(cdev->int_params.msix_table[i].vector, | ||
511 | cdev->hwfns[i].sp_dpc); | ||
512 | } | ||
513 | } | ||
514 | } else { | 495 | } else { |
515 | unsigned long flags = 0; | 496 | unsigned long flags = 0; |
516 | 497 | ||
@@ -534,13 +515,17 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev) | |||
534 | 515 | ||
535 | if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { | 516 | if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { |
536 | for_each_hwfn(cdev, i) { | 517 | for_each_hwfn(cdev, i) { |
518 | if (!cdev->hwfns[i].b_int_requested) | ||
519 | break; | ||
537 | synchronize_irq(cdev->int_params.msix_table[i].vector); | 520 | synchronize_irq(cdev->int_params.msix_table[i].vector); |
538 | free_irq(cdev->int_params.msix_table[i].vector, | 521 | free_irq(cdev->int_params.msix_table[i].vector, |
539 | cdev->hwfns[i].sp_dpc); | 522 | cdev->hwfns[i].sp_dpc); |
540 | } | 523 | } |
541 | } else { | 524 | } else { |
542 | free_irq(cdev->pdev->irq, cdev); | 525 | if (QED_LEADING_HWFN(cdev)->b_int_requested) |
526 | free_irq(cdev->pdev->irq, cdev); | ||
543 | } | 527 | } |
528 | qed_int_disable_post_isr_release(cdev); | ||
544 | } | 529 | } |
545 | 530 | ||
546 | static int qed_nic_stop(struct qed_dev *cdev) | 531 | static int qed_nic_stop(struct qed_dev *cdev) |
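Note on the b_int_requested checks added to the free path above: they rely on vectors being requested in hwfn order, and on the flag being set only after a successful request_irq() (an assumption here, since the flag-set site is not part of this hunk). Teardown can then stop at the first hwfn without the flag instead of carrying the old manual rollback loop. A minimal userspace sketch of that guarded-teardown idiom, with illustrative stand-ins rather than the driver's types:

#include <stdbool.h>
#include <stdio.h>

#define NUM_FNS 2

struct fn {
	bool int_requested;	/* assumed set only after a successful request */
};

/* Mirrors the hunk: functions come up in order, so the first one
 * without the flag means nothing after it was requested either. */
static void irq_free_all(struct fn *fns)
{
	for (int i = 0; i < NUM_FNS; i++) {
		if (!fns[i].int_requested)
			break;
		printf("free vector %d\n", i);	/* free_irq() in the driver */
		fns[i].int_requested = false;
	}
}

int main(void)
{
	/* Simulate a request that succeeded for fn 0 but never ran for fn 1. */
	struct fn fns[NUM_FNS] = { { true }, { false } };

	irq_free_all(fns);	/* frees only vector 0 */
	return 0;
}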
@@ -765,16 +750,11 @@ static int qed_slowpath_start(struct qed_dev *cdev, | |||
765 | if (rc) | 750 | if (rc) |
766 | goto err1; | 751 | goto err1; |
767 | 752 | ||
768 | /* Request the slowpath IRQ */ | ||
769 | rc = qed_slowpath_irq_req(cdev); | ||
770 | if (rc) | ||
771 | goto err2; | ||
772 | |||
773 | /* Allocate stream for unzipping */ | 753 | /* Allocate stream for unzipping */ |
774 | rc = qed_alloc_stream_mem(cdev); | 754 | rc = qed_alloc_stream_mem(cdev); |
775 | if (rc) { | 755 | if (rc) { |
776 | DP_NOTICE(cdev, "Failed to allocate stream memory\n"); | 756 | DP_NOTICE(cdev, "Failed to allocate stream memory\n"); |
777 | goto err3; | 757 | goto err2; |
778 | } | 758 | } |
779 | 759 | ||
780 | /* Start the slowpath */ | 760 | /* Start the slowpath */ |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 7a5ce5914ace..e8df12335a97 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | |||
@@ -363,4 +363,8 @@ | |||
363 | 0x7 << 0) | 363 | 0x7 << 0) |
364 | #define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \ | 364 | #define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \ |
365 | 0 | 365 | 0 |
366 | #define PGLUE_B_REG_PF_BAR0_SIZE \ | ||
367 | 0x2aae60UL | ||
368 | #define PGLUE_B_REG_PF_BAR1_SIZE \ | ||
369 | 0x2aae64UL | ||
366 | #endif | 370 | #endif |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 31a1f1eb4f56..287fadfab52d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h | |||
@@ -124,8 +124,12 @@ struct qed_spq { | |||
124 | dma_addr_t p_phys; | 124 | dma_addr_t p_phys; |
125 | struct qed_spq_entry *p_virt; | 125 | struct qed_spq_entry *p_virt; |
126 | 126 | ||
127 | /* Used as index for completions (returns on EQ by FW) */ | 127 | #define SPQ_RING_SIZE \ |
128 | u16 echo_idx; | 128 | (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element)) |
129 | |||
130 | /* Bitmap for handling out-of-order completions */ | ||
131 | DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE); | ||
132 | u8 comp_bitmap_idx; | ||
129 | 133 | ||
130 | /* Statistics */ | 134 | /* Statistics */ |
131 | u32 unlimited_pending_count; | 135 | u32 unlimited_pending_count; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 7c0b8459666e..3dd548ab8df1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c | |||
@@ -112,8 +112,6 @@ static int | |||
112 | qed_spq_fill_entry(struct qed_hwfn *p_hwfn, | 112 | qed_spq_fill_entry(struct qed_hwfn *p_hwfn, |
113 | struct qed_spq_entry *p_ent) | 113 | struct qed_spq_entry *p_ent) |
114 | { | 114 | { |
115 | p_ent->elem.hdr.echo = 0; | ||
116 | p_hwfn->p_spq->echo_idx++; | ||
117 | p_ent->flags = 0; | 115 | p_ent->flags = 0; |
118 | 116 | ||
119 | switch (p_ent->comp_mode) { | 117 | switch (p_ent->comp_mode) { |
@@ -195,10 +193,12 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, | |||
195 | struct qed_spq *p_spq, | 193 | struct qed_spq *p_spq, |
196 | struct qed_spq_entry *p_ent) | 194 | struct qed_spq_entry *p_ent) |
197 | { | 195 | { |
198 | struct qed_chain *p_chain = &p_hwfn->p_spq->chain; | 196 | struct qed_chain *p_chain = &p_hwfn->p_spq->chain; |
197 | u16 echo = qed_chain_get_prod_idx(p_chain); | ||
199 | struct slow_path_element *elem; | 198 | struct slow_path_element *elem; |
200 | struct core_db_data db; | 199 | struct core_db_data db; |
201 | 200 | ||
201 | p_ent->elem.hdr.echo = cpu_to_le16(echo); | ||
202 | elem = qed_chain_produce(p_chain); | 202 | elem = qed_chain_produce(p_chain); |
203 | if (!elem) { | 203 | if (!elem) { |
204 | DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n"); | 204 | DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n"); |
@@ -437,7 +437,9 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) | |||
437 | p_spq->comp_count = 0; | 437 | p_spq->comp_count = 0; |
438 | p_spq->comp_sent_count = 0; | 438 | p_spq->comp_sent_count = 0; |
439 | p_spq->unlimited_pending_count = 0; | 439 | p_spq->unlimited_pending_count = 0; |
440 | p_spq->echo_idx = 0; | 440 | |
441 | bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE); | ||
442 | p_spq->comp_bitmap_idx = 0; | ||
441 | 443 | ||
442 | /* SPQ cid, cannot fail */ | 444 | /* SPQ cid, cannot fail */ |
443 | qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); | 445 | qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); |
@@ -582,26 +584,32 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn, | |||
582 | struct qed_spq *p_spq = p_hwfn->p_spq; | 584 | struct qed_spq *p_spq = p_hwfn->p_spq; |
583 | 585 | ||
584 | if (p_ent->queue == &p_spq->unlimited_pending) { | 586 | if (p_ent->queue == &p_spq->unlimited_pending) { |
585 | struct qed_spq_entry *p_en2; | ||
586 | 587 | ||
587 | if (list_empty(&p_spq->free_pool)) { | 588 | if (list_empty(&p_spq->free_pool)) { |
588 | list_add_tail(&p_ent->list, &p_spq->unlimited_pending); | 589 | list_add_tail(&p_ent->list, &p_spq->unlimited_pending); |
589 | p_spq->unlimited_pending_count++; | 590 | p_spq->unlimited_pending_count++; |
590 | 591 | ||
591 | return 0; | 592 | return 0; |
592 | } | 593 | } else { |
594 | struct qed_spq_entry *p_en2; | ||
593 | 595 | ||
594 | p_en2 = list_first_entry(&p_spq->free_pool, | 596 | p_en2 = list_first_entry(&p_spq->free_pool, |
595 | struct qed_spq_entry, | 597 | struct qed_spq_entry, |
596 | list); | 598 | list); |
597 | list_del(&p_en2->list); | 599 | list_del(&p_en2->list); |
600 | |||
601 | /* Copy the ring element physical pointer to the new | ||
602 | * entry, since we are about to override the entire ring | ||
603 | * entry and don't want to lose the pointer. | ||
604 | */ | ||
605 | p_ent->elem.data_ptr = p_en2->elem.data_ptr; | ||
598 | 606 | ||
599 | /* Struct assignment */ | 607 | *p_en2 = *p_ent; |
600 | *p_en2 = *p_ent; | ||
601 | 608 | ||
602 | kfree(p_ent); | 609 | kfree(p_ent); |
603 | 610 | ||
604 | p_ent = p_en2; | 611 | p_ent = p_en2; |
612 | } | ||
605 | } | 613 | } |
606 | 614 | ||
607 | /* entry is to be placed in 'pending' queue */ | 615 | /* entry is to be placed in 'pending' queue */ |
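The data_ptr copy added above is easy to miss but is the heart of this hunk: the whole-struct assignment *p_en2 = *p_ent would otherwise wipe out p_en2's pointer into the ring. A self-contained sketch of the save-before-copy pattern, using generic types rather than the qed structures:

#include <stdio.h>

struct elem {
	void *data_ptr;		/* points at this entry's slot in the ring */
	int payload;
};

struct entry {
	struct elem elem;
};

int main(void)
{
	int ring_slot;
	struct entry pool_ent = { { &ring_slot, 0 } };	/* owns a ring slot */
	struct entry new_ent = { { NULL, 42 } };	/* no slot yet */

	/* As in the hunk: keep the pool entry's ring pointer across the
	 * whole-struct copy, otherwise NULL would overwrite it. */
	new_ent.elem.data_ptr = pool_ent.elem.data_ptr;
	pool_ent = new_ent;	/* struct assignment */

	printf("data_ptr preserved: %s\n",
	       pool_ent.elem.data_ptr == &ring_slot ? "yes" : "no");
	return 0;
}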
@@ -777,13 +785,38 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, | |||
777 | list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, | 785 | list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, |
778 | list) { | 786 | list) { |
779 | if (p_ent->elem.hdr.echo == echo) { | 787 | if (p_ent->elem.hdr.echo == echo) { |
788 | u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; | ||
789 | |||
780 | list_del(&p_ent->list); | 790 | list_del(&p_ent->list); |
781 | 791 | ||
782 | qed_chain_return_produced(&p_spq->chain); | 792 | /* Avoid overriding of SPQ entries when getting |
793 | * out-of-order completions, by marking the completions | ||
794 | * in a bitmap and increasing the chain consumer only | ||
795 | * for the first successive completed entries. | ||
796 | */ | ||
797 | bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE); | ||
798 | |||
799 | while (test_bit(p_spq->comp_bitmap_idx, | ||
800 | p_spq->p_comp_bitmap)) { | ||
801 | bitmap_clear(p_spq->p_comp_bitmap, | ||
802 | p_spq->comp_bitmap_idx, | ||
803 | SPQ_RING_SIZE); | ||
804 | p_spq->comp_bitmap_idx++; | ||
805 | qed_chain_return_produced(&p_spq->chain); | ||
806 | } | ||
807 | |||
783 | p_spq->comp_count++; | 808 | p_spq->comp_count++; |
784 | found = p_ent; | 809 | found = p_ent; |
785 | break; | 810 | break; |
786 | } | 811 | } |
812 | |||
813 | /* This is relatively uncommon - depends on scenarios | ||
814 | * which have multiple per-PF sent ramrods. | ||
815 | */ | ||
816 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, | ||
817 | "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n", | ||
818 | le16_to_cpu(echo), | ||
819 | le16_to_cpu(p_ent->elem.hdr.echo)); | ||
787 | } | 820 | } |
788 | 821 | ||
789 | /* Release lock before callback, as callback may post | 822 | /* Release lock before callback, as callback may post |
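The bitmap logic above is terse in diff form, so a self-contained model may help: each completion carries an echo index that is marked in a bitmap, and the ring consumer only advances across an unbroken prefix of completed slots, which is what protects earlier, still-pending entries from being overwritten by new producers. A sketch with a shrunken ring and plain booleans standing in for the kernel bitmap helpers, illustrative only:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

static bool comp_bitmap[RING_SIZE];	/* stands in for p_comp_bitmap */
static unsigned int comp_idx;		/* stands in for comp_bitmap_idx */
static unsigned int consumed;		/* stands in for the chain consumer */

/* Mark one completion, then advance the consumer over every contiguous
 * run of completed entries, as the hunk does with bitmap_set(),
 * test_bit() and bitmap_clear(). */
static void complete(unsigned int echo)
{
	comp_bitmap[echo % RING_SIZE] = true;
	while (comp_bitmap[comp_idx % RING_SIZE]) {
		comp_bitmap[comp_idx % RING_SIZE] = false;
		comp_idx++;
		consumed++;	/* qed_chain_return_produced() */
	}
}

int main(void)
{
	/* Completions arriving out of order: echo 1 before echo 0. */
	complete(1);
	printf("after echo 1: consumed=%u\n", consumed);	/* 0: hole at 0 */
	complete(0);
	printf("after echo 0: consumed=%u\n", consumed);	/* 2: slots 0 and 1 */
	return 0;
}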
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c index be7d7a62cc0d..b1a452f291ee 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | |||
@@ -246,7 +246,8 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter) | |||
246 | u32 state; | 246 | u32 state; |
247 | 247 | ||
248 | state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); | 248 | state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); |
249 | while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) { | 249 | while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit) { |
250 | idc->vnic_wait_limit--; | ||
250 | msleep(1000); | 251 | msleep(1000); |
251 | state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); | 252 | state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); |
252 | } | 253 | } |
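This qlcnic change and the similar --i and --tries fixes in qlge_main.c and txc43128_phy.c below correct the same poll-loop pitfall: with a post-decrement in the condition, the counter ends at -1 on timeout, so a later !count test can never detect the timeout, and the counter is also burned on the final check. A minimal standalone illustration, with a hypothetical poll_hw() stand-in rather than driver code:

#include <stdio.h>

static int state;			/* stand-in for a hardware status read */
static int poll_hw(void) { return state; }

int main(void)
{
	/* Buggy form: post-decrement leaves count == -1 on timeout,
	 * so the "!count" timeout test below could never be true. */
	int count = 3;
	while (poll_hw() != 1 && count--)
		;
	printf("post-decrement: count ends at %d\n", count);	/* -1 */

	/* Fixed form, as in these patches: decrement inside the loop
	 * (or pre-decrement), so 0 reliably means "timed out". */
	count = 3;
	while (poll_hw() != 1 && count) {
		count--;
		/* msleep(1000) would go here in the driver */
	}
	if (!count)
		printf("fixed: timeout detected, count == %d\n", count);
	return 0;
}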
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 02b7115b6aaa..997976426799 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
@@ -4211,8 +4211,9 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev) | |||
4211 | 4211 | ||
4212 | /* Wait for an outstanding reset to complete. */ | 4212 | /* Wait for an outstanding reset to complete. */ |
4213 | if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { | 4213 | if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { |
4214 | int i = 3; | 4214 | int i = 4; |
4215 | while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { | 4215 | |
4216 | while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { | ||
4216 | netif_err(qdev, ifup, qdev->ndev, | 4217 | netif_err(qdev, ifup, qdev->ndev, |
4217 | "Waiting for adapter UP...\n"); | 4218 | "Waiting for adapter UP...\n"); |
4218 | ssleep(1); | 4219 | ssleep(1); |
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index ddb2c6c6ec94..689a4a5c8dcf 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c | |||
@@ -736,9 +736,8 @@ qcaspi_netdev_tx_timeout(struct net_device *dev) | |||
736 | netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", | 736 | netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", |
737 | jiffies, jiffies - dev->trans_start); | 737 | jiffies, jiffies - dev->trans_start); |
738 | qca->net_dev->stats.tx_errors++; | 738 | qca->net_dev->stats.tx_errors++; |
739 | /* wake the queue if there is room */ | 739 | /* Trigger tx queue flush and QCA7000 reset */ |
740 | if (qcaspi_tx_ring_has_space(&qca->txr)) | 740 | qca->sync = QCASPI_SYNC_UNKNOWN; |
741 | netif_wake_queue(dev); | ||
742 | } | 741 | } |
743 | 742 | ||
744 | static int | 743 | static int |
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ee8d1ec61fab..467d41698fd5 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
@@ -905,6 +905,9 @@ static int ravb_phy_init(struct net_device *ndev) | |||
905 | netdev_info(ndev, "limited PHY to 100Mbit/s\n"); | 905 | netdev_info(ndev, "limited PHY to 100Mbit/s\n"); |
906 | } | 906 | } |
907 | 907 | ||
908 | /* 10BASE is not supported */ | ||
909 | phydev->supported &= ~PHY_10BT_FEATURES; | ||
910 | |||
908 | netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", | 911 | netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", |
909 | phydev->addr, phydev->irq, phydev->drv->name); | 912 | phydev->addr, phydev->irq, phydev->drv->name); |
910 | 913 | ||
@@ -1037,7 +1040,7 @@ static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = { | |||
1037 | "rx_queue_1_mcast_packets", | 1040 | "rx_queue_1_mcast_packets", |
1038 | "rx_queue_1_errors", | 1041 | "rx_queue_1_errors", |
1039 | "rx_queue_1_crc_errors", | 1042 | "rx_queue_1_crc_errors", |
1040 | "rx_queue_1_frame_errors_", | 1043 | "rx_queue_1_frame_errors", |
1041 | "rx_queue_1_length_errors", | 1044 | "rx_queue_1_length_errors", |
1042 | "rx_queue_1_missed_errors", | 1045 | "rx_queue_1_missed_errors", |
1043 | "rx_queue_1_over_errors", | 1046 | "rx_queue_1_over_errors", |
@@ -1225,7 +1228,7 @@ static int ravb_open(struct net_device *ndev) | |||
1225 | /* Device init */ | 1228 | /* Device init */ |
1226 | error = ravb_dmac_init(ndev); | 1229 | error = ravb_dmac_init(ndev); |
1227 | if (error) | 1230 | if (error) |
1228 | goto out_free_irq; | 1231 | goto out_free_irq2; |
1229 | ravb_emac_init(ndev); | 1232 | ravb_emac_init(ndev); |
1230 | 1233 | ||
1231 | /* Initialise PTP Clock driver */ | 1234 | /* Initialise PTP Clock driver */ |
@@ -1243,9 +1246,11 @@ static int ravb_open(struct net_device *ndev) | |||
1243 | out_ptp_stop: | 1246 | out_ptp_stop: |
1244 | /* Stop PTP Clock driver */ | 1247 | /* Stop PTP Clock driver */ |
1245 | ravb_ptp_stop(ndev); | 1248 | ravb_ptp_stop(ndev); |
1249 | out_free_irq2: | ||
1250 | if (priv->chip_id == RCAR_GEN3) | ||
1251 | free_irq(priv->emac_irq, ndev); | ||
1246 | out_free_irq: | 1252 | out_free_irq: |
1247 | free_irq(ndev->irq, ndev); | 1253 | free_irq(ndev->irq, ndev); |
1248 | free_irq(priv->emac_irq, ndev); | ||
1249 | out_napi_off: | 1254 | out_napi_off: |
1250 | napi_disable(&priv->napi[RAVB_NC]); | 1255 | napi_disable(&priv->napi[RAVB_NC]); |
1251 | napi_disable(&priv->napi[RAVB_BE]); | 1256 | napi_disable(&priv->napi[RAVB_BE]); |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index e7bab7909ed9..a0eaf50499a2 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -52,6 +52,8 @@ | |||
52 | NETIF_MSG_RX_ERR| \ | 52 | NETIF_MSG_RX_ERR| \ |
53 | NETIF_MSG_TX_ERR) | 53 | NETIF_MSG_TX_ERR) |
54 | 54 | ||
55 | #define SH_ETH_OFFSET_INVALID ((u16)~0) | ||
56 | |||
55 | #define SH_ETH_OFFSET_DEFAULTS \ | 57 | #define SH_ETH_OFFSET_DEFAULTS \ |
56 | [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID | 58 | [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID |
57 | 59 | ||
@@ -404,6 +406,28 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { | |||
404 | static void sh_eth_rcv_snd_disable(struct net_device *ndev); | 406 | static void sh_eth_rcv_snd_disable(struct net_device *ndev); |
405 | static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); | 407 | static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); |
406 | 408 | ||
409 | static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index) | ||
410 | { | ||
411 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
412 | u16 offset = mdp->reg_offset[enum_index]; | ||
413 | |||
414 | if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) | ||
415 | return; | ||
416 | |||
417 | iowrite32(data, mdp->addr + offset); | ||
418 | } | ||
419 | |||
420 | static u32 sh_eth_read(struct net_device *ndev, int enum_index) | ||
421 | { | ||
422 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
423 | u16 offset = mdp->reg_offset[enum_index]; | ||
424 | |||
425 | if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) | ||
426 | return ~0U; | ||
427 | |||
428 | return ioread32(mdp->addr + offset); | ||
429 | } | ||
430 | |||
407 | static bool sh_eth_is_gether(struct sh_eth_private *mdp) | 431 | static bool sh_eth_is_gether(struct sh_eth_private *mdp) |
408 | { | 432 | { |
409 | return mdp->reg_offset == sh_eth_offset_gigabit; | 433 | return mdp->reg_offset == sh_eth_offset_gigabit; |
@@ -1172,7 +1196,7 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
1172 | break; | 1196 | break; |
1173 | } | 1197 | } |
1174 | mdp->rx_skbuff[i] = skb; | 1198 | mdp->rx_skbuff[i] = skb; |
1175 | rxdesc->addr = dma_addr; | 1199 | rxdesc->addr = cpu_to_edmac(mdp, dma_addr); |
1176 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); | 1200 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); |
1177 | 1201 | ||
1178 | /* Rx descriptor address set */ | 1202 | /* Rx descriptor address set */ |
@@ -1403,7 +1427,8 @@ static int sh_eth_txfree(struct net_device *ndev) | |||
1403 | entry, edmac_to_cpu(mdp, txdesc->status)); | 1427 | entry, edmac_to_cpu(mdp, txdesc->status)); |
1404 | /* Free the original skb. */ | 1428 | /* Free the original skb. */ |
1405 | if (mdp->tx_skbuff[entry]) { | 1429 | if (mdp->tx_skbuff[entry]) { |
1406 | dma_unmap_single(&ndev->dev, txdesc->addr, | 1430 | dma_unmap_single(&ndev->dev, |
1431 | edmac_to_cpu(mdp, txdesc->addr), | ||
1407 | txdesc->buffer_length, DMA_TO_DEVICE); | 1432 | txdesc->buffer_length, DMA_TO_DEVICE); |
1408 | dev_kfree_skb_irq(mdp->tx_skbuff[entry]); | 1433 | dev_kfree_skb_irq(mdp->tx_skbuff[entry]); |
1409 | mdp->tx_skbuff[entry] = NULL; | 1434 | mdp->tx_skbuff[entry] = NULL; |
@@ -1462,6 +1487,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1462 | if (mdp->cd->shift_rd0) | 1487 | if (mdp->cd->shift_rd0) |
1463 | desc_status >>= 16; | 1488 | desc_status >>= 16; |
1464 | 1489 | ||
1490 | skb = mdp->rx_skbuff[entry]; | ||
1465 | if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | | 1491 | if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | |
1466 | RD_RFS5 | RD_RFS6 | RD_RFS10)) { | 1492 | RD_RFS5 | RD_RFS6 | RD_RFS10)) { |
1467 | ndev->stats.rx_errors++; | 1493 | ndev->stats.rx_errors++; |
@@ -1477,16 +1503,16 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1477 | ndev->stats.rx_missed_errors++; | 1503 | ndev->stats.rx_missed_errors++; |
1478 | if (desc_status & RD_RFS10) | 1504 | if (desc_status & RD_RFS10) |
1479 | ndev->stats.rx_over_errors++; | 1505 | ndev->stats.rx_over_errors++; |
1480 | } else { | 1506 | } else if (skb) { |
1507 | dma_addr = edmac_to_cpu(mdp, rxdesc->addr); | ||
1481 | if (!mdp->cd->hw_swap) | 1508 | if (!mdp->cd->hw_swap) |
1482 | sh_eth_soft_swap( | 1509 | sh_eth_soft_swap( |
1483 | phys_to_virt(ALIGN(rxdesc->addr, 4)), | 1510 | phys_to_virt(ALIGN(dma_addr, 4)), |
1484 | pkt_len + 2); | 1511 | pkt_len + 2); |
1485 | skb = mdp->rx_skbuff[entry]; | ||
1486 | mdp->rx_skbuff[entry] = NULL; | 1512 | mdp->rx_skbuff[entry] = NULL; |
1487 | if (mdp->cd->rpadir) | 1513 | if (mdp->cd->rpadir) |
1488 | skb_reserve(skb, NET_IP_ALIGN); | 1514 | skb_reserve(skb, NET_IP_ALIGN); |
1489 | dma_unmap_single(&ndev->dev, rxdesc->addr, | 1515 | dma_unmap_single(&ndev->dev, dma_addr, |
1490 | ALIGN(mdp->rx_buf_sz, 32), | 1516 | ALIGN(mdp->rx_buf_sz, 32), |
1491 | DMA_FROM_DEVICE); | 1517 | DMA_FROM_DEVICE); |
1492 | skb_put(skb, pkt_len); | 1518 | skb_put(skb, pkt_len); |
@@ -1523,7 +1549,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1523 | mdp->rx_skbuff[entry] = skb; | 1549 | mdp->rx_skbuff[entry] = skb; |
1524 | 1550 | ||
1525 | skb_checksum_none_assert(skb); | 1551 | skb_checksum_none_assert(skb); |
1526 | rxdesc->addr = dma_addr; | 1552 | rxdesc->addr = cpu_to_edmac(mdp, dma_addr); |
1527 | } | 1553 | } |
1528 | dma_wmb(); /* RACT bit must be set after all the above writes */ | 1554 | dma_wmb(); /* RACT bit must be set after all the above writes */ |
1529 | if (entry >= mdp->num_rx_ring - 1) | 1555 | if (entry >= mdp->num_rx_ring - 1) |
@@ -2331,8 +2357,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev) | |||
2331 | /* Free all the skbuffs in the Rx queue. */ | 2357 | /* Free all the skbuffs in the Rx queue. */ |
2332 | for (i = 0; i < mdp->num_rx_ring; i++) { | 2358 | for (i = 0; i < mdp->num_rx_ring; i++) { |
2333 | rxdesc = &mdp->rx_ring[i]; | 2359 | rxdesc = &mdp->rx_ring[i]; |
2334 | rxdesc->status = 0; | 2360 | rxdesc->status = cpu_to_edmac(mdp, 0); |
2335 | rxdesc->addr = 0xBADF00D0; | 2361 | rxdesc->addr = cpu_to_edmac(mdp, 0xBADF00D0); |
2336 | dev_kfree_skb(mdp->rx_skbuff[i]); | 2362 | dev_kfree_skb(mdp->rx_skbuff[i]); |
2337 | mdp->rx_skbuff[i] = NULL; | 2363 | mdp->rx_skbuff[i] = NULL; |
2338 | } | 2364 | } |
@@ -2350,6 +2376,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
2350 | { | 2376 | { |
2351 | struct sh_eth_private *mdp = netdev_priv(ndev); | 2377 | struct sh_eth_private *mdp = netdev_priv(ndev); |
2352 | struct sh_eth_txdesc *txdesc; | 2378 | struct sh_eth_txdesc *txdesc; |
2379 | dma_addr_t dma_addr; | ||
2353 | u32 entry; | 2380 | u32 entry; |
2354 | unsigned long flags; | 2381 | unsigned long flags; |
2355 | 2382 | ||
@@ -2372,14 +2399,14 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
2372 | txdesc = &mdp->tx_ring[entry]; | 2399 | txdesc = &mdp->tx_ring[entry]; |
2373 | /* soft swap. */ | 2400 | /* soft swap. */ |
2374 | if (!mdp->cd->hw_swap) | 2401 | if (!mdp->cd->hw_swap) |
2375 | sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)), | 2402 | sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2); |
2376 | skb->len + 2); | 2403 | dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len, |
2377 | txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, | 2404 | DMA_TO_DEVICE); |
2378 | DMA_TO_DEVICE); | 2405 | if (dma_mapping_error(&ndev->dev, dma_addr)) { |
2379 | if (dma_mapping_error(&ndev->dev, txdesc->addr)) { | ||
2380 | kfree_skb(skb); | 2406 | kfree_skb(skb); |
2381 | return NETDEV_TX_OK; | 2407 | return NETDEV_TX_OK; |
2382 | } | 2408 | } |
2409 | txdesc->addr = cpu_to_edmac(mdp, dma_addr); | ||
2383 | txdesc->buffer_length = skb->len; | 2410 | txdesc->buffer_length = skb->len; |
2384 | 2411 | ||
2385 | dma_wmb(); /* TACT bit must be set after all the above writes */ | 2412 | dma_wmb(); /* TACT bit must be set after all the above writes */ |
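All the cpu_to_edmac()/edmac_to_cpu() wrapping added above follows one rule: descriptor fields live in memory in the EDMAC's byte order, not the CPU's, so every field, DMA addresses included, must be converted on store and load; assigning the raw CPU value (the bug being fixed) only happens to work when the two orders coincide. A standalone sketch of the store side for a little-endian device, with generic helper names rather than sh_eth's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Descriptor as a little-endian DMA engine reads it from memory. */
struct dma_desc {
	uint32_t status;
	uint32_t addr;
};

/* Host order to device order; real drivers use cpu_to_le32() or, as in
 * sh_eth, a cpu_to_edmac() keyed on the device's configured endianness. */
static uint32_t cpu_to_dev32(uint32_t v)
{
	uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8),
			 (uint8_t)(v >> 16), (uint8_t)(v >> 24) };
	uint32_t out;

	memcpy(&out, b, sizeof(out));
	return out;
}

int main(void)
{
	struct dma_desc d;

	/* Every store goes through the converter; a raw CPU-order store
	 * would scramble the field on a big-endian host. */
	d.addr = cpu_to_dev32(0x12345678u);
	d.status = cpu_to_dev32(1u << 31);

	/* The device always sees the low byte first. */
	printf("first byte of addr: 0x%02x\n", ((uint8_t *)&d.addr)[0]);
	return 0;
}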
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 50382b1c9ddc..26ad1cf0bcf1 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
@@ -546,31 +546,6 @@ static inline void sh_eth_soft_swap(char *src, int len) | |||
546 | #endif | 546 | #endif |
547 | } | 547 | } |
548 | 548 | ||
549 | #define SH_ETH_OFFSET_INVALID ((u16) ~0) | ||
550 | |||
551 | static inline void sh_eth_write(struct net_device *ndev, u32 data, | ||
552 | int enum_index) | ||
553 | { | ||
554 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
555 | u16 offset = mdp->reg_offset[enum_index]; | ||
556 | |||
557 | if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) | ||
558 | return; | ||
559 | |||
560 | iowrite32(data, mdp->addr + offset); | ||
561 | } | ||
562 | |||
563 | static inline u32 sh_eth_read(struct net_device *ndev, int enum_index) | ||
564 | { | ||
565 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
566 | u16 offset = mdp->reg_offset[enum_index]; | ||
567 | |||
568 | if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) | ||
569 | return ~0U; | ||
570 | |||
571 | return ioread32(mdp->addr + offset); | ||
572 | } | ||
573 | |||
574 | static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, | 549 | static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, |
575 | int enum_index) | 550 | int enum_index) |
576 | { | 551 | { |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index bc6d21b471be..e6a084a6be12 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
@@ -3299,7 +3299,8 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, | |||
3299 | 3299 | ||
3300 | new_spec.priority = EFX_FILTER_PRI_AUTO; | 3300 | new_spec.priority = EFX_FILTER_PRI_AUTO; |
3301 | new_spec.flags = (EFX_FILTER_FLAG_RX | | 3301 | new_spec.flags = (EFX_FILTER_FLAG_RX | |
3302 | EFX_FILTER_FLAG_RX_RSS); | 3302 | (efx_rss_enabled(efx) ? |
3303 | EFX_FILTER_FLAG_RX_RSS : 0)); | ||
3303 | new_spec.dmaq_id = 0; | 3304 | new_spec.dmaq_id = 0; |
3304 | new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; | 3305 | new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; |
3305 | rc = efx_ef10_filter_push(efx, &new_spec, | 3306 | rc = efx_ef10_filter_push(efx, &new_spec, |
@@ -3921,6 +3922,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, | |||
3921 | { | 3922 | { |
3922 | struct efx_ef10_filter_table *table = efx->filter_state; | 3923 | struct efx_ef10_filter_table *table = efx->filter_state; |
3923 | struct efx_ef10_dev_addr *addr_list; | 3924 | struct efx_ef10_dev_addr *addr_list; |
3925 | enum efx_filter_flags filter_flags; | ||
3924 | struct efx_filter_spec spec; | 3926 | struct efx_filter_spec spec; |
3925 | u8 baddr[ETH_ALEN]; | 3927 | u8 baddr[ETH_ALEN]; |
3926 | unsigned int i, j; | 3928 | unsigned int i, j; |
@@ -3935,11 +3937,11 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, | |||
3935 | addr_count = table->dev_uc_count; | 3937 | addr_count = table->dev_uc_count; |
3936 | } | 3938 | } |
3937 | 3939 | ||
3940 | filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; | ||
3941 | |||
3938 | /* Insert/renew filters */ | 3942 | /* Insert/renew filters */ |
3939 | for (i = 0; i < addr_count; i++) { | 3943 | for (i = 0; i < addr_count; i++) { |
3940 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, | 3944 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
3941 | EFX_FILTER_FLAG_RX_RSS, | ||
3942 | 0); | ||
3943 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, | 3945 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, |
3944 | addr_list[i].addr); | 3946 | addr_list[i].addr); |
3945 | rc = efx_ef10_filter_insert(efx, &spec, true); | 3947 | rc = efx_ef10_filter_insert(efx, &spec, true); |
@@ -3968,9 +3970,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, | |||
3968 | 3970 | ||
3969 | if (multicast && rollback) { | 3971 | if (multicast && rollback) { |
3970 | /* Also need an Ethernet broadcast filter */ | 3972 | /* Also need an Ethernet broadcast filter */ |
3971 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, | 3973 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
3972 | EFX_FILTER_FLAG_RX_RSS, | ||
3973 | 0); | ||
3974 | eth_broadcast_addr(baddr); | 3974 | eth_broadcast_addr(baddr); |
3975 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); | 3975 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); |
3976 | rc = efx_ef10_filter_insert(efx, &spec, true); | 3976 | rc = efx_ef10_filter_insert(efx, &spec, true); |
@@ -4000,13 +4000,14 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, | |||
4000 | { | 4000 | { |
4001 | struct efx_ef10_filter_table *table = efx->filter_state; | 4001 | struct efx_ef10_filter_table *table = efx->filter_state; |
4002 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | 4002 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
4003 | enum efx_filter_flags filter_flags; | ||
4003 | struct efx_filter_spec spec; | 4004 | struct efx_filter_spec spec; |
4004 | u8 baddr[ETH_ALEN]; | 4005 | u8 baddr[ETH_ALEN]; |
4005 | int rc; | 4006 | int rc; |
4006 | 4007 | ||
4007 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, | 4008 | filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; |
4008 | EFX_FILTER_FLAG_RX_RSS, | 4009 | |
4009 | 0); | 4010 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
4010 | 4011 | ||
4011 | if (multicast) | 4012 | if (multicast) |
4012 | efx_filter_set_mc_def(&spec); | 4013 | efx_filter_set_mc_def(&spec); |
@@ -4023,8 +4024,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, | |||
4023 | if (!nic_data->workaround_26807) { | 4024 | if (!nic_data->workaround_26807) { |
4024 | /* Also need an Ethernet broadcast filter */ | 4025 | /* Also need an Ethernet broadcast filter */ |
4025 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, | 4026 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, |
4026 | EFX_FILTER_FLAG_RX_RSS, | 4027 | filter_flags, 0); |
4027 | 0); | ||
4028 | eth_broadcast_addr(baddr); | 4028 | eth_broadcast_addr(baddr); |
4029 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, | 4029 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, |
4030 | baddr); | 4030 | baddr); |
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 1aaf76c1ace8..10827476bc0b 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h | |||
@@ -76,6 +76,11 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); | |||
76 | #define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \ | 76 | #define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \ |
77 | EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) | 77 | EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) |
78 | 78 | ||
79 | static inline bool efx_rss_enabled(struct efx_nic *efx) | ||
80 | { | ||
81 | return efx->rss_spread > 1; | ||
82 | } | ||
83 | |||
79 | /* Filters */ | 84 | /* Filters */ |
80 | 85 | ||
81 | void efx_mac_reconfigure(struct efx_nic *efx); | 86 | void efx_mac_reconfigure(struct efx_nic *efx); |
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 5a1c5a8f278a..133e9e35be9e 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c | |||
@@ -2242,7 +2242,7 @@ efx_farch_filter_init_rx_auto(struct efx_nic *efx, | |||
2242 | */ | 2242 | */ |
2243 | spec->priority = EFX_FILTER_PRI_AUTO; | 2243 | spec->priority = EFX_FILTER_PRI_AUTO; |
2244 | spec->flags = (EFX_FILTER_FLAG_RX | | 2244 | spec->flags = (EFX_FILTER_FLAG_RX | |
2245 | (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | | 2245 | (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) | |
2246 | (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); | 2246 | (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); |
2247 | spec->dmaq_id = 0; | 2247 | spec->dmaq_id = 0; |
2248 | } | 2248 | } |
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c index 3d5ee3259885..194f67d9f3bf 100644 --- a/drivers/net/ethernet/sfc/txc43128_phy.c +++ b/drivers/net/ethernet/sfc/txc43128_phy.c | |||
@@ -418,7 +418,7 @@ static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd) | |||
418 | 418 | ||
419 | val |= (1 << TXC_GLCMD_LMTSWRST_LBN); | 419 | val |= (1 << TXC_GLCMD_LMTSWRST_LBN); |
420 | efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val); | 420 | efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val); |
421 | while (tries--) { | 421 | while (--tries) { |
422 | val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); | 422 | val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); |
423 | if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN))) | 423 | if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN))) |
424 | break; | 424 | break; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index 7f6f4a4fcc70..58c05acc2aab 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | |||
@@ -299,16 +299,17 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, | |||
299 | if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { | 299 | if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { |
300 | const char *rs; | 300 | const char *rs; |
301 | 301 | ||
302 | dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN; | ||
303 | |||
302 | err = of_property_read_string(np, "st,tx-retime-src", &rs); | 304 | err = of_property_read_string(np, "st,tx-retime-src", &rs); |
303 | if (err < 0) { | 305 | if (err < 0) { |
304 | dev_warn(dev, "Use internal clock source\n"); | 306 | dev_warn(dev, "Use internal clock source\n"); |
305 | dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN; | 307 | } else { |
306 | } else if (!strcasecmp(rs, "clk_125")) { | 308 | if (!strcasecmp(rs, "clk_125")) |
307 | dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125; | 309 | dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125; |
308 | } else if (!strcasecmp(rs, "txclk")) { | 310 | else if (!strcasecmp(rs, "txclk")) |
309 | dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK; | 311 | dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK; |
310 | } | 312 | } |
311 | |||
312 | dwmac->speed = SPEED_1000; | 313 | dwmac->speed = SPEED_1000; |
313 | } | 314 | } |
314 | 315 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 52b8ed9bd87c..adff46375a32 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | |||
@@ -153,7 +153,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev) | |||
153 | if (ret) | 153 | if (ret) |
154 | return ret; | 154 | return ret; |
155 | 155 | ||
156 | return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); | 156 | ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); |
157 | if (ret) | ||
158 | sun7i_gmac_exit(pdev, plat_dat->bsp_priv); | ||
159 | |||
160 | return ret; | ||
157 | } | 161 | } |
158 | 162 | ||
159 | static const struct of_device_id sun7i_dwmac_match[] = { | 163 | static const struct of_device_id sun7i_dwmac_match[] = { |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 64d8aa4e0cad..a5b869eb4678 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -185,7 +185,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) | |||
185 | priv->clk_csr = STMMAC_CSR_100_150M; | 185 | priv->clk_csr = STMMAC_CSR_100_150M; |
186 | else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) | 186 | else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) |
187 | priv->clk_csr = STMMAC_CSR_150_250M; | 187 | priv->clk_csr = STMMAC_CSR_150_250M; |
188 | else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) | 188 | else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) |
189 | priv->clk_csr = STMMAC_CSR_250_300M; | 189 | priv->clk_csr = STMMAC_CSR_250_300M; |
190 | } | 190 | } |
191 | } | 191 | } |
@@ -2232,6 +2232,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) | |||
2232 | 2232 | ||
2233 | frame_len = priv->hw->desc->get_rx_frame_len(p, coe); | 2233 | frame_len = priv->hw->desc->get_rx_frame_len(p, coe); |
2234 | 2234 | ||
2235 | /* check if frame_len fits the preallocated memory */ | ||
2236 | if (frame_len > priv->dma_buf_sz) { | ||
2237 | priv->dev->stats.rx_length_errors++; | ||
2238 | break; | ||
2239 | } | ||
2240 | |||
2235 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 | 2241 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 |
2236 | * Type frames (LLC/LLC-SNAP) | 2242 | * Type frames (LLC/LLC-SNAP) |
2237 | */ | 2243 | */ |
@@ -3040,8 +3046,6 @@ int stmmac_suspend(struct net_device *ndev) | |||
3040 | priv->hw->dma->stop_tx(priv->ioaddr); | 3046 | priv->hw->dma->stop_tx(priv->ioaddr); |
3041 | priv->hw->dma->stop_rx(priv->ioaddr); | 3047 | priv->hw->dma->stop_rx(priv->ioaddr); |
3042 | 3048 | ||
3043 | stmmac_clear_descriptors(priv); | ||
3044 | |||
3045 | /* Enable Power down mode by programming the PMT regs */ | 3049 | /* Enable Power down mode by programming the PMT regs */ |
3046 | if (device_may_wakeup(priv->device)) { | 3050 | if (device_may_wakeup(priv->device)) { |
3047 | priv->hw->mac->pmt(priv->hw, priv->wolopts); | 3051 | priv->hw->mac->pmt(priv->hw, priv->wolopts); |
@@ -3099,9 +3103,15 @@ int stmmac_resume(struct net_device *ndev) | |||
3099 | 3103 | ||
3100 | netif_device_attach(ndev); | 3104 | netif_device_attach(ndev); |
3101 | 3105 | ||
3102 | init_dma_desc_rings(ndev, GFP_ATOMIC); | 3106 | priv->cur_rx = 0; |
3107 | priv->dirty_rx = 0; | ||
3108 | priv->dirty_tx = 0; | ||
3109 | priv->cur_tx = 0; | ||
3110 | stmmac_clear_descriptors(priv); | ||
3111 | |||
3103 | stmmac_hw_setup(ndev, false); | 3112 | stmmac_hw_setup(ndev, false); |
3104 | stmmac_init_tx_coalesce(priv); | 3113 | stmmac_init_tx_coalesce(priv); |
3114 | stmmac_set_rx_mode(ndev); | ||
3105 | 3115 | ||
3106 | napi_enable(&priv->napi); | 3116 | napi_enable(&priv->napi); |
3107 | 3117 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index ebf6abc4853f..bba670c42e37 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | |||
@@ -138,7 +138,6 @@ int stmmac_mdio_reset(struct mii_bus *bus) | |||
138 | 138 | ||
139 | #ifdef CONFIG_OF | 139 | #ifdef CONFIG_OF |
140 | if (priv->device->of_node) { | 140 | if (priv->device->of_node) { |
141 | int reset_gpio, active_low; | ||
142 | 141 | ||
143 | if (data->reset_gpio < 0) { | 142 | if (data->reset_gpio < 0) { |
144 | struct device_node *np = priv->device->of_node; | 143 | struct device_node *np = priv->device->of_node; |
@@ -154,24 +153,23 @@ int stmmac_mdio_reset(struct mii_bus *bus) | |||
154 | "snps,reset-active-low"); | 153 | "snps,reset-active-low"); |
155 | of_property_read_u32_array(np, | 154 | of_property_read_u32_array(np, |
156 | "snps,reset-delays-us", data->delays, 3); | 155 | "snps,reset-delays-us", data->delays, 3); |
157 | } | ||
158 | 156 | ||
159 | reset_gpio = data->reset_gpio; | 157 | if (gpio_request(data->reset_gpio, "mdio-reset")) |
160 | active_low = data->active_low; | 158 | return 0; |
159 | } | ||
161 | 160 | ||
162 | if (!gpio_request(reset_gpio, "mdio-reset")) { | 161 | gpio_direction_output(data->reset_gpio, |
163 | gpio_direction_output(reset_gpio, active_low ? 1 : 0); | 162 | data->active_low ? 1 : 0); |
164 | if (data->delays[0]) | 163 | if (data->delays[0]) |
165 | msleep(DIV_ROUND_UP(data->delays[0], 1000)); | 164 | msleep(DIV_ROUND_UP(data->delays[0], 1000)); |
166 | 165 | ||
167 | gpio_set_value(reset_gpio, active_low ? 0 : 1); | 166 | gpio_set_value(data->reset_gpio, data->active_low ? 0 : 1); |
168 | if (data->delays[1]) | 167 | if (data->delays[1]) |
169 | msleep(DIV_ROUND_UP(data->delays[1], 1000)); | 168 | msleep(DIV_ROUND_UP(data->delays[1], 1000)); |
170 | 169 | ||
171 | gpio_set_value(reset_gpio, active_low ? 1 : 0); | 170 | gpio_set_value(data->reset_gpio, data->active_low ? 1 : 0); |
172 | if (data->delays[2]) | 171 | if (data->delays[2]) |
173 | msleep(DIV_ROUND_UP(data->delays[2], 1000)); | 172 | msleep(DIV_ROUND_UP(data->delays[2], 1000)); |
174 | } | ||
175 | } | 173 | } |
176 | #endif | 174 | #endif |
177 | 175 | ||
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c index c08be62bceba..1562ab4151e1 100644 --- a/drivers/net/ethernet/ti/cpsw-common.c +++ b/drivers/net/ethernet/ti/cpsw-common.c | |||
@@ -78,6 +78,9 @@ static int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave, | |||
78 | 78 | ||
79 | int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr) | 79 | int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr) |
80 | { | 80 | { |
81 | if (of_machine_is_compatible("ti,dm8148")) | ||
82 | return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); | ||
83 | |||
81 | if (of_machine_is_compatible("ti,am33xx")) | 84 | if (of_machine_is_compatible("ti,am33xx")) |
82 | return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); | 85 | return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); |
83 | 86 | ||