diff options
Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/stmmac_main.c')
| -rw-r--r-- | drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 91 |
1 file changed, 48 insertions, 43 deletions
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 078ad0ec8593..8543e1cfd55e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -92,8 +92,8 @@ static int tc = TC_DEFAULT; | |||
| 92 | module_param(tc, int, S_IRUGO | S_IWUSR); | 92 | module_param(tc, int, S_IRUGO | S_IWUSR); |
| 93 | MODULE_PARM_DESC(tc, "DMA threshold control value"); | 93 | MODULE_PARM_DESC(tc, "DMA threshold control value"); |
| 94 | 94 | ||
| 95 | #define DMA_BUFFER_SIZE BUF_SIZE_4KiB | 95 | #define DEFAULT_BUFSIZE 1536 |
| 96 | static int buf_sz = DMA_BUFFER_SIZE; | 96 | static int buf_sz = DEFAULT_BUFSIZE; |
| 97 | module_param(buf_sz, int, S_IRUGO | S_IWUSR); | 97 | module_param(buf_sz, int, S_IRUGO | S_IWUSR); |
| 98 | MODULE_PARM_DESC(buf_sz, "DMA buffer size"); | 98 | MODULE_PARM_DESC(buf_sz, "DMA buffer size"); |
| 99 | 99 | ||
| @@ -136,8 +136,8 @@ static void stmmac_verify_args(void) | |||
| 136 | dma_rxsize = DMA_RX_SIZE; | 136 | dma_rxsize = DMA_RX_SIZE; |
| 137 | if (unlikely(dma_txsize < 0)) | 137 | if (unlikely(dma_txsize < 0)) |
| 138 | dma_txsize = DMA_TX_SIZE; | 138 | dma_txsize = DMA_TX_SIZE; |
| 139 | if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB))) | 139 | if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) |
| 140 | buf_sz = DMA_BUFFER_SIZE; | 140 | buf_sz = DEFAULT_BUFSIZE; |
| 141 | if (unlikely(flow_ctrl > 1)) | 141 | if (unlikely(flow_ctrl > 1)) |
| 142 | flow_ctrl = FLOW_AUTO; | 142 | flow_ctrl = FLOW_AUTO; |
| 143 | else if (likely(flow_ctrl < 0)) | 143 | else if (likely(flow_ctrl < 0)) |
| @@ -286,10 +286,25 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
| 286 | 286 | ||
| 287 | /* MAC core supports the EEE feature. */ | 287 | /* MAC core supports the EEE feature. */ |
| 288 | if (priv->dma_cap.eee) { | 288 | if (priv->dma_cap.eee) { |
| 289 | int tx_lpi_timer = priv->tx_lpi_timer; | ||
| 290 | |||
| 289 | /* Check if the PHY supports EEE */ | 291 | /* Check if the PHY supports EEE */ |
| 290 | if (phy_init_eee(priv->phydev, 1)) | 292 | if (phy_init_eee(priv->phydev, 1)) { |
| 293 | /* To manage at run-time if the EEE cannot be supported | ||
| 294 | * anymore (for example because the lp caps have been | ||
| 295 | * changed). | ||
| 296 | * In that case the driver disable own timers. | ||
| 297 | */ | ||
| 298 | if (priv->eee_active) { | ||
| 299 | pr_debug("stmmac: disable EEE\n"); | ||
| 300 | del_timer_sync(&priv->eee_ctrl_timer); | ||
| 301 | priv->hw->mac->set_eee_timer(priv->ioaddr, 0, | ||
| 302 | tx_lpi_timer); | ||
| 303 | } | ||
| 304 | priv->eee_active = 0; | ||
| 291 | goto out; | 305 | goto out; |
| 292 | 306 | } | |
| 307 | /* Activate the EEE and start timers */ | ||
| 293 | if (!priv->eee_active) { | 308 | if (!priv->eee_active) { |
| 294 | priv->eee_active = 1; | 309 | priv->eee_active = 1; |
| 295 | init_timer(&priv->eee_ctrl_timer); | 310 | init_timer(&priv->eee_ctrl_timer); |
| @@ -300,13 +315,13 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
| 300 | 315 | ||
| 301 | priv->hw->mac->set_eee_timer(priv->ioaddr, | 316 | priv->hw->mac->set_eee_timer(priv->ioaddr, |
| 302 | STMMAC_DEFAULT_LIT_LS, | 317 | STMMAC_DEFAULT_LIT_LS, |
| 303 | priv->tx_lpi_timer); | 318 | tx_lpi_timer); |
| 304 | } else | 319 | } else |
| 305 | /* Set HW EEE according to the speed */ | 320 | /* Set HW EEE according to the speed */ |
| 306 | priv->hw->mac->set_eee_pls(priv->ioaddr, | 321 | priv->hw->mac->set_eee_pls(priv->ioaddr, |
| 307 | priv->phydev->link); | 322 | priv->phydev->link); |
| 308 | 323 | ||
| 309 | pr_info("stmmac: Energy-Efficient Ethernet initialized\n"); | 324 | pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); |
| 310 | 325 | ||
| 311 | ret = true; | 326 | ret = true; |
| 312 | } | 327 | } |
| @@ -886,10 +901,10 @@ static int stmmac_set_bfsize(int mtu, int bufsize) | |||
| 886 | ret = BUF_SIZE_8KiB; | 901 | ret = BUF_SIZE_8KiB; |
| 887 | else if (mtu >= BUF_SIZE_2KiB) | 902 | else if (mtu >= BUF_SIZE_2KiB) |
| 888 | ret = BUF_SIZE_4KiB; | 903 | ret = BUF_SIZE_4KiB; |
| 889 | else if (mtu >= DMA_BUFFER_SIZE) | 904 | else if (mtu > DEFAULT_BUFSIZE) |
| 890 | ret = BUF_SIZE_2KiB; | 905 | ret = BUF_SIZE_2KiB; |
| 891 | else | 906 | else |
| 892 | ret = DMA_BUFFER_SIZE; | 907 | ret = DEFAULT_BUFSIZE; |
| 893 | 908 | ||
| 894 | return ret; | 909 | return ret; |
| 895 | } | 910 | } |
| @@ -951,9 +966,9 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
| 951 | 966 | ||
| 952 | p->des2 = priv->rx_skbuff_dma[i]; | 967 | p->des2 = priv->rx_skbuff_dma[i]; |
| 953 | 968 | ||
| 954 | if ((priv->mode == STMMAC_RING_MODE) && | 969 | if ((priv->hw->mode->init_desc3) && |
| 955 | (priv->dma_buf_sz == BUF_SIZE_16KiB)) | 970 | (priv->dma_buf_sz == BUF_SIZE_16KiB)) |
| 956 | priv->hw->ring->init_desc3(p); | 971 | priv->hw->mode->init_desc3(p); |
| 957 | 972 | ||
| 958 | return 0; | 973 | return 0; |
| 959 | } | 974 | } |
| @@ -984,11 +999,8 @@ static int init_dma_desc_rings(struct net_device *dev) | |||
| 984 | unsigned int bfsize = 0; | 999 | unsigned int bfsize = 0; |
| 985 | int ret = -ENOMEM; | 1000 | int ret = -ENOMEM; |
| 986 | 1001 | ||
| 987 | /* Set the max buffer size according to the DESC mode | 1002 | if (priv->hw->mode->set_16kib_bfsize) |
| 988 | * and the MTU. Note that RING mode allows 16KiB bsize. | 1003 | bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); |
| 989 | */ | ||
| 990 | if (priv->mode == STMMAC_RING_MODE) | ||
| 991 | bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu); | ||
| 992 | 1004 | ||
| 993 | if (bfsize < BUF_SIZE_16KiB) | 1005 | if (bfsize < BUF_SIZE_16KiB) |
| 994 | bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); | 1006 | bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); |
| @@ -1029,15 +1041,15 @@ static int init_dma_desc_rings(struct net_device *dev) | |||
| 1029 | /* Setup the chained descriptor addresses */ | 1041 | /* Setup the chained descriptor addresses */ |
| 1030 | if (priv->mode == STMMAC_CHAIN_MODE) { | 1042 | if (priv->mode == STMMAC_CHAIN_MODE) { |
| 1031 | if (priv->extend_desc) { | 1043 | if (priv->extend_desc) { |
| 1032 | priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy, | 1044 | priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy, |
| 1033 | rxsize, 1); | 1045 | rxsize, 1); |
| 1034 | priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy, | 1046 | priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy, |
| 1035 | txsize, 1); | 1047 | txsize, 1); |
| 1036 | } else { | 1048 | } else { |
| 1037 | priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy, | 1049 | priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy, |
| 1038 | rxsize, 0); | 1050 | rxsize, 0); |
| 1039 | priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy, | 1051 | priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy, |
| 1040 | txsize, 0); | 1052 | txsize, 0); |
| 1041 | } | 1053 | } |
| 1042 | } | 1054 | } |
| 1043 | 1055 | ||
| @@ -1288,7 +1300,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) | |||
| 1288 | DMA_TO_DEVICE); | 1300 | DMA_TO_DEVICE); |
| 1289 | priv->tx_skbuff_dma[entry] = 0; | 1301 | priv->tx_skbuff_dma[entry] = 0; |
| 1290 | } | 1302 | } |
| 1291 | priv->hw->ring->clean_desc3(priv, p); | 1303 | priv->hw->mode->clean_desc3(priv, p); |
| 1292 | 1304 | ||
| 1293 | if (likely(skb != NULL)) { | 1305 | if (likely(skb != NULL)) { |
| 1294 | dev_kfree_skb(skb); | 1306 | dev_kfree_skb(skb); |
| @@ -1844,6 +1856,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1844 | int nfrags = skb_shinfo(skb)->nr_frags; | 1856 | int nfrags = skb_shinfo(skb)->nr_frags; |
| 1845 | struct dma_desc *desc, *first; | 1857 | struct dma_desc *desc, *first; |
| 1846 | unsigned int nopaged_len = skb_headlen(skb); | 1858 | unsigned int nopaged_len = skb_headlen(skb); |
| 1859 | unsigned int enh_desc = priv->plat->enh_desc; | ||
| 1847 | 1860 | ||
| 1848 | if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { | 1861 | if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { |
| 1849 | if (!netif_queue_stopped(dev)) { | 1862 | if (!netif_queue_stopped(dev)) { |
| @@ -1871,27 +1884,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1871 | first = desc; | 1884 | first = desc; |
| 1872 | 1885 | ||
| 1873 | /* To program the descriptors according to the size of the frame */ | 1886 | /* To program the descriptors according to the size of the frame */ |
| 1874 | if (priv->mode == STMMAC_RING_MODE) { | 1887 | if (enh_desc) |
| 1875 | is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len, | 1888 | is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); |
| 1876 | priv->plat->enh_desc); | 1889 | |
| 1877 | if (unlikely(is_jumbo)) | ||
| 1878 | entry = priv->hw->ring->jumbo_frm(priv, skb, | ||
| 1879 | csum_insertion); | ||
| 1880 | } else { | ||
| 1881 | is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len, | ||
| 1882 | priv->plat->enh_desc); | ||
| 1883 | if (unlikely(is_jumbo)) | ||
| 1884 | entry = priv->hw->chain->jumbo_frm(priv, skb, | ||
| 1885 | csum_insertion); | ||
| 1886 | } | ||
| 1887 | if (likely(!is_jumbo)) { | 1890 | if (likely(!is_jumbo)) { |
| 1888 | desc->des2 = dma_map_single(priv->device, skb->data, | 1891 | desc->des2 = dma_map_single(priv->device, skb->data, |
| 1889 | nopaged_len, DMA_TO_DEVICE); | 1892 | nopaged_len, DMA_TO_DEVICE); |
| 1890 | priv->tx_skbuff_dma[entry] = desc->des2; | 1893 | priv->tx_skbuff_dma[entry] = desc->des2; |
| 1891 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, | 1894 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, |
| 1892 | csum_insertion, priv->mode); | 1895 | csum_insertion, priv->mode); |
| 1893 | } else | 1896 | } else { |
| 1894 | desc = first; | 1897 | desc = first; |
| 1898 | entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); | ||
| 1899 | } | ||
| 1895 | 1900 | ||
| 1896 | for (i = 0; i < nfrags; i++) { | 1901 | for (i = 0; i < nfrags; i++) { |
| 1897 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1902 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| @@ -2029,7 +2034,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) | |||
| 2029 | 2034 | ||
| 2030 | p->des2 = priv->rx_skbuff_dma[entry]; | 2035 | p->des2 = priv->rx_skbuff_dma[entry]; |
| 2031 | 2036 | ||
| 2032 | priv->hw->ring->refill_desc3(priv, p); | 2037 | priv->hw->mode->refill_desc3(priv, p); |
| 2033 | 2038 | ||
| 2034 | if (netif_msg_rx_status(priv)) | 2039 | if (netif_msg_rx_status(priv)) |
| 2035 | pr_debug("\trefill entry #%d\n", entry); | 2040 | pr_debug("\trefill entry #%d\n", entry); |
| @@ -2633,11 +2638,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv) | |||
| 2633 | 2638 | ||
| 2634 | /* To use the chained or ring mode */ | 2639 | /* To use the chained or ring mode */ |
| 2635 | if (chain_mode) { | 2640 | if (chain_mode) { |
| 2636 | priv->hw->chain = &chain_mode_ops; | 2641 | priv->hw->mode = &chain_mode_ops; |
| 2637 | pr_info(" Chain mode enabled\n"); | 2642 | pr_info(" Chain mode enabled\n"); |
| 2638 | priv->mode = STMMAC_CHAIN_MODE; | 2643 | priv->mode = STMMAC_CHAIN_MODE; |
| 2639 | } else { | 2644 | } else { |
| 2640 | priv->hw->ring = &ring_mode_ops; | 2645 | priv->hw->mode = &ring_mode_ops; |
| 2641 | pr_info(" Ring mode enabled\n"); | 2646 | pr_info(" Ring mode enabled\n"); |
| 2642 | priv->mode = STMMAC_RING_MODE; | 2647 | priv->mode = STMMAC_RING_MODE; |
| 2643 | } | 2648 | } |
