Diffstat (limited to 'drivers/net')

 -rw-r--r--  drivers/net/ethernet/stmicro/stmmac/chain_mode.c  | 14
 -rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h      |  2
 -rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c   | 15
 -rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h      |  7
 -rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 68
 5 files changed, 79 insertions(+), 27 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index c553f6b5a913..cf28daba4346 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -28,7 +28,7 @@
 
 #include "stmmac.h"
 
-static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 {
 	struct stmmac_priv *priv = (struct stmmac_priv *)p;
 	unsigned int txsize = priv->dma_tx_size;
@@ -47,7 +47,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
 	desc->des2 = dma_map_single(priv->device, skb->data,
 				    bmax, DMA_TO_DEVICE);
-	priv->tx_skbuff_dma[entry] = desc->des2;
+	if (dma_mapping_error(priv->device, desc->des2))
+		return -1;
+	priv->tx_skbuff_dma[entry].buf = desc->des2;
 	priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
 
 	while (len != 0) {
@@ -59,7 +61,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 			desc->des2 = dma_map_single(priv->device,
 						    (skb->data + bmax * i),
 						    bmax, DMA_TO_DEVICE);
-			priv->tx_skbuff_dma[entry] = desc->des2;
+			if (dma_mapping_error(priv->device, desc->des2))
+				return -1;
+			priv->tx_skbuff_dma[entry].buf = desc->des2;
 			priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
 							STMMAC_CHAIN_MODE);
 			priv->hw->desc->set_tx_owner(desc);
@@ -69,7 +73,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 			desc->des2 = dma_map_single(priv->device,
 						    (skb->data + bmax * i), len,
 						    DMA_TO_DEVICE);
-			priv->tx_skbuff_dma[entry] = desc->des2;
+			if (dma_mapping_error(priv->device, desc->des2))
+				return -1;
+			priv->tx_skbuff_dma[entry].buf = desc->des2;
 			priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
 							STMMAC_CHAIN_MODE);
 			priv->hw->desc->set_tx_owner(desc);
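Note (not part of the patch): the recurring pattern in the hunks above is the DMA-API rule that an address returned by dma_map_single() must be validated with dma_mapping_error() before it is written into a descriptor, because on some platforms (swiotlb, IOMMU) the mapping itself can fail. A minimal, stand-alone sketch of that idiom; the helper name map_tx_buf() is made up for illustration:

#include <linux/dma-mapping.h>

/* Map one TX buffer and validate the mapping before the address is
 * programmed into hardware.  Returns 0 on success, -ENOMEM on failure.
 */
static int map_tx_buf(struct device *dev, void *data, size_t len,
		      dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* never hand a failed mapping to the DMA engine */

	*out = addr;
	return 0;
}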
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index de507c32036c..bd54238e2df8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -425,7 +425,7 @@ struct stmmac_mode_ops {
 	void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
 		      unsigned int extend_desc);
 	unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
-	unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+	int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum);
 	int (*set_16kib_bfsize)(int mtu);
 	void (*init_desc3)(struct dma_desc *p);
 	void (*refill_desc3) (void *priv, struct dma_desc *p);
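Note (not part of the patch): jumbo_frm() previously returned an unsigned entry index, so a failed mapping could not be reported; with the int return type a negative value signals the error to the caller. The sketch below mirrors the stmmac_xmit() hunk later in this diff; the wrapper xmit_jumbo() is a made-up name, not a driver function:

/* Caller-side check enabled by the new int return type of jumbo_frm(). */
static int xmit_jumbo(struct stmmac_priv *priv, struct sk_buff *skb, int csum)
{
	int entry = priv->hw->mode->jumbo_frm(priv, skb, csum);

	if (unlikely(entry < 0))
		return -ENOMEM;	/* a DMA mapping failed inside jumbo_frm() */

	return entry;		/* index of the last descriptor used */
}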
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 650a4be6bce5..5dd50c6cda5b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -28,7 +28,7 @@
 
 #include "stmmac.h"
 
-static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 {
 	struct stmmac_priv *priv = (struct stmmac_priv *)p;
 	unsigned int txsize = priv->dma_tx_size;
@@ -53,7 +53,10 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
 	desc->des2 = dma_map_single(priv->device, skb->data,
 				    bmax, DMA_TO_DEVICE);
-	priv->tx_skbuff_dma[entry] = desc->des2;
+	if (dma_mapping_error(priv->device, desc->des2))
+		return -1;
+
+	priv->tx_skbuff_dma[entry].buf = desc->des2;
 	desc->des3 = desc->des2 + BUF_SIZE_4KiB;
 	priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
 					STMMAC_RING_MODE);
@@ -68,7 +71,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
 		desc->des2 = dma_map_single(priv->device, skb->data + bmax,
 					    len, DMA_TO_DEVICE);
-		priv->tx_skbuff_dma[entry] = desc->des2;
+		if (dma_mapping_error(priv->device, desc->des2))
+			return -1;
+		priv->tx_skbuff_dma[entry].buf = desc->des2;
 		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
 						STMMAC_RING_MODE);
@@ -77,7 +82,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 	} else {
 		desc->des2 = dma_map_single(priv->device, skb->data,
 					    nopaged_len, DMA_TO_DEVICE);
-		priv->tx_skbuff_dma[entry] = desc->des2;
+		if (dma_mapping_error(priv->device, desc->des2))
+			return -1;
+		priv->tx_skbuff_dma[entry].buf = desc->des2;
 		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
 		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
 						STMMAC_RING_MODE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 128a0b723a00..58097c0e2ad5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -34,6 +34,11 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/reset.h>
 
+struct stmmac_tx_info {
+	dma_addr_t buf;
+	bool map_as_page;
+};
+
 struct stmmac_priv {
 	/* Frequently used values are kept adjacent for cache effect */
 	struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -45,7 +50,7 @@ struct stmmac_priv {
 	u32 tx_count_frames;
 	u32 tx_coal_frames;
 	u32 tx_coal_timer;
-	dma_addr_t *tx_skbuff_dma;
+	struct stmmac_tx_info *tx_skbuff_dma;
 	dma_addr_t dma_tx_phy;
 	int tx_coalesce;
 	int hwts_tx_en;
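Note (not part of the patch): the new struct stmmac_tx_info records, per TX slot, both the DMA address and whether the buffer was mapped as a page (skb_frag_dma_map()) or as linear skb data (dma_map_single()). The DMA API requires the unmap call to match the map call, which is what map_as_page makes possible in the teardown paths further down. A minimal sketch of such a matched unmap; unmap_tx_slot() is an illustrative helper, not driver code:

#include <linux/dma-mapping.h>

struct stmmac_tx_info {
	dma_addr_t buf;
	bool map_as_page;
};

/* Release one TX slot: the unmap variant must match how the buffer was
 * originally mapped, which is exactly what map_as_page tracks.
 */
static void unmap_tx_slot(struct device *dev, struct stmmac_tx_info *info,
			  size_t len)
{
	if (!info->buf)
		return;

	if (info->map_as_page)
		dma_unmap_page(dev, info->buf, len, DMA_TO_DEVICE);
	else
		dma_unmap_single(dev, info->buf, len, DMA_TO_DEVICE);

	info->buf = 0;
	info->map_as_page = false;
}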
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 03652891fcbf..df15f00b1246 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1073,7 +1073,8 @@ static int init_dma_desc_rings(struct net_device *dev)
 		else
 			p = priv->dma_tx + i;
 		p->des2 = 0;
-		priv->tx_skbuff_dma[i] = 0;
+		priv->tx_skbuff_dma[i].buf = 0;
+		priv->tx_skbuff_dma[i].map_as_page = false;
 		priv->tx_skbuff[i] = NULL;
 	}
 
@@ -1112,17 +1113,24 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
 		else
 			p = priv->dma_tx + i;
 
-		if (priv->tx_skbuff_dma[i]) {
-			dma_unmap_single(priv->device,
-					 priv->tx_skbuff_dma[i],
-					 priv->hw->desc->get_tx_len(p),
-					 DMA_TO_DEVICE);
-			priv->tx_skbuff_dma[i] = 0;
+		if (priv->tx_skbuff_dma[i].buf) {
+			if (priv->tx_skbuff_dma[i].map_as_page)
+				dma_unmap_page(priv->device,
+					       priv->tx_skbuff_dma[i].buf,
+					       priv->hw->desc->get_tx_len(p),
+					       DMA_TO_DEVICE);
+			else
+				dma_unmap_single(priv->device,
+						 priv->tx_skbuff_dma[i].buf,
+						 priv->hw->desc->get_tx_len(p),
+						 DMA_TO_DEVICE);
 		}
 
 		if (priv->tx_skbuff[i] != NULL) {
 			dev_kfree_skb_any(priv->tx_skbuff[i]);
 			priv->tx_skbuff[i] = NULL;
+			priv->tx_skbuff_dma[i].buf = 0;
+			priv->tx_skbuff_dma[i].map_as_page = false;
 		}
 	}
 }
@@ -1143,7 +1151,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
 	if (!priv->rx_skbuff)
 		goto err_rx_skbuff;
 
-	priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+	priv->tx_skbuff_dma = kmalloc_array(txsize,
+					    sizeof(*priv->tx_skbuff_dma),
 					    GFP_KERNEL);
 	if (!priv->tx_skbuff_dma)
 		goto err_tx_skbuff_dma;
@@ -1305,12 +1314,19 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 			pr_debug("%s: curr %d, dirty %d\n", __func__,
 				 priv->cur_tx, priv->dirty_tx);
 
-		if (likely(priv->tx_skbuff_dma[entry])) {
-			dma_unmap_single(priv->device,
-					 priv->tx_skbuff_dma[entry],
-					 priv->hw->desc->get_tx_len(p),
-					 DMA_TO_DEVICE);
-			priv->tx_skbuff_dma[entry] = 0;
+		if (likely(priv->tx_skbuff_dma[entry].buf)) {
+			if (priv->tx_skbuff_dma[entry].map_as_page)
+				dma_unmap_page(priv->device,
+					       priv->tx_skbuff_dma[entry].buf,
+					       priv->hw->desc->get_tx_len(p),
+					       DMA_TO_DEVICE);
+			else
+				dma_unmap_single(priv->device,
+						 priv->tx_skbuff_dma[entry].buf,
+						 priv->hw->desc->get_tx_len(p),
+						 DMA_TO_DEVICE);
+			priv->tx_skbuff_dma[entry].buf = 0;
+			priv->tx_skbuff_dma[entry].map_as_page = false;
 		}
 		priv->hw->mode->clean_desc3(priv, p);
 
@@ -1905,12 +1921,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (likely(!is_jumbo)) {
 		desc->des2 = dma_map_single(priv->device, skb->data,
 					    nopaged_len, DMA_TO_DEVICE);
-		priv->tx_skbuff_dma[entry] = desc->des2;
+		if (dma_mapping_error(priv->device, desc->des2))
+			goto dma_map_err;
+		priv->tx_skbuff_dma[entry].buf = desc->des2;
 		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
 						csum_insertion, priv->mode);
 	} else {
 		desc = first;
 		entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+		if (unlikely(entry < 0))
+			goto dma_map_err;
 	}
 
 	for (i = 0; i < nfrags; i++) {
@@ -1926,7 +1946,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
 					      DMA_TO_DEVICE);
-		priv->tx_skbuff_dma[entry] = desc->des2;
+		if (dma_mapping_error(priv->device, desc->des2))
+			goto dma_map_err; /* should reuse desc w/o issues */
+
+		priv->tx_skbuff_dma[entry].buf = desc->des2;
+		priv->tx_skbuff_dma[entry].map_as_page = true;
 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
 						priv->mode);
 		wmb();
@@ -1993,7 +2017,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	priv->hw->dma->enable_dma_transmission(priv->ioaddr);
 
 	spin_unlock(&priv->tx_lock);
+	return NETDEV_TX_OK;
 
+dma_map_err:
+	dev_err(priv->device, "Tx dma map failed\n");
+	dev_kfree_skb(skb);
+	priv->dev->stats.tx_dropped++;
 	return NETDEV_TX_OK;
 }
 
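Note (not part of the patch): the new dma_map_err label gives stmmac_xmit() a single exit path for any failed mapping: log, free the skb, count it as tx_dropped, and still return NETDEV_TX_OK so the stack does not resubmit a packet the driver has already consumed. A simplified sketch of that convention in a generic xmit handler; example_xmit() and its single-buffer handling are assumptions, and it omits unmapping any fragments that were already mapped:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct device *dma_dev = ndev->dev.parent;
	dma_addr_t addr;

	addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
			      DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, addr))
		goto dma_map_err;

	/* ... program the descriptor and start the DMA engine ... */
	return NETDEV_TX_OK;

dma_map_err:
	dev_err(dma_dev, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;	/* skb was consumed, do not requeue it */
}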
@@ -2046,7 +2075,12 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 			priv->rx_skbuff_dma[entry] =
 			    dma_map_single(priv->device, skb->data, bfsize,
 					   DMA_FROM_DEVICE);
-
+			if (dma_mapping_error(priv->device,
+					      priv->rx_skbuff_dma[entry])) {
+				dev_err(priv->device, "Rx dma map failed\n");
+				dev_kfree_skb(skb);
+				break;
+			}
 			p->des2 = priv->rx_skbuff_dma[entry];
 
 			priv->hw->mode->refill_desc3(priv, p);
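Note (not part of the patch): on the RX refill path a mapping failure is handled differently from TX: the just-allocated skb is freed and the loop breaks out, leaving the descriptor unfilled so a later refill pass can retry. A stand-alone sketch of that shape; refill_rx_slot() is an illustrative helper, not the driver's actual loop:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Allocate and map one RX buffer; on mapping failure the skb is freed and
 * an error returned so the caller can stop refilling and retry later.
 */
static int refill_rx_slot(struct device *dev, struct net_device *ndev,
			  unsigned int bufsize, dma_addr_t *slot_dma,
			  struct sk_buff **slot_skb)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(ndev, bufsize);

	if (!skb)
		return -ENOMEM;

	*slot_dma = dma_map_single(dev, skb->data, bufsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *slot_dma)) {
		dev_err(dev, "Rx dma map failed\n");
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	*slot_skb = skb;
	return 0;
}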