author		David S. Miller <davem@davemloft.net>	2019-02-06 13:49:55 -0500
committer	David S. Miller <davem@davemloft.net>	2019-02-06 13:49:55 -0500
commit		8ce5cd5ed48b985839b6cadb997403cbcf7fe9db (patch)
tree		5436b168c7bd9f0d315ab1db35c57951f112147d
parent		e37268eb1b2191788d08a65682e3b15e2258ea55 (diff)
parent		20fb0572826b8dca465ad97b0b7eddd78bafb7ae (diff)
Merge branch 'dpaa2-eth-Driver-updates'
Ioana Ciocoi Radulescu says:
====================
dpaa2-eth: Driver updates
First patch moves the driver to a page-per-frame memory model.
The others are minor tweaks and optimizations.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c	110
-rw-r--r--	drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h	 28
2 files changed, 83 insertions, 55 deletions
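
At a glance, the page-per-frame model replaces the napi_alloc_frag() scheme, in which several Rx buffers were carved out of shared page fragments. Each Rx buffer now gets its own order-0 page, of which only the first DPAA2_ETH_RX_BUF_SIZE bytes are DMA-mapped for the WRIOP hardware; the rest of the page is tailroom for struct skb_shared_info. Below is a condensed sketch of the resulting buffer life cycle, stitched together from the add_bufs(), dpaa2_eth_rx() and build_linear_skb() hunks that follow; the wrapper function is hypothetical and error handling is trimmed.

/* Hypothetical sketch of the new Rx buffer life cycle; all helpers and
 * macros (dpaa2_iova_to_virt(), DPAA2_ETH_RX_BUF_SIZE, ...) are the
 * driver's own, condensed here for illustration only.
 */
static struct sk_buff *rx_buf_cycle_sketch(struct device *dev,
					   struct dpaa2_eth_priv *priv)
{
	struct page *page;
	dma_addr_t addr;
	void *vaddr;
	struct sk_buff *skb;

	/* Seeding (cf. add_bufs()): one order-0 page per Rx buffer */
	page = dev_alloc_pages(0);
	if (!page)
		return NULL;

	/* WRIOP may write only the first DPAA2_ETH_RX_BUF_SIZE bytes;
	 * the tail of the page stays CPU-only for skb_shared_info
	 */
	addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
			    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, addr)) {
		__free_pages(page, 0);
		return NULL;
	}

	/* ... hardware fills the buffer; on Rx (cf. dpaa2_eth_rx()): */
	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);

	/* build_skb() is told the full page size, so the shared info
	 * lands in the reserved tailroom (cf. build_linear_skb())
	 */
	skb = build_skb(vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
	if (unlikely(!skb))
		free_pages((unsigned long)vaddr, 0);	/* give the page back */

	return skb;
}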
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 04925c731f0b..87777b09f5e0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -86,16 +86,16 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
 	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
 		addr = dpaa2_sg_get_addr(&sgt[i]);
 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
-		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
 
-		skb_free_frag(sg_vaddr);
+		free_pages((unsigned long)sg_vaddr, 0);
 		if (dpaa2_sg_is_final(&sgt[i]))
 			break;
 	}
 
 free_buf:
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);
 }
 
 /* Build a linear skb based on a single-buffer frame descriptor */
@@ -109,7 +109,7 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
 
 	ch->buf_count--;
 
-	skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
+	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
 	if (unlikely(!skb))
 		return NULL;
 
@@ -144,19 +144,19 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
 		/* Get the address and length from the S/G entry */
 		sg_addr = dpaa2_sg_get_addr(sge);
 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
-		dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
 
 		sg_length = dpaa2_sg_get_len(sge);
 
 		if (i == 0) {
 			/* We build the skb around the first data buffer */
-			skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
+			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
 			if (unlikely(!skb)) {
 				/* Free the first SG entry now, since we already
 				 * unmapped it and obtained the virtual address
 				 */
-				skb_free_frag(sg_vaddr);
+				free_pages((unsigned long)sg_vaddr, 0);
 
 				/* We still need to subtract the buffers used
 				 * by this FD from our software counter
@@ -211,9 +211,9 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
 
 	for (i = 0; i < count; i++) {
 		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
-		dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_BIDIRECTIONAL);
-		skb_free_frag(vaddr);
+		dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
+		free_pages((unsigned long)vaddr, 0);
 	}
 }
 
@@ -264,9 +264,7 @@ static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
 
 	fq = &priv->fq[queue_id];
 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-		err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
-						  priv->tx_qdid, 0,
-						  fq->tx_qdbin, fd);
+		err = priv->enqueue(priv, fq, fd, 0);
 		if (err != -EBUSY)
 			break;
 	}
@@ -378,16 +376,16 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 			return;
 		}
 
-		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
 		skb = build_linear_skb(ch, fd, vaddr);
 	} else if (fd_format == dpaa2_fd_sg) {
 		WARN_ON(priv->xdp_prog);
 
-		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
 		skb = build_frag_skb(priv, ch, buf_data);
-		skb_free_frag(vaddr);
+		free_pages((unsigned long)vaddr, 0);
 		percpu_extras->rx_sg_frames++;
 		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
 	} else {
@@ -657,7 +655,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
  * dpaa2_eth_tx().
  */
 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
-		       const struct dpaa2_fd *fd)
+		       const struct dpaa2_fd *fd, bool in_napi)
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	dma_addr_t fd_addr;
@@ -712,7 +710,7 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
 		skb_free_frag(skbh);
 
 	/* Move on with skb release */
-	dev_kfree_skb(skb);
+	napi_consume_skb(skb, in_napi);
 }
 
 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
@@ -785,9 +783,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 	queue_mapping = skb_get_queue_mapping(skb);
 	fq = &priv->fq[queue_mapping];
 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-		err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
-						  priv->tx_qdid, 0,
-						  fq->tx_qdbin, &fd);
+		err = priv->enqueue(priv, fq, &fd, 0);
 		if (err != -EBUSY)
 			break;
 	}
@@ -795,7 +791,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 	if (unlikely(err < 0)) {
 		percpu_stats->tx_errors++;
 		/* Clean up everything, including freeing the skb */
-		free_tx_fd(priv, &fd);
+		free_tx_fd(priv, &fd, false);
 	} else {
 		fd_len = dpaa2_fd_get_len(&fd);
 		percpu_stats->tx_packets++;
@@ -837,7 +833,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
 
 	/* Check frame errors in the FD field */
 	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
-	free_tx_fd(priv, fd);
+	free_tx_fd(priv, fd, true);
 
 	if (likely(!fd_errors))
 		return;
@@ -903,7 +899,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
-	void *buf;
+	struct page *page;
 	dma_addr_t addr;
 	int i, err;
 
@@ -911,14 +907,16 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
 		/* Allocate buffer visible to WRIOP + skb shared info +
 		 * alignment padding
 		 */
-		buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
-		if (unlikely(!buf))
+		/* allocate one page for each Rx buffer. WRIOP sees
+		 * the entire page except for a tailroom reserved for
+		 * skb shared info
+		 */
+		page = dev_alloc_pages(0);
+		if (!page)
 			goto err_alloc;
 
-		buf = PTR_ALIGN(buf, priv->rx_buf_align);
-
-		addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
-				      DMA_BIDIRECTIONAL);
+		addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
+				    DMA_BIDIRECTIONAL);
 		if (unlikely(dma_mapping_error(dev, addr)))
 			goto err_map;
 
@@ -926,7 +924,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
 
 		/* tracing point */
 		trace_dpaa2_eth_buf_seed(priv->net_dev,
-					 buf, dpaa2_eth_buf_raw_size(priv),
+					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
 					 addr, DPAA2_ETH_RX_BUF_SIZE,
 					 bpid);
 	}
@@ -948,7 +946,7 @@ release_bufs:
 	return i;
 
 err_map:
-	skb_free_frag(buf);
+	__free_pages(page, 0);
 err_alloc:
 	/* If we managed to allocate at least some buffers,
 	 * release them to hardware
@@ -2134,6 +2132,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	struct dpni_buffer_layout buf_layout = {0};
+	u16 rx_buf_align;
 	int err;
 
 	/* We need to check for WRIOP version 1.0.0, but depending on the MC
@@ -2142,9 +2141,9 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
 	 */
 	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
 	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
-		priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
+		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
 	else
-		priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
+		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
 
 	/* tx buffer */
 	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
@@ -2184,7 +2183,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
 	/* rx buffer */
 	buf_layout.pass_frame_status = true;
 	buf_layout.pass_parser_result = true;
-	buf_layout.data_align = priv->rx_buf_align;
+	buf_layout.data_align = rx_buf_align;
 	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
 	buf_layout.private_data_size = 0;
 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
@@ -2202,6 +2201,36 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
 	return 0;
 }
 
+#define DPNI_ENQUEUE_FQID_VER_MAJOR	7
+#define DPNI_ENQUEUE_FQID_VER_MINOR	9
+
+static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
+				       struct dpaa2_eth_fq *fq,
+				       struct dpaa2_fd *fd, u8 prio)
+{
+	return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+					   priv->tx_qdid, prio,
+					   fq->tx_qdbin, fd);
+}
+
+static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
+				       struct dpaa2_eth_fq *fq,
+				       struct dpaa2_fd *fd,
+				       u8 prio __always_unused)
+{
+	return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
+					   fq->tx_fqid, fd);
+}
+
+static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
+{
+	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+		priv->enqueue = dpaa2_eth_enqueue_qd;
+	else
+		priv->enqueue = dpaa2_eth_enqueue_fq;
+}
+
 /* Configure the DPNI object this interface is associated with */
 static int setup_dpni(struct fsl_mc_device *ls_dev)
 {
@@ -2255,6 +2284,8 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
 	if (err)
 		goto close;
 
+	set_enqueue_mode(priv);
+
 	priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
 				       dpaa2_eth_fs_count(priv), GFP_KERNEL);
 	if (!priv->cls_rules)
@@ -2339,6 +2370,7 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
 	}
 
 	fq->tx_qdbin = qid.qdbin;
+	fq->tx_fqid = qid.fqid;
 
 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
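
The other notable change in dpaa2-eth.c is the enqueue rework: rather than always enqueueing by queuing destination (QDID), the driver now picks an enqueue function once in setup_dpni(), depending on whether the DPNI firmware is recent enough (>= 7.9) to enqueue directly by frame queue ID, and the hot paths call it through the priv->enqueue pointer. Recapping the pattern from the hunks above (all names are the driver's own):

/* Version-gated dispatch, set up once at probe time: older DPNI
 * firmware only supports enqueueing by queuing destination (QDID),
 * newer firmware (>= 7.9) can enqueue directly by frame queue ID.
 */
if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
			   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
	priv->enqueue = dpaa2_eth_enqueue_qd;	/* legacy: tx_qdid + qdbin */
else
	priv->enqueue = dpaa2_eth_enqueue_fq;	/* direct: tx_fqid */

/* ...after which every sender is version-agnostic, cf. dpaa2_eth_tx()
 * and xdp_enqueue() above:
 */
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
	err = priv->enqueue(priv, fq, &fd, 0);
	if (err != -EBUSY)
		break;
}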
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 31fe486ec25f..9510928b7cca 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -53,7 +53,8 @@
  */
 #define DPAA2_ETH_MAX_FRAMES_PER_QUEUE	(DPAA2_ETH_TAILDROP_THRESH / 64)
 #define DPAA2_ETH_NUM_BUFS		(DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
-#define DPAA2_ETH_REFILL_THRESH		DPAA2_ETH_MAX_FRAMES_PER_QUEUE
+#define DPAA2_ETH_REFILL_THRESH \
+	(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
 
 /* Maximum number of buffers that can be acquired/released through a single
  * QBMan command
@@ -63,9 +64,11 @@
 /* Hardware requires alignment for ingress/egress buffer addresses */
 #define DPAA2_ETH_TX_BUF_ALIGN		64
 
-#define DPAA2_ETH_RX_BUF_SIZE		2048
-#define DPAA2_ETH_SKB_SIZE \
-	(DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define DPAA2_ETH_RX_BUF_RAW_SIZE	PAGE_SIZE
+#define DPAA2_ETH_RX_BUF_TAILROOM \
+	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define DPAA2_ETH_RX_BUF_SIZE \
+	(DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM)
 
 /* Hardware annotation area in RX/TX buffers */
 #define DPAA2_ETH_RX_HWA_SIZE		64
@@ -274,6 +277,7 @@ struct dpaa2_eth_priv;
 struct dpaa2_eth_fq {
 	u32 fqid;
 	u32 tx_qdbin;
+	u32 tx_fqid;
 	u16 flowid;
 	int target_cpu;
 	u32 dq_frames;
@@ -326,6 +330,9 @@ struct dpaa2_eth_priv {
 
 	u8 num_fqs;
 	struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
+	int (*enqueue)(struct dpaa2_eth_priv *priv,
+		       struct dpaa2_eth_fq *fq,
+		       struct dpaa2_fd *fd, u8 prio);
 
 	u8 num_channels;
 	struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
@@ -343,7 +350,6 @@ struct dpaa2_eth_priv {
 	bool rx_tstamp; /* Rx timestamping enabled */
 
 	u16 tx_qdid;
-	u16 rx_buf_align;
 	struct fsl_mc_io *mc_io;
 	/* Cores which have an affine DPIO/DPCON.
 	 * This is the cpu set on which Rx and Tx conf frames are processed
@@ -418,15 +424,6 @@ enum dpaa2_eth_rx_dist {
 	DPAA2_ETH_RX_DIST_CLS
 };
 
-/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
- * the buffer also needs space for its shared info struct, and we need
- * to allocate enough to accommodate hardware alignment restrictions
- */
-static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv)
-{
-	return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align;
-}
-
 static inline
 unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
 				       struct sk_buff *skb)
@@ -451,8 +448,7 @@ unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
  */
 static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
 {
-	return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN -
-	       DPAA2_ETH_RX_HWA_SIZE;
+	return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
 }
 
 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
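
A worked example of the new Rx buffer geometry may help. Assuming 4 KiB pages and a 320-byte struct skb_shared_info aligned to 64-byte cache lines (illustrative values only; the real ones depend on architecture and kernel configuration):

/* Illustrative numbers, not taken from any particular system; on real
 * hardware PAGE_SIZE, sizeof(struct skb_shared_info) and SMP_CACHE_BYTES
 * decide the outcome.
 */
#define EX_PAGE_SIZE	4096	/* stands in for DPAA2_ETH_RX_BUF_RAW_SIZE */
#define EX_SHINFO	320	/* assumed sizeof(struct skb_shared_info) */
#define EX_CACHELINE	64	/* assumed SMP_CACHE_BYTES */

/* DPAA2_ETH_RX_BUF_TAILROOM = SKB_DATA_ALIGN(sizeof(shinfo)) */
#define EX_TAILROOM	ALIGN(EX_SHINFO, EX_CACHELINE)	/* -> 320 */

/* DPAA2_ETH_RX_BUF_SIZE = RAW_SIZE - TAILROOM */
#define EX_RX_BUF_SIZE	(EX_PAGE_SIZE - EX_TAILROOM)	/* -> 3776 */

Under these assumptions the hardware-visible buffer grows from the fixed 2048 bytes to roughly 3776 bytes per frame, at the cost of a full page per buffer. DPAA2_ETH_REFILL_THRESH is raised to DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD accordingly, so the pool is topped up as soon as it falls one QBMan command's worth of buffers below its nominal fill level.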