Diffstat (limited to 'drivers/net/mlx4/en_tx.c')
-rw-r--r--   drivers/net/mlx4/en_tx.c   828
1 files changed, 828 insertions, 0 deletions
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
new file mode 100644
index 00000000000..f76ab6bf309
--- /dev/null
+++ b/drivers/net/mlx4/en_tx.c
@@ -0,0 +1,828 @@
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <asm/page.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/tcp.h>

#include "mlx4_en.h"

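/*
 * MAX_INLINE bounds the number of payload bytes that may be copied inline
 * into a TX descriptor (inline_thold below is clamped to it in
 * mlx4_en_create_tx_ring()); MAX_BF bounds the descriptor size that may be
 * pushed through the BlueFlame register in mlx4_en_xmit().
 */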
enum {
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
	MAX_BF = 256,
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");

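/*
 * Allocate everything a TX ring needs: the per-descriptor tx_info array,
 * a bounce buffer for descriptors that would wrap past the end of the ring,
 * the HW queue memory, the ring's QP and, when available, a BlueFlame
 * register (falling back to the regular doorbell UAR otherwise).
 */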
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring *ring, int qpn, u32 size,
			   u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int tmp;
	int err;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;

	inline_thold = min(inline_thold, MAX_INLINE);

	spin_lock_init(&ring->comp_lock);

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = vmalloc(tmp);
	if (!ring->tx_info) {
		en_err(priv, "Failed allocating tx_info ring\n");
		return -ENOMEM;
	}
	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	       ring->tx_info, tmp);

	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
	if (!ring->bounce_buf) {
		en_err(priv, "Failed allocating bounce buffer\n");
		err = -ENOMEM;
		goto err_tx;
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	ring->qpn = qpn;
	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_map;
	}
	ring->qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;

	return 0;

err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
err_tx:
	vfree(ring->tx_info);
	ring->tx_info = NULL;
	return err;
}

void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	if (ring->bf_enabled)
		mlx4_bf_free(mdev->dev, &ring->bf);
	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
	vfree(ring->tx_info);
	ring->tx_info = NULL;
}

int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	ring->blocked = 0;
	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
	memset(ring->buf, 0, ring->buf_size);

	ring->qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = ring->qp.qpn << 8;

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
				ring->cqn, &ring->context);
	if (ring->bf_enabled)
		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
			       &ring->qp, &ring->qp_state);

	return err;
}

void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}

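/*
 * Release one completed (or aborted) descriptor: unmap its DMA data
 * segments (unless the payload was sent inline), stamp every TXBB it
 * occupied with STAMP_VAL plus the current ownership bit, and free the skb.
 * Both the data segments and the stamping may wrap around the end of the
 * ring buffer.  Returns the number of TXBBs the descriptor used.
 */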
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring,
				int index, u8 owner)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	struct sk_buff *skb = tx_info->skb;
	struct skb_frag_struct *frag;
	void *end = ring->buf + ring->buf_size;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;
	__be32 *ptr = (__be32 *)tx_desc;
	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (!tx_info->inl) {
			if (tx_info->linear) {
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				frag = &skb_shinfo(skb)->frags[i];
				pci_unmap_page(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data[i].addr),
					frag->size, PCI_DMA_TODEVICE);
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
		}

	} else {
		if (!tx_info->inl) {
			if ((void *) data >= end) {
				data = ring->buf + ((void *)data - end);
			}

			if (tx_info->linear) {
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				/* Check for wraparound before unmapping */
				if ((void *) data >= end)
					data = ring->buf;
				frag = &skb_shinfo(skb)->frags[i];
				pci_unmap_page(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					frag->size, PCI_DMA_TODEVICE);
				++data;
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
			if ((void *) ptr >= end) {
				ptr = ring->buf;
				stamp ^= cpu_to_be32(0x80000000);
			}
		}

	}
	dev_kfree_skb_any(skb);
	return tx_info->nr_txbb;
}


int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
	       ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
						ring->cons & ring->size_mask,
						!!(ring->cons & ring->size));
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

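/*
 * Reclaim descriptors that the HW has completed, as indicated by the
 * wqe_index in the CQE, then update the CQ and ring consumer indices and
 * re-wake the netdev queue if this ring had stopped it.
 */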
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe = cq->buf;
	u16 index;
	u16 new_index;
	u32 txbbs_skipped = 0;
	u32 cq_last_sav;

	/* index always points to the first TXBB of the last polled descriptor */
	index = ring->cons & ring->size_mask;
	new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
	if (index == new_index)
		return;

	if (!priv->port_up)
		return;

	/*
	 * We use a two-stage loop:
	 * - the first samples the HW-updated CQE
	 * - the second frees TXBBs until the last sample
	 * This lets us amortize CQE cache misses, while still polling the CQ
	 * until it is quiescent.
	 */
	cq_last_sav = mcq->cons_index;
	do {
		do {
			/* Skip over last polled CQE */
			index = (index + ring->last_nr_txbb) & ring->size_mask;
			txbbs_skipped += ring->last_nr_txbb;

			/* Poll next CQE */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
						priv, ring, index,
						!!((ring->cons + txbbs_skipped) &
						   ring->size));
			++mcq->cons_index;

		} while (index != new_index);

		new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
	} while (index != new_index);
	AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
			 (u32) (mcq->cons_index - cq_last_sav));

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wakeup Tx queue if this ring stopped it */
	if (unlikely(ring->blocked)) {
		if ((u32) (ring->prod - ring->cons) <=
		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
			ring->blocked = 0;
			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
			priv->port_stats.wake_queue++;
		}
	}
}

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];

	if (!spin_trylock(&ring->comp_lock))
		return;
	mlx4_en_process_tx_cq(cq->dev, cq);
	mod_timer(&cq->timer, jiffies + 1);
	spin_unlock(&ring->comp_lock);
}

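/*
 * Timer callback: poll the TX CQ under the completion lock and re-arm the
 * timer while packets are still in flight, so completions are processed
 * even if no further interrupts or transmits arrive.
 */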
void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	if (!spin_trylock_irq(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock_irq(&ring->comp_lock);
}

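/*
 * A descriptor that would wrap past the end of the ring is first built
 * linearly in ring->bounce_buf; copy it back into the ring here in two
 * pieces (the wrapped tail first, then the part at the end of the ring),
 * working backwards in 32-bit words with a barrier at each TXBB boundary.
 * The first dword, which holds owner_opcode, is skipped; the caller writes
 * it last.  Returns the descriptor's real location in the ring.
 */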
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						      struct mlx4_en_tx_ring *ring,
						      u32 index,
						      unsigned int desc_size)
{
	u32 copy = (ring->size - index) * TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + index * TXBB_SIZE;
}

static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
	unsigned long flags;

	/* If we don't have a pending timer, set one up to catch our recent
	   post in case the interface becomes idle */
	if (!timer_pending(&cq->timer))
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	/* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
		if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
			mlx4_en_process_tx_cq(priv->dev, cq);
			spin_unlock_irqrestore(&ring->comp_lock, flags);
		}
}

static void *get_frag_ptr(struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	struct page *page = frag->page;
	void *ptr;

	ptr = page_address(page);
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

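/*
 * Decide whether the skb should be copied inline into the descriptor:
 * only when an inline threshold is set, the skb is not GSO, its total
 * length is within the threshold and it has at most one page fragment
 * (whose mapped address is returned through *pfrag when requested).
 */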
static int is_inline(struct sk_buff *skb, void **pfrag)
{
	void *ptr;

	if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
		if (skb_shinfo(skb)->nr_frags == 1) {
			ptr = get_frag_ptr(skb);
			if (unlikely(!ptr))
				return 0;

			if (pfrag)
				*pfrag = ptr;

			return 1;
		} else if (unlikely(skb_shinfo(skb)->nr_frags))
			return 0;
		else
			return 1;
	}

	return 0;
}

static int inline_size(struct sk_buff *skb)
{
	if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
	    <= MLX4_INLINE_ALIGN)
		return ALIGN(skb->len + CTRL_SIZE +
			     sizeof(struct mlx4_wqe_inline_seg), 16);
	else
		return ALIGN(skb->len + CTRL_SIZE + 2 *
			     sizeof(struct mlx4_wqe_inline_seg), 16);
}

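/*
 * Compute the descriptor size in bytes: a control segment, plus either
 * inline data, or the padded LSO header and one data segment per fragment
 * (and one for the linear part when it holds payload).  Returns 0 to drop
 * GSO skbs whose headers are not fully in the linear part.
 */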
static int get_real_size(struct sk_buff *skb, struct net_device *dev,
			 int *lso_header_size)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int real_size;

	if (skb_is_gso(skb)) {
		*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
		real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
			ALIGN(*lso_header_size + 4, DS_SIZE);
		if (unlikely(*lso_header_size != skb_headlen(skb))) {
			/* We add a segment for the skb linear buffer only if
			 * it contains data */
			if (*lso_header_size < skb_headlen(skb))
				real_size += DS_SIZE;
			else {
				if (netif_msg_tx_err(priv))
					en_warn(priv, "Non-linear headers\n");
				return 0;
			}
		}
	} else {
		*lso_header_size = 0;
		if (!is_inline(skb, NULL))
			real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
		else
			real_size = inline_size(skb);
	}

	return real_size;
}

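/*
 * Copy the whole packet into the descriptor as inline data.  Small skbs fit
 * in a single inline segment; larger ones are split into two, and the
 * second segment's byte_count is written only after a wmb() so its data is
 * globally visible first.
 */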
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
			     int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

	if (skb->len <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
		if (skb_shinfo(skb)->nr_frags)
			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
			       skb_shinfo(skb)->frags[0].size);

	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		if (skb_headlen(skb) <= spc) {
			skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
			if (skb_headlen(skb) < spc) {
				memcpy(((void *)(inl + 1)) + skb_headlen(skb),
					fragptr, spc - skb_headlen(skb));
				fragptr += spc - skb_headlen(skb);
			}
			inl = (void *) (inl + 1) + spc;
			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
		} else {
			skb_copy_from_linear_data(skb, inl + 1, spc);
			inl = (void *) (inl + 1) + spc;
			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
					skb_headlen(skb) - spc);
			if (skb_shinfo(skb)->nr_frags)
				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
					fragptr, skb_shinfo(skb)->frags[0].size);
		}

		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
	}
	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}

u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 vlan_tag = 0;

	/* If we support per priority flow control and the packet contains
	 * a vlan tag, send the packet to the TX ring assigned to that priority
	 */
	if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
	}

	return skb_tx_hash(dev, skb);
}

static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
{
	__iowrite64_copy(dst, src, bytecnt / 8);
}

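/*
 * Main transmit entry point.  Sizes the descriptor, stops the queue (and
 * arms the CQ) when the ring is almost full, builds the control, LSO and
 * data/inline segments, and finally hands the descriptor to HW either by
 * copying it through the BlueFlame register or by setting the ownership
 * bit and ringing the send doorbell.
 */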
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_wqe_data_seg *data;
	struct skb_frag_struct *frag;
	struct mlx4_en_tx_info *tx_info;
	struct ethhdr *ethh;
	u64 mac;
	u32 mac_l, mac_h;
	int tx_ind = 0;
	int nr_txbb;
	int desc_size;
	int real_size;
	dma_addr_t dma;
	u32 index, bf_index;
	__be32 op_own;
	u16 vlan_tag = 0;
	int i;
	int lso_header_size;
	void *fragptr;
	bool bounce = false;

	if (!priv->port_up)
		goto tx_drop;

	real_size = get_real_size(skb, dev, &lso_header_size);
	if (unlikely(!real_size))
		goto tx_drop;

	/* Align descriptor to TXBB size */
	desc_size = ALIGN(real_size, TXBB_SIZE);
	nr_txbb = desc_size / TXBB_SIZE;
	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Oversized header or SG list\n");
		goto tx_drop;
	}

	tx_ind = skb->queue_mapping;
	ring = &priv->tx_ring[tx_ind];
	if (vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);

	/* Check available TXBBs and 2K spare for prefetch */
	if (unlikely(((int)(ring->prod - ring->cons)) >
		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
		/* every full Tx ring stops queue */
		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
		ring->blocked = 1;
		priv->port_stats.queue_stopped++;

		/* Use interrupts to find out when queue opened */
		cq = &priv->tx_cq[tx_ind];
		mlx4_en_arm_cq(priv, cq);
		return NETDEV_TX_BUSY;
	}

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
			 (u32) (ring->prod - ring->cons - 1));

	/* Packet is good - grab an index and transmit it */
	index = ring->prod & ring->size_mask;
	bf_index = ring->prod;

	/* See if we have enough space for whole descriptor TXBB for setting
	 * SW ownership on next descriptor; if not, use a bounce buffer. */
	if (likely(index + nr_txbb <= ring->size))
		tx_desc = ring->buf + index * TXBB_SIZE;
	else {
		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
		bounce = true;
	}

	/* Save skb in tx_info ring */
	tx_info = &ring->tx_info[index];
	tx_info->skb = skb;
	tx_info->nr_txbb = nr_txbb;

	/* Prepare ctrl segment apart from opcode+ownership, which depends on
	 * whether LSO is used */
	tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
	tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
						MLX4_WQE_CTRL_SOLICITED);
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
							 MLX4_WQE_CTRL_TCP_UDP_CSUM);
		priv->port_stats.tx_chksum_offload++;
	}

	if (unlikely(priv->validate_loopback)) {
		/* Copy dst mac address to wqe */
		skb_reset_mac_header(skb);
		ethh = eth_hdr(skb);
		if (ethh && ethh->h_dest) {
			mac = mlx4_en_mac_to_u64(ethh->h_dest);
			mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
			mac_l = (u32) (mac & 0xffffffff);
			tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
			tx_desc->ctrl.imm = cpu_to_be32(mac_l);
		}
	}

	/* Handle LSO (TSO) packets */
	if (lso_header_size) {
		/* Mark opcode as LSO */
		op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

		/* Fill in the LSO prefix */
		tx_desc->lso.mss_hdr_size = cpu_to_be32(
			skb_shinfo(skb)->gso_size << 16 | lso_header_size);

		/* Copy headers;
		 * note that we already verified that it is linear */
		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
		data = ((void *) &tx_desc->lso +
			ALIGN(lso_header_size + 4, DS_SIZE));

		priv->port_stats.tso_packets++;
		i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
			!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
		ring->bytes += skb->len + (i - 1) * lso_header_size;
		ring->packets += i;
	} else {
		/* Normal (Non LSO) packet */
		op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
		data = &tx_desc->data;
		ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
		ring->packets++;

	}
	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);


	/* valid only for non-inline segments */
	tx_info->data_offset = (void *) data - (void *) tx_desc;

	tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
	data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;

	if (!is_inline(skb, &fragptr)) {
		/* Map fragments */
		for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
			frag = &skb_shinfo(skb)->frags[i];
			dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
					   frag->size, PCI_DMA_TODEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(frag->size);
			--data;
		}

		/* Map linear part */
		if (tx_info->linear) {
			dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
					     skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
		}
		tx_info->inl = 0;
	} else {
		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
		tx_info->inl = 1;
	}

	ring->prod += nr_txbb;

	/* If we used a bounce buffer then copy descriptor back into place */
	if (bounce)
		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);

	/* Run destructor before passing skb to HW */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

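	/* Ring the doorbell.  Small descriptors can be pushed directly to the
	 * device through the BlueFlame register (the doorbell QPN is folded
	 * into the descriptor and the whole thing is copied with
	 * mlx4_bf_copy()); otherwise set the ownership/opcode word and write
	 * the QP number to the regular send doorbell. */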
	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
		*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
		op_own |= htonl((bf_index & 0xffff) << 8);
		/* Ensure new descriptor hits memory
		 * before setting ownership of this descriptor to HW */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;

		wmb();

		mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
			     desc_size);

		wmb();

		ring->bf.offset ^= ring->bf.buf_size;
	} else {
		/* Ensure new descriptor hits memory
		 * before setting ownership of this descriptor to HW */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;
		wmb();
		iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
	}

	/* Poll CQ here */
	mlx4_en_xmit_poll(priv, tx_ind);

	return NETDEV_TX_OK;

tx_drop:
	dev_kfree_skb_any(skb);
	priv->stats.tx_dropped++;
	return NETDEV_TX_OK;
}