about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ethernet/amd/xgbe/xgbe-desc.c')
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-desc.c  32
1 file changed, 21 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 6f1c85956d50..1c5d62e8dab6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -131,7 +131,7 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
131 131
132 if (ring->rdata) { 132 if (ring->rdata) {
133 for (i = 0; i < ring->rdesc_count; i++) { 133 for (i = 0; i < ring->rdesc_count; i++) {
134 rdata = GET_DESC_DATA(ring, i); 134 rdata = XGBE_GET_DESC_DATA(ring, i);
135 xgbe_unmap_skb(pdata, rdata); 135 xgbe_unmap_skb(pdata, rdata);
136 } 136 }
137 137
@@ -256,7 +256,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
256 rdesc_dma = ring->rdesc_dma; 256 rdesc_dma = ring->rdesc_dma;
257 257
258 for (j = 0; j < ring->rdesc_count; j++) { 258 for (j = 0; j < ring->rdesc_count; j++) {
259 rdata = GET_DESC_DATA(ring, j); 259 rdata = XGBE_GET_DESC_DATA(ring, j);
260 260
261 rdata->rdesc = rdesc; 261 rdata->rdesc = rdesc;
262 rdata->rdesc_dma = rdesc_dma; 262 rdata->rdesc_dma = rdesc_dma;
@@ -298,7 +298,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
298 rdesc_dma = ring->rdesc_dma; 298 rdesc_dma = ring->rdesc_dma;
299 299
300 for (j = 0; j < ring->rdesc_count; j++) { 300 for (j = 0; j < ring->rdesc_count; j++) {
301 rdata = GET_DESC_DATA(ring, j); 301 rdata = XGBE_GET_DESC_DATA(ring, j);
302 302
303 rdata->rdesc = rdesc; 303 rdata->rdesc = rdesc;
304 rdata->rdesc_dma = rdesc_dma; 304 rdata->rdesc_dma = rdesc_dma;
@@ -359,6 +359,15 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
359 rdata->len = 0; 359 rdata->len = 0;
360 rdata->interrupt = 0; 360 rdata->interrupt = 0;
361 rdata->mapped_as_page = 0; 361 rdata->mapped_as_page = 0;
362
363 if (rdata->state_saved) {
364 rdata->state_saved = 0;
365 rdata->state.incomplete = 0;
366 rdata->state.context_next = 0;
367 rdata->state.skb = NULL;
368 rdata->state.len = 0;
369 rdata->state.error = 0;
370 }
362} 371}
363 372
364static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) 373static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
@@ -392,7 +401,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
392 if ((tso && (packet->mss != ring->tx.cur_mss)) || 401 if ((tso && (packet->mss != ring->tx.cur_mss)) ||
393 (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))) 402 (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
394 cur_index++; 403 cur_index++;
395 rdata = GET_DESC_DATA(ring, cur_index); 404 rdata = XGBE_GET_DESC_DATA(ring, cur_index);
396 405
397 if (tso) { 406 if (tso) {
398 DBGPR(" TSO packet\n"); 407 DBGPR(" TSO packet\n");
@@ -413,12 +422,12 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
413 packet->length += packet->header_len; 422 packet->length += packet->header_len;
414 423
415 cur_index++; 424 cur_index++;
416 rdata = GET_DESC_DATA(ring, cur_index); 425 rdata = XGBE_GET_DESC_DATA(ring, cur_index);
417 } 426 }
418 427
419 /* Map the (remainder of the) packet */ 428 /* Map the (remainder of the) packet */
420 for (datalen = skb_headlen(skb) - offset; datalen; ) { 429 for (datalen = skb_headlen(skb) - offset; datalen; ) {
421 len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE); 430 len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);
422 431
423 skb_dma = dma_map_single(pdata->dev, skb->data + offset, len, 432 skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
424 DMA_TO_DEVICE); 433 DMA_TO_DEVICE);
@@ -437,7 +446,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
437 packet->length += len; 446 packet->length += len;
438 447
439 cur_index++; 448 cur_index++;
440 rdata = GET_DESC_DATA(ring, cur_index); 449 rdata = XGBE_GET_DESC_DATA(ring, cur_index);
441 } 450 }
442 451
443 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 452 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -447,7 +456,8 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
447 offset = 0; 456 offset = 0;
448 457
449 for (datalen = skb_frag_size(frag); datalen; ) { 458 for (datalen = skb_frag_size(frag); datalen; ) {
450 len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE); 459 len = min_t(unsigned int, datalen,
460 XGBE_TX_MAX_BUF_SIZE);
451 461
452 skb_dma = skb_frag_dma_map(pdata->dev, frag, offset, 462 skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
453 len, DMA_TO_DEVICE); 463 len, DMA_TO_DEVICE);
@@ -468,7 +478,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
468 packet->length += len; 478 packet->length += len;
469 479
470 cur_index++; 480 cur_index++;
471 rdata = GET_DESC_DATA(ring, cur_index); 481 rdata = XGBE_GET_DESC_DATA(ring, cur_index);
472 } 482 }
473 } 483 }
474 484
@@ -484,7 +494,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
484 494
485err_out: 495err_out:
486 while (start_index < cur_index) { 496 while (start_index < cur_index) {
487 rdata = GET_DESC_DATA(ring, start_index++); 497 rdata = XGBE_GET_DESC_DATA(ring, start_index++);
488 xgbe_unmap_skb(pdata, rdata); 498 xgbe_unmap_skb(pdata, rdata);
489 } 499 }
490 500
@@ -507,7 +517,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
507 ring->rx.realloc_index); 517 ring->rx.realloc_index);
508 518
509 for (i = 0; i < ring->dirty; i++) { 519 for (i = 0; i < ring->dirty; i++) {
510 rdata = GET_DESC_DATA(ring, ring->rx.realloc_index); 520 rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
511 521
512 /* Reset rdata values */ 522 /* Reset rdata values */
513 xgbe_unmap_skb(pdata, rdata); 523 xgbe_unmap_skb(pdata, rdata);