-rw-r--r--	drivers/net/sky2.c	78
-rw-r--r--	drivers/net/sky2.h	 2
2 files changed, 41 insertions, 39 deletions
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index bc95aacab20f..c057694cf047 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -76,7 +76,7 @@
 #define RX_LE_BYTES		(RX_LE_SIZE*sizeof(struct sky2_rx_le))
 #define RX_MAX_PENDING		(RX_LE_SIZE/2 - 1)
 #define RX_DEF_PENDING		128
-#define RX_COPY_THRESHOLD	128
+#define RX_COPY_THRESHOLD	256
 
 #define TX_RING_SIZE		512
 #define TX_DEF_PENDING		(TX_RING_SIZE - 1)
@@ -692,18 +692,10 @@ static void sky2_rx_clean(struct sky2_port *sky2)
 	}
 }
 
-static inline struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2,
-					    unsigned int size,
-					    unsigned int gfp_mask)
+#define roundup(x, y)	((((x)+((y)-1))/(y))*(y))
+static inline unsigned sky2_rx_size(const struct sky2_port *sky2)
 {
-	struct sk_buff *skb;
-
-	skb = alloc_skb(size + NET_IP_ALIGN, gfp_mask);
-	if (likely(skb)) {
-		skb->dev = sky2->netdev;
-		skb_reserve(skb, NET_IP_ALIGN);
-	}
-	return skb;
+	return roundup(sky2->netdev->mtu + ETH_HLEN + 4, 8);
 }
 
 /*
@@ -711,22 +703,28 @@ static inline struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2,
  * In case of 64 bit dma, there are 2X as many list elements
  * available as ring entries
  * and need to reserve one list element so we don't wrap around.
+ *
+ * It appears the hardware has a bug in the FIFO logic that
+ * cause it to hang if the FIFO gets overrun and the receive buffer
+ * is not aligned. This means we can't use skb_reserve to align
+ * the IP header.
  */
 static int sky2_rx_fill(struct sky2_port *sky2)
 {
 	unsigned i;
-	const unsigned rx_buf_size = sky2->netdev->mtu + ETH_HLEN + 8;
+	unsigned size = sky2_rx_size(sky2);
 
+	pr_debug("rx_fill size=%d\n", size);
 	for (i = 0; i < sky2->rx_pending; i++) {
 		struct ring_info *re = sky2->rx_ring + i;
 
-		re->skb = sky2_rx_alloc(sky2, rx_buf_size, GFP_KERNEL);
+		re->skb = dev_alloc_skb(size);
 		if (!re->skb)
 			goto nomem;
 
 		re->mapaddr = pci_map_single(sky2->hw->pdev, re->skb->data,
-					     rx_buf_size, PCI_DMA_FROMDEVICE);
-		re->maplen = rx_buf_size;
+					     size, PCI_DMA_FROMDEVICE);
+		re->maplen = size;
 		sky2_rx_add(sky2, re);
 	}
 
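For reference, the new sky2_rx_size() helper sizes each receive buffer as the MTU plus the Ethernet header plus 4 bytes, rounded up to a multiple of 8; for the default MTU of 1500 that is 1518 rounded up to 1520. Below is a minimal userspace sketch of the same arithmetic; the standalone ETH_HLEN define and the sample MTU values are assumptions for illustration, not part of the patch.

/* Sketch only: mirrors the rounding done by the new sky2_rx_size() helper. */
#include <stdio.h>

#define ETH_HLEN 14	/* bytes in an Ethernet header */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

static unsigned rx_size(unsigned mtu)
{
	/* MTU + Ethernet header + 4 spare bytes, rounded up to an 8-byte multiple */
	return roundup(mtu + ETH_HLEN + 4, 8);
}

int main(void)
{
	printf("mtu 1500 -> %u\n", rx_size(1500));	/* prints 1520 */
	printf("mtu 9000 -> %u\n", rx_size(9000));	/* prints 9024 */
	return 0;
}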
@@ -1408,8 +1406,8 @@ static struct sk_buff *sky2_receive(struct sky2_hw *hw, unsigned port,
 	struct net_device *dev = hw->dev[port];
 	struct sky2_port *sky2 = netdev_priv(dev);
 	struct ring_info *re = sky2->rx_ring + sky2->rx_next;
-	struct sk_buff *skb, *nskb;
-	const unsigned int rx_buf_size = dev->mtu + ETH_HLEN + 8;
+	struct sk_buff *skb = NULL;
+	const unsigned int bufsize = sky2_rx_size(sky2);
 
 	if (unlikely(netif_msg_rx_status(sky2)))
 		printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
@@ -1417,43 +1415,49 @@ static struct sk_buff *sky2_receive(struct sky2_hw *hw, unsigned port,
 
 	sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
 
-	skb = NULL;
 	if (!(status & GMR_FS_RX_OK)
 	    || (status & GMR_FS_ANY_ERR)
 	    || (length << 16) != (status & GMR_FS_LEN)
-	    || length > rx_buf_size)
+	    || length > bufsize)
 		goto error;
 
 	if (length < RX_COPY_THRESHOLD) {
-		nskb = sky2_rx_alloc(sky2, length, GFP_ATOMIC);
-		if (!nskb)
+		skb = alloc_skb(length + 2, GFP_ATOMIC);
+		if (!skb)
 			goto resubmit;
 
+		skb_reserve(skb, 2);
 		pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->mapaddr,
 					    length, PCI_DMA_FROMDEVICE);
-		memcpy(nskb->data, re->skb->data, length);
+		memcpy(skb->data, re->skb->data, length);
 		pci_dma_sync_single_for_device(sky2->hw->pdev, re->mapaddr,
 					       length, PCI_DMA_FROMDEVICE);
-		skb = nskb;
 	} else {
-		nskb = sky2_rx_alloc(sky2, rx_buf_size, GFP_ATOMIC);
+		struct sk_buff *nskb;
+
+		nskb = dev_alloc_skb(bufsize);
 		if (!nskb)
 			goto resubmit;
 
 		skb = re->skb;
+		re->skb = nskb;
 		pci_unmap_single(sky2->hw->pdev, re->mapaddr,
 				 re->maplen, PCI_DMA_FROMDEVICE);
 		prefetch(skb->data);
 
-		re->skb = nskb;
 		re->mapaddr = pci_map_single(sky2->hw->pdev, nskb->data,
-					     rx_buf_size, PCI_DMA_FROMDEVICE);
-		re->maplen = rx_buf_size;
+					     bufsize, PCI_DMA_FROMDEVICE);
+		re->maplen = bufsize;
 	}
 
+	skb->dev = dev;
+	skb_put(skb, length);
+	skb->protocol = eth_type_trans(skb, dev);
+	dev->last_rx = jiffies;
+
 resubmit:
-	BUG_ON(re->skb == skb);
 	sky2_rx_add(sky2, re);
+
 	return skb;
 
 error:
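The reworked receive path above is a copy-break scheme: frames shorter than RX_COPY_THRESHOLD are copied into a small freshly allocated skb (which can be offset by 2 bytes to align the IP header), while longer frames hand the original ring buffer up the stack and replace it with a new, unaligned one. A rough self-contained sketch of that decision follows; the buffer and slot types are made-up stand-ins, not the kernel's skb and ring_info structures.

/* Sketch only: the copy-break choice, with stand-in types (not the kernel API). */
#include <stdlib.h>
#include <string.h>

#define COPY_THRESHOLD 256	/* mirrors RX_COPY_THRESHOLD */
#define BUF_SIZE 1520		/* assumed rx buffer size, see sketch above */

struct buf {
	unsigned char data[BUF_SIZE];
	unsigned len;
};

struct rx_slot {
	struct buf *buf;	/* buffer currently owned by the ring slot */
};

/* Returns the buffer to hand up the stack, or NULL if the frame is dropped;
 * either way the slot still owns a buffer and can be resubmitted. */
struct buf *receive(struct rx_slot *slot, unsigned length)
{
	struct buf *out;

	if (length < COPY_THRESHOLD) {
		/* Small frame: copy out, the slot keeps its buffer. */
		out = malloc(sizeof(*out));
		if (!out)
			return NULL;
		memcpy(out->data, slot->buf->data, length);
		out->len = length;
	} else {
		/* Large frame: pass the slot's buffer up, refill with a fresh one. */
		struct buf *fresh = malloc(sizeof(*fresh));

		if (!fresh)
			return NULL;	/* keep the old buffer, drop the frame */
		fresh->len = 0;
		out = slot->buf;
		out->len = length;
		slot->buf = fresh;
	}
	return out;
}

Copying only small frames keeps the per-packet allocation cheap where a copy is inexpensive, while large frames avoid the copy entirely at the cost of replacing the ring buffer.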
@@ -1472,6 +1476,7 @@ error:
 		sky2->net_stats.rx_crc_errors++;
 	if (status & GMR_FS_RX_FF_OV)
 		sky2->net_stats.rx_fifo_errors++;
+
 	goto resubmit;
 }
 
@@ -1502,6 +1507,7 @@ static int sky2_poll(struct net_device *dev, int *budget)
 	unsigned int csum[2];
 
 	hwidx = sky2_read16(hw, STAT_PUT_IDX);
+	BUG_ON(hwidx >= STATUS_RING_SIZE);
 	rmb();
 	while (hw->st_idx != hwidx && work_done < to_do) {
 		struct sky2_status_le *le = hw->st_le + hw->st_idx;
@@ -1520,22 +1526,18 @@ static int sky2_poll(struct net_device *dev, int *budget)
 		case OP_RXSTAT:
 			skb = sky2_receive(hw, port, length, status);
 			if (likely(skb)) {
-				__skb_put(skb, length);
-				skb->protocol = eth_type_trans(skb, dev);
-
 				/* Add hw checksum if available */
 				skb->ip_summed = summed[port];
 				skb->csum = csum[port];
 
-				/* Clear for next packet */
-				csum[port] = 0;
-				summed[port] = CHECKSUM_NONE;
-
 				netif_receive_skb(skb);
-
-				dev->last_rx = jiffies;
 				++work_done;
 			}
+
+			/* Clear for next packet */
+			csum[port] = 0;
+			summed[port] = CHECKSUM_NONE;
+
 			break;
 
 		case OP_RXCHKS:
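In the status ring, an OP_RXCHKS element carries hardware checksum state that the following OP_RXSTAT on the same port picks up; the change above clears that saved state after every OP_RXSTAT, not only after a successfully delivered frame, so a dropped frame no longer leaves stale checksum data for the next one. A stripped-down sketch of that bookkeeping; the op names are reused from the driver, everything else is illustrative.

/* Sketch only: per-port checksum state is consumed and cleared per OP_RXSTAT. */
#include <stdbool.h>
#include <stdio.h>

enum op { OP_RXCHKS, OP_RXSTAT };

struct status_elem {
	enum op op;
	unsigned port;
	unsigned csum;		/* only meaningful for OP_RXCHKS */
	bool delivered;		/* did OP_RXSTAT produce an skb? */
};

int main(void)
{
	unsigned csum[2] = { 0, 0 };
	bool summed[2] = { false, false };
	struct status_elem ring[] = {
		{ OP_RXCHKS, 0, 0x1234, false },
		{ OP_RXSTAT, 0, 0, false },	/* frame dropped (bad status) */
		{ OP_RXSTAT, 0, 0, true },	/* next frame, no checksum element */
	};

	for (unsigned i = 0; i < sizeof(ring) / sizeof(ring[0]); i++) {
		const struct status_elem *e = &ring[i];

		switch (e->op) {
		case OP_RXCHKS:
			csum[e->port] = e->csum;
			summed[e->port] = true;
			break;
		case OP_RXSTAT:
			if (e->delivered)
				printf("deliver: summed=%d csum=0x%x\n",
				       summed[e->port], csum[e->port]);
			/* clear for next packet, even when the frame was dropped */
			csum[e->port] = 0;
			summed[e->port] = false;
			break;
		}
	}
	return 0;
}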
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 9256303acf76..1a8a004e1060 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1679,7 +1679,7 @@ enum {
 	GM_IS_RX_FF_OR	= 1<<1,	/* Receive FIFO Overrun */
 	GM_IS_RX_COMPL	= 1<<0,	/* Frame Reception Complete */
 
-#define GMAC_DEF_MSK	(GM_IS_TX_FF_UR|GM_IS_RX_FF_OR)
+#define GMAC_DEF_MSK	GM_IS_TX_FF_UR
 
 /* GMAC_LINK_CTRL	16 bit	GMAC Link Control Reg (YUKON only) */
 /*	Bits 15.. 2:	reserved */