author	Dhananjay Phadke <dhananjay@netxen.com>	2009-08-23 04:35:11 -0400
committer	David S. Miller <davem@davemloft.net>	2009-08-23 22:00:25 -0400
commit	ce644ed4db3ee1075ebd9f4acc403e1f9410db21 (patch)
tree	eb84575826cb9f38a140dea285883661228b9ca4 /drivers/net/netxen
parent	1dbc84a7f6c2ebd8c69299e1adef22ee26db38c0 (diff)
netxen: refactor tx dma mapping code
Move all tx skb mapping code into netxen_map_tx_skb().

Signed-off-by: Dhananjay Phadke <dhananjay@netxen.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
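The pattern the new helper introduces is the usual map-then-unwind error handling: map the linear part of the skb, map each page fragment, and on the first mapping failure unmap everything already mapped, in reverse order, before returning -ENOMEM so the caller can simply drop the packet. Below is a minimal userspace sketch of that idiom, not driver code: fake_map(), fake_unmap() and map_all() are made-up stand-ins for pci_map_single()/pci_map_page() and their unmap counterparts.

/*
 * Sketch of the map/unwind idiom used by netxen_map_tx_skb().
 * fake_map() deliberately fails on index 2 to exercise the unwind path.
 */
#include <stdio.h>
#include <errno.h>

#define MAX_FRAGS 4

static int fake_map(int idx)		/* stand-in for pci_map_*() */
{
	return idx == 2 ? -1 : idx + 100;
}

static void fake_unmap(int handle)	/* stand-in for pci_unmap_*() */
{
	printf("unmapped handle %d\n", handle);
}

static int map_all(int *handles, int nr_frags)
{
	int i, h;

	/* map the "head" first, like pci_map_single() on skb->data */
	h = fake_map(0);
	if (h < 0)
		goto out_err;
	handles[0] = h;

	/* then each "fragment", like pci_map_page() per skb frag */
	for (i = 0; i < nr_frags; i++) {
		h = fake_map(i + 1);
		if (h < 0)
			goto unwind;
		handles[i + 1] = h;
	}
	return 0;

unwind:
	/* undo only the fragments that were actually mapped, newest first */
	while (--i >= 0)
		fake_unmap(handles[i + 1]);
	fake_unmap(handles[0]);		/* finally the head mapping */
out_err:
	return -ENOMEM;
}

int main(void)
{
	int handles[MAX_FRAGS + 1];

	if (map_all(handles, MAX_FRAGS))
		printf("mapping failed, everything unwound\n");
	return 0;
}

The payoff visible in the diff below: the xmit path gets a single success/failure point for DMA mapping, instead of cleanup logic threaded through the descriptor-filling loop.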
Diffstat (limited to 'drivers/net/netxen')
-rw-r--r--	drivers/net/netxen/netxen_nic_main.c	112
1 file changed, 60 insertions, 52 deletions
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 008657423f8e..f618e7c8eeb1 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1518,22 +1518,52 @@ netxen_tso_check(struct net_device *netdev,
 	barrier();
 }
 
-static void
-netxen_clean_tx_dma_mapping(struct pci_dev *pdev,
-		struct netxen_cmd_buffer *pbuf, int last)
+static int
+netxen_map_tx_skb(struct pci_dev *pdev,
+		struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
 {
-	int k;
-	struct netxen_skb_frag *buffrag;
+	struct netxen_skb_frag *nf;
+	struct skb_frag_struct *frag;
+	int i, nr_frags;
+	dma_addr_t map;
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	nf = &pbuf->frag_array[0];
+
+	map = pci_map_single(pdev, skb->data,
+			skb_headlen(skb), PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(pdev, map))
+		goto out_err;
 
-	buffrag = &pbuf->frag_array[0];
-	pci_unmap_single(pdev, buffrag->dma,
-			buffrag->length, PCI_DMA_TODEVICE);
+	nf->dma = map;
+	nf->length = skb_headlen(skb);
+
+	for (i = 0; i < nr_frags; i++) {
+		frag = &skb_shinfo(skb)->frags[i];
+		nf = &pbuf->frag_array[i+1];
+
+		map = pci_map_page(pdev, frag->page, frag->page_offset,
+				frag->size, PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, map))
+			goto unwind;
+
+		nf->dma = map;
+		nf->length = frag->size;
+	}
+
+	return 0;
 
-	for (k = 1; k < last; k++) {
-		buffrag = &pbuf->frag_array[k];
-		pci_unmap_page(pdev, buffrag->dma,
-			buffrag->length, PCI_DMA_TODEVICE);
+unwind:
+	while (--i >= 0) {
+		nf = &pbuf->frag_array[i+1];
+		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
 	}
+
+	nf = &pbuf->frag_array[0];
+	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+
+out_err:
+	return -ENOMEM;
 }
 
 static inline void
@@ -1548,17 +1578,14 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct netxen_adapter *adapter = netdev_priv(netdev);
 	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
-	struct skb_frag_struct *frag;
 	struct netxen_cmd_buffer *pbuf;
 	struct netxen_skb_frag *buffrag;
 	struct cmd_desc_type0 *hwdesc, *first_desc;
 	struct pci_dev *pdev;
-	dma_addr_t temp_dma;
 	int i, k;
-	unsigned long offset;
 
 	u32 producer;
-	int len, frag_count, no_of_desc;
+	int frag_count, no_of_desc;
 	u32 num_txd = tx_ring->num_desc;
 
 	frag_count = skb_shinfo(skb)->nr_frags + 1;
@@ -1572,72 +1599,53 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	producer = tx_ring->producer;
+	pbuf = &tx_ring->cmd_buf_arr[producer];
 
 	pdev = adapter->pdev;
-	len = skb->len - skb->data_len;
 
-	temp_dma = pci_map_single(pdev, skb->data, len, PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(pdev, temp_dma))
+	if (netxen_map_tx_skb(pdev, skb, pbuf))
 		goto drop_packet;
 
-	pbuf = &tx_ring->cmd_buf_arr[producer];
 	pbuf->skb = skb;
 	pbuf->frag_count = frag_count;
 
-	buffrag = &pbuf->frag_array[0];
-	buffrag->dma = temp_dma;
-	buffrag->length = len;
-
 	first_desc = hwdesc = &tx_ring->desc_head[producer];
 	netxen_clear_cmddesc((u64 *)hwdesc);
-	netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
-	netxen_set_tx_port(hwdesc, adapter->portnum);
 
-	hwdesc->buffer_length[0] = cpu_to_le16(len);
-	hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
+	netxen_set_tx_frags_len(first_desc, frag_count, skb->len);
+	netxen_set_tx_port(first_desc, adapter->portnum);
 
-	for (i = 1, k = 1; i < frag_count; i++, k++) {
+	for (i = 0; i < frag_count; i++) {
 
-		/* move to next desc. if there is a need */
-		if ((i & 0x3) == 0) {
-			k = 0;
+		k = i % 4;
+
+		if ((k == 0) && (i > 0)) {
+			/* move to next desc.*/
 			producer = get_next_index(producer, num_txd);
 			hwdesc = &tx_ring->desc_head[producer];
 			netxen_clear_cmddesc((u64 *)hwdesc);
-			pbuf = &tx_ring->cmd_buf_arr[producer];
-			pbuf->skb = NULL;
-		}
-		buffrag = &pbuf->frag_array[i];
-		frag = &skb_shinfo(skb)->frags[i - 1];
-		len = frag->size;
-		offset = frag->page_offset;
-
-		temp_dma = pci_map_page(pdev, frag->page, offset,
-				len, PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, temp_dma)) {
-			netxen_clean_tx_dma_mapping(pdev, pbuf, i);
-			goto drop_packet;
+			tx_ring->cmd_buf_arr[producer].skb = NULL;
 		}
 
-		buffrag->dma = temp_dma;
-		buffrag->length = len;
+		buffrag = &pbuf->frag_array[i];
 
-		hwdesc->buffer_length[k] = cpu_to_le16(len);
+		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
 		switch (k) {
 		case 0:
-			hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
+			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
 			break;
 		case 1:
-			hwdesc->addr_buffer2 = cpu_to_le64(temp_dma);
+			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
 			break;
 		case 2:
-			hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
+			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
 			break;
 		case 3:
-			hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
+			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
 			break;
 		}
 	}
+
 	tx_ring->producer = get_next_index(producer, num_txd);
 
 	netxen_tso_check(netdev, tx_ring, first_desc, skb);
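In the rewritten loop above, mapping has already been done by netxen_map_tx_skb(), so the loop only copies the saved dma/length pairs into the hardware descriptors: each cmd_desc_type0 carries four address/length slots, so fragment i lands in slot k = i % 4 and the producer index advances to a fresh descriptor whenever k wraps back to 0. A self-contained sketch of that packing follows; fake_desc and the addresses/lengths are invented for illustration, only the i % 4 slotting mirrors the driver.

/* Sketch: packing N buffers four-per-descriptor around a small ring. */
#include <stdio.h>

struct fake_desc {
	unsigned long long addr[4];	/* like addr_buffer1..addr_buffer4 */
	unsigned short len[4];		/* like buffer_length[0..3] */
};

int main(void)
{
	struct fake_desc ring[4] = {0};
	int frag_count = 6, producer = 0, num_txd = 4;
	int i, k;

	for (i = 0; i < frag_count; i++) {
		k = i % 4;
		if (k == 0 && i > 0)	/* move to next descriptor */
			producer = (producer + 1) % num_txd;

		ring[producer].addr[k] = 0x1000ull * (i + 1);	/* fake dma addr */
		ring[producer].len[k] = 64 * (i + 1);		/* fake length */
	}

	for (i = 0; i <= producer; i++)
		for (k = 0; k < 4; k++)
			if (ring[i].addr[k])
				printf("desc %d slot %d: addr=%#llx len=%u\n",
				       i, k, ring[i].addr[k],
				       (unsigned)ring[i].len[k]);
	return 0;
}

With six buffers this fills all four slots of descriptor 0 and the first two of descriptor 1, which is exactly why the driver clears each freshly claimed descriptor and tags it with a NULL skb before filling it.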