path: root/drivers/net/ethernet/ibm
author	Anton Blanchard <anton@samba.org>	2011-10-14 01:31:06 -0400
committer	David S. Miller <davem@davemloft.net>	2011-10-17 19:00:55 -0400
commit	13946f5e4eefd5162733a75c03bb9f52c9c69614
tree	e285c34ed8ac9acc46a21e9cd5a2adeb85fcfb0c /drivers/net/ethernet/ibm
parent	d695c335f9165cb73f9389479cce755e8207b5f4
ehea: Merge swqe2 TSO and non TSO paths
write_swqe2_TSO and write_swqe2_nonTSO are almost identical. For TSO we
have to set the TSO and mss bits in the wqe, and we only put the header
in the immediate area, no data. Collapse both functions into
write_swqe2_immediate.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
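To make the merged control flow easier to read outside the driver, here is a
minimal userspace C sketch of the same shape. It is an illustration only:
mock_skb, mock_swqe, MOCK_MAX_IMM and mock_write_swqe2_immediate are
simplified stand-ins for the real struct sk_buff, struct ehea_swqe,
SWQE2_MAX_IMM and write_swqe2_immediate shown in the diff below, and no data
is actually copied.

#include <stdio.h>

#define MOCK_MAX_IMM 160	/* stand-in for SWQE2_MAX_IMM */

/* Simplified stand-ins for struct sk_buff and struct ehea_swqe. */
struct mock_skb {
	int is_gso;		/* models skb_is_gso(skb) */
	int headlen;		/* models skb_headlen(skb) */
	int header_len;		/* models ETH_HLEN + ip_hdrlen + tcp_hdrlen */
};

struct mock_swqe {
	int tso;		/* models the EHEA_SWQE_TSO bit in tx_control */
	int immediate_len;	/* bytes placed in the immediate area */
	int sg1_len;		/* bytes left for the first scatter entry */
	int descriptors;
};

/*
 * Merged path: TSO and non-TSO differ only in how many bytes go into
 * the immediate area, so one conditional replaces a second function.
 */
static void mock_write_swqe2_immediate(const struct mock_skb *skb,
				       struct mock_swqe *swqe)
{
	int immediate_len = MOCK_MAX_IMM;

	swqe->descriptors = 0;

	if (skb->is_gso) {
		swqe->tso = 1;
		/* For TSO, only the headers go in the immediate area. */
		immediate_len = skb->header_len;
	}

	if (skb->is_gso || skb->headlen >= immediate_len) {
		swqe->immediate_len = immediate_len;
		if (skb->headlen > immediate_len) {
			/* Remainder of the linear data goes via sg1entry. */
			swqe->sg1_len = skb->headlen - immediate_len;
			swqe->descriptors++;
		}
	} else {
		/* Small non-TSO packet: all linear data fits immediately. */
		swqe->immediate_len = skb->headlen;
	}
}

int main(void)
{
	struct mock_skb tso_skb = { 1, 1400, 54 };
	struct mock_swqe swqe = { 0 };

	mock_write_swqe2_immediate(&tso_skb, &swqe);
	printf("tso=%d imm=%d sg1=%d desc=%d\n",
	       swqe.tso, swqe.immediate_len, swqe.sg1_len, swqe.descriptors);
	return 0;
}

The design point the sketch isolates: TSO only changes immediate_len (and the
TSO/mss bits); everything after that is common to both packet types.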
Diffstat (limited to 'drivers/net/ethernet/ibm')
-rw-r--r--	drivers/net/ethernet/ibm/ehea/ehea_main.c	76
1 file changed, 21 insertions(+), 55 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 77aafba8272c..0fc0ae8b830f 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1676,65 +1676,35 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
 	return ret;
 }
 
-static void write_swqe2_TSO(struct sk_buff *skb,
-			    struct ehea_swqe *swqe, u32 lkey)
-{
-	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
-	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
-	int skb_data_size = skb_headlen(skb);
-	int headersize;
-
-	/* Packet is TCP with TSO enabled */
-	swqe->tx_control |= EHEA_SWQE_TSO;
-	swqe->mss = skb_shinfo(skb)->gso_size;
-	/* copy only eth/ip/tcp headers to immediate data and
-	 * the rest of skb->data to sg1entry
-	 */
-	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
-
-	skb_data_size = skb_headlen(skb);
-
-	if (skb_data_size >= headersize) {
-		/* copy immediate data */
-		skb_copy_from_linear_data(skb, imm_data, headersize);
-		swqe->immediate_data_length = headersize;
-
-		if (skb_data_size > headersize) {
-			/* set sg1entry data */
-			sg1entry->l_key = lkey;
-			sg1entry->len = skb_data_size - headersize;
-			sg1entry->vaddr =
-				ehea_map_vaddr(skb->data + headersize);
-			swqe->descriptors++;
-		}
-	} else
-		pr_err("cannot handle fragmented headers\n");
-}
-
-static void write_swqe2_nonTSO(struct sk_buff *skb,
-			       struct ehea_swqe *swqe, u32 lkey)
+static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
+				  u32 lkey)
 {
 	int skb_data_size = skb_headlen(skb);
 	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
 	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
+	unsigned int immediate_len = SWQE2_MAX_IMM;
+
+	swqe->descriptors = 0;
 
-	/* Packet is any nonTSO type
-	 *
-	 * Copy as much as possible skb->data to immediate data and
-	 * the rest to sg1entry
-	 */
-	if (skb_data_size >= SWQE2_MAX_IMM) {
-		/* copy immediate data */
-		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);
+	if (skb_is_gso(skb)) {
+		swqe->tx_control |= EHEA_SWQE_TSO;
+		swqe->mss = skb_shinfo(skb)->gso_size;
+		/*
+		 * For TSO packets we only copy the headers into the
+		 * immediate area.
+		 */
+		immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+	}
 
-		swqe->immediate_data_length = SWQE2_MAX_IMM;
+	if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
+		skb_copy_from_linear_data(skb, imm_data, immediate_len);
+		swqe->immediate_data_length = immediate_len;
 
-		if (skb_data_size > SWQE2_MAX_IMM) {
-			/* copy sg1entry data */
+		if (skb_data_size > immediate_len) {
 			sg1entry->l_key = lkey;
-			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
+			sg1entry->len = skb_data_size - immediate_len;
 			sg1entry->vaddr =
-				ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
+				ehea_map_vaddr(skb->data + immediate_len);
 			swqe->descriptors++;
 		}
 	} else {
@@ -1753,13 +1723,9 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
 	nfrags = skb_shinfo(skb)->nr_frags;
 	sg1entry = &swqe->u.immdata_desc.sg_entry;
 	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
-	swqe->descriptors = 0;
 	sg1entry_contains_frag_data = 0;
 
-	if (skb_is_gso(skb))
-		write_swqe2_TSO(skb, swqe, lkey);
-	else
-		write_swqe2_nonTSO(skb, swqe, lkey);
+	write_swqe2_immediate(skb, swqe, lkey);
 
 	/* write descriptors */
 	if (nfrags > 0) {