author	Stephen Hemminger <shemminger@vyatta.com>	2008-11-20 23:14:53 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-20 23:14:53 -0500
commit	008298231abbeb91bc7be9e8b078607b816d1a4a (patch)
tree	8cb0c17720086ef97c614b96241f06aa63ce8511 /net/core/dev.c
parent	6ab33d51713d6d60c7677c0d020910a8cb37e513 (diff)
netdev: add more functions to netdevice ops
This patch moves neigh_setup and hard_start_xmit into the network device ops structure. For bisection, fix all the previously converted drivers as well. Bonding driver took the biggest hit on this.

Added a prefetch of the hard_start_xmit in the fast path to try and reduce any impact this would have.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
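For driver authors, the practical effect is that the transmit hooks are now reached through dev->netdev_ops rather than through function-pointer fields on struct net_device itself. A minimal, hypothetical driver-side sketch of what a converted driver looks like (the foo_* names are illustrative only and not part of this commit):

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* hand the skb to the hardware here ... */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_start_xmit	= foo_start_xmit,
	/* .ndo_neigh_setup and .ndo_select_queue are wired up the same way */
};

static void foo_setup(struct net_device *dev)
{
	/* before this series: dev->hard_start_xmit = foo_start_xmit; */
	dev->netdev_ops = &foo_netdev_ops;
}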
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c | 12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 8843f4e3f5e1..4615e9a443aa 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1660,6 +1660,9 @@ static int dev_gso_segment(struct sk_buff *skb)
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq)
 {
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	prefetch(&dev->netdev_ops->ndo_start_xmit);
 	if (likely(!skb->next)) {
 		if (!list_empty(&ptype_all))
 			dev_queue_xmit_nit(skb, dev);
@@ -1671,7 +1674,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			goto gso;
 	}
 
-	return dev->hard_start_xmit(skb, dev);
+	return ops->ndo_start_xmit(skb, dev);
 }
 
 gso:
@@ -1681,7 +1684,7 @@ gso:
 
 		skb->next = nskb->next;
 		nskb->next = NULL;
-		rc = dev->hard_start_xmit(nskb, dev);
+		rc = ops->ndo_start_xmit(nskb, dev);
 		if (unlikely(rc)) {
 			nskb->next = skb->next;
 			skb->next = nskb;
@@ -1755,10 +1758,11 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
+	const struct net_device_ops *ops = dev->netdev_ops;
 	u16 queue_index = 0;
 
-	if (dev->select_queue)
-		queue_index = dev->select_queue(dev, skb);
+	if (ops->ndo_select_queue)
+		queue_index = ops->ndo_select_queue(dev, skb);
 	else if (dev->real_num_tx_queues > 1)
 		queue_index = simple_tx_hash(dev, skb);
 