about | summary | refs | log | tree | commit | diff | stats
path: root/net/core
diff options
context:
space:
mode:
authorStephen Hemminger <shemminger@vyatta.com>2008-11-20 23:14:53 -0500
committerDavid S. Miller <davem@davemloft.net>2008-11-20 23:14:53 -0500
commit008298231abbeb91bc7be9e8b078607b816d1a4a (patch)
tree8cb0c17720086ef97c614b96241f06aa63ce8511 /net/core
parent6ab33d51713d6d60c7677c0d020910a8cb37e513 (diff)
netdev: add more functions to netdevice ops
This patch moves neigh_setup and hard_start_xmit into the
network device ops structure. For bisection, fix all the previously
converted drivers as well. Bonding driver took the biggest hit on this.

Added a prefetch of the hard_start_xmit in the fast path to try and reduce
any impact this would have.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--net/core/dev.c12
-rw-r--r--net/core/neighbour.c6
-rw-r--r--net/core/netpoll.c6
-rw-r--r--net/core/pktgen.c8
4 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 8843f4e3f5e1..4615e9a443aa 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1660,6 +1660,9 @@ static int dev_gso_segment(struct sk_buff *skb)
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq)
 {
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	prefetch(&dev->netdev_ops->ndo_start_xmit);
 	if (likely(!skb->next)) {
 		if (!list_empty(&ptype_all))
 			dev_queue_xmit_nit(skb, dev);
@@ -1671,7 +1674,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			goto gso;
 	}
 
-	return dev->hard_start_xmit(skb, dev);
+	return ops->ndo_start_xmit(skb, dev);
 }
 
 gso:
@@ -1681,7 +1684,7 @@ gso:
 
 		skb->next = nskb->next;
 		nskb->next = NULL;
-		rc = dev->hard_start_xmit(nskb, dev);
+		rc = ops->ndo_start_xmit(nskb, dev);
 		if (unlikely(rc)) {
 			nskb->next = skb->next;
 			skb->next = nskb;
@@ -1755,10 +1758,11 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
+	const struct net_device_ops *ops = dev->netdev_ops;
 	u16 queue_index = 0;
 
-	if (dev->select_queue)
-		queue_index = dev->select_queue(dev, skb);
+	if (ops->ndo_select_queue)
+		queue_index = ops->ndo_select_queue(dev, skb);
 	else if (dev->real_num_tx_queues > 1)
 		queue_index = simple_tx_hash(dev, skb);
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index cca6a55909eb..9c3717a23cf7 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1327,9 +1327,9 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
 				      struct neigh_table *tbl)
 {
 	struct neigh_parms *p, *ref;
-	struct net *net;
+	struct net *net = dev_net(dev);
+	const struct net_device_ops *ops = dev->netdev_ops;
 
-	net = dev_net(dev);
 	ref = lookup_neigh_params(tbl, net, 0);
 	if (!ref)
 		return NULL;
@@ -1341,7 +1341,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
 	p->reachable_time =
 			neigh_rand_reach_time(p->base_reachable_time);
 
-	if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
+	if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
 		kfree(p);
 		return NULL;
 	}
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 630df6034444..96fb0519eb7a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -58,6 +58,7 @@ static void queue_process(struct work_struct *work)
 
 	while ((skb = skb_dequeue(&npinfo->txq))) {
 		struct net_device *dev = skb->dev;
+		const struct net_device_ops *ops = dev->netdev_ops;
 		struct netdev_queue *txq;
 
 		if (!netif_device_present(dev) || !netif_running(dev)) {
@@ -71,7 +72,7 @@ static void queue_process(struct work_struct *work)
 			__netif_tx_lock(txq, smp_processor_id());
 			if (netif_tx_queue_stopped(txq) ||
 			    netif_tx_queue_frozen(txq) ||
-			    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+			    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
 				skb_queue_head(&npinfo->txq, skb);
 				__netif_tx_unlock(txq);
 				local_irq_restore(flags);
@@ -273,6 +274,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	int status = NETDEV_TX_BUSY;
 	unsigned long tries;
 	struct net_device *dev = np->dev;
+	const struct net_device_ops *ops = dev->netdev_ops;
 	struct netpoll_info *npinfo = np->dev->npinfo;
 
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
@@ -293,7 +295,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	     tries > 0; --tries) {
 		if (__netif_tx_trylock(txq)) {
 			if (!netif_tx_queue_stopped(txq))
-				status = dev->hard_start_xmit(skb, dev);
+				status = ops->ndo_start_xmit(skb, dev);
 			__netif_tx_unlock(txq);
 
 			if (status == NETDEV_TX_OK)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 4e77914c4d42..15e0c2c7aacf 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3352,14 +3352,14 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
 
 static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
-	struct net_device *odev = NULL;
+	struct net_device *odev = pkt_dev->odev;
+	int (*xmit)(struct sk_buff *, struct net_device *)
+		= odev->netdev_ops->ndo_start_xmit;
 	struct netdev_queue *txq;
 	__u64 idle_start = 0;
 	u16 queue_map;
 	int ret;
 
-	odev = pkt_dev->odev;
-
 	if (pkt_dev->delay_us || pkt_dev->delay_ns) {
 		u64 now;
 
@@ -3440,7 +3440,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 	atomic_inc(&(pkt_dev->skb->users));
 retry_now:
-	ret = odev->hard_start_xmit(pkt_dev->skb, odev);
+	ret = (*xmit)(pkt_dev->skb, odev);
 	if (likely(ret == NETDEV_TX_OK)) {
 		pkt_dev->last_ok = 1;
 		pkt_dev->sofar++;