author	Stephen Hemminger <shemminger@vyatta.com>	2008-11-20 23:14:53 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-20 23:14:53 -0500
commit	008298231abbeb91bc7be9e8b078607b816d1a4a (patch)
tree	8cb0c17720086ef97c614b96241f06aa63ce8511 /net/core/netpoll.c
parent	6ab33d51713d6d60c7677c0d020910a8cb37e513 (diff)
netdev: add more functions to netdevice ops
This patch moves neigh_setup and hard_start_xmit into the network
device ops structure. For bisection, fix all the previously converted
drivers as well; the bonding driver took the biggest hit from this.

Added a prefetch of the hard_start_xmit in the fast path to try and
reduce any impact this would have.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
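As a hedged illustration of the driver-side conversion this patch requires:
before, a driver assigned these callbacks directly on struct net_device
(dev->hard_start_xmit = foo_start_xmit; dev->neigh_setup = foo_neigh_setup;);
after, it points dev->netdev_ops at a shared const ops table. The "foo"
driver below is hypothetical; only the .ndo_* field names come from this
patch series.

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_start_xmit  = foo_start_xmit,
		.ndo_neigh_setup = foo_neigh_setup,
	};

	static void foo_setup(struct net_device *dev)
	{
		/* one const table shared by every foo device, instead of
		 * per-device function pointers scattered on net_device */
		dev->netdev_ops = &foo_netdev_ops;
	}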
Diffstat (limited to 'net/core/netpoll.c')
-rw-r--r--	net/core/netpoll.c	6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 630df6034444..96fb0519eb7a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -58,6 +58,7 @@ static void queue_process(struct work_struct *work)
 
 	while ((skb = skb_dequeue(&npinfo->txq))) {
 		struct net_device *dev = skb->dev;
+		const struct net_device_ops *ops = dev->netdev_ops;
 		struct netdev_queue *txq;
 
 		if (!netif_device_present(dev) || !netif_running(dev)) {
@@ -71,7 +72,7 @@ static void queue_process(struct work_struct *work)
 		__netif_tx_lock(txq, smp_processor_id());
 		if (netif_tx_queue_stopped(txq) ||
 		    netif_tx_queue_frozen(txq) ||
-		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
 			__netif_tx_unlock(txq);
 			local_irq_restore(flags);
@@ -273,6 +274,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	int status = NETDEV_TX_BUSY;
 	unsigned long tries;
 	struct net_device *dev = np->dev;
+	const struct net_device_ops *ops = dev->netdev_ops;
 	struct netpoll_info *npinfo = np->dev->npinfo;
 
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
@@ -293,7 +295,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	     tries > 0; --tries) {
 		if (__netif_tx_trylock(txq)) {
 			if (!netif_tx_queue_stopped(txq))
-				status = dev->hard_start_xmit(skb, dev);
+				status = ops->ndo_start_xmit(skb, dev);
 			__netif_tx_unlock(txq);
 
 			if (status == NETDEV_TX_OK)
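The prefetch mentioned in the commit message lands outside
net/core/netpoll.c, so its hunk does not appear in this filtered diff.
As a rough, illustrative sketch only (the surrounding function is
invented for this note, not the patch's actual hunk), the idea is to
touch the ops table entry early so the extra indirection through
dev->netdev_ops is cheaper by the time the indirect call is made:

	#include <linux/prefetch.h>

	static void example_tx_fast_path(struct net_device *dev,
					 struct sk_buff *skb)
	{
		const struct net_device_ops *ops = dev->netdev_ops;

		/* warm the cache line holding ndo_start_xmit before the
		 * queue selection and tx locking that precede the call */
		prefetch(&ops->ndo_start_xmit);

		/* ... locking, then ops->ndo_start_xmit(skb, dev) ... */
	}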