Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--  drivers/net/xen-netfront.c | 33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 4445810335a8..70e551c19e3a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -72,6 +72,7 @@ struct netfront_info {
 	struct list_head list;
 	struct net_device *netdev;
 
+	struct napi_struct napi;
 	struct net_device_stats stats;
 
 	struct xen_netif_tx_front_ring tx;
@@ -185,7 +186,8 @@ static int xennet_can_sg(struct net_device *dev)
 static void rx_refill_timeout(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *)data;
-	netif_rx_schedule(dev);
+	struct netfront_info *np = netdev_priv(dev);
+	netif_rx_schedule(dev, &np->napi);
 }
 
 static int netfront_tx_slot_available(struct netfront_info *np)
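The conversion hinges on embedding a struct napi_struct in the driver's private data, so a code path that only holds the net_device (such as the refill timer above) can still reach the NAPI context through netdev_priv() before scheduling. A minimal sketch of that pattern, using the two-argument netif_rx_schedule() form this API change introduces; the my_priv/my_timer_fn names are illustrative, not part of this driver:

#include <linux/netdevice.h>

struct my_priv {                        /* illustrative private struct */
	struct net_device *netdev;
	struct napi_struct napi;        /* NAPI context lives with the device state */
};

static void my_timer_fn(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct my_priv *priv = netdev_priv(dev);

	/* the old API took only dev; the per-device napi_struct must now be passed */
	netif_rx_schedule(dev, &priv->napi);
}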
@@ -342,12 +344,14 @@ static int xennet_open(struct net_device *dev)
 
 	memset(&np->stats, 0, sizeof(np->stats));
 
+	napi_enable(&np->napi);
+
 	spin_lock_bh(&np->rx_lock);
 	if (netif_carrier_ok(dev)) {
 		xennet_alloc_rx_buffers(dev);
 		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
 		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
-			netif_rx_schedule(dev);
+			netif_rx_schedule(dev, &np->napi);
 	}
 	spin_unlock_bh(&np->rx_lock);
 
@@ -589,6 +593,7 @@ static int xennet_close(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
 	netif_stop_queue(np->netdev);
+	napi_disable(&np->napi);
 	return 0;
 }
 
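With the napi_struct embedded in the private data, the open/close paths take over its lifecycle: NAPI must be enabled before anything can schedule it, and disabled again on close so no poll runs against a stopped device. A hedged sketch of that pairing, reusing the illustrative my_priv from above:

static int my_open(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);       /* must precede any netif_rx_schedule() */
	netif_start_queue(dev);
	return 0;
}

static int my_close(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);      /* waits for any in-flight poll to finish */
	return 0;
}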
@@ -872,15 +877,16 @@ static int handle_incoming_queue(struct net_device *dev,
 	return packets_dropped;
 }
 
-static int xennet_poll(struct net_device *dev, int *pbudget)
+static int xennet_poll(struct napi_struct *napi, int budget)
 {
-	struct netfront_info *np = netdev_priv(dev);
+	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
+	struct net_device *dev = np->netdev;
 	struct sk_buff *skb;
 	struct netfront_rx_info rinfo;
 	struct xen_netif_rx_response *rx = &rinfo.rx;
 	struct xen_netif_extra_info *extras = rinfo.extras;
 	RING_IDX i, rp;
-	int work_done, budget, more_to_do = 1;
+	int work_done;
 	struct sk_buff_head rxq;
 	struct sk_buff_head errq;
 	struct sk_buff_head tmpq;
@@ -899,9 +905,6 @@ static int xennet_poll(struct net_device *dev, int *pbudget)
 	skb_queue_head_init(&errq);
 	skb_queue_head_init(&tmpq);
 
-	budget = *pbudget;
-	if (budget > dev->quota)
-		budget = dev->quota;
 	rp = np->rx.sring->rsp_prod;
 	rmb(); /* Ensure we see queued responses up to 'rp'. */
 
@@ -1006,22 +1009,21 @@ err:
 
 	xennet_alloc_rx_buffers(dev);
 
-	*pbudget -= work_done;
-	dev->quota -= work_done;
-
 	if (work_done < budget) {
+		int more_to_do = 0;
+
 		local_irq_save(flags);
 
 		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
 		if (!more_to_do)
-			__netif_rx_complete(dev);
+			__netif_rx_complete(dev, napi);
 
 		local_irq_restore(flags);
 	}
 
 	spin_unlock(&np->rx_lock);
 
-	return more_to_do;
+	return work_done;
 }
 
 static int xennet_change_mtu(struct net_device *dev, int mtu)
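These two hunks change the poll handler's contract: the budget arrives as a plain argument instead of being derived from *pbudget and dev->quota, and the function returns the number of packets processed rather than a more-work flag. Completion is signalled only when less than the full budget was used and the ring has no pending responses. A minimal skeleton of the new convention, assuming hypothetical my_process_ring()/my_ring_has_pending() helpers:

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	struct net_device *dev = priv->netdev;
	int work_done;

	work_done = my_process_ring(dev, budget);   /* never exceeds budget */

	if (work_done < budget) {
		unsigned long flags;

		local_irq_save(flags);
		/* re-check for late responses before leaving the poll list */
		if (!my_ring_has_pending(priv))
			__netif_rx_complete(dev, napi);
		local_irq_restore(flags);
	}

	return work_done;   /* if the whole budget was used, we stay scheduled */
}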
@@ -1201,10 +1203,9 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
 	netdev->hard_start_xmit = xennet_start_xmit;
 	netdev->stop            = xennet_close;
 	netdev->get_stats       = xennet_get_stats;
-	netdev->poll            = xennet_poll;
+	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
 	netdev->uninit          = xennet_uninit;
 	netdev->change_mtu      = xennet_change_mtu;
-	netdev->weight          = 64;
 	netdev->features        = NETIF_F_IP_CSUM;
 
 	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
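Registration follows the same shift: instead of filling in netdev->poll and netdev->weight, the driver hands its embedded napi_struct to netif_napi_add() together with the poll callback and weight. A sketch of the probe-time setup under the same illustrative names (64 matches the weight this driver keeps using):

static void my_setup(struct net_device *netdev)
{
	struct my_priv *priv = netdev_priv(netdev);

	priv->netdev = netdev;

	/* replaces: netdev->poll = my_poll; netdev->weight = 64; */
	netif_napi_add(netdev, &priv->napi, my_poll, 64);
}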
@@ -1349,7 +1350,7 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id)
 		xennet_tx_buf_gc(dev);
 		/* Under tx_lock: protects access to rx shared-ring indexes. */
 		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
-			netif_rx_schedule(dev);
+			netif_rx_schedule(dev, &np->napi);
 	}
 
 	spin_unlock_irqrestore(&np->tx_lock, flags);
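The interrupt handler keeps its role as a thin scheduler: it reclaims transmit completions and, if the receive ring has unconsumed responses, defers the rest of the work to NAPI, which now means passing the embedded napi_struct as well. A hedged sketch in the same illustrative terms (the lock and ring accessors are simplified placeholders, not this driver's exact fields):

static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct my_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->tx_lock, flags);   /* placeholder lock member */

	my_tx_buf_gc(dev);                          /* reclaim completed tx slots */
	if (my_ring_has_pending(priv))
		netif_rx_schedule(dev, &priv->napi);    /* rx work happens in my_poll() */

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return IRQ_HANDLED;
}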