diff options
author | Chris Zankel <chris@zankel.net> | 2014-02-24 03:34:36 -0500 |
---|---|---|
committer | Chris Zankel <chris@zankel.net> | 2014-02-24 03:34:36 -0500 |
commit | b3fdfc1b4b641d372e35ced98814289bc60bc5d1 (patch) | |
tree | 5f11d5ba885031dde45690745646519fb887f447 /drivers/net/xen-netback/interface.c | |
parent | c0e50d41126e4786d9cf1105bdf783e55c99f915 (diff) | |
parent | f63b6d7555cd4064554b39da4d44c4cbbc9d6a4a (diff) |
Merge tag 'xtensa-for-next-20140221-1' into for_next
Xtensa fixes for 3.14:
- allow booting xtfpga on boards with new uBoot and >128MBytes memory;
- drop nonexistent GPIO32 support from fsf variant;
- don't select USE_GENERIC_SMP_HELPERS;
- enable common clock framework support, set up ethoc clock on xtfpga;
- wire up sched_setattr and sched_getattr syscalls.
Signed-off-by: Chris Zankel <chris@zankel.net>
Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r-- | drivers/net/xen-netback/interface.c | 57 |
1 file changed, 34 insertions(+), 23 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 870f1fa58370..7669d49a67e2 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/ethtool.h> | 34 | #include <linux/ethtool.h> |
35 | #include <linux/rtnetlink.h> | 35 | #include <linux/rtnetlink.h> |
36 | #include <linux/if_vlan.h> | 36 | #include <linux/if_vlan.h> |
37 | #include <linux/vmalloc.h> | ||
37 | 38 | ||
38 | #include <xen/events.h> | 39 | #include <xen/events.h> |
39 | #include <asm/xen/hypercall.h> | 40 | #include <asm/xen/hypercall.h> |
@@ -46,11 +47,6 @@ int xenvif_schedulable(struct xenvif *vif) | |||
46 | return netif_running(vif->dev) && netif_carrier_ok(vif->dev); | 47 | return netif_running(vif->dev) && netif_carrier_ok(vif->dev); |
47 | } | 48 | } |
48 | 49 | ||
49 | static int xenvif_rx_schedulable(struct xenvif *vif) | ||
50 | { | ||
51 | return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif); | ||
52 | } | ||
53 | |||
54 | static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) | 50 | static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) |
55 | { | 51 | { |
56 | struct xenvif *vif = dev_id; | 52 | struct xenvif *vif = dev_id; |
@@ -104,8 +100,7 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) | |||
104 | { | 100 | { |
105 | struct xenvif *vif = dev_id; | 101 | struct xenvif *vif = dev_id; |
106 | 102 | ||
107 | if (xenvif_rx_schedulable(vif)) | 103 | xenvif_kick_thread(vif); |
108 | netif_wake_queue(vif->dev); | ||
109 | 104 | ||
110 | return IRQ_HANDLED; | 105 | return IRQ_HANDLED; |
111 | } | 106 | } |
@@ -121,24 +116,35 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id) | |||
121 | static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) | 116 | static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) |
122 | { | 117 | { |
123 | struct xenvif *vif = netdev_priv(dev); | 118 | struct xenvif *vif = netdev_priv(dev); |
119 | int min_slots_needed; | ||
124 | 120 | ||
125 | BUG_ON(skb->dev != dev); | 121 | BUG_ON(skb->dev != dev); |
126 | 122 | ||
127 | /* Drop the packet if vif is not ready */ | 123 | /* Drop the packet if vif is not ready */ |
128 | if (vif->task == NULL) | 124 | if (vif->task == NULL || !xenvif_schedulable(vif)) |
129 | goto drop; | 125 | goto drop; |
130 | 126 | ||
131 | /* Drop the packet if the target domain has no receive buffers. */ | 127 | /* At best we'll need one slot for the header and one for each |
132 | if (!xenvif_rx_schedulable(vif)) | 128 | * frag. |
133 | goto drop; | 129 | */ |
130 | min_slots_needed = 1 + skb_shinfo(skb)->nr_frags; | ||
134 | 131 | ||
135 | /* Reserve ring slots for the worst-case number of fragments. */ | 132 | /* If the skb is GSO then we'll also need an extra slot for the |
136 | vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb); | 133 | * metadata. |
134 | */ | ||
135 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || | ||
136 | skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | ||
137 | min_slots_needed++; | ||
137 | 138 | ||
138 | if (vif->can_queue && xenvif_must_stop_queue(vif)) | 139 | /* If the skb can't possibly fit in the remaining slots |
139 | netif_stop_queue(dev); | 140 | * then turn off the queue to give the ring a chance to |
141 | * drain. | ||
142 | */ | ||
143 | if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) | ||
144 | xenvif_stop_queue(vif); | ||
140 | 145 | ||
141 | xenvif_queue_tx_skb(vif, skb); | 146 | skb_queue_tail(&vif->rx_queue, skb); |
147 | xenvif_kick_thread(vif); | ||
142 | 148 | ||
143 | return NETDEV_TX_OK; | 149 | return NETDEV_TX_OK; |
144 | 150 | ||
@@ -148,12 +154,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
148 | return NETDEV_TX_OK; | 154 | return NETDEV_TX_OK; |
149 | } | 155 | } |
150 | 156 | ||
151 | void xenvif_notify_tx_completion(struct xenvif *vif) | ||
152 | { | ||
153 | if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif)) | ||
154 | netif_wake_queue(vif->dev); | ||
155 | } | ||
156 | |||
157 | static struct net_device_stats *xenvif_get_stats(struct net_device *dev) | 157 | static struct net_device_stats *xenvif_get_stats(struct net_device *dev) |
158 | { | 158 | { |
159 | struct xenvif *vif = netdev_priv(dev); | 159 | struct xenvif *vif = netdev_priv(dev); |
@@ -307,6 +307,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, | |||
307 | SET_NETDEV_DEV(dev, parent); | 307 | SET_NETDEV_DEV(dev, parent); |
308 | 308 | ||
309 | vif = netdev_priv(dev); | 309 | vif = netdev_priv(dev); |
310 | |||
311 | vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) * | ||
312 | MAX_GRANT_COPY_OPS); | ||
313 | if (vif->grant_copy_op == NULL) { | ||
314 | pr_warn("Could not allocate grant copy space for %s\n", name); | ||
315 | free_netdev(dev); | ||
316 | return ERR_PTR(-ENOMEM); | ||
317 | } | ||
318 | |||
310 | vif->domid = domid; | 319 | vif->domid = domid; |
311 | vif->handle = handle; | 320 | vif->handle = handle; |
312 | vif->can_sg = 1; | 321 | vif->can_sg = 1; |
@@ -378,6 +387,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, | |||
378 | if (err < 0) | 387 | if (err < 0) |
379 | goto err; | 388 | goto err; |
380 | 389 | ||
390 | init_waitqueue_head(&vif->wq); | ||
391 | |||
381 | if (tx_evtchn == rx_evtchn) { | 392 | if (tx_evtchn == rx_evtchn) { |
382 | /* feature-split-event-channels == 0 */ | 393 | /* feature-split-event-channels == 0 */ |
383 | err = bind_interdomain_evtchn_to_irqhandler( | 394 | err = bind_interdomain_evtchn_to_irqhandler( |
@@ -410,7 +421,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, | |||
410 | disable_irq(vif->rx_irq); | 421 | disable_irq(vif->rx_irq); |
411 | } | 422 | } |
412 | 423 | ||
413 | init_waitqueue_head(&vif->wq); | ||
414 | task = kthread_create(xenvif_kthread, | 424 | task = kthread_create(xenvif_kthread, |
415 | (void *)vif, "%s", vif->dev->name); | 425 | (void *)vif, "%s", vif->dev->name); |
416 | if (IS_ERR(task)) { | 426 | if (IS_ERR(task)) { |
@@ -487,6 +497,7 @@ void xenvif_free(struct xenvif *vif) | |||
487 | 497 | ||
488 | unregister_netdev(vif->dev); | 498 | unregister_netdev(vif->dev); |
489 | 499 | ||
500 | vfree(vif->grant_copy_op); | ||
490 | free_netdev(vif->dev); | 501 | free_netdev(vif->dev); |
491 | 502 | ||
492 | module_put(THIS_MODULE); | 503 | module_put(THIS_MODULE); |