about summary refs log tree commit diff stats
path: root/drivers/net/xen-netback/interface.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--  drivers/net/xen-netback/interface.c  15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 895fe84011e7..12f9e2708afb 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -166,7 +166,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 
 	cb = XENVIF_RX_CB(skb);
-	cb->expires = jiffies + rx_drain_timeout_jiffies;
+	cb->expires = jiffies + vif->drain_timeout;
 
 	xenvif_rx_queue_tail(queue, skb);
 	xenvif_kick_thread(queue);
@@ -235,10 +235,10 @@ static void xenvif_down(struct xenvif *vif)
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
-		napi_disable(&queue->napi);
 		disable_irq(queue->tx_irq);
 		if (queue->tx_irq != queue->rx_irq)
 			disable_irq(queue->rx_irq);
+		napi_disable(&queue->napi);
 		del_timer_sync(&queue->credit_timeout);
 	}
 }
@@ -414,6 +414,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	vif->ip_csum = 1;
 	vif->dev = dev;
 	vif->disabled = false;
+	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
+	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);
 
 	/* Start out with no queues. */
 	vif->queues = NULL;
@@ -481,9 +483,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 	 * better enable it. The long term solution would be to use just a
 	 * bunch of valid page descriptors, without dependency on ballooning
 	 */
-	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
-				       queue->mmap_pages,
-				       false);
+	err = gnttab_alloc_pages(MAX_PENDING_REQS,
+				 queue->mmap_pages);
 	if (err) {
 		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
 		return -ENOMEM;
@@ -576,6 +577,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 		goto err_rx_unbind;
 	}
 	queue->task = task;
+	get_task_struct(task);
 
 	task = kthread_create(xenvif_dealloc_kthread,
 			      (void *)queue, "%s-dealloc", queue->name);
@@ -632,6 +634,7 @@ void xenvif_disconnect(struct xenvif *vif)
 
 	if (queue->task) {
 		kthread_stop(queue->task);
+		put_task_struct(queue->task);
 		queue->task = NULL;
 	}
 
@@ -660,7 +663,7 @@ void xenvif_disconnect(struct xenvif *vif)
  */
 void xenvif_deinit_queue(struct xenvif_queue *queue)
 {
-	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
+	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
 }
 
 void xenvif_free(struct xenvif *vif)