Diffstat (limited to 'drivers/net/xen-netback/interface.c')
 -rw-r--r--  drivers/net/xen-netback/interface.c | 78
 1 file changed, 18 insertions(+), 60 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f379689dde30..9259a732e8a4 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,9 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT 64
 
+/* Number of bytes allowed on the internal guest Rx queue. */
+#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
+
 /* This function is used to set SKBTX_DEV_ZEROCOPY as well as
  * increasing the inflight counter. We need to increase the inflight
  * counter because core driver calls into xenvif_zerocopy_callback
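Note: XENVIF_RX_QUEUE_BYTES caps the internal guest Rx queue at half a ring's worth of pages. The xenvif_rx_queue_tail() helper that enforces the cap lives in netback.c, outside this diff; below is a minimal sketch of the plausible logic, in which the rx_queue_len byte counter and the exact locking are assumptions, not code from this commit.

/* Sketch: queue an skb for the guest Rx thread and stop the netdev
 * tx queue once the queued bytes exceed queue->rx_queue_max.
 * rx_queue_len is an assumed byte counter protected by the list lock.
 */
static void rx_queue_tail_sketch(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	__skb_queue_tail(&queue->rx_queue, skb);

	queue->rx_queue_len += skb->len;
	if (queue->rx_queue_len > queue->rx_queue_max)
		netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev,
							queue->id));

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}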
@@ -60,20 +63,11 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
 	atomic_dec(&queue->inflight_packets);
 }
 
-static inline void xenvif_stop_queue(struct xenvif_queue *queue)
-{
-	struct net_device *dev = queue->vif->dev;
-
-	if (!queue->vif->can_queue)
-		return;
-
-	netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
-}
-
 int xenvif_schedulable(struct xenvif *vif)
 {
 	return netif_running(vif->dev) &&
-		test_bit(VIF_STATUS_CONNECTED, &vif->status);
+		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
+		!vif->disabled;
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
@@ -114,16 +108,7 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif_queue *queue = dev_id;
-	struct netdev_queue *net_queue =
-		netdev_get_tx_queue(queue->vif->dev, queue->id);
 
-	/* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
-	 * the carrier went down and this queue was previously blocked
-	 */
-	if (unlikely(netif_tx_queue_stopped(net_queue) ||
-		     (!netif_carrier_ok(queue->vif->dev) &&
-		      test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
-		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 	xenvif_kick_thread(queue);
 
 	return IRQ_HANDLED;
@@ -151,24 +136,13 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
 }
 
-/* Callback to wake the queue's thread and turn the carrier off on timeout */
-static void xenvif_rx_stalled(unsigned long data)
-{
-	struct xenvif_queue *queue = (struct xenvif_queue *)data;
-
-	if (xenvif_queue_stopped(queue)) {
-		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
-		xenvif_kick_thread(queue);
-	}
-}
-
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
 	unsigned int num_queues = vif->num_queues;
 	u16 index;
-	int min_slots_needed;
+	struct xenvif_rx_cb *cb;
 
 	BUG_ON(skb->dev != dev);
 
@@ -191,30 +165,10 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	    !xenvif_schedulable(vif))
 		goto drop;
 
-	/* At best we'll need one slot for the header and one for each
-	 * frag.
-	 */
-	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
-
-	/* If the skb is GSO then we'll also need an extra slot for the
-	 * metadata.
-	 */
-	if (skb_is_gso(skb))
-		min_slots_needed++;
-
-	/* If the skb can't possibly fit in the remaining slots
-	 * then turn off the queue to give the ring a chance to
-	 * drain.
-	 */
-	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
-		queue->rx_stalled.function = xenvif_rx_stalled;
-		queue->rx_stalled.data = (unsigned long)queue;
-		xenvif_stop_queue(queue);
-		mod_timer(&queue->rx_stalled,
-			  jiffies + rx_drain_timeout_jiffies);
-	}
+	cb = XENVIF_RX_CB(skb);
+	cb->expires = jiffies + vif->drain_timeout;
 
-	skb_queue_tail(&queue->rx_queue, skb);
+	xenvif_rx_queue_tail(queue, skb);
 	xenvif_kick_thread(queue);
 
 	return NETDEV_TX_OK;
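Note: instead of stopping the queue when the ring looks full, each skb is now stamped with a drain deadline and handed to xenvif_rx_queue_tail(). The purge of expired packets presumably happens on the guest Rx kthread in netback.c (not shown in this diff); the following is a sketch under that assumption, with the surrounding locking left out (the real code would presumably hold the queue lock across the peek and unlink).

/* Sketch: drop packets whose drain deadline has passed, so a guest
 * that stops consuming Rx requests cannot pin an unbounded backlog.
 */
static void rx_queue_drop_expired_sketch(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_peek(&queue->rx_queue);
		if (!skb || !time_after(jiffies, XENVIF_RX_CB(skb)->expires))
			break;
		/* assumed to also adjust the queued-byte count in the
		 * real code */
		skb_unlink(skb, &queue->rx_queue);
		kfree_skb(skb);
	}
}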
@@ -281,10 +235,10 @@ static void xenvif_down(struct xenvif *vif)
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
-		napi_disable(&queue->napi);
 		disable_irq(queue->tx_irq);
 		if (queue->tx_irq != queue->rx_irq)
 			disable_irq(queue->rx_irq);
+		napi_disable(&queue->napi);
 		del_timer_sync(&queue->credit_timeout);
 	}
 }
@@ -460,11 +414,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	vif->ip_csum = 1;
 	vif->dev = dev;
 	vif->disabled = false;
+	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
+	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);
 
 	/* Start out with no queues. */
 	vif->queues = NULL;
 	vif->num_queues = 0;
 
+	spin_lock_init(&vif->lock);
+
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
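Note: rx_drain_timeout_msecs and rx_stall_timeout_msecs are module parameters defined elsewhere in the driver (netback.c in this series). A sketch of what their declarations plausibly look like; the default values shown here are assumptions, not taken from this diff.

/* Assumed module parameters: how long a packet may sit on the
 * internal Rx queue before being dropped (drain), and how long the
 * frontend may post no Rx requests before the queue is considered
 * stalled (stall).
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);

unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);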
@@ -508,6 +466,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 	init_timer(&queue->credit_timeout);
 	queue->credit_window_start = get_jiffies_64();
 
+	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
+
 	skb_queue_head_init(&queue->rx_queue);
 	skb_queue_head_init(&queue->tx_queue);
 
@@ -539,8 +499,6 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
 	}
 
-	init_timer(&queue->rx_stalled);
-
 	return 0;
 }
 
@@ -551,7 +509,6 @@ void xenvif_carrier_on(struct xenvif *vif)
 	dev_set_mtu(vif->dev, ETH_DATA_LEN);
 	netdev_update_features(vif->dev);
 	set_bit(VIF_STATUS_CONNECTED, &vif->status);
-	netif_carrier_on(vif->dev);
 	if (netif_running(vif->dev))
 		xenvif_up(vif);
 	rtnl_unlock();
@@ -611,6 +568,8 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 		disable_irq(queue->rx_irq);
 	}
 
+	queue->stalled = true;
+
 	task = kthread_create(xenvif_kthread_guest_rx,
 			      (void *)queue, "%s-guest-rx", queue->name);
 	if (IS_ERR(task)) {
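Note: queues now start out marked stalled, and netif_carrier_on() is no longer called unconditionally in xenvif_carrier_on() (see the hunk above); the carrier is presumably raised by the guest Rx thread once it observes the frontend making progress. A sketch of the implied carrier handling, where the stalled_queues counter is an assumption; vif->lock is the spinlock initialised in xenvif_alloc() above.

/* Sketch: mark one queue un-stalled; raise the carrier only when no
 * queue on the vif is stalled any more.
 */
static void queue_carrier_on_sketch(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	spin_lock(&vif->lock);
	if (queue->stalled) {
		queue->stalled = false;
		if (--vif->stalled_queues == 0)
			netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}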
@@ -674,7 +633,6 @@ void xenvif_disconnect(struct xenvif *vif)
 	netif_napi_del(&queue->napi);
 
 	if (queue->task) {
-		del_timer_sync(&queue->rx_stalled);
 		kthread_stop(queue->task);
 		queue->task = NULL;
 	}
