Diffstat (limited to 'drivers/net/xen-netback/interface.c')
 -rw-r--r--   drivers/net/xen-netback/interface.c | 549
1 file changed, 339 insertions(+), 210 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index ef05c5c49d41..852da34b8961 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,16 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT 64
 
+static inline void xenvif_stop_queue(struct xenvif_queue *queue)
+{
+	struct net_device *dev = queue->vif->dev;
+
+	if (!queue->vif->can_queue)
+		return;
+
+	netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+}
+
 int xenvif_schedulable(struct xenvif *vif)
 {
 	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
@@ -50,57 +60,34 @@ int xenvif_schedulable(struct xenvif *vif)
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
-	struct xenvif *vif = dev_id;
+	struct xenvif_queue *queue = dev_id;
 
-	if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
-		napi_schedule(&vif->napi);
+	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
+		napi_schedule(&queue->napi);
 
 	return IRQ_HANDLED;
 }
 
-static int xenvif_poll(struct napi_struct *napi, int budget)
+int xenvif_poll(struct napi_struct *napi, int budget)
 {
-	struct xenvif *vif = container_of(napi, struct xenvif, napi);
+	struct xenvif_queue *queue =
+		container_of(napi, struct xenvif_queue, napi);
 	int work_done;
 
 	/* This vif is rogue, we pretend we've there is nothing to do
 	 * for this vif to deschedule it from NAPI. But this interface
 	 * will be turned off in thread context later.
 	 */
-	if (unlikely(vif->disabled)) {
+	if (unlikely(queue->vif->disabled)) {
 		napi_complete(napi);
 		return 0;
 	}
 
-	work_done = xenvif_tx_action(vif, budget);
+	work_done = xenvif_tx_action(queue, budget);
 
 	if (work_done < budget) {
-		int more_to_do = 0;
-		unsigned long flags;
-
-		/* It is necessary to disable IRQ before calling
-		 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
-		 * lose event from the frontend.
-		 *
-		 * Consider:
-		 *   RING_HAS_UNCONSUMED_REQUESTS
-		 *   <frontend generates event to trigger napi_schedule>
-		 *   __napi_complete
-		 *
-		 * This handler is still in scheduled state so the
-		 * event has no effect at all. After __napi_complete
-		 * this handler is descheduled and cannot get
-		 * scheduled again. We lose event in this case and the ring
-		 * will be completely stalled.
-		 */
-
-		local_irq_save(flags);
-
-		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
-		if (!more_to_do)
-			__napi_complete(napi);
-
-		local_irq_restore(flags);
+		napi_complete(napi);
+		xenvif_napi_schedule_or_enable_events(queue);
 	}
 
 	return work_done;
@@ -108,9 +95,9 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
 
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
-	struct xenvif *vif = dev_id;
+	struct xenvif_queue *queue = dev_id;
 
-	xenvif_kick_thread(vif);
+	xenvif_kick_thread(queue);
 
 	return IRQ_HANDLED;
 }
@@ -123,28 +110,80 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void xenvif_wake_queue(unsigned long data)
+int xenvif_queue_stopped(struct xenvif_queue *queue)
 {
-	struct xenvif *vif = (struct xenvif *)data;
+	struct net_device *dev = queue->vif->dev;
+	unsigned int id = queue->id;
+	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
+}
+
+void xenvif_wake_queue(struct xenvif_queue *queue)
+{
+	struct net_device *dev = queue->vif->dev;
+	unsigned int id = queue->id;
+	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
+}
 
-	if (netif_queue_stopped(vif->dev)) {
-		netdev_err(vif->dev, "draining TX queue\n");
-		vif->rx_queue_purge = true;
-		xenvif_kick_thread(vif);
-		netif_wake_queue(vif->dev);
+/* Callback to wake the queue and drain it on timeout */
+static void xenvif_wake_queue_callback(unsigned long data)
+{
+	struct xenvif_queue *queue = (struct xenvif_queue *)data;
+
+	if (xenvif_queue_stopped(queue)) {
+		netdev_err(queue->vif->dev, "draining TX queue\n");
+		queue->rx_queue_purge = true;
+		xenvif_kick_thread(queue);
+		xenvif_wake_queue(queue);
 	}
 }
 
+static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
+			       void *accel_priv, select_queue_fallback_t fallback)
+{
+	unsigned int num_queues = dev->real_num_tx_queues;
+	u32 hash;
+	u16 queue_index;
+
+	/* First, check if there is only one queue to optimise the
+	 * single-queue or old frontend scenario.
+	 */
+	if (num_queues == 1) {
+		queue_index = 0;
+	} else {
+		/* Use skb_get_hash to obtain an L4 hash if available */
+		hash = skb_get_hash(skb);
+		queue_index = hash % num_queues;
+	}
+
+	return queue_index;
+}
+
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = dev->real_num_tx_queues;
+	u16 index;
 	int min_slots_needed;
 
 	BUG_ON(skb->dev != dev);
 
-	/* Drop the packet if vif is not ready */
-	if (vif->task == NULL ||
-	    vif->dealloc_task == NULL ||
+	/* Drop the packet if queues are not set up */
+	if (num_queues < 1)
+		goto drop;
+
+	/* Obtain the queue to be used to transmit this packet */
+	index = skb_get_queue_mapping(skb);
+	if (index >= num_queues) {
+		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
+				    index, vif->dev->name);
+		index %= num_queues;
+	}
+	queue = &vif->queues[index];
+
+	/* Drop the packet if queue is not ready */
+	if (queue->task == NULL ||
+	    queue->dealloc_task == NULL ||
 	    !xenvif_schedulable(vif))
 		goto drop;
 
@@ -163,16 +202,16 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * then turn off the queue to give the ring a chance to
 	 * drain.
 	 */
-	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
-		vif->wake_queue.function = xenvif_wake_queue;
-		vif->wake_queue.data = (unsigned long)vif;
-		xenvif_stop_queue(vif);
-		mod_timer(&vif->wake_queue,
+	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
+		queue->wake_queue.function = xenvif_wake_queue_callback;
+		queue->wake_queue.data = (unsigned long)queue;
+		xenvif_stop_queue(queue);
+		mod_timer(&queue->wake_queue,
 			  jiffies + rx_drain_timeout_jiffies);
 	}
 
-	skb_queue_tail(&vif->rx_queue, skb);
-	xenvif_kick_thread(vif);
+	skb_queue_tail(&queue->rx_queue, skb);
+	xenvif_kick_thread(queue);
 
 	return NETDEV_TX_OK;
 
@@ -185,25 +224,65 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned long rx_bytes = 0;
+	unsigned long rx_packets = 0;
+	unsigned long tx_bytes = 0;
+	unsigned long tx_packets = 0;
+	unsigned int index;
+
+	if (vif->queues == NULL)
+		goto out;
+
+	/* Aggregate tx and rx stats from each queue */
+	for (index = 0; index < num_queues; ++index) {
+		queue = &vif->queues[index];
+		rx_bytes += queue->stats.rx_bytes;
+		rx_packets += queue->stats.rx_packets;
+		tx_bytes += queue->stats.tx_bytes;
+		tx_packets += queue->stats.tx_packets;
+	}
+
+out:
+	vif->dev->stats.rx_bytes = rx_bytes;
+	vif->dev->stats.rx_packets = rx_packets;
+	vif->dev->stats.tx_bytes = tx_bytes;
+	vif->dev->stats.tx_packets = tx_packets;
+
 	return &vif->dev->stats;
 }
 
 static void xenvif_up(struct xenvif *vif)
 {
-	napi_enable(&vif->napi);
-	enable_irq(vif->tx_irq);
-	if (vif->tx_irq != vif->rx_irq)
-		enable_irq(vif->rx_irq);
-	xenvif_check_rx_xenvif(vif);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int queue_index;
+
+	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+		queue = &vif->queues[queue_index];
+		napi_enable(&queue->napi);
+		enable_irq(queue->tx_irq);
+		if (queue->tx_irq != queue->rx_irq)
+			enable_irq(queue->rx_irq);
+		xenvif_napi_schedule_or_enable_events(queue);
+	}
 }
 
 static void xenvif_down(struct xenvif *vif)
 {
-	napi_disable(&vif->napi);
-	disable_irq(vif->tx_irq);
-	if (vif->tx_irq != vif->rx_irq)
-		disable_irq(vif->rx_irq);
-	del_timer_sync(&vif->credit_timeout);
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int queue_index;
+
+	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+		queue = &vif->queues[queue_index];
+		napi_disable(&queue->napi);
+		disable_irq(queue->tx_irq);
+		if (queue->tx_irq != queue->rx_irq)
+			disable_irq(queue->rx_irq);
+		del_timer_sync(&queue->credit_timeout);
+	}
 }
 
 static int xenvif_open(struct net_device *dev)
@@ -211,7 +290,7 @@ static int xenvif_open(struct net_device *dev)
 	struct xenvif *vif = netdev_priv(dev);
 	if (netif_carrier_ok(dev))
 		xenvif_up(vif);
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 	return 0;
 }
 
@@ -220,7 +299,7 @@ static int xenvif_close(struct net_device *dev)
 	struct xenvif *vif = netdev_priv(dev);
 	if (netif_carrier_ok(dev))
 		xenvif_down(vif);
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 	return 0;
 }
 
@@ -260,29 +339,29 @@ static const struct xenvif_stat {
 } xenvif_stats[] = {
 	{
 		"rx_gso_checksum_fixup",
-		offsetof(struct xenvif, rx_gso_checksum_fixup)
+		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
 	},
 	/* If (sent != success + fail), there are probably packets never
 	 * freed up properly!
 	 */
 	{
 		"tx_zerocopy_sent",
-		offsetof(struct xenvif, tx_zerocopy_sent),
+		offsetof(struct xenvif_stats, tx_zerocopy_sent),
 	},
 	{
 		"tx_zerocopy_success",
-		offsetof(struct xenvif, tx_zerocopy_success),
+		offsetof(struct xenvif_stats, tx_zerocopy_success),
 	},
 	{
 		"tx_zerocopy_fail",
-		offsetof(struct xenvif, tx_zerocopy_fail)
+		offsetof(struct xenvif_stats, tx_zerocopy_fail)
 	},
 	/* Number of packets exceeding MAX_SKB_FRAG slots. You should use
 	 * a guest with the same MAX_SKB_FRAG
 	 */
 	{
 		"tx_frag_overflow",
-		offsetof(struct xenvif, tx_frag_overflow)
+		offsetof(struct xenvif_stats, tx_frag_overflow)
 	},
 };
 
@@ -299,11 +378,20 @@ static int xenvif_get_sset_count(struct net_device *dev, int string_set)
 static void xenvif_get_ethtool_stats(struct net_device *dev,
 				     struct ethtool_stats *stats, u64 * data)
 {
-	void *vif = netdev_priv(dev);
+	struct xenvif *vif = netdev_priv(dev);
+	unsigned int num_queues = dev->real_num_tx_queues;
 	int i;
-
-	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
-		data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
+	unsigned int queue_index;
+	struct xenvif_stats *vif_stats;
+
+	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
+		unsigned long accum = 0;
+		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+			vif_stats = &vif->queues[queue_index].stats;
+			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
+		}
+		data[i] = accum;
+	}
 }
 
 static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -336,6 +424,7 @@ static const struct net_device_ops xenvif_netdev_ops = {
 	.ndo_fix_features = xenvif_fix_features,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_select_queue = xenvif_select_queue,
 };
 
 struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -345,10 +434,14 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	struct net_device *dev;
 	struct xenvif *vif;
 	char name[IFNAMSIZ] = {};
-	int i;
 
 	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
-	dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
+	/* Allocate a netdev with the max. supported number of queues.
+	 * When the guest selects the desired number, it will be updated
+	 * via netif_set_real_num_tx_queues().
+	 */
+	dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
+			      xenvif_max_queues);
 	if (dev == NULL) {
 		pr_warn("Could not allocate netdev for %s\n", name);
 		return ERR_PTR(-ENOMEM);
@@ -358,66 +451,28 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 
 	vif = netdev_priv(dev);
 
-	vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
-				     MAX_GRANT_COPY_OPS);
-	if (vif->grant_copy_op == NULL) {
-		pr_warn("Could not allocate grant copy space for %s\n", name);
-		free_netdev(dev);
-		return ERR_PTR(-ENOMEM);
-	}
-
 	vif->domid = domid;
 	vif->handle = handle;
 	vif->can_sg = 1;
 	vif->ip_csum = 1;
 	vif->dev = dev;
-
 	vif->disabled = false;
 
-	vif->credit_bytes = vif->remaining_credit = ~0UL;
-	vif->credit_usec = 0UL;
-	init_timer(&vif->credit_timeout);
-	vif->credit_window_start = get_jiffies_64();
-
-	init_timer(&vif->wake_queue);
+	/* Start out with no queues. The call below does not require
+	 * rtnl_lock() as it happens before register_netdev().
+	 */
+	vif->queues = NULL;
+	netif_set_real_num_tx_queues(dev, 0);
 
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_TSO | NETIF_F_TSO6;
 	dev->features = dev->hw_features | NETIF_F_RXCSUM;
-	SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
+	dev->ethtool_ops = &xenvif_ethtool_ops;
 
 	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
 
-	skb_queue_head_init(&vif->rx_queue);
-	skb_queue_head_init(&vif->tx_queue);
-
-	vif->pending_cons = 0;
-	vif->pending_prod = MAX_PENDING_REQS;
-	for (i = 0; i < MAX_PENDING_REQS; i++)
-		vif->pending_ring[i] = i;
-	spin_lock_init(&vif->callback_lock);
-	spin_lock_init(&vif->response_lock);
-	/* If ballooning is disabled, this will consume real memory, so you
-	 * better enable it. The long term solution would be to use just a
-	 * bunch of valid page descriptors, without dependency on ballooning
-	 */
-	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
-				       vif->mmap_pages,
-				       false);
-	if (err) {
-		netdev_err(dev, "Could not reserve mmap_pages\n");
-		return ERR_PTR(-ENOMEM);
-	}
-	for (i = 0; i < MAX_PENDING_REQS; i++) {
-		vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
-			{ .callback = xenvif_zerocopy_callback,
-			  .ctx = NULL,
-			  .desc = i };
-		vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
-	}
-
 	/*
 	 * Initialise a dummy MAC address. We choose the numerically
 	 * largest non-broadcast address to prevent the address getting
@@ -427,8 +482,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	memset(dev->dev_addr, 0xFF, ETH_ALEN);
 	dev->dev_addr[0] &= ~0x01;
 
-	netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
-
 	netif_carrier_off(dev);
 
 	err = register_netdev(dev);
@@ -445,98 +498,147 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	return vif;
 }
 
-int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+int xenvif_init_queue(struct xenvif_queue *queue)
+{
+	int err, i;
+
+	queue->credit_bytes = queue->remaining_credit = ~0UL;
+	queue->credit_usec = 0UL;
+	init_timer(&queue->credit_timeout);
+	queue->credit_window_start = get_jiffies_64();
+
+	skb_queue_head_init(&queue->rx_queue);
+	skb_queue_head_init(&queue->tx_queue);
+
+	queue->pending_cons = 0;
+	queue->pending_prod = MAX_PENDING_REQS;
+	for (i = 0; i < MAX_PENDING_REQS; ++i)
+		queue->pending_ring[i] = i;
+
+	spin_lock_init(&queue->callback_lock);
+	spin_lock_init(&queue->response_lock);
+
+	/* If ballooning is disabled, this will consume real memory, so you
+	 * better enable it. The long term solution would be to use just a
+	 * bunch of valid page descriptors, without dependency on ballooning
+	 */
+	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
+				       queue->mmap_pages,
+				       false);
+	if (err) {
+		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < MAX_PENDING_REQS; i++) {
+		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
+			{ .callback = xenvif_zerocopy_callback,
+			  .ctx = NULL,
+			  .desc = i };
+		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
+	}
+
+	init_timer(&queue->wake_queue);
+
+	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+		       XENVIF_NAPI_WEIGHT);
+
+	return 0;
+}
+
+void xenvif_carrier_on(struct xenvif *vif)
+{
+	rtnl_lock();
+	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
+		dev_set_mtu(vif->dev, ETH_DATA_LEN);
+	netdev_update_features(vif->dev);
+	netif_carrier_on(vif->dev);
+	if (netif_running(vif->dev))
+		xenvif_up(vif);
+	rtnl_unlock();
+}
+
+int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
 		   unsigned int rx_evtchn)
 {
 	struct task_struct *task;
 	int err = -ENOMEM;
 
-	BUG_ON(vif->tx_irq);
-	BUG_ON(vif->task);
-	BUG_ON(vif->dealloc_task);
+	BUG_ON(queue->tx_irq);
+	BUG_ON(queue->task);
+	BUG_ON(queue->dealloc_task);
 
-	err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+	err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
 	if (err < 0)
 		goto err;
 
-	init_waitqueue_head(&vif->wq);
-	init_waitqueue_head(&vif->dealloc_wq);
+	init_waitqueue_head(&queue->wq);
+	init_waitqueue_head(&queue->dealloc_wq);
 
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
 		err = bind_interdomain_evtchn_to_irqhandler(
-			vif->domid, tx_evtchn, xenvif_interrupt, 0,
-			vif->dev->name, vif);
+			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
+			queue->name, queue);
 		if (err < 0)
 			goto err_unmap;
-		vif->tx_irq = vif->rx_irq = err;
-		disable_irq(vif->tx_irq);
+		queue->tx_irq = queue->rx_irq = err;
+		disable_irq(queue->tx_irq);
 	} else {
 		/* feature-split-event-channels == 1 */
-		snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
-			 "%s-tx", vif->dev->name);
+		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
 		err = bind_interdomain_evtchn_to_irqhandler(
-			vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
-			vif->tx_irq_name, vif);
+			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
+			queue->tx_irq_name, queue);
 		if (err < 0)
 			goto err_unmap;
-		vif->tx_irq = err;
-		disable_irq(vif->tx_irq);
+		queue->tx_irq = err;
+		disable_irq(queue->tx_irq);
 
-		snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
-			 "%s-rx", vif->dev->name);
+		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
+			 "%s-rx", queue->name);
 		err = bind_interdomain_evtchn_to_irqhandler(
-			vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
-			vif->rx_irq_name, vif);
+			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
+			queue->rx_irq_name, queue);
 		if (err < 0)
 			goto err_tx_unbind;
-		vif->rx_irq = err;
-		disable_irq(vif->rx_irq);
+		queue->rx_irq = err;
+		disable_irq(queue->rx_irq);
 	}
 
 	task = kthread_create(xenvif_kthread_guest_rx,
-			      (void *)vif, "%s-guest-rx", vif->dev->name);
+			      (void *)queue, "%s-guest-rx", queue->name);
 	if (IS_ERR(task)) {
-		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+		pr_warn("Could not allocate kthread for %s\n", queue->name);
 		err = PTR_ERR(task);
 		goto err_rx_unbind;
 	}
-
-	vif->task = task;
+	queue->task = task;
 
 	task = kthread_create(xenvif_dealloc_kthread,
-			      (void *)vif, "%s-dealloc", vif->dev->name);
+			      (void *)queue, "%s-dealloc", queue->name);
 	if (IS_ERR(task)) {
-		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+		pr_warn("Could not allocate kthread for %s\n", queue->name);
 		err = PTR_ERR(task);
 		goto err_rx_unbind;
 	}
+	queue->dealloc_task = task;
 
-	vif->dealloc_task = task;
-
-	rtnl_lock();
-	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
-		dev_set_mtu(vif->dev, ETH_DATA_LEN);
-	netdev_update_features(vif->dev);
-	netif_carrier_on(vif->dev);
-	if (netif_running(vif->dev))
-		xenvif_up(vif);
-	rtnl_unlock();
-
-	wake_up_process(vif->task);
-	wake_up_process(vif->dealloc_task);
+	wake_up_process(queue->task);
+	wake_up_process(queue->dealloc_task);
 
 	return 0;
 
 err_rx_unbind:
-	unbind_from_irqhandler(vif->rx_irq, vif);
-	vif->rx_irq = 0;
+	unbind_from_irqhandler(queue->rx_irq, queue);
+	queue->rx_irq = 0;
 err_tx_unbind:
-	unbind_from_irqhandler(vif->tx_irq, vif);
-	vif->tx_irq = 0;
+	unbind_from_irqhandler(queue->tx_irq, queue);
+	queue->tx_irq = 0;
 err_unmap:
-	xenvif_unmap_frontend_rings(vif);
+	xenvif_unmap_frontend_rings(queue);
 err:
 	module_put(THIS_MODULE);
 	return err;
@@ -553,38 +655,77 @@ void xenvif_carrier_off(struct xenvif *vif)
 	rtnl_unlock();
 }
 
+static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
+				      unsigned int worst_case_skb_lifetime)
+{
+	int i, unmap_timeout = 0;
+
+	for (i = 0; i < MAX_PENDING_REQS; ++i) {
+		if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
+			unmap_timeout++;
+			schedule_timeout(msecs_to_jiffies(1000));
+			if (unmap_timeout > worst_case_skb_lifetime &&
+			    net_ratelimit())
+				netdev_err(queue->vif->dev,
+					   "Page still granted! Index: %x\n",
+					   i);
+			i = -1;
+		}
+	}
+}
+
 void xenvif_disconnect(struct xenvif *vif)
 {
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int queue_index;
+
 	if (netif_carrier_ok(vif->dev))
 		xenvif_carrier_off(vif);
 
-	if (vif->task) {
-		del_timer_sync(&vif->wake_queue);
-		kthread_stop(vif->task);
-		vif->task = NULL;
-	}
+	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+		queue = &vif->queues[queue_index];
 
-	if (vif->dealloc_task) {
-		kthread_stop(vif->dealloc_task);
-		vif->dealloc_task = NULL;
-	}
+		if (queue->task) {
+			del_timer_sync(&queue->wake_queue);
+			kthread_stop(queue->task);
+			queue->task = NULL;
+		}
 
-	if (vif->tx_irq) {
-		if (vif->tx_irq == vif->rx_irq)
-			unbind_from_irqhandler(vif->tx_irq, vif);
-		else {
-			unbind_from_irqhandler(vif->tx_irq, vif);
-			unbind_from_irqhandler(vif->rx_irq, vif);
+		if (queue->dealloc_task) {
+			kthread_stop(queue->dealloc_task);
+			queue->dealloc_task = NULL;
 		}
-		vif->tx_irq = 0;
+
+		if (queue->tx_irq) {
+			if (queue->tx_irq == queue->rx_irq)
+				unbind_from_irqhandler(queue->tx_irq, queue);
+			else {
+				unbind_from_irqhandler(queue->tx_irq, queue);
+				unbind_from_irqhandler(queue->rx_irq, queue);
+			}
+			queue->tx_irq = 0;
+		}
+
+		xenvif_unmap_frontend_rings(queue);
 	}
+}
 
-	xenvif_unmap_frontend_rings(vif);
+/* Reverse the relevant parts of xenvif_init_queue().
+ * Used for queue teardown from xenvif_free(), and on the
+ * error handling paths in xenbus.c:connect().
+ */
+void xenvif_deinit_queue(struct xenvif_queue *queue)
+{
+	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
+	netif_napi_del(&queue->napi);
 }
 
 void xenvif_free(struct xenvif *vif)
 {
-	int i, unmap_timeout = 0;
+	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int queue_index;
 	/* Here we want to avoid timeout messages if an skb can be legitimately
 	 * stuck somewhere else. Realistically this could be an another vif's
 	 * internal or QDisc queue. That another vif also has this
@@ -599,33 +740,21 @@ void xenvif_free(struct xenvif *vif)
 	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
 		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
 
-	for (i = 0; i < MAX_PENDING_REQS; ++i) {
-		if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
-			unmap_timeout++;
-			schedule_timeout(msecs_to_jiffies(1000));
-			if (unmap_timeout > worst_case_skb_lifetime &&
-			    net_ratelimit())
-				netdev_err(vif->dev,
-					   "Page still granted! Index: %x\n",
-					   i);
-			/* If there are still unmapped pages, reset the loop to
-			 * start checking again. We shouldn't exit here until
-			 * dealloc thread and NAPI instance release all the
-			 * pages. If a kernel bug causes the skbs to stall
-			 * somewhere, the interface cannot be brought down
-			 * properly.
-			 */
-			i = -1;
-		}
-	}
-
-	free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);
+	unregister_netdev(vif->dev);
 
-	netif_napi_del(&vif->napi);
+	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+		queue = &vif->queues[queue_index];
+		xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
+		xenvif_deinit_queue(queue);
+	}
 
-	unregister_netdev(vif->dev);
+	/* Free the array of queues. The call below does not require
+	 * rtnl_lock() because it happens after unregister_netdev().
+	 */
+	netif_set_real_num_tx_queues(vif->dev, 0);
+	vfree(vif->queues);
+	vif->queues = NULL;
 
-	vfree(vif->grant_copy_op);
 	free_netdev(vif->dev);
 
 	module_put(THIS_MODULE);
