Diffstat (limited to 'drivers/net/xen-netback/interface.c')
| -rw-r--r-- | drivers/net/xen-netback/interface.c | 544 |
1 file changed, 340 insertions(+), 204 deletions(-)
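This commit converts xen-netback from a single implicit queue per vif to an array of per-queue state: struct xenvif gains vif->queues and vif->num_queues, and the NAPI instance, event-channel IRQs, kthreads, timers and statistics move into struct xenvif_queue. As a rough orientation aid before the diff itself, here is a minimal sketch of the per-queue iteration pattern the patch applies in xenvif_up(), xenvif_down() and xenvif_get_stats(); the helper name and its simplified body are assumptions for illustration, not code from the patch.

	/* Illustrative sketch only: mirrors the per-queue loops this patch
	 * introduces in xenvif_up()/xenvif_down()/xenvif_get_stats().
	 * Field names (queues, num_queues, stats.rx_bytes) are taken from
	 * the diff below; the helper itself is hypothetical.
	 */
	static unsigned long example_total_rx_bytes(struct xenvif *vif)
	{
		unsigned long rx_bytes = 0;
		unsigned int queue_index;

		if (vif->queues == NULL)
			return 0;

		for (queue_index = 0; queue_index < vif->num_queues; ++queue_index)
			rx_bytes += vif->queues[queue_index].stats.rx_bytes;

		return rx_bytes;
	}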
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 20e9defa1060..48a55cda979b 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
| @@ -43,40 +43,56 @@ | |||
| 43 | #define XENVIF_QUEUE_LENGTH 32 | 43 | #define XENVIF_QUEUE_LENGTH 32 |
| 44 | #define XENVIF_NAPI_WEIGHT 64 | 44 | #define XENVIF_NAPI_WEIGHT 64 |
| 45 | 45 | ||
| 46 | static inline void xenvif_stop_queue(struct xenvif_queue *queue) | ||
| 47 | { | ||
| 48 | struct net_device *dev = queue->vif->dev; | ||
| 49 | |||
| 50 | if (!queue->vif->can_queue) | ||
| 51 | return; | ||
| 52 | |||
| 53 | netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); | ||
| 54 | } | ||
| 55 | |||
| 46 | int xenvif_schedulable(struct xenvif *vif) | 56 | int xenvif_schedulable(struct xenvif *vif) |
| 47 | { | 57 | { |
| 48 | return netif_running(vif->dev) && netif_carrier_ok(vif->dev); | 58 | return netif_running(vif->dev) && |
| 59 | test_bit(VIF_STATUS_CONNECTED, &vif->status); | ||
| 49 | } | 60 | } |
| 50 | 61 | ||
| 51 | static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) | 62 | static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) |
| 52 | { | 63 | { |
| 53 | struct xenvif *vif = dev_id; | 64 | struct xenvif_queue *queue = dev_id; |
| 54 | 65 | ||
| 55 | if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) | 66 | if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)) |
| 56 | napi_schedule(&vif->napi); | 67 | napi_schedule(&queue->napi); |
| 57 | 68 | ||
| 58 | return IRQ_HANDLED; | 69 | return IRQ_HANDLED; |
| 59 | } | 70 | } |
| 60 | 71 | ||
| 61 | static int xenvif_poll(struct napi_struct *napi, int budget) | 72 | int xenvif_poll(struct napi_struct *napi, int budget) |
| 62 | { | 73 | { |
| 63 | struct xenvif *vif = container_of(napi, struct xenvif, napi); | 74 | struct xenvif_queue *queue = |
| 75 | container_of(napi, struct xenvif_queue, napi); | ||
| 64 | int work_done; | 76 | int work_done; |
| 65 | 77 | ||
| 66 | /* This vif is rogue, we pretend there is nothing to do | 78 | /* This vif is rogue, we pretend there is nothing to do |
| 67 | * for this vif to deschedule it from NAPI. But this interface | 79 | * for this vif to deschedule it from NAPI. But this interface |
| 68 | * will be turned off in thread context later. | 80 | * will be turned off in thread context later. |
| 81 | * Also, if a guest doesn't post enough slots to receive data on one of | ||
| 82 | * its queues, the carrier goes down and NAPI is descheduled here so | ||
| 83 | * the guest can't send more packets until it's ready to receive. | ||
| 69 | */ | 84 | */ |
| 70 | if (unlikely(vif->disabled)) { | 85 | if (unlikely(queue->vif->disabled || |
| 86 | !netif_carrier_ok(queue->vif->dev))) { | ||
| 71 | napi_complete(napi); | 87 | napi_complete(napi); |
| 72 | return 0; | 88 | return 0; |
| 73 | } | 89 | } |
| 74 | 90 | ||
| 75 | work_done = xenvif_tx_action(vif, budget); | 91 | work_done = xenvif_tx_action(queue, budget); |
| 76 | 92 | ||
| 77 | if (work_done < budget) { | 93 | if (work_done < budget) { |
| 78 | napi_complete(napi); | 94 | napi_complete(napi); |
| 79 | xenvif_napi_schedule_or_enable_events(vif); | 95 | xenvif_napi_schedule_or_enable_events(queue); |
| 80 | } | 96 | } |
| 81 | 97 | ||
| 82 | return work_done; | 98 | return work_done; |
| @@ -84,14 +100,23 @@ static int xenvif_poll(struct napi_struct *napi, int budget) | |||
| 84 | 100 | ||
| 85 | static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) | 101 | static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) |
| 86 | { | 102 | { |
| 87 | struct xenvif *vif = dev_id; | 103 | struct xenvif_queue *queue = dev_id; |
| 104 | struct netdev_queue *net_queue = | ||
| 105 | netdev_get_tx_queue(queue->vif->dev, queue->id); | ||
| 88 | 106 | ||
| 89 | xenvif_kick_thread(vif); | 107 | /* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR |
| 108 | * the carrier went down and this queue was previously blocked | ||
| 109 | */ | ||
| 110 | if (unlikely(netif_tx_queue_stopped(net_queue) || | ||
| 111 | (!netif_carrier_ok(queue->vif->dev) && | ||
| 112 | test_bit(QUEUE_STATUS_RX_STALLED, &queue->status)))) | ||
| 113 | set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status); | ||
| 114 | xenvif_kick_thread(queue); | ||
| 90 | 115 | ||
| 91 | return IRQ_HANDLED; | 116 | return IRQ_HANDLED; |
| 92 | } | 117 | } |
| 93 | 118 | ||
| 94 | static irqreturn_t xenvif_interrupt(int irq, void *dev_id) | 119 | irqreturn_t xenvif_interrupt(int irq, void *dev_id) |
| 95 | { | 120 | { |
| 96 | xenvif_tx_interrupt(irq, dev_id); | 121 | xenvif_tx_interrupt(irq, dev_id); |
| 97 | xenvif_rx_interrupt(irq, dev_id); | 122 | xenvif_rx_interrupt(irq, dev_id); |
| @@ -99,28 +124,57 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id) | |||
| 99 | return IRQ_HANDLED; | 124 | return IRQ_HANDLED; |
| 100 | } | 125 | } |
| 101 | 126 | ||
| 102 | static void xenvif_wake_queue(unsigned long data) | 127 | int xenvif_queue_stopped(struct xenvif_queue *queue) |
| 128 | { | ||
| 129 | struct net_device *dev = queue->vif->dev; | ||
| 130 | unsigned int id = queue->id; | ||
| 131 | return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id)); | ||
| 132 | } | ||
| 133 | |||
| 134 | void xenvif_wake_queue(struct xenvif_queue *queue) | ||
| 103 | { | 135 | { |
| 104 | struct xenvif *vif = (struct xenvif *)data; | 136 | struct net_device *dev = queue->vif->dev; |
| 137 | unsigned int id = queue->id; | ||
| 138 | netif_tx_wake_queue(netdev_get_tx_queue(dev, id)); | ||
| 139 | } | ||
| 140 | |||
| 141 | /* Callback to wake the queue's thread and turn the carrier off on timeout */ | ||
| 142 | static void xenvif_rx_stalled(unsigned long data) | ||
| 143 | { | ||
| 144 | struct xenvif_queue *queue = (struct xenvif_queue *)data; | ||
| 105 | 145 | ||
| 106 | if (netif_queue_stopped(vif->dev)) { | 146 | if (xenvif_queue_stopped(queue)) { |
| 107 | netdev_err(vif->dev, "draining TX queue\n"); | 147 | set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status); |
| 108 | vif->rx_queue_purge = true; | 148 | xenvif_kick_thread(queue); |
| 109 | xenvif_kick_thread(vif); | ||
| 110 | netif_wake_queue(vif->dev); | ||
| 111 | } | 149 | } |
| 112 | } | 150 | } |
| 113 | 151 | ||
| 114 | static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) | 152 | static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| 115 | { | 153 | { |
| 116 | struct xenvif *vif = netdev_priv(dev); | 154 | struct xenvif *vif = netdev_priv(dev); |
| 155 | struct xenvif_queue *queue = NULL; | ||
| 156 | unsigned int num_queues = vif->num_queues; | ||
| 157 | u16 index; | ||
| 117 | int min_slots_needed; | 158 | int min_slots_needed; |
| 118 | 159 | ||
| 119 | BUG_ON(skb->dev != dev); | 160 | BUG_ON(skb->dev != dev); |
| 120 | 161 | ||
| 121 | /* Drop the packet if vif is not ready */ | 162 | /* Drop the packet if queues are not set up */ |
| 122 | if (vif->task == NULL || | 163 | if (num_queues < 1) |
| 123 | vif->dealloc_task == NULL || | 164 | goto drop; |
| 165 | |||
| 166 | /* Obtain the queue to be used to transmit this packet */ | ||
| 167 | index = skb_get_queue_mapping(skb); | ||
| 168 | if (index >= num_queues) { | ||
| 169 | pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.", | ||
| 170 | index, vif->dev->name); | ||
| 171 | index %= num_queues; | ||
| 172 | } | ||
| 173 | queue = &vif->queues[index]; | ||
| 174 | |||
| 175 | /* Drop the packet if queue is not ready */ | ||
| 176 | if (queue->task == NULL || | ||
| 177 | queue->dealloc_task == NULL || | ||
| 124 | !xenvif_schedulable(vif)) | 178 | !xenvif_schedulable(vif)) |
| 125 | goto drop; | 179 | goto drop; |
| 126 | 180 | ||
| @@ -139,16 +193,16 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 139 | * then turn off the queue to give the ring a chance to | 193 | * then turn off the queue to give the ring a chance to |
| 140 | * drain. | 194 | * drain. |
| 141 | */ | 195 | */ |
| 142 | if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) { | 196 | if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) { |
| 143 | vif->wake_queue.function = xenvif_wake_queue; | 197 | queue->rx_stalled.function = xenvif_rx_stalled; |
| 144 | vif->wake_queue.data = (unsigned long)vif; | 198 | queue->rx_stalled.data = (unsigned long)queue; |
| 145 | xenvif_stop_queue(vif); | 199 | xenvif_stop_queue(queue); |
| 146 | mod_timer(&vif->wake_queue, | 200 | mod_timer(&queue->rx_stalled, |
| 147 | jiffies + rx_drain_timeout_jiffies); | 201 | jiffies + rx_drain_timeout_jiffies); |
| 148 | } | 202 | } |
| 149 | 203 | ||
| 150 | skb_queue_tail(&vif->rx_queue, skb); | 204 | skb_queue_tail(&queue->rx_queue, skb); |
| 151 | xenvif_kick_thread(vif); | 205 | xenvif_kick_thread(queue); |
| 152 | 206 | ||
| 153 | return NETDEV_TX_OK; | 207 | return NETDEV_TX_OK; |
| 154 | 208 | ||
| @@ -161,42 +215,82 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 161 | static struct net_device_stats *xenvif_get_stats(struct net_device *dev) | 215 | static struct net_device_stats *xenvif_get_stats(struct net_device *dev) |
| 162 | { | 216 | { |
| 163 | struct xenvif *vif = netdev_priv(dev); | 217 | struct xenvif *vif = netdev_priv(dev); |
| 218 | struct xenvif_queue *queue = NULL; | ||
| 219 | unsigned int num_queues = vif->num_queues; | ||
| 220 | unsigned long rx_bytes = 0; | ||
| 221 | unsigned long rx_packets = 0; | ||
| 222 | unsigned long tx_bytes = 0; | ||
| 223 | unsigned long tx_packets = 0; | ||
| 224 | unsigned int index; | ||
| 225 | |||
| 226 | if (vif->queues == NULL) | ||
| 227 | goto out; | ||
| 228 | |||
| 229 | /* Aggregate tx and rx stats from each queue */ | ||
| 230 | for (index = 0; index < num_queues; ++index) { | ||
| 231 | queue = &vif->queues[index]; | ||
| 232 | rx_bytes += queue->stats.rx_bytes; | ||
| 233 | rx_packets += queue->stats.rx_packets; | ||
| 234 | tx_bytes += queue->stats.tx_bytes; | ||
| 235 | tx_packets += queue->stats.tx_packets; | ||
| 236 | } | ||
| 237 | |||
| 238 | out: | ||
| 239 | vif->dev->stats.rx_bytes = rx_bytes; | ||
| 240 | vif->dev->stats.rx_packets = rx_packets; | ||
| 241 | vif->dev->stats.tx_bytes = tx_bytes; | ||
| 242 | vif->dev->stats.tx_packets = tx_packets; | ||
| 243 | |||
| 164 | return &vif->dev->stats; | 244 | return &vif->dev->stats; |
| 165 | } | 245 | } |
| 166 | 246 | ||
| 167 | static void xenvif_up(struct xenvif *vif) | 247 | static void xenvif_up(struct xenvif *vif) |
| 168 | { | 248 | { |
| 169 | napi_enable(&vif->napi); | 249 | struct xenvif_queue *queue = NULL; |
| 170 | enable_irq(vif->tx_irq); | 250 | unsigned int num_queues = vif->num_queues; |
| 171 | if (vif->tx_irq != vif->rx_irq) | 251 | unsigned int queue_index; |
| 172 | enable_irq(vif->rx_irq); | 252 | |
| 173 | xenvif_napi_schedule_or_enable_events(vif); | 253 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { |
| 254 | queue = &vif->queues[queue_index]; | ||
| 255 | napi_enable(&queue->napi); | ||
| 256 | enable_irq(queue->tx_irq); | ||
| 257 | if (queue->tx_irq != queue->rx_irq) | ||
| 258 | enable_irq(queue->rx_irq); | ||
| 259 | xenvif_napi_schedule_or_enable_events(queue); | ||
| 260 | } | ||
| 174 | } | 261 | } |
| 175 | 262 | ||
| 176 | static void xenvif_down(struct xenvif *vif) | 263 | static void xenvif_down(struct xenvif *vif) |
| 177 | { | 264 | { |
| 178 | napi_disable(&vif->napi); | 265 | struct xenvif_queue *queue = NULL; |
| 179 | disable_irq(vif->tx_irq); | 266 | unsigned int num_queues = vif->num_queues; |
| 180 | if (vif->tx_irq != vif->rx_irq) | 267 | unsigned int queue_index; |
| 181 | disable_irq(vif->rx_irq); | 268 | |
| 182 | del_timer_sync(&vif->credit_timeout); | 269 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { |
| 270 | queue = &vif->queues[queue_index]; | ||
| 271 | napi_disable(&queue->napi); | ||
| 272 | disable_irq(queue->tx_irq); | ||
| 273 | if (queue->tx_irq != queue->rx_irq) | ||
| 274 | disable_irq(queue->rx_irq); | ||
| 275 | del_timer_sync(&queue->credit_timeout); | ||
| 276 | } | ||
| 183 | } | 277 | } |
| 184 | 278 | ||
| 185 | static int xenvif_open(struct net_device *dev) | 279 | static int xenvif_open(struct net_device *dev) |
| 186 | { | 280 | { |
| 187 | struct xenvif *vif = netdev_priv(dev); | 281 | struct xenvif *vif = netdev_priv(dev); |
| 188 | if (netif_carrier_ok(dev)) | 282 | if (test_bit(VIF_STATUS_CONNECTED, &vif->status)) |
| 189 | xenvif_up(vif); | 283 | xenvif_up(vif); |
| 190 | netif_start_queue(dev); | 284 | netif_tx_start_all_queues(dev); |
| 191 | return 0; | 285 | return 0; |
| 192 | } | 286 | } |
| 193 | 287 | ||
| 194 | static int xenvif_close(struct net_device *dev) | 288 | static int xenvif_close(struct net_device *dev) |
| 195 | { | 289 | { |
| 196 | struct xenvif *vif = netdev_priv(dev); | 290 | struct xenvif *vif = netdev_priv(dev); |
| 197 | if (netif_carrier_ok(dev)) | 291 | if (test_bit(VIF_STATUS_CONNECTED, &vif->status)) |
| 198 | xenvif_down(vif); | 292 | xenvif_down(vif); |
| 199 | netif_stop_queue(dev); | 293 | netif_tx_stop_all_queues(dev); |
| 200 | return 0; | 294 | return 0; |
| 201 | } | 295 | } |
| 202 | 296 | ||
| @@ -236,29 +330,29 @@ static const struct xenvif_stat { | |||
| 236 | } xenvif_stats[] = { | 330 | } xenvif_stats[] = { |
| 237 | { | 331 | { |
| 238 | "rx_gso_checksum_fixup", | 332 | "rx_gso_checksum_fixup", |
| 239 | offsetof(struct xenvif, rx_gso_checksum_fixup) | 333 | offsetof(struct xenvif_stats, rx_gso_checksum_fixup) |
| 240 | }, | 334 | }, |
| 241 | /* If (sent != success + fail), there are probably packets never | 335 | /* If (sent != success + fail), there are probably packets never |
| 242 | * freed up properly! | 336 | * freed up properly! |
| 243 | */ | 337 | */ |
| 244 | { | 338 | { |
| 245 | "tx_zerocopy_sent", | 339 | "tx_zerocopy_sent", |
| 246 | offsetof(struct xenvif, tx_zerocopy_sent), | 340 | offsetof(struct xenvif_stats, tx_zerocopy_sent), |
| 247 | }, | 341 | }, |
| 248 | { | 342 | { |
| 249 | "tx_zerocopy_success", | 343 | "tx_zerocopy_success", |
| 250 | offsetof(struct xenvif, tx_zerocopy_success), | 344 | offsetof(struct xenvif_stats, tx_zerocopy_success), |
| 251 | }, | 345 | }, |
| 252 | { | 346 | { |
| 253 | "tx_zerocopy_fail", | 347 | "tx_zerocopy_fail", |
| 254 | offsetof(struct xenvif, tx_zerocopy_fail) | 348 | offsetof(struct xenvif_stats, tx_zerocopy_fail) |
| 255 | }, | 349 | }, |
| 256 | /* Number of packets exceeding MAX_SKB_FRAG slots. You should use | 350 | /* Number of packets exceeding MAX_SKB_FRAG slots. You should use |
| 257 | * a guest with the same MAX_SKB_FRAG | 351 | * a guest with the same MAX_SKB_FRAG |
| 258 | */ | 352 | */ |
| 259 | { | 353 | { |
| 260 | "tx_frag_overflow", | 354 | "tx_frag_overflow", |
| 261 | offsetof(struct xenvif, tx_frag_overflow) | 355 | offsetof(struct xenvif_stats, tx_frag_overflow) |
| 262 | }, | 356 | }, |
| 263 | }; | 357 | }; |
| 264 | 358 | ||
| @@ -275,11 +369,20 @@ static int xenvif_get_sset_count(struct net_device *dev, int string_set) | |||
| 275 | static void xenvif_get_ethtool_stats(struct net_device *dev, | 369 | static void xenvif_get_ethtool_stats(struct net_device *dev, |
| 276 | struct ethtool_stats *stats, u64 * data) | 370 | struct ethtool_stats *stats, u64 * data) |
| 277 | { | 371 | { |
| 278 | void *vif = netdev_priv(dev); | 372 | struct xenvif *vif = netdev_priv(dev); |
| 373 | unsigned int num_queues = vif->num_queues; | ||
| 279 | int i; | 374 | int i; |
| 280 | 375 | unsigned int queue_index; | |
| 281 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) | 376 | struct xenvif_stats *vif_stats; |
| 282 | data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset); | 377 | |
| 378 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { | ||
| 379 | unsigned long accum = 0; | ||
| 380 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { | ||
| 381 | vif_stats = &vif->queues[queue_index].stats; | ||
| 382 | accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); | ||
| 383 | } | ||
| 384 | data[i] = accum; | ||
| 385 | } | ||
| 283 | } | 386 | } |
| 284 | 387 | ||
| 285 | static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data) | 388 | static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data) |
| @@ -321,10 +424,14 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, | |||
| 321 | struct net_device *dev; | 424 | struct net_device *dev; |
| 322 | struct xenvif *vif; | 425 | struct xenvif *vif; |
| 323 | char name[IFNAMSIZ] = {}; | 426 | char name[IFNAMSIZ] = {}; |
| 324 | int i; | ||
| 325 | 427 | ||
| 326 | snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle); | 428 | snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle); |
| 327 | dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup); | 429 | /* Allocate a netdev with the max. supported number of queues. |
| 430 | * When the guest selects the desired number, it will be updated | ||
| 431 | * via netif_set_real_num_*_queues(). | ||
| 432 | */ | ||
| 433 | dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN, | ||
| 434 | ether_setup, xenvif_max_queues); | ||
| 328 | if (dev == NULL) { | 435 | if (dev == NULL) { |
| 329 | pr_warn("Could not allocate netdev for %s\n", name); | 436 | pr_warn("Could not allocate netdev for %s\n", name); |
| 330 | return ERR_PTR(-ENOMEM); | 437 | return ERR_PTR(-ENOMEM); |
| @@ -334,66 +441,26 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, | |||
| 334 | 441 | ||
| 335 | vif = netdev_priv(dev); | 442 | vif = netdev_priv(dev); |
| 336 | 443 | ||
| 337 | vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) * | ||
| 338 | MAX_GRANT_COPY_OPS); | ||
| 339 | if (vif->grant_copy_op == NULL) { | ||
| 340 | pr_warn("Could not allocate grant copy space for %s\n", name); | ||
| 341 | free_netdev(dev); | ||
| 342 | return ERR_PTR(-ENOMEM); | ||
| 343 | } | ||
| 344 | |||
| 345 | vif->domid = domid; | 444 | vif->domid = domid; |
| 346 | vif->handle = handle; | 445 | vif->handle = handle; |
| 347 | vif->can_sg = 1; | 446 | vif->can_sg = 1; |
| 348 | vif->ip_csum = 1; | 447 | vif->ip_csum = 1; |
| 349 | vif->dev = dev; | 448 | vif->dev = dev; |
| 350 | |||
| 351 | vif->disabled = false; | 449 | vif->disabled = false; |
| 352 | 450 | ||
| 353 | vif->credit_bytes = vif->remaining_credit = ~0UL; | 451 | /* Start out with no queues. */ |
| 354 | vif->credit_usec = 0UL; | 452 | vif->queues = NULL; |
| 355 | init_timer(&vif->credit_timeout); | 453 | vif->num_queues = 0; |
| 356 | vif->credit_window_start = get_jiffies_64(); | ||
| 357 | |||
| 358 | init_timer(&vif->wake_queue); | ||
| 359 | 454 | ||
| 360 | dev->netdev_ops = &xenvif_netdev_ops; | 455 | dev->netdev_ops = &xenvif_netdev_ops; |
| 361 | dev->hw_features = NETIF_F_SG | | 456 | dev->hw_features = NETIF_F_SG | |
| 362 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 457 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
| 363 | NETIF_F_TSO | NETIF_F_TSO6; | 458 | NETIF_F_TSO | NETIF_F_TSO6; |
| 364 | dev->features = dev->hw_features | NETIF_F_RXCSUM; | 459 | dev->features = dev->hw_features | NETIF_F_RXCSUM; |
| 365 | SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops); | 460 | dev->ethtool_ops = &xenvif_ethtool_ops; |
| 366 | 461 | ||
| 367 | dev->tx_queue_len = XENVIF_QUEUE_LENGTH; | 462 | dev->tx_queue_len = XENVIF_QUEUE_LENGTH; |
| 368 | 463 | ||
| 369 | skb_queue_head_init(&vif->rx_queue); | ||
| 370 | skb_queue_head_init(&vif->tx_queue); | ||
| 371 | |||
| 372 | vif->pending_cons = 0; | ||
| 373 | vif->pending_prod = MAX_PENDING_REQS; | ||
| 374 | for (i = 0; i < MAX_PENDING_REQS; i++) | ||
| 375 | vif->pending_ring[i] = i; | ||
| 376 | spin_lock_init(&vif->callback_lock); | ||
| 377 | spin_lock_init(&vif->response_lock); | ||
| 378 | /* If ballooning is disabled, this will consume real memory, so you | ||
| 379 | * better enable it. The long term solution would be to use just a | ||
| 380 | * bunch of valid page descriptors, without dependency on ballooning | ||
| 381 | */ | ||
| 382 | err = alloc_xenballooned_pages(MAX_PENDING_REQS, | ||
| 383 | vif->mmap_pages, | ||
| 384 | false); | ||
| 385 | if (err) { | ||
| 386 | netdev_err(dev, "Could not reserve mmap_pages\n"); | ||
| 387 | return ERR_PTR(-ENOMEM); | ||
| 388 | } | ||
| 389 | for (i = 0; i < MAX_PENDING_REQS; i++) { | ||
| 390 | vif->pending_tx_info[i].callback_struct = (struct ubuf_info) | ||
| 391 | { .callback = xenvif_zerocopy_callback, | ||
| 392 | .ctx = NULL, | ||
| 393 | .desc = i }; | ||
| 394 | vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE; | ||
| 395 | } | ||
| 396 | |||
| 397 | /* | 464 | /* |
| 398 | * Initialise a dummy MAC address. We choose the numerically | 465 | * Initialise a dummy MAC address. We choose the numerically |
| 399 | * largest non-broadcast address to prevent the address getting | 466 | * largest non-broadcast address to prevent the address getting |
| @@ -403,8 +470,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, | |||
| 403 | memset(dev->dev_addr, 0xFF, ETH_ALEN); | 470 | memset(dev->dev_addr, 0xFF, ETH_ALEN); |
| 404 | dev->dev_addr[0] &= ~0x01; | 471 | dev->dev_addr[0] &= ~0x01; |
| 405 | 472 | ||
| 406 | netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT); | ||
| 407 | |||
| 408 | netif_carrier_off(dev); | 473 | netif_carrier_off(dev); |
| 409 | 474 | ||
| 410 | err = register_netdev(dev); | 475 | err = register_netdev(dev); |
| @@ -421,98 +486,148 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, | |||
| 421 | return vif; | 486 | return vif; |
| 422 | } | 487 | } |
| 423 | 488 | ||
| 424 | int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, | 489 | int xenvif_init_queue(struct xenvif_queue *queue) |
| 490 | { | ||
| 491 | int err, i; | ||
| 492 | |||
| 493 | queue->credit_bytes = queue->remaining_credit = ~0UL; | ||
| 494 | queue->credit_usec = 0UL; | ||
| 495 | init_timer(&queue->credit_timeout); | ||
| 496 | queue->credit_window_start = get_jiffies_64(); | ||
| 497 | |||
| 498 | skb_queue_head_init(&queue->rx_queue); | ||
| 499 | skb_queue_head_init(&queue->tx_queue); | ||
| 500 | |||
| 501 | queue->pending_cons = 0; | ||
| 502 | queue->pending_prod = MAX_PENDING_REQS; | ||
| 503 | for (i = 0; i < MAX_PENDING_REQS; ++i) | ||
| 504 | queue->pending_ring[i] = i; | ||
| 505 | |||
| 506 | spin_lock_init(&queue->callback_lock); | ||
| 507 | spin_lock_init(&queue->response_lock); | ||
| 508 | |||
| 509 | /* If ballooning is disabled, this will consume real memory, so you | ||
| 510 | * better enable it. The long term solution would be to use just a | ||
| 511 | * bunch of valid page descriptors, without dependency on ballooning | ||
| 512 | */ | ||
| 513 | err = alloc_xenballooned_pages(MAX_PENDING_REQS, | ||
| 514 | queue->mmap_pages, | ||
| 515 | false); | ||
| 516 | if (err) { | ||
| 517 | netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n"); | ||
| 518 | return -ENOMEM; | ||
| 519 | } | ||
| 520 | |||
| 521 | for (i = 0; i < MAX_PENDING_REQS; i++) { | ||
| 522 | queue->pending_tx_info[i].callback_struct = (struct ubuf_info) | ||
| 523 | { .callback = xenvif_zerocopy_callback, | ||
| 524 | .ctx = NULL, | ||
| 525 | .desc = i }; | ||
| 526 | queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE; | ||
| 527 | } | ||
| 528 | |||
| 529 | init_timer(&queue->rx_stalled); | ||
| 530 | |||
| 531 | netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll, | ||
| 532 | XENVIF_NAPI_WEIGHT); | ||
| 533 | |||
| 534 | return 0; | ||
| 535 | } | ||
| 536 | |||
| 537 | void xenvif_carrier_on(struct xenvif *vif) | ||
| 538 | { | ||
| 539 | rtnl_lock(); | ||
| 540 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) | ||
| 541 | dev_set_mtu(vif->dev, ETH_DATA_LEN); | ||
| 542 | netdev_update_features(vif->dev); | ||
| 543 | set_bit(VIF_STATUS_CONNECTED, &vif->status); | ||
| 544 | netif_carrier_on(vif->dev); | ||
| 545 | if (netif_running(vif->dev)) | ||
| 546 | xenvif_up(vif); | ||
| 547 | rtnl_unlock(); | ||
| 548 | } | ||
| 549 | |||
| 550 | int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, | ||
| 425 | unsigned long rx_ring_ref, unsigned int tx_evtchn, | 551 | unsigned long rx_ring_ref, unsigned int tx_evtchn, |
| 426 | unsigned int rx_evtchn) | 552 | unsigned int rx_evtchn) |
| 427 | { | 553 | { |
| 428 | struct task_struct *task; | 554 | struct task_struct *task; |
| 429 | int err = -ENOMEM; | 555 | int err = -ENOMEM; |
| 430 | 556 | ||
| 431 | BUG_ON(vif->tx_irq); | 557 | BUG_ON(queue->tx_irq); |
| 432 | BUG_ON(vif->task); | 558 | BUG_ON(queue->task); |
| 433 | BUG_ON(vif->dealloc_task); | 559 | BUG_ON(queue->dealloc_task); |
| 434 | 560 | ||
| 435 | err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); | 561 | err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref); |
| 436 | if (err < 0) | 562 | if (err < 0) |
| 437 | goto err; | 563 | goto err; |
| 438 | 564 | ||
| 439 | init_waitqueue_head(&vif->wq); | 565 | init_waitqueue_head(&queue->wq); |
| 440 | init_waitqueue_head(&vif->dealloc_wq); | 566 | init_waitqueue_head(&queue->dealloc_wq); |
| 441 | 567 | ||
| 442 | if (tx_evtchn == rx_evtchn) { | 568 | if (tx_evtchn == rx_evtchn) { |
| 443 | /* feature-split-event-channels == 0 */ | 569 | /* feature-split-event-channels == 0 */ |
| 444 | err = bind_interdomain_evtchn_to_irqhandler( | 570 | err = bind_interdomain_evtchn_to_irqhandler( |
| 445 | vif->domid, tx_evtchn, xenvif_interrupt, 0, | 571 | queue->vif->domid, tx_evtchn, xenvif_interrupt, 0, |
| 446 | vif->dev->name, vif); | 572 | queue->name, queue); |
| 447 | if (err < 0) | 573 | if (err < 0) |
| 448 | goto err_unmap; | 574 | goto err_unmap; |
| 449 | vif->tx_irq = vif->rx_irq = err; | 575 | queue->tx_irq = queue->rx_irq = err; |
| 450 | disable_irq(vif->tx_irq); | 576 | disable_irq(queue->tx_irq); |
| 451 | } else { | 577 | } else { |
| 452 | /* feature-split-event-channels == 1 */ | 578 | /* feature-split-event-channels == 1 */ |
| 453 | snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name), | 579 | snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), |
| 454 | "%s-tx", vif->dev->name); | 580 | "%s-tx", queue->name); |
| 455 | err = bind_interdomain_evtchn_to_irqhandler( | 581 | err = bind_interdomain_evtchn_to_irqhandler( |
| 456 | vif->domid, tx_evtchn, xenvif_tx_interrupt, 0, | 582 | queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0, |
| 457 | vif->tx_irq_name, vif); | 583 | queue->tx_irq_name, queue); |
| 458 | if (err < 0) | 584 | if (err < 0) |
| 459 | goto err_unmap; | 585 | goto err_unmap; |
| 460 | vif->tx_irq = err; | 586 | queue->tx_irq = err; |
| 461 | disable_irq(vif->tx_irq); | 587 | disable_irq(queue->tx_irq); |
| 462 | 588 | ||
| 463 | snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name), | 589 | snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), |
| 464 | "%s-rx", vif->dev->name); | 590 | "%s-rx", queue->name); |
| 465 | err = bind_interdomain_evtchn_to_irqhandler( | 591 | err = bind_interdomain_evtchn_to_irqhandler( |
| 466 | vif->domid, rx_evtchn, xenvif_rx_interrupt, 0, | 592 | queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0, |
| 467 | vif->rx_irq_name, vif); | 593 | queue->rx_irq_name, queue); |
| 468 | if (err < 0) | 594 | if (err < 0) |
| 469 | goto err_tx_unbind; | 595 | goto err_tx_unbind; |
| 470 | vif->rx_irq = err; | 596 | queue->rx_irq = err; |
| 471 | disable_irq(vif->rx_irq); | 597 | disable_irq(queue->rx_irq); |
| 472 | } | 598 | } |
| 473 | 599 | ||
| 474 | task = kthread_create(xenvif_kthread_guest_rx, | 600 | task = kthread_create(xenvif_kthread_guest_rx, |
| 475 | (void *)vif, "%s-guest-rx", vif->dev->name); | 601 | (void *)queue, "%s-guest-rx", queue->name); |
| 476 | if (IS_ERR(task)) { | 602 | if (IS_ERR(task)) { |
| 477 | pr_warn("Could not allocate kthread for %s\n", vif->dev->name); | 603 | pr_warn("Could not allocate kthread for %s\n", queue->name); |
| 478 | err = PTR_ERR(task); | 604 | err = PTR_ERR(task); |
| 479 | goto err_rx_unbind; | 605 | goto err_rx_unbind; |
| 480 | } | 606 | } |
| 481 | 607 | queue->task = task; | |
| 482 | vif->task = task; | ||
| 483 | 608 | ||
| 484 | task = kthread_create(xenvif_dealloc_kthread, | 609 | task = kthread_create(xenvif_dealloc_kthread, |
| 485 | (void *)vif, "%s-dealloc", vif->dev->name); | 610 | (void *)queue, "%s-dealloc", queue->name); |
| 486 | if (IS_ERR(task)) { | 611 | if (IS_ERR(task)) { |
| 487 | pr_warn("Could not allocate kthread for %s\n", vif->dev->name); | 612 | pr_warn("Could not allocate kthread for %s\n", queue->name); |
| 488 | err = PTR_ERR(task); | 613 | err = PTR_ERR(task); |
| 489 | goto err_rx_unbind; | 614 | goto err_rx_unbind; |
| 490 | } | 615 | } |
| 616 | queue->dealloc_task = task; | ||
| 491 | 617 | ||
| 492 | vif->dealloc_task = task; | 618 | wake_up_process(queue->task); |
| 493 | 619 | wake_up_process(queue->dealloc_task); | |
| 494 | rtnl_lock(); | ||
| 495 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) | ||
| 496 | dev_set_mtu(vif->dev, ETH_DATA_LEN); | ||
| 497 | netdev_update_features(vif->dev); | ||
| 498 | netif_carrier_on(vif->dev); | ||
| 499 | if (netif_running(vif->dev)) | ||
| 500 | xenvif_up(vif); | ||
| 501 | rtnl_unlock(); | ||
| 502 | |||
| 503 | wake_up_process(vif->task); | ||
| 504 | wake_up_process(vif->dealloc_task); | ||
| 505 | 620 | ||
| 506 | return 0; | 621 | return 0; |
| 507 | 622 | ||
| 508 | err_rx_unbind: | 623 | err_rx_unbind: |
| 509 | unbind_from_irqhandler(vif->rx_irq, vif); | 624 | unbind_from_irqhandler(queue->rx_irq, queue); |
| 510 | vif->rx_irq = 0; | 625 | queue->rx_irq = 0; |
| 511 | err_tx_unbind: | 626 | err_tx_unbind: |
| 512 | unbind_from_irqhandler(vif->tx_irq, vif); | 627 | unbind_from_irqhandler(queue->tx_irq, queue); |
| 513 | vif->tx_irq = 0; | 628 | queue->tx_irq = 0; |
| 514 | err_unmap: | 629 | err_unmap: |
| 515 | xenvif_unmap_frontend_rings(vif); | 630 | xenvif_unmap_frontend_rings(queue); |
| 516 | err: | 631 | err: |
| 517 | module_put(THIS_MODULE); | 632 | module_put(THIS_MODULE); |
| 518 | return err; | 633 | return err; |
| @@ -523,85 +638,106 @@ void xenvif_carrier_off(struct xenvif *vif) | |||
| 523 | struct net_device *dev = vif->dev; | 638 | struct net_device *dev = vif->dev; |
| 524 | 639 | ||
| 525 | rtnl_lock(); | 640 | rtnl_lock(); |
| 526 | netif_carrier_off(dev); /* discard queued packets */ | 641 | if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) { |
| 527 | if (netif_running(dev)) | 642 | netif_carrier_off(dev); /* discard queued packets */ |
| 528 | xenvif_down(vif); | 643 | if (netif_running(dev)) |
| 644 | xenvif_down(vif); | ||
| 645 | } | ||
| 529 | rtnl_unlock(); | 646 | rtnl_unlock(); |
| 530 | } | 647 | } |
| 531 | 648 | ||
| 532 | void xenvif_disconnect(struct xenvif *vif) | 649 | static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue, |
| 650 | unsigned int worst_case_skb_lifetime) | ||
| 533 | { | 651 | { |
| 534 | if (netif_carrier_ok(vif->dev)) | 652 | int i, unmap_timeout = 0; |
| 535 | xenvif_carrier_off(vif); | ||
| 536 | 653 | ||
| 537 | if (vif->task) { | 654 | for (i = 0; i < MAX_PENDING_REQS; ++i) { |
| 538 | del_timer_sync(&vif->wake_queue); | 655 | if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) { |
| 539 | kthread_stop(vif->task); | 656 | unmap_timeout++; |
| 540 | vif->task = NULL; | 657 | schedule_timeout(msecs_to_jiffies(1000)); |
| 658 | if (unmap_timeout > worst_case_skb_lifetime && | ||
| 659 | net_ratelimit()) | ||
| 660 | netdev_err(queue->vif->dev, | ||
| 661 | "Page still granted! Index: %x\n", | ||
| 662 | i); | ||
| 663 | i = -1; | ||
| 664 | } | ||
| 541 | } | 665 | } |
| 666 | } | ||
| 542 | 667 | ||
| 543 | if (vif->dealloc_task) { | 668 | void xenvif_disconnect(struct xenvif *vif) |
| 544 | kthread_stop(vif->dealloc_task); | 669 | { |
| 545 | vif->dealloc_task = NULL; | 670 | struct xenvif_queue *queue = NULL; |
| 546 | } | 671 | unsigned int num_queues = vif->num_queues; |
| 672 | unsigned int queue_index; | ||
| 673 | |||
| 674 | xenvif_carrier_off(vif); | ||
| 675 | |||
| 676 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { | ||
| 677 | queue = &vif->queues[queue_index]; | ||
| 678 | |||
| 679 | if (queue->task) { | ||
| 680 | del_timer_sync(&queue->rx_stalled); | ||
| 681 | kthread_stop(queue->task); | ||
| 682 | queue->task = NULL; | ||
| 683 | } | ||
| 547 | 684 | ||
| 548 | if (vif->tx_irq) { | 685 | if (queue->dealloc_task) { |
| 549 | if (vif->tx_irq == vif->rx_irq) | 686 | kthread_stop(queue->dealloc_task); |
| 550 | unbind_from_irqhandler(vif->tx_irq, vif); | 687 | queue->dealloc_task = NULL; |
| 551 | else { | ||
| 552 | unbind_from_irqhandler(vif->tx_irq, vif); | ||
| 553 | unbind_from_irqhandler(vif->rx_irq, vif); | ||
| 554 | } | 688 | } |
| 555 | vif->tx_irq = 0; | 689 | |
| 690 | if (queue->tx_irq) { | ||
| 691 | if (queue->tx_irq == queue->rx_irq) | ||
| 692 | unbind_from_irqhandler(queue->tx_irq, queue); | ||
| 693 | else { | ||
| 694 | unbind_from_irqhandler(queue->tx_irq, queue); | ||
| 695 | unbind_from_irqhandler(queue->rx_irq, queue); | ||
| 696 | } | ||
| 697 | queue->tx_irq = 0; | ||
| 698 | } | ||
| 699 | |||
| 700 | xenvif_unmap_frontend_rings(queue); | ||
| 556 | } | 701 | } |
| 702 | } | ||
| 557 | 703 | ||
| 558 | xenvif_unmap_frontend_rings(vif); | 704 | /* Reverse the relevant parts of xenvif_init_queue(). |
| 705 | * Used for queue teardown from xenvif_free(), and on the | ||
| 706 | * error handling paths in xenbus.c:connect(). | ||
| 707 | */ | ||
| 708 | void xenvif_deinit_queue(struct xenvif_queue *queue) | ||
| 709 | { | ||
| 710 | free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages); | ||
| 711 | netif_napi_del(&queue->napi); | ||
| 559 | } | 712 | } |
| 560 | 713 | ||
| 561 | void xenvif_free(struct xenvif *vif) | 714 | void xenvif_free(struct xenvif *vif) |
| 562 | { | 715 | { |
| 563 | int i, unmap_timeout = 0; | 716 | struct xenvif_queue *queue = NULL; |
| 717 | unsigned int num_queues = vif->num_queues; | ||
| 718 | unsigned int queue_index; | ||
| 564 | /* Here we want to avoid timeout messages if an skb can be legitimately | 719 | /* Here we want to avoid timeout messages if an skb can be legitimately |
| 565 | * stuck somewhere else. Realistically this could be another vif's | 720 | * stuck somewhere else. Realistically this could be another vif's |
| 566 | * internal or QDisc queue. That other vif also has this | 721 | * internal or QDisc queue. That other vif also has this |
| 567 | * rx_drain_timeout_msecs timeout, but the timer only ditches the | 722 | * rx_drain_timeout_msecs timeout, so give it time to drain out. |
| 568 | * internal queue. After that, the QDisc queue can put in worst case | 723 | * Although if that other guest wakes up just before its timeout happens |
| 569 | * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's | 724 | * and takes only one skb from QDisc, it can hold onto other skbs for a |
| 570 | * internal queue, so we need several rounds of such timeouts until we | 725 | * longer period. |
| 571 | * can be sure that no another vif should have skb's from us. We are | ||
| 572 | * not sending more skb's, so newly stuck packets are not interesting | ||
| 573 | * for us here. | ||
| 574 | */ | 726 | */ |
| 575 | unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) * | 727 | unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000); |
| 576 | DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS)); | ||
| 577 | 728 | ||
| 578 | for (i = 0; i < MAX_PENDING_REQS; ++i) { | 729 | unregister_netdev(vif->dev); |
| 579 | if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) { | ||
| 580 | unmap_timeout++; | ||
| 581 | schedule_timeout(msecs_to_jiffies(1000)); | ||
| 582 | if (unmap_timeout > worst_case_skb_lifetime && | ||
| 583 | net_ratelimit()) | ||
| 584 | netdev_err(vif->dev, | ||
| 585 | "Page still granted! Index: %x\n", | ||
| 586 | i); | ||
| 587 | /* If there are still unmapped pages, reset the loop to | ||
| 588 | * start checking again. We shouldn't exit here until | ||
| 589 | * dealloc thread and NAPI instance release all the | ||
| 590 | * pages. If a kernel bug causes the skbs to stall | ||
| 591 | * somewhere, the interface cannot be brought down | ||
| 592 | * properly. | ||
| 593 | */ | ||
| 594 | i = -1; | ||
| 595 | } | ||
| 596 | } | ||
| 597 | |||
| 598 | free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages); | ||
| 599 | 730 | ||
| 600 | netif_napi_del(&vif->napi); | 731 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { |
| 732 | queue = &vif->queues[queue_index]; | ||
| 733 | xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime); | ||
| 734 | xenvif_deinit_queue(queue); | ||
| 735 | } | ||
| 601 | 736 | ||
| 602 | unregister_netdev(vif->dev); | 737 | vfree(vif->queues); |
| 738 | vif->queues = NULL; | ||
| 739 | vif->num_queues = 0; | ||
| 603 | 740 | ||
| 604 | vfree(vif->grant_copy_op); | ||
| 605 | free_netdev(vif->dev); | 741 | free_netdev(vif->dev); |
| 606 | 742 | ||
| 607 | module_put(THIS_MODULE); | 743 | module_put(THIS_MODULE); |
