author     Wei Liu <wei.liu2@citrix.com>            2014-06-04 05:30:42 -0400
committer  David S. Miller <davem@davemloft.net>    2014-06-04 17:48:16 -0400
commit     e9ce7cb6b107407e4798e8905b18ad8b642766f6 (patch)
tree       dd99d31fa4f2bae0e836c99a811e5de4e1202567
parent     a55d9766cecf2b1b9af4fcf93b2d41b71e599c76 (diff)
xen-netback: Factor queue-specific data into queue struct
In preparation for multi-queue support in xen-netback, move the queue-specific data from struct xenvif into struct xenvif_queue, and update the rest of the code to use this.

Also adds loops over queues where appropriate, even though only one is configured at this point, and uses alloc_netdev_mq() and the corresponding multi-queue netif wake/start/stop functions in preparation for multiple active queues.

Finally, implements a trivial queue selection function suitable for ndo_select_queue, which simply returns 0 for a single queue and uses skb_get_hash() to compute the queue index otherwise.

Signed-off-by: Andrew J. Bennieston <andrew.bennieston@citrix.com>
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/xen-netback/common.h    | 102
-rw-r--r--  drivers/net/xen-netback/interface.c | 502
-rw-r--r--  drivers/net/xen-netback/netback.c   | 710
-rw-r--r--  drivers/net/xen-netback/xenbus.c    |  95
4 files changed, 819 insertions, 590 deletions
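The "trivial queue selection function" mentioned in the commit message reduces to hashing the flow and taking the remainder modulo the queue count. Below is a minimal sketch of that idea, simplified from the patch's xenvif_select_queue() in the interface.c hunk further down; pick_tx_queue is a hypothetical helper name used only for illustration, and kernel headers (<linux/netdevice.h>, <linux/skbuff.h>) are assumed.

/* Simplified sketch (not the patch itself) of the queue-selection idea:
 * a single queue always maps to index 0, otherwise flows are spread
 * across queues using the skb's flow hash.
 */
static u16 pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	unsigned int num_queues = dev->real_num_tx_queues;

	if (num_queues == 1)
		return 0;

	/* skb_get_hash() returns an L4 flow hash where one is available. */
	return skb_get_hash(skb) % num_queues;
}

With only one queue configured by this patch, the function always returns 0; the modulo path only matters once multiple queues are active.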
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 2c283d693330..b6885cfcc7df 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -99,22 +99,43 @@ struct xenvif_rx_meta {
99 */ 99 */
100#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN 100#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
101 101
102struct xenvif { 102/* Queue name is interface name with "-qNNN" appended */
103 /* Unique identifier for this interface. */ 103#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)
104 domid_t domid;
105 unsigned int handle;
106 104
107 /* Is this interface disabled? True when backend discovers 105/* IRQ name is queue name with "-tx" or "-rx" appended */
108 * frontend is rogue. 106#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
107
108struct xenvif;
109
110struct xenvif_stats {
111 /* Stats fields to be updated per-queue.
112 * A subset of struct net_device_stats that contains only the
113 * fields that are updated in netback.c for each queue.
109 */ 114 */
110 bool disabled; 115 unsigned int rx_bytes;
116 unsigned int rx_packets;
117 unsigned int tx_bytes;
118 unsigned int tx_packets;
119
120 /* Additional stats used by xenvif */
121 unsigned long rx_gso_checksum_fixup;
122 unsigned long tx_zerocopy_sent;
123 unsigned long tx_zerocopy_success;
124 unsigned long tx_zerocopy_fail;
125 unsigned long tx_frag_overflow;
126};
127
128struct xenvif_queue { /* Per-queue data for xenvif */
129 unsigned int id; /* Queue ID, 0-based */
130 char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
131 struct xenvif *vif; /* Parent VIF */
111 132
112 /* Use NAPI for guest TX */ 133 /* Use NAPI for guest TX */
113 struct napi_struct napi; 134 struct napi_struct napi;
114 /* When feature-split-event-channels = 0, tx_irq = rx_irq. */ 135 /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
115 unsigned int tx_irq; 136 unsigned int tx_irq;
116 /* Only used when feature-split-event-channels = 1 */ 137 /* Only used when feature-split-event-channels = 1 */
117 char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */ 138 char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
118 struct xen_netif_tx_back_ring tx; 139 struct xen_netif_tx_back_ring tx;
119 struct sk_buff_head tx_queue; 140 struct sk_buff_head tx_queue;
120 struct page *mmap_pages[MAX_PENDING_REQS]; 141 struct page *mmap_pages[MAX_PENDING_REQS];
@@ -150,7 +171,7 @@ struct xenvif {
150 /* When feature-split-event-channels = 0, tx_irq = rx_irq. */ 171 /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
151 unsigned int rx_irq; 172 unsigned int rx_irq;
152 /* Only used when feature-split-event-channels = 1 */ 173 /* Only used when feature-split-event-channels = 1 */
153 char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */ 174 char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
154 struct xen_netif_rx_back_ring rx; 175 struct xen_netif_rx_back_ring rx;
155 struct sk_buff_head rx_queue; 176 struct sk_buff_head rx_queue;
156 RING_IDX rx_last_skb_slots; 177 RING_IDX rx_last_skb_slots;
@@ -165,6 +186,22 @@ struct xenvif {
165 */ 186 */
166 struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE]; 187 struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
167 188
189 /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
190 unsigned long credit_bytes;
191 unsigned long credit_usec;
192 unsigned long remaining_credit;
193 struct timer_list credit_timeout;
194 u64 credit_window_start;
195
196 /* Statistics */
197 struct xenvif_stats stats;
198};
199
200struct xenvif {
201 /* Unique identifier for this interface. */
202 domid_t domid;
203 unsigned int handle;
204
168 u8 fe_dev_addr[6]; 205 u8 fe_dev_addr[6];
169 206
170 /* Frontend feature information. */ 207 /* Frontend feature information. */
@@ -178,19 +215,13 @@ struct xenvif {
178 /* Internal feature information. */ 215 /* Internal feature information. */
179 u8 can_queue:1; /* can queue packets for receiver? */ 216 u8 can_queue:1; /* can queue packets for receiver? */
180 217
181 /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */ 218 /* Is this interface disabled? True when backend discovers
182 unsigned long credit_bytes; 219 * frontend is rogue.
183 unsigned long credit_usec; 220 */
184 unsigned long remaining_credit; 221 bool disabled;
185 struct timer_list credit_timeout;
186 u64 credit_window_start;
187 222
188 /* Statistics */ 223 /* Queues */
189 unsigned long rx_gso_checksum_fixup; 224 struct xenvif_queue *queues;
190 unsigned long tx_zerocopy_sent;
191 unsigned long tx_zerocopy_success;
192 unsigned long tx_zerocopy_fail;
193 unsigned long tx_frag_overflow;
194 225
195 /* Miscellaneous private stuff. */ 226 /* Miscellaneous private stuff. */
196 struct net_device *dev; 227 struct net_device *dev;
@@ -205,7 +236,9 @@ struct xenvif *xenvif_alloc(struct device *parent,
205 domid_t domid, 236 domid_t domid,
206 unsigned int handle); 237 unsigned int handle);
207 238
208int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, 239int xenvif_init_queue(struct xenvif_queue *queue);
240
241int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
209 unsigned long rx_ring_ref, unsigned int tx_evtchn, 242 unsigned long rx_ring_ref, unsigned int tx_evtchn,
210 unsigned int rx_evtchn); 243 unsigned int rx_evtchn);
211void xenvif_disconnect(struct xenvif *vif); 244void xenvif_disconnect(struct xenvif *vif);
@@ -216,44 +249,47 @@ void xenvif_xenbus_fini(void);
216 249
217int xenvif_schedulable(struct xenvif *vif); 250int xenvif_schedulable(struct xenvif *vif);
218 251
219int xenvif_must_stop_queue(struct xenvif *vif); 252int xenvif_must_stop_queue(struct xenvif_queue *queue);
253
254int xenvif_queue_stopped(struct xenvif_queue *queue);
255void xenvif_wake_queue(struct xenvif_queue *queue);
220 256
221/* (Un)Map communication rings. */ 257/* (Un)Map communication rings. */
222void xenvif_unmap_frontend_rings(struct xenvif *vif); 258void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
223int xenvif_map_frontend_rings(struct xenvif *vif, 259int xenvif_map_frontend_rings(struct xenvif_queue *queue,
224 grant_ref_t tx_ring_ref, 260 grant_ref_t tx_ring_ref,
225 grant_ref_t rx_ring_ref); 261 grant_ref_t rx_ring_ref);
226 262
227/* Check for SKBs from frontend and schedule backend processing */ 263/* Check for SKBs from frontend and schedule backend processing */
228void xenvif_napi_schedule_or_enable_events(struct xenvif *vif); 264void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
229 265
230/* Prevent the device from generating any further traffic. */ 266/* Prevent the device from generating any further traffic. */
231void xenvif_carrier_off(struct xenvif *vif); 267void xenvif_carrier_off(struct xenvif *vif);
232 268
233int xenvif_tx_action(struct xenvif *vif, int budget); 269int xenvif_tx_action(struct xenvif_queue *queue, int budget);
234 270
235int xenvif_kthread_guest_rx(void *data); 271int xenvif_kthread_guest_rx(void *data);
236void xenvif_kick_thread(struct xenvif *vif); 272void xenvif_kick_thread(struct xenvif_queue *queue);
237 273
238int xenvif_dealloc_kthread(void *data); 274int xenvif_dealloc_kthread(void *data);
239 275
240/* Determine whether the needed number of slots (req) are available, 276/* Determine whether the needed number of slots (req) are available,
241 * and set req_event if not. 277 * and set req_event if not.
242 */ 278 */
243bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed); 279bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed);
244 280
245void xenvif_stop_queue(struct xenvif *vif); 281void xenvif_carrier_on(struct xenvif *vif);
246 282
247/* Callback from stack when TX packet can be released */ 283/* Callback from stack when TX packet can be released */
248void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success); 284void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
249 285
250/* Unmap a pending page and release it back to the guest */ 286/* Unmap a pending page and release it back to the guest */
251void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx); 287void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
252 288
253static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif) 289static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
254{ 290{
255 return MAX_PENDING_REQS - 291 return MAX_PENDING_REQS -
256 vif->pending_prod + vif->pending_cons; 292 queue->pending_prod + queue->pending_cons;
257} 293}
258 294
259/* Callback from stack when TX packet can be released */ 295/* Callback from stack when TX packet can be released */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 8fdedac3fab2..6005b5d1d404 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -34,7 +34,6 @@
34#include <linux/ethtool.h> 34#include <linux/ethtool.h>
35#include <linux/rtnetlink.h> 35#include <linux/rtnetlink.h>
36#include <linux/if_vlan.h> 36#include <linux/if_vlan.h>
37#include <linux/vmalloc.h>
38 37
39#include <xen/events.h> 38#include <xen/events.h>
40#include <asm/xen/hypercall.h> 39#include <asm/xen/hypercall.h>
@@ -43,6 +42,16 @@
43#define XENVIF_QUEUE_LENGTH 32 42#define XENVIF_QUEUE_LENGTH 32
44#define XENVIF_NAPI_WEIGHT 64 43#define XENVIF_NAPI_WEIGHT 64
45 44
45static inline void xenvif_stop_queue(struct xenvif_queue *queue)
46{
47 struct net_device *dev = queue->vif->dev;
48
49 if (!queue->vif->can_queue)
50 return;
51
52 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
53}
54
46int xenvif_schedulable(struct xenvif *vif) 55int xenvif_schedulable(struct xenvif *vif)
47{ 56{
48 return netif_running(vif->dev) && netif_carrier_ok(vif->dev); 57 return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
@@ -50,33 +59,34 @@ int xenvif_schedulable(struct xenvif *vif)
50 59
51static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) 60static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
52{ 61{
53 struct xenvif *vif = dev_id; 62 struct xenvif_queue *queue = dev_id;
54 63
55 if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) 64 if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
56 napi_schedule(&vif->napi); 65 napi_schedule(&queue->napi);
57 66
58 return IRQ_HANDLED; 67 return IRQ_HANDLED;
59} 68}
60 69
61static int xenvif_poll(struct napi_struct *napi, int budget) 70int xenvif_poll(struct napi_struct *napi, int budget)
62{ 71{
63 struct xenvif *vif = container_of(napi, struct xenvif, napi); 72 struct xenvif_queue *queue =
73 container_of(napi, struct xenvif_queue, napi);
64 int work_done; 74 int work_done;
65 75
66 /* This vif is rogue, we pretend we've there is nothing to do 76 /* This vif is rogue, we pretend we've there is nothing to do
67 * for this vif to deschedule it from NAPI. But this interface 77 * for this vif to deschedule it from NAPI. But this interface
68 * will be turned off in thread context later. 78 * will be turned off in thread context later.
69 */ 79 */
70 if (unlikely(vif->disabled)) { 80 if (unlikely(queue->vif->disabled)) {
71 napi_complete(napi); 81 napi_complete(napi);
72 return 0; 82 return 0;
73 } 83 }
74 84
75 work_done = xenvif_tx_action(vif, budget); 85 work_done = xenvif_tx_action(queue, budget);
76 86
77 if (work_done < budget) { 87 if (work_done < budget) {
78 napi_complete(napi); 88 napi_complete(napi);
79 xenvif_napi_schedule_or_enable_events(vif); 89 xenvif_napi_schedule_or_enable_events(queue);
80 } 90 }
81 91
82 return work_done; 92 return work_done;
@@ -84,9 +94,9 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
84 94
85static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) 95static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
86{ 96{
87 struct xenvif *vif = dev_id; 97 struct xenvif_queue *queue = dev_id;
88 98
89 xenvif_kick_thread(vif); 99 xenvif_kick_thread(queue);
90 100
91 return IRQ_HANDLED; 101 return IRQ_HANDLED;
92} 102}
@@ -99,28 +109,81 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
99 return IRQ_HANDLED; 109 return IRQ_HANDLED;
100} 110}
101 111
102static void xenvif_wake_queue(unsigned long data) 112int xenvif_queue_stopped(struct xenvif_queue *queue)
113{
114 struct net_device *dev = queue->vif->dev;
115 unsigned int id = queue->id;
116 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
117}
118
119void xenvif_wake_queue(struct xenvif_queue *queue)
120{
121 struct net_device *dev = queue->vif->dev;
122 unsigned int id = queue->id;
123 netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
124}
125
126/* Callback to wake the queue and drain it on timeout */
127static void xenvif_wake_queue_callback(unsigned long data)
103{ 128{
104 struct xenvif *vif = (struct xenvif *)data; 129 struct xenvif_queue *queue = (struct xenvif_queue *)data;
130
131 if (xenvif_queue_stopped(queue)) {
132 netdev_err(queue->vif->dev, "draining TX queue\n");
133 queue->rx_queue_purge = true;
134 xenvif_kick_thread(queue);
135 xenvif_wake_queue(queue);
136 }
137}
105 138
106 if (netif_queue_stopped(vif->dev)) { 139static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
107 netdev_err(vif->dev, "draining TX queue\n"); 140 void *accel_priv, select_queue_fallback_t fallback)
108 vif->rx_queue_purge = true; 141{
109 xenvif_kick_thread(vif); 142 struct xenvif *vif = netdev_priv(dev);
110 netif_wake_queue(vif->dev); 143 unsigned int num_queues = dev->real_num_tx_queues;
144 u32 hash;
145 u16 queue_index;
146
147 /* First, check if there is only one queue to optimise the
148 * single-queue or old frontend scenario.
149 */
150 if (num_queues == 1) {
151 queue_index = 0;
152 } else {
153 /* Use skb_get_hash to obtain an L4 hash if available */
154 hash = skb_get_hash(skb);
155 queue_index = hash % num_queues;
111 } 156 }
157
158 return queue_index;
112} 159}
113 160
114static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) 161static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
115{ 162{
116 struct xenvif *vif = netdev_priv(dev); 163 struct xenvif *vif = netdev_priv(dev);
164 struct xenvif_queue *queue = NULL;
165 unsigned int num_queues = dev->real_num_tx_queues;
166 u16 index;
117 int min_slots_needed; 167 int min_slots_needed;
118 168
119 BUG_ON(skb->dev != dev); 169 BUG_ON(skb->dev != dev);
120 170
121 /* Drop the packet if vif is not ready */ 171 /* Drop the packet if queues are not set up */
122 if (vif->task == NULL || 172 if (num_queues < 1)
123 vif->dealloc_task == NULL || 173 goto drop;
174
175 /* Obtain the queue to be used to transmit this packet */
176 index = skb_get_queue_mapping(skb);
177 if (index >= num_queues) {
178 pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.",
179 index, vif->dev->name);
180 index %= num_queues;
181 }
182 queue = &vif->queues[index];
183
184 /* Drop the packet if queue is not ready */
185 if (queue->task == NULL ||
186 queue->dealloc_task == NULL ||
124 !xenvif_schedulable(vif)) 187 !xenvif_schedulable(vif))
125 goto drop; 188 goto drop;
126 189
@@ -139,16 +202,16 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
139 * then turn off the queue to give the ring a chance to 202 * then turn off the queue to give the ring a chance to
140 * drain. 203 * drain.
141 */ 204 */
142 if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) { 205 if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
143 vif->wake_queue.function = xenvif_wake_queue; 206 queue->wake_queue.function = xenvif_wake_queue_callback;
144 vif->wake_queue.data = (unsigned long)vif; 207 queue->wake_queue.data = (unsigned long)queue;
145 xenvif_stop_queue(vif); 208 xenvif_stop_queue(queue);
146 mod_timer(&vif->wake_queue, 209 mod_timer(&queue->wake_queue,
147 jiffies + rx_drain_timeout_jiffies); 210 jiffies + rx_drain_timeout_jiffies);
148 } 211 }
149 212
150 skb_queue_tail(&vif->rx_queue, skb); 213 skb_queue_tail(&queue->rx_queue, skb);
151 xenvif_kick_thread(vif); 214 xenvif_kick_thread(queue);
152 215
153 return NETDEV_TX_OK; 216 return NETDEV_TX_OK;
154 217
@@ -161,25 +224,65 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
161static struct net_device_stats *xenvif_get_stats(struct net_device *dev) 224static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
162{ 225{
163 struct xenvif *vif = netdev_priv(dev); 226 struct xenvif *vif = netdev_priv(dev);
227 struct xenvif_queue *queue = NULL;
228 unsigned int num_queues = dev->real_num_tx_queues;
229 unsigned long rx_bytes = 0;
230 unsigned long rx_packets = 0;
231 unsigned long tx_bytes = 0;
232 unsigned long tx_packets = 0;
233 unsigned int index;
234
235 if (vif->queues == NULL)
236 goto out;
237
238 /* Aggregate tx and rx stats from each queue */
239 for (index = 0; index < num_queues; ++index) {
240 queue = &vif->queues[index];
241 rx_bytes += queue->stats.rx_bytes;
242 rx_packets += queue->stats.rx_packets;
243 tx_bytes += queue->stats.tx_bytes;
244 tx_packets += queue->stats.tx_packets;
245 }
246
247out:
248 vif->dev->stats.rx_bytes = rx_bytes;
249 vif->dev->stats.rx_packets = rx_packets;
250 vif->dev->stats.tx_bytes = tx_bytes;
251 vif->dev->stats.tx_packets = tx_packets;
252
164 return &vif->dev->stats; 253 return &vif->dev->stats;
165} 254}
166 255
167static void xenvif_up(struct xenvif *vif) 256static void xenvif_up(struct xenvif *vif)
168{ 257{
169 napi_enable(&vif->napi); 258 struct xenvif_queue *queue = NULL;
170 enable_irq(vif->tx_irq); 259 unsigned int num_queues = vif->dev->real_num_tx_queues;
171 if (vif->tx_irq != vif->rx_irq) 260 unsigned int queue_index;
172 enable_irq(vif->rx_irq); 261
173 xenvif_napi_schedule_or_enable_events(vif); 262 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
263 queue = &vif->queues[queue_index];
264 napi_enable(&queue->napi);
265 enable_irq(queue->tx_irq);
266 if (queue->tx_irq != queue->rx_irq)
267 enable_irq(queue->rx_irq);
268 xenvif_napi_schedule_or_enable_events(queue);
269 }
174} 270}
175 271
176static void xenvif_down(struct xenvif *vif) 272static void xenvif_down(struct xenvif *vif)
177{ 273{
178 napi_disable(&vif->napi); 274 struct xenvif_queue *queue = NULL;
179 disable_irq(vif->tx_irq); 275 unsigned int num_queues = vif->dev->real_num_tx_queues;
180 if (vif->tx_irq != vif->rx_irq) 276 unsigned int queue_index;
181 disable_irq(vif->rx_irq); 277
182 del_timer_sync(&vif->credit_timeout); 278 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
279 queue = &vif->queues[queue_index];
280 napi_disable(&queue->napi);
281 disable_irq(queue->tx_irq);
282 if (queue->tx_irq != queue->rx_irq)
283 disable_irq(queue->rx_irq);
284 del_timer_sync(&queue->credit_timeout);
285 }
183} 286}
184 287
185static int xenvif_open(struct net_device *dev) 288static int xenvif_open(struct net_device *dev)
@@ -187,7 +290,7 @@ static int xenvif_open(struct net_device *dev)
187 struct xenvif *vif = netdev_priv(dev); 290 struct xenvif *vif = netdev_priv(dev);
188 if (netif_carrier_ok(dev)) 291 if (netif_carrier_ok(dev))
189 xenvif_up(vif); 292 xenvif_up(vif);
190 netif_start_queue(dev); 293 netif_tx_start_all_queues(dev);
191 return 0; 294 return 0;
192} 295}
193 296
@@ -196,7 +299,7 @@ static int xenvif_close(struct net_device *dev)
196 struct xenvif *vif = netdev_priv(dev); 299 struct xenvif *vif = netdev_priv(dev);
197 if (netif_carrier_ok(dev)) 300 if (netif_carrier_ok(dev))
198 xenvif_down(vif); 301 xenvif_down(vif);
199 netif_stop_queue(dev); 302 netif_tx_stop_all_queues(dev);
200 return 0; 303 return 0;
201} 304}
202 305
@@ -236,29 +339,29 @@ static const struct xenvif_stat {
236} xenvif_stats[] = { 339} xenvif_stats[] = {
237 { 340 {
238 "rx_gso_checksum_fixup", 341 "rx_gso_checksum_fixup",
239 offsetof(struct xenvif, rx_gso_checksum_fixup) 342 offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
240 }, 343 },
241 /* If (sent != success + fail), there are probably packets never 344 /* If (sent != success + fail), there are probably packets never
242 * freed up properly! 345 * freed up properly!
243 */ 346 */
244 { 347 {
245 "tx_zerocopy_sent", 348 "tx_zerocopy_sent",
246 offsetof(struct xenvif, tx_zerocopy_sent), 349 offsetof(struct xenvif_stats, tx_zerocopy_sent),
247 }, 350 },
248 { 351 {
249 "tx_zerocopy_success", 352 "tx_zerocopy_success",
250 offsetof(struct xenvif, tx_zerocopy_success), 353 offsetof(struct xenvif_stats, tx_zerocopy_success),
251 }, 354 },
252 { 355 {
253 "tx_zerocopy_fail", 356 "tx_zerocopy_fail",
254 offsetof(struct xenvif, tx_zerocopy_fail) 357 offsetof(struct xenvif_stats, tx_zerocopy_fail)
255 }, 358 },
256 /* Number of packets exceeding MAX_SKB_FRAG slots. You should use 359 /* Number of packets exceeding MAX_SKB_FRAG slots. You should use
257 * a guest with the same MAX_SKB_FRAG 360 * a guest with the same MAX_SKB_FRAG
258 */ 361 */
259 { 362 {
260 "tx_frag_overflow", 363 "tx_frag_overflow",
261 offsetof(struct xenvif, tx_frag_overflow) 364 offsetof(struct xenvif_stats, tx_frag_overflow)
262 }, 365 },
263}; 366};
264 367
@@ -275,11 +378,20 @@ static int xenvif_get_sset_count(struct net_device *dev, int string_set)
275static void xenvif_get_ethtool_stats(struct net_device *dev, 378static void xenvif_get_ethtool_stats(struct net_device *dev,
276 struct ethtool_stats *stats, u64 * data) 379 struct ethtool_stats *stats, u64 * data)
277{ 380{
278 void *vif = netdev_priv(dev); 381 struct xenvif *vif = netdev_priv(dev);
382 unsigned int num_queues = dev->real_num_tx_queues;
279 int i; 383 int i;
280 384 unsigned int queue_index;
281 for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) 385 struct xenvif_stats *vif_stats;
282 data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset); 386
387 for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
388 unsigned long accum = 0;
389 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
390 vif_stats = &vif->queues[queue_index].stats;
391 accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
392 }
393 data[i] = accum;
394 }
283} 395}
284 396
285static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data) 397static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -312,6 +424,7 @@ static const struct net_device_ops xenvif_netdev_ops = {
312 .ndo_fix_features = xenvif_fix_features, 424 .ndo_fix_features = xenvif_fix_features,
313 .ndo_set_mac_address = eth_mac_addr, 425 .ndo_set_mac_address = eth_mac_addr,
314 .ndo_validate_addr = eth_validate_addr, 426 .ndo_validate_addr = eth_validate_addr,
427 .ndo_select_queue = xenvif_select_queue,
315}; 428};
316 429
317struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, 430struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -321,10 +434,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
321 struct net_device *dev; 434 struct net_device *dev;
322 struct xenvif *vif; 435 struct xenvif *vif;
323 char name[IFNAMSIZ] = {}; 436 char name[IFNAMSIZ] = {};
324 int i;
325 437
326 snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle); 438 snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
327 dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup); 439 dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup, 1);
328 if (dev == NULL) { 440 if (dev == NULL) {
329 pr_warn("Could not allocate netdev for %s\n", name); 441 pr_warn("Could not allocate netdev for %s\n", name);
330 return ERR_PTR(-ENOMEM); 442 return ERR_PTR(-ENOMEM);
@@ -339,15 +451,13 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
339 vif->can_sg = 1; 451 vif->can_sg = 1;
340 vif->ip_csum = 1; 452 vif->ip_csum = 1;
341 vif->dev = dev; 453 vif->dev = dev;
342
343 vif->disabled = false; 454 vif->disabled = false;
344 455
345 vif->credit_bytes = vif->remaining_credit = ~0UL; 456 /* Start out with no queues. The call below does not require
346 vif->credit_usec = 0UL; 457 * rtnl_lock() as it happens before register_netdev().
347 init_timer(&vif->credit_timeout); 458 */
348 vif->credit_window_start = get_jiffies_64(); 459 vif->queues = NULL;
349 460 netif_set_real_num_tx_queues(dev, 0);
350 init_timer(&vif->wake_queue);
351 461
352 dev->netdev_ops = &xenvif_netdev_ops; 462 dev->netdev_ops = &xenvif_netdev_ops;
353 dev->hw_features = NETIF_F_SG | 463 dev->hw_features = NETIF_F_SG |
@@ -358,34 +468,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
358 468
359 dev->tx_queue_len = XENVIF_QUEUE_LENGTH; 469 dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
360 470
361 skb_queue_head_init(&vif->rx_queue);
362 skb_queue_head_init(&vif->tx_queue);
363
364 vif->pending_cons = 0;
365 vif->pending_prod = MAX_PENDING_REQS;
366 for (i = 0; i < MAX_PENDING_REQS; i++)
367 vif->pending_ring[i] = i;
368 spin_lock_init(&vif->callback_lock);
369 spin_lock_init(&vif->response_lock);
370 /* If ballooning is disabled, this will consume real memory, so you
371 * better enable it. The long term solution would be to use just a
372 * bunch of valid page descriptors, without dependency on ballooning
373 */
374 err = alloc_xenballooned_pages(MAX_PENDING_REQS,
375 vif->mmap_pages,
376 false);
377 if (err) {
378 netdev_err(dev, "Could not reserve mmap_pages\n");
379 return ERR_PTR(-ENOMEM);
380 }
381 for (i = 0; i < MAX_PENDING_REQS; i++) {
382 vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
383 { .callback = xenvif_zerocopy_callback,
384 .ctx = NULL,
385 .desc = i };
386 vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
387 }
388
389 /* 471 /*
390 * Initialise a dummy MAC address. We choose the numerically 472 * Initialise a dummy MAC address. We choose the numerically
391 * largest non-broadcast address to prevent the address getting 473 * largest non-broadcast address to prevent the address getting
@@ -395,8 +477,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
395 memset(dev->dev_addr, 0xFF, ETH_ALEN); 477 memset(dev->dev_addr, 0xFF, ETH_ALEN);
396 dev->dev_addr[0] &= ~0x01; 478 dev->dev_addr[0] &= ~0x01;
397 479
398 netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
399
400 netif_carrier_off(dev); 480 netif_carrier_off(dev);
401 481
402 err = register_netdev(dev); 482 err = register_netdev(dev);
@@ -413,98 +493,147 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
413 return vif; 493 return vif;
414} 494}
415 495
416int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, 496int xenvif_init_queue(struct xenvif_queue *queue)
497{
498 int err, i;
499
500 queue->credit_bytes = queue->remaining_credit = ~0UL;
501 queue->credit_usec = 0UL;
502 init_timer(&queue->credit_timeout);
503 queue->credit_window_start = get_jiffies_64();
504
505 skb_queue_head_init(&queue->rx_queue);
506 skb_queue_head_init(&queue->tx_queue);
507
508 queue->pending_cons = 0;
509 queue->pending_prod = MAX_PENDING_REQS;
510 for (i = 0; i < MAX_PENDING_REQS; ++i)
511 queue->pending_ring[i] = i;
512
513 spin_lock_init(&queue->callback_lock);
514 spin_lock_init(&queue->response_lock);
515
516 /* If ballooning is disabled, this will consume real memory, so you
517 * better enable it. The long term solution would be to use just a
518 * bunch of valid page descriptors, without dependency on ballooning
519 */
520 err = alloc_xenballooned_pages(MAX_PENDING_REQS,
521 queue->mmap_pages,
522 false);
523 if (err) {
524 netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
525 return -ENOMEM;
526 }
527
528 for (i = 0; i < MAX_PENDING_REQS; i++) {
529 queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
530 { .callback = xenvif_zerocopy_callback,
531 .ctx = NULL,
532 .desc = i };
533 queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
534 }
535
536 init_timer(&queue->wake_queue);
537
538 netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
539 XENVIF_NAPI_WEIGHT);
540
541 return 0;
542}
543
544void xenvif_carrier_on(struct xenvif *vif)
545{
546 rtnl_lock();
547 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
548 dev_set_mtu(vif->dev, ETH_DATA_LEN);
549 netdev_update_features(vif->dev);
550 netif_carrier_on(vif->dev);
551 if (netif_running(vif->dev))
552 xenvif_up(vif);
553 rtnl_unlock();
554}
555
556int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
417 unsigned long rx_ring_ref, unsigned int tx_evtchn, 557 unsigned long rx_ring_ref, unsigned int tx_evtchn,
418 unsigned int rx_evtchn) 558 unsigned int rx_evtchn)
419{ 559{
420 struct task_struct *task; 560 struct task_struct *task;
421 int err = -ENOMEM; 561 int err = -ENOMEM;
422 562
423 BUG_ON(vif->tx_irq); 563 BUG_ON(queue->tx_irq);
424 BUG_ON(vif->task); 564 BUG_ON(queue->task);
425 BUG_ON(vif->dealloc_task); 565 BUG_ON(queue->dealloc_task);
426 566
427 err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); 567 err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
428 if (err < 0) 568 if (err < 0)
429 goto err; 569 goto err;
430 570
431 init_waitqueue_head(&vif->wq); 571 init_waitqueue_head(&queue->wq);
432 init_waitqueue_head(&vif->dealloc_wq); 572 init_waitqueue_head(&queue->dealloc_wq);
433 573
434 if (tx_evtchn == rx_evtchn) { 574 if (tx_evtchn == rx_evtchn) {
435 /* feature-split-event-channels == 0 */ 575 /* feature-split-event-channels == 0 */
436 err = bind_interdomain_evtchn_to_irqhandler( 576 err = bind_interdomain_evtchn_to_irqhandler(
437 vif->domid, tx_evtchn, xenvif_interrupt, 0, 577 queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
438 vif->dev->name, vif); 578 queue->name, queue);
439 if (err < 0) 579 if (err < 0)
440 goto err_unmap; 580 goto err_unmap;
441 vif->tx_irq = vif->rx_irq = err; 581 queue->tx_irq = queue->rx_irq = err;
442 disable_irq(vif->tx_irq); 582 disable_irq(queue->tx_irq);
443 } else { 583 } else {
444 /* feature-split-event-channels == 1 */ 584 /* feature-split-event-channels == 1 */
445 snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name), 585 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
446 "%s-tx", vif->dev->name); 586 "%s-tx", queue->name);
447 err = bind_interdomain_evtchn_to_irqhandler( 587 err = bind_interdomain_evtchn_to_irqhandler(
448 vif->domid, tx_evtchn, xenvif_tx_interrupt, 0, 588 queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
449 vif->tx_irq_name, vif); 589 queue->tx_irq_name, queue);
450 if (err < 0) 590 if (err < 0)
451 goto err_unmap; 591 goto err_unmap;
452 vif->tx_irq = err; 592 queue->tx_irq = err;
453 disable_irq(vif->tx_irq); 593 disable_irq(queue->tx_irq);
454 594
455 snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name), 595 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
456 "%s-rx", vif->dev->name); 596 "%s-rx", queue->name);
457 err = bind_interdomain_evtchn_to_irqhandler( 597 err = bind_interdomain_evtchn_to_irqhandler(
458 vif->domid, rx_evtchn, xenvif_rx_interrupt, 0, 598 queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
459 vif->rx_irq_name, vif); 599 queue->rx_irq_name, queue);
460 if (err < 0) 600 if (err < 0)
461 goto err_tx_unbind; 601 goto err_tx_unbind;
462 vif->rx_irq = err; 602 queue->rx_irq = err;
463 disable_irq(vif->rx_irq); 603 disable_irq(queue->rx_irq);
464 } 604 }
465 605
466 task = kthread_create(xenvif_kthread_guest_rx, 606 task = kthread_create(xenvif_kthread_guest_rx,
467 (void *)vif, "%s-guest-rx", vif->dev->name); 607 (void *)queue, "%s-guest-rx", queue->name);
468 if (IS_ERR(task)) { 608 if (IS_ERR(task)) {
469 pr_warn("Could not allocate kthread for %s\n", vif->dev->name); 609 pr_warn("Could not allocate kthread for %s\n", queue->name);
470 err = PTR_ERR(task); 610 err = PTR_ERR(task);
471 goto err_rx_unbind; 611 goto err_rx_unbind;
472 } 612 }
473 613 queue->task = task;
474 vif->task = task;
475 614
476 task = kthread_create(xenvif_dealloc_kthread, 615 task = kthread_create(xenvif_dealloc_kthread,
477 (void *)vif, "%s-dealloc", vif->dev->name); 616 (void *)queue, "%s-dealloc", queue->name);
478 if (IS_ERR(task)) { 617 if (IS_ERR(task)) {
479 pr_warn("Could not allocate kthread for %s\n", vif->dev->name); 618 pr_warn("Could not allocate kthread for %s\n", queue->name);
480 err = PTR_ERR(task); 619 err = PTR_ERR(task);
481 goto err_rx_unbind; 620 goto err_rx_unbind;
482 } 621 }
622 queue->dealloc_task = task;
483 623
484 vif->dealloc_task = task; 624 wake_up_process(queue->task);
485 625 wake_up_process(queue->dealloc_task);
486 rtnl_lock();
487 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
488 dev_set_mtu(vif->dev, ETH_DATA_LEN);
489 netdev_update_features(vif->dev);
490 netif_carrier_on(vif->dev);
491 if (netif_running(vif->dev))
492 xenvif_up(vif);
493 rtnl_unlock();
494
495 wake_up_process(vif->task);
496 wake_up_process(vif->dealloc_task);
497 626
498 return 0; 627 return 0;
499 628
500err_rx_unbind: 629err_rx_unbind:
501 unbind_from_irqhandler(vif->rx_irq, vif); 630 unbind_from_irqhandler(queue->rx_irq, queue);
502 vif->rx_irq = 0; 631 queue->rx_irq = 0;
503err_tx_unbind: 632err_tx_unbind:
504 unbind_from_irqhandler(vif->tx_irq, vif); 633 unbind_from_irqhandler(queue->tx_irq, queue);
505 vif->tx_irq = 0; 634 queue->tx_irq = 0;
506err_unmap: 635err_unmap:
507 xenvif_unmap_frontend_rings(vif); 636 xenvif_unmap_frontend_rings(queue);
508err: 637err:
509 module_put(THIS_MODULE); 638 module_put(THIS_MODULE);
510 return err; 639 return err;
@@ -521,38 +650,67 @@ void xenvif_carrier_off(struct xenvif *vif)
521 rtnl_unlock(); 650 rtnl_unlock();
522} 651}
523 652
653static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
654 unsigned int worst_case_skb_lifetime)
655{
656 int i, unmap_timeout = 0;
657
658 for (i = 0; i < MAX_PENDING_REQS; ++i) {
659 if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
660 unmap_timeout++;
661 schedule_timeout(msecs_to_jiffies(1000));
662 if (unmap_timeout > worst_case_skb_lifetime &&
663 net_ratelimit())
664 netdev_err(queue->vif->dev,
665 "Page still granted! Index: %x\n",
666 i);
667 i = -1;
668 }
669 }
670}
671
524void xenvif_disconnect(struct xenvif *vif) 672void xenvif_disconnect(struct xenvif *vif)
525{ 673{
674 struct xenvif_queue *queue = NULL;
675 unsigned int num_queues = vif->dev->real_num_tx_queues;
676 unsigned int queue_index;
677
526 if (netif_carrier_ok(vif->dev)) 678 if (netif_carrier_ok(vif->dev))
527 xenvif_carrier_off(vif); 679 xenvif_carrier_off(vif);
528 680
529 if (vif->task) { 681 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
530 del_timer_sync(&vif->wake_queue); 682 queue = &vif->queues[queue_index];
531 kthread_stop(vif->task);
532 vif->task = NULL;
533 }
534 683
535 if (vif->dealloc_task) { 684 if (queue->task) {
536 kthread_stop(vif->dealloc_task); 685 del_timer_sync(&queue->wake_queue);
537 vif->dealloc_task = NULL; 686 kthread_stop(queue->task);
538 } 687 queue->task = NULL;
688 }
539 689
540 if (vif->tx_irq) { 690 if (queue->dealloc_task) {
541 if (vif->tx_irq == vif->rx_irq) 691 kthread_stop(queue->dealloc_task);
542 unbind_from_irqhandler(vif->tx_irq, vif); 692 queue->dealloc_task = NULL;
543 else { 693 }
544 unbind_from_irqhandler(vif->tx_irq, vif); 694
545 unbind_from_irqhandler(vif->rx_irq, vif); 695 if (queue->tx_irq) {
696 if (queue->tx_irq == queue->rx_irq)
697 unbind_from_irqhandler(queue->tx_irq, queue);
698 else {
699 unbind_from_irqhandler(queue->tx_irq, queue);
700 unbind_from_irqhandler(queue->rx_irq, queue);
701 }
702 queue->tx_irq = 0;
546 } 703 }
547 vif->tx_irq = 0;
548 }
549 704
550 xenvif_unmap_frontend_rings(vif); 705 xenvif_unmap_frontend_rings(queue);
706 }
551} 707}
552 708
553void xenvif_free(struct xenvif *vif) 709void xenvif_free(struct xenvif *vif)
554{ 710{
555 int i, unmap_timeout = 0; 711 struct xenvif_queue *queue = NULL;
712 unsigned int num_queues = vif->dev->real_num_tx_queues;
713 unsigned int queue_index;
556 /* Here we want to avoid timeout messages if an skb can be legitimately 714 /* Here we want to avoid timeout messages if an skb can be legitimately
557 * stuck somewhere else. Realistically this could be an another vif's 715 * stuck somewhere else. Realistically this could be an another vif's
558 * internal or QDisc queue. That another vif also has this 716 * internal or QDisc queue. That another vif also has this
@@ -567,31 +725,23 @@ void xenvif_free(struct xenvif *vif)
567 unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) * 725 unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
568 DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS)); 726 DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
569 727
570 for (i = 0; i < MAX_PENDING_REQS; ++i) { 728 unregister_netdev(vif->dev);
571 if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
572 unmap_timeout++;
573 schedule_timeout(msecs_to_jiffies(1000));
574 if (unmap_timeout > worst_case_skb_lifetime &&
575 net_ratelimit())
576 netdev_err(vif->dev,
577 "Page still granted! Index: %x\n",
578 i);
579 /* If there are still unmapped pages, reset the loop to
580 * start checking again. We shouldn't exit here until
581 * dealloc thread and NAPI instance release all the
582 * pages. If a kernel bug causes the skbs to stall
583 * somewhere, the interface cannot be brought down
584 * properly.
585 */
586 i = -1;
587 }
588 }
589 729
590 free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages); 730 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
731 queue = &vif->queues[queue_index];
591 732
592 netif_napi_del(&vif->napi); 733 xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
734 free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
593 735
594 unregister_netdev(vif->dev); 736 netif_napi_del(&queue->napi);
737 }
738
739 /* Free the array of queues. The call below does not require
740 * rtnl_lock() because it happens after unregister_netdev().
741 */
742 netif_set_real_num_tx_queues(vif->dev, 0);
743 vfree(vif->queues);
744 vif->queues = NULL;
595 745
596 free_netdev(vif->dev); 746 free_netdev(vif->dev);
597 747
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7367208ee8cd..a5484e8cb06e 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -70,33 +70,33 @@ unsigned int rx_drain_timeout_jiffies;
70static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; 70static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
71module_param(fatal_skb_slots, uint, 0444); 71module_param(fatal_skb_slots, uint, 0444);
72 72
73static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx, 73static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
74 u8 status); 74 u8 status);
75 75
76static void make_tx_response(struct xenvif *vif, 76static void make_tx_response(struct xenvif_queue *queue,
77 struct xen_netif_tx_request *txp, 77 struct xen_netif_tx_request *txp,
78 s8 st); 78 s8 st);
79 79
80static inline int tx_work_todo(struct xenvif *vif); 80static inline int tx_work_todo(struct xenvif_queue *queue);
81static inline int rx_work_todo(struct xenvif *vif); 81static inline int rx_work_todo(struct xenvif_queue *queue);
82 82
83static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, 83static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
84 u16 id, 84 u16 id,
85 s8 st, 85 s8 st,
86 u16 offset, 86 u16 offset,
87 u16 size, 87 u16 size,
88 u16 flags); 88 u16 flags);
89 89
90static inline unsigned long idx_to_pfn(struct xenvif *vif, 90static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
91 u16 idx) 91 u16 idx)
92{ 92{
93 return page_to_pfn(vif->mmap_pages[idx]); 93 return page_to_pfn(queue->mmap_pages[idx]);
94} 94}
95 95
96static inline unsigned long idx_to_kaddr(struct xenvif *vif, 96static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
97 u16 idx) 97 u16 idx)
98{ 98{
99 return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx)); 99 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
100} 100}
101 101
102#define callback_param(vif, pending_idx) \ 102#define callback_param(vif, pending_idx) \
@@ -104,13 +104,13 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
104 104
105/* Find the containing VIF's structure from a pointer in pending_tx_info array 105/* Find the containing VIF's structure from a pointer in pending_tx_info array
106 */ 106 */
107static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf) 107static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
108{ 108{
109 u16 pending_idx = ubuf->desc; 109 u16 pending_idx = ubuf->desc;
110 struct pending_tx_info *temp = 110 struct pending_tx_info *temp =
111 container_of(ubuf, struct pending_tx_info, callback_struct); 111 container_of(ubuf, struct pending_tx_info, callback_struct);
112 return container_of(temp - pending_idx, 112 return container_of(temp - pending_idx,
113 struct xenvif, 113 struct xenvif_queue,
114 pending_tx_info[0]); 114 pending_tx_info[0]);
115} 115}
116 116
@@ -136,24 +136,24 @@ static inline pending_ring_idx_t pending_index(unsigned i)
136 return i & (MAX_PENDING_REQS-1); 136 return i & (MAX_PENDING_REQS-1);
137} 137}
138 138
139bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed) 139bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
140{ 140{
141 RING_IDX prod, cons; 141 RING_IDX prod, cons;
142 142
143 do { 143 do {
144 prod = vif->rx.sring->req_prod; 144 prod = queue->rx.sring->req_prod;
145 cons = vif->rx.req_cons; 145 cons = queue->rx.req_cons;
146 146
147 if (prod - cons >= needed) 147 if (prod - cons >= needed)
148 return true; 148 return true;
149 149
150 vif->rx.sring->req_event = prod + 1; 150 queue->rx.sring->req_event = prod + 1;
151 151
152 /* Make sure event is visible before we check prod 152 /* Make sure event is visible before we check prod
153 * again. 153 * again.
154 */ 154 */
155 mb(); 155 mb();
156 } while (vif->rx.sring->req_prod != prod); 156 } while (queue->rx.sring->req_prod != prod);
157 157
158 return false; 158 return false;
159} 159}
@@ -207,13 +207,13 @@ struct netrx_pending_operations {
207 grant_ref_t copy_gref; 207 grant_ref_t copy_gref;
208}; 208};
209 209
210static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif, 210static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
211 struct netrx_pending_operations *npo) 211 struct netrx_pending_operations *npo)
212{ 212{
213 struct xenvif_rx_meta *meta; 213 struct xenvif_rx_meta *meta;
214 struct xen_netif_rx_request *req; 214 struct xen_netif_rx_request *req;
215 215
216 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 216 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
217 217
218 meta = npo->meta + npo->meta_prod++; 218 meta = npo->meta + npo->meta_prod++;
219 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; 219 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
@@ -231,11 +231,11 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
231 * Set up the grant operations for this fragment. If it's a flipping 231 * Set up the grant operations for this fragment. If it's a flipping
232 * interface, we also set up the unmap request from here. 232 * interface, we also set up the unmap request from here.
233 */ 233 */
234static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, 234static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
235 struct netrx_pending_operations *npo, 235 struct netrx_pending_operations *npo,
236 struct page *page, unsigned long size, 236 struct page *page, unsigned long size,
237 unsigned long offset, int *head, 237 unsigned long offset, int *head,
238 struct xenvif *foreign_vif, 238 struct xenvif_queue *foreign_queue,
239 grant_ref_t foreign_gref) 239 grant_ref_t foreign_gref)
240{ 240{
241 struct gnttab_copy *copy_gop; 241 struct gnttab_copy *copy_gop;
@@ -268,7 +268,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
268 */ 268 */
269 BUG_ON(*head); 269 BUG_ON(*head);
270 270
271 meta = get_next_rx_buffer(vif, npo); 271 meta = get_next_rx_buffer(queue, npo);
272 } 272 }
273 273
274 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) 274 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
@@ -278,8 +278,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
278 copy_gop->flags = GNTCOPY_dest_gref; 278 copy_gop->flags = GNTCOPY_dest_gref;
279 copy_gop->len = bytes; 279 copy_gop->len = bytes;
280 280
281 if (foreign_vif) { 281 if (foreign_queue) {
282 copy_gop->source.domid = foreign_vif->domid; 282 copy_gop->source.domid = foreign_queue->vif->domid;
283 copy_gop->source.u.ref = foreign_gref; 283 copy_gop->source.u.ref = foreign_gref;
284 copy_gop->flags |= GNTCOPY_source_gref; 284 copy_gop->flags |= GNTCOPY_source_gref;
285 } else { 285 } else {
@@ -289,7 +289,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
289 } 289 }
290 copy_gop->source.offset = offset; 290 copy_gop->source.offset = offset;
291 291
292 copy_gop->dest.domid = vif->domid; 292 copy_gop->dest.domid = queue->vif->domid;
293 copy_gop->dest.offset = npo->copy_off; 293 copy_gop->dest.offset = npo->copy_off;
294 copy_gop->dest.u.ref = npo->copy_gref; 294 copy_gop->dest.u.ref = npo->copy_gref;
295 295
@@ -314,8 +314,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
314 gso_type = XEN_NETIF_GSO_TYPE_TCPV6; 314 gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
315 } 315 }
316 316
317 if (*head && ((1 << gso_type) & vif->gso_mask)) 317 if (*head && ((1 << gso_type) & queue->vif->gso_mask))
318 vif->rx.req_cons++; 318 queue->rx.req_cons++;
319 319
320 *head = 0; /* There must be something in this buffer now. */ 320 *head = 0; /* There must be something in this buffer now. */
321 321
@@ -337,13 +337,13 @@ static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
337 const int i, 337 const int i,
338 const struct ubuf_info *ubuf) 338 const struct ubuf_info *ubuf)
339{ 339{
340 struct xenvif *foreign_vif = ubuf_to_vif(ubuf); 340 struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
341 341
342 do { 342 do {
343 u16 pending_idx = ubuf->desc; 343 u16 pending_idx = ubuf->desc;
344 344
345 if (skb_shinfo(skb)->frags[i].page.p == 345 if (skb_shinfo(skb)->frags[i].page.p ==
346 foreign_vif->mmap_pages[pending_idx]) 346 foreign_queue->mmap_pages[pending_idx])
347 break; 347 break;
348 ubuf = (struct ubuf_info *) ubuf->ctx; 348 ubuf = (struct ubuf_info *) ubuf->ctx;
349 } while (ubuf); 349 } while (ubuf);
@@ -364,7 +364,8 @@ static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
364 * frontend-side LRO). 364 * frontend-side LRO).
365 */ 365 */
366static int xenvif_gop_skb(struct sk_buff *skb, 366static int xenvif_gop_skb(struct sk_buff *skb,
367 struct netrx_pending_operations *npo) 367 struct netrx_pending_operations *npo,
368 struct xenvif_queue *queue)
368{ 369{
369 struct xenvif *vif = netdev_priv(skb->dev); 370 struct xenvif *vif = netdev_priv(skb->dev);
370 int nr_frags = skb_shinfo(skb)->nr_frags; 371 int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -390,7 +391,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
390 391
391 /* Set up a GSO prefix descriptor, if necessary */ 392 /* Set up a GSO prefix descriptor, if necessary */
392 if ((1 << gso_type) & vif->gso_prefix_mask) { 393 if ((1 << gso_type) & vif->gso_prefix_mask) {
393 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 394 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
394 meta = npo->meta + npo->meta_prod++; 395 meta = npo->meta + npo->meta_prod++;
395 meta->gso_type = gso_type; 396 meta->gso_type = gso_type;
396 meta->gso_size = skb_shinfo(skb)->gso_size; 397 meta->gso_size = skb_shinfo(skb)->gso_size;
@@ -398,7 +399,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
398 meta->id = req->id; 399 meta->id = req->id;
399 } 400 }
400 401
401 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 402 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
402 meta = npo->meta + npo->meta_prod++; 403 meta = npo->meta + npo->meta_prod++;
403 404
404 if ((1 << gso_type) & vif->gso_mask) { 405 if ((1 << gso_type) & vif->gso_mask) {
@@ -422,7 +423,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
422 if (data + len > skb_tail_pointer(skb)) 423 if (data + len > skb_tail_pointer(skb))
423 len = skb_tail_pointer(skb) - data; 424 len = skb_tail_pointer(skb) - data;
424 425
425 xenvif_gop_frag_copy(vif, skb, npo, 426 xenvif_gop_frag_copy(queue, skb, npo,
426 virt_to_page(data), len, offset, &head, 427 virt_to_page(data), len, offset, &head,
427 NULL, 428 NULL,
428 0); 429 0);
@@ -433,7 +434,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
433 /* This variable also signals whether foreign_gref has a real 434 /* This variable also signals whether foreign_gref has a real
434 * value or not. 435 * value or not.
435 */ 436 */
436 struct xenvif *foreign_vif = NULL; 437 struct xenvif_queue *foreign_queue = NULL;
437 grant_ref_t foreign_gref; 438 grant_ref_t foreign_gref;
438 439
439 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) && 440 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
@@ -458,8 +459,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
458 if (likely(ubuf)) { 459 if (likely(ubuf)) {
459 u16 pending_idx = ubuf->desc; 460 u16 pending_idx = ubuf->desc;
460 461
461 foreign_vif = ubuf_to_vif(ubuf); 462 foreign_queue = ubuf_to_queue(ubuf);
462 foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref; 463 foreign_gref =
464 foreign_queue->pending_tx_info[pending_idx].req.gref;
463 /* Just a safety measure. If this was the last 465 /* Just a safety measure. If this was the last
464 * element on the list, the for loop will 466 * element on the list, the for loop will
465 * iterate again if a local page were added to 467 * iterate again if a local page were added to
@@ -477,13 +479,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
477 */ 479 */
478 ubuf = head_ubuf; 480 ubuf = head_ubuf;
479 } 481 }
480 xenvif_gop_frag_copy(vif, skb, npo, 482 xenvif_gop_frag_copy(queue, skb, npo,
481 skb_frag_page(&skb_shinfo(skb)->frags[i]), 483 skb_frag_page(&skb_shinfo(skb)->frags[i]),
482 skb_frag_size(&skb_shinfo(skb)->frags[i]), 484 skb_frag_size(&skb_shinfo(skb)->frags[i]),
483 skb_shinfo(skb)->frags[i].page_offset, 485 skb_shinfo(skb)->frags[i].page_offset,
484 &head, 486 &head,
485 foreign_vif, 487 foreign_queue,
486 foreign_vif ? foreign_gref : UINT_MAX); 488 foreign_queue ? foreign_gref : UINT_MAX);
487 } 489 }
488 490
489 return npo->meta_prod - old_meta_prod; 491 return npo->meta_prod - old_meta_prod;
@@ -515,7 +517,7 @@ static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
515 return status; 517 return status;
516} 518}
517 519
518static void xenvif_add_frag_responses(struct xenvif *vif, int status, 520static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
519 struct xenvif_rx_meta *meta, 521 struct xenvif_rx_meta *meta,
520 int nr_meta_slots) 522 int nr_meta_slots)
521{ 523{
@@ -536,7 +538,7 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
536 flags = XEN_NETRXF_more_data; 538 flags = XEN_NETRXF_more_data;
537 539
538 offset = 0; 540 offset = 0;
539 make_rx_response(vif, meta[i].id, status, offset, 541 make_rx_response(queue, meta[i].id, status, offset,
540 meta[i].size, flags); 542 meta[i].size, flags);
541 } 543 }
542} 544}
@@ -547,12 +549,12 @@ struct xenvif_rx_cb {
547 549
548#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb) 550#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
549 551
550void xenvif_kick_thread(struct xenvif *vif) 552void xenvif_kick_thread(struct xenvif_queue *queue)
551{ 553{
552 wake_up(&vif->wq); 554 wake_up(&queue->wq);
553} 555}
554 556
555static void xenvif_rx_action(struct xenvif *vif) 557static void xenvif_rx_action(struct xenvif_queue *queue)
556{ 558{
557 s8 status; 559 s8 status;
558 u16 flags; 560 u16 flags;
@@ -565,13 +567,13 @@ static void xenvif_rx_action(struct xenvif *vif)
565 bool need_to_notify = false; 567 bool need_to_notify = false;
566 568
567 struct netrx_pending_operations npo = { 569 struct netrx_pending_operations npo = {
568 .copy = vif->grant_copy_op, 570 .copy = queue->grant_copy_op,
569 .meta = vif->meta, 571 .meta = queue->meta,
570 }; 572 };
571 573
572 skb_queue_head_init(&rxq); 574 skb_queue_head_init(&rxq);
573 575
574 while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) { 576 while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) {
575 RING_IDX max_slots_needed; 577 RING_IDX max_slots_needed;
576 RING_IDX old_req_cons; 578 RING_IDX old_req_cons;
577 RING_IDX ring_slots_used; 579 RING_IDX ring_slots_used;
@@ -614,42 +616,42 @@ static void xenvif_rx_action(struct xenvif *vif)
614 max_slots_needed++; 616 max_slots_needed++;
615 617
616 /* If the skb may not fit then bail out now */ 618 /* If the skb may not fit then bail out now */
617 if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) { 619 if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) {
618 skb_queue_head(&vif->rx_queue, skb); 620 skb_queue_head(&queue->rx_queue, skb);
619 need_to_notify = true; 621 need_to_notify = true;
620 vif->rx_last_skb_slots = max_slots_needed; 622 queue->rx_last_skb_slots = max_slots_needed;
621 break; 623 break;
622 } else 624 } else
623 vif->rx_last_skb_slots = 0; 625 queue->rx_last_skb_slots = 0;
624 626
625 old_req_cons = vif->rx.req_cons; 627 old_req_cons = queue->rx.req_cons;
626 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo); 628 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
627 ring_slots_used = vif->rx.req_cons - old_req_cons; 629 ring_slots_used = queue->rx.req_cons - old_req_cons;
628 630
629 BUG_ON(ring_slots_used > max_slots_needed); 631 BUG_ON(ring_slots_used > max_slots_needed);
630 632
631 __skb_queue_tail(&rxq, skb); 633 __skb_queue_tail(&rxq, skb);
632 } 634 }
633 635
634 BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta)); 636 BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
635 637
636 if (!npo.copy_prod) 638 if (!npo.copy_prod)
637 goto done; 639 goto done;
638 640
639 BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); 641 BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
640 gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); 642 gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
641 643
642 while ((skb = __skb_dequeue(&rxq)) != NULL) { 644 while ((skb = __skb_dequeue(&rxq)) != NULL) {
643 645
644 if ((1 << vif->meta[npo.meta_cons].gso_type) & 646 if ((1 << queue->meta[npo.meta_cons].gso_type) &
645 vif->gso_prefix_mask) { 647 queue->vif->gso_prefix_mask) {
646 resp = RING_GET_RESPONSE(&vif->rx, 648 resp = RING_GET_RESPONSE(&queue->rx,
647 vif->rx.rsp_prod_pvt++); 649 queue->rx.rsp_prod_pvt++);
648 650
649 resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data; 651 resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
650 652
651 resp->offset = vif->meta[npo.meta_cons].gso_size; 653 resp->offset = queue->meta[npo.meta_cons].gso_size;
652 resp->id = vif->meta[npo.meta_cons].id; 654 resp->id = queue->meta[npo.meta_cons].id;
653 resp->status = XENVIF_RX_CB(skb)->meta_slots_used; 655 resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
654 656
655 npo.meta_cons++; 657 npo.meta_cons++;
@@ -657,10 +659,10 @@ static void xenvif_rx_action(struct xenvif *vif)
657 } 659 }
658 660
659 661
660 vif->dev->stats.tx_bytes += skb->len; 662 queue->stats.tx_bytes += skb->len;
661 vif->dev->stats.tx_packets++; 663 queue->stats.tx_packets++;
662 664
663 status = xenvif_check_gop(vif, 665 status = xenvif_check_gop(queue->vif,
664 XENVIF_RX_CB(skb)->meta_slots_used, 666 XENVIF_RX_CB(skb)->meta_slots_used,
665 &npo); 667 &npo);
666 668
@@ -676,22 +678,22 @@ static void xenvif_rx_action(struct xenvif *vif)
676 flags |= XEN_NETRXF_data_validated; 678 flags |= XEN_NETRXF_data_validated;
677 679
678 offset = 0; 680 offset = 0;
679 resp = make_rx_response(vif, vif->meta[npo.meta_cons].id, 681 resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
680 status, offset, 682 status, offset,
681 vif->meta[npo.meta_cons].size, 683 queue->meta[npo.meta_cons].size,
682 flags); 684 flags);
683 685
684 if ((1 << vif->meta[npo.meta_cons].gso_type) & 686 if ((1 << queue->meta[npo.meta_cons].gso_type) &
685 vif->gso_mask) { 687 queue->vif->gso_mask) {
686 struct xen_netif_extra_info *gso = 688 struct xen_netif_extra_info *gso =
687 (struct xen_netif_extra_info *) 689 (struct xen_netif_extra_info *)
688 RING_GET_RESPONSE(&vif->rx, 690 RING_GET_RESPONSE(&queue->rx,
689 vif->rx.rsp_prod_pvt++); 691 queue->rx.rsp_prod_pvt++);
690 692
691 resp->flags |= XEN_NETRXF_extra_info; 693 resp->flags |= XEN_NETRXF_extra_info;
692 694
693 gso->u.gso.type = vif->meta[npo.meta_cons].gso_type; 695 gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
694 gso->u.gso.size = vif->meta[npo.meta_cons].gso_size; 696 gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
695 gso->u.gso.pad = 0; 697 gso->u.gso.pad = 0;
696 gso->u.gso.features = 0; 698 gso->u.gso.features = 0;
697 699
@@ -699,11 +701,11 @@ static void xenvif_rx_action(struct xenvif *vif)
699 gso->flags = 0; 701 gso->flags = 0;
700 } 702 }
701 703
702 xenvif_add_frag_responses(vif, status, 704 xenvif_add_frag_responses(queue, status,
703 vif->meta + npo.meta_cons + 1, 705 queue->meta + npo.meta_cons + 1,
704 XENVIF_RX_CB(skb)->meta_slots_used); 706 XENVIF_RX_CB(skb)->meta_slots_used);
705 707
706 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); 708 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
707 709
708 need_to_notify |= !!ret; 710 need_to_notify |= !!ret;
709 711
@@ -713,20 +715,20 @@ static void xenvif_rx_action(struct xenvif *vif)
713 715
714done: 716done:
715 if (need_to_notify) 717 if (need_to_notify)
716 notify_remote_via_irq(vif->rx_irq); 718 notify_remote_via_irq(queue->rx_irq);
717} 719}
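
Note: the hunks above replace vif->dev->stats updates with per-queue counters (queue->stats.tx_bytes, queue->stats.tx_packets). A driver reporting device-wide totals would then fold the per-queue counters together; the standalone C sketch below shows only that aggregation, and the type and field names (q_stats, queues[], num_queues) are illustrative stand-ins rather than anything defined in this patch.

#include <stddef.h>

/* Minimal sketch: fold per-queue counters into one device-wide total. */
struct q_stats { unsigned long rx_bytes, rx_packets, tx_bytes, tx_packets; };
struct queue   { struct q_stats stats; };

static void sum_queue_stats(const struct queue *queues, size_t num_queues,
                            struct q_stats *total)
{
        size_t i;

        total->rx_bytes = total->rx_packets = 0;
        total->tx_bytes = total->tx_packets = 0;
        for (i = 0; i < num_queues; i++) {
                total->rx_bytes   += queues[i].stats.rx_bytes;
                total->rx_packets += queues[i].stats.rx_packets;
                total->tx_bytes   += queues[i].stats.tx_bytes;
                total->tx_packets += queues[i].stats.tx_packets;
        }
}
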
718 720
719void xenvif_napi_schedule_or_enable_events(struct xenvif *vif) 721void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
720{ 722{
721 int more_to_do; 723 int more_to_do;
722 724
723 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); 725 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
724 726
725 if (more_to_do) 727 if (more_to_do)
726 napi_schedule(&vif->napi); 728 napi_schedule(&queue->napi);
727} 729}
728 730
729static void tx_add_credit(struct xenvif *vif) 731static void tx_add_credit(struct xenvif_queue *queue)
730{ 732{
731 unsigned long max_burst, max_credit; 733 unsigned long max_burst, max_credit;
732 734
@@ -734,55 +736,57 @@ static void tx_add_credit(struct xenvif *vif)
734 * Allow a burst big enough to transmit a jumbo packet of up to 128kB. 736 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
735 * Otherwise the interface can seize up due to insufficient credit. 737 * Otherwise the interface can seize up due to insufficient credit.
736 */ 738 */
737 max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size; 739 max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
738 max_burst = min(max_burst, 131072UL); 740 max_burst = min(max_burst, 131072UL);
739 max_burst = max(max_burst, vif->credit_bytes); 741 max_burst = max(max_burst, queue->credit_bytes);
740 742
741 /* Take care that adding a new chunk of credit doesn't wrap to zero. */ 743 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
742 max_credit = vif->remaining_credit + vif->credit_bytes; 744 max_credit = queue->remaining_credit + queue->credit_bytes;
743 if (max_credit < vif->remaining_credit) 745 if (max_credit < queue->remaining_credit)
744 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ 746 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
745 747
746 vif->remaining_credit = min(max_credit, max_burst); 748 queue->remaining_credit = min(max_credit, max_burst);
747} 749}
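
For reference, the wrap-safe top-up in tx_add_credit() reduces to the standalone C below, written with plain comparisons instead of the kernel's min()/max() helpers; the 131072UL cap mirrors the jumbo-frame burst comment above. This is a sketch of the arithmetic only, not a drop-in replacement.

#include <limits.h>

/* Cap the burst at ~128kB but never below the configured per-window
 * credit, and clamp the sum if the addition wraps past zero. */
static unsigned long replenished_credit(unsigned long remaining,
                                        unsigned long credit_bytes,
                                        unsigned long next_request_size)
{
        unsigned long max_burst = next_request_size;
        unsigned long max_credit;

        if (max_burst > 131072UL)
                max_burst = 131072UL;
        if (max_burst < credit_bytes)
                max_burst = credit_bytes;

        max_credit = remaining + credit_bytes;
        if (max_credit < remaining)      /* addition wrapped past zero */
                max_credit = ULONG_MAX;

        return max_credit < max_burst ? max_credit : max_burst;
}
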
748 750
749static void tx_credit_callback(unsigned long data) 751static void tx_credit_callback(unsigned long data)
750{ 752{
751 struct xenvif *vif = (struct xenvif *)data; 753 struct xenvif_queue *queue = (struct xenvif_queue *)data;
752 tx_add_credit(vif); 754 tx_add_credit(queue);
753 xenvif_napi_schedule_or_enable_events(vif); 755 xenvif_napi_schedule_or_enable_events(queue);
754} 756}
755 757
756static void xenvif_tx_err(struct xenvif *vif, 758static void xenvif_tx_err(struct xenvif_queue *queue,
757 struct xen_netif_tx_request *txp, RING_IDX end) 759 struct xen_netif_tx_request *txp, RING_IDX end)
758{ 760{
759 RING_IDX cons = vif->tx.req_cons; 761 RING_IDX cons = queue->tx.req_cons;
760 unsigned long flags; 762 unsigned long flags;
761 763
762 do { 764 do {
763 spin_lock_irqsave(&vif->response_lock, flags); 765 spin_lock_irqsave(&queue->response_lock, flags);
764 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); 766 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
765 spin_unlock_irqrestore(&vif->response_lock, flags); 767 spin_unlock_irqrestore(&queue->response_lock, flags);
766 if (cons == end) 768 if (cons == end)
767 break; 769 break;
768 txp = RING_GET_REQUEST(&vif->tx, cons++); 770 txp = RING_GET_REQUEST(&queue->tx, cons++);
769 } while (1); 771 } while (1);
770 vif->tx.req_cons = cons; 772 queue->tx.req_cons = cons;
771} 773}
772 774
773static void xenvif_fatal_tx_err(struct xenvif *vif) 775static void xenvif_fatal_tx_err(struct xenvif *vif)
774{ 776{
775 netdev_err(vif->dev, "fatal error; disabling device\n"); 777 netdev_err(vif->dev, "fatal error; disabling device\n");
776 vif->disabled = true; 778 vif->disabled = true;
777 xenvif_kick_thread(vif); 779 /* Disable the vif from queue 0's kthread */
780 if (vif->queues)
781 xenvif_kick_thread(&vif->queues[0]);
778} 782}
779 783
780static int xenvif_count_requests(struct xenvif *vif, 784static int xenvif_count_requests(struct xenvif_queue *queue,
781 struct xen_netif_tx_request *first, 785 struct xen_netif_tx_request *first,
782 struct xen_netif_tx_request *txp, 786 struct xen_netif_tx_request *txp,
783 int work_to_do) 787 int work_to_do)
784{ 788{
785 RING_IDX cons = vif->tx.req_cons; 789 RING_IDX cons = queue->tx.req_cons;
786 int slots = 0; 790 int slots = 0;
787 int drop_err = 0; 791 int drop_err = 0;
788 int more_data; 792 int more_data;
@@ -794,10 +798,10 @@ static int xenvif_count_requests(struct xenvif *vif,
794 struct xen_netif_tx_request dropped_tx = { 0 }; 798 struct xen_netif_tx_request dropped_tx = { 0 };
795 799
796 if (slots >= work_to_do) { 800 if (slots >= work_to_do) {
797 netdev_err(vif->dev, 801 netdev_err(queue->vif->dev,
798 "Asked for %d slots but exceeds this limit\n", 802 "Asked for %d slots but exceeds this limit\n",
799 work_to_do); 803 work_to_do);
800 xenvif_fatal_tx_err(vif); 804 xenvif_fatal_tx_err(queue->vif);
801 return -ENODATA; 805 return -ENODATA;
802 } 806 }
803 807
@@ -805,10 +809,10 @@ static int xenvif_count_requests(struct xenvif *vif,
805 * considered malicious. 809 * considered malicious.
806 */ 810 */
807 if (unlikely(slots >= fatal_skb_slots)) { 811 if (unlikely(slots >= fatal_skb_slots)) {
808 netdev_err(vif->dev, 812 netdev_err(queue->vif->dev,
809 "Malicious frontend using %d slots, threshold %u\n", 813 "Malicious frontend using %d slots, threshold %u\n",
810 slots, fatal_skb_slots); 814 slots, fatal_skb_slots);
811 xenvif_fatal_tx_err(vif); 815 xenvif_fatal_tx_err(queue->vif);
812 return -E2BIG; 816 return -E2BIG;
813 } 817 }
814 818
@@ -821,7 +825,7 @@ static int xenvif_count_requests(struct xenvif *vif,
821 */ 825 */
822 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) { 826 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
823 if (net_ratelimit()) 827 if (net_ratelimit())
824 netdev_dbg(vif->dev, 828 netdev_dbg(queue->vif->dev,
825 "Too many slots (%d) exceeding limit (%d), dropping packet\n", 829 "Too many slots (%d) exceeding limit (%d), dropping packet\n",
826 slots, XEN_NETBK_LEGACY_SLOTS_MAX); 830 slots, XEN_NETBK_LEGACY_SLOTS_MAX);
827 drop_err = -E2BIG; 831 drop_err = -E2BIG;
@@ -830,7 +834,7 @@ static int xenvif_count_requests(struct xenvif *vif,
830 if (drop_err) 834 if (drop_err)
831 txp = &dropped_tx; 835 txp = &dropped_tx;
832 836
833 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots), 837 memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
834 sizeof(*txp)); 838 sizeof(*txp));
835 839
836 /* If the guest submitted a frame >= 64 KiB then 840 /* If the guest submitted a frame >= 64 KiB then
@@ -844,7 +848,7 @@ static int xenvif_count_requests(struct xenvif *vif,
844 */ 848 */
845 if (!drop_err && txp->size > first->size) { 849 if (!drop_err && txp->size > first->size) {
846 if (net_ratelimit()) 850 if (net_ratelimit())
847 netdev_dbg(vif->dev, 851 netdev_dbg(queue->vif->dev,
848 "Invalid tx request, slot size %u > remaining size %u\n", 852 "Invalid tx request, slot size %u > remaining size %u\n",
849 txp->size, first->size); 853 txp->size, first->size);
850 drop_err = -EIO; 854 drop_err = -EIO;
@@ -854,9 +858,9 @@ static int xenvif_count_requests(struct xenvif *vif,
854 slots++; 858 slots++;
855 859
856 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { 860 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
857 netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n", 861 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
858 txp->offset, txp->size); 862 txp->offset, txp->size);
859 xenvif_fatal_tx_err(vif); 863 xenvif_fatal_tx_err(queue->vif);
860 return -EINVAL; 864 return -EINVAL;
861 } 865 }
862 866
@@ -868,7 +872,7 @@ static int xenvif_count_requests(struct xenvif *vif,
868 } while (more_data); 872 } while (more_data);
869 873
870 if (drop_err) { 874 if (drop_err) {
871 xenvif_tx_err(vif, first, cons + slots); 875 xenvif_tx_err(queue, first, cons + slots);
872 return drop_err; 876 return drop_err;
873 } 877 }
874 878
@@ -882,17 +886,17 @@ struct xenvif_tx_cb {
882 886
883#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) 887#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
884 888
885static inline void xenvif_tx_create_map_op(struct xenvif *vif, 889static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
886 u16 pending_idx, 890 u16 pending_idx,
887 struct xen_netif_tx_request *txp, 891 struct xen_netif_tx_request *txp,
888 struct gnttab_map_grant_ref *mop) 892 struct gnttab_map_grant_ref *mop)
889{ 893{
890 vif->pages_to_map[mop-vif->tx_map_ops] = vif->mmap_pages[pending_idx]; 894 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
891 gnttab_set_map_op(mop, idx_to_kaddr(vif, pending_idx), 895 gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
892 GNTMAP_host_map | GNTMAP_readonly, 896 GNTMAP_host_map | GNTMAP_readonly,
893 txp->gref, vif->domid); 897 txp->gref, queue->vif->domid);
894 898
895 memcpy(&vif->pending_tx_info[pending_idx].req, txp, 899 memcpy(&queue->pending_tx_info[pending_idx].req, txp,
896 sizeof(*txp)); 900 sizeof(*txp));
897} 901}
898 902
@@ -913,7 +917,7 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
913 return skb; 917 return skb;
914} 918}
915 919
916static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif, 920static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
917 struct sk_buff *skb, 921 struct sk_buff *skb,
918 struct xen_netif_tx_request *txp, 922 struct xen_netif_tx_request *txp,
919 struct gnttab_map_grant_ref *gop) 923 struct gnttab_map_grant_ref *gop)
@@ -940,9 +944,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
940 944
941 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; 945 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
942 shinfo->nr_frags++, txp++, gop++) { 946 shinfo->nr_frags++, txp++, gop++) {
943 index = pending_index(vif->pending_cons++); 947 index = pending_index(queue->pending_cons++);
944 pending_idx = vif->pending_ring[index]; 948 pending_idx = queue->pending_ring[index];
945 xenvif_tx_create_map_op(vif, pending_idx, txp, gop); 949 xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
946 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); 950 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
947 } 951 }
948 952
@@ -950,7 +954,7 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
950 struct sk_buff *nskb = xenvif_alloc_skb(0); 954 struct sk_buff *nskb = xenvif_alloc_skb(0);
951 if (unlikely(nskb == NULL)) { 955 if (unlikely(nskb == NULL)) {
952 if (net_ratelimit()) 956 if (net_ratelimit())
953 netdev_err(vif->dev, 957 netdev_err(queue->vif->dev,
954 "Can't allocate the frag_list skb.\n"); 958 "Can't allocate the frag_list skb.\n");
955 return NULL; 959 return NULL;
956 } 960 }
@@ -960,9 +964,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
960 964
961 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; 965 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
962 shinfo->nr_frags++, txp++, gop++) { 966 shinfo->nr_frags++, txp++, gop++) {
963 index = pending_index(vif->pending_cons++); 967 index = pending_index(queue->pending_cons++);
964 pending_idx = vif->pending_ring[index]; 968 pending_idx = queue->pending_ring[index];
965 xenvif_tx_create_map_op(vif, pending_idx, txp, gop); 969 xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
966 frag_set_pending_idx(&frags[shinfo->nr_frags], 970 frag_set_pending_idx(&frags[shinfo->nr_frags],
967 pending_idx); 971 pending_idx);
968 } 972 }
@@ -973,34 +977,34 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
973 return gop; 977 return gop;
974} 978}
975 979
976static inline void xenvif_grant_handle_set(struct xenvif *vif, 980static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
977 u16 pending_idx, 981 u16 pending_idx,
978 grant_handle_t handle) 982 grant_handle_t handle)
979{ 983{
980 if (unlikely(vif->grant_tx_handle[pending_idx] != 984 if (unlikely(queue->grant_tx_handle[pending_idx] !=
981 NETBACK_INVALID_HANDLE)) { 985 NETBACK_INVALID_HANDLE)) {
982 netdev_err(vif->dev, 986 netdev_err(queue->vif->dev,
983 "Trying to overwrite active handle! pending_idx: %x\n", 987 "Trying to overwrite active handle! pending_idx: %x\n",
984 pending_idx); 988 pending_idx);
985 BUG(); 989 BUG();
986 } 990 }
987 vif->grant_tx_handle[pending_idx] = handle; 991 queue->grant_tx_handle[pending_idx] = handle;
988} 992}
989 993
990static inline void xenvif_grant_handle_reset(struct xenvif *vif, 994static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
991 u16 pending_idx) 995 u16 pending_idx)
992{ 996{
993 if (unlikely(vif->grant_tx_handle[pending_idx] == 997 if (unlikely(queue->grant_tx_handle[pending_idx] ==
994 NETBACK_INVALID_HANDLE)) { 998 NETBACK_INVALID_HANDLE)) {
995 netdev_err(vif->dev, 999 netdev_err(queue->vif->dev,
996 "Trying to unmap invalid handle! pending_idx: %x\n", 1000 "Trying to unmap invalid handle! pending_idx: %x\n",
997 pending_idx); 1001 pending_idx);
998 BUG(); 1002 BUG();
999 } 1003 }
1000 vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; 1004 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
1001} 1005}
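
The two guards above enforce a strict free/claimed life cycle on each grant handle slot. Shown in isolation, with assert() standing in for the driver's BUG() and an int slot standing in for grant_handle_t, the idea is simply:

#include <assert.h>

#define HANDLE_INVALID (-1)

/* A slot must be free before it is claimed and claimed before it is
 * released; anything else is a programming error, so fail loudly. */
static void handle_set(int *slot, int handle)
{
        assert(*slot == HANDLE_INVALID);   /* catch double-claim */
        *slot = handle;
}

static void handle_reset(int *slot)
{
        assert(*slot != HANDLE_INVALID);   /* catch double-release */
        *slot = HANDLE_INVALID;
}
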
1002 1006
1003static int xenvif_tx_check_gop(struct xenvif *vif, 1007static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1004 struct sk_buff *skb, 1008 struct sk_buff *skb,
1005 struct gnttab_map_grant_ref **gopp_map, 1009 struct gnttab_map_grant_ref **gopp_map,
1006 struct gnttab_copy **gopp_copy) 1010 struct gnttab_copy **gopp_copy)
@@ -1017,12 +1021,12 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
1017 (*gopp_copy)++; 1021 (*gopp_copy)++;
1018 if (unlikely(err)) { 1022 if (unlikely(err)) {
1019 if (net_ratelimit()) 1023 if (net_ratelimit())
1020 netdev_dbg(vif->dev, 1024 netdev_dbg(queue->vif->dev,
1021 "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n", 1025 "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
1022 (*gopp_copy)->status, 1026 (*gopp_copy)->status,
1023 pending_idx, 1027 pending_idx,
1024 (*gopp_copy)->source.u.ref); 1028 (*gopp_copy)->source.u.ref);
1025 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); 1029 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1026 } 1030 }
1027 1031
1028check_frags: 1032check_frags:
@@ -1035,24 +1039,24 @@ check_frags:
1035 newerr = gop_map->status; 1039 newerr = gop_map->status;
1036 1040
1037 if (likely(!newerr)) { 1041 if (likely(!newerr)) {
1038 xenvif_grant_handle_set(vif, 1042 xenvif_grant_handle_set(queue,
1039 pending_idx, 1043 pending_idx,
1040 gop_map->handle); 1044 gop_map->handle);
1041 /* Had a previous error? Invalidate this fragment. */ 1045 /* Had a previous error? Invalidate this fragment. */
1042 if (unlikely(err)) 1046 if (unlikely(err))
1043 xenvif_idx_unmap(vif, pending_idx); 1047 xenvif_idx_unmap(queue, pending_idx);
1044 continue; 1048 continue;
1045 } 1049 }
1046 1050
1047 /* Error on this fragment: respond to client with an error. */ 1051 /* Error on this fragment: respond to client with an error. */
1048 if (net_ratelimit()) 1052 if (net_ratelimit())
1049 netdev_dbg(vif->dev, 1053 netdev_dbg(queue->vif->dev,
1050 "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n", 1054 "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
1051 i, 1055 i,
1052 gop_map->status, 1056 gop_map->status,
1053 pending_idx, 1057 pending_idx,
1054 gop_map->ref); 1058 gop_map->ref);
1055 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); 1059 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1056 1060
1057 /* Not the first error? Preceding frags already invalidated. */ 1061 /* Not the first error? Preceding frags already invalidated. */
1058 if (err) 1062 if (err)
@@ -1060,7 +1064,7 @@ check_frags:
1060 /* First error: invalidate preceding fragments. */ 1064 /* First error: invalidate preceding fragments. */
1061 for (j = 0; j < i; j++) { 1065 for (j = 0; j < i; j++) {
1062 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 1066 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1063 xenvif_idx_unmap(vif, pending_idx); 1067 xenvif_idx_unmap(queue, pending_idx);
1064 } 1068 }
1065 1069
1066 /* Remember the error: invalidate all subsequent fragments. */ 1070 /* Remember the error: invalidate all subsequent fragments. */
@@ -1084,7 +1088,7 @@ check_frags:
1084 shinfo = skb_shinfo(first_skb); 1088 shinfo = skb_shinfo(first_skb);
1085 for (j = 0; j < shinfo->nr_frags; j++) { 1089 for (j = 0; j < shinfo->nr_frags; j++) {
1086 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 1090 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1087 xenvif_idx_unmap(vif, pending_idx); 1091 xenvif_idx_unmap(queue, pending_idx);
1088 } 1092 }
1089 } 1093 }
1090 1094
@@ -1092,7 +1096,7 @@ check_frags:
1092 return err; 1096 return err;
1093} 1097}
1094 1098
1095static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb) 1099static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
1096{ 1100{
1097 struct skb_shared_info *shinfo = skb_shinfo(skb); 1101 struct skb_shared_info *shinfo = skb_shinfo(skb);
1098 int nr_frags = shinfo->nr_frags; 1102 int nr_frags = shinfo->nr_frags;
@@ -1110,23 +1114,23 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
1110 /* If this is not the first frag, chain it to the previous*/ 1114 /* If this is not the first frag, chain it to the previous*/
1111 if (prev_pending_idx == INVALID_PENDING_IDX) 1115 if (prev_pending_idx == INVALID_PENDING_IDX)
1112 skb_shinfo(skb)->destructor_arg = 1116 skb_shinfo(skb)->destructor_arg =
1113 &callback_param(vif, pending_idx); 1117 &callback_param(queue, pending_idx);
1114 else 1118 else
1115 callback_param(vif, prev_pending_idx).ctx = 1119 callback_param(queue, prev_pending_idx).ctx =
1116 &callback_param(vif, pending_idx); 1120 &callback_param(queue, pending_idx);
1117 1121
1118 callback_param(vif, pending_idx).ctx = NULL; 1122 callback_param(queue, pending_idx).ctx = NULL;
1119 prev_pending_idx = pending_idx; 1123 prev_pending_idx = pending_idx;
1120 1124
1121 txp = &vif->pending_tx_info[pending_idx].req; 1125 txp = &queue->pending_tx_info[pending_idx].req;
1122 page = virt_to_page(idx_to_kaddr(vif, pending_idx)); 1126 page = virt_to_page(idx_to_kaddr(queue, pending_idx));
1123 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); 1127 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1124 skb->len += txp->size; 1128 skb->len += txp->size;
1125 skb->data_len += txp->size; 1129 skb->data_len += txp->size;
1126 skb->truesize += txp->size; 1130 skb->truesize += txp->size;
1127 1131
1128 /* Take an extra reference to offset network stack's put_page */ 1132 /* Take an extra reference to offset network stack's put_page */
1129 get_page(vif->mmap_pages[pending_idx]); 1133 get_page(queue->mmap_pages[pending_idx]);
1130 } 1134 }
1131 /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc 1135 /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
1132 * overlaps with "index", and "mapping" is not set. I think mapping 1136 * overlaps with "index", and "mapping" is not set. I think mapping
@@ -1136,33 +1140,33 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
1136 skb->pfmemalloc = false; 1140 skb->pfmemalloc = false;
1137} 1141}
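
xenvif_fill_frags() threads the per-fragment callback parameters into a singly linked chain through their ctx pointers, with the skb destructor_arg pointing at the head. A toy version of that chaining, using invented types in place of ubuf_info, looks like this:

#include <stddef.h>

struct cb_ctx {
        struct cb_ctx *next;     /* plays the role of ubuf_info.ctx */
        unsigned short idx;      /* pending index covered by this ctx */
};

/* Link ctxs[0..n-1] head-to-tail and return the head, which is what
 * the destructor would later walk to release every fragment. */
static struct cb_ctx *chain_contexts(struct cb_ctx *ctxs, size_t n)
{
        struct cb_ctx *head = NULL, *prev = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
                ctxs[i].next = NULL;
                if (prev)
                        prev->next = &ctxs[i];
                else
                        head = &ctxs[i];
                prev = &ctxs[i];
        }
        return head;
}
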
1138 1142
1139static int xenvif_get_extras(struct xenvif *vif, 1143static int xenvif_get_extras(struct xenvif_queue *queue,
1140 struct xen_netif_extra_info *extras, 1144 struct xen_netif_extra_info *extras,
1141 int work_to_do) 1145 int work_to_do)
1142{ 1146{
1143 struct xen_netif_extra_info extra; 1147 struct xen_netif_extra_info extra;
1144 RING_IDX cons = vif->tx.req_cons; 1148 RING_IDX cons = queue->tx.req_cons;
1145 1149
1146 do { 1150 do {
1147 if (unlikely(work_to_do-- <= 0)) { 1151 if (unlikely(work_to_do-- <= 0)) {
1148 netdev_err(vif->dev, "Missing extra info\n"); 1152 netdev_err(queue->vif->dev, "Missing extra info\n");
1149 xenvif_fatal_tx_err(vif); 1153 xenvif_fatal_tx_err(queue->vif);
1150 return -EBADR; 1154 return -EBADR;
1151 } 1155 }
1152 1156
1153 memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons), 1157 memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
1154 sizeof(extra)); 1158 sizeof(extra));
1155 if (unlikely(!extra.type || 1159 if (unlikely(!extra.type ||
1156 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 1160 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1157 vif->tx.req_cons = ++cons; 1161 queue->tx.req_cons = ++cons;
1158 netdev_err(vif->dev, 1162 netdev_err(queue->vif->dev,
1159 "Invalid extra type: %d\n", extra.type); 1163 "Invalid extra type: %d\n", extra.type);
1160 xenvif_fatal_tx_err(vif); 1164 xenvif_fatal_tx_err(queue->vif);
1161 return -EINVAL; 1165 return -EINVAL;
1162 } 1166 }
1163 1167
1164 memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); 1168 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1165 vif->tx.req_cons = ++cons; 1169 queue->tx.req_cons = ++cons;
1166 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); 1170 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1167 1171
1168 return work_to_do; 1172 return work_to_do;
@@ -1197,7 +1201,7 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
1197 return 0; 1201 return 0;
1198} 1202}
1199 1203
1200static int checksum_setup(struct xenvif *vif, struct sk_buff *skb) 1204static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
1201{ 1205{
1202 bool recalculate_partial_csum = false; 1206 bool recalculate_partial_csum = false;
1203 1207
@@ -1207,7 +1211,7 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1207 * recalculate the partial checksum. 1211 * recalculate the partial checksum.
1208 */ 1212 */
1209 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { 1213 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1210 vif->rx_gso_checksum_fixup++; 1214 queue->stats.rx_gso_checksum_fixup++;
1211 skb->ip_summed = CHECKSUM_PARTIAL; 1215 skb->ip_summed = CHECKSUM_PARTIAL;
1212 recalculate_partial_csum = true; 1216 recalculate_partial_csum = true;
1213 } 1217 }
@@ -1219,31 +1223,31 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1219 return skb_checksum_setup(skb, recalculate_partial_csum); 1223 return skb_checksum_setup(skb, recalculate_partial_csum);
1220} 1224}
1221 1225
1222static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) 1226static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
1223{ 1227{
1224 u64 now = get_jiffies_64(); 1228 u64 now = get_jiffies_64();
1225 u64 next_credit = vif->credit_window_start + 1229 u64 next_credit = queue->credit_window_start +
1226 msecs_to_jiffies(vif->credit_usec / 1000); 1230 msecs_to_jiffies(queue->credit_usec / 1000);
1227 1231
1228 /* Timer could already be pending in rare cases. */ 1232 /* Timer could already be pending in rare cases. */
1229 if (timer_pending(&vif->credit_timeout)) 1233 if (timer_pending(&queue->credit_timeout))
1230 return true; 1234 return true;
1231 1235
1232 /* Passed the point where we can replenish credit? */ 1236 /* Passed the point where we can replenish credit? */
1233 if (time_after_eq64(now, next_credit)) { 1237 if (time_after_eq64(now, next_credit)) {
1234 vif->credit_window_start = now; 1238 queue->credit_window_start = now;
1235 tx_add_credit(vif); 1239 tx_add_credit(queue);
1236 } 1240 }
1237 1241
1238 /* Still too big to send right now? Set a callback. */ 1242 /* Still too big to send right now? Set a callback. */
1239 if (size > vif->remaining_credit) { 1243 if (size > queue->remaining_credit) {
1240 vif->credit_timeout.data = 1244 queue->credit_timeout.data =
1241 (unsigned long)vif; 1245 (unsigned long)queue;
1242 vif->credit_timeout.function = 1246 queue->credit_timeout.function =
1243 tx_credit_callback; 1247 tx_credit_callback;
1244 mod_timer(&vif->credit_timeout, 1248 mod_timer(&queue->credit_timeout,
1245 next_credit); 1249 next_credit);
1246 vif->credit_window_start = next_credit; 1250 queue->credit_window_start = next_credit;
1247 1251
1248 return true; 1252 return true;
1249 } 1253 }
@@ -1251,16 +1255,16 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1251 return false; 1255 return false;
1252} 1256}
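
The credit check above is essentially a token-bucket test: refill once the window has elapsed, otherwise defer when the request exceeds what remains. Stripped of jiffies and the timer callback, the decision looks roughly like the sketch below; times are plain 64-bit counters and the driver's burst-aware refill is simplified to a flat one, so treat the struct and names as illustrative only.

#include <stdbool.h>
#include <stdint.h>

struct credit_state {
        uint64_t      window_start;   /* when the current window began */
        uint64_t      window_len;     /* replenish interval */
        unsigned long remaining;      /* bytes left in this window */
        unsigned long per_window;     /* bytes granted per window */
};

static bool credit_exceeded(struct credit_state *cs, uint64_t now,
                            unsigned long size)
{
        uint64_t next_credit = cs->window_start + cs->window_len;

        if (now >= next_credit) {     /* window elapsed: refill */
                cs->window_start = now;
                cs->remaining = cs->per_window;
        }

        /* The caller only charges 'size' against the credit when this
         * returns false, mirroring the flow in xenvif_tx_build_gops(). */
        return size > cs->remaining;
}
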
1253 1257
1254static void xenvif_tx_build_gops(struct xenvif *vif, 1258static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1255 int budget, 1259 int budget,
1256 unsigned *copy_ops, 1260 unsigned *copy_ops,
1257 unsigned *map_ops) 1261 unsigned *map_ops)
1258{ 1262{
1259 struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop; 1263 struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
1260 struct sk_buff *skb; 1264 struct sk_buff *skb;
1261 int ret; 1265 int ret;
1262 1266
1263 while (skb_queue_len(&vif->tx_queue) < budget) { 1267 while (skb_queue_len(&queue->tx_queue) < budget) {
1264 struct xen_netif_tx_request txreq; 1268 struct xen_netif_tx_request txreq;
1265 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; 1269 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1266 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; 1270 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
@@ -1270,69 +1274,69 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
1270 unsigned int data_len; 1274 unsigned int data_len;
1271 pending_ring_idx_t index; 1275 pending_ring_idx_t index;
1272 1276
1273 if (vif->tx.sring->req_prod - vif->tx.req_cons > 1277 if (queue->tx.sring->req_prod - queue->tx.req_cons >
1274 XEN_NETIF_TX_RING_SIZE) { 1278 XEN_NETIF_TX_RING_SIZE) {
1275 netdev_err(vif->dev, 1279 netdev_err(queue->vif->dev,
1276 "Impossible number of requests. " 1280 "Impossible number of requests. "
1277 "req_prod %d, req_cons %d, size %ld\n", 1281 "req_prod %d, req_cons %d, size %ld\n",
1278 vif->tx.sring->req_prod, vif->tx.req_cons, 1282 queue->tx.sring->req_prod, queue->tx.req_cons,
1279 XEN_NETIF_TX_RING_SIZE); 1283 XEN_NETIF_TX_RING_SIZE);
1280 xenvif_fatal_tx_err(vif); 1284 xenvif_fatal_tx_err(queue->vif);
1281 break; 1285 break;
1282 } 1286 }
1283 1287
1284 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx); 1288 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
1285 if (!work_to_do) 1289 if (!work_to_do)
1286 break; 1290 break;
1287 1291
1288 idx = vif->tx.req_cons; 1292 idx = queue->tx.req_cons;
1289 rmb(); /* Ensure that we see the request before we copy it. */ 1293 rmb(); /* Ensure that we see the request before we copy it. */
1290 memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq)); 1294 memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
1291 1295
1292 /* Credit-based scheduling. */ 1296 /* Credit-based scheduling. */
1293 if (txreq.size > vif->remaining_credit && 1297 if (txreq.size > queue->remaining_credit &&
1294 tx_credit_exceeded(vif, txreq.size)) 1298 tx_credit_exceeded(queue, txreq.size))
1295 break; 1299 break;
1296 1300
1297 vif->remaining_credit -= txreq.size; 1301 queue->remaining_credit -= txreq.size;
1298 1302
1299 work_to_do--; 1303 work_to_do--;
1300 vif->tx.req_cons = ++idx; 1304 queue->tx.req_cons = ++idx;
1301 1305
1302 memset(extras, 0, sizeof(extras)); 1306 memset(extras, 0, sizeof(extras));
1303 if (txreq.flags & XEN_NETTXF_extra_info) { 1307 if (txreq.flags & XEN_NETTXF_extra_info) {
1304 work_to_do = xenvif_get_extras(vif, extras, 1308 work_to_do = xenvif_get_extras(queue, extras,
1305 work_to_do); 1309 work_to_do);
1306 idx = vif->tx.req_cons; 1310 idx = queue->tx.req_cons;
1307 if (unlikely(work_to_do < 0)) 1311 if (unlikely(work_to_do < 0))
1308 break; 1312 break;
1309 } 1313 }
1310 1314
1311 ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do); 1315 ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
1312 if (unlikely(ret < 0)) 1316 if (unlikely(ret < 0))
1313 break; 1317 break;
1314 1318
1315 idx += ret; 1319 idx += ret;
1316 1320
1317 if (unlikely(txreq.size < ETH_HLEN)) { 1321 if (unlikely(txreq.size < ETH_HLEN)) {
1318 netdev_dbg(vif->dev, 1322 netdev_dbg(queue->vif->dev,
1319 "Bad packet size: %d\n", txreq.size); 1323 "Bad packet size: %d\n", txreq.size);
1320 xenvif_tx_err(vif, &txreq, idx); 1324 xenvif_tx_err(queue, &txreq, idx);
1321 break; 1325 break;
1322 } 1326 }
1323 1327
1324 /* No crossing a page as the payload mustn't fragment. */ 1328 /* No crossing a page as the payload mustn't fragment. */
1325 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { 1329 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1326 netdev_err(vif->dev, 1330 netdev_err(queue->vif->dev,
1327 "txreq.offset: %x, size: %u, end: %lu\n", 1331 "txreq.offset: %x, size: %u, end: %lu\n",
1328 txreq.offset, txreq.size, 1332 txreq.offset, txreq.size,
1329 (txreq.offset&~PAGE_MASK) + txreq.size); 1333 (txreq.offset&~PAGE_MASK) + txreq.size);
1330 xenvif_fatal_tx_err(vif); 1334 xenvif_fatal_tx_err(queue->vif);
1331 break; 1335 break;
1332 } 1336 }
1333 1337
1334 index = pending_index(vif->pending_cons); 1338 index = pending_index(queue->pending_cons);
1335 pending_idx = vif->pending_ring[index]; 1339 pending_idx = queue->pending_ring[index];
1336 1340
1337 data_len = (txreq.size > PKT_PROT_LEN && 1341 data_len = (txreq.size > PKT_PROT_LEN &&
1338 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? 1342 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
@@ -1340,9 +1344,9 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
1340 1344
1341 skb = xenvif_alloc_skb(data_len); 1345 skb = xenvif_alloc_skb(data_len);
1342 if (unlikely(skb == NULL)) { 1346 if (unlikely(skb == NULL)) {
1343 netdev_dbg(vif->dev, 1347 netdev_dbg(queue->vif->dev,
1344 "Can't allocate a skb in start_xmit.\n"); 1348 "Can't allocate a skb in start_xmit.\n");
1345 xenvif_tx_err(vif, &txreq, idx); 1349 xenvif_tx_err(queue, &txreq, idx);
1346 break; 1350 break;
1347 } 1351 }
1348 1352
@@ -1350,7 +1354,7 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
1350 struct xen_netif_extra_info *gso; 1354 struct xen_netif_extra_info *gso;
1351 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 1355 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1352 1356
1353 if (xenvif_set_skb_gso(vif, skb, gso)) { 1357 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1354 /* Failure in xenvif_set_skb_gso is fatal. */ 1358 /* Failure in xenvif_set_skb_gso is fatal. */
1355 kfree_skb(skb); 1359 kfree_skb(skb);
1356 break; 1360 break;
@@ -1360,18 +1364,18 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
1360 XENVIF_TX_CB(skb)->pending_idx = pending_idx; 1364 XENVIF_TX_CB(skb)->pending_idx = pending_idx;
1361 1365
1362 __skb_put(skb, data_len); 1366 __skb_put(skb, data_len);
1363 vif->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; 1367 queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
1364 vif->tx_copy_ops[*copy_ops].source.domid = vif->domid; 1368 queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
1365 vif->tx_copy_ops[*copy_ops].source.offset = txreq.offset; 1369 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
1366 1370
1367 vif->tx_copy_ops[*copy_ops].dest.u.gmfn = 1371 queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
1368 virt_to_mfn(skb->data); 1372 virt_to_mfn(skb->data);
1369 vif->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; 1373 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
1370 vif->tx_copy_ops[*copy_ops].dest.offset = 1374 queue->tx_copy_ops[*copy_ops].dest.offset =
1371 offset_in_page(skb->data); 1375 offset_in_page(skb->data);
1372 1376
1373 vif->tx_copy_ops[*copy_ops].len = data_len; 1377 queue->tx_copy_ops[*copy_ops].len = data_len;
1374 vif->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; 1378 queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
1375 1379
1376 (*copy_ops)++; 1380 (*copy_ops)++;
1377 1381
@@ -1380,42 +1384,42 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
1380 skb_shinfo(skb)->nr_frags++; 1384 skb_shinfo(skb)->nr_frags++;
1381 frag_set_pending_idx(&skb_shinfo(skb)->frags[0], 1385 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1382 pending_idx); 1386 pending_idx);
1383 xenvif_tx_create_map_op(vif, pending_idx, &txreq, gop); 1387 xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
1384 gop++; 1388 gop++;
1385 } else { 1389 } else {
1386 frag_set_pending_idx(&skb_shinfo(skb)->frags[0], 1390 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1387 INVALID_PENDING_IDX); 1391 INVALID_PENDING_IDX);
1388 memcpy(&vif->pending_tx_info[pending_idx].req, &txreq, 1392 memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
1389 sizeof(txreq)); 1393 sizeof(txreq));
1390 } 1394 }
1391 1395
1392 vif->pending_cons++; 1396 queue->pending_cons++;
1393 1397
1394 request_gop = xenvif_get_requests(vif, skb, txfrags, gop); 1398 request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
1395 if (request_gop == NULL) { 1399 if (request_gop == NULL) {
1396 kfree_skb(skb); 1400 kfree_skb(skb);
1397 xenvif_tx_err(vif, &txreq, idx); 1401 xenvif_tx_err(queue, &txreq, idx);
1398 break; 1402 break;
1399 } 1403 }
1400 gop = request_gop; 1404 gop = request_gop;
1401 1405
1402 __skb_queue_tail(&vif->tx_queue, skb); 1406 __skb_queue_tail(&queue->tx_queue, skb);
1403 1407
1404 vif->tx.req_cons = idx; 1408 queue->tx.req_cons = idx;
1405 1409
1406 if (((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops)) || 1410 if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
1407 (*copy_ops >= ARRAY_SIZE(vif->tx_copy_ops))) 1411 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1408 break; 1412 break;
1409 } 1413 }
1410 1414
1411 (*map_ops) = gop - vif->tx_map_ops; 1415 (*map_ops) = gop - queue->tx_map_ops;
1412 return; 1416 return;
1413} 1417}
1414 1418
1415/* Consolidate skb with a frag_list into a brand new one with local pages on 1419/* Consolidate skb with a frag_list into a brand new one with local pages on
1416 * frags. Returns 0 or -ENOMEM if can't allocate new pages. 1420 * frags. Returns 0 or -ENOMEM if can't allocate new pages.
1417 */ 1421 */
1418static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb) 1422static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1419{ 1423{
1420 unsigned int offset = skb_headlen(skb); 1424 unsigned int offset = skb_headlen(skb);
1421 skb_frag_t frags[MAX_SKB_FRAGS]; 1425 skb_frag_t frags[MAX_SKB_FRAGS];
@@ -1423,10 +1427,10 @@ static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
1423 struct ubuf_info *uarg; 1427 struct ubuf_info *uarg;
1424 struct sk_buff *nskb = skb_shinfo(skb)->frag_list; 1428 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1425 1429
1426 vif->tx_zerocopy_sent += 2; 1430 queue->stats.tx_zerocopy_sent += 2;
1427 vif->tx_frag_overflow++; 1431 queue->stats.tx_frag_overflow++;
1428 1432
1429 xenvif_fill_frags(vif, nskb); 1433 xenvif_fill_frags(queue, nskb);
1430 /* Subtract frags size, we will correct it later */ 1434 /* Subtract frags size, we will correct it later */
1431 skb->truesize -= skb->data_len; 1435 skb->truesize -= skb->data_len;
1432 skb->len += nskb->len; 1436 skb->len += nskb->len;
@@ -1478,37 +1482,37 @@ static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
1478 return 0; 1482 return 0;
1479} 1483}
1480 1484
1481static int xenvif_tx_submit(struct xenvif *vif) 1485static int xenvif_tx_submit(struct xenvif_queue *queue)
1482{ 1486{
1483 struct gnttab_map_grant_ref *gop_map = vif->tx_map_ops; 1487 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1484 struct gnttab_copy *gop_copy = vif->tx_copy_ops; 1488 struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1485 struct sk_buff *skb; 1489 struct sk_buff *skb;
1486 int work_done = 0; 1490 int work_done = 0;
1487 1491
1488 while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) { 1492 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1489 struct xen_netif_tx_request *txp; 1493 struct xen_netif_tx_request *txp;
1490 u16 pending_idx; 1494 u16 pending_idx;
1491 unsigned data_len; 1495 unsigned data_len;
1492 1496
1493 pending_idx = XENVIF_TX_CB(skb)->pending_idx; 1497 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1494 txp = &vif->pending_tx_info[pending_idx].req; 1498 txp = &queue->pending_tx_info[pending_idx].req;
1495 1499
1496 /* Check the remap error code. */ 1500 /* Check the remap error code. */
1497 if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map, &gop_copy))) { 1501 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1498 skb_shinfo(skb)->nr_frags = 0; 1502 skb_shinfo(skb)->nr_frags = 0;
1499 kfree_skb(skb); 1503 kfree_skb(skb);
1500 continue; 1504 continue;
1501 } 1505 }
1502 1506
1503 data_len = skb->len; 1507 data_len = skb->len;
1504 callback_param(vif, pending_idx).ctx = NULL; 1508 callback_param(queue, pending_idx).ctx = NULL;
1505 if (data_len < txp->size) { 1509 if (data_len < txp->size) {
1506 /* Append the packet payload as a fragment. */ 1510 /* Append the packet payload as a fragment. */
1507 txp->offset += data_len; 1511 txp->offset += data_len;
1508 txp->size -= data_len; 1512 txp->size -= data_len;
1509 } else { 1513 } else {
1510 /* Schedule a response immediately. */ 1514 /* Schedule a response immediately. */
1511 xenvif_idx_release(vif, pending_idx, 1515 xenvif_idx_release(queue, pending_idx,
1512 XEN_NETIF_RSP_OKAY); 1516 XEN_NETIF_RSP_OKAY);
1513 } 1517 }
1514 1518
@@ -1517,12 +1521,12 @@ static int xenvif_tx_submit(struct xenvif *vif)
1517 else if (txp->flags & XEN_NETTXF_data_validated) 1521 else if (txp->flags & XEN_NETTXF_data_validated)
1518 skb->ip_summed = CHECKSUM_UNNECESSARY; 1522 skb->ip_summed = CHECKSUM_UNNECESSARY;
1519 1523
1520 xenvif_fill_frags(vif, skb); 1524 xenvif_fill_frags(queue, skb);
1521 1525
1522 if (unlikely(skb_has_frag_list(skb))) { 1526 if (unlikely(skb_has_frag_list(skb))) {
1523 if (xenvif_handle_frag_list(vif, skb)) { 1527 if (xenvif_handle_frag_list(queue, skb)) {
1524 if (net_ratelimit()) 1528 if (net_ratelimit())
1525 netdev_err(vif->dev, 1529 netdev_err(queue->vif->dev,
1526 "Not enough memory to consolidate frag_list!\n"); 1530 "Not enough memory to consolidate frag_list!\n");
1527 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1531 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1528 kfree_skb(skb); 1532 kfree_skb(skb);
@@ -1535,12 +1539,12 @@ static int xenvif_tx_submit(struct xenvif *vif)
1535 __pskb_pull_tail(skb, target - skb_headlen(skb)); 1539 __pskb_pull_tail(skb, target - skb_headlen(skb));
1536 } 1540 }
1537 1541
1538 skb->dev = vif->dev; 1542 skb->dev = queue->vif->dev;
1539 skb->protocol = eth_type_trans(skb, skb->dev); 1543 skb->protocol = eth_type_trans(skb, skb->dev);
1540 skb_reset_network_header(skb); 1544 skb_reset_network_header(skb);
1541 1545
1542 if (checksum_setup(vif, skb)) { 1546 if (checksum_setup(queue, skb)) {
1543 netdev_dbg(vif->dev, 1547 netdev_dbg(queue->vif->dev,
1544 "Can't setup checksum in net_tx_action\n"); 1548 "Can't setup checksum in net_tx_action\n");
1545 /* We have to set this flag to trigger the callback */ 1549 /* We have to set this flag to trigger the callback */
1546 if (skb_shinfo(skb)->destructor_arg) 1550 if (skb_shinfo(skb)->destructor_arg)
@@ -1565,8 +1569,8 @@ static int xenvif_tx_submit(struct xenvif *vif)
1565 DIV_ROUND_UP(skb->len - hdrlen, mss); 1569 DIV_ROUND_UP(skb->len - hdrlen, mss);
1566 } 1570 }
1567 1571
1568 vif->dev->stats.rx_bytes += skb->len; 1572 queue->stats.rx_bytes += skb->len;
1569 vif->dev->stats.rx_packets++; 1573 queue->stats.rx_packets++;
1570 1574
1571 work_done++; 1575 work_done++;
1572 1576
@@ -1577,7 +1581,7 @@ static int xenvif_tx_submit(struct xenvif *vif)
1577 */ 1581 */
1578 if (skb_shinfo(skb)->destructor_arg) { 1582 if (skb_shinfo(skb)->destructor_arg) {
1579 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1583 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1580 vif->tx_zerocopy_sent++; 1584 queue->stats.tx_zerocopy_sent++;
1581 } 1585 }
1582 1586
1583 netif_receive_skb(skb); 1587 netif_receive_skb(skb);
@@ -1590,47 +1594,47 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1590{ 1594{
1591 unsigned long flags; 1595 unsigned long flags;
1592 pending_ring_idx_t index; 1596 pending_ring_idx_t index;
1593 struct xenvif *vif = ubuf_to_vif(ubuf); 1597 struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1594 1598
1595 /* This is the only place where we grab this lock, to protect callbacks 1599 /* This is the only place where we grab this lock, to protect callbacks
1596 * from each other. 1600 * from each other.
1597 */ 1601 */
1598 spin_lock_irqsave(&vif->callback_lock, flags); 1602 spin_lock_irqsave(&queue->callback_lock, flags);
1599 do { 1603 do {
1600 u16 pending_idx = ubuf->desc; 1604 u16 pending_idx = ubuf->desc;
1601 ubuf = (struct ubuf_info *) ubuf->ctx; 1605 ubuf = (struct ubuf_info *) ubuf->ctx;
1602 BUG_ON(vif->dealloc_prod - vif->dealloc_cons >= 1606 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1603 MAX_PENDING_REQS); 1607 MAX_PENDING_REQS);
1604 index = pending_index(vif->dealloc_prod); 1608 index = pending_index(queue->dealloc_prod);
1605 vif->dealloc_ring[index] = pending_idx; 1609 queue->dealloc_ring[index] = pending_idx;
1606 /* Sync with xenvif_tx_dealloc_action: 1610 /* Sync with xenvif_tx_dealloc_action:
1607 * insert idx then incr producer. 1611 * insert idx then incr producer.
1608 */ 1612 */
1609 smp_wmb(); 1613 smp_wmb();
1610 vif->dealloc_prod++; 1614 queue->dealloc_prod++;
1611 } while (ubuf); 1615 } while (ubuf);
1612 wake_up(&vif->dealloc_wq); 1616 wake_up(&queue->dealloc_wq);
1613 spin_unlock_irqrestore(&vif->callback_lock, flags); 1617 spin_unlock_irqrestore(&queue->callback_lock, flags);
1614 1618
1615 if (likely(zerocopy_success)) 1619 if (likely(zerocopy_success))
1616 vif->tx_zerocopy_success++; 1620 queue->stats.tx_zerocopy_success++;
1617 else 1621 else
1618 vif->tx_zerocopy_fail++; 1622 queue->stats.tx_zerocopy_fail++;
1619} 1623}
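
The callback above publishes work to the dealloc ring with a "write the slot, then bump the producer" ordering (smp_wmb() here, smp_rmb() in xenvif_tx_dealloc_action() below). A standalone version of that handshake, expressed with C11 atomics instead of the kernel barriers, is sketched next; it assumes a single producer, whereas the driver additionally serialises concurrent callbacks with callback_lock.

#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 256   /* power of two so the index mask works */

struct dealloc_ring {
        uint16_t    slots[RING_SIZE];
        atomic_uint prod;             /* advanced by the callback side */
        atomic_uint cons;             /* advanced by the dealloc side */
};

static void ring_push(struct dealloc_ring *r, uint16_t idx)
{
        unsigned int p = atomic_load_explicit(&r->prod, memory_order_relaxed);

        r->slots[p & (RING_SIZE - 1)] = idx;
        /* Release: the slot write is visible before the new producer. */
        atomic_store_explicit(&r->prod, p + 1, memory_order_release);
}

static int ring_pop(struct dealloc_ring *r, uint16_t *idx)
{
        unsigned int c = atomic_load_explicit(&r->cons, memory_order_relaxed);
        /* Acquire pairs with the release store in ring_push(). */
        unsigned int p = atomic_load_explicit(&r->prod, memory_order_acquire);

        if (c == p)
                return 0;             /* nothing queued */
        *idx = r->slots[c & (RING_SIZE - 1)];
        atomic_store_explicit(&r->cons, c + 1, memory_order_relaxed);
        return 1;
}
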
1620 1624
1621static inline void xenvif_tx_dealloc_action(struct xenvif *vif) 1625static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1622{ 1626{
1623 struct gnttab_unmap_grant_ref *gop; 1627 struct gnttab_unmap_grant_ref *gop;
1624 pending_ring_idx_t dc, dp; 1628 pending_ring_idx_t dc, dp;
1625 u16 pending_idx, pending_idx_release[MAX_PENDING_REQS]; 1629 u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1626 unsigned int i = 0; 1630 unsigned int i = 0;
1627 1631
1628 dc = vif->dealloc_cons; 1632 dc = queue->dealloc_cons;
1629 gop = vif->tx_unmap_ops; 1633 gop = queue->tx_unmap_ops;
1630 1634
1631 /* Free up any grants we have finished using */ 1635 /* Free up any grants we have finished using */
1632 do { 1636 do {
1633 dp = vif->dealloc_prod; 1637 dp = queue->dealloc_prod;
1634 1638
1635 /* Ensure we see all indices enqueued by all 1639 /* Ensure we see all indices enqueued by all
1636 * xenvif_zerocopy_callback(). 1640 * xenvif_zerocopy_callback().
@@ -1638,38 +1642,38 @@ static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
1638 smp_rmb(); 1642 smp_rmb();
1639 1643
1640 while (dc != dp) { 1644 while (dc != dp) {
1641 BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS); 1645 BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
1642 pending_idx = 1646 pending_idx =
1643 vif->dealloc_ring[pending_index(dc++)]; 1647 queue->dealloc_ring[pending_index(dc++)];
1644 1648
1645 pending_idx_release[gop-vif->tx_unmap_ops] = 1649 pending_idx_release[gop-queue->tx_unmap_ops] =
1646 pending_idx; 1650 pending_idx;
1647 vif->pages_to_unmap[gop-vif->tx_unmap_ops] = 1651 queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
1648 vif->mmap_pages[pending_idx]; 1652 queue->mmap_pages[pending_idx];
1649 gnttab_set_unmap_op(gop, 1653 gnttab_set_unmap_op(gop,
1650 idx_to_kaddr(vif, pending_idx), 1654 idx_to_kaddr(queue, pending_idx),
1651 GNTMAP_host_map, 1655 GNTMAP_host_map,
1652 vif->grant_tx_handle[pending_idx]); 1656 queue->grant_tx_handle[pending_idx]);
1653 xenvif_grant_handle_reset(vif, pending_idx); 1657 xenvif_grant_handle_reset(queue, pending_idx);
1654 ++gop; 1658 ++gop;
1655 } 1659 }
1656 1660
1657 } while (dp != vif->dealloc_prod); 1661 } while (dp != queue->dealloc_prod);
1658 1662
1659 vif->dealloc_cons = dc; 1663 queue->dealloc_cons = dc;
1660 1664
1661 if (gop - vif->tx_unmap_ops > 0) { 1665 if (gop - queue->tx_unmap_ops > 0) {
1662 int ret; 1666 int ret;
1663 ret = gnttab_unmap_refs(vif->tx_unmap_ops, 1667 ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1664 NULL, 1668 NULL,
1665 vif->pages_to_unmap, 1669 queue->pages_to_unmap,
1666 gop - vif->tx_unmap_ops); 1670 gop - queue->tx_unmap_ops);
1667 if (ret) { 1671 if (ret) {
1668 netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n", 1672 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
1669 gop - vif->tx_unmap_ops, ret); 1673 gop - queue->tx_unmap_ops, ret);
1670 for (i = 0; i < gop - vif->tx_unmap_ops; ++i) { 1674 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1671 if (gop[i].status != GNTST_okay) 1675 if (gop[i].status != GNTST_okay)
1672 netdev_err(vif->dev, 1676 netdev_err(queue->vif->dev,
1673 " host_addr: %llx handle: %x status: %d\n", 1677 " host_addr: %llx handle: %x status: %d\n",
1674 gop[i].host_addr, 1678 gop[i].host_addr,
1675 gop[i].handle, 1679 gop[i].handle,
@@ -1679,91 +1683,91 @@ static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
1679 } 1683 }
1680 } 1684 }
1681 1685
1682 for (i = 0; i < gop - vif->tx_unmap_ops; ++i) 1686 for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1683 xenvif_idx_release(vif, pending_idx_release[i], 1687 xenvif_idx_release(queue, pending_idx_release[i],
1684 XEN_NETIF_RSP_OKAY); 1688 XEN_NETIF_RSP_OKAY);
1685} 1689}
1686 1690
1687 1691
1688/* Called after netfront has transmitted */ 1692/* Called after netfront has transmitted */
1689int xenvif_tx_action(struct xenvif *vif, int budget) 1693int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1690{ 1694{
1691 unsigned nr_mops, nr_cops = 0; 1695 unsigned nr_mops, nr_cops = 0;
1692 int work_done, ret; 1696 int work_done, ret;
1693 1697
1694 if (unlikely(!tx_work_todo(vif))) 1698 if (unlikely(!tx_work_todo(queue)))
1695 return 0; 1699 return 0;
1696 1700
1697 xenvif_tx_build_gops(vif, budget, &nr_cops, &nr_mops); 1701 xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1698 1702
1699 if (nr_cops == 0) 1703 if (nr_cops == 0)
1700 return 0; 1704 return 0;
1701 1705
1702 gnttab_batch_copy(vif->tx_copy_ops, nr_cops); 1706 gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1703 if (nr_mops != 0) { 1707 if (nr_mops != 0) {
1704 ret = gnttab_map_refs(vif->tx_map_ops, 1708 ret = gnttab_map_refs(queue->tx_map_ops,
1705 NULL, 1709 NULL,
1706 vif->pages_to_map, 1710 queue->pages_to_map,
1707 nr_mops); 1711 nr_mops);
1708 BUG_ON(ret); 1712 BUG_ON(ret);
1709 } 1713 }
1710 1714
1711 work_done = xenvif_tx_submit(vif); 1715 work_done = xenvif_tx_submit(queue);
1712 1716
1713 return work_done; 1717 return work_done;
1714} 1718}
1715 1719
1716static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx, 1720static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1717 u8 status) 1721 u8 status)
1718{ 1722{
1719 struct pending_tx_info *pending_tx_info; 1723 struct pending_tx_info *pending_tx_info;
1720 pending_ring_idx_t index; 1724 pending_ring_idx_t index;
1721 unsigned long flags; 1725 unsigned long flags;
1722 1726
1723 pending_tx_info = &vif->pending_tx_info[pending_idx]; 1727 pending_tx_info = &queue->pending_tx_info[pending_idx];
1724 spin_lock_irqsave(&vif->response_lock, flags); 1728 spin_lock_irqsave(&queue->response_lock, flags);
1725 make_tx_response(vif, &pending_tx_info->req, status); 1729 make_tx_response(queue, &pending_tx_info->req, status);
1726 index = pending_index(vif->pending_prod); 1730 index = pending_index(queue->pending_prod);
1727 vif->pending_ring[index] = pending_idx; 1731 queue->pending_ring[index] = pending_idx;
1728 /* TX shouldn't use the index before we give it back here */ 1732 /* TX shouldn't use the index before we give it back here */
1729 mb(); 1733 mb();
1730 vif->pending_prod++; 1734 queue->pending_prod++;
1731 spin_unlock_irqrestore(&vif->response_lock, flags); 1735 spin_unlock_irqrestore(&queue->response_lock, flags);
1732} 1736}
1733 1737
1734 1738
1735static void make_tx_response(struct xenvif *vif, 1739static void make_tx_response(struct xenvif_queue *queue,
1736 struct xen_netif_tx_request *txp, 1740 struct xen_netif_tx_request *txp,
1737 s8 st) 1741 s8 st)
1738{ 1742{
1739 RING_IDX i = vif->tx.rsp_prod_pvt; 1743 RING_IDX i = queue->tx.rsp_prod_pvt;
1740 struct xen_netif_tx_response *resp; 1744 struct xen_netif_tx_response *resp;
1741 int notify; 1745 int notify;
1742 1746
1743 resp = RING_GET_RESPONSE(&vif->tx, i); 1747 resp = RING_GET_RESPONSE(&queue->tx, i);
1744 resp->id = txp->id; 1748 resp->id = txp->id;
1745 resp->status = st; 1749 resp->status = st;
1746 1750
1747 if (txp->flags & XEN_NETTXF_extra_info) 1751 if (txp->flags & XEN_NETTXF_extra_info)
1748 RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL; 1752 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1749 1753
1750 vif->tx.rsp_prod_pvt = ++i; 1754 queue->tx.rsp_prod_pvt = ++i;
1751 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify); 1755 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1752 if (notify) 1756 if (notify)
1753 notify_remote_via_irq(vif->tx_irq); 1757 notify_remote_via_irq(queue->tx_irq);
1754} 1758}
1755 1759
1756static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, 1760static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
1757 u16 id, 1761 u16 id,
1758 s8 st, 1762 s8 st,
1759 u16 offset, 1763 u16 offset,
1760 u16 size, 1764 u16 size,
1761 u16 flags) 1765 u16 flags)
1762{ 1766{
1763 RING_IDX i = vif->rx.rsp_prod_pvt; 1767 RING_IDX i = queue->rx.rsp_prod_pvt;
1764 struct xen_netif_rx_response *resp; 1768 struct xen_netif_rx_response *resp;
1765 1769
1766 resp = RING_GET_RESPONSE(&vif->rx, i); 1770 resp = RING_GET_RESPONSE(&queue->rx, i);
1767 resp->offset = offset; 1771 resp->offset = offset;
1768 resp->flags = flags; 1772 resp->flags = flags;
1769 resp->id = id; 1773 resp->id = id;
@@ -1771,26 +1775,26 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1771 if (st < 0) 1775 if (st < 0)
1772 resp->status = (s16)st; 1776 resp->status = (s16)st;
1773 1777
1774 vif->rx.rsp_prod_pvt = ++i; 1778 queue->rx.rsp_prod_pvt = ++i;
1775 1779
1776 return resp; 1780 return resp;
1777} 1781}
1778 1782
1779void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx) 1783void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1780{ 1784{
1781 int ret; 1785 int ret;
1782 struct gnttab_unmap_grant_ref tx_unmap_op; 1786 struct gnttab_unmap_grant_ref tx_unmap_op;
1783 1787
1784 gnttab_set_unmap_op(&tx_unmap_op, 1788 gnttab_set_unmap_op(&tx_unmap_op,
1785 idx_to_kaddr(vif, pending_idx), 1789 idx_to_kaddr(queue, pending_idx),
1786 GNTMAP_host_map, 1790 GNTMAP_host_map,
1787 vif->grant_tx_handle[pending_idx]); 1791 queue->grant_tx_handle[pending_idx]);
1788 xenvif_grant_handle_reset(vif, pending_idx); 1792 xenvif_grant_handle_reset(queue, pending_idx);
1789 1793
1790 ret = gnttab_unmap_refs(&tx_unmap_op, NULL, 1794 ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1791 &vif->mmap_pages[pending_idx], 1); 1795 &queue->mmap_pages[pending_idx], 1);
1792 if (ret) { 1796 if (ret) {
1793 netdev_err(vif->dev, 1797 netdev_err(queue->vif->dev,
1794 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n", 1798 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
1795 ret, 1799 ret,
1796 pending_idx, 1800 pending_idx,
@@ -1800,41 +1804,40 @@ void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
1800 BUG(); 1804 BUG();
1801 } 1805 }
1802 1806
1803 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); 1807 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
1804} 1808}
1805 1809
1806static inline int rx_work_todo(struct xenvif *vif) 1810static inline int rx_work_todo(struct xenvif_queue *queue)
1807{ 1811{
1808 return (!skb_queue_empty(&vif->rx_queue) && 1812 return (!skb_queue_empty(&queue->rx_queue) &&
1809 xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) || 1813 xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots)) ||
1810 vif->rx_queue_purge; 1814 queue->rx_queue_purge;
1811} 1815}
1812 1816
1813static inline int tx_work_todo(struct xenvif *vif) 1817static inline int tx_work_todo(struct xenvif_queue *queue)
1814{ 1818{
1815 1819 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1816 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
1817 return 1; 1820 return 1;
1818 1821
1819 return 0; 1822 return 0;
1820} 1823}
1821 1824
1822static inline bool tx_dealloc_work_todo(struct xenvif *vif) 1825static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1823{ 1826{
1824 return vif->dealloc_cons != vif->dealloc_prod; 1827 return queue->dealloc_cons != queue->dealloc_prod;
1825} 1828}
1826 1829
1827void xenvif_unmap_frontend_rings(struct xenvif *vif) 1830void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
1828{ 1831{
1829 if (vif->tx.sring) 1832 if (queue->tx.sring)
1830 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), 1833 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1831 vif->tx.sring); 1834 queue->tx.sring);
1832 if (vif->rx.sring) 1835 if (queue->rx.sring)
1833 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), 1836 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1834 vif->rx.sring); 1837 queue->rx.sring);
1835} 1838}
1836 1839
1837int xenvif_map_frontend_rings(struct xenvif *vif, 1840int xenvif_map_frontend_rings(struct xenvif_queue *queue,
1838 grant_ref_t tx_ring_ref, 1841 grant_ref_t tx_ring_ref,
1839 grant_ref_t rx_ring_ref) 1842 grant_ref_t rx_ring_ref)
1840{ 1843{
@@ -1844,85 +1847,78 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
1844 1847
1845 int err = -ENOMEM; 1848 int err = -ENOMEM;
1846 1849
1847 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), 1850 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1848 tx_ring_ref, &addr); 1851 tx_ring_ref, &addr);
1849 if (err) 1852 if (err)
1850 goto err; 1853 goto err;
1851 1854
1852 txs = (struct xen_netif_tx_sring *)addr; 1855 txs = (struct xen_netif_tx_sring *)addr;
1853 BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE); 1856 BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);
1854 1857
1855 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), 1858 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1856 rx_ring_ref, &addr); 1859 rx_ring_ref, &addr);
1857 if (err) 1860 if (err)
1858 goto err; 1861 goto err;
1859 1862
1860 rxs = (struct xen_netif_rx_sring *)addr; 1863 rxs = (struct xen_netif_rx_sring *)addr;
1861 BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE); 1864 BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
1862 1865
1863 return 0; 1866 return 0;
1864 1867
1865err: 1868err:
1866 xenvif_unmap_frontend_rings(vif); 1869 xenvif_unmap_frontend_rings(queue);
1867 return err; 1870 return err;
1868} 1871}
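xenvif_map_frontend_rings() and its unmap counterpart now operate on one queue, so every queue maps its own pair of shared rings. A hedged sketch of the presumed call site (the real per-queue connect logic lives in interface.c and is not part of this hunk):

#include "common.h"

/* Sketch of the presumed per-queue connect flow; the grant references
 * come from the frontend via xenstore (see connect_rings() in xenbus.c
 * below) and error handling is trimmed.
 */
static int example_connect_queue(struct xenvif_queue *queue,
				 grant_ref_t tx_ring_ref,
				 grant_ref_t rx_ring_ref)
{
	int err;

	err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
	if (err)
		return err;

	/* ...bind the tx/rx event channels, start NAPI and the kthreads;
	 * any failure past this point must call
	 * xenvif_unmap_frontend_rings(queue) to undo the mapping.
	 */
	return 0;
}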
1869 1872
1870void xenvif_stop_queue(struct xenvif *vif) 1873static void xenvif_start_queue(struct xenvif_queue *queue)
1871{
1872 if (!vif->can_queue)
1873 return;
1874
1875 netif_stop_queue(vif->dev);
1876}
1877
1878static void xenvif_start_queue(struct xenvif *vif)
1879{ 1874{
1880 if (xenvif_schedulable(vif)) 1875 if (xenvif_schedulable(queue->vif))
1881 netif_wake_queue(vif->dev); 1876 xenvif_wake_queue(queue);
1882} 1877}
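xenvif_stop_queue() is dropped and xenvif_start_queue() now wakes a single netdev TX queue through xenvif_wake_queue(); the RX kthread below likewise tests xenvif_queue_stopped(). Those helpers are defined elsewhere in the patch (interface.c, not shown here); presumably they are thin wrappers over the multi-queue netdev TX API, roughly:

#include <linux/netdevice.h>
#include "common.h"

/* Sketch only, assuming queue->id maps 1:1 onto the netdev TX queue index. */
static bool example_queue_stopped(struct xenvif_queue *queue)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(queue->vif->dev, queue->id);

	return netif_tx_queue_stopped(txq);
}

static void example_wake_queue(struct xenvif_queue *queue)
{
	netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
}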
1883 1878
1884int xenvif_kthread_guest_rx(void *data) 1879int xenvif_kthread_guest_rx(void *data)
1885{ 1880{
1886 struct xenvif *vif = data; 1881 struct xenvif_queue *queue = data;
1887 struct sk_buff *skb; 1882 struct sk_buff *skb;
1888 1883
1889 while (!kthread_should_stop()) { 1884 while (!kthread_should_stop()) {
1890 wait_event_interruptible(vif->wq, 1885 wait_event_interruptible(queue->wq,
1891 rx_work_todo(vif) || 1886 rx_work_todo(queue) ||
1892 vif->disabled || 1887 queue->vif->disabled ||
1893 kthread_should_stop()); 1888 kthread_should_stop());
1894 1889
1895 /* This frontend is found to be rogue, disable it in 1890 /* This frontend is found to be rogue, disable it in
1896 * kthread context. Currently this is only set when 1891 * kthread context. Currently this is only set when
1897 * netback finds out frontend sends malformed packet, 1892 * netback finds out frontend sends malformed packet,
1898 * but we cannot disable the interface in softirq 1893 * but we cannot disable the interface in softirq
1899 * context so we defer it here. 1894 * context so we defer it here, if this thread is
1895 * associated with queue 0.
1900 */ 1896 */
1901 if (unlikely(vif->disabled && netif_carrier_ok(vif->dev))) 1897 if (unlikely(queue->vif->disabled && netif_carrier_ok(queue->vif->dev) && queue->id == 0))
1902 xenvif_carrier_off(vif); 1898 xenvif_carrier_off(queue->vif);
1903 1899
1904 if (kthread_should_stop()) 1900 if (kthread_should_stop())
1905 break; 1901 break;
1906 1902
1907 if (vif->rx_queue_purge) { 1903 if (queue->rx_queue_purge) {
1908 skb_queue_purge(&vif->rx_queue); 1904 skb_queue_purge(&queue->rx_queue);
1909 vif->rx_queue_purge = false; 1905 queue->rx_queue_purge = false;
1910 } 1906 }
1911 1907
1912 if (!skb_queue_empty(&vif->rx_queue)) 1908 if (!skb_queue_empty(&queue->rx_queue))
1913 xenvif_rx_action(vif); 1909 xenvif_rx_action(queue);
1914 1910
1915 if (skb_queue_empty(&vif->rx_queue) && 1911 if (skb_queue_empty(&queue->rx_queue) &&
1916 netif_queue_stopped(vif->dev)) { 1912 xenvif_queue_stopped(queue)) {
1917 del_timer_sync(&vif->wake_queue); 1913 del_timer_sync(&queue->wake_queue);
1918 xenvif_start_queue(vif); 1914 xenvif_start_queue(queue);
1919 } 1915 }
1920 1916
1921 cond_resched(); 1917 cond_resched();
1922 } 1918 }
1923 1919
1924 /* Bin any remaining skbs */ 1920 /* Bin any remaining skbs */
1925 while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) 1921 while ((skb = skb_dequeue(&queue->rx_queue)) != NULL)
1926 dev_kfree_skb(skb); 1922 dev_kfree_skb(skb);
1927 1923
1928 return 0; 1924 return 0;
@@ -1930,22 +1926,22 @@ int xenvif_kthread_guest_rx(void *data)
1930 1926
1931int xenvif_dealloc_kthread(void *data) 1927int xenvif_dealloc_kthread(void *data)
1932{ 1928{
1933 struct xenvif *vif = data; 1929 struct xenvif_queue *queue = data;
1934 1930
1935 while (!kthread_should_stop()) { 1931 while (!kthread_should_stop()) {
1936 wait_event_interruptible(vif->dealloc_wq, 1932 wait_event_interruptible(queue->dealloc_wq,
1937 tx_dealloc_work_todo(vif) || 1933 tx_dealloc_work_todo(queue) ||
1938 kthread_should_stop()); 1934 kthread_should_stop());
1939 if (kthread_should_stop()) 1935 if (kthread_should_stop())
1940 break; 1936 break;
1941 1937
1942 xenvif_tx_dealloc_action(vif); 1938 xenvif_tx_dealloc_action(queue);
1943 cond_resched(); 1939 cond_resched();
1944 } 1940 }
1945 1941
 1946 /* Unmap anything remaining */ 1942 /* Unmap anything remaining */
1947 if (tx_dealloc_work_todo(vif)) 1943 if (tx_dealloc_work_todo(queue))
1948 xenvif_tx_dealloc_action(vif); 1944 xenvif_tx_dealloc_action(queue);
1949 1945
1950 return 0; 1946 return 0;
1951} 1947}
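Both kthreads now take a struct xenvif_queue * as their data argument, so every queue gets its own guest-RX and dealloc thread instead of one pair per interface. A hedged sketch of how such a thread would be spawned per queue (presumably from xenvif_connect() in interface.c; the thread-name format and the queue->task field are assumptions, not quoted from the patch):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include "common.h"

/* Sketch: create and start one guest-RX kthread for a queue, named after
 * queue->name. The real connect path also creates the dealloc thread and
 * binds the per-queue tx/rx interrupts; error handling is trimmed.
 */
static int example_start_rx_thread(struct xenvif_queue *queue)
{
	struct task_struct *task;

	task = kthread_create(xenvif_kthread_guest_rx, queue,
			      "%s-guest-rx", queue->name);
	if (IS_ERR(task))
		return PTR_ERR(task);

	queue->task = task;	/* assumed per-queue field */
	wake_up_process(task);
	return 0;
}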
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 7a206cffb062..358602f55afa 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -19,6 +19,8 @@
19*/ 19*/
20 20
21#include "common.h" 21#include "common.h"
22#include <linux/vmalloc.h>
23#include <linux/rtnetlink.h>
22 24
23struct backend_info { 25struct backend_info {
24 struct xenbus_device *dev; 26 struct xenbus_device *dev;
@@ -34,8 +36,9 @@ struct backend_info {
34 u8 have_hotplug_status_watch:1; 36 u8 have_hotplug_status_watch:1;
35}; 37};
36 38
37static int connect_rings(struct backend_info *); 39static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
38static void connect(struct backend_info *); 40static void connect(struct backend_info *be);
41static int read_xenbus_vif_flags(struct backend_info *be);
39static void backend_create_xenvif(struct backend_info *be); 42static void backend_create_xenvif(struct backend_info *be);
40static void unregister_hotplug_status_watch(struct backend_info *be); 43static void unregister_hotplug_status_watch(struct backend_info *be);
41static void set_backend_state(struct backend_info *be, 44static void set_backend_state(struct backend_info *be,
@@ -485,10 +488,10 @@ static void connect(struct backend_info *be)
485{ 488{
486 int err; 489 int err;
487 struct xenbus_device *dev = be->dev; 490 struct xenbus_device *dev = be->dev;
488 491 unsigned long credit_bytes, credit_usec;
489 err = connect_rings(be); 492 unsigned int queue_index;
490 if (err) 493 unsigned int requested_num_queues = 1;
491 return; 494 struct xenvif_queue *queue;
492 495
493 err = xen_net_read_mac(dev, be->vif->fe_dev_addr); 496 err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
494 if (err) { 497 if (err) {
@@ -496,9 +499,34 @@ static void connect(struct backend_info *be)
496 return; 499 return;
497 } 500 }
498 501
499 xen_net_read_rate(dev, &be->vif->credit_bytes, 502 xen_net_read_rate(dev, &credit_bytes, &credit_usec);
500 &be->vif->credit_usec); 503 read_xenbus_vif_flags(be);
501 be->vif->remaining_credit = be->vif->credit_bytes; 504
505 be->vif->queues = vzalloc(requested_num_queues *
506 sizeof(struct xenvif_queue));
507 rtnl_lock();
508 netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
509 rtnl_unlock();
510
511 for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
512 queue = &be->vif->queues[queue_index];
513 queue->vif = be->vif;
514 queue->id = queue_index;
515 snprintf(queue->name, sizeof(queue->name), "%s-q%u",
516 be->vif->dev->name, queue->id);
517
518 err = xenvif_init_queue(queue);
519 if (err)
520 goto err;
521
522 queue->remaining_credit = credit_bytes;
523
524 err = connect_rings(be, queue);
525 if (err)
526 goto err;
527 }
528
529 xenvif_carrier_on(be->vif);
502 530
503 unregister_hotplug_status_watch(be); 531 unregister_hotplug_status_watch(be);
504 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, 532 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
@@ -507,18 +535,26 @@ static void connect(struct backend_info *be)
507 if (!err) 535 if (!err)
508 be->have_hotplug_status_watch = 1; 536 be->have_hotplug_status_watch = 1;
509 537
510 netif_wake_queue(be->vif->dev); 538 netif_tx_wake_all_queues(be->vif->dev);
539
540 return;
541
542err:
543 vfree(be->vif->queues);
544 be->vif->queues = NULL;
545 rtnl_lock();
546 netif_set_real_num_tx_queues(be->vif->dev, 0);
547 rtnl_unlock();
548 return;
511} 549}
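connect() still hard-codes requested_num_queues = 1; negotiating the queue count with the frontend is left to a follow-up change. For illustration only, such a negotiation would presumably read a count from the frontend's xenstore area and clamp it to a backend limit. The key name and the max_queues parameter below are assumptions, not part of this patch:

#include <linux/kernel.h>
#include <xen/xenbus.h>

/* Hypothetical sketch of frontend queue-count negotiation. */
static unsigned int example_read_num_queues(struct xenbus_device *dev,
					    unsigned int max_queues)
{
	unsigned int requested;

	if (xenbus_scanf(XBT_NIL, dev->otherend,
			 "multi-queue-num-queues", "%u", &requested) < 0)
		requested = 1;	/* frontend did not ask for multi-queue */

	return min(requested, max_queues);
}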
512 550
513 551
514static int connect_rings(struct backend_info *be) 552static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
515{ 553{
516 struct xenvif *vif = be->vif;
517 struct xenbus_device *dev = be->dev; 554 struct xenbus_device *dev = be->dev;
518 unsigned long tx_ring_ref, rx_ring_ref; 555 unsigned long tx_ring_ref, rx_ring_ref;
519 unsigned int tx_evtchn, rx_evtchn, rx_copy; 556 unsigned int tx_evtchn, rx_evtchn;
520 int err; 557 int err;
521 int val;
522 558
523 err = xenbus_gather(XBT_NIL, dev->otherend, 559 err = xenbus_gather(XBT_NIL, dev->otherend,
524 "tx-ring-ref", "%lu", &tx_ring_ref, 560 "tx-ring-ref", "%lu", &tx_ring_ref,
@@ -546,6 +582,27 @@ static int connect_rings(struct backend_info *be)
546 rx_evtchn = tx_evtchn; 582 rx_evtchn = tx_evtchn;
547 } 583 }
548 584
585 /* Map the shared frame, irq etc. */
586 err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref,
587 tx_evtchn, rx_evtchn);
588 if (err) {
589 xenbus_dev_fatal(dev, err,
590 "mapping shared-frames %lu/%lu port tx %u rx %u",
591 tx_ring_ref, rx_ring_ref,
592 tx_evtchn, rx_evtchn);
593 return err;
594 }
595
596 return 0;
597}
598
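connect_rings() now reads the ring references and event channels for a single queue and hands them straight to xenvif_connect(). The context elided above keeps netback's usual event-channel negotiation: split tx/rx channels are preferred, with a single "event-channel" key as the fallback, which is why the visible tail assigns rx_evtchn = tx_evtchn. Roughly, and not the verbatim elided lines:

#include <xen/xenbus.h>

/* Sketch: read split event channels, falling back to a shared one. */
static int example_read_evtchns(struct xenbus_device *dev,
				unsigned int *tx_evtchn,
				unsigned int *rx_evtchn)
{
	int err;

	err = xenbus_gather(XBT_NIL, dev->otherend,
			    "event-channel-tx", "%u", tx_evtchn,
			    "event-channel-rx", "%u", rx_evtchn, NULL);
	if (err < 0) {
		err = xenbus_scanf(XBT_NIL, dev->otherend,
				   "event-channel", "%u", tx_evtchn);
		if (err < 0)
			return err;
		*rx_evtchn = *tx_evtchn;
	}

	return 0;
}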
599static int read_xenbus_vif_flags(struct backend_info *be)
600{
601 struct xenvif *vif = be->vif;
602 struct xenbus_device *dev = be->dev;
603 unsigned int rx_copy;
604 int err, val;
605
549 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u", 606 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
550 &rx_copy); 607 &rx_copy);
551 if (err == -ENOENT) { 608 if (err == -ENOENT) {
@@ -621,16 +678,6 @@ static int connect_rings(struct backend_info *be)
621 val = 0; 678 val = 0;
622 vif->ipv6_csum = !!val; 679 vif->ipv6_csum = !!val;
623 680
624 /* Map the shared frame, irq etc. */
625 err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
626 tx_evtchn, rx_evtchn);
627 if (err) {
628 xenbus_dev_fatal(dev, err,
629 "mapping shared-frames %lu/%lu port tx %u rx %u",
630 tx_ring_ref, rx_ring_ref,
631 tx_evtchn, rx_evtchn);
632 return err;
633 }
634 return 0; 681 return 0;
635} 682}
636 683
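read_xenbus_vif_flags() collects the per-interface feature flags that used to be read inline in connect_rings(); they stay on struct xenvif because they describe the whole interface rather than one queue. Each optional key follows the same pattern as the request-rx-copy and IPv6 checksum reads shown above, roughly (the key and field below are illustrative):

#include <xen/xenbus.h>
#include "common.h"

/* Sketch of the optional-flag pattern used throughout this function. */
static void example_read_flag(struct backend_info *be)
{
	struct xenvif *vif = be->vif;
	int val;

	if (xenbus_scanf(XBT_NIL, be->dev->otherend,
			 "feature-sg", "%d", &val) < 0)
		val = 0;	/* an absent key means the feature is not offered */
	vif->can_sg = !!val;
}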