Diffstat (limited to 'drivers/net/xen-netback/common.h')
 drivers/net/xen-netback/common.h | 107
 1 file changed, 72 insertions(+), 35 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 0d4a285cbd7e..4dd7c4a1923b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -99,22 +99,43 @@ struct xenvif_rx_meta {
  */
 #define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
 
-struct xenvif {
-	/* Unique identifier for this interface. */
-	domid_t domid;
-	unsigned int handle;
+/* Queue name is interface name with "-qNNN" appended */
+#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)
 
-	/* Is this interface disabled? True when backend discovers
-	 * frontend is rogue.
+/* IRQ name is queue name with "-tx" or "-rx" appended */
+#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+
+struct xenvif;
+
+struct xenvif_stats {
+	/* Stats fields to be updated per-queue.
+	 * A subset of struct net_device_stats that contains only the
+	 * fields that are updated in netback.c for each queue.
 	 */
-	bool disabled;
+	unsigned int rx_bytes;
+	unsigned int rx_packets;
+	unsigned int tx_bytes;
+	unsigned int tx_packets;
+
+	/* Additional stats used by xenvif */
+	unsigned long rx_gso_checksum_fixup;
+	unsigned long tx_zerocopy_sent;
+	unsigned long tx_zerocopy_success;
+	unsigned long tx_zerocopy_fail;
+	unsigned long tx_frag_overflow;
+};
+
+struct xenvif_queue { /* Per-queue data for xenvif */
+	unsigned int id; /* Queue ID, 0-based */
+	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
+	struct xenvif *vif; /* Parent VIF */
 
 	/* Use NAPI for guest TX */
 	struct napi_struct napi;
 	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
 	unsigned int tx_irq;
 	/* Only used when feature-split-event-channels = 1 */
-	char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
+	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
 	struct xen_netif_tx_back_ring tx;
 	struct sk_buff_head tx_queue;
 	struct page *mmap_pages[MAX_PENDING_REQS];
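
The new size macros budget for the worst case: IFNAMSIZ already counts the trailing NUL, so "-q" plus up to three decimal digits of queue ID fits in the five extra bytes of QUEUE_NAME_SIZE, and "-tx"/"-rx" fit in the three extra bytes of IRQ_NAME_SIZE. A minimal sketch of how the buffers might be filled in, assuming queue IDs stay below 1000 (the helper name is illustrative; the real call sites are in interface.c, outside this diff):

/* Hedged sketch, not from this patch: filling the fixed-size name
 * buffers declared above. Assumes queue->id < 1000 so "-qNNN" never
 * exceeds the 5 bytes QUEUE_NAME_SIZE reserves beyond IFNAMSIZ.
 */
static void example_format_queue_names(struct xenvif_queue *queue)
{
	snprintf(queue->name, QUEUE_NAME_SIZE, "%s-q%u",
		 queue->vif->dev->name, queue->id);
	/* "-tx"/"-rx" consume the 3 extra bytes of IRQ_NAME_SIZE */
	snprintf(queue->tx_irq_name, IRQ_NAME_SIZE, "%s-tx", queue->name);
	snprintf(queue->rx_irq_name, IRQ_NAME_SIZE, "%s-rx", queue->name);
}
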
@@ -150,7 +171,7 @@ struct xenvif {
 	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
 	unsigned int rx_irq;
 	/* Only used when feature-split-event-channels = 1 */
-	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
+	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
 	RING_IDX rx_last_skb_slots;
@@ -158,14 +179,29 @@ struct xenvif {
 
 	struct timer_list wake_queue;
 
-	/* This array is allocated seperately as it is large */
-	struct gnttab_copy *grant_copy_op;
+	struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
 
 	/* We create one meta structure per ring request we consume, so
 	 * the maximum number is the same as the ring size.
 	 */
 	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
 
+	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+	unsigned long credit_bytes;
+	unsigned long credit_usec;
+	unsigned long remaining_credit;
+	struct timer_list credit_timeout;
+	u64 credit_window_start;
+
+	/* Statistics */
+	struct xenvif_stats stats;
+};
+
+struct xenvif {
+	/* Unique identifier for this interface. */
+	domid_t domid;
+	unsigned int handle;
+
 	u8 fe_dev_addr[6];
 
 	/* Frontend feature information. */
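
Moving the counters into struct xenvif_stats means interface-wide totals must now be summed over all queues. A hedged sketch of such an aggregation, assuming the driver tracks the live queue count somewhere (num_queues below is an assumption; this hunk only introduces the per-queue stats member):

/* Sketch only: roll per-queue counters up into interface-wide totals.
 * 'num_queues' is assumed to be maintained elsewhere by the driver.
 */
static void example_sum_stats(struct xenvif *vif, unsigned int num_queues,
			      unsigned long *rx_bytes, unsigned long *tx_bytes)
{
	unsigned int i;

	*rx_bytes = 0;
	*tx_bytes = 0;
	for (i = 0; i < num_queues; ++i) {
		struct xenvif_queue *queue = &vif->queues[i];

		*rx_bytes += queue->stats.rx_bytes;
		*tx_bytes += queue->stats.tx_bytes;
	}
}
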
@@ -179,19 +215,13 @@ struct xenvif {
 	/* Internal feature information. */
 	u8 can_queue:1;	    /* can queue packets for receiver? */
 
-	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
-	unsigned long credit_bytes;
-	unsigned long credit_usec;
-	unsigned long remaining_credit;
-	struct timer_list credit_timeout;
-	u64 credit_window_start;
+	/* Is this interface disabled? True when backend discovers
+	 * frontend is rogue.
+	 */
+	bool disabled;
 
-	/* Statistics */
-	unsigned long rx_gso_checksum_fixup;
-	unsigned long tx_zerocopy_sent;
-	unsigned long tx_zerocopy_success;
-	unsigned long tx_zerocopy_fail;
-	unsigned long tx_frag_overflow;
+	/* Queues */
+	struct xenvif_queue *queues;
 
 	/* Miscellaneous private stuff. */
 	struct net_device *dev;
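
With struct xenvif reduced to a queues pointer, the array itself has to be allocated when the device is set up, one struct xenvif_queue per queue. A sketch under that assumption (vzalloc is a plausible choice because each queue now embeds the large grant_copy_op and meta arrays; the function name is illustrative and error unwinding is elided):

/* Illustrative only: allocate and initialise per-queue state. */
static int example_setup_queues(struct xenvif *vif, unsigned int num_queues)
{
	unsigned int i;
	int err;

	vif->queues = vzalloc(num_queues * sizeof(struct xenvif_queue));
	if (!vif->queues)
		return -ENOMEM;

	for (i = 0; i < num_queues; ++i) {
		struct xenvif_queue *queue = &vif->queues[i];

		queue->id = i;
		queue->vif = vif;
		err = xenvif_init_queue(queue);
		if (err)
			return err; /* real code would unwind prior queues */
	}
	return 0;
}
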
@@ -206,7 +236,10 @@ struct xenvif *xenvif_alloc(struct device *parent,
 			    domid_t domid,
 			    unsigned int handle);
 
-int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+int xenvif_init_queue(struct xenvif_queue *queue);
+void xenvif_deinit_queue(struct xenvif_queue *queue);
+
+int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
 		   unsigned int rx_evtchn);
 void xenvif_disconnect(struct xenvif *vif);
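
xenvif_connect() now binds a single queue, so the xenbus probe path is expected to invoke it once per queue with per-queue ring references and event channels; note that xenvif_disconnect() by contrast still takes the whole vif. A hedged sketch of such a loop, assuming the caller has already read the per-queue values from xenstore into arrays:

/* Sketch: connect every queue. The per-queue ring refs and event
 * channels are assumed to have been fetched from xenstore by the
 * caller; none of these arrays appear in this header.
 */
static int example_connect_all(struct xenvif *vif, unsigned int num_queues,
			       unsigned long *tx_ring_ref,
			       unsigned long *rx_ring_ref,
			       unsigned int *tx_evtchn,
			       unsigned int *rx_evtchn)
{
	unsigned int i;
	int err;

	for (i = 0; i < num_queues; ++i) {
		err = xenvif_connect(&vif->queues[i], tx_ring_ref[i],
				     rx_ring_ref[i], tx_evtchn[i],
				     rx_evtchn[i]);
		if (err)
			return err;
	}
	return 0;
}
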
@@ -217,44 +250,47 @@ void xenvif_xenbus_fini(void);
 
 int xenvif_schedulable(struct xenvif *vif);
 
-int xenvif_must_stop_queue(struct xenvif *vif);
+int xenvif_must_stop_queue(struct xenvif_queue *queue);
+
+int xenvif_queue_stopped(struct xenvif_queue *queue);
+void xenvif_wake_queue(struct xenvif_queue *queue);
 
 /* (Un)Map communication rings. */
-void xenvif_unmap_frontend_rings(struct xenvif *vif);
-int xenvif_map_frontend_rings(struct xenvif *vif,
+void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
+int xenvif_map_frontend_rings(struct xenvif_queue *queue,
 			      grant_ref_t tx_ring_ref,
 			      grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
-void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
 
 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);
 
-int xenvif_tx_action(struct xenvif *vif, int budget);
+int xenvif_tx_action(struct xenvif_queue *queue, int budget);
 
 int xenvif_kthread_guest_rx(void *data);
-void xenvif_kick_thread(struct xenvif *vif);
+void xenvif_kick_thread(struct xenvif_queue *queue);
 
 int xenvif_dealloc_kthread(void *data);
 
 /* Determine whether the needed number of slots (req) are available,
  * and set req_event if not.
  */
-bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
+bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed);
 
-void xenvif_stop_queue(struct xenvif *vif);
+void xenvif_carrier_on(struct xenvif *vif);
 
 /* Callback from stack when TX packet can be released */
 void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
 
 /* Unmap a pending page and release it back to the guest */
-void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx);
+void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
 
-static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
 {
 	return MAX_PENDING_REQS -
-		vif->pending_prod + vif->pending_cons;
+		queue->pending_prod + queue->pending_cons;
 }
 
 /* Callback from stack when TX packet can be released */
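
nr_pending_reqs() keeps its meaning, the number of TX requests consumed from the ring but not yet released, now computed against a single queue's pending_prod/pending_cons. A hypothetical guard in the style of the existing callers, using only macros from this header (the helper name is not from this patch):

/* Hypothetical helper: true when fewer than XEN_NETBK_LEGACY_SLOTS_MAX
 * free pending slots remain, i.e. another worst-case packet may not fit.
 */
static inline bool example_tx_pending_full(struct xenvif_queue *queue)
{
	return nr_pending_reqs(queue) + XEN_NETBK_LEGACY_SLOTS_MAX
		>= MAX_PENDING_REQS;
}
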
@@ -264,5 +300,6 @@ extern bool separate_tx_rx_irq;
 
 extern unsigned int rx_drain_timeout_msecs;
 extern unsigned int rx_drain_timeout_jiffies;
+extern unsigned int xenvif_max_queues;
 
 #endif /* __XEN_NETBACK__COMMON_H__ */
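
The new xenvif_max_queues extern caps how many queues a frontend may negotiate. It would typically be backed by a module parameter defined in netback.c; a sketch along those lines, with the parameter name, permissions, and description being assumptions rather than contents of this diff:

/* Possible definition in netback.c; details are illustrative. */
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");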