Diffstat (limited to 'drivers/net/xen-netback/common.h')
 drivers/net/xen-netback/common.h | 27 +++++++++++----------------
 1 file changed, 11 insertions(+), 16 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 08ae01b41c83..ba30a6d9fefa 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -136,12 +136,10 @@ struct xenvif {
 	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
-
-	/* Allow xenvif_start_xmit() to peek ahead in the rx request
-	 * ring. This is a prediction of what rx_req_cons will be
-	 * once all queued skbs are put on the ring.
+	/* Set when the RX interrupt is triggered by the frontend.
+	 * The worker thread may need to wake the queue.
 	 */
-	RING_IDX rx_req_cons_peek;
+	bool rx_event;
 
 	/* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
 	 * head/fragment page uses 2 copy operations because it
@@ -198,8 +196,6 @@ void xenvif_xenbus_fini(void);
 
 int xenvif_schedulable(struct xenvif *vif);
 
-int xenvif_rx_ring_full(struct xenvif *vif);
-
 int xenvif_must_stop_queue(struct xenvif *vif);
 
 /* (Un)Map communication rings. */
@@ -211,21 +207,20 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
 /* Check for SKBs from frontend and schedule backend processing */
 void xenvif_check_rx_xenvif(struct xenvif *vif);
 
-/* Queue an SKB for transmission to the frontend */
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
-
-/* Notify xenvif that ring now has space to send an skb to the frontend */
-void xenvif_notify_tx_completion(struct xenvif *vif);
-
 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);
 
-/* Returns number of ring slots required to send an skb to the frontend */
-unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
-
 int xenvif_tx_action(struct xenvif *vif, int budget);
-void xenvif_rx_action(struct xenvif *vif);
 
 int xenvif_kthread(void *data);
+void xenvif_kick_thread(struct xenvif *vif);
+
+/* Determine whether the needed number of slots (req) are available,
+ * and set req_event if not.
+ */
+bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
+
+void xenvif_stop_queue(struct xenvif *vif);
 
 extern bool separate_tx_rx_irq;
 
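
Note on the new rx_event field: a minimal sketch of how the RX interrupt path could use it, assuming the split TX/RX event-channel configuration and the xenvif_kick_thread() declaration above. The handler name and body here are illustrative assumptions, not the exact code added elsewhere in this series.

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif *vif = dev_id;

	/* Record that the frontend signalled us; the worker thread,
	 * not the hard IRQ handler, decides whether to wake the queue.
	 */
	vif->rx_event = true;
	xenvif_kick_thread(vif);

	return IRQ_HANDLED;
}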
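
The xenvif_rx_ring_slots_available() declaration replaces the removed rx_req_cons_peek prediction with an on-demand check. A minimal sketch of what such a check can look like, using the standard shared-ring fields (req_prod, req_cons, req_event); the real body lives in netback.c and may differ in detail.

bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
{
	RING_IDX prod, cons;

	do {
		prod = vif->rx.sring->req_prod;
		cons = vif->rx.req_cons;

		/* Enough outstanding requests for 'needed' slots? */
		if (prod - cons >= needed)
			return true;

		/* Not yet: ask the frontend for an event once more
		 * requests are posted, then re-check in case some
		 * arrived while req_event was being armed.
		 */
		vif->rx.sring->req_event = prod + 1;
		mb();
	} while (vif->rx.sring->req_prod != prod);

	return false;
}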