 drivers/net/xen-netback/common.h | 108 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------------------
 1 file changed, 71 insertions(+), 37 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index ae413a2cbee7..0355f8767e3b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -48,37 +48,19 @@
 typedef unsigned int pending_ring_idx_t;
 #define INVALID_PENDING_RING_IDX (~0U)
 
-/* For the head field in pending_tx_info: it is used to indicate
- * whether this tx info is the head of one or more coalesced requests.
- *
- * When head != INVALID_PENDING_RING_IDX, it means the start of a new
- * tx requests queue and the end of previous queue.
- *
- * An example sequence of head fields (I = INVALID_PENDING_RING_IDX):
- *
- * ...|0 I I I|5 I|9 I I I|...
- * -->|<-INUSE----------------
- *
- * After consuming the first slot(s) we have:
- *
- * ...|V V V V|5 I|9 I I I|...
- * -----FREE->|<-INUSE--------
- *
- * where V stands for "valid pending ring index". Any number other
- * than INVALID_PENDING_RING_IDX is OK. These entries are considered
- * free and can contain any number other than
- * INVALID_PENDING_RING_IDX. In practice we use 0.
- *
- * The in use non-INVALID_PENDING_RING_IDX (say 0, 5 and 9 in the
- * above example) number is the index into pending_tx_info and
- * mmap_pages arrays.
- */
 struct pending_tx_info {
-	struct xen_netif_tx_request req; /* coalesced tx request */
-	pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
-				  * if it is head of one or more tx
-				  * reqs
-				  */
+	struct xen_netif_tx_request req; /* tx request */
+	/* Callback data for released SKBs. The callback is always
+	 * xenvif_zerocopy_callback; desc contains the pending_idx, which is
+	 * also an index into the pending_tx_info array. It is initialized in
+	 * xenvif_alloc and never changes.
+	 * skb_shinfo(skb)->destructor_arg points to the first mapped slot's
+	 * callback_struct in this array of struct pending_tx_info's; each ctx
+	 * points to the next, or NULL if there are no more slots for this skb.
+	 * ubuf_to_vif is a helper which finds the struct xenvif from a pointer
+	 * to this field.
+	 */
+	struct ubuf_info callback_struct;
 };
 
 #define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
@@ -108,6 +90,15 @@ struct xenvif_rx_meta {
  */
 #define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
 
+#define NETBACK_INVALID_HANDLE -1
+
+/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX to indicate
+ * the maximum number of slots a valid packet can use. This value is
+ * currently defined to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to
+ * be supported by all backends.
+ */
+#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
+
 struct xenvif {
 	/* Unique identifier for this interface. */
 	domid_t domid;
@@ -126,13 +117,26 @@ struct xenvif {
 	pending_ring_idx_t pending_cons;
 	u16 pending_ring[MAX_PENDING_REQS];
 	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
-
-	/* Coalescing tx requests before copying makes number of grant
-	 * copy ops greater or equal to number of slots required. In
-	 * worst case a tx request consumes 2 gnttab_copy.
-	 */
-	struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
-
+	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
+
+	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
+	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
+	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
+	struct page *pages_to_map[MAX_PENDING_REQS];
+	struct page *pages_to_unmap[MAX_PENDING_REQS];
+
+	/* This prevents zerocopy callbacks from racing over dealloc_ring */
+	spinlock_t callback_lock;
+	/* This prevents the dealloc thread and the NAPI instance from racing
+	 * over response creation and pending_ring in xenvif_idx_release. In
+	 * xenvif_tx_err it only protects response creation.
+	 */
+	spinlock_t response_lock;
+	pending_ring_idx_t dealloc_prod;
+	pending_ring_idx_t dealloc_cons;
+	u16 dealloc_ring[MAX_PENDING_REQS];
+	struct task_struct *dealloc_task;
+	wait_queue_head_t dealloc_wq;
 
 	/* Use kthread for guest RX */
 	struct task_struct *task;
@@ -144,6 +148,9 @@ struct xenvif {
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
 	RING_IDX rx_last_skb_slots;
+	bool rx_queue_purge;
+
+	struct timer_list wake_queue;
 
 	/* This array is allocated separately as it is large */
 	struct gnttab_copy *grant_copy_op;
@@ -175,6 +182,10 @@ struct xenvif {
 
 	/* Statistics */
 	unsigned long rx_gso_checksum_fixup;
+	unsigned long tx_zerocopy_sent;
+	unsigned long tx_zerocopy_success;
+	unsigned long tx_zerocopy_fail;
+	unsigned long tx_frag_overflow;
 
 	/* Miscellaneous private stuff. */
 	struct net_device *dev;
@@ -216,9 +227,11 @@ void xenvif_carrier_off(struct xenvif *vif);
 
 int xenvif_tx_action(struct xenvif *vif, int budget);
 
-int xenvif_kthread(void *data);
+int xenvif_kthread_guest_rx(void *data);
 void xenvif_kick_thread(struct xenvif *vif);
 
+int xenvif_dealloc_kthread(void *data);
+
 /* Determine whether the needed number of slots (req) are available,
  * and set req_event if not.
  */
@@ -226,6 +239,27 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
 
 void xenvif_stop_queue(struct xenvif *vif);
 
+/* Callback from stack when TX packet can be released */
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+
+/* Unmap a pending page and release it back to the guest */
+void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx);
+
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
+{
+	return MAX_PENDING_REQS -
+		vif->pending_prod + vif->pending_cons;
+}
+
+static inline bool xenvif_tx_pending_slots_available(struct xenvif *vif)
+{
+	return nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
+		< MAX_PENDING_REQS;
+}
+
 extern bool separate_tx_rx_irq;
 
+extern unsigned int rx_drain_timeout_msecs;
+extern unsigned int rx_drain_timeout_jiffies;
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
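
The chained callback_structs described in the new pending_tx_info comment can
be hard to visualize. Below is a minimal sketch, not part of the patch: the
helper name walk_skb_slots is hypothetical, and the ubuf_info layout assumed
(callback/ctx/desc fields) is that of this kernel era.

/* Visit every mapped slot belonging to an skb by following the chain
 * described in the pending_tx_info comment above.
 */
static void walk_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
	/* destructor_arg points at the first mapped slot's callback_struct */
	struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;

	while (ubuf) {
		/* desc carries the pending_idx, an index into both
		 * vif->pending_tx_info and vif->grant_tx_handle
		 */
		u16 pending_idx = ubuf->desc;

		xenvif_idx_unmap(vif, pending_idx);

		/* ctx points at the next slot's callback_struct, or NULL */
		ubuf = (struct ubuf_info *)ubuf->ctx;
	}
}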
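
The callback_lock, dealloc_ring and dealloc_wq fields added to struct xenvif
form a producer/consumer pair between the zerocopy callback and
xenvif_dealloc_kthread. Here is a sketch of the producer side, assuming a
simple modulo index into the MAX_PENDING_REQS-sized ring; the helper name
queue_idx_for_dealloc is illustrative and not part of the patch.

/* Queue a slot for the dealloc thread. callback_lock serializes
 * concurrent zerocopy callbacks racing over dealloc_prod, as the
 * struct xenvif comment notes.
 */
static void queue_idx_for_dealloc(struct xenvif *vif, u16 pending_idx)
{
	unsigned long flags;

	spin_lock_irqsave(&vif->callback_lock, flags);
	vif->dealloc_ring[vif->dealloc_prod % MAX_PENDING_REQS] = pending_idx;
	/* publish the slot contents before the new producer index */
	smp_wmb();
	vif->dealloc_prod++;
	spin_unlock_irqrestore(&vif->callback_lock, flags);

	/* wake xenvif_dealloc_kthread, which consumes via dealloc_cons */
	wake_up(&vif->dealloc_wq);
}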