Diffstat (limited to 'drivers/net/xen-netback/common.h')
 -rw-r--r--  drivers/net/xen-netback/common.h | 109
 1 file changed, 71 insertions(+), 38 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index ae413a2cbee7..89d1d0556b6e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -48,37 +48,19 @@
 typedef unsigned int pending_ring_idx_t;
 #define INVALID_PENDING_RING_IDX (~0U)
 
-/* For the head field in pending_tx_info: it is used to indicate
- * whether this tx info is the head of one or more coalesced requests.
- *
- * When head != INVALID_PENDING_RING_IDX, it means the start of a new
- * tx requests queue and the end of previous queue.
- *
- * An example sequence of head fields (I = INVALID_PENDING_RING_IDX):
- *
- * ...|0 I I I|5 I|9 I I I|...
- * -->|<-INUSE----------------
- *
- * After consuming the first slot(s) we have:
- *
- * ...|V V V V|5 I|9 I I I|...
- * -----FREE->|<-INUSE--------
- *
- * where V stands for "valid pending ring index". Any number other
- * than INVALID_PENDING_RING_IDX is OK. These entries are considered
- * free and can contain any number other than
- * INVALID_PENDING_RING_IDX. In practice we use 0.
- *
- * The in use non-INVALID_PENDING_RING_IDX (say 0, 5 and 9 in the
- * above example) number is the index into pending_tx_info and
- * mmap_pages arrays.
- */
 struct pending_tx_info {
-	struct xen_netif_tx_request req; /* coalesced tx request */
-	pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
-				  * if it is head of one or more tx
-				  * reqs
-				  */
+	struct xen_netif_tx_request req; /* tx request */
+	/* Callback data for released SKBs. The callback is always
+	 * xenvif_zerocopy_callback; desc contains the pending_idx, which is
+	 * also an index into the pending_tx_info array. It is initialized
+	 * in xenvif_alloc and never changes.
+	 * skb_shinfo(skb)->destructor_arg points to the first mapped slot's
+	 * callback_struct in this array of struct pending_tx_info's; each
+	 * ctx then points to the next, or NULL when there are no more slots
+	 * for this skb. ubuf_to_vif is a helper which finds the struct
+	 * xenvif from a pointer to this field.
+	 */
+	struct ubuf_info callback_struct;
 };
 
 #define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
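
Illustrative sketch, not part of the patch: the new comment describes a chain of mapped slots linked through skb_shinfo(skb)->destructor_arg and each ubuf_info's ctx pointer. Assuming the headers common.h already pulls in (linux/skbuff.h for skb_shinfo, linux/kernel.h for container_of), a walk over that chain could look like this; the function name is hypothetical.

static void example_walk_mapped_slots(struct sk_buff *skb)
{
	struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;

	while (ubuf) {
		/* desc carries the pending_idx, which indexes both the
		 * pending_tx_info[] and grant_tx_handle[] arrays */
		u16 pending_idx = ubuf->desc;
		struct pending_tx_info *info =
			container_of(ubuf, struct pending_tx_info,
				     callback_struct);

		(void)pending_idx;
		(void)info;
		ubuf = ubuf->ctx;	/* next mapped slot, or NULL */
	}
}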
@@ -99,7 +81,7 @@ struct xenvif_rx_meta {
 
 #define MAX_BUFFER_OFFSET PAGE_SIZE
 
-#define MAX_PENDING_REQS 256
+#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
 
 /* It's possible for an skb to have a maximal number of frags
  * but still be less than MAX_BUFFER_OFFSET in size. Thus the
@@ -108,11 +90,25 @@ struct xenvif_rx_meta {
  */
 #define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
 
+#define NETBACK_INVALID_HANDLE -1
+
+/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX as the
+ * maximum number of slots a valid packet can use. It is currently
+ * defined to be XEN_NETIF_NR_SLOTS_MIN, which all backends are
+ * supposed to support.
+ */
+#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
+
 struct xenvif {
 	/* Unique identifier for this interface. */
 	domid_t domid;
 	unsigned int handle;
 
+	/* Is this interface disabled? True when the backend discovers
+	 * the frontend is rogue.
+	 */
+	bool disabled;
+
 	/* Use NAPI for guest TX */
 	struct napi_struct napi;
 	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
@@ -126,13 +122,26 @@ struct xenvif {
 	pending_ring_idx_t pending_cons;
 	u16 pending_ring[MAX_PENDING_REQS];
 	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
-
-	/* Coalescing tx requests before copying makes number of grant
-	 * copy ops greater or equal to number of slots required. In
-	 * worst case a tx request consumes 2 gnttab_copy.
-	 */
-	struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
-
+	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
+
+	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
+	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
+	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
+	struct page *pages_to_map[MAX_PENDING_REQS];
+	struct page *pages_to_unmap[MAX_PENDING_REQS];
+
+	/* This prevents zerocopy callbacks from racing over dealloc_ring */
+	spinlock_t callback_lock;
+	/* This prevents the dealloc thread and the NAPI instance from racing
+	 * over response creation and pending_ring in xenvif_idx_release.
+	 * In xenvif_tx_err it only protects response creation.
+	 */
+	spinlock_t response_lock;
+	pending_ring_idx_t dealloc_prod;
+	pending_ring_idx_t dealloc_cons;
+	u16 dealloc_ring[MAX_PENDING_REQS];
+	struct task_struct *dealloc_task;
+	wait_queue_head_t dealloc_wq;
 
 	/* Use kthread for guest RX */
 	struct task_struct *task;
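
Illustrative sketch, inferred from the comments above rather than taken from the patch body: the zerocopy callback would push a finished pending_idx into dealloc_ring under callback_lock and wake the dealloc thread. The function name is hypothetical, and it assumes MAX_PENDING_REQS is a power of two (XEN_NETIF_TX_RING_SIZE is, for the standard 4 KiB ring) so the ring index can be masked.

static void example_queue_dealloc(struct xenvif *vif, u16 pending_idx)
{
	unsigned long flags;

	spin_lock_irqsave(&vif->callback_lock, flags);
	vif->dealloc_ring[vif->dealloc_prod & (MAX_PENDING_REQS - 1)] =
		pending_idx;
	/* make the entry visible before publishing the producer index */
	smp_wmb();
	vif->dealloc_prod++;
	spin_unlock_irqrestore(&vif->callback_lock, flags);

	wake_up(&vif->dealloc_wq);
}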
@@ -144,6 +153,9 @@ struct xenvif {
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
 	RING_IDX rx_last_skb_slots;
+	bool rx_queue_purge;
+
+	struct timer_list wake_queue;
 
 	/* This array is allocated separately as it is large */
 	struct gnttab_copy *grant_copy_op;
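
Illustrative sketch: rx_queue_purge and wake_queue plausibly pair with the rx_drain_timeout_* variables declared at the bottom of this header. The assumption here is that the queue-stop path arms the timer with mod_timer(&vif->wake_queue, jiffies + rx_drain_timeout_jiffies), and that the handler purges the stalled queue so a rogue or slow guest cannot block the backend indefinitely. The handler name is hypothetical; the signature is the pre-4.15 timer callback form current at the time of this patch.

static void example_wake_queue(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;

	vif->rx_queue_purge = true;	/* tell the RX kthread to drain */
	xenvif_kick_thread(vif);
}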
@@ -175,6 +187,10 @@ struct xenvif {
 
 	/* Statistics */
 	unsigned long rx_gso_checksum_fixup;
+	unsigned long tx_zerocopy_sent;
+	unsigned long tx_zerocopy_success;
+	unsigned long tx_zerocopy_fail;
+	unsigned long tx_frag_overflow;
 
 	/* Miscellaneous private stuff. */
 	struct net_device *dev;
@@ -216,9 +232,11 @@ void xenvif_carrier_off(struct xenvif *vif);
 
 int xenvif_tx_action(struct xenvif *vif, int budget);
 
-int xenvif_kthread(void *data);
+int xenvif_kthread_guest_rx(void *data);
 void xenvif_kick_thread(struct xenvif *vif);
 
+int xenvif_dealloc_kthread(void *data);
+
 /* Determine whether the needed number of slots (req) are available,
  * and set req_event if not.
  */
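
The renamed xenvif_kthread_guest_rx and the new xenvif_dealloc_kthread imply a second per-vif thread that sleeps on dealloc_wq until the dealloc ring has work. A minimal sketch of such a loop, assuming linux/kthread.h; the unmap batching itself is only indicated by a comment:

int example_dealloc_kthread(void *data)
{
	struct xenvif *vif = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(vif->dealloc_wq,
					 vif->dealloc_cons != vif->dealloc_prod ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		/* consume dealloc_ring entries here, batching them into
		 * tx_unmap_ops[]/pages_to_unmap[] for gnttab_unmap_refs() */
		cond_resched();
	}

	return 0;
}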
@@ -226,6 +244,21 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
 
 void xenvif_stop_queue(struct xenvif *vif);
 
+/* Callback from the stack when a TX packet can be released */
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+
+/* Unmap a pending page and release it back to the guest */
+void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx);
+
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
+{
+	return MAX_PENDING_REQS -
+		vif->pending_prod + vif->pending_cons;
+}
+
 extern bool separate_tx_rx_irq;
 
+extern unsigned int rx_drain_timeout_msecs;
+extern unsigned int rx_drain_timeout_jiffies;
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
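
A worked example for nr_pending_reqs(): pending_prod - pending_cons is the number of free entries in pending_ring, so with MAX_PENDING_REQS = 256 (the TX ring size for 4 KiB pages), pending_prod = 1000 and pending_cons = 900, there are 100 free slots and 256 - 1000 + 900 = 156 requests still in flight.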