author		Zoltan Kiss <zoltan.kiss@citrix.com>	2014-03-06 16:48:26 -0500
committer	David S. Miller <davem@davemloft.net>	2014-03-07 15:56:35 -0500
commit		f53c3fe8dad725b014e9c7682720d8e3e2a8a5b3 (patch)
tree		11cb77466fbb32cd1ca6f84a5ea0daca233a49c0 /drivers/net/xen-netback/common.h
parent		3e2234b3149f66bc4be2343a3a0f637d922e4a36 (diff)
xen-netback: Introduce TX grant mapping
This patch introduces grant mapping on the netback TX path. It replaces grant copy operations, ditching grant copy coalescing along the way. Another solution for copy coalescing is introduced in "xen-netback: Handle guests with too many frags"; older guests and Windows can break before that patch is applied. There is a callback (xenvif_zerocopy_callback) from the core stack to release the slots back to the guests when kfree_skb or skb_orphan_frags is called. It feeds a separate dealloc thread, as scheduling a NAPI instance from there is inefficient, therefore we can't do the dealloc from the instance.

Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
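To make that flow easier to picture, here is a minimal, hypothetical sketch of the producer/consumer pair, not the code added by this patch (which lives in netback.c); pending_index() and xenvif_tx_dealloc_action() are assumed helpers. The callback walks the skb's chain of ubuf_info slots, queues each pending_idx on dealloc_ring, and wakes the dealloc thread, which does the actual unmapping in batches:

#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include "common.h"	/* struct xenvif, as modified by this patch */

/* Producer: called by the core stack from kfree_skb/skb_orphan_frags.
 * It may run in any context, so it only queues the slots under
 * callback_lock and defers the grant unmap to the dealloc thread.
 */
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	struct xenvif *vif = ubuf_to_vif(ubuf);
	unsigned long flags;

	spin_lock_irqsave(&vif->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;

		ubuf = (struct ubuf_info *)ubuf->ctx; /* next slot, or NULL */
		vif->dealloc_ring[pending_index(vif->dealloc_prod)] =
			pending_idx;
		smp_wmb(); /* ring entry visible before producer index */
		vif->dealloc_prod++;
	} while (ubuf);
	spin_unlock_irqrestore(&vif->callback_lock, flags);

	wake_up(&vif->dealloc_wq);
}

/* Consumer: dedicated kthread, so neither NAPI nor the callback
 * context pays for the grant unmap hypercalls.
 */
int xenvif_dealloc_kthread(void *data)
{
	struct xenvif *vif = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(vif->dealloc_wq,
					 vif->dealloc_cons !=
						vif->dealloc_prod ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;
		xenvif_tx_dealloc_action(vif); /* batch unmap + responses */
	}
	return 0;
}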
Diffstat (limited to 'drivers/net/xen-netback/common.h')
-rw-r--r--	drivers/net/xen-netback/common.h	39
1 file changed, 38 insertions(+), 1 deletion(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8f264df8818a..5a991266a394 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -79,6 +79,17 @@ struct pending_tx_info {
 				  * if it is head of one or more tx
 				  * reqs
 				  */
+	/* Callback data for released SKBs. The callback is always
+	 * xenvif_zerocopy_callback, desc contains the pending_idx, which is
+	 * also an index in pending_tx_info array. It is initialized in
+	 * xenvif_alloc and it never changes.
+	 * skb_shinfo(skb)->destructor_arg points to the first mapped slot's
+	 * callback_struct in this array of struct pending_tx_info's, then ctx
+	 * to the next, or NULL if there is no more slot for this skb.
+	 * ubuf_to_vif is a helper which finds the struct xenvif from a pointer
+	 * to this field.
+	 */
+	struct ubuf_info callback_struct;
 };
 
 #define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
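The ubuf_to_vif helper mentioned in the comment above is not part of this header. As an aside, a plausible sketch of such a helper (assuming it sits next to this header in netback.c) recovers the vif with two container_of() steps, using desc as the index into pending_tx_info[]:

static inline struct xenvif *ubuf_to_vif(struct ubuf_info *ubuf)
{
	u16 pending_idx = ubuf->desc;
	struct pending_tx_info *temp =
		container_of(ubuf, struct pending_tx_info, callback_struct);

	/* temp is &vif->pending_tx_info[pending_idx]; step back to
	 * element 0 of the array, then up to the enclosing struct xenvif.
	 */
	return container_of(temp - pending_idx,
			    struct xenvif,
			    pending_tx_info[0]);
}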
@@ -135,13 +146,31 @@ struct xenvif {
 	pending_ring_idx_t pending_cons;
 	u16 pending_ring[MAX_PENDING_REQS];
 	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
+	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
 
 	/* Coalescing tx requests before copying makes number of grant
 	 * copy ops greater or equal to number of slots required. In
 	 * worst case a tx request consumes 2 gnttab_copy.
 	 */
 	struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
-
+	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
+	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
+	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
+	struct page *pages_to_map[MAX_PENDING_REQS];
+	struct page *pages_to_unmap[MAX_PENDING_REQS];
+
+	/* This prevents zerocopy callbacks to race over dealloc_ring */
+	spinlock_t callback_lock;
+	/* This prevents dealloc thread and NAPI instance to race over response
+	 * creation and pending_ring in xenvif_idx_release. In xenvif_tx_err
+	 * it only protect response creation
+	 */
+	spinlock_t response_lock;
+	pending_ring_idx_t dealloc_prod;
+	pending_ring_idx_t dealloc_cons;
+	u16 dealloc_ring[MAX_PENDING_REQS];
+	struct task_struct *dealloc_task;
+	wait_queue_head_t dealloc_wq;
 
 	/* Use kthread for guest RX */
 	struct task_struct *task;
@@ -228,6 +257,8 @@ int xenvif_tx_action(struct xenvif *vif, int budget);
 int xenvif_kthread_guest_rx(void *data);
 void xenvif_kick_thread(struct xenvif *vif);
 
+int xenvif_dealloc_kthread(void *data);
+
 /* Determine whether the needed number of slots (req) are available,
  * and set req_event if not.
  */
@@ -235,6 +266,12 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
 
 void xenvif_stop_queue(struct xenvif *vif);
 
+/* Callback from stack when TX packet can be released */
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+
+/* Unmap a pending page and release it back to the guest */
+void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx);
+
 static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
 {
 	return MAX_PENDING_REQS -