author		Wei Liu <wei.liu2@citrix.com>		2014-06-04 05:30:42 -0400
committer	David S. Miller <davem@davemloft.net>	2014-06-04 17:48:16 -0400
commit		e9ce7cb6b107407e4798e8905b18ad8b642766f6 (patch)
tree		dd99d31fa4f2bae0e836c99a811e5de4e1202567 /drivers/net/xen-netback/common.h
parent		a55d9766cecf2b1b9af4fcf93b2d41b71e599c76 (diff)
xen-netback: Factor queue-specific data into queue struct
In preparation for multi-queue support in xen-netback, move the
queue-specific data from struct xenvif into struct xenvif_queue, and
update the rest of the code to use this.
Also add loops over queues where appropriate, even though only one queue is
configured at this point, and use alloc_netdev_mq() and the corresponding
multi-queue netif wake/start/stop functions in preparation for multiple
active queues.
Finally, implement a trivial queue selection function suitable for
ndo_select_queue, which simply returns 0 when only one queue is configured
and uses skb_get_hash() to compute the queue index otherwise.
Signed-off-by: Andrew J. Bennieston <andrew.bennieston@citrix.com>
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
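
For reference, the selection policy described in the last paragraph amounts to something like the sketch below. This is illustrative only, not the code added by the patch (the real hook lives in interface.c, outside this diff): the function name is a placeholder and the actual ndo_select_queue prototype in this kernel takes additional arguments that are omitted here.

/* Illustrative sketch only -- not the patch's implementation.
 * Return queue 0 when a single queue is configured, otherwise fold the
 * skb's flow hash into the range of available TX queues.
 */
static u16 xenvif_select_queue_sketch(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int num_queues = dev->real_num_tx_queues;

	if (num_queues == 1)
		return 0;

	/* skb_get_hash() computes (or reuses) the flow hash for this skb */
	return (u16)(skb_get_hash(skb) % num_queues);
}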
Diffstat (limited to 'drivers/net/xen-netback/common.h')
-rw-r--r--	drivers/net/xen-netback/common.h	102
1 file changed, 69 insertions(+), 33 deletions(-)
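
The per-queue netif wake/stop pattern the commit message refers to (and which the xenvif_wake_queue()/xenvif_queue_stopped() declarations added below support) generally looks like the following sketch. It is an assumption-laden illustration, not the patch's code (that lives in interface.c); the sketch_* names are placeholders and it relies on the struct definitions introduced in common.h by this patch.

#include <linux/netdevice.h>

/* Illustrative sketch only: with one netdev TX sub-queue per xenvif_queue,
 * wake/stop operate on the sub-queue whose index matches queue->id rather
 * than on the whole device.
 */
static void sketch_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;

	netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}

static void sketch_stop_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;

	netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
}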
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 2c283d693330..b6885cfcc7df 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -99,22 +99,43 @@ struct xenvif_rx_meta {
  */
 #define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
 
-struct xenvif {
-	/* Unique identifier for this interface. */
-	domid_t domid;
-	unsigned int handle;
+/* Queue name is interface name with "-qNNN" appended */
+#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)
 
-	/* Is this interface disabled? True when backend discovers
-	 * frontend is rogue.
+/* IRQ name is queue name with "-tx" or "-rx" appended */
+#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+
+struct xenvif;
+
+struct xenvif_stats {
+	/* Stats fields to be updated per-queue.
+	 * A subset of struct net_device_stats that contains only the
+	 * fields that are updated in netback.c for each queue.
 	 */
-	bool disabled;
+	unsigned int rx_bytes;
+	unsigned int rx_packets;
+	unsigned int tx_bytes;
+	unsigned int tx_packets;
+
+	/* Additional stats used by xenvif */
+	unsigned long rx_gso_checksum_fixup;
+	unsigned long tx_zerocopy_sent;
+	unsigned long tx_zerocopy_success;
+	unsigned long tx_zerocopy_fail;
+	unsigned long tx_frag_overflow;
+};
+
+struct xenvif_queue { /* Per-queue data for xenvif */
+	unsigned int id; /* Queue ID, 0-based */
+	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
+	struct xenvif *vif; /* Parent VIF */
 
 	/* Use NAPI for guest TX */
 	struct napi_struct napi;
 	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
 	unsigned int tx_irq;
 	/* Only used when feature-split-event-channels = 1 */
-	char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
+	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
 	struct xen_netif_tx_back_ring tx;
 	struct sk_buff_head tx_queue;
 	struct page *mmap_pages[MAX_PENDING_REQS];
@@ -150,7 +171,7 @@ struct xenvif {
 	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
 	unsigned int rx_irq;
 	/* Only used when feature-split-event-channels = 1 */
-	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
+	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
 	RING_IDX rx_last_skb_slots;
@@ -165,6 +186,22 @@ struct xenvif {
 	 */
 	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
 
+	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+	unsigned long credit_bytes;
+	unsigned long credit_usec;
+	unsigned long remaining_credit;
+	struct timer_list credit_timeout;
+	u64 credit_window_start;
+
+	/* Statistics */
+	struct xenvif_stats stats;
+};
+
+struct xenvif {
+	/* Unique identifier for this interface. */
+	domid_t domid;
+	unsigned int handle;
+
 	u8 fe_dev_addr[6];
 
 	/* Frontend feature information. */
@@ -178,19 +215,13 @@ struct xenvif {
 	/* Internal feature information. */
 	u8 can_queue:1; /* can queue packets for receiver? */
 
-	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
-	unsigned long credit_bytes;
-	unsigned long credit_usec;
-	unsigned long remaining_credit;
-	struct timer_list credit_timeout;
-	u64 credit_window_start;
+	/* Is this interface disabled? True when backend discovers
+	 * frontend is rogue.
+	 */
+	bool disabled;
 
-	/* Statistics */
-	unsigned long rx_gso_checksum_fixup;
-	unsigned long tx_zerocopy_sent;
-	unsigned long tx_zerocopy_success;
-	unsigned long tx_zerocopy_fail;
-	unsigned long tx_frag_overflow;
+	/* Queues */
+	struct xenvif_queue *queues;
 
 	/* Miscellaneous private stuff. */
 	struct net_device *dev;
@@ -205,7 +236,9 @@ struct xenvif *xenvif_alloc(struct device *parent,
 			    domid_t domid,
 			    unsigned int handle);
 
-int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+int xenvif_init_queue(struct xenvif_queue *queue);
+
+int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
 		   unsigned int rx_evtchn);
 void xenvif_disconnect(struct xenvif *vif);
@@ -216,44 +249,47 @@ void xenvif_xenbus_fini(void);
 
 int xenvif_schedulable(struct xenvif *vif);
 
-int xenvif_must_stop_queue(struct xenvif *vif);
+int xenvif_must_stop_queue(struct xenvif_queue *queue);
+
+int xenvif_queue_stopped(struct xenvif_queue *queue);
+void xenvif_wake_queue(struct xenvif_queue *queue);
 
 /* (Un)Map communication rings. */
-void xenvif_unmap_frontend_rings(struct xenvif *vif);
-int xenvif_map_frontend_rings(struct xenvif *vif,
+void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
+int xenvif_map_frontend_rings(struct xenvif_queue *queue,
 			      grant_ref_t tx_ring_ref,
 			      grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
-void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
 
 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);
 
-int xenvif_tx_action(struct xenvif *vif, int budget);
+int xenvif_tx_action(struct xenvif_queue *queue, int budget);
 
 int xenvif_kthread_guest_rx(void *data);
-void xenvif_kick_thread(struct xenvif *vif);
+void xenvif_kick_thread(struct xenvif_queue *queue);
 
 int xenvif_dealloc_kthread(void *data);
 
 /* Determine whether the needed number of slots (req) are available,
  * and set req_event if not.
  */
-bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
+bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed);
 
-void xenvif_stop_queue(struct xenvif *vif);
+void xenvif_carrier_on(struct xenvif *vif);
 
 /* Callback from stack when TX packet can be released */
 void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
 
 /* Unmap a pending page and release it back to the guest */
-void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx);
+void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
 
-static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
 {
 	return MAX_PENDING_REQS -
-		vif->pending_prod + vif->pending_cons;
+		queue->pending_prod + queue->pending_cons;
 }
 
 /* Callback from stack when TX packet can be released */
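
Since struct xenvif_stats now carries only the per-queue subset of the counters, interface-wide totals have to be summed over all queues whenever the stack asks for device statistics. A minimal sketch of that aggregation is shown below; it is not the code added by this patch (the real accounting changes are in interface.c and netback.c, outside this diff), the sketch_get_stats name is a placeholder, and it assumes the vif is the netdev's private data and that dev->real_num_tx_queues reflects the configured queue count.

/* Illustrative sketch only: sum the per-queue counters from
 * struct xenvif_stats into the net_device totals. Relies on the
 * structures declared in common.h by this patch.
 */
static struct net_device_stats *sketch_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned long rx_bytes = 0, rx_packets = 0;
	unsigned long tx_bytes = 0, tx_packets = 0;
	unsigned int i;

	for (i = 0; i < dev->real_num_tx_queues; i++) {
		struct xenvif_queue *queue = &vif->queues[i];

		rx_bytes   += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes   += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_packets = rx_packets;
	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}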