aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/xen-netback/common.h
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/xen-netback/common.h')
-rw-r--r--drivers/net/xen-netback/common.h32
1 file changed, 23 insertions, 9 deletions
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 5715318d6bab..c47794b9d42f 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -87,9 +87,13 @@ struct pending_tx_info {
87struct xenvif_rx_meta { 87struct xenvif_rx_meta {
88 int id; 88 int id;
89 int size; 89 int size;
90 int gso_type;
90 int gso_size; 91 int gso_size;
91}; 92};
92 93
94#define GSO_BIT(type) \
95 (1 << XEN_NETIF_GSO_TYPE_ ## type)
96
93/* Discriminate from any valid pending_idx value. */ 97/* Discriminate from any valid pending_idx value. */
94#define INVALID_PENDING_IDX 0xFFFF 98#define INVALID_PENDING_IDX 0xFFFF
95 99
@@ -97,6 +101,13 @@ struct xenvif_rx_meta {
97 101
98#define MAX_PENDING_REQS 256 102#define MAX_PENDING_REQS 256
99 103
104/* It's possible for an skb to have a maximal number of frags
105 * but still be less than MAX_BUFFER_OFFSET in size. Thus the
106 * worst-case number of copy operations is MAX_SKB_FRAGS per
107 * ring slot.
108 */
109#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
110
100struct xenvif { 111struct xenvif {
101 /* Unique identifier for this interface. */ 112 /* Unique identifier for this interface. */
102 domid_t domid; 113 domid_t domid;
@@ -139,21 +150,23 @@ struct xenvif {
139 */ 150 */
140 RING_IDX rx_req_cons_peek; 151 RING_IDX rx_req_cons_peek;
141 152
142 /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each 153 /* This array is allocated separately as it is large */
143 * head/fragment page uses 2 copy operations because it 154 struct gnttab_copy *grant_copy_op;
144 * straddles two buffers in the frontend.
145 */
146 struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
147 struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
148 155
156 /* We create one meta structure per ring request we consume, so
157 * the maximum number is the same as the ring size.
158 */
159 struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
149 160
150 u8 fe_dev_addr[6]; 161 u8 fe_dev_addr[6];
151 162
152 /* Frontend feature information. */ 163 /* Frontend feature information. */
164 int gso_mask;
165 int gso_prefix_mask;
166
153 u8 can_sg:1; 167 u8 can_sg:1;
154 u8 gso:1; 168 u8 ip_csum:1;
155 u8 gso_prefix:1; 169 u8 ipv6_csum:1;
156 u8 csum:1;
157 170
158 /* Internal feature information. */ 171 /* Internal feature information. */
159 u8 can_queue:1; /* can queue packets for receiver? */ 172 u8 can_queue:1; /* can queue packets for receiver? */
@@ -163,6 +176,7 @@ struct xenvif {
163 unsigned long credit_usec; 176 unsigned long credit_usec;
164 unsigned long remaining_credit; 177 unsigned long remaining_credit;
165 struct timer_list credit_timeout; 178 struct timer_list credit_timeout;
179 u64 credit_window_start;
166 180
167 /* Statistics */ 181 /* Statistics */
168 unsigned long rx_gso_checksum_fixup; 182 unsigned long rx_gso_checksum_fixup;