path: root/drivers/net/xen-netback/common.h
author    David S. Miller <davem@davemloft.net>  2014-01-06 17:37:45 -0500
committer David S. Miller <davem@davemloft.net>  2014-01-06 17:37:45 -0500
commit    56a4342dfe3145cd66f766adccb28fd9b571606d (patch)
tree      d1593764488ff8cbb0b83cb9ae35fd968bf81760 /drivers/net/xen-netback/common.h
parent    805c1f4aedaba1bc8d839e7c27b128083dd5c2f0 (diff)
parent    fe0d692bbc645786bce1a98439e548ae619269f5 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
	net/ipv6/ip6_tunnel.c
	net/ipv6/ip6_vti.c

ipv6 tunnel statistic bug fixes conflicting with consolidation into
generic sw per-cpu net stats.

qlogic conflict between queue counting bug fix and the addition of
multiple MAC address support.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/xen-netback/common.h')
-rw-r--r--  drivers/net/xen-netback/common.h | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index ba30a6d9fefa..c955fc39d69a 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
 
 #define MAX_PENDING_REQS 256
 
+/* It's possible for an skb to have a maximal number of frags
+ * but still be less than MAX_BUFFER_OFFSET in size. Thus the
+ * worst-case number of copy operations is MAX_SKB_FRAGS per
+ * ring slot.
+ */
+#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
+
 struct xenvif {
 	/* Unique identifier for this interface. */
 	domid_t domid;
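
To make the sizing above concrete: an skb can occupy a single ring slot yet still carry the maximal number of frags, each needing its own grant copy, so the per-slot worst case is MAX_SKB_FRAGS copy operations rather than the 2 assumed before. The following sketch spells out the arithmetic with typical constants for a 4K-page build; the constant values are assumptions for illustration, not taken from this patch:

/* Illustrative sketch only -- constant values assumed for a 4K-page
 * build, not quoted from this diff.
 */
#define XEN_NETIF_RX_RING_SIZE	256	/* assumed __CONST_RING_SIZE() result */
#define MAX_SKB_FRAGS		17	/* assumed: (65536 / 4096) + 1 */

#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)

/* MAX_GRANT_COPY_OPS = 17 * 256 = 4352 grant-copy slots, versus the
 * 2 * XEN_NETIF_RX_RING_SIZE = 512 slots provided by the old
 * fixed-size array, which the frag-heavy worst case could overrun.
 */
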
@@ -141,13 +148,13 @@ struct xenvif {
 	 */
 	bool rx_event;
 
-	/* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
-	 * head/fragment page uses 2 copy operations because it
-	 * straddles two buffers in the frontend.
-	 */
-	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
-	struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
+	/* This array is allocated separately as it is large */
+	struct gnttab_copy *grant_copy_op;
 
+	/* We create one meta structure per ring request we consume, so
+	 * the maximum number is the same as the ring size.
+	 */
+	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
 
 	u8 fe_dev_addr[6];
 
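
With grant_copy_op reduced to a bare pointer, the array has to be allocated out of line. This page only shows common.h, so the setup/teardown side is not visible here; the following is a sketch of what it might look like, with hypothetical helper names, assuming a vmalloc-backed allocation since the array is far too large for the embedded-array approach that was removed:

/* Hypothetical helpers -- the real allocation would live in the
 * interface setup/teardown paths, which are outside this file's diff.
 */
#include <linux/vmalloc.h>

static int xenvif_example_alloc_copy_ops(struct xenvif *vif)
{
	/* Thousands of gnttab_copy entries: too big to embed in
	 * struct xenvif, hence a separately allocated array.
	 */
	vif->grant_copy_op = vzalloc(sizeof(struct gnttab_copy) *
				     MAX_GRANT_COPY_OPS);
	return vif->grant_copy_op ? 0 : -ENOMEM;
}

static void xenvif_example_free_copy_ops(struct xenvif *vif)
{
	vfree(vif->grant_copy_op);
	vif->grant_copy_op = NULL;
}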