author		Jeremy Fitzhardinge <jeremy@goop.org>	2007-10-15 15:59:53 -0400
committer	Jeff Garzik <jeff@garzik.org>	2007-10-15 16:13:36 -0400
commit		84284d3c1d6372bc9ab496607661d230d9c45de4
tree		3226b21ccb77955380ebfb1201c5714c88f1dc38 /drivers/net/xen-netfront.c
parent		e9edda697ed7697f1288d0656570e49c47e204ae
xen-netfront: rearrange netfront structure to separate tx and rx
Keep tx and rx elements separate on different cachelines to prevent
bouncing.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Acked-by: Jeff Garzik <jgarzik@pobox.com>
Cc: Stephen Hemminger <shemminger@linux-foundation.org>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
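To illustrate the idea behind the patch (not part of the commit itself), here is a minimal userspace C sketch of keeping transmit-path and receive-path state on separate cachelines, so a CPU writing tx fields does not bounce the cacheline that holds the rx fields. The struct name fake_netfront_info, its field names, and the 64-byte cacheline size are assumptions for illustration only; the kernel achieves the same effect with ____cacheline_aligned_in_smp, as the diff below shows.

/*
 * Illustrative sketch only: force the rx-path fields onto their own
 * cacheline so updates to tx state do not invalidate the line holding
 * rx state on another CPU.  Assumes a 64-byte cacheline; the kernel
 * uses ____cacheline_aligned_in_smp rather than a hard-coded size.
 */
#include <stdio.h>
#include <stddef.h>

#define CACHELINE_SIZE 64	/* assumption for this sketch */

struct fake_netfront_info {
	/* fields touched on the transmit path */
	int tx_lock;
	int tx_ring_ref;
	unsigned int tx_head;

	/* fields touched on the receive path, pushed to the next cacheline */
	int rx_lock __attribute__((aligned(CACHELINE_SIZE)));
	int rx_ring_ref;
	unsigned int rx_head;
};

int main(void)
{
	/* The rx group should start at a multiple of CACHELINE_SIZE. */
	printf("tx_lock offset: %zu\n", offsetof(struct fake_netfront_info, tx_lock));
	printf("rx_lock offset: %zu\n", offsetof(struct fake_netfront_info, rx_lock));
	return 0;
}

Built with gcc or clang, the second offset should print 64, confirming that the rx group begins on a fresh cacheline while the tx group occupies the first one.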
Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--	drivers/net/xen-netfront.c	35
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f464b82c7d5f..7fd505cc4f7a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -74,22 +74,12 @@ struct netfront_info {
 
 	struct napi_struct napi;
 
-	struct xen_netif_tx_front_ring tx;
-	struct xen_netif_rx_front_ring rx;
-
-	spinlock_t tx_lock;
-	spinlock_t rx_lock;
-
 	unsigned int evtchn;
+	struct xenbus_device *xbdev;
 
-	/* Receive-ring batched refills. */
-#define RX_MIN_TARGET 8
-#define RX_DFL_MIN_TARGET 64
-#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
-	unsigned rx_min_target, rx_max_target, rx_target;
-	struct sk_buff_head rx_batch;
-
-	struct timer_list rx_refill_timer;
+	spinlock_t tx_lock;
+	struct xen_netif_tx_front_ring tx;
+	int tx_ring_ref;
 
 	/*
 	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
@@ -108,14 +98,23 @@ struct netfront_info {
 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 	unsigned tx_skb_freelist;
 
+	spinlock_t rx_lock ____cacheline_aligned_in_smp;
+	struct xen_netif_rx_front_ring rx;
+	int rx_ring_ref;
+
+	/* Receive-ring batched refills. */
+#define RX_MIN_TARGET 8
+#define RX_DFL_MIN_TARGET 64
+#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
+	unsigned rx_min_target, rx_max_target, rx_target;
+	struct sk_buff_head rx_batch;
+
+	struct timer_list rx_refill_timer;
+
 	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 	grant_ref_t gref_rx_head;
 	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 
-	struct xenbus_device *xbdev;
-	int tx_ring_ref;
-	int rx_ring_ref;
-
 	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
 	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
 	struct mmu_update rx_mmu[NET_RX_RING_SIZE];