author	Jeremy Fitzhardinge <jeremy@goop.org>	2007-08-07 17:56:42 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-10 19:50:30 -0400
commit	5dcddfae63bd26da0f11a8e40f0ae555b5ac624e
tree	288c60c497aa6e33785fd5463e8af413d2b4730d	/drivers/net/xen-netfront.c
parent	df570f93337ddb46f1e30dd167c27b5fdcec637b
xen-netfront: remove dead code
This patch removes some residual dead code left over from removing the
"flip" receive mode.  It doesn't change the generated output at all,
since gcc had already recognized the code as dead.

This resolves the "regression" reported by Adrian.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Adrian Bunk <bunk@stusta.de>
Cc: Michal Piotrowski <michal.k.k.piotrowski@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
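As a rough illustration (not part of the patch) of why gcc could already drop the code: once the flip receive path was removed, nr_flips was initialized to zero and never incremented, so the block guarded by "if (nr_flips != 0)" became statically unreachable and dead-code elimination removed it from the object code. A minimal stand-alone C sketch of that pattern, using hypothetical names:

#include <stdio.h>

int main(void)
{
	int nr_flips = 0;	/* never changed once the flip path is gone */

	if (nr_flips != 0)	/* provably false; gcc drops this branch at -O2 */
		printf("flip receive path\n");	/* emits no object code */

	printf("copy receive path\n");		/* only the surviving path remains */
	return 0;
}

Removing the source of such a branch therefore cleans up the code without altering the compiled output, which is exactly the claim in the commit message.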
Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--	drivers/net/xen-netfront.c	37
1 file changed, 2 insertions(+), 35 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 70e551c19e3a..b4de126825e4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -214,11 +214,9 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
 	struct page *page;
 	int i, batch_target, notify;
 	RING_IDX req_prod = np->rx.req_prod_pvt;
-	struct xen_memory_reservation reservation;
 	grant_ref_t ref;
 	unsigned long pfn;
 	void *vaddr;
-	int nr_flips;
 	struct xen_netif_rx_request *req;
 
 	if (unlikely(!netif_carrier_ok(dev)))
@@ -268,7 +266,7 @@ no_skb:
 		np->rx_target = np->rx_max_target;
 
  refill:
-	for (nr_flips = i = 0; ; i++) {
+	for (i = 0; ; i++) {
 		skb = __skb_dequeue(&np->rx_batch);
 		if (skb == NULL)
 			break;
@@ -297,38 +295,7 @@ no_skb:
 		req->gref = ref;
 	}
 
-	if (nr_flips != 0) {
-		reservation.extent_start = np->rx_pfn_array;
-		reservation.nr_extents = nr_flips;
-		reservation.extent_order = 0;
-		reservation.address_bits = 0;
-		reservation.domid = DOMID_SELF;
-
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* After all PTEs have been zapped, flush the TLB. */
-			np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
-				UVMF_TLB_FLUSH|UVMF_ALL;
-
-			/* Give away a batch of pages. */
-			np->rx_mcl[i].op = __HYPERVISOR_memory_op;
-			np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
-			np->rx_mcl[i].args[1] = (unsigned long)&reservation;
-
-			/* Zap PTEs and give away pages in one big
-			 * multicall. */
-			(void)HYPERVISOR_multicall(np->rx_mcl, i+1);
-
-			/* Check return status of HYPERVISOR_memory_op(). */
-			if (unlikely(np->rx_mcl[i].result != i))
-				panic("Unable to reduce memory reservation\n");
-		} else {
-			if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-						 &reservation) != i)
-				panic("Unable to reduce memory reservation\n");
-		}
-	} else {
-		wmb();		/* barrier so backend seens requests */
-	}
+	wmb();		/* barrier so backend seens requests */
 
 	/* Above is a suitable barrier to ensure backend will see requests. */
 	np->rx.req_prod_pvt = req_prod + i;