path: root/drivers/net/xen-netback/netback.c
author    Thomas Gleixner <tglx@linutronix.de>  2016-01-12 05:01:12 -0500
committer Thomas Gleixner <tglx@linutronix.de>  2016-01-12 05:01:12 -0500
commit    1f16f116b01c110db20ab808562c8b8bc3ee3d6e (patch)
tree      44db563f64cf5f8d62af8f99a61e2b248c44ea3a /drivers/net/xen-netback/netback.c
parent    03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2 (diff)
parent    f9eccf24615672896dc13251410c3f2f33a14f95 (diff)
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent
Pull in fixes from Daniel Lezcano:

 - Fix the vt8500 timer, which could lead to a system lockup when handling
   too small a delta (Roman Volkov)

 - Select CLKSRC_MMIO when the fsl_ftm_timer is enabled with COMPILE_TEST
   (Daniel Lezcano)

 - Prevent compiling timers that use the 'iomem' API when the architecture
   does not have HAS_IOMEM set (Richard Weinberger)
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--	drivers/net/xen-netback/netback.c	| 34
1 file changed, 15 insertions(+), 19 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e481f3710bd3..1049c34e7d43 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -258,18 +258,18 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
 						 struct netrx_pending_operations *npo)
 {
 	struct xenvif_rx_meta *meta;
-	struct xen_netif_rx_request *req;
+	struct xen_netif_rx_request req;
 
-	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
 
 	meta = npo->meta + npo->meta_prod++;
 	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
 	meta->gso_size = 0;
 	meta->size = 0;
-	meta->id = req->id;
+	meta->id = req.id;
 
 	npo->copy_off = 0;
-	npo->copy_gref = req->gref;
+	npo->copy_gref = req.gref;
 
 	return meta;
 }
@@ -424,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	struct xenvif *vif = netdev_priv(skb->dev);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int i;
-	struct xen_netif_rx_request *req;
+	struct xen_netif_rx_request req;
 	struct xenvif_rx_meta *meta;
 	unsigned char *data;
 	int head = 1;
@@ -443,15 +443,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 
 	/* Set up a GSO prefix descriptor, if necessary */
 	if ((1 << gso_type) & vif->gso_prefix_mask) {
-		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+		RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
 		meta = npo->meta + npo->meta_prod++;
 		meta->gso_type = gso_type;
 		meta->gso_size = skb_shinfo(skb)->gso_size;
 		meta->size = 0;
-		meta->id = req->id;
+		meta->id = req.id;
 	}
 
-	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
 	meta = npo->meta + npo->meta_prod++;
 
 	if ((1 << gso_type) & vif->gso_mask) {
@@ -463,9 +463,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	}
 
 	meta->size = 0;
-	meta->id = req->id;
+	meta->id = req.id;
 	npo->copy_off = 0;
-	npo->copy_gref = req->gref;
+	npo->copy_gref = req.gref;
 
 	data = skb->data;
 	while (data < skb_tail_pointer(skb)) {
@@ -679,9 +679,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
 	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
 	 * Otherwise the interface can seize up due to insufficient credit.
 	 */
-	max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
-	max_burst = min(max_burst, 131072UL);
-	max_burst = max(max_burst, queue->credit_bytes);
+	max_burst = max(131072UL, queue->credit_bytes);
 
 	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
 	max_credit = queue->remaining_credit + queue->credit_bytes;
@@ -711,7 +709,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
 		spin_unlock_irqrestore(&queue->response_lock, flags);
 		if (cons == end)
 			break;
-		txp = RING_GET_REQUEST(&queue->tx, cons++);
+		RING_COPY_REQUEST(&queue->tx, cons++, txp);
 	} while (1);
 	queue->tx.req_cons = cons;
 }
@@ -778,8 +776,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 		if (drop_err)
 			txp = &dropped_tx;
 
-		memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
-		       sizeof(*txp));
+		RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
 
 		/* If the guest submitted a frame >= 64 KiB then
 		 * first->size overflowed and following slots will
@@ -1112,8 +1109,7 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
 			return -EBADR;
 		}
 
-		memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
-		       sizeof(extra));
+		RING_COPY_REQUEST(&queue->tx, cons, &extra);
 		if (unlikely(!extra.type ||
 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			queue->tx.req_cons = ++cons;
@@ -1322,7 +1318,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
 		idx = queue->tx.req_cons;
 		rmb(); /* Ensure that we see the request before we copy it. */
-		memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
+		RING_COPY_REQUEST(&queue->tx, idx, &txreq);
 
 		/* Credit-based scheduling. */
 		if (txreq.size > queue->remaining_credit &&
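
Every hunk above makes the same conversion: RING_GET_REQUEST() yields a pointer into the shared ring page, which stays mapped writable by the frontend, so each field access re-reads guest-controlled memory; RING_COPY_REQUEST() snapshots the request into a backend-local struct before it is validated and used. A minimal standalone sketch of the difference follows; struct req, shared_slot, unsafe_read() and safe_read() are hypothetical names for illustration only and do not appear in the driver.

/* Hypothetical request layout standing in for xen_netif_tx_request. */
struct req {
	unsigned int id;
	unsigned int size;
};

/* Stand-in for a slot on the shared ring page, writable by the peer. */
static volatile struct req shared_slot;

/* Double-fetch pattern the patch removes: the value that is validated and
 * the value that is used come from two separate reads of shared memory,
 * so the frontend can change it between them. */
static unsigned int unsafe_read(void)
{
	if (shared_slot.size <= 4096)		/* first read: validated */
		return shared_slot.size;	/* second read: used */
	return 0;
}

/* Copy-then-use pattern the RING_COPY_REQUEST conversion achieves:
 * snapshot the request into private memory once, then validate and use
 * only the snapshot. */
static unsigned int safe_read(void)
{
	struct req local = shared_slot;		/* single coherent copy */

	return local.size <= 4096 ? local.size : 0;
}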