author	Zoltan Kiss <zoltan.kiss@citrix.com>	2014-03-24 19:59:51 -0400
committer	David S. Miller <davem@davemloft.net>	2014-03-26 16:33:42 -0400
commit	7aceb47a9df3383b24824c3e4bd4f029e4598fda
tree	f6a875320312166b874ec5b57ec0541ea7aef900 /drivers/net/xen-netback
parent	0e59a4a553df312b5308c75085f7f02b12680d12
xen-netback: Functional follow-up patch for grant mapping series
Ian made some late comments about the grant mapping series; I incorporated the functional outcomes into this patch:

- use the callback_param() macro to shorten access to pending_tx_info in xenvif_fill_frags() and xenvif_tx_submit()
- print an error message in xenvif_idx_unmap() before panicking

Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
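The first item boils down to a convenience macro: callback_param(vif, pending_idx) expands to vif->pending_tx_info[pending_idx].callback_struct, so the repeated long expressions in xenvif_fill_frags() and xenvif_tx_submit() collapse into one call. The following is a minimal standalone sketch of that expansion; the struct definitions are simplified stand-ins, not the real xen-netback types, and exist only so the example compiles in userspace.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures involved; the real
 * struct ubuf_info and struct pending_tx_info carry more fields.
 */
struct ubuf_info {
	void *ctx;	/* chains the frags' callback structs together */
};

struct pending_tx_info {
	struct ubuf_info callback_struct;
};

#define MAX_PENDING_REQS 8	/* arbitrary size for this sketch */

struct xenvif {
	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
};

/* The macro added by the patch, verbatim: shorthand for the per-slot
 * callback_struct in vif->pending_tx_info[].
 */
#define callback_param(vif, pending_idx) \
	(vif->pending_tx_info[pending_idx].callback_struct)

int main(void)
{
	struct xenvif vif_storage = { 0 };
	struct xenvif *vif = &vif_storage;
	unsigned int prev_pending_idx = 0, pending_idx = 1;

	/* Before the patch, the chaining in xenvif_fill_frags() was spelled
	 * out longhand:
	 */
	vif->pending_tx_info[prev_pending_idx].callback_struct.ctx =
		&(vif->pending_tx_info[pending_idx].callback_struct);

	/* After the patch, the same statement reads: */
	callback_param(vif, prev_pending_idx).ctx =
		&callback_param(vif, pending_idx);

	printf("slot %u chained to slot %u via ctx=%p\n",
	       prev_pending_idx, pending_idx,
	       callback_param(vif, prev_pending_idx).ctx);
	return 0;
}

Both forms compile to the same access; the macro only trims the source text, which is why the hunks below are line-for-line substitutions.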
Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--	drivers/net/xen-netback/netback.c	26
1 file changed, 19 insertions, 7 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index d3172fe0306f..cb784fe5220c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -99,6 +99,9 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
 }
 
+#define callback_param(vif, pending_idx) \
+	(vif->pending_tx_info[pending_idx].callback_struct)
+
 /* Find the containing VIF's structure from a pointer in pending_tx_info array
  */
 static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
@@ -1020,12 +1023,12 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
 		/* If this is not the first frag, chain it to the previous*/
 		if (unlikely(prev_pending_idx == INVALID_PENDING_IDX))
 			skb_shinfo(skb)->destructor_arg =
-				&vif->pending_tx_info[pending_idx].callback_struct;
+				&callback_param(vif, pending_idx);
 		else if (likely(pending_idx != prev_pending_idx))
-			vif->pending_tx_info[prev_pending_idx].callback_struct.ctx =
-				&(vif->pending_tx_info[pending_idx].callback_struct);
+			callback_param(vif, prev_pending_idx).ctx =
+				&callback_param(vif, pending_idx);
 
-		vif->pending_tx_info[pending_idx].callback_struct.ctx = NULL;
+		callback_param(vif, pending_idx).ctx = NULL;
 		prev_pending_idx = pending_idx;
 
 		txp = &vif->pending_tx_info[pending_idx].req;
@@ -1395,13 +1398,13 @@ static int xenvif_tx_submit(struct xenvif *vif)
 		memcpy(skb->data,
 		       (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
 		       data_len);
-		vif->pending_tx_info[pending_idx].callback_struct.ctx = NULL;
+		callback_param(vif, pending_idx).ctx = NULL;
 		if (data_len < txp->size) {
 			/* Append the packet payload as a fragment. */
 			txp->offset += data_len;
 			txp->size -= data_len;
 			skb_shinfo(skb)->destructor_arg =
-				&vif->pending_tx_info[pending_idx].callback_struct;
+				&callback_param(vif, pending_idx);
 		} else {
 			/* Schedule a response immediately. */
 			xenvif_idx_unmap(vif, pending_idx);
@@ -1681,7 +1684,16 @@ void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
 
 	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
 				&vif->mmap_pages[pending_idx], 1);
-	BUG_ON(ret);
+	if (ret) {
+		netdev_err(vif->dev,
+			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
+			   ret,
+			   pending_idx,
+			   tx_unmap_op.host_addr,
+			   tx_unmap_op.handle,
+			   tx_unmap_op.status);
+		BUG();
+	}
 
 	xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
 }
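The last hunk swaps the bare BUG_ON(ret) for a netdev_err() that records the return value, pending index, host address, grant handle and unmap status before calling BUG(), so the crash log carries enough context to diagnose the failed unmap. A rough, userspace-only sketch of that report-then-die shape follows; fprintf()/abort() stand in for netdev_err()/BUG(), and struct unmap_op is a hypothetical stand-in for the real grant-table unmap operation, reduced to the three fields the message prints.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Hypothetical stand-in for the grant-table unmap op; the real structure
 * has more fields than the three reported here.
 */
struct unmap_op {
	uint64_t host_addr;
	uint32_t handle;
	int status;
};

/* Userspace stand-ins for netdev_err() and BUG(). */
#define log_err(...)	fprintf(stderr, __VA_ARGS__)
#define die()		abort()

/* Shape of the patched error path in xenvif_idx_unmap(): report every
 * useful detail about the failed unmap, then panic, instead of a bare
 * BUG_ON(ret).
 */
static void check_unmap(int ret, uint16_t pending_idx, const struct unmap_op *op)
{
	if (ret) {
		log_err("Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
			ret, (int)pending_idx,
			(unsigned long long)op->host_addr,
			(unsigned)op->handle, op->status);
		die();
	}
}

int main(void)
{
	struct unmap_op op = { .host_addr = 0x12345000, .handle = 7, .status = 0 };

	/* ret == 0 mirrors the normal success path: nothing is logged.
	 * A non-zero ret would print the details above and then abort().
	 */
	check_unmap(0, 42, &op);
	return 0;
}

The behaviour on failure is unchanged (the host still panics); the only difference is that the diagnostic reaches the log first.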