author	Zoltan Kiss <zoltan.kiss@citrix.com>	2014-03-06 16:48:27 -0500
committer	David S. Miller <davem@davemloft.net>	2014-03-07 15:56:35 -0500
commit	62bad3199a4c20505fc36c169deef20b25e17c5f
tree	9aea137ac129ba2a011974dbb45fe9cf292215f4
parent	f53c3fe8dad725b014e9c7682720d8e3e2a8a5b3
xen-netback: Remove old TX grant copy definitions and fix indentations
These became obsolete with grant mapping. The indentations were left
this way intentionally in the previous patches, to keep those diffs
readable; this patch fixes them up.

NOTE: if bisect brought you here, you should apply the series up until
"xen-netback: Timeout packets in RX path", otherwise Windows guests
can't work properly and malicious guests can block other guests by not
releasing their sent packets.

Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/xen-netback/common.h	36
-rw-r--r--	drivers/net/xen-netback/netback.c	72
2 files changed, 15 insertions(+), 93 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 5a991266a394..49109afa2253 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -48,37 +48,8 @@
 typedef unsigned int pending_ring_idx_t;
 #define INVALID_PENDING_RING_IDX (~0U)
 
-/* For the head field in pending_tx_info: it is used to indicate
- * whether this tx info is the head of one or more coalesced requests.
- *
- * When head != INVALID_PENDING_RING_IDX, it means the start of a new
- * tx requests queue and the end of previous queue.
- *
- * An example sequence of head fields (I = INVALID_PENDING_RING_IDX):
- *
- * ...|0 I I I|5 I|9 I I I|...
- * -->|<-INUSE----------------
- *
- * After consuming the first slot(s) we have:
- *
- * ...|V V V V|5 I|9 I I I|...
- * -----FREE->|<-INUSE--------
- *
- * where V stands for "valid pending ring index". Any number other
- * than INVALID_PENDING_RING_IDX is OK. These entries are considered
- * free and can contain any number other than
- * INVALID_PENDING_RING_IDX. In practice we use 0.
- *
- * The in use non-INVALID_PENDING_RING_IDX (say 0, 5 and 9 in the
- * above example) number is the index into pending_tx_info and
- * mmap_pages arrays.
- */
 struct pending_tx_info {
-	struct xen_netif_tx_request req; /* coalesced tx request */
-	pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
-				  * if it is head of one or more tx
-				  * reqs
-				  */
+	struct xen_netif_tx_request req; /* tx request */
 	/* Callback data for released SKBs. The callback is always
 	 * xenvif_zerocopy_callback, desc contains the pending_idx, which is
 	 * also an index in pending_tx_info array. It is initialized in
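The comment block removed above (together with the pending_tx_is_head()
helper deleted from netback.c below) described the old coalescing
invariant: a slot whose head field holds a valid ring index starts a
group of coalesced requests, and INVALID_PENDING_RING_IDX marks a
continuation slot. A standalone toy model of that invariant, for readers
studying the pre-grant-mapping scheme (the names mirror the removed
code, but this is an illustration, not driver code):

#include <stdio.h>

#define INVALID_PENDING_RING_IDX (~0U)
#define RING_SIZE 16u

/* head[] plays the role of pending_tx_info[].head in the removed code. */
static unsigned int head[RING_SIZE];

/* Toy version of the deleted pending_tx_is_head(): a slot starts a
 * coalesced group iff its head field is a valid pending ring index. */
static int is_head(unsigned int idx)
{
	return head[idx] != INVALID_PENDING_RING_IDX;
}

int main(void)
{
	unsigned int i;

	/* Recreate the removed comment's example:
	 * ...|0 I I I|5 I|9 I I I|...   (I = INVALID_PENDING_RING_IDX) */
	for (i = 0; i < RING_SIZE; i++)
		head[i] = INVALID_PENDING_RING_IDX;
	head[0] = 0;	/* head of a four-slot request */
	head[4] = 5;	/* head of a two-slot request */
	head[6] = 9;	/* head of a four-slot request */

	for (i = 0; i < 10; i++)
		printf("slot %2u: %s\n", i,
		       is_head(i) ? "head" : "continuation");
	return 0;
}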
@@ -148,11 +119,6 @@ struct xenvif {
 	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
 	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
 
-	/* Coalescing tx requests before copying makes number of grant
-	 * copy ops greater or equal to number of slots required. In
-	 * worst case a tx request consumes 2 gnttab_copy.
-	 */
-	struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
 	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
 	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
 	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
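The sizing comment deleted here is the whole story behind the
2*MAX_PENDING_REQS factor: with coalescing, a single tx request could
consume two gnttab_copy operations in the worst case, while grant
mapping needs exactly one map (and one unmap) op per pending slot.
Spelled out as a sketch (the MAX_PENDING_REQS value is illustrative,
not quoted from the driver):

#define MAX_PENDING_REQS 256	/* illustrative ring size */

/* Old grant-copy scheme: worst case two gnttab_copy ops per request,
 * so the tx_copy_ops array removed above was twice the ring size. */
enum { OLD_TX_COPY_OPS = 2 * MAX_PENDING_REQS };	/* 512 */

/* New grant-mapping scheme: one map op (and one unmap op) per slot. */
enum { NEW_TX_MAP_OPS = MAX_PENDING_REQS };		/* 256 */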
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index cb29134147d1..46a75706cb78 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -62,16 +62,6 @@ module_param(separate_tx_rx_irq, bool, 0644);
 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
 module_param(fatal_skb_slots, uint, 0444);
 
-/*
- * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
- * one or more merged tx requests, otherwise it is the continuation of
- * previous tx request.
- */
-static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
-{
-	return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
-}
-
 static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
 			       u8 status);
 
@@ -790,19 +780,6 @@ static int xenvif_count_requests(struct xenvif *vif,
 	return slots;
 }
 
-static struct page *xenvif_alloc_page(struct xenvif *vif,
-				      u16 pending_idx)
-{
-	struct page *page;
-
-	page = alloc_page(GFP_ATOMIC|__GFP_COLD);
-	if (!page)
-		return NULL;
-	vif->mmap_pages[pending_idx] = page;
-
-	return page;
-}
-
 
 struct xenvif_tx_cb {
 	u16 pending_idx;
@@ -832,13 +809,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
 	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
-	u16 head_idx = 0;
-	int slot, start;
-	struct page *page;
-	pending_ring_idx_t index, start_idx = 0;
-	uint16_t dst_offset;
+	int start;
+	pending_ring_idx_t index;
 	unsigned int nr_slots;
-	struct pending_tx_info *first = NULL;
 
 	/* At this point shinfo->nr_frags is in fact the number of
 	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
@@ -850,8 +823,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
 
 	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
 	     shinfo->nr_frags++, txp++, gop++) {
-			index = pending_index(vif->pending_cons++);
-			pending_idx = vif->pending_ring[index];
-			xenvif_tx_create_gop(vif, pending_idx, txp, gop);
-			frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+		index = pending_index(vif->pending_cons++);
+		pending_idx = vif->pending_ring[index];
+		xenvif_tx_create_gop(vif, pending_idx, txp, gop);
+		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
 	}
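The re-indented loop above is the consumer side of the pending ring:
each extra slot of a packet takes one recycled pending index off the
ring and gets one grant map op. A minimal standalone model of that
pattern (names mirror the driver's, the ring size is an assumption;
the matching producer sketch follows the xenvif_idx_release() hunk
below):

#define MAX_PENDING_REQS 256	/* illustrative; must be a power of two */

static unsigned short pending_ring[MAX_PENDING_REQS];
static unsigned int pending_cons;	/* consumer index, grows forever */

/* Ring indices wrap by masking, as in the driver's pending_index(). */
static unsigned int pending_index(unsigned int i)
{
	return i & (MAX_PENDING_REQS - 1);
}

/* One call per slot: yields the pending_idx that the loop above feeds
 * to xenvif_tx_create_gop() and frag_set_pending_idx(). */
static unsigned short consume_pending_slot(void)
{
	return pending_ring[pending_index(pending_cons++)];
}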
@@ -859,18 +832,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
 	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
 
 	return gop;
-err:
-	/* Unwind, freeing all pages and sending error responses. */
-	while (shinfo->nr_frags-- > start) {
-		xenvif_idx_release(vif,
-				frag_get_pending_idx(&frags[shinfo->nr_frags]),
-				XEN_NETIF_RSP_ERROR);
-	}
-	/* The head too, if necessary. */
-	if (start)
-		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
-
-	return NULL;
 }
 
 static inline void xenvif_grant_handle_set(struct xenvif *vif,
@@ -910,7 +871,6 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
 	struct pending_tx_info *tx_info;
 	int nr_frags = shinfo->nr_frags;
 	int i, err, start;
-	u16 peek; /* peek into next tx request */
 
 	/* Check status of header. */
 	err = gop->status;
@@ -924,14 +884,12 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
 
 	for (i = start; i < nr_frags; i++) {
 		int j, newerr;
-		pending_ring_idx_t head;
 
 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 		tx_info = &vif->pending_tx_info[pending_idx];
-		head = tx_info->head;
 
 		/* Check error status: if okay then remember grant handle. */
 		newerr = (++gop)->status;
 
 		if (likely(!newerr)) {
 			xenvif_grant_handle_set(vif, pending_idx , gop->handle);
@@ -1136,7 +1094,6 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 	       (skb_queue_len(&vif->tx_queue) < budget)) {
 		struct xen_netif_tx_request txreq;
 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
-		struct page *page;
 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
 		u16 pending_idx;
 		RING_IDX idx;
@@ -1507,18 +1464,17 @@ static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
 {
 	struct pending_tx_info *pending_tx_info;
 	pending_ring_idx_t index;
-	u16 peek; /* peek into next tx request */
 	unsigned long flags;
 
-		pending_tx_info = &vif->pending_tx_info[pending_idx];
-		spin_lock_irqsave(&vif->response_lock, flags);
-		make_tx_response(vif, &pending_tx_info->req, status);
-		index = pending_index(vif->pending_prod);
-		vif->pending_ring[index] = pending_idx;
-		/* TX shouldn't use the index before we give it back here */
-		mb();
-		vif->pending_prod++;
-		spin_unlock_irqrestore(&vif->response_lock, flags);
+	pending_tx_info = &vif->pending_tx_info[pending_idx];
+	spin_lock_irqsave(&vif->response_lock, flags);
+	make_tx_response(vif, &pending_tx_info->req, status);
+	index = pending_index(vif->pending_prod);
+	vif->pending_ring[index] = pending_idx;
+	/* TX shouldn't use the index before we give it back here */
+	mb();
+	vif->pending_prod++;
+	spin_unlock_irqrestore(&vif->response_lock, flags);
 }
 
 
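One detail worth making explicit in the surviving code: under
response_lock, xenvif_idx_release() stores the recycled index into the
ring and only after the mb() advances pending_prod, so the TX path can
never observe the new producer value before the entry itself. A hedged
userspace model of that publish pattern, reusing the declarations from
the consumer sketch above (C11 release semantics stand in for the
kernel's mb() and response_lock; this is an illustration, not the
driver's actual code):

#include <stdatomic.h>

static _Atomic unsigned int pending_prod;	/* producer index */

/* Toy counterpart of xenvif_idx_release(): the recycled pending_idx
 * must be visible in pending_ring[] before pending_prod moves, or a
 * consumer could read a stale slot. */
static void release_pending_slot(unsigned short pending_idx)
{
	unsigned int prod = atomic_load_explicit(&pending_prod,
						 memory_order_relaxed);

	pending_ring[pending_index(prod)] = pending_idx;
	/* Release ordering: the store above happens-before the bump. */
	atomic_fetch_add_explicit(&pending_prod, 1, memory_order_release);
}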