author	Björn Töpel <bjorn.topel@intel.com>	2018-06-04 07:57:11 -0400
committer	Daniel Borkmann <daniel@iogearbox.net>	2018-06-04 11:21:02 -0400
commit	4e64c835254095f55044d393e628dd3e92fca304 (patch)
tree	51db5592136aab51ca7451006085044715affbe9 /net/xdp
parent	bd3a08aaa9a383ffbbd5b788b797ae6e64eaa7a1 (diff)
xsk: proper fill queue descriptor validation
Previously the fill queue descriptor was not copied to kernel space prior to validating it, making it possible for userland to change the descriptor post-kernel-validation.

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Diffstat (limited to 'net/xdp')
-rw-r--r--	net/xdp/xsk.c	11
-rw-r--r--	net/xdp/xsk_queue.h	32
2 files changed, 14 insertions(+), 29 deletions(-)
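For context, the bug being fixed is a time-of-check-to-time-of-use race: the fill queue lives in memory shared with (and writable by) userspace, so validating a descriptor in place and then re-reading it from the ring lets userland swap in a different value between the check and the use. The following is a minimal, self-contained sketch of that pattern, not code from the patch; the ring layout, sizes and helper names are invented for illustration, and __atomic_load_n() stands in for the kernel's READ_ONCE().

#include <stdbool.h>
#include <stdint.h>

#define DEMO_RING_SIZE 64

struct demo_ring {
	uint32_t desc[DEMO_RING_SIZE];	/* shared with untrusted userspace */
};

static bool demo_id_is_valid(uint32_t id)
{
	return id < DEMO_RING_SIZE;	/* stand-in bounds check */
}

/* Racy (old pattern): validate the shared slot, then read it again for use. */
static uint32_t demo_peek_racy(struct demo_ring *ring, unsigned int idx)
{
	if (demo_id_is_valid(ring->desc[idx]))	/* userspace may rewrite desc[idx] ... */
		return ring->desc[idx];		/* ... before this second read */
	return 0;
}

/* Fixed (new pattern): snapshot once, then validate and use only the copy. */
static uint32_t demo_peek_safe(struct demo_ring *ring, unsigned int idx)
{
	uint32_t id = __atomic_load_n(&ring->desc[idx], __ATOMIC_RELAXED);

	return demo_id_is_valid(id) ? id : 0;
}

This mirrors what the patch does in xskq_validate_id() and xskq_validate_desc(): copy the descriptor out of the shared ring with READ_ONCE() first, and pass only that kernel-side copy to validation and to the caller.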
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index cce0e4f8a536..43554eb56fe6 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -41,20 +41,19 @@ bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
 
 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-	u32 *id, len = xdp->data_end - xdp->data;
+	u32 id, len = xdp->data_end - xdp->data;
 	void *buffer;
-	int err = 0;
+	int err;
 
 	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
 		return -EINVAL;
 
-	id = xskq_peek_id(xs->umem->fq);
-	if (!id)
+	if (!xskq_peek_id(xs->umem->fq, &id))
 		return -ENOSPC;
 
-	buffer = xdp_umem_get_data_with_headroom(xs->umem, *id);
+	buffer = xdp_umem_get_data_with_headroom(xs->umem, id);
 	memcpy(buffer, xdp->data, len);
-	err = xskq_produce_batch_desc(xs->rx, *id, len,
+	err = xskq_produce_batch_desc(xs->rx, id, len,
 				      xs->umem->frame_headroom);
 	if (!err)
 		xskq_discard_id(xs->umem->fq);
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index cb8e5be35110..b5924e7aeb2b 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -85,14 +85,15 @@ static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
 	return true;
 }
 
-static inline u32 *xskq_validate_id(struct xsk_queue *q)
+static inline u32 *xskq_validate_id(struct xsk_queue *q, u32 *id)
 {
 	while (q->cons_tail != q->cons_head) {
 		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
 		unsigned int idx = q->cons_tail & q->ring_mask;
 
-		if (xskq_is_valid_id(q, ring->desc[idx]))
-			return &ring->desc[idx];
+		*id = READ_ONCE(ring->desc[idx]);
+		if (xskq_is_valid_id(q, *id))
+			return id;
 
 		q->cons_tail++;
 	}
@@ -100,28 +101,22 @@ static inline u32 *xskq_validate_id(struct xsk_queue *q)
 	return NULL;
 }
 
-static inline u32 *xskq_peek_id(struct xsk_queue *q)
+static inline u32 *xskq_peek_id(struct xsk_queue *q, u32 *id)
 {
-	struct xdp_umem_ring *ring;
-
 	if (q->cons_tail == q->cons_head) {
 		WRITE_ONCE(q->ring->consumer, q->cons_tail);
 		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
 
 		/* Order consumer and data */
 		smp_rmb();
-
-		return xskq_validate_id(q);
 	}
 
-	ring = (struct xdp_umem_ring *)q->ring;
-	return &ring->desc[q->cons_tail & q->ring_mask];
+	return xskq_validate_id(q, id);
 }
 
 static inline void xskq_discard_id(struct xsk_queue *q)
 {
 	q->cons_tail++;
-	(void)xskq_validate_id(q);
 }
 
 static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
@@ -174,11 +169,9 @@ static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
 		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
 		unsigned int idx = q->cons_tail & q->ring_mask;
 
-		if (xskq_is_valid_desc(q, &ring->desc[idx])) {
-			if (desc)
-				*desc = ring->desc[idx];
+		*desc = READ_ONCE(ring->desc[idx]);
+		if (xskq_is_valid_desc(q, desc))
 			return desc;
-		}
 
 		q->cons_tail++;
 	}
@@ -189,27 +182,20 @@ static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
 static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
 					      struct xdp_desc *desc)
 {
-	struct xdp_rxtx_ring *ring;
-
 	if (q->cons_tail == q->cons_head) {
 		WRITE_ONCE(q->ring->consumer, q->cons_tail);
 		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
 
 		/* Order consumer and data */
 		smp_rmb();
-
-		return xskq_validate_desc(q, desc);
 	}
 
-	ring = (struct xdp_rxtx_ring *)q->ring;
-	*desc = ring->desc[q->cons_tail & q->ring_mask];
-	return desc;
+	return xskq_validate_desc(q, desc);
 }
 
 static inline void xskq_discard_desc(struct xsk_queue *q)
 {
 	q->cons_tail++;
-	(void)xskq_validate_desc(q, NULL);
 }
 
 static inline int xskq_produce_batch_desc(struct xsk_queue *q,