author     Toke Høiland-Jørgensen <toke@redhat.com>  2019-01-09 11:09:42 -0500
committer  David S. Miller <davem@davemloft.net>     2019-01-15 23:12:00 -0500
commit     f6bab199315b70fd83fe3ee0947bc84c7a35f3d4 (patch)
tree       c662d696e0bfe2ee2be86cbdf0da095d9d6389ba
parent     80b3671e9377916bf2b02e56113fa7377ce5705a (diff)
sched: Avoid dereferencing skb pointer after child enqueue
Parent qdiscs may dereference the pointer to the enqueued skb after
enqueue. However, both CAKE and TBF call consume_skb() on the original skb
when splitting GSO packets, leading to a potential use-after-free in the
parent. Fix this by avoiding dereferencing the skb pointer after enqueueing
to the child.
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
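The fix is mechanical and identical in shape across all eight schedulers: snapshot whatever the parent needs from the skb before handing it to the child, and never touch the skb pointer afterwards. A minimal sketch of the bug and the fix follows; the function names parent_enqueue_unsafe/parent_enqueue_safe are illustrative only, not taken from the patch (the real instances are in the diff below).

/* Sketch only; illustrative names, not code from this patch. */
static int parent_enqueue_unsafe(struct sk_buff *skb, struct Qdisc *sch,
                                 struct Qdisc *child, struct sk_buff **to_free)
{
        int err = child->ops->enqueue(skb, child, to_free);

        if (err != NET_XMIT_SUCCESS)
                return err;

        /* BUG: a child that splits GSO packets (CAKE, TBF) may already
         * have called consume_skb() on skb, so this dereference is a
         * potential use-after-free.
         */
        qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
}

static int parent_enqueue_safe(struct sk_buff *skb, struct Qdisc *sch,
                               struct Qdisc *child, struct sk_buff **to_free)
{
        unsigned int len = qdisc_pkt_len(skb);  /* snapshot before enqueue */
        int err = child->ops->enqueue(skb, child, to_free);

        if (err != NET_XMIT_SUCCESS)
                return err;

        sch->qstats.backlog += len;             /* no skb dereference here */
        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
}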
 net/sched/sch_cbs.c    |  3 ++-
 net/sched/sch_drr.c    |  3 ++-
 net/sched/sch_dsmark.c |  3 ++-
 net/sched/sch_hfsc.c   |  5 ++---
 net/sched/sch_htb.c    |  3 ++-
 net/sched/sch_prio.c   |  3 ++-
 net/sched/sch_qfq.c    | 16 +++++++++-------
 net/sched/sch_tbf.c    |  3 ++-
 8 files changed, 23 insertions(+), 16 deletions(-)
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index e689e11b6d0f..c6a502933fe7 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -88,13 +88,14 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                              struct Qdisc *child,
                              struct sk_buff **to_free)
 {
+       unsigned int len = qdisc_pkt_len(skb);
        int err;
 
        err = child->ops->enqueue(skb, child, to_free);
        if (err != NET_XMIT_SUCCESS)
                return err;
 
-       qdisc_qstats_backlog_inc(sch, skb);
+       sch->qstats.backlog += len;
        sch->q.qlen++;
 
        return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index cdebaed0f8cf..feaf47178653 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -350,6 +350,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
 static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                        struct sk_buff **to_free)
 {
+       unsigned int len = qdisc_pkt_len(skb);
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        int err = 0;
@@ -376,7 +377,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                cl->deficit = cl->quantum;
        }
 
-       qdisc_qstats_backlog_inc(sch, skb);
+       sch->qstats.backlog += len;
        sch->q.qlen++;
        return err;
 }
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index f6f480784bc6..42471464ded3 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -199,6 +199,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
 static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                           struct sk_buff **to_free)
 {
+       unsigned int len = qdisc_pkt_len(skb);
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        int err;
 
@@ -271,7 +272,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                return err;
        }
 
-       qdisc_qstats_backlog_inc(sch, skb);
+       sch->qstats.backlog += len;
        sch->q.qlen++;
 
        return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index b18ec1f6de60..6bb8f73a8473 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1539,6 +1539,7 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
 static int
 hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
+       unsigned int len = qdisc_pkt_len(skb);
        struct hfsc_class *cl;
        int uninitialized_var(err);
 
@@ -1560,8 +1561,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
        }
 
        if (cl->qdisc->q.qlen == 1) {
-               unsigned int len = qdisc_pkt_len(skb);
-
                if (cl->cl_flags & HFSC_RSC)
                        init_ed(cl, len);
                if (cl->cl_flags & HFSC_FSC)
@@ -1576,7 +1575,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 
        }
 
-       qdisc_qstats_backlog_inc(sch, skb);
+       sch->qstats.backlog += len;
        sch->q.qlen++;
 
        return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 58b449490757..30f9da7e1076 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -581,6 +581,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                        struct sk_buff **to_free)
 {
        int uninitialized_var(ret);
+       unsigned int len = qdisc_pkt_len(skb);
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = htb_classify(skb, sch, &ret);
 
@@ -610,7 +611,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                htb_activate(q, cl);
        }
 
-       qdisc_qstats_backlog_inc(sch, skb);
+       sch->qstats.backlog += len;
        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
 }
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index cdf68706e40f..847141cd900f 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -72,6 +72,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 static int
 prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
+       unsigned int len = qdisc_pkt_len(skb);
        struct Qdisc *qdisc;
        int ret;
 
@@ -88,7 +89,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 
        ret = qdisc_enqueue(skb, qdisc, to_free);
        if (ret == NET_XMIT_SUCCESS) {
-               qdisc_qstats_backlog_inc(sch, skb);
+               sch->qstats.backlog += len;
                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
        }
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index dc37c4ead439..8d5e55d5bed2 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1210,6 +1210,7 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
 static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                        struct sk_buff **to_free)
 {
+       unsigned int len = qdisc_pkt_len(skb), gso_segs;
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl;
        struct qfq_aggregate *agg;
@@ -1224,17 +1225,17 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        }
        pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
 
-       if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
+       if (unlikely(cl->agg->lmax < len)) {
                pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
-                        cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
-               err = qfq_change_agg(sch, cl, cl->agg->class_weight,
-                                    qdisc_pkt_len(skb));
+                        cl->agg->lmax, len, cl->common.classid);
+               err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
                if (err) {
                        cl->qstats.drops++;
                        return qdisc_drop(skb, sch, to_free);
                }
        }
 
+       gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
        err = qdisc_enqueue(skb, cl->qdisc, to_free);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                pr_debug("qfq_enqueue: enqueue failed %d\n", err);
@@ -1245,8 +1246,9 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                return err;
        }
 
-       bstats_update(&cl->bstats, skb);
-       qdisc_qstats_backlog_inc(sch, skb);
+       cl->bstats.bytes += len;
+       cl->bstats.packets += gso_segs;
+       sch->qstats.backlog += len;
        ++sch->q.qlen;
 
        agg = cl->agg;
@@ -1254,7 +1256,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        if (cl->qdisc->q.qlen != 1) {
                if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
                    list_first_entry(&agg->active, struct qfq_class, alist)
-                   == cl && cl->deficit < qdisc_pkt_len(skb))
+                   == cl && cl->deficit < len)
                        list_move_tail(&cl->alist, &agg->active);
 
                return err;
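qfq needs more than the cached length because it previously updated the class byte and packet counters with bstats_update(&cl->bstats, skb) after the child enqueue, and that helper reads both the packet length and the GSO segment count from the skb. At the time of this patch the helper amounted to roughly the following (a paraphrase of the inline helper in include/net/sch_generic.h, not code from this patch):

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                                 const struct sk_buff *skb)
{
        /* Both fields are read from the skb, so calling this after a
         * child enqueue has the same use-after-free hazard.
         */
        bstats->bytes += qdisc_pkt_len(skb);
        bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

Hence qfq snapshots both len and gso_segs before qdisc_enqueue() and open-codes the two additions afterwards, as shown in the hunk above.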
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 942dcca09cf2..7f272a9070c5 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -185,6 +185,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                        struct sk_buff **to_free)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
+       unsigned int len = qdisc_pkt_len(skb);
        int ret;
 
        if (qdisc_pkt_len(skb) > q->max_size) {
@@ -200,7 +201,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                return ret;
        }
 
-       qdisc_qstats_backlog_inc(sch, skb);
+       sch->qstats.backlog += len;
        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
 }