diff options
author | Stephen Hemminger <shemminger@osdl.org> | 2005-05-26 15:53:49 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2005-05-26 15:53:49 -0400 |
commit | 0afb51e72855971dba83b3c6b70c547c2d1161fd (patch) | |
tree | 8f0e3cd40e381f4dd9de2e7431490ae8cbbf6498 /net/sched/sch_netem.c | |
parent | cdbbde14cb55dd10771ce79154f787322d88411b (diff) |
[PKT_SCHED]: netem: reinsert for duplication
Handle duplication of packets in netem by re-inserting at top of qdisc tree.
This avoids problems with qlen accounting with nested qdiscs. This recursion
requires no additional locking but will potentially increase stack depth.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_netem.c')
-rw-r--r-- | net/sched/sch_netem.c | 53 |
1 files changed, 29 insertions, 24 deletions
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index e0c9fbe73b15..5c0f0c209a4c 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -203,42 +203,47 @@ static int netem_run(struct Qdisc *sch) | |||
203 | return 0; | 203 | return 0; |
204 | } | 204 | } |
205 | 205 | ||
206 | /* | ||
207 | * Insert one skb into qdisc. | ||
208 | * Note: parent depends on return value to account for queue length. | ||
209 | * NET_XMIT_DROP: queue length didn't change. | ||
210 | * NET_XMIT_SUCCESS: one skb was queued. | ||
211 | */ | ||
206 | static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) | 212 | static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) |
207 | { | 213 | { |
208 | struct netem_sched_data *q = qdisc_priv(sch); | 214 | struct netem_sched_data *q = qdisc_priv(sch); |
215 | struct sk_buff *skb2; | ||
209 | int ret; | 216 | int ret; |
217 | int count = 1; | ||
210 | 218 | ||
211 | pr_debug("netem_enqueue skb=%p\n", skb); | 219 | pr_debug("netem_enqueue skb=%p\n", skb); |
212 | 220 | ||
221 | /* Random duplication */ | ||
222 | if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) | ||
223 | ++count; | ||
224 | |||
213 | /* Random packet drop 0 => none, ~0 => all */ | 225 | /* Random packet drop 0 => none, ~0 => all */ |
214 | if (q->loss && q->loss >= get_crandom(&q->loss_cor)) { | 226 | if (q->loss && q->loss >= get_crandom(&q->loss_cor)) |
215 | pr_debug("netem_enqueue: random loss\n"); | 227 | --count; |
228 | |||
229 | if (count == 0) { | ||
216 | sch->qstats.drops++; | 230 | sch->qstats.drops++; |
217 | kfree_skb(skb); | 231 | kfree_skb(skb); |
218 | return 0; /* lie about loss so TCP doesn't know */ | 232 | return NET_XMIT_DROP; |
219 | } | 233 | } |
220 | 234 | ||
221 | /* Random duplication */ | 235 | /* |
222 | if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) { | 236 | * If we need to duplicate packet, then re-insert at top of the |
223 | struct sk_buff *skb2; | 237 | * qdisc tree, since parent queuer expects that only one |
224 | 238 | * skb will be queued. | |
225 | skb2 = skb_clone(skb, GFP_ATOMIC); | 239 | */ |
226 | if (skb2 && netem_delay(sch, skb2) == NET_XMIT_SUCCESS) { | 240 | if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { |
227 | struct Qdisc *qp; | 241 | struct Qdisc *rootq = sch->dev->qdisc; |
228 | 242 | u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ | |
229 | /* Since one packet can generate two packets in the | 243 | q->duplicate = 0; |
230 | * queue, the parent's qlen accounting gets confused, | 244 | |
231 | * so fix it. | 245 | rootq->enqueue(skb2, rootq); |
232 | */ | 246 | q->duplicate = dupsave; |
233 | qp = qdisc_lookup(sch->dev, TC_H_MAJ(sch->parent)); | ||
234 | if (qp) | ||
235 | qp->q.qlen++; | ||
236 | |||
237 | sch->q.qlen++; | ||
238 | sch->bstats.bytes += skb2->len; | ||
239 | sch->bstats.packets++; | ||
240 | } else | ||
241 | sch->qstats.drops++; | ||
242 | } | 247 | } |
243 | 248 | ||
244 | /* If doing simple delay then gap == 0 so all packets | 249 | /* If doing simple delay then gap == 0 so all packets |