Diffstat (limited to 'net/sched/sch_sfq.c')
-rw-r--r--	net/sched/sch_sfq.c	22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index f0463d757a98..8589da666568 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -245,7 +245,7 @@ static unsigned int sfq_drop(struct Qdisc *sch)
 	if (d > 1) {
 		sfq_index x = q->dep[d + SFQ_DEPTH].next;
 		skb = q->qs[x].prev;
-		len = skb->len;
+		len = qdisc_pkt_len(skb);
 		__skb_unlink(skb, &q->qs[x]);
 		kfree_skb(skb);
 		sfq_dec(q, x);
@@ -261,7 +261,7 @@ static unsigned int sfq_drop(struct Qdisc *sch)
 		q->next[q->tail] = q->next[d];
 		q->allot[q->next[d]] += q->quantum;
 		skb = q->qs[d].prev;
-		len = skb->len;
+		len = qdisc_pkt_len(skb);
 		__skb_unlink(skb, &q->qs[d]);
 		kfree_skb(skb);
 		sfq_dec(q, d);
@@ -305,7 +305,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (q->qs[x].qlen >= q->limit)
 		return qdisc_drop(skb, sch);

-	sch->qstats.backlog += skb->len;
+	sch->qstats.backlog += qdisc_pkt_len(skb);
 	__skb_queue_tail(&q->qs[x], skb);
 	sfq_inc(q, x);
 	if (q->qs[x].qlen == 1) {		/* The flow is new */
@@ -320,7 +320,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 	}
 	if (++sch->q.qlen <= q->limit) {
-		sch->bstats.bytes += skb->len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		return 0;
 	}
@@ -352,7 +352,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		q->hash[x] = hash;
 	}

-	sch->qstats.backlog += skb->len;
+	sch->qstats.backlog += qdisc_pkt_len(skb);
 	__skb_queue_head(&q->qs[x], skb);
 	/* If selected queue has length q->limit+1, this means that
 	 * all another queues are empty and we do simple tail drop.
@@ -363,7 +363,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		skb = q->qs[x].prev;
 		__skb_unlink(skb, &q->qs[x]);
 		sch->qstats.drops++;
-		sch->qstats.backlog -= skb->len;
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
 		kfree_skb(skb);
 		return NET_XMIT_CN;
 	}
@@ -411,7 +411,7 @@ sfq_dequeue(struct Qdisc *sch)
 	skb = __skb_dequeue(&q->qs[a]);
 	sfq_dec(q, a);
 	sch->q.qlen--;
-	sch->qstats.backlog -= skb->len;
+	sch->qstats.backlog -= qdisc_pkt_len(skb);

 	/* Is the slot empty? */
 	if (q->qs[a].qlen == 0) {
@@ -423,7 +423,7 @@ sfq_dequeue(struct Qdisc *sch)
 		}
 		q->next[q->tail] = a;
 		q->allot[a] += q->quantum;
-	} else if ((q->allot[a] -= skb->len) <= 0) {
+	} else if ((q->allot[a] -= qdisc_pkt_len(skb)) <= 0) {
 		q->tail = a;
 		a = q->next[a];
 		q->allot[a] += q->quantum;
@@ -461,7 +461,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
 		return -EINVAL;

 	sch_tree_lock(sch);
-	q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
+	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
 	q->perturb_period = ctl->perturb_period * HZ;
 	if (ctl->limit)
 		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
@@ -502,7 +502,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->max_depth = 0;
 	q->tail = SFQ_DEPTH;
 	if (opt == NULL) {
-		q->quantum = psched_mtu(sch->dev);
+		q->quantum = psched_mtu(qdisc_dev(sch));
 		q->perturb_period = 0;
 		q->perturbation = net_random();
 	} else {
@@ -520,7 +520,7 @@ static void sfq_destroy(struct Qdisc *sch)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);

-	tcf_destroy_chain(q->filter_list);
+	tcf_destroy_chain(&q->filter_list);
 	q->perturb_period = 0;
 	del_timer_sync(&q->perturb_timer);
 }
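
For reference, the two accessors this diff switches to live in include/net/sch_generic.h. The sketch below paraphrases them (exact definitions vary between kernel versions): qdisc_pkt_len() returns the packet length the qdisc core stashes in the skb control block, so length accounting can be adjusted centrally instead of every qdisc reading skb->len directly, and qdisc_dev() reaches the net_device through the qdisc's dev_queue now that struct Qdisc no longer carries a device pointer of its own. The sfq_destroy() hunk likewise follows the tcf_destroy_chain() interface, which takes a pointer to the filter list so the helper can clear it.

/* Approximate definitions, paraphrased from include/net/sch_generic.h;
 * not taken verbatim from any particular kernel release.
 */
struct qdisc_skb_cb {
	unsigned int	pkt_len;	/* length as accounted by the qdisc layer */
	char		data[];
};

static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}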