author    YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>  2007-02-09 09:25:16 -0500
committer David S. Miller <davem@sunset.davemloft.net>  2007-02-11 02:20:08 -0500
commit    10297b99315e5e08fe623ba56da35db1fee69ba9 (patch)
tree      06cfd5434ad5d4cb9dd8e0715716da0abd52849c /net/sched/sch_cbq.c
parent    7612713fb69a17b79ca7d757df4446700f4afe6c (diff)
[NET] SCHED: Fix whitespace errors.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_cbq.c')
-rw-r--r--  net/sched/sch_cbq.c  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index f79a4f3d0a95..48830cac1014 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -40,12 +40,12 @@
 =======================================

 Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
              Management Models for Packet Networks",
              IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

          [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

          [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
              Parameters", 1996

          [4] Sally Floyd and Michael Speer, "Experimental Results
@@ -59,12 +59,12 @@
 the implementation is different. Particularly:

 --- The WRR algorithm is different. Our version looks more
     reasonable (I hope) and works when quanta are allowed to be
     less than MTU, which is always the case when real time classes
     have small rates. Note, that the statement of [3] is
     incomplete, delay may actually be estimated even if class
     per-round allotment is less than MTU. Namely, if per-round
     allotment is W*r_i, and r_1+...+r_k = r < 1

     delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

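The bound above is easy to sanity-check numerically. Below is a minimal userspace sketch, reading [x] as the integer ceiling (the number of WRR rounds needed to accumulate one MTU of allotment); every value in it is a made-up illustration, not a number taken from this driver.

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical link and class parameters, for illustration only. */
    double MTU = 1500.0;     /* bytes */
    double B   = 1.25e6;     /* link bandwidth, bytes/sec (10 Mbit/s) */
    double W   = 1000.0;     /* per-round allotment scale, bytes */
    double r_i = 0.05;       /* this class's rate share */
    double r   = 0.8;        /* r_1 + ... + r_k */
    int    k   = 4;          /* number of classes */

    /* delay_i <= (ceil(MTU/(W*r_i)) * W*r + W*r + k*MTU) / B */
    double rounds = ceil(MTU / (W * r_i));
    double bound  = (rounds * W * r + W * r + k * MTU) / B;

    printf("delay_i bound: %.6f sec\n", bound);
    return 0;
}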
@@ -280,7 +280,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 #ifdef CONFIG_NET_CLS_ACT
     switch (result) {
     case TC_ACT_QUEUED:
     case TC_ACT_STOLEN:
         *qerr = NET_XMIT_SUCCESS;
     case TC_ACT_SHOT:
         return NULL;
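Note the deliberate fall-through in this hunk: TC_ACT_QUEUED and TC_ACT_STOLEN first record NET_XMIT_SUCCESS in *qerr (the skb was consumed by an action, not lost), then fall into the TC_ACT_SHOT case and return NULL, so the caller sees "no class" in every case but can tell a steal from a drop via *qerr. A self-contained userspace toy, with invented enum and helper names, showing the same control flow:

#include <stdio.h>

enum act { ACT_OK, ACT_QUEUED, ACT_STOLEN, ACT_SHOT };

static const char *classify(enum act result, int *qerr)
{
    switch (result) {
    case ACT_QUEUED:
    case ACT_STOLEN:
        *qerr = 0;          /* skb consumed by an action, not lost */
        /* fall through */
    case ACT_SHOT:
        return NULL;        /* no class for this skb */
    default:
        return "default class";
    }
}

int main(void)
{
    int qerr;

    qerr = -1;
    printf("STOLEN: class=%s qerr=%d\n",
           classify(ACT_STOLEN, &qerr) ? "yes" : "NULL", qerr);

    qerr = -1;
    printf("SHOT:   class=%s qerr=%d\n",
           classify(ACT_SHOT, &qerr) ? "yes" : "NULL", qerr);
    return 0;
}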
@@ -479,7 +479,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
     if (!cl->delayed) {
         delay += cl->offtime;

         /*
            Class goes to sleep, so that it will have no
            chance to work avgidle. Let's forgive it 8)

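The surrounding logic (truncated in this hunk) puts an overlimit class to sleep; the first time a class is delayed, cl->offtime is added on top of the time it must wait to become under limit again. A toy model of just that step, with hypothetical tick values:

#include <stdio.h>

int main(void)
{
    /* Invented tick values, purely for illustration. */
    long now = 1000, undertime = 1200, offtime = 300;
    int delayed = 0;

    long delay = undertime - now;   /* time until under limit again */
    if (!delayed) {
        delay += offtime;           /* first offence: add the penalty */
        delayed = 1;
    }
    printf("class sleeps for %ld ticks (delayed=%d)\n", delay, delayed);
    return 0;
}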
@@ -717,7 +717,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 }
 #endif

 /*
    It is mission critical procedure.

    We "regenerate" toplevel cutoff, if transmitting class
@@ -739,7 +739,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
         }
     } while ((borrowed=borrowed->borrow) != NULL);
 }
 #if 0
 /* It is not necessary now. Uncommenting it
    will save CPU cycles, but decrease fairness.
  */
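The loop above walks the borrow chain (cl, cl->borrow, cl->borrow->borrow, ...) looking for an ancestor that may lend bandwidth, and lowers the toplevel cutoff accordingly. A simplified, self-contained sketch of that walk; the struct and helper are invented for illustration and omit the undertime/throttling checks the real function performs:

#include <stdio.h>
#include <stddef.h>

struct toy_class {
    int level;
    struct toy_class *borrow;   /* ancestor we may borrow bandwidth from */
};

/* Lower the toplevel cutoff to the shallowest lendable ancestor. */
static void update_toplevel(int *toplevel, struct toy_class *cl)
{
    struct toy_class *borrowed = cl;
    do {
        if (borrowed->level < *toplevel) {
            *toplevel = borrowed->level;
            return;
        }
    } while ((borrowed = borrowed->borrow) != NULL);
}

int main(void)
{
    struct toy_class root = { .level = 2, .borrow = NULL };
    struct toy_class leaf = { .level = 0, .borrow = &root };
    int toplevel = 1;

    update_toplevel(&toplevel, &leaf);
    printf("toplevel cutoff is now %d\n", toplevel);
    return 0;
}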
@@ -768,7 +768,7 @@ cbq_update(struct cbq_sched_data *q)
    (now - last) is total time between packet right edges.
    (last_pktlen/rate) is "virtual" busy time, so that

         idle = (now - last) - last_pktlen/rate
  */

     idle = PSCHED_TDIFF(q->now, cl->last);
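The idle value computed here feeds the class's avgidle average; a negative result means the class transmitted faster than its allocated rate. The toy below applies the textbook EWMA avgidle += (idle - avgidle)/2^ewma_log with made-up numbers; the kernel's fixed-point variant is scaled differently, but the idea is the same.

#include <stdio.h>

int main(void)
{
    /* Hypothetical values, for illustration only. */
    double now = 10.000, last = 9.990;  /* packet right edges, sec */
    double last_pktlen = 1500;          /* bytes */
    double rate = 125000;               /* allocated rate, bytes/sec */
    double avgidle = 0.002;             /* running average */
    int ewma_log = 3;                   /* EWMA weight = 1/2^3 */

    double idle = (now - last) - last_pktlen / rate;

    /* avgidle := avgidle + (idle - avgidle)/2^ewma_log */
    avgidle += (idle - avgidle) / (1 << ewma_log);

    printf("idle=%.6f avgidle=%.6f (negative idle => overlimit)\n",
           idle, avgidle);
    return 0;
}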
@@ -907,7 +907,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
             skb = cl->q->dequeue(cl->q);

             /* Class did not give us any skb :-(
                It could occur even if cl->q->q.qlen != 0
                f.e. if cl->q == "tbf"
              */
             if (skb == NULL)
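The comment is worth spelling out: an inner qdisc such as TBF may hold queued packets back to respect its rate, so dequeue() can legitimately return NULL while q.qlen != 0, and the caller must not treat that as an empty class. A self-contained toy with invented names:

#include <stdio.h>
#include <stddef.h>

struct toy_child {
    int qlen;        /* packets queued */
    long next_ok;    /* earliest time a packet may leave (think TBF) */
};

static const char *toy_dequeue(struct toy_child *q, long now)
{
    if (q->qlen == 0 || now < q->next_ok)
        return NULL;    /* non-empty, but rate-limited: no skb */
    q->qlen--;
    return "skb";
}

int main(void)
{
    struct toy_child tbf = { .qlen = 3, .next_ok = 100 };

    printf("now=50:  %s (qlen=%d)\n",
           toy_dequeue(&tbf, 50) ? "skb" : "NULL", tbf.qlen);
    printf("now=150: %s (qlen=%d)\n",
           toy_dequeue(&tbf, 150) ? "skb" : "NULL", tbf.qlen);
    return 0;
}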
@@ -2131,7 +2131,7 @@ static int __init cbq_module_init(void)
 {
     return register_qdisc(&cbq_qdisc_ops);
 }
 static void __exit cbq_module_exit(void)
 {
     unregister_qdisc(&cbq_qdisc_ops);
 }
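For context, these two functions are the module's whole life cycle. The usual skeleton, as it appears in qdisc modules of this era (the ops table is defined elsewhere in the file), wires them up via module_init/module_exit:

#include <linux/module.h>
#include <net/pkt_sched.h>

static struct Qdisc_ops cbq_qdisc_ops;   /* defined elsewhere in the file */

static int __init cbq_module_init(void)
{
    return register_qdisc(&cbq_qdisc_ops);
}

static void __exit cbq_module_exit(void)
{
    unregister_qdisc(&cbq_qdisc_ops);
}

module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");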