author     Jiri Pirko <jiri@resnulli.us>           2013-02-11 19:12:03 -0500
committer  David S. Miller <davem@davemloft.net>   2013-02-12 18:59:45 -0500
commit     292f1c7ff6cc10516076ceeea45ed11833bb71c7 (patch)
tree       14e97f023e75d908cbebc183ce3b07fcb0233d05 /net
parent     b9a7afdefdf90dc9e64902b2565170b8b017aa75 (diff)
sch: make htb_rate_cfg and functions around that generic
As it is going to be used in tbf as well, push these to generic code.

Signed-off-by: Jiri Pirko <jiri@resnulli.us>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--   net/sched/sch_generic.c   37
-rw-r--r--   net/sched/sch_htb.c       65
2 files changed, 46 insertions, 56 deletions
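
Note: the callers below rely on a struct psched_ratecfg and two inline helpers that presumably live in include/net/sch_generic.h; that header is outside the 'net' path filter, so its half of the change is not shown on this page. The following is a sketch of the assumed declarations, inferred from the call sites in this patch rather than copied from the header:

struct psched_ratecfg {
	u64	rate_bps;	/* configured rate, in bits per second */
	u32	mult;		/* precomputed multiplier ... */
	u32	shift;		/* ... and shift: ns = (len * mult) >> shift */
};

/* bytes-to-transmit-time in ns, same math as the l2t_ns() removed from
 * sch_htb.c below (assumed inline helper) */
static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	return ((u64)len * r->mult) >> r->shift;
}

/* rate back in bytes per second, for dumping to userspace (assumed) */
static inline u32 psched_ratecfg_getrate(const struct psched_ratecfg *r)
{
	return r->rate_bps >> 3;
}

extern void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate);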
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5d81a4478514..ffad48109a22 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -25,6 +25,7 @@
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
 
@@ -896,3 +897,39 @@ void dev_shutdown(struct net_device *dev)
 
 	WARN_ON(timer_pending(&dev->watchdog_timer));
 }
+
+void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)
+{
+	u64 factor;
+	u64 mult;
+	int shift;
+
+	r->rate_bps = rate << 3;
+	r->shift = 0;
+	r->mult = 1;
+	/*
+	 * Calibrate mult, shift so that token counting is accurate
+	 * for smallest packet size (64 bytes). Token (time in ns) is
+	 * computed as (bytes * 8) * NSEC_PER_SEC / rate_bps. It will
+	 * work as long as the smallest packet transfer time can be
+	 * accurately represented in nanosec.
+	 */
+	if (r->rate_bps > 0) {
+		/*
+		 * Higher shift gives better accuracy. Find the largest
+		 * shift such that mult fits in 32 bits.
+		 */
+		for (shift = 0; shift < 16; shift++) {
+			r->shift = shift;
+			factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
+			mult = div64_u64(factor, r->rate_bps);
+			if (mult > UINT_MAX)
+				break;
+		}
+
+		r->shift = shift - 1;
+		factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
+		r->mult = div64_u64(factor, r->rate_bps);
+	}
+}
+EXPORT_SYMBOL(psched_ratecfg_precompute);
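
The comment block in psched_ratecfg_precompute() above describes the mult/shift calibration: transmit time is (len * mult) >> shift nanoseconds, with mult chosen as large as possible while still fitting in 32 bits. Below is a standalone userspace sketch of the same arithmetic (plain C, div64_u64() replaced by ordinary 64-bit division) for a 100 Mbit/s rate, i.e. rate = 12500000 bytes/s:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t rate_bps = 12500000ULL << 3;	/* 100 Mbit/s */
	uint32_t sh = 0;
	uint64_t mult;
	int shift;

	/* find the largest shift (< 16) whose mult still fits in 32 bits */
	for (shift = 0; shift < 16; shift++) {
		uint64_t factor = 8ULL * NSEC_PER_SEC * (1ULL << shift);
		if (factor / rate_bps > UINT32_MAX)
			break;
		sh = shift;
	}
	mult = (8ULL * NSEC_PER_SEC * (1ULL << sh)) / rate_bps;

	/* expect shift=15, mult=2621440: (len * mult) >> 15 == len * 80,
	 * i.e. 80 ns per byte, so a 64-byte packet takes 5120 ns */
	printf("shift=%u mult=%llu t(64B)=%llu ns\n", sh,
	       (unsigned long long)mult,
	       (unsigned long long)((64 * mult) >> sh));
	return 0;
}

With these values the 64-byte worst case from the comment is exact: 64 bytes * 80 ns/byte = 5120 ns, which matches 512 bits at 100 Mbit/s.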
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 2b225446b3de..03c2692ca01e 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -38,6 +38,7 @@
 #include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <net/netlink.h>
+#include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 
 /* HTB algorithm.
@@ -71,12 +72,6 @@ enum htb_cmode {
 	HTB_CAN_SEND		/* class can send */
 };
 
-struct htb_rate_cfg {
-	u64 rate_bps;
-	u32 mult;
-	u32 shift;
-};
-
 /* interior & leaf nodes; props specific to leaves are marked L: */
 struct htb_class {
 	struct Qdisc_class_common common;
@@ -124,8 +119,8 @@ struct htb_class {
 	int filter_cnt;
 
 	/* token bucket parameters */
-	struct htb_rate_cfg rate;
-	struct htb_rate_cfg ceil;
+	struct psched_ratecfg rate;
+	struct psched_ratecfg ceil;
 	s64 buffer, cbuffer;	/* token bucket depth/rate */
 	psched_tdiff_t mbuffer;	/* max wait time */
 	s64 tokens, ctokens;	/* current number of tokens */
@@ -168,45 +163,6 @@ struct htb_sched {
 	struct work_struct work;
 };
 
-static u64 l2t_ns(struct htb_rate_cfg *r, unsigned int len)
-{
-	return ((u64)len * r->mult) >> r->shift;
-}
-
-static void htb_precompute_ratedata(struct htb_rate_cfg *r)
-{
-	u64 factor;
-	u64 mult;
-	int shift;
-
-	r->shift = 0;
-	r->mult = 1;
-	/*
-	 * Calibrate mult, shift so that token counting is accurate
-	 * for smallest packet size (64 bytes). Token (time in ns) is
-	 * computed as (bytes * 8) * NSEC_PER_SEC / rate_bps. It will
-	 * work as long as the smallest packet transfer time can be
-	 * accurately represented in nanosec.
-	 */
-	if (r->rate_bps > 0) {
-		/*
-		 * Higher shift gives better accuracy. Find the largest
-		 * shift such that mult fits in 32 bits.
-		 */
-		for (shift = 0; shift < 16; shift++) {
-			r->shift = shift;
-			factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
-			mult = div64_u64(factor, r->rate_bps);
-			if (mult > UINT_MAX)
-				break;
-		}
-
-		r->shift = shift - 1;
-		factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
-		r->mult = div64_u64(factor, r->rate_bps);
-	}
-}
-
 /* find class in global hash table using given handle */
 static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
 {
@@ -632,7 +588,7 @@ static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
 
 	if (toks > cl->buffer)
 		toks = cl->buffer;
-	toks -= (s64) l2t_ns(&cl->rate, bytes);
+	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
 	if (toks <= -cl->mbuffer)
 		toks = 1 - cl->mbuffer;
 
@@ -645,7 +601,7 @@ static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
 
 	if (toks > cl->cbuffer)
 		toks = cl->cbuffer;
-	toks -= (s64) l2t_ns(&cl->ceil, bytes);
+	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
 	if (toks <= -cl->mbuffer)
 		toks = 1 - cl->mbuffer;
 
@@ -1134,9 +1090,9 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 
 	memset(&opt, 0, sizeof(opt));
 
-	opt.rate.rate = cl->rate.rate_bps >> 3;
+	opt.rate.rate = psched_ratecfg_getrate(&cl->rate);
 	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
-	opt.ceil.rate = cl->ceil.rate_bps >> 3;
+	opt.ceil.rate = psched_ratecfg_getrate(&cl->ceil);
 	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
 	opt.quantum = cl->quantum;
 	opt.prio = cl->prio;
@@ -1503,11 +1459,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		cl->prio = TC_HTB_NUMPRIO - 1;
 	}
 
-	cl->rate.rate_bps = (u64)hopt->rate.rate << 3;
-	cl->ceil.rate_bps = (u64)hopt->ceil.rate << 3;
-
-	htb_precompute_ratedata(&cl->rate);
-	htb_precompute_ratedata(&cl->ceil);
+	psched_ratecfg_precompute(&cl->rate, hopt->rate.rate);
+	psched_ratecfg_precompute(&cl->ceil, hopt->ceil.rate);
 
 	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
 	cl->cbuffer = PSCHED_TICKS2NS(hopt->buffer);
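
As the commit message notes, the point of making this generic is reuse: a qdisc such as tbf can keep a struct psched_ratecfg of its own, call psched_ratecfg_precompute() at configuration time, and charge tokens with psched_l2t_ns() on the dequeue path. A hypothetical consumer is sketched below with illustrative names only (my_sched_data, my_change and my_can_send are not part of this patch):

struct my_sched_data {
	struct psched_ratecfg rate;
	s64 buffer;		/* burst allowance, in ns worth of tokens */
	s64 tokens;		/* currently available tokens, in ns */
	s64 t_c;		/* time of the last token refill */
};

static void my_change(struct my_sched_data *q, u32 rate_bytes_per_sec)
{
	/* mult/shift are precomputed once, at configuration time */
	psched_ratecfg_precompute(&q->rate, rate_bytes_per_sec);
}

static bool my_can_send(struct my_sched_data *q, s64 now, unsigned int len)
{
	s64 toks = min_t(s64, now - q->t_c, q->buffer) + q->tokens;

	/* charge the packet's transmit time, in nanoseconds */
	toks -= (s64) psched_l2t_ns(&q->rate, len);
	if (toks < 0)
		return false;
	q->t_c = now;
	q->tokens = toks;
	return true;
}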