author	Kumar Gala <galak@kernel.crashing.org>	2008-06-12 17:20:58 -0400
committer	Paul Mackerras <paulus@samba.org>	2008-06-16 01:00:54 -0400
commit	da3de6df33f5f42ff9dc40093fbc884f524c9a49 (patch)
tree	b0c5d209cde49e55bdd41b6125ae58918bd31ed5
parent	143580ecfb7999147e546cc3814023e233e95fa5 (diff)
[POWERPC] Fix -Os kernel builds with newer gcc versions
GCC 4.4.x looks to be adding support for generating out-of-line register saves/restores, based on:
http://gcc.gnu.org/ml/gcc-patches/2008-04/msg01678.html

This breaks the kernel if we enable CONFIG_CC_OPTIMIZE_FOR_SIZE. To fix this we take the save/restore code from gcc and simplify it down for our needs (integer registers only).

Additionally, we have to link this code into each module. The alternative was to add EXPORT_SYMBOL(), which would mean going through a trampoline, which seemed nonsensical for these out-of-line routines.

Finally, we add some checks to prom_init_check.sh to ignore the out-of-line save/restore functions.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
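For context: with CONFIG_CC_OPTIMIZE_FOR_SIZE, a gcc that supports out-of-line saves/restores replaces the usual inline prologue/epilogue register stores with calls to these helpers, so any object file (including a module) can end up referencing _savegpr_N / _restgpr_N_x. The sketch below is purely illustrative (the function name, frame size, and exact instruction ordering are made up; real gcc output may differ), but it shows the kind of references that have to resolve both in the kernel image and in every .ko, which is why crtsavres.o is linked into modules instead of being exported.

	# hypothetical function compiled with -Os that needs r29..r31
	foo:
		stwu	1,-32(1)	/* allocate a 32-byte frame */
		mflr	0
		addi	11,1,32		/* r11 = end of the GPR save area (old sp) */
		bl	_savegpr_29	/* out-of-line save of r29, r30, r31 */
		stw	0,36(1)		/* stash lr at 4(r11), where _restgpr_29_x expects it */
		/* ... function body ... */
		addi	11,1,32
		b	_restgpr_29_x	/* restore r29..r31 and lr, pop the frame, return */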
-rw-r--r--	arch/powerpc/Makefile	2
-rw-r--r--	arch/powerpc/kernel/prom_init_check.sh	14
-rw-r--r--	arch/powerpc/lib/Makefile	2
-rw-r--r--	arch/powerpc/lib/crtsavres.S	229
4 files changed, 246 insertions, 1 deletion
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 86096ccc5914..b7d4c4ce2fe6 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -96,6 +96,8 @@ endif
 else
 	KBUILD_CFLAGS += $(call cc-option,-mtune=power4)
 endif
+else
+LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
 endif
 
 ifeq ($(CONFIG_TUNE_CELL),y)
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 31729a9387df..2c7e8e87f770 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -48,6 +48,20 @@ do
 		fi
 	done
 
+	# ignore register save/restore functions
+	if [ "${UNDEF:0:9}" = "_restgpr_" ]; then
+		OK=1
+	fi
+	if [ "${UNDEF:0:11}" = "_rest32gpr_" ]; then
+		OK=1
+	fi
+	if [ "${UNDEF:0:9}" = "_savegpr_" ]; then
+		OK=1
+	fi
+	if [ "${UNDEF:0:11}" = "_save32gpr_" ]; then
+		OK=1
+	fi
+
 	if [ $OK -eq 0 ]; then
 		ERROR=1
 		echo "Error: External symbol '$UNDEF' referenced" \
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index c71d37dc6a88..e522b06cc42f 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -9,7 +9,7 @@ endif
 ifeq ($(CONFIG_PPC_MERGE),y)
 obj-y			:= string.o alloc.o \
 			   checksum_$(CONFIG_WORD_SIZE).o
-obj-$(CONFIG_PPC32)	+= div64.o copy_32.o
+obj-$(CONFIG_PPC32)	+= div64.o copy_32.o crtsavres.o
 obj-$(CONFIG_HAS_IOMEM)	+= devres.o
 endif
 
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
new file mode 100644
index 000000000000..70a9cd8a3008
--- /dev/null
+++ b/arch/powerpc/lib/crtsavres.S
@@ -0,0 +1,229 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001 Free Software Foundation, Inc.
+ * Copyright 2008 Freescale Semiconductor, Inc.
+ * Written By Michael Meissner
+ *
+ * Based on gcc/config/rs6000/crtsavres.asm from gcc
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file.  (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause
+ * the resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why
+ * the executable file might be covered by the GNU General Public License.
+ */
+
+#include <asm/ppc_asm.h>
+
+	.file	"crtsavres.S"
+	.section ".text"
+
+#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+
+/* Routines for saving integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer save area. */
+
+_GLOBAL(_savegpr_14)
+_GLOBAL(_save32gpr_14)
+	stw	14,-72(11)	/* save gp registers */
+_GLOBAL(_savegpr_15)
+_GLOBAL(_save32gpr_15)
+	stw	15,-68(11)
+_GLOBAL(_savegpr_16)
+_GLOBAL(_save32gpr_16)
+	stw	16,-64(11)
+_GLOBAL(_savegpr_17)
+_GLOBAL(_save32gpr_17)
+	stw	17,-60(11)
+_GLOBAL(_savegpr_18)
+_GLOBAL(_save32gpr_18)
+	stw	18,-56(11)
+_GLOBAL(_savegpr_19)
+_GLOBAL(_save32gpr_19)
+	stw	19,-52(11)
+_GLOBAL(_savegpr_20)
+_GLOBAL(_save32gpr_20)
+	stw	20,-48(11)
+_GLOBAL(_savegpr_21)
+_GLOBAL(_save32gpr_21)
+	stw	21,-44(11)
+_GLOBAL(_savegpr_22)
+_GLOBAL(_save32gpr_22)
+	stw	22,-40(11)
+_GLOBAL(_savegpr_23)
+_GLOBAL(_save32gpr_23)
+	stw	23,-36(11)
+_GLOBAL(_savegpr_24)
+_GLOBAL(_save32gpr_24)
+	stw	24,-32(11)
+_GLOBAL(_savegpr_25)
+_GLOBAL(_save32gpr_25)
+	stw	25,-28(11)
+_GLOBAL(_savegpr_26)
+_GLOBAL(_save32gpr_26)
+	stw	26,-24(11)
+_GLOBAL(_savegpr_27)
+_GLOBAL(_save32gpr_27)
+	stw	27,-20(11)
+_GLOBAL(_savegpr_28)
+_GLOBAL(_save32gpr_28)
+	stw	28,-16(11)
+_GLOBAL(_savegpr_29)
+_GLOBAL(_save32gpr_29)
+	stw	29,-12(11)
+_GLOBAL(_savegpr_30)
+_GLOBAL(_save32gpr_30)
+	stw	30,-8(11)
+_GLOBAL(_savegpr_31)
+_GLOBAL(_save32gpr_31)
+	stw	31,-4(11)
+	blr
+
+/* Routines for restoring integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer restore area. */
+
+_GLOBAL(_restgpr_14)
+_GLOBAL(_rest32gpr_14)
+	lwz	14,-72(11)	/* restore gp registers */
+_GLOBAL(_restgpr_15)
+_GLOBAL(_rest32gpr_15)
+	lwz	15,-68(11)
+_GLOBAL(_restgpr_16)
+_GLOBAL(_rest32gpr_16)
+	lwz	16,-64(11)
+_GLOBAL(_restgpr_17)
+_GLOBAL(_rest32gpr_17)
+	lwz	17,-60(11)
+_GLOBAL(_restgpr_18)
+_GLOBAL(_rest32gpr_18)
+	lwz	18,-56(11)
+_GLOBAL(_restgpr_19)
+_GLOBAL(_rest32gpr_19)
+	lwz	19,-52(11)
+_GLOBAL(_restgpr_20)
+_GLOBAL(_rest32gpr_20)
+	lwz	20,-48(11)
+_GLOBAL(_restgpr_21)
+_GLOBAL(_rest32gpr_21)
+	lwz	21,-44(11)
+_GLOBAL(_restgpr_22)
+_GLOBAL(_rest32gpr_22)
+	lwz	22,-40(11)
+_GLOBAL(_restgpr_23)
+_GLOBAL(_rest32gpr_23)
+	lwz	23,-36(11)
+_GLOBAL(_restgpr_24)
+_GLOBAL(_rest32gpr_24)
+	lwz	24,-32(11)
+_GLOBAL(_restgpr_25)
+_GLOBAL(_rest32gpr_25)
+	lwz	25,-28(11)
+_GLOBAL(_restgpr_26)
+_GLOBAL(_rest32gpr_26)
+	lwz	26,-24(11)
+_GLOBAL(_restgpr_27)
+_GLOBAL(_rest32gpr_27)
+	lwz	27,-20(11)
+_GLOBAL(_restgpr_28)
+_GLOBAL(_rest32gpr_28)
+	lwz	28,-16(11)
+_GLOBAL(_restgpr_29)
+_GLOBAL(_rest32gpr_29)
+	lwz	29,-12(11)
+_GLOBAL(_restgpr_30)
+_GLOBAL(_rest32gpr_30)
+	lwz	30,-8(11)
+_GLOBAL(_restgpr_31)
+_GLOBAL(_rest32gpr_31)
+	lwz	31,-4(11)
+	blr
+
+/* Routines for restoring integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer restore area. */
+
+_GLOBAL(_restgpr_14_x)
+_GLOBAL(_rest32gpr_14_x)
+	lwz	14,-72(11)	/* restore gp registers */
+_GLOBAL(_restgpr_15_x)
+_GLOBAL(_rest32gpr_15_x)
+	lwz	15,-68(11)
+_GLOBAL(_restgpr_16_x)
+_GLOBAL(_rest32gpr_16_x)
+	lwz	16,-64(11)
+_GLOBAL(_restgpr_17_x)
+_GLOBAL(_rest32gpr_17_x)
+	lwz	17,-60(11)
+_GLOBAL(_restgpr_18_x)
+_GLOBAL(_rest32gpr_18_x)
+	lwz	18,-56(11)
+_GLOBAL(_restgpr_19_x)
+_GLOBAL(_rest32gpr_19_x)
+	lwz	19,-52(11)
+_GLOBAL(_restgpr_20_x)
+_GLOBAL(_rest32gpr_20_x)
+	lwz	20,-48(11)
+_GLOBAL(_restgpr_21_x)
+_GLOBAL(_rest32gpr_21_x)
+	lwz	21,-44(11)
+_GLOBAL(_restgpr_22_x)
+_GLOBAL(_rest32gpr_22_x)
+	lwz	22,-40(11)
+_GLOBAL(_restgpr_23_x)
+_GLOBAL(_rest32gpr_23_x)
+	lwz	23,-36(11)
+_GLOBAL(_restgpr_24_x)
+_GLOBAL(_rest32gpr_24_x)
+	lwz	24,-32(11)
+_GLOBAL(_restgpr_25_x)
+_GLOBAL(_rest32gpr_25_x)
+	lwz	25,-28(11)
+_GLOBAL(_restgpr_26_x)
+_GLOBAL(_rest32gpr_26_x)
+	lwz	26,-24(11)
+_GLOBAL(_restgpr_27_x)
+_GLOBAL(_rest32gpr_27_x)
+	lwz	27,-20(11)
+_GLOBAL(_restgpr_28_x)
+_GLOBAL(_rest32gpr_28_x)
+	lwz	28,-16(11)
+_GLOBAL(_restgpr_29_x)
+_GLOBAL(_rest32gpr_29_x)
+	lwz	29,-12(11)
+_GLOBAL(_restgpr_30_x)
+_GLOBAL(_rest32gpr_30_x)
+	lwz	30,-8(11)
+_GLOBAL(_restgpr_31_x)
+_GLOBAL(_rest32gpr_31_x)
+	lwz	0,4(11)
+	lwz	31,-4(11)
+	mtlr	0
+	mr	1,11
+	blr
+#endif
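Note how the entry points above fall straight through: calling _savegpr_N stores rN and then every higher register up to r31 before reaching the blr, so rN always lives at 4*(N - 32) bytes below r11 (r14 at -72(11), r31 at -4(11)), with r11 pointing just past the save area. The plain _restgpr_N variants only reload the registers, so a caller using them still unwinds its own frame; the _x variants also reload lr from 4(11) and pop the frame themselves. The epilogue below is an illustrative sketch only (frame size and register choice are made up) showing the non-tail-call pattern next to the _x form used in the earlier example:

	# hypothetical epilogue using the non-tail-call restore variant
		addi	11,1,32		/* r11 = end of the GPR save area */
		bl	_restgpr_29	/* reload r29..r31, then return here */
		lwz	0,36(1)		/* caller restores its own lr ... */
		mtlr	0
		addi	1,1,32		/* ... and pops its own frame */
		blr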