 -rw-r--r--  include/net/pkt_sched.h    |  4
 -rw-r--r--  include/net/sch_generic.h  |  2
 -rw-r--r--  net/sched/sch_api.c        | 67
 -rw-r--r--  net/sched/sch_atm.c        | 11
 -rw-r--r--  net/sched/sch_cbq.c        | 39
 -rw-r--r--  net/sched/sch_tbf.c        |  2
 6 files changed, 73 insertions(+), 52 deletions(-)
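In brief (a summary inferred from the diff below, not quoted from the original commit message): a new tc_classify_compat() walks the filter chain and returns the raw classifier verdict — including TC_ACT_RECLASSIFY — to the calling qdisc, while tc_classify() becomes a wrapper around it that still resolves reclassification itself, bounded by MAX_REC_LOOP. CBQ and ATM switch to the compat variant so they can handle reclassification internally (cbq_reclassify(), ATM excess flows / CLP marking), and the CONFIG_NET_CLS_POLICE-only guards around the police/reshape_fail paths are widened to also cover CONFIG_NET_CLS_ACT.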
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 5754d53d9efc..9e22526e80e7 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -89,8 +89,10 @@ static inline void qdisc_run(struct net_device *dev)
 		__qdisc_run(dev);
 }
 
+extern int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
+			      struct tcf_result *res);
 extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
-	struct tcf_result *res);
+		       struct tcf_result *res);
 
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 1b8e35197ebe..0153cd9d1b8d 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -290,7 +290,7 @@ static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
 {
 	sch->qstats.drops++;
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
 		goto drop;
 
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 4fd0beca9450..13c09bc32aa3 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1145,47 +1145,57 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
    to this qdisc, (optionally) tests for protocol and asks
    specific classifiers.
  */
+int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
+		       struct tcf_result *res)
+{
+	__be16 protocol = skb->protocol;
+	int err = 0;
+
+	for (; tp; tp = tp->next) {
+		if ((tp->protocol == protocol ||
+		     tp->protocol == htons(ETH_P_ALL)) &&
+		    (err = tp->classify(skb, tp, res)) >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
+				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
+#endif
+			return err;
+		}
+	}
+	return -1;
+}
+EXPORT_SYMBOL(tc_classify_compat);
+
 int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
-	struct tcf_result *res)
+		struct tcf_result *res)
 {
 	int err = 0;
-	__be16 protocol = skb->protocol;
+	__be16 protocol;
 #ifdef CONFIG_NET_CLS_ACT
 	struct tcf_proto *otp = tp;
 reclassify:
 #endif
 	protocol = skb->protocol;
 
-	for ( ; tp; tp = tp->next) {
-		if ((tp->protocol == protocol ||
-		     tp->protocol == htons(ETH_P_ALL)) &&
-		    (err = tp->classify(skb, tp, res)) >= 0) {
+	err = tc_classify_compat(skb, tp, res);
 #ifdef CONFIG_NET_CLS_ACT
-			if ( TC_ACT_RECLASSIFY == err) {
-				__u32 verd = (__u32) G_TC_VERD(skb->tc_verd);
-				tp = otp;
-
-				if (MAX_REC_LOOP < verd++) {
-					printk("rule prio %d protocol %02x reclassify is buggy packet dropped\n",
-					       tp->prio&0xffff, ntohs(tp->protocol));
-					return TC_ACT_SHOT;
-				}
-				skb->tc_verd = SET_TC_VERD(skb->tc_verd,verd);
-				goto reclassify;
-			} else {
-				if (skb->tc_verd)
-					skb->tc_verd = SET_TC_VERD(skb->tc_verd,0);
-				return err;
-			}
-#else
-
-			return err;
-#endif
+	if (err == TC_ACT_RECLASSIFY) {
+		u32 verd = G_TC_VERD(skb->tc_verd);
+		tp = otp;
+
+		if (verd++ >= MAX_REC_LOOP) {
+			printk("rule prio %u protocol %02x reclassify loop, "
+			       "packet dropped\n",
+			       tp->prio&0xffff, ntohs(tp->protocol));
+			return TC_ACT_SHOT;
 		}
-
+		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
+		goto reclassify;
 	}
-	return -1;
+#endif
+	return err;
 }
+EXPORT_SYMBOL(tc_classify);
 
 void tcf_destroy(struct tcf_proto *tp)
 {
@@ -1252,4 +1262,3 @@ EXPORT_SYMBOL(qdisc_get_rtab);
 EXPORT_SYMBOL(qdisc_put_rtab);
 EXPORT_SYMBOL(register_qdisc);
 EXPORT_SYMBOL(unregister_qdisc);
-EXPORT_SYMBOL(tc_classify);
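For orientation, here is a minimal sketch (not from this patch; all example_* names are hypothetical) of how a qdisc would use the new entry point: tc_classify() keeps resolving TC_ACT_RECLASSIFY by restarting the filter walk itself, while tc_classify_compat() hands that verdict back so the qdisc can apply its own policy, as CBQ and ATM do below.

/* Sketch only: everything prefixed example_ is illustrative. */
struct example_class;
struct example_sched_data {
	struct tcf_proto	*filter_list;
	struct example_class	*default_class;
};

static struct example_class *example_reclassify(struct sk_buff *skb,
						struct example_class *cl);

static struct example_class *example_classify(struct sk_buff *skb,
					      struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	/* The compat variant returns TC_ACT_RECLASSIFY to us instead of
	 * resolving it in the generic restart loop of tc_classify(). */
	result = tc_classify_compat(skb, q->filter_list, &res);
	if (result < 0)
		return q->default_class;	/* no filter matched */

#ifdef CONFIG_NET_CLS_ACT
	switch (result) {
	case TC_ACT_SHOT:
		return NULL;			/* caller drops the skb */
	case TC_ACT_RECLASSIFY:
		/* qdisc-internal policy, e.g. divert to an excess class
		 * the way cbq_reclassify() and ATM's flow->excess do */
		return example_reclassify(skb, (struct example_class *)res.class);
	}
#endif
	return (struct example_class *)res.class;
}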
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index ccee10dae66d..37ae6d1deb14 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -396,8 +396,9 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	    !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority)))
 		for (flow = p->flows; flow; flow = flow->next)
 			if (flow->filter_list) {
-				result = tc_classify(skb, flow->filter_list,
-						     &res);
+				result = tc_classify_compat(skb,
+							    flow->filter_list,
+							    &res);
 				if (result < 0)
 					continue;
 				flow = (struct atm_flow_data *)res.class;
@@ -420,6 +421,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		case TC_ACT_SHOT:
 			kfree_skb(skb);
 			goto drop;
+		case TC_POLICE_RECLASSIFY:
+			if (flow->excess)
+				flow = flow->excess;
+			else
+				ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
+			break;
 		}
 #elif defined(CONFIG_NET_CLS_POLICE)
 		switch (result) {
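Reading the added case: under NET_CLS_ACT a reclassified packet now gets the same treatment NET_CLS_POLICE already gave it — it moves to the flow's configured excess flow if one exists, otherwise it stays on the matched flow and is tagged with the ATM cell loss priority (CLP) bit.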
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index b184c3545145..77381f1c6541 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -82,7 +82,7 @@ struct cbq_class
 	unsigned char		priority2;	/* priority to be used after overlimit */
 	unsigned char		ewma_log;	/* time constant for idle time calculation */
 	unsigned char		ovl_strategy;
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	unsigned char		police;
 #endif
 
@@ -154,7 +154,7 @@ struct cbq_sched_data
 	struct cbq_class	*active[TC_CBQ_MAXPRIO+1];	/* List of all classes
 								   with backlog */
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	struct cbq_class	*rx_class;
 #endif
 	struct cbq_class	*tx_class;
@@ -196,7 +196,7 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
 	return NULL;
 }
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 
 static struct cbq_class *
 cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
@@ -247,7 +247,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		/*
 		 * Step 2+n. Apply classifier.
 		 */
-		if (!head->filter_list || (result = tc_classify(skb, head->filter_list, &res)) < 0)
+		if (!head->filter_list ||
+		    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
 			goto fallback;
 
 		if ((cl = (void*)res.class) == NULL) {
@@ -267,6 +268,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 			*qerr = NET_XMIT_SUCCESS;
 		case TC_ACT_SHOT:
 			return NULL;
+		case TC_ACT_RECLASSIFY:
+			return cbq_reclassify(skb, cl);
 		}
 #elif defined(CONFIG_NET_CLS_POLICE)
 		switch (result) {
@@ -389,7 +392,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	int ret;
 	struct cbq_class *cl = cbq_classify(skb, sch, &ret);
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	q->rx_class = cl;
 #endif
 	if (cl == NULL) {
@@ -399,7 +402,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return ret;
 	}
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	cl->q->__parent = sch;
 #endif
 	if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
@@ -434,7 +437,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	cbq_mark_toplevel(q, cl);
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	q->rx_class = cl;
 	cl->q->__parent = sch;
 #endif
@@ -670,7 +673,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 }
 
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 
 static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 {
@@ -1364,7 +1367,7 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
 	return 0;
 }
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
 {
 	cl->police = p->police;
@@ -1532,7 +1535,7 @@ rtattr_failure:
 	return -1;
 }
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
@@ -1558,7 +1561,7 @@ static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
 	    cbq_dump_rate(skb, cl) < 0 ||
 	    cbq_dump_wrr(skb, cl) < 0 ||
 	    cbq_dump_ovl(skb, cl) < 0 ||
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	    cbq_dump_police(skb, cl) < 0 ||
 #endif
 	    cbq_dump_fopt(skb, cl) < 0)
@@ -1653,7 +1656,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 					     cl->classid)) == NULL)
 			return -ENOBUFS;
 	} else {
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 		if (cl->police == TC_POLICE_RECLASSIFY)
 			new->reshape_fail = cbq_reshape_fail;
 #endif
@@ -1718,7 +1721,7 @@ cbq_destroy(struct Qdisc* sch)
 	struct cbq_class *cl;
 	unsigned h;
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	q->rx_class = NULL;
 #endif
 	/*
@@ -1747,7 +1750,7 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
 	struct cbq_class *cl = (struct cbq_class*)arg;
 
 	if (--cl->refcnt == 0) {
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 		struct cbq_sched_data *q = qdisc_priv(sch);
 
 		spin_lock_bh(&sch->dev->queue_lock);
@@ -1795,7 +1798,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 	    RTA_PAYLOAD(tb[TCA_CBQ_WRROPT-1]) < sizeof(struct tc_cbq_wrropt))
 		return -EINVAL;
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	if (tb[TCA_CBQ_POLICE-1] &&
 	    RTA_PAYLOAD(tb[TCA_CBQ_POLICE-1]) < sizeof(struct tc_cbq_police))
 		return -EINVAL;
@@ -1838,7 +1841,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 		if (tb[TCA_CBQ_OVL_STRATEGY-1])
 			cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 		if (tb[TCA_CBQ_POLICE-1])
 			cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
 #endif
@@ -1931,7 +1934,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 		cl->overlimit = cbq_ovl_classic;
 	if (tb[TCA_CBQ_OVL_STRATEGY-1])
 		cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	if (tb[TCA_CBQ_POLICE-1])
 		cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
 #endif
@@ -1975,7 +1978,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 		q->tx_class = NULL;
 		q->tx_borrowed = NULL;
 	}
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	if (q->rx_class == cl)
 		q->rx_class = NULL;
 #endif
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 22e431dace54..b8b3345cede8 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -125,7 +125,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	if (skb->len > q->max_size) {
 		sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
 #endif
 			kfree_skb(skb);
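The TBF and qdisc_reshape_fail() changes matter because reshape_fail is only installed when a CBQ class is configured for reclassify policing (cl->police == TC_POLICE_RECLASSIFY, see the cbq_graft() hunk above); widening the guard keeps that second-chance path alive when only CONFIG_NET_CLS_ACT is enabled. In userspace terms — as an illustration, not taken from this commit — the verdict is typically produced by a tc policer whose exceed control is set to reclassify, which returns TC_POLICE_RECLASSIFY/TC_ACT_RECLASSIFY for over-rate packets instead of dropping them.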