Diffstat (limited to 'net/sched')
 net/sched/cls_tcindex.c |  2
 net/sched/act_mirred.c  |  2
 net/sched/cls_api.c     | 11
 net/sched/cls_bpf.c     | 12
 net/sched/em_canid.c    |  7
 net/sched/ematch.c      |  6
 net/sched/sch_cbq.c     | 48
 net/sched/sch_choke.c   | 18
 net/sched/sch_generic.c |  4
 net/sched/sch_teql.c    |  4
 10 files changed, 53 insertions(+), 61 deletions(-)
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 4f912c0e225b..eb48306033d9 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -218,10 +218,12 @@ static int mirred_device_event(struct notifier_block *unused,
 
 	if (event == NETDEV_UNREGISTER)
 		list_for_each_entry(m, &mirred_list, tcfm_list) {
+			spin_lock_bh(&m->tcf_lock);
 			if (m->tcfm_dev == dev) {
 				dev_put(dev);
 				m->tcfm_dev = NULL;
 			}
+			spin_unlock_bh(&m->tcf_lock);
 		}
 
 	return NOTIFY_DONE;
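
A note on the two added lines: mirred_device_event() runs from the netdev
notifier and clears m->tcfm_dev, while the action's packet path reads that
pointer under m->tcf_lock; without the lock the notifier races with those
readers. A minimal userspace sketch of the check-and-clear-under-lock
pattern (pthread names are illustrative, not from the patch):

#include <pthread.h>
#include <stdio.h>

/* Toy version of the pattern the hunk adds. */
struct mirred {
	pthread_mutex_t lock;
	void *dev;			/* stands in for m->tcfm_dev */
};

static void device_event(struct mirred *m, void *dying_dev)
{
	pthread_mutex_lock(&m->lock);	/* like spin_lock_bh(&m->tcf_lock) */
	if (m->dev == dying_dev)
		m->dev = NULL;		/* readers holding the lock see NULL, */
	pthread_mutex_unlock(&m->lock);	/* never a half-torn-down pointer */
}

int main(void)
{
	struct mirred m = { PTHREAD_MUTEX_INITIALIZER, (void *)0x1 };

	device_event(&m, (void *)0x1);
	printf("dev after event: %p\n", m.dev);	/* (nil) */
	return 0;
}
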
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 45527e6b52db..c28b0d327b12 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -561,13 +561,14 @@ EXPORT_SYMBOL(tcf_exts_change);
 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
+	struct nlattr *nest;
+
 	if (exts->action && !list_empty(&exts->actions)) {
 		/*
 		 * again for backward compatible mode - we want
 		 * to work with both old and new modes of entering
 		 * tc data even if iproute2 was newer - jhs
 		 */
-		struct nlattr *nest;
 		if (exts->type != TCA_OLD_COMPAT) {
 			nest = nla_nest_start(skb, exts->action);
 			if (nest == NULL)
@@ -585,10 +586,14 @@ int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
 			nla_nest_end(skb, nest);
 		}
 	}
-#endif
 	return 0;
-nla_put_failure: __attribute__ ((unused))
+
+nla_put_failure:
+	nla_nest_cancel(skb, nest);
 	return -1;
+#else
+	return 0;
+#endif
 }
 EXPORT_SYMBOL(tcf_exts_dump);
 
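
Hoisting `nest` to function scope is what makes the new nla_put_failure:
path legal: nla_nest_cancel() can now undo a partially built nest, and the
!CONFIG_NET_CLS_ACT build gets a plain return 0; instead of the old
unused-label attribute hack. A toy sketch of the start/cancel idea on a
flat buffer (all names invented for illustration):

#include <stdio.h>
#include <string.h>

/* Toy of the nla_nest_start()/nla_nest_cancel() pattern: remember where
 * the nest began so a failure can roll the buffer back to that point. */
struct buf { char data[32]; size_t len; };

static size_t nest_start(struct buf *b) { return b->len; }

static int put_attr(struct buf *b, const char *s)
{
	size_t n = strlen(s);

	if (b->len + n > sizeof(b->data))
		return -1;			/* the nla_put_failure case */
	memcpy(b->data + b->len, s, n);
	b->len += n;
	return 0;
}

static void nest_cancel(struct buf *b, size_t start) { b->len = start; }

int main(void)
{
	struct buf b = { .len = 0 };
	size_t nest = nest_start(&b);

	if (put_attr(&b, "attr1") ||
	    put_attr(&b, "attr2-way-too-long-for-this-buffer"))
		nest_cancel(&b, nest);		/* like nla_nest_cancel(skb, nest) */
	printf("committed %zu bytes\n", b.len);	/* 0: partial nest rolled back */
	return 0;
}
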
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 13f64df2c710..0e30d58149da 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -30,7 +30,7 @@ struct cls_bpf_head {
 };
 
 struct cls_bpf_prog {
-	struct sk_filter *filter;
+	struct bpf_prog *filter;
 	struct sock_filter *bpf_ops;
 	struct tcf_exts exts;
 	struct tcf_result res;
@@ -54,7 +54,7 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 	int ret;
 
 	list_for_each_entry(prog, &head->plist, link) {
-		int filter_res = SK_RUN_FILTER(prog->filter, skb);
+		int filter_res = BPF_PROG_RUN(prog->filter, skb);
 
 		if (filter_res == 0)
 			continue;
@@ -92,7 +92,7 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
 	tcf_unbind_filter(tp, &prog->res);
 	tcf_exts_destroy(tp, &prog->exts);
 
-	sk_unattached_filter_destroy(prog->filter);
+	bpf_prog_destroy(prog->filter);
 
 	kfree(prog->bpf_ops);
 	kfree(prog);
@@ -161,7 +161,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	struct sock_filter *bpf_ops, *bpf_old;
 	struct tcf_exts exts;
 	struct sock_fprog_kern tmp;
-	struct sk_filter *fp, *fp_old;
+	struct bpf_prog *fp, *fp_old;
 	u16 bpf_size, bpf_len;
 	u32 classid;
 	int ret;
@@ -193,7 +193,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	tmp.len = bpf_len;
 	tmp.filter = bpf_ops;
 
-	ret = sk_unattached_filter_create(&fp, &tmp);
+	ret = bpf_prog_create(&fp, &tmp);
 	if (ret)
 		goto errout_free;
 
@@ -211,7 +211,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	tcf_exts_change(tp, &prog->exts, &exts);
 
 	if (fp_old)
-		sk_unattached_filter_destroy(fp_old);
+		bpf_prog_destroy(fp_old);
 	if (bpf_old)
 		kfree(bpf_old);
 
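
The cls_bpf.c hunks are a mechanical rename following the kernel's split of
the old struct sk_filter into a socket-attachment wrapper and a standalone
struct bpf_prog; classifier behavior is unchanged. The mapping, as visible
in the diff:

	struct sk_filter               ->  struct bpf_prog
	SK_RUN_FILTER()                ->  BPF_PROG_RUN()
	sk_unattached_filter_create()  ->  bpf_prog_create()
	sk_unattached_filter_destroy() ->  bpf_prog_destroy()
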
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index c721cd4a469f..3e9f76413b3b 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -420,7 +420,7 @@ static void tcindex_destroy(struct tcf_proto *tp)
 	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
 	walker.count = 0;
 	walker.skip = 0;
-	walker.fn = &tcindex_destroy_element;
+	walker.fn = tcindex_destroy_element;
 	tcindex_walk(tp, &walker);
 	kfree(p->perfect);
 	kfree(p->h);
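
The tcindex hunk is purely cosmetic: in C a function designator decays to a
pointer, so `fn` and `&fn` produce the same value; kernel style omits the
`&`. A standalone demonstration:

#include <stdio.h>

static int f(void) { return 42; }

int main(void)
{
	int (*p1)(void) = f;	/* new style */
	int (*p2)(void) = &f;	/* old style - same pointer */

	printf("%d %d %d\n", p1(), p2(), p1 == p2);	/* 42 42 1 */
	return 0;
}
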
diff --git a/net/sched/em_canid.c b/net/sched/em_canid.c
index bfd34e4c1afc..7c292d474f47 100644
--- a/net/sched/em_canid.c
+++ b/net/sched/em_canid.c
@@ -125,7 +125,6 @@ static int em_canid_change(struct tcf_proto *tp, void *data, int len,
 {
 	struct can_filter *conf = data; /* Array with rules */
 	struct canid_match *cm;
-	struct canid_match *cm_old = (struct canid_match *)m->data;
 	int i;
 
 	if (!len)
@@ -181,12 +180,6 @@ static int em_canid_change(struct tcf_proto *tp, void *data, int len,
 
 	m->datalen = sizeof(struct canid_match) + len;
 	m->data = (unsigned long)cm;
-
-	if (cm_old != NULL) {
-		pr_err("canid: Configuring an existing ematch!\n");
-		kfree(cm_old);
-	}
-
 	return 0;
 }
 
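The deleted block appears to have been dead code: the ematch core invokes
->change() only on a freshly validated match whose m->data is still zero,
so cm_old was always NULL and the pr_err()/kfree() path could never run.
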
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 3a633debb6df..ad57f4444b9c 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -526,9 +526,11 @@ pop_stack:
 		match_idx = stack[--stackp];
 		cur_match = tcf_em_get_match(tree, match_idx);
 
-		if (tcf_em_early_end(cur_match, res))
+		if (tcf_em_early_end(cur_match, res)) {
+			if (tcf_em_is_inverted(cur_match))
+				res = !res;
 			goto pop_stack;
-		else {
+		} else {
 			match_idx++;
 			goto proceed;
 		}
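
The bug being fixed: when tcf_em_early_end() terminates a sub-tree during
the stack pop, an inverted container (a NOT node) must flip the propagated
result before popping further, otherwise `not (...)` sub-expressions
evaluate wrong at early-end. A toy model of the pop step (names invented;
the real early-end rule also depends on the AND/OR relation):

#include <stdbool.h>
#include <stdio.h>

struct toy_match { bool inverted; };

/* Pop a finished sub-tree's result through its container node; flipping
 * on inversion is exactly the `res = !res` the patch adds. */
static bool pop(const struct toy_match *container, bool sub_result)
{
	bool res = sub_result;

	if (container->inverted)	/* tcf_em_is_inverted(cur_match) */
		res = !res;
	return res;
}

int main(void)
{
	struct toy_match not_node = { .inverted = true };

	/* sub-tree under "not(...)" failed early => overall match succeeds */
	printf("%d\n", pop(&not_node, false));	/* 1 */
	return 0;
}
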
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index ead526467cca..762a04bb8f6d 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -159,7 +159,6 @@ struct cbq_sched_data {
 	struct cbq_class	*tx_borrowed;
 	int			tx_len;
 	psched_time_t		now;		/* Cached timestamp */
-	psched_time_t		now_rt;		/* Cached real time */
 	unsigned int		pmask;
 
 	struct hrtimer		delay_timer;
@@ -353,12 +352,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 	int toplevel = q->toplevel;
 
 	if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
-		psched_time_t now;
-		psched_tdiff_t incr;
-
-		now = psched_get_time();
-		incr = now - q->now_rt;
-		now = q->now + incr;
+		psched_time_t now = psched_get_time();
 
 		do {
 			if (cl->undertime < now) {
@@ -700,8 +694,13 @@ cbq_update(struct cbq_sched_data *q)
 	struct cbq_class *this = q->tx_class;
 	struct cbq_class *cl = this;
 	int len = q->tx_len;
+	psched_time_t now;
 
 	q->tx_class = NULL;
+	/* Time integrator. We calculate EOS time
+	 * by adding expected packet transmission time.
+	 */
+	now = q->now + L2T(&q->link, len);
 
 	for ( ; cl; cl = cl->share) {
 		long avgidle = cl->avgidle;
@@ -717,7 +716,7 @@ cbq_update(struct cbq_sched_data *q)
 		 *	idle = (now - last) - last_pktlen/rate
 		 */
 
-		idle = q->now - cl->last;
+		idle = now - cl->last;
 		if ((unsigned long)idle > 128*1024*1024) {
 			avgidle = cl->maxidle;
 		} else {
@@ -761,7 +760,7 @@ cbq_update(struct cbq_sched_data *q)
 			idle -= L2T(&q->link, len);
 			idle += L2T(cl, len);
 
-			cl->undertime = q->now + idle;
+			cl->undertime = now + idle;
 		} else {
 			/* Underlimit */
 
@@ -771,7 +770,8 @@ cbq_update(struct cbq_sched_data *q)
 			else
 				cl->avgidle = avgidle;
 		}
-		cl->last = q->now;
+		if ((s64)(now - cl->last) > 0)
+			cl->last = now;
 	}
 
 	cbq_update_toplevel(q, this, q->tx_borrowed);
@@ -943,31 +943,13 @@ cbq_dequeue(struct Qdisc *sch)
 	struct sk_buff *skb;
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	psched_time_t now;
-	psched_tdiff_t incr;
 
 	now = psched_get_time();
-	incr = now - q->now_rt;
-
-	if (q->tx_class) {
-		psched_tdiff_t incr2;
-		/* Time integrator. We calculate EOS time
-		 * by adding expected packet transmission time.
-		 * If real time is greater, we warp artificial clock,
-		 * so that:
-		 *
-		 * cbq_time = max(real_time, work);
-		 */
-		incr2 = L2T(&q->link, q->tx_len);
-		q->now += incr2;
+
+	if (q->tx_class)
 		cbq_update(q);
-		if ((incr -= incr2) < 0)
-			incr = 0;
-		q->now += incr;
-	} else {
-		if (now > q->now)
-			q->now = now;
-	}
-	q->now_rt = now;
+
+	q->now = now;
 
 	for (;;) {
 		q->wd_expires = 0;
@@ -1223,7 +1205,6 @@ cbq_reset(struct Qdisc *sch)
 	hrtimer_cancel(&q->delay_timer);
 	q->toplevel = TC_CBQ_MAXLEVEL;
 	q->now = psched_get_time();
-	q->now_rt = q->now;
 
 	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
 		q->active[prio] = NULL;
@@ -1407,7 +1388,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->delay_timer.function = cbq_undelay;
 	q->toplevel = TC_CBQ_MAXLEVEL;
 	q->now = psched_get_time();
-	q->now_rt = q->now;
 
 	cbq_link_class(&q->link);
 
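
With now_rt gone, CBQ keeps a single clock: cbq_dequeue() pins q->now to
real time, and cbq_update() derives the end-of-service (EOS) time locally
as q->now + L2T(&q->link, len), i.e. the last timestamp plus the expected
transmission time of the just-sent packet. L2T() itself is a rate-table
lookup; the sketch below (standalone, values invented) computes the same
quantity directly:

#include <stdint.h>
#include <stdio.h>

/* Length-to-time: how long `len` bytes occupy a link of `rate_bps` bit/s. */
static uint64_t l2t_ns(uint32_t len, uint64_t rate_bps)
{
	return (uint64_t)len * 8u * 1000000000ull / rate_bps;
}

int main(void)
{
	uint64_t now_ns = 1000000;			/* arbitrary q->now */
	uint64_t eos = now_ns + l2t_ns(1500, 10000000);	/* 1500 B @ 10 Mbit/s */

	/* 1500 * 8 / 10^7 s = 1.2 ms, so EOS = now + 1200000 ns */
	printf("EOS = %llu ns\n", (unsigned long long)eos);
	return 0;
}
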
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index ed30e436128b..fb666d1e4de3 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -133,10 +133,16 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
 	--sch->q.qlen;
 }
 
+/* private part of skb->cb[] that a qdisc is allowed to use
+ * is limited to QDISC_CB_PRIV_LEN bytes.
+ * As a flow key might be too large, we store a part of it only.
+ */
+#define CHOKE_K_LEN min_t(u32, sizeof(struct flow_keys), QDISC_CB_PRIV_LEN - 3)
+
 struct choke_skb_cb {
 	u16			classid;
 	u8			keys_valid;
-	struct flow_keys	keys;
+	u8			keys[QDISC_CB_PRIV_LEN - 3];
 };
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
@@ -163,22 +169,26 @@ static u16 choke_get_classid(const struct sk_buff *skb)
 static bool choke_match_flow(struct sk_buff *skb1,
 			     struct sk_buff *skb2)
 {
+	struct flow_keys temp;
+
 	if (skb1->protocol != skb2->protocol)
 		return false;
 
 	if (!choke_skb_cb(skb1)->keys_valid) {
 		choke_skb_cb(skb1)->keys_valid = 1;
-		skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys);
+		skb_flow_dissect(skb1, &temp);
+		memcpy(&choke_skb_cb(skb1)->keys, &temp, CHOKE_K_LEN);
 	}
 
 	if (!choke_skb_cb(skb2)->keys_valid) {
 		choke_skb_cb(skb2)->keys_valid = 1;
-		skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys);
+		skb_flow_dissect(skb2, &temp);
+		memcpy(&choke_skb_cb(skb2)->keys, &temp, CHOKE_K_LEN);
 	}
 
 	return !memcmp(&choke_skb_cb(skb1)->keys,
 		       &choke_skb_cb(skb2)->keys,
-		       sizeof(struct flow_keys));
+		       CHOKE_K_LEN);
 }
 
 /*
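
The choke change exists because a qdisc may only scribble on
QDISC_CB_PRIV_LEN bytes of skb->cb[]; struct flow_keys can be larger than
what remains after the u16 classid and u8 keys_valid (hence the `- 3`), so
only a CHOKE_K_LEN prefix of the dissected key is stored and compared. A
standalone model of the layout (the QDISC_CB_PRIV_LEN value and the key
struct are assumed here, not taken from the kernel):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QDISC_CB_PRIV_LEN 20		/* assumed for the sketch */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Toy flow key; the real struct flow_keys of that era held src/dst
 * addresses, ports and the IP protocol. */
struct flow_keys { uint32_t src, dst; uint16_t port16[2]; uint8_t ip_proto; };

#define CHOKE_K_LEN \
	MIN(sizeof(struct flow_keys), (size_t)(QDISC_CB_PRIV_LEN - 3))

struct choke_skb_cb {			/* 3 bytes of metadata + key prefix */
	uint16_t classid;
	uint8_t  keys_valid;
	uint8_t  keys[QDISC_CB_PRIV_LEN - 3];
};

int main(void)
{
	struct flow_keys k = { .src = 1, .dst = 2, .ip_proto = 6 };
	struct choke_skb_cb cb1 = { .keys_valid = 1 }, cb2 = { .keys_valid = 1 };

	memcpy(cb1.keys, &k, CHOKE_K_LEN);	/* store the prefix only */
	memcpy(cb2.keys, &k, CHOKE_K_LEN);

	/* compare the same prefix, as choke_match_flow() now does */
	printf("match=%d (%zu of %zu key bytes kept)\n",
	       !memcmp(cb1.keys, cb2.keys, CHOKE_K_LEN),
	       (size_t)CHOKE_K_LEN, sizeof(struct flow_keys));
	return 0;
}
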
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index e1543b03e39d..fc04fe93c2da 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -108,7 +108,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 
 /*
  * Transmit one skb, and handle the return status as required. Holding the
- * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
+ * __QDISC___STATE_RUNNING bit guarantees that only one CPU can execute this
  * function.
  *
  * Returns to the caller:
@@ -156,7 +156,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 /*
  * NOTE: Called under qdisc_lock(q) with locally disabled BH.
  *
- * __QDISC_STATE_RUNNING guarantees only one CPU can process
+ * __QDISC___STATE_RUNNING guarantees only one CPU can process
  * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
  * this queue.
  *
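These two hunks only refresh stale comments: the run-exclusion bit has
lived in the separate qdisc->__state word (as __QDISC___STATE_RUNNING)
since the state fields were split, and the comments still used the
pre-split macro name. No code changes.
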
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 474167162947..bd33793b527e 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -485,8 +485,8 @@ static int __init teql_init(void)
 		struct net_device *dev;
 		struct teql_master *master;
 
-		dev = alloc_netdev(sizeof(struct teql_master),
-				   "teql%d", teql_master_setup);
+		dev = alloc_netdev(sizeof(struct teql_master), "teql%d",
+				   NET_NAME_UNKNOWN, teql_master_setup);
 		if (!dev) {
 			err = -ENOMEM;
 			break;
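
The teql hunk adapts to the newer alloc_netdev() signature, which gained a
name_assign_type argument between the format string and the setup op;
NET_NAME_UNKNOWN records that the origin of the "teql%d" name is not
exposed to userspace. Nothing else about device setup changes.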