Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/act_api.c	 2
-rw-r--r--	net/sched/act_ife.c	53
-rw-r--r--	net/sched/act_ipt.c	 7
-rw-r--r--	net/sched/sch_fifo.c	 4
-rw-r--r--	net/sched/sch_htb.c	 2
-rw-r--r--	net/sched/sch_netem.c	12
-rw-r--r--	net/sched/sch_prio.c	67
7 files changed, 73 insertions, 74 deletions
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index f8c61d2a7963..47ec2305f920 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1122,7 +1122,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
 		nla_nest_end(skb, nest);
 		ret = skb->len;
 	} else
-		nla_nest_cancel(skb, nest);
+		nlmsg_trim(skb, b);
 
 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
 	if (NETLINK_CB(cb->skb).portid && ret)
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index b7fa96926c90..845ab5119c05 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -106,9 +106,9 @@ int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
 }
 EXPORT_SYMBOL_GPL(ife_get_meta_u16);
 
-int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval)
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
 {
-	mi->metaval = kmemdup(metaval, sizeof(u32), GFP_KERNEL);
+	mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
 	if (!mi->metaval)
 		return -ENOMEM;
 
@@ -116,9 +116,9 @@ int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval)
 }
 EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
 
-int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval)
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
 {
-	mi->metaval = kmemdup(metaval, sizeof(u16), GFP_KERNEL);
+	mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
 	if (!mi->metaval)
 		return -ENOMEM;
 
@@ -240,10 +240,10 @@ static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
 }
 
 /* called when adding new meta information
- * under ife->tcf_lock
+ * under ife->tcf_lock for existing action
 */
 static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
-				void *val, int len)
+				void *val, int len, bool exists)
 {
 	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
 	int ret = 0;
@@ -251,11 +251,13 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
 	if (!ops) {
 		ret = -ENOENT;
 #ifdef CONFIG_MODULES
-		spin_unlock_bh(&ife->tcf_lock);
+		if (exists)
+			spin_unlock_bh(&ife->tcf_lock);
 		rtnl_unlock();
 		request_module("ifemeta%u", metaid);
 		rtnl_lock();
-		spin_lock_bh(&ife->tcf_lock);
+		if (exists)
+			spin_lock_bh(&ife->tcf_lock);
 		ops = find_ife_oplist(metaid);
 #endif
 	}
@@ -272,10 +274,10 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
 }
 
 /* called when adding new meta information
- * under ife->tcf_lock
+ * under ife->tcf_lock for existing action
 */
 static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
-			int len)
+			int len, bool atomic)
 {
 	struct tcf_meta_info *mi = NULL;
 	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
@@ -284,7 +286,7 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
 	if (!ops)
 		return -ENOENT;
 
-	mi = kzalloc(sizeof(*mi), GFP_KERNEL);
+	mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
 	if (!mi) {
 		/*put back what find_ife_oplist took */
 		module_put(ops->owner);
@@ -294,7 +296,7 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
 	mi->metaid = metaid;
 	mi->ops = ops;
 	if (len > 0) {
-		ret = ops->alloc(mi, metaval);
+		ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
 		if (ret != 0) {
 			kfree(mi);
 			module_put(ops->owner);
@@ -313,11 +315,13 @@ static int use_all_metadata(struct tcf_ife_info *ife)
 	int rc = 0;
 	int installed = 0;
 
+	read_lock(&ife_mod_lock);
 	list_for_each_entry(o, &ifeoplist, list) {
-		rc = add_metainfo(ife, o->metaid, NULL, 0);
+		rc = add_metainfo(ife, o->metaid, NULL, 0, true);
 		if (rc == 0)
 			installed += 1;
 	}
+	read_unlock(&ife_mod_lock);
 
 	if (installed)
 		return 0;
@@ -385,8 +389,9 @@ static void tcf_ife_cleanup(struct tc_action *a, int bind)
 	spin_unlock_bh(&ife->tcf_lock);
 }
 
-/* under ife->tcf_lock */
-static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb)
+/* under ife->tcf_lock for existing action */
+static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+			     bool exists)
 {
 	int len = 0;
 	int rc = 0;
@@ -398,11 +403,11 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb)
 			val = nla_data(tb[i]);
 			len = nla_len(tb[i]);
 
-			rc = load_metaops_and_vet(ife, i, val, len);
+			rc = load_metaops_and_vet(ife, i, val, len, exists);
 			if (rc != 0)
 				return rc;
 
-			rc = add_metainfo(ife, i, val, len);
+			rc = add_metainfo(ife, i, val, len, exists);
 			if (rc)
 				return rc;
 		}
@@ -475,7 +480,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
 			saddr = nla_data(tb[TCA_IFE_SMAC]);
 	}
 
-	spin_lock_bh(&ife->tcf_lock);
+	if (exists)
+		spin_lock_bh(&ife->tcf_lock);
 	ife->tcf_action = parm->action;
 
 	if (parm->flags & IFE_ENCODE) {
@@ -505,11 +511,12 @@ metadata_parse_err:
 			if (ret == ACT_P_CREATED)
 				_tcf_ife_cleanup(a, bind);
 
-			spin_unlock_bh(&ife->tcf_lock);
+			if (exists)
+				spin_unlock_bh(&ife->tcf_lock);
 			return err;
 		}
 
-		err = populate_metalist(ife, tb2);
+		err = populate_metalist(ife, tb2, exists);
 		if (err)
 			goto metadata_parse_err;
 
@@ -524,12 +531,14 @@ metadata_parse_err:
 			if (ret == ACT_P_CREATED)
 				_tcf_ife_cleanup(a, bind);
 
-			spin_unlock_bh(&ife->tcf_lock);
+			if (exists)
+				spin_unlock_bh(&ife->tcf_lock);
 			return err;
 		}
 	}
 
-	spin_unlock_bh(&ife->tcf_lock);
+	if (exists)
+		spin_unlock_bh(&ife->tcf_lock);
 
 	if (ret == ACT_P_CREATED)
 		tcf_hash_insert(tn, a);
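For reference, a minimal user-space sketch of the convention these act_ife hunks adopt: when the action already exists it is shared and protected by its spinlock, so helpers must take the lock and must not block while holding it (hence GFP_ATOMIC); a freshly created action is not yet visible, so no locking is needed and GFP_KERNEL is fine. The names below (meta_store, add_meta, dup_val) are hypothetical illustrations, not kernel APIs.

/* Illustrative user-space analogue of the "exists"/gfp threading above. */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct meta_store {
	pthread_mutex_t lock;
	char *metaval;
};

/* atomic != 0 models "caller holds a lock, allocation must not sleep";
 * user space has no GFP_ATOMIC, the flag is only threaded through to show
 * where the kernel code would pick GFP_ATOMIC over GFP_KERNEL. */
static void *dup_val(const void *val, size_t len, int atomic)
{
	void *p = malloc(len);

	(void)atomic;
	if (p)
		memcpy(p, val, len);
	return p;
}

static int add_meta(struct meta_store *ms, const char *val, size_t len,
		    int exists)
{
	char *copy;

	if (exists)			/* shared object: serialize the update */
		pthread_mutex_lock(&ms->lock);

	copy = dup_val(val, len, exists);	/* kernel: GFP_ATOMIC when locked */
	if (!copy) {
		if (exists)
			pthread_mutex_unlock(&ms->lock);
		return -1;
	}

	free(ms->metaval);
	ms->metaval = copy;

	if (exists)
		pthread_mutex_unlock(&ms->lock);
	return 0;
}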
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 6148e323ed93..b8c50600697a 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -123,10 +123,13 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
 	}
 
 	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
-	if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size)
+	if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
+		if (exists)
+			tcf_hash_release(a, bind);
 		return -EINVAL;
+	}
 
-	if (!tcf_hash_check(tn, index, a, bind)) {
+	if (!exists) {
 		ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
 				      false);
 		if (ret)
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 6ea0db427f91..baeed6a78d28 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -40,14 +40,18 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			      struct sk_buff **to_free)
 {
+	unsigned int prev_backlog;
+
 	if (likely(skb_queue_len(&sch->q) < sch->limit))
 		return qdisc_enqueue_tail(skb, sch);
 
+	prev_backlog = sch->qstats.backlog;
 	/* queue full, remove one skb to fulfill the limit */
 	__qdisc_queue_drop_head(sch, &sch->q, to_free);
 	qdisc_qstats_drop(sch);
 	qdisc_enqueue_tail(skb, sch);
 
+	qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
 	return NET_XMIT_CN;
 }
 
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index ba098f2654b4..91982d9784b3 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -984,7 +984,9 @@ static void htb_work_func(struct work_struct *work)
 	struct htb_sched *q = container_of(work, struct htb_sched, work);
 	struct Qdisc *sch = q->watchdog.qdisc;
 
+	rcu_read_lock();
 	__netif_schedule(qdisc_root(sch));
+	rcu_read_unlock();
 }
 
 static int htb_init(struct Qdisc *sch, struct nlattr *opt)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 6eac3d880048..aaaf02175338 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -621,17 +621,17 @@ deliver:
 #endif
 
 	if (q->qdisc) {
+		unsigned int pkt_len = qdisc_pkt_len(skb);
 		struct sk_buff *to_free = NULL;
 		int err;
 
 		err = qdisc_enqueue(skb, q->qdisc, &to_free);
 		kfree_skb_list(to_free);
-		if (unlikely(err != NET_XMIT_SUCCESS)) {
-			if (net_xmit_drop_count(err)) {
-				qdisc_qstats_drop(sch);
-				qdisc_tree_reduce_backlog(sch, 1,
-							  qdisc_pkt_len(skb));
-			}
+		if (err != NET_XMIT_SUCCESS &&
+		    net_xmit_drop_count(err)) {
+			qdisc_qstats_drop(sch);
+			qdisc_tree_reduce_backlog(sch, 1,
+						  pkt_len);
 		}
 		goto tfifo_dequeue;
 	}
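The netem hunk caches qdisc_pkt_len(skb) before handing the skb to the child qdisc, because the child may free or otherwise consume it; the drop accounting afterwards uses the saved length instead of touching the skb again. Below is a small, self-contained user-space sketch of the same save-before-handoff rule; all names (struct pkt, enqueue_child) are hypothetical and only illustrate the pattern.

/* Save what you still need from a buffer before passing ownership to a
 * callee that may free it. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt {
	size_t len;
	char *data;
};

/* Takes ownership of the packet and may free it; -1 means "dropped". */
static int enqueue_child(struct pkt *p)
{
	free(p->data);
	free(p);
	return -1;
}

int main(void)
{
	size_t backlog = 1000;
	struct pkt *p = malloc(sizeof(*p));

	p->data = strdup("payload");
	p->len = strlen(p->data);

	size_t pkt_len = p->len;	/* cache before ownership transfer */

	if (enqueue_child(p) != 0) {
		/* p may already be freed here; only the cached length is safe */
		backlog -= pkt_len;
		printf("dropped %zu bytes, backlog now %zu\n", pkt_len, backlog);
	}
	return 0;
}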
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index f4d443aeae54..8f575899adfa 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -153,8 +153,9 @@ prio_destroy(struct Qdisc *sch)
 static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
+	struct Qdisc *queues[TCQ_PRIO_BANDS];
+	int oldbands = q->bands, i;
 	struct tc_prio_qopt *qopt;
-	int i;
 
 	if (nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
@@ -168,62 +169,42 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 		return -EINVAL;
 	}
 
+	/* Before commit, make sure we can allocate all new qdiscs */
+	for (i = oldbands; i < qopt->bands; i++) {
+		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+					      TC_H_MAKE(sch->handle, i + 1));
+		if (!queues[i]) {
+			while (i > oldbands)
+				qdisc_destroy(queues[--i]);
+			return -ENOMEM;
+		}
+	}
+
 	sch_tree_lock(sch);
 	q->bands = qopt->bands;
 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
-	for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
+	for (i = q->bands; i < oldbands; i++) {
 		struct Qdisc *child = q->queues[i];
-		q->queues[i] = &noop_qdisc;
-		if (child != &noop_qdisc) {
-			qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
-			qdisc_destroy(child);
-		}
-	}
-	sch_tree_unlock(sch);
 
-	for (i = 0; i < q->bands; i++) {
-		if (q->queues[i] == &noop_qdisc) {
-			struct Qdisc *child, *old;
-
-			child = qdisc_create_dflt(sch->dev_queue,
-						  &pfifo_qdisc_ops,
-						  TC_H_MAKE(sch->handle, i + 1));
-			if (child) {
-				sch_tree_lock(sch);
-				old = q->queues[i];
-				q->queues[i] = child;
-
-				if (old != &noop_qdisc) {
-					qdisc_tree_reduce_backlog(old,
-								  old->q.qlen,
-								  old->qstats.backlog);
-					qdisc_destroy(old);
-				}
-				sch_tree_unlock(sch);
-			}
-		}
+		qdisc_tree_reduce_backlog(child, child->q.qlen,
+					  child->qstats.backlog);
+		qdisc_destroy(child);
 	}
+
+	for (i = oldbands; i < q->bands; i++)
+		q->queues[i] = queues[i];
+
+	sch_tree_unlock(sch);
 	return 0;
 }
 
 static int prio_init(struct Qdisc *sch, struct nlattr *opt)
 {
-	struct prio_sched_data *q = qdisc_priv(sch);
-	int i;
-
-	for (i = 0; i < TCQ_PRIO_BANDS; i++)
-		q->queues[i] = &noop_qdisc;
-
-	if (opt == NULL) {
+	if (!opt)
 		return -EINVAL;
-	} else {
-		int err;
 
-		if ((err = prio_tune(sch, opt)) != 0)
-			return err;
-	}
-	return 0;
+	return prio_tune(sch, opt);
 }
 
 static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
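The prio_tune() rewrite follows a prepare-then-commit shape: allocate every new child qdisc outside the tree lock, unwind on partial failure, and only then swap the new children in and tear down the surplus ones under the lock. Below is a rough user-space sketch of that shape; struct sched, queue_create and retune are hypothetical names used only to illustrate the pattern, not kernel interfaces.

/* Prepare-then-commit: allocate all new resources up front, roll back on
 * partial failure, and only publish them once everything succeeded. */
#include <pthread.h>
#include <stdlib.h>

#define MAX_BANDS 16

struct sched {
	pthread_mutex_t lock;
	int bands;
	void *queues[MAX_BANDS];
};

static void *queue_create(void) { return calloc(1, 64); }
static void queue_destroy(void *q) { free(q); }

static int retune(struct sched *s, int new_bands)
{
	void *staged[MAX_BANDS];
	int oldbands = s->bands, i;

	if (new_bands < 1 || new_bands > MAX_BANDS)
		return -1;

	/* Before commit, make sure every new queue can be allocated. */
	for (i = oldbands; i < new_bands; i++) {
		staged[i] = queue_create();
		if (!staged[i]) {
			while (i > oldbands)
				queue_destroy(staged[--i]);
			return -1;
		}
	}

	pthread_mutex_lock(&s->lock);
	s->bands = new_bands;
	for (i = new_bands; i < oldbands; i++) {	/* shrinking: drop extras */
		queue_destroy(s->queues[i]);
		s->queues[i] = NULL;
	}
	for (i = oldbands; i < new_bands; i++)		/* growing: publish new */
		s->queues[i] = staged[i];
	pthread_mutex_unlock(&s->lock);
	return 0;
}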