Diffstat (limited to 'include/net/sch_generic.h')
-rw-r--r--	include/net/sch_generic.h	100
1 file changed, 67 insertions, 33 deletions
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 3c8728aaab4e..b931f021d7ab 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -25,16 +25,18 @@ struct qdisc_rate_table {
 enum qdisc_state_t {
 	__QDISC_STATE_SCHED,
 	__QDISC_STATE_DEACTIVATED,
+	__QDISC_STATE_THROTTLED,
 };
 
 /*
  * following bits are only changed while qdisc lock is held
  */
 enum qdisc___state_t {
-	__QDISC___STATE_RUNNING,
+	__QDISC___STATE_RUNNING = 1,
 };
 
 struct qdisc_size_table {
+	struct rcu_head		rcu;
 	struct list_head	list;
 	struct tc_sizespec	szopts;
 	int			refcnt;
@@ -46,14 +48,13 @@ struct Qdisc {
 	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
 	unsigned		flags;
 #define TCQ_F_BUILTIN		1
-#define TCQ_F_THROTTLED		2
-#define TCQ_F_INGRESS		4
-#define TCQ_F_CAN_BYPASS	8
-#define TCQ_F_MQROOT		16
+#define TCQ_F_INGRESS		2
+#define TCQ_F_CAN_BYPASS	4
+#define TCQ_F_MQROOT		8
 #define TCQ_F_WARN_NONWC	(1 << 16)
 	int			padded;
 	struct Qdisc_ops	*ops;
-	struct qdisc_size_table	*stab;
+	struct qdisc_size_table	__rcu *stab;
 	struct list_head	list;
 	u32			handle;
 	u32			parent;
@@ -78,25 +79,44 @@ struct Qdisc {
 	unsigned long		state;
 	struct sk_buff_head	q;
 	struct gnet_stats_basic_packed bstats;
-	unsigned long		__state;
+	unsigned int		__state;
 	struct gnet_stats_queue	qstats;
 	struct rcu_head		rcu_head;
 	spinlock_t		busylock;
+	u32			limit;
 };
 
-static inline bool qdisc_is_running(struct Qdisc *qdisc)
+static inline bool qdisc_is_running(const struct Qdisc *qdisc)
 {
-	return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+	return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
 }
 
 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
-	return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+	if (qdisc_is_running(qdisc))
+		return false;
+	qdisc->__state |= __QDISC___STATE_RUNNING;
+	return true;
 }
 
 static inline void qdisc_run_end(struct Qdisc *qdisc)
 {
-	__clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+	qdisc->__state &= ~__QDISC___STATE_RUNNING;
+}
+
+static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
+{
+	return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
+}
+
+static inline void qdisc_throttled(struct Qdisc *qdisc)
+{
+	set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
+}
+
+static inline void qdisc_unthrottled(struct Qdisc *qdisc)
+{
+	clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
 }
 
 struct Qdisc_class_ops {
@@ -199,7 +219,7 @@ struct tcf_proto {
 
 struct qdisc_skb_cb {
 	unsigned int		pkt_len;
-	char			data[];
+	long			data[];
 };
 
 static inline int qdisc_qlen(struct Qdisc *q)
@@ -207,7 +227,7 @@ static inline int qdisc_qlen(struct Qdisc *q)
 	return q->q.qlen;
 }
 
-static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
+static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
 {
 	return (struct qdisc_skb_cb *)skb->cb;
 }
@@ -321,6 +341,7 @@ extern void dev_init_scheduler(struct net_device *dev);
 extern void dev_shutdown(struct net_device *dev);
 extern void dev_activate(struct net_device *dev);
 extern void dev_deactivate(struct net_device *dev);
+extern void dev_deactivate_many(struct list_head *head);
 extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 				     struct Qdisc *qdisc);
 extern void qdisc_reset(struct Qdisc *qdisc);
@@ -328,11 +349,10 @@ extern void qdisc_destroy(struct Qdisc *qdisc);
 extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
 extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 				 struct Qdisc_ops *ops);
-extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
-				       struct netdev_queue *dev_queue,
+extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
 				       struct Qdisc_ops *ops, u32 parentid);
-extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
-				    struct qdisc_size_table *stab);
+extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+				      const struct qdisc_size_table *stab);
 extern void tcf_destroy(struct tcf_proto *tp);
 extern void tcf_destroy_chain(struct tcf_proto **fl);
 
@@ -394,7 +414,7 @@ static inline bool qdisc_tx_is_noop(const struct net_device *dev)
 	return true;
 }
 
-static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
 {
 	return qdisc_skb_cb(skb)->pkt_len;
 }
@@ -411,12 +431,20 @@ enum net_xmit_qdisc_t {
 #define net_xmit_drop_count(e)	(1)
 #endif
 
-static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
+					   const struct Qdisc *sch)
 {
 #ifdef CONFIG_NET_SCHED
-	if (sch->stab)
-		qdisc_calculate_pkt_len(skb, sch->stab);
+	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);
+
+	if (stab)
+		__qdisc_calculate_pkt_len(skb, stab);
 #endif
+}
+
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	qdisc_calculate_pkt_len(skb, sch);
 	return sch->enqueue(skb, sch);
 }
 
@@ -426,10 +454,18 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
 	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
 }
 
-static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
+
+static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+				 const struct sk_buff *skb)
+{
+	bstats->bytes += qdisc_pkt_len(skb);
+	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+}
+
+static inline void qdisc_bstats_update(struct Qdisc *sch,
+				       const struct sk_buff *skb)
 {
-	sch->bstats.bytes += len;
-	sch->bstats.packets++;
+	bstats_update(&sch->bstats, skb);
 }
 
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
@@ -437,7 +473,6 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
 {
 	__skb_queue_tail(list, skb);
 	sch->qstats.backlog += qdisc_pkt_len(skb);
-	__qdisc_update_bstats(sch, qdisc_pkt_len(skb));
 
 	return NET_XMIT_SUCCESS;
 }
@@ -452,8 +487,10 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
 {
 	struct sk_buff *skb = __skb_dequeue(list);
 
-	if (likely(skb != NULL))
+	if (likely(skb != NULL)) {
 		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		qdisc_bstats_update(sch, skb);
+	}
 
 	return skb;
 }
@@ -466,10 +503,11 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
 						   struct sk_buff_head *list)
 {
-	struct sk_buff *skb = __qdisc_dequeue_head(sch, list);
+	struct sk_buff *skb = __skb_dequeue(list);
 
 	if (likely(skb != NULL)) {
 		unsigned int len = qdisc_pkt_len(skb);
+		sch->qstats.backlog -= len;
 		kfree_skb(skb);
 		return len;
 	}
@@ -601,7 +639,7 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
 		slot = 0;
 	slot >>= rtab->rate.cell_log;
 	if (slot > 255)
-		return (rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]);
+		return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
 	return rtab->data[slot];
 }
 
@@ -611,11 +649,7 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
 {
 	struct sk_buff *n;
 
-	if ((action == TC_ACT_STOLEN || action == TC_ACT_QUEUED) &&
-	    !skb_shared(skb))
-		n = skb_get(skb);
-	else
-		n = skb_clone(skb, gfp_mask);
+	n = skb_clone(skb, gfp_mask);
 
 	if (n) {
 		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
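
For orientation, a brief usage sketch of the reworked helpers in this diff. It is not part of the patch and not kernel code: the function name example_qdisc_xmit and the simplified transmit loop are assumptions for illustration only. The pattern it shows is that __state is now a plain integer flag touched only by the serialized owner of the qdisc (so qdisc_run_begin()/qdisc_run_end() need no atomic bitops), while the throttled flag stays an atomic bit in qdisc->state because it can also be changed from watchdog/timer context.

/* Illustrative sketch only -- hypothetical caller, not from this patch. */
#include <linux/skbuff.h>
#include <net/sch_generic.h>

static void example_qdisc_xmit(struct Qdisc *q)
{
	struct sk_buff *skb;

	/* Plain (non-atomic) set of __QDISC___STATE_RUNNING; returns false
	 * if another CPU already owns this qdisc's dequeue path. */
	if (!qdisc_run_begin(q))
		return;

	/* Atomic test of __QDISC_STATE_THROTTLED in q->state. */
	while (!qdisc_is_throttled(q) && (skb = q->dequeue(q)) != NULL) {
		/* With this patch, byte/packet stats are bumped at dequeue
		 * time, e.g. via qdisc_bstats_update(q, skb) in the
		 * dequeue path, rather than at enqueue. */
		kfree_skb(skb);		/* stand-in for handing skb to a driver */
	}

	qdisc_run_end(q);	/* plain clear of the RUNNING flag */
}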