Diffstat (limited to 'net/sched')
-rw-r--r--   net/sched/sch_generic.c   165
-rw-r--r--   net/sched/sch_teql.c       10
2 files changed, 111 insertions, 64 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 27d03816ec3e..6128e6f24589 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -37,15 +37,11 @@
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
-static inline int qdisc_qlen(struct Qdisc *q)
-{
-	return q->q.qlen;
-}
-
 static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
 	q->gso_skb = skb;
 	q->qstats.requeues++;
+	q->q.qlen++;	/* it's still part of the queue */
 	__netif_schedule(q);
 
 	return 0;
@@ -61,9 +57,11 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
 		/* check the reason of requeuing without tx lock first */
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-		if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+		if (!netif_tx_queue_stopped(txq) &&
+		    !netif_tx_queue_frozen(txq)) {
 			q->gso_skb = NULL;
-		else
+			q->q.qlen--;
+		} else
 			skb = NULL;
 	} else {
 		skb = q->dequeue(q);
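
The two hunks above change an invariant: a packet parked in q->gso_skb after a failed transmit now stays counted in q->q.qlen until dequeue_skb() actually hands it back out, so the queue length no longer under-reports the backlog while the TX queue is stopped. A standalone userspace model of that accounting (hypothetical model_* names, not kernel API):

/*
 * Standalone model (not kernel code) of the qlen accounting above: a
 * requeued packet stays counted until it is really handed back out.
 */
#include <assert.h>
#include <stddef.h>

struct model_qdisc {
	void *gso_skb;	/* requeued packet, still owned by the qdisc */
	int   qlen;	/* must include gso_skb while it is held */
};

static void model_requeue(struct model_qdisc *q, void *skb)
{
	q->gso_skb = skb;
	q->qlen++;		/* it's still part of the queue */
}

static void *model_dequeue(struct model_qdisc *q, int txq_stopped)
{
	void *skb = q->gso_skb;

	if (skb) {
		if (txq_stopped)
			return NULL;	/* leave it counted */
		q->gso_skb = NULL;
		q->qlen--;
	}
	return skb;
}

int main(void)
{
	struct model_qdisc q = { NULL, 0 };
	int pkt;

	model_requeue(&q, &pkt);
	assert(q.qlen == 1);			/* requeued skb is visible */
	assert(model_dequeue(&q, 1) == NULL);	/* stopped: stays queued */
	assert(q.qlen == 1);
	assert(model_dequeue(&q, 0) == &pkt);	/* running: handed back */
	assert(q.qlen == 0);
	return 0;
}
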
@@ -103,44 +101,23 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 }
 
 /*
- * NOTE: Called under qdisc_lock(q) with locally disabled BH.
- *
- * __QDISC_STATE_RUNNING guarantees only one CPU can process
- * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
- * this queue.
- *
- * netif_tx_lock serializes accesses to device driver.
- *
- * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
- * if one is grabbed, another must be free.
- *
- * Note, that this procedure can be called by a watchdog timer
+ * Transmit one skb, and handle the return status as required. Holding the
+ * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
+ * function.
  *
  * Returns to the caller:
  *	0  - queue is empty or throttled.
  *	>0 - queue is not empty.
- *
  */
-static inline int qdisc_restart(struct Qdisc *q)
+int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+		    struct net_device *dev, struct netdev_queue *txq,
+		    spinlock_t *root_lock)
 {
-	struct netdev_queue *txq;
 	int ret = NETDEV_TX_BUSY;
-	struct net_device *dev;
-	spinlock_t *root_lock;
-	struct sk_buff *skb;
-
-	/* Dequeue packet */
-	if (unlikely((skb = dequeue_skb(q)) == NULL))
-		return 0;
-
-	root_lock = qdisc_lock(q);
 
 	/* And release qdisc */
 	spin_unlock(root_lock);
 
-	dev = qdisc_dev(q);
-	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_tx_queue_stopped(txq) &&
 	    !netif_tx_queue_frozen(txq))
@@ -177,6 +154,44 @@ static inline int qdisc_restart(struct Qdisc *q)
 	return ret;
 }
 
+/*
+ * NOTE: Called under qdisc_lock(q) with locally disabled BH.
+ *
+ * __QDISC_STATE_RUNNING guarantees only one CPU can process
+ * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
+ * this queue.
+ *
+ * netif_tx_lock serializes accesses to device driver.
+ *
+ * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
+ * if one is grabbed, another must be free.
+ *
+ * Note, that this procedure can be called by a watchdog timer
+ *
+ * Returns to the caller:
+ *	0  - queue is empty or throttled.
+ *	>0 - queue is not empty.
+ *
+ */
+static inline int qdisc_restart(struct Qdisc *q)
+{
+	struct netdev_queue *txq;
+	struct net_device *dev;
+	spinlock_t *root_lock;
+	struct sk_buff *skb;
+
+	/* Dequeue packet */
+	skb = dequeue_skb(q);
+	if (unlikely(!skb))
+		return 0;
+
+	root_lock = qdisc_lock(q);
+	dev = qdisc_dev(q);
+	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
+	return sch_direct_xmit(skb, q, dev, txq, root_lock);
+}
+
 void __qdisc_run(struct Qdisc *q)
 {
 	unsigned long start_time = jiffies;
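
With the two hunks above, qdisc_restart() becomes a thin wrapper: it dequeues under the qdisc root lock and hands the packet to the new sch_direct_xmit(), which releases the root lock, takes the driver TX lock, transmits, and on success returns the remaining queue length, mirroring the ">0 - queue is not empty" contract. A compressed userspace sketch of that control flow (stub names, locking reduced to comments):

/*
 * Userspace model (hypothetical stubs, not kernel API) of the split:
 * restart() only dequeues; direct_xmit() owns the lock juggling and
 * returns >0 while the queue may still have work, so the caller loops.
 */
#include <stdio.h>

static int queue[4] = { 11, 22, 33 };
static int head, tail = 3;

static int dequeue_pkt(void)			/* models dequeue_skb() */
{
	return head < tail ? queue[head++] : -1;
}

static int direct_xmit(int pkt)			/* models sch_direct_xmit() */
{
	/* unlock root_lock, HARD_TX_LOCK, driver xmit, relock ... */
	printf("xmit %d\n", pkt);
	return tail - head;			/* >0: queue is not empty */
}

static int restart(void)			/* models qdisc_restart() */
{
	int pkt = dequeue_pkt();

	if (pkt < 0)
		return 0;			/* empty or throttled */
	return direct_xmit(pkt);
}

int main(void)
{
	while (restart() > 0)			/* models __qdisc_run() loop */
		;
	return 0;
}
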
@@ -391,18 +406,38 @@ static const u8 prio2band[TC_PRIO_MAX+1] =
 
 #define PFIFO_FAST_BANDS 3
 
-static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
-					     struct Qdisc *qdisc)
+/*
+ * Private data for a pfifo_fast scheduler containing:
+ *	- queues for the three bands
+ *	- bitmap indicating which of the bands contain skbs
+ */
+struct pfifo_fast_priv {
+	u32 bitmap;
+	struct sk_buff_head q[PFIFO_FAST_BANDS];
+};
+
+/*
+ * Convert a bitmap to the first band number where an skb is queued, where:
+ *	bitmap=0 means there are no skbs on any band.
+ *	bitmap=1 means there is an skb on band 0.
+ *	bitmap=7 means there are skbs on all 3 bands, etc.
+ */
+static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
+
+static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
+					     int band)
 {
-	struct sk_buff_head *list = qdisc_priv(qdisc);
-	return list + prio2band[skb->priority & TC_PRIO_MAX];
+	return priv->q + band;
 }
 
 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
-	struct sk_buff_head *list = prio2list(skb, qdisc);
+	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
+		int band = prio2band[skb->priority & TC_PRIO_MAX];
+		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+		struct sk_buff_head *list = band2list(priv, band);
 
-	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
+		priv->bitmap |= (1 << band);
 		qdisc->q.qlen++;
 		return __qdisc_enqueue_tail(skb, qdisc, list);
 	}
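
The bitmap2band[] table above is a branch-free lookup: for a 3-bit occupancy bitmap it yields the index of the lowest set bit, i.e. the highest-priority non-empty band, and -1 for an empty qdisc, which is exactly ffs(bitmap) - 1. A quick standalone check of that equivalence:

/*
 * Sanity check for bitmap2band: over all 8 possible occupancy bitmaps
 * it must agree with ffs(bitmap) - 1 (and give -1 for an empty qdisc).
 */
#include <assert.h>
#include <strings.h>	/* ffs() */

static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

int main(void)
{
	for (int bitmap = 0; bitmap < 8; bitmap++)
		assert(bitmap2band[bitmap] == ffs(bitmap) - 1);
	return 0;
}
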
@@ -412,14 +447,18 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 
 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
 {
-	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+	int band = bitmap2band[priv->bitmap];
 
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
-		if (!skb_queue_empty(list + prio)) {
-			qdisc->q.qlen--;
-			return __qdisc_dequeue_head(qdisc, list + prio);
-		}
+	if (likely(band >= 0)) {
+		struct sk_buff_head *list = band2list(priv, band);
+		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
+
+		qdisc->q.qlen--;
+		if (skb_queue_empty(list))
+			priv->bitmap &= ~(1 << band);
+
+		return skb;
 	}
 
 	return NULL;
@@ -427,12 +466,13 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
 
 static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
 {
-	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+	int band = bitmap2band[priv->bitmap];
+
+	if (band >= 0) {
+		struct sk_buff_head *list = band2list(priv, band);
 
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
-		if (!skb_queue_empty(list + prio))
-			return skb_peek(list + prio);
+		return skb_peek(list);
 	}
 
 	return NULL;
@@ -441,11 +481,12 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
 static void pfifo_fast_reset(struct Qdisc* qdisc)
 {
 	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		__qdisc_reset_queue(qdisc, list + prio);
+		__qdisc_reset_queue(qdisc, band2list(priv, prio));
 
+	priv->bitmap = 0;
 	qdisc->qstats.backlog = 0;
 	qdisc->q.qlen = 0;
 }
@@ -465,17 +506,17 @@ nla_put_failure:
 static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
 {
 	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		skb_queue_head_init(list + prio);
+		skb_queue_head_init(band2list(priv, prio));
 
 	return 0;
 }
 
 static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.id		=	"pfifo_fast",
-	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
+	.priv_size	=	sizeof(struct pfifo_fast_priv),
 	.enqueue	=	pfifo_fast_enqueue,
 	.dequeue	=	pfifo_fast_dequeue,
 	.peek		=	pfifo_fast_peek,
@@ -547,8 +588,11 @@ void qdisc_reset(struct Qdisc *qdisc)
 	if (ops->reset)
 		ops->reset(qdisc);
 
-	kfree_skb(qdisc->gso_skb);
-	qdisc->gso_skb = NULL;
+	if (qdisc->gso_skb) {
+		kfree_skb(qdisc->gso_skb);
+		qdisc->gso_skb = NULL;
+		qdisc->q.qlen = 0;
+	}
 }
 EXPORT_SYMBOL(qdisc_reset);
 
@@ -605,6 +649,9 @@ static void attach_one_default_qdisc(struct net_device *dev,
 			printk(KERN_INFO "%s: activation failed\n", dev->name);
 			return;
 		}
+
+		/* Can by-pass the queue discipline for default qdisc */
+		qdisc->flags |= TCQ_F_CAN_BYPASS;
 	} else {
 		qdisc = &noqueue_qdisc;
 	}
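
The TCQ_F_CAN_BYPASS flag set above lets the core transmit path skip the enqueue/dequeue round trip when the default qdisc is empty and uncontended; the check that consumes the flag lives in the core xmit path and is not part of this diff. A userspace model of the idea (hypothetical model_* helpers, an assumption about the core-side logic rather than a quote of it):

/*
 * Model of the bypass: an empty, idle qdisc with TCQ_F_CAN_BYPASS may
 * transmit directly; otherwise the packet takes the normal queue path.
 */
#include <stdbool.h>
#include <stdio.h>

#define TCQ_F_CAN_BYPASS 1

struct model_qdisc {
	unsigned flags;
	int qlen;
	bool running;	/* models __QDISC_STATE_RUNNING */
};

static bool model_run_begin(struct model_qdisc *q)
{
	if (q->running)
		return false;	/* another CPU owns the qdisc */
	q->running = true;
	return true;
}

static void model_xmit(struct model_qdisc *q, int pkt)
{
	if ((q->flags & TCQ_F_CAN_BYPASS) && q->qlen == 0 &&
	    model_run_begin(q)) {
		printf("bypass: xmit %d directly\n", pkt);
		q->running = false;
		return;
	}
	q->qlen++;	/* slow path: enqueue, then run the qdisc */
	printf("enqueued %d, qlen=%d\n", pkt, q->qlen);
}

int main(void)
{
	struct model_qdisc q = { .flags = TCQ_F_CAN_BYPASS };

	model_xmit(&q, 1);	/* empty and idle: bypass */
	q.qlen = 1;		/* pretend a backlog built up */
	model_xmit(&q, 2);	/* must queue behind it */
	return 0;
}
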
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 9c002b6e0533..5a002c247231 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -268,7 +268,7 @@ static inline int teql_resolve(struct sk_buff *skb,
 	return __teql_resolve(skb, skb_res, dev);
 }
 
-static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct teql_master *master = netdev_priv(dev);
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
@@ -307,14 +307,14 @@ restart:
 
 			if (!netif_tx_queue_stopped(slave_txq) &&
 			    !netif_tx_queue_frozen(slave_txq) &&
-			    slave_ops->ndo_start_xmit(skb, slave) == 0) {
+			    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
 				txq_trans_update(slave_txq);
 				__netif_tx_unlock(slave_txq);
 				master->slaves = NEXT_SLAVE(q);
 				netif_wake_queue(dev);
 				txq->tx_packets++;
 				txq->tx_bytes += length;
-				return 0;
+				return NETDEV_TX_OK;
 			}
 			__netif_tx_unlock(slave_txq);
 		}
@@ -323,7 +323,7 @@ restart:
 			break;
 		case 1:
 			master->slaves = NEXT_SLAVE(q);
-			return 0;
+			return NETDEV_TX_OK;
 		default:
 			nores = 1;
 			break;
@@ -345,7 +345,7 @@ restart:
 drop:
 	txq->tx_dropped++;
 	dev_kfree_skb(skb);
-	return 0;
+	return NETDEV_TX_OK;
 }
 
 static int teql_master_open(struct net_device *dev)
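
The sch_teql.c hunks are part of the tree-wide conversion of ndo_start_xmit() to the dedicated netdev_tx_t return type, so callers and implementations compare against the named NETDEV_TX_OK constant instead of a bare 0. A standalone model (local enum, not the kernel's definitions) of the ownership contract the constants encode:

/*
 * Model of the netdev_tx_t contract: OK means the driver took the
 * packet; BUSY means the caller still owns it and must requeue.
 */
#include <stdio.h>

typedef enum {
	MODEL_TX_OK   = 0,	/* driver consumed (or freed) the packet */
	MODEL_TX_BUSY = 16,	/* driver asks the stack to requeue it */
} model_tx_t;

static model_tx_t model_start_xmit(int pkt, int ring_full)
{
	if (ring_full)
		return MODEL_TX_BUSY;	/* caller keeps ownership of pkt */
	printf("sent %d\n", pkt);
	return MODEL_TX_OK;		/* ownership passed to the driver */
}

int main(void)
{
	/* compare against the named constant, as the diff now does */
	if (model_start_xmit(7, 0) == MODEL_TX_OK)
		printf("ok path\n");
	if (model_start_xmit(8, 1) == MODEL_TX_BUSY)
		printf("busy path: requeue\n");
	return 0;
}
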