about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
Diffstat (limited to 'net')
-rw-r--r--net/sched/sch_cbq.c7
-rw-r--r--net/sched/sch_netem.c6
2 files changed, 6 insertions, 7 deletions
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 290b26bdc89d..9e6cdab6af3b 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,7 +390,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 	now = q->now + incr;
 
 	do {
-		if (PSCHED_TLESS(cl->undertime, now)) {
+		if (cl->undertime < now) {
 			q->toplevel = cl->level;
 			return;
 		}
@@ -845,8 +845,7 @@ cbq_under_limit(struct cbq_class *cl)
 	if (cl->tparent == NULL)
 		return cl;
 
-	if (PSCHED_IS_PASTPERFECT(cl->undertime) ||
-	    !PSCHED_TLESS(q->now, cl->undertime)) {
+	if (PSCHED_IS_PASTPERFECT(cl->undertime) || q->now >= cl->undertime) {
 		cl->delayed = 0;
 		return cl;
 	}
@@ -870,7 +869,7 @@ cbq_under_limit(struct cbq_class *cl)
 		if (cl->level > q->toplevel)
 			return NULL;
 	} while (!PSCHED_IS_PASTPERFECT(cl->undertime) &&
-		 PSCHED_TLESS(q->now, cl->undertime));
+		 q->now < cl->undertime);
 
 	cl->delayed = 0;
 	return cl;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 6044ae77d5da..5d571aa04a76 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -286,7 +286,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 	/* if more time remaining? */
 	PSCHED_GET_TIME(now);
 
-	if (!PSCHED_TLESS(now, cb->time_to_send)) {
+	if (cb->time_to_send <= now) {
 		pr_debug("netem_dequeue: return skb=%p\n", skb);
 		sch->q.qlen--;
 		return skb;
@@ -494,7 +494,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 
 	if (likely(skb_queue_len(list) < q->limit)) {
 		/* Optimize for add at tail */
-		if (likely(skb_queue_empty(list) || !PSCHED_TLESS(tnext, q->oldest))) {
+		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
 			q->oldest = tnext;
 			return qdisc_enqueue_tail(nskb, sch);
 		}
@@ -503,7 +503,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 		const struct netem_skb_cb *cb
 			= (const struct netem_skb_cb *)skb->cb;
 
-		if (!PSCHED_TLESS(tnext, cb->time_to_send))
+		if (tnext >= cb->time_to_send)
 			break;
 	}
 