Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/Kconfig	2
-rw-r--r--	net/sched/act_api.c	4
-rw-r--r--	net/sched/sch_api.c	1
-rw-r--r--	net/sched/sch_generic.c	5
-rw-r--r--	net/sched/sch_htb.c	4
-rw-r--r--	net/sched/sch_netem.c	131
6 files changed, 94 insertions, 53 deletions
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 9c118baed9dc..b0941186f867 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -185,7 +185,7 @@ config NET_SCH_GRED
 	depends on NET_SCHED
 	help
 	  Say Y here if you want to use the Generic Random Early Detection
-	  (RED) packet scheduling algorithm for some of your network devices
+	  (GRED) packet scheduling algorithm for some of your network devices
 	  (see the top of <file:net/sched/sch_red.c> for details and
 	  references about the algorithm).
 
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 5e6cc371b39e..cafcb084098d 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -171,10 +171,10 @@ repeat:
 				skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
 				skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
 			}
-			if (ret != TC_ACT_PIPE)
-				goto exec_done;
 			if (ret == TC_ACT_REPEAT)
 				goto repeat;	/* we need a ttl - JHS */
+			if (ret != TC_ACT_PIPE)
+				goto exec_done;
 		}
 		act = a->next;
 	}
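The reordering above fixes dead code: TC_ACT_REPEAT is not TC_ACT_PIPE, so under the old order the "ret != TC_ACT_PIPE" test jumped to exec_done before the repeat case could ever match. A minimal userspace sketch of the corrected dispatch order (the enum values are hypothetical stand-ins, not the kernel's definitions):

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's TC_ACT_* values. */
enum { TC_ACT_PIPE = 3, TC_ACT_REPEAT = 6 };

static const char *dispatch(int ret)
{
	/* With the old order, "ret != TC_ACT_PIPE -> exec_done" ran first,
	 * so the REPEAT test below could never match (REPEAT != PIPE). */
	if (ret == TC_ACT_REPEAT)
		return "repeat";	/* re-run the same action */
	if (ret != TC_ACT_PIPE)
		return "exec_done";	/* stop walking the action chain */
	return "next action";		/* PIPE: fall through to a->next */
}

int main(void)
{
	printf("REPEAT -> %s\n", dispatch(TC_ACT_REPEAT));
	printf("PIPE   -> %s\n", dispatch(TC_ACT_PIPE));
	return 0;
}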
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 4323a74eea30..07977f8f2679 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1289,6 +1289,7 @@ static int __init pktsched_init(void)
 
 subsys_initcall(pktsched_init);
 
+EXPORT_SYMBOL(qdisc_lookup);
 EXPORT_SYMBOL(qdisc_get_rtab);
 EXPORT_SYMBOL(qdisc_put_rtab);
 EXPORT_SYMBOL(register_qdisc);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 8c01e023f02e..87e48a4e1051 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -179,6 +179,7 @@ requeue:
 		netif_schedule(dev);
 		return 1;
 	}
+	BUG_ON((int) q->q.qlen < 0);
 	return q->q.qlen;
 }
 
@@ -539,6 +540,10 @@ void dev_activate(struct net_device *dev)
 		write_unlock_bh(&qdisc_tree_lock);
 	}
 
+	if (!netif_carrier_ok(dev))
+		/* Delay activation until next carrier-on event */
+		return;
+
 	spin_lock_bh(&dev->queue_lock);
 	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
 	if (dev->qdisc != &noqueue_qdisc) {
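The second hunk makes dev_activate() a no-op while the link has no carrier, on the assumption (not visible in this diff) that the carrier-on notification re-runs activation. A compilable userspace sketch of that guard, with stand-in types:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct net_device, reduced to what the guard touches. */
struct fake_dev {
	bool carrier_ok;	/* models netif_carrier_ok(dev) */
	bool qdisc_active;	/* models dev->qdisc pointing at the real qdisc */
};

static void dev_activate_sketch(struct fake_dev *dev)
{
	if (!dev->carrier_ok)
		return;	/* delay activation until the next carrier-on event */

	dev->qdisc_active = true;	/* the rcu_assign_pointer() step */
}

int main(void)
{
	struct fake_dev dev = { .carrier_ok = false };

	dev_activate_sketch(&dev);
	printf("link down: active=%d\n", dev.qdisc_active);

	dev.carrier_ok = true;
	dev_activate_sketch(&dev);	/* re-run on carrier-on */
	printf("link up:   active=%d\n", dev.qdisc_active);
	return 0;
}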
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index a85935e7d53d..558cc087e602 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -717,6 +717,10 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		if (q->direct_queue.qlen < q->direct_qlen) {
 			__skb_queue_tail(&q->direct_queue, skb);
 			q->direct_pkts++;
+		} else {
+			kfree_skb(skb);
+			sch->qstats.drops++;
+			return NET_XMIT_DROP;
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
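Previously, a packet arriving while the direct queue already held direct_qlen entries was neither queued nor freed; the new else branch frees it and counts a drop. A userspace model of the bounded enqueue-or-drop behaviour (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct pkt { struct pkt *next; };

struct direct_queue {
	struct pkt *head, *tail;
	unsigned int qlen, limit;	/* limit plays the role of direct_qlen */
	unsigned long drops;
};

/* 0 mimics NET_XMIT_SUCCESS, 1 mimics NET_XMIT_DROP */
static int direct_enqueue(struct direct_queue *q, struct pkt *p)
{
	if (q->qlen < q->limit) {
		p->next = NULL;
		if (q->tail)
			q->tail->next = p;
		else
			q->head = p;
		q->tail = p;
		q->qlen++;
		return 0;
	}
	free(p);	/* kfree_skb() in the kernel */
	q->drops++;	/* sch->qstats.drops++ */
	return 1;
}

int main(void)
{
	struct direct_queue q = { .limit = 2 };

	for (int i = 0; i < 4; i++)
		direct_enqueue(&q, calloc(1, sizeof(struct pkt)));
	printf("queued=%u dropped=%lu\n", q.qlen, q.drops);	/* 2 and 2 */
	return 0;
}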
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 31c29deb139d..e0c9fbe73b15 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -138,38 +138,77 @@ static long tabledist(unsigned long mu, long sigma,
 }
 
 /* Put skb in the private delayed queue. */
-static int delay_skb(struct Qdisc *sch, struct sk_buff *skb)
+static int netem_delay(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
-	struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
 	psched_tdiff_t td;
 	psched_time_t now;
 
 	PSCHED_GET_TIME(now);
 	td = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist);
-	PSCHED_TADD2(now, td, cb->time_to_send);
 
 	/* Always queue at tail to keep packets in order */
 	if (likely(q->delayed.qlen < q->limit)) {
+		struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
+
+		PSCHED_TADD2(now, td, cb->time_to_send);
+
+		pr_debug("netem_delay: skb=%p now=%llu tosend=%llu\n", skb,
+			 now, cb->time_to_send);
+
 		__skb_queue_tail(&q->delayed, skb);
-		if (!timer_pending(&q->timer)) {
-			q->timer.expires = jiffies + PSCHED_US2JIFFIE(td);
-			add_timer(&q->timer);
-		}
 		return NET_XMIT_SUCCESS;
 	}
 
+	pr_debug("netem_delay: queue over limit %d\n", q->limit);
+	sch->qstats.overlimits++;
 	kfree_skb(skb);
 	return NET_XMIT_DROP;
 }
 
+/*
+ * Move a packet that is ready to send from the delay holding
+ * list to the underlying qdisc.
+ */
+static int netem_run(struct Qdisc *sch)
+{
+	struct netem_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *skb;
+	psched_time_t now;
+
+	PSCHED_GET_TIME(now);
+
+	skb = skb_peek(&q->delayed);
+	if (skb) {
+		const struct netem_skb_cb *cb
+			= (const struct netem_skb_cb *)skb->cb;
+		long delay
+			= PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
+		pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
+
+		/* if more time remaining? */
+		if (delay > 0) {
+			mod_timer(&q->timer, jiffies + delay);
+			return 1;
+		}
+
+		__skb_unlink(skb, &q->delayed);
+
+		if (q->qdisc->enqueue(skb, q->qdisc)) {
+			sch->q.qlen--;
+			sch->qstats.drops++;
+		}
+	}
+
+	return 0;
+}
+
 static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
-	struct sk_buff *skb2;
 	int ret;
 
-	pr_debug("netem_enqueue skb=%p @%lu\n", skb, jiffies);
+	pr_debug("netem_enqueue skb=%p\n", skb);
 
 	/* Random packet drop 0 => none, ~0 => all */
 	if (q->loss && q->loss >= get_crandom(&q->loss_cor)) {
@@ -180,11 +219,21 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	/* Random duplication */
-	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)
-	    && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
-		pr_debug("netem_enqueue: dup %p\n", skb2);
+	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) {
+		struct sk_buff *skb2;
+
+		skb2 = skb_clone(skb, GFP_ATOMIC);
+		if (skb2 && netem_delay(sch, skb2) == NET_XMIT_SUCCESS) {
+			struct Qdisc *qp;
+
+			/* Since one packet can generate two packets in the
+			 * queue, the parent's qlen accounting gets confused,
+			 * so fix it.
+			 */
+			qp = qdisc_lookup(sch->dev, TC_H_MAJ(sch->parent));
+			if (qp)
+				qp->q.qlen++;
 
-		if (delay_skb(sch, skb2)) {
 			sch->q.qlen++;
 			sch->bstats.bytes += skb2->len;
 			sch->bstats.packets++;
@@ -202,7 +251,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		ret = q->qdisc->enqueue(skb, q->qdisc);
 	} else {
 		q->counter = 0;
-		ret = delay_skb(sch, skb);
+		ret = netem_delay(sch, skb);
+		netem_run(sch);
 	}
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
@@ -212,6 +262,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	} else
 		sch->qstats.drops++;
 
+	pr_debug("netem: enqueue ret %d\n", ret);
 	return ret;
 }
 
@@ -241,56 +292,35 @@ static unsigned int netem_drop(struct Qdisc* sch)
 	return len;
 }
 
-/* Dequeue packet.
- * Move all packets that are ready to send from the delay holding
- * list to the underlying qdisc, then just call dequeue
- */
 static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
+	int pending;
+
+	pending = netem_run(sch);
 
 	skb = q->qdisc->dequeue(q->qdisc);
-	if (skb)
+	if (skb) {
+		pr_debug("netem_dequeue: return skb=%p\n", skb);
 		sch->q.qlen--;
+		sch->flags &= ~TCQ_F_THROTTLED;
+	}
+	else if (pending) {
+		pr_debug("netem_dequeue: throttling\n");
+		sch->flags |= TCQ_F_THROTTLED;
+	}
+
 	return skb;
 }
 
 static void netem_watchdog(unsigned long arg)
 {
 	struct Qdisc *sch = (struct Qdisc *)arg;
-	struct netem_sched_data *q = qdisc_priv(sch);
-	struct net_device *dev = sch->dev;
-	struct sk_buff *skb;
-	psched_time_t now;
-
-	pr_debug("netem_watchdog: fired @%lu\n", jiffies);
-
-	spin_lock_bh(&dev->queue_lock);
-	PSCHED_GET_TIME(now);
-
-	while ((skb = skb_peek(&q->delayed)) != NULL) {
-		const struct netem_skb_cb *cb
-			= (const struct netem_skb_cb *)skb->cb;
-		long delay
-			= PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
-		pr_debug("netem_watchdog: skb %p@%lu %ld\n",
-			 skb, jiffies, delay);
 
-		/* if more time remaining? */
-		if (delay > 0) {
-			mod_timer(&q->timer, jiffies + delay);
-			break;
-		}
-		__skb_unlink(skb, &q->delayed);
-
-		if (q->qdisc->enqueue(skb, q->qdisc)) {
-			sch->q.qlen--;
-			sch->qstats.drops++;
-		}
-	}
-	qdisc_run(dev);
-	spin_unlock_bh(&dev->queue_lock);
+	pr_debug("netem_watchdog qlen=%d\n", sch->q.qlen);
+	sch->flags &= ~TCQ_F_THROTTLED;
+	netif_schedule(sch->dev);
}
 
 static void netem_reset(struct Qdisc *sch)
@@ -301,6 +331,7 @@ static void netem_reset(struct Qdisc *sch)
 	skb_queue_purge(&q->delayed);
 
 	sch->q.qlen = 0;
+	sch->flags &= ~TCQ_F_THROTTLED;
 	del_timer_sync(&q->timer);
 }
 
