Diffstat (limited to 'net/sched/sch_red.c')

 net/sched/sch_red.c | 179 ++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 163 insertions(+), 16 deletions(-)
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index dccfa44c2d71..2be563cba72b 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -44,6 +44,7 @@ struct red_sched_data
 	unsigned char		flags;
 	struct red_parms	parms;
 	struct red_stats	stats;
+	struct Qdisc		*qdisc;
 };
 
 static inline int red_use_ecn(struct red_sched_data *q)
@@ -59,8 +60,10 @@ static inline int red_use_harddrop(struct red_sched_data *q)
 static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
+	struct Qdisc *child = q->qdisc;
+	int ret;
 
-	q->parms.qavg = red_calc_qavg(&q->parms, sch->qstats.backlog);
+	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);
 
 	if (red_is_idling(&q->parms))
 		red_end_of_idle_period(&q->parms);
@@ -91,11 +94,16 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 			break;
 	}
 
-	if (sch->qstats.backlog + skb->len <= q->limit)
-		return qdisc_enqueue_tail(skb, sch);
-
-	q->stats.pdrop++;
-	return qdisc_drop(skb, sch);
+	ret = child->enqueue(skb, child);
+	if (likely(ret == NET_XMIT_SUCCESS)) {
+		sch->bstats.bytes += skb->len;
+		sch->bstats.packets++;
+		sch->q.qlen++;
+	} else {
+		q->stats.pdrop++;
+		sch->qstats.drops++;
+	}
+	return ret;
 
 congestion_drop:
 	qdisc_drop(skb, sch);
@@ -105,21 +113,30 @@ congestion_drop:
 static int red_requeue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
+	struct Qdisc *child = q->qdisc;
+	int ret;
 
 	if (red_is_idling(&q->parms))
 		red_end_of_idle_period(&q->parms);
 
-	return qdisc_requeue(skb, sch);
+	ret = child->ops->requeue(skb, child);
+	if (likely(ret == NET_XMIT_SUCCESS)) {
+		sch->qstats.requeues++;
+		sch->q.qlen++;
+	}
+	return ret;
 }
 
 static struct sk_buff * red_dequeue(struct Qdisc* sch)
 {
 	struct sk_buff *skb;
 	struct red_sched_data *q = qdisc_priv(sch);
+	struct Qdisc *child = q->qdisc;
 
-	skb = qdisc_dequeue_head(sch);
-
-	if (skb == NULL && !red_is_idling(&q->parms))
+	skb = child->dequeue(child);
+	if (skb)
+		sch->q.qlen--;
+	else if (!red_is_idling(&q->parms))
 		red_start_of_idle_period(&q->parms);
 
 	return skb;
@@ -127,14 +144,14 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
 
 static unsigned int red_drop(struct Qdisc* sch)
 {
-	struct sk_buff *skb;
 	struct red_sched_data *q = qdisc_priv(sch);
+	struct Qdisc *child = q->qdisc;
+	unsigned int len;
 
-	skb = qdisc_dequeue_tail(sch);
-	if (skb) {
-		unsigned int len = skb->len;
+	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
 		q->stats.other++;
-		qdisc_drop(skb, sch);
+		sch->qstats.drops++;
+		sch->q.qlen--;
 		return len;
 	}
 
@@ -148,15 +165,48 @@ static void red_reset(struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
-	qdisc_reset_queue(sch);
+	qdisc_reset(q->qdisc);
+	sch->q.qlen = 0;
 	red_restart(&q->parms);
 }
 
+static void red_destroy(struct Qdisc *sch)
+{
+	struct red_sched_data *q = qdisc_priv(sch);
+	qdisc_destroy(q->qdisc);
+}
+
+static struct Qdisc *red_create_dflt(struct net_device *dev, u32 limit)
+{
+	struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops);
+	struct rtattr *rta;
+	int ret;
+
+	if (q) {
+		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)),
+		              GFP_KERNEL);
+		if (rta) {
+			rta->rta_type = RTM_NEWQDISC;
+			rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
+			((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;
+
+			ret = q->ops->change(q, rta);
+			kfree(rta);
+
+			if (ret == 0)
+				return q;
+		}
+		qdisc_destroy(q);
+	}
+	return NULL;
+}
+
 static int red_change(struct Qdisc *sch, struct rtattr *opt)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 	struct rtattr *tb[TCA_RED_MAX];
 	struct tc_red_qopt *ctl;
+	struct Qdisc *child = NULL;
 
 	if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt))
 		return -EINVAL;
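A side note on the rtattr plumbing in red_create_dflt() above: rather than adding a second configuration path, the patch configures the default bfifo child by synthesizing in kernel memory the same netlink attribute userspace would send, then feeding it to the child's ->change() hook. The standalone userspace sketch below demonstrates only that attribute arithmetic, using the real RTA_LENGTH/RTA_DATA macros and struct tc_fifo_qopt from the uapi headers; the surrounding program is illustrative, not part of the patch.

#include <stdio.h>
#include <stdlib.h>
#include <linux/rtnetlink.h>	/* RTA_LENGTH, RTA_DATA, RTM_NEWQDISC */
#include <linux/pkt_sched.h>	/* struct tc_fifo_qopt */

int main(void)
{
	/* Build the same attribute red_create_dflt() hands to the bfifo's
	 * ->change() hook: an rtattr header (rta_len/rta_type) followed by
	 * a struct tc_fifo_qopt payload carrying the queue limit. */
	struct rtattr *rta = malloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)));

	if (rta == NULL)
		return 1;
	rta->rta_type = RTM_NEWQDISC;
	rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
	((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = 1000;

	printf("rta_len=%u payload limit=%u\n", (unsigned)rta->rta_len,
	       ((struct tc_fifo_qopt *)RTA_DATA(rta))->limit);
	free(rta);
	return 0;
}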
@@ -169,9 +219,17 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt)
 
 	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);
 
+	if (ctl->limit > 0) {
+		child = red_create_dflt(sch->dev, ctl->limit);
+		if (child == NULL)
+			return -ENOMEM;
+	}
+
 	sch_tree_lock(sch);
 	q->flags = ctl->flags;
 	q->limit = ctl->limit;
+	if (child)
+		qdisc_destroy(xchg(&q->qdisc, child));
 
 	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
 				 ctl->Plog, ctl->Scell_log,
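The qdisc_destroy(xchg(&q->qdisc, child)) line above is the swap-then-release idiom: publish the new child atomically (under sch_tree_lock()), then tear down whatever was installed before. A toy userspace analogue, substituting GCC/Clang's __atomic_exchange_n for the kernel's xchg() and an invented toy_qdisc type for real qdiscs:

#include <stdio.h>
#include <stdlib.h>

struct toy_qdisc { const char *name; };

static void toy_destroy(struct toy_qdisc *q)
{
	printf("destroying %s\n", q->name);
	free(q);
}

int main(void)
{
	struct toy_qdisc *old_child = malloc(sizeof(*old_child));
	struct toy_qdisc *new_child = malloc(sizeof(*new_child));
	struct toy_qdisc *child;	/* plays the role of q->qdisc */

	old_child->name = "old child";
	new_child->name = "new child";
	child = old_child;

	/* Swap the new child in and destroy whatever was there before,
	 * mirroring qdisc_destroy(xchg(&q->qdisc, child)). */
	toy_destroy(__atomic_exchange_n(&child, new_child, __ATOMIC_SEQ_CST));

	printf("current child: %s\n", child->name);
	toy_destroy(child);
	return 0;
}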
@@ -186,6 +244,9 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt)
 
 static int red_init(struct Qdisc* sch, struct rtattr *opt)
 {
+	struct red_sched_data *q = qdisc_priv(sch);
+
+	q->qdisc = &noop_qdisc;
 	return red_change(sch, opt);
 }
 
@@ -224,15 +285,101 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 	return gnet_stats_copy_app(d, &st, sizeof(st));
 }
 
+static int red_dump_class(struct Qdisc *sch, unsigned long cl,
+			  struct sk_buff *skb, struct tcmsg *tcm)
+{
+	struct red_sched_data *q = qdisc_priv(sch);
+
+	if (cl != 1)
+		return -ENOENT;
+	tcm->tcm_handle |= TC_H_MIN(1);
+	tcm->tcm_info = q->qdisc->handle;
+	return 0;
+}
+
+static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+		     struct Qdisc **old)
+{
+	struct red_sched_data *q = qdisc_priv(sch);
+
+	if (new == NULL)
+		new = &noop_qdisc;
+
+	sch_tree_lock(sch);
+	*old = xchg(&q->qdisc, new);
+	qdisc_reset(*old);
+	sch->q.qlen = 0;
+	sch_tree_unlock(sch);
+	return 0;
+}
+
+static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	struct red_sched_data *q = qdisc_priv(sch);
+	return q->qdisc;
+}
+
+static unsigned long red_get(struct Qdisc *sch, u32 classid)
+{
+	return 1;
+}
+
+static void red_put(struct Qdisc *sch, unsigned long arg)
+{
+	return;
+}
+
+static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+			    struct rtattr **tca, unsigned long *arg)
+{
+	return -ENOSYS;
+}
+
+static int red_delete(struct Qdisc *sch, unsigned long cl)
+{
+	return -ENOSYS;
+}
+
+static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+{
+	if (!walker->stop) {
+		if (walker->count >= walker->skip)
+			if (walker->fn(sch, 1, walker) < 0) {
+				walker->stop = 1;
+				return;
+			}
+		walker->count++;
+	}
+}
+
+static struct tcf_proto **red_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+	return NULL;
+}
+
+static struct Qdisc_class_ops red_class_ops = {
+	.graft		=	red_graft,
+	.leaf		=	red_leaf,
+	.get		=	red_get,
+	.put		=	red_put,
+	.change		=	red_change_class,
+	.delete		=	red_delete,
+	.walk		=	red_walk,
+	.tcf_chain	=	red_find_tcf,
+	.dump		=	red_dump_class,
+};
+
 static struct Qdisc_ops red_qdisc_ops = {
 	.id		=	"red",
 	.priv_size	=	sizeof(struct red_sched_data),
+	.cl_ops		=	&red_class_ops,
 	.enqueue	=	red_enqueue,
 	.dequeue	=	red_dequeue,
 	.requeue	=	red_requeue,
 	.drop		=	red_drop,
 	.init		=	red_init,
 	.reset		=	red_reset,
+	.destroy	=	red_destroy,
 	.change		=	red_change,
 	.dump		=	red_dump,
 	.dump_stats	=	red_dump_stats,

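Taken as a whole, the patch converts RED from a qdisc that owns a private packet queue into a classful wrapper that delegates enqueue/dequeue/drop to a child qdisc while mirroring the child's occupancy in its own sch->q.qlen. The compilable userspace analogue below sketches that delegation-plus-bookkeeping pattern; all types and names (toy_qdisc, XMIT_SUCCESS, and so on) are invented for illustration and are not kernel API.

#include <stdio.h>

enum { XMIT_SUCCESS = 0, XMIT_DROP = 1 };

struct toy_qdisc {
	int (*enqueue)(struct toy_qdisc *q, int pkt);
	int (*dequeue)(struct toy_qdisc *q, int *pkt);
	int qlen;
	int limit;
	int backlog[64];
	struct toy_qdisc *child;	/* NULL for leaf qdiscs */
};

/* Leaf "fifo": the only qdisc that actually stores packets,
 * like the bfifo child red_create_dflt() builds. */
static int fifo_enqueue(struct toy_qdisc *q, int pkt)
{
	if (q->qlen >= q->limit)
		return XMIT_DROP;
	q->backlog[q->qlen++] = pkt;
	return XMIT_SUCCESS;
}

static int fifo_dequeue(struct toy_qdisc *q, int *pkt)
{
	int i;

	if (q->qlen == 0)
		return -1;
	*pkt = q->backlog[0];
	for (i = 1; i < q->qlen; i++)
		q->backlog[i - 1] = q->backlog[i];
	q->qlen--;
	return 0;
}

/* Parent "red": stores nothing itself; it delegates to the child and
 * mirrors the child's length, the same bookkeeping red_enqueue() and
 * red_dequeue() perform after the patch. */
static int red_like_enqueue(struct toy_qdisc *q, int pkt)
{
	int ret = q->child->enqueue(q->child, pkt);

	if (ret == XMIT_SUCCESS)
		q->qlen++;	/* mirror child occupancy */
	return ret;
}

static int red_like_dequeue(struct toy_qdisc *q, int *pkt)
{
	int ret = q->child->dequeue(q->child, pkt);

	if (ret == 0)
		q->qlen--;
	return ret;
}

int main(void)
{
	struct toy_qdisc fifo = { fifo_enqueue, fifo_dequeue, 0, 2, {0}, NULL };
	struct toy_qdisc red  = { red_like_enqueue, red_like_dequeue, 0, 0, {0}, &fifo };
	int pkt;

	red.enqueue(&red, 1);
	red.enqueue(&red, 2);
	red.enqueue(&red, 3);	/* child full: dropped, qlen unchanged */
	printf("parent qlen=%d child qlen=%d\n", red.qlen, fifo.qlen);
	while (red.dequeue(&red, &pkt) == 0)
		printf("dequeued %d\n", pkt);
	return 0;
}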