author	Thomas Graf <tgraf@suug.ch>	2005-06-19 01:57:26 -0400
committer	David S. Miller <davem@davemloft.net>	2005-06-19 01:57:26 -0400
commit	9972b25d0c6e7f8f893eb3444dea37b42b1201de (patch)
tree	1f94d7bc245178d815669d4cf7db0f56ac71b752
parent	1e061ab2e5aa50a84d68ca654773632f9c425bb6 (diff)
[PKT_SCHED]: Generic queue management interface for qdiscs using internal skb queues
Implements an interface to be used by leaf qdiscs that maintain an internal
skb queue. The interface maintains a backlog in bytes in addition to the
skb_queue_len() maintained by the queue itself. Relevant statistics are
incremented automatically. Every function comes in two variants: one assuming
that Qdisc->q is used as the queue, and a second taking a sk_buff_head as
argument. Be aware that, if you use multiple queues, you still have to
maintain the Qdisc->q.qlen counter yourself.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
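As an illustration of the interface below, here is a minimal sketch (not part
of this patch) of how a fifo-style leaf qdisc could use the Qdisc->q variants;
the example_fifo names and the packet-count limit in the private data are
hypothetical, chosen only to show the helpers in context.

#include <linux/skbuff.h>
#include <net/sch_generic.h>

struct example_fifo_data {
	u32 limit;	/* hypothetical cap on queue length in packets */
};

static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct example_fifo_data *q = qdisc_priv(sch);

	/* qdisc_enqueue_tail() updates backlog and byte/packet stats for us */
	if (likely(skb_queue_len(&sch->q) < q->limit))
		return qdisc_enqueue_tail(skb, sch);

	/* qdisc_drop() frees the skb, bumps qstats.drops, returns NET_XMIT_DROP */
	return qdisc_drop(skb, sch);
}

static struct sk_buff *example_fifo_dequeue(struct Qdisc *sch)
{
	/* the backlog counter is decremented automatically on dequeue */
	return qdisc_dequeue_head(sch);
}

Because these helpers operate on Qdisc->q directly, __skb_queue_tail() and
__skb_dequeue() keep sch->q.qlen up to date as a side effect; no manual
bookkeeping is needed in this single-queue case.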
-rw-r--r--	include/net/sch_generic.h	122
1 files changed, 122 insertions, 0 deletions
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c57504b3b518..7b97405e2dbf 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -172,4 +172,126 @@ tcf_destroy(struct tcf_proto *tp)
 	kfree(tp);
 }
 
+static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
+				       struct sk_buff_head *list)
+{
+	__skb_queue_tail(list, skb);
+	sch->qstats.backlog += skb->len;
+	sch->bstats.bytes += skb->len;
+	sch->bstats.packets++;
+
+	return NET_XMIT_SUCCESS;
+}
+
+static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
+{
+	return __qdisc_enqueue_tail(skb, sch, &sch->q);
+}
+
+static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
+						   struct sk_buff_head *list)
+{
+	struct sk_buff *skb = __skb_dequeue(list);
+
+	if (likely(skb != NULL))
+		sch->qstats.backlog -= skb->len;
+
+	return skb;
+}
+
+static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
+{
+	return __qdisc_dequeue_head(sch, &sch->q);
+}
+
+static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
+						   struct sk_buff_head *list)
+{
+	struct sk_buff *skb = __skb_dequeue_tail(list);
+
+	if (likely(skb != NULL))
+		sch->qstats.backlog -= skb->len;
+
+	return skb;
+}
+
+static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
+{
+	return __qdisc_dequeue_tail(sch, &sch->q);
+}
+
+static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
+				  struct sk_buff_head *list)
+{
+	__skb_queue_head(list, skb);
+	sch->qstats.backlog += skb->len;
+	sch->qstats.requeues++;
+
+	return NET_XMIT_SUCCESS;
+}
+
+static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	return __qdisc_requeue(skb, sch, &sch->q);
+}
+
+static inline void __qdisc_reset_queue(struct Qdisc *sch,
+				       struct sk_buff_head *list)
+{
+	/*
+	 * We do not know the backlog in bytes of this list, it
+	 * is up to the caller to correct it
+	 */
+	skb_queue_purge(list);
+}
+
+static inline void qdisc_reset_queue(struct Qdisc *sch)
+{
+	__qdisc_reset_queue(sch, &sch->q);
+	sch->qstats.backlog = 0;
+}
+
+static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
+					      struct sk_buff_head *list)
+{
+	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
+
+	if (likely(skb != NULL)) {
+		unsigned int len = skb->len;
+		kfree_skb(skb);
+		return len;
+	}
+
+	return 0;
+}
+
+static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
+{
+	return __qdisc_queue_drop(sch, &sch->q);
+}
+
+static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
+{
+	kfree_skb(skb);
+	sch->qstats.drops++;
+
+	return NET_XMIT_DROP;
+}
+
+static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
+{
+	sch->qstats.drops++;
+
+#ifdef CONFIG_NET_CLS_POLICE
+	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
+		goto drop;
+
+	return NET_XMIT_SUCCESS;
+
+drop:
+#endif
+	kfree_skb(skb);
+	return NET_XMIT_DROP;
+}
+
 #endif
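The qlen caveat from the commit message deserves a sketch of its own. Below is
a hedged example (again hypothetical, not from this patch) of a two-band qdisc
that keeps packets in private sk_buff_head lists and therefore must maintain
sch->q.qlen by hand; the example_prio names and the band selection via
skb->priority are made up for illustration.

#include <linux/skbuff.h>
#include <net/sch_generic.h>

struct example_prio_data {
	struct sk_buff_head bands[2];	/* private queues, not Qdisc->q */
};

static int example_prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct example_prio_data *q = qdisc_priv(sch);
	struct sk_buff_head *band = &q->bands[skb->priority & 1];
	int ret;

	/* the helper updates backlog and bstats, but not sch->q.qlen ... */
	ret = __qdisc_enqueue_tail(skb, sch, band);
	if (ret == NET_XMIT_SUCCESS)
		sch->q.qlen++;	/* ... so that part is our job */

	return ret;
}

static struct sk_buff *example_prio_dequeue(struct Qdisc *sch)
{
	struct example_prio_data *q = qdisc_priv(sch);
	int band;

	for (band = 0; band < 2; band++) {
		struct sk_buff *skb = __qdisc_dequeue_head(sch, &q->bands[band]);

		if (skb != NULL) {
			sch->q.qlen--;	/* keep the shared counter in sync */
			return skb;
		}
	}

	return NULL;
}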
