author    Eric Dumazet <edumazet@google.com>    2014-11-03 11:19:53 -0500
committer David S. Miller <davem@davemloft.net> 2014-11-03 16:13:03 -0500
commit    56b174256b6936ec4c1ed8f3407109ac6929d3ca (patch)
tree      673380fc883abd0b3fc7e1561aba31a41d964886 /net/sched/sch_netem.c
parent    8ce0c8254f15229aa99fc6c04141f28c446e5f8c (diff)
net: add rbnode to struct sk_buff
Yaogong replaces the TCP out-of-order receive queue with an RB tree. As netem
already overlays a private 'struct rb_node' on the skb->{next/prev/tstamp}
fields, let's do this in a cleaner way.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Yaogong Wang <wygivan@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
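The companion change to include/linux/skbuff.h is outside this diffstat-limited view; roughly, it wraps the existing next/prev/tstamp members together with the new rb_node in an anonymous union. The sketch below (simplified; member names taken from the netem code in this diff, surrounding fields and the tstamp/skb_mstamp details omitted) shows the layout the sch_netem.c changes rely on:

/* Simplified sketch of the relevant part of struct sk_buff after this
 * commit.  skb->rbnode aliases the next/prev/tstamp storage, so a
 * queued skb can live either on a list or in an rbtree, but not both
 * at once.
 */
struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff	*next;
			struct sk_buff	*prev;

			ktime_t		tstamp;
		};
		struct rb_node	rbnode;	/* used in netem & tcp stack */
	};
	/* ... remaining sk_buff members unchanged ... */
};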
Diffstat (limited to 'net/sched/sch_netem.c')
-rw-r--r--  net/sched/sch_netem.c  27
1 file changed, 7 insertions(+), 20 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index b34331967e02..179f1c8c0d8b 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -139,33 +139,20 @@ struct netem_sched_data {
 
 /* Time stamp put into socket buffer control block
  * Only valid when skbs are in our internal t(ime)fifo queue.
+ *
+ * As skb->rbnode uses same storage than skb->next, skb->prev and skb->tstamp,
+ * and skb->next & skb->prev are scratch space for a qdisc,
+ * we save skb->tstamp value in skb->cb[] before destroying it.
  */
 struct netem_skb_cb {
 	psched_time_t	time_to_send;
 	ktime_t		tstamp_save;
 };
 
-/* Because space in skb->cb[] is tight, netem overloads skb->next/prev/tstamp
- * to hold a rb_node structure.
- *
- * If struct sk_buff layout is changed, the following checks will complain.
- */
-static struct rb_node *netem_rb_node(struct sk_buff *skb)
-{
-	BUILD_BUG_ON(offsetof(struct sk_buff, next) != 0);
-	BUILD_BUG_ON(offsetof(struct sk_buff, prev) !=
-		     offsetof(struct sk_buff, next) + sizeof(skb->next));
-	BUILD_BUG_ON(offsetof(struct sk_buff, tstamp) !=
-		     offsetof(struct sk_buff, prev) + sizeof(skb->prev));
-	BUILD_BUG_ON(sizeof(struct rb_node) > sizeof(skb->next) +
-					      sizeof(skb->prev) +
-					      sizeof(skb->tstamp));
-	return (struct rb_node *)&skb->next;
-}
 
 static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
 {
-	return (struct sk_buff *)rb;
+	return container_of(rb, struct sk_buff, rbnode);
 }
 
 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
@@ -403,8 +390,8 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 		else
 			p = &parent->rb_left;
 	}
-	rb_link_node(netem_rb_node(nskb), parent, p);
-	rb_insert_color(netem_rb_node(nskb), &q->t_root);
+	rb_link_node(&nskb->rbnode, parent, p);
+	rb_insert_color(&nskb->rbnode, &q->t_root);
 	sch->q.qlen++;
 }
 
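For context, the skb->cb[] timestamp save/restore that the new comment refers to lives in the (unchanged) enqueue/dequeue paths of sch_netem.c. The following is an illustrative, condensed sketch of that pattern, not literal lines from this patch:

/* Illustrative only: condensed from the surrounding netem logic.
 * Because skb->rbnode shares storage with skb->tstamp, the real
 * timestamp is stashed in the cb[] area while the skb sits in the
 * rbtree, and restored when the skb leaves it.
 */

/* enqueue side (now/delay are computed earlier in netem_enqueue()) */
netem_skb_cb(skb)->time_to_send = now + delay;
netem_skb_cb(skb)->tstamp_save = skb->tstamp;
tfifo_enqueue(skb, sch);

/* dequeue side: the earliest time_to_send is the leftmost node */
struct rb_node *p = rb_first(&q->t_root);

if (p) {
	struct sk_buff *skb = netem_rb_to_skb(p);

	if (netem_skb_cb(skb)->time_to_send <= psched_get_time()) {
		rb_erase(p, &q->t_root);
		skb->tstamp = netem_skb_cb(skb)->tstamp_save;
		/* ... hand skb back to the caller ... */
	}
}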