author	Stephen Hemminger <shemminger@vyatta.com>	2008-01-21 05:23:49 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 18:08:40 -0500
commit	4c30719f4f550d9b3034d9c00da9cb7fb99e6c0b (patch)
tree	db487b4ae184e7e6b90e2623c371f8a09edec830 /net/sched
parent	5b0ac72bc5fdda9634fb07db4cb0237fa9b6df68 (diff)
[PKT_SCHED] dsmark: handle cloned and non-linear skb's
Make dsmark work properly with non-linear and cloned skbs. Before
modifying the header, it needs to check that the skb header is
writable.

Note: this makes the assumption that if it queues a good skb, then a
good skb will come out of the embedded qdisc.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
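For background on why the check is needed: a cloned skb shares its data
area with the original, and a non-linear skb may not hold the full header
in its linear area, so writing the header in place could corrupt the
clone's data. skb_cow_head() makes the header private and writable,
returning 0 on success and a negative errno on failure. A minimal sketch
of the pattern the patch introduces follows; the helper name
example_set_dsfield is hypothetical, not part of this commit:

#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>

/*
 * Illustrative sketch only: before writing into the IP header of an
 * skb that may be cloned or non-linear, unshare the header with
 * skb_cow_head().  On failure the packet must be dropped instead of
 * written.
 */
static int example_set_dsfield(struct sk_buff *skb, u8 dsfield)
{
	if (skb_cow_head(skb, sizeof(struct iphdr)))
		return -ENOMEM;		/* could not make header writable */

	/* Safe now: header data is no longer shared with any clone.
	 * Preserve the ECN bits, replace the DSCP field. */
	ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
	return 0;
}

In the patch itself the skb_cow_head() check runs at enqueue time, while
the actual header rewrite happens at dequeue; per the note above, the
patch relies on the embedded qdisc handing back the same good skb.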
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_dsmark.c	19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index d96eaf0aa6b8..ad30b7b4769f 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -187,13 +187,19 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
 	pr_debug("dsmark_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
 
 	if (p->set_tc_index) {
-		/* FIXME: Safe with non-linear skbs? --RR */
 		switch (skb->protocol) {
 		case __constant_htons(ETH_P_IP):
+			if (skb_cow_head(skb, sizeof(struct iphdr)))
+				goto drop;
+
 			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
 				& ~INET_ECN_MASK;
 			break;
+
 		case __constant_htons(ETH_P_IPV6):
+			if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
+				goto drop;
+
 			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
 				& ~INET_ECN_MASK;
 			break;
@@ -217,14 +223,14 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
 	case TC_ACT_STOLEN:
 		kfree_skb(skb);
 		return NET_XMIT_SUCCESS;
+
 	case TC_ACT_SHOT:
-		kfree_skb(skb);
-		sch->qstats.drops++;
-		return NET_XMIT_BYPASS;
+		goto drop;
 #endif
 	case TC_ACT_OK:
 		skb->tc_index = TC_H_MIN(res.classid);
 		break;
+
 	default:
 		if (p->default_index != NO_DEFAULT_INDEX)
 			skb->tc_index = p->default_index;
@@ -243,6 +249,11 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
+
+drop:
+	kfree_skb(skb);
+	sch->qstats.drops++;
+	return NET_XMIT_BYPASS;
 }
 
 static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)