about summary refs log tree commit diff stats
path: root/net/core/dev.c
diff options
context:
space:
mode:
authorHerbert Xu <herbert@gondor.apana.org.au>2006-06-27 16:22:38 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2006-06-29 19:57:53 -0400
commit576a30eb6453439b3c37ba24455ac7090c247b5a (patch)
treee0c427a61e3de5c93e797c09903d910f6f060e64 /net/core/dev.c
parent68c1692e3ea5d79f24cb5cc566c4a73939d13d25 (diff)
[NET]: Added GSO header verification
When GSO packets come from an untrusted source (e.g., a Xen guest domain), we need to verify the header integrity before passing it to the hardware.

Since the first step in GSO is to verify the header, we can reuse that code by adding a new bit to gso_type: SKB_GSO_DODGY. Packets with this bit set can only be fed directly to devices with the corresponding bit NETIF_F_GSO_ROBUST. If the device doesn't have that bit, then the skb is fed to the GSO engine which will allow the packet to be sent to the hardware if it passes the header check.

This patch changes the sg flag to a full features flag. The same method can be used to implement TSO ECN support. We simply have to mark packets with CWR set with SKB_GSO_ECN so that only hardware with a corresponding NETIF_F_TSO_ECN can accept them. The GSO engine can either fully segment the packet, or segment the first MTU and pass the rest to the hardware for further segmentation.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c  33
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index f1c52cbd6ef7..4f2014994a84 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1190,11 +1190,14 @@ out:
1190/** 1190/**
1191 * skb_gso_segment - Perform segmentation on skb. 1191 * skb_gso_segment - Perform segmentation on skb.
1192 * @skb: buffer to segment 1192 * @skb: buffer to segment
1193 * @sg: whether scatter-gather is supported on the target. 1193 * @features: features for the output path (see dev->features)
1194 * 1194 *
1195 * This function segments the given skb and returns a list of segments. 1195 * This function segments the given skb and returns a list of segments.
1196 *
1197 * It may return NULL if the skb requires no segmentation. This is
1198 * only possible when GSO is used for verifying header integrity.
1196 */ 1199 */
1197struct sk_buff *skb_gso_segment(struct sk_buff *skb, int sg) 1200struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1198{ 1201{
1199 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 1202 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1200 struct packet_type *ptype; 1203 struct packet_type *ptype;
@@ -1210,12 +1213,14 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int sg)
1210 rcu_read_lock(); 1213 rcu_read_lock();
1211 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) { 1214 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
1212 if (ptype->type == type && !ptype->dev && ptype->gso_segment) { 1215 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1213 segs = ptype->gso_segment(skb, sg); 1216 segs = ptype->gso_segment(skb, features);
1214 break; 1217 break;
1215 } 1218 }
1216 } 1219 }
1217 rcu_read_unlock(); 1220 rcu_read_unlock();
1218 1221
1222 __skb_push(skb, skb->data - skb->mac.raw);
1223
1219 return segs; 1224 return segs;
1220} 1225}
1221 1226
@@ -1291,9 +1296,15 @@ static int dev_gso_segment(struct sk_buff *skb)
1291{ 1296{
1292 struct net_device *dev = skb->dev; 1297 struct net_device *dev = skb->dev;
1293 struct sk_buff *segs; 1298 struct sk_buff *segs;
1299 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1300 NETIF_F_SG : 0);
1301
1302 segs = skb_gso_segment(skb, features);
1303
1304 /* Verifying header integrity only. */
1305 if (!segs)
1306 return 0;
1294 1307
1295 segs = skb_gso_segment(skb, dev->features & NETIF_F_SG &&
1296 !illegal_highdma(dev, skb));
1297 if (unlikely(IS_ERR(segs))) 1308 if (unlikely(IS_ERR(segs)))
1298 return PTR_ERR(segs); 1309 return PTR_ERR(segs);
1299 1310
@@ -1310,13 +1321,17 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1310 if (netdev_nit) 1321 if (netdev_nit)
1311 dev_queue_xmit_nit(skb, dev); 1322 dev_queue_xmit_nit(skb, dev);
1312 1323
1313 if (!netif_needs_gso(dev, skb)) 1324 if (netif_needs_gso(dev, skb)) {
1314 return dev->hard_start_xmit(skb, dev); 1325 if (unlikely(dev_gso_segment(skb)))
1326 goto out_kfree_skb;
1327 if (skb->next)
1328 goto gso;
1329 }
1315 1330
1316 if (unlikely(dev_gso_segment(skb))) 1331 return dev->hard_start_xmit(skb, dev);
1317 goto out_kfree_skb;
1318 } 1332 }
1319 1333
1334gso:
1320 do { 1335 do {
1321 struct sk_buff *nskb = skb->next; 1336 struct sk_buff *nskb = skb->next;
1322 int rc; 1337 int rc;