Diffstat (limited to 'net/ipv4/ip_input.c')
-rw-r--r-- | net/ipv4/ip_input.c | 141
1 files changed, 77 insertions, 64 deletions
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index c703528e0bcd..473d0f2b2e0d 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -150,7 +150,7 @@
  *	SNMP management statistics
  */
 
-DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics);
+DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics) __read_mostly;
 
 /*
  *	Process Router Attention IP option
@@ -225,8 +225,8 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
 	/* If there maybe a raw socket we must check - if not we
 	 * don't care less
 	 */
-	if (raw_sk)
-		raw_v4_input(skb, skb->nh.iph, hash);
+	if (raw_sk && !raw_v4_input(skb, skb->nh.iph, hash))
+		raw_sk = NULL;
 
 	if ((ipprot = rcu_dereference(inet_protos[hash])) != NULL) {
 		int ret;
@@ -279,18 +279,70 @@ int ip_local_deliver(struct sk_buff *skb)
 		       ip_local_deliver_finish);
 }
 
-static inline int ip_rcv_finish(struct sk_buff *skb)
+static inline int ip_rcv_options(struct sk_buff *skb)
 {
+	struct ip_options *opt;
+	struct iphdr *iph;
 	struct net_device *dev = skb->dev;
+
+	/* It looks as overkill, because not all
+	   IP options require packet mangling.
+	   But it is the easiest for now, especially taking
+	   into account that combination of IP options
+	   and running sniffer is extremely rare condition.
+					      --ANK (980813)
+	*/
+	if (skb_cow(skb, skb_headroom(skb))) {
+		IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
+		goto drop;
+	}
+
+	iph = skb->nh.iph;
+
+	if (ip_options_compile(NULL, skb)) {
+		IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+		goto drop;
+	}
+
+	opt = &(IPCB(skb)->opt);
+	if (unlikely(opt->srr)) {
+		struct in_device *in_dev = in_dev_get(dev);
+		if (in_dev) {
+			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
+				if (IN_DEV_LOG_MARTIANS(in_dev) &&
+				    net_ratelimit())
+					printk(KERN_INFO "source route option "
+					       "%u.%u.%u.%u -> %u.%u.%u.%u\n",
+					       NIPQUAD(iph->saddr),
+					       NIPQUAD(iph->daddr));
+				in_dev_put(in_dev);
+				goto drop;
+			}
+
+			in_dev_put(in_dev);
+		}
+
+		if (ip_options_rcv_srr(skb))
+			goto drop;
+	}
+
+	return 0;
+drop:
+	return -1;
+}
+
+static inline int ip_rcv_finish(struct sk_buff *skb)
+{
 	struct iphdr *iph = skb->nh.iph;
-	int err;
 
 	/*
 	 *	Initialise the virtual path cache for the packet. It describes
 	 *	how the packet travels inside Linux networking.
 	 */
-	if (skb->dst == NULL) {
-		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
+	if (likely(skb->dst == NULL)) {
+		int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
+					 skb->dev);
+		if (unlikely(err)) {
 			if (err == -EHOSTUNREACH)
 				IP_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
 			goto drop;
@@ -298,7 +350,7 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
 	}
 
 #ifdef CONFIG_NET_CLS_ROUTE
-	if (skb->dst->tclassid) {
+	if (unlikely(skb->dst->tclassid)) {
 		struct ip_rt_acct *st = ip_rt_acct + 256*smp_processor_id();
 		u32 idx = skb->dst->tclassid;
 		st[idx&0xFF].o_packets++;
@@ -308,48 +360,11 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
 	}
 #endif
 
-	if (iph->ihl > 5) {
-		struct ip_options *opt;
-
-		/* It looks as overkill, because not all
-		   IP options require packet mangling.
-		   But it is the easiest for now, especially taking
-		   into account that combination of IP options
-		   and running sniffer is extremely rare condition.
-					      --ANK (980813)
-		*/
-
-		if (skb_cow(skb, skb_headroom(skb))) {
-			IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
-			goto drop;
-		}
-		iph = skb->nh.iph;
-
-		if (ip_options_compile(NULL, skb))
-			goto inhdr_error;
-
-		opt = &(IPCB(skb)->opt);
-		if (opt->srr) {
-			struct in_device *in_dev = in_dev_get(dev);
-			if (in_dev) {
-				if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
-					if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
-						printk(KERN_INFO "source route option %u.%u.%u.%u -> %u.%u.%u.%u\n",
-						       NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
-					in_dev_put(in_dev);
-					goto drop;
-				}
-				in_dev_put(in_dev);
-			}
-			if (ip_options_rcv_srr(skb))
-				goto drop;
-		}
-	}
+	if (iph->ihl > 5 && ip_rcv_options(skb))
+		goto drop;
 
 	return dst_input(skb);
 
-inhdr_error:
-	IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
 drop:
 	kfree_skb(skb);
 	return NET_RX_DROP;
@@ -358,9 +373,10 @@ drop:
 /*
  *	Main IP Receive routine.
  */
-int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
+int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
 {
 	struct iphdr *iph;
+	u32 len;
 
 	/* When the interface is in promisc. mode, drop all the crap
 	 * that it receives, do not try to analyse it.
@@ -392,29 +408,27 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
 	 */
 
 	if (iph->ihl < 5 || iph->version != 4)
-		goto inhdr_error; 
+		goto inhdr_error;
 
 	if (!pskb_may_pull(skb, iph->ihl*4))
 		goto inhdr_error;
 
 	iph = skb->nh.iph;
 
-	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
-		goto inhdr_error; 
+	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+		goto inhdr_error;
 
-	{
-		__u32 len = ntohs(iph->tot_len); 
-		if (skb->len < len || len < (iph->ihl<<2))
-			goto inhdr_error; 
+	len = ntohs(iph->tot_len);
+	if (skb->len < len || len < (iph->ihl*4))
+		goto inhdr_error;
 
-		/* Our transport medium may have padded the buffer out. Now we know it
-		 * is IP we can trim to the true length of the frame.
-		 * Note this now means skb->len holds ntohs(iph->tot_len).
-		 */
-		if (pskb_trim_rcsum(skb, len)) {
-			IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
-			goto drop;
-		}
+	/* Our transport medium may have padded the buffer out. Now we know it
+	 * is IP we can trim to the true length of the frame.
+	 * Note this now means skb->len holds ntohs(iph->tot_len).
+	 */
+	if (pskb_trim_rcsum(skb, len)) {
+		IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
+		goto drop;
 	}
 
 	return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL,
@@ -428,5 +442,4 @@ out:
 	return NET_RX_DROP;
 }
 
-EXPORT_SYMBOL(ip_rcv);
 EXPORT_SYMBOL(ip_statistics);