diff options
Diffstat (limited to 'net/ipv4/ip_input.c')
| -rw-r--r-- | net/ipv4/ip_input.c | 18 |
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 2ee132b330fd..237880a80432 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
| @@ -158,7 +158,7 @@ DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics) __read_mostly; | |||
| 158 | int ip_call_ra_chain(struct sk_buff *skb) | 158 | int ip_call_ra_chain(struct sk_buff *skb) |
| 159 | { | 159 | { |
| 160 | struct ip_ra_chain *ra; | 160 | struct ip_ra_chain *ra; |
| 161 | u8 protocol = skb->nh.iph->protocol; | 161 | u8 protocol = ip_hdr(skb)->protocol; |
| 162 | struct sock *last = NULL; | 162 | struct sock *last = NULL; |
| 163 | 163 | ||
| 164 | read_lock(&ip_ra_lock); | 164 | read_lock(&ip_ra_lock); |
| @@ -171,7 +171,7 @@ int ip_call_ra_chain(struct sk_buff *skb) | |||
| 171 | if (sk && inet_sk(sk)->num == protocol && | 171 | if (sk && inet_sk(sk)->num == protocol && |
| 172 | (!sk->sk_bound_dev_if || | 172 | (!sk->sk_bound_dev_if || |
| 173 | sk->sk_bound_dev_if == skb->dev->ifindex)) { | 173 | sk->sk_bound_dev_if == skb->dev->ifindex)) { |
| 174 | if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) { | 174 | if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { |
| 175 | skb = ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN); | 175 | skb = ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN); |
| 176 | if (skb == NULL) { | 176 | if (skb == NULL) { |
| 177 | read_unlock(&ip_ra_lock); | 177 | read_unlock(&ip_ra_lock); |
| @@ -206,7 +206,7 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb) | |||
| 206 | rcu_read_lock(); | 206 | rcu_read_lock(); |
| 207 | { | 207 | { |
| 208 | /* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */ | 208 | /* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */ |
| 209 | int protocol = skb->nh.iph->protocol; | 209 | int protocol = ip_hdr(skb)->protocol; |
| 210 | int hash; | 210 | int hash; |
| 211 | struct sock *raw_sk; | 211 | struct sock *raw_sk; |
| 212 | struct net_protocol *ipprot; | 212 | struct net_protocol *ipprot; |
| @@ -218,7 +218,7 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb) | |||
| 218 | /* If there maybe a raw socket we must check - if not we | 218 | /* If there maybe a raw socket we must check - if not we |
| 219 | * don't care less | 219 | * don't care less |
| 220 | */ | 220 | */ |
| 221 | if (raw_sk && !raw_v4_input(skb, skb->nh.iph, hash)) | 221 | if (raw_sk && !raw_v4_input(skb, ip_hdr(skb), hash)) |
| 222 | raw_sk = NULL; | 222 | raw_sk = NULL; |
| 223 | 223 | ||
| 224 | if ((ipprot = rcu_dereference(inet_protos[hash])) != NULL) { | 224 | if ((ipprot = rcu_dereference(inet_protos[hash])) != NULL) { |
| @@ -264,7 +264,7 @@ int ip_local_deliver(struct sk_buff *skb) | |||
| 264 | * Reassemble IP fragments. | 264 | * Reassemble IP fragments. |
| 265 | */ | 265 | */ |
| 266 | 266 | ||
| 267 | if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) { | 267 | if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { |
| 268 | skb = ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER); | 268 | skb = ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER); |
| 269 | if (!skb) | 269 | if (!skb) |
| 270 | return 0; | 270 | return 0; |
| @@ -292,7 +292,7 @@ static inline int ip_rcv_options(struct sk_buff *skb) | |||
| 292 | goto drop; | 292 | goto drop; |
| 293 | } | 293 | } |
| 294 | 294 | ||
| 295 | iph = skb->nh.iph; | 295 | iph = ip_hdr(skb); |
| 296 | 296 | ||
| 297 | if (ip_options_compile(NULL, skb)) { | 297 | if (ip_options_compile(NULL, skb)) { |
| 298 | IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 298 | IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); |
| @@ -328,7 +328,7 @@ drop: | |||
| 328 | 328 | ||
| 329 | static inline int ip_rcv_finish(struct sk_buff *skb) | 329 | static inline int ip_rcv_finish(struct sk_buff *skb) |
| 330 | { | 330 | { |
| 331 | struct iphdr *iph = skb->nh.iph; | 331 | const struct iphdr *iph = ip_hdr(skb); |
| 332 | 332 | ||
| 333 | /* | 333 | /* |
| 334 | * Initialise the virtual path cache for the packet. It describes | 334 | * Initialise the virtual path cache for the packet. It describes |
| @@ -389,7 +389,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, | |||
| 389 | if (!pskb_may_pull(skb, sizeof(struct iphdr))) | 389 | if (!pskb_may_pull(skb, sizeof(struct iphdr))) |
| 390 | goto inhdr_error; | 390 | goto inhdr_error; |
| 391 | 391 | ||
| 392 | iph = skb->nh.iph; | 392 | iph = ip_hdr(skb); |
| 393 | 393 | ||
| 394 | /* | 394 | /* |
| 395 | * RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum. | 395 | * RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum. |
| @@ -408,7 +408,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, | |||
| 408 | if (!pskb_may_pull(skb, iph->ihl*4)) | 408 | if (!pskb_may_pull(skb, iph->ihl*4)) |
| 409 | goto inhdr_error; | 409 | goto inhdr_error; |
| 410 | 410 | ||
| 411 | iph = skb->nh.iph; | 411 | iph = ip_hdr(skb); |
| 412 | 412 | ||
| 413 | if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) | 413 | if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) |
| 414 | goto inhdr_error; | 414 | goto inhdr_error; |
