Diffstat (limited to 'net/ipv4/ip_input.c')
-rw-r--r--   net/ipv4/ip_input.c   24
1 file changed, 11 insertions(+), 13 deletions(-)
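
The change below converts direct skb->nh.iph accesses to the ip_hdr() and ip_hdrlen() helpers and replaces the open-coded skb->h.raw assignment with skb_reset_transport_header(). The call-site pattern, sketched old-versus-new (illustrative only, not a verbatim excerpt of the file):

        struct iphdr *iph = ip_hdr(skb);    /* was: iph = skb->nh.iph;             */
        __skb_pull(skb, ip_hdrlen(skb));    /* was: __skb_pull(skb, iph->ihl * 4); */
        skb_reset_transport_header(skb);    /* was: skb->h.raw = skb->data;        */
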
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index f38e97647ac0..324e7e0fdb2a 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -158,7 +158,7 @@ DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics) __read_mostly;
 int ip_call_ra_chain(struct sk_buff *skb)
 {
         struct ip_ra_chain *ra;
-        u8 protocol = skb->nh.iph->protocol;
+        u8 protocol = ip_hdr(skb)->protocol;
         struct sock *last = NULL;
 
         read_lock(&ip_ra_lock);
@@ -171,7 +171,7 @@ int ip_call_ra_chain(struct sk_buff *skb)
                 if (sk && inet_sk(sk)->num == protocol &&
                     (!sk->sk_bound_dev_if ||
                      sk->sk_bound_dev_if == skb->dev->ifindex)) {
-                        if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
+                        if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
                                 skb = ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN);
                                 if (skb == NULL) {
                                         read_unlock(&ip_ra_lock);
@@ -198,17 +198,15 @@ int ip_call_ra_chain(struct sk_buff *skb)
 
 static inline int ip_local_deliver_finish(struct sk_buff *skb)
 {
-        int ihl = skb->nh.iph->ihl*4;
-
-        __skb_pull(skb, ihl);
+        __skb_pull(skb, ip_hdrlen(skb));
 
         /* Point into the IP datagram, just past the header. */
-        skb->h.raw = skb->data;
+        skb_reset_transport_header(skb);
 
         rcu_read_lock();
         {
                 /* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */
-                int protocol = skb->nh.iph->protocol;
+                int protocol = ip_hdr(skb)->protocol;
                 int hash;
                 struct sock *raw_sk;
                 struct net_protocol *ipprot;
@@ -220,7 +218,7 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
                 /* If there maybe a raw socket we must check - if not we
                  * don't care less
                  */
-                if (raw_sk && !raw_v4_input(skb, skb->nh.iph, hash))
+                if (raw_sk && !raw_v4_input(skb, ip_hdr(skb), hash))
                         raw_sk = NULL;
 
                 if ((ipprot = rcu_dereference(inet_protos[hash])) != NULL) {
@@ -266,7 +264,7 @@ int ip_local_deliver(struct sk_buff *skb)
          *      Reassemble IP fragments.
          */
 
-        if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
+        if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
                 skb = ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER);
                 if (!skb)
                         return 0;
@@ -294,7 +292,7 @@ static inline int ip_rcv_options(struct sk_buff *skb)
                 goto drop;
         }
 
-        iph = skb->nh.iph;
+        iph = ip_hdr(skb);
 
         if (ip_options_compile(NULL, skb)) {
                 IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
@@ -330,7 +328,7 @@ drop:
 
 static inline int ip_rcv_finish(struct sk_buff *skb)
 {
-        struct iphdr *iph = skb->nh.iph;
+        const struct iphdr *iph = ip_hdr(skb);
 
         /*
          *      Initialise the virtual path cache for the packet. It describes
@@ -391,7 +389,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
         if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                 goto inhdr_error;
 
-        iph = skb->nh.iph;
+        iph = ip_hdr(skb);
 
         /*
          *      RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum.
@@ -410,7 +408,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
         if (!pskb_may_pull(skb, iph->ihl*4))
                 goto inhdr_error;
 
-        iph = skb->nh.iph;
+        iph = ip_hdr(skb);
 
         if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
                 goto inhdr_error;
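
For reference, the two accessors this patch introduces into ip_input.c are thin inline helpers; a minimal sketch of what they evaluate to (simplified from include/linux/ip.h of this kernel generation; treat it as illustrative rather than authoritative):

        /* The IP header starts at the skb's network-header marker,
         * the same location skb->nh.iph used to point at. */
        static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
        {
                return (struct iphdr *)skb_network_header(skb);
        }

        /* IP header length in bytes: ihl counts 32-bit words. */
        static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
        {
                return ip_hdr(skb)->ihl * 4;
        }

skb_reset_transport_header(skb) likewise records the current skb->data position as the start of the transport header, which is what the removed skb->h.raw = skb->data line did by hand. Going through these helpers keeps callers independent of how struct sk_buff stores its header markers, so later layout changes stay local to the accessors.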