path: root/net/ipv4/ipmr.c
author	Arnaldo Carvalho de Melo <acme@redhat.com>	2007-04-21 01:47:35 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-26 01:25:10 -0400
commit	eddc9ec53be2ecdbf4efe0efd4a83052594f0ac0 (patch)
tree	4a38ab4dbd9d61fdf5a5ea6ed61463e0b9e33ba7 /net/ipv4/ipmr.c
parent	e023dd643798c4f06c16466af90b4d250e4b8bd7 (diff)
[SK_BUFF]: Introduce ip_hdr(), remove skb->nh.iph
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/ipmr.c')
-rw-r--r--	net/ipv4/ipmr.c	55
1 file changed, 29 insertions(+), 26 deletions(-)
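The conversion relies on the ip_hdr() accessor that this series adds alongside skb_network_header(). As a rough sketch of what the helpers do (the authoritative definitions live in include/linux/ip.h and include/linux/skbuff.h; at this point in the series the network header is still reached through the skb->nh union), every skb->nh.iph dereference below becomes ip_hdr(skb):

	/* Sketch only -- see include/linux/skbuff.h and include/linux/ip.h
	 * for the real definitions at this point in the tree. */
	static inline unsigned char *skb_network_header(const struct sk_buff *skb)
	{
		return skb->nh.raw;	/* raw pointer to the network header */
	}

	static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
	{
		return (struct iphdr *)skb_network_header(skb);
	}

Call sites that only need the raw header pointer use skb_network_header() or skb_reset_network_header() directly, as several hunks below do.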
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index b24dffe3bd46..e0021499093f 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -303,7 +303,7 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
 	atomic_dec(&cache_resolve_queue_len);
 
 	while ((skb=skb_dequeue(&c->mfc_un.unres.unresolved))) {
-		if (skb->nh.iph->version == 0) {
+		if (ip_hdr(skb)->version == 0) {
 			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
 			nlh->nlmsg_type = NLMSG_ERROR;
 			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
@@ -509,7 +509,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
 	 */
 
 	while ((skb=__skb_dequeue(&uc->mfc_un.unres.unresolved))) {
-		if (skb->nh.iph->version == 0) {
+		if (ip_hdr(skb)->version == 0) {
 			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
 
 			if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
@@ -569,8 +569,9 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
 		msg->im_msgtype = IGMPMSG_WHOLEPKT;
 		msg->im_mbz = 0;
 		msg->im_vif = reg_vif_num;
-		skb->nh.iph->ihl = sizeof(struct iphdr) >> 2;
-		skb->nh.iph->tot_len = htons(ntohs(pkt->nh.iph->tot_len) + sizeof(struct iphdr));
+		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
+		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
+					     sizeof(struct iphdr));
 	} else
 #endif
 	{
@@ -579,10 +580,10 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
 	 *	Copy the IP header
 	 */
 
-		skb->nh.iph = (struct iphdr *)skb_put(skb, ihl);
+		skb->nh.raw = skb_put(skb, ihl);
 		memcpy(skb->data,pkt->data,ihl);
-		skb->nh.iph->protocol = 0;	/* Flag to the kernel this is a route add */
-		msg = (struct igmpmsg*)skb->nh.iph;
+		ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
+		msg = (struct igmpmsg *)skb_network_header(skb);
 		msg->im_vif = vifi;
 		skb->dst = dst_clone(pkt->dst);
 
@@ -594,7 +595,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
 		igmp->type =
 		msg->im_msgtype = assert;
 		igmp->code = 0;
-		skb->nh.iph->tot_len=htons(skb->len);	/* Fix the length */
+		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
 		skb->h.raw = skb->nh.raw;
 	}
 
@@ -624,11 +625,12 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
 {
 	int err;
 	struct mfc_cache *c;
+	const struct iphdr *iph = ip_hdr(skb);
 
 	spin_lock_bh(&mfc_unres_lock);
 	for (c=mfc_unres_queue; c; c=c->next) {
-		if (c->mfc_mcastgrp == skb->nh.iph->daddr &&
-		    c->mfc_origin == skb->nh.iph->saddr)
+		if (c->mfc_mcastgrp == iph->daddr &&
+		    c->mfc_origin == iph->saddr)
 			break;
 	}
 
@@ -648,9 +650,9 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
 		/*
 		 *	Fill in the new cache entry
 		 */
-		c->mfc_parent=-1;
-		c->mfc_origin=skb->nh.iph->saddr;
-		c->mfc_mcastgrp=skb->nh.iph->daddr;
+		c->mfc_parent = -1;
+		c->mfc_origin = iph->saddr;
+		c->mfc_mcastgrp = iph->daddr;
 
 		/*
 		 *	Reflect first query at mrouted.
@@ -1096,12 +1098,12 @@ static struct notifier_block ip_mr_notifier={
 static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 {
 	struct iphdr *iph;
-	struct iphdr *old_iph = skb->nh.iph;
+	struct iphdr *old_iph = ip_hdr(skb);
 
 	skb_push(skb, sizeof(struct iphdr));
-	skb->h.ipiph = skb->nh.iph;
+	skb->h.raw = skb->nh.raw;
 	skb_reset_network_header(skb);
-	iph = skb->nh.iph;
+	iph = ip_hdr(skb);
 
 	iph->version = 4;
 	iph->tos = old_iph->tos;
@@ -1137,7 +1139,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
 
 static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
 {
-	struct iphdr *iph = skb->nh.iph;
+	const struct iphdr *iph = ip_hdr(skb);
 	struct vif_device *vif = &vif_table[vifi];
 	struct net_device *dev;
 	struct rtable *rt;
@@ -1203,8 +1205,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
 
 	dst_release(skb->dst);
 	skb->dst = &rt->u.dst;
-	iph = skb->nh.iph;
-	ip_decrease_ttl(iph);
+	ip_decrease_ttl(ip_hdr(skb));
 
 	/* FIXME: forward and output firewalls used to be called here.
 	 * What do we do with netfilter? -- RR */
@@ -1304,7 +1305,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
 	 *	Forward the frame
 	 */
 	for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
-		if (skb->nh.iph->ttl > cache->mfc_un.res.ttls[ct]) {
+		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
 			if (psend != -1) {
 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 				if (skb2)
@@ -1350,7 +1351,7 @@ int ip_mr_input(struct sk_buff *skb)
 		if (IPCB(skb)->opt.router_alert) {
 			if (ip_call_ra_chain(skb))
 				return 0;
-		} else if (skb->nh.iph->protocol == IPPROTO_IGMP){
+		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP){
 			/* IGMPv1 (and broken IGMPv2 implementations sort of
 			   Cisco IOS <= 11.2(8)) do not put router alert
 			   option to IGMP packets destined to routable
@@ -1369,7 +1370,7 @@ int ip_mr_input(struct sk_buff *skb)
 	}
 
 	read_lock(&mrt_lock);
-	cache = ipmr_cache_find(skb->nh.iph->saddr, skb->nh.iph->daddr);
+	cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
 
 	/*
 	 *	No usable cache entry
@@ -1580,6 +1581,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
 
 	if (cache==NULL) {
 		struct sk_buff *skb2;
+		struct iphdr *iph;
 		struct net_device *dev;
 		int vif;
 
@@ -1601,10 +1603,11 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
 
 		skb_push(skb2, sizeof(struct iphdr));
 		skb_reset_network_header(skb2);
-		skb2->nh.iph->ihl = sizeof(struct iphdr)>>2;
-		skb2->nh.iph->saddr = rt->rt_src;
-		skb2->nh.iph->daddr = rt->rt_dst;
-		skb2->nh.iph->version = 0;
+		iph = ip_hdr(skb2);
+		iph->ihl = sizeof(struct iphdr) >> 2;
+		iph->saddr = rt->rt_src;
+		iph->daddr = rt->rt_dst;
+		iph->version = 0;
 		err = ipmr_cache_unresolved(vif, skb2);
 		read_unlock(&mrt_lock);
 		return err;