author	Jianjun Kong <jianjun@zeuux.org>	2008-11-03 03:28:02 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-03 03:28:02 -0500
commit	c354e1246348e25c714e6b2973f3257183d06e2c (patch)
tree	4631234b66f374b97c2c60a0701f9e96c54e7416 /net/ipv4/ipmr.c
parent	09cb105ea78d5644570d52085e2149f784575872 (diff)

net: clean up net/ipv4/ipmr.c

Signed-off-by: Jianjun Kong <jianjun@zeuux.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/ipmr.c')
-rw-r--r--	net/ipv4/ipmr.c	130
1 file changed, 65 insertions, 65 deletions
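The patch is a pure coding-style cleanup: no functional change, only whitespace adjusted to match the kernel's Documentation/CodingStyle (spaces around binary operators such as = and !=, a space after commas, and a space before * in pointer casts). A minimal before/after sketch of the convention being enforced follows; the helper below is illustrative only, not taken from ipmr.c and not part of the diff underneath.

	/* Before: spacing of the kind scripts/checkpatch.pl warns about. */
	static int demo_before(int optlen, int expected)
	{
		int ret=0;			/* no spaces around '=' */

		if (optlen!=expected)		/* no spaces around '!=' */
			ret=-1;
		return ret;
	}

	/* After: the spacing this patch applies throughout ipmr.c. */
	static int demo_after(int optlen, int expected)
	{
		int ret = 0;

		if (optlen != expected)
			ret = -1;
		return ret;
	}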
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index b42e082cc170..05ed336f798a 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -331,7 +331,7 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
 
 	atomic_dec(&cache_resolve_queue_len);
 
-	while ((skb=skb_dequeue(&c->mfc_un.unres.unresolved))) {
+	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
 		if (ip_hdr(skb)->version == 0) {
 			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
 			nlh->nlmsg_type = NLMSG_ERROR;
@@ -477,13 +477,13 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
 	/*
 	 *	Fill in the VIF structures
 	 */
-	v->rate_limit=vifc->vifc_rate_limit;
-	v->local=vifc->vifc_lcl_addr.s_addr;
-	v->remote=vifc->vifc_rmt_addr.s_addr;
-	v->flags=vifc->vifc_flags;
+	v->rate_limit = vifc->vifc_rate_limit;
+	v->local = vifc->vifc_lcl_addr.s_addr;
+	v->remote = vifc->vifc_rmt_addr.s_addr;
+	v->flags = vifc->vifc_flags;
 	if (!mrtsock)
 		v->flags |= VIFF_STATIC;
-	v->threshold=vifc->vifc_threshold;
+	v->threshold = vifc->vifc_threshold;
 	v->bytes_in = 0;
 	v->bytes_out = 0;
 	v->pkt_in = 0;
@@ -494,7 +494,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
 
 	/* And finish update writing critical data */
 	write_lock_bh(&mrt_lock);
-	v->dev=dev;
+	v->dev = dev;
 #ifdef CONFIG_IP_PIMSM
 	if (v->flags&VIFF_REGISTER)
 		reg_vif_num = vifi;
@@ -507,7 +507,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
 
 static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
 {
-	int line=MFC_HASH(mcastgrp,origin);
+	int line = MFC_HASH(mcastgrp, origin);
 	struct mfc_cache *c;
 
 	for (c=mfc_cache_array[line]; c; c = c->next) {
@@ -522,8 +522,8 @@ static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
  */
 static struct mfc_cache *ipmr_cache_alloc(void)
 {
-	struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
-	if (c==NULL)
+	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
+	if (c == NULL)
 		return NULL;
 	c->mfc_un.res.minvif = MAXVIFS;
 	return c;
@@ -531,8 +531,8 @@ static struct mfc_cache *ipmr_cache_alloc(void)
 
 static struct mfc_cache *ipmr_cache_alloc_unres(void)
 {
-	struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
-	if (c==NULL)
+	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
+	if (c == NULL)
 		return NULL;
 	skb_queue_head_init(&c->mfc_un.unres.unresolved);
 	c->mfc_un.unres.expires = jiffies + 10*HZ;
@@ -552,7 +552,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
 	 *	Play the pending entries through our router
 	 */
 
-	while ((skb=__skb_dequeue(&uc->mfc_un.unres.unresolved))) {
+	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
 		if (ip_hdr(skb)->version == 0) {
 			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
 
@@ -637,7 +637,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
 	 *	Add our header
 	 */
 
-	igmp=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr));
+	igmp=(struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
 	igmp->type =
 	msg->im_msgtype = assert;
 	igmp->code = 0;
@@ -653,7 +653,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
 	/*
 	 *	Deliver to mrouted
 	 */
-	if ((ret=sock_queue_rcv_skb(mroute_socket,skb))<0) {
+	if ((ret = sock_queue_rcv_skb(mroute_socket, skb))<0) {
 		if (net_ratelimit())
 			printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
 		kfree_skb(skb);
@@ -685,7 +685,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
 		 *	Create a new entry if allowable
 		 */
 
-		if (atomic_read(&cache_resolve_queue_len)>=10 ||
+		if (atomic_read(&cache_resolve_queue_len) >= 10 ||
 		    (c=ipmr_cache_alloc_unres())==NULL) {
 			spin_unlock_bh(&mfc_unres_lock);
 
@@ -728,7 +728,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
 		kfree_skb(skb);
 		err = -ENOBUFS;
 	} else {
-		skb_queue_tail(&c->mfc_un.unres.unresolved,skb);
+		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
 		err = 0;
 	}
 
@@ -745,7 +745,7 @@ static int ipmr_mfc_delete(struct mfcctl *mfc)
 	int line;
 	struct mfc_cache *c, **cp;
 
-	line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
+	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
 
 	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
 		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
@@ -766,7 +766,7 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
 	int line;
 	struct mfc_cache *uc, *c, **cp;
 
-	line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
+	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
 
 	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
 		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
@@ -787,13 +787,13 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
 	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
 		return -EINVAL;
 
-	c=ipmr_cache_alloc();
-	if (c==NULL)
+	c = ipmr_cache_alloc();
+	if (c == NULL)
 		return -ENOMEM;
 
-	c->mfc_origin=mfc->mfcc_origin.s_addr;
-	c->mfc_mcastgrp=mfc->mfcc_mcastgrp.s_addr;
-	c->mfc_parent=mfc->mfcc_parent;
+	c->mfc_origin = mfc->mfcc_origin.s_addr;
+	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
+	c->mfc_parent = mfc->mfcc_parent;
 	ipmr_update_thresholds(c, mfc->mfcc_ttls);
 	if (!mrtsock)
 		c->mfc_flags |= MFC_STATIC;
@@ -846,7 +846,7 @@ static void mroute_clean_tables(struct sock *sk)
 	/*
 	 *	Wipe the cache
 	 */
-	for (i=0;i<MFC_LINES;i++) {
+	for (i=0; i<MFC_LINES; i++) {
 		struct mfc_cache *c, **cp;
 
 		cp = &mfc_cache_array[i];
@@ -887,7 +887,7 @@ static void mrtsock_destruct(struct sock *sk)
 		IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--;
 
 		write_lock_bh(&mrt_lock);
-		mroute_socket=NULL;
+		mroute_socket = NULL;
 		write_unlock_bh(&mrt_lock);
 
 		mroute_clean_tables(sk);
@@ -902,7 +902,7 @@ static void mrtsock_destruct(struct sock *sk)
  *	MOSPF/PIM router set up we can clean this up.
  */
 
-int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen)
+int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
 {
 	int ret;
 	struct vifctl vif;
@@ -918,7 +918,7 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
 		if (sk->sk_type != SOCK_RAW ||
 		    inet_sk(sk)->num != IPPROTO_IGMP)
 			return -EOPNOTSUPP;
-		if (optlen!=sizeof(int))
+		if (optlen != sizeof(int))
 			return -ENOPROTOOPT;
 
 		rtnl_lock();
@@ -930,7 +930,7 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
 		ret = ip_ra_control(sk, 1, mrtsock_destruct);
 		if (ret == 0) {
 			write_lock_bh(&mrt_lock);
-			mroute_socket=sk;
+			mroute_socket = sk;
 			write_unlock_bh(&mrt_lock);
 
 			IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++;
@@ -938,19 +938,19 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
 		rtnl_unlock();
 		return ret;
 	case MRT_DONE:
-		if (sk!=mroute_socket)
+		if (sk != mroute_socket)
 			return -EACCES;
 		return ip_ra_control(sk, 0, NULL);
 	case MRT_ADD_VIF:
 	case MRT_DEL_VIF:
-		if (optlen!=sizeof(vif))
+		if (optlen != sizeof(vif))
 			return -EINVAL;
-		if (copy_from_user(&vif,optval,sizeof(vif)))
+		if (copy_from_user(&vif, optval, sizeof(vif)))
 			return -EFAULT;
 		if (vif.vifc_vifi >= MAXVIFS)
 			return -ENFILE;
 		rtnl_lock();
-		if (optname==MRT_ADD_VIF) {
+		if (optname == MRT_ADD_VIF) {
 			ret = vif_add(&vif, sk==mroute_socket);
 		} else {
 			ret = vif_delete(vif.vifc_vifi, 0);
@@ -964,12 +964,12 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
 		 */
 	case MRT_ADD_MFC:
 	case MRT_DEL_MFC:
-		if (optlen!=sizeof(mfc))
+		if (optlen != sizeof(mfc))
 			return -EINVAL;
-		if (copy_from_user(&mfc,optval, sizeof(mfc)))
+		if (copy_from_user(&mfc, optval, sizeof(mfc)))
 			return -EFAULT;
 		rtnl_lock();
-		if (optname==MRT_DEL_MFC)
+		if (optname == MRT_DEL_MFC)
 			ret = ipmr_mfc_delete(&mfc);
 		else
 			ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
@@ -1028,12 +1028,12 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
  *	Getsock opt support for the multicast routing system.
  */
 
-int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen)
+int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
 {
 	int olr;
 	int val;
 
-	if (optname!=MRT_VERSION &&
+	if (optname != MRT_VERSION &&
 #ifdef CONFIG_IP_PIMSM
 	   optname!=MRT_PIM &&
 #endif
@@ -1047,17 +1047,17 @@ int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __u
 	if (olr < 0)
 		return -EINVAL;
 
-	if (put_user(olr,optlen))
+	if (put_user(olr, optlen))
 		return -EFAULT;
-	if (optname==MRT_VERSION)
-		val=0x0305;
+	if (optname == MRT_VERSION)
+		val = 0x0305;
 #ifdef CONFIG_IP_PIMSM
-	else if (optname==MRT_PIM)
-		val=mroute_do_pim;
+	else if (optname == MRT_PIM)
+		val = mroute_do_pim;
 #endif
 	else
-		val=mroute_do_assert;
-	if (copy_to_user(optval,&val,olr))
+		val = mroute_do_assert;
+	if (copy_to_user(optval, &val, olr))
 		return -EFAULT;
 	return 0;
 }
@@ -1075,27 +1075,27 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
 
 	switch (cmd) {
 	case SIOCGETVIFCNT:
-		if (copy_from_user(&vr,arg,sizeof(vr)))
+		if (copy_from_user(&vr, arg, sizeof(vr)))
 			return -EFAULT;
-		if (vr.vifi>=maxvif)
+		if (vr.vifi >= maxvif)
 			return -EINVAL;
 		read_lock(&mrt_lock);
 		vif=&vif_table[vr.vifi];
 		if (VIF_EXISTS(vr.vifi)) {
-			vr.icount=vif->pkt_in;
-			vr.ocount=vif->pkt_out;
-			vr.ibytes=vif->bytes_in;
-			vr.obytes=vif->bytes_out;
+			vr.icount = vif->pkt_in;
+			vr.ocount = vif->pkt_out;
+			vr.ibytes = vif->bytes_in;
+			vr.obytes = vif->bytes_out;
 			read_unlock(&mrt_lock);
 
-			if (copy_to_user(arg,&vr,sizeof(vr)))
+			if (copy_to_user(arg, &vr, sizeof(vr)))
 				return -EFAULT;
 			return 0;
 		}
 		read_unlock(&mrt_lock);
 		return -EADDRNOTAVAIL;
 	case SIOCGETSGCNT:
-		if (copy_from_user(&sr,arg,sizeof(sr)))
+		if (copy_from_user(&sr, arg, sizeof(sr)))
 			return -EFAULT;
 
 		read_lock(&mrt_lock);
@@ -1106,7 +1106,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
 			sr.wrong_if = c->mfc_un.res.wrong_if;
 			read_unlock(&mrt_lock);
 
-			if (copy_to_user(arg,&sr,sizeof(sr)))
+			if (copy_to_user(arg, &sr, sizeof(sr)))
 				return -EFAULT;
 			return 0;
 		}
@@ -1130,15 +1130,15 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
 	if (event != NETDEV_UNREGISTER)
 		return NOTIFY_DONE;
 	v=&vif_table[0];
-	for (ct=0;ct<maxvif;ct++,v++) {
-		if (v->dev==dev)
+	for (ct=0; ct<maxvif; ct++,v++) {
+		if (v->dev == dev)
 			vif_delete(ct, 1);
 	}
 	return NOTIFY_DONE;
 }
 
 
-static struct notifier_block ip_mr_notifier={
+static struct notifier_block ip_mr_notifier = {
 	.notifier_call = ipmr_device_event,
 };
 
@@ -1204,7 +1204,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
 #ifdef CONFIG_IP_PIMSM
 	if (vif->flags & VIFF_REGISTER) {
 		vif->pkt_out++;
-		vif->bytes_out+=skb->len;
+		vif->bytes_out += skb->len;
 		vif->dev->stats.tx_bytes += skb->len;
 		vif->dev->stats.tx_packets++;
 		ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
@@ -1254,7 +1254,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
 	}
 
 	vif->pkt_out++;
-	vif->bytes_out+=skb->len;
+	vif->bytes_out += skb->len;
 
 	dst_release(skb->dst);
 	skb->dst = &rt->u.dst;
@@ -1352,7 +1352,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
 	}
 
 	vif_table[vif].pkt_in++;
-	vif_table[vif].bytes_in+=skb->len;
+	vif_table[vif].bytes_in += skb->len;
 
 	/*
 	 *	Forward the frame
@@ -1364,7 +1364,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
 				if (skb2)
 					ipmr_queue_xmit(skb2, cache, psend);
 			}
-			psend=ct;
+			psend = ct;
 		}
 	}
 	if (psend != -1) {
@@ -1428,7 +1428,7 @@ int ip_mr_input(struct sk_buff *skb)
 	/*
 	 *	No usable cache entry
 	 */
-	if (cache==NULL) {
+	if (cache == NULL) {
 		int vif;
 
 		if (local) {
@@ -1602,13 +1602,13 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
 	if (dev)
 		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
 
-	mp_head = (struct rtattr*)skb_put(skb, RTA_LENGTH(0));
+	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
 
 	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
 		if (c->mfc_un.res.ttls[ct] < 255) {
 			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
 				goto rtattr_failure;
-			nhp = (struct rtnexthop*)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
+			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
 			nhp->rtnh_flags = 0;
 			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
 			nhp->rtnh_ifindex = vif_table[ct].dev->ifindex;
@@ -1634,7 +1634,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
 	read_lock(&mrt_lock);
 	cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
 
-	if (cache==NULL) {
+	if (cache == NULL) {
 		struct sk_buff *skb2;
 		struct iphdr *iph;
 		struct net_device *dev;