author	Eric Dumazet <eric.dumazet@gmail.com>	2010-10-01 12:15:29 -0400
committer	David S. Miller <davem@davemloft.net>	2010-10-04 00:50:53 -0400
commit	a8cb16dd9cb571c45bb479a1e4721ce11220a216 (patch)
tree	103313a0d315877c734834140652f6bdb5a2602e /net
parent	a8c9486b816f74d4645144db9e8fa2f711c1fc4b (diff)
ipmr: cleanups
Various code style cleanups

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
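The patterns applied throughout are the usual CodingStyle/checkpatch ones: block comments gain a leading " * " on every continuation line, binary operators get surrounding spaces, assignments move out of conditionals, and an else arm is braced whenever its if arm is. A minimal before/after sketch of these rules (an illustrative composite with made-up names, not code from this patch):

	/* Old style: continuation lines carry no '*', operators are
	   unspaced, and the else arm is unbraced.
	 */
	static int old_style(int flags)
	{
		int err;

		if (flags&(0x1|0x2)) {
			err = 0;
			flags = 0;
		} else
			err = -1;
		return err;
	}

	/* New style: every comment line starts with ' * ', operators
	 * are spaced, and both arms are braced.
	 */
	static int new_style(int flags)
	{
		int err;

		if (flags & (0x1 | 0x2)) {
			err = 0;
			flags = 0;
		} else {
			err = -1;
		}
		return err;
	}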
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/ipmr.c	238
1 file changed, 124 insertions(+), 114 deletions(-)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index cbb6dabe024f..86dd5691af46 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -98,7 +98,7 @@ struct ipmr_result {
 };
 
 /* Big lock, protecting vif table, mrt cache and mroute socket state.
-   Note that the changes are semaphored via rtnl_lock.
+ * Note that the changes are semaphored via rtnl_lock.
  */
 
 static DEFINE_RWLOCK(mrt_lock);
@@ -113,11 +113,11 @@ static DEFINE_RWLOCK(mrt_lock);
 static DEFINE_SPINLOCK(mfc_unres_lock);
 
 /* We return to original Alan's scheme. Hash table of resolved
-   entries is changed only in process context and protected
-   with weak lock mrt_lock. Queue of unresolved entries is protected
-   with strong spinlock mfc_unres_lock.
-
-   In this case data path is free of exclusive locks at all.
+ * entries is changed only in process context and protected
+ * with weak lock mrt_lock. Queue of unresolved entries is protected
+ * with strong spinlock mfc_unres_lock.
+ *
+ * In this case data path is free of exclusive locks at all.
  */
 
 static struct kmem_cache *mrt_cachep __read_mostly;
@@ -396,9 +396,9 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
 			set_fs(KERNEL_DS);
 			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
 			set_fs(oldfs);
-		} else
+		} else {
 			err = -EOPNOTSUPP;
-
+		}
 		dev = NULL;
 
 		if (err == 0 &&
@@ -495,7 +495,8 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
 	dev->iflink = 0;
 
 	rcu_read_lock();
-	if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
+	in_dev = __in_dev_get_rcu(dev);
+	if (!in_dev) {
 		rcu_read_unlock();
 		goto failure;
 	}
@@ -552,9 +553,10 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
 		mrt->mroute_reg_vif_num = -1;
 #endif
 
-	if (vifi+1 == mrt->maxvif) {
+	if (vifi + 1 == mrt->maxvif) {
 		int tmp;
-		for (tmp=vifi-1; tmp>=0; tmp--) {
+
+		for (tmp = vifi - 1; tmp >= 0; tmp--) {
 			if (VIF_EXISTS(mrt, tmp))
 				break;
 		}
@@ -565,12 +567,13 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
 
 	dev_set_allmulti(dev, -1);
 
-	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
+	in_dev = __in_dev_get_rtnl(dev);
+	if (in_dev) {
 		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
 		ip_rt_multicast_event(in_dev);
 	}
 
-	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify)
+	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
 		unregister_netdevice_queue(dev, head);
 
 	dev_put(dev);
@@ -590,7 +593,7 @@ static inline void ipmr_cache_free(struct mfc_cache *c)
 }
 
 /* Destroy an unresolved cache entry, killing queued skbs
-   and reporting error to netlink readers.
+ * and reporting error to netlink readers.
  */
 
 static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
@@ -612,8 +615,9 @@ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
 			memset(&e->msg, 0, sizeof(e->msg));
 
 			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
-		} else
+		} else {
 			kfree_skb(skb);
+		}
 	}
 
 	ipmr_cache_free(c);
@@ -735,9 +739,9 @@ static int vif_add(struct net *net, struct mr_table *mrt,
 			dev_put(dev);
 			return -EADDRNOTAVAIL;
 		}
-	} else
+	} else {
 		dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
-
+	}
 	if (!dev)
 		return -EADDRNOTAVAIL;
 	err = dev_set_allmulti(dev, 1);
@@ -750,16 +754,16 @@ static int vif_add(struct net *net, struct mr_table *mrt,
 		return -EINVAL;
 	}
 
-	if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
+	in_dev = __in_dev_get_rtnl(dev);
+	if (!in_dev) {
 		dev_put(dev);
 		return -EADDRNOTAVAIL;
 	}
 	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
 	ip_rt_multicast_event(in_dev);
 
-	/*
-	 *	Fill in the VIF structures
-	 */
+	/* Fill in the VIF structures */
+
 	v->rate_limit = vifc->vifc_rate_limit;
 	v->local = vifc->vifc_lcl_addr.s_addr;
 	v->remote = vifc->vifc_rmt_addr.s_addr;
@@ -772,14 +776,14 @@ static int vif_add(struct net *net, struct mr_table *mrt,
 	v->pkt_in = 0;
 	v->pkt_out = 0;
 	v->link = dev->ifindex;
-	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
+	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
 		v->link = dev->iflink;
 
 	/* And finish update writing critical data */
 	write_lock_bh(&mrt_lock);
 	v->dev = dev;
 #ifdef CONFIG_IP_PIMSM
-	if (v->flags&VIFF_REGISTER)
+	if (v->flags & VIFF_REGISTER)
 		mrt->mroute_reg_vif_num = vifi;
 #endif
 	if (vifi+1 > mrt->maxvif)
@@ -836,17 +840,15 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
 	struct sk_buff *skb;
 	struct nlmsgerr *e;
 
-	/*
-	 *	Play the pending entries through our router
-	 */
+	/* Play the pending entries through our router */
 
 	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
 		if (ip_hdr(skb)->version == 0) {
 			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
 
 			if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
-				nlh->nlmsg_len = (skb_tail_pointer(skb) -
-						  (u8 *)nlh);
+				nlh->nlmsg_len = skb_tail_pointer(skb) -
+						 (u8 *)nlh;
 			} else {
 				nlh->nlmsg_type = NLMSG_ERROR;
 				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
@@ -857,8 +859,9 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
 			}
 
 			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
-		} else
+		} else {
 			ip_mr_forward(net, mrt, skb, c, 0);
+		}
 	}
 }
 
@@ -892,9 +895,9 @@ static int ipmr_cache_report(struct mr_table *mrt,
 #ifdef CONFIG_IP_PIMSM
 	if (assert == IGMPMSG_WHOLEPKT) {
 		/* Ugly, but we have no choice with this interface.
-		   Duplicate old header, fix ihl, length etc.
-		   And all this only to mangle msg->im_msgtype and
-		   to set msg->im_mbz to "mbz" :-)
+		 * Duplicate old header, fix ihl, length etc.
+		 * And all this only to mangle msg->im_msgtype and
+		 * to set msg->im_mbz to "mbz" :-)
 		 */
 		skb_push(skb, sizeof(struct iphdr));
 		skb_reset_network_header(skb);
@@ -911,27 +914,23 @@ static int ipmr_cache_report(struct mr_table *mrt,
 #endif
 	{
 
-		/*
-		 *	Copy the IP header
-		 */
+		/* Copy the IP header */
 
 		skb->network_header = skb->tail;
 		skb_put(skb, ihl);
 		skb_copy_to_linear_data(skb, pkt->data, ihl);
 		ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
 		msg = (struct igmpmsg *)skb_network_header(skb);
 		msg->im_vif = vifi;
 		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
 
-		/*
-		 *	Add our header
-		 */
+		/* Add our header */
 
-		igmp=(struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
+		igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
 		igmp->type =
 		msg->im_msgtype = assert;
 		igmp->code = 0;
 		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
 		skb->transport_header = skb->network_header;
 	}
 
@@ -943,9 +942,8 @@ static int ipmr_cache_report(struct mr_table *mrt,
 		return -EINVAL;
 	}
 
-	/*
-	 *	Deliver to mrouted
-	 */
+	/* Deliver to mrouted */
+
 	ret = sock_queue_rcv_skb(mroute_sk, skb);
 	rcu_read_unlock();
 	if (ret < 0) {
@@ -979,9 +977,7 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
 	}
 
 	if (!found) {
-		/*
-		 *	Create a new entry if allowable
-		 */
+		/* Create a new entry if allowable */
 
 		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
 		    (c = ipmr_cache_alloc_unres()) == NULL) {
@@ -991,16 +987,14 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
 			return -ENOBUFS;
 		}
 
-		/*
-		 *	Fill in the new cache entry
-		 */
+		/* Fill in the new cache entry */
+
 		c->mfc_parent = -1;
 		c->mfc_origin = iph->saddr;
 		c->mfc_mcastgrp = iph->daddr;
 
-		/*
-		 *	Reflect first query at mrouted.
-		 */
+		/* Reflect first query at mrouted. */
+
 		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
 		if (err < 0) {
 			/* If the report failed throw the cache entry
@@ -1020,10 +1014,9 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
 		mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
 	}
 
-	/*
-	 *	See if we can append the packet
-	 */
-	if (c->mfc_un.unres.unresolved.qlen>3) {
+	/* See if we can append the packet */
+
+	if (c->mfc_un.unres.unresolved.qlen > 3) {
 		kfree_skb(skb);
 		err = -ENOBUFS;
 	} else {
@@ -1140,18 +1133,16 @@ static void mroute_clean_tables(struct mr_table *mrt)
 	LIST_HEAD(list);
 	struct mfc_cache *c, *next;
 
-	/*
-	 *	Shut down all active vif entries
-	 */
+	/* Shut down all active vif entries */
+
 	for (i = 0; i < mrt->maxvif; i++) {
-		if (!(mrt->vif_table[i].flags&VIFF_STATIC))
+		if (!(mrt->vif_table[i].flags & VIFF_STATIC))
 			vif_delete(mrt, i, 0, &list);
 	}
 	unregister_netdevice_many(&list);
 
-	/*
-	 *	Wipe the cache
-	 */
+	/* Wipe the cache */
+
 	for (i = 0; i < MFC_LINES; i++) {
 		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
 			if (c->mfc_flags & MFC_STATIC)
@@ -1282,7 +1273,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
 	case MRT_ASSERT:
 	{
 		int v;
-		if (get_user(v,(int __user *)optval))
+		if (get_user(v, (int __user *)optval))
 			return -EFAULT;
 		mrt->mroute_do_assert = (v) ? 1 : 0;
 		return 0;
@@ -1292,7 +1283,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
 	{
 		int v;
 
-		if (get_user(v,(int __user *)optval))
+		if (get_user(v, (int __user *)optval))
 			return -EFAULT;
 		v = (v) ? 1 : 0;
 
@@ -1355,9 +1346,9 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
 
 	if (optname != MRT_VERSION &&
 #ifdef CONFIG_IP_PIMSM
-	    optname!=MRT_PIM &&
+	    optname != MRT_PIM &&
 #endif
-	    optname!=MRT_ASSERT)
+	    optname != MRT_ASSERT)
 		return -ENOPROTOOPT;
 
 	if (get_user(olr, optlen))
@@ -1473,7 +1464,7 @@ static struct notifier_block ip_mr_notifier = {
 };
 
 /*
- *	Encapsulate a packet by attaching a valid IPIP header to it.
- *	This avoids tunnel drivers and other mess and gives us the speed so
- *	important for multicast video.
+ * Encapsulate a packet by attaching a valid IPIP header to it.
+ * This avoids tunnel drivers and other mess and gives us the speed so
+ * important for multicast video.
  */
@@ -1488,7 +1479,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 	skb_reset_network_header(skb);
 	iph = ip_hdr(skb);
 
-	iph->version	=	4;
+	iph->version	= 4;
 	iph->tos = old_iph->tos;
 	iph->ttl = old_iph->ttl;
 	iph->frag_off = 0;
@@ -1506,7 +1497,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 
 static inline int ipmr_forward_finish(struct sk_buff *skb)
 {
-	struct ip_options * opt = &(IPCB(skb)->opt);
+	struct ip_options *opt = &(IPCB(skb)->opt);
 
 	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
 
@@ -1543,22 +1534,34 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 	}
 #endif
 
-	if (vif->flags&VIFF_TUNNEL) {
-		struct flowi fl = { .oif = vif->link,
-				    .nl_u = { .ip4_u =
-					      { .daddr = vif->remote,
-						.saddr = vif->local,
-						.tos = RT_TOS(iph->tos) } },
-				    .proto = IPPROTO_IPIP };
+	if (vif->flags & VIFF_TUNNEL) {
+		struct flowi fl = {
+			.oif = vif->link,
+			.nl_u = {
+				.ip4_u = {
+					.daddr = vif->remote,
+					.saddr = vif->local,
+					.tos = RT_TOS(iph->tos)
+				}
+			},
+			.proto = IPPROTO_IPIP
+		};
+
 		if (ip_route_output_key(net, &rt, &fl))
 			goto out_free;
 		encap = sizeof(struct iphdr);
 	} else {
-		struct flowi fl = { .oif = vif->link,
-				    .nl_u = { .ip4_u =
-					      { .daddr = iph->daddr,
-						.tos = RT_TOS(iph->tos) } },
-				    .proto = IPPROTO_IPIP };
+		struct flowi fl = {
+			.oif = vif->link,
+			.nl_u = {
+				.ip4_u = {
+					.daddr = iph->daddr,
+					.tos = RT_TOS(iph->tos)
+				}
+			},
+			.proto = IPPROTO_IPIP
+		};
+
 		if (ip_route_output_key(net, &rt, &fl))
 			goto out_free;
 	}
@@ -1567,8 +1570,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 
 	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
 		/* Do not fragment multicasts. Alas, IPv4 does not
-		   allow to send ICMP, so that packets will disappear
-		   to blackhole.
+		 * allow to send ICMP, so that packets will disappear
+		 * to blackhole.
 		 */
 
 		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
@@ -1591,7 +1594,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 	ip_decrease_ttl(ip_hdr(skb));
 
 	/* FIXME: forward and output firewalls used to be called here.
-	 * What do we do with netfilter? -- RR */
+	 * What do we do with netfilter? -- RR
+	 */
 	if (vif->flags & VIFF_TUNNEL) {
 		ip_encap(skb, vif->local, vif->remote);
 		/* FIXME: extra output firewall step used to be here. --RR */
@@ -1652,15 +1656,15 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 
 	if (skb_rtable(skb)->fl.iif == 0) {
 		/* It is our own packet, looped back.
-		   Very complicated situation...
-
-		   The best workaround until routing daemons will be
-		   fixed is not to redistribute packet, if it was
-		   send through wrong interface. It means, that
-		   multicast applications WILL NOT work for
-		   (S,G), which have default multicast route pointing
-		   to wrong oif. In any case, it is not a good
-		   idea to use multicasting applications on router.
+		 * Very complicated situation...
+		 *
+		 * The best workaround until routing daemons will be
+		 * fixed is not to redistribute packet, if it was
+		 * send through wrong interface. It means, that
+		 * multicast applications WILL NOT work for
+		 * (S,G), which have default multicast route pointing
+		 * to wrong oif. In any case, it is not a good
+		 * idea to use multicasting applications on router.
 		 */
 		goto dont_forward;
 	}
@@ -1670,9 +1674,9 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 
 	if (true_vifi >= 0 && mrt->mroute_do_assert &&
 	    /* pimsm uses asserts, when switching from RPT to SPT,
-	       so that we cannot check that packet arrived on an oif.
-	       It is bad, but otherwise we would need to move pretty
-	       large chunk of pimd to kernel. Ough... --ANK
+	     * so that we cannot check that packet arrived on an oif.
+	     * It is bad, but otherwise we would need to move pretty
+	     * large chunk of pimd to kernel. Ough... --ANK
 	     */
 	    (mrt->mroute_do_pim ||
 	     cache->mfc_un.res.ttls[true_vifi] < 255) &&
@@ -1690,10 +1694,12 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 	/*
 	 * Forward the frame
 	 */
-	for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
+	for (ct = cache->mfc_un.res.maxvif - 1;
+	     ct >= cache->mfc_un.res.minvif; ct--) {
 		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
 			if (psend != -1) {
 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
 				if (skb2)
 					ipmr_queue_xmit(net, mrt, skb2, cache,
 							psend);
@@ -1704,6 +1710,7 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 	if (psend != -1) {
 		if (local) {
 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
 			if (skb2)
 				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
 		} else {
@@ -1733,7 +1740,7 @@ int ip_mr_input(struct sk_buff *skb)
 	int err;
 
 	/* Packet is looped back after forward, it should not be
-	   forwarded second time, but still can be delivered locally.
+	 * forwarded second time, but still can be delivered locally.
 	 */
 	if (IPCB(skb)->flags & IPSKB_FORWARDED)
 		goto dont_forward;
@@ -1822,10 +1829,10 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
 
 	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
 	/*
-	   Check that:
-	   a. packet is really destinted to a multicast group
-	   b. packet is not a NULL-REGISTER
-	   c. packet is not truncated
+	 * Check that:
+	 * a. packet is really sent to a multicast group
+	 * b. packet is not a NULL-REGISTER
+	 * c. packet is not truncated
 	 */
 	if (!ipv4_is_multicast(encap->daddr) ||
 	    encap->tot_len == 0 ||
@@ -1860,7 +1867,7 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
  * Handle IGMP messages of PIMv1
  */
 
-int pim_rcv_v1(struct sk_buff * skb)
+int pim_rcv_v1(struct sk_buff *skb)
 {
 	struct igmphdr *pim;
 	struct net *net = dev_net(skb->dev);
@@ -1887,7 +1894,7 @@ drop:
 #endif
 
 #ifdef CONFIG_IP_PIMSM_V2
-static int pim_rcv(struct sk_buff * skb)
+static int pim_rcv(struct sk_buff *skb)
 {
 	struct pimreghdr *pim;
 	struct net *net = dev_net(skb->dev);
@@ -1897,8 +1904,8 @@ static int pim_rcv(struct sk_buff * skb)
 		goto drop;
 
 	pim = (struct pimreghdr *)skb_transport_header(skb);
-	if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
-	    (pim->flags&PIM_NULL_REGISTER) ||
+	if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
+	    (pim->flags & PIM_NULL_REGISTER) ||
 	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
 	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
 		goto drop;
@@ -1971,7 +1978,7 @@ int ipmr_get_route(struct net *net,
 		struct sk_buff *skb2;
 		struct iphdr *iph;
 		struct net_device *dev;
-		int vif;
+		int vif = -1;
 
 		if (nowait) {
 			rcu_read_unlock();
@@ -1980,7 +1987,9 @@ int ipmr_get_route(struct net *net,
 
 		dev = skb->dev;
 		read_lock(&mrt_lock);
-		if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) {
+		if (dev)
+			vif = ipmr_find_vif(mrt, dev);
+		if (vif < 0) {
 			read_unlock(&mrt_lock);
 			rcu_read_unlock();
 			return -ENODEV;
@@ -2098,7 +2107,8 @@ done:
 
 #ifdef CONFIG_PROC_FS
 /*
- *	The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
+ *	The /proc interfaces to multicast routing :
+ *	/proc/net/ip_mr_cache & /proc/net/ip_mr_vif
  */
 struct ipmr_vif_iter {
 	struct seq_net_private p;
@@ -2294,7 +2304,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	if (!list_empty(it->cache))
 		return list_first_entry(it->cache, struct mfc_cache, list);
 
- end_of_list:
+end_of_list:
 	spin_unlock_bh(&mfc_unres_lock);
 	it->cache = NULL;
 
@@ -2335,7 +2345,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
 			   mfc->mfc_un.res.bytes,
 			   mfc->mfc_un.res.wrong_if);
 		for (n = mfc->mfc_un.res.minvif;
-		     n < mfc->mfc_un.res.maxvif; n++ ) {
+		     n < mfc->mfc_un.res.maxvif; n++) {
 			if (VIF_EXISTS(mrt, n) &&
 			    mfc->mfc_un.res.ttls[n] < 255)
 				seq_printf(seq,