author    Patrick McHardy <kaber@trash.net>  2010-04-26 10:02:08 -0400
committer Patrick McHardy <kaber@trash.net>  2010-04-26 10:22:50 -0400
commit    cb6a4e461fb427689920472bd7335f926d521747 (patch)
tree      2c3fb3312590aabcc8cfd5f7ded10c197bf83661 /net/ipv4/ipmr.c
parent    25239cee7e8732dbdc9f5d324f1c22a3bdec1d1f (diff)
net: ipmr: add support for dumping routing tables over netlink
The ipmr /proc interface (ip_mr_cache) can't be extended to dump routes from any table other than the main table in a backwards-compatible fashion, since its output format ends in a variable number of output interfaces.

Introduce a new netlink interface to dump multicast routes from all tables, similar to the netlink interface for regular routes.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net/ipv4/ipmr.c')
-rw-r--r--  net/ipv4/ipmr.c | 96
1 file changed, 89 insertions(+), 7 deletions(-)
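
For reference, the dump added by this patch can be requested from userspace through rtnetlink. The following is a minimal illustrative sketch, not part of the patch: it sends RTM_GETROUTE with rtm_family = RTNL_FAMILY_IPMR and NLM_F_DUMP on a NETLINK_ROUTE socket, then prints the table id and the (origin, group) pair carried in RTA_SRC/RTA_DST of each reply. Error handling, NLMSG_ERROR checks, and attribute validation are trimmed, and RTNL_FAMILY_IPMR needs uapi headers that include this series.

/*
 * Userspace sketch (illustrative only): dump the multicast routing
 * cache via RTM_GETROUTE with RTNL_FAMILY_IPMR and NLM_F_DUMP.
 */
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
	} req = {
		.nlh = {
			.nlmsg_len   = NLMSG_LENGTH(sizeof(struct rtmsg)),
			.nlmsg_type  = RTM_GETROUTE,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		/* RTNL_FAMILY_IPMR selects the multicast route dump */
		.rtm = { .rtm_family = RTNL_FAMILY_IPMR },
	};
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0 || sendto(fd, &req, req.nlh.nlmsg_len, 0,
			     (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
		return 1;

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		if (len <= 0)
			break;
		for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
			struct rtmsg *rtm = NLMSG_DATA(nlh);
			struct rtattr *rta;
			int alen = RTM_PAYLOAD(nlh);
			char src[INET_ADDRSTRLEN] = "?", grp[INET_ADDRSTRLEN] = "?";

			if (nlh->nlmsg_type == NLMSG_DONE)
				goto out;
			/* per the patch, RTA_SRC carries mfc_origin and
			 * RTA_DST the multicast group (mfc_mcastgrp) */
			for (rta = RTM_RTA(rtm); RTA_OK(rta, alen);
			     rta = RTA_NEXT(rta, alen)) {
				if (rta->rta_type == RTA_SRC)
					inet_ntop(AF_INET, RTA_DATA(rta), src, sizeof(src));
				else if (rta->rta_type == RTA_DST)
					inet_ntop(AF_INET, RTA_DATA(rta), grp, sizeof(grp));
			}
			printf("table %u: (%s, %s)\n", rtm->rtm_table, src, grp);
		}
	}
out:
	close(fd);
	return 0;
}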
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 41e8fc0ce8b3..eddfd12f55b8 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -128,8 +128,8 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 			 int local);
 static int ipmr_cache_report(struct mr_table *mrt,
 			     struct sk_buff *pkt, vifi_t vifi, int assert);
-static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
-			    struct mfc_cache *c, struct rtmsg *rtm);
+static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+			      struct mfc_cache *c, struct rtmsg *rtm);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -831,7 +831,7 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
 	if (ip_hdr(skb)->version == 0) {
 		struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
 
-		if (ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
+		if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
 			nlh->nlmsg_len = (skb_tail_pointer(skb) -
 					  (u8 *)nlh);
 		} else {
@@ -1904,9 +1904,8 @@ drop:
 }
 #endif
 
-static int
-ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c,
-		 struct rtmsg *rtm)
+static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+			      struct mfc_cache *c, struct rtmsg *rtm)
 {
 	int ct;
 	struct rtnexthop *nhp;
@@ -1994,11 +1993,93 @@ int ipmr_get_route(struct net *net,
 
 	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
 		cache->mfc_flags |= MFC_NOTIFY;
-	err = ipmr_fill_mroute(mrt, skb, cache, rtm);
+	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
 	read_unlock(&mrt_lock);
 	return err;
 }
 
+static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+			    u32 pid, u32 seq, struct mfc_cache *c)
+{
+	struct nlmsghdr *nlh;
+	struct rtmsg *rtm;
+
+	nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
+	if (nlh == NULL)
+		return -EMSGSIZE;
+
+	rtm = nlmsg_data(nlh);
+	rtm->rtm_family = RTNL_FAMILY_IPMR;
+	rtm->rtm_dst_len = 32;
+	rtm->rtm_src_len = 32;
+	rtm->rtm_tos = 0;
+	rtm->rtm_table = mrt->id;
+	NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
+	rtm->rtm_type = RTN_MULTICAST;
+	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
+	rtm->rtm_protocol = RTPROT_UNSPEC;
+	rtm->rtm_flags = 0;
+
+	NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin);
+	NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp);
+
+	if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
+		goto nla_put_failure;
+
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct net *net = sock_net(skb->sk);
+	struct mr_table *mrt;
+	struct mfc_cache *mfc;
+	unsigned int t = 0, s_t;
+	unsigned int h = 0, s_h;
+	unsigned int e = 0, s_e;
+
+	s_t = cb->args[0];
+	s_h = cb->args[1];
+	s_e = cb->args[2];
+
+	read_lock(&mrt_lock);
+	ipmr_for_each_table(mrt, net) {
+		if (t < s_t)
+			goto next_table;
+		if (t > s_t)
+			s_h = 0;
+		for (h = s_h; h < MFC_LINES; h++) {
+			list_for_each_entry(mfc, &mrt->mfc_cache_array[h], list) {
+				if (e < s_e)
+					goto next_entry;
+				if (ipmr_fill_mroute(mrt, skb,
+						     NETLINK_CB(cb->skb).pid,
+						     cb->nlh->nlmsg_seq,
+						     mfc) < 0)
+					goto done;
+next_entry:
+				e++;
+			}
+			e = s_e = 0;
+		}
+		s_h = 0;
+next_table:
+		t++;
+	}
+done:
+	read_unlock(&mrt_lock);
+
+	cb->args[2] = e;
+	cb->args[1] = h;
+	cb->args[0] = t;
+
+	return skb->len;
+}
+
 #ifdef CONFIG_PROC_FS
 /*
  *	The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
@@ -2355,6 +2436,7 @@ int __init ip_mr_init(void)
 		goto add_proto_fail;
 	}
 #endif
+	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, NULL, ipmr_rtm_dumproute);
 	return 0;
 
 #ifdef CONFIG_IP_PIMSM_V2