aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNicolas Dichtel <nicolas.dichtel@6wind.com>2012-12-03 20:13:40 -0500
committerDavid S. Miller <davem@davemloft.net>2012-12-04 13:08:11 -0500
commit8cd3ac9f9b7bd921d0a28fd3273160ee8891e698 (patch)
tree4364f03b86770df2b3d2323a3667c188116017c4
parent1eb99af52c4bc705f4042f37f255975acfc738f2 (diff)
ipmr: advertise new mfc entries via rtnl
This patch allows monitoring of mfc activity via rtnetlink. To avoid parsing the mfc oifs twice, we use maxvif to size the rtnl message allocation; thus we may allocate some superfluous space. Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--net/ipv4/ipmr.c64
1 file changed, 59 insertions, 5 deletions
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 084dac3bc151..a9454cbd953c 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -134,6 +134,8 @@ static int ipmr_cache_report(struct mr_table *mrt,
134 struct sk_buff *pkt, vifi_t vifi, int assert); 134 struct sk_buff *pkt, vifi_t vifi, int assert);
135static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 135static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
136 struct mfc_cache *c, struct rtmsg *rtm); 136 struct mfc_cache *c, struct rtmsg *rtm);
137static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
138 int cmd);
137static void mroute_clean_tables(struct mr_table *mrt); 139static void mroute_clean_tables(struct mr_table *mrt);
138static void ipmr_expire_process(unsigned long arg); 140static void ipmr_expire_process(unsigned long arg);
139 141
@@ -669,6 +671,7 @@ static void ipmr_expire_process(unsigned long arg)
669 } 671 }
670 672
671 list_del(&c->list); 673 list_del(&c->list);
674 mroute_netlink_event(mrt, c, RTM_DELROUTE);
672 ipmr_destroy_unres(mrt, c); 675 ipmr_destroy_unres(mrt, c);
673 } 676 }
674 677
@@ -1026,6 +1029,7 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
1026 1029
1027 atomic_inc(&mrt->cache_resolve_queue_len); 1030 atomic_inc(&mrt->cache_resolve_queue_len);
1028 list_add(&c->list, &mrt->mfc_unres_queue); 1031 list_add(&c->list, &mrt->mfc_unres_queue);
1032 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1029 1033
1030 if (atomic_read(&mrt->cache_resolve_queue_len) == 1) 1034 if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
1031 mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires); 1035 mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
@@ -1060,7 +1064,7 @@ static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
1060 if (c->mfc_origin == mfc->mfcc_origin.s_addr && 1064 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
1061 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { 1065 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
1062 list_del_rcu(&c->list); 1066 list_del_rcu(&c->list);
1063 1067 mroute_netlink_event(mrt, c, RTM_DELROUTE);
1064 ipmr_cache_free(c); 1068 ipmr_cache_free(c);
1065 return 0; 1069 return 0;
1066 } 1070 }
@@ -1095,6 +1099,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1095 if (!mrtsock) 1099 if (!mrtsock)
1096 c->mfc_flags |= MFC_STATIC; 1100 c->mfc_flags |= MFC_STATIC;
1097 write_unlock_bh(&mrt_lock); 1101 write_unlock_bh(&mrt_lock);
1102 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1098 return 0; 1103 return 0;
1099 } 1104 }
1100 1105
@@ -1137,6 +1142,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1137 ipmr_cache_resolve(net, mrt, uc, c); 1142 ipmr_cache_resolve(net, mrt, uc, c);
1138 ipmr_cache_free(uc); 1143 ipmr_cache_free(uc);
1139 } 1144 }
1145 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1140 return 0; 1146 return 0;
1141} 1147}
1142 1148
@@ -1165,6 +1171,7 @@ static void mroute_clean_tables(struct mr_table *mrt)
1165 if (c->mfc_flags & MFC_STATIC) 1171 if (c->mfc_flags & MFC_STATIC)
1166 continue; 1172 continue;
1167 list_del_rcu(&c->list); 1173 list_del_rcu(&c->list);
1174 mroute_netlink_event(mrt, c, RTM_DELROUTE);
1168 ipmr_cache_free(c); 1175 ipmr_cache_free(c);
1169 } 1176 }
1170 } 1177 }
@@ -1173,6 +1180,7 @@ static void mroute_clean_tables(struct mr_table *mrt)
1173 spin_lock_bh(&mfc_unres_lock); 1180 spin_lock_bh(&mfc_unres_lock);
1174 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { 1181 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
1175 list_del(&c->list); 1182 list_del(&c->list);
1183 mroute_netlink_event(mrt, c, RTM_DELROUTE);
1176 ipmr_destroy_unres(mrt, c); 1184 ipmr_destroy_unres(mrt, c);
1177 } 1185 }
1178 spin_unlock_bh(&mfc_unres_lock); 1186 spin_unlock_bh(&mfc_unres_lock);
@@ -2150,13 +2158,13 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
2150} 2158}
2151 2159
2152static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 2160static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2153 u32 portid, u32 seq, struct mfc_cache *c) 2161 u32 portid, u32 seq, struct mfc_cache *c, int cmd)
2154{ 2162{
2155 struct nlmsghdr *nlh; 2163 struct nlmsghdr *nlh;
2156 struct rtmsg *rtm; 2164 struct rtmsg *rtm;
2157 int err; 2165 int err;
2158 2166
2159 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); 2167 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
2160 if (nlh == NULL) 2168 if (nlh == NULL)
2161 return -EMSGSIZE; 2169 return -EMSGSIZE;
2162 2170
@@ -2191,6 +2199,52 @@ nla_put_failure:
2191 return -EMSGSIZE; 2199 return -EMSGSIZE;
2192} 2200}
2193 2201
2202static size_t mroute_msgsize(bool unresolved, int maxvif)
2203{
2204 size_t len =
2205 NLMSG_ALIGN(sizeof(struct rtmsg))
2206 + nla_total_size(4) /* RTA_TABLE */
2207 + nla_total_size(4) /* RTA_SRC */
2208 + nla_total_size(4) /* RTA_DST */
2209 ;
2210
2211 if (!unresolved)
2212 len = len
2213 + nla_total_size(4) /* RTA_IIF */
2214 + nla_total_size(0) /* RTA_MULTIPATH */
2215 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2216 /* RTA_MFC_STATS */
2217 + nla_total_size(sizeof(struct rta_mfc_stats))
2218 ;
2219
2220 return len;
2221}
2222
2223static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2224 int cmd)
2225{
2226 struct net *net = read_pnet(&mrt->net);
2227 struct sk_buff *skb;
2228 int err = -ENOBUFS;
2229
2230 skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
2231 GFP_ATOMIC);
2232 if (skb == NULL)
2233 goto errout;
2234
2235 err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
2236 if (err < 0)
2237 goto errout;
2238
2239 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
2240 return;
2241
2242errout:
2243 kfree_skb(skb);
2244 if (err < 0)
2245 rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
2246}
2247
2194static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) 2248static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2195{ 2249{
2196 struct net *net = sock_net(skb->sk); 2250 struct net *net = sock_net(skb->sk);
@@ -2217,7 +2271,7 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2217 if (ipmr_fill_mroute(mrt, skb, 2271 if (ipmr_fill_mroute(mrt, skb,
2218 NETLINK_CB(cb->skb).portid, 2272 NETLINK_CB(cb->skb).portid,
2219 cb->nlh->nlmsg_seq, 2273 cb->nlh->nlmsg_seq,
2220 mfc) < 0) 2274 mfc, RTM_NEWROUTE) < 0)
2221 goto done; 2275 goto done;
2222next_entry: 2276next_entry:
2223 e++; 2277 e++;
@@ -2231,7 +2285,7 @@ next_entry:
2231 if (ipmr_fill_mroute(mrt, skb, 2285 if (ipmr_fill_mroute(mrt, skb,
2232 NETLINK_CB(cb->skb).portid, 2286 NETLINK_CB(cb->skb).portid,
2233 cb->nlh->nlmsg_seq, 2287 cb->nlh->nlmsg_seq,
2234 mfc) < 0) { 2288 mfc, RTM_NEWROUTE) < 0) {
2235 spin_unlock_bh(&mfc_unres_lock); 2289 spin_unlock_bh(&mfc_unres_lock);
2236 goto done; 2290 goto done;
2237 } 2291 }