author	Yuval Mintz <yuvalm@mellanox.com>	2018-02-28 16:29:39 -0500
committer	David S. Miller <davem@davemloft.net>	2018-03-01 13:13:23 -0500
commit	7b0db85737db3f4d76b2a412e4f19eae59b8b494
tree	9cde582f7276b75ffb8ea39e0c1b8cd07218a85b
parent	889cd83cbe411dda854429f3223ab2d31a860a4a
ipmr, ip6mr: Unite dumproute flows
The various MFC entries are being held in the same kind of mr_tables
for both ipmr and ip6mr, and their traversal logic is identical.
Also, with the exception of the addresses [and other small tidbits]
the major bulk of the nla setting is identical.

Unite as much of the dumping as possible between the two. Notice
this requires creating an mr_table iterator for each, as the
for-each preprocessor macro can't be used by the common logic.

Signed-off-by: Yuval Mintz <yuvalm@mellanox.com>
Acked-by: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 include/linux/mroute_base.h |  29
 net/ipv4/ipmr.c             | 161
 net/ipv4/ipmr_base.c        | 123
 net/ipv6/ip6mr.c            | 156
 4 files changed, 230 insertions(+), 239 deletions(-)
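Note on the approach: the shared dump loop in ipmr_base.c cannot expand the per-family ipmr_for_each_table()/ip6mr_for_each_table() preprocessor macros, so each family instead passes a cursor-style iterator callback. Calling iter(net, NULL) yields the first table, iter(net, mrt) yields the table after mrt, and a NULL return ends the walk. Below is a minimal standalone C sketch of that pattern; the names table_iter and the toy tables array are hypothetical, while the real kernel callbacks are ipmr_mr_table_iter/ip6mr_mr_table_iter in the diff that follows.

#include <stdio.h>
#include <stddef.h>

struct mr_table { int id; };

static struct mr_table tables[] = { { 0 }, { 1 }, { 2 } };

/* Cursor-style iterator: NULL in -> first table, previous table in ->
 * next table, NULL out -> end of the walk. */
static struct mr_table *table_iter(struct mr_table *mrt)
{
	size_t n = sizeof(tables) / sizeof(tables[0]);

	if (!mrt)
		return &tables[0];
	if ((size_t)(mrt - tables) + 1 >= n)
		return NULL;
	return mrt + 1;
}

int main(void)
{
	struct mr_table *mrt;

	/* Same loop shape as mr_rtm_dumproute() uses in the diff below. */
	for (mrt = table_iter(NULL); mrt; mrt = table_iter(mrt))
		printf("dumping table %d\n", mrt->id);
	return 0;
}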
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index f40202b16dae..c2560cb50f1d 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -170,6 +170,16 @@ void *mr_mfc_find_parent(struct mr_table *mrt,
 void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi);
 void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg);
 
+int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+		   struct mr_mfc *c, struct rtmsg *rtm);
+int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
+		     struct mr_table *(*iter)(struct net *net,
+					      struct mr_table *mrt),
+		     int (*fill)(struct mr_table *mrt,
+				 struct sk_buff *skb,
+				 u32 portid, u32 seq, struct mr_mfc *c,
+				 int cmd, int flags),
+		     spinlock_t *lock);
 #else
 static inline void vif_device_init(struct vif_device *v,
 				   struct net_device *dev,
@@ -207,6 +217,25 @@ static inline struct mr_mfc *mr_mfc_find_any(struct mr_table *mrt,
 {
 	return NULL;
 }
+
+static inline int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+				 struct mr_mfc *c, struct rtmsg *rtm)
+{
+	return -EINVAL;
+}
+
+static inline int
+mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
+		 struct mr_table *(*iter)(struct net *net,
+					  struct mr_table *mrt),
+		 int (*fill)(struct mr_table *mrt,
+			     struct sk_buff *skb,
+			     u32 portid, u32 seq, struct mr_mfc *c,
+			     int cmd, int flags),
+		 spinlock_t *lock)
+{
+	return -EINVAL;
+}
 #endif
 
 static inline void *mr_mfc_find(struct mr_table *mrt, void *hasharg)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index f5ff54297824..d752a70855d8 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -105,8 +105,6 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
 			   struct mfc_cache *cache, int local);
 static int ipmr_cache_report(struct mr_table *mrt,
 			     struct sk_buff *pkt, vifi_t vifi, int assert);
-static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
-			      struct mr_mfc *c, struct rtmsg *rtm);
 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
 				 int cmd);
 static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
@@ -117,6 +115,23 @@ static void ipmr_expire_process(struct timer_list *t);
 #define ipmr_for_each_table(mrt, net) \
 	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)
 
+static struct mr_table *ipmr_mr_table_iter(struct net *net,
+					   struct mr_table *mrt)
+{
+	struct mr_table *ret;
+
+	if (!mrt)
+		ret = list_entry_rcu(net->ipv4.mr_tables.next,
+				     struct mr_table, list);
+	else
+		ret = list_entry_rcu(mrt->list.next,
+				     struct mr_table, list);
+
+	if (&ret->list == &net->ipv4.mr_tables)
+		return NULL;
+	return ret;
+}
+
 static struct mr_table *ipmr_get_table(struct net *net, u32 id)
 {
 	struct mr_table *mrt;
@@ -284,6 +299,14 @@ EXPORT_SYMBOL(ipmr_rule_default);
 #define ipmr_for_each_table(mrt, net) \
 	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
 
+static struct mr_table *ipmr_mr_table_iter(struct net *net,
+					   struct mr_table *mrt)
+{
+	if (!mrt)
+		return net->ipv4.mrt;
+	return NULL;
+}
+
 static struct mr_table *ipmr_get_table(struct net *net, u32 id)
 {
 	return net->ipv4.mrt;
@@ -1051,8 +1074,8 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
 			struct nlmsghdr *nlh = skb_pull(skb,
 							sizeof(struct iphdr));
 
-			if (__ipmr_fill_mroute(mrt, skb, &c->_c,
-					       nlmsg_data(nlh)) > 0) {
+			if (mr_fill_mroute(mrt, skb, &c->_c,
+					   nlmsg_data(nlh)) > 0) {
 				nlh->nlmsg_len = skb_tail_pointer(skb) -
 						 (u8 *)nlh;
 			} else {
@@ -2256,66 +2279,6 @@ drop:
 }
 #endif
 
-static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
-			      struct mr_mfc *c, struct rtmsg *rtm)
-{
-	struct rta_mfc_stats mfcs;
-	struct nlattr *mp_attr;
-	struct rtnexthop *nhp;
-	unsigned long lastuse;
-	int ct;
-
-	/* If cache is unresolved, don't try to parse IIF and OIF */
-	if (c->mfc_parent >= MAXVIFS) {
-		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
-		return -ENOENT;
-	}
-
-	if (VIF_EXISTS(mrt, c->mfc_parent) &&
-	    nla_put_u32(skb, RTA_IIF,
-			mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
-		return -EMSGSIZE;
-
-	if (c->mfc_flags & MFC_OFFLOAD)
-		rtm->rtm_flags |= RTNH_F_OFFLOAD;
-
-	if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
-		return -EMSGSIZE;
-
-	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
-		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
-			struct vif_device *vif;
-
-			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
-				nla_nest_cancel(skb, mp_attr);
-				return -EMSGSIZE;
-			}
-
-			nhp->rtnh_flags = 0;
-			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
-			vif = &mrt->vif_table[ct];
-			nhp->rtnh_ifindex = vif->dev->ifindex;
-			nhp->rtnh_len = sizeof(*nhp);
-		}
-	}
-
-	nla_nest_end(skb, mp_attr);
-
-	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
-	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
-
-	mfcs.mfcs_packets = c->mfc_un.res.pkt;
-	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
-	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
-	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
-	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
-			      RTA_PAD))
-		return -EMSGSIZE;
-
-	rtm->rtm_type = RTN_MULTICAST;
-	return 1;
-}
-
 int ipmr_get_route(struct net *net, struct sk_buff *skb,
 		   __be32 saddr, __be32 daddr,
 		   struct rtmsg *rtm, u32 portid)
@@ -2373,7 +2336,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
 	}
 
 	read_lock(&mrt_lock);
-	err = __ipmr_fill_mroute(mrt, skb, &cache->_c, rtm);
+	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
 	read_unlock(&mrt_lock);
 	rcu_read_unlock();
 	return err;
@@ -2410,7 +2373,7 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
 	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
 		goto nla_put_failure;
-	err = __ipmr_fill_mroute(mrt, skb, &c->_c, rtm);
+	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
 	/* do not break the dump if cache is unresolved */
 	if (err < 0 && err != -ENOENT)
 		goto nla_put_failure;
@@ -2423,6 +2386,14 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
+static int _ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+			     u32 portid, u32 seq, struct mr_mfc *c, int cmd,
+			     int flags)
+{
+	return ipmr_fill_mroute(mrt, skb, portid, seq, (struct mfc_cache *)c,
+				cmd, flags);
+}
+
 static size_t mroute_msgsize(bool unresolved, int maxvif)
 {
 	size_t len =
@@ -2596,62 +2567,8 @@ errout_free:
 
 static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
 {
-	struct net *net = sock_net(skb->sk);
-	unsigned int t = 0, s_t;
-	unsigned int e = 0, s_e;
-	struct mr_table *mrt;
-	struct mr_mfc *mfc;
-
-	s_t = cb->args[0];
-	s_e = cb->args[1];
-
-	rcu_read_lock();
-	ipmr_for_each_table(mrt, net) {
-		if (t < s_t)
-			goto next_table;
-		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
-			if (e < s_e)
-				goto next_entry;
-			if (ipmr_fill_mroute(mrt, skb,
-					     NETLINK_CB(cb->skb).portid,
-					     cb->nlh->nlmsg_seq,
-					     (struct mfc_cache *)mfc,
-					     RTM_NEWROUTE, NLM_F_MULTI) < 0)
-				goto done;
-next_entry:
-			e++;
-		}
-		e = 0;
-		s_e = 0;
-
-		spin_lock_bh(&mfc_unres_lock);
-		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
-			if (e < s_e)
-				goto next_entry2;
-			if (ipmr_fill_mroute(mrt, skb,
-					     NETLINK_CB(cb->skb).portid,
-					     cb->nlh->nlmsg_seq,
-					     (struct mfc_cache *)mfc,
-					     RTM_NEWROUTE, NLM_F_MULTI) < 0) {
-				spin_unlock_bh(&mfc_unres_lock);
-				goto done;
-			}
-next_entry2:
-			e++;
-		}
-		spin_unlock_bh(&mfc_unres_lock);
-		e = 0;
-		s_e = 0;
-next_table:
-		t++;
-	}
-done:
-	rcu_read_unlock();
-
-	cb->args[1] = e;
-	cb->args[0] = t;
-
-	return skb->len;
+	return mr_rtm_dumproute(skb, cb, ipmr_mr_table_iter,
+				_ipmr_fill_mroute, &mfc_unres_lock);
 }
 
 static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
index e1b7b639e9b1..8ba55bfda817 100644
--- a/net/ipv4/ipmr_base.c
+++ b/net/ipv4/ipmr_base.c
@@ -198,3 +198,126 @@ end_of_list:
 }
 EXPORT_SYMBOL(mr_mfc_seq_next);
 #endif
+
+int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+		   struct mr_mfc *c, struct rtmsg *rtm)
+{
+	struct rta_mfc_stats mfcs;
+	struct nlattr *mp_attr;
+	struct rtnexthop *nhp;
+	unsigned long lastuse;
+	int ct;
+
+	/* If cache is unresolved, don't try to parse IIF and OIF */
+	if (c->mfc_parent >= MAXVIFS) {
+		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
+		return -ENOENT;
+	}
+
+	if (VIF_EXISTS(mrt, c->mfc_parent) &&
+	    nla_put_u32(skb, RTA_IIF,
+			mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
+		return -EMSGSIZE;
+
+	if (c->mfc_flags & MFC_OFFLOAD)
+		rtm->rtm_flags |= RTNH_F_OFFLOAD;
+
+	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
+	if (!mp_attr)
+		return -EMSGSIZE;
+
+	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
+		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
+			struct vif_device *vif;
+
+			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
+			if (!nhp) {
+				nla_nest_cancel(skb, mp_attr);
+				return -EMSGSIZE;
+			}
+
+			nhp->rtnh_flags = 0;
+			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
+			vif = &mrt->vif_table[ct];
+			nhp->rtnh_ifindex = vif->dev->ifindex;
+			nhp->rtnh_len = sizeof(*nhp);
+		}
+	}
+
+	nla_nest_end(skb, mp_attr);
+
+	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
+	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
+
+	mfcs.mfcs_packets = c->mfc_un.res.pkt;
+	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
+	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
+	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
+	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
+			      RTA_PAD))
+		return -EMSGSIZE;
+
+	rtm->rtm_type = RTN_MULTICAST;
+	return 1;
+}
+EXPORT_SYMBOL(mr_fill_mroute);
+
+int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
+		     struct mr_table *(*iter)(struct net *net,
+					      struct mr_table *mrt),
+		     int (*fill)(struct mr_table *mrt,
+				 struct sk_buff *skb,
+				 u32 portid, u32 seq, struct mr_mfc *c,
+				 int cmd, int flags),
+		     spinlock_t *lock)
+{
+	unsigned int t = 0, e = 0, s_t = cb->args[0], s_e = cb->args[1];
+	struct net *net = sock_net(skb->sk);
+	struct mr_table *mrt;
+	struct mr_mfc *mfc;
+
+	rcu_read_lock();
+	for (mrt = iter(net, NULL); mrt; mrt = iter(net, mrt)) {
+		if (t < s_t)
+			goto next_table;
+		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
+			if (e < s_e)
+				goto next_entry;
+			if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
+				 cb->nlh->nlmsg_seq, mfc,
+				 RTM_NEWROUTE, NLM_F_MULTI) < 0)
+				goto done;
+next_entry:
+			e++;
+		}
+		e = 0;
+		s_e = 0;
+
+		spin_lock_bh(lock);
+		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
+			if (e < s_e)
+				goto next_entry2;
+			if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
+				 cb->nlh->nlmsg_seq, mfc,
+				 RTM_NEWROUTE, NLM_F_MULTI) < 0) {
+				spin_unlock_bh(lock);
+				goto done;
+			}
+next_entry2:
+			e++;
+		}
+		spin_unlock_bh(lock);
+		e = 0;
+		s_e = 0;
+next_table:
+		t++;
+	}
+done:
+	rcu_read_unlock();
+
+	cb->args[1] = e;
+	cb->args[0] = t;
+
+	return skb->len;
+}
+EXPORT_SYMBOL(mr_rtm_dumproute);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index c3b3f1c381e1..2a38f9de45d3 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -87,8 +87,6 @@ static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
 			   struct sk_buff *skb, struct mfc6_cache *cache);
 static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
 			      mifi_t mifi, int assert);
-static int __ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
-			       struct mfc6_cache *c, struct rtmsg *rtm);
 static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
 			      int cmd);
 static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
@@ -101,6 +99,23 @@ static void ipmr_expire_process(struct timer_list *t);
 #define ip6mr_for_each_table(mrt, net) \
 	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
 
+static struct mr_table *ip6mr_mr_table_iter(struct net *net,
+					    struct mr_table *mrt)
+{
+	struct mr_table *ret;
+
+	if (!mrt)
+		ret = list_entry_rcu(net->ipv6.mr6_tables.next,
+				     struct mr_table, list);
+	else
+		ret = list_entry_rcu(mrt->list.next,
+				     struct mr_table, list);
+
+	if (&ret->list == &net->ipv6.mr6_tables)
+		return NULL;
+	return ret;
+}
+
 static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
 {
 	struct mr_table *mrt;
@@ -247,6 +262,14 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
 #define ip6mr_for_each_table(mrt, net) \
 	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
 
+static struct mr_table *ip6mr_mr_table_iter(struct net *net,
+					    struct mr_table *mrt)
+{
+	if (!mrt)
+		return net->ipv6.mrt6;
+	return NULL;
+}
+
 static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
 {
 	return net->ipv6.mrt6;
@@ -948,7 +971,8 @@ static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
 			struct nlmsghdr *nlh = skb_pull(skb,
 							sizeof(struct ipv6hdr));
 
-			if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
+			if (mr_fill_mroute(mrt, skb, &c->_c,
+					   nlmsg_data(nlh)) > 0) {
 				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
 			} else {
 				nlh->nlmsg_type = NLMSG_ERROR;
@@ -2081,63 +2105,6 @@ int ip6_mr_input(struct sk_buff *skb)
 	return 0;
 }
 
-
-static int __ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
-			       struct mfc6_cache *c, struct rtmsg *rtm)
-{
-	struct rta_mfc_stats mfcs;
-	struct nlattr *mp_attr;
-	struct rtnexthop *nhp;
-	unsigned long lastuse;
-	int ct;
-
-	/* If cache is unresolved, don't try to parse IIF and OIF */
-	if (c->_c.mfc_parent >= MAXMIFS) {
-		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
-		return -ENOENT;
-	}
-
-	if (VIF_EXISTS(mrt, c->_c.mfc_parent) &&
-	    nla_put_u32(skb, RTA_IIF,
-			mrt->vif_table[c->_c.mfc_parent].dev->ifindex) < 0)
-		return -EMSGSIZE;
-	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
-	if (!mp_attr)
-		return -EMSGSIZE;
-
-	for (ct = c->_c.mfc_un.res.minvif;
-	     ct < c->_c.mfc_un.res.maxvif; ct++) {
-		if (VIF_EXISTS(mrt, ct) && c->_c.mfc_un.res.ttls[ct] < 255) {
-			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
-			if (!nhp) {
-				nla_nest_cancel(skb, mp_attr);
-				return -EMSGSIZE;
-			}
-
-			nhp->rtnh_flags = 0;
-			nhp->rtnh_hops = c->_c.mfc_un.res.ttls[ct];
-			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
-			nhp->rtnh_len = sizeof(*nhp);
-		}
-	}
-
-	nla_nest_end(skb, mp_attr);
-
-	lastuse = READ_ONCE(c->_c.mfc_un.res.lastuse);
-	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
-
-	mfcs.mfcs_packets = c->_c.mfc_un.res.pkt;
-	mfcs.mfcs_bytes = c->_c.mfc_un.res.bytes;
-	mfcs.mfcs_wrong_if = c->_c.mfc_un.res.wrong_if;
-	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
-	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
-			      RTA_PAD))
-		return -EMSGSIZE;
-
-	rtm->rtm_type = RTN_MULTICAST;
-	return 1;
-}
-
 int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
 		    u32 portid)
 {
@@ -2203,7 +2170,7 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
 		return err;
 	}
 
-	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
+	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
 	read_unlock(&mrt_lock);
 	return err;
 }
@@ -2239,7 +2206,7 @@ static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
 	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
 		goto nla_put_failure;
-	err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
+	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
 	/* do not break the dump if cache is unresolved */
 	if (err < 0 && err != -ENOENT)
 		goto nla_put_failure;
@@ -2252,6 +2219,14 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
+static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+			      u32 portid, u32 seq, struct mr_mfc *c,
+			      int cmd, int flags)
+{
+	return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
+				 cmd, flags);
+}
+
 static int mr6_msgsize(bool unresolved, int maxvif)
 {
 	size_t len =
@@ -2365,59 +2340,6 @@ errout:
 
 static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
 {
-	struct net *net = sock_net(skb->sk);
-	unsigned int t = 0, s_t;
-	unsigned int e = 0, s_e;
-	struct mr_table *mrt;
-	struct mr_mfc *mfc;
-
-	s_t = cb->args[0];
-	s_e = cb->args[1];
-
-	rcu_read_lock();
-	ip6mr_for_each_table(mrt, net) {
-		if (t < s_t)
-			goto next_table;
-		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
-			if (e < s_e)
-				goto next_entry;
-			if (ip6mr_fill_mroute(mrt, skb,
-					      NETLINK_CB(cb->skb).portid,
-					      cb->nlh->nlmsg_seq,
-					      (struct mfc6_cache *)mfc,
-					      RTM_NEWROUTE, NLM_F_MULTI) < 0)
-				goto done;
-next_entry:
-			e++;
-		}
-		e = 0;
-		s_e = 0;
-
-		spin_lock_bh(&mfc_unres_lock);
-		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
-			if (e < s_e)
-				goto next_entry2;
-			if (ip6mr_fill_mroute(mrt, skb,
-					      NETLINK_CB(cb->skb).portid,
-					      cb->nlh->nlmsg_seq,
-					      (struct mfc6_cache *)mfc,
-					      RTM_NEWROUTE, NLM_F_MULTI) < 0) {
-				spin_unlock_bh(&mfc_unres_lock);
-				goto done;
-			}
-next_entry2:
-			e++;
-		}
-		spin_unlock_bh(&mfc_unres_lock);
-		e = s_e = 0;
-next_table:
-		t++;
-	}
-done:
-	rcu_read_unlock();
-
-	cb->args[1] = e;
-	cb->args[0] = t;
-
-	return skb->len;
+	return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
+				_ip6mr_fill_mroute, &mfc_unres_lock);
 }