author		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>	2008-04-02 20:22:53 -0400
committer	YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>	2008-04-05 09:33:38 -0400
commit		7bc570c8b4f75ddb3fd5dbeb38127cdc4acbcc9c (patch)
tree		b688b728c3ea08479f75986d1e9f590fca1f8715
parent		80a9492a33dd7d852465625022d56ff76d62174d (diff)
[IPV6] MROUTE: Support multicast forwarding.

Based on ancient patch by Mickael Hoerdt <hoerdt@clarinet.u-strasbg.fr>, which is
available at
<http://www-r2.u-strasbg.fr/~hoerdt/dev/linux_ipv6_mforwarding/patch-linux-ipv6-mforwarding-0.1a>.

Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
-rw-r--r--	include/linux/Kbuild		|    1
-rw-r--r--	include/linux/ipv6.h		|    5
-rw-r--r--	include/linux/mroute6.h		|  227
-rw-r--r--	net/ipv6/Kconfig		|    7
-rw-r--r--	net/ipv6/Makefile		|    2
-rw-r--r--	net/ipv6/addrconf.c		|   15
-rw-r--r--	net/ipv6/af_inet6.c		|    6
-rw-r--r--	net/ipv6/ip6_input.c		|   87
-rw-r--r--	net/ipv6/ip6_output.c		|    6
-rw-r--r--	net/ipv6/ip6mr.c		| 1384
-rw-r--r--	net/ipv6/ipv6_sockglue.c	|    7
-rw-r--r--	net/ipv6/raw.c			|    7
-rw-r--r--	net/ipv6/route.c		|   30
13 files changed, 1754 insertions, 30 deletions
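
Everything below is driven from userspace through setsockopt() on a raw ICMPv6 socket: ip6_mroute_setsockopt() in ip6mr.c insists on SOCK_RAW with IPPROTO_ICMPV6, and the Kbuild change exports <linux/mroute6.h> for exactly this purpose. The following is a minimal sketch, not taken from the patch, of how a daemon such as pim6sd might exercise the new control API; the interface indexes and addresses are placeholders and error handling is reduced to perror().

/*
 * Sketch only (not part of the patch): drive the new MRT6_* API from
 * userspace.  Needs CAP_NET_ADMIN; ifindexes 2/3 and the addresses are
 * placeholders.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <linux/mroute6.h>

int main(void)
{
	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	int on = 1;
	int pifi[2] = { 2, 3 };		/* physical ifindexes (assumed) */
	struct mif6ctl mif;
	struct mf6cctl mfc;
	int i;

	if (s < 0) {
		perror("socket");
		return 1;
	}

	/* MRT6_INIT: this socket becomes the kernel's mroute6_socket. */
	if (setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &on, sizeof(on)) < 0)
		perror("MRT6_INIT");

	/* Register two physical interfaces as MIF 0 and MIF 1. */
	for (i = 0; i < 2; i++) {
		memset(&mif, 0, sizeof(mif));
		mif.mif6c_mifi = i;
		mif.mif6c_pifi = pifi[i];
		if (setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mif, sizeof(mif)) < 0)
			perror("MRT6_ADD_MIF");
	}

	/* Install one (S,G) entry: arrives on MIF 0, goes out on MIF 1. */
	memset(&mfc, 0, sizeof(mfc));
	mfc.mf6cc_origin.sin6_family = AF_INET6;
	inet_pton(AF_INET6, "2001:db8::1", &mfc.mf6cc_origin.sin6_addr);
	mfc.mf6cc_mcastgrp.sin6_family = AF_INET6;
	inet_pton(AF_INET6, "ff3e::4321", &mfc.mf6cc_mcastgrp.sin6_addr);
	mfc.mf6cc_parent = 0;
	IF_ZERO(&mfc.mf6cc_ifset);
	IF_SET(1, &mfc.mf6cc_ifset);
	if (setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MFC, &mfc, sizeof(mfc)) < 0)
		perror("MRT6_ADD_MFC");

	/* ... read mrt6msg upcalls and MLD traffic from s here ... */

	setsockopt(s, IPPROTO_IPV6, MRT6_DONE, &on, sizeof(on));
	return 0;
}

Note that the MIF indexes used in MRT6_ADD_MFC refer to the mif6c_mifi values registered with MRT6_ADD_MIF, not to kernel ifindexes.
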
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 84736acb4b99..29ab9b95d376 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -261,6 +261,7 @@ unifdef-y += mempolicy.h
 unifdef-y += mii.h
 unifdef-y += mman.h
 unifdef-y += mroute.h
+unifdef-y += mroute6.h
 unifdef-y += msdos_fs.h
 unifdef-y += msg.h
 unifdef-y += nbd.h
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index b90d3d461d4e..f53e4764fc05 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -160,6 +160,9 @@ struct ipv6_devconf {
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 	__s32		optimistic_dad;
 #endif
+#ifdef CONFIG_IPV6_MROUTE
+	__s32		mc_forwarding;
+#endif
 	void		*sysctl;
 };
 
@@ -190,6 +193,7 @@ enum {
 	DEVCONF_PROXY_NDP,
 	DEVCONF_OPTIMISTIC_DAD,
 	DEVCONF_ACCEPT_SOURCE_ROUTE,
+	DEVCONF_MC_FORWARDING,
 	DEVCONF_MAX
 };
 
@@ -230,6 +234,7 @@ struct inet6_skb_parm {
 #endif
 
 #define IP6SKB_XFRM_TRANSFORMED	1
+#define IP6SKB_FORWARDED	2
 };
 
 #define IP6CB(skb)	((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
new file mode 100644
index 000000000000..b92190304e0b
--- /dev/null
+++ b/include/linux/mroute6.h
@@ -0,0 +1,227 @@
1#ifndef __LINUX_MROUTE6_H
2#define __LINUX_MROUTE6_H
3
4#include <linux/types.h>
5#include <linux/sockios.h>
6
7/*
8 * Based on the MROUTING 3.5 defines primarily to keep
9 * source compatibility with BSD.
10 *
11 * See the pim6sd code for the original history.
12 *
13 * Protocol Independent Multicast (PIM) data structures included
14 * Carlos Picoto (cap@di.fc.ul.pt)
15 *
16 */
17
18#define MRT6_BASE 200
19#define MRT6_INIT (MRT6_BASE) /* Activate the kernel mroute code */
20#define MRT6_DONE (MRT6_BASE+1) /* Shutdown the kernel mroute */
21#define MRT6_ADD_MIF (MRT6_BASE+2) /* Add a virtual interface */
22#define MRT6_DEL_MIF (MRT6_BASE+3) /* Delete a virtual interface */
23#define MRT6_ADD_MFC (MRT6_BASE+4) /* Add a multicast forwarding entry */
24#define MRT6_DEL_MFC (MRT6_BASE+5) /* Delete a multicast forwarding entry */
25#define MRT6_VERSION (MRT6_BASE+6) /* Get the kernel multicast version */
26
27#define SIOCGETMIFCNT_IN6 SIOCPROTOPRIVATE /* IP protocol privates */
28#define SIOCGETSGCNT_IN6 (SIOCPROTOPRIVATE+1)
29#define SIOCGETRPF (SIOCPROTOPRIVATE+2)
30
31#define MAXMIFS 32
32typedef unsigned long mifbitmap_t; /* User mode code depends on this lot */
33typedef unsigned short mifi_t;
34#define ALL_MIFS ((mifi_t)(-1))
35
36#ifndef IF_SETSIZE
37#define IF_SETSIZE 256
38#endif
39
40typedef __u32 if_mask;
41#define NIFBITS (sizeof(if_mask) * 8) /* bits per mask */
42
43#if !defined(__KERNEL__) && !defined(DIV_ROUND_UP)
44#define DIV_ROUND_UP(x,y) (((x) + ((y) - 1)) / (y))
45#endif
46
47typedef struct if_set {
48 if_mask ifs_bits[DIV_ROUND_UP(IF_SETSIZE, NIFBITS)];
49} if_set;
50
51#define IF_SET(n, p) ((p)->ifs_bits[(n)/NIFBITS] |= (1 << ((n) % NIFBITS)))
52#define IF_CLR(n, p) ((p)->ifs_bits[(n)/NIFBITS] &= ~(1 << ((n) % NIFBITS)))
53#define IF_ISSET(n, p) ((p)->ifs_bits[(n)/NIFBITS] & (1 << ((n) % NIFBITS)))
54#define IF_COPY(f, t) bcopy(f, t, sizeof(*(f)))
55#define IF_ZERO(p) bzero(p, sizeof(*(p)))
56
57/*
58 * Passed by mrouted for an MRT_ADD_MIF - again we use the
59 * mrouted 3.6 structures for compatibility
60 */
61
62struct mif6ctl {
63 mifi_t mif6c_mifi; /* Index of MIF */
64 unsigned char mif6c_flags; /* MIFF_ flags */
65 unsigned char vifc_threshold; /* ttl limit */
66 u_short mif6c_pifi; /* the index of the physical IF */
67 unsigned int vifc_rate_limit; /* Rate limiter values (NI) */
68};
69
70#define MIFF_REGISTER 0x1 /* register vif */
71
72/*
73 * Cache manipulation structures for mrouted and PIMd
74 */
75
76struct mf6cctl
77{
78 struct sockaddr_in6 mf6cc_origin; /* Origin of mcast */
79 struct sockaddr_in6 mf6cc_mcastgrp; /* Group in question */
80 mifi_t mf6cc_parent; /* Where it arrived */
81 struct if_set mf6cc_ifset; /* Where it is going */
82};
83
84/*
85 * Group count retrieval for pim6sd
86 */
87
88struct sioc_sg_req6
89{
90 struct sockaddr_in6 src;
91 struct sockaddr_in6 grp;
92 unsigned long pktcnt;
93 unsigned long bytecnt;
94 unsigned long wrong_if;
95};
96
97/*
98 * To get vif packet counts
99 */
100
101struct sioc_mif_req6
102{
103 mifi_t mifi; /* Which iface */
104 unsigned long icount; /* In packets */
105 unsigned long ocount; /* Out packets */
106 unsigned long ibytes; /* In bytes */
107 unsigned long obytes; /* Out bytes */
108};
109
110/*
111 * That's all usermode folks
112 */
113
114#ifdef __KERNEL__
115
116#include <linux/skbuff.h> /* for struct sk_buff_head */
117
118struct net_device;
119struct inet6_dev *ipv6_find_idev(struct net_device *dev);
120
121#ifdef CONFIG_IPV6_MROUTE
122static inline int ip6_mroute_opt(int opt)
123{
124 return (opt >= MRT6_BASE) && (opt <= MRT6_BASE + 10);
125}
126#else
127static inline int ip6_mroute_opt(int opt)
128{
129 return 0;
130}
131#endif
132
133struct sock;
134
135extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, int);
136extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
137extern int ip6_mr_input(struct sk_buff *skb);
138extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
139extern void ip6_mr_init(void);
140
141struct mif_device
142{
143 struct net_device *dev; /* Device we are using */
144 unsigned long bytes_in,bytes_out;
145 unsigned long pkt_in,pkt_out; /* Statistics */
146 unsigned long rate_limit; /* Traffic shaping (NI) */
147 unsigned char threshold; /* TTL threshold */
148 unsigned short flags; /* Control flags */
149 int link; /* Physical interface index */
150};
151
152#define VIFF_STATIC 0x8000
153
154struct mfc6_cache
155{
156 struct mfc6_cache *next; /* Next entry on cache line */
157 struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */
158 struct in6_addr mf6c_origin; /* Source of packet */
159 mifi_t mf6c_parent; /* Source interface */
160 int mfc_flags; /* Flags on line */
161
162 union {
163 struct {
164 unsigned long expires;
165 struct sk_buff_head unresolved; /* Unresolved buffers */
166 } unres;
167 struct {
168 unsigned long last_assert;
169 int minvif;
170 int maxvif;
171 unsigned long bytes;
172 unsigned long pkt;
173 unsigned long wrong_if;
174 unsigned char ttls[MAXMIFS]; /* TTL thresholds */
175 } res;
176 } mfc_un;
177};
178
179#define MFC_STATIC 1
180#define MFC_NOTIFY 2
181
182#define MFC6_LINES 64
183
184#define MFC6_HASH(a, g) (((__force u32)(a)->s6_addr32[0] ^ \
185 (__force u32)(a)->s6_addr32[1] ^ \
186 (__force u32)(a)->s6_addr32[2] ^ \
187 (__force u32)(a)->s6_addr32[3] ^ \
188 (__force u32)(g)->s6_addr32[0] ^ \
189 (__force u32)(g)->s6_addr32[1] ^ \
190 (__force u32)(g)->s6_addr32[2] ^ \
191 (__force u32)(g)->s6_addr32[3]) % MFC6_LINES)
192
193#define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. of asserts */
194
195#endif
196
197#ifdef __KERNEL__
198struct rtmsg;
199extern int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait);
200
201#ifdef CONFIG_IPV6_MROUTE
202extern struct sock *mroute6_socket;
203extern int ip6mr_sk_done(struct sock *sk);
204#else
205#define mroute6_socket NULL
206static inline int ip6mr_sk_done(struct sock *sk) { return 0; }
207#endif
208#endif
209
210/*
211 * Structure used to communicate from kernel to multicast router.
212 * We'll overlay the structure onto an MLD header (not an IPv6 header like igmpmsg{}
213 * used for the IPv4 implementation). This is because this structure will be passed via an
214 * IPv6 raw socket, on which an application will only receive the payload, i.e. the data after
215 * the IPv6 header and all the extension headers. (See section 3 of RFC 3542)
216 */
217
218struct mrt6msg {
219#define MRT6MSG_NOCACHE 1
220 __u8 im6_mbz; /* must be zero */
221 __u8 im6_msgtype; /* what type of message */
222 __u16 im6_mif; /* mif rec'd on */
223 __u32 im6_pad; /* padding for 64 bit arch */
224 struct in6_addr im6_src, im6_dst;
225};
226
227#endif
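
As the comment above struct mrt6msg explains, the MRT6MSG_NOCACHE upcall generated by ip6mr_cache_report() arrives on the same raw ICMPv6 control socket with the IPv6 header already stripped, so the structure starts at offset 0 of each read. A hedged sketch (not part of the patch) of how a daemon might separate upcalls from ordinary MLD/ICMPv6 traffic using the im6_mbz field:

/*
 * Sketch only (not part of the patch): pull a kernel upcall out of the
 * ICMPv6 stream on the mroute6 control socket `s` (MRT6_INIT already done).
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/mroute6.h>

static void handle_upcall(int s)
{
	struct mrt6msg msg;
	char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN];
	/* Reading into the struct truncates larger ICMPv6 datagrams, which is
	 * acceptable here because only upcalls are of interest. */
	ssize_t n = read(s, &msg, sizeof(msg));

	if (n < (ssize_t)sizeof(msg))
		return;
	if (msg.im6_mbz != 0)
		return;		/* real MLD/ICMPv6 traffic, not an upcall */
	if (msg.im6_msgtype != MRT6MSG_NOCACHE)
		return;

	inet_ntop(AF_INET6, &msg.im6_src, src, sizeof(src));
	inet_ntop(AF_INET6, &msg.im6_dst, dst, sizeof(dst));
	printf("NOCACHE: (%s, %s) arrived on mif %u\n", src, dst, msg.im6_mif);
	/* A daemon would now pick the outgoing MIF set and install the entry
	 * with MRT6_ADD_MFC before the ~10s unresolved-queue timer fires. */
}
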
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 47263e45bacb..9a2ea81e499f 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -209,3 +209,10 @@ config IPV6_SUBTREES
 
 	  If unsure, say N.
 
+config IPV6_MROUTE
+	bool "IPv6: multicast routing (EXPERIMENTAL)"
+	depends on IPV6 && EXPERIMENTAL
+	---help---
+	  Experimental support for IPv6 multicast forwarding.
+	  If unsure, say N.
+
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index ae14617e607f..686934acfac1 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -11,6 +11,8 @@ ipv6-objs :=	af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
 		exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o
 
 ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o
+ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o
+
 ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \
 	xfrm6_output.o
 ipv6-$(CONFIG_NETFILTER) += netfilter.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 11037615bc73..dbc51af69017 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -412,7 +412,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
 	return ndev;
 }
 
-static struct inet6_dev * ipv6_find_idev(struct net_device *dev)
+struct inet6_dev * ipv6_find_idev(struct net_device *dev)
 {
 	struct inet6_dev *idev;
 
@@ -3547,6 +3547,9 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 	array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
 #endif
+#ifdef CONFIG_IPV6_MROUTE
+	array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
+#endif
 }
 
 static inline size_t inet6_if_nlmsg_size(void)
@@ -4095,6 +4098,16 @@ static struct addrconf_sysctl_table
 
 	},
 #endif
+#ifdef CONFIG_IPV6_MROUTE
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "mc_forwarding",
+		.data		= &ipv6_devconf.mc_forwarding,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
 	{
 		.ctl_name	= 0,	/* sentinel */
 	}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 1731b0abf7f5..3c6aafb02183 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -61,6 +61,9 @@
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
+#ifdef CONFIG_IPV6_MROUTE
+#include <linux/mroute6.h>
+#endif
 
 MODULE_AUTHOR("Cast of dozens");
 MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
@@ -953,6 +956,9 @@ static int __init inet6_init(void)
 	err = icmpv6_init();
 	if (err)
 		goto icmp_fail;
+#ifdef CONFIG_IPV6_MROUTE
+	ip6_mr_init();
+#endif
 	err = ndisc_init();
 	if (err)
 		goto ndisc_fail;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 43a617e2268b..09a3201e408a 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -29,6 +29,7 @@
 #include <linux/netdevice.h>
 #include <linux/in6.h>
 #include <linux/icmpv6.h>
+#include <linux/mroute6.h>
 
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
@@ -236,36 +237,84 @@ int ip6_mc_input(struct sk_buff *skb)
 	hdr = ipv6_hdr(skb);
 	deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);
 
+#ifdef CONFIG_IPV6_MROUTE
 	/*
-	 *	IPv6 multicast router mode isnt currently supported.
+	 *	IPv6 multicast router mode is now supported ;)
 	 */
-#if 0
-	if (ipv6_config.multicast_route) {
-		int addr_type;
-
-		addr_type = ipv6_addr_type(&hdr->daddr);
-
-		if (!(addr_type & (IPV6_ADDR_LOOPBACK | IPV6_ADDR_LINKLOCAL))) {
-			struct sk_buff *skb2;
-			struct dst_entry *dst;
+	if (ipv6_devconf.mc_forwarding &&
+	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
+		/*
+		 * Okay, we try to forward - split and duplicate
+		 * packets.
+		 */
+		struct sk_buff *skb2;
+		struct inet6_skb_parm *opt = IP6CB(skb);
+
+		/* Check for MLD */
+		if (unlikely(opt->ra)) {
+			/* Check if this is a mld message */
+			u8 *ptr = skb_network_header(skb) + opt->ra;
+			struct icmp6hdr *icmp6;
+			u8 nexthdr = hdr->nexthdr;
+			int offset;
+
+			/* Check if the value of Router Alert
+			 * is for MLD (0x0000).
+			 */
+			if ((ptr[2] | ptr[3]) == 0) {
+				if (!ipv6_ext_hdr(nexthdr)) {
+					/* BUG */
+					goto discard;
+				}
+				offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
+							  &nexthdr);
+				if (offset < 0)
+					goto discard;
+
+				if (nexthdr != IPPROTO_ICMPV6)
+					goto discard;
+
+				if (!pskb_may_pull(skb, (skb_network_header(skb) +
+						   offset + 1 - skb->data)))
+					goto discard;
+
+				icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
+
+				switch (icmp6->icmp6_type) {
+				case ICMPV6_MGM_QUERY:
+				case ICMPV6_MGM_REPORT:
+				case ICMPV6_MGM_REDUCTION:
+				case ICMPV6_MLD2_REPORT:
+					break;
+				default:
+					/* Bogus */
+					goto discard;
+				}
+				deliver = 1;
+				goto out;
+			}
+			/* unknown RA - process it normally */
+		}
 
-			dst = skb->dst;
+		if (deliver)
+			skb2 = skb_clone(skb, GFP_ATOMIC);
+		else {
+			skb2 = skb;
+			skb = NULL;
+		}
 
-			if (deliver) {
-				skb2 = skb_clone(skb, GFP_ATOMIC);
-				dst_output(skb2);
-			} else {
-				dst_output(skb);
-				return 0;
-			}
+		if (skb2) {
+			skb2->dev = skb2->dst->dev;
+			ip6_mr_input(skb2);
 		}
 	}
 #endif
-
+out:
 	if (likely(deliver)) {
 		ip6_input(skb);
 		return 0;
 	}
+discard:
 	/* discard */
 	kfree_skb(skb);
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index a8b4da25b0a7..c0dbe549cc42 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -55,6 +55,7 @@
 #include <net/icmp.h>
 #include <net/xfrm.h>
 #include <net/checksum.h>
+#include <linux/mroute6.h>
 
 static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
 
@@ -137,8 +138,9 @@ static int ip6_output2(struct sk_buff *skb)
 		struct inet6_dev *idev = ip6_dst_idev(skb->dst);
 
 		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
-		    ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
-					&ipv6_hdr(skb)->saddr)) {
+		    ((mroute6_socket && !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
+		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
+					 &ipv6_hdr(skb)->saddr))) {
 			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
 
 			/* Do not check for IFF_ALLMULTI; multicast routing
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
new file mode 100644
index 000000000000..1bdf3c177d58
--- /dev/null
+++ b/net/ipv6/ip6mr.c
@@ -0,0 +1,1384 @@
1/*
2 * Linux IPv6 multicast routing support for BSD pim6sd
3 * Based on net/ipv4/ipmr.c.
4 *
5 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
6 * LSIIT Laboratory, Strasbourg, France
7 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
8 * 6WIND, Paris, France
9 * Copyright (C)2007,2008 USAGI/WIDE Project
10 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 */
18
19#include <asm/system.h>
20#include <asm/uaccess.h>
21#include <linux/types.h>
22#include <linux/sched.h>
23#include <linux/errno.h>
24#include <linux/timer.h>
25#include <linux/mm.h>
26#include <linux/kernel.h>
27#include <linux/fcntl.h>
28#include <linux/stat.h>
29#include <linux/socket.h>
30#include <linux/in.h>
31#include <linux/inet.h>
32#include <linux/netdevice.h>
33#include <linux/inetdevice.h>
34#include <linux/igmp.h>
35#include <linux/proc_fs.h>
36#include <linux/seq_file.h>
37#include <linux/mroute.h>
38#include <linux/init.h>
39#include <net/ip.h>
40#include <net/protocol.h>
41#include <linux/skbuff.h>
42#include <net/sock.h>
43#include <net/icmp.h>
44#include <net/udp.h>
45#include <net/raw.h>
46#include <net/route.h>
47#include <linux/notifier.h>
48#include <linux/if_arp.h>
49#include <linux/netfilter_ipv4.h>
50#include <net/ipip.h>
51#include <net/checksum.h>
52#include <net/netlink.h>
53
54#include <net/ipv6.h>
55#include <net/ip6_route.h>
56#include <linux/mroute6.h>
57#include <net/addrconf.h>
58#include <linux/netfilter_ipv6.h>
59
60struct sock *mroute6_socket;
61
62
63/* Big lock, protecting vif table, mrt cache and mroute socket state.
64 Note that the changes are semaphored via rtnl_lock.
65 */
66
67static DEFINE_RWLOCK(mrt_lock);
68
69/*
70 * Multicast router control variables
71 */
72
73static struct mif_device vif6_table[MAXMIFS]; /* Devices */
74static int maxvif;
75
76#define MIF_EXISTS(idx) (vif6_table[idx].dev != NULL)
77
78static struct mfc6_cache *mfc6_cache_array[MFC_LINES]; /* Forwarding cache */
79
80static struct mfc6_cache *mfc_unres_queue; /* Queue of unresolved entries */
81static atomic_t cache_resolve_queue_len; /* Size of unresolved */
82
83/* Special spinlock for queue of unresolved entries */
84static DEFINE_SPINLOCK(mfc_unres_lock);
85
86/* We return to original Alan's scheme. Hash table of resolved
87 entries is changed only in process context and protected
88 with weak lock mrt_lock. Queue of unresolved entries is protected
89 with strong spinlock mfc_unres_lock.
90
91 In this case data path is free of exclusive locks at all.
92 */
93
94static struct kmem_cache *mrt_cachep __read_mostly;
95
96static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache);
97static int ip6mr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
98static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);
99
100static struct timer_list ipmr_expire_timer;
101
102
103#ifdef CONFIG_PROC_FS
104
105struct ipmr_mfc_iter {
106 struct mfc6_cache **cache;
107 int ct;
108};
109
110
111static struct mfc6_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
112{
113 struct mfc6_cache *mfc;
114
115 it->cache = mfc6_cache_array;
116 read_lock(&mrt_lock);
117 for (it->ct = 0; it->ct < ARRAY_SIZE(mfc6_cache_array); it->ct++)
118 for (mfc = mfc6_cache_array[it->ct]; mfc; mfc = mfc->next)
119 if (pos-- == 0)
120 return mfc;
121 read_unlock(&mrt_lock);
122
123 it->cache = &mfc_unres_queue;
124 spin_lock_bh(&mfc_unres_lock);
125 for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
126 if (pos-- == 0)
127 return mfc;
128 spin_unlock_bh(&mfc_unres_lock);
129
130 it->cache = NULL;
131 return NULL;
132}
133
134
135
136
137/*
138 * The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
139 */
140
141struct ipmr_vif_iter {
142 int ct;
143};
144
145static struct mif_device *ip6mr_vif_seq_idx(struct ipmr_vif_iter *iter,
146 loff_t pos)
147{
148 for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
149 if (!MIF_EXISTS(iter->ct))
150 continue;
151 if (pos-- == 0)
152 return &vif6_table[iter->ct];
153 }
154 return NULL;
155}
156
157static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
158 __acquires(mrt_lock)
159{
160 read_lock(&mrt_lock);
161 return (*pos ? ip6mr_vif_seq_idx(seq->private, *pos - 1)
162 : SEQ_START_TOKEN);
163}
164
165static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
166{
167 struct ipmr_vif_iter *iter = seq->private;
168
169 ++*pos;
170 if (v == SEQ_START_TOKEN)
171 return ip6mr_vif_seq_idx(iter, 0);
172
173 while (++iter->ct < maxvif) {
174 if (!MIF_EXISTS(iter->ct))
175 continue;
176 return &vif6_table[iter->ct];
177 }
178 return NULL;
179}
180
181static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
182 __releases(mrt_lock)
183{
184 read_unlock(&mrt_lock);
185}
186
187static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
188{
189 if (v == SEQ_START_TOKEN) {
190 seq_puts(seq,
191 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
192 } else {
193 const struct mif_device *vif = v;
194 const char *name = vif->dev ? vif->dev->name : "none";
195
196 seq_printf(seq,
197 "%2Zd %-10s %8ld %7ld %8ld %7ld %05X\n",
198 vif - vif6_table,
199 name, vif->bytes_in, vif->pkt_in,
200 vif->bytes_out, vif->pkt_out,
201 vif->flags);
202 }
203 return 0;
204}
205
206static struct seq_operations ip6mr_vif_seq_ops = {
207 .start = ip6mr_vif_seq_start,
208 .next = ip6mr_vif_seq_next,
209 .stop = ip6mr_vif_seq_stop,
210 .show = ip6mr_vif_seq_show,
211};
212
213static int ip6mr_vif_open(struct inode *inode, struct file *file)
214{
215 return seq_open_private(file, &ip6mr_vif_seq_ops,
216 sizeof(struct ipmr_vif_iter));
217}
218
219static struct file_operations ip6mr_vif_fops = {
220 .owner = THIS_MODULE,
221 .open = ip6mr_vif_open,
222 .read = seq_read,
223 .llseek = seq_lseek,
224 .release = seq_release,
225};
226
227static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
228{
229 return (*pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
230 : SEQ_START_TOKEN);
231}
232
233static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
234{
235 struct mfc6_cache *mfc = v;
236 struct ipmr_mfc_iter *it = seq->private;
237
238 ++*pos;
239
240 if (v == SEQ_START_TOKEN)
241 return ipmr_mfc_seq_idx(seq->private, 0);
242
243 if (mfc->next)
244 return mfc->next;
245
246 if (it->cache == &mfc_unres_queue)
247 goto end_of_list;
248
249 BUG_ON(it->cache != mfc6_cache_array);
250
251 while (++it->ct < ARRAY_SIZE(mfc6_cache_array)) {
252 mfc = mfc6_cache_array[it->ct];
253 if (mfc)
254 return mfc;
255 }
256
257 /* exhausted cache_array, show unresolved */
258 read_unlock(&mrt_lock);
259 it->cache = &mfc_unres_queue;
260 it->ct = 0;
261
262 spin_lock_bh(&mfc_unres_lock);
263 mfc = mfc_unres_queue;
264 if (mfc)
265 return mfc;
266
267 end_of_list:
268 spin_unlock_bh(&mfc_unres_lock);
269 it->cache = NULL;
270
271 return NULL;
272}
273
274static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
275{
276 struct ipmr_mfc_iter *it = seq->private;
277
278 if (it->cache == &mfc_unres_queue)
279 spin_unlock_bh(&mfc_unres_lock);
280 else if (it->cache == mfc6_cache_array)
281 read_unlock(&mrt_lock);
282}
283
284static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
285{
286 int n;
287
288 if (v == SEQ_START_TOKEN) {
289 seq_puts(seq,
290 "Group "
291 "Origin "
292 "Iif Pkts Bytes Wrong Oifs\n");
293 } else {
294 const struct mfc6_cache *mfc = v;
295 const struct ipmr_mfc_iter *it = seq->private;
296
297 seq_printf(seq,
298 NIP6_FMT " " NIP6_FMT " %-3d %8ld %8ld %8ld",
299 NIP6(mfc->mf6c_mcastgrp), NIP6(mfc->mf6c_origin),
300 mfc->mf6c_parent,
301 mfc->mfc_un.res.pkt,
302 mfc->mfc_un.res.bytes,
303 mfc->mfc_un.res.wrong_if);
304
305 if (it->cache != &mfc_unres_queue) {
306 for (n = mfc->mfc_un.res.minvif;
307 n < mfc->mfc_un.res.maxvif; n++) {
308 if (MIF_EXISTS(n) &&
309 mfc->mfc_un.res.ttls[n] < 255)
310 seq_printf(seq,
311 " %2d:%-3d",
312 n, mfc->mfc_un.res.ttls[n]);
313 }
314 }
315 seq_putc(seq, '\n');
316 }
317 return 0;
318}
319
320static struct seq_operations ipmr_mfc_seq_ops = {
321 .start = ipmr_mfc_seq_start,
322 .next = ipmr_mfc_seq_next,
323 .stop = ipmr_mfc_seq_stop,
324 .show = ipmr_mfc_seq_show,
325};
326
327static int ipmr_mfc_open(struct inode *inode, struct file *file)
328{
329 return seq_open_private(file, &ipmr_mfc_seq_ops,
330 sizeof(struct ipmr_mfc_iter));
331}
332
333static struct file_operations ip6mr_mfc_fops = {
334 .owner = THIS_MODULE,
335 .open = ipmr_mfc_open,
336 .read = seq_read,
337 .llseek = seq_lseek,
338 .release = seq_release,
339};
340#endif
341
342/*
343 * Delete a VIF entry
344 */
345
346static int mif6_delete(int vifi)
347{
348 struct mif_device *v;
349 struct net_device *dev;
350 if (vifi < 0 || vifi >= maxvif)
351 return -EADDRNOTAVAIL;
352
353 v = &vif6_table[vifi];
354
355 write_lock_bh(&mrt_lock);
356 dev = v->dev;
357 v->dev = NULL;
358
359 if (!dev) {
360 write_unlock_bh(&mrt_lock);
361 return -EADDRNOTAVAIL;
362 }
363
364 if (vifi + 1 == maxvif) {
365 int tmp;
366 for (tmp = vifi - 1; tmp >= 0; tmp--) {
367 if (MIF_EXISTS(tmp))
368 break;
369 }
370 maxvif = tmp + 1;
371 }
372
373 write_unlock_bh(&mrt_lock);
374
375 dev_set_allmulti(dev, -1);
376
377 if (v->flags & MIFF_REGISTER)
378 unregister_netdevice(dev);
379
380 dev_put(dev);
381 return 0;
382}
383
384/* Destroy an unresolved cache entry, killing queued skbs
385 and reporting error to netlink readers.
386 */
387
388static void ip6mr_destroy_unres(struct mfc6_cache *c)
389{
390 struct sk_buff *skb;
391
392 atomic_dec(&cache_resolve_queue_len);
393
394 while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
395 if (ipv6_hdr(skb)->version == 0) {
396 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
397 nlh->nlmsg_type = NLMSG_ERROR;
398 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
399 skb_trim(skb, nlh->nlmsg_len);
400 ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
401 rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
402 } else
403 kfree_skb(skb);
404 }
405
406 kmem_cache_free(mrt_cachep, c);
407}
408
409
410/* Single timer process for all the unresolved queue. */
411
412static void ipmr_do_expire_process(unsigned long dummy)
413{
414 unsigned long now = jiffies;
415 unsigned long expires = 10 * HZ;
416 struct mfc6_cache *c, **cp;
417
418 cp = &mfc_unres_queue;
419
420 while ((c = *cp) != NULL) {
421 if (time_after(c->mfc_un.unres.expires, now)) {
422 /* not yet... */
423 unsigned long interval = c->mfc_un.unres.expires - now;
424 if (interval < expires)
425 expires = interval;
426 cp = &c->next;
427 continue;
428 }
429
430 *cp = c->next;
431 ip6mr_destroy_unres(c);
432 }
433
434 if (atomic_read(&cache_resolve_queue_len))
435 mod_timer(&ipmr_expire_timer, jiffies + expires);
436}
437
438static void ipmr_expire_process(unsigned long dummy)
439{
440 if (!spin_trylock(&mfc_unres_lock)) {
441 mod_timer(&ipmr_expire_timer, jiffies + 1);
442 return;
443 }
444
445 if (atomic_read(&cache_resolve_queue_len))
446 ipmr_do_expire_process(dummy);
447
448 spin_unlock(&mfc_unres_lock);
449}
450
451/* Fill oifs list. It is called under write locked mrt_lock. */
452
453static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls)
454{
455 int vifi;
456
457 cache->mfc_un.res.minvif = MAXVIFS;
458 cache->mfc_un.res.maxvif = 0;
459 memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
460
461 for (vifi = 0; vifi < maxvif; vifi++) {
462 if (MIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
463 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
464 if (cache->mfc_un.res.minvif > vifi)
465 cache->mfc_un.res.minvif = vifi;
466 if (cache->mfc_un.res.maxvif <= vifi)
467 cache->mfc_un.res.maxvif = vifi + 1;
468 }
469 }
470}
471
472static int mif6_add(struct mif6ctl *vifc, int mrtsock)
473{
474 int vifi = vifc->mif6c_mifi;
475 struct mif_device *v = &vif6_table[vifi];
476 struct net_device *dev;
477
478 /* Is vif busy ? */
479 if (MIF_EXISTS(vifi))
480 return -EADDRINUSE;
481
482 switch (vifc->mif6c_flags) {
483 case 0:
484 dev = dev_get_by_index(&init_net, vifc->mif6c_pifi);
485 if (!dev)
486 return -EADDRNOTAVAIL;
487 dev_put(dev);
488 break;
489 default:
490 return -EINVAL;
491 }
492
493 dev_set_allmulti(dev, 1);
494
495 /*
496 * Fill in the VIF structures
497 */
498 v->rate_limit = vifc->vifc_rate_limit;
499 v->flags = vifc->mif6c_flags;
500 if (!mrtsock)
501 v->flags |= VIFF_STATIC;
502 v->threshold = vifc->vifc_threshold;
503 v->bytes_in = 0;
504 v->bytes_out = 0;
505 v->pkt_in = 0;
506 v->pkt_out = 0;
507 v->link = dev->ifindex;
508 if (v->flags & MIFF_REGISTER)
509 v->link = dev->iflink;
510
511 /* And finish update writing critical data */
512 write_lock_bh(&mrt_lock);
513 dev_hold(dev);
514 v->dev = dev;
515 if (vifi + 1 > maxvif)
516 maxvif = vifi + 1;
517 write_unlock_bh(&mrt_lock);
518 return 0;
519}
520
521static struct mfc6_cache *ip6mr_cache_find(struct in6_addr *origin, struct in6_addr *mcastgrp)
522{
523 int line = MFC6_HASH(mcastgrp, origin);
524 struct mfc6_cache *c;
525
526 for (c = mfc6_cache_array[line]; c; c = c->next) {
527 if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
528 ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
529 break;
530 }
531 return c;
532}
533
534/*
535 * Allocate a multicast cache entry
536 */
537static struct mfc6_cache *ip6mr_cache_alloc(void)
538{
539 struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_KERNEL);
540 if (c == NULL)
541 return NULL;
542 memset(c, 0, sizeof(*c));
543 c->mfc_un.res.minvif = MAXVIFS;
544 return c;
545}
546
547static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
548{
549 struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
550 if (c == NULL)
551 return NULL;
552 memset(c, 0, sizeof(*c));
553 skb_queue_head_init(&c->mfc_un.unres.unresolved);
554 c->mfc_un.unres.expires = jiffies + 10 * HZ;
555 return c;
556}
557
558/*
559 * A cache entry has gone into a resolved state from queued
560 */
561
562static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
563{
564 struct sk_buff *skb;
565
566 /*
567 * Play the pending entries through our router
568 */
569
570 while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
571 if (ipv6_hdr(skb)->version == 0) {
572 int err;
573 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
574
575 if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
576 nlh->nlmsg_len = skb->tail - (u8 *)nlh;
577 } else {
578 nlh->nlmsg_type = NLMSG_ERROR;
579 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
580 skb_trim(skb, nlh->nlmsg_len);
581 ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
582 }
583 err = rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
584 } else
585 ip6_mr_forward(skb, c);
586 }
587}
588
589/*
590 * Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
591 * expects the following bizarre scheme.
592 *
593 * Called under mrt_lock.
594 */
595
596static int ip6mr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
597{
598 struct sk_buff *skb;
599 struct mrt6msg *msg;
600 int ret;
601
602 skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
603
604 if (!skb)
605 return -ENOBUFS;
606
607 /* I suppose that internal messages
608 * do not require checksums */
609
610 skb->ip_summed = CHECKSUM_UNNECESSARY;
611
612 /*
613 * Copy the IP header
614 */
615
616 skb_put(skb, sizeof(struct ipv6hdr));
617 skb_reset_network_header(skb);
618 skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
619
620 /*
621 * Add our header
622 */
623 skb_put(skb, sizeof(*msg));
624 skb_reset_transport_header(skb);
625 msg = (struct mrt6msg *)skb_transport_header(skb);
626
627 msg->im6_mbz = 0;
628 msg->im6_msgtype = assert;
629 msg->im6_mif = vifi;
630 msg->im6_pad = 0;
631 ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
632 ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
633
634 skb->dst = dst_clone(pkt->dst);
635 skb->ip_summed = CHECKSUM_UNNECESSARY;
636
637 skb_pull(skb, sizeof(struct ipv6hdr));
638
639 if (mroute6_socket == NULL) {
640 kfree_skb(skb);
641 return -EINVAL;
642 }
643
644 /*
645 * Deliver to user space multicast routing algorithms
646 */
647 if ((ret = sock_queue_rcv_skb(mroute6_socket, skb)) < 0) {
648 if (net_ratelimit())
649 printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
650 kfree_skb(skb);
651 }
652
653 return ret;
654}
655
656/*
657 * Queue a packet for resolution. It gets locked cache entry!
658 */
659
660static int
661ip6mr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
662{
663 int err;
664 struct mfc6_cache *c;
665
666 spin_lock_bh(&mfc_unres_lock);
667 for (c = mfc_unres_queue; c; c = c->next) {
668 if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
669 ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
670 break;
671 }
672
673 if (c == NULL) {
674 /*
675 * Create a new entry if allowable
676 */
677
678 if (atomic_read(&cache_resolve_queue_len) >= 10 ||
679 (c = ip6mr_cache_alloc_unres()) == NULL) {
680 spin_unlock_bh(&mfc_unres_lock);
681
682 kfree_skb(skb);
683 return -ENOBUFS;
684 }
685
686 /*
687 * Fill in the new cache entry
688 */
689 c->mf6c_parent = -1;
690 c->mf6c_origin = ipv6_hdr(skb)->saddr;
691 c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
692
693 /*
694 * Reflect first query at pim6sd
695 */
696 if ((err = ip6mr_cache_report(skb, vifi, MRT6MSG_NOCACHE)) < 0) {
697 /* If the report failed throw the cache entry
698 out - Brad Parker
699 */
700 spin_unlock_bh(&mfc_unres_lock);
701
702 kmem_cache_free(mrt_cachep, c);
703 kfree_skb(skb);
704 return err;
705 }
706
707 atomic_inc(&cache_resolve_queue_len);
708 c->next = mfc_unres_queue;
709 mfc_unres_queue = c;
710
711 ipmr_do_expire_process(1);
712 }
713
714 /*
715 * See if we can append the packet
716 */
717 if (c->mfc_un.unres.unresolved.qlen > 3) {
718 kfree_skb(skb);
719 err = -ENOBUFS;
720 } else {
721 skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
722 err = 0;
723 }
724
725 spin_unlock_bh(&mfc_unres_lock);
726 return err;
727}
728
729/*
730 * MFC6 cache manipulation by user space
731 */
732
733static int ip6mr_mfc_delete(struct mf6cctl *mfc)
734{
735 int line;
736 struct mfc6_cache *c, **cp;
737
738 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
739
740 for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
741 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
742 ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
743 write_lock_bh(&mrt_lock);
744 *cp = c->next;
745 write_unlock_bh(&mrt_lock);
746
747 kmem_cache_free(mrt_cachep, c);
748 return 0;
749 }
750 }
751 return -ENOENT;
752}
753
754static int ip6mr_device_event(struct notifier_block *this,
755 unsigned long event, void *ptr)
756{
757 struct net_device *dev = ptr;
758 struct mif_device *v;
759 int ct;
760
761 if (dev_net(dev) != &init_net)
762 return NOTIFY_DONE;
763
764 if (event != NETDEV_UNREGISTER)
765 return NOTIFY_DONE;
766
767 v = &vif6_table[0];
768 for (ct = 0; ct < maxvif; ct++, v++) {
769 if (v->dev == dev)
770 mif6_delete(ct);
771 }
772 return NOTIFY_DONE;
773}
774
775static struct notifier_block ip6_mr_notifier = {
776 .notifier_call = ip6mr_device_event
777};
778
779/*
780 * Setup for IP multicast routing
781 */
782
783void __init ip6_mr_init(void)
784{
785 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
786 sizeof(struct mfc6_cache),
787 0, SLAB_HWCACHE_ALIGN,
788 NULL);
789 if (!mrt_cachep)
790 panic("cannot allocate ip6_mrt_cache");
791
792 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
793 register_netdevice_notifier(&ip6_mr_notifier);
794#ifdef CONFIG_PROC_FS
795 proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops);
796 proc_net_fops_create(&init_net, "ip6_mr_cache", 0, &ip6mr_mfc_fops);
797#endif
798}
799
800
801static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock)
802{
803 int line;
804 struct mfc6_cache *uc, *c, **cp;
805 unsigned char ttls[MAXVIFS];
806 int i;
807
808 memset(ttls, 255, MAXVIFS);
809 for (i = 0; i < MAXVIFS; i++) {
810 if (IF_ISSET(i, &mfc->mf6cc_ifset))
811 ttls[i] = 1;
812
813 }
814
815 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
816
817 for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
818 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
819 ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr))
820 break;
821 }
822
823 if (c != NULL) {
824 write_lock_bh(&mrt_lock);
825 c->mf6c_parent = mfc->mf6cc_parent;
826 ip6mr_update_thresholds(c, ttls);
827 if (!mrtsock)
828 c->mfc_flags |= MFC_STATIC;
829 write_unlock_bh(&mrt_lock);
830 return 0;
831 }
832
833 if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
834 return -EINVAL;
835
836 c = ip6mr_cache_alloc();
837 if (c == NULL)
838 return -ENOMEM;
839
840 c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
841 c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
842 c->mf6c_parent = mfc->mf6cc_parent;
843 ip6mr_update_thresholds(c, ttls);
844 if (!mrtsock)
845 c->mfc_flags |= MFC_STATIC;
846
847 write_lock_bh(&mrt_lock);
848 c->next = mfc6_cache_array[line];
849 mfc6_cache_array[line] = c;
850 write_unlock_bh(&mrt_lock);
851
852 /*
853 * Check to see if we resolved a queued list. If so we
854 * need to send on the frames and tidy up.
855 */
856 spin_lock_bh(&mfc_unres_lock);
857 for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
858 cp = &uc->next) {
859 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
860 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
861 *cp = uc->next;
862 if (atomic_dec_and_test(&cache_resolve_queue_len))
863 del_timer(&ipmr_expire_timer);
864 break;
865 }
866 }
867 spin_unlock_bh(&mfc_unres_lock);
868
869 if (uc) {
870 ip6mr_cache_resolve(uc, c);
871 kmem_cache_free(mrt_cachep, uc);
872 }
873 return 0;
874}
875
876/*
877 * Close the multicast socket, and clear the vif tables etc
878 */
879
880static void mroute_clean_tables(struct sock *sk)
881{
882 int i;
883
884 /*
885 * Shut down all active vif entries
886 */
887 for (i = 0; i < maxvif; i++) {
888 if (!(vif6_table[i].flags & VIFF_STATIC))
889 mif6_delete(i);
890 }
891
892 /*
893 * Wipe the cache
894 */
895 for (i = 0; i < ARRAY_SIZE(mfc6_cache_array); i++) {
896 struct mfc6_cache *c, **cp;
897
898 cp = &mfc6_cache_array[i];
899 while ((c = *cp) != NULL) {
900 if (c->mfc_flags & MFC_STATIC) {
901 cp = &c->next;
902 continue;
903 }
904 write_lock_bh(&mrt_lock);
905 *cp = c->next;
906 write_unlock_bh(&mrt_lock);
907
908 kmem_cache_free(mrt_cachep, c);
909 }
910 }
911
912 if (atomic_read(&cache_resolve_queue_len) != 0) {
913 struct mfc6_cache *c;
914
915 spin_lock_bh(&mfc_unres_lock);
916 while (mfc_unres_queue != NULL) {
917 c = mfc_unres_queue;
918 mfc_unres_queue = c->next;
919 spin_unlock_bh(&mfc_unres_lock);
920
921 ip6mr_destroy_unres(c);
922
923 spin_lock_bh(&mfc_unres_lock);
924 }
925 spin_unlock_bh(&mfc_unres_lock);
926 }
927}
928
929static int ip6mr_sk_init(struct sock *sk)
930{
931 int err = 0;
932
933 rtnl_lock();
934 write_lock_bh(&mrt_lock);
935 if (likely(mroute6_socket == NULL))
936 mroute6_socket = sk;
937 else
938 err = -EADDRINUSE;
939 write_unlock_bh(&mrt_lock);
940
941 rtnl_unlock();
942
943 return err;
944}
945
946int ip6mr_sk_done(struct sock *sk)
947{
948 int err = 0;
949
950 rtnl_lock();
951 if (sk == mroute6_socket) {
952 write_lock_bh(&mrt_lock);
953 mroute6_socket = NULL;
954 write_unlock_bh(&mrt_lock);
955
956 mroute_clean_tables(sk);
957 } else
958 err = -EACCES;
959 rtnl_unlock();
960
961 return err;
962}
963
964/*
965 * Socket options and virtual interface manipulation. The whole
966 * virtual interface system is a complete heap, but unfortunately
967 * that's how BSD mrouted happens to think. Maybe one day with a proper
968 * MOSPF/PIM router set up we can clean this up.
969 */
970
971int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
972{
973 int ret;
974 struct mif6ctl vif;
975 struct mf6cctl mfc;
976 mifi_t mifi;
977
978 if (optname != MRT6_INIT) {
979 if (sk != mroute6_socket && !capable(CAP_NET_ADMIN))
980 return -EACCES;
981 }
982
983 switch (optname) {
984 case MRT6_INIT:
985 if (sk->sk_type != SOCK_RAW ||
986 inet_sk(sk)->num != IPPROTO_ICMPV6)
987 return -EOPNOTSUPP;
988 if (optlen < sizeof(int))
989 return -EINVAL;
990
991 return ip6mr_sk_init(sk);
992
993 case MRT6_DONE:
994 return ip6mr_sk_done(sk);
995
996 case MRT6_ADD_MIF:
997 if (optlen < sizeof(vif))
998 return -EINVAL;
999 if (copy_from_user(&vif, optval, sizeof(vif)))
1000 return -EFAULT;
1001 if (vif.mif6c_mifi >= MAXVIFS)
1002 return -ENFILE;
1003 rtnl_lock();
1004 ret = mif6_add(&vif, sk == mroute6_socket);
1005 rtnl_unlock();
1006 return ret;
1007
1008 case MRT6_DEL_MIF:
1009 if (optlen < sizeof(mifi_t))
1010 return -EINVAL;
1011 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1012 return -EFAULT;
1013 rtnl_lock();
1014 ret = mif6_delete(mifi);
1015 rtnl_unlock();
1016 return ret;
1017
1018 /*
1019 * Manipulate the forwarding caches. These live
1020 * in a sort of kernel/user symbiosis.
1021 */
1022 case MRT6_ADD_MFC:
1023 case MRT6_DEL_MFC:
1024 if (optlen < sizeof(mfc))
1025 return -EINVAL;
1026 if (copy_from_user(&mfc, optval, sizeof(mfc)))
1027 return -EFAULT;
1028 rtnl_lock();
1029 if (optname == MRT6_DEL_MFC)
1030 ret = ip6mr_mfc_delete(&mfc);
1031 else
1032 ret = ip6mr_mfc_add(&mfc, sk == mroute6_socket);
1033 rtnl_unlock();
1034 return ret;
1035
1036 /*
1037 * Spurious command, or MRT_VERSION which you cannot
1038 * set.
1039 */
1040 default:
1041 return -ENOPROTOOPT;
1042 }
1043}
1044
1045/*
1046 * Getsock opt support for the multicast routing system.
1047 */
1048
1049int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1050 int __user *optlen)
1051{
1052 int olr;
1053 int val;
1054
1055 switch (optname) {
1056 case MRT6_VERSION:
1057 val = 0x0305;
1058 break;
1059 default:
1060 return -ENOPROTOOPT;
1061 }
1062
1063 if (get_user(olr, optlen))
1064 return -EFAULT;
1065
1066 olr = min_t(int, olr, sizeof(int));
1067 if (olr < 0)
1068 return -EINVAL;
1069
1070 if (put_user(olr, optlen))
1071 return -EFAULT;
1072 if (copy_to_user(optval, &val, olr))
1073 return -EFAULT;
1074 return 0;
1075}
1076
1077/*
1078 * The IP multicast ioctl support routines.
1079 */
1080
1081int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1082{
1083 struct sioc_sg_req6 sr;
1084 struct sioc_mif_req6 vr;
1085 struct mif_device *vif;
1086 struct mfc6_cache *c;
1087
1088 switch (cmd) {
1089 case SIOCGETMIFCNT_IN6:
1090 if (copy_from_user(&vr, arg, sizeof(vr)))
1091 return -EFAULT;
1092 if (vr.mifi >= maxvif)
1093 return -EINVAL;
1094 read_lock(&mrt_lock);
1095 vif = &vif6_table[vr.mifi];
1096 if (MIF_EXISTS(vr.mifi)) {
1097 vr.icount = vif->pkt_in;
1098 vr.ocount = vif->pkt_out;
1099 vr.ibytes = vif->bytes_in;
1100 vr.obytes = vif->bytes_out;
1101 read_unlock(&mrt_lock);
1102
1103 if (copy_to_user(arg, &vr, sizeof(vr)))
1104 return -EFAULT;
1105 return 0;
1106 }
1107 read_unlock(&mrt_lock);
1108 return -EADDRNOTAVAIL;
1109 case SIOCGETSGCNT_IN6:
1110 if (copy_from_user(&sr, arg, sizeof(sr)))
1111 return -EFAULT;
1112
1113 read_lock(&mrt_lock);
1114 c = ip6mr_cache_find(&sr.src.sin6_addr, &sr.grp.sin6_addr);
1115 if (c) {
1116 sr.pktcnt = c->mfc_un.res.pkt;
1117 sr.bytecnt = c->mfc_un.res.bytes;
1118 sr.wrong_if = c->mfc_un.res.wrong_if;
1119 read_unlock(&mrt_lock);
1120
1121 if (copy_to_user(arg, &sr, sizeof(sr)))
1122 return -EFAULT;
1123 return 0;
1124 }
1125 read_unlock(&mrt_lock);
1126 return -EADDRNOTAVAIL;
1127 default:
1128 return -ENOIOCTLCMD;
1129 }
1130}
1131
1132
1133static inline int ip6mr_forward2_finish(struct sk_buff *skb)
1134{
1135 /* XXX stats */
1136 return dst_output(skb);
1137}
1138
1139/*
1140 * Processing handlers for ip6mr_forward
1141 */
1142
1143static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
1144{
1145 struct ipv6hdr *ipv6h;
1146 struct mif_device *vif = &vif6_table[vifi];
1147 struct net_device *dev;
1148 struct dst_entry *dst;
1149 struct flowi fl;
1150
1151 if (vif->dev == NULL)
1152 goto out_free;
1153
1154 ipv6h = ipv6_hdr(skb);
1155
1156 fl = (struct flowi) {
1157 .oif = vif->link,
1158 .nl_u = { .ip6_u =
1159 { .daddr = ipv6h->daddr, }
1160 }
1161 };
1162
1163 dst = ip6_route_output(&init_net, NULL, &fl);
1164 if (!dst)
1165 goto out_free;
1166
1167 dst_release(skb->dst);
1168 skb->dst = dst;
1169
1170 /*
1171 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
1172 * not only before forwarding, but after forwarding on all output
1173 * interfaces. It is clear, if mrouter runs a multicasting
1174 * program, it should receive packets not depending to what interface
1175 * program is joined.
1176 * If we will not make it, the program will have to join on all
1177 * interfaces. On the other hand, multihoming host (or router, but
1178 * not mrouter) cannot join to more than one interface - it will
1179 * result in receiving multiple packets.
1180 */
1181 dev = vif->dev;
1182 skb->dev = dev;
1183 vif->pkt_out++;
1184 vif->bytes_out += skb->len;
1185
1186 /* We are about to write */
1187 /* XXX: extension headers? */
1188 if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
1189 goto out_free;
1190
1191 ipv6h = ipv6_hdr(skb);
1192 ipv6h->hop_limit--;
1193
1194 IP6CB(skb)->flags |= IP6SKB_FORWARDED;
1195
1196 return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dev,
1197 ip6mr_forward2_finish);
1198
1199out_free:
1200 kfree_skb(skb);
1201 return 0;
1202}
1203
1204static int ip6mr_find_vif(struct net_device *dev)
1205{
1206 int ct;
1207 for (ct = maxvif - 1; ct >= 0; ct--) {
1208 if (vif6_table[ct].dev == dev)
1209 break;
1210 }
1211 return ct;
1212}
1213
1214static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
1215{
1216 int psend = -1;
1217 int vif, ct;
1218
1219 vif = cache->mf6c_parent;
1220 cache->mfc_un.res.pkt++;
1221 cache->mfc_un.res.bytes += skb->len;
1222
1223 vif6_table[vif].pkt_in++;
1224 vif6_table[vif].bytes_in += skb->len;
1225
1226 /*
1227 * Forward the frame
1228 */
1229 for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
1230 if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
1231 if (psend != -1) {
1232 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1233 if (skb2)
1234 ip6mr_forward2(skb2, cache, psend);
1235 }
1236 psend = ct;
1237 }
1238 }
1239 if (psend != -1) {
1240 ip6mr_forward2(skb, cache, psend);
1241 return 0;
1242 }
1243
1244 kfree_skb(skb);
1245 return 0;
1246}
1247
1248
1249/*
1250 * Multicast packets for forwarding arrive here
1251 */
1252
1253int ip6_mr_input(struct sk_buff *skb)
1254{
1255 struct mfc6_cache *cache;
1256
1257 read_lock(&mrt_lock);
1258 cache = ip6mr_cache_find(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
1259
1260 /*
1261 * No usable cache entry
1262 */
1263 if (cache == NULL) {
1264 int vif;
1265
1266 vif = ip6mr_find_vif(skb->dev);
1267 if (vif >= 0) {
1268 int err = ip6mr_cache_unresolved(vif, skb);
1269 read_unlock(&mrt_lock);
1270
1271 return err;
1272 }
1273 read_unlock(&mrt_lock);
1274 kfree_skb(skb);
1275 return -ENODEV;
1276 }
1277
1278 ip6_mr_forward(skb, cache);
1279
1280 read_unlock(&mrt_lock);
1281
1282 return 0;
1283}
1284
1285
1286static int
1287ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
1288{
1289 int ct;
1290 struct rtnexthop *nhp;
1291 struct net_device *dev = vif6_table[c->mf6c_parent].dev;
1292 u8 *b = skb->tail;
1293 struct rtattr *mp_head;
1294
1295 if (dev)
1296 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
1297
1298 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
1299
1300 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1301 if (c->mfc_un.res.ttls[ct] < 255) {
1302 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1303 goto rtattr_failure;
1304 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
1305 nhp->rtnh_flags = 0;
1306 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
1307 nhp->rtnh_ifindex = vif6_table[ct].dev->ifindex;
1308 nhp->rtnh_len = sizeof(*nhp);
1309 }
1310 }
1311 mp_head->rta_type = RTA_MULTIPATH;
1312 mp_head->rta_len = skb->tail - (u8 *)mp_head;
1313 rtm->rtm_type = RTN_MULTICAST;
1314 return 1;
1315
1316rtattr_failure:
1317 nlmsg_trim(skb, b);
1318 return -EMSGSIZE;
1319}
1320
1321int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1322{
1323 int err;
1324 struct mfc6_cache *cache;
1325 struct rt6_info *rt = (struct rt6_info *)skb->dst;
1326
1327 read_lock(&mrt_lock);
1328 cache = ip6mr_cache_find(&rt->rt6i_src.addr, &rt->rt6i_dst.addr);
1329
1330 if (!cache) {
1331 struct sk_buff *skb2;
1332 struct ipv6hdr *iph;
1333 struct net_device *dev;
1334 int vif;
1335
1336 if (nowait) {
1337 read_unlock(&mrt_lock);
1338 return -EAGAIN;
1339 }
1340
1341 dev = skb->dev;
1342 if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) {
1343 read_unlock(&mrt_lock);
1344 return -ENODEV;
1345 }
1346
1347 /* really correct? */
1348 skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
1349 if (!skb2) {
1350 read_unlock(&mrt_lock);
1351 return -ENOMEM;
1352 }
1353
1354 skb_reset_transport_header(skb2);
1355
1356 skb_put(skb2, sizeof(struct ipv6hdr));
1357 skb_reset_network_header(skb2);
1358
1359 iph = ipv6_hdr(skb2);
1360 iph->version = 0;
1361 iph->priority = 0;
1362 iph->flow_lbl[0] = 0;
1363 iph->flow_lbl[1] = 0;
1364 iph->flow_lbl[2] = 0;
1365 iph->payload_len = 0;
1366 iph->nexthdr = IPPROTO_NONE;
1367 iph->hop_limit = 0;
1368 ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
1369 ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);
1370
1371 err = ip6mr_cache_unresolved(vif, skb2);
1372 read_unlock(&mrt_lock);
1373
1374 return err;
1375 }
1376
1377 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
1378 cache->mfc_flags |= MFC_NOTIFY;
1379
1380 err = ip6mr_fill_mroute(skb, cache, rtm);
1381 read_unlock(&mrt_lock);
1382 return err;
1383}
1384
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 4195ac92345e..99624109c010 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -33,6 +33,7 @@
 #include <linux/sockios.h>
 #include <linux/net.h>
 #include <linux/in6.h>
+#include <linux/mroute6.h>
 #include <linux/netdevice.h>
 #include <linux/if_arp.h>
 #include <linux/init.h>
@@ -118,6 +119,9 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 
 	valbool = (val!=0);
 
+	if (ip6_mroute_opt(optname))
+		return ip6_mroute_setsockopt(sk, optname, optval, optlen);
+
 	lock_sock(sk);
 
 	switch (optname) {
@@ -790,6 +794,9 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 	int len;
 	int val;
 
+	if (ip6_mroute_opt(optname))
+		return ip6_mroute_getsockopt(sk, optname, optval, optlen);
+
 	if (get_user(len, optlen))
 		return -EFAULT;
 	switch (optname) {
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index aae6cedf1709..088b80b4ce74 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -53,6 +53,7 @@
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
 #include <net/mip6.h>
 #endif
+#include <linux/mroute6.h>
 
 #include <net/raw.h>
 #include <net/rawv6.h>
@@ -1135,7 +1136,11 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
 	}
 
 	default:
+#ifdef CONFIG_IPV6_MROUTE
+		return ip6mr_ioctl(sk, cmd, (void __user *)arg);
+#else
 		return -ENOIOCTLCMD;
+#endif
 	}
 }
 
@@ -1143,7 +1148,7 @@ static void rawv6_close(struct sock *sk, long timeout)
 {
 	if (inet_sk(sk)->num == IPPROTO_RAW)
 		ip6_ra_control(sk, -1, NULL);
-
+	ip6mr_sk_done(sk);
 	sk_common_release(sk);
 }
 
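
With rawv6_ioctl() now falling through to ip6mr_ioctl(), the SIOCGETMIFCNT_IN6 and SIOCGETSGCNT_IN6 requests declared in mroute6.h become reachable on the control socket. A small sketch (hypothetical helper, not from the patch) of reading the per-MIF counters that ip6mr_ioctl() fills into struct sioc_mif_req6:

/*
 * Sketch only (hypothetical helper): read per-MIF counters through the
 * ioctl path that raw.c now forwards to ip6mr_ioctl().  `s` is the raw
 * ICMPv6 control socket on which MRT6_INIT succeeded.
 */
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/mroute6.h>

static void dump_mif_counters(int s, mifi_t mifi)
{
	struct sioc_mif_req6 vr;

	memset(&vr, 0, sizeof(vr));
	vr.mifi = mifi;
	if (ioctl(s, SIOCGETMIFCNT_IN6, &vr) < 0) {
		perror("SIOCGETMIFCNT_IN6");	/* EINVAL/EADDRNOTAVAIL for unknown MIFs */
		return;
	}
	printf("mif %u: in %lu pkts / %lu bytes, out %lu pkts / %lu bytes\n",
	       (unsigned int)mifi, vr.icount, vr.ibytes, vr.ocount, vr.obytes);
}
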
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index cd82b6db35ff..3c314d5f46c6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -36,6 +36,7 @@
 #include <linux/route.h>
 #include <linux/netdevice.h>
 #include <linux/in6.h>
+#include <linux/mroute6.h>
 #include <linux/init.h>
 #include <linux/if_arp.h>
 #include <linux/proc_fs.h>
@@ -2106,7 +2107,7 @@ static inline size_t rt6_nlmsg_size(void)
 static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
 			 struct in6_addr *dst, struct in6_addr *src,
 			 int iif, int type, u32 pid, u32 seq,
-			 int prefix, unsigned int flags)
+			 int prefix, int nowait, unsigned int flags)
 {
 	struct rtmsg *rtm;
 	struct nlmsghdr *nlh;
@@ -2166,9 +2167,24 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
 	} else if (rtm->rtm_src_len)
 		NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
 #endif
-	if (iif)
-		NLA_PUT_U32(skb, RTA_IIF, iif);
-	else if (dst) {
+	if (iif) {
+#ifdef CONFIG_IPV6_MROUTE
+		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
+			int err = ip6mr_get_route(skb, rtm, nowait);
+			if (err <= 0) {
+				if (!nowait) {
+					if (err == 0)
+						return 0;
+					goto nla_put_failure;
+				} else {
+					if (err == -EMSGSIZE)
+						goto nla_put_failure;
+				}
+			}
+		} else
+#endif
+			NLA_PUT_U32(skb, RTA_IIF, iif);
+	} else if (dst) {
 		struct in6_addr saddr_buf;
 		if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
 				       dst, 0, &saddr_buf) == 0)
@@ -2211,7 +2227,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg)
 
 	return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
 		     NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
-		     prefix, NLM_F_MULTI);
+		     prefix, 0, NLM_F_MULTI);
 }
 
 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
@@ -2277,7 +2293,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
 
 	err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
 			    RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
-			    nlh->nlmsg_seq, 0, 0);
+			    nlh->nlmsg_seq, 0, 0, 0);
 	if (err < 0) {
 		kfree_skb(skb);
 		goto errout;
@@ -2303,7 +2319,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
 		goto errout;
 
 	err = rt6_fill_node(skb, rt, NULL, NULL, 0,
-			    event, info->pid, seq, 0, 0);
+			    event, info->pid, seq, 0, 0, 0);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
 		WARN_ON(err == -EMSGSIZE);