aboutsummaryrefslogtreecommitdiffstats
path: root/net/802
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-02-20 21:58:50 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-20 21:58:50 -0500
commita0b1c42951dd06ec83cc1bc2c9788131d9fefcd8 (patch)
treea572f1523cf904c93020c9cdb32f3bc84ec3ac16 /net/802
parent8ec4942212a6d337982967778a3dc3b60aea782e (diff)
parentecd9883724b78cc72ed92c98bcb1a46c764fff21 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking update from David Miller: 1) Checkpoint/restarted TCP sockets now can properly propagate the TCP timestamp offset. From Andrey Vagin. 2) VMWARE VM VSOCK layer, from Andy King. 3) Much improved support for virtual functions and SR-IOV in bnx2x, from Ariel Elior. 4) All protocols on ipv4 and ipv6 are now network namespace aware, and all the compatibility checks for initial-namespace-only protocols are removed. Thanks to Tom Parkin for helping deal with the last major holdout, L2TP. 5) IPV6 support in netpoll and network namespace support in pktgen, from Cong Wang. 6) Multiple Registration Protocol (MRP) and Multiple VLAN Registration Protocol (MVRP) support, from David Ward. 7) Compute packet lengths more accurately in the packet scheduler, from Eric Dumazet. 8) Use per-task page fragment allocator in skb_append_datato_frags(), also from Eric Dumazet. 9) Add support for connection tracking labels in netfilter, from Florian Westphal. 10) Fix default multicast group joining on ipv6, and add anti-spoofing checks to 6to4 and 6rd. From Hannes Frederic Sowa. 11) Make ipv4/ipv6 fragmentation memory limits more reasonable in modern times, rearrange inet frag datastructures for better cacheline locality, and move more operations outside of locking. From Jesper Dangaard Brouer. 12) Instead of strict master <--> slave relationships, allow arbitrary scenarios with "upper device lists". From Jiri Pirko. 13) Improve rate limiting accuracy in TBF and act_police, also from Jiri Pirko. 14) Add a BPF filter netfilter match target, from Willem de Bruijn. 15) Orphan and delete a bunch of pre-historic networking drivers from Paul Gortmaker. 16) Add TSO support for GRE tunnels, from Pravin B Shelar. Although this still needs some minor bug fixing before it's 100% correct in all cases. 17) Handle unresolved IPSEC states like ARP, with a resolution packet queue. From Steffen Klassert. 18) Remove TCP Appropriate Byte Count support (ABC), from Stephen Hemminger. 
This was long overdue. 19) Support SO_REUSEPORT, from Tom Herbert. 20) Allow locking a socket BPF filter, so that it cannot change after a process drops capabilities. 21) Add VLAN filtering to bridge, from Vlad Yasevich. 22) Bring ipv6 on-par with ipv4 and do not cache neighbour entries in the ipv6 routes, from YOSHIFUJI Hideaki. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1538 commits) ipv6: fix race condition regarding dst->expires and dst->from. net: fix a wrong assignment in skb_split() ip_gre: remove an extra dst_release() ppp: set qdisc_tx_busylock to avoid LOCKDEP splat atl1c: restore buffer state net: fix a build failure when !CONFIG_PROC_FS net: ipv4: fix waring -Wunused-variable net: proc: fix build failed when procfs is not configured Revert "xen: netback: remove redundant xenvif_put" net: move procfs code to net/core/net-procfs.c qmi_wwan, cdc-ether: add ADU960S bonding: set sysfs device_type to 'bond' bonding: fix bond_release_all inconsistencies b44: use netdev_alloc_skb_ip_align() xen: netback: remove redundant xenvif_put net: fec: Do a sanity check on the gpio number ip_gre: propogate target device GSO capability to the tunnel device ip_gre: allow CSUM capable devices to handle packets bonding: Fix initialize after use for 3ad machine state spinlock bonding: Fix race condition between bond_enslave() and bond_3ad_update_lacp_rate() ...
Diffstat (limited to 'net/802')
-rw-r--r--net/802/Kconfig3
-rw-r--r--net/802/Makefile1
-rw-r--r--net/802/mrp.c895
3 files changed, 899 insertions, 0 deletions
diff --git a/net/802/Kconfig b/net/802/Kconfig
index be33d27c8e69..80d4bf78905d 100644
--- a/net/802/Kconfig
+++ b/net/802/Kconfig
@@ -5,3 +5,6 @@ config STP
5config GARP 5config GARP
6 tristate 6 tristate
7 select STP 7 select STP
8
9config MRP
10 tristate
diff --git a/net/802/Makefile b/net/802/Makefile
index a30d6e385aed..37e654d6615e 100644
--- a/net/802/Makefile
+++ b/net/802/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IPX) += p8022.o psnap.o p8023.o
11obj-$(CONFIG_ATALK) += p8022.o psnap.o 11obj-$(CONFIG_ATALK) += p8022.o psnap.o
12obj-$(CONFIG_STP) += stp.o 12obj-$(CONFIG_STP) += stp.o
13obj-$(CONFIG_GARP) += garp.o 13obj-$(CONFIG_GARP) += garp.o
14obj-$(CONFIG_MRP) += mrp.o
diff --git a/net/802/mrp.c b/net/802/mrp.c
new file mode 100644
index 000000000000..a4cc3229952a
--- /dev/null
+++ b/net/802/mrp.c
@@ -0,0 +1,895 @@
1/*
2 * IEEE 802.1Q Multiple Registration Protocol (MRP)
3 *
4 * Copyright (c) 2012 Massachusetts Institute of Technology
5 *
6 * Adapted from code in net/802/garp.c
7 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 */
13#include <linux/kernel.h>
14#include <linux/timer.h>
15#include <linux/skbuff.h>
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18#include <linux/rtnetlink.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <net/mrp.h>
22#include <asm/unaligned.h>
23
/* Interval between transmit opportunities (the Join timer period) in
 * milliseconds; runtime-writable via the module parameter (mode 0644).
 */
static unsigned int mrp_join_time __read_mostly = 200;
module_param(mrp_join_time, uint, 0644);
MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
MODULE_LICENSE("GPL");
28
/* Applicant state machine transition table (cf. the MRP applicant state
 * table in IEEE 802.1Q).  Indexed by [current applicant state][event];
 * yields the next applicant state.  State/event pairs not listed here
 * are zero-filled by the designated initializers; mrp_attr_event()
 * WARNs on MRP_APPLICANT_INVALID results, so presumably
 * MRP_APPLICANT_INVALID is 0 — confirm against net/mrp.h.
 */
static const u8
mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
	[MRP_APPLICANT_VO] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
		[MRP_EVENT_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_TX] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VO,
	},
	[MRP_APPLICANT_VP] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
		[MRP_EVENT_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_TX] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VP,
	},
	[MRP_APPLICANT_VN] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_VN,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VN,
	},
	[MRP_APPLICANT_AN] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_AN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AN,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AN,
	},
	[MRP_APPLICANT_AA] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_QA] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_QA,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_LA] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_LA,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_LA,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_LA,
	},
	[MRP_APPLICANT_AO] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
		[MRP_EVENT_LV] = MRP_APPLICANT_AO,
		[MRP_EVENT_TX] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AO,
	},
	[MRP_APPLICANT_QO] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
		[MRP_EVENT_LV] = MRP_APPLICANT_QO,
		[MRP_EVENT_TX] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_QO,
	},
	[MRP_APPLICANT_AP] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
		[MRP_EVENT_LV] = MRP_APPLICANT_AO,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
	},
	[MRP_APPLICANT_QP] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
		[MRP_EVENT_LV] = MRP_APPLICANT_QO,
		[MRP_EVENT_TX] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
	},
};
197
/* Per-state transmit action, consulted by mrp_attr_event() on a TX
 * event: which attribute event (New, JoinIn, Leave, ...) to encode
 * into the outgoing PDU for an attribute in the given applicant state.
 */
static const u8
mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
	[MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
	[MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
	[MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
};
212
213static void mrp_attrvalue_inc(void *value, u8 len)
214{
215 u8 *v = (u8 *)value;
216
217 /* Add 1 to the last byte. If it becomes zero,
218 * go to the previous byte and repeat.
219 */
220 while (len > 0 && !++v[--len])
221 ;
222}
223
224static int mrp_attr_cmp(const struct mrp_attr *attr,
225 const void *value, u8 len, u8 type)
226{
227 if (attr->type != type)
228 return attr->type - type;
229 if (attr->len != len)
230 return attr->len - len;
231 return memcmp(attr->value, value, len);
232}
233
234static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
235 const void *value, u8 len, u8 type)
236{
237 struct rb_node *parent = app->mad.rb_node;
238 struct mrp_attr *attr;
239 int d;
240
241 while (parent) {
242 attr = rb_entry(parent, struct mrp_attr, node);
243 d = mrp_attr_cmp(attr, value, len, type);
244 if (d > 0)
245 parent = parent->rb_left;
246 else if (d < 0)
247 parent = parent->rb_right;
248 else
249 return attr;
250 }
251 return NULL;
252}
253
254static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
255 const void *value, u8 len, u8 type)
256{
257 struct rb_node *parent = NULL, **p = &app->mad.rb_node;
258 struct mrp_attr *attr;
259 int d;
260
261 while (*p) {
262 parent = *p;
263 attr = rb_entry(parent, struct mrp_attr, node);
264 d = mrp_attr_cmp(attr, value, len, type);
265 if (d > 0)
266 p = &parent->rb_left;
267 else if (d < 0)
268 p = &parent->rb_right;
269 else {
270 /* The attribute already exists; re-use it. */
271 return attr;
272 }
273 }
274 attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
275 if (!attr)
276 return attr;
277 attr->state = MRP_APPLICANT_VO;
278 attr->type = type;
279 attr->len = len;
280 memcpy(attr->value, value, len);
281
282 rb_link_node(&attr->node, parent, p);
283 rb_insert_color(&attr->node, &app->mad);
284 return attr;
285}
286
/* Unlink an attribute from the applicant's MAD rb-tree and free it. */
static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
{
	rb_erase(&attr->node, &app->mad);
	kfree(attr);
}
292
/* Start building a new PDU: allocate an MTU-sized skb, reserve
 * link-layer headroom, and write the MRP header carrying the
 * application's protocol version.  On success the in-progress PDU is
 * stashed in app->pdu.  Returns 0 or -ENOMEM.
 */
static int mrp_pdu_init(struct mrp_applicant *app)
{
	struct sk_buff *skb;
	struct mrp_pdu_hdr *ph;

	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
			GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb->dev = app->dev;
	skb->protocol = app->app->pkttype.type;
	skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	ph = (struct mrp_pdu_hdr *)__skb_put(skb, sizeof(*ph));
	ph->version = app->app->version;

	app->pdu = skb;
	return 0;
}
315
316static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
317{
318 __be16 *endmark;
319
320 if (skb_tailroom(app->pdu) < sizeof(*endmark))
321 return -1;
322 endmark = (__be16 *)__skb_put(app->pdu, sizeof(*endmark));
323 put_unaligned(MRP_END_MARK, endmark);
324 return 0;
325}
326
/* Finish the PDU under construction and move it to the transmit
 * queue.  No-op when no PDU is being built.
 */
static void mrp_pdu_queue(struct mrp_applicant *app)
{
	if (!app->pdu)
		return;

	/* With a Message still open, one EndMark closes the Message and
	 * a second closes the PDU; otherwise a single EndMark suffices.
	 * A failed append (no tailroom) is tolerated here.
	 */
	if (mrp_cb(app->pdu)->mh)
		mrp_pdu_append_end_mark(app);
	mrp_pdu_append_end_mark(app);

	/* Prepend the link-layer header, addressed to the application's
	 * multicast group.
	 */
	dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
			app->app->group_address, app->dev->dev_addr,
			app->pdu->len);

	skb_queue_tail(&app->queue, app->pdu);
	app->pdu = NULL;
}
343
344static void mrp_queue_xmit(struct mrp_applicant *app)
345{
346 struct sk_buff *skb;
347
348 while ((skb = skb_dequeue(&app->queue)))
349 dev_queue_xmit(skb);
350}
351
/* Open a new Message in the PDU under construction: close any Message
 * still open with an EndMark, then append a Message header recording
 * the attribute type and length.  Returns -1 when the PDU is out of
 * room, 0 on success.
 */
static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
				  u8 attrtype, u8 attrlen)
{
	struct mrp_msg_hdr *mh;

	if (mrp_cb(app->pdu)->mh) {
		if (mrp_pdu_append_end_mark(app) < 0)
			return -1;
		mrp_cb(app->pdu)->mh = NULL;
		mrp_cb(app->pdu)->vah = NULL;
	}

	if (skb_tailroom(app->pdu) < sizeof(*mh))
		return -1;
	mh = (struct mrp_msg_hdr *)__skb_put(app->pdu, sizeof(*mh));
	mh->attrtype = attrtype;
	mh->attrlen = attrlen;
	/* Track the open Message in the skb control block so later
	 * appends can detect a change of attribute type or length.
	 */
	mrp_cb(app->pdu)->mh = mh;
	return 0;
}
372
/* Open a new VectorAttribute in the PDU: append its header (lenflags
 * initialized to zero events) followed by the first attribute value.
 * Returns -1 when the PDU is out of room, 0 on success.
 */
static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
				      const void *firstattrvalue, u8 attrlen)
{
	struct mrp_vecattr_hdr *vah;

	if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
		return -1;
	vah = (struct mrp_vecattr_hdr *)__skb_put(app->pdu,
						  sizeof(*vah) + attrlen);
	put_unaligned(0, &vah->lenflags);
	memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
	mrp_cb(app->pdu)->vah = vah;
	/* Keep a scratch copy of the value in skb->cb; it is incremented
	 * per event so we can detect sequential follow-on attributes.
	 */
	memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
	return 0;
}
388
/* Encode one attribute event into the PDU being built, opening a new
 * Message and/or VectorAttribute as needed.  Whenever the current PDU
 * runs out of space, it is queued for transmission and encoding
 * restarts on a fresh PDU (the "goto queue" / "goto again" cycle).
 * Returns a negative errno only when a new PDU cannot be allocated.
 */
static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
					const struct mrp_attr *attr,
					enum mrp_vecattr_event vaevent)
{
	u16 len, pos;
	u8 *vaevents;
	int err;
again:
	if (!app->pdu) {
		err = mrp_pdu_init(app);
		if (err < 0)
			return err;
	}

	/* If there is no Message header in the PDU, or the Message header is
	 * for a different attribute type, add an EndMark (if necessary) and a
	 * new Message header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->mh ||
	    mrp_cb(app->pdu)->mh->attrtype != attr->type ||
	    mrp_cb(app->pdu)->mh->attrlen != attr->len) {
		if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
			goto queue;
	}

	/* If there is no VectorAttribute header for this Message in the PDU,
	 * or this attribute's value does not sequentially follow the previous
	 * attribute's value, add a new VectorAttribute header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->vah ||
	    memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
		if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
			goto queue;
	}

	/* len counts the events already in this Vector; pos is this
	 * event's slot within the current three-events-per-byte group.
	 */
	len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
	pos = len % 3;

	/* Events are packed into Vectors in the PDU, three to a byte. Add a
	 * byte to the end of the Vector if necessary.
	 */
	if (!pos) {
		if (skb_tailroom(app->pdu) < sizeof(u8))
			goto queue;
		vaevents = (u8 *)__skb_put(app->pdu, sizeof(u8));
	} else {
		vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
	}

	/* Base-__MRP_VECATTR_EVENT_MAX digit packing: first event in the
	 * high digit, second in the middle, third in the low digit.
	 */
	switch (pos) {
	case 0:
		*vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
				       __MRP_VECATTR_EVENT_MAX);
		break;
	case 1:
		*vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
		break;
	case 2:
		*vaevents += vaevent;
		break;
	default:
		WARN_ON(1);
	}

	/* Increment the length of the VectorAttribute in the PDU, as well as
	 * the value of the next attribute that would continue its Vector.
	 */
	put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
	mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);

	return 0;

queue:
	/* Out of room: flush the current PDU and retry on a fresh one. */
	mrp_pdu_queue(app);
	goto again;
}
465
/* Drive the applicant state machine for a single attribute.  On a TX
 * event, the action table may additionally require an encoding of the
 * attribute to be appended to the outgoing PDU.  All callers except
 * the final flush in mrp_uninit_applicant() hold app->lock.
 */
static void mrp_attr_event(struct mrp_applicant *app,
			   struct mrp_attr *attr, enum mrp_event event)
{
	enum mrp_applicant_state state;

	state = mrp_applicant_state_table[attr->state][event];
	if (state == MRP_APPLICANT_INVALID) {
		/* Hitting a zero-filled table slot indicates a bug in
		 * the tables or an out-of-range event.
		 */
		WARN_ON(1);
		return;
	}

	if (event == MRP_EVENT_TX) {
		/* When appending the attribute fails, don't update its state
		 * in order to retry at the next TX event.
		 */

		switch (mrp_tx_action_table[attr->state]) {
		case MRP_TX_ACTION_NONE:
		case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
		case MRP_TX_ACTION_S_IN_OPTIONAL:
			break;
		case MRP_TX_ACTION_S_NEW:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_NEW) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_JOIN_IN:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_LV:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_LV) < 0)
				return;
			/* As a pure applicant, sending a leave message
			 * implies that the attribute was unregistered and
			 * can be destroyed.
			 */
			mrp_attr_destroy(app, attr);
			return;
		default:
			WARN_ON(1);
		}
	}

	attr->state = state;
}
514
/* Begin declaring an attribute on behalf of the application (a JOIN
 * request from e.g. the VLAN code).  The value must be small enough
 * that, together with struct mrp_skb_cb bookkeeping, it fits in
 * skb->cb during PDU building.  Callers run under RTNL (the port and
 * applicant pointers are fetched with rtnl_dereference).  Returns 0,
 * or -ENOMEM when the value is too large or allocation fails.
 */
int mrp_request_join(const struct net_device *dev,
		     const struct mrp_application *appl,
		     const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    FIELD_SIZEOF(struct sk_buff, cb))
		return -ENOMEM;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_create(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return -ENOMEM;
	}
	mrp_attr_event(app, attr, MRP_EVENT_JOIN);
	spin_unlock_bh(&app->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_request_join);
539
/* Stop declaring an attribute (a LEAVE request from the application).
 * Silently ignores values that are too large for skb->cb or that are
 * not currently being declared.  Callers run under RTNL.
 */
void mrp_request_leave(const struct net_device *dev,
		       const struct mrp_application *appl,
		       const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    FIELD_SIZEOF(struct sk_buff, cb))
		return;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_lookup(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return;
	}
	mrp_attr_event(app, attr, MRP_EVENT_LV);
	spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(mrp_request_leave);
563
/* Deliver @event to every attribute in the MAD database.  The next
 * node is saved before the event is delivered because processing a TX
 * event may destroy the current attribute (MRP_TX_ACTION_S_LV path in
 * mrp_attr_event()).
 */
static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
{
	struct rb_node *node, *next;
	struct mrp_attr *attr;

	for (node = rb_first(&app->mad);
	     next = node ? rb_next(node) : NULL, node != NULL;
	     node = next) {
		attr = rb_entry(node, struct mrp_attr, node);
		mrp_attr_event(app, attr, event);
	}
}
576
/* (Re)arm the join timer with a pseudo-random delay in
 * [0, mrp_join_time) ms: the 32-bit random value scales the maximum
 * delay via a 64-bit multiply and a right shift by 32.
 */
static void mrp_join_timer_arm(struct mrp_applicant *app)
{
	unsigned long delay;

	delay = (u64)msecs_to_jiffies(mrp_join_time) * net_random() >> 32;
	mod_timer(&app->join_timer, jiffies + delay);
}
584
/* Join timer expiry: a transmit opportunity.  Run a TX event over all
 * declared attributes under the lock, queue the resulting PDU, then
 * transmit outside the lock and re-arm the timer.
 */
static void mrp_join_timer(unsigned long data)
{
	struct mrp_applicant *app = (struct mrp_applicant *)data;

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	mrp_queue_xmit(app);
	mrp_join_timer_arm(app);
}
597
/* Check for an EndMark at *offset in a received PDU.  Returns -1 both
 * when an EndMark is present (offset is advanced past it) and when two
 * octets cannot be read (end of skb) — callers stop parsing in either
 * case.  Returns 0, leaving offset untouched, when more data follows.
 */
static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
{
	__be16 endmark;

	if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
		return -1;
	if (endmark == MRP_END_MARK) {
		*offset += sizeof(endmark);
		return -1;
	}
	return 0;
}
610
611static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
612 struct sk_buff *skb,
613 enum mrp_vecattr_event vaevent)
614{
615 struct mrp_attr *attr;
616 enum mrp_event event;
617
618 attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
619 mrp_cb(skb)->mh->attrlen,
620 mrp_cb(skb)->mh->attrtype);
621 if (attr == NULL)
622 return;
623
624 switch (vaevent) {
625 case MRP_VECATTR_EVENT_NEW:
626 event = MRP_EVENT_R_NEW;
627 break;
628 case MRP_VECATTR_EVENT_JOIN_IN:
629 event = MRP_EVENT_R_JOIN_IN;
630 break;
631 case MRP_VECATTR_EVENT_IN:
632 event = MRP_EVENT_R_IN;
633 break;
634 case MRP_VECATTR_EVENT_JOIN_MT:
635 event = MRP_EVENT_R_JOIN_MT;
636 break;
637 case MRP_VECATTR_EVENT_MT:
638 event = MRP_EVENT_R_MT;
639 break;
640 case MRP_VECATTR_EVENT_LV:
641 event = MRP_EVENT_R_LV;
642 break;
643 default:
644 return;
645 }
646
647 mrp_attr_event(app, attr, event);
648}
649
/* Parse one VectorAttribute of a received PDU: its header (LeaveAll
 * flag plus event count), the first attribute value, then the Vector
 * of events packed three per octet.  Decoded events are delivered to
 * the matching attributes.  Returns -1 on truncated or malformed
 * input, 0 otherwise.
 */
static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
				 struct sk_buff *skb, int *offset)
{
	struct mrp_vecattr_hdr _vah;
	u16 valen;
	u8 vaevents, vaevent;

	mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
					      &_vah);
	if (!mrp_cb(skb)->vah)
		return -1;
	*offset += sizeof(_vah);

	/* A set LeaveAll flag applies to the whole database, not just
	 * the attributes listed in this VectorAttribute.
	 */
	if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
	    MRP_VECATTR_HDR_FLAG_LA)
		mrp_mad_event(app, MRP_EVENT_R_LA);
	/* valen is the number of events encoded in the Vector. */
	valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
			    MRP_VECATTR_HDR_LEN_MASK);

	/* The VectorAttribute structure in a PDU carries event information
	 * about one or more attributes having consecutive values. Only the
	 * value for the first attribute is contained in the structure. So
	 * we make a copy of that value, and then increment it each time we
	 * advance to the next event in its Vector.
	 */
	if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
	    FIELD_SIZEOF(struct sk_buff, cb))
		return -1;
	if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
			  mrp_cb(skb)->mh->attrlen) < 0)
		return -1;
	*offset += mrp_cb(skb)->mh->attrlen;

	/* In a VectorAttribute, the Vector contains events which are packed
	 * three to a byte. We process one byte of the Vector at a time.
	 */
	while (valen > 0) {
		if (skb_copy_bits(skb, *offset, &vaevents,
				  sizeof(vaevents)) < 0)
			return -1;
		*offset += sizeof(vaevents);

		/* Extract and process the first event. */
		vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
				      __MRP_VECATTR_EVENT_MAX);
		if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
			/* The byte is malformed; stop processing. */
			return -1;
		}
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the second event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= (__MRP_VECATTR_EVENT_MAX *
			     __MRP_VECATTR_EVENT_MAX);
		vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the third event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= __MRP_VECATTR_EVENT_MAX;
		vaevent = vaevents;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
	}
	return 0;
}
722
/* Parse one Message of a received PDU: validate its header against the
 * application's attribute-type range, then parse VectorAttributes
 * until an EndMark or the end of the skb.  Returns -1 on malformed
 * input, 0 otherwise.
 */
static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
			     int *offset)
{
	struct mrp_msg_hdr _mh;

	mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
	if (!mrp_cb(skb)->mh)
		return -1;
	*offset += sizeof(_mh);

	if (mrp_cb(skb)->mh->attrtype == 0 ||
	    mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
	    mrp_cb(skb)->mh->attrlen == 0)
		return -1;

	while (skb->len > *offset) {
		if (mrp_pdu_parse_end_mark(skb, offset) < 0)
			break;
		if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
			return -1;
	}
	return 0;
}
746
/* Receive handler registered via dev_add_pack() for an application's
 * packet type.  Locates the per-device, per-application applicant via
 * RCU, validates the PDU version, and parses Messages under the
 * applicant lock.  Always consumes the skb and returns 0.
 */
static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct mrp_application *appl = container_of(pt, struct mrp_application,
						    pkttype);
	struct mrp_port *port;
	struct mrp_applicant *app;
	struct mrp_pdu_hdr _ph;
	const struct mrp_pdu_hdr *ph;
	int offset = skb_network_offset(skb);

	/* If the interface is in promiscuous mode, drop the packet if
	 * it was unicast to another host.
	 */
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
		goto out;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto out;
	port = rcu_dereference(dev->mrp_port);
	if (unlikely(!port))
		goto out;
	app = rcu_dereference(port->applicants[appl->type]);
	if (unlikely(!app))
		goto out;

	ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
	if (!ph)
		goto out;
	offset += sizeof(_ph);

	/* Only accept PDUs carrying the protocol version this
	 * application registered.
	 */
	if (ph->version != app->app->version)
		goto out;

	/* Parse Messages until the final EndMark or a parse error. */
	spin_lock(&app->lock);
	while (skb->len > offset) {
		if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
			break;
		if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
			break;
	}
	spin_unlock(&app->lock);
out:
	kfree_skb(skb);
	return 0;
}
793
794static int mrp_init_port(struct net_device *dev)
795{
796 struct mrp_port *port;
797
798 port = kzalloc(sizeof(*port), GFP_KERNEL);
799 if (!port)
800 return -ENOMEM;
801 rcu_assign_pointer(dev->mrp_port, port);
802 return 0;
803}
804
/* Detach and free the device's mrp_port, but only once no applicants
 * remain registered on it.  Freeing is deferred via RCU so concurrent
 * readers in mrp_rcv() stay safe.  Runs under RTNL.
 */
static void mrp_release_port(struct net_device *dev)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	unsigned int i;

	for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
		if (rtnl_dereference(port->applicants[i]))
			return;
	}
	RCU_INIT_POINTER(dev->mrp_port, NULL);
	kfree_rcu(port, rcu);
}
817
/* Create and register an applicant for @appl on @dev: allocates the
 * per-device port on first use, joins the application's multicast
 * group, and starts the join timer.  Must be called under RTNL.
 * Returns 0 or a negative errno; on failure all partial state is
 * unwound (the port is released again if this applicant was the
 * reason it was created — mrp_release_port() is a no-op while other
 * applicants remain).
 */
int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_applicant *app;
	int err;

	ASSERT_RTNL();

	if (!rtnl_dereference(dev->mrp_port)) {
		err = mrp_init_port(dev);
		if (err < 0)
			goto err1;
	}

	err = -ENOMEM;
	app = kzalloc(sizeof(*app), GFP_KERNEL);
	if (!app)
		goto err2;

	err = dev_mc_add(dev, appl->group_address);
	if (err < 0)
		goto err3;

	app->dev = dev;
	app->app = appl;
	app->mad = RB_ROOT;
	spin_lock_init(&app->lock);
	skb_queue_head_init(&app->queue);
	rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
	setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app);
	mrp_join_timer_arm(app);
	return 0;

err3:
	kfree(app);
err2:
	mrp_release_port(dev);
err1:
	return err;
}
EXPORT_SYMBOL_GPL(mrp_init_applicant);
858
859void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
860{
861 struct mrp_port *port = rtnl_dereference(dev->mrp_port);
862 struct mrp_applicant *app = rtnl_dereference(
863 port->applicants[appl->type]);
864
865 ASSERT_RTNL();
866
867 RCU_INIT_POINTER(port->applicants[appl->type], NULL);
868
869 /* Delete timer and generate a final TX event to flush out
870 * all pending messages before the applicant is gone.
871 */
872 del_timer_sync(&app->join_timer);
873 mrp_mad_event(app, MRP_EVENT_TX);
874 mrp_pdu_queue(app);
875 mrp_queue_xmit(app);
876
877 dev_mc_del(dev, appl->group_address);
878 kfree_rcu(app, rcu);
879 mrp_release_port(dev);
880}
881EXPORT_SYMBOL_GPL(mrp_uninit_applicant);
882
/* Hook the application's packet type into the receive path.  Always
 * succeeds; returns 0 so callers can treat it like other init calls.
 */
int mrp_register_application(struct mrp_application *appl)
{
	appl->pkttype.func = mrp_rcv;
	dev_add_pack(&appl->pkttype);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_register_application);
890
/* Remove the application's packet type from the receive path. */
void mrp_unregister_application(struct mrp_application *appl)
{
	dev_remove_pack(&appl->pkttype);
}
EXPORT_SYMBOL_GPL(mrp_unregister_application);