Diffstat (limited to 'net/hsr/hsr_main.c')
-rw-r--r--  net/hsr/hsr_main.c  248
1 file changed, 3 insertions(+), 245 deletions(-)
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index 99b8fc4eca6c..bcda901437bc 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -7,10 +7,6 @@
  *
  * Author(s):
  *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
- *
- * In addition to routines for registering and unregistering HSR support, this
- * file also contains the receive routine that handles all incoming frames with
- * Ethertype (protocol) ETH_P_PRP (HSRv0), and network device event handling.
  */
 
 #include <linux/netdevice.h>
@@ -56,11 +52,10 @@ bool is_hsr_slave(struct net_device *dev)
 	return false;
 }
 
-
 /* If dev is a HSR slave device, return the virtual master device. Return NULL
  * otherwise.
  */
-static struct hsr_priv *get_hsr_master(struct net_device *dev)
+struct hsr_priv *get_hsr_master(struct net_device *dev)
 {
 	struct hsr_priv *hsr;
 
@@ -76,12 +71,11 @@ static struct hsr_priv *get_hsr_master(struct net_device *dev)
 	return NULL;
 }
 
-
 /* If dev is a HSR slave device, return the other slave device. Return NULL
  * otherwise.
  */
-static struct net_device *get_other_slave(struct hsr_priv *hsr,
-					   struct net_device *dev)
+struct net_device *get_other_slave(struct hsr_priv *hsr,
+				   struct net_device *dev)
 {
 	if (dev == hsr->slave[0])
 		return hsr->slave[1];
@@ -197,239 +191,6 @@ static void prune_nodes_all(unsigned long data)
 }
 
 
-static struct sk_buff *hsr_pull_tag(struct sk_buff *skb)
-{
-	struct hsr_tag *hsr_tag;
-	struct sk_buff *skb2;
-
-	skb2 = skb_share_check(skb, GFP_ATOMIC);
-	if (unlikely(!skb2))
-		goto err_free;
-	skb = skb2;
-
-	if (unlikely(!pskb_may_pull(skb, HSR_HLEN)))
-		goto err_free;
-
-	hsr_tag = (struct hsr_tag *) skb->data;
-	skb->protocol = hsr_tag->encap_proto;
-	skb_pull(skb, HSR_HLEN);
-
-	return skb;
-
-err_free:
-	kfree_skb(skb);
-	return NULL;
-}
-
-
-/* The uses I can see for these HSR supervision frames are:
- * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
- *    22") to reset any sequence_nr counters belonging to that node. Useful if
- *    the other node's counter has been reset for some reason.
- *    --
- *    Or not - resetting the counter and bridging the frame would create a
- *    loop, unfortunately.
- *
- * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
- *    frame is received from a particular node, we know something is wrong.
- *    We just register these (as with normal frames) and throw them away.
- *
- * 3) Allow different MAC addresses for the two slave interfaces, using the
- *    MacAddressA field.
- */
-static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
-{
-	struct hsr_sup_tag *hsr_stag;
-
-	if (!ether_addr_equal(eth_hdr(skb)->h_dest,
-			      hsr->sup_multicast_addr))
-		return false;
-
-	hsr_stag = (struct hsr_sup_tag *) skb->data;
-	if (get_hsr_stag_path(hsr_stag) != 0x0f)
-		return false;
-	if ((hsr_stag->HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
-	    (hsr_stag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
-		return false;
-	if (hsr_stag->HSR_TLV_Length != 12)
-		return false;
-
-	return true;
-}
-
-
-/* Implementation somewhat according to IEC-62439-3, p. 43
- */
-static int hsr_rcv(struct sk_buff *skb, struct net_device *dev,
-		   struct packet_type *pt, struct net_device *orig_dev)
-{
-	struct hsr_priv *hsr;
-	struct net_device *other_slave;
-	struct hsr_node *node;
-	bool deliver_to_self;
-	struct sk_buff *skb_deliver;
-	enum hsr_dev_idx dev_in_idx, dev_other_idx;
-	bool dup_out;
-	int ret;
-
-	hsr = get_hsr_master(dev);
-
-	if (!hsr) {
-		/* Non-HSR-slave device 'dev' is connected to a HSR network */
-		kfree_skb(skb);
-		dev->stats.rx_errors++;
-		return NET_RX_SUCCESS;
-	}
-
-	if (dev == hsr->slave[0]) {
-		dev_in_idx = HSR_DEV_SLAVE_A;
-		dev_other_idx = HSR_DEV_SLAVE_B;
-	} else {
-		dev_in_idx = HSR_DEV_SLAVE_B;
-		dev_other_idx = HSR_DEV_SLAVE_A;
-	}
-
-	node = hsr_find_node(&hsr->self_node_db, skb);
-	if (node) {
-		/* Always kill frames sent by ourselves */
-		kfree_skb(skb);
-		return NET_RX_SUCCESS;
-	}
-
-	/* Is this frame a candidate for local reception? */
-	deliver_to_self = false;
-	if ((skb->pkt_type == PACKET_HOST) ||
-	    (skb->pkt_type == PACKET_MULTICAST) ||
-	    (skb->pkt_type == PACKET_BROADCAST))
-		deliver_to_self = true;
-	else if (ether_addr_equal(eth_hdr(skb)->h_dest,
-				  hsr->dev->dev_addr)) {
-		skb->pkt_type = PACKET_HOST;
-		deliver_to_self = true;
-	}
-
-
-	rcu_read_lock(); /* node_db */
-	node = hsr_find_node(&hsr->node_db, skb);
-
-	if (is_supervision_frame(hsr, skb)) {
-		skb_pull(skb, sizeof(struct hsr_sup_tag));
-		node = hsr_merge_node(hsr, node, skb, dev_in_idx);
-		if (!node) {
-			rcu_read_unlock(); /* node_db */
-			kfree_skb(skb);
-			hsr->dev->stats.rx_dropped++;
-			return NET_RX_DROP;
-		}
-		skb_push(skb, sizeof(struct hsr_sup_tag));
-		deliver_to_self = false;
-	}
-
-	if (!node) {
-		/* Source node unknown; this might be a HSR frame from
-		 * another net (different multicast address). Ignore it.
-		 */
-		rcu_read_unlock(); /* node_db */
-		kfree_skb(skb);
-		return NET_RX_SUCCESS;
-	}
-
-	/* Register ALL incoming frames as outgoing through the other interface.
-	 * This allows us to register frames as incoming only if they are valid
-	 * for the receiving interface, without using a specific counter for
-	 * incoming frames.
-	 */
-	dup_out = hsr_register_frame_out(node, dev_other_idx, skb);
-	if (!dup_out)
-		hsr_register_frame_in(node, dev_in_idx);
-
-	/* Forward this frame? */
-	if (!dup_out && (skb->pkt_type != PACKET_HOST))
-		other_slave = get_other_slave(hsr, dev);
-	else
-		other_slave = NULL;
-
-	if (hsr_register_frame_out(node, HSR_DEV_MASTER, skb))
-		deliver_to_self = false;
-
-	rcu_read_unlock(); /* node_db */
-
-	if (!deliver_to_self && !other_slave) {
-		kfree_skb(skb);
-		/* Circulated frame; silently remove it. */
-		return NET_RX_SUCCESS;
-	}
-
-	skb_deliver = skb;
-	if (deliver_to_self && other_slave) {
-		/* skb_clone() is not enough since we will strip the hsr tag
-		 * and do address substitution below
-		 */
-		skb_deliver = pskb_copy(skb, GFP_ATOMIC);
-		if (!skb_deliver) {
-			deliver_to_self = false;
-			hsr->dev->stats.rx_dropped++;
-		}
-	}
-
-	if (deliver_to_self) {
-		bool multicast_frame;
-
-		skb_deliver = hsr_pull_tag(skb_deliver);
-		if (!skb_deliver) {
-			hsr->dev->stats.rx_dropped++;
-			goto forward;
-		}
-#if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-		/* Move everything in the header that is after the HSR tag,
-		 * to work around alignment problems caused by the 6-byte HSR
-		 * tag. In practice, this removes/overwrites the HSR tag in
-		 * the header and restores a "standard" packet.
-		 */
-		memmove(skb_deliver->data - HSR_HLEN, skb_deliver->data,
-			skb_headlen(skb_deliver));
-
-		/* Adjust skb members so they correspond with the move above.
-		 * This cannot possibly underflow skb->data since hsr_pull_tag()
-		 * above succeeded.
-		 * At this point in the protocol stack, the transport and
-		 * network headers have not been set yet, and we haven't touched
-		 * the mac header nor the head. So we only need to adjust data
-		 * and tail:
-		 */
-		skb_deliver->data -= HSR_HLEN;
-		skb_deliver->tail -= HSR_HLEN;
-#endif
-		skb_deliver->dev = hsr->dev;
-		hsr_addr_subst_source(hsr, skb_deliver);
-		multicast_frame = (skb_deliver->pkt_type == PACKET_MULTICAST);
-		ret = netif_rx(skb_deliver);
-		if (ret == NET_RX_DROP) {
-			hsr->dev->stats.rx_dropped++;
-		} else {
-			hsr->dev->stats.rx_packets++;
-			hsr->dev->stats.rx_bytes += skb->len;
-			if (multicast_frame)
-				hsr->dev->stats.multicast++;
-		}
-	}
-
-forward:
-	if (other_slave) {
-		skb_push(skb, ETH_HLEN);
-		skb->dev = other_slave;
-		dev_queue_xmit(skb);
-	}
-
-	return NET_RX_SUCCESS;
-}
-
-
-static struct packet_type hsr_pt __read_mostly = {
-	.type = htons(ETH_P_PRP),
-	.func = hsr_rcv,
-};
-
 static struct notifier_block hsr_nb = {
 	.notifier_call = hsr_netdev_notify,	/* Slave event notifications */
 };
@@ -441,8 +202,6 @@ static int __init hsr_init(void)
 
 	BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_HLEN);
 
-	dev_add_pack(&hsr_pt);
-
 	init_timer(&prune_timer);
 	prune_timer.function = prune_nodes_all;
 	prune_timer.data = 0;
@@ -461,7 +220,6 @@ static void __exit hsr_exit(void)
 	unregister_netdevice_notifier(&hsr_nb);
 	del_timer_sync(&prune_timer);
 	hsr_netlink_exit();
-	dev_remove_pack(&hsr_pt);
 }
 
 module_init(hsr_init);
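
For readers following the refactor, below is a minimal userspace sketch of the HSRv0 tag stripping that the removed hsr_pull_tag()/hsr_rcv() path performed on each locally delivered frame. It is not part of the patch or of the kernel tree: the helper name pull_hsr_tag(), the sample frame contents, and the plain-buffer handling are illustrative assumptions. Only the 6-byte tag layout (path/LSDU size, sequence number, encapsulated EtherType) follows struct hsr_tag as used by the removed code, and the gap-closing memmove() mirrors the net effect of the kernel's skb_pull() plus the alignment workaround, not its exact mechanism.

/* Hedged sketch: strip a 6-byte HSRv0 tag from a raw Ethernet frame. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define ETH_HLEN 14   /* dst MAC + src MAC + EtherType */
#define HSR_HLEN 6    /* sizeof(struct hsr_tag) in the kernel code */

/* Remove the HSR tag in place and return the encapsulated EtherType,
 * or 0 if the frame is too short to carry a tag.
 */
static uint16_t pull_hsr_tag(uint8_t *frame, size_t *len)
{
	uint16_t encap_proto;

	if (*len < ETH_HLEN + HSR_HLEN)
		return 0;

	/* encap_proto is the last 2 bytes of the tag, in network order */
	memcpy(&encap_proto, frame + ETH_HLEN + 4, sizeof(encap_proto));

	/* Close the 6-byte gap left by the tag; same net result as the
	 * kernel's skb_pull()/memmove() combination on the delivered skb.
	 */
	memmove(frame + ETH_HLEN, frame + ETH_HLEN + HSR_HLEN,
		*len - ETH_HLEN - HSR_HLEN);
	*len -= HSR_HLEN;

	return ntohs(encap_proto);
}

int main(void)
{
	/* Hypothetical frame: EtherType 0x88FB (ETH_P_PRP), HSR tag
	 * wrapping IPv4 (0x0800); all other bytes left at zero.
	 */
	uint8_t frame[64] = { [12] = 0x88, [13] = 0xfb,   /* ETH_P_PRP */
			      [14] = 0x10, [15] = 0x2e,   /* path + LSDU size */
			      [16] = 0x00, [17] = 0x01,   /* sequence number */
			      [18] = 0x08, [19] = 0x00 }; /* encapsulated proto */
	size_t len = sizeof(frame);

	printf("encapsulated EtherType: 0x%04x\n", pull_hsr_tag(frame, &len));
	return 0;
}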