diff options
author | Arvid Brodin <arvid.brodin@alten.se> | 2014-07-04 17:38:05 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-07-08 14:35:31 -0400 |
commit | c5a7591172100269e426cf630da0f2dc8138a206 (patch) | |
tree | 1b77a7c8bf531b24d227e165bd10ab15bb9c0680 /net/hsr | |
parent | 51f3c605318b056ac5deb9079bbef2a976558827 (diff) |
net/hsr: Use list_head (and rcu) instead of array for slave devices.
Signed-off-by: Arvid Brodin <arvid.brodin@alten.se>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/hsr')
-rw-r--r-- | net/hsr/hsr_device.c | 242 | ||||
-rw-r--r-- | net/hsr/hsr_framereg.c | 142 | ||||
-rw-r--r-- | net/hsr/hsr_framereg.h | 12 | ||||
-rw-r--r-- | net/hsr/hsr_main.c | 128 | ||||
-rw-r--r-- | net/hsr/hsr_main.h | 31 | ||||
-rw-r--r-- | net/hsr/hsr_netlink.c | 57 | ||||
-rw-r--r-- | net/hsr/hsr_netlink.h | 3 | ||||
-rw-r--r-- | net/hsr/hsr_slave.c | 199 | ||||
-rw-r--r-- | net/hsr/hsr_slave.h | 29 |
9 files changed, 438 insertions, 405 deletions
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 1f8869cb70ae..1fc4ea20752e 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c | |||
@@ -45,29 +45,38 @@ static void __hsr_set_operstate(struct net_device *dev, int transition) | |||
45 | } | 45 | } |
46 | } | 46 | } |
47 | 47 | ||
48 | static void hsr_set_operstate(struct net_device *hsr_dev, bool has_carrier) | 48 | static void hsr_set_operstate(struct hsr_port *master, bool has_carrier) |
49 | { | 49 | { |
50 | if (!is_admin_up(hsr_dev)) { | 50 | if (!is_admin_up(master->dev)) { |
51 | __hsr_set_operstate(hsr_dev, IF_OPER_DOWN); | 51 | __hsr_set_operstate(master->dev, IF_OPER_DOWN); |
52 | return; | 52 | return; |
53 | } | 53 | } |
54 | 54 | ||
55 | if (has_carrier) | 55 | if (has_carrier) |
56 | __hsr_set_operstate(hsr_dev, IF_OPER_UP); | 56 | __hsr_set_operstate(master->dev, IF_OPER_UP); |
57 | else | 57 | else |
58 | __hsr_set_operstate(hsr_dev, IF_OPER_LOWERLAYERDOWN); | 58 | __hsr_set_operstate(master->dev, IF_OPER_LOWERLAYERDOWN); |
59 | } | 59 | } |
60 | 60 | ||
61 | static bool hsr_check_carrier(struct hsr_priv *hsr) | 61 | static bool hsr_check_carrier(struct hsr_port *master) |
62 | { | 62 | { |
63 | struct hsr_port *port; | ||
63 | bool has_carrier; | 64 | bool has_carrier; |
64 | 65 | ||
65 | has_carrier = (is_slave_up(hsr->slave[0]) || is_slave_up(hsr->slave[1])); | 66 | has_carrier = false; |
67 | |||
68 | rcu_read_lock(); | ||
69 | hsr_for_each_port(master->hsr, port) | ||
70 | if ((port->type != HSR_PT_MASTER) && is_slave_up(port->dev)) { | ||
71 | has_carrier = true; | ||
72 | break; | ||
73 | } | ||
74 | rcu_read_unlock(); | ||
66 | 75 | ||
67 | if (has_carrier) | 76 | if (has_carrier) |
68 | netif_carrier_on(hsr->dev); | 77 | netif_carrier_on(master->dev); |
69 | else | 78 | else |
70 | netif_carrier_off(hsr->dev); | 79 | netif_carrier_off(master->dev); |
71 | 80 | ||
72 | return has_carrier; | 81 | return has_carrier; |
73 | } | 82 | } |
@@ -95,31 +104,30 @@ static void hsr_check_announce(struct net_device *hsr_dev, | |||
95 | 104 | ||
96 | void hsr_check_carrier_and_operstate(struct hsr_priv *hsr) | 105 | void hsr_check_carrier_and_operstate(struct hsr_priv *hsr) |
97 | { | 106 | { |
107 | struct hsr_port *master; | ||
98 | unsigned char old_operstate; | 108 | unsigned char old_operstate; |
99 | bool has_carrier; | 109 | bool has_carrier; |
100 | 110 | ||
111 | master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); | ||
101 | /* netif_stacked_transfer_operstate() cannot be used here since | 112 | /* netif_stacked_transfer_operstate() cannot be used here since |
102 | * it doesn't set IF_OPER_LOWERLAYERDOWN (?) | 113 | * it doesn't set IF_OPER_LOWERLAYERDOWN (?) |
103 | */ | 114 | */ |
104 | old_operstate = hsr->dev->operstate; | 115 | old_operstate = master->dev->operstate; |
105 | has_carrier = hsr_check_carrier(hsr); | 116 | has_carrier = hsr_check_carrier(master); |
106 | hsr_set_operstate(hsr->dev, has_carrier); | 117 | hsr_set_operstate(master, has_carrier); |
107 | hsr_check_announce(hsr->dev, old_operstate); | 118 | hsr_check_announce(master->dev, old_operstate); |
108 | } | 119 | } |
109 | 120 | ||
110 | int hsr_get_max_mtu(struct hsr_priv *hsr) | 121 | int hsr_get_max_mtu(struct hsr_priv *hsr) |
111 | { | 122 | { |
112 | unsigned int mtu_max; | 123 | unsigned int mtu_max; |
113 | struct net_device *slave; | 124 | struct hsr_port *port; |
114 | 125 | ||
115 | mtu_max = ETH_DATA_LEN; | 126 | mtu_max = ETH_DATA_LEN; |
116 | rcu_read_lock(); | 127 | rcu_read_lock(); |
117 | slave = hsr->slave[0]; | 128 | hsr_for_each_port(hsr, port) |
118 | if (slave) | 129 | if (port->type != HSR_PT_MASTER) |
119 | mtu_max = min(slave->mtu, mtu_max); | 130 | mtu_max = min(port->dev->mtu, mtu_max); |
120 | slave = hsr->slave[1]; | ||
121 | if (slave) | ||
122 | mtu_max = min(slave->mtu, mtu_max); | ||
123 | rcu_read_unlock(); | 131 | rcu_read_unlock(); |
124 | 132 | ||
125 | if (mtu_max < HSR_HLEN) | 133 | if (mtu_max < HSR_HLEN) |
@@ -131,11 +139,13 @@ int hsr_get_max_mtu(struct hsr_priv *hsr) | |||
131 | static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu) | 139 | static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu) |
132 | { | 140 | { |
133 | struct hsr_priv *hsr; | 141 | struct hsr_priv *hsr; |
142 | struct hsr_port *master; | ||
134 | 143 | ||
135 | hsr = netdev_priv(dev); | 144 | hsr = netdev_priv(dev); |
145 | master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); | ||
136 | 146 | ||
137 | if (new_mtu > hsr_get_max_mtu(hsr)) { | 147 | if (new_mtu > hsr_get_max_mtu(hsr)) { |
138 | netdev_info(hsr->dev, "A HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n", | 148 | netdev_info(master->dev, "A HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n", |
139 | HSR_HLEN); | 149 | HSR_HLEN); |
140 | return -EINVAL; | 150 | return -EINVAL; |
141 | } | 151 | } |
@@ -148,35 +158,42 @@ static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu) | |||
148 | static int hsr_dev_open(struct net_device *dev) | 158 | static int hsr_dev_open(struct net_device *dev) |
149 | { | 159 | { |
150 | struct hsr_priv *hsr; | 160 | struct hsr_priv *hsr; |
151 | struct net_device *slave; | 161 | struct hsr_port *port; |
152 | int i; | 162 | char designation; |
153 | char *slave_name; | ||
154 | 163 | ||
155 | hsr = netdev_priv(dev); | 164 | hsr = netdev_priv(dev); |
165 | designation = '\0'; | ||
156 | 166 | ||
157 | for (i = 0; i < HSR_MAX_SLAVE; i++) { | 167 | rcu_read_lock(); |
158 | slave = hsr->slave[i]; | 168 | hsr_for_each_port(hsr, port) { |
159 | if (slave) | 169 | if (port->type == HSR_PT_MASTER) |
160 | slave_name = slave->name; | 170 | continue; |
161 | else | 171 | switch (port->type) { |
162 | slave_name = "null"; | 172 | case HSR_PT_SLAVE_A: |
163 | 173 | designation = 'A'; | |
164 | if (!is_slave_up(slave)) | 174 | break; |
165 | netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a working HSR network\n", | 175 | case HSR_PT_SLAVE_B: |
166 | 'A' + i, slave_name); | 176 | designation = 'B'; |
177 | break; | ||
178 | default: | ||
179 | designation = '?'; | ||
180 | } | ||
181 | if (!is_slave_up(port->dev)) | ||
182 | netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a fully working HSR network\n", | ||
183 | designation, port->dev->name); | ||
167 | } | 184 | } |
185 | rcu_read_unlock(); | ||
186 | |||
187 | if (designation == '\0') | ||
188 | netdev_warn(dev, "No slave devices configured\n"); | ||
168 | 189 | ||
169 | return 0; | 190 | return 0; |
170 | } | 191 | } |
171 | 192 | ||
193 | |||
172 | static int hsr_dev_close(struct net_device *dev) | 194 | static int hsr_dev_close(struct net_device *dev) |
173 | { | 195 | { |
174 | /* Nothing to do here. We could try to restore the state of the slaves | 196 | /* Nothing to do here. */ |
175 | * to what they were before being changed by the hsr master dev's state, | ||
176 | * but they might have been changed manually in the mean time too, so | ||
177 | * taking them up or down here might be confusing and is probably not a | ||
178 | * good idea. | ||
179 | */ | ||
180 | return 0; | 197 | return 0; |
181 | } | 198 | } |
182 | 199 | ||
@@ -220,18 +237,24 @@ static void hsr_fill_tag(struct hsr_ethhdr *hsr_ethhdr, struct hsr_priv *hsr) | |||
220 | hsr_ethhdr->ethhdr.h_proto = htons(ETH_P_PRP); | 237 | hsr_ethhdr->ethhdr.h_proto = htons(ETH_P_PRP); |
221 | } | 238 | } |
222 | 239 | ||
223 | static int slave_xmit(struct sk_buff *skb, struct hsr_priv *hsr, | 240 | static int slave_xmit(struct hsr_priv *hsr, struct sk_buff *skb, |
224 | enum hsr_dev_idx dev_idx) | 241 | enum hsr_port_type type) |
225 | { | 242 | { |
243 | struct hsr_port *port; | ||
226 | struct hsr_ethhdr *hsr_ethhdr; | 244 | struct hsr_ethhdr *hsr_ethhdr; |
227 | 245 | ||
228 | hsr_ethhdr = (struct hsr_ethhdr *) skb->data; | 246 | hsr_ethhdr = (struct hsr_ethhdr *) skb->data; |
229 | 247 | ||
230 | skb->dev = hsr->slave[dev_idx]; | 248 | rcu_read_lock(); |
231 | if (unlikely(!skb->dev)) | 249 | port = hsr_port_get_hsr(hsr, type); |
250 | if (!port) { | ||
251 | rcu_read_unlock(); | ||
232 | return NET_XMIT_DROP; | 252 | return NET_XMIT_DROP; |
253 | } | ||
254 | skb->dev = port->dev; | ||
233 | 255 | ||
234 | hsr_addr_subst_dest(hsr, &hsr_ethhdr->ethhdr, dev_idx); | 256 | hsr_addr_subst_dest(port->hsr, &hsr_ethhdr->ethhdr, port); |
257 | rcu_read_unlock(); | ||
235 | 258 | ||
236 | /* Address substitution (IEC62439-3 pp 26, 50): replace mac | 259 | /* Address substitution (IEC62439-3 pp 26, 50): replace mac |
237 | * address of outgoing frame with that of the outgoing slave's. | 260 | * address of outgoing frame with that of the outgoing slave's. |
@@ -241,10 +264,10 @@ static int slave_xmit(struct sk_buff *skb, struct hsr_priv *hsr, | |||
241 | return dev_queue_xmit(skb); | 264 | return dev_queue_xmit(skb); |
242 | } | 265 | } |
243 | 266 | ||
244 | |||
245 | static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev) | 267 | static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev) |
246 | { | 268 | { |
247 | struct hsr_priv *hsr; | 269 | struct hsr_priv *hsr; |
270 | struct hsr_port *master; | ||
248 | struct hsr_ethhdr *hsr_ethhdr; | 271 | struct hsr_ethhdr *hsr_ethhdr; |
249 | struct sk_buff *skb2; | 272 | struct sk_buff *skb2; |
250 | int res1, res2; | 273 | int res1, res2; |
@@ -259,16 +282,23 @@ static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
259 | } | 282 | } |
260 | 283 | ||
261 | skb2 = pskb_copy(skb, GFP_ATOMIC); | 284 | skb2 = pskb_copy(skb, GFP_ATOMIC); |
262 | res1 = slave_xmit(skb, hsr, HSR_DEV_SLAVE_A); | ||
263 | res2 = slave_xmit(skb2, hsr, HSR_DEV_SLAVE_B); | ||
264 | 285 | ||
286 | res1 = slave_xmit(hsr, skb, HSR_PT_SLAVE_A); | ||
287 | if (skb2) | ||
288 | res2 = slave_xmit(hsr, skb2, HSR_PT_SLAVE_B); | ||
289 | else | ||
290 | res2 = NET_XMIT_DROP; | ||
291 | |||
292 | rcu_read_lock(); | ||
293 | master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); | ||
265 | if (likely(res1 == NET_XMIT_SUCCESS || res1 == NET_XMIT_CN || | 294 | if (likely(res1 == NET_XMIT_SUCCESS || res1 == NET_XMIT_CN || |
266 | res2 == NET_XMIT_SUCCESS || res2 == NET_XMIT_CN)) { | 295 | res2 == NET_XMIT_SUCCESS || res2 == NET_XMIT_CN)) { |
267 | hsr->dev->stats.tx_packets++; | 296 | master->dev->stats.tx_packets++; |
268 | hsr->dev->stats.tx_bytes += skb->len; | 297 | master->dev->stats.tx_bytes += skb->len; |
269 | } else { | 298 | } else { |
270 | hsr->dev->stats.tx_dropped++; | 299 | master->dev->stats.tx_dropped++; |
271 | } | 300 | } |
301 | rcu_read_unlock(); | ||
272 | 302 | ||
273 | return NETDEV_TX_OK; | 303 | return NETDEV_TX_OK; |
274 | } | 304 | } |
@@ -322,31 +352,35 @@ static int hsr_pad(int size) | |||
322 | static void send_hsr_supervision_frame(struct net_device *hsr_dev, u8 type) | 352 | static void send_hsr_supervision_frame(struct net_device *hsr_dev, u8 type) |
323 | { | 353 | { |
324 | struct hsr_priv *hsr; | 354 | struct hsr_priv *hsr; |
355 | struct hsr_port *master; | ||
325 | struct sk_buff *skb; | 356 | struct sk_buff *skb; |
326 | int hlen, tlen; | 357 | int hlen, tlen; |
327 | struct hsr_sup_tag *hsr_stag; | 358 | struct hsr_sup_tag *hsr_stag; |
328 | struct hsr_sup_payload *hsr_sp; | 359 | struct hsr_sup_payload *hsr_sp; |
329 | unsigned long irqflags; | 360 | unsigned long irqflags; |
361 | int res; | ||
362 | |||
363 | hsr = netdev_priv(hsr_dev); | ||
364 | master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); | ||
330 | 365 | ||
331 | hlen = LL_RESERVED_SPACE(hsr_dev); | 366 | hlen = LL_RESERVED_SPACE(master->dev); |
332 | tlen = hsr_dev->needed_tailroom; | 367 | tlen = master->dev->needed_tailroom; |
333 | skb = alloc_skb(hsr_pad(sizeof(struct hsr_sup_payload)) + hlen + tlen, | 368 | skb = alloc_skb(hsr_pad(sizeof(struct hsr_sup_payload)) + hlen + tlen, |
334 | GFP_ATOMIC); | 369 | GFP_ATOMIC); |
335 | 370 | ||
336 | if (skb == NULL) | 371 | if (skb == NULL) |
337 | return; | 372 | return; |
338 | 373 | ||
339 | hsr = netdev_priv(hsr_dev); | ||
340 | |||
341 | skb_reserve(skb, hlen); | 374 | skb_reserve(skb, hlen); |
342 | 375 | ||
343 | skb->dev = hsr_dev; | 376 | skb->dev = master->dev; |
344 | skb->protocol = htons(ETH_P_PRP); | 377 | skb->protocol = htons(ETH_P_PRP); |
345 | skb->priority = TC_PRIO_CONTROL; | 378 | skb->priority = TC_PRIO_CONTROL; |
346 | 379 | ||
347 | if (dev_hard_header(skb, skb->dev, ETH_P_PRP, | 380 | res = dev_hard_header(skb, skb->dev, ETH_P_PRP, |
348 | hsr->sup_multicast_addr, | 381 | hsr->sup_multicast_addr, |
349 | skb->dev->dev_addr, skb->len) < 0) | 382 | skb->dev->dev_addr, skb->len); |
383 | if (res <= 0) | ||
350 | goto out; | 384 | goto out; |
351 | 385 | ||
352 | skb_pull(skb, sizeof(struct ethhdr)); | 386 | skb_pull(skb, sizeof(struct ethhdr)); |
@@ -367,12 +401,13 @@ static void send_hsr_supervision_frame(struct net_device *hsr_dev, u8 type) | |||
367 | 401 | ||
368 | /* Payload: MacAddressA */ | 402 | /* Payload: MacAddressA */ |
369 | hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(*hsr_sp)); | 403 | hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(*hsr_sp)); |
370 | ether_addr_copy(hsr_sp->MacAddressA, hsr_dev->dev_addr); | 404 | ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr); |
371 | 405 | ||
372 | dev_queue_xmit(skb); | 406 | dev_queue_xmit(skb); |
373 | return; | 407 | return; |
374 | 408 | ||
375 | out: | 409 | out: |
410 | WARN_ON_ONCE("HSR: Could not send supervision frame\n"); | ||
376 | kfree_skb(skb); | 411 | kfree_skb(skb); |
377 | } | 412 | } |
378 | 413 | ||
@@ -382,14 +417,16 @@ out: | |||
382 | static void hsr_announce(unsigned long data) | 417 | static void hsr_announce(unsigned long data) |
383 | { | 418 | { |
384 | struct hsr_priv *hsr; | 419 | struct hsr_priv *hsr; |
420 | struct hsr_port *master; | ||
385 | 421 | ||
386 | hsr = (struct hsr_priv *) data; | 422 | hsr = (struct hsr_priv *) data; |
423 | master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); | ||
387 | 424 | ||
388 | if (hsr->announce_count < 3) { | 425 | if (hsr->announce_count < 3) { |
389 | send_hsr_supervision_frame(hsr->dev, HSR_TLV_ANNOUNCE); | 426 | send_hsr_supervision_frame(master->dev, HSR_TLV_ANNOUNCE); |
390 | hsr->announce_count++; | 427 | hsr->announce_count++; |
391 | } else { | 428 | } else { |
392 | send_hsr_supervision_frame(hsr->dev, HSR_TLV_LIFE_CHECK); | 429 | send_hsr_supervision_frame(master->dev, HSR_TLV_LIFE_CHECK); |
393 | } | 430 | } |
394 | 431 | ||
395 | if (hsr->announce_count < 3) | 432 | if (hsr->announce_count < 3) |
@@ -399,43 +436,28 @@ static void hsr_announce(unsigned long data) | |||
399 | hsr->announce_timer.expires = jiffies + | 436 | hsr->announce_timer.expires = jiffies + |
400 | msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL); | 437 | msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL); |
401 | 438 | ||
402 | if (is_admin_up(hsr->dev)) | 439 | if (is_admin_up(master->dev)) |
403 | add_timer(&hsr->announce_timer); | 440 | add_timer(&hsr->announce_timer); |
404 | } | 441 | } |
405 | 442 | ||
406 | 443 | ||
407 | static void restore_slaves(struct net_device *hsr_dev) | ||
408 | { | ||
409 | struct hsr_priv *hsr; | ||
410 | |||
411 | hsr = netdev_priv(hsr_dev); | ||
412 | hsr_del_slave(hsr, 1); | ||
413 | hsr_del_slave(hsr, 0); | ||
414 | } | ||
415 | |||
416 | static void reclaim_hsr_dev(struct rcu_head *rh) | ||
417 | { | ||
418 | struct hsr_priv *hsr; | ||
419 | |||
420 | hsr = container_of(rh, struct hsr_priv, rcu_head); | ||
421 | free_netdev(hsr->dev); | ||
422 | } | ||
423 | |||
424 | |||
425 | /* According to comments in the declaration of struct net_device, this function | 444 | /* According to comments in the declaration of struct net_device, this function |
426 | * is "Called from unregister, can be used to call free_netdev". Ok then... | 445 | * is "Called from unregister, can be used to call free_netdev". Ok then... |
427 | */ | 446 | */ |
428 | static void hsr_dev_destroy(struct net_device *hsr_dev) | 447 | static void hsr_dev_destroy(struct net_device *hsr_dev) |
429 | { | 448 | { |
430 | struct hsr_priv *hsr; | 449 | struct hsr_priv *hsr; |
450 | struct hsr_port *port; | ||
431 | 451 | ||
432 | hsr = netdev_priv(hsr_dev); | 452 | hsr = netdev_priv(hsr_dev); |
453 | hsr_for_each_port(hsr, port) | ||
454 | hsr_del_port(port); | ||
433 | 455 | ||
434 | del_timer_sync(&hsr->prune_timer); | 456 | del_timer_sync(&hsr->prune_timer); |
435 | del_timer_sync(&hsr->announce_timer); | 457 | del_timer_sync(&hsr->announce_timer); |
436 | unregister_hsr_master(hsr); /* calls list_del_rcu on hsr */ | 458 | |
437 | restore_slaves(hsr_dev); | 459 | synchronize_rcu(); |
438 | call_rcu(&hsr->rcu_head, reclaim_hsr_dev); /* reclaim hsr */ | 460 | free_netdev(hsr_dev); |
439 | } | 461 | } |
440 | 462 | ||
441 | static const struct net_device_ops hsr_device_ops = { | 463 | static const struct net_device_ops hsr_device_ops = { |
@@ -461,12 +483,11 @@ void hsr_dev_setup(struct net_device *dev) | |||
461 | 483 | ||
462 | /* Return true if dev is a HSR master; return false otherwise. | 484 | /* Return true if dev is a HSR master; return false otherwise. |
463 | */ | 485 | */ |
464 | bool is_hsr_master(struct net_device *dev) | 486 | inline bool is_hsr_master(struct net_device *dev) |
465 | { | 487 | { |
466 | return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit); | 488 | return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit); |
467 | } | 489 | } |
468 | 490 | ||
469 | |||
470 | /* Default multicast address for HSR Supervision frames */ | 491 | /* Default multicast address for HSR Supervision frames */ |
471 | static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = { | 492 | static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = { |
472 | 0x01, 0x15, 0x4e, 0x00, 0x01, 0x00 | 493 | 0x01, 0x15, 0x4e, 0x00, 0x01, 0x00 |
@@ -476,12 +497,11 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], | |||
476 | unsigned char multicast_spec) | 497 | unsigned char multicast_spec) |
477 | { | 498 | { |
478 | struct hsr_priv *hsr; | 499 | struct hsr_priv *hsr; |
500 | struct hsr_port *port; | ||
479 | int res; | 501 | int res; |
480 | 502 | ||
481 | hsr = netdev_priv(hsr_dev); | 503 | hsr = netdev_priv(hsr_dev); |
482 | hsr->dev = hsr_dev; | 504 | INIT_LIST_HEAD(&hsr->ports); |
483 | hsr->slave[0] = NULL; | ||
484 | hsr->slave[1] = NULL; | ||
485 | INIT_LIST_HEAD(&hsr->node_db); | 505 | INIT_LIST_HEAD(&hsr->node_db); |
486 | INIT_LIST_HEAD(&hsr->self_node_db); | 506 | INIT_LIST_HEAD(&hsr->self_node_db); |
487 | 507 | ||
@@ -516,36 +536,42 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], | |||
516 | ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr); | 536 | ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr); |
517 | hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec; | 537 | hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec; |
518 | 538 | ||
519 | /* FIXME: should I modify the value of these? | 539 | /* FIXME: should I modify the value of these? |
520 | * | 540 | * |
521 | * - hsr_dev->flags - i.e. | 541 | * - hsr_dev->flags - i.e. |
522 | * IFF_MASTER/SLAVE? | 542 | * IFF_MASTER/SLAVE? |
523 | * - hsr_dev->priv_flags - i.e. | 543 | * - hsr_dev->priv_flags - i.e. |
524 | * IFF_EBRIDGE? | 544 | * IFF_EBRIDGE? |
525 | * IFF_TX_SKB_SHARING? | 545 | * IFF_TX_SKB_SHARING? |
526 | * IFF_HSR_MASTER/SLAVE? | 546 | * IFF_HSR_MASTER/SLAVE? |
527 | */ | 547 | */ |
528 | 548 | ||
529 | /* Make sure the 1st call to netif_carrier_on() gets through */ | 549 | /* Make sure the 1st call to netif_carrier_on() gets through */ |
530 | netif_carrier_off(hsr_dev); | 550 | netif_carrier_off(hsr_dev); |
531 | 551 | ||
532 | res = register_netdevice(hsr_dev); | 552 | res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER); |
533 | if (res) | 553 | if (res) |
534 | return res; | 554 | return res; |
535 | 555 | ||
536 | res = hsr_add_slave(hsr, slave[0], 0); | 556 | res = register_netdevice(hsr_dev); |
537 | if (res) | 557 | if (res) |
538 | return res; | 558 | goto fail; |
539 | res = hsr_add_slave(hsr, slave[1], 1); | 559 | |
540 | if (res) { | 560 | res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A); |
541 | hsr_del_slave(hsr, 0); | 561 | if (res) |
542 | return res; | 562 | goto fail; |
543 | } | 563 | res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B); |
564 | if (res) | ||
565 | goto fail; | ||
544 | 566 | ||
545 | hsr->prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD); | 567 | hsr->prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD); |
546 | add_timer(&hsr->prune_timer); | 568 | add_timer(&hsr->prune_timer); |
547 | 569 | ||
548 | register_hsr_master(hsr); | ||
549 | |||
550 | return 0; | 570 | return 0; |
571 | |||
572 | fail: | ||
573 | hsr_for_each_port(hsr, port) | ||
574 | hsr_del_port(port); | ||
575 | |||
576 | return res; | ||
551 | } | 577 | } |
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 3666f94c526f..c9b78c52c75b 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c | |||
@@ -27,12 +27,11 @@ struct hsr_node { | |||
27 | struct list_head mac_list; | 27 | struct list_head mac_list; |
28 | unsigned char MacAddressA[ETH_ALEN]; | 28 | unsigned char MacAddressA[ETH_ALEN]; |
29 | unsigned char MacAddressB[ETH_ALEN]; | 29 | unsigned char MacAddressB[ETH_ALEN]; |
30 | enum hsr_dev_idx AddrB_if;/* The local slave through which AddrB | 30 | /* Local slave through which AddrB frames are received from this node */ |
31 | * frames are received from this node | 31 | enum hsr_port_type AddrB_port; |
32 | */ | 32 | unsigned long time_in[HSR_PT_PORTS]; |
33 | unsigned long time_in[HSR_MAX_SLAVE]; | 33 | bool time_in_stale[HSR_PT_PORTS]; |
34 | bool time_in_stale[HSR_MAX_SLAVE]; | 34 | u16 seq_out[HSR_PT_PORTS]; |
35 | u16 seq_out[HSR_MAX_DEV]; | ||
36 | struct rcu_head rcu_head; | 35 | struct rcu_head rcu_head; |
37 | }; | 36 | }; |
38 | 37 | ||
@@ -154,11 +153,10 @@ int hsr_create_self_node(struct list_head *self_node_db, | |||
154 | * We also need to detect if the sender's SlaveA and SlaveB cables have been | 153 | * We also need to detect if the sender's SlaveA and SlaveB cables have been |
155 | * swapped. | 154 | * swapped. |
156 | */ | 155 | */ |
157 | struct hsr_node *hsr_merge_node(struct hsr_priv *hsr, | 156 | struct hsr_node *hsr_merge_node(struct hsr_node *node, struct sk_buff *skb, |
158 | struct hsr_node *node, | 157 | struct hsr_port *port) |
159 | struct sk_buff *skb, | ||
160 | enum hsr_dev_idx dev_idx) | ||
161 | { | 158 | { |
159 | struct hsr_priv *hsr; | ||
162 | struct hsr_sup_payload *hsr_sp; | 160 | struct hsr_sup_payload *hsr_sp; |
163 | struct hsr_ethhdr_sp *hsr_ethsup; | 161 | struct hsr_ethhdr_sp *hsr_ethsup; |
164 | int i; | 162 | int i; |
@@ -166,6 +164,7 @@ struct hsr_node *hsr_merge_node(struct hsr_priv *hsr, | |||
166 | 164 | ||
167 | hsr_ethsup = (struct hsr_ethhdr_sp *) skb_mac_header(skb); | 165 | hsr_ethsup = (struct hsr_ethhdr_sp *) skb_mac_header(skb); |
168 | hsr_sp = (struct hsr_sup_payload *) skb->data; | 166 | hsr_sp = (struct hsr_sup_payload *) skb->data; |
167 | hsr = port->hsr; | ||
169 | 168 | ||
170 | if (node && !ether_addr_equal(node->MacAddressA, hsr_sp->MacAddressA)) { | 169 | if (node && !ether_addr_equal(node->MacAddressA, hsr_sp->MacAddressA)) { |
171 | /* Node has changed its AddrA, frame was received from SlaveB */ | 170 | /* Node has changed its AddrA, frame was received from SlaveB */ |
@@ -174,7 +173,7 @@ struct hsr_node *hsr_merge_node(struct hsr_priv *hsr, | |||
174 | node = NULL; | 173 | node = NULL; |
175 | } | 174 | } |
176 | 175 | ||
177 | if (node && (dev_idx == node->AddrB_if) && | 176 | if (node && (port->type == node->AddrB_port) && |
178 | !ether_addr_equal(node->MacAddressB, hsr_ethsup->ethhdr.h_source)) { | 177 | !ether_addr_equal(node->MacAddressB, hsr_ethsup->ethhdr.h_source)) { |
179 | /* Cables have been swapped */ | 178 | /* Cables have been swapped */ |
180 | list_del_rcu(&node->mac_list); | 179 | list_del_rcu(&node->mac_list); |
@@ -182,8 +181,8 @@ struct hsr_node *hsr_merge_node(struct hsr_priv *hsr, | |||
182 | node = NULL; | 181 | node = NULL; |
183 | } | 182 | } |
184 | 183 | ||
185 | if (node && (dev_idx != node->AddrB_if) && | 184 | if (node && (port->type != node->AddrB_port) && |
186 | (node->AddrB_if != HSR_DEV_NONE) && | 185 | (node->AddrB_port != HSR_PT_NONE) && |
187 | !ether_addr_equal(node->MacAddressA, hsr_ethsup->ethhdr.h_source)) { | 186 | !ether_addr_equal(node->MacAddressA, hsr_ethsup->ethhdr.h_source)) { |
188 | /* Cables have been swapped */ | 187 | /* Cables have been swapped */ |
189 | list_del_rcu(&node->mac_list); | 188 | list_del_rcu(&node->mac_list); |
@@ -200,7 +199,7 @@ struct hsr_node *hsr_merge_node(struct hsr_priv *hsr, | |||
200 | * address. Node is PICS_SUBS capable; merge its AddrB. | 199 | * address. Node is PICS_SUBS capable; merge its AddrB. |
201 | */ | 200 | */ |
202 | ether_addr_copy(node->MacAddressB, hsr_ethsup->ethhdr.h_source); | 201 | ether_addr_copy(node->MacAddressB, hsr_ethsup->ethhdr.h_source); |
203 | node->AddrB_if = dev_idx; | 202 | node->AddrB_port = port->type; |
204 | return node; | 203 | return node; |
205 | } | 204 | } |
206 | 205 | ||
@@ -211,17 +210,15 @@ struct hsr_node *hsr_merge_node(struct hsr_priv *hsr, | |||
211 | ether_addr_copy(node->MacAddressA, hsr_sp->MacAddressA); | 210 | ether_addr_copy(node->MacAddressA, hsr_sp->MacAddressA); |
212 | ether_addr_copy(node->MacAddressB, hsr_ethsup->ethhdr.h_source); | 211 | ether_addr_copy(node->MacAddressB, hsr_ethsup->ethhdr.h_source); |
213 | if (!ether_addr_equal(hsr_sp->MacAddressA, hsr_ethsup->ethhdr.h_source)) | 212 | if (!ether_addr_equal(hsr_sp->MacAddressA, hsr_ethsup->ethhdr.h_source)) |
214 | node->AddrB_if = dev_idx; | 213 | node->AddrB_port = port->type; |
215 | else | ||
216 | node->AddrB_if = HSR_DEV_NONE; | ||
217 | 214 | ||
218 | /* We are only interested in time diffs here, so use current jiffies | 215 | /* We are only interested in time diffs here, so use current jiffies |
219 | * as initialization. (0 could trigger an spurious ring error warning). | 216 | * as initialization. (0 could trigger an spurious ring error warning). |
220 | */ | 217 | */ |
221 | now = jiffies; | 218 | now = jiffies; |
222 | for (i = 0; i < HSR_MAX_SLAVE; i++) | 219 | for (i = 0; i < HSR_PT_PORTS; i++) |
223 | node->time_in[i] = now; | 220 | node->time_in[i] = now; |
224 | for (i = 0; i < HSR_MAX_DEV; i++) | 221 | for (i = 0; i < HSR_PT_PORTS; i++) |
225 | node->seq_out[i] = ntohs(hsr_ethsup->hsr_sup.sequence_nr) - 1; | 222 | node->seq_out[i] = ntohs(hsr_ethsup->hsr_sup.sequence_nr) - 1; |
226 | 223 | ||
227 | list_add_tail_rcu(&node->mac_list, &hsr->node_db); | 224 | list_add_tail_rcu(&node->mac_list, &hsr->node_db); |
@@ -265,13 +262,13 @@ void hsr_addr_subst_source(struct hsr_priv *hsr, struct sk_buff *skb) | |||
265 | * which "side" the different interfaces are. | 262 | * which "side" the different interfaces are. |
266 | */ | 263 | */ |
267 | void hsr_addr_subst_dest(struct hsr_priv *hsr, struct ethhdr *ethhdr, | 264 | void hsr_addr_subst_dest(struct hsr_priv *hsr, struct ethhdr *ethhdr, |
268 | enum hsr_dev_idx dev_idx) | 265 | struct hsr_port *port) |
269 | { | 266 | { |
270 | struct hsr_node *node; | 267 | struct hsr_node *node; |
271 | 268 | ||
272 | rcu_read_lock(); | 269 | rcu_read_lock(); |
273 | node = find_node_by_AddrA(&hsr->node_db, ethhdr->h_dest); | 270 | node = find_node_by_AddrA(&hsr->node_db, ethhdr->h_dest); |
274 | if (node && (node->AddrB_if == dev_idx)) | 271 | if (node && (node->AddrB_port == port->type)) |
275 | ether_addr_copy(ethhdr->h_dest, node->MacAddressB); | 272 | ether_addr_copy(ethhdr->h_dest, node->MacAddressB); |
276 | rcu_read_unlock(); | 273 | rcu_read_unlock(); |
277 | } | 274 | } |
@@ -295,14 +292,10 @@ static bool seq_nr_after(u16 a, u16 b) | |||
295 | #define seq_nr_before_or_eq(a, b) (!seq_nr_after((a), (b))) | 292 | #define seq_nr_before_or_eq(a, b) (!seq_nr_after((a), (b))) |
296 | 293 | ||
297 | 294 | ||
298 | void hsr_register_frame_in(struct hsr_node *node, enum hsr_dev_idx dev_idx) | 295 | void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port) |
299 | { | 296 | { |
300 | if ((dev_idx < 0) || (dev_idx >= HSR_MAX_SLAVE)) { | 297 | node->time_in[port->type] = jiffies; |
301 | WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx); | 298 | node->time_in_stale[port->type] = false; |
302 | return; | ||
303 | } | ||
304 | node->time_in[dev_idx] = jiffies; | ||
305 | node->time_in_stale[dev_idx] = false; | ||
306 | } | 299 | } |
307 | 300 | ||
308 | 301 | ||
@@ -314,16 +307,12 @@ void hsr_register_frame_in(struct hsr_node *node, enum hsr_dev_idx dev_idx) | |||
314 | * 0 otherwise, or | 307 | * 0 otherwise, or |
315 | * negative error code on error | 308 | * negative error code on error |
316 | */ | 309 | */ |
317 | int hsr_register_frame_out(struct hsr_node *node, enum hsr_dev_idx dev_idx, | 310 | int hsr_register_frame_out(struct hsr_node *node, struct hsr_port *port, |
318 | struct sk_buff *skb) | 311 | struct sk_buff *skb) |
319 | { | 312 | { |
320 | struct hsr_ethhdr *hsr_ethhdr; | 313 | struct hsr_ethhdr *hsr_ethhdr; |
321 | u16 sequence_nr; | 314 | u16 sequence_nr; |
322 | 315 | ||
323 | if ((dev_idx < 0) || (dev_idx >= HSR_MAX_DEV)) { | ||
324 | WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx); | ||
325 | return -EINVAL; | ||
326 | } | ||
327 | if (!skb_mac_header_was_set(skb)) { | 316 | if (!skb_mac_header_was_set(skb)) { |
328 | WARN_ONCE(1, "%s: Mac header not set\n", __func__); | 317 | WARN_ONCE(1, "%s: Mac header not set\n", __func__); |
329 | return -EINVAL; | 318 | return -EINVAL; |
@@ -331,35 +320,32 @@ int hsr_register_frame_out(struct hsr_node *node, enum hsr_dev_idx dev_idx, | |||
331 | hsr_ethhdr = (struct hsr_ethhdr *) skb_mac_header(skb); | 320 | hsr_ethhdr = (struct hsr_ethhdr *) skb_mac_header(skb); |
332 | 321 | ||
333 | sequence_nr = ntohs(hsr_ethhdr->hsr_tag.sequence_nr); | 322 | sequence_nr = ntohs(hsr_ethhdr->hsr_tag.sequence_nr); |
334 | if (seq_nr_before_or_eq(sequence_nr, node->seq_out[dev_idx])) | 323 | if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type])) |
335 | return 1; | 324 | return 1; |
336 | 325 | ||
337 | node->seq_out[dev_idx] = sequence_nr; | 326 | node->seq_out[port->type] = sequence_nr; |
338 | return 0; | 327 | return 0; |
339 | } | 328 | } |
340 | 329 | ||
341 | 330 | ||
342 | 331 | static struct hsr_port *get_late_port(struct hsr_priv *hsr, | |
343 | static bool is_late(struct hsr_node *node, enum hsr_dev_idx dev_idx) | 332 | struct hsr_node *node) |
344 | { | 333 | { |
345 | enum hsr_dev_idx other; | 334 | if (node->time_in_stale[HSR_PT_SLAVE_A]) |
346 | 335 | return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A); | |
347 | if (node->time_in_stale[dev_idx]) | 336 | if (node->time_in_stale[HSR_PT_SLAVE_B]) |
348 | return true; | 337 | return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B); |
349 | 338 | ||
350 | if (dev_idx == HSR_DEV_SLAVE_A) | 339 | if (time_after(node->time_in[HSR_PT_SLAVE_B], |
351 | other = HSR_DEV_SLAVE_B; | 340 | node->time_in[HSR_PT_SLAVE_A] + |
352 | else | 341 | msecs_to_jiffies(MAX_SLAVE_DIFF))) |
353 | other = HSR_DEV_SLAVE_A; | 342 | return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A); |
354 | 343 | if (time_after(node->time_in[HSR_PT_SLAVE_A], | |
355 | if (node->time_in_stale[other]) | 344 | node->time_in[HSR_PT_SLAVE_B] + |
356 | return false; | 345 | msecs_to_jiffies(MAX_SLAVE_DIFF))) |
346 | return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B); | ||
357 | 347 | ||
358 | if (time_after(node->time_in[other], node->time_in[dev_idx] + | 348 | return NULL; |
359 | msecs_to_jiffies(MAX_SLAVE_DIFF))) | ||
360 | return true; | ||
361 | |||
362 | return false; | ||
363 | } | 349 | } |
364 | 350 | ||
365 | 351 | ||
@@ -370,6 +356,7 @@ void hsr_prune_nodes(unsigned long data) | |||
370 | { | 356 | { |
371 | struct hsr_priv *hsr; | 357 | struct hsr_priv *hsr; |
372 | struct hsr_node *node; | 358 | struct hsr_node *node; |
359 | struct hsr_port *port; | ||
373 | unsigned long timestamp; | 360 | unsigned long timestamp; |
374 | unsigned long time_a, time_b; | 361 | unsigned long time_a, time_b; |
375 | 362 | ||
@@ -378,35 +365,33 @@ void hsr_prune_nodes(unsigned long data) | |||
378 | rcu_read_lock(); | 365 | rcu_read_lock(); |
379 | list_for_each_entry_rcu(node, &hsr->node_db, mac_list) { | 366 | list_for_each_entry_rcu(node, &hsr->node_db, mac_list) { |
380 | /* Shorthand */ | 367 | /* Shorthand */ |
381 | time_a = node->time_in[HSR_DEV_SLAVE_A]; | 368 | time_a = node->time_in[HSR_PT_SLAVE_A]; |
382 | time_b = node->time_in[HSR_DEV_SLAVE_B]; | 369 | time_b = node->time_in[HSR_PT_SLAVE_B]; |
383 | 370 | ||
384 | /* Check for timestamps old enough to risk wrap-around */ | 371 | /* Check for timestamps old enough to risk wrap-around */ |
385 | if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET/2)) | 372 | if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET/2)) |
386 | node->time_in_stale[HSR_DEV_SLAVE_A] = true; | 373 | node->time_in_stale[HSR_PT_SLAVE_A] = true; |
387 | if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET/2)) | 374 | if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET/2)) |
388 | node->time_in_stale[HSR_DEV_SLAVE_B] = true; | 375 | node->time_in_stale[HSR_PT_SLAVE_B] = true; |
389 | 376 | ||
390 | /* Get age of newest frame from node. | 377 | /* Get age of newest frame from node. |
391 | * At least one time_in is OK here; nodes get pruned long | 378 | * At least one time_in is OK here; nodes get pruned long |
392 | * before both time_ins can get stale | 379 | * before both time_ins can get stale |
393 | */ | 380 | */ |
394 | timestamp = time_a; | 381 | timestamp = time_a; |
395 | if (node->time_in_stale[HSR_DEV_SLAVE_A] || | 382 | if (node->time_in_stale[HSR_PT_SLAVE_A] || |
396 | (!node->time_in_stale[HSR_DEV_SLAVE_B] && | 383 | (!node->time_in_stale[HSR_PT_SLAVE_B] && |
397 | time_after(time_b, time_a))) | 384 | time_after(time_b, time_a))) |
398 | timestamp = time_b; | 385 | timestamp = time_b; |
399 | 386 | ||
400 | /* Warn of ring error only as long as we get frames at all */ | 387 | /* Warn of ring error only as long as we get frames at all */ |
401 | if (time_is_after_jiffies(timestamp + | 388 | if (time_is_after_jiffies(timestamp + |
402 | msecs_to_jiffies(1.5*MAX_SLAVE_DIFF))) { | 389 | msecs_to_jiffies(1.5*MAX_SLAVE_DIFF))) { |
403 | 390 | rcu_read_lock(); | |
404 | if (is_late(node, HSR_DEV_SLAVE_A)) | 391 | port = get_late_port(hsr, node); |
405 | hsr_nl_ringerror(hsr, node->MacAddressA, | 392 | if (port != NULL) |
406 | HSR_DEV_SLAVE_A); | 393 | hsr_nl_ringerror(hsr, node->MacAddressA, port); |
407 | else if (is_late(node, HSR_DEV_SLAVE_B)) | 394 | rcu_read_unlock(); |
408 | hsr_nl_ringerror(hsr, node->MacAddressA, | ||
409 | HSR_DEV_SLAVE_B); | ||
410 | } | 395 | } |
411 | 396 | ||
412 | /* Prune old entries */ | 397 | /* Prune old entries */ |
@@ -455,7 +440,7 @@ int hsr_get_node_data(struct hsr_priv *hsr, | |||
455 | u16 *if2_seq) | 440 | u16 *if2_seq) |
456 | { | 441 | { |
457 | struct hsr_node *node; | 442 | struct hsr_node *node; |
458 | struct net_device *slave; | 443 | struct hsr_port *port; |
459 | unsigned long tdiff; | 444 | unsigned long tdiff; |
460 | 445 | ||
461 | 446 | ||
@@ -468,8 +453,8 @@ int hsr_get_node_data(struct hsr_priv *hsr, | |||
468 | 453 | ||
469 | ether_addr_copy(addr_b, node->MacAddressB); | 454 | ether_addr_copy(addr_b, node->MacAddressB); |
470 | 455 | ||
471 | tdiff = jiffies - node->time_in[HSR_DEV_SLAVE_A]; | 456 | tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A]; |
472 | if (node->time_in_stale[HSR_DEV_SLAVE_A]) | 457 | if (node->time_in_stale[HSR_PT_SLAVE_A]) |
473 | *if1_age = INT_MAX; | 458 | *if1_age = INT_MAX; |
474 | #if HZ <= MSEC_PER_SEC | 459 | #if HZ <= MSEC_PER_SEC |
475 | else if (tdiff > msecs_to_jiffies(INT_MAX)) | 460 | else if (tdiff > msecs_to_jiffies(INT_MAX)) |
@@ -478,8 +463,8 @@ int hsr_get_node_data(struct hsr_priv *hsr, | |||
478 | else | 463 | else |
479 | *if1_age = jiffies_to_msecs(tdiff); | 464 | *if1_age = jiffies_to_msecs(tdiff); |
480 | 465 | ||
481 | tdiff = jiffies - node->time_in[HSR_DEV_SLAVE_B]; | 466 | tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B]; |
482 | if (node->time_in_stale[HSR_DEV_SLAVE_B]) | 467 | if (node->time_in_stale[HSR_PT_SLAVE_B]) |
483 | *if2_age = INT_MAX; | 468 | *if2_age = INT_MAX; |
484 | #if HZ <= MSEC_PER_SEC | 469 | #if HZ <= MSEC_PER_SEC |
485 | else if (tdiff > msecs_to_jiffies(INT_MAX)) | 470 | else if (tdiff > msecs_to_jiffies(INT_MAX)) |
@@ -489,14 +474,15 @@ int hsr_get_node_data(struct hsr_priv *hsr, | |||
489 | *if2_age = jiffies_to_msecs(tdiff); | 474 | *if2_age = jiffies_to_msecs(tdiff); |
490 | 475 | ||
491 | /* Present sequence numbers as if they were incoming on interface */ | 476 | /* Present sequence numbers as if they were incoming on interface */ |
492 | *if1_seq = node->seq_out[HSR_DEV_SLAVE_B]; | 477 | *if1_seq = node->seq_out[HSR_PT_SLAVE_B]; |
493 | *if2_seq = node->seq_out[HSR_DEV_SLAVE_A]; | 478 | *if2_seq = node->seq_out[HSR_PT_SLAVE_A]; |
494 | 479 | ||
495 | slave = hsr->slave[node->AddrB_if]; | 480 | if (node->AddrB_port != HSR_PT_NONE) { |
496 | if ((node->AddrB_if != HSR_DEV_NONE) && slave) | 481 | port = hsr_port_get_hsr(hsr, node->AddrB_port); |
497 | *addr_b_ifindex = slave->ifindex; | 482 | *addr_b_ifindex = port->dev->ifindex; |
498 | else | 483 | } else { |
499 | *addr_b_ifindex = -1; | 484 | *addr_b_ifindex = -1; |
485 | } | ||
500 | 486 | ||
501 | rcu_read_unlock(); | 487 | rcu_read_unlock(); |
502 | 488 | ||
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h index ccb09cf4ec5b..c87f36fc154c 100644 --- a/net/hsr/hsr_framereg.h +++ b/net/hsr/hsr_framereg.h | |||
@@ -18,18 +18,16 @@ struct hsr_node; | |||
18 | 18 | ||
19 | struct hsr_node *hsr_find_node(struct list_head *node_db, struct sk_buff *skb); | 19 | struct hsr_node *hsr_find_node(struct list_head *node_db, struct sk_buff *skb); |
20 | 20 | ||
21 | struct hsr_node *hsr_merge_node(struct hsr_priv *hsr, | 21 | struct hsr_node *hsr_merge_node(struct hsr_node *node, struct sk_buff *skb, |
22 | struct hsr_node *node, | 22 | struct hsr_port *port); |
23 | struct sk_buff *skb, | ||
24 | enum hsr_dev_idx dev_idx); | ||
25 | 23 | ||
26 | void hsr_addr_subst_source(struct hsr_priv *hsr, struct sk_buff *skb); | 24 | void hsr_addr_subst_source(struct hsr_priv *hsr, struct sk_buff *skb); |
27 | void hsr_addr_subst_dest(struct hsr_priv *hsr, struct ethhdr *ethhdr, | 25 | void hsr_addr_subst_dest(struct hsr_priv *hsr, struct ethhdr *ethhdr, |
28 | enum hsr_dev_idx dev_idx); | 26 | struct hsr_port *port); |
29 | 27 | ||
30 | void hsr_register_frame_in(struct hsr_node *node, enum hsr_dev_idx dev_idx); | 28 | void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port); |
31 | 29 | ||
32 | int hsr_register_frame_out(struct hsr_node *node, enum hsr_dev_idx dev_idx, | 30 | int hsr_register_frame_out(struct hsr_node *node, struct hsr_port *port, |
33 | struct sk_buff *skb); | 31 | struct sk_buff *skb); |
34 | 32 | ||
35 | void hsr_prune_nodes(unsigned long data); | 33 | void hsr_prune_nodes(unsigned long data); |
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c index b5abe26f7b4c..a06cab57ab68 100644 --- a/net/hsr/hsr_main.c +++ b/net/hsr/hsr_main.c | |||
@@ -39,76 +39,25 @@ void unregister_hsr_master(struct hsr_priv *hsr) | |||
39 | } | 39 | } |
40 | } | 40 | } |
41 | 41 | ||
42 | bool is_hsr_slave(struct net_device *dev) | ||
43 | { | ||
44 | struct hsr_priv *hsr_it; | ||
45 | |||
46 | list_for_each_entry_rcu(hsr_it, &hsr_list, hsr_list) { | ||
47 | if (dev == hsr_it->slave[0]) | ||
48 | return true; | ||
49 | if (dev == hsr_it->slave[1]) | ||
50 | return true; | ||
51 | } | ||
52 | |||
53 | return false; | ||
54 | } | ||
55 | |||
56 | /* If dev is a HSR slave device, return the virtual master device. Return NULL | ||
57 | * otherwise. | ||
58 | */ | ||
59 | struct hsr_priv *get_hsr_master(struct net_device *dev) | ||
60 | { | ||
61 | struct hsr_priv *hsr; | ||
62 | |||
63 | rcu_read_lock(); | ||
64 | list_for_each_entry_rcu(hsr, &hsr_list, hsr_list) | ||
65 | if ((dev == hsr->slave[0]) || | ||
66 | (dev == hsr->slave[1])) { | ||
67 | rcu_read_unlock(); | ||
68 | return hsr; | ||
69 | } | ||
70 | |||
71 | rcu_read_unlock(); | ||
72 | return NULL; | ||
73 | } | ||
74 | |||
75 | /* If dev is a HSR slave device, return the other slave device. Return NULL | ||
76 | * otherwise. | ||
77 | */ | ||
78 | struct net_device *get_other_slave(struct hsr_priv *hsr, | ||
79 | struct net_device *dev) | ||
80 | { | ||
81 | if (dev == hsr->slave[0]) | ||
82 | return hsr->slave[1]; | ||
83 | if (dev == hsr->slave[1]) | ||
84 | return hsr->slave[0]; | ||
85 | |||
86 | return NULL; | ||
87 | } | ||
88 | |||
89 | 42 | ||
90 | static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, | 43 | static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, |
91 | void *ptr) | 44 | void *ptr) |
92 | { | 45 | { |
93 | struct net_device *slave, *other_slave; | 46 | struct net_device *dev; |
47 | struct hsr_port *port, *master; | ||
94 | struct hsr_priv *hsr; | 48 | struct hsr_priv *hsr; |
95 | int mtu_max; | 49 | int mtu_max; |
96 | int res; | 50 | int res; |
97 | struct net_device *dev; | ||
98 | 51 | ||
99 | dev = netdev_notifier_info_to_dev(ptr); | 52 | dev = netdev_notifier_info_to_dev(ptr); |
100 | 53 | port = hsr_port_get_rtnl(dev); | |
101 | hsr = get_hsr_master(dev); | 54 | if (port == NULL) { |
102 | if (hsr) { | ||
103 | /* dev is a slave device */ | ||
104 | slave = dev; | ||
105 | other_slave = get_other_slave(hsr, slave); | ||
106 | } else { | ||
107 | if (!is_hsr_master(dev)) | 55 | if (!is_hsr_master(dev)) |
108 | return NOTIFY_DONE; | 56 | return NOTIFY_DONE; /* Not an HSR device */ |
109 | hsr = netdev_priv(dev); | 57 | hsr = netdev_priv(dev); |
110 | slave = hsr->slave[0]; | 58 | port = hsr_port_get_hsr(hsr, HSR_PT_MASTER); |
111 | other_slave = hsr->slave[1]; | 59 | } else { |
60 | hsr = port->hsr; | ||
112 | } | 61 | } |
113 | 62 | ||
114 | switch (event) { | 63 | switch (event) { |
@@ -118,48 +67,41 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, | |||
118 | hsr_check_carrier_and_operstate(hsr); | 67 | hsr_check_carrier_and_operstate(hsr); |
119 | break; | 68 | break; |
120 | case NETDEV_CHANGEADDR: | 69 | case NETDEV_CHANGEADDR: |
121 | 70 | if (port->type == HSR_PT_MASTER) { | |
122 | /* This should not happen since there's no ndo_set_mac_address() | 71 | /* This should not happen since there's no |
123 | * for HSR devices - i.e. not supported. | 72 | * ndo_set_mac_address() for HSR devices - i.e. not |
124 | */ | 73 | * supported. |
125 | if (dev == hsr->dev) | 74 | */ |
126 | break; | 75 | break; |
76 | } | ||
127 | 77 | ||
128 | if (dev == hsr->slave[0]) { | 78 | master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); |
129 | ether_addr_copy(hsr->dev->dev_addr, dev->dev_addr); | 79 | |
130 | call_netdevice_notifiers(NETDEV_CHANGEADDR, hsr->dev); | 80 | if (port->type == HSR_PT_SLAVE_A) { |
81 | ether_addr_copy(master->dev->dev_addr, dev->dev_addr); | ||
82 | call_netdevice_notifiers(NETDEV_CHANGEADDR, master->dev); | ||
131 | } | 83 | } |
132 | 84 | ||
133 | /* Make sure we recognize frames from ourselves in hsr_rcv() */ | 85 | /* Make sure we recognize frames from ourselves in hsr_rcv() */ |
134 | other_slave = hsr->slave[1]; | 86 | port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B); |
135 | res = hsr_create_self_node(&hsr->self_node_db, | 87 | res = hsr_create_self_node(&hsr->self_node_db, |
136 | hsr->dev->dev_addr, | 88 | master->dev->dev_addr, |
137 | other_slave ? | 89 | port ? |
138 | other_slave->dev_addr : | 90 | port->dev->dev_addr : |
139 | hsr->dev->dev_addr); | 91 | master->dev->dev_addr); |
140 | if (res) | 92 | if (res) |
141 | netdev_warn(hsr->dev, | 93 | netdev_warn(master->dev, |
142 | "Could not update HSR node address.\n"); | 94 | "Could not update HSR node address.\n"); |
143 | break; | 95 | break; |
144 | case NETDEV_CHANGEMTU: | 96 | case NETDEV_CHANGEMTU: |
145 | if (dev == hsr->dev) | 97 | if (port->type == HSR_PT_MASTER) |
146 | break; /* Handled in ndo_change_mtu() */ | 98 | break; /* Handled in ndo_change_mtu() */ |
147 | mtu_max = hsr_get_max_mtu(hsr); | 99 | mtu_max = hsr_get_max_mtu(port->hsr); |
148 | if (hsr->dev->mtu > mtu_max) | 100 | master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER); |
149 | dev_set_mtu(hsr->dev, mtu_max); | 101 | master->dev->mtu = mtu_max; |
150 | break; | 102 | break; |
151 | case NETDEV_UNREGISTER: | 103 | case NETDEV_UNREGISTER: |
152 | if (dev == hsr->slave[0]) { | 104 | hsr_del_port(port); |
153 | hsr->slave[0] = NULL; | ||
154 | hsr_del_slave(hsr, 0); | ||
155 | } | ||
156 | if (dev == hsr->slave[1]) { | ||
157 | hsr->slave[1] = NULL; | ||
158 | hsr_del_slave(hsr, 1); | ||
159 | } | ||
160 | |||
161 | /* There should really be a way to set a new slave device... */ | ||
162 | |||
163 | break; | 105 | break; |
164 | case NETDEV_PRE_TYPE_CHANGE: | 106 | case NETDEV_PRE_TYPE_CHANGE: |
165 | /* HSR works only on Ethernet devices. Refuse slave to change | 107 | /* HSR works only on Ethernet devices. Refuse slave to change |
@@ -172,6 +114,16 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, | |||
172 | } | 114 | } |
173 | 115 | ||
174 | 116 | ||
117 | struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt) | ||
118 | { | ||
119 | struct hsr_port *port; | ||
120 | |||
121 | hsr_for_each_port(hsr, port) | ||
122 | if (port->type == pt) | ||
123 | return port; | ||
124 | return NULL; | ||
125 | } | ||
126 | |||
175 | static struct notifier_block hsr_nb = { | 127 | static struct notifier_block hsr_nb = { |
176 | .notifier_call = hsr_netdev_notify, /* Slave event notifications */ | 128 | .notifier_call = hsr_netdev_notify, /* Slave event notifications */ |
177 | }; | 129 | }; |
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index 43689a6d731f..e31c3069fdf8 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h | |||
@@ -136,20 +136,26 @@ struct hsr_ethhdr_sp { | |||
136 | } __packed; | 136 | } __packed; |
137 | 137 | ||
138 | 138 | ||
139 | enum hsr_dev_idx { | 139 | enum hsr_port_type { |
140 | HSR_DEV_NONE = -1, | 140 | HSR_PT_NONE = 0, /* Must be 0, used by framereg */ |
141 | HSR_DEV_SLAVE_A = 0, | 141 | HSR_PT_SLAVE_A, |
142 | HSR_DEV_SLAVE_B, | 142 | HSR_PT_SLAVE_B, |
143 | HSR_DEV_MASTER, | 143 | HSR_PT_INTERLINK, |
144 | HSR_PT_MASTER, | ||
145 | HSR_PT_PORTS, /* This must be the last item in the enum */ | ||
146 | }; | ||
147 | |||
148 | struct hsr_port { | ||
149 | struct list_head port_list; | ||
150 | struct net_device *dev; | ||
151 | struct hsr_priv *hsr; | ||
152 | enum hsr_port_type type; | ||
144 | }; | 153 | }; |
145 | #define HSR_MAX_SLAVE (HSR_DEV_SLAVE_B + 1) | ||
146 | #define HSR_MAX_DEV (HSR_DEV_MASTER + 1) | ||
147 | 154 | ||
148 | struct hsr_priv { | 155 | struct hsr_priv { |
149 | struct list_head hsr_list; /* List of hsr devices */ | 156 | struct list_head hsr_list; /* List of hsr devices */ |
150 | struct rcu_head rcu_head; | 157 | struct rcu_head rcu_head; |
151 | struct net_device *dev; | 158 | struct list_head ports; |
152 | struct net_device *slave[HSR_MAX_SLAVE]; | ||
153 | struct list_head node_db; /* Other HSR nodes */ | 159 | struct list_head node_db; /* Other HSR nodes */ |
154 | struct list_head self_node_db; /* MACs of slaves */ | 160 | struct list_head self_node_db; /* MACs of slaves */ |
155 | struct timer_list announce_timer; /* Supervision frame dispatch */ | 161 | struct timer_list announce_timer; /* Supervision frame dispatch */ |
@@ -160,11 +166,6 @@ struct hsr_priv { | |||
160 | unsigned char sup_multicast_addr[ETH_ALEN]; | 166 | unsigned char sup_multicast_addr[ETH_ALEN]; |
161 | }; | 167 | }; |
162 | 168 | ||
163 | void register_hsr_master(struct hsr_priv *hsr); | 169 | struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt); |
164 | void unregister_hsr_master(struct hsr_priv *hsr); | ||
165 | bool is_hsr_slave(struct net_device *dev); | ||
166 | struct hsr_priv *get_hsr_master(struct net_device *dev); | ||
167 | struct net_device *get_other_slave(struct hsr_priv *hsr, | ||
168 | struct net_device *dev); | ||
169 | 170 | ||
170 | #endif /* __HSR_PRIVATE_H */ | 171 | #endif /* __HSR_PRIVATE_H */ |
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index a2ce359774f3..67082453928c 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c | |||
@@ -64,7 +64,7 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, | |||
64 | static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev) | 64 | static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev) |
65 | { | 65 | { |
66 | struct hsr_priv *hsr; | 66 | struct hsr_priv *hsr; |
67 | struct net_device *slave; | 67 | struct hsr_port *port; |
68 | int res; | 68 | int res; |
69 | 69 | ||
70 | hsr = netdev_priv(dev); | 70 | hsr = netdev_priv(dev); |
@@ -72,17 +72,17 @@ static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
72 | res = 0; | 72 | res = 0; |
73 | 73 | ||
74 | rcu_read_lock(); | 74 | rcu_read_lock(); |
75 | slave = hsr->slave[0]; | 75 | port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A); |
76 | if (slave) | 76 | if (port) |
77 | res = nla_put_u32(skb, IFLA_HSR_SLAVE1, slave->ifindex); | 77 | res = nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex); |
78 | rcu_read_unlock(); | 78 | rcu_read_unlock(); |
79 | if (res) | 79 | if (res) |
80 | goto nla_put_failure; | 80 | goto nla_put_failure; |
81 | 81 | ||
82 | rcu_read_lock(); | 82 | rcu_read_lock(); |
83 | slave = hsr->slave[1]; | 83 | port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B); |
84 | if (slave) | 84 | if (port) |
85 | res = nla_put_u32(skb, IFLA_HSR_SLAVE2, slave->ifindex); | 85 | res = nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex); |
86 | rcu_read_unlock(); | 86 | rcu_read_unlock(); |
87 | if (res) | 87 | if (res) |
88 | goto nla_put_failure; | 88 | goto nla_put_failure; |
@@ -141,13 +141,12 @@ static const struct genl_multicast_group hsr_mcgrps[] = { | |||
141 | * (i.e. a link has failed somewhere). | 141 | * (i.e. a link has failed somewhere). |
142 | */ | 142 | */ |
143 | void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN], | 143 | void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN], |
144 | enum hsr_dev_idx dev_idx) | 144 | struct hsr_port *port) |
145 | { | 145 | { |
146 | struct sk_buff *skb; | 146 | struct sk_buff *skb; |
147 | struct net_device *slave; | ||
148 | void *msg_head; | 147 | void *msg_head; |
148 | struct hsr_port *master; | ||
149 | int res; | 149 | int res; |
150 | int ifindex; | ||
151 | 150 | ||
152 | skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); | 151 | skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); |
153 | if (!skb) | 152 | if (!skb) |
@@ -161,15 +160,7 @@ void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN], | |||
161 | if (res < 0) | 160 | if (res < 0) |
162 | goto nla_put_failure; | 161 | goto nla_put_failure; |
163 | 162 | ||
164 | rcu_read_lock(); | 163 | res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex); |
165 | slave = hsr->slave[dev_idx]; | ||
166 | if (slave) | ||
167 | ifindex = slave->ifindex; | ||
168 | else | ||
169 | ifindex = -1; | ||
170 | rcu_read_unlock(); | ||
171 | |||
172 | res = nla_put_u32(skb, HSR_A_IFINDEX, ifindex); | ||
173 | if (res < 0) | 164 | if (res < 0) |
174 | goto nla_put_failure; | 165 | goto nla_put_failure; |
175 | 166 | ||
@@ -182,7 +173,10 @@ nla_put_failure: | |||
182 | kfree_skb(skb); | 173 | kfree_skb(skb); |
183 | 174 | ||
184 | fail: | 175 | fail: |
185 | netdev_warn(hsr->dev, "Could not send HSR ring error message\n"); | 176 | rcu_read_lock(); |
177 | master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); | ||
178 | netdev_warn(master->dev, "Could not send HSR ring error message\n"); | ||
179 | rcu_read_unlock(); | ||
186 | } | 180 | } |
187 | 181 | ||
188 | /* This is called when we haven't heard from the node with MAC address addr for | 182 | /* This is called when we haven't heard from the node with MAC address addr for |
@@ -192,6 +186,7 @@ void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN]) | |||
192 | { | 186 | { |
193 | struct sk_buff *skb; | 187 | struct sk_buff *skb; |
194 | void *msg_head; | 188 | void *msg_head; |
189 | struct hsr_port *master; | ||
195 | int res; | 190 | int res; |
196 | 191 | ||
197 | skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); | 192 | skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); |
@@ -216,7 +211,10 @@ nla_put_failure: | |||
216 | kfree_skb(skb); | 211 | kfree_skb(skb); |
217 | 212 | ||
218 | fail: | 213 | fail: |
219 | netdev_warn(hsr->dev, "Could not send HSR node down\n"); | 214 | rcu_read_lock(); |
215 | master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); | ||
216 | netdev_warn(master->dev, "Could not send HSR node down\n"); | ||
217 | rcu_read_unlock(); | ||
220 | } | 218 | } |
221 | 219 | ||
222 | 220 | ||
@@ -232,12 +230,13 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info) | |||
232 | { | 230 | { |
233 | /* For receiving */ | 231 | /* For receiving */ |
234 | struct nlattr *na; | 232 | struct nlattr *na; |
235 | struct net_device *hsr_dev, *slave; | 233 | struct net_device *hsr_dev; |
236 | 234 | ||
237 | /* For sending */ | 235 | /* For sending */ |
238 | struct sk_buff *skb_out; | 236 | struct sk_buff *skb_out; |
239 | void *msg_head; | 237 | void *msg_head; |
240 | struct hsr_priv *hsr; | 238 | struct hsr_priv *hsr; |
239 | struct hsr_port *port; | ||
241 | unsigned char hsr_node_addr_b[ETH_ALEN]; | 240 | unsigned char hsr_node_addr_b[ETH_ALEN]; |
242 | int hsr_node_if1_age; | 241 | int hsr_node_if1_age; |
243 | u16 hsr_node_if1_seq; | 242 | u16 hsr_node_if1_seq; |
@@ -319,9 +318,10 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info) | |||
319 | if (res < 0) | 318 | if (res < 0) |
320 | goto nla_put_failure; | 319 | goto nla_put_failure; |
321 | rcu_read_lock(); | 320 | rcu_read_lock(); |
322 | slave = hsr->slave[0]; | 321 | port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A); |
323 | if (slave) | 322 | if (port) |
324 | res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX, slave->ifindex); | 323 | res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX, |
324 | port->dev->ifindex); | ||
325 | rcu_read_unlock(); | 325 | rcu_read_unlock(); |
326 | if (res < 0) | 326 | if (res < 0) |
327 | goto nla_put_failure; | 327 | goto nla_put_failure; |
@@ -333,9 +333,10 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info) | |||
333 | if (res < 0) | 333 | if (res < 0) |
334 | goto nla_put_failure; | 334 | goto nla_put_failure; |
335 | rcu_read_lock(); | 335 | rcu_read_lock(); |
336 | slave = hsr->slave[1]; | 336 | port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B); |
337 | if (slave) | 337 | if (port) |
338 | res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX, slave->ifindex); | 338 | res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX, |
339 | port->dev->ifindex); | ||
339 | rcu_read_unlock(); | 340 | rcu_read_unlock(); |
340 | if (res < 0) | 341 | if (res < 0) |
341 | goto nla_put_failure; | 342 | goto nla_put_failure; |
diff --git a/net/hsr/hsr_netlink.h b/net/hsr/hsr_netlink.h index 3047f9cea5f5..3f6b95b5b6b8 100644 --- a/net/hsr/hsr_netlink.h +++ b/net/hsr/hsr_netlink.h | |||
@@ -17,12 +17,13 @@ | |||
17 | #include <uapi/linux/hsr_netlink.h> | 17 | #include <uapi/linux/hsr_netlink.h> |
18 | 18 | ||
19 | struct hsr_priv; | 19 | struct hsr_priv; |
20 | struct hsr_port; | ||
20 | 21 | ||
21 | int __init hsr_netlink_init(void); | 22 | int __init hsr_netlink_init(void); |
22 | void __exit hsr_netlink_exit(void); | 23 | void __exit hsr_netlink_exit(void); |
23 | 24 | ||
24 | void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN], | 25 | void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN], |
25 | int dev_idx); | 26 | struct hsr_port *port); |
26 | void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN]); | 27 | void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN]); |
27 | void hsr_nl_framedrop(int dropcount, int dev_idx); | 28 | void hsr_nl_framedrop(int dropcount, int dev_idx); |
28 | void hsr_nl_linkdown(int dev_idx); | 29 | void hsr_nl_linkdown(int dev_idx); |
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c index d676090f7900..fffd69297c3e 100644 --- a/net/hsr/hsr_slave.c +++ b/net/hsr/hsr_slave.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include "hsr_framereg.h" | 17 | #include "hsr_framereg.h" |
18 | 18 | ||
19 | 19 | ||
20 | static int check_slave_ok(struct net_device *dev) | 20 | static int hsr_check_dev_ok(struct net_device *dev) |
21 | { | 21 | { |
22 | /* Don't allow HSR on non-ethernet like devices */ | 22 | /* Don't allow HSR on non-ethernet like devices */ |
23 | if ((dev->flags & IFF_LOOPBACK) || (dev->type != ARPHRD_ETHER) || | 23 | if ((dev->flags & IFF_LOOPBACK) || (dev->type != ARPHRD_ETHER) || |
@@ -32,7 +32,7 @@ static int check_slave_ok(struct net_device *dev) | |||
32 | return -EINVAL; | 32 | return -EINVAL; |
33 | } | 33 | } |
34 | 34 | ||
35 | if (is_hsr_slave(dev)) { | 35 | if (hsr_port_exists(dev)) { |
36 | netdev_info(dev, "This device is already a HSR slave.\n"); | 36 | netdev_info(dev, "This device is already a HSR slave.\n"); |
37 | return -EINVAL; | 37 | return -EINVAL; |
38 | } | 38 | } |
@@ -116,38 +116,29 @@ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb) | |||
116 | rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) | 116 | rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) |
117 | { | 117 | { |
118 | struct sk_buff *skb = *pskb; | 118 | struct sk_buff *skb = *pskb; |
119 | struct net_device *dev = skb->dev; | 119 | struct hsr_port *port, *other_port, *master; |
120 | struct hsr_priv *hsr; | 120 | struct hsr_priv *hsr; |
121 | struct net_device *other_slave; | ||
122 | struct hsr_node *node; | 121 | struct hsr_node *node; |
123 | bool deliver_to_self; | 122 | bool deliver_to_self; |
124 | struct sk_buff *skb_deliver; | 123 | struct sk_buff *skb_deliver; |
125 | enum hsr_dev_idx dev_in_idx, dev_other_idx; | ||
126 | bool dup_out; | 124 | bool dup_out; |
127 | int ret; | 125 | int ret; |
128 | 126 | ||
129 | if (eth_hdr(skb)->h_proto != htons(ETH_P_PRP)) | 127 | if (eth_hdr(skb)->h_proto != htons(ETH_P_PRP)) |
130 | return RX_HANDLER_PASS; | 128 | return RX_HANDLER_PASS; |
131 | 129 | ||
132 | hsr = get_hsr_master(dev); | 130 | rcu_read_lock(); /* ports & node */ |
133 | if (!hsr) { | ||
134 | WARN_ON_ONCE(1); | ||
135 | return RX_HANDLER_PASS; | ||
136 | } | ||
137 | 131 | ||
138 | if (dev == hsr->slave[0]) { | 132 | port = hsr_port_get_rcu(skb->dev); |
139 | dev_in_idx = HSR_DEV_SLAVE_A; | 133 | hsr = port->hsr; |
140 | dev_other_idx = HSR_DEV_SLAVE_B; | 134 | master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); |
141 | } else { | ||
142 | dev_in_idx = HSR_DEV_SLAVE_B; | ||
143 | dev_other_idx = HSR_DEV_SLAVE_A; | ||
144 | } | ||
145 | 135 | ||
146 | node = hsr_find_node(&hsr->self_node_db, skb); | 136 | node = hsr_find_node(&hsr->self_node_db, skb); |
147 | if (node) { | 137 | if (node) { |
148 | /* Always kill frames sent by ourselves */ | 138 | /* Always kill frames sent by ourselves */ |
149 | kfree_skb(skb); | 139 | kfree_skb(skb); |
150 | return RX_HANDLER_CONSUMED; | 140 | ret = RX_HANDLER_CONSUMED; |
141 | goto finish; | ||
151 | } | 142 | } |
152 | 143 | ||
153 | /* Is this frame a candidate for local reception? */ | 144 | /* Is this frame a candidate for local reception? */ |
@@ -156,23 +147,22 @@ rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) | |||
156 | (skb->pkt_type == PACKET_MULTICAST) || | 147 | (skb->pkt_type == PACKET_MULTICAST) || |
157 | (skb->pkt_type == PACKET_BROADCAST)) | 148 | (skb->pkt_type == PACKET_BROADCAST)) |
158 | deliver_to_self = true; | 149 | deliver_to_self = true; |
159 | else if (ether_addr_equal(eth_hdr(skb)->h_dest, hsr->dev->dev_addr)) { | 150 | else if (ether_addr_equal(eth_hdr(skb)->h_dest, |
151 | master->dev->dev_addr)) { | ||
160 | skb->pkt_type = PACKET_HOST; | 152 | skb->pkt_type = PACKET_HOST; |
161 | deliver_to_self = true; | 153 | deliver_to_self = true; |
162 | } | 154 | } |
163 | 155 | ||
164 | |||
165 | rcu_read_lock(); /* node_db */ | ||
166 | node = hsr_find_node(&hsr->node_db, skb); | 156 | node = hsr_find_node(&hsr->node_db, skb); |
167 | 157 | ||
168 | if (is_supervision_frame(hsr, skb)) { | 158 | if (is_supervision_frame(hsr, skb)) { |
169 | skb_pull(skb, sizeof(struct hsr_sup_tag)); | 159 | skb_pull(skb, sizeof(struct hsr_sup_tag)); |
170 | node = hsr_merge_node(hsr, node, skb, dev_in_idx); | 160 | node = hsr_merge_node(node, skb, port); |
171 | if (!node) { | 161 | if (!node) { |
172 | rcu_read_unlock(); /* node_db */ | ||
173 | kfree_skb(skb); | 162 | kfree_skb(skb); |
174 | hsr->dev->stats.rx_dropped++; | 163 | master->dev->stats.rx_dropped++; |
175 | return RX_HANDLER_CONSUMED; | 164 | ret = RX_HANDLER_CONSUMED; |
165 | goto finish; | ||
176 | } | 166 | } |
177 | skb_push(skb, sizeof(struct hsr_sup_tag)); | 167 | skb_push(skb, sizeof(struct hsr_sup_tag)); |
178 | deliver_to_self = false; | 168 | deliver_to_self = false; |
@@ -182,46 +172,51 @@ rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) | |||
182 | /* Source node unknown; this might be a HSR frame from | 172 | /* Source node unknown; this might be a HSR frame from |
183 | * another net (different multicast address). Ignore it. | 173 | * another net (different multicast address). Ignore it. |
184 | */ | 174 | */ |
185 | rcu_read_unlock(); /* node_db */ | ||
186 | kfree_skb(skb); | 175 | kfree_skb(skb); |
187 | return RX_HANDLER_CONSUMED; | 176 | ret = RX_HANDLER_CONSUMED; |
177 | goto finish; | ||
188 | } | 178 | } |
189 | 179 | ||
180 | if (port->type == HSR_PT_SLAVE_A) | ||
181 | other_port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B); | ||
182 | else | ||
183 | other_port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A); | ||
184 | |||
190 | /* Register ALL incoming frames as outgoing through the other interface. | 185 | /* Register ALL incoming frames as outgoing through the other interface. |
191 | * This allows us to register frames as incoming only if they are valid | 186 | * This allows us to register frames as incoming only if they are valid |
192 | * for the receiving interface, without using a specific counter for | 187 | * for the receiving interface, without using a specific counter for |
193 | * incoming frames. | 188 | * incoming frames. |
194 | */ | 189 | */ |
195 | dup_out = hsr_register_frame_out(node, dev_other_idx, skb); | 190 | if (other_port) |
191 | dup_out = hsr_register_frame_out(node, other_port, skb); | ||
192 | else | ||
193 | dup_out = 0; | ||
196 | if (!dup_out) | 194 | if (!dup_out) |
197 | hsr_register_frame_in(node, dev_in_idx); | 195 | hsr_register_frame_in(node, port); |
198 | 196 | ||
199 | /* Forward this frame? */ | 197 | /* Forward this frame? */ |
200 | if (!dup_out && (skb->pkt_type != PACKET_HOST)) | 198 | if (dup_out || (skb->pkt_type == PACKET_HOST)) |
201 | other_slave = get_other_slave(hsr, dev); | 199 | other_port = NULL; |
202 | else | ||
203 | other_slave = NULL; | ||
204 | 200 | ||
205 | if (hsr_register_frame_out(node, HSR_DEV_MASTER, skb)) | 201 | if (hsr_register_frame_out(node, master, skb)) |
206 | deliver_to_self = false; | 202 | deliver_to_self = false; |
207 | 203 | ||
208 | rcu_read_unlock(); /* node_db */ | 204 | if (!deliver_to_self && !other_port) { |
209 | |||
210 | if (!deliver_to_self && !other_slave) { | ||
211 | kfree_skb(skb); | 205 | kfree_skb(skb); |
212 | /* Circulated frame; silently remove it. */ | 206 | /* Circulated frame; silently remove it. */ |
213 | return RX_HANDLER_CONSUMED; | 207 | ret = RX_HANDLER_CONSUMED; |
208 | goto finish; | ||
214 | } | 209 | } |
215 | 210 | ||
216 | skb_deliver = skb; | 211 | skb_deliver = skb; |
217 | if (deliver_to_self && other_slave) { | 212 | if (deliver_to_self && other_port) { |
218 | /* skb_clone() is not enough since we will strip the hsr tag | 213 | /* skb_clone() is not enough since we will strip the hsr tag |
219 | * and do address substitution below | 214 | * and do address substitution below |
220 | */ | 215 | */ |
221 | skb_deliver = pskb_copy(skb, GFP_ATOMIC); | 216 | skb_deliver = pskb_copy(skb, GFP_ATOMIC); |
222 | if (!skb_deliver) { | 217 | if (!skb_deliver) { |
223 | deliver_to_self = false; | 218 | deliver_to_self = false; |
224 | hsr->dev->stats.rx_dropped++; | 219 | master->dev->stats.rx_dropped++; |
225 | } | 220 | } |
226 | } | 221 | } |
227 | 222 | ||
@@ -230,7 +225,7 @@ rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) | |||
230 | 225 | ||
231 | skb_deliver = hsr_pull_tag(skb_deliver); | 226 | skb_deliver = hsr_pull_tag(skb_deliver); |
232 | if (!skb_deliver) { | 227 | if (!skb_deliver) { |
233 | hsr->dev->stats.rx_dropped++; | 228 | master->dev->stats.rx_dropped++; |
234 | goto forward; | 229 | goto forward; |
235 | } | 230 | } |
236 | #if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | 231 | #if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) |
@@ -253,82 +248,130 @@ rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) | |||
253 | skb_deliver->data -= HSR_HLEN; | 248 | skb_deliver->data -= HSR_HLEN; |
254 | skb_deliver->tail -= HSR_HLEN; | 249 | skb_deliver->tail -= HSR_HLEN; |
255 | #endif | 250 | #endif |
256 | skb_deliver->dev = hsr->dev; | 251 | skb_deliver->dev = master->dev; |
257 | hsr_addr_subst_source(hsr, skb_deliver); | 252 | hsr_addr_subst_source(hsr, skb_deliver); |
258 | multicast_frame = (skb_deliver->pkt_type == PACKET_MULTICAST); | 253 | multicast_frame = (skb_deliver->pkt_type == PACKET_MULTICAST); |
259 | ret = netif_rx(skb_deliver); | 254 | ret = netif_rx(skb_deliver); |
260 | if (ret == NET_RX_DROP) { | 255 | if (ret == NET_RX_DROP) { |
261 | hsr->dev->stats.rx_dropped++; | 256 | master->dev->stats.rx_dropped++; |
262 | } else { | 257 | } else { |
263 | hsr->dev->stats.rx_packets++; | 258 | master->dev->stats.rx_packets++; |
264 | hsr->dev->stats.rx_bytes += skb->len; | 259 | master->dev->stats.rx_bytes += skb->len; |
265 | if (multicast_frame) | 260 | if (multicast_frame) |
266 | hsr->dev->stats.multicast++; | 261 | master->dev->stats.multicast++; |
267 | } | 262 | } |
268 | } | 263 | } |
269 | 264 | ||
270 | forward: | 265 | forward: |
271 | if (other_slave) { | 266 | if (other_port) { |
272 | skb_push(skb, ETH_HLEN); | 267 | skb_push(skb, ETH_HLEN); |
273 | skb->dev = other_slave; | 268 | skb->dev = other_port->dev; |
274 | dev_queue_xmit(skb); | 269 | dev_queue_xmit(skb); |
275 | } | 270 | } |
276 | 271 | ||
277 | return RX_HANDLER_CONSUMED; | 272 | ret = RX_HANDLER_CONSUMED; |
273 | |||
274 | finish: | ||
275 | rcu_read_unlock(); | ||
276 | return ret; | ||
278 | } | 277 | } |
279 | 278 | ||
280 | int hsr_add_slave(struct hsr_priv *hsr, struct net_device *dev, int idx) | 279 | /* Setup device to be added to the HSR bridge. */ |
280 | static int hsr_portdev_setup(struct net_device *dev, struct hsr_port *port) | ||
281 | { | 281 | { |
282 | int res; | 282 | int res; |
283 | 283 | ||
284 | dev_hold(dev); | 284 | dev_hold(dev); |
285 | |||
286 | res = check_slave_ok(dev); | ||
287 | if (res) | ||
288 | goto fail; | ||
289 | |||
290 | res = dev_set_promiscuity(dev, 1); | 285 | res = dev_set_promiscuity(dev, 1); |
291 | if (res) | 286 | if (res) |
292 | goto fail; | 287 | goto fail_promiscuity; |
293 | 288 | res = netdev_rx_handler_register(dev, hsr_handle_frame, port); | |
294 | res = netdev_rx_handler_register(dev, hsr_handle_frame, hsr); | ||
295 | if (res) | 289 | if (res) |
296 | goto fail_rx_handler; | 290 | goto fail_rx_handler; |
291 | dev_disable_lro(dev); | ||
297 | 292 | ||
298 | 293 | /* FIXME: | |
299 | hsr->slave[idx] = dev; | 294 | * What does net device "adjacency" mean? Should we do |
300 | 295 | * res = netdev_master_upper_dev_link(port->dev, port->hsr->dev); ? | |
301 | /* Set required header length */ | 296 | */ |
302 | if (dev->hard_header_len + HSR_HLEN > hsr->dev->hard_header_len) | ||
303 | hsr->dev->hard_header_len = dev->hard_header_len + HSR_HLEN; | ||
304 | |||
305 | dev_set_mtu(hsr->dev, hsr_get_max_mtu(hsr)); | ||
306 | 297 | ||
307 | return 0; | 298 | return 0; |
308 | 299 | ||
309 | fail_rx_handler: | 300 | fail_rx_handler: |
310 | dev_set_promiscuity(dev, -1); | 301 | dev_set_promiscuity(dev, -1); |
311 | 302 | fail_promiscuity: | |
312 | fail: | ||
313 | dev_put(dev); | 303 | dev_put(dev); |
304 | |||
314 | return res; | 305 | return res; |
315 | } | 306 | } |
316 | 307 | ||
317 | void hsr_del_slave(struct hsr_priv *hsr, int idx) | 308 | int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev, |
309 | enum hsr_port_type type) | ||
318 | { | 310 | { |
319 | struct net_device *slave; | 311 | struct hsr_port *port, *master; |
312 | int res; | ||
320 | 313 | ||
321 | slave = hsr->slave[idx]; | 314 | if (type != HSR_PT_MASTER) { |
322 | hsr->slave[idx] = NULL; | 315 | res = hsr_check_dev_ok(dev); |
316 | if (res) | ||
317 | return res; | ||
318 | } | ||
319 | |||
320 | port = hsr_port_get_hsr(hsr, type); | ||
321 | if (port != NULL) | ||
322 | return -EBUSY; /* This port already exists */ | ||
323 | |||
324 | port = kzalloc(sizeof(*port), GFP_KERNEL); | ||
325 | if (port == NULL) | ||
326 | return -ENOMEM; | ||
327 | |||
328 | if (type != HSR_PT_MASTER) { | ||
329 | res = hsr_portdev_setup(dev, port); | ||
330 | if (res) | ||
331 | goto fail_dev_setup; | ||
332 | } | ||
333 | |||
334 | port->hsr = hsr; | ||
335 | port->dev = dev; | ||
336 | port->type = type; | ||
337 | |||
338 | list_add_tail_rcu(&port->port_list, &hsr->ports); | ||
339 | synchronize_rcu(); | ||
340 | |||
341 | master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); | ||
342 | |||
343 | /* Set required header length */ | ||
344 | if (dev->hard_header_len + HSR_HLEN > master->dev->hard_header_len) | ||
345 | master->dev->hard_header_len = dev->hard_header_len + HSR_HLEN; | ||
323 | 346 | ||
324 | netdev_update_features(hsr->dev); | 347 | dev_set_mtu(master->dev, hsr_get_max_mtu(hsr)); |
325 | dev_set_mtu(hsr->dev, hsr_get_max_mtu(hsr)); | ||
326 | 348 | ||
327 | if (slave) { | 349 | return 0; |
328 | netdev_rx_handler_unregister(slave); | 350 | |
329 | dev_set_promiscuity(slave, -1); | 351 | fail_dev_setup: |
352 | kfree(port); | ||
353 | return res; | ||
354 | } | ||
355 | |||
356 | void hsr_del_port(struct hsr_port *port) | ||
357 | { | ||
358 | struct hsr_priv *hsr; | ||
359 | struct hsr_port *master; | ||
360 | |||
361 | hsr = port->hsr; | ||
362 | master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); | ||
363 | list_del_rcu(&port->port_list); | ||
364 | |||
365 | if (port != master) { | ||
366 | dev_set_mtu(master->dev, hsr_get_max_mtu(hsr)); | ||
367 | netdev_rx_handler_unregister(port->dev); | ||
368 | dev_set_promiscuity(port->dev, -1); | ||
330 | } | 369 | } |
331 | 370 | ||
371 | /* FIXME? | ||
372 | * netdev_upper_dev_unlink(port->dev, port->hsr->dev); | ||
373 | */ | ||
374 | |||
332 | synchronize_rcu(); | 375 | synchronize_rcu(); |
333 | dev_put(slave); | 376 | dev_put(port->dev); |
334 | } | 377 | } |
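The rewritten slave attachment above splits into hsr_portdev_setup(), which prepares the lower device (dev_hold(), promiscuous mode, rx handler), and hsr_add_port(), which allocates the struct hsr_port and publishes it on hsr->ports with list_add_tail_rcu(). Failures unwind through goto labels (fail_rx_handler, fail_promiscuity, fail_dev_setup) that undo only the steps already completed, in reverse order. The sketch below is a hedged, userspace-only illustration of that unwinding order, using hypothetical stub helpers and a simulated rx-handler failure; it is not the kernel code.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stub helpers standing in for the setup steps in
     * hsr_portdev_setup(); the rx-handler step is made to fail so the
     * unwinding path is exercised.
     */
    static void hold_device(void)         { puts("dev_hold"); }
    static void put_device(void)          { puts("dev_put"); }
    static int  set_promiscuity(void)     { puts("dev_set_promiscuity(+1)"); return 0; }
    static void clear_promiscuity(void)   { puts("dev_set_promiscuity(-1)"); }
    static int  register_rx_handler(void)
    {
        puts("netdev_rx_handler_register: simulated failure");
        return -1;
    }

    /* Mirrors the fail_rx_handler / fail_promiscuity unwinding order:
     * each label undoes only what was completed before the failure.
     */
    static int port_setup(void)
    {
        int res;

        hold_device();                    /* unconditional, like dev_hold() */

        res = set_promiscuity();
        if (res)
            goto fail_promiscuity;

        res = register_rx_handler();
        if (res)
            goto fail_rx_handler;

        return 0;

    fail_rx_handler:
        clear_promiscuity();
    fail_promiscuity:
        put_device();
        return res;
    }

    int main(void)
    {
        void *port = calloc(1, 64);       /* stands in for kzalloc() in hsr_add_port() */

        if (!port)
            return 1;

        if (port_setup()) {               /* mirrors the fail_dev_setup path */
            free(port);
            puts("hsr_add_port would return an error here");
            return 1;
        }

        free(port);
        return 0;
    }

Teardown runs the reverse ordering: hsr_del_port() removes the port with list_del_rcu(), undoes the MTU, rx-handler and promiscuity changes for slave ports, and only calls dev_put() after synchronize_rcu(), so readers still walking hsr->ports under rcu_read_lock() in hsr_handle_frame() cannot be left using a device whose reference has already been released.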
diff --git a/net/hsr/hsr_slave.h b/net/hsr/hsr_slave.h index 03c15fda39a8..3055022eddb3 100644 --- a/net/hsr/hsr_slave.h +++ b/net/hsr/hsr_slave.h | |||
@@ -14,10 +14,35 @@ | |||
14 | 14 | ||
15 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
16 | #include <linux/netdevice.h> | 16 | #include <linux/netdevice.h> |
17 | #include <linux/rtnetlink.h> | ||
17 | #include "hsr_main.h" | 18 | #include "hsr_main.h" |
18 | 19 | ||
19 | int hsr_add_slave(struct hsr_priv *hsr, struct net_device *dev, int idx); | 20 | int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev, |
20 | void hsr_del_slave(struct hsr_priv *hsr, int idx); | 21 | enum hsr_port_type pt); |
22 | void hsr_del_port(struct hsr_port *port); | ||
21 | rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb); | 23 | rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb); |
22 | 24 | ||
25 | |||
26 | #define hsr_for_each_port(hsr, port) \ | ||
27 | list_for_each_entry_rcu((port), &(hsr)->ports, port_list) | ||
28 | |||
29 | |||
30 | static inline bool hsr_port_exists(const struct net_device *dev) | ||
31 | { | ||
32 | return dev->rx_handler == hsr_handle_frame; | ||
33 | } | ||
34 | |||
35 | static inline struct hsr_port *hsr_port_get_rtnl(const struct net_device *dev) | ||
36 | { | ||
37 | ASSERT_RTNL(); | ||
38 | return hsr_port_exists(dev) ? | ||
39 | rtnl_dereference(dev->rx_handler_data) : NULL; | ||
40 | } | ||
41 | |||
42 | static inline struct hsr_port *hsr_port_get_rcu(const struct net_device *dev) | ||
43 | { | ||
44 | return hsr_port_exists(dev) ? | ||
45 | rcu_dereference(dev->rx_handler_data) : NULL; | ||
46 | } | ||
47 | |||
23 | #endif /* __HSR_SLAVE_H */ | 48 | #endif /* __HSR_SLAVE_H */ |
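The inline helpers added to hsr_slave.h key everything off the rx handler slot: hsr_port_exists() is true only when dev->rx_handler is hsr_handle_frame, and only then is dev->rx_handler_data dereferenced (via rtnl_dereference() or rcu_dereference()) as the struct hsr_port. The userspace sketch below illustrates that "the handler identifies the owner of the context" pattern; the struct net_device and handler types are simplified stand-ins, and the RTNL/RCU protection is intentionally omitted.

    #include <stdio.h>

    /* Hypothetical userspace analogue of hsr_port_exists() and
     * hsr_port_get_rcu(): the handler slot identifies the owner, and the
     * attached context is only handed out when the handler matches.  The
     * kernel additionally protects the dereference with RCU or the RTNL
     * lock, which is omitted here.
     */
    typedef int (*rx_handler_t)(void *ctx, const char *frame);

    struct net_device {
        const char   *name;
        rx_handler_t  rx_handler;        /* NULL, ours, or another user's */
        void         *rx_handler_data;   /* context owned by the registrant */
    };

    struct hsr_port {
        struct net_device *dev;
        int                type;
    };

    static int hsr_handle_frame(void *ctx, const char *frame)
    {
        struct hsr_port *port = ctx;

        printf("HSR frame on %s: %s\n", port->dev->name, frame);
        return 0;
    }

    static int hsr_port_exists(const struct net_device *dev)
    {
        return dev->rx_handler == hsr_handle_frame;
    }

    static struct hsr_port *hsr_port_get(const struct net_device *dev)
    {
        return hsr_port_exists(dev) ? dev->rx_handler_data : NULL;
    }

    int main(void)
    {
        struct net_device eth0 = { "eth0", NULL, NULL };
        struct hsr_port port = { &eth0, 0 };

        printf("before registration: %s\n",
               hsr_port_get(&eth0) ? "port found" : "no port");

        /* what netdev_rx_handler_register() does, in spirit */
        eth0.rx_handler = hsr_handle_frame;
        eth0.rx_handler_data = &port;

        printf("after registration: %s\n",
               hsr_port_get(&eth0) ? "port found" : "no port");

        return hsr_handle_frame(hsr_port_get(&eth0), "example payload");
    }

The benefit of the pattern is that a stale or foreign rx_handler_data pointer is never dereferenced: if another rx handler owns the device, or no handler is registered at all, the lookup simply returns NULL.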