diff options
author | Arvid Brodin <arvid.brodin@alten.se> | 2014-07-04 17:34:38 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-07-08 14:35:30 -0400 |
commit | 70ebe4a47185db15f3c55be9611a1a971237870b (patch) | |
tree | 53e0ba561d76df8ff281db8211f8e6d4792478cb /net/hsr/hsr_main.c | |
parent | b8125404c242a6336eacaa54047b27cfd3fee68e (diff) |
net/hsr: Better variable names and update of contact info.
Signed-off-by: Arvid Brodin <arvid.brodin@alten.se>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/hsr/hsr_main.c')
-rw-r--r-- | net/hsr/hsr_main.c | 170 |
1 file changed, 85 insertions(+), 85 deletions(-)
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index 3fee5218a691..99b8fc4eca6c 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -1,4 +1,4 @@ | |||
1 | /* Copyright 2011-2013 Autronica Fire and Security AS | 1 | /* Copyright 2011-2014 Autronica Fire and Security AS |
2 | * | 2 | * |
3 | * This program is free software; you can redistribute it and/or modify it | 3 | * This program is free software; you can redistribute it and/or modify it |
4 | * under the terms of the GNU General Public License as published by the Free | 4 | * under the terms of the GNU General Public License as published by the Free |
@@ -6,7 +6,7 @@ | |||
6 | * any later version. | 6 | * any later version. |
7 | * | 7 | * |
8 | * Author(s): | 8 | * Author(s): |
9 | * 2011-2013 Arvid Brodin, arvid.brodin@xdin.com | 9 | * 2011-2014 Arvid Brodin, arvid.brodin@alten.se |
10 | * | 10 | * |
11 | * In addition to routines for registering and unregistering HSR support, this | 11 | * In addition to routines for registering and unregistering HSR support, this |
12 | * file also contains the receive routine that handles all incoming frames with | 12 | * file also contains the receive routine that handles all incoming frames with |
@@ -26,30 +26,30 @@ | |||
26 | /* List of all registered virtual HSR devices */ | 26 | /* List of all registered virtual HSR devices */ |
27 | static LIST_HEAD(hsr_list); | 27 | static LIST_HEAD(hsr_list); |
28 | 28 | ||
29 | void register_hsr_master(struct hsr_priv *hsr_priv) | 29 | void register_hsr_master(struct hsr_priv *hsr) |
30 | { | 30 | { |
31 | list_add_tail_rcu(&hsr_priv->hsr_list, &hsr_list); | 31 | list_add_tail_rcu(&hsr->hsr_list, &hsr_list); |
32 | } | 32 | } |
33 | 33 | ||
34 | void unregister_hsr_master(struct hsr_priv *hsr_priv) | 34 | void unregister_hsr_master(struct hsr_priv *hsr) |
35 | { | 35 | { |
36 | struct hsr_priv *hsr_priv_it; | 36 | struct hsr_priv *hsr_it; |
37 | 37 | ||
38 | list_for_each_entry(hsr_priv_it, &hsr_list, hsr_list) | 38 | list_for_each_entry(hsr_it, &hsr_list, hsr_list) |
39 | if (hsr_priv_it == hsr_priv) { | 39 | if (hsr_it == hsr) { |
40 | list_del_rcu(&hsr_priv_it->hsr_list); | 40 | list_del_rcu(&hsr_it->hsr_list); |
41 | return; | 41 | return; |
42 | } | 42 | } |
43 | } | 43 | } |
44 | 44 | ||
45 | bool is_hsr_slave(struct net_device *dev) | 45 | bool is_hsr_slave(struct net_device *dev) |
46 | { | 46 | { |
47 | struct hsr_priv *hsr_priv_it; | 47 | struct hsr_priv *hsr_it; |
48 | 48 | ||
49 | list_for_each_entry_rcu(hsr_priv_it, &hsr_list, hsr_list) { | 49 | list_for_each_entry_rcu(hsr_it, &hsr_list, hsr_list) { |
50 | if (dev == hsr_priv_it->slave[0]) | 50 | if (dev == hsr_it->slave[0]) |
51 | return true; | 51 | return true; |
52 | if (dev == hsr_priv_it->slave[1]) | 52 | if (dev == hsr_it->slave[1]) |
53 | return true; | 53 | return true; |
54 | } | 54 | } |
55 | 55 | ||
@@ -62,14 +62,14 @@ bool is_hsr_slave(struct net_device *dev) | |||
62 | */ | 62 | */ |
63 | static struct hsr_priv *get_hsr_master(struct net_device *dev) | 63 | static struct hsr_priv *get_hsr_master(struct net_device *dev) |
64 | { | 64 | { |
65 | struct hsr_priv *hsr_priv; | 65 | struct hsr_priv *hsr; |
66 | 66 | ||
67 | rcu_read_lock(); | 67 | rcu_read_lock(); |
68 | list_for_each_entry_rcu(hsr_priv, &hsr_list, hsr_list) | 68 | list_for_each_entry_rcu(hsr, &hsr_list, hsr_list) |
69 | if ((dev == hsr_priv->slave[0]) || | 69 | if ((dev == hsr->slave[0]) || |
70 | (dev == hsr_priv->slave[1])) { | 70 | (dev == hsr->slave[1])) { |
71 | rcu_read_unlock(); | 71 | rcu_read_unlock(); |
72 | return hsr_priv; | 72 | return hsr; |
73 | } | 73 | } |
74 | 74 | ||
75 | rcu_read_unlock(); | 75 | rcu_read_unlock(); |
@@ -80,13 +80,13 @@ static struct hsr_priv *get_hsr_master(struct net_device *dev) | |||
80 | /* If dev is a HSR slave device, return the other slave device. Return NULL | 80 | /* If dev is a HSR slave device, return the other slave device. Return NULL |
81 | * otherwise. | 81 | * otherwise. |
82 | */ | 82 | */ |
83 | static struct net_device *get_other_slave(struct hsr_priv *hsr_priv, | 83 | static struct net_device *get_other_slave(struct hsr_priv *hsr, |
84 | struct net_device *dev) | 84 | struct net_device *dev) |
85 | { | 85 | { |
86 | if (dev == hsr_priv->slave[0]) | 86 | if (dev == hsr->slave[0]) |
87 | return hsr_priv->slave[1]; | 87 | return hsr->slave[1]; |
88 | if (dev == hsr_priv->slave[1]) | 88 | if (dev == hsr->slave[1]) |
89 | return hsr_priv->slave[0]; | 89 | return hsr->slave[0]; |
90 | 90 | ||
91 | return NULL; | 91 | return NULL; |
92 | } | 92 | } |
@@ -96,7 +96,7 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, | |||
96 | void *ptr) | 96 | void *ptr) |
97 | { | 97 | { |
98 | struct net_device *slave, *other_slave; | 98 | struct net_device *slave, *other_slave; |
99 | struct hsr_priv *hsr_priv; | 99 | struct hsr_priv *hsr; |
100 | int old_operstate; | 100 | int old_operstate; |
101 | int mtu_max; | 101 | int mtu_max; |
102 | int res; | 102 | int res; |
@@ -104,68 +104,68 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, | |||
104 | 104 | ||
105 | dev = netdev_notifier_info_to_dev(ptr); | 105 | dev = netdev_notifier_info_to_dev(ptr); |
106 | 106 | ||
107 | hsr_priv = get_hsr_master(dev); | 107 | hsr = get_hsr_master(dev); |
108 | if (hsr_priv) { | 108 | if (hsr) { |
109 | /* dev is a slave device */ | 109 | /* dev is a slave device */ |
110 | slave = dev; | 110 | slave = dev; |
111 | other_slave = get_other_slave(hsr_priv, slave); | 111 | other_slave = get_other_slave(hsr, slave); |
112 | } else { | 112 | } else { |
113 | if (!is_hsr_master(dev)) | 113 | if (!is_hsr_master(dev)) |
114 | return NOTIFY_DONE; | 114 | return NOTIFY_DONE; |
115 | hsr_priv = netdev_priv(dev); | 115 | hsr = netdev_priv(dev); |
116 | slave = hsr_priv->slave[0]; | 116 | slave = hsr->slave[0]; |
117 | other_slave = hsr_priv->slave[1]; | 117 | other_slave = hsr->slave[1]; |
118 | } | 118 | } |
119 | 119 | ||
120 | switch (event) { | 120 | switch (event) { |
121 | case NETDEV_UP: /* Administrative state DOWN */ | 121 | case NETDEV_UP: /* Administrative state DOWN */ |
122 | case NETDEV_DOWN: /* Administrative state UP */ | 122 | case NETDEV_DOWN: /* Administrative state UP */ |
123 | case NETDEV_CHANGE: /* Link (carrier) state changes */ | 123 | case NETDEV_CHANGE: /* Link (carrier) state changes */ |
124 | old_operstate = hsr_priv->dev->operstate; | 124 | old_operstate = hsr->dev->operstate; |
125 | hsr_set_carrier(hsr_priv->dev, slave, other_slave); | 125 | hsr_set_carrier(hsr->dev, slave, other_slave); |
126 | /* netif_stacked_transfer_operstate() cannot be used here since | 126 | /* netif_stacked_transfer_operstate() cannot be used here since |
127 | * it doesn't set IF_OPER_LOWERLAYERDOWN (?) | 127 | * it doesn't set IF_OPER_LOWERLAYERDOWN (?) |
128 | */ | 128 | */ |
129 | hsr_set_operstate(hsr_priv->dev, slave, other_slave); | 129 | hsr_set_operstate(hsr->dev, slave, other_slave); |
130 | hsr_check_announce(hsr_priv->dev, old_operstate); | 130 | hsr_check_announce(hsr->dev, old_operstate); |
131 | break; | 131 | break; |
132 | case NETDEV_CHANGEADDR: | 132 | case NETDEV_CHANGEADDR: |
133 | 133 | ||
134 | /* This should not happen since there's no ndo_set_mac_address() | 134 | /* This should not happen since there's no ndo_set_mac_address() |
135 | * for HSR devices - i.e. not supported. | 135 | * for HSR devices - i.e. not supported. |
136 | */ | 136 | */ |
137 | if (dev == hsr_priv->dev) | 137 | if (dev == hsr->dev) |
138 | break; | 138 | break; |
139 | 139 | ||
140 | if (dev == hsr_priv->slave[0]) | 140 | if (dev == hsr->slave[0]) |
141 | ether_addr_copy(hsr_priv->dev->dev_addr, | 141 | ether_addr_copy(hsr->dev->dev_addr, |
142 | hsr_priv->slave[0]->dev_addr); | 142 | hsr->slave[0]->dev_addr); |
143 | 143 | ||
144 | /* Make sure we recognize frames from ourselves in hsr_rcv() */ | 144 | /* Make sure we recognize frames from ourselves in hsr_rcv() */ |
145 | res = hsr_create_self_node(&hsr_priv->self_node_db, | 145 | res = hsr_create_self_node(&hsr->self_node_db, |
146 | hsr_priv->dev->dev_addr, | 146 | hsr->dev->dev_addr, |
147 | hsr_priv->slave[1] ? | 147 | hsr->slave[1] ? |
148 | hsr_priv->slave[1]->dev_addr : | 148 | hsr->slave[1]->dev_addr : |
149 | hsr_priv->dev->dev_addr); | 149 | hsr->dev->dev_addr); |
150 | if (res) | 150 | if (res) |
151 | netdev_warn(hsr_priv->dev, | 151 | netdev_warn(hsr->dev, |
152 | "Could not update HSR node address.\n"); | 152 | "Could not update HSR node address.\n"); |
153 | 153 | ||
154 | if (dev == hsr_priv->slave[0]) | 154 | if (dev == hsr->slave[0]) |
155 | call_netdevice_notifiers(NETDEV_CHANGEADDR, hsr_priv->dev); | 155 | call_netdevice_notifiers(NETDEV_CHANGEADDR, hsr->dev); |
156 | break; | 156 | break; |
157 | case NETDEV_CHANGEMTU: | 157 | case NETDEV_CHANGEMTU: |
158 | if (dev == hsr_priv->dev) | 158 | if (dev == hsr->dev) |
159 | break; /* Handled in ndo_change_mtu() */ | 159 | break; /* Handled in ndo_change_mtu() */ |
160 | mtu_max = hsr_get_max_mtu(hsr_priv); | 160 | mtu_max = hsr_get_max_mtu(hsr); |
161 | if (hsr_priv->dev->mtu > mtu_max) | 161 | if (hsr->dev->mtu > mtu_max) |
162 | dev_set_mtu(hsr_priv->dev, mtu_max); | 162 | dev_set_mtu(hsr->dev, mtu_max); |
163 | break; | 163 | break; |
164 | case NETDEV_UNREGISTER: | 164 | case NETDEV_UNREGISTER: |
165 | if (dev == hsr_priv->slave[0]) | 165 | if (dev == hsr->slave[0]) |
166 | hsr_priv->slave[0] = NULL; | 166 | hsr->slave[0] = NULL; |
167 | if (dev == hsr_priv->slave[1]) | 167 | if (dev == hsr->slave[1]) |
168 | hsr_priv->slave[1] = NULL; | 168 | hsr->slave[1] = NULL; |
169 | 169 | ||
170 | /* There should really be a way to set a new slave device... */ | 170 | /* There should really be a way to set a new slave device... */ |
171 | 171 | ||
@@ -185,11 +185,11 @@ static struct timer_list prune_timer; | |||
185 | 185 | ||
186 | static void prune_nodes_all(unsigned long data) | 186 | static void prune_nodes_all(unsigned long data) |
187 | { | 187 | { |
188 | struct hsr_priv *hsr_priv; | 188 | struct hsr_priv *hsr; |
189 | 189 | ||
190 | rcu_read_lock(); | 190 | rcu_read_lock(); |
191 | list_for_each_entry_rcu(hsr_priv, &hsr_list, hsr_list) | 191 | list_for_each_entry_rcu(hsr, &hsr_list, hsr_list) |
192 | hsr_prune_nodes(hsr_priv); | 192 | hsr_prune_nodes(hsr); |
193 | rcu_read_unlock(); | 193 | rcu_read_unlock(); |
194 | 194 | ||
195 | prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD); | 195 | prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD); |
@@ -207,12 +207,12 @@ static struct sk_buff *hsr_pull_tag(struct sk_buff *skb) | |||
207 | goto err_free; | 207 | goto err_free; |
208 | skb = skb2; | 208 | skb = skb2; |
209 | 209 | ||
210 | if (unlikely(!pskb_may_pull(skb, HSR_TAGLEN))) | 210 | if (unlikely(!pskb_may_pull(skb, HSR_HLEN))) |
211 | goto err_free; | 211 | goto err_free; |
212 | 212 | ||
213 | hsr_tag = (struct hsr_tag *) skb->data; | 213 | hsr_tag = (struct hsr_tag *) skb->data; |
214 | skb->protocol = hsr_tag->encap_proto; | 214 | skb->protocol = hsr_tag->encap_proto; |
215 | skb_pull(skb, HSR_TAGLEN); | 215 | skb_pull(skb, HSR_HLEN); |
216 | 216 | ||
217 | return skb; | 217 | return skb; |
218 | 218 | ||
@@ -237,12 +237,12 @@ err_free: | |||
237 | * 3) Allow different MAC addresses for the two slave interfaces, using the | 237 | * 3) Allow different MAC addresses for the two slave interfaces, using the |
238 | * MacAddressA field. | 238 | * MacAddressA field. |
239 | */ | 239 | */ |
240 | static bool is_supervision_frame(struct hsr_priv *hsr_priv, struct sk_buff *skb) | 240 | static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb) |
241 | { | 241 | { |
242 | struct hsr_sup_tag *hsr_stag; | 242 | struct hsr_sup_tag *hsr_stag; |
243 | 243 | ||
244 | if (!ether_addr_equal(eth_hdr(skb)->h_dest, | 244 | if (!ether_addr_equal(eth_hdr(skb)->h_dest, |
245 | hsr_priv->sup_multicast_addr)) | 245 | hsr->sup_multicast_addr)) |
246 | return false; | 246 | return false; |
247 | 247 | ||
248 | hsr_stag = (struct hsr_sup_tag *) skb->data; | 248 | hsr_stag = (struct hsr_sup_tag *) skb->data; |
@@ -263,25 +263,25 @@ static bool is_supervision_frame(struct hsr_priv *hsr_priv, struct sk_buff *skb) | |||
263 | static int hsr_rcv(struct sk_buff *skb, struct net_device *dev, | 263 | static int hsr_rcv(struct sk_buff *skb, struct net_device *dev, |
264 | struct packet_type *pt, struct net_device *orig_dev) | 264 | struct packet_type *pt, struct net_device *orig_dev) |
265 | { | 265 | { |
266 | struct hsr_priv *hsr_priv; | 266 | struct hsr_priv *hsr; |
267 | struct net_device *other_slave; | 267 | struct net_device *other_slave; |
268 | struct node_entry *node; | 268 | struct hsr_node *node; |
269 | bool deliver_to_self; | 269 | bool deliver_to_self; |
270 | struct sk_buff *skb_deliver; | 270 | struct sk_buff *skb_deliver; |
271 | enum hsr_dev_idx dev_in_idx, dev_other_idx; | 271 | enum hsr_dev_idx dev_in_idx, dev_other_idx; |
272 | bool dup_out; | 272 | bool dup_out; |
273 | int ret; | 273 | int ret; |
274 | 274 | ||
275 | hsr_priv = get_hsr_master(dev); | 275 | hsr = get_hsr_master(dev); |
276 | 276 | ||
277 | if (!hsr_priv) { | 277 | if (!hsr) { |
278 | /* Non-HSR-slave device 'dev' is connected to a HSR network */ | 278 | /* Non-HSR-slave device 'dev' is connected to a HSR network */ |
279 | kfree_skb(skb); | 279 | kfree_skb(skb); |
280 | dev->stats.rx_errors++; | 280 | dev->stats.rx_errors++; |
281 | return NET_RX_SUCCESS; | 281 | return NET_RX_SUCCESS; |
282 | } | 282 | } |
283 | 283 | ||
284 | if (dev == hsr_priv->slave[0]) { | 284 | if (dev == hsr->slave[0]) { |
285 | dev_in_idx = HSR_DEV_SLAVE_A; | 285 | dev_in_idx = HSR_DEV_SLAVE_A; |
286 | dev_other_idx = HSR_DEV_SLAVE_B; | 286 | dev_other_idx = HSR_DEV_SLAVE_B; |
287 | } else { | 287 | } else { |
@@ -289,7 +289,7 @@ static int hsr_rcv(struct sk_buff *skb, struct net_device *dev, | |||
289 | dev_other_idx = HSR_DEV_SLAVE_A; | 289 | dev_other_idx = HSR_DEV_SLAVE_A; |
290 | } | 290 | } |
291 | 291 | ||
292 | node = hsr_find_node(&hsr_priv->self_node_db, skb); | 292 | node = hsr_find_node(&hsr->self_node_db, skb); |
293 | if (node) { | 293 | if (node) { |
294 | /* Always kill frames sent by ourselves */ | 294 | /* Always kill frames sent by ourselves */ |
295 | kfree_skb(skb); | 295 | kfree_skb(skb); |
@@ -303,22 +303,22 @@ static int hsr_rcv(struct sk_buff *skb, struct net_device *dev, | |||
303 | (skb->pkt_type == PACKET_BROADCAST)) | 303 | (skb->pkt_type == PACKET_BROADCAST)) |
304 | deliver_to_self = true; | 304 | deliver_to_self = true; |
305 | else if (ether_addr_equal(eth_hdr(skb)->h_dest, | 305 | else if (ether_addr_equal(eth_hdr(skb)->h_dest, |
306 | hsr_priv->dev->dev_addr)) { | 306 | hsr->dev->dev_addr)) { |
307 | skb->pkt_type = PACKET_HOST; | 307 | skb->pkt_type = PACKET_HOST; |
308 | deliver_to_self = true; | 308 | deliver_to_self = true; |
309 | } | 309 | } |
310 | 310 | ||
311 | 311 | ||
312 | rcu_read_lock(); /* node_db */ | 312 | rcu_read_lock(); /* node_db */ |
313 | node = hsr_find_node(&hsr_priv->node_db, skb); | 313 | node = hsr_find_node(&hsr->node_db, skb); |
314 | 314 | ||
315 | if (is_supervision_frame(hsr_priv, skb)) { | 315 | if (is_supervision_frame(hsr, skb)) { |
316 | skb_pull(skb, sizeof(struct hsr_sup_tag)); | 316 | skb_pull(skb, sizeof(struct hsr_sup_tag)); |
317 | node = hsr_merge_node(hsr_priv, node, skb, dev_in_idx); | 317 | node = hsr_merge_node(hsr, node, skb, dev_in_idx); |
318 | if (!node) { | 318 | if (!node) { |
319 | rcu_read_unlock(); /* node_db */ | 319 | rcu_read_unlock(); /* node_db */ |
320 | kfree_skb(skb); | 320 | kfree_skb(skb); |
321 | hsr_priv->dev->stats.rx_dropped++; | 321 | hsr->dev->stats.rx_dropped++; |
322 | return NET_RX_DROP; | 322 | return NET_RX_DROP; |
323 | } | 323 | } |
324 | skb_push(skb, sizeof(struct hsr_sup_tag)); | 324 | skb_push(skb, sizeof(struct hsr_sup_tag)); |
@@ -345,7 +345,7 @@ static int hsr_rcv(struct sk_buff *skb, struct net_device *dev, | |||
345 | 345 | ||
346 | /* Forward this frame? */ | 346 | /* Forward this frame? */ |
347 | if (!dup_out && (skb->pkt_type != PACKET_HOST)) | 347 | if (!dup_out && (skb->pkt_type != PACKET_HOST)) |
348 | other_slave = get_other_slave(hsr_priv, dev); | 348 | other_slave = get_other_slave(hsr, dev); |
349 | else | 349 | else |
350 | other_slave = NULL; | 350 | other_slave = NULL; |
351 | 351 | ||
@@ -368,7 +368,7 @@ static int hsr_rcv(struct sk_buff *skb, struct net_device *dev, | |||
368 | skb_deliver = pskb_copy(skb, GFP_ATOMIC); | 368 | skb_deliver = pskb_copy(skb, GFP_ATOMIC); |
369 | if (!skb_deliver) { | 369 | if (!skb_deliver) { |
370 | deliver_to_self = false; | 370 | deliver_to_self = false; |
371 | hsr_priv->dev->stats.rx_dropped++; | 371 | hsr->dev->stats.rx_dropped++; |
372 | } | 372 | } |
373 | } | 373 | } |
374 | 374 | ||
@@ -377,7 +377,7 @@ static int hsr_rcv(struct sk_buff *skb, struct net_device *dev, | |||
377 | 377 | ||
378 | skb_deliver = hsr_pull_tag(skb_deliver); | 378 | skb_deliver = hsr_pull_tag(skb_deliver); |
379 | if (!skb_deliver) { | 379 | if (!skb_deliver) { |
380 | hsr_priv->dev->stats.rx_dropped++; | 380 | hsr->dev->stats.rx_dropped++; |
381 | goto forward; | 381 | goto forward; |
382 | } | 382 | } |
383 | #if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | 383 | #if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) |
@@ -386,7 +386,7 @@ static int hsr_rcv(struct sk_buff *skb, struct net_device *dev, | |||
386 | * tag. In practice, this removes/overwrites the HSR tag in | 386 | * tag. In practice, this removes/overwrites the HSR tag in |
387 | * the header and restores a "standard" packet. | 387 | * the header and restores a "standard" packet. |
388 | */ | 388 | */ |
389 | memmove(skb_deliver->data - HSR_TAGLEN, skb_deliver->data, | 389 | memmove(skb_deliver->data - HSR_HLEN, skb_deliver->data, |
390 | skb_headlen(skb_deliver)); | 390 | skb_headlen(skb_deliver)); |
391 | 391 | ||
392 | /* Adjust skb members so they correspond with the move above. | 392 | /* Adjust skb members so they correspond with the move above. |
@@ -397,20 +397,20 @@ static int hsr_rcv(struct sk_buff *skb, struct net_device *dev, | |||
397 | * the mac header nor the head. So we only need to adjust data | 397 | * the mac header nor the head. So we only need to adjust data |
398 | * and tail: | 398 | * and tail: |
399 | */ | 399 | */ |
400 | skb_deliver->data -= HSR_TAGLEN; | 400 | skb_deliver->data -= HSR_HLEN; |
401 | skb_deliver->tail -= HSR_TAGLEN; | 401 | skb_deliver->tail -= HSR_HLEN; |
402 | #endif | 402 | #endif |
403 | skb_deliver->dev = hsr_priv->dev; | 403 | skb_deliver->dev = hsr->dev; |
404 | hsr_addr_subst_source(hsr_priv, skb_deliver); | 404 | hsr_addr_subst_source(hsr, skb_deliver); |
405 | multicast_frame = (skb_deliver->pkt_type == PACKET_MULTICAST); | 405 | multicast_frame = (skb_deliver->pkt_type == PACKET_MULTICAST); |
406 | ret = netif_rx(skb_deliver); | 406 | ret = netif_rx(skb_deliver); |
407 | if (ret == NET_RX_DROP) { | 407 | if (ret == NET_RX_DROP) { |
408 | hsr_priv->dev->stats.rx_dropped++; | 408 | hsr->dev->stats.rx_dropped++; |
409 | } else { | 409 | } else { |
410 | hsr_priv->dev->stats.rx_packets++; | 410 | hsr->dev->stats.rx_packets++; |
411 | hsr_priv->dev->stats.rx_bytes += skb->len; | 411 | hsr->dev->stats.rx_bytes += skb->len; |
412 | if (multicast_frame) | 412 | if (multicast_frame) |
413 | hsr_priv->dev->stats.multicast++; | 413 | hsr->dev->stats.multicast++; |
414 | } | 414 | } |
415 | } | 415 | } |
416 | 416 | ||
@@ -439,7 +439,7 @@ static int __init hsr_init(void) | |||
439 | { | 439 | { |
440 | int res; | 440 | int res; |
441 | 441 | ||
442 | BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_TAGLEN); | 442 | BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_HLEN); |
443 | 443 | ||
444 | dev_add_pack(&hsr_pt); | 444 | dev_add_pack(&hsr_pt); |
445 | 445 | ||