author    | Arvid Brodin <arvid.brodin@alten.se>      | 2014-07-04 17:41:03 -0400
committer | David S. Miller <davem@davemloft.net>     | 2014-07-08 14:35:31 -0400
commit    | f266a683a4804dc499efc6c2206ef68efed029d0
tree      | 8af16a2a5dc8d101e0f1aa4681452a939179c957 /net/hsr/hsr_slave.c
parent    | 4c3477dca2fde1e3ab748387d736d40afe0df21d
net/hsr: Better frame dispatch
This patch removes the separate paths for frames coming from the outside and
frames sent from the HSR device, and instead makes all frames go through
hsr_forward_skb() in hsr_forward.c. This greatly improves code readability and
also opens up the possibility of future support for the HSR Interlink device
that is the basis for HSR RedBoxes and HSR QuadBoxes, as well as for VLAN
compatibility.
Other improvements:
* A reduction in the number of times an skb is copied on machines without
HAVE_EFFICIENT_UNALIGNED_ACCESS, which improves throughput somewhat.
* Headers are now created using the standard eth_header(), and using the
standard hard_header_len.
* Each HSR slave now gets its own private skb, so slave-specific fields can be
correctly set.
Signed-off-by: Arvid Brodin <arvid.brodin@alten.se>
Signed-off-by: David S. Miller <davem@davemloft.net>
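
The core of the change -- a single dispatch function that gives each slave its
own private skb -- is easier to picture with a condensed sketch. The code below
is only an illustration of that idea, not the actual hsr_forward_skb()
implementation (hsr_forward.c is not part of this diff). hsr_forward_sketch()
is a made-up name; hsr_port_get_hsr(), HSR_PT_SLAVE_A/HSR_PT_SLAVE_B and
struct hsr_port are real symbols used elsewhere in this patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include "hsr_main.h"

/* Illustration only: duplicate a frame once per outgoing slave, so that
 * slave-specific fields can be set on each copy independently.
 */
static void hsr_forward_sketch(struct sk_buff *skb, struct hsr_port *in_port)
{
	struct hsr_priv *hsr = in_port->hsr;
	struct hsr_port *ports[2];
	struct sk_buff *skb2;
	int i;

	ports[0] = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	ports[1] = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);

	for (i = 0; i < 2; i++) {
		if (!ports[i] || ports[i] == in_port)
			continue;	/* never echo a frame back out the port it came in on */

		skb2 = skb_copy(skb, GFP_ATOMIC);	/* private copy for this slave */
		if (!skb2)
			continue;

		skb2->dev = ports[i]->dev;
		dev_queue_xmit(skb2);
	}

	kfree_skb(skb);		/* the original is consumed by the dispatch path */
}

In the real code the private copy is where the slave-specific fields the
changelog mentions (at minimum skb->dev) can be set without disturbing the
copy sent on the other LAN.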
Diffstat (limited to 'net/hsr/hsr_slave.c')
-rw-r--r-- | net/hsr/hsr_slave.c | 287
1 file changed, 52 insertions(+), 235 deletions(-)
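
The "standard eth_header() and standard hard_header_len" bullet in the
changelog corresponds to the hunk at the end of this diff that stops inflating
master->dev->hard_header_len by HSR_HLEN. The fragment below is a rough,
assumption-laden sketch of what that standard header path looks like for the
master device; the relevant code lives in hsr_device.c/hsr_forward.c, which
are not part of this file's diff, and build_master_header_sketch() is a
hypothetical helper.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* With the standard eth_header_ops installed, dev_hard_header() resolves to
 * eth_header() and reserves exactly master->hard_header_len == ETH_HLEN
 * bytes; the HSR tag is no longer accounted for here but added later, when
 * hsr_forward_skb() dispatches the frame to the slaves.
 */
static int build_master_header_sketch(struct sk_buff *skb,
				      struct net_device *master,
				      const unsigned char *daddr,
				      unsigned int len)
{
	/* ETH_P_IP is just an example payload type */
	return dev_hard_header(skb, master, ETH_P_IP, daddr,
			       master->dev_addr, len);
}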
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index 23817d0b765b..a348dcbcd683 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -14,9 +14,51 @@
 #include <linux/if_arp.h>
 #include "hsr_main.h"
 #include "hsr_device.h"
+#include "hsr_forward.h"
 #include "hsr_framereg.h"
 
 
+static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
+{
+	struct sk_buff *skb = *pskb;
+	struct hsr_port *port;
+
+	if (!skb_mac_header_was_set(skb)) {
+		WARN_ONCE(1, "%s: skb invalid", __func__);
+		return RX_HANDLER_PASS;
+	}
+
+	rcu_read_lock(); /* hsr->node_db, hsr->ports */
+	port = hsr_port_get_rcu(skb->dev);
+
+	if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
+		/* Directly kill frames sent by ourselves */
+		kfree_skb(skb);
+		goto finish_consume;
+	}
+
+	if (eth_hdr(skb)->h_proto != htons(ETH_P_PRP))
+		goto finish_pass;
+
+	skb_push(skb, ETH_HLEN);
+
+	hsr_forward_skb(skb, port);
+
+finish_consume:
+	rcu_read_unlock(); /* hsr->node_db, hsr->ports */
+	return RX_HANDLER_CONSUMED;
+
+finish_pass:
+	rcu_read_unlock(); /* hsr->node_db, hsr->ports */
+	return RX_HANDLER_PASS;
+}
+
+bool hsr_port_exists(const struct net_device *dev)
+{
+	return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
+}
+
+
 static int hsr_check_dev_ok(struct net_device *dev)
 {
 	/* Don't allow HSR on non-ethernet like devices */
@@ -42,6 +84,11 @@ static int hsr_check_dev_ok(struct net_device *dev)
 		return -EINVAL;
 	}
 
+	if (dev->priv_flags & IFF_DONT_BRIDGE) {
+		netdev_info(dev, "This device does not support bridging.\n");
+		return -EOPNOTSUPP;
+	}
+
 	/* HSR over bonded devices has not been tested, but I'm not sure it
 	 * won't work...
 	 */
@@ -50,232 +97,6 @@ static int hsr_check_dev_ok(struct net_device *dev)
 }
 
 
-static struct sk_buff *hsr_pull_tag(struct sk_buff *skb)
-{
-	struct hsr_tag *hsr_tag;
-	struct sk_buff *skb2;
-
-	skb2 = skb_share_check(skb, GFP_ATOMIC);
-	if (unlikely(!skb2))
-		goto err_free;
-	skb = skb2;
-
-	if (unlikely(!pskb_may_pull(skb, HSR_HLEN)))
-		goto err_free;
-
-	hsr_tag = (struct hsr_tag *) skb->data;
-	skb->protocol = hsr_tag->encap_proto;
-	skb_pull(skb, HSR_HLEN);
-
-	return skb;
-
-err_free:
-	kfree_skb(skb);
-	return NULL;
-}
-
-
-/* The uses I can see for these HSR supervision frames are:
- * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
- *    22") to reset any sequence_nr counters belonging to that node. Useful if
- *    the other node's counter has been reset for some reason.
- *    --
- *    Or not - resetting the counter and bridging the frame would create a
- *    loop, unfortunately.
- *
- * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
- *    frame is received from a particular node, we know something is wrong.
- *    We just register these (as with normal frames) and throw them away.
- *
- * 3) Allow different MAC addresses for the two slave interfaces, using the
- *    MacAddressA field.
- */
-static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
-{
-	struct hsr_sup_tag *hsr_stag;
-
-	if (!ether_addr_equal(eth_hdr(skb)->h_dest,
-			      hsr->sup_multicast_addr))
-		return false;
-
-	hsr_stag = (struct hsr_sup_tag *) skb->data;
-	if (get_hsr_stag_path(hsr_stag) != 0x0f)
-		return false;
-	if ((hsr_stag->HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
-	    (hsr_stag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
-		return false;
-	if (hsr_stag->HSR_TLV_Length != 12)
-		return false;
-
-	return true;
-}
-
-
-/* Implementation somewhat according to IEC-62439-3, p. 43
- */
-rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
-{
-	struct sk_buff *skb = *pskb;
-	struct hsr_port *port, *other_port, *master;
-	struct hsr_priv *hsr;
-	struct hsr_node *node;
-	bool deliver_to_self;
-	struct sk_buff *skb_deliver;
-	bool dup_out;
-	int ret;
-
-	if (eth_hdr(skb)->h_proto != htons(ETH_P_PRP))
-		return RX_HANDLER_PASS;
-
-	rcu_read_lock(); /* ports & node */
-
-	port = hsr_port_get_rcu(skb->dev);
-	hsr = port->hsr;
-	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
-
-	node = hsr_find_node(&hsr->self_node_db, skb);
-	if (node) {
-		/* Always kill frames sent by ourselves */
-		kfree_skb(skb);
-		ret = RX_HANDLER_CONSUMED;
-		goto finish;
-	}
-
-	/* Is this frame a candidate for local reception? */
-	deliver_to_self = false;
-	if ((skb->pkt_type == PACKET_HOST) ||
-	    (skb->pkt_type == PACKET_MULTICAST) ||
-	    (skb->pkt_type == PACKET_BROADCAST))
-		deliver_to_self = true;
-	else if (ether_addr_equal(eth_hdr(skb)->h_dest,
-				  master->dev->dev_addr)) {
-		skb->pkt_type = PACKET_HOST;
-		deliver_to_self = true;
-	}
-
-	node = hsr_find_node(&hsr->node_db, skb);
-
-	if (is_supervision_frame(hsr, skb)) {
-		skb_pull(skb, sizeof(struct hsr_sup_tag));
-		node = hsr_merge_node(node, skb, port);
-		if (!node) {
-			kfree_skb(skb);
-			master->dev->stats.rx_dropped++;
-			ret = RX_HANDLER_CONSUMED;
-			goto finish;
-		}
-		skb_push(skb, sizeof(struct hsr_sup_tag));
-		deliver_to_self = false;
-	}
-
-	if (!node) {
-		/* Source node unknown; this might be a HSR frame from
-		 * another net (different multicast address). Ignore it.
-		 */
-		kfree_skb(skb);
-		ret = RX_HANDLER_CONSUMED;
-		goto finish;
-	}
-
-	if (port->type == HSR_PT_SLAVE_A)
-		other_port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
-	else
-		other_port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
-
-	/* Register ALL incoming frames as outgoing through the other interface.
-	 * This allows us to register frames as incoming only if they are valid
-	 * for the receiving interface, without using a specific counter for
-	 * incoming frames.
-	 */
-	if (other_port)
-		dup_out = hsr_register_frame_out(node, other_port, skb);
-	else
-		dup_out = 0;
-	if (!dup_out)
-		hsr_register_frame_in(node, port);
-
-	/* Forward this frame? */
-	if (dup_out || (skb->pkt_type == PACKET_HOST))
-		other_port = NULL;
-
-	if (hsr_register_frame_out(node, master, skb))
-		deliver_to_self = false;
-
-	if (!deliver_to_self && !other_port) {
-		kfree_skb(skb);
-		/* Circulated frame; silently remove it. */
-		ret = RX_HANDLER_CONSUMED;
-		goto finish;
-	}
-
-	skb_deliver = skb;
-	if (deliver_to_self && other_port) {
-		/* skb_clone() is not enough since we will strip the hsr tag
-		 * and do address substitution below
-		 */
-		skb_deliver = pskb_copy(skb, GFP_ATOMIC);
-		if (!skb_deliver) {
-			deliver_to_self = false;
-			master->dev->stats.rx_dropped++;
-		}
-	}
-
-	if (deliver_to_self) {
-		bool multicast_frame;
-
-		skb_deliver = hsr_pull_tag(skb_deliver);
-		if (!skb_deliver) {
-			master->dev->stats.rx_dropped++;
-			goto forward;
-		}
-#if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-		/* Move everything in the header that is after the HSR tag,
-		 * to work around alignment problems caused by the 6-byte HSR
-		 * tag. In practice, this removes/overwrites the HSR tag in
-		 * the header and restores a "standard" packet.
-		 */
-		memmove(skb_deliver->data - HSR_HLEN, skb_deliver->data,
-			skb_headlen(skb_deliver));
-
-		/* Adjust skb members so they correspond with the move above.
-		 * This cannot possibly underflow skb->data since hsr_pull_tag()
-		 * above succeeded.
-		 * At this point in the protocol stack, the transport and
-		 * network headers have not been set yet, and we haven't touched
-		 * the mac header nor the head. So we only need to adjust data
-		 * and tail:
-		 */
-		skb_deliver->data -= HSR_HLEN;
-		skb_deliver->tail -= HSR_HLEN;
-#endif
-		skb_deliver->dev = master->dev;
-		hsr_addr_subst_source(hsr, skb_deliver);
-		multicast_frame = (skb_deliver->pkt_type == PACKET_MULTICAST);
-		ret = netif_rx(skb_deliver);
-		if (ret == NET_RX_DROP) {
-			master->dev->stats.rx_dropped++;
-		} else {
-			master->dev->stats.rx_packets++;
-			master->dev->stats.rx_bytes += skb->len;
-			if (multicast_frame)
-				master->dev->stats.multicast++;
-		}
-	}
-
-forward:
-	if (other_port) {
-		skb_push(skb, ETH_HLEN);
-		skb->dev = other_port->dev;
-		dev_queue_xmit(skb);
-	}
-
-	ret = RX_HANDLER_CONSUMED;
-
-finish:
-	rcu_read_unlock();
-	return ret;
-}
-
 /* Setup device to be added to the HSR bridge. */
 static int hsr_portdev_setup(struct net_device *dev, struct hsr_port *port)
 {
@@ -285,16 +106,17 @@ static int hsr_portdev_setup(struct net_device *dev, struct hsr_port *port)
 	res = dev_set_promiscuity(dev, 1);
 	if (res)
 		goto fail_promiscuity;
-	res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
-	if (res)
-		goto fail_rx_handler;
-	dev_disable_lro(dev);
 
 	/* FIXME:
 	 * What does net device "adjacency" mean? Should we do
 	 * res = netdev_master_upper_dev_link(port->dev, port->hsr->dev); ?
 	 */
 
+	res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
+	if (res)
+		goto fail_rx_handler;
+	dev_disable_lro(dev);
+
 	return 0;
 
 fail_rx_handler:
@@ -339,11 +161,6 @@ int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
 	synchronize_rcu();
 
 	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
-
-	/* Set required header length */
-	if (dev->hard_header_len + HSR_HLEN > master->dev->hard_header_len)
-		master->dev->hard_header_len = dev->hard_header_len + HSR_HLEN;
-
 	netdev_update_features(master->dev);
 	dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
 
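
The new hsr_handle_frame() is wired into the stack through the rx_handler
hook, which is also what hsr_port_exists() checks: a net_device can have at
most one rx_handler, so rcu_access_pointer(dev->rx_handler) identifies whether
a device is already an HSR slave. The fragment below is a generic, stand-alone
illustration of that contract, not HSR code; the demo_* names are hypothetical.
RX_HANDLER_CONSUMED tells __netif_receive_skb() that the handler has taken
ownership of the skb (forwarded or freed it), while RX_HANDLER_PASS hands the
frame back to normal protocol processing.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static rx_handler_result_t demo_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	/* Same sanity check as hsr_handle_frame(): without a MAC header
	 * there is nothing for us to look at, so let the stack have it.
	 */
	if (!skb_mac_header_was_set(skb))
		return RX_HANDLER_PASS;

	/* Per-device state registered below; rx_handlers run under
	 * rcu_read_lock(), so rcu_dereference() is safe here.
	 */
	if (!rcu_dereference(skb->dev->rx_handler_data))
		return RX_HANDLER_PASS;

	kfree_skb(skb);		/* we own the skb now; this demo simply drops it */
	return RX_HANDLER_CONSUMED;
}

/* Must be called under rtnl_lock(); fails with -EBUSY if another handler
 * (bridge, bonding, HSR, ...) is already attached to the device.
 */
static int demo_attach(struct net_device *dev, void *priv)
{
	return netdev_rx_handler_register(dev, demo_rx_handler, priv);
}

static void demo_detach(struct net_device *dev)
{
	netdev_rx_handler_unregister(dev);	/* also under rtnl_lock() */
}

In this patch, hsr_portdev_setup() performs exactly this registration, passing
hsr_handle_frame and the port as the handler data; on the receive path,
hsr_port_get_rcu() presumably recovers that port from skb->dev (its definition
is outside this file's diff).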