diff options
Diffstat (limited to 'net/bridge/br_forward.c')
-rw-r--r-- | net/bridge/br_forward.c | 167 |
1 file changed, 140 insertions, 27 deletions
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index bc1704ac6cd9..7a241c396981 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c | |||
@@ -11,6 +11,8 @@ | |||
11 | * 2 of the License, or (at your option) any later version. | 11 | * 2 of the License, or (at your option) any later version. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/slab.h> | ||
14 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
15 | #include <linux/netdevice.h> | 17 | #include <linux/netdevice.h> |
16 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
@@ -18,6 +20,11 @@ | |||
18 | #include <linux/netfilter_bridge.h> | 20 | #include <linux/netfilter_bridge.h> |
19 | #include "br_private.h" | 21 | #include "br_private.h" |
20 | 22 | ||
23 | static int deliver_clone(const struct net_bridge_port *prev, | ||
24 | struct sk_buff *skb, | ||
25 | void (*__packet_hook)(const struct net_bridge_port *p, | ||
26 | struct sk_buff *skb)); | ||
27 | |||
21 | /* Don't forward packets to originating port or forwarding disabled */ | 28 | /* Don't forward packets to originating port or forwarding disabled */ |
22 | static inline int should_deliver(const struct net_bridge_port *p, | 29 | static inline int should_deliver(const struct net_bridge_port *p, |
23 | const struct sk_buff *skb) | 30 | const struct sk_buff *skb) |
@@ -93,61 +100,167 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) | |||
93 | } | 100 | } |
94 | 101 | ||
95 | /* called with rcu_read_lock */ | 102 | /* called with rcu_read_lock */ |
96 | void br_forward(const struct net_bridge_port *to, struct sk_buff *skb) | 103 | void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0) |
97 | { | 104 | { |
98 | if (should_deliver(to, skb)) { | 105 | if (should_deliver(to, skb)) { |
99 | __br_forward(to, skb); | 106 | if (skb0) |
107 | deliver_clone(to, skb, __br_forward); | ||
108 | else | ||
109 | __br_forward(to, skb); | ||
100 | return; | 110 | return; |
101 | } | 111 | } |
102 | 112 | ||
103 | kfree_skb(skb); | 113 | if (!skb0) |
114 | kfree_skb(skb); | ||
104 | } | 115 | } |
105 | 116 | ||
106 | /* called under bridge lock */ | 117 | static int deliver_clone(const struct net_bridge_port *prev, |
107 | static void br_flood(struct net_bridge *br, struct sk_buff *skb, | 118 | struct sk_buff *skb, |
119 | void (*__packet_hook)(const struct net_bridge_port *p, | ||
120 | struct sk_buff *skb)) | ||
121 | { | ||
122 | skb = skb_clone(skb, GFP_ATOMIC); | ||
123 | if (!skb) { | ||
124 | struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; | ||
125 | |||
126 | dev->stats.tx_dropped++; | ||
127 | return -ENOMEM; | ||
128 | } | ||
129 | |||
130 | __packet_hook(prev, skb); | ||
131 | return 0; | ||
132 | } | ||
133 | |||
/*
 * Deferred-delivery helper: if p should receive skb, flush the
 * previously remembered port (prev) with a clone and remember p as the
 * new pending port.  Returns the new pending port, prev unchanged when
 * p is skipped, or an ERR_PTR on clone failure.
 */
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	/* Port doesn't want this packet: keep the current pending port. */
	if (!should_deliver(p, skb))
		return prev;

	/* Send a clone to the port deferred last round, preserving the
	 * original skb for the final transmit. */
	if (prev) {
		int err = deliver_clone(prev, skb, __packet_hook);

		if (err)
			return ERR_PTR(err);
	}

	/* p becomes the new deferred port. */
	return p;
}
155 | |||
/* called under bridge lock */
/*
 * Flood skb out of every eligible bridge port.
 *
 * skb0, when non-NULL, signals that the caller still needs the original
 * skb: the last transmit then goes through deliver_clone() and skb is
 * never freed here.  With skb0 == NULL this function consumes skb.
 *
 * Delivery is deferred one port at a time via maybe_deliver() so that
 * every port except the last receives a clone while the final port can
 * be handed skb itself (avoiding one clone in the common path).
 */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
		     struct sk_buff *skb0,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb))
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;	/* last port that passed should_deliver() */

	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* Clones to the previously remembered port (if any) and
		 * remembers p; yields ERR_PTR if a clone allocation fails. */
		prev = maybe_deliver(prev, p, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;
	}

	/* No port wanted the packet. */
	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);	/* caller keeps skb */
	else
		__packet_hook(prev, skb);			/* hand over skb itself */
	return;

out:
	/* Only free skb when the caller has not retained it via skb0. */
	if (!skb0)
		kfree_skb(skb);
}
141 | 186 | ||
142 | 187 | ||
/* called with rcu_read_lock */
/* Flood skb to all ports using the deliver hook; consumes skb
 * (skb0 == NULL, so br_flood frees it if nothing is sent). */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
{
	br_flood(br, skb, NULL, __br_deliver);
}
148 | 193 | ||
/* called under bridge lock */
/* Flood skb to all ports using the forward hook.  A non-NULL skb2
 * tells br_flood the caller keeps the original skb, so only clones
 * are transmitted and skb is never freed here. */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2)
{
	br_flood(br, skb, skb2, __br_forward);
}
200 | |||
201 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | ||
/* called with rcu_read_lock */
/*
 * Flood skb to the union of the multicast group's member ports
 * (mdst->ports) and the router port list (br->router_list),
 * delivering to each port at most once.
 *
 * NOTE(review): the walk compares raw port pointer values and always
 * advances the list whose head is >= the chosen port — this only
 * deduplicates correctly if both lists are kept sorted by decreasing
 * port pointer; confirm that invariant at the insertion sites.
 *
 * As in br_flood(), skb0 != NULL means the caller retains skb: the
 * final transmit uses a clone and skb is never freed here.
 */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *port;
	struct net_bridge_port *lport, *rport;	/* heads of the two lists */
	struct net_bridge_port *prev;		/* deferred delivery port */
	struct net_bridge_port_group *p;	/* cursor over group members */
	struct hlist_node *rp;			/* cursor over router ports */

	prev = NULL;

	rp = br->router_list.first;
	p = mdst ? mdst->ports : NULL;		/* mdst may be NULL: routers only */
	while (p || rp) {
		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		/* Pick the larger pointer; a port present in both lists
		 * is selected once and both cursors advance below. */
		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;	/* clone failure: stop flooding */

		if ((unsigned long)lport >= (unsigned long)port)
			p = p->next;
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rp->next;
	}

	/* Nothing passed should_deliver(). */
	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);	/* caller keeps skb */
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}
252 | |||
/* called with rcu_read_lock */
/* Multicast-flood skb via the deliver hook; consumes skb
 * (skb0 == NULL path of br_multicast_flood). */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}
259 | |||
/* called with rcu_read_lock */
/* Multicast-flood skb via the forward hook.  Non-NULL skb2 marks the
 * caller as still owning skb, so only clones are sent and skb is not
 * freed here. */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
266 | #endif | ||