diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-20 20:43:29 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-20 20:43:29 -0400 |
commit | db6d8c7a4027b48d797b369a53f8470aaeed7063 (patch) | |
tree | e140c104a89abc2154e1f41a7db8ebecbb6fa0b4 /net | |
parent | 3a533374283aea50eab3976d8a6d30532175f009 (diff) | |
parent | fb65a7c091529bfffb1262515252c0d0f6241c5c (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (1232 commits)
iucv: Fix bad merging.
net_sched: Add size table for qdiscs
net_sched: Add accessor function for packet length for qdiscs
net_sched: Add qdisc_enqueue wrapper
highmem: Export totalhigh_pages.
ipv6 mcast: Omit redundant address family checks in ip6_mc_source().
net: Use standard structures for generic socket address structures.
ipv6 netns: Make several "global" sysctl variables namespace aware.
netns: Use net_eq() to compare net-namespaces for optimization.
ipv6: remove unused macros from net/ipv6.h
ipv6: remove unused parameter from ip6_ra_control
tcp: fix kernel panic with listening_get_next
tcp: Remove redundant checks when setting eff_sacks
tcp: options clean up
tcp: Fix MD5 signatures for non-linear skbs
sctp: Update sctp global memory limit allocations.
sctp: remove unnecessary byteshifting, calculate directly in big-endian
sctp: Allow only 1 listening socket with SO_REUSEADDR
sctp: Do not leak memory on multiple listen() calls
sctp: Support ipv6only AF_INET6 sockets.
...
Diffstat (limited to 'net')
361 files changed, 12739 insertions, 9582 deletions
diff --git a/net/802/Kconfig b/net/802/Kconfig new file mode 100644 index 000000000000..be33d27c8e69 --- /dev/null +++ b/net/802/Kconfig | |||
@@ -0,0 +1,7 @@ | |||
1 | config STP | ||
2 | tristate | ||
3 | select LLC | ||
4 | |||
5 | config GARP | ||
6 | tristate | ||
7 | select STP | ||
diff --git a/net/802/Makefile b/net/802/Makefile index 68569ffddea1..7893d679910c 100644 --- a/net/802/Makefile +++ b/net/802/Makefile | |||
@@ -10,3 +10,5 @@ obj-$(CONFIG_FDDI) += fddi.o | |||
10 | obj-$(CONFIG_HIPPI) += hippi.o | 10 | obj-$(CONFIG_HIPPI) += hippi.o |
11 | obj-$(CONFIG_IPX) += p8022.o psnap.o p8023.o | 11 | obj-$(CONFIG_IPX) += p8022.o psnap.o p8023.o |
12 | obj-$(CONFIG_ATALK) += p8022.o psnap.o | 12 | obj-$(CONFIG_ATALK) += p8022.o psnap.o |
13 | obj-$(CONFIG_STP) += stp.o | ||
14 | obj-$(CONFIG_GARP) += garp.o | ||
diff --git a/net/802/garp.c b/net/802/garp.c new file mode 100644 index 000000000000..1dcb0660c49d --- /dev/null +++ b/net/802/garp.c | |||
@@ -0,0 +1,636 @@ | |||
1 | /* | ||
2 | * IEEE 802.1D Generic Attribute Registration Protocol (GARP) | ||
3 | * | ||
4 | * Copyright (c) 2008 Patrick McHardy <kaber@trash.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * version 2 as published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/timer.h> | ||
12 | #include <linux/skbuff.h> | ||
13 | #include <linux/netdevice.h> | ||
14 | #include <linux/etherdevice.h> | ||
15 | #include <linux/rtnetlink.h> | ||
16 | #include <linux/llc.h> | ||
17 | #include <net/llc.h> | ||
18 | #include <net/llc_pdu.h> | ||
19 | #include <net/garp.h> | ||
20 | #include <asm/unaligned.h> | ||
21 | |||
/* Upper bound of the join timer interval in milliseconds; the actual delay
 * is randomized in garp_join_timer_arm(). Runtime-writable via sysfs (0644). */
static unsigned int garp_join_time __read_mostly = 200;
module_param(garp_join_time, uint, 0644);
MODULE_PARM_DESC(garp_join_time, "Join time in ms (default 200ms)");
MODULE_LICENSE("GPL");
26 | |||
/*
 * GARP applicant state machine transition table.
 *
 * Indexed as [current applicant state][event]; each entry gives the next
 * state and an optional transmit action. Entries whose next state is
 * GARP_APPLICANT_INVALID mark transitions that must not occur (or must not
 * change state) — garp_attr_event() ignores them.
 *
 * NOTE(review): presumably this encodes the applicant state table of
 * IEEE 802.1D GARP (cf. the file header) — confirm against the standard
 * before altering any entry.
 */
static const struct garp_state_trans {
	u8 state;
	u8 action;
} garp_applicant_state_table[GARP_APPLICANT_MAX + 1][GARP_EVENT_MAX + 1] = {
	[GARP_APPLICANT_VA] = {
		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_AA,
						    .action = GARP_ACTION_S_JOIN_IN },
		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_AA },
		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VA },
		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VA },
		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VA },
		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_INVALID },
		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_LA },
	},
	[GARP_APPLICANT_AA] = {
		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_QA,
						    .action = GARP_ACTION_S_JOIN_IN },
		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_QA },
		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VA },
		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VA },
		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VA },
		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_INVALID },
		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_LA },
	},
	[GARP_APPLICANT_QA] = {
		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_INVALID },
		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_QA },
		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VA },
		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VA },
		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_INVALID },
		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_LA },
	},
	[GARP_APPLICANT_LA] = {
		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_VO,
						    .action = GARP_ACTION_S_LEAVE_EMPTY },
		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_LA },
		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VO },
		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_LA },
		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_LA },
		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VO },
		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_VA },
		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_INVALID },
	},
	[GARP_APPLICANT_VP] = {
		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_AA,
						    .action = GARP_ACTION_S_JOIN_IN },
		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_AP },
		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_INVALID },
		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_VO },
	},
	[GARP_APPLICANT_AP] = {
		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_QA,
						    .action = GARP_ACTION_S_JOIN_IN },
		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_QP },
		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_INVALID },
		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_AO },
	},
	[GARP_APPLICANT_QP] = {
		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_INVALID },
		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_QP },
		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_INVALID },
		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_QO },
	},
	[GARP_APPLICANT_VO] = {
		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_INVALID },
		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_AO },
		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VO },
		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VO },
		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VO },
		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VO },
		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_VP },
		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_INVALID },
	},
	[GARP_APPLICANT_AO] = {
		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_INVALID },
		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_QO },
		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VO },
		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VO },
		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VO },
		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VO },
		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_AP },
		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_INVALID },
	},
	[GARP_APPLICANT_QO] = {
		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_INVALID },
		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_QO },
		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VO },
		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VO },
		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VO },
		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VO },
		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_QP },
		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_INVALID },
	},
};
137 | |||
138 | static int garp_attr_cmp(const struct garp_attr *attr, | ||
139 | const void *data, u8 len, u8 type) | ||
140 | { | ||
141 | if (attr->type != type) | ||
142 | return attr->type - type; | ||
143 | if (attr->dlen != len) | ||
144 | return attr->dlen - len; | ||
145 | return memcmp(attr->data, data, len); | ||
146 | } | ||
147 | |||
/*
 * Look up an attribute by (type, len, data) in the applicant's rbtree
 * (app->gid). Returns the matching attribute or NULL if none is
 * registered. Called with app->lock held.
 */
static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app,
					  const void *data, u8 len, u8 type)
{
	struct rb_node *parent = app->gid.rb_node;
	struct garp_attr *attr;
	int d;

	while (parent) {
		attr = rb_entry(parent, struct garp_attr, node);
		d = garp_attr_cmp(attr, data, len, type);
		if (d < 0)
			parent = parent->rb_left;
		else if (d > 0)
			parent = parent->rb_right;
		else
			return attr;
	}
	return NULL;
}
167 | |||
/*
 * Link a new attribute into the applicant's rbtree and rebalance.
 *
 * NOTE(review): the caller must guarantee that no equal attribute is
 * already in the tree — if garp_attr_cmp() returns 0 here, neither
 * branch advances *p and this loop never terminates.
 */
static void garp_attr_insert(struct garp_applicant *app, struct garp_attr *new)
{
	struct rb_node *parent = NULL, **p = &app->gid.rb_node;
	struct garp_attr *attr;
	int d;

	while (*p) {
		parent = *p;
		attr = rb_entry(parent, struct garp_attr, node);
		d = garp_attr_cmp(attr, new->data, new->dlen, new->type);
		if (d < 0)
			p = &parent->rb_left;
		else if (d > 0)
			p = &parent->rb_right;
	}
	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, &app->gid);
}
186 | |||
187 | static struct garp_attr *garp_attr_create(struct garp_applicant *app, | ||
188 | const void *data, u8 len, u8 type) | ||
189 | { | ||
190 | struct garp_attr *attr; | ||
191 | |||
192 | attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC); | ||
193 | if (!attr) | ||
194 | return attr; | ||
195 | attr->state = GARP_APPLICANT_VO; | ||
196 | attr->type = type; | ||
197 | attr->dlen = len; | ||
198 | memcpy(attr->data, data, len); | ||
199 | garp_attr_insert(app, attr); | ||
200 | return attr; | ||
201 | } | ||
202 | |||
/* Unlink an attribute from the applicant's rbtree and free it. */
static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr)
{
	rb_erase(&attr->node, &app->gid);
	kfree(attr);
}
208 | |||
/*
 * Allocate a fresh GARP PDU skb for this applicant and write the GARP
 * protocol ID. The skb is sized to the device MTU; headroom is reserved
 * for the link-layer header plus the LLC UI header that garp_pdu_queue()
 * prepends later. Returns 0 or -ENOMEM. Called under app->lock, hence
 * GFP_ATOMIC.
 */
static int garp_pdu_init(struct garp_applicant *app)
{
	struct sk_buff *skb;
	struct garp_pdu_hdr *gp;

#define LLC_RESERVE	sizeof(struct llc_pdu_un)
	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
			GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb->dev = app->dev;
	skb->protocol = htons(ETH_P_802_2);
	skb_reserve(skb, LL_RESERVED_SPACE(app->dev) + LLC_RESERVE);

	/* The header may sit at an unaligned offset, hence put_unaligned. */
	gp = (struct garp_pdu_hdr *)__skb_put(skb, sizeof(*gp));
	put_unaligned(htons(GARP_PROTOCOL_ID), &gp->protocol);

	app->pdu = skb;
	return 0;
}
230 | |||
231 | static int garp_pdu_append_end_mark(struct garp_applicant *app) | ||
232 | { | ||
233 | if (skb_tailroom(app->pdu) < sizeof(u8)) | ||
234 | return -1; | ||
235 | *(u8 *)__skb_put(app->pdu, sizeof(u8)) = GARP_END_MARK; | ||
236 | return 0; | ||
237 | } | ||
238 | |||
/*
 * Finalize the PDU under construction and move it to the transmit queue.
 * Two end marks are appended (presumably one terminates the last open
 * message, the second the PDU itself — confirm against the GARP PDU
 * format), then the LLC UI and MAC headers are prepended, addressed to
 * the application's group address. No-op when no PDU is open. Called
 * under app->lock; transmission happens later via garp_queue_xmit().
 */
static void garp_pdu_queue(struct garp_applicant *app)
{
	if (!app->pdu)
		return;

	garp_pdu_append_end_mark(app);
	garp_pdu_append_end_mark(app);

	llc_pdu_header_init(app->pdu, LLC_PDU_TYPE_U, LLC_SAP_BSPAN,
			    LLC_SAP_BSPAN, LLC_PDU_CMD);
	llc_pdu_init_as_ui_cmd(app->pdu);
	llc_mac_hdr_init(app->pdu, app->dev->dev_addr,
			 app->app->proto.group_address);

	skb_queue_tail(&app->queue, app->pdu);
	app->pdu = NULL;
}
256 | |||
257 | static void garp_queue_xmit(struct garp_applicant *app) | ||
258 | { | ||
259 | struct sk_buff *skb; | ||
260 | |||
261 | while ((skb = skb_dequeue(&app->queue))) | ||
262 | dev_queue_xmit(skb); | ||
263 | } | ||
264 | |||
/*
 * Start a new message inside the PDU by appending a message header with
 * the given attribute type. The type is also cached in the skb control
 * block so garp_pdu_append_attr() can tell when a new message header is
 * needed. Returns 0 on success, -1 when the PDU is full.
 */
static int garp_pdu_append_msg(struct garp_applicant *app, u8 attrtype)
{
	struct garp_msg_hdr *gm;

	if (skb_tailroom(app->pdu) < sizeof(*gm))
		return -1;
	gm = (struct garp_msg_hdr *)__skb_put(app->pdu, sizeof(*gm));
	gm->attrtype = attrtype;
	garp_cb(app->pdu)->cur_type = attrtype;
	return 0;
}
276 | |||
/*
 * Append one attribute (header + data) carrying @event to the PDU,
 * opening a fresh PDU and/or message header as needed. Whenever the
 * current PDU runs out of room it is queued for transmission and the
 * append is retried from scratch on a new one (the "again" loop).
 * Returns 0 on success or a negative errno if no PDU could be
 * allocated. Called under app->lock.
 *
 * NOTE(review): if a single attribute can ever exceed the room of an
 * empty MTU-sized PDU, the queue/again cycle would not terminate —
 * presumably attribute sizes (u8 dlen) make that impossible; verify.
 */
static int garp_pdu_append_attr(struct garp_applicant *app,
				const struct garp_attr *attr,
				enum garp_attr_event event)
{
	struct garp_attr_hdr *ga;
	unsigned int len;
	int err;
again:
	if (!app->pdu) {
		err = garp_pdu_init(app);
		if (err < 0)
			return err;
	}

	/* A PDU groups attributes by type; open a new message header when
	 * the type changes (cur_type == 0 means no message open yet). */
	if (garp_cb(app->pdu)->cur_type != attr->type) {
		if (garp_cb(app->pdu)->cur_type &&
		    garp_pdu_append_end_mark(app) < 0)
			goto queue;
		if (garp_pdu_append_msg(app, attr->type) < 0)
			goto queue;
	}

	len = sizeof(*ga) + attr->dlen;
	if (skb_tailroom(app->pdu) < len)
		goto queue;
	ga = (struct garp_attr_hdr *)__skb_put(app->pdu, len);
	ga->len = len;
	ga->event = event;
	memcpy(ga->data, attr->data, attr->dlen);
	return 0;

queue:
	/* PDU full: ship it and retry on a fresh one. */
	garp_pdu_queue(app);
	goto again;
}
312 | |||
/*
 * Feed one event into the applicant state machine for an attribute:
 * perform the transition's transmit action (if any) and advance the
 * attribute's state. Transitions marked GARP_APPLICANT_INVALID are
 * ignored. Called under app->lock.
 */
static void garp_attr_event(struct garp_applicant *app,
			    struct garp_attr *attr, enum garp_event event)
{
	enum garp_applicant_state state;

	state = garp_applicant_state_table[attr->state][event].state;
	if (state == GARP_APPLICANT_INVALID)
		return;

	switch (garp_applicant_state_table[attr->state][event].action) {
	case GARP_ACTION_NONE:
		break;
	case GARP_ACTION_S_JOIN_IN:
		/* When appending the attribute fails, don't update state in
		 * order to retry on next TRANSMIT_PDU event. */
		if (garp_pdu_append_attr(app, attr, GARP_JOIN_IN) < 0)
			return;
		break;
	case GARP_ACTION_S_LEAVE_EMPTY:
		garp_pdu_append_attr(app, attr, GARP_LEAVE_EMPTY);
		/* As a pure applicant, sending a leave message implies that
		 * the attribute was unregistered and can be destroyed. */
		garp_attr_destroy(app, attr);
		return;
	default:
		WARN_ON(1);
	}

	attr->state = state;
}
343 | |||
/**
 * garp_request_join - register an attribute and request joining it
 * @dev: device the applicant is bound to
 * @appl: GARP application (indexes the per-port applicant array)
 * @data: attribute data
 * @len: length of @data in bytes
 * @type: attribute type
 *
 * Creates the attribute and fires a REQ_JOIN event through the state
 * machine. Returns 0 or -ENOMEM. Takes app->lock (BH-safe).
 */
int garp_request_join(const struct net_device *dev,
		      const struct garp_application *appl,
		      const void *data, u8 len, u8 type)
{
	struct garp_port *port = dev->garp_port;
	struct garp_applicant *app = port->applicants[appl->type];
	struct garp_attr *attr;

	spin_lock_bh(&app->lock);
	attr = garp_attr_create(app, data, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return -ENOMEM;
	}
	garp_attr_event(app, attr, GARP_EVENT_REQ_JOIN);
	spin_unlock_bh(&app->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(garp_request_join);
363 | |||
/**
 * garp_request_leave - request leaving a previously joined attribute
 * @dev: device the applicant is bound to
 * @appl: GARP application (indexes the per-port applicant array)
 * @data: attribute data
 * @len: length of @data in bytes
 * @type: attribute type
 *
 * Fires a REQ_LEAVE event for the matching attribute; silently returns
 * if the attribute is not registered. Takes app->lock (BH-safe).
 */
void garp_request_leave(const struct net_device *dev,
			const struct garp_application *appl,
			const void *data, u8 len, u8 type)
{
	struct garp_port *port = dev->garp_port;
	struct garp_applicant *app = port->applicants[appl->type];
	struct garp_attr *attr;

	spin_lock_bh(&app->lock);
	attr = garp_attr_lookup(app, data, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return;
	}
	garp_attr_event(app, attr, GARP_EVENT_REQ_LEAVE);
	spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(garp_request_leave);
382 | |||
/*
 * Deliver @event to every attribute of the applicant. The next tree
 * node is fetched *before* the event is processed because
 * garp_attr_event() may destroy the current attribute (S_LEAVE_EMPTY).
 * Called under app->lock.
 */
static void garp_gid_event(struct garp_applicant *app, enum garp_event event)
{
	struct rb_node *node, *next;
	struct garp_attr *attr;

	for (node = rb_first(&app->gid);
	     next = node ? rb_next(node) : NULL, node != NULL;
	     node = next) {
		attr = rb_entry(node, struct garp_attr, node);
		garp_attr_event(app, attr, event);
	}
}
395 | |||
/*
 * (Re)arm the join timer with a randomized delay in
 * [0, garp_join_time) ms: the jiffies equivalent is scaled by a 32-bit
 * random value, keeping only the high 32 bits of the product.
 */
static void garp_join_timer_arm(struct garp_applicant *app)
{
	unsigned long delay;

	delay = (u64)msecs_to_jiffies(garp_join_time) * net_random() >> 32;
	mod_timer(&app->join_timer, jiffies + delay);
}
403 | |||
/*
 * Join timer callback: push a TRANSMIT_PDU event through all attributes,
 * queue the resulting PDU, transmit outside the lock, then rearm with a
 * fresh random delay.
 */
static void garp_join_timer(unsigned long data)
{
	struct garp_applicant *app = (struct garp_applicant *)data;

	spin_lock(&app->lock);
	garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
	garp_pdu_queue(app);
	spin_unlock(&app->lock);

	garp_queue_xmit(app);
	garp_join_timer_arm(app);
}
416 | |||
/*
 * Check for an end mark at the current parse position. Returns 0 when
 * more (non-end-mark) data follows; returns -1 — signalling the caller
 * to stop — when the skb is exhausted or when an end mark was found and
 * consumed.
 */
static int garp_pdu_parse_end_mark(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, sizeof(u8)))
		return -1;
	if (*skb->data == GARP_END_MARK) {
		skb_pull(skb, sizeof(u8));
		return -1;
	}
	return 0;
}
427 | |||
428 | static int garp_pdu_parse_attr(struct garp_applicant *app, struct sk_buff *skb, | ||
429 | u8 attrtype) | ||
430 | { | ||
431 | const struct garp_attr_hdr *ga; | ||
432 | struct garp_attr *attr; | ||
433 | enum garp_event event; | ||
434 | unsigned int dlen; | ||
435 | |||
436 | if (!pskb_may_pull(skb, sizeof(*ga))) | ||
437 | return -1; | ||
438 | ga = (struct garp_attr_hdr *)skb->data; | ||
439 | if (ga->len < sizeof(*ga)) | ||
440 | return -1; | ||
441 | |||
442 | if (!pskb_may_pull(skb, ga->len)) | ||
443 | return -1; | ||
444 | skb_pull(skb, ga->len); | ||
445 | dlen = sizeof(*ga) - ga->len; | ||
446 | |||
447 | if (attrtype > app->app->maxattr) | ||
448 | return 0; | ||
449 | |||
450 | switch (ga->event) { | ||
451 | case GARP_LEAVE_ALL: | ||
452 | if (dlen != 0) | ||
453 | return -1; | ||
454 | garp_gid_event(app, GARP_EVENT_R_LEAVE_EMPTY); | ||
455 | return 0; | ||
456 | case GARP_JOIN_EMPTY: | ||
457 | event = GARP_EVENT_R_JOIN_EMPTY; | ||
458 | break; | ||
459 | case GARP_JOIN_IN: | ||
460 | event = GARP_EVENT_R_JOIN_IN; | ||
461 | break; | ||
462 | case GARP_LEAVE_EMPTY: | ||
463 | event = GARP_EVENT_R_LEAVE_EMPTY; | ||
464 | break; | ||
465 | case GARP_EMPTY: | ||
466 | event = GARP_EVENT_R_EMPTY; | ||
467 | break; | ||
468 | default: | ||
469 | return 0; | ||
470 | } | ||
471 | |||
472 | if (dlen == 0) | ||
473 | return -1; | ||
474 | attr = garp_attr_lookup(app, ga->data, dlen, attrtype); | ||
475 | if (attr == NULL) | ||
476 | return 0; | ||
477 | garp_attr_event(app, attr, event); | ||
478 | return 0; | ||
479 | } | ||
480 | |||
/*
 * Parse one message (message header + attribute list) of a GARP PDU.
 * Attributes are consumed until an end mark or the end of the skb.
 * Returns 0 on success, -1 on a malformed message (attrtype 0 is
 * reserved/invalid). Called under app->lock.
 */
static int garp_pdu_parse_msg(struct garp_applicant *app, struct sk_buff *skb)
{
	const struct garp_msg_hdr *gm;

	if (!pskb_may_pull(skb, sizeof(*gm)))
		return -1;
	gm = (struct garp_msg_hdr *)skb->data;
	if (gm->attrtype == 0)
		return -1;
	skb_pull(skb, sizeof(*gm));

	while (skb->len > 0) {
		if (garp_pdu_parse_attr(app, skb, gm->attrtype) < 0)
			return -1;
		if (garp_pdu_parse_end_mark(skb) < 0)
			break;
	}
	return 0;
}
500 | |||
/*
 * Receive handler invoked by the STP SAP demuxer (under RCU) for frames
 * addressed to the application's group address. Validates the GARP
 * protocol ID, then parses messages until an end mark or exhaustion.
 * Always consumes the skb.
 */
static void garp_pdu_rcv(const struct stp_proto *proto, struct sk_buff *skb,
			 struct net_device *dev)
{
	struct garp_application *appl = proto->data;
	struct garp_port *port;
	struct garp_applicant *app;
	const struct garp_pdu_hdr *gp;

	/* Port/applicant may be torn down concurrently; RCU-deref both. */
	port = rcu_dereference(dev->garp_port);
	if (!port)
		goto err;
	app = rcu_dereference(port->applicants[appl->type]);
	if (!app)
		goto err;

	if (!pskb_may_pull(skb, sizeof(*gp)))
		goto err;
	gp = (struct garp_pdu_hdr *)skb->data;
	if (get_unaligned(&gp->protocol) != htons(GARP_PROTOCOL_ID))
		goto err;
	skb_pull(skb, sizeof(*gp));

	spin_lock(&app->lock);
	while (skb->len > 0) {
		if (garp_pdu_parse_msg(app, skb) < 0)
			break;
		if (garp_pdu_parse_end_mark(skb) < 0)
			break;
	}
	spin_unlock(&app->lock);
err:
	kfree_skb(skb);
}
534 | |||
/*
 * Allocate the per-device GARP port structure and publish it via RCU.
 * Returns 0 or -ENOMEM. Called under RTNL (see garp_init_applicant).
 */
static int garp_init_port(struct net_device *dev)
{
	struct garp_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;
	rcu_assign_pointer(dev->garp_port, port);
	return 0;
}
545 | |||
/*
 * Release the per-device GARP port if no applicant is registered on it
 * anymore; otherwise do nothing. Unpublishes the pointer first and
 * waits for readers (garp_pdu_rcv) before freeing. Called under RTNL.
 */
static void garp_release_port(struct net_device *dev)
{
	struct garp_port *port = dev->garp_port;
	unsigned int i;

	for (i = 0; i <= GARP_APPLICATION_MAX; i++) {
		if (port->applicants[i])
			return;
	}
	rcu_assign_pointer(dev->garp_port, NULL);
	synchronize_rcu();
	kfree(port);
}
559 | |||
/**
 * garp_init_applicant - bind a GARP application to a device
 * @dev: device to attach to
 * @appl: GARP application
 *
 * Creates the per-device port on first use, allocates the applicant,
 * subscribes the device to the application's multicast group address,
 * publishes the applicant via RCU and starts the join timer.
 * Must be called under RTNL. Returns 0 or a negative errno; on failure
 * a port created here is released again.
 */
int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
{
	struct garp_applicant *app;
	int err;

	ASSERT_RTNL();

	if (!dev->garp_port) {
		err = garp_init_port(dev);
		if (err < 0)
			goto err1;
	}

	err = -ENOMEM;
	app = kzalloc(sizeof(*app), GFP_KERNEL);
	if (!app)
		goto err2;

	err = dev_mc_add(dev, appl->proto.group_address, ETH_ALEN, 0);
	if (err < 0)
		goto err3;

	app->dev = dev;
	app->app = appl;
	app->gid = RB_ROOT;
	spin_lock_init(&app->lock);
	skb_queue_head_init(&app->queue);
	rcu_assign_pointer(dev->garp_port->applicants[appl->type], app);
	setup_timer(&app->join_timer, garp_join_timer, (unsigned long)app);
	garp_join_timer_arm(app);
	return 0;

err3:
	kfree(app);
err2:
	/* Only frees the port when no other applicant uses it. */
	garp_release_port(dev);
err1:
	return err;
}
EXPORT_SYMBOL_GPL(garp_init_applicant);
600 | |||
/**
 * garp_uninit_applicant - unbind a GARP application from a device
 * @dev: device to detach from
 * @appl: GARP application
 *
 * Unpublishes the applicant (waiting for RCU readers), flushes pending
 * messages, leaves the multicast group and frees the applicant; the
 * port is released if this was its last user. Must be called under RTNL.
 */
void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
{
	struct garp_port *port = dev->garp_port;
	struct garp_applicant *app = port->applicants[appl->type];

	ASSERT_RTNL();

	rcu_assign_pointer(port->applicants[appl->type], NULL);
	synchronize_rcu();

	/* Delete timer and generate a final TRANSMIT_PDU event to flush out
	 * all pending messages before the applicant is gone. */
	del_timer_sync(&app->join_timer);
	garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
	garp_pdu_queue(app);
	garp_queue_xmit(app);

	dev_mc_delete(dev, appl->proto.group_address, ETH_ALEN, 0);
	kfree(app);
	garp_release_port(dev);
}
EXPORT_SYMBOL_GPL(garp_uninit_applicant);
623 | |||
624 | int garp_register_application(struct garp_application *appl) | ||
625 | { | ||
626 | appl->proto.rcv = garp_pdu_rcv; | ||
627 | appl->proto.data = appl; | ||
628 | return stp_proto_register(&appl->proto); | ||
629 | } | ||
630 | EXPORT_SYMBOL_GPL(garp_register_application); | ||
631 | |||
/* Unregister a GARP application from the STP SAP demuxer. */
void garp_unregister_application(struct garp_application *appl)
{
	stp_proto_unregister(&appl->proto);
}
EXPORT_SYMBOL_GPL(garp_unregister_application);
diff --git a/net/802/stp.c b/net/802/stp.c new file mode 100644 index 000000000000..0b7a24452d11 --- /dev/null +++ b/net/802/stp.c | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * STP SAP demux | ||
3 | * | ||
4 | * Copyright (c) 2008 Patrick McHardy <kaber@trash.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * version 2 as published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/mutex.h> | ||
11 | #include <linux/skbuff.h> | ||
12 | #include <linux/etherdevice.h> | ||
13 | #include <linux/llc.h> | ||
14 | #include <net/llc.h> | ||
15 | #include <net/llc_pdu.h> | ||
16 | #include <net/stp.h> | ||
17 | |||
/* 01:80:c2:00:00:20 - 01:80:c2:00:00:2F */
#define GARP_ADDR_MIN	0x20
#define GARP_ADDR_MAX	0x2F
#define GARP_ADDR_RANGE	(GARP_ADDR_MAX - GARP_ADDR_MIN)

/* Protocols demuxed by the last octet of the destination group address;
 * indices are relative to GARP_ADDR_MIN. RCU-protected. */
static const struct stp_proto *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
/* Handler for plain STP (zero group address). RCU-protected. */
static const struct stp_proto *stp_proto __read_mostly;

static struct llc_sap *sap __read_mostly;
/* Number of registered protocols; the LLC SAP is opened on the first
 * registration and dropped on the last unregistration. */
static unsigned int sap_registered;
/* Serializes registration/unregistration and the counters above. */
static DEFINE_MUTEX(stp_proto_mutex);
29 | |||
/* Called under rcu_read_lock from LLC.
 *
 * Demultiplex an incoming bridge-SAP frame: validate the LLC header,
 * then dispatch on the last octet of the destination MAC — addresses in
 * the GARP range go to the matching garp_protos[] entry (whose full
 * group address must match), everything else to the plain STP handler.
 * The registered handler takes ownership of the skb; unmatched or
 * malformed frames are dropped.
 */
static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	const struct ethhdr *eh = eth_hdr(skb);
	const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
	const struct stp_proto *proto;

	if (pdu->ssap != LLC_SAP_BSPAN ||
	    pdu->dsap != LLC_SAP_BSPAN ||
	    pdu->ctrl_1 != LLC_PDU_TYPE_U)
		goto err;

	if (eh->h_dest[5] >= GARP_ADDR_MIN && eh->h_dest[5] <= GARP_ADDR_MAX) {
		proto = rcu_dereference(garp_protos[eh->h_dest[5] -
						    GARP_ADDR_MIN]);
		/* compare_ether_addr() returns non-zero on mismatch. */
		if (proto &&
		    compare_ether_addr(eh->h_dest, proto->group_address))
			goto err;
	} else
		proto = rcu_dereference(stp_proto);

	if (!proto)
		goto err;

	proto->rcv(proto, skb, dev);
	return 0;

err:
	kfree_skb(skb);
	return 0;
}
62 | |||
63 | int stp_proto_register(const struct stp_proto *proto) | ||
64 | { | ||
65 | int err = 0; | ||
66 | |||
67 | mutex_lock(&stp_proto_mutex); | ||
68 | if (sap_registered++ == 0) { | ||
69 | sap = llc_sap_open(LLC_SAP_BSPAN, stp_pdu_rcv); | ||
70 | if (!sap) { | ||
71 | err = -ENOMEM; | ||
72 | goto out; | ||
73 | } | ||
74 | } | ||
75 | if (is_zero_ether_addr(proto->group_address)) | ||
76 | rcu_assign_pointer(stp_proto, proto); | ||
77 | else | ||
78 | rcu_assign_pointer(garp_protos[proto->group_address[5] - | ||
79 | GARP_ADDR_MIN], proto); | ||
80 | out: | ||
81 | mutex_unlock(&stp_proto_mutex); | ||
82 | return err; | ||
83 | } | ||
84 | EXPORT_SYMBOL_GPL(stp_proto_register); | ||
85 | |||
/**
 * stp_proto_unregister - unregister an STP SAP protocol handler
 * @proto: handler previously passed to stp_proto_register()
 *
 * Clears the handler's slot, waits for in-flight RCU readers
 * (stp_pdu_rcv) to finish, and drops the LLC SAP reference when the
 * last handler goes away.
 */
void stp_proto_unregister(const struct stp_proto *proto)
{
	mutex_lock(&stp_proto_mutex);
	if (is_zero_ether_addr(proto->group_address))
		rcu_assign_pointer(stp_proto, NULL);
	else
		rcu_assign_pointer(garp_protos[proto->group_address[5] -
					       GARP_ADDR_MIN], NULL);
	synchronize_rcu();

	if (--sap_registered == 0)
		llc_sap_put(sap);
	mutex_unlock(&stp_proto_mutex);
}
EXPORT_SYMBOL_GPL(stp_proto_unregister);
101 | |||
102 | MODULE_LICENSE("GPL"); | ||
diff --git a/net/8021q/Kconfig b/net/8021q/Kconfig index c4a382e450e2..fa073a54963e 100644 --- a/net/8021q/Kconfig +++ b/net/8021q/Kconfig | |||
@@ -17,3 +17,13 @@ config VLAN_8021Q | |||
17 | will be called 8021q. | 17 | will be called 8021q. |
18 | 18 | ||
19 | If unsure, say N. | 19 | If unsure, say N. |
20 | |||
21 | config VLAN_8021Q_GVRP | ||
22 | bool "GVRP (GARP VLAN Registration Protocol) support" | ||
23 | depends on VLAN_8021Q | ||
24 | select GARP | ||
25 | help | ||
26 | Select this to enable GVRP end-system support. GVRP is used for | ||
27 | automatic propagation of registered VLANs to switches. | ||
28 | |||
29 | If unsure, say N. | ||
diff --git a/net/8021q/Makefile b/net/8021q/Makefile index 10ca7f486c3a..9f4f174ead1c 100644 --- a/net/8021q/Makefile +++ b/net/8021q/Makefile | |||
@@ -1,12 +1,10 @@ | |||
1 | # | 1 | # |
2 | # Makefile for the Linux VLAN layer. | 2 | # Makefile for the Linux VLAN layer. |
3 | # | 3 | # |
4 | obj-$(subst m,y,$(CONFIG_VLAN_8021Q)) += vlan_core.o | ||
5 | obj-$(CONFIG_VLAN_8021Q) += 8021q.o | ||
4 | 6 | ||
5 | obj-$(CONFIG_VLAN_8021Q) += 8021q.o | 7 | 8021q-y := vlan.o vlan_dev.o vlan_netlink.o |
6 | 8 | 8021q-$(CONFIG_VLAN_8021Q_GVRP) += vlan_gvrp.o | |
7 | 8021q-objs := vlan.o vlan_dev.o vlan_netlink.o | 9 | 8021q-$(CONFIG_PROC_FS) += vlanproc.o |
8 | |||
9 | ifeq ($(CONFIG_PROC_FS),y) | ||
10 | 8021q-objs += vlanproc.o | ||
11 | endif | ||
12 | 10 | ||
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 08f14f6c5fd6..b661f47bf10a 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -18,22 +18,20 @@ | |||
18 | * 2 of the License, or (at your option) any later version. | 18 | * 2 of the License, or (at your option) any later version. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <asm/uaccess.h> /* for copy_from_user */ | ||
22 | #include <linux/capability.h> | 21 | #include <linux/capability.h> |
23 | #include <linux/module.h> | 22 | #include <linux/module.h> |
24 | #include <linux/netdevice.h> | 23 | #include <linux/netdevice.h> |
25 | #include <linux/skbuff.h> | 24 | #include <linux/skbuff.h> |
26 | #include <net/datalink.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/in.h> | ||
29 | #include <linux/init.h> | 25 | #include <linux/init.h> |
30 | #include <linux/rculist.h> | 26 | #include <linux/rculist.h> |
31 | #include <net/p8022.h> | 27 | #include <net/p8022.h> |
32 | #include <net/arp.h> | 28 | #include <net/arp.h> |
33 | #include <linux/rtnetlink.h> | 29 | #include <linux/rtnetlink.h> |
34 | #include <linux/notifier.h> | 30 | #include <linux/notifier.h> |
31 | #include <net/rtnetlink.h> | ||
35 | #include <net/net_namespace.h> | 32 | #include <net/net_namespace.h> |
36 | #include <net/netns/generic.h> | 33 | #include <net/netns/generic.h> |
34 | #include <asm/uaccess.h> | ||
37 | 35 | ||
38 | #include <linux/if_vlan.h> | 36 | #include <linux/if_vlan.h> |
39 | #include "vlan.h" | 37 | #include "vlan.h" |
@@ -84,13 +82,12 @@ static struct vlan_group *__vlan_find_group(struct net_device *real_dev) | |||
84 | * | 82 | * |
85 | * Must be invoked with RCU read lock (no preempt) | 83 | * Must be invoked with RCU read lock (no preempt) |
86 | */ | 84 | */ |
87 | struct net_device *__find_vlan_dev(struct net_device *real_dev, | 85 | struct net_device *__find_vlan_dev(struct net_device *real_dev, u16 vlan_id) |
88 | unsigned short VID) | ||
89 | { | 86 | { |
90 | struct vlan_group *grp = __vlan_find_group(real_dev); | 87 | struct vlan_group *grp = __vlan_find_group(real_dev); |
91 | 88 | ||
92 | if (grp) | 89 | if (grp) |
93 | return vlan_group_get_device(grp, VID); | 90 | return vlan_group_get_device(grp, vlan_id); |
94 | 91 | ||
95 | return NULL; | 92 | return NULL; |
96 | } | 93 | } |
@@ -118,14 +115,14 @@ static struct vlan_group *vlan_group_alloc(struct net_device *real_dev) | |||
118 | return grp; | 115 | return grp; |
119 | } | 116 | } |
120 | 117 | ||
121 | static int vlan_group_prealloc_vid(struct vlan_group *vg, int vid) | 118 | static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id) |
122 | { | 119 | { |
123 | struct net_device **array; | 120 | struct net_device **array; |
124 | unsigned int size; | 121 | unsigned int size; |
125 | 122 | ||
126 | ASSERT_RTNL(); | 123 | ASSERT_RTNL(); |
127 | 124 | ||
128 | array = vg->vlan_devices_arrays[vid / VLAN_GROUP_ARRAY_PART_LEN]; | 125 | array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; |
129 | if (array != NULL) | 126 | if (array != NULL) |
130 | return 0; | 127 | return 0; |
131 | 128 | ||
@@ -134,7 +131,7 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg, int vid) | |||
134 | if (array == NULL) | 131 | if (array == NULL) |
135 | return -ENOBUFS; | 132 | return -ENOBUFS; |
136 | 133 | ||
137 | vg->vlan_devices_arrays[vid / VLAN_GROUP_ARRAY_PART_LEN] = array; | 134 | vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN] = array; |
138 | return 0; | 135 | return 0; |
139 | } | 136 | } |
140 | 137 | ||
@@ -148,7 +145,7 @@ void unregister_vlan_dev(struct net_device *dev) | |||
148 | struct vlan_dev_info *vlan = vlan_dev_info(dev); | 145 | struct vlan_dev_info *vlan = vlan_dev_info(dev); |
149 | struct net_device *real_dev = vlan->real_dev; | 146 | struct net_device *real_dev = vlan->real_dev; |
150 | struct vlan_group *grp; | 147 | struct vlan_group *grp; |
151 | unsigned short vlan_id = vlan->vlan_id; | 148 | u16 vlan_id = vlan->vlan_id; |
152 | 149 | ||
153 | ASSERT_RTNL(); | 150 | ASSERT_RTNL(); |
154 | 151 | ||
@@ -166,8 +163,12 @@ void unregister_vlan_dev(struct net_device *dev) | |||
166 | 163 | ||
167 | synchronize_net(); | 164 | synchronize_net(); |
168 | 165 | ||
166 | unregister_netdevice(dev); | ||
167 | |||
169 | /* If the group is now empty, kill off the group. */ | 168 | /* If the group is now empty, kill off the group. */ |
170 | if (grp->nr_vlans == 0) { | 169 | if (grp->nr_vlans == 0) { |
170 | vlan_gvrp_uninit_applicant(real_dev); | ||
171 | |||
171 | if (real_dev->features & NETIF_F_HW_VLAN_RX) | 172 | if (real_dev->features & NETIF_F_HW_VLAN_RX) |
172 | real_dev->vlan_rx_register(real_dev, NULL); | 173 | real_dev->vlan_rx_register(real_dev, NULL); |
173 | 174 | ||
@@ -179,8 +180,6 @@ void unregister_vlan_dev(struct net_device *dev) | |||
179 | 180 | ||
180 | /* Get rid of the vlan's reference to real_dev */ | 181 | /* Get rid of the vlan's reference to real_dev */ |
181 | dev_put(real_dev); | 182 | dev_put(real_dev); |
182 | |||
183 | unregister_netdevice(dev); | ||
184 | } | 183 | } |
185 | 184 | ||
186 | static void vlan_transfer_operstate(const struct net_device *dev, | 185 | static void vlan_transfer_operstate(const struct net_device *dev, |
@@ -204,7 +203,7 @@ static void vlan_transfer_operstate(const struct net_device *dev, | |||
204 | } | 203 | } |
205 | } | 204 | } |
206 | 205 | ||
207 | int vlan_check_real_dev(struct net_device *real_dev, unsigned short vlan_id) | 206 | int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id) |
208 | { | 207 | { |
209 | char *name = real_dev->name; | 208 | char *name = real_dev->name; |
210 | 209 | ||
@@ -241,7 +240,7 @@ int register_vlan_dev(struct net_device *dev) | |||
241 | { | 240 | { |
242 | struct vlan_dev_info *vlan = vlan_dev_info(dev); | 241 | struct vlan_dev_info *vlan = vlan_dev_info(dev); |
243 | struct net_device *real_dev = vlan->real_dev; | 242 | struct net_device *real_dev = vlan->real_dev; |
244 | unsigned short vlan_id = vlan->vlan_id; | 243 | u16 vlan_id = vlan->vlan_id; |
245 | struct vlan_group *grp, *ngrp = NULL; | 244 | struct vlan_group *grp, *ngrp = NULL; |
246 | int err; | 245 | int err; |
247 | 246 | ||
@@ -250,15 +249,18 @@ int register_vlan_dev(struct net_device *dev) | |||
250 | ngrp = grp = vlan_group_alloc(real_dev); | 249 | ngrp = grp = vlan_group_alloc(real_dev); |
251 | if (!grp) | 250 | if (!grp) |
252 | return -ENOBUFS; | 251 | return -ENOBUFS; |
252 | err = vlan_gvrp_init_applicant(real_dev); | ||
253 | if (err < 0) | ||
254 | goto out_free_group; | ||
253 | } | 255 | } |
254 | 256 | ||
255 | err = vlan_group_prealloc_vid(grp, vlan_id); | 257 | err = vlan_group_prealloc_vid(grp, vlan_id); |
256 | if (err < 0) | 258 | if (err < 0) |
257 | goto out_free_group; | 259 | goto out_uninit_applicant; |
258 | 260 | ||
259 | err = register_netdevice(dev); | 261 | err = register_netdevice(dev); |
260 | if (err < 0) | 262 | if (err < 0) |
261 | goto out_free_group; | 263 | goto out_uninit_applicant; |
262 | 264 | ||
263 | /* Account for reference in struct vlan_dev_info */ | 265 | /* Account for reference in struct vlan_dev_info */ |
264 | dev_hold(real_dev); | 266 | dev_hold(real_dev); |
@@ -279,6 +281,9 @@ int register_vlan_dev(struct net_device *dev) | |||
279 | 281 | ||
280 | return 0; | 282 | return 0; |
281 | 283 | ||
284 | out_uninit_applicant: | ||
285 | if (ngrp) | ||
286 | vlan_gvrp_uninit_applicant(real_dev); | ||
282 | out_free_group: | 287 | out_free_group: |
283 | if (ngrp) | 288 | if (ngrp) |
284 | vlan_group_free(ngrp); | 289 | vlan_group_free(ngrp); |
@@ -288,8 +293,7 @@ out_free_group: | |||
288 | /* Attach a VLAN device to a mac address (ie Ethernet Card). | 293 | /* Attach a VLAN device to a mac address (ie Ethernet Card). |
289 | * Returns 0 if the device was created or a negative error code otherwise. | 294 | * Returns 0 if the device was created or a negative error code otherwise. |
290 | */ | 295 | */ |
291 | static int register_vlan_device(struct net_device *real_dev, | 296 | static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) |
292 | unsigned short VLAN_ID) | ||
293 | { | 297 | { |
294 | struct net_device *new_dev; | 298 | struct net_device *new_dev; |
295 | struct net *net = dev_net(real_dev); | 299 | struct net *net = dev_net(real_dev); |
@@ -297,10 +301,10 @@ static int register_vlan_device(struct net_device *real_dev, | |||
297 | char name[IFNAMSIZ]; | 301 | char name[IFNAMSIZ]; |
298 | int err; | 302 | int err; |
299 | 303 | ||
300 | if (VLAN_ID >= VLAN_VID_MASK) | 304 | if (vlan_id >= VLAN_VID_MASK) |
301 | return -ERANGE; | 305 | return -ERANGE; |
302 | 306 | ||
303 | err = vlan_check_real_dev(real_dev, VLAN_ID); | 307 | err = vlan_check_real_dev(real_dev, vlan_id); |
304 | if (err < 0) | 308 | if (err < 0) |
305 | return err; | 309 | return err; |
306 | 310 | ||
@@ -308,26 +312,26 @@ static int register_vlan_device(struct net_device *real_dev, | |||
308 | switch (vn->name_type) { | 312 | switch (vn->name_type) { |
309 | case VLAN_NAME_TYPE_RAW_PLUS_VID: | 313 | case VLAN_NAME_TYPE_RAW_PLUS_VID: |
310 | /* name will look like: eth1.0005 */ | 314 | /* name will look like: eth1.0005 */ |
311 | snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, VLAN_ID); | 315 | snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id); |
312 | break; | 316 | break; |
313 | case VLAN_NAME_TYPE_PLUS_VID_NO_PAD: | 317 | case VLAN_NAME_TYPE_PLUS_VID_NO_PAD: |
314 | /* Put our vlan.VID in the name. | 318 | /* Put our vlan.VID in the name. |
315 | * Name will look like: vlan5 | 319 | * Name will look like: vlan5 |
316 | */ | 320 | */ |
317 | snprintf(name, IFNAMSIZ, "vlan%i", VLAN_ID); | 321 | snprintf(name, IFNAMSIZ, "vlan%i", vlan_id); |
318 | break; | 322 | break; |
319 | case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD: | 323 | case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD: |
320 | /* Put our vlan.VID in the name. | 324 | /* Put our vlan.VID in the name. |
321 | * Name will look like: eth0.5 | 325 | * Name will look like: eth0.5 |
322 | */ | 326 | */ |
323 | snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, VLAN_ID); | 327 | snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id); |
324 | break; | 328 | break; |
325 | case VLAN_NAME_TYPE_PLUS_VID: | 329 | case VLAN_NAME_TYPE_PLUS_VID: |
326 | /* Put our vlan.VID in the name. | 330 | /* Put our vlan.VID in the name. |
327 | * Name will look like: vlan0005 | 331 | * Name will look like: vlan0005 |
328 | */ | 332 | */ |
329 | default: | 333 | default: |
330 | snprintf(name, IFNAMSIZ, "vlan%.4i", VLAN_ID); | 334 | snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id); |
331 | } | 335 | } |
332 | 336 | ||
333 | new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, | 337 | new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, |
@@ -342,7 +346,7 @@ static int register_vlan_device(struct net_device *real_dev, | |||
342 | */ | 346 | */ |
343 | new_dev->mtu = real_dev->mtu; | 347 | new_dev->mtu = real_dev->mtu; |
344 | 348 | ||
345 | vlan_dev_info(new_dev)->vlan_id = VLAN_ID; /* 1 through VLAN_VID_MASK */ | 349 | vlan_dev_info(new_dev)->vlan_id = vlan_id; |
346 | vlan_dev_info(new_dev)->real_dev = real_dev; | 350 | vlan_dev_info(new_dev)->real_dev = real_dev; |
347 | vlan_dev_info(new_dev)->dent = NULL; | 351 | vlan_dev_info(new_dev)->dent = NULL; |
348 | vlan_dev_info(new_dev)->flags = VLAN_FLAG_REORDER_HDR; | 352 | vlan_dev_info(new_dev)->flags = VLAN_FLAG_REORDER_HDR; |
@@ -536,7 +540,6 @@ static struct notifier_block vlan_notifier_block __read_mostly = { | |||
536 | static int vlan_ioctl_handler(struct net *net, void __user *arg) | 540 | static int vlan_ioctl_handler(struct net *net, void __user *arg) |
537 | { | 541 | { |
538 | int err; | 542 | int err; |
539 | unsigned short vid = 0; | ||
540 | struct vlan_ioctl_args args; | 543 | struct vlan_ioctl_args args; |
541 | struct net_device *dev = NULL; | 544 | struct net_device *dev = NULL; |
542 | 545 | ||
@@ -563,8 +566,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg) | |||
563 | goto out; | 566 | goto out; |
564 | 567 | ||
565 | err = -EINVAL; | 568 | err = -EINVAL; |
566 | if (args.cmd != ADD_VLAN_CMD && | 569 | if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev)) |
567 | !(dev->priv_flags & IFF_802_1Q_VLAN)) | ||
568 | goto out; | 570 | goto out; |
569 | } | 571 | } |
570 | 572 | ||
@@ -592,9 +594,9 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg) | |||
592 | err = -EPERM; | 594 | err = -EPERM; |
593 | if (!capable(CAP_NET_ADMIN)) | 595 | if (!capable(CAP_NET_ADMIN)) |
594 | break; | 596 | break; |
595 | err = vlan_dev_set_vlan_flag(dev, | 597 | err = vlan_dev_change_flags(dev, |
596 | args.u.flag, | 598 | args.vlan_qos ? args.u.flag : 0, |
597 | args.vlan_qos); | 599 | args.u.flag); |
598 | break; | 600 | break; |
599 | 601 | ||
600 | case SET_VLAN_NAME_TYPE_CMD: | 602 | case SET_VLAN_NAME_TYPE_CMD: |
@@ -638,8 +640,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg) | |||
638 | 640 | ||
639 | case GET_VLAN_VID_CMD: | 641 | case GET_VLAN_VID_CMD: |
640 | err = 0; | 642 | err = 0; |
641 | vlan_dev_get_vid(dev, &vid); | 643 | args.u.VID = vlan_dev_vlan_id(dev); |
642 | args.u.VID = vid; | ||
643 | if (copy_to_user(arg, &args, | 644 | if (copy_to_user(arg, &args, |
644 | sizeof(struct vlan_ioctl_args))) | 645 | sizeof(struct vlan_ioctl_args))) |
645 | err = -EFAULT; | 646 | err = -EFAULT; |
@@ -714,14 +715,20 @@ static int __init vlan_proto_init(void) | |||
714 | if (err < 0) | 715 | if (err < 0) |
715 | goto err2; | 716 | goto err2; |
716 | 717 | ||
717 | err = vlan_netlink_init(); | 718 | err = vlan_gvrp_init(); |
718 | if (err < 0) | 719 | if (err < 0) |
719 | goto err3; | 720 | goto err3; |
720 | 721 | ||
722 | err = vlan_netlink_init(); | ||
723 | if (err < 0) | ||
724 | goto err4; | ||
725 | |||
721 | dev_add_pack(&vlan_packet_type); | 726 | dev_add_pack(&vlan_packet_type); |
722 | vlan_ioctl_set(vlan_ioctl_handler); | 727 | vlan_ioctl_set(vlan_ioctl_handler); |
723 | return 0; | 728 | return 0; |
724 | 729 | ||
730 | err4: | ||
731 | vlan_gvrp_uninit(); | ||
725 | err3: | 732 | err3: |
726 | unregister_netdevice_notifier(&vlan_notifier_block); | 733 | unregister_netdevice_notifier(&vlan_notifier_block); |
727 | err2: | 734 | err2: |
@@ -746,8 +753,9 @@ static void __exit vlan_cleanup_module(void) | |||
746 | BUG_ON(!hlist_empty(&vlan_group_hash[i])); | 753 | BUG_ON(!hlist_empty(&vlan_group_hash[i])); |
747 | 754 | ||
748 | unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops); | 755 | unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops); |
749 | |||
750 | synchronize_net(); | 756 | synchronize_net(); |
757 | |||
758 | vlan_gvrp_uninit(); | ||
751 | } | 759 | } |
752 | 760 | ||
753 | module_init(vlan_proto_init); | 761 | module_init(vlan_proto_init); |
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index 5229a72c7ea1..a6603a4d917f 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h | |||
@@ -3,6 +3,55 @@ | |||
3 | 3 | ||
4 | #include <linux/if_vlan.h> | 4 | #include <linux/if_vlan.h> |
5 | 5 | ||
6 | |||
7 | /** | ||
8 | * struct vlan_priority_tci_mapping - vlan egress priority mappings | ||
9 | * @priority: skb priority | ||
10 | * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000 | ||
11 | * @next: pointer to next struct | ||
12 | */ | ||
13 | struct vlan_priority_tci_mapping { | ||
14 | u32 priority; | ||
15 | u16 vlan_qos; | ||
16 | struct vlan_priority_tci_mapping *next; | ||
17 | }; | ||
18 | |||
19 | /** | ||
20 | * struct vlan_dev_info - VLAN private device data | ||
21 | * @nr_ingress_mappings: number of ingress priority mappings | ||
22 | * @ingress_priority_map: ingress priority mappings | ||
23 | * @nr_egress_mappings: number of egress priority mappings | ||
24 | * @egress_priority_map: hash of egress priority mappings | ||
25 | * @vlan_id: VLAN identifier | ||
26 | * @flags: device flags | ||
27 | * @real_dev: underlying netdevice | ||
28 | * @real_dev_addr: address of underlying netdevice | ||
29 | * @dent: proc dir entry | ||
30 | * @cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX | ||
31 | * @cnt_encap_on_xmit: statistic - number of skb encapsulations on TX | ||
32 | */ | ||
33 | struct vlan_dev_info { | ||
34 | unsigned int nr_ingress_mappings; | ||
35 | u32 ingress_priority_map[8]; | ||
36 | unsigned int nr_egress_mappings; | ||
37 | struct vlan_priority_tci_mapping *egress_priority_map[16]; | ||
38 | |||
39 | u16 vlan_id; | ||
40 | u16 flags; | ||
41 | |||
42 | struct net_device *real_dev; | ||
43 | unsigned char real_dev_addr[ETH_ALEN]; | ||
44 | |||
45 | struct proc_dir_entry *dent; | ||
46 | unsigned long cnt_inc_headroom_on_tx; | ||
47 | unsigned long cnt_encap_on_xmit; | ||
48 | }; | ||
49 | |||
50 | static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev) | ||
51 | { | ||
52 | return netdev_priv(dev); | ||
53 | } | ||
54 | |||
6 | #define VLAN_GRP_HASH_SHIFT 5 | 55 | #define VLAN_GRP_HASH_SHIFT 5 |
7 | #define VLAN_GRP_HASH_SIZE (1 << VLAN_GRP_HASH_SHIFT) | 56 | #define VLAN_GRP_HASH_SIZE (1 << VLAN_GRP_HASH_SHIFT) |
8 | #define VLAN_GRP_HASH_MASK (VLAN_GRP_HASH_SIZE - 1) | 57 | #define VLAN_GRP_HASH_MASK (VLAN_GRP_HASH_SIZE - 1) |
@@ -18,26 +67,47 @@ | |||
18 | * Must be invoked with rcu_read_lock (ie preempt disabled) | 67 | * Must be invoked with rcu_read_lock (ie preempt disabled) |
19 | * or with RTNL. | 68 | * or with RTNL. |
20 | */ | 69 | */ |
21 | struct net_device *__find_vlan_dev(struct net_device *real_dev, | 70 | struct net_device *__find_vlan_dev(struct net_device *real_dev, u16 vlan_id); |
22 | unsigned short VID); /* vlan.c */ | ||
23 | 71 | ||
24 | /* found in vlan_dev.c */ | 72 | /* found in vlan_dev.c */ |
25 | int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, | 73 | int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, |
26 | struct packet_type *ptype, struct net_device *orig_dev); | 74 | struct packet_type *ptype, struct net_device *orig_dev); |
27 | void vlan_dev_set_ingress_priority(const struct net_device *dev, | 75 | void vlan_dev_set_ingress_priority(const struct net_device *dev, |
28 | u32 skb_prio, short vlan_prio); | 76 | u32 skb_prio, u16 vlan_prio); |
29 | int vlan_dev_set_egress_priority(const struct net_device *dev, | 77 | int vlan_dev_set_egress_priority(const struct net_device *dev, |
30 | u32 skb_prio, short vlan_prio); | 78 | u32 skb_prio, u16 vlan_prio); |
31 | int vlan_dev_set_vlan_flag(const struct net_device *dev, | 79 | int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask); |
32 | u32 flag, short flag_val); | ||
33 | void vlan_dev_get_realdev_name(const struct net_device *dev, char *result); | 80 | void vlan_dev_get_realdev_name(const struct net_device *dev, char *result); |
34 | void vlan_dev_get_vid(const struct net_device *dev, unsigned short *result); | ||
35 | 81 | ||
36 | int vlan_check_real_dev(struct net_device *real_dev, unsigned short vlan_id); | 82 | int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id); |
37 | void vlan_setup(struct net_device *dev); | 83 | void vlan_setup(struct net_device *dev); |
38 | int register_vlan_dev(struct net_device *dev); | 84 | int register_vlan_dev(struct net_device *dev); |
39 | void unregister_vlan_dev(struct net_device *dev); | 85 | void unregister_vlan_dev(struct net_device *dev); |
40 | 86 | ||
87 | static inline u32 vlan_get_ingress_priority(struct net_device *dev, | ||
88 | u16 vlan_tci) | ||
89 | { | ||
90 | struct vlan_dev_info *vip = vlan_dev_info(dev); | ||
91 | |||
92 | return vip->ingress_priority_map[(vlan_tci >> 13) & 0x7]; | ||
93 | } | ||
94 | |||
95 | #ifdef CONFIG_VLAN_8021Q_GVRP | ||
96 | extern int vlan_gvrp_request_join(const struct net_device *dev); | ||
97 | extern void vlan_gvrp_request_leave(const struct net_device *dev); | ||
98 | extern int vlan_gvrp_init_applicant(struct net_device *dev); | ||
99 | extern void vlan_gvrp_uninit_applicant(struct net_device *dev); | ||
100 | extern int vlan_gvrp_init(void); | ||
101 | extern void vlan_gvrp_uninit(void); | ||
102 | #else | ||
103 | static inline int vlan_gvrp_request_join(const struct net_device *dev) { return 0; } | ||
104 | static inline void vlan_gvrp_request_leave(const struct net_device *dev) {} | ||
105 | static inline int vlan_gvrp_init_applicant(struct net_device *dev) { return 0; } | ||
106 | static inline void vlan_gvrp_uninit_applicant(struct net_device *dev) {} | ||
107 | static inline int vlan_gvrp_init(void) { return 0; } | ||
108 | static inline void vlan_gvrp_uninit(void) {} | ||
109 | #endif | ||
110 | |||
41 | int vlan_netlink_init(void); | 111 | int vlan_netlink_init(void); |
42 | void vlan_netlink_fini(void); | 112 | void vlan_netlink_fini(void); |
43 | 113 | ||
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c new file mode 100644 index 000000000000..916061f681b6 --- /dev/null +++ b/net/8021q/vlan_core.c | |||
@@ -0,0 +1,64 @@ | |||
1 | #include <linux/skbuff.h> | ||
2 | #include <linux/netdevice.h> | ||
3 | #include <linux/if_vlan.h> | ||
4 | #include "vlan.h" | ||
5 | |||
6 | /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ | ||
7 | int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | ||
8 | u16 vlan_tci, int polling) | ||
9 | { | ||
10 | struct net_device_stats *stats; | ||
11 | |||
12 | if (skb_bond_should_drop(skb)) { | ||
13 | dev_kfree_skb_any(skb); | ||
14 | return NET_RX_DROP; | ||
15 | } | ||
16 | |||
17 | skb->vlan_tci = vlan_tci; | ||
18 | netif_nit_deliver(skb); | ||
19 | |||
20 | skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); | ||
21 | if (skb->dev == NULL) { | ||
22 | dev_kfree_skb_any(skb); | ||
23 | /* Not NET_RX_DROP, this is not being dropped | ||
24 | * due to congestion. */ | ||
25 | return NET_RX_SUCCESS; | ||
26 | } | ||
27 | skb->dev->last_rx = jiffies; | ||
28 | skb->vlan_tci = 0; | ||
29 | |||
30 | stats = &skb->dev->stats; | ||
31 | stats->rx_packets++; | ||
32 | stats->rx_bytes += skb->len; | ||
33 | |||
34 | skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci); | ||
35 | switch (skb->pkt_type) { | ||
36 | case PACKET_BROADCAST: | ||
37 | break; | ||
38 | case PACKET_MULTICAST: | ||
39 | stats->multicast++; | ||
40 | break; | ||
41 | case PACKET_OTHERHOST: | ||
42 | /* Our lower layer thinks this is not local, let's make sure. | ||
43 | * This allows the VLAN to have a different MAC than the | ||
44 | * underlying device, and still route correctly. */ | ||
45 | if (!compare_ether_addr(eth_hdr(skb)->h_dest, | ||
46 | skb->dev->dev_addr)) | ||
47 | skb->pkt_type = PACKET_HOST; | ||
48 | break; | ||
49 | }; | ||
50 | return (polling ? netif_receive_skb(skb) : netif_rx(skb)); | ||
51 | } | ||
52 | EXPORT_SYMBOL(__vlan_hwaccel_rx); | ||
53 | |||
54 | struct net_device *vlan_dev_real_dev(const struct net_device *dev) | ||
55 | { | ||
56 | return vlan_dev_info(dev)->real_dev; | ||
57 | } | ||
58 | EXPORT_SYMBOL_GPL(vlan_dev_real_dev); | ||
59 | |||
60 | u16 vlan_dev_vlan_id(const struct net_device *dev) | ||
61 | { | ||
62 | return vlan_dev_info(dev)->vlan_id; | ||
63 | } | ||
64 | EXPORT_SYMBOL_GPL(vlan_dev_vlan_id); | ||
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 5d055c242ed8..f42bc2b26b85 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -21,21 +21,15 @@ | |||
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/mm.h> | ||
25 | #include <linux/in.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <asm/uaccess.h> /* for copy_from_user */ | ||
28 | #include <linux/skbuff.h> | 24 | #include <linux/skbuff.h> |
29 | #include <linux/netdevice.h> | 25 | #include <linux/netdevice.h> |
30 | #include <linux/etherdevice.h> | 26 | #include <linux/etherdevice.h> |
31 | #include <net/datalink.h> | 27 | #include <linux/ethtool.h> |
32 | #include <net/p8022.h> | ||
33 | #include <net/arp.h> | 28 | #include <net/arp.h> |
34 | 29 | ||
35 | #include "vlan.h" | 30 | #include "vlan.h" |
36 | #include "vlanproc.h" | 31 | #include "vlanproc.h" |
37 | #include <linux/if_vlan.h> | 32 | #include <linux/if_vlan.h> |
38 | #include <net/ip.h> | ||
39 | 33 | ||
40 | /* | 34 | /* |
41 | * Rebuild the Ethernet MAC header. This is called after an ARP | 35 | * Rebuild the Ethernet MAC header. This is called after an ARP |
@@ -73,11 +67,8 @@ static int vlan_dev_rebuild_header(struct sk_buff *skb) | |||
73 | static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb) | 67 | static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb) |
74 | { | 68 | { |
75 | if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) { | 69 | if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) { |
76 | if (skb_shared(skb) || skb_cloned(skb)) { | 70 | if (skb_cow(skb, skb_headroom(skb)) < 0) |
77 | struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); | 71 | skb = NULL; |
78 | kfree_skb(skb); | ||
79 | skb = nskb; | ||
80 | } | ||
81 | if (skb) { | 72 | if (skb) { |
82 | /* Lifted from Gleb's VLAN code... */ | 73 | /* Lifted from Gleb's VLAN code... */ |
83 | memmove(skb->data - ETH_HLEN, | 74 | memmove(skb->data - ETH_HLEN, |
@@ -149,9 +140,9 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
149 | struct packet_type *ptype, struct net_device *orig_dev) | 140 | struct packet_type *ptype, struct net_device *orig_dev) |
150 | { | 141 | { |
151 | struct vlan_hdr *vhdr; | 142 | struct vlan_hdr *vhdr; |
152 | unsigned short vid; | ||
153 | struct net_device_stats *stats; | 143 | struct net_device_stats *stats; |
154 | unsigned short vlan_TCI; | 144 | u16 vlan_id; |
145 | u16 vlan_tci; | ||
155 | 146 | ||
156 | skb = skb_share_check(skb, GFP_ATOMIC); | 147 | skb = skb_share_check(skb, GFP_ATOMIC); |
157 | if (skb == NULL) | 148 | if (skb == NULL) |
@@ -161,14 +152,14 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
161 | goto err_free; | 152 | goto err_free; |
162 | 153 | ||
163 | vhdr = (struct vlan_hdr *)skb->data; | 154 | vhdr = (struct vlan_hdr *)skb->data; |
164 | vlan_TCI = ntohs(vhdr->h_vlan_TCI); | 155 | vlan_tci = ntohs(vhdr->h_vlan_TCI); |
165 | vid = (vlan_TCI & VLAN_VID_MASK); | 156 | vlan_id = vlan_tci & VLAN_VID_MASK; |
166 | 157 | ||
167 | rcu_read_lock(); | 158 | rcu_read_lock(); |
168 | skb->dev = __find_vlan_dev(dev, vid); | 159 | skb->dev = __find_vlan_dev(dev, vlan_id); |
169 | if (!skb->dev) { | 160 | if (!skb->dev) { |
170 | pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n", | 161 | pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n", |
171 | __func__, (unsigned int)vid, dev->name); | 162 | __func__, vlan_id, dev->name); |
172 | goto err_unlock; | 163 | goto err_unlock; |
173 | } | 164 | } |
174 | 165 | ||
@@ -180,11 +171,10 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
180 | 171 | ||
181 | skb_pull_rcsum(skb, VLAN_HLEN); | 172 | skb_pull_rcsum(skb, VLAN_HLEN); |
182 | 173 | ||
183 | skb->priority = vlan_get_ingress_priority(skb->dev, | 174 | skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci); |
184 | ntohs(vhdr->h_vlan_TCI)); | ||
185 | 175 | ||
186 | pr_debug("%s: priority: %u for TCI: %hu\n", | 176 | pr_debug("%s: priority: %u for TCI: %hu\n", |
187 | __func__, skb->priority, ntohs(vhdr->h_vlan_TCI)); | 177 | __func__, skb->priority, vlan_tci); |
188 | 178 | ||
189 | switch (skb->pkt_type) { | 179 | switch (skb->pkt_type) { |
190 | case PACKET_BROADCAST: /* Yeah, stats collect these together.. */ | 180 | case PACKET_BROADCAST: /* Yeah, stats collect these together.. */ |
@@ -227,7 +217,7 @@ err_free: | |||
227 | return NET_RX_DROP; | 217 | return NET_RX_DROP; |
228 | } | 218 | } |
229 | 219 | ||
230 | static inline unsigned short | 220 | static inline u16 |
231 | vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb) | 221 | vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb) |
232 | { | 222 | { |
233 | struct vlan_priority_tci_mapping *mp; | 223 | struct vlan_priority_tci_mapping *mp; |
@@ -259,103 +249,44 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, | |||
259 | unsigned int len) | 249 | unsigned int len) |
260 | { | 250 | { |
261 | struct vlan_hdr *vhdr; | 251 | struct vlan_hdr *vhdr; |
262 | unsigned short veth_TCI = 0; | 252 | unsigned int vhdrlen = 0; |
263 | int rc = 0; | 253 | u16 vlan_tci = 0; |
264 | int build_vlan_header = 0; | 254 | int rc; |
265 | struct net_device *vdev = dev; | ||
266 | |||
267 | pr_debug("%s: skb: %p type: %hx len: %u vlan_id: %hx, daddr: %p\n", | ||
268 | __func__, skb, type, len, vlan_dev_info(dev)->vlan_id, | ||
269 | daddr); | ||
270 | |||
271 | /* build vlan header only if re_order_header flag is NOT set. This | ||
272 | * fixes some programs that get confused when they see a VLAN device | ||
273 | * sending a frame that is VLAN encoded (the consensus is that the VLAN | ||
274 | * device should look completely like an Ethernet device when the | ||
275 | * REORDER_HEADER flag is set) The drawback to this is some extra | ||
276 | * header shuffling in the hard_start_xmit. Users can turn off this | ||
277 | * REORDER behaviour with the vconfig tool. | ||
278 | */ | ||
279 | if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) | ||
280 | build_vlan_header = 1; | ||
281 | 255 | ||
282 | if (build_vlan_header) { | 256 | if (WARN_ON(skb_headroom(skb) < dev->hard_header_len)) |
283 | vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN); | 257 | return -ENOSPC; |
284 | 258 | ||
285 | /* build the four bytes that make this a VLAN header. */ | 259 | if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) { |
286 | 260 | vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN); | |
287 | /* Now, construct the second two bytes. This field looks | ||
288 | * something like: | ||
289 | * usr_priority: 3 bits (high bits) | ||
290 | * CFI 1 bit | ||
291 | * VLAN ID 12 bits (low bits) | ||
292 | * | ||
293 | */ | ||
294 | veth_TCI = vlan_dev_info(dev)->vlan_id; | ||
295 | veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb); | ||
296 | 261 | ||
297 | vhdr->h_vlan_TCI = htons(veth_TCI); | 262 | vlan_tci = vlan_dev_info(dev)->vlan_id; |
263 | vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); | ||
264 | vhdr->h_vlan_TCI = htons(vlan_tci); | ||
298 | 265 | ||
299 | /* | 266 | /* |
300 | * Set the protocol type. For a packet of type ETH_P_802_3 we | 267 | * Set the protocol type. For a packet of type ETH_P_802_3 we |
301 | * put the length in here instead. It is up to the 802.2 | 268 | * put the length in here instead. It is up to the 802.2 |
302 | * layer to carry protocol information. | 269 | * layer to carry protocol information. |
303 | */ | 270 | */ |
304 | |||
305 | if (type != ETH_P_802_3) | 271 | if (type != ETH_P_802_3) |
306 | vhdr->h_vlan_encapsulated_proto = htons(type); | 272 | vhdr->h_vlan_encapsulated_proto = htons(type); |
307 | else | 273 | else |
308 | vhdr->h_vlan_encapsulated_proto = htons(len); | 274 | vhdr->h_vlan_encapsulated_proto = htons(len); |
309 | 275 | ||
310 | skb->protocol = htons(ETH_P_8021Q); | 276 | skb->protocol = htons(ETH_P_8021Q); |
311 | skb_reset_network_header(skb); | 277 | type = ETH_P_8021Q; |
278 | vhdrlen = VLAN_HLEN; | ||
312 | } | 279 | } |
313 | 280 | ||
314 | /* Before delegating work to the lower layer, enter our MAC-address */ | 281 | /* Before delegating work to the lower layer, enter our MAC-address */ |
315 | if (saddr == NULL) | 282 | if (saddr == NULL) |
316 | saddr = dev->dev_addr; | 283 | saddr = dev->dev_addr; |
317 | 284 | ||
285 | /* Now make the underlying real hard header */ | ||
318 | dev = vlan_dev_info(dev)->real_dev; | 286 | dev = vlan_dev_info(dev)->real_dev; |
319 | 287 | rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen); | |
320 | /* MPLS can send us skbuffs w/out enough space. This check will grow | 288 | if (rc > 0) |
321 | * the skb if it doesn't have enough headroom. Not a beautiful solution, | 289 | rc += vhdrlen; |
322 | * so I'll tick a counter so that users can know it's happening... | ||
323 | * If they care... | ||
324 | */ | ||
325 | |||
326 | /* NOTE: This may still break if the underlying device is not the final | ||
327 | * device (and thus there are more headers to add...) It should work for | ||
328 | * good-ole-ethernet though. | ||
329 | */ | ||
330 | if (skb_headroom(skb) < dev->hard_header_len) { | ||
331 | struct sk_buff *sk_tmp = skb; | ||
332 | skb = skb_realloc_headroom(sk_tmp, dev->hard_header_len); | ||
333 | kfree_skb(sk_tmp); | ||
334 | if (skb == NULL) { | ||
335 | struct net_device_stats *stats = &vdev->stats; | ||
336 | stats->tx_dropped++; | ||
337 | return -ENOMEM; | ||
338 | } | ||
339 | vlan_dev_info(vdev)->cnt_inc_headroom_on_tx++; | ||
340 | pr_debug("%s: %s: had to grow skb\n", __func__, vdev->name); | ||
341 | } | ||
342 | |||
343 | if (build_vlan_header) { | ||
344 | /* Now make the underlying real hard header */ | ||
345 | rc = dev_hard_header(skb, dev, ETH_P_8021Q, daddr, saddr, | ||
346 | len + VLAN_HLEN); | ||
347 | if (rc > 0) | ||
348 | rc += VLAN_HLEN; | ||
349 | else if (rc < 0) | ||
350 | rc -= VLAN_HLEN; | ||
351 | } else | ||
352 | /* If here, then we'll just make a normal looking ethernet | ||
353 | * frame, but, the hard_start_xmit method will insert the tag | ||
354 | * (it has to be able to do this for bridged and other skbs | ||
355 | * that don't come down the protocol stack in an orderly manner. | ||
356 | */ | ||
357 | rc = dev_hard_header(skb, dev, type, daddr, saddr, len); | ||
358 | |||
359 | return rc; | 290 | return rc; |
360 | } | 291 | } |
361 | 292 | ||
@@ -369,78 +300,49 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
369 | * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING | 300 | * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING |
370 | * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... | 301 | * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... |
371 | */ | 302 | */ |
372 | |||
373 | if (veth->h_vlan_proto != htons(ETH_P_8021Q) || | 303 | if (veth->h_vlan_proto != htons(ETH_P_8021Q) || |
374 | vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) { | 304 | vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) { |
375 | int orig_headroom = skb_headroom(skb); | 305 | unsigned int orig_headroom = skb_headroom(skb); |
376 | unsigned short veth_TCI; | 306 | u16 vlan_tci; |
377 | 307 | ||
378 | /* This is not a VLAN frame...but we can fix that! */ | ||
379 | vlan_dev_info(dev)->cnt_encap_on_xmit++; | 308 | vlan_dev_info(dev)->cnt_encap_on_xmit++; |
380 | 309 | ||
381 | pr_debug("%s: proto to encap: 0x%hx\n", | 310 | vlan_tci = vlan_dev_info(dev)->vlan_id; |
382 | __func__, ntohs(veth->h_vlan_proto)); | 311 | vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); |
383 | /* Construct the second two bytes. This field looks something | 312 | skb = __vlan_put_tag(skb, vlan_tci); |
384 | * like: | ||
385 | * usr_priority: 3 bits (high bits) | ||
386 | * CFI 1 bit | ||
387 | * VLAN ID 12 bits (low bits) | ||
388 | */ | ||
389 | veth_TCI = vlan_dev_info(dev)->vlan_id; | ||
390 | veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb); | ||
391 | |||
392 | skb = __vlan_put_tag(skb, veth_TCI); | ||
393 | if (!skb) { | 313 | if (!skb) { |
394 | stats->tx_dropped++; | 314 | stats->tx_dropped++; |
395 | return 0; | 315 | return NETDEV_TX_OK; |
396 | } | 316 | } |
397 | 317 | ||
398 | if (orig_headroom < VLAN_HLEN) | 318 | if (orig_headroom < VLAN_HLEN) |
399 | vlan_dev_info(dev)->cnt_inc_headroom_on_tx++; | 319 | vlan_dev_info(dev)->cnt_inc_headroom_on_tx++; |
400 | } | 320 | } |
401 | 321 | ||
402 | pr_debug("%s: about to send skb: %p to dev: %s\n", | 322 | stats->tx_packets++; |
403 | __func__, skb, skb->dev->name); | ||
404 | pr_debug(" " MAC_FMT " " MAC_FMT " %4hx %4hx %4hx\n", | ||
405 | veth->h_dest[0], veth->h_dest[1], veth->h_dest[2], | ||
406 | veth->h_dest[3], veth->h_dest[4], veth->h_dest[5], | ||
407 | veth->h_source[0], veth->h_source[1], veth->h_source[2], | ||
408 | veth->h_source[3], veth->h_source[4], veth->h_source[5], | ||
409 | veth->h_vlan_proto, veth->h_vlan_TCI, | ||
410 | veth->h_vlan_encapsulated_proto); | ||
411 | |||
412 | stats->tx_packets++; /* for statics only */ | ||
413 | stats->tx_bytes += skb->len; | 323 | stats->tx_bytes += skb->len; |
414 | 324 | ||
415 | skb->dev = vlan_dev_info(dev)->real_dev; | 325 | skb->dev = vlan_dev_info(dev)->real_dev; |
416 | dev_queue_xmit(skb); | 326 | dev_queue_xmit(skb); |
417 | 327 | return NETDEV_TX_OK; | |
418 | return 0; | ||
419 | } | 328 | } |
420 | 329 | ||
421 | static int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, | 330 | static int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, |
422 | struct net_device *dev) | 331 | struct net_device *dev) |
423 | { | 332 | { |
424 | struct net_device_stats *stats = &dev->stats; | 333 | struct net_device_stats *stats = &dev->stats; |
425 | unsigned short veth_TCI; | 334 | u16 vlan_tci; |
426 | 335 | ||
427 | /* Construct the second two bytes. This field looks something | 336 | vlan_tci = vlan_dev_info(dev)->vlan_id; |
428 | * like: | 337 | vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); |
429 | * usr_priority: 3 bits (high bits) | 338 | skb = __vlan_hwaccel_put_tag(skb, vlan_tci); |
430 | * CFI 1 bit | ||
431 | * VLAN ID 12 bits (low bits) | ||
432 | */ | ||
433 | veth_TCI = vlan_dev_info(dev)->vlan_id; | ||
434 | veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb); | ||
435 | skb = __vlan_hwaccel_put_tag(skb, veth_TCI); | ||
436 | 339 | ||
437 | stats->tx_packets++; | 340 | stats->tx_packets++; |
438 | stats->tx_bytes += skb->len; | 341 | stats->tx_bytes += skb->len; |
439 | 342 | ||
440 | skb->dev = vlan_dev_info(dev)->real_dev; | 343 | skb->dev = vlan_dev_info(dev)->real_dev; |
441 | dev_queue_xmit(skb); | 344 | dev_queue_xmit(skb); |
442 | 345 | return NETDEV_TX_OK; | |
443 | return 0; | ||
444 | } | 346 | } |
445 | 347 | ||
446 | static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) | 348 | static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) |
@@ -457,7 +359,7 @@ static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) | |||
457 | } | 359 | } |
458 | 360 | ||
459 | void vlan_dev_set_ingress_priority(const struct net_device *dev, | 361 | void vlan_dev_set_ingress_priority(const struct net_device *dev, |
460 | u32 skb_prio, short vlan_prio) | 362 | u32 skb_prio, u16 vlan_prio) |
461 | { | 363 | { |
462 | struct vlan_dev_info *vlan = vlan_dev_info(dev); | 364 | struct vlan_dev_info *vlan = vlan_dev_info(dev); |
463 | 365 | ||
@@ -470,7 +372,7 @@ void vlan_dev_set_ingress_priority(const struct net_device *dev, | |||
470 | } | 372 | } |
471 | 373 | ||
472 | int vlan_dev_set_egress_priority(const struct net_device *dev, | 374 | int vlan_dev_set_egress_priority(const struct net_device *dev, |
473 | u32 skb_prio, short vlan_prio) | 375 | u32 skb_prio, u16 vlan_prio) |
474 | { | 376 | { |
475 | struct vlan_dev_info *vlan = vlan_dev_info(dev); | 377 | struct vlan_dev_info *vlan = vlan_dev_info(dev); |
476 | struct vlan_priority_tci_mapping *mp = NULL; | 378 | struct vlan_priority_tci_mapping *mp = NULL; |
@@ -507,18 +409,23 @@ int vlan_dev_set_egress_priority(const struct net_device *dev, | |||
507 | } | 409 | } |
508 | 410 | ||
509 | /* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. */ | 411 | /* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. */ |
510 | int vlan_dev_set_vlan_flag(const struct net_device *dev, | 412 | int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask) |
511 | u32 flag, short flag_val) | ||
512 | { | 413 | { |
513 | /* verify flag is supported */ | 414 | struct vlan_dev_info *vlan = vlan_dev_info(dev); |
514 | if (flag == VLAN_FLAG_REORDER_HDR) { | 415 | u32 old_flags = vlan->flags; |
515 | if (flag_val) | 416 | |
516 | vlan_dev_info(dev)->flags |= VLAN_FLAG_REORDER_HDR; | 417 | if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP)) |
418 | return -EINVAL; | ||
419 | |||
420 | vlan->flags = (old_flags & ~mask) | (flags & mask); | ||
421 | |||
422 | if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) { | ||
423 | if (vlan->flags & VLAN_FLAG_GVRP) | ||
424 | vlan_gvrp_request_join(dev); | ||
517 | else | 425 | else |
518 | vlan_dev_info(dev)->flags &= ~VLAN_FLAG_REORDER_HDR; | 426 | vlan_gvrp_request_leave(dev); |
519 | return 0; | ||
520 | } | 427 | } |
521 | return -EINVAL; | 428 | return 0; |
522 | } | 429 | } |
523 | 430 | ||
524 | void vlan_dev_get_realdev_name(const struct net_device *dev, char *result) | 431 | void vlan_dev_get_realdev_name(const struct net_device *dev, char *result) |
@@ -526,11 +433,6 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result) | |||
526 | strncpy(result, vlan_dev_info(dev)->real_dev->name, 23); | 433 | strncpy(result, vlan_dev_info(dev)->real_dev->name, 23); |
527 | } | 434 | } |
528 | 435 | ||
529 | void vlan_dev_get_vid(const struct net_device *dev, unsigned short *result) | ||
530 | { | ||
531 | *result = vlan_dev_info(dev)->vlan_id; | ||
532 | } | ||
533 | |||
534 | static int vlan_dev_open(struct net_device *dev) | 436 | static int vlan_dev_open(struct net_device *dev) |
535 | { | 437 | { |
536 | struct vlan_dev_info *vlan = vlan_dev_info(dev); | 438 | struct vlan_dev_info *vlan = vlan_dev_info(dev); |
@@ -543,21 +445,44 @@ static int vlan_dev_open(struct net_device *dev) | |||
543 | if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) { | 445 | if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) { |
544 | err = dev_unicast_add(real_dev, dev->dev_addr, ETH_ALEN); | 446 | err = dev_unicast_add(real_dev, dev->dev_addr, ETH_ALEN); |
545 | if (err < 0) | 447 | if (err < 0) |
546 | return err; | 448 | goto out; |
449 | } | ||
450 | |||
451 | if (dev->flags & IFF_ALLMULTI) { | ||
452 | err = dev_set_allmulti(real_dev, 1); | ||
453 | if (err < 0) | ||
454 | goto del_unicast; | ||
547 | } | 455 | } |
456 | if (dev->flags & IFF_PROMISC) { | ||
457 | err = dev_set_promiscuity(real_dev, 1); | ||
458 | if (err < 0) | ||
459 | goto clear_allmulti; | ||
460 | } | ||
461 | |||
548 | memcpy(vlan->real_dev_addr, real_dev->dev_addr, ETH_ALEN); | 462 | memcpy(vlan->real_dev_addr, real_dev->dev_addr, ETH_ALEN); |
549 | 463 | ||
550 | if (dev->flags & IFF_ALLMULTI) | 464 | if (vlan->flags & VLAN_FLAG_GVRP) |
551 | dev_set_allmulti(real_dev, 1); | 465 | vlan_gvrp_request_join(dev); |
552 | if (dev->flags & IFF_PROMISC) | ||
553 | dev_set_promiscuity(real_dev, 1); | ||
554 | 466 | ||
555 | return 0; | 467 | return 0; |
468 | |||
469 | clear_allmulti: | ||
470 | if (dev->flags & IFF_ALLMULTI) | ||
471 | dev_set_allmulti(real_dev, -1); | ||
472 | del_unicast: | ||
473 | if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) | ||
474 | dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN); | ||
475 | out: | ||
476 | return err; | ||
556 | } | 477 | } |
557 | 478 | ||
558 | static int vlan_dev_stop(struct net_device *dev) | 479 | static int vlan_dev_stop(struct net_device *dev) |
559 | { | 480 | { |
560 | struct net_device *real_dev = vlan_dev_info(dev)->real_dev; | 481 | struct vlan_dev_info *vlan = vlan_dev_info(dev); |
482 | struct net_device *real_dev = vlan->real_dev; | ||
483 | |||
484 | if (vlan->flags & VLAN_FLAG_GVRP) | ||
485 | vlan_gvrp_request_leave(dev); | ||
561 | 486 | ||
562 | dev_mc_unsync(real_dev, dev); | 487 | dev_mc_unsync(real_dev, dev); |
563 | dev_unicast_unsync(real_dev, dev); | 488 | dev_unicast_unsync(real_dev, dev); |
@@ -645,6 +570,20 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) | |||
645 | */ | 570 | */ |
646 | static struct lock_class_key vlan_netdev_xmit_lock_key; | 571 | static struct lock_class_key vlan_netdev_xmit_lock_key; |
647 | 572 | ||
573 | static void vlan_dev_set_lockdep_one(struct net_device *dev, | ||
574 | struct netdev_queue *txq, | ||
575 | void *_subclass) | ||
576 | { | ||
577 | lockdep_set_class_and_subclass(&txq->_xmit_lock, | ||
578 | &vlan_netdev_xmit_lock_key, | ||
579 | *(int *)_subclass); | ||
580 | } | ||
581 | |||
582 | static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass) | ||
583 | { | ||
584 | netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass); | ||
585 | } | ||
586 | |||
648 | static const struct header_ops vlan_header_ops = { | 587 | static const struct header_ops vlan_header_ops = { |
649 | .create = vlan_dev_hard_header, | 588 | .create = vlan_dev_hard_header, |
650 | .rebuild = vlan_dev_rebuild_header, | 589 | .rebuild = vlan_dev_rebuild_header, |
@@ -683,11 +622,10 @@ static int vlan_dev_init(struct net_device *dev) | |||
683 | dev->hard_start_xmit = vlan_dev_hard_start_xmit; | 622 | dev->hard_start_xmit = vlan_dev_hard_start_xmit; |
684 | } | 623 | } |
685 | 624 | ||
686 | if (real_dev->priv_flags & IFF_802_1Q_VLAN) | 625 | if (is_vlan_dev(real_dev)) |
687 | subclass = 1; | 626 | subclass = 1; |
688 | 627 | ||
689 | lockdep_set_class_and_subclass(&dev->_xmit_lock, | 628 | vlan_dev_set_lockdep_class(dev, subclass); |
690 | &vlan_netdev_xmit_lock_key, subclass); | ||
691 | return 0; | 629 | return 0; |
692 | } | 630 | } |
693 | 631 | ||
@@ -705,6 +643,35 @@ static void vlan_dev_uninit(struct net_device *dev) | |||
705 | } | 643 | } |
706 | } | 644 | } |
707 | 645 | ||
646 | static u32 vlan_ethtool_get_rx_csum(struct net_device *dev) | ||
647 | { | ||
648 | const struct vlan_dev_info *vlan = vlan_dev_info(dev); | ||
649 | struct net_device *real_dev = vlan->real_dev; | ||
650 | |||
651 | if (real_dev->ethtool_ops == NULL || | ||
652 | real_dev->ethtool_ops->get_rx_csum == NULL) | ||
653 | return 0; | ||
654 | return real_dev->ethtool_ops->get_rx_csum(real_dev); | ||
655 | } | ||
656 | |||
657 | static u32 vlan_ethtool_get_flags(struct net_device *dev) | ||
658 | { | ||
659 | const struct vlan_dev_info *vlan = vlan_dev_info(dev); | ||
660 | struct net_device *real_dev = vlan->real_dev; | ||
661 | |||
662 | if (!(real_dev->features & NETIF_F_HW_VLAN_RX) || | ||
663 | real_dev->ethtool_ops == NULL || | ||
664 | real_dev->ethtool_ops->get_flags == NULL) | ||
665 | return 0; | ||
666 | return real_dev->ethtool_ops->get_flags(real_dev); | ||
667 | } | ||
668 | |||
669 | static const struct ethtool_ops vlan_ethtool_ops = { | ||
670 | .get_link = ethtool_op_get_link, | ||
671 | .get_rx_csum = vlan_ethtool_get_rx_csum, | ||
672 | .get_flags = vlan_ethtool_get_flags, | ||
673 | }; | ||
674 | |||
708 | void vlan_setup(struct net_device *dev) | 675 | void vlan_setup(struct net_device *dev) |
709 | { | 676 | { |
710 | ether_setup(dev); | 677 | ether_setup(dev); |
@@ -723,6 +690,7 @@ void vlan_setup(struct net_device *dev) | |||
723 | dev->change_rx_flags = vlan_dev_change_rx_flags; | 690 | dev->change_rx_flags = vlan_dev_change_rx_flags; |
724 | dev->do_ioctl = vlan_dev_ioctl; | 691 | dev->do_ioctl = vlan_dev_ioctl; |
725 | dev->destructor = free_netdev; | 692 | dev->destructor = free_netdev; |
693 | dev->ethtool_ops = &vlan_ethtool_ops; | ||
726 | 694 | ||
727 | memset(dev->broadcast, 0, ETH_ALEN); | 695 | memset(dev->broadcast, 0, ETH_ALEN); |
728 | } | 696 | } |
diff --git a/net/8021q/vlan_gvrp.c b/net/8021q/vlan_gvrp.c new file mode 100644 index 000000000000..061ceceeef12 --- /dev/null +++ b/net/8021q/vlan_gvrp.c | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * IEEE 802.1Q GARP VLAN Registration Protocol (GVRP) | ||
3 | * | ||
4 | * Copyright (c) 2008 Patrick McHardy <kaber@trash.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * version 2 as published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/if_vlan.h> | ||
12 | #include <net/garp.h> | ||
13 | #include "vlan.h" | ||
14 | |||
15 | #define GARP_GVRP_ADDRESS { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 } | ||
16 | |||
17 | enum gvrp_attributes { | ||
18 | GVRP_ATTR_INVALID, | ||
19 | GVRP_ATTR_VID, | ||
20 | __GVRP_ATTR_MAX | ||
21 | }; | ||
22 | #define GVRP_ATTR_MAX (__GVRP_ATTR_MAX - 1) | ||
23 | |||
24 | static struct garp_application vlan_gvrp_app __read_mostly = { | ||
25 | .proto.group_address = GARP_GVRP_ADDRESS, | ||
26 | .maxattr = GVRP_ATTR_MAX, | ||
27 | .type = GARP_APPLICATION_GVRP, | ||
28 | }; | ||
29 | |||
30 | int vlan_gvrp_request_join(const struct net_device *dev) | ||
31 | { | ||
32 | const struct vlan_dev_info *vlan = vlan_dev_info(dev); | ||
33 | __be16 vlan_id = htons(vlan->vlan_id); | ||
34 | |||
35 | return garp_request_join(vlan->real_dev, &vlan_gvrp_app, | ||
36 | &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID); | ||
37 | } | ||
38 | |||
39 | void vlan_gvrp_request_leave(const struct net_device *dev) | ||
40 | { | ||
41 | const struct vlan_dev_info *vlan = vlan_dev_info(dev); | ||
42 | __be16 vlan_id = htons(vlan->vlan_id); | ||
43 | |||
44 | garp_request_leave(vlan->real_dev, &vlan_gvrp_app, | ||
45 | &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID); | ||
46 | } | ||
47 | |||
48 | int vlan_gvrp_init_applicant(struct net_device *dev) | ||
49 | { | ||
50 | return garp_init_applicant(dev, &vlan_gvrp_app); | ||
51 | } | ||
52 | |||
53 | void vlan_gvrp_uninit_applicant(struct net_device *dev) | ||
54 | { | ||
55 | garp_uninit_applicant(dev, &vlan_gvrp_app); | ||
56 | } | ||
57 | |||
58 | int __init vlan_gvrp_init(void) | ||
59 | { | ||
60 | return garp_register_application(&vlan_gvrp_app); | ||
61 | } | ||
62 | |||
63 | void vlan_gvrp_uninit(void) | ||
64 | { | ||
65 | garp_unregister_application(&vlan_gvrp_app); | ||
66 | } | ||
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index c93e69ec28ed..e9c91dcecc9b 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c | |||
@@ -59,7 +59,8 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[]) | |||
59 | } | 59 | } |
60 | if (data[IFLA_VLAN_FLAGS]) { | 60 | if (data[IFLA_VLAN_FLAGS]) { |
61 | flags = nla_data(data[IFLA_VLAN_FLAGS]); | 61 | flags = nla_data(data[IFLA_VLAN_FLAGS]); |
62 | if ((flags->flags & flags->mask) & ~VLAN_FLAG_REORDER_HDR) | 62 | if ((flags->flags & flags->mask) & |
63 | ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP)) | ||
63 | return -EINVAL; | 64 | return -EINVAL; |
64 | } | 65 | } |
65 | 66 | ||
@@ -75,7 +76,6 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[]) | |||
75 | static int vlan_changelink(struct net_device *dev, | 76 | static int vlan_changelink(struct net_device *dev, |
76 | struct nlattr *tb[], struct nlattr *data[]) | 77 | struct nlattr *tb[], struct nlattr *data[]) |
77 | { | 78 | { |
78 | struct vlan_dev_info *vlan = vlan_dev_info(dev); | ||
79 | struct ifla_vlan_flags *flags; | 79 | struct ifla_vlan_flags *flags; |
80 | struct ifla_vlan_qos_mapping *m; | 80 | struct ifla_vlan_qos_mapping *m; |
81 | struct nlattr *attr; | 81 | struct nlattr *attr; |
@@ -83,8 +83,7 @@ static int vlan_changelink(struct net_device *dev, | |||
83 | 83 | ||
84 | if (data[IFLA_VLAN_FLAGS]) { | 84 | if (data[IFLA_VLAN_FLAGS]) { |
85 | flags = nla_data(data[IFLA_VLAN_FLAGS]); | 85 | flags = nla_data(data[IFLA_VLAN_FLAGS]); |
86 | vlan->flags = (vlan->flags & ~flags->mask) | | 86 | vlan_dev_change_flags(dev, flags->flags, flags->mask); |
87 | (flags->flags & flags->mask); | ||
88 | } | 87 | } |
89 | if (data[IFLA_VLAN_INGRESS_QOS]) { | 88 | if (data[IFLA_VLAN_INGRESS_QOS]) { |
90 | nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) { | 89 | nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) { |
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c index 08b54b593d56..0feefa4e1a4b 100644 --- a/net/8021q/vlanproc.c +++ b/net/8021q/vlanproc.c | |||
@@ -18,16 +18,9 @@ | |||
18 | *****************************************************************************/ | 18 | *****************************************************************************/ |
19 | 19 | ||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/stddef.h> /* offsetof(), etc. */ | 21 | #include <linux/errno.h> |
22 | #include <linux/errno.h> /* return codes */ | ||
23 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
24 | #include <linux/slab.h> /* kmalloc(), kfree() */ | 23 | #include <linux/string.h> |
25 | #include <linux/mm.h> | ||
26 | #include <linux/string.h> /* inline mem*, str* functions */ | ||
27 | #include <linux/init.h> /* __initfunc et al. */ | ||
28 | #include <asm/byteorder.h> /* htons(), etc. */ | ||
29 | #include <asm/uaccess.h> /* copy_to_user */ | ||
30 | #include <asm/io.h> | ||
31 | #include <linux/proc_fs.h> | 24 | #include <linux/proc_fs.h> |
32 | #include <linux/seq_file.h> | 25 | #include <linux/seq_file.h> |
33 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
@@ -290,7 +283,7 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset) | |||
290 | static const char fmt[] = "%30s %12lu\n"; | 283 | static const char fmt[] = "%30s %12lu\n"; |
291 | int i; | 284 | int i; |
292 | 285 | ||
293 | if (!(vlandev->priv_flags & IFF_802_1Q_VLAN)) | 286 | if (!is_vlan_dev(vlandev)) |
294 | return 0; | 287 | return 0; |
295 | 288 | ||
296 | seq_printf(seq, | 289 | seq_printf(seq, |
diff --git a/net/Kconfig b/net/Kconfig index acbf7c60e89b..b98668751749 100644 --- a/net/Kconfig +++ b/net/Kconfig | |||
@@ -181,6 +181,7 @@ source "net/dccp/Kconfig" | |||
181 | source "net/sctp/Kconfig" | 181 | source "net/sctp/Kconfig" |
182 | source "net/tipc/Kconfig" | 182 | source "net/tipc/Kconfig" |
183 | source "net/atm/Kconfig" | 183 | source "net/atm/Kconfig" |
184 | source "net/802/Kconfig" | ||
184 | source "net/bridge/Kconfig" | 185 | source "net/bridge/Kconfig" |
185 | source "net/8021q/Kconfig" | 186 | source "net/8021q/Kconfig" |
186 | source "net/decnet/Kconfig" | 187 | source "net/decnet/Kconfig" |
diff --git a/net/Makefile b/net/Makefile index b7a13643b549..4f43e7f874f3 100644 --- a/net/Makefile +++ b/net/Makefile | |||
@@ -42,7 +42,9 @@ obj-$(CONFIG_AF_RXRPC) += rxrpc/ | |||
42 | obj-$(CONFIG_ATM) += atm/ | 42 | obj-$(CONFIG_ATM) += atm/ |
43 | obj-$(CONFIG_DECNET) += decnet/ | 43 | obj-$(CONFIG_DECNET) += decnet/ |
44 | obj-$(CONFIG_ECONET) += econet/ | 44 | obj-$(CONFIG_ECONET) += econet/ |
45 | obj-$(CONFIG_VLAN_8021Q) += 8021q/ | 45 | ifneq ($(CONFIG_VLAN_8021Q),) |
46 | obj-y += 8021q/ | ||
47 | endif | ||
46 | obj-$(CONFIG_IP_DCCP) += dccp/ | 48 | obj-$(CONFIG_IP_DCCP) += dccp/ |
47 | obj-$(CONFIG_IP_SCTP) += sctp/ | 49 | obj-$(CONFIG_IP_SCTP) += sctp/ |
48 | obj-y += wireless/ | 50 | obj-y += wireless/ |
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c index 25aa37ce9430..b25c1e909d14 100644 --- a/net/appletalk/aarp.c +++ b/net/appletalk/aarp.c | |||
@@ -333,7 +333,7 @@ static int aarp_device_event(struct notifier_block *this, unsigned long event, | |||
333 | struct net_device *dev = ptr; | 333 | struct net_device *dev = ptr; |
334 | int ct; | 334 | int ct; |
335 | 335 | ||
336 | if (dev_net(dev) != &init_net) | 336 | if (!net_eq(dev_net(dev), &init_net)) |
337 | return NOTIFY_DONE; | 337 | return NOTIFY_DONE; |
338 | 338 | ||
339 | if (event == NETDEV_DOWN) { | 339 | if (event == NETDEV_DOWN) { |
@@ -716,7 +716,7 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev, | |||
716 | struct atalk_addr sa, *ma, da; | 716 | struct atalk_addr sa, *ma, da; |
717 | struct atalk_iface *ifa; | 717 | struct atalk_iface *ifa; |
718 | 718 | ||
719 | if (dev_net(dev) != &init_net) | 719 | if (!net_eq(dev_net(dev), &init_net)) |
720 | goto out0; | 720 | goto out0; |
721 | 721 | ||
722 | /* We only do Ethernet SNAP AARP. */ | 722 | /* We only do Ethernet SNAP AARP. */ |
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 44cd42f7786b..07b5b82c5eab 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c | |||
@@ -648,7 +648,7 @@ static int ddp_device_event(struct notifier_block *this, unsigned long event, | |||
648 | { | 648 | { |
649 | struct net_device *dev = ptr; | 649 | struct net_device *dev = ptr; |
650 | 650 | ||
651 | if (dev_net(dev) != &init_net) | 651 | if (!net_eq(dev_net(dev), &init_net)) |
652 | return NOTIFY_DONE; | 652 | return NOTIFY_DONE; |
653 | 653 | ||
654 | if (event == NETDEV_DOWN) | 654 | if (event == NETDEV_DOWN) |
@@ -1405,7 +1405,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, | |||
1405 | int origlen; | 1405 | int origlen; |
1406 | __u16 len_hops; | 1406 | __u16 len_hops; |
1407 | 1407 | ||
1408 | if (dev_net(dev) != &init_net) | 1408 | if (!net_eq(dev_net(dev), &init_net)) |
1409 | goto freeit; | 1409 | goto freeit; |
1410 | 1410 | ||
1411 | /* Don't mangle buffer if shared */ | 1411 | /* Don't mangle buffer if shared */ |
@@ -1493,7 +1493,7 @@ freeit: | |||
1493 | static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev, | 1493 | static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev, |
1494 | struct packet_type *pt, struct net_device *orig_dev) | 1494 | struct packet_type *pt, struct net_device *orig_dev) |
1495 | { | 1495 | { |
1496 | if (dev_net(dev) != &init_net) | 1496 | if (!net_eq(dev_net(dev), &init_net)) |
1497 | goto freeit; | 1497 | goto freeit; |
1498 | 1498 | ||
1499 | /* Expand any short form frames */ | 1499 | /* Expand any short form frames */ |
diff --git a/net/atm/addr.c b/net/atm/addr.c index 6afa77d63bb5..82e85abc303d 100644 --- a/net/atm/addr.c +++ b/net/atm/addr.c | |||
@@ -9,7 +9,7 @@ | |||
9 | #include "signaling.h" | 9 | #include "signaling.h" |
10 | #include "addr.h" | 10 | #include "addr.h" |
11 | 11 | ||
12 | static int check_addr(struct sockaddr_atmsvc *addr) | 12 | static int check_addr(const struct sockaddr_atmsvc *addr) |
13 | { | 13 | { |
14 | int i; | 14 | int i; |
15 | 15 | ||
@@ -23,7 +23,7 @@ static int check_addr(struct sockaddr_atmsvc *addr) | |||
23 | return -EINVAL; | 23 | return -EINVAL; |
24 | } | 24 | } |
25 | 25 | ||
26 | static int identical(struct sockaddr_atmsvc *a, struct sockaddr_atmsvc *b) | 26 | static int identical(const struct sockaddr_atmsvc *a, const struct sockaddr_atmsvc *b) |
27 | { | 27 | { |
28 | if (*a->sas_addr.prv) | 28 | if (*a->sas_addr.prv) |
29 | if (memcmp(a->sas_addr.prv, b->sas_addr.prv, ATM_ESA_LEN)) | 29 | if (memcmp(a->sas_addr.prv, b->sas_addr.prv, ATM_ESA_LEN)) |
@@ -35,7 +35,7 @@ static int identical(struct sockaddr_atmsvc *a, struct sockaddr_atmsvc *b) | |||
35 | return !strcmp(a->sas_addr.pub, b->sas_addr.pub); | 35 | return !strcmp(a->sas_addr.pub, b->sas_addr.pub); |
36 | } | 36 | } |
37 | 37 | ||
38 | static void notify_sigd(struct atm_dev *dev) | 38 | static void notify_sigd(const struct atm_dev *dev) |
39 | { | 39 | { |
40 | struct sockaddr_atmpvc pvc; | 40 | struct sockaddr_atmpvc pvc; |
41 | 41 | ||
@@ -63,7 +63,7 @@ void atm_reset_addr(struct atm_dev *dev, enum atm_addr_type_t atype) | |||
63 | notify_sigd(dev); | 63 | notify_sigd(dev); |
64 | } | 64 | } |
65 | 65 | ||
66 | int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr, | 66 | int atm_add_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr, |
67 | enum atm_addr_type_t atype) | 67 | enum atm_addr_type_t atype) |
68 | { | 68 | { |
69 | unsigned long flags; | 69 | unsigned long flags; |
@@ -98,7 +98,7 @@ int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr, | |||
98 | return 0; | 98 | return 0; |
99 | } | 99 | } |
100 | 100 | ||
101 | int atm_del_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr, | 101 | int atm_del_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr, |
102 | enum atm_addr_type_t atype) | 102 | enum atm_addr_type_t atype) |
103 | { | 103 | { |
104 | unsigned long flags; | 104 | unsigned long flags; |
diff --git a/net/atm/addr.h b/net/atm/addr.h index f39433ad45da..6837e9e7eb13 100644 --- a/net/atm/addr.h +++ b/net/atm/addr.h | |||
@@ -10,9 +10,9 @@ | |||
10 | #include <linux/atmdev.h> | 10 | #include <linux/atmdev.h> |
11 | 11 | ||
12 | void atm_reset_addr(struct atm_dev *dev, enum atm_addr_type_t type); | 12 | void atm_reset_addr(struct atm_dev *dev, enum atm_addr_type_t type); |
13 | int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr, | 13 | int atm_add_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr, |
14 | enum atm_addr_type_t type); | 14 | enum atm_addr_type_t type); |
15 | int atm_del_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr, | 15 | int atm_del_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr, |
16 | enum atm_addr_type_t type); | 16 | enum atm_addr_type_t type); |
17 | int atm_get_addr(struct atm_dev *dev, struct sockaddr_atmsvc __user *buf, | 17 | int atm_get_addr(struct atm_dev *dev, struct sockaddr_atmsvc __user *buf, |
18 | size_t size, enum atm_addr_type_t type); | 18 | size_t size, enum atm_addr_type_t type); |
diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 05fafdc2eea3..8d9a6f158880 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c | |||
@@ -52,12 +52,12 @@ static void skb_debug(const struct sk_buff *skb) | |||
52 | #define ETHERTYPE_IPV6 0x86, 0xdd | 52 | #define ETHERTYPE_IPV6 0x86, 0xdd |
53 | #define PAD_BRIDGED 0x00, 0x00 | 53 | #define PAD_BRIDGED 0x00, 0x00 |
54 | 54 | ||
55 | static unsigned char ethertype_ipv4[] = { ETHERTYPE_IPV4 }; | 55 | static const unsigned char ethertype_ipv4[] = { ETHERTYPE_IPV4 }; |
56 | static unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 }; | 56 | static const unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 }; |
57 | static unsigned char llc_oui_pid_pad[] = | 57 | static const unsigned char llc_oui_pid_pad[] = |
58 | { LLC, SNAP_BRIDGED, PID_ETHERNET, PAD_BRIDGED }; | 58 | { LLC, SNAP_BRIDGED, PID_ETHERNET, PAD_BRIDGED }; |
59 | static unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 }; | 59 | static const unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 }; |
60 | static unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 }; | 60 | static const unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 }; |
61 | 61 | ||
62 | enum br2684_encaps { | 62 | enum br2684_encaps { |
63 | e_vc = BR2684_ENCAPS_VC, | 63 | e_vc = BR2684_ENCAPS_VC, |
@@ -217,8 +217,8 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev, | |||
217 | return 1; | 217 | return 1; |
218 | } | 218 | } |
219 | 219 | ||
220 | static inline struct br2684_vcc *pick_outgoing_vcc(struct sk_buff *skb, | 220 | static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb, |
221 | struct br2684_dev *brdev) | 221 | const struct br2684_dev *brdev) |
222 | { | 222 | { |
223 | return list_empty(&brdev->brvccs) ? NULL : list_entry_brvcc(brdev->brvccs.next); /* 1 vcc/dev right now */ | 223 | return list_empty(&brdev->brvccs) ? NULL : list_entry_brvcc(brdev->brvccs.next); /* 1 vcc/dev right now */ |
224 | } | 224 | } |
diff --git a/net/atm/clip.c b/net/atm/clip.c index 6f8223ebf551..5b5b96344ce6 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c | |||
@@ -612,7 +612,7 @@ static int clip_device_event(struct notifier_block *this, unsigned long event, | |||
612 | { | 612 | { |
613 | struct net_device *dev = arg; | 613 | struct net_device *dev = arg; |
614 | 614 | ||
615 | if (dev_net(dev) != &init_net) | 615 | if (!net_eq(dev_net(dev), &init_net)) |
616 | return NOTIFY_DONE; | 616 | return NOTIFY_DONE; |
617 | 617 | ||
618 | if (event == NETDEV_UNREGISTER) { | 618 | if (event == NETDEV_UNREGISTER) { |
diff --git a/net/atm/common.c b/net/atm/common.c index c865517ba449..d34edbe754c8 100644 --- a/net/atm/common.c +++ b/net/atm/common.c | |||
@@ -262,7 +262,7 @@ static int adjust_tp(struct atm_trafprm *tp,unsigned char aal) | |||
262 | } | 262 | } |
263 | 263 | ||
264 | 264 | ||
265 | static int check_ci(struct atm_vcc *vcc, short vpi, int vci) | 265 | static int check_ci(const struct atm_vcc *vcc, short vpi, int vci) |
266 | { | 266 | { |
267 | struct hlist_head *head = &vcc_hash[vci & | 267 | struct hlist_head *head = &vcc_hash[vci & |
268 | (VCC_HTABLE_SIZE - 1)]; | 268 | (VCC_HTABLE_SIZE - 1)]; |
@@ -290,7 +290,7 @@ static int check_ci(struct atm_vcc *vcc, short vpi, int vci) | |||
290 | } | 290 | } |
291 | 291 | ||
292 | 292 | ||
293 | static int find_ci(struct atm_vcc *vcc, short *vpi, int *vci) | 293 | static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci) |
294 | { | 294 | { |
295 | static short p; /* poor man's per-device cache */ | 295 | static short p; /* poor man's per-device cache */ |
296 | static int c; | 296 | static int c; |
@@ -646,7 +646,7 @@ static int atm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos) | |||
646 | } | 646 | } |
647 | 647 | ||
648 | 648 | ||
649 | static int check_tp(struct atm_trafprm *tp) | 649 | static int check_tp(const struct atm_trafprm *tp) |
650 | { | 650 | { |
651 | /* @@@ Should be merged with adjust_tp */ | 651 | /* @@@ Should be merged with adjust_tp */ |
652 | if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS) return 0; | 652 | if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS) return 0; |
@@ -663,7 +663,7 @@ static int check_tp(struct atm_trafprm *tp) | |||
663 | } | 663 | } |
664 | 664 | ||
665 | 665 | ||
666 | static int check_qos(struct atm_qos *qos) | 666 | static int check_qos(const struct atm_qos *qos) |
667 | { | 667 | { |
668 | int error; | 668 | int error; |
669 | 669 | ||
diff --git a/net/atm/lec.c b/net/atm/lec.c index 653aca3573ac..5799fb52365a 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c | |||
@@ -65,36 +65,36 @@ static int lec_close(struct net_device *dev); | |||
65 | static struct net_device_stats *lec_get_stats(struct net_device *dev); | 65 | static struct net_device_stats *lec_get_stats(struct net_device *dev); |
66 | static void lec_init(struct net_device *dev); | 66 | static void lec_init(struct net_device *dev); |
67 | static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, | 67 | static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, |
68 | unsigned char *mac_addr); | 68 | const unsigned char *mac_addr); |
69 | static int lec_arp_remove(struct lec_priv *priv, | 69 | static int lec_arp_remove(struct lec_priv *priv, |
70 | struct lec_arp_table *to_remove); | 70 | struct lec_arp_table *to_remove); |
71 | /* LANE2 functions */ | 71 | /* LANE2 functions */ |
72 | static void lane2_associate_ind(struct net_device *dev, u8 *mac_address, | 72 | static void lane2_associate_ind(struct net_device *dev, const u8 *mac_address, |
73 | u8 *tlvs, u32 sizeoftlvs); | 73 | const u8 *tlvs, u32 sizeoftlvs); |
74 | static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force, | 74 | static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force, |
75 | u8 **tlvs, u32 *sizeoftlvs); | 75 | u8 **tlvs, u32 *sizeoftlvs); |
76 | static int lane2_associate_req(struct net_device *dev, u8 *lan_dst, | 76 | static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst, |
77 | u8 *tlvs, u32 sizeoftlvs); | 77 | const u8 *tlvs, u32 sizeoftlvs); |
78 | 78 | ||
79 | static int lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr, | 79 | static int lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr, |
80 | unsigned long permanent); | 80 | unsigned long permanent); |
81 | static void lec_arp_check_empties(struct lec_priv *priv, | 81 | static void lec_arp_check_empties(struct lec_priv *priv, |
82 | struct atm_vcc *vcc, struct sk_buff *skb); | 82 | struct atm_vcc *vcc, struct sk_buff *skb); |
83 | static void lec_arp_destroy(struct lec_priv *priv); | 83 | static void lec_arp_destroy(struct lec_priv *priv); |
84 | static void lec_arp_init(struct lec_priv *priv); | 84 | static void lec_arp_init(struct lec_priv *priv); |
85 | static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, | 85 | static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, |
86 | unsigned char *mac_to_find, | 86 | const unsigned char *mac_to_find, |
87 | int is_rdesc, | 87 | int is_rdesc, |
88 | struct lec_arp_table **ret_entry); | 88 | struct lec_arp_table **ret_entry); |
89 | static void lec_arp_update(struct lec_priv *priv, unsigned char *mac_addr, | 89 | static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr, |
90 | unsigned char *atm_addr, unsigned long remoteflag, | 90 | const unsigned char *atm_addr, unsigned long remoteflag, |
91 | unsigned int targetless_le_arp); | 91 | unsigned int targetless_le_arp); |
92 | static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id); | 92 | static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id); |
93 | static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc); | 93 | static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc); |
94 | static void lec_set_flush_tran_id(struct lec_priv *priv, | 94 | static void lec_set_flush_tran_id(struct lec_priv *priv, |
95 | unsigned char *atm_addr, | 95 | const unsigned char *atm_addr, |
96 | unsigned long tran_id); | 96 | unsigned long tran_id); |
97 | static void lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data, | 97 | static void lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data, |
98 | struct atm_vcc *vcc, | 98 | struct atm_vcc *vcc, |
99 | void (*old_push) (struct atm_vcc *vcc, | 99 | void (*old_push) (struct atm_vcc *vcc, |
100 | struct sk_buff *skb)); | 100 | struct sk_buff *skb)); |
@@ -634,7 +634,7 @@ static struct atm_dev lecatm_dev = { | |||
634 | */ | 634 | */ |
635 | static int | 635 | static int |
636 | send_to_lecd(struct lec_priv *priv, atmlec_msg_type type, | 636 | send_to_lecd(struct lec_priv *priv, atmlec_msg_type type, |
637 | unsigned char *mac_addr, unsigned char *atm_addr, | 637 | const unsigned char *mac_addr, const unsigned char *atm_addr, |
638 | struct sk_buff *data) | 638 | struct sk_buff *data) |
639 | { | 639 | { |
640 | struct sock *sk; | 640 | struct sock *sk; |
@@ -705,10 +705,9 @@ static void lec_init(struct net_device *dev) | |||
705 | dev->set_multicast_list = lec_set_multicast_list; | 705 | dev->set_multicast_list = lec_set_multicast_list; |
706 | dev->do_ioctl = NULL; | 706 | dev->do_ioctl = NULL; |
707 | printk("%s: Initialized!\n", dev->name); | 707 | printk("%s: Initialized!\n", dev->name); |
708 | return; | ||
709 | } | 708 | } |
710 | 709 | ||
711 | static unsigned char lec_ctrl_magic[] = { | 710 | static const unsigned char lec_ctrl_magic[] = { |
712 | 0xff, | 711 | 0xff, |
713 | 0x00, | 712 | 0x00, |
714 | 0x01, | 713 | 0x01, |
@@ -1276,7 +1275,7 @@ module_exit(lane_module_cleanup); | |||
1276 | * lec will be used. | 1275 | * lec will be used. |
1277 | * If dst_mac == NULL, targetless LE_ARP will be sent | 1276 | * If dst_mac == NULL, targetless LE_ARP will be sent |
1278 | */ | 1277 | */ |
1279 | static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force, | 1278 | static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force, |
1280 | u8 **tlvs, u32 *sizeoftlvs) | 1279 | u8 **tlvs, u32 *sizeoftlvs) |
1281 | { | 1280 | { |
1282 | unsigned long flags; | 1281 | unsigned long flags; |
@@ -1322,8 +1321,8 @@ static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force, | |||
1322 | * Returns 1 for success, 0 for failure (out of memory) | 1321 | * Returns 1 for success, 0 for failure (out of memory) |
1323 | * | 1322 | * |
1324 | */ | 1323 | */ |
1325 | static int lane2_associate_req(struct net_device *dev, u8 *lan_dst, | 1324 | static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst, |
1326 | u8 *tlvs, u32 sizeoftlvs) | 1325 | const u8 *tlvs, u32 sizeoftlvs) |
1327 | { | 1326 | { |
1328 | int retval; | 1327 | int retval; |
1329 | struct sk_buff *skb; | 1328 | struct sk_buff *skb; |
@@ -1358,8 +1357,8 @@ static int lane2_associate_req(struct net_device *dev, u8 *lan_dst, | |||
1358 | * LANE2: 3.1.5, LE_ASSOCIATE.indication | 1357 | * LANE2: 3.1.5, LE_ASSOCIATE.indication |
1359 | * | 1358 | * |
1360 | */ | 1359 | */ |
1361 | static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr, | 1360 | static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr, |
1362 | u8 *tlvs, u32 sizeoftlvs) | 1361 | const u8 *tlvs, u32 sizeoftlvs) |
1363 | { | 1362 | { |
1364 | #if 0 | 1363 | #if 0 |
1365 | int i = 0; | 1364 | int i = 0; |
@@ -1744,7 +1743,7 @@ static void lec_arp_destroy(struct lec_priv *priv) | |||
1744 | * Find entry by mac_address | 1743 | * Find entry by mac_address |
1745 | */ | 1744 | */ |
1746 | static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, | 1745 | static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, |
1747 | unsigned char *mac_addr) | 1746 | const unsigned char *mac_addr) |
1748 | { | 1747 | { |
1749 | struct hlist_node *node; | 1748 | struct hlist_node *node; |
1750 | struct hlist_head *head; | 1749 | struct hlist_head *head; |
@@ -1764,7 +1763,7 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, | |||
1764 | } | 1763 | } |
1765 | 1764 | ||
1766 | static struct lec_arp_table *make_entry(struct lec_priv *priv, | 1765 | static struct lec_arp_table *make_entry(struct lec_priv *priv, |
1767 | unsigned char *mac_addr) | 1766 | const unsigned char *mac_addr) |
1768 | { | 1767 | { |
1769 | struct lec_arp_table *to_return; | 1768 | struct lec_arp_table *to_return; |
1770 | 1769 | ||
@@ -1921,7 +1920,7 @@ restart: | |||
1921 | * | 1920 | * |
1922 | */ | 1921 | */ |
1923 | static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, | 1922 | static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, |
1924 | unsigned char *mac_to_find, int is_rdesc, | 1923 | const unsigned char *mac_to_find, int is_rdesc, |
1925 | struct lec_arp_table **ret_entry) | 1924 | struct lec_arp_table **ret_entry) |
1926 | { | 1925 | { |
1927 | unsigned long flags; | 1926 | unsigned long flags; |
@@ -2017,7 +2016,7 @@ out: | |||
2017 | } | 2016 | } |
2018 | 2017 | ||
2019 | static int | 2018 | static int |
2020 | lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr, | 2019 | lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr, |
2021 | unsigned long permanent) | 2020 | unsigned long permanent) |
2022 | { | 2021 | { |
2023 | unsigned long flags; | 2022 | unsigned long flags; |
@@ -2047,8 +2046,8 @@ lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr, | |||
2047 | * Notifies: Response to arp_request (atm_addr != NULL) | 2046 | * Notifies: Response to arp_request (atm_addr != NULL) |
2048 | */ | 2047 | */ |
2049 | static void | 2048 | static void |
2050 | lec_arp_update(struct lec_priv *priv, unsigned char *mac_addr, | 2049 | lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr, |
2051 | unsigned char *atm_addr, unsigned long remoteflag, | 2050 | const unsigned char *atm_addr, unsigned long remoteflag, |
2052 | unsigned int targetless_le_arp) | 2051 | unsigned int targetless_le_arp) |
2053 | { | 2052 | { |
2054 | unsigned long flags; | 2053 | unsigned long flags; |
@@ -2148,7 +2147,7 @@ out: | |||
2148 | * Notifies: Vcc setup ready | 2147 | * Notifies: Vcc setup ready |
2149 | */ | 2148 | */ |
2150 | static void | 2149 | static void |
2151 | lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data, | 2150 | lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data, |
2152 | struct atm_vcc *vcc, | 2151 | struct atm_vcc *vcc, |
2153 | void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb)) | 2152 | void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb)) |
2154 | { | 2153 | { |
@@ -2336,7 +2335,7 @@ restart: | |||
2336 | 2335 | ||
2337 | static void | 2336 | static void |
2338 | lec_set_flush_tran_id(struct lec_priv *priv, | 2337 | lec_set_flush_tran_id(struct lec_priv *priv, |
2339 | unsigned char *atm_addr, unsigned long tran_id) | 2338 | const unsigned char *atm_addr, unsigned long tran_id) |
2340 | { | 2339 | { |
2341 | unsigned long flags; | 2340 | unsigned long flags; |
2342 | struct hlist_node *node; | 2341 | struct hlist_node *node; |
diff --git a/net/atm/lec.h b/net/atm/lec.h index b41cda7ea1e1..0d376682c1a3 100644 --- a/net/atm/lec.h +++ b/net/atm/lec.h | |||
@@ -42,12 +42,12 @@ struct lecdatahdr_8025 { | |||
42 | * | 42 | * |
43 | */ | 43 | */ |
44 | struct lane2_ops { | 44 | struct lane2_ops { |
45 | int (*resolve) (struct net_device *dev, u8 *dst_mac, int force, | 45 | int (*resolve) (struct net_device *dev, const u8 *dst_mac, int force, |
46 | u8 **tlvs, u32 *sizeoftlvs); | 46 | u8 **tlvs, u32 *sizeoftlvs); |
47 | int (*associate_req) (struct net_device *dev, u8 *lan_dst, | 47 | int (*associate_req) (struct net_device *dev, const u8 *lan_dst, |
48 | u8 *tlvs, u32 sizeoftlvs); | 48 | const u8 *tlvs, u32 sizeoftlvs); |
49 | void (*associate_indicator) (struct net_device *dev, u8 *mac_addr, | 49 | void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr, |
50 | u8 *tlvs, u32 sizeoftlvs); | 50 | const u8 *tlvs, u32 sizeoftlvs); |
51 | }; | 51 | }; |
52 | 52 | ||
53 | /* | 53 | /* |
diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 9db332e7a6c0..4fccaa1e07be 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c | |||
@@ -964,7 +964,7 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo | |||
964 | 964 | ||
965 | dev = (struct net_device *)dev_ptr; | 965 | dev = (struct net_device *)dev_ptr; |
966 | 966 | ||
967 | if (dev_net(dev) != &init_net) | 967 | if (!net_eq(dev_net(dev), &init_net)) |
968 | return NOTIFY_DONE; | 968 | return NOTIFY_DONE; |
969 | 969 | ||
970 | if (dev->name == NULL || strncmp(dev->name, "lec", 3)) | 970 | if (dev->name == NULL || strncmp(dev->name, "lec", 3)) |
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 2712544cf0ca..01c83e2a4c19 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c | |||
@@ -116,7 +116,7 @@ static int ax25_device_event(struct notifier_block *this, unsigned long event, | |||
116 | { | 116 | { |
117 | struct net_device *dev = (struct net_device *)ptr; | 117 | struct net_device *dev = (struct net_device *)ptr; |
118 | 118 | ||
119 | if (dev_net(dev) != &init_net) | 119 | if (!net_eq(dev_net(dev), &init_net)) |
120 | return NOTIFY_DONE; | 120 | return NOTIFY_DONE; |
121 | 121 | ||
122 | /* Reject non AX.25 devices */ | 122 | /* Reject non AX.25 devices */ |
@@ -893,13 +893,11 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev) | |||
893 | 893 | ||
894 | sk->sk_destruct = ax25_free_sock; | 894 | sk->sk_destruct = ax25_free_sock; |
895 | sk->sk_type = osk->sk_type; | 895 | sk->sk_type = osk->sk_type; |
896 | sk->sk_socket = osk->sk_socket; | ||
897 | sk->sk_priority = osk->sk_priority; | 896 | sk->sk_priority = osk->sk_priority; |
898 | sk->sk_protocol = osk->sk_protocol; | 897 | sk->sk_protocol = osk->sk_protocol; |
899 | sk->sk_rcvbuf = osk->sk_rcvbuf; | 898 | sk->sk_rcvbuf = osk->sk_rcvbuf; |
900 | sk->sk_sndbuf = osk->sk_sndbuf; | 899 | sk->sk_sndbuf = osk->sk_sndbuf; |
901 | sk->sk_state = TCP_ESTABLISHED; | 900 | sk->sk_state = TCP_ESTABLISHED; |
902 | sk->sk_sleep = osk->sk_sleep; | ||
903 | sock_copy_flags(sk, osk); | 901 | sock_copy_flags(sk, osk); |
904 | 902 | ||
905 | oax25 = ax25_sk(osk); | 903 | oax25 = ax25_sk(osk); |
@@ -1361,13 +1359,11 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags) | |||
1361 | goto out; | 1359 | goto out; |
1362 | 1360 | ||
1363 | newsk = skb->sk; | 1361 | newsk = skb->sk; |
1364 | newsk->sk_socket = newsock; | 1362 | sock_graft(newsk, newsock); |
1365 | newsk->sk_sleep = &newsock->wait; | ||
1366 | 1363 | ||
1367 | /* Now attach up the new socket */ | 1364 | /* Now attach up the new socket */ |
1368 | kfree_skb(skb); | 1365 | kfree_skb(skb); |
1369 | sk->sk_ack_backlog--; | 1366 | sk->sk_ack_backlog--; |
1370 | newsock->sk = newsk; | ||
1371 | newsock->state = SS_CONNECTED; | 1367 | newsock->state = SS_CONNECTED; |
1372 | 1368 | ||
1373 | out: | 1369 | out: |
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c index 33790a8efbc8..4a5ba978a804 100644 --- a/net/ax25/ax25_in.c +++ b/net/ax25/ax25_in.c | |||
@@ -451,7 +451,7 @@ int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev, | |||
451 | skb->sk = NULL; /* Initially we don't know who it's for */ | 451 | skb->sk = NULL; /* Initially we don't know who it's for */ |
452 | skb->destructor = NULL; /* Who initializes this, dammit?! */ | 452 | skb->destructor = NULL; /* Who initializes this, dammit?! */ |
453 | 453 | ||
454 | if (dev_net(dev) != &init_net) { | 454 | if (!net_eq(dev_net(dev), &init_net)) { |
455 | kfree_skb(skb); | 455 | kfree_skb(skb); |
456 | return 0; | 456 | return 0; |
457 | } | 457 | } |
diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c index 96e4b9273250..cdc7e751ef36 100644 --- a/net/ax25/ax25_std_timer.c +++ b/net/ax25/ax25_std_timer.c | |||
@@ -39,11 +39,9 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25) | |||
39 | 39 | ||
40 | switch (ax25->state) { | 40 | switch (ax25->state) { |
41 | case AX25_STATE_0: | 41 | case AX25_STATE_0: |
42 | /* Magic here: If we listen() and a new link dies before it | 42 | if (!sk || |
43 | is accepted() it isn't 'dead' so doesn't get removed. */ | 43 | sock_flag(sk, SOCK_DESTROY) || |
44 | if (!sk || sock_flag(sk, SOCK_DESTROY) || | 44 | sock_flag(sk, SOCK_DEAD)) { |
45 | (sk->sk_state == TCP_LISTEN && | ||
46 | sock_flag(sk, SOCK_DEAD))) { | ||
47 | if (sk) { | 45 | if (sk) { |
48 | sock_hold(sk); | 46 | sock_hold(sk); |
49 | ax25_destroy_socket(ax25); | 47 | ax25_destroy_socket(ax25); |
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index d366423c8392..4e59df5f8e05 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/init.h> | 36 | #include <linux/init.h> |
37 | #include <linux/poll.h> | 37 | #include <linux/poll.h> |
38 | #include <net/sock.h> | 38 | #include <net/sock.h> |
39 | #include <asm/ioctls.h> | ||
39 | 40 | ||
40 | #if defined(CONFIG_KMOD) | 41 | #if defined(CONFIG_KMOD) |
41 | #include <linux/kmod.h> | 42 | #include <linux/kmod.h> |
@@ -48,7 +49,7 @@ | |||
48 | #define BT_DBG(D...) | 49 | #define BT_DBG(D...) |
49 | #endif | 50 | #endif |
50 | 51 | ||
51 | #define VERSION "2.11" | 52 | #define VERSION "2.12" |
52 | 53 | ||
53 | /* Bluetooth sockets */ | 54 | /* Bluetooth sockets */ |
54 | #define BT_MAX_PROTO 8 | 55 | #define BT_MAX_PROTO 8 |
@@ -266,6 +267,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
266 | 267 | ||
267 | skb_reset_transport_header(skb); | 268 | skb_reset_transport_header(skb); |
268 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | 269 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
270 | if (err == 0) | ||
271 | sock_recv_timestamp(msg, sk, skb); | ||
269 | 272 | ||
270 | skb_free_datagram(sk, skb); | 273 | skb_free_datagram(sk, skb); |
271 | 274 | ||
@@ -329,6 +332,54 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w | |||
329 | } | 332 | } |
330 | EXPORT_SYMBOL(bt_sock_poll); | 333 | EXPORT_SYMBOL(bt_sock_poll); |
331 | 334 | ||
335 | int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | ||
336 | { | ||
337 | struct sock *sk = sock->sk; | ||
338 | struct sk_buff *skb; | ||
339 | long amount; | ||
340 | int err; | ||
341 | |||
342 | BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg); | ||
343 | |||
344 | switch (cmd) { | ||
345 | case TIOCOUTQ: | ||
346 | if (sk->sk_state == BT_LISTEN) | ||
347 | return -EINVAL; | ||
348 | |||
349 | amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); | ||
350 | if (amount < 0) | ||
351 | amount = 0; | ||
352 | err = put_user(amount, (int __user *) arg); | ||
353 | break; | ||
354 | |||
355 | case TIOCINQ: | ||
356 | if (sk->sk_state == BT_LISTEN) | ||
357 | return -EINVAL; | ||
358 | |||
359 | lock_sock(sk); | ||
360 | skb = skb_peek(&sk->sk_receive_queue); | ||
361 | amount = skb ? skb->len : 0; | ||
362 | release_sock(sk); | ||
363 | err = put_user(amount, (int __user *) arg); | ||
364 | break; | ||
365 | |||
366 | case SIOCGSTAMP: | ||
367 | err = sock_get_timestamp(sk, (struct timeval __user *) arg); | ||
368 | break; | ||
369 | |||
370 | case SIOCGSTAMPNS: | ||
371 | err = sock_get_timestampns(sk, (struct timespec __user *) arg); | ||
372 | break; | ||
373 | |||
374 | default: | ||
375 | err = -ENOIOCTLCMD; | ||
376 | break; | ||
377 | } | ||
378 | |||
379 | return err; | ||
380 | } | ||
381 | EXPORT_SYMBOL(bt_sock_ioctl); | ||
382 | |||
332 | int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo) | 383 | int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo) |
333 | { | 384 | { |
334 | DECLARE_WAITQUEUE(wait, current); | 385 | DECLARE_WAITQUEUE(wait, current); |
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h index e69244dd8de8..b69bf4e7c48b 100644 --- a/net/bluetooth/bnep/bnep.h +++ b/net/bluetooth/bnep/bnep.h | |||
@@ -16,10 +16,6 @@ | |||
16 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
17 | */ | 17 | */ |
18 | 18 | ||
19 | /* | ||
20 | * $Id: bnep.h,v 1.5 2002/08/04 21:23:58 maxk Exp $ | ||
21 | */ | ||
22 | |||
23 | #ifndef _BNEP_H | 19 | #ifndef _BNEP_H |
24 | #define _BNEP_H | 20 | #define _BNEP_H |
25 | 21 | ||
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index f85d94643aaf..021172c0e666 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c | |||
@@ -25,10 +25,6 @@ | |||
25 | SOFTWARE IS DISCLAIMED. | 25 | SOFTWARE IS DISCLAIMED. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | /* | ||
29 | * $Id: core.c,v 1.20 2002/08/04 21:23:58 maxk Exp $ | ||
30 | */ | ||
31 | |||
32 | #include <linux/module.h> | 28 | #include <linux/module.h> |
33 | 29 | ||
34 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
@@ -507,6 +503,11 @@ static int bnep_session(void *arg) | |||
507 | /* Delete network device */ | 503 | /* Delete network device */ |
508 | unregister_netdev(dev); | 504 | unregister_netdev(dev); |
509 | 505 | ||
506 | /* Wakeup user-space polling for socket errors */ | ||
507 | s->sock->sk->sk_err = EUNATCH; | ||
508 | |||
509 | wake_up_interruptible(s->sock->sk->sk_sleep); | ||
510 | |||
510 | /* Release the socket */ | 511 | /* Release the socket */ |
511 | fput(s->sock->file); | 512 | fput(s->sock->file); |
512 | 513 | ||
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c index 95e3837e4312..d9fa0ab2c87f 100644 --- a/net/bluetooth/bnep/netdev.c +++ b/net/bluetooth/bnep/netdev.c | |||
@@ -25,10 +25,6 @@ | |||
25 | SOFTWARE IS DISCLAIMED. | 25 | SOFTWARE IS DISCLAIMED. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | /* | ||
29 | * $Id: netdev.c,v 1.8 2002/08/04 21:23:58 maxk Exp $ | ||
30 | */ | ||
31 | |||
32 | #include <linux/module.h> | 28 | #include <linux/module.h> |
33 | 29 | ||
34 | #include <linux/socket.h> | 30 | #include <linux/socket.h> |
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c index 201e5b1ce473..8ffb57f2303a 100644 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c | |||
@@ -24,10 +24,6 @@ | |||
24 | SOFTWARE IS DISCLAIMED. | 24 | SOFTWARE IS DISCLAIMED. |
25 | */ | 25 | */ |
26 | 26 | ||
27 | /* | ||
28 | * $Id: sock.c,v 1.4 2002/08/04 21:23:58 maxk Exp $ | ||
29 | */ | ||
30 | |||
31 | #include <linux/module.h> | 27 | #include <linux/module.h> |
32 | 28 | ||
33 | #include <linux/types.h> | 29 | #include <linux/types.h> |
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index f8880261da0e..ca8d05245ca0 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c | |||
@@ -59,24 +59,31 @@ void hci_acl_connect(struct hci_conn *conn) | |||
59 | BT_DBG("%p", conn); | 59 | BT_DBG("%p", conn); |
60 | 60 | ||
61 | conn->state = BT_CONNECT; | 61 | conn->state = BT_CONNECT; |
62 | conn->out = 1; | 62 | conn->out = 1; |
63 | |||
63 | conn->link_mode = HCI_LM_MASTER; | 64 | conn->link_mode = HCI_LM_MASTER; |
64 | 65 | ||
65 | conn->attempt++; | 66 | conn->attempt++; |
66 | 67 | ||
68 | conn->link_policy = hdev->link_policy; | ||
69 | |||
67 | memset(&cp, 0, sizeof(cp)); | 70 | memset(&cp, 0, sizeof(cp)); |
68 | bacpy(&cp.bdaddr, &conn->dst); | 71 | bacpy(&cp.bdaddr, &conn->dst); |
69 | cp.pscan_rep_mode = 0x02; | 72 | cp.pscan_rep_mode = 0x02; |
70 | 73 | ||
71 | if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst)) && | 74 | if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) { |
72 | inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) { | 75 | if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) { |
73 | cp.pscan_rep_mode = ie->data.pscan_rep_mode; | 76 | cp.pscan_rep_mode = ie->data.pscan_rep_mode; |
74 | cp.pscan_mode = ie->data.pscan_mode; | 77 | cp.pscan_mode = ie->data.pscan_mode; |
75 | cp.clock_offset = ie->data.clock_offset | cpu_to_le16(0x8000); | 78 | cp.clock_offset = ie->data.clock_offset | |
79 | cpu_to_le16(0x8000); | ||
80 | } | ||
81 | |||
76 | memcpy(conn->dev_class, ie->data.dev_class, 3); | 82 | memcpy(conn->dev_class, ie->data.dev_class, 3); |
83 | conn->ssp_mode = ie->data.ssp_mode; | ||
77 | } | 84 | } |
78 | 85 | ||
79 | cp.pkt_type = cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK); | 86 | cp.pkt_type = cpu_to_le16(conn->pkt_type); |
80 | if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) | 87 | if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) |
81 | cp.role_switch = 0x01; | 88 | cp.role_switch = 0x01; |
82 | else | 89 | else |
@@ -122,7 +129,7 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle) | |||
122 | conn->out = 1; | 129 | conn->out = 1; |
123 | 130 | ||
124 | cp.handle = cpu_to_le16(handle); | 131 | cp.handle = cpu_to_le16(handle); |
125 | cp.pkt_type = cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK); | 132 | cp.pkt_type = cpu_to_le16(conn->pkt_type); |
126 | 133 | ||
127 | hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp); | 134 | hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp); |
128 | } | 135 | } |
@@ -138,7 +145,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle) | |||
138 | conn->out = 1; | 145 | conn->out = 1; |
139 | 146 | ||
140 | cp.handle = cpu_to_le16(handle); | 147 | cp.handle = cpu_to_le16(handle); |
141 | cp.pkt_type = cpu_to_le16(hdev->esco_type); | 148 | cp.pkt_type = cpu_to_le16(conn->pkt_type); |
142 | 149 | ||
143 | cp.tx_bandwidth = cpu_to_le32(0x00001f40); | 150 | cp.tx_bandwidth = cpu_to_le32(0x00001f40); |
144 | cp.rx_bandwidth = cpu_to_le32(0x00001f40); | 151 | cp.rx_bandwidth = cpu_to_le32(0x00001f40); |
@@ -163,11 +170,13 @@ static void hci_conn_timeout(unsigned long arg) | |||
163 | 170 | ||
164 | switch (conn->state) { | 171 | switch (conn->state) { |
165 | case BT_CONNECT: | 172 | case BT_CONNECT: |
173 | case BT_CONNECT2: | ||
166 | if (conn->type == ACL_LINK) | 174 | if (conn->type == ACL_LINK) |
167 | hci_acl_connect_cancel(conn); | 175 | hci_acl_connect_cancel(conn); |
168 | else | 176 | else |
169 | hci_acl_disconn(conn, 0x13); | 177 | hci_acl_disconn(conn, 0x13); |
170 | break; | 178 | break; |
179 | case BT_CONFIG: | ||
171 | case BT_CONNECTED: | 180 | case BT_CONNECTED: |
172 | hci_acl_disconn(conn, 0x13); | 181 | hci_acl_disconn(conn, 0x13); |
173 | break; | 182 | break; |
@@ -199,13 +208,28 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) | |||
199 | return NULL; | 208 | return NULL; |
200 | 209 | ||
201 | bacpy(&conn->dst, dst); | 210 | bacpy(&conn->dst, dst); |
202 | conn->hdev = hdev; | 211 | conn->hdev = hdev; |
203 | conn->type = type; | 212 | conn->type = type; |
204 | conn->mode = HCI_CM_ACTIVE; | 213 | conn->mode = HCI_CM_ACTIVE; |
205 | conn->state = BT_OPEN; | 214 | conn->state = BT_OPEN; |
206 | 215 | ||
207 | conn->power_save = 1; | 216 | conn->power_save = 1; |
208 | 217 | ||
218 | switch (type) { | ||
219 | case ACL_LINK: | ||
220 | conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK; | ||
221 | break; | ||
222 | case SCO_LINK: | ||
223 | if (lmp_esco_capable(hdev)) | ||
224 | conn->pkt_type = hdev->esco_type & SCO_ESCO_MASK; | ||
225 | else | ||
226 | conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK; | ||
227 | break; | ||
228 | case ESCO_LINK: | ||
229 | conn->pkt_type = hdev->esco_type; | ||
230 | break; | ||
231 | } | ||
232 | |||
209 | skb_queue_head_init(&conn->data_q); | 233 | skb_queue_head_init(&conn->data_q); |
210 | 234 | ||
211 | setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn); | 235 | setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn); |
@@ -221,8 +245,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) | |||
221 | if (hdev->notify) | 245 | if (hdev->notify) |
222 | hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); | 246 | hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); |
223 | 247 | ||
224 | hci_conn_add_sysfs(conn); | ||
225 | |||
226 | tasklet_enable(&hdev->tx_task); | 248 | tasklet_enable(&hdev->tx_task); |
227 | 249 | ||
228 | return conn; | 250 | return conn; |
@@ -254,12 +276,14 @@ int hci_conn_del(struct hci_conn *conn) | |||
254 | } | 276 | } |
255 | 277 | ||
256 | tasklet_disable(&hdev->tx_task); | 278 | tasklet_disable(&hdev->tx_task); |
279 | |||
257 | hci_conn_hash_del(hdev, conn); | 280 | hci_conn_hash_del(hdev, conn); |
258 | if (hdev->notify) | 281 | if (hdev->notify) |
259 | hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); | 282 | hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); |
283 | |||
260 | tasklet_enable(&hdev->tx_task); | 284 | tasklet_enable(&hdev->tx_task); |
285 | |||
261 | skb_queue_purge(&conn->data_q); | 286 | skb_queue_purge(&conn->data_q); |
262 | hci_conn_del_sysfs(conn); | ||
263 | 287 | ||
264 | return 0; | 288 | return 0; |
265 | } | 289 | } |
@@ -355,13 +379,21 @@ int hci_conn_auth(struct hci_conn *conn) | |||
355 | { | 379 | { |
356 | BT_DBG("conn %p", conn); | 380 | BT_DBG("conn %p", conn); |
357 | 381 | ||
382 | if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0) { | ||
383 | if (!(conn->auth_type & 0x01)) { | ||
384 | conn->auth_type = HCI_AT_GENERAL_BONDING_MITM; | ||
385 | conn->link_mode &= ~HCI_LM_AUTH; | ||
386 | } | ||
387 | } | ||
388 | |||
358 | if (conn->link_mode & HCI_LM_AUTH) | 389 | if (conn->link_mode & HCI_LM_AUTH) |
359 | return 1; | 390 | return 1; |
360 | 391 | ||
361 | if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { | 392 | if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { |
362 | struct hci_cp_auth_requested cp; | 393 | struct hci_cp_auth_requested cp; |
363 | cp.handle = cpu_to_le16(conn->handle); | 394 | cp.handle = cpu_to_le16(conn->handle); |
364 | hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); | 395 | hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, |
396 | sizeof(cp), &cp); | ||
365 | } | 397 | } |
366 | return 0; | 398 | return 0; |
367 | } | 399 | } |
@@ -373,7 +405,7 @@ int hci_conn_encrypt(struct hci_conn *conn) | |||
373 | BT_DBG("conn %p", conn); | 405 | BT_DBG("conn %p", conn); |
374 | 406 | ||
375 | if (conn->link_mode & HCI_LM_ENCRYPT) | 407 | if (conn->link_mode & HCI_LM_ENCRYPT) |
376 | return 1; | 408 | return hci_conn_auth(conn); |
377 | 409 | ||
378 | if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) | 410 | if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) |
379 | return 0; | 411 | return 0; |
@@ -382,7 +414,8 @@ int hci_conn_encrypt(struct hci_conn *conn) | |||
382 | struct hci_cp_set_conn_encrypt cp; | 414 | struct hci_cp_set_conn_encrypt cp; |
383 | cp.handle = cpu_to_le16(conn->handle); | 415 | cp.handle = cpu_to_le16(conn->handle); |
384 | cp.encrypt = 1; | 416 | cp.encrypt = 1; |
385 | hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), &cp); | 417 | hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, |
418 | sizeof(cp), &cp); | ||
386 | } | 419 | } |
387 | return 0; | 420 | return 0; |
388 | } | 421 | } |
@@ -396,7 +429,8 @@ int hci_conn_change_link_key(struct hci_conn *conn) | |||
396 | if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { | 429 | if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { |
397 | struct hci_cp_change_conn_link_key cp; | 430 | struct hci_cp_change_conn_link_key cp; |
398 | cp.handle = cpu_to_le16(conn->handle); | 431 | cp.handle = cpu_to_le16(conn->handle); |
399 | hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, sizeof(cp), &cp); | 432 | hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, |
433 | sizeof(cp), &cp); | ||
400 | } | 434 | } |
401 | return 0; | 435 | return 0; |
402 | } | 436 | } |
@@ -498,6 +532,8 @@ void hci_conn_hash_flush(struct hci_dev *hdev) | |||
498 | 532 | ||
499 | c->state = BT_CLOSED; | 533 | c->state = BT_CLOSED; |
500 | 534 | ||
535 | hci_conn_del_sysfs(c); | ||
536 | |||
501 | hci_proto_disconn_ind(c, 0x16); | 537 | hci_proto_disconn_ind(c, 0x16); |
502 | hci_conn_del(c); | 538 | hci_conn_del(c); |
503 | } | 539 | } |
@@ -600,3 +636,23 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg) | |||
600 | 636 | ||
601 | return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0; | 637 | return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0; |
602 | } | 638 | } |
639 | |||
640 | int hci_get_auth_info(struct hci_dev *hdev, void __user *arg) | ||
641 | { | ||
642 | struct hci_auth_info_req req; | ||
643 | struct hci_conn *conn; | ||
644 | |||
645 | if (copy_from_user(&req, arg, sizeof(req))) | ||
646 | return -EFAULT; | ||
647 | |||
648 | hci_dev_lock_bh(hdev); | ||
649 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr); | ||
650 | if (conn) | ||
651 | req.type = conn->auth_type; | ||
652 | hci_dev_unlock_bh(hdev); | ||
653 | |||
654 | if (!conn) | ||
655 | return -ENOENT; | ||
656 | |||
657 | return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0; | ||
658 | } | ||
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index aec6929f5c16..f5b21cb93699 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c | |||
@@ -279,10 +279,20 @@ static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt) | |||
279 | 279 | ||
280 | BT_DBG("%s %x", hdev->name, encrypt); | 280 | BT_DBG("%s %x", hdev->name, encrypt); |
281 | 281 | ||
282 | /* Authentication */ | 282 | /* Encryption */ |
283 | hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); | 283 | hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); |
284 | } | 284 | } |
285 | 285 | ||
286 | static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt) | ||
287 | { | ||
288 | __le16 policy = cpu_to_le16(opt); | ||
289 | |||
290 | BT_DBG("%s %x", hdev->name, opt); | ||
291 | |||
292 | /* Default link policy */ | ||
293 | hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); | ||
294 | } | ||
295 | |||
286 | /* Get HCI device by index. | 296 | /* Get HCI device by index. |
287 | * Device is held on return. */ | 297 | * Device is held on return. */ |
288 | struct hci_dev *hci_dev_get(int index) | 298 | struct hci_dev *hci_dev_get(int index) |
@@ -694,32 +704,35 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) | |||
694 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); | 704 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); |
695 | break; | 705 | break; |
696 | 706 | ||
697 | case HCISETPTYPE: | ||
698 | hdev->pkt_type = (__u16) dr.dev_opt; | ||
699 | break; | ||
700 | |||
701 | case HCISETLINKPOL: | 707 | case HCISETLINKPOL: |
702 | hdev->link_policy = (__u16) dr.dev_opt; | 708 | err = hci_request(hdev, hci_linkpol_req, dr.dev_opt, |
709 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); | ||
703 | break; | 710 | break; |
704 | 711 | ||
705 | case HCISETLINKMODE: | 712 | case HCISETLINKMODE: |
706 | hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT); | 713 | hdev->link_mode = ((__u16) dr.dev_opt) & |
714 | (HCI_LM_MASTER | HCI_LM_ACCEPT); | ||
715 | break; | ||
716 | |||
717 | case HCISETPTYPE: | ||
718 | hdev->pkt_type = (__u16) dr.dev_opt; | ||
707 | break; | 719 | break; |
708 | 720 | ||
709 | case HCISETACLMTU: | 721 | case HCISETACLMTU: |
710 | hdev->acl_mtu = *((__u16 *)&dr.dev_opt + 1); | 722 | hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1); |
711 | hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0); | 723 | hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0); |
712 | break; | 724 | break; |
713 | 725 | ||
714 | case HCISETSCOMTU: | 726 | case HCISETSCOMTU: |
715 | hdev->sco_mtu = *((__u16 *)&dr.dev_opt + 1); | 727 | hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1); |
716 | hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0); | 728 | hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0); |
717 | break; | 729 | break; |
718 | 730 | ||
719 | default: | 731 | default: |
720 | err = -EINVAL; | 732 | err = -EINVAL; |
721 | break; | 733 | break; |
722 | } | 734 | } |
735 | |||
723 | hci_dev_put(hdev); | 736 | hci_dev_put(hdev); |
724 | return err; | 737 | return err; |
725 | } | 738 | } |
@@ -1270,9 +1283,12 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int | |||
1270 | struct hci_conn *c; | 1283 | struct hci_conn *c; |
1271 | c = list_entry(p, struct hci_conn, list); | 1284 | c = list_entry(p, struct hci_conn, list); |
1272 | 1285 | ||
1273 | if (c->type != type || c->state != BT_CONNECTED | 1286 | if (c->type != type || skb_queue_empty(&c->data_q)) |
1274 | || skb_queue_empty(&c->data_q)) | 1287 | continue; |
1288 | |||
1289 | if (c->state != BT_CONNECTED && c->state != BT_CONFIG) | ||
1275 | continue; | 1290 | continue; |
1291 | |||
1276 | num++; | 1292 | num++; |
1277 | 1293 | ||
1278 | if (c->sent < min) { | 1294 | if (c->sent < min) { |
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 6aef8f24e581..0e3db289f4be 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c | |||
@@ -110,6 +110,25 @@ static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb) | |||
110 | hci_dev_unlock(hdev); | 110 | hci_dev_unlock(hdev); |
111 | } | 111 | } |
112 | 112 | ||
113 | static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb) | ||
114 | { | ||
115 | struct hci_rp_read_link_policy *rp = (void *) skb->data; | ||
116 | struct hci_conn *conn; | ||
117 | |||
118 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | ||
119 | |||
120 | if (rp->status) | ||
121 | return; | ||
122 | |||
123 | hci_dev_lock(hdev); | ||
124 | |||
125 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); | ||
126 | if (conn) | ||
127 | conn->link_policy = __le16_to_cpu(rp->policy); | ||
128 | |||
129 | hci_dev_unlock(hdev); | ||
130 | } | ||
131 | |||
113 | static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb) | 132 | static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb) |
114 | { | 133 | { |
115 | struct hci_rp_write_link_policy *rp = (void *) skb->data; | 134 | struct hci_rp_write_link_policy *rp = (void *) skb->data; |
@@ -128,13 +147,41 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb) | |||
128 | hci_dev_lock(hdev); | 147 | hci_dev_lock(hdev); |
129 | 148 | ||
130 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); | 149 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); |
131 | if (conn) { | 150 | if (conn) |
132 | conn->link_policy = get_unaligned_le16(sent + 2); | 151 | conn->link_policy = get_unaligned_le16(sent + 2); |
133 | } | ||
134 | 152 | ||
135 | hci_dev_unlock(hdev); | 153 | hci_dev_unlock(hdev); |
136 | } | 154 | } |
137 | 155 | ||
156 | static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) | ||
157 | { | ||
158 | struct hci_rp_read_def_link_policy *rp = (void *) skb->data; | ||
159 | |||
160 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | ||
161 | |||
162 | if (rp->status) | ||
163 | return; | ||
164 | |||
165 | hdev->link_policy = __le16_to_cpu(rp->policy); | ||
166 | } | ||
167 | |||
168 | static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) | ||
169 | { | ||
170 | __u8 status = *((__u8 *) skb->data); | ||
171 | void *sent; | ||
172 | |||
173 | BT_DBG("%s status 0x%x", hdev->name, status); | ||
174 | |||
175 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); | ||
176 | if (!sent) | ||
177 | return; | ||
178 | |||
179 | if (!status) | ||
180 | hdev->link_policy = get_unaligned_le16(sent); | ||
181 | |||
182 | hci_req_complete(hdev, status); | ||
183 | } | ||
184 | |||
138 | static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) | 185 | static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) |
139 | { | 186 | { |
140 | __u8 status = *((__u8 *) skb->data); | 187 | __u8 status = *((__u8 *) skb->data); |
@@ -151,12 +198,14 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) | |||
151 | 198 | ||
152 | BT_DBG("%s status 0x%x", hdev->name, status); | 199 | BT_DBG("%s status 0x%x", hdev->name, status); |
153 | 200 | ||
201 | if (status) | ||
202 | return; | ||
203 | |||
154 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); | 204 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); |
155 | if (!sent) | 205 | if (!sent) |
156 | return; | 206 | return; |
157 | 207 | ||
158 | if (!status) | 208 | memcpy(hdev->dev_name, sent, 248); |
159 | memcpy(hdev->dev_name, sent, 248); | ||
160 | } | 209 | } |
161 | 210 | ||
162 | static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) | 211 | static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -266,12 +315,14 @@ static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) | |||
266 | 315 | ||
267 | BT_DBG("%s status 0x%x", hdev->name, status); | 316 | BT_DBG("%s status 0x%x", hdev->name, status); |
268 | 317 | ||
318 | if (status) | ||
319 | return; | ||
320 | |||
269 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); | 321 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); |
270 | if (!sent) | 322 | if (!sent) |
271 | return; | 323 | return; |
272 | 324 | ||
273 | if (!status) | 325 | memcpy(hdev->dev_class, sent, 3); |
274 | memcpy(hdev->dev_class, sent, 3); | ||
275 | } | 326 | } |
276 | 327 | ||
277 | static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) | 328 | static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -286,7 +337,7 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) | |||
286 | 337 | ||
287 | setting = __le16_to_cpu(rp->voice_setting); | 338 | setting = __le16_to_cpu(rp->voice_setting); |
288 | 339 | ||
289 | if (hdev->voice_setting == setting ) | 340 | if (hdev->voice_setting == setting) |
290 | return; | 341 | return; |
291 | 342 | ||
292 | hdev->voice_setting = setting; | 343 | hdev->voice_setting = setting; |
@@ -303,28 +354,31 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) | |||
303 | static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) | 354 | static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) |
304 | { | 355 | { |
305 | __u8 status = *((__u8 *) skb->data); | 356 | __u8 status = *((__u8 *) skb->data); |
357 | __u16 setting; | ||
306 | void *sent; | 358 | void *sent; |
307 | 359 | ||
308 | BT_DBG("%s status 0x%x", hdev->name, status); | 360 | BT_DBG("%s status 0x%x", hdev->name, status); |
309 | 361 | ||
362 | if (status) | ||
363 | return; | ||
364 | |||
310 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); | 365 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); |
311 | if (!sent) | 366 | if (!sent) |
312 | return; | 367 | return; |
313 | 368 | ||
314 | if (!status) { | 369 | setting = get_unaligned_le16(sent); |
315 | __u16 setting = get_unaligned_le16(sent); | ||
316 | 370 | ||
317 | if (hdev->voice_setting != setting) { | 371 | if (hdev->voice_setting == setting) |
318 | hdev->voice_setting = setting; | 372 | return; |
319 | 373 | ||
320 | BT_DBG("%s voice setting 0x%04x", hdev->name, setting); | 374 | hdev->voice_setting = setting; |
321 | 375 | ||
322 | if (hdev->notify) { | 376 | BT_DBG("%s voice setting 0x%04x", hdev->name, setting); |
323 | tasklet_disable(&hdev->tx_task); | 377 | |
324 | hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); | 378 | if (hdev->notify) { |
325 | tasklet_enable(&hdev->tx_task); | 379 | tasklet_disable(&hdev->tx_task); |
326 | } | 380 | hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); |
327 | } | 381 | tasklet_enable(&hdev->tx_task); |
328 | } | 382 | } |
329 | } | 383 | } |
330 | 384 | ||
@@ -337,6 +391,35 @@ static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) | |||
337 | hci_req_complete(hdev, status); | 391 | hci_req_complete(hdev, status); |
338 | } | 392 | } |
339 | 393 | ||
394 | static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) | ||
395 | { | ||
396 | struct hci_rp_read_ssp_mode *rp = (void *) skb->data; | ||
397 | |||
398 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | ||
399 | |||
400 | if (rp->status) | ||
401 | return; | ||
402 | |||
403 | hdev->ssp_mode = rp->mode; | ||
404 | } | ||
405 | |||
406 | static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) | ||
407 | { | ||
408 | __u8 status = *((__u8 *) skb->data); | ||
409 | void *sent; | ||
410 | |||
411 | BT_DBG("%s status 0x%x", hdev->name, status); | ||
412 | |||
413 | if (status) | ||
414 | return; | ||
415 | |||
416 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); | ||
417 | if (!sent) | ||
418 | return; | ||
419 | |||
420 | hdev->ssp_mode = *((__u8 *) sent); | ||
421 | } | ||
422 | |||
340 | static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) | 423 | static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) |
341 | { | 424 | { |
342 | struct hci_rp_read_local_version *rp = (void *) skb->data; | 425 | struct hci_rp_read_local_version *rp = (void *) skb->data; |
@@ -347,8 +430,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) | |||
347 | return; | 430 | return; |
348 | 431 | ||
349 | hdev->hci_ver = rp->hci_ver; | 432 | hdev->hci_ver = rp->hci_ver; |
350 | hdev->hci_rev = btohs(rp->hci_rev); | 433 | hdev->hci_rev = __le16_to_cpu(rp->hci_rev); |
351 | hdev->manufacturer = btohs(rp->manufacturer); | 434 | hdev->manufacturer = __le16_to_cpu(rp->manufacturer); |
352 | 435 | ||
353 | BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, | 436 | BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, |
354 | hdev->manufacturer, | 437 | hdev->manufacturer, |
@@ -536,11 +619,119 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) | |||
536 | hci_dev_unlock(hdev); | 619 | hci_dev_unlock(hdev); |
537 | } | 620 | } |
538 | 621 | ||
622 | static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) | ||
623 | { | ||
624 | struct hci_cp_auth_requested *cp; | ||
625 | struct hci_conn *conn; | ||
626 | |||
627 | BT_DBG("%s status 0x%x", hdev->name, status); | ||
628 | |||
629 | if (!status) | ||
630 | return; | ||
631 | |||
632 | cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); | ||
633 | if (!cp) | ||
634 | return; | ||
635 | |||
636 | hci_dev_lock(hdev); | ||
637 | |||
638 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); | ||
639 | if (conn) { | ||
640 | if (conn->state == BT_CONFIG) { | ||
641 | hci_proto_connect_cfm(conn, status); | ||
642 | hci_conn_put(conn); | ||
643 | } | ||
644 | } | ||
645 | |||
646 | hci_dev_unlock(hdev); | ||
647 | } | ||
648 | |||
649 | static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) | ||
650 | { | ||
651 | struct hci_cp_set_conn_encrypt *cp; | ||
652 | struct hci_conn *conn; | ||
653 | |||
654 | BT_DBG("%s status 0x%x", hdev->name, status); | ||
655 | |||
656 | if (!status) | ||
657 | return; | ||
658 | |||
659 | cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); | ||
660 | if (!cp) | ||
661 | return; | ||
662 | |||
663 | hci_dev_lock(hdev); | ||
664 | |||
665 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); | ||
666 | if (conn) { | ||
667 | if (conn->state == BT_CONFIG) { | ||
668 | hci_proto_connect_cfm(conn, status); | ||
669 | hci_conn_put(conn); | ||
670 | } | ||
671 | } | ||
672 | |||
673 | hci_dev_unlock(hdev); | ||
674 | } | ||
675 | |||
539 | static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) | 676 | static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) |
540 | { | 677 | { |
541 | BT_DBG("%s status 0x%x", hdev->name, status); | 678 | BT_DBG("%s status 0x%x", hdev->name, status); |
542 | } | 679 | } |
543 | 680 | ||
681 | static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) | ||
682 | { | ||
683 | struct hci_cp_read_remote_features *cp; | ||
684 | struct hci_conn *conn; | ||
685 | |||
686 | BT_DBG("%s status 0x%x", hdev->name, status); | ||
687 | |||
688 | if (!status) | ||
689 | return; | ||
690 | |||
691 | cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); | ||
692 | if (!cp) | ||
693 | return; | ||
694 | |||
695 | hci_dev_lock(hdev); | ||
696 | |||
697 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); | ||
698 | if (conn) { | ||
699 | if (conn->state == BT_CONFIG) { | ||
700 | hci_proto_connect_cfm(conn, status); | ||
701 | hci_conn_put(conn); | ||
702 | } | ||
703 | } | ||
704 | |||
705 | hci_dev_unlock(hdev); | ||
706 | } | ||
707 | |||
708 | static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) | ||
709 | { | ||
710 | struct hci_cp_read_remote_ext_features *cp; | ||
711 | struct hci_conn *conn; | ||
712 | |||
713 | BT_DBG("%s status 0x%x", hdev->name, status); | ||
714 | |||
715 | if (!status) | ||
716 | return; | ||
717 | |||
718 | cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); | ||
719 | if (!cp) | ||
720 | return; | ||
721 | |||
722 | hci_dev_lock(hdev); | ||
723 | |||
724 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); | ||
725 | if (conn) { | ||
726 | if (conn->state == BT_CONFIG) { | ||
727 | hci_proto_connect_cfm(conn, status); | ||
728 | hci_conn_put(conn); | ||
729 | } | ||
730 | } | ||
731 | |||
732 | hci_dev_unlock(hdev); | ||
733 | } | ||
734 | |||
544 | static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) | 735 | static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) |
545 | { | 736 | { |
546 | struct hci_cp_setup_sync_conn *cp; | 737 | struct hci_cp_setup_sync_conn *cp; |
@@ -653,6 +844,7 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff * | |||
653 | memcpy(data.dev_class, info->dev_class, 3); | 844 | memcpy(data.dev_class, info->dev_class, 3); |
654 | data.clock_offset = info->clock_offset; | 845 | data.clock_offset = info->clock_offset; |
655 | data.rssi = 0x00; | 846 | data.rssi = 0x00; |
847 | data.ssp_mode = 0x00; | ||
656 | info++; | 848 | info++; |
657 | hci_inquiry_cache_update(hdev, &data); | 849 | hci_inquiry_cache_update(hdev, &data); |
658 | } | 850 | } |
@@ -675,7 +867,14 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
675 | 867 | ||
676 | if (!ev->status) { | 868 | if (!ev->status) { |
677 | conn->handle = __le16_to_cpu(ev->handle); | 869 | conn->handle = __le16_to_cpu(ev->handle); |
678 | conn->state = BT_CONNECTED; | 870 | |
871 | if (conn->type == ACL_LINK) { | ||
872 | conn->state = BT_CONFIG; | ||
873 | hci_conn_hold(conn); | ||
874 | } else | ||
875 | conn->state = BT_CONNECTED; | ||
876 | |||
877 | hci_conn_add_sysfs(conn); | ||
679 | 878 | ||
680 | if (test_bit(HCI_AUTH, &hdev->flags)) | 879 | if (test_bit(HCI_AUTH, &hdev->flags)) |
681 | conn->link_mode |= HCI_LM_AUTH; | 880 | conn->link_mode |= HCI_LM_AUTH; |
@@ -687,30 +886,17 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
687 | if (conn->type == ACL_LINK) { | 886 | if (conn->type == ACL_LINK) { |
688 | struct hci_cp_read_remote_features cp; | 887 | struct hci_cp_read_remote_features cp; |
689 | cp.handle = ev->handle; | 888 | cp.handle = ev->handle; |
690 | hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, sizeof(cp), &cp); | 889 | hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, |
691 | } | 890 | sizeof(cp), &cp); |
692 | |||
693 | /* Set link policy */ | ||
694 | if (conn->type == ACL_LINK && hdev->link_policy) { | ||
695 | struct hci_cp_write_link_policy cp; | ||
696 | cp.handle = ev->handle; | ||
697 | cp.policy = cpu_to_le16(hdev->link_policy); | ||
698 | hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp); | ||
699 | } | 891 | } |
700 | 892 | ||
701 | /* Set packet type for incoming connection */ | 893 | /* Set packet type for incoming connection */ |
702 | if (!conn->out) { | 894 | if (!conn->out && hdev->hci_ver < 3) { |
703 | struct hci_cp_change_conn_ptype cp; | 895 | struct hci_cp_change_conn_ptype cp; |
704 | cp.handle = ev->handle; | 896 | cp.handle = ev->handle; |
705 | cp.pkt_type = (conn->type == ACL_LINK) ? | 897 | cp.pkt_type = cpu_to_le16(conn->pkt_type); |
706 | cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK): | 898 | hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, |
707 | cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK); | 899 | sizeof(cp), &cp); |
708 | |||
709 | hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), &cp); | ||
710 | } else { | ||
711 | /* Update disconnect timer */ | ||
712 | hci_conn_hold(conn); | ||
713 | hci_conn_put(conn); | ||
714 | } | 900 | } |
715 | } else | 901 | } else |
716 | conn->state = BT_CLOSED; | 902 | conn->state = BT_CLOSED; |
@@ -730,9 +916,10 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
730 | } | 916 | } |
731 | } | 917 | } |
732 | 918 | ||
733 | hci_proto_connect_cfm(conn, ev->status); | 919 | if (ev->status) { |
734 | if (ev->status) | 920 | hci_proto_connect_cfm(conn, ev->status); |
735 | hci_conn_del(conn); | 921 | hci_conn_del(conn); |
922 | } | ||
736 | 923 | ||
737 | unlock: | 924 | unlock: |
738 | hci_dev_unlock(hdev); | 925 | hci_dev_unlock(hdev); |
@@ -752,10 +939,14 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
752 | 939 | ||
753 | if (mask & HCI_LM_ACCEPT) { | 940 | if (mask & HCI_LM_ACCEPT) { |
754 | /* Connection accepted */ | 941 | /* Connection accepted */ |
942 | struct inquiry_entry *ie; | ||
755 | struct hci_conn *conn; | 943 | struct hci_conn *conn; |
756 | 944 | ||
757 | hci_dev_lock(hdev); | 945 | hci_dev_lock(hdev); |
758 | 946 | ||
947 | if ((ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr))) | ||
948 | memcpy(ie->data.dev_class, ev->dev_class, 3); | ||
949 | |||
759 | conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); | 950 | conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); |
760 | if (!conn) { | 951 | if (!conn) { |
761 | if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) { | 952 | if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) { |
@@ -786,7 +977,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
786 | struct hci_cp_accept_sync_conn_req cp; | 977 | struct hci_cp_accept_sync_conn_req cp; |
787 | 978 | ||
788 | bacpy(&cp.bdaddr, &ev->bdaddr); | 979 | bacpy(&cp.bdaddr, &ev->bdaddr); |
789 | cp.pkt_type = cpu_to_le16(hdev->esco_type); | 980 | cp.pkt_type = cpu_to_le16(conn->pkt_type); |
790 | 981 | ||
791 | cp.tx_bandwidth = cpu_to_le32(0x00001f40); | 982 | cp.tx_bandwidth = cpu_to_le32(0x00001f40); |
792 | cp.rx_bandwidth = cpu_to_le32(0x00001f40); | 983 | cp.rx_bandwidth = cpu_to_le32(0x00001f40); |
@@ -822,6 +1013,9 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff | |||
822 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); | 1013 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); |
823 | if (conn) { | 1014 | if (conn) { |
824 | conn->state = BT_CLOSED; | 1015 | conn->state = BT_CLOSED; |
1016 | |||
1017 | hci_conn_del_sysfs(conn); | ||
1018 | |||
825 | hci_proto_disconn_ind(conn, ev->reason); | 1019 | hci_proto_disconn_ind(conn, ev->reason); |
826 | hci_conn_del(conn); | 1020 | hci_conn_del(conn); |
827 | } | 1021 | } |
@@ -845,15 +1039,29 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
845 | 1039 | ||
846 | clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); | 1040 | clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); |
847 | 1041 | ||
848 | hci_auth_cfm(conn, ev->status); | 1042 | if (conn->state == BT_CONFIG) { |
1043 | if (!ev->status && hdev->ssp_mode > 0 && | ||
1044 | conn->ssp_mode > 0) { | ||
1045 | struct hci_cp_set_conn_encrypt cp; | ||
1046 | cp.handle = ev->handle; | ||
1047 | cp.encrypt = 0x01; | ||
1048 | hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, | ||
1049 | sizeof(cp), &cp); | ||
1050 | } else { | ||
1051 | conn->state = BT_CONNECTED; | ||
1052 | hci_proto_connect_cfm(conn, ev->status); | ||
1053 | hci_conn_put(conn); | ||
1054 | } | ||
1055 | } else | ||
1056 | hci_auth_cfm(conn, ev->status); | ||
849 | 1057 | ||
850 | if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) { | 1058 | if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) { |
851 | if (!ev->status) { | 1059 | if (!ev->status) { |
852 | struct hci_cp_set_conn_encrypt cp; | 1060 | struct hci_cp_set_conn_encrypt cp; |
853 | cp.handle = cpu_to_le16(conn->handle); | 1061 | cp.handle = ev->handle; |
854 | cp.encrypt = 1; | 1062 | cp.encrypt = 0x01; |
855 | hci_send_cmd(conn->hdev, | 1063 | hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, |
856 | HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), &cp); | 1064 | sizeof(cp), &cp); |
857 | } else { | 1065 | } else { |
858 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); | 1066 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); |
859 | hci_encrypt_cfm(conn, ev->status, 0x00); | 1067 | hci_encrypt_cfm(conn, ev->status, 0x00); |
@@ -883,15 +1091,24 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff * | |||
883 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); | 1091 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); |
884 | if (conn) { | 1092 | if (conn) { |
885 | if (!ev->status) { | 1093 | if (!ev->status) { |
886 | if (ev->encrypt) | 1094 | if (ev->encrypt) { |
1095 | /* Encryption implies authentication */ | ||
1096 | conn->link_mode |= HCI_LM_AUTH; | ||
887 | conn->link_mode |= HCI_LM_ENCRYPT; | 1097 | conn->link_mode |= HCI_LM_ENCRYPT; |
888 | else | 1098 | } else |
889 | conn->link_mode &= ~HCI_LM_ENCRYPT; | 1099 | conn->link_mode &= ~HCI_LM_ENCRYPT; |
890 | } | 1100 | } |
891 | 1101 | ||
892 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); | 1102 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); |
893 | 1103 | ||
894 | hci_encrypt_cfm(conn, ev->status, ev->encrypt); | 1104 | if (conn->state == BT_CONFIG) { |
1105 | if (!ev->status) | ||
1106 | conn->state = BT_CONNECTED; | ||
1107 | |||
1108 | hci_proto_connect_cfm(conn, ev->status); | ||
1109 | hci_conn_put(conn); | ||
1110 | } else | ||
1111 | hci_encrypt_cfm(conn, ev->status, ev->encrypt); | ||
895 | } | 1112 | } |
896 | 1113 | ||
897 | hci_dev_unlock(hdev); | 1114 | hci_dev_unlock(hdev); |
@@ -926,14 +1143,29 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff | |||
926 | 1143 | ||
927 | BT_DBG("%s status %d", hdev->name, ev->status); | 1144 | BT_DBG("%s status %d", hdev->name, ev->status); |
928 | 1145 | ||
929 | if (ev->status) | ||
930 | return; | ||
931 | |||
932 | hci_dev_lock(hdev); | 1146 | hci_dev_lock(hdev); |
933 | 1147 | ||
934 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); | 1148 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); |
935 | if (conn) | 1149 | if (conn) { |
936 | memcpy(conn->features, ev->features, 8); | 1150 | if (!ev->status) |
1151 | memcpy(conn->features, ev->features, 8); | ||
1152 | |||
1153 | if (conn->state == BT_CONFIG) { | ||
1154 | if (!ev->status && lmp_ssp_capable(hdev) && | ||
1155 | lmp_ssp_capable(conn)) { | ||
1156 | struct hci_cp_read_remote_ext_features cp; | ||
1157 | cp.handle = ev->handle; | ||
1158 | cp.page = 0x01; | ||
1159 | hci_send_cmd(hdev, | ||
1160 | HCI_OP_READ_REMOTE_EXT_FEATURES, | ||
1161 | sizeof(cp), &cp); | ||
1162 | } else { | ||
1163 | conn->state = BT_CONNECTED; | ||
1164 | hci_proto_connect_cfm(conn, ev->status); | ||
1165 | hci_conn_put(conn); | ||
1166 | } | ||
1167 | } | ||
1168 | } | ||
937 | 1169 | ||
938 | hci_dev_unlock(hdev); | 1170 | hci_dev_unlock(hdev); |
939 | } | 1171 | } |
@@ -974,10 +1206,22 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
974 | hci_cc_role_discovery(hdev, skb); | 1206 | hci_cc_role_discovery(hdev, skb); |
975 | break; | 1207 | break; |
976 | 1208 | ||
1209 | case HCI_OP_READ_LINK_POLICY: | ||
1210 | hci_cc_read_link_policy(hdev, skb); | ||
1211 | break; | ||
1212 | |||
977 | case HCI_OP_WRITE_LINK_POLICY: | 1213 | case HCI_OP_WRITE_LINK_POLICY: |
978 | hci_cc_write_link_policy(hdev, skb); | 1214 | hci_cc_write_link_policy(hdev, skb); |
979 | break; | 1215 | break; |
980 | 1216 | ||
1217 | case HCI_OP_READ_DEF_LINK_POLICY: | ||
1218 | hci_cc_read_def_link_policy(hdev, skb); | ||
1219 | break; | ||
1220 | |||
1221 | case HCI_OP_WRITE_DEF_LINK_POLICY: | ||
1222 | hci_cc_write_def_link_policy(hdev, skb); | ||
1223 | break; | ||
1224 | |||
981 | case HCI_OP_RESET: | 1225 | case HCI_OP_RESET: |
982 | hci_cc_reset(hdev, skb); | 1226 | hci_cc_reset(hdev, skb); |
983 | break; | 1227 | break; |
@@ -1022,6 +1266,14 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
1022 | hci_cc_host_buffer_size(hdev, skb); | 1266 | hci_cc_host_buffer_size(hdev, skb); |
1023 | break; | 1267 | break; |
1024 | 1268 | ||
1269 | case HCI_OP_READ_SSP_MODE: | ||
1270 | hci_cc_read_ssp_mode(hdev, skb); | ||
1271 | break; | ||
1272 | |||
1273 | case HCI_OP_WRITE_SSP_MODE: | ||
1274 | hci_cc_write_ssp_mode(hdev, skb); | ||
1275 | break; | ||
1276 | |||
1025 | case HCI_OP_READ_LOCAL_VERSION: | 1277 | case HCI_OP_READ_LOCAL_VERSION: |
1026 | hci_cc_read_local_version(hdev, skb); | 1278 | hci_cc_read_local_version(hdev, skb); |
1027 | break; | 1279 | break; |
@@ -1076,10 +1328,26 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
1076 | hci_cs_add_sco(hdev, ev->status); | 1328 | hci_cs_add_sco(hdev, ev->status); |
1077 | break; | 1329 | break; |
1078 | 1330 | ||
1331 | case HCI_OP_AUTH_REQUESTED: | ||
1332 | hci_cs_auth_requested(hdev, ev->status); | ||
1333 | break; | ||
1334 | |||
1335 | case HCI_OP_SET_CONN_ENCRYPT: | ||
1336 | hci_cs_set_conn_encrypt(hdev, ev->status); | ||
1337 | break; | ||
1338 | |||
1079 | case HCI_OP_REMOTE_NAME_REQ: | 1339 | case HCI_OP_REMOTE_NAME_REQ: |
1080 | hci_cs_remote_name_req(hdev, ev->status); | 1340 | hci_cs_remote_name_req(hdev, ev->status); |
1081 | break; | 1341 | break; |
1082 | 1342 | ||
1343 | case HCI_OP_READ_REMOTE_FEATURES: | ||
1344 | hci_cs_read_remote_features(hdev, ev->status); | ||
1345 | break; | ||
1346 | |||
1347 | case HCI_OP_READ_REMOTE_EXT_FEATURES: | ||
1348 | hci_cs_read_remote_ext_features(hdev, ev->status); | ||
1349 | break; | ||
1350 | |||
1083 | case HCI_OP_SETUP_SYNC_CONN: | 1351 | case HCI_OP_SETUP_SYNC_CONN: |
1084 | hci_cs_setup_sync_conn(hdev, ev->status); | 1352 | hci_cs_setup_sync_conn(hdev, ev->status); |
1085 | break; | 1353 | break; |
@@ -1235,6 +1503,22 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
1235 | hci_dev_unlock(hdev); | 1503 | hci_dev_unlock(hdev); |
1236 | } | 1504 | } |
1237 | 1505 | ||
1506 | static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
1507 | { | ||
1508 | struct hci_ev_pkt_type_change *ev = (void *) skb->data; | ||
1509 | struct hci_conn *conn; | ||
1510 | |||
1511 | BT_DBG("%s status %d", hdev->name, ev->status); | ||
1512 | |||
1513 | hci_dev_lock(hdev); | ||
1514 | |||
1515 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); | ||
1516 | if (conn && !ev->status) | ||
1517 | conn->pkt_type = __le16_to_cpu(ev->pkt_type); | ||
1518 | |||
1519 | hci_dev_unlock(hdev); | ||
1520 | } | ||
1521 | |||
1238 | static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1522 | static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) |
1239 | { | 1523 | { |
1240 | struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; | 1524 | struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; |
@@ -1275,6 +1559,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct | |||
1275 | memcpy(data.dev_class, info->dev_class, 3); | 1559 | memcpy(data.dev_class, info->dev_class, 3); |
1276 | data.clock_offset = info->clock_offset; | 1560 | data.clock_offset = info->clock_offset; |
1277 | data.rssi = info->rssi; | 1561 | data.rssi = info->rssi; |
1562 | data.ssp_mode = 0x00; | ||
1278 | info++; | 1563 | info++; |
1279 | hci_inquiry_cache_update(hdev, &data); | 1564 | hci_inquiry_cache_update(hdev, &data); |
1280 | } | 1565 | } |
@@ -1289,6 +1574,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct | |||
1289 | memcpy(data.dev_class, info->dev_class, 3); | 1574 | memcpy(data.dev_class, info->dev_class, 3); |
1290 | data.clock_offset = info->clock_offset; | 1575 | data.clock_offset = info->clock_offset; |
1291 | data.rssi = info->rssi; | 1576 | data.rssi = info->rssi; |
1577 | data.ssp_mode = 0x00; | ||
1292 | info++; | 1578 | info++; |
1293 | hci_inquiry_cache_update(hdev, &data); | 1579 | hci_inquiry_cache_update(hdev, &data); |
1294 | } | 1580 | } |
@@ -1299,7 +1585,43 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct | |||
1299 | 1585 | ||
1300 | static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1586 | static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb) |
1301 | { | 1587 | { |
1588 | struct hci_ev_remote_ext_features *ev = (void *) skb->data; | ||
1589 | struct hci_conn *conn; | ||
1590 | |||
1302 | BT_DBG("%s", hdev->name); | 1591 | BT_DBG("%s", hdev->name); |
1592 | |||
1593 | hci_dev_lock(hdev); | ||
1594 | |||
1595 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); | ||
1596 | if (conn) { | ||
1597 | if (!ev->status && ev->page == 0x01) { | ||
1598 | struct inquiry_entry *ie; | ||
1599 | |||
1600 | if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) | ||
1601 | ie->data.ssp_mode = (ev->features[0] & 0x01); | ||
1602 | |||
1603 | conn->ssp_mode = (ev->features[0] & 0x01); | ||
1604 | } | ||
1605 | |||
1606 | if (conn->state == BT_CONFIG) { | ||
1607 | if (!ev->status && hdev->ssp_mode > 0 && | ||
1608 | conn->ssp_mode > 0) { | ||
1609 | if (conn->out) { | ||
1610 | struct hci_cp_auth_requested cp; | ||
1611 | cp.handle = ev->handle; | ||
1612 | hci_send_cmd(hdev, | ||
1613 | HCI_OP_AUTH_REQUESTED, | ||
1614 | sizeof(cp), &cp); | ||
1615 | } | ||
1616 | } else { | ||
1617 | conn->state = BT_CONNECTED; | ||
1618 | hci_proto_connect_cfm(conn, ev->status); | ||
1619 | hci_conn_put(conn); | ||
1620 | } | ||
1621 | } | ||
1622 | } | ||
1623 | |||
1624 | hci_dev_unlock(hdev); | ||
1303 | } | 1625 | } |
1304 | 1626 | ||
1305 | static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1627 | static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -1312,12 +1634,22 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu | |||
1312 | hci_dev_lock(hdev); | 1634 | hci_dev_lock(hdev); |
1313 | 1635 | ||
1314 | conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); | 1636 | conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); |
1315 | if (!conn) | 1637 | if (!conn) { |
1316 | goto unlock; | 1638 | if (ev->link_type == ESCO_LINK) |
1639 | goto unlock; | ||
1640 | |||
1641 | conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); | ||
1642 | if (!conn) | ||
1643 | goto unlock; | ||
1644 | |||
1645 | conn->type = SCO_LINK; | ||
1646 | } | ||
1317 | 1647 | ||
1318 | if (!ev->status) { | 1648 | if (!ev->status) { |
1319 | conn->handle = __le16_to_cpu(ev->handle); | 1649 | conn->handle = __le16_to_cpu(ev->handle); |
1320 | conn->state = BT_CONNECTED; | 1650 | conn->state = BT_CONNECTED; |
1651 | |||
1652 | hci_conn_add_sysfs(conn); | ||
1321 | } else | 1653 | } else |
1322 | conn->state = BT_CLOSED; | 1654 | conn->state = BT_CLOSED; |
1323 | 1655 | ||
@@ -1371,6 +1703,7 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct | |||
1371 | memcpy(data.dev_class, info->dev_class, 3); | 1703 | memcpy(data.dev_class, info->dev_class, 3); |
1372 | data.clock_offset = info->clock_offset; | 1704 | data.clock_offset = info->clock_offset; |
1373 | data.rssi = info->rssi; | 1705 | data.rssi = info->rssi; |
1706 | data.ssp_mode = 0x01; | ||
1374 | info++; | 1707 | info++; |
1375 | hci_inquiry_cache_update(hdev, &data); | 1708 | hci_inquiry_cache_update(hdev, &data); |
1376 | } | 1709 | } |
@@ -1378,6 +1711,53 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct | |||
1378 | hci_dev_unlock(hdev); | 1711 | hci_dev_unlock(hdev); |
1379 | } | 1712 | } |
1380 | 1713 | ||
1714 | static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
1715 | { | ||
1716 | struct hci_ev_io_capa_request *ev = (void *) skb->data; | ||
1717 | struct hci_conn *conn; | ||
1718 | |||
1719 | BT_DBG("%s", hdev->name); | ||
1720 | |||
1721 | hci_dev_lock(hdev); | ||
1722 | |||
1723 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); | ||
1724 | if (conn) | ||
1725 | hci_conn_hold(conn); | ||
1726 | |||
1727 | hci_dev_unlock(hdev); | ||
1728 | } | ||
1729 | |||
1730 | static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
1731 | { | ||
1732 | struct hci_ev_simple_pair_complete *ev = (void *) skb->data; | ||
1733 | struct hci_conn *conn; | ||
1734 | |||
1735 | BT_DBG("%s", hdev->name); | ||
1736 | |||
1737 | hci_dev_lock(hdev); | ||
1738 | |||
1739 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); | ||
1740 | if (conn) | ||
1741 | hci_conn_put(conn); | ||
1742 | |||
1743 | hci_dev_unlock(hdev); | ||
1744 | } | ||
1745 | |||
1746 | static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
1747 | { | ||
1748 | struct hci_ev_remote_host_features *ev = (void *) skb->data; | ||
1749 | struct inquiry_entry *ie; | ||
1750 | |||
1751 | BT_DBG("%s", hdev->name); | ||
1752 | |||
1753 | hci_dev_lock(hdev); | ||
1754 | |||
1755 | if ((ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr))) | ||
1756 | ie->data.ssp_mode = (ev->features[0] & 0x01); | ||
1757 | |||
1758 | hci_dev_unlock(hdev); | ||
1759 | } | ||
1760 | |||
1381 | void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | 1761 | void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) |
1382 | { | 1762 | { |
1383 | struct hci_event_hdr *hdr = (void *) skb->data; | 1763 | struct hci_event_hdr *hdr = (void *) skb->data; |
@@ -1470,6 +1850,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
1470 | hci_clock_offset_evt(hdev, skb); | 1850 | hci_clock_offset_evt(hdev, skb); |
1471 | break; | 1851 | break; |
1472 | 1852 | ||
1853 | case HCI_EV_PKT_TYPE_CHANGE: | ||
1854 | hci_pkt_type_change_evt(hdev, skb); | ||
1855 | break; | ||
1856 | |||
1473 | case HCI_EV_PSCAN_REP_MODE: | 1857 | case HCI_EV_PSCAN_REP_MODE: |
1474 | hci_pscan_rep_mode_evt(hdev, skb); | 1858 | hci_pscan_rep_mode_evt(hdev, skb); |
1475 | break; | 1859 | break; |
@@ -1498,6 +1882,18 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
1498 | hci_extended_inquiry_result_evt(hdev, skb); | 1882 | hci_extended_inquiry_result_evt(hdev, skb); |
1499 | break; | 1883 | break; |
1500 | 1884 | ||
1885 | case HCI_EV_IO_CAPA_REQUEST: | ||
1886 | hci_io_capa_request_evt(hdev, skb); | ||
1887 | break; | ||
1888 | |||
1889 | case HCI_EV_SIMPLE_PAIR_COMPLETE: | ||
1890 | hci_simple_pair_complete_evt(hdev, skb); | ||
1891 | break; | ||
1892 | |||
1893 | case HCI_EV_REMOTE_HOST_FEATURES: | ||
1894 | hci_remote_host_features_evt(hdev, skb); | ||
1895 | break; | ||
1896 | |||
1501 | default: | 1897 | default: |
1502 | BT_DBG("%s event 0x%x", hdev->name, event); | 1898 | BT_DBG("%s event 0x%x", hdev->name, event); |
1503 | break; | 1899 | break; |
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 747fabd735d2..d62579b67959 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c | |||
@@ -193,19 +193,11 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign | |||
193 | 193 | ||
194 | return 0; | 194 | return 0; |
195 | 195 | ||
196 | case HCISETSECMGR: | ||
197 | if (!capable(CAP_NET_ADMIN)) | ||
198 | return -EACCES; | ||
199 | |||
200 | if (arg) | ||
201 | set_bit(HCI_SECMGR, &hdev->flags); | ||
202 | else | ||
203 | clear_bit(HCI_SECMGR, &hdev->flags); | ||
204 | |||
205 | return 0; | ||
206 | |||
207 | case HCIGETCONNINFO: | 196 | case HCIGETCONNINFO: |
208 | return hci_get_conn_info(hdev, (void __user *)arg); | 197 | return hci_get_conn_info(hdev, (void __user *) arg); |
198 | |||
199 | case HCIGETAUTHINFO: | ||
200 | return hci_get_auth_info(hdev, (void __user *) arg); | ||
209 | 201 | ||
210 | default: | 202 | default: |
211 | if (hdev->ioctl) | 203 | if (hdev->ioctl) |
@@ -217,7 +209,7 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign | |||
217 | static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | 209 | static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) |
218 | { | 210 | { |
219 | struct sock *sk = sock->sk; | 211 | struct sock *sk = sock->sk; |
220 | void __user *argp = (void __user *)arg; | 212 | void __user *argp = (void __user *) arg; |
221 | int err; | 213 | int err; |
222 | 214 | ||
223 | BT_DBG("cmd %x arg %lx", cmd, arg); | 215 | BT_DBG("cmd %x arg %lx", cmd, arg); |
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 84360c117d4e..844ca5f1b2d4 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c | |||
@@ -113,11 +113,13 @@ static ssize_t show_inquiry_cache(struct device *dev, struct device_attribute *a | |||
113 | struct inquiry_data *data = &e->data; | 113 | struct inquiry_data *data = &e->data; |
114 | bdaddr_t bdaddr; | 114 | bdaddr_t bdaddr; |
115 | baswap(&bdaddr, &data->bdaddr); | 115 | baswap(&bdaddr, &data->bdaddr); |
116 | n += sprintf(buf + n, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %u\n", | 116 | n += sprintf(buf + n, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n", |
117 | batostr(&bdaddr), | 117 | batostr(&bdaddr), |
118 | data->pscan_rep_mode, data->pscan_period_mode, data->pscan_mode, | 118 | data->pscan_rep_mode, data->pscan_period_mode, |
119 | data->dev_class[2], data->dev_class[1], data->dev_class[0], | 119 | data->pscan_mode, data->dev_class[2], |
120 | __le16_to_cpu(data->clock_offset), data->rssi, e->timestamp); | 120 | data->dev_class[1], data->dev_class[0], |
121 | __le16_to_cpu(data->clock_offset), | ||
122 | data->rssi, data->ssp_mode, e->timestamp); | ||
121 | } | 123 | } |
122 | 124 | ||
123 | hci_dev_unlock_bh(hdev); | 125 | hci_dev_unlock_bh(hdev); |
@@ -249,15 +251,28 @@ static ssize_t show_conn_address(struct device *dev, struct device_attribute *at | |||
249 | return sprintf(buf, "%s\n", batostr(&bdaddr)); | 251 | return sprintf(buf, "%s\n", batostr(&bdaddr)); |
250 | } | 252 | } |
251 | 253 | ||
254 | static ssize_t show_conn_features(struct device *dev, struct device_attribute *attr, char *buf) | ||
255 | { | ||
256 | struct hci_conn *conn = dev_get_drvdata(dev); | ||
257 | |||
258 | return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", | ||
259 | conn->features[0], conn->features[1], | ||
260 | conn->features[2], conn->features[3], | ||
261 | conn->features[4], conn->features[5], | ||
262 | conn->features[6], conn->features[7]); | ||
263 | } | ||
264 | |||
252 | #define CONN_ATTR(_name,_mode,_show,_store) \ | 265 | #define CONN_ATTR(_name,_mode,_show,_store) \ |
253 | struct device_attribute conn_attr_##_name = __ATTR(_name,_mode,_show,_store) | 266 | struct device_attribute conn_attr_##_name = __ATTR(_name,_mode,_show,_store) |
254 | 267 | ||
255 | static CONN_ATTR(type, S_IRUGO, show_conn_type, NULL); | 268 | static CONN_ATTR(type, S_IRUGO, show_conn_type, NULL); |
256 | static CONN_ATTR(address, S_IRUGO, show_conn_address, NULL); | 269 | static CONN_ATTR(address, S_IRUGO, show_conn_address, NULL); |
270 | static CONN_ATTR(features, S_IRUGO, show_conn_features, NULL); | ||
257 | 271 | ||
258 | static struct device_attribute *conn_attrs[] = { | 272 | static struct device_attribute *conn_attrs[] = { |
259 | &conn_attr_type, | 273 | &conn_attr_type, |
260 | &conn_attr_address, | 274 | &conn_attr_address, |
275 | &conn_attr_features, | ||
261 | NULL | 276 | NULL |
262 | }; | 277 | }; |
263 | 278 | ||
@@ -296,7 +311,6 @@ static void add_conn(struct work_struct *work) | |||
296 | void hci_conn_add_sysfs(struct hci_conn *conn) | 311 | void hci_conn_add_sysfs(struct hci_conn *conn) |
297 | { | 312 | { |
298 | struct hci_dev *hdev = conn->hdev; | 313 | struct hci_dev *hdev = conn->hdev; |
299 | bdaddr_t *ba = &conn->dst; | ||
300 | 314 | ||
301 | BT_DBG("conn %p", conn); | 315 | BT_DBG("conn %p", conn); |
302 | 316 | ||
@@ -305,11 +319,8 @@ void hci_conn_add_sysfs(struct hci_conn *conn) | |||
305 | 319 | ||
306 | conn->dev.release = bt_release; | 320 | conn->dev.release = bt_release; |
307 | 321 | ||
308 | snprintf(conn->dev.bus_id, BUS_ID_SIZE, | 322 | snprintf(conn->dev.bus_id, BUS_ID_SIZE, "%s:%d", |
309 | "%s%2.2X%2.2X%2.2X%2.2X%2.2X%2.2X", | 323 | hdev->name, conn->handle); |
310 | conn->type == ACL_LINK ? "acl" : "sco", | ||
311 | ba->b[5], ba->b[4], ba->b[3], | ||
312 | ba->b[2], ba->b[1], ba->b[0]); | ||
313 | 324 | ||
314 | dev_set_drvdata(&conn->dev, conn); | 325 | dev_set_drvdata(&conn->dev, conn); |
315 | 326 | ||
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 519cdb920f93..96434d774c84 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c | |||
@@ -581,6 +581,12 @@ static int hidp_session(void *arg) | |||
581 | hid_free_device(session->hid); | 581 | hid_free_device(session->hid); |
582 | } | 582 | } |
583 | 583 | ||
584 | /* Wakeup user-space polling for socket errors */ | ||
585 | session->intr_sock->sk->sk_err = EUNATCH; | ||
586 | session->ctrl_sock->sk->sk_err = EUNATCH; | ||
587 | |||
588 | hidp_schedule(session); | ||
589 | |||
584 | fput(session->intr_sock->file); | 590 | fput(session->intr_sock->file); |
585 | 591 | ||
586 | wait_event_timeout(*(ctrl_sk->sk_sleep), | 592 | wait_event_timeout(*(ctrl_sk->sk_sleep), |
@@ -879,6 +885,10 @@ int hidp_del_connection(struct hidp_conndel_req *req) | |||
879 | skb_queue_purge(&session->ctrl_transmit); | 885 | skb_queue_purge(&session->ctrl_transmit); |
880 | skb_queue_purge(&session->intr_transmit); | 886 | skb_queue_purge(&session->intr_transmit); |
881 | 887 | ||
888 | /* Wakeup user-space polling for socket errors */ | ||
889 | session->intr_sock->sk->sk_err = EUNATCH; | ||
890 | session->ctrl_sock->sk->sk_err = EUNATCH; | ||
891 | |||
882 | /* Kill session thread */ | 892 | /* Kill session thread */ |
883 | atomic_inc(&session->terminate); | 893 | atomic_inc(&session->terminate); |
884 | hidp_schedule(session); | 894 | hidp_schedule(session); |
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 6e180d255505..c1239852834a 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
@@ -55,7 +55,7 @@ | |||
55 | #define BT_DBG(D...) | 55 | #define BT_DBG(D...) |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | #define VERSION "2.9" | 58 | #define VERSION "2.10" |
59 | 59 | ||
60 | static u32 l2cap_feat_mask = 0x0000; | 60 | static u32 l2cap_feat_mask = 0x0000; |
61 | 61 | ||
@@ -76,11 +76,21 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, | |||
76 | static void l2cap_sock_timeout(unsigned long arg) | 76 | static void l2cap_sock_timeout(unsigned long arg) |
77 | { | 77 | { |
78 | struct sock *sk = (struct sock *) arg; | 78 | struct sock *sk = (struct sock *) arg; |
79 | int reason; | ||
79 | 80 | ||
80 | BT_DBG("sock %p state %d", sk, sk->sk_state); | 81 | BT_DBG("sock %p state %d", sk, sk->sk_state); |
81 | 82 | ||
82 | bh_lock_sock(sk); | 83 | bh_lock_sock(sk); |
83 | __l2cap_sock_close(sk, ETIMEDOUT); | 84 | |
85 | if (sk->sk_state == BT_CONNECT && | ||
86 | (l2cap_pi(sk)->link_mode & (L2CAP_LM_AUTH | | ||
87 | L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE))) | ||
88 | reason = ECONNREFUSED; | ||
89 | else | ||
90 | reason = ETIMEDOUT; | ||
91 | |||
92 | __l2cap_sock_close(sk, reason); | ||
93 | |||
84 | bh_unlock_sock(sk); | 94 | bh_unlock_sock(sk); |
85 | 95 | ||
86 | l2cap_sock_kill(sk); | 96 | l2cap_sock_kill(sk); |
@@ -240,7 +250,7 @@ static void l2cap_chan_del(struct sock *sk, int err) | |||
240 | hci_conn_put(conn->hcon); | 250 | hci_conn_put(conn->hcon); |
241 | } | 251 | } |
242 | 252 | ||
243 | sk->sk_state = BT_CLOSED; | 253 | sk->sk_state = BT_CLOSED; |
244 | sock_set_flag(sk, SOCK_ZAPPED); | 254 | sock_set_flag(sk, SOCK_ZAPPED); |
245 | 255 | ||
246 | if (err) | 256 | if (err) |
@@ -253,6 +263,21 @@ static void l2cap_chan_del(struct sock *sk, int err) | |||
253 | sk->sk_state_change(sk); | 263 | sk->sk_state_change(sk); |
254 | } | 264 | } |
255 | 265 | ||
266 | /* Service level security */ | ||
267 | static inline int l2cap_check_link_mode(struct sock *sk) | ||
268 | { | ||
269 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; | ||
270 | |||
271 | if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) || | ||
272 | (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) | ||
273 | return hci_conn_encrypt(conn->hcon); | ||
274 | |||
275 | if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) | ||
276 | return hci_conn_auth(conn->hcon); | ||
277 | |||
278 | return 1; | ||
279 | } | ||
280 | |||
256 | static inline u8 l2cap_get_ident(struct l2cap_conn *conn) | 281 | static inline u8 l2cap_get_ident(struct l2cap_conn *conn) |
257 | { | 282 | { |
258 | u8 id; | 283 | u8 id; |
@@ -287,6 +312,36 @@ static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 | |||
287 | return hci_send_acl(conn->hcon, skb, 0); | 312 | return hci_send_acl(conn->hcon, skb, 0); |
288 | } | 313 | } |
289 | 314 | ||
315 | static void l2cap_do_start(struct sock *sk) | ||
316 | { | ||
317 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; | ||
318 | |||
319 | if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) { | ||
320 | if (l2cap_check_link_mode(sk)) { | ||
321 | struct l2cap_conn_req req; | ||
322 | req.scid = cpu_to_le16(l2cap_pi(sk)->scid); | ||
323 | req.psm = l2cap_pi(sk)->psm; | ||
324 | |||
325 | l2cap_pi(sk)->ident = l2cap_get_ident(conn); | ||
326 | |||
327 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, | ||
328 | L2CAP_CONN_REQ, sizeof(req), &req); | ||
329 | } | ||
330 | } else { | ||
331 | struct l2cap_info_req req; | ||
332 | req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); | ||
333 | |||
334 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; | ||
335 | conn->info_ident = l2cap_get_ident(conn); | ||
336 | |||
337 | mod_timer(&conn->info_timer, jiffies + | ||
338 | msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); | ||
339 | |||
340 | l2cap_send_cmd(conn, conn->info_ident, | ||
341 | L2CAP_INFO_REQ, sizeof(req), &req); | ||
342 | } | ||
343 | } | ||
344 | |||
290 | /* ---- L2CAP connections ---- */ | 345 | /* ---- L2CAP connections ---- */ |
291 | static void l2cap_conn_start(struct l2cap_conn *conn) | 346 | static void l2cap_conn_start(struct l2cap_conn *conn) |
292 | { | 347 | { |
@@ -301,16 +356,37 @@ static void l2cap_conn_start(struct l2cap_conn *conn) | |||
301 | bh_lock_sock(sk); | 356 | bh_lock_sock(sk); |
302 | 357 | ||
303 | if (sk->sk_type != SOCK_SEQPACKET) { | 358 | if (sk->sk_type != SOCK_SEQPACKET) { |
304 | l2cap_sock_clear_timer(sk); | 359 | bh_unlock_sock(sk); |
305 | sk->sk_state = BT_CONNECTED; | 360 | continue; |
306 | sk->sk_state_change(sk); | 361 | } |
307 | } else if (sk->sk_state == BT_CONNECT) { | 362 | |
308 | struct l2cap_conn_req req; | 363 | if (sk->sk_state == BT_CONNECT) { |
309 | l2cap_pi(sk)->ident = l2cap_get_ident(conn); | 364 | if (l2cap_check_link_mode(sk)) { |
310 | req.scid = cpu_to_le16(l2cap_pi(sk)->scid); | 365 | struct l2cap_conn_req req; |
311 | req.psm = l2cap_pi(sk)->psm; | 366 | req.scid = cpu_to_le16(l2cap_pi(sk)->scid); |
312 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, | 367 | req.psm = l2cap_pi(sk)->psm; |
368 | |||
369 | l2cap_pi(sk)->ident = l2cap_get_ident(conn); | ||
370 | |||
371 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, | ||
313 | L2CAP_CONN_REQ, sizeof(req), &req); | 372 | L2CAP_CONN_REQ, sizeof(req), &req); |
373 | } | ||
374 | } else if (sk->sk_state == BT_CONNECT2) { | ||
375 | struct l2cap_conn_rsp rsp; | ||
376 | rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); | ||
377 | rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); | ||
378 | |||
379 | if (l2cap_check_link_mode(sk)) { | ||
380 | sk->sk_state = BT_CONFIG; | ||
381 | rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); | ||
382 | rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); | ||
383 | } else { | ||
384 | rsp.result = cpu_to_le16(L2CAP_CR_PEND); | ||
385 | rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); | ||
386 | } | ||
387 | |||
388 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, | ||
389 | L2CAP_CONN_RSP, sizeof(rsp), &rsp); | ||
314 | } | 390 | } |
315 | 391 | ||
316 | bh_unlock_sock(sk); | 392 | bh_unlock_sock(sk); |
@@ -321,22 +397,27 @@ static void l2cap_conn_start(struct l2cap_conn *conn) | |||
321 | 397 | ||
322 | static void l2cap_conn_ready(struct l2cap_conn *conn) | 398 | static void l2cap_conn_ready(struct l2cap_conn *conn) |
323 | { | 399 | { |
324 | BT_DBG("conn %p", conn); | 400 | struct l2cap_chan_list *l = &conn->chan_list; |
401 | struct sock *sk; | ||
325 | 402 | ||
326 | if (conn->chan_list.head || !hlist_empty(&l2cap_sk_list.head)) { | 403 | BT_DBG("conn %p", conn); |
327 | struct l2cap_info_req req; | ||
328 | 404 | ||
329 | req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); | 405 | read_lock(&l->lock); |
330 | 406 | ||
331 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; | 407 | for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { |
332 | conn->info_ident = l2cap_get_ident(conn); | 408 | bh_lock_sock(sk); |
333 | 409 | ||
334 | mod_timer(&conn->info_timer, | 410 | if (sk->sk_type != SOCK_SEQPACKET) { |
335 | jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); | 411 | l2cap_sock_clear_timer(sk); |
412 | sk->sk_state = BT_CONNECTED; | ||
413 | sk->sk_state_change(sk); | ||
414 | } else if (sk->sk_state == BT_CONNECT) | ||
415 | l2cap_do_start(sk); | ||
336 | 416 | ||
337 | l2cap_send_cmd(conn, conn->info_ident, | 417 | bh_unlock_sock(sk); |
338 | L2CAP_INFO_REQ, sizeof(req), &req); | ||
339 | } | 418 | } |
419 | |||
420 | read_unlock(&l->lock); | ||
340 | } | 421 | } |
341 | 422 | ||
342 | /* Notify sockets that we cannot guaranty reliability anymore */ | 423 | /* Notify sockets that we cannot guaranty reliability anymore */ |
@@ -388,7 +469,8 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) | |||
388 | 469 | ||
389 | conn->feat_mask = 0; | 470 | conn->feat_mask = 0; |
390 | 471 | ||
391 | setup_timer(&conn->info_timer, l2cap_info_timeout, (unsigned long)conn); | 472 | setup_timer(&conn->info_timer, l2cap_info_timeout, |
473 | (unsigned long) conn); | ||
392 | 474 | ||
393 | spin_lock_init(&conn->lock); | 475 | spin_lock_init(&conn->lock); |
394 | rwlock_init(&conn->chan_list.lock); | 476 | rwlock_init(&conn->chan_list.lock); |
@@ -500,7 +582,7 @@ static void l2cap_sock_cleanup_listen(struct sock *parent) | |||
500 | while ((sk = bt_accept_dequeue(parent, NULL))) | 582 | while ((sk = bt_accept_dequeue(parent, NULL))) |
501 | l2cap_sock_close(sk); | 583 | l2cap_sock_close(sk); |
502 | 584 | ||
503 | parent->sk_state = BT_CLOSED; | 585 | parent->sk_state = BT_CLOSED; |
504 | sock_set_flag(parent, SOCK_ZAPPED); | 586 | sock_set_flag(parent, SOCK_ZAPPED); |
505 | } | 587 | } |
506 | 588 | ||
@@ -543,9 +625,8 @@ static void __l2cap_sock_close(struct sock *sk, int reason) | |||
543 | req.scid = cpu_to_le16(l2cap_pi(sk)->scid); | 625 | req.scid = cpu_to_le16(l2cap_pi(sk)->scid); |
544 | l2cap_send_cmd(conn, l2cap_get_ident(conn), | 626 | l2cap_send_cmd(conn, l2cap_get_ident(conn), |
545 | L2CAP_DISCONN_REQ, sizeof(req), &req); | 627 | L2CAP_DISCONN_REQ, sizeof(req), &req); |
546 | } else { | 628 | } else |
547 | l2cap_chan_del(sk, reason); | 629 | l2cap_chan_del(sk, reason); |
548 | } | ||
549 | break; | 630 | break; |
550 | 631 | ||
551 | case BT_CONNECT: | 632 | case BT_CONNECT: |
@@ -614,9 +695,9 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p | |||
614 | sock_reset_flag(sk, SOCK_ZAPPED); | 695 | sock_reset_flag(sk, SOCK_ZAPPED); |
615 | 696 | ||
616 | sk->sk_protocol = proto; | 697 | sk->sk_protocol = proto; |
617 | sk->sk_state = BT_OPEN; | 698 | sk->sk_state = BT_OPEN; |
618 | 699 | ||
619 | setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long)sk); | 700 | setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk); |
620 | 701 | ||
621 | bt_sock_link(&l2cap_sk_list, sk); | 702 | bt_sock_link(&l2cap_sk_list, sk); |
622 | return sk; | 703 | return sk; |
@@ -729,22 +810,11 @@ static int l2cap_do_connect(struct sock *sk) | |||
729 | l2cap_sock_set_timer(sk, sk->sk_sndtimeo); | 810 | l2cap_sock_set_timer(sk, sk->sk_sndtimeo); |
730 | 811 | ||
731 | if (hcon->state == BT_CONNECTED) { | 812 | if (hcon->state == BT_CONNECTED) { |
732 | if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) { | 813 | if (sk->sk_type != SOCK_SEQPACKET) { |
733 | l2cap_conn_ready(conn); | ||
734 | goto done; | ||
735 | } | ||
736 | |||
737 | if (sk->sk_type == SOCK_SEQPACKET) { | ||
738 | struct l2cap_conn_req req; | ||
739 | l2cap_pi(sk)->ident = l2cap_get_ident(conn); | ||
740 | req.scid = cpu_to_le16(l2cap_pi(sk)->scid); | ||
741 | req.psm = l2cap_pi(sk)->psm; | ||
742 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, | ||
743 | L2CAP_CONN_REQ, sizeof(req), &req); | ||
744 | } else { | ||
745 | l2cap_sock_clear_timer(sk); | 814 | l2cap_sock_clear_timer(sk); |
746 | sk->sk_state = BT_CONNECTED; | 815 | sk->sk_state = BT_CONNECTED; |
747 | } | 816 | } else |
817 | l2cap_do_start(sk); | ||
748 | } | 818 | } |
749 | 819 | ||
750 | done: | 820 | done: |
@@ -1145,7 +1215,8 @@ static int l2cap_sock_shutdown(struct socket *sock, int how) | |||
1145 | __l2cap_sock_close(sk, 0); | 1215 | __l2cap_sock_close(sk, 0); |
1146 | 1216 | ||
1147 | if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) | 1217 | if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) |
1148 | err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); | 1218 | err = bt_sock_wait_state(sk, BT_CLOSED, |
1219 | sk->sk_lingertime); | ||
1149 | } | 1220 | } |
1150 | release_sock(sk); | 1221 | release_sock(sk); |
1151 | return err; | 1222 | return err; |
@@ -1189,6 +1260,11 @@ static void l2cap_chan_ready(struct sock *sk) | |||
1189 | */ | 1260 | */ |
1190 | parent->sk_data_ready(parent, 0); | 1261 | parent->sk_data_ready(parent, 0); |
1191 | } | 1262 | } |
1263 | |||
1264 | if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) { | ||
1265 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; | ||
1266 | hci_conn_change_link_key(conn->hcon); | ||
1267 | } | ||
1192 | } | 1268 | } |
1193 | 1269 | ||
1194 | /* Copy frame to all raw sockets on that connection */ | 1270 | /* Copy frame to all raw sockets on that connection */ |
@@ -1477,7 +1553,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
1477 | struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; | 1553 | struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; |
1478 | struct l2cap_conn_rsp rsp; | 1554 | struct l2cap_conn_rsp rsp; |
1479 | struct sock *sk, *parent; | 1555 | struct sock *sk, *parent; |
1480 | int result = 0, status = 0; | 1556 | int result, status = 0; |
1481 | 1557 | ||
1482 | u16 dcid = 0, scid = __le16_to_cpu(req->scid); | 1558 | u16 dcid = 0, scid = __le16_to_cpu(req->scid); |
1483 | __le16 psm = req->psm; | 1559 | __le16 psm = req->psm; |
@@ -1526,25 +1602,24 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
1526 | 1602 | ||
1527 | l2cap_sock_set_timer(sk, sk->sk_sndtimeo); | 1603 | l2cap_sock_set_timer(sk, sk->sk_sndtimeo); |
1528 | 1604 | ||
1529 | /* Service level security */ | ||
1530 | result = L2CAP_CR_PEND; | ||
1531 | status = L2CAP_CS_AUTHEN_PEND; | ||
1532 | sk->sk_state = BT_CONNECT2; | ||
1533 | l2cap_pi(sk)->ident = cmd->ident; | 1605 | l2cap_pi(sk)->ident = cmd->ident; |
1534 | 1606 | ||
1535 | if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) || | 1607 | if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) { |
1536 | (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) { | 1608 | if (l2cap_check_link_mode(sk)) { |
1537 | if (!hci_conn_encrypt(conn->hcon)) | 1609 | sk->sk_state = BT_CONFIG; |
1538 | goto done; | 1610 | result = L2CAP_CR_SUCCESS; |
1539 | } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) { | 1611 | status = L2CAP_CS_NO_INFO; |
1540 | if (!hci_conn_auth(conn->hcon)) | 1612 | } else { |
1541 | goto done; | 1613 | sk->sk_state = BT_CONNECT2; |
1614 | result = L2CAP_CR_PEND; | ||
1615 | status = L2CAP_CS_AUTHEN_PEND; | ||
1616 | } | ||
1617 | } else { | ||
1618 | sk->sk_state = BT_CONNECT2; | ||
1619 | result = L2CAP_CR_PEND; | ||
1620 | status = L2CAP_CS_NO_INFO; | ||
1542 | } | 1621 | } |
1543 | 1622 | ||
1544 | sk->sk_state = BT_CONFIG; | ||
1545 | result = status = 0; | ||
1546 | |||
1547 | done: | ||
1548 | write_unlock_bh(&list->lock); | 1623 | write_unlock_bh(&list->lock); |
1549 | 1624 | ||
1550 | response: | 1625 | response: |
@@ -1556,6 +1631,21 @@ sendresp: | |||
1556 | rsp.result = cpu_to_le16(result); | 1631 | rsp.result = cpu_to_le16(result); |
1557 | rsp.status = cpu_to_le16(status); | 1632 | rsp.status = cpu_to_le16(status); |
1558 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); | 1633 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); |
1634 | |||
1635 | if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { | ||
1636 | struct l2cap_info_req info; | ||
1637 | info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); | ||
1638 | |||
1639 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; | ||
1640 | conn->info_ident = l2cap_get_ident(conn); | ||
1641 | |||
1642 | mod_timer(&conn->info_timer, jiffies + | ||
1643 | msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); | ||
1644 | |||
1645 | l2cap_send_cmd(conn, conn->info_ident, | ||
1646 | L2CAP_INFO_REQ, sizeof(info), &info); | ||
1647 | } | ||
1648 | |||
1559 | return 0; | 1649 | return 0; |
1560 | } | 1650 | } |
1561 | 1651 | ||
@@ -1664,9 +1754,9 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
1664 | } | 1754 | } |
1665 | 1755 | ||
1666 | if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) { | 1756 | if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) { |
1667 | u8 req[64]; | 1757 | u8 buf[64]; |
1668 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, | 1758 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, |
1669 | l2cap_build_conf_req(sk, req), req); | 1759 | l2cap_build_conf_req(sk, buf), buf); |
1670 | } | 1760 | } |
1671 | 1761 | ||
1672 | unlock: | 1762 | unlock: |
@@ -1708,7 +1798,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
1708 | 1798 | ||
1709 | default: | 1799 | default: |
1710 | sk->sk_state = BT_DISCONN; | 1800 | sk->sk_state = BT_DISCONN; |
1711 | sk->sk_err = ECONNRESET; | 1801 | sk->sk_err = ECONNRESET; |
1712 | l2cap_sock_set_timer(sk, HZ * 5); | 1802 | l2cap_sock_set_timer(sk, HZ * 5); |
1713 | { | 1803 | { |
1714 | struct l2cap_disconn_req req; | 1804 | struct l2cap_disconn_req req; |
@@ -2080,10 +2170,8 @@ static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason) | |||
2080 | static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status) | 2170 | static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status) |
2081 | { | 2171 | { |
2082 | struct l2cap_chan_list *l; | 2172 | struct l2cap_chan_list *l; |
2083 | struct l2cap_conn *conn = conn = hcon->l2cap_data; | 2173 | struct l2cap_conn *conn = hcon->l2cap_data; |
2084 | struct l2cap_conn_rsp rsp; | ||
2085 | struct sock *sk; | 2174 | struct sock *sk; |
2086 | int result; | ||
2087 | 2175 | ||
2088 | if (!conn) | 2176 | if (!conn) |
2089 | return 0; | 2177 | return 0; |
@@ -2095,45 +2183,65 @@ static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status) | |||
2095 | read_lock(&l->lock); | 2183 | read_lock(&l->lock); |
2096 | 2184 | ||
2097 | for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { | 2185 | for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { |
2186 | struct l2cap_pinfo *pi = l2cap_pi(sk); | ||
2187 | |||
2098 | bh_lock_sock(sk); | 2188 | bh_lock_sock(sk); |
2099 | 2189 | ||
2100 | if (sk->sk_state != BT_CONNECT2 || | 2190 | if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) && |
2101 | (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) || | 2191 | !(hcon->link_mode & HCI_LM_ENCRYPT) && |
2102 | (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) { | 2192 | !status) { |
2103 | bh_unlock_sock(sk); | 2193 | bh_unlock_sock(sk); |
2104 | continue; | 2194 | continue; |
2105 | } | 2195 | } |
2106 | 2196 | ||
2107 | if (!status) { | 2197 | if (sk->sk_state == BT_CONNECT) { |
2108 | sk->sk_state = BT_CONFIG; | 2198 | if (!status) { |
2109 | result = 0; | 2199 | struct l2cap_conn_req req; |
2110 | } else { | 2200 | req.scid = cpu_to_le16(l2cap_pi(sk)->scid); |
2111 | sk->sk_state = BT_DISCONN; | 2201 | req.psm = l2cap_pi(sk)->psm; |
2112 | l2cap_sock_set_timer(sk, HZ/10); | ||
2113 | result = L2CAP_CR_SEC_BLOCK; | ||
2114 | } | ||
2115 | 2202 | ||
2116 | rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); | 2203 | l2cap_pi(sk)->ident = l2cap_get_ident(conn); |
2117 | rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); | 2204 | |
2118 | rsp.result = cpu_to_le16(result); | 2205 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, |
2119 | rsp.status = cpu_to_le16(0); | 2206 | L2CAP_CONN_REQ, sizeof(req), &req); |
2120 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, | 2207 | } else { |
2121 | L2CAP_CONN_RSP, sizeof(rsp), &rsp); | 2208 | l2cap_sock_clear_timer(sk); |
2209 | l2cap_sock_set_timer(sk, HZ / 10); | ||
2210 | } | ||
2211 | } else if (sk->sk_state == BT_CONNECT2) { | ||
2212 | struct l2cap_conn_rsp rsp; | ||
2213 | __u16 result; | ||
2214 | |||
2215 | if (!status) { | ||
2216 | sk->sk_state = BT_CONFIG; | ||
2217 | result = L2CAP_CR_SUCCESS; | ||
2218 | } else { | ||
2219 | sk->sk_state = BT_DISCONN; | ||
2220 | l2cap_sock_set_timer(sk, HZ / 10); | ||
2221 | result = L2CAP_CR_SEC_BLOCK; | ||
2222 | } | ||
2223 | |||
2224 | rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); | ||
2225 | rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); | ||
2226 | rsp.result = cpu_to_le16(result); | ||
2227 | rsp.status = cpu_to_le16(0); | ||
2228 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, | ||
2229 | L2CAP_CONN_RSP, sizeof(rsp), &rsp); | ||
2230 | } | ||
2122 | 2231 | ||
2123 | bh_unlock_sock(sk); | 2232 | bh_unlock_sock(sk); |
2124 | } | 2233 | } |
2125 | 2234 | ||
2126 | read_unlock(&l->lock); | 2235 | read_unlock(&l->lock); |
2236 | |||
2127 | return 0; | 2237 | return 0; |
2128 | } | 2238 | } |
2129 | 2239 | ||
2130 | static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status) | 2240 | static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) |
2131 | { | 2241 | { |
2132 | struct l2cap_chan_list *l; | 2242 | struct l2cap_chan_list *l; |
2133 | struct l2cap_conn *conn = hcon->l2cap_data; | 2243 | struct l2cap_conn *conn = hcon->l2cap_data; |
2134 | struct l2cap_conn_rsp rsp; | ||
2135 | struct sock *sk; | 2244 | struct sock *sk; |
2136 | int result; | ||
2137 | 2245 | ||
2138 | if (!conn) | 2246 | if (!conn) |
2139 | return 0; | 2247 | return 0; |
@@ -2145,36 +2253,59 @@ static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status) | |||
2145 | read_lock(&l->lock); | 2253 | read_lock(&l->lock); |
2146 | 2254 | ||
2147 | for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { | 2255 | for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { |
2256 | struct l2cap_pinfo *pi = l2cap_pi(sk); | ||
2257 | |||
2148 | bh_lock_sock(sk); | 2258 | bh_lock_sock(sk); |
2149 | 2259 | ||
2150 | if (sk->sk_state != BT_CONNECT2) { | 2260 | if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) && |
2261 | (sk->sk_state == BT_CONNECTED || | ||
2262 | sk->sk_state == BT_CONFIG) && | ||
2263 | !status && encrypt == 0x00) { | ||
2264 | __l2cap_sock_close(sk, ECONNREFUSED); | ||
2151 | bh_unlock_sock(sk); | 2265 | bh_unlock_sock(sk); |
2152 | continue; | 2266 | continue; |
2153 | } | 2267 | } |
2154 | 2268 | ||
2155 | if (!status) { | 2269 | if (sk->sk_state == BT_CONNECT) { |
2156 | sk->sk_state = BT_CONFIG; | 2270 | if (!status) { |
2157 | result = 0; | 2271 | struct l2cap_conn_req req; |
2158 | } else { | 2272 | req.scid = cpu_to_le16(l2cap_pi(sk)->scid); |
2159 | sk->sk_state = BT_DISCONN; | 2273 | req.psm = l2cap_pi(sk)->psm; |
2160 | l2cap_sock_set_timer(sk, HZ/10); | 2274 | |
2161 | result = L2CAP_CR_SEC_BLOCK; | 2275 | l2cap_pi(sk)->ident = l2cap_get_ident(conn); |
2162 | } | ||
2163 | 2276 | ||
2164 | rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); | 2277 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, |
2165 | rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); | 2278 | L2CAP_CONN_REQ, sizeof(req), &req); |
2166 | rsp.result = cpu_to_le16(result); | 2279 | } else { |
2167 | rsp.status = cpu_to_le16(0); | 2280 | l2cap_sock_clear_timer(sk); |
2168 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, | 2281 | l2cap_sock_set_timer(sk, HZ / 10); |
2169 | L2CAP_CONN_RSP, sizeof(rsp), &rsp); | 2282 | } |
2283 | } else if (sk->sk_state == BT_CONNECT2) { | ||
2284 | struct l2cap_conn_rsp rsp; | ||
2285 | __u16 result; | ||
2286 | |||
2287 | if (!status) { | ||
2288 | sk->sk_state = BT_CONFIG; | ||
2289 | result = L2CAP_CR_SUCCESS; | ||
2290 | } else { | ||
2291 | sk->sk_state = BT_DISCONN; | ||
2292 | l2cap_sock_set_timer(sk, HZ / 10); | ||
2293 | result = L2CAP_CR_SEC_BLOCK; | ||
2294 | } | ||
2170 | 2295 | ||
2171 | if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) | 2296 | rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); |
2172 | hci_conn_change_link_key(hcon); | 2297 | rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); |
2298 | rsp.result = cpu_to_le16(result); | ||
2299 | rsp.status = cpu_to_le16(0); | ||
2300 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, | ||
2301 | L2CAP_CONN_RSP, sizeof(rsp), &rsp); | ||
2302 | } | ||
2173 | 2303 | ||
2174 | bh_unlock_sock(sk); | 2304 | bh_unlock_sock(sk); |
2175 | } | 2305 | } |
2176 | 2306 | ||
2177 | read_unlock(&l->lock); | 2307 | read_unlock(&l->lock); |
2308 | |||
2178 | return 0; | 2309 | return 0; |
2179 | } | 2310 | } |
2180 | 2311 | ||
@@ -2301,9 +2432,9 @@ static const struct proto_ops l2cap_sock_ops = { | |||
2301 | .sendmsg = l2cap_sock_sendmsg, | 2432 | .sendmsg = l2cap_sock_sendmsg, |
2302 | .recvmsg = bt_sock_recvmsg, | 2433 | .recvmsg = bt_sock_recvmsg, |
2303 | .poll = bt_sock_poll, | 2434 | .poll = bt_sock_poll, |
2435 | .ioctl = bt_sock_ioctl, | ||
2304 | .mmap = sock_no_mmap, | 2436 | .mmap = sock_no_mmap, |
2305 | .socketpair = sock_no_socketpair, | 2437 | .socketpair = sock_no_socketpair, |
2306 | .ioctl = sock_no_ioctl, | ||
2307 | .shutdown = l2cap_sock_shutdown, | 2438 | .shutdown = l2cap_sock_shutdown, |
2308 | .setsockopt = l2cap_sock_setsockopt, | 2439 | .setsockopt = l2cap_sock_setsockopt, |
2309 | .getsockopt = l2cap_sock_getsockopt | 2440 | .getsockopt = l2cap_sock_getsockopt |
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 0c2c93735e93..6cfc7ba611b3 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
@@ -23,8 +23,6 @@ | |||
23 | 23 | ||
24 | /* | 24 | /* |
25 | * Bluetooth RFCOMM core. | 25 | * Bluetooth RFCOMM core. |
26 | * | ||
27 | * $Id: core.c,v 1.42 2002/10/01 23:26:25 maxk Exp $ | ||
28 | */ | 26 | */ |
29 | 27 | ||
30 | #include <linux/module.h> | 28 | #include <linux/module.h> |
@@ -53,7 +51,7 @@ | |||
53 | #define BT_DBG(D...) | 51 | #define BT_DBG(D...) |
54 | #endif | 52 | #endif |
55 | 53 | ||
56 | #define VERSION "1.8" | 54 | #define VERSION "1.10" |
57 | 55 | ||
58 | static int disable_cfc = 0; | 56 | static int disable_cfc = 0; |
59 | static int channel_mtu = -1; | 57 | static int channel_mtu = -1; |
@@ -230,6 +228,21 @@ static int rfcomm_l2sock_create(struct socket **sock) | |||
230 | return err; | 228 | return err; |
231 | } | 229 | } |
232 | 230 | ||
231 | static inline int rfcomm_check_link_mode(struct rfcomm_dlc *d) | ||
232 | { | ||
233 | struct sock *sk = d->session->sock->sk; | ||
234 | |||
235 | if (d->link_mode & (RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE)) { | ||
236 | if (!hci_conn_encrypt(l2cap_pi(sk)->conn->hcon)) | ||
237 | return 1; | ||
238 | } else if (d->link_mode & RFCOMM_LM_AUTH) { | ||
239 | if (!hci_conn_auth(l2cap_pi(sk)->conn->hcon)) | ||
240 | return 1; | ||
241 | } | ||
242 | |||
243 | return 0; | ||
244 | } | ||
245 | |||
233 | /* ---- RFCOMM DLCs ---- */ | 246 | /* ---- RFCOMM DLCs ---- */ |
234 | static void rfcomm_dlc_timeout(unsigned long arg) | 247 | static void rfcomm_dlc_timeout(unsigned long arg) |
235 | { | 248 | { |
@@ -371,15 +384,23 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, | |||
371 | d->addr = __addr(s->initiator, dlci); | 384 | d->addr = __addr(s->initiator, dlci); |
372 | d->priority = 7; | 385 | d->priority = 7; |
373 | 386 | ||
374 | d->state = BT_CONFIG; | 387 | d->state = BT_CONFIG; |
375 | rfcomm_dlc_link(s, d); | 388 | rfcomm_dlc_link(s, d); |
376 | 389 | ||
390 | d->out = 1; | ||
391 | |||
377 | d->mtu = s->mtu; | 392 | d->mtu = s->mtu; |
378 | d->cfc = (s->cfc == RFCOMM_CFC_UNKNOWN) ? 0 : s->cfc; | 393 | d->cfc = (s->cfc == RFCOMM_CFC_UNKNOWN) ? 0 : s->cfc; |
379 | 394 | ||
380 | if (s->state == BT_CONNECTED) | 395 | if (s->state == BT_CONNECTED) { |
381 | rfcomm_send_pn(s, 1, d); | 396 | if (rfcomm_check_link_mode(d)) |
397 | set_bit(RFCOMM_AUTH_PENDING, &d->flags); | ||
398 | else | ||
399 | rfcomm_send_pn(s, 1, d); | ||
400 | } | ||
401 | |||
382 | rfcomm_dlc_set_timer(d, RFCOMM_CONN_TIMEOUT); | 402 | rfcomm_dlc_set_timer(d, RFCOMM_CONN_TIMEOUT); |
403 | |||
383 | return 0; | 404 | return 0; |
384 | } | 405 | } |
385 | 406 | ||
@@ -1146,21 +1167,6 @@ static int rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci) | |||
1146 | return 0; | 1167 | return 0; |
1147 | } | 1168 | } |
1148 | 1169 | ||
1149 | static inline int rfcomm_check_link_mode(struct rfcomm_dlc *d) | ||
1150 | { | ||
1151 | struct sock *sk = d->session->sock->sk; | ||
1152 | |||
1153 | if (d->link_mode & (RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE)) { | ||
1154 | if (!hci_conn_encrypt(l2cap_pi(sk)->conn->hcon)) | ||
1155 | return 1; | ||
1156 | } else if (d->link_mode & RFCOMM_LM_AUTH) { | ||
1157 | if (!hci_conn_auth(l2cap_pi(sk)->conn->hcon)) | ||
1158 | return 1; | ||
1159 | } | ||
1160 | |||
1161 | return 0; | ||
1162 | } | ||
1163 | |||
1164 | static void rfcomm_dlc_accept(struct rfcomm_dlc *d) | 1170 | static void rfcomm_dlc_accept(struct rfcomm_dlc *d) |
1165 | { | 1171 | { |
1166 | struct sock *sk = d->session->sock->sk; | 1172 | struct sock *sk = d->session->sock->sk; |
@@ -1205,10 +1211,8 @@ static int rfcomm_recv_sabm(struct rfcomm_session *s, u8 dlci) | |||
1205 | if (rfcomm_check_link_mode(d)) { | 1211 | if (rfcomm_check_link_mode(d)) { |
1206 | set_bit(RFCOMM_AUTH_PENDING, &d->flags); | 1212 | set_bit(RFCOMM_AUTH_PENDING, &d->flags); |
1207 | rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); | 1213 | rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); |
1208 | return 0; | 1214 | } else |
1209 | } | 1215 | rfcomm_dlc_accept(d); |
1210 | |||
1211 | rfcomm_dlc_accept(d); | ||
1212 | } | 1216 | } |
1213 | return 0; | 1217 | return 0; |
1214 | } | 1218 | } |
@@ -1223,10 +1227,8 @@ static int rfcomm_recv_sabm(struct rfcomm_session *s, u8 dlci) | |||
1223 | if (rfcomm_check_link_mode(d)) { | 1227 | if (rfcomm_check_link_mode(d)) { |
1224 | set_bit(RFCOMM_AUTH_PENDING, &d->flags); | 1228 | set_bit(RFCOMM_AUTH_PENDING, &d->flags); |
1225 | rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); | 1229 | rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); |
1226 | return 0; | 1230 | } else |
1227 | } | 1231 | rfcomm_dlc_accept(d); |
1228 | |||
1229 | rfcomm_dlc_accept(d); | ||
1230 | } else { | 1232 | } else { |
1231 | rfcomm_send_dm(s, dlci); | 1233 | rfcomm_send_dm(s, dlci); |
1232 | } | 1234 | } |
@@ -1459,8 +1461,12 @@ static int rfcomm_recv_msc(struct rfcomm_session *s, int cr, struct sk_buff *skb | |||
1459 | clear_bit(RFCOMM_TX_THROTTLED, &d->flags); | 1461 | clear_bit(RFCOMM_TX_THROTTLED, &d->flags); |
1460 | 1462 | ||
1461 | rfcomm_dlc_lock(d); | 1463 | rfcomm_dlc_lock(d); |
1464 | |||
1465 | d->remote_v24_sig = msc->v24_sig; | ||
1466 | |||
1462 | if (d->modem_status) | 1467 | if (d->modem_status) |
1463 | d->modem_status(d, msc->v24_sig); | 1468 | d->modem_status(d, msc->v24_sig); |
1469 | |||
1464 | rfcomm_dlc_unlock(d); | 1470 | rfcomm_dlc_unlock(d); |
1465 | 1471 | ||
1466 | rfcomm_send_msc(s, 0, dlci, msc->v24_sig); | 1472 | rfcomm_send_msc(s, 0, dlci, msc->v24_sig); |
@@ -1636,7 +1642,11 @@ static void rfcomm_process_connect(struct rfcomm_session *s) | |||
1636 | d = list_entry(p, struct rfcomm_dlc, list); | 1642 | d = list_entry(p, struct rfcomm_dlc, list); |
1637 | if (d->state == BT_CONFIG) { | 1643 | if (d->state == BT_CONFIG) { |
1638 | d->mtu = s->mtu; | 1644 | d->mtu = s->mtu; |
1639 | rfcomm_send_pn(s, 1, d); | 1645 | if (rfcomm_check_link_mode(d)) { |
1646 | set_bit(RFCOMM_AUTH_PENDING, &d->flags); | ||
1647 | rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); | ||
1648 | } else | ||
1649 | rfcomm_send_pn(s, 1, d); | ||
1640 | } | 1650 | } |
1641 | } | 1651 | } |
1642 | } | 1652 | } |
@@ -1709,7 +1719,11 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s) | |||
1709 | 1719 | ||
1710 | if (test_and_clear_bit(RFCOMM_AUTH_ACCEPT, &d->flags)) { | 1720 | if (test_and_clear_bit(RFCOMM_AUTH_ACCEPT, &d->flags)) { |
1711 | rfcomm_dlc_clear_timer(d); | 1721 | rfcomm_dlc_clear_timer(d); |
1712 | rfcomm_dlc_accept(d); | 1722 | if (d->out) { |
1723 | rfcomm_send_pn(s, 1, d); | ||
1724 | rfcomm_dlc_set_timer(d, RFCOMM_CONN_TIMEOUT); | ||
1725 | } else | ||
1726 | rfcomm_dlc_accept(d); | ||
1713 | if (d->link_mode & RFCOMM_LM_SECURE) { | 1727 | if (d->link_mode & RFCOMM_LM_SECURE) { |
1714 | struct sock *sk = s->sock->sk; | 1728 | struct sock *sk = s->sock->sk; |
1715 | hci_conn_change_link_key(l2cap_pi(sk)->conn->hcon); | 1729 | hci_conn_change_link_key(l2cap_pi(sk)->conn->hcon); |
@@ -1717,7 +1731,10 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s) | |||
1717 | continue; | 1731 | continue; |
1718 | } else if (test_and_clear_bit(RFCOMM_AUTH_REJECT, &d->flags)) { | 1732 | } else if (test_and_clear_bit(RFCOMM_AUTH_REJECT, &d->flags)) { |
1719 | rfcomm_dlc_clear_timer(d); | 1733 | rfcomm_dlc_clear_timer(d); |
1720 | rfcomm_send_dm(s, d->dlci); | 1734 | if (!d->out) |
1735 | rfcomm_send_dm(s, d->dlci); | ||
1736 | else | ||
1737 | d->state = BT_CLOSED; | ||
1721 | __rfcomm_dlc_close(d, ECONNREFUSED); | 1738 | __rfcomm_dlc_close(d, ECONNREFUSED); |
1722 | continue; | 1739 | continue; |
1723 | } | 1740 | } |
@@ -1726,7 +1743,7 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s) | |||
1726 | continue; | 1743 | continue; |
1727 | 1744 | ||
1728 | if ((d->state == BT_CONNECTED || d->state == BT_DISCONN) && | 1745 | if ((d->state == BT_CONNECTED || d->state == BT_DISCONN) && |
1729 | d->mscex == RFCOMM_MSCEX_OK) | 1746 | d->mscex == RFCOMM_MSCEX_OK) |
1730 | rfcomm_process_tx(d); | 1747 | rfcomm_process_tx(d); |
1731 | } | 1748 | } |
1732 | } | 1749 | } |
@@ -1954,7 +1971,8 @@ static void rfcomm_auth_cfm(struct hci_conn *conn, u8 status) | |||
1954 | list_for_each_safe(p, n, &s->dlcs) { | 1971 | list_for_each_safe(p, n, &s->dlcs) { |
1955 | d = list_entry(p, struct rfcomm_dlc, list); | 1972 | d = list_entry(p, struct rfcomm_dlc, list); |
1956 | 1973 | ||
1957 | if (d->link_mode & (RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE)) | 1974 | if ((d->link_mode & (RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE)) && |
1975 | !(conn->link_mode & HCI_LM_ENCRYPT) && !status) | ||
1958 | continue; | 1976 | continue; |
1959 | 1977 | ||
1960 | if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags)) | 1978 | if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags)) |
@@ -1988,6 +2006,14 @@ static void rfcomm_encrypt_cfm(struct hci_conn *conn, u8 status, u8 encrypt) | |||
1988 | list_for_each_safe(p, n, &s->dlcs) { | 2006 | list_for_each_safe(p, n, &s->dlcs) { |
1989 | d = list_entry(p, struct rfcomm_dlc, list); | 2007 | d = list_entry(p, struct rfcomm_dlc, list); |
1990 | 2008 | ||
2009 | if ((d->link_mode & (RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE)) && | ||
2010 | (d->state == BT_CONNECTED || | ||
2011 | d->state == BT_CONFIG) && | ||
2012 | !status && encrypt == 0x00) { | ||
2013 | __rfcomm_dlc_close(d, ECONNREFUSED); | ||
2014 | continue; | ||
2015 | } | ||
2016 | |||
1991 | if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags)) | 2017 | if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags)) |
1992 | continue; | 2018 | continue; |
1993 | 2019 | ||
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 5083adcbfae5..8a972b6ba85f 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
@@ -23,8 +23,6 @@ | |||
23 | 23 | ||
24 | /* | 24 | /* |
25 | * RFCOMM sockets. | 25 | * RFCOMM sockets. |
26 | * | ||
27 | * $Id: sock.c,v 1.24 2002/10/03 01:00:34 maxk Exp $ | ||
28 | */ | 26 | */ |
29 | 27 | ||
30 | #include <linux/module.h> | 28 | #include <linux/module.h> |
@@ -309,13 +307,13 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int | |||
309 | sk->sk_destruct = rfcomm_sock_destruct; | 307 | sk->sk_destruct = rfcomm_sock_destruct; |
310 | sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT; | 308 | sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT; |
311 | 309 | ||
312 | sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; | 310 | sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; |
313 | sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; | 311 | sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; |
314 | 312 | ||
315 | sock_reset_flag(sk, SOCK_ZAPPED); | 313 | sock_reset_flag(sk, SOCK_ZAPPED); |
316 | 314 | ||
317 | sk->sk_protocol = proto; | 315 | sk->sk_protocol = proto; |
318 | sk->sk_state = BT_OPEN; | 316 | sk->sk_state = BT_OPEN; |
319 | 317 | ||
320 | bt_sock_link(&rfcomm_sk_list, sk); | 318 | bt_sock_link(&rfcomm_sk_list, sk); |
321 | 319 | ||
@@ -413,6 +411,8 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a | |||
413 | bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr); | 411 | bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr); |
414 | rfcomm_pi(sk)->channel = sa->rc_channel; | 412 | rfcomm_pi(sk)->channel = sa->rc_channel; |
415 | 413 | ||
414 | d->link_mode = rfcomm_pi(sk)->link_mode; | ||
415 | |||
416 | err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel); | 416 | err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel); |
417 | if (!err) | 417 | if (!err) |
418 | err = bt_sock_wait_state(sk, BT_CONNECTED, | 418 | err = bt_sock_wait_state(sk, BT_CONNECTED, |
@@ -688,6 +688,8 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
688 | copied += chunk; | 688 | copied += chunk; |
689 | size -= chunk; | 689 | size -= chunk; |
690 | 690 | ||
691 | sock_recv_timestamp(msg, sk, skb); | ||
692 | |||
691 | if (!(flags & MSG_PEEK)) { | 693 | if (!(flags & MSG_PEEK)) { |
692 | atomic_sub(chunk, &sk->sk_rmem_alloc); | 694 | atomic_sub(chunk, &sk->sk_rmem_alloc); |
693 | 695 | ||
@@ -793,15 +795,20 @@ static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned lon | |||
793 | struct sock *sk = sock->sk; | 795 | struct sock *sk = sock->sk; |
794 | int err; | 796 | int err; |
795 | 797 | ||
796 | lock_sock(sk); | 798 | BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg); |
799 | |||
800 | err = bt_sock_ioctl(sock, cmd, arg); | ||
797 | 801 | ||
802 | if (err == -ENOIOCTLCMD) { | ||
798 | #ifdef CONFIG_BT_RFCOMM_TTY | 803 | #ifdef CONFIG_BT_RFCOMM_TTY |
799 | err = rfcomm_dev_ioctl(sk, cmd, (void __user *)arg); | 804 | lock_sock(sk); |
805 | err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg); | ||
806 | release_sock(sk); | ||
800 | #else | 807 | #else |
801 | err = -EOPNOTSUPP; | 808 | err = -EOPNOTSUPP; |
802 | #endif | 809 | #endif |
810 | } | ||
803 | 811 | ||
804 | release_sock(sk); | ||
805 | return err; | 812 | return err; |
806 | } | 813 | } |
807 | 814 | ||
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 0a387f2eb7a9..d3340dd52bcf 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c | |||
@@ -23,8 +23,6 @@ | |||
23 | 23 | ||
24 | /* | 24 | /* |
25 | * RFCOMM TTY. | 25 | * RFCOMM TTY. |
26 | * | ||
27 | * $Id: tty.c,v 1.24 2002/10/03 01:54:38 holtmann Exp $ | ||
28 | */ | 26 | */ |
29 | 27 | ||
30 | #include <linux/module.h> | 28 | #include <linux/module.h> |
@@ -77,6 +75,8 @@ struct rfcomm_dev { | |||
77 | struct device *tty_dev; | 75 | struct device *tty_dev; |
78 | 76 | ||
79 | atomic_t wmem_alloc; | 77 | atomic_t wmem_alloc; |
78 | |||
79 | struct sk_buff_head pending; | ||
80 | }; | 80 | }; |
81 | 81 | ||
82 | static LIST_HEAD(rfcomm_dev_list); | 82 | static LIST_HEAD(rfcomm_dev_list); |
@@ -264,13 +264,34 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) | |||
264 | init_waitqueue_head(&dev->wait); | 264 | init_waitqueue_head(&dev->wait); |
265 | tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev); | 265 | tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev); |
266 | 266 | ||
267 | skb_queue_head_init(&dev->pending); | ||
268 | |||
267 | rfcomm_dlc_lock(dlc); | 269 | rfcomm_dlc_lock(dlc); |
270 | |||
271 | if (req->flags & (1 << RFCOMM_REUSE_DLC)) { | ||
272 | struct sock *sk = dlc->owner; | ||
273 | struct sk_buff *skb; | ||
274 | |||
275 | BUG_ON(!sk); | ||
276 | |||
277 | rfcomm_dlc_throttle(dlc); | ||
278 | |||
279 | while ((skb = skb_dequeue(&sk->sk_receive_queue))) { | ||
280 | skb_orphan(skb); | ||
281 | skb_queue_tail(&dev->pending, skb); | ||
282 | atomic_sub(skb->len, &sk->sk_rmem_alloc); | ||
283 | } | ||
284 | } | ||
285 | |||
268 | dlc->data_ready = rfcomm_dev_data_ready; | 286 | dlc->data_ready = rfcomm_dev_data_ready; |
269 | dlc->state_change = rfcomm_dev_state_change; | 287 | dlc->state_change = rfcomm_dev_state_change; |
270 | dlc->modem_status = rfcomm_dev_modem_status; | 288 | dlc->modem_status = rfcomm_dev_modem_status; |
271 | 289 | ||
272 | dlc->owner = dev; | 290 | dlc->owner = dev; |
273 | dev->dlc = dlc; | 291 | dev->dlc = dlc; |
292 | |||
293 | rfcomm_dev_modem_status(dlc, dlc->remote_v24_sig); | ||
294 | |||
274 | rfcomm_dlc_unlock(dlc); | 295 | rfcomm_dlc_unlock(dlc); |
275 | 296 | ||
276 | /* It's safe to call __module_get() here because socket already | 297 | /* It's safe to call __module_get() here because socket already |
@@ -539,11 +560,16 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb) | |||
539 | struct rfcomm_dev *dev = dlc->owner; | 560 | struct rfcomm_dev *dev = dlc->owner; |
540 | struct tty_struct *tty; | 561 | struct tty_struct *tty; |
541 | 562 | ||
542 | if (!dev || !(tty = dev->tty)) { | 563 | if (!dev) { |
543 | kfree_skb(skb); | 564 | kfree_skb(skb); |
544 | return; | 565 | return; |
545 | } | 566 | } |
546 | 567 | ||
568 | if (!(tty = dev->tty) || !skb_queue_empty(&dev->pending)) { | ||
569 | skb_queue_tail(&dev->pending, skb); | ||
570 | return; | ||
571 | } | ||
572 | |||
547 | BT_DBG("dlc %p tty %p len %d", dlc, tty, skb->len); | 573 | BT_DBG("dlc %p tty %p len %d", dlc, tty, skb->len); |
548 | 574 | ||
549 | tty_insert_flip_string(tty, skb->data, skb->len); | 575 | tty_insert_flip_string(tty, skb->data, skb->len); |
@@ -620,6 +646,30 @@ static void rfcomm_tty_wakeup(unsigned long arg) | |||
620 | tty_wakeup(tty); | 646 | tty_wakeup(tty); |
621 | } | 647 | } |
622 | 648 | ||
649 | static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev) | ||
650 | { | ||
651 | struct tty_struct *tty = dev->tty; | ||
652 | struct sk_buff *skb; | ||
653 | int inserted = 0; | ||
654 | |||
655 | if (!tty) | ||
656 | return; | ||
657 | |||
658 | BT_DBG("dev %p tty %p", dev, tty); | ||
659 | |||
660 | rfcomm_dlc_lock(dev->dlc); | ||
661 | |||
662 | while ((skb = skb_dequeue(&dev->pending))) { | ||
663 | inserted += tty_insert_flip_string(tty, skb->data, skb->len); | ||
664 | kfree_skb(skb); | ||
665 | } | ||
666 | |||
667 | rfcomm_dlc_unlock(dev->dlc); | ||
668 | |||
669 | if (inserted > 0) | ||
670 | tty_flip_buffer_push(tty); | ||
671 | } | ||
672 | |||
623 | static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) | 673 | static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) |
624 | { | 674 | { |
625 | DECLARE_WAITQUEUE(wait, current); | 675 | DECLARE_WAITQUEUE(wait, current); |
@@ -684,6 +734,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
684 | if (err == 0) | 734 | if (err == 0) |
685 | device_move(dev->tty_dev, rfcomm_get_device(dev)); | 735 | device_move(dev->tty_dev, rfcomm_get_device(dev)); |
686 | 736 | ||
737 | rfcomm_tty_copy_pending(dev); | ||
738 | |||
739 | rfcomm_dlc_unthrottle(dev->dlc); | ||
740 | |||
687 | return err; | 741 | return err; |
688 | } | 742 | } |
689 | 743 | ||
@@ -1114,6 +1168,7 @@ int rfcomm_init_ttys(void) | |||
1114 | rfcomm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; | 1168 | rfcomm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; |
1115 | rfcomm_tty_driver->init_termios = tty_std_termios; | 1169 | rfcomm_tty_driver->init_termios = tty_std_termios; |
1116 | rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; | 1170 | rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; |
1171 | rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON; | ||
1117 | tty_set_operations(rfcomm_tty_driver, &rfcomm_ops); | 1172 | tty_set_operations(rfcomm_tty_driver, &rfcomm_ops); |
1118 | 1173 | ||
1119 | if (tty_register_driver(rfcomm_tty_driver)) { | 1174 | if (tty_register_driver(rfcomm_tty_driver)) { |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index b0d487e2db20..8cda49874868 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -53,7 +53,9 @@ | |||
53 | #define BT_DBG(D...) | 53 | #define BT_DBG(D...) |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #define VERSION "0.5" | 56 | #define VERSION "0.6" |
57 | |||
58 | static int disable_esco = 0; | ||
57 | 59 | ||
58 | static const struct proto_ops sco_sock_ops; | 60 | static const struct proto_ops sco_sock_ops; |
59 | 61 | ||
@@ -193,7 +195,10 @@ static int sco_connect(struct sock *sk) | |||
193 | 195 | ||
194 | err = -ENOMEM; | 196 | err = -ENOMEM; |
195 | 197 | ||
196 | type = lmp_esco_capable(hdev) ? ESCO_LINK : SCO_LINK; | 198 | if (lmp_esco_capable(hdev) && !disable_esco) |
199 | type = ESCO_LINK; | ||
200 | else | ||
201 | type = SCO_LINK; | ||
197 | 202 | ||
198 | hcon = hci_connect(hdev, type, dst); | 203 | hcon = hci_connect(hdev, type, dst); |
199 | if (!hcon) | 204 | if (!hcon) |
@@ -921,7 +926,7 @@ static const struct proto_ops sco_sock_ops = { | |||
921 | .sendmsg = sco_sock_sendmsg, | 926 | .sendmsg = sco_sock_sendmsg, |
922 | .recvmsg = bt_sock_recvmsg, | 927 | .recvmsg = bt_sock_recvmsg, |
923 | .poll = bt_sock_poll, | 928 | .poll = bt_sock_poll, |
924 | .ioctl = sock_no_ioctl, | 929 | .ioctl = bt_sock_ioctl, |
925 | .mmap = sock_no_mmap, | 930 | .mmap = sock_no_mmap, |
926 | .socketpair = sock_no_socketpair, | 931 | .socketpair = sock_no_socketpair, |
927 | .shutdown = sock_no_shutdown, | 932 | .shutdown = sock_no_shutdown, |
@@ -994,6 +999,9 @@ static void __exit sco_exit(void) | |||
994 | module_init(sco_init); | 999 | module_init(sco_init); |
995 | module_exit(sco_exit); | 1000 | module_exit(sco_exit); |
996 | 1001 | ||
1002 | module_param(disable_esco, bool, 0644); | ||
1003 | MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation"); | ||
1004 | |||
997 | MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); | 1005 | MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); |
998 | MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION); | 1006 | MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION); |
999 | MODULE_VERSION(VERSION); | 1007 | MODULE_VERSION(VERSION); |
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig index 12265aff7099..e143ca678881 100644 --- a/net/bridge/Kconfig +++ b/net/bridge/Kconfig | |||
@@ -5,6 +5,7 @@ | |||
5 | config BRIDGE | 5 | config BRIDGE |
6 | tristate "802.1d Ethernet Bridging" | 6 | tristate "802.1d Ethernet Bridging" |
7 | select LLC | 7 | select LLC |
8 | select STP | ||
8 | ---help--- | 9 | ---help--- |
9 | If you say Y here, then your Linux box will be able to act as an | 10 | If you say Y here, then your Linux box will be able to act as an |
10 | Ethernet bridge, which means that the different Ethernet segments it | 11 | Ethernet bridge, which means that the different Ethernet segments it |
diff --git a/net/bridge/br.c b/net/bridge/br.c index 8f3c58e5f7a5..573acdf6f9ff 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Lennert Buytenhek <buytenh@gnu.org> | 6 | * Lennert Buytenhek <buytenh@gnu.org> |
7 | * | 7 | * |
8 | * $Id: br.c,v 1.47 2001/12/24 00:56:41 davem Exp $ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
@@ -20,21 +18,24 @@ | |||
20 | #include <linux/init.h> | 18 | #include <linux/init.h> |
21 | #include <linux/llc.h> | 19 | #include <linux/llc.h> |
22 | #include <net/llc.h> | 20 | #include <net/llc.h> |
21 | #include <net/stp.h> | ||
23 | 22 | ||
24 | #include "br_private.h" | 23 | #include "br_private.h" |
25 | 24 | ||
26 | int (*br_should_route_hook)(struct sk_buff *skb); | 25 | int (*br_should_route_hook)(struct sk_buff *skb); |
27 | 26 | ||
28 | static struct llc_sap *br_stp_sap; | 27 | static const struct stp_proto br_stp_proto = { |
28 | .rcv = br_stp_rcv, | ||
29 | }; | ||
29 | 30 | ||
30 | static int __init br_init(void) | 31 | static int __init br_init(void) |
31 | { | 32 | { |
32 | int err; | 33 | int err; |
33 | 34 | ||
34 | br_stp_sap = llc_sap_open(LLC_SAP_BSPAN, br_stp_rcv); | 35 | err = stp_proto_register(&br_stp_proto); |
35 | if (!br_stp_sap) { | 36 | if (err < 0) { |
36 | printk(KERN_ERR "bridge: can't register sap for STP\n"); | 37 | printk(KERN_ERR "bridge: can't register sap for STP\n"); |
37 | return -EADDRINUSE; | 38 | return err; |
38 | } | 39 | } |
39 | 40 | ||
40 | err = br_fdb_init(); | 41 | err = br_fdb_init(); |
@@ -67,13 +68,13 @@ err_out2: | |||
67 | err_out1: | 68 | err_out1: |
68 | br_fdb_fini(); | 69 | br_fdb_fini(); |
69 | err_out: | 70 | err_out: |
70 | llc_sap_put(br_stp_sap); | 71 | stp_proto_unregister(&br_stp_proto); |
71 | return err; | 72 | return err; |
72 | } | 73 | } |
73 | 74 | ||
74 | static void __exit br_deinit(void) | 75 | static void __exit br_deinit(void) |
75 | { | 76 | { |
76 | rcu_assign_pointer(br_stp_sap->rcv_func, NULL); | 77 | stp_proto_unregister(&br_stp_proto); |
77 | 78 | ||
78 | br_netlink_fini(); | 79 | br_netlink_fini(); |
79 | unregister_netdevice_notifier(&br_device_notifier); | 80 | unregister_netdevice_notifier(&br_device_notifier); |
@@ -84,7 +85,6 @@ static void __exit br_deinit(void) | |||
84 | synchronize_net(); | 85 | synchronize_net(); |
85 | 86 | ||
86 | br_netfilter_fini(); | 87 | br_netfilter_fini(); |
87 | llc_sap_put(br_stp_sap); | ||
88 | br_fdb_get_hook = NULL; | 88 | br_fdb_get_hook = NULL; |
89 | br_fdb_put_hook = NULL; | 89 | br_fdb_put_hook = NULL; |
90 | 90 | ||
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index bf7787395fe0..d9449df7cad5 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Lennert Buytenhek <buytenh@gnu.org> | 6 | * Lennert Buytenhek <buytenh@gnu.org> |
7 | * | 7 | * |
8 | * $Id: br_device.c,v 1.6 2001/12/24 00:59:55 davem Exp $ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
@@ -21,12 +19,6 @@ | |||
21 | #include <asm/uaccess.h> | 19 | #include <asm/uaccess.h> |
22 | #include "br_private.h" | 20 | #include "br_private.h" |
23 | 21 | ||
24 | static struct net_device_stats *br_dev_get_stats(struct net_device *dev) | ||
25 | { | ||
26 | struct net_bridge *br = netdev_priv(dev); | ||
27 | return &br->statistics; | ||
28 | } | ||
29 | |||
30 | /* net device transmit always called with no BH (preempt_disabled) */ | 22 | /* net device transmit always called with no BH (preempt_disabled) */ |
31 | int br_dev_xmit(struct sk_buff *skb, struct net_device *dev) | 23 | int br_dev_xmit(struct sk_buff *skb, struct net_device *dev) |
32 | { | 24 | { |
@@ -34,8 +26,8 @@ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
34 | const unsigned char *dest = skb->data; | 26 | const unsigned char *dest = skb->data; |
35 | struct net_bridge_fdb_entry *dst; | 27 | struct net_bridge_fdb_entry *dst; |
36 | 28 | ||
37 | br->statistics.tx_packets++; | 29 | dev->stats.tx_packets++; |
38 | br->statistics.tx_bytes += skb->len; | 30 | dev->stats.tx_bytes += skb->len; |
39 | 31 | ||
40 | skb_reset_mac_header(skb); | 32 | skb_reset_mac_header(skb); |
41 | skb_pull(skb, ETH_HLEN); | 33 | skb_pull(skb, ETH_HLEN); |
@@ -95,6 +87,7 @@ static int br_set_mac_address(struct net_device *dev, void *p) | |||
95 | spin_lock_bh(&br->lock); | 87 | spin_lock_bh(&br->lock); |
96 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); | 88 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); |
97 | br_stp_change_bridge_id(br, addr->sa_data); | 89 | br_stp_change_bridge_id(br, addr->sa_data); |
90 | br->flags |= BR_SET_MAC_ADDR; | ||
98 | spin_unlock_bh(&br->lock); | 91 | spin_unlock_bh(&br->lock); |
99 | 92 | ||
100 | return 0; | 93 | return 0; |
@@ -161,7 +154,6 @@ void br_dev_setup(struct net_device *dev) | |||
161 | ether_setup(dev); | 154 | ether_setup(dev); |
162 | 155 | ||
163 | dev->do_ioctl = br_dev_ioctl; | 156 | dev->do_ioctl = br_dev_ioctl; |
164 | dev->get_stats = br_dev_get_stats; | ||
165 | dev->hard_start_xmit = br_dev_xmit; | 157 | dev->hard_start_xmit = br_dev_xmit; |
166 | dev->open = br_dev_open; | 158 | dev->open = br_dev_open; |
167 | dev->set_multicast_list = br_dev_set_multicast_list; | 159 | dev->set_multicast_list = br_dev_set_multicast_list; |
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 142060f02054..a48f5efdb6bf 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Lennert Buytenhek <buytenh@gnu.org> | 6 | * Lennert Buytenhek <buytenh@gnu.org> |
7 | * | 7 | * |
8 | * $Id: br_fdb.c,v 1.6 2002/01/17 00:57:07 davem Exp $ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index bdd7c35c3c7b..bdd9ccea17ce 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Lennert Buytenhek <buytenh@gnu.org> | 6 | * Lennert Buytenhek <buytenh@gnu.org> |
7 | * | 7 | * |
8 | * $Id: br_forward.c,v 1.4 2001/08/14 22:05:57 davem Exp $ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
@@ -91,7 +89,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) | |||
91 | /* called with rcu_read_lock */ | 89 | /* called with rcu_read_lock */ |
92 | void br_forward(const struct net_bridge_port *to, struct sk_buff *skb) | 90 | void br_forward(const struct net_bridge_port *to, struct sk_buff *skb) |
93 | { | 91 | { |
94 | if (should_deliver(to, skb)) { | 92 | if (!skb_warn_if_lro(skb) && should_deliver(to, skb)) { |
95 | __br_forward(to, skb); | 93 | __br_forward(to, skb); |
96 | return; | 94 | return; |
97 | } | 95 | } |
@@ -115,7 +113,7 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb, | |||
115 | struct sk_buff *skb2; | 113 | struct sk_buff *skb2; |
116 | 114 | ||
117 | if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) { | 115 | if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) { |
118 | br->statistics.tx_dropped++; | 116 | br->dev->stats.tx_dropped++; |
119 | kfree_skb(skb); | 117 | kfree_skb(skb); |
120 | return; | 118 | return; |
121 | } | 119 | } |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index f38cc5317b88..a072ea5ca6f5 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Lennert Buytenhek <buytenh@gnu.org> | 6 | * Lennert Buytenhek <buytenh@gnu.org> |
7 | * | 7 | * |
8 | * $Id: br_if.c,v 1.7 2001/12/24 00:59:55 davem Exp $ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
@@ -375,6 +373,10 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) | |||
375 | if (IS_ERR(p)) | 373 | if (IS_ERR(p)) |
376 | return PTR_ERR(p); | 374 | return PTR_ERR(p); |
377 | 375 | ||
376 | err = dev_set_promiscuity(dev, 1); | ||
377 | if (err) | ||
378 | goto put_back; | ||
379 | |||
378 | err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj), | 380 | err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj), |
379 | SYSFS_BRIDGE_PORT_ATTR); | 381 | SYSFS_BRIDGE_PORT_ATTR); |
380 | if (err) | 382 | if (err) |
@@ -389,7 +391,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) | |||
389 | goto err2; | 391 | goto err2; |
390 | 392 | ||
391 | rcu_assign_pointer(dev->br_port, p); | 393 | rcu_assign_pointer(dev->br_port, p); |
392 | dev_set_promiscuity(dev, 1); | 394 | dev_disable_lro(dev); |
393 | 395 | ||
394 | list_add_rcu(&p->list, &br->port_list); | 396 | list_add_rcu(&p->list, &br->port_list); |
395 | 397 | ||
@@ -413,12 +415,12 @@ err2: | |||
413 | br_fdb_delete_by_port(br, p, 1); | 415 | br_fdb_delete_by_port(br, p, 1); |
414 | err1: | 416 | err1: |
415 | kobject_del(&p->kobj); | 417 | kobject_del(&p->kobj); |
416 | goto put_back; | ||
417 | err0: | 418 | err0: |
418 | kobject_put(&p->kobj); | 419 | kobject_put(&p->kobj); |
419 | 420 | dev_set_promiscuity(dev, -1); | |
420 | put_back: | 421 | put_back: |
421 | dev_put(dev); | 422 | dev_put(dev); |
423 | kfree(p); | ||
422 | return err; | 424 | return err; |
423 | } | 425 | } |
424 | 426 | ||
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 255c00f60ce7..30b88777c3df 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Lennert Buytenhek <buytenh@gnu.org> | 6 | * Lennert Buytenhek <buytenh@gnu.org> |
7 | * | 7 | * |
8 | * $Id: br_input.c,v 1.10 2001/12/24 04:50:20 davem Exp $ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
@@ -24,13 +22,13 @@ const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; | |||
24 | 22 | ||
25 | static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb) | 23 | static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb) |
26 | { | 24 | { |
27 | struct net_device *indev; | 25 | struct net_device *indev, *brdev = br->dev; |
28 | 26 | ||
29 | br->statistics.rx_packets++; | 27 | brdev->stats.rx_packets++; |
30 | br->statistics.rx_bytes += skb->len; | 28 | brdev->stats.rx_bytes += skb->len; |
31 | 29 | ||
32 | indev = skb->dev; | 30 | indev = skb->dev; |
33 | skb->dev = br->dev; | 31 | skb->dev = brdev; |
34 | 32 | ||
35 | NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, | 33 | NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, |
36 | netif_receive_skb); | 34 | netif_receive_skb); |
@@ -64,7 +62,7 @@ int br_handle_frame_finish(struct sk_buff *skb) | |||
64 | dst = NULL; | 62 | dst = NULL; |
65 | 63 | ||
66 | if (is_multicast_ether_addr(dest)) { | 64 | if (is_multicast_ether_addr(dest)) { |
67 | br->statistics.multicast++; | 65 | br->dev->stats.multicast++; |
68 | skb2 = skb; | 66 | skb2 = skb; |
69 | } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) { | 67 | } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) { |
70 | skb2 = skb; | 68 | skb2 = skb; |
@@ -136,14 +134,11 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) | |||
136 | if (skb->protocol == htons(ETH_P_PAUSE)) | 134 | if (skb->protocol == htons(ETH_P_PAUSE)) |
137 | goto drop; | 135 | goto drop; |
138 | 136 | ||
139 | /* Process STP BPDU's through normal netif_receive_skb() path */ | 137 | if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, |
140 | if (p->br->stp_enabled != BR_NO_STP) { | 138 | NULL, br_handle_local_finish)) |
141 | if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, | 139 | return NULL; /* frame consumed by filter */ |
142 | NULL, br_handle_local_finish)) | 140 | else |
143 | return NULL; | 141 | return skb; /* continue processing */ |
144 | else | ||
145 | return skb; | ||
146 | } | ||
147 | } | 142 | } |
148 | 143 | ||
149 | switch (p->state) { | 144 | switch (p->state) { |
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index 0655a5f07f58..eeee218eed80 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Lennert Buytenhek <buytenh@gnu.org> | 6 | * Lennert Buytenhek <buytenh@gnu.org> |
7 | * | 7 | * |
8 | * $Id: br_ioctl.c,v 1.4 2000/11/08 05:16:40 davem Exp $ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c index 00644a544e3c..76340bdd052e 100644 --- a/net/bridge/br_notify.c +++ b/net/bridge/br_notify.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Lennert Buytenhek <buytenh@gnu.org> | 6 | * Lennert Buytenhek <buytenh@gnu.org> |
7 | * | 7 | * |
8 | * $Id: br_notify.c,v 1.2 2000/02/21 15:51:34 davem Exp $ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
@@ -37,7 +35,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v | |||
37 | struct net_bridge_port *p = dev->br_port; | 35 | struct net_bridge_port *p = dev->br_port; |
38 | struct net_bridge *br; | 36 | struct net_bridge *br; |
39 | 37 | ||
40 | if (dev_net(dev) != &init_net) | 38 | if (!net_eq(dev_net(dev), &init_net)) |
41 | return NOTIFY_DONE; | 39 | return NOTIFY_DONE; |
42 | 40 | ||
43 | /* not a port of a bridge */ | 41 | /* not a port of a bridge */ |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index c11b554fd109..815ed38925b2 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -4,8 +4,6 @@ | |||
4 | * Authors: | 4 | * Authors: |
5 | * Lennert Buytenhek <buytenh@gnu.org> | 5 | * Lennert Buytenhek <buytenh@gnu.org> |
6 | * | 6 | * |
7 | * $Id: br_private.h,v 1.7 2001/12/24 00:59:55 davem Exp $ | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
11 | * as published by the Free Software Foundation; either version | 9 | * as published by the Free Software Foundation; either version |
@@ -90,11 +88,12 @@ struct net_bridge | |||
90 | spinlock_t lock; | 88 | spinlock_t lock; |
91 | struct list_head port_list; | 89 | struct list_head port_list; |
92 | struct net_device *dev; | 90 | struct net_device *dev; |
93 | struct net_device_stats statistics; | ||
94 | spinlock_t hash_lock; | 91 | spinlock_t hash_lock; |
95 | struct hlist_head hash[BR_HASH_SIZE]; | 92 | struct hlist_head hash[BR_HASH_SIZE]; |
96 | struct list_head age_list; | 93 | struct list_head age_list; |
97 | unsigned long feature_mask; | 94 | unsigned long feature_mask; |
95 | unsigned long flags; | ||
96 | #define BR_SET_MAC_ADDR 0x00000001 | ||
98 | 97 | ||
99 | /* STP */ | 98 | /* STP */ |
100 | bridge_id designated_root; | 99 | bridge_id designated_root; |
@@ -227,8 +226,9 @@ extern void br_stp_set_path_cost(struct net_bridge_port *p, | |||
227 | extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id); | 226 | extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id); |
228 | 227 | ||
229 | /* br_stp_bpdu.c */ | 228 | /* br_stp_bpdu.c */ |
230 | extern int br_stp_rcv(struct sk_buff *skb, struct net_device *dev, | 229 | struct stp_proto; |
231 | struct packet_type *pt, struct net_device *orig_dev); | 230 | extern void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, |
231 | struct net_device *dev); | ||
232 | 232 | ||
233 | /* br_stp_timer.c */ | 233 | /* br_stp_timer.c */ |
234 | extern void br_stp_timer_init(struct net_bridge *br); | 234 | extern void br_stp_timer_init(struct net_bridge *br); |
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h index e29f01ac1adf..8b650f7fbfa0 100644 --- a/net/bridge/br_private_stp.h +++ b/net/bridge/br_private_stp.h | |||
@@ -4,8 +4,6 @@ | |||
4 | * Authors: | 4 | * Authors: |
5 | * Lennert Buytenhek <buytenh@gnu.org> | 5 | * Lennert Buytenhek <buytenh@gnu.org> |
6 | * | 6 | * |
7 | * $Id: br_private_stp.h,v 1.3 2001/02/05 06:03:47 davem Exp $ | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
11 | * as published by the Free Software Foundation; either version | 9 | * as published by the Free Software Foundation; either version |
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index 9e96ffcd29a3..921bbe5cb94a 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Lennert Buytenhek <buytenh@gnu.org> | 6 | * Lennert Buytenhek <buytenh@gnu.org> |
7 | * | 7 | * |
8 | * $Id: br_stp.c,v 1.4 2000/06/19 10:13:35 davem Exp $ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index ddeb6e5d45d6..8b200f96f722 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Lennert Buytenhek <buytenh@gnu.org> | 6 | * Lennert Buytenhek <buytenh@gnu.org> |
7 | * | 7 | * |
8 | * $Id: br_stp_bpdu.c,v 1.3 2001/11/10 02:35:25 davem Exp $ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
@@ -20,6 +18,7 @@ | |||
20 | #include <net/net_namespace.h> | 18 | #include <net/net_namespace.h> |
21 | #include <net/llc.h> | 19 | #include <net/llc.h> |
22 | #include <net/llc_pdu.h> | 20 | #include <net/llc_pdu.h> |
21 | #include <net/stp.h> | ||
23 | #include <asm/unaligned.h> | 22 | #include <asm/unaligned.h> |
24 | 23 | ||
25 | #include "br_private.h" | 24 | #include "br_private.h" |
@@ -133,26 +132,20 @@ void br_send_tcn_bpdu(struct net_bridge_port *p) | |||
133 | * | 132 | * |
134 | * NO locks, but rcu_read_lock (preempt_disabled) | 133 | * NO locks, but rcu_read_lock (preempt_disabled) |
135 | */ | 134 | */ |
136 | int br_stp_rcv(struct sk_buff *skb, struct net_device *dev, | 135 | void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, |
137 | struct packet_type *pt, struct net_device *orig_dev) | 136 | struct net_device *dev) |
138 | { | 137 | { |
139 | const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); | ||
140 | const unsigned char *dest = eth_hdr(skb)->h_dest; | 138 | const unsigned char *dest = eth_hdr(skb)->h_dest; |
141 | struct net_bridge_port *p = rcu_dereference(dev->br_port); | 139 | struct net_bridge_port *p = rcu_dereference(dev->br_port); |
142 | struct net_bridge *br; | 140 | struct net_bridge *br; |
143 | const unsigned char *buf; | 141 | const unsigned char *buf; |
144 | 142 | ||
145 | if (dev_net(dev) != &init_net) | 143 | if (!net_eq(dev_net(dev), &init_net)) |
146 | goto err; | 144 | goto err; |
147 | 145 | ||
148 | if (!p) | 146 | if (!p) |
149 | goto err; | 147 | goto err; |
150 | 148 | ||
151 | if (pdu->ssap != LLC_SAP_BSPAN | ||
152 | || pdu->dsap != LLC_SAP_BSPAN | ||
153 | || pdu->ctrl_1 != LLC_PDU_TYPE_U) | ||
154 | goto err; | ||
155 | |||
156 | if (!pskb_may_pull(skb, 4)) | 149 | if (!pskb_may_pull(skb, 4)) |
157 | goto err; | 150 | goto err; |
158 | 151 | ||
@@ -226,5 +219,4 @@ int br_stp_rcv(struct sk_buff *skb, struct net_device *dev, | |||
226 | spin_unlock(&br->lock); | 219 | spin_unlock(&br->lock); |
227 | err: | 220 | err: |
228 | kfree_skb(skb); | 221 | kfree_skb(skb); |
229 | return 0; | ||
230 | } | 222 | } |
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 1a430eccec9b..9a52ac5b4525 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Lennert Buytenhek <buytenh@gnu.org> | 6 | * Lennert Buytenhek <buytenh@gnu.org> |
7 | * | 7 | * |
8 | * $Id: br_stp_if.c,v 1.4 2001/04/14 21:14:39 davem Exp $ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
@@ -216,6 +214,10 @@ void br_stp_recalculate_bridge_id(struct net_bridge *br) | |||
216 | const unsigned char *addr = br_mac_zero; | 214 | const unsigned char *addr = br_mac_zero; |
217 | struct net_bridge_port *p; | 215 | struct net_bridge_port *p; |
218 | 216 | ||
217 | /* user has chosen a value so keep it */ | ||
218 | if (br->flags & BR_SET_MAC_ADDR) | ||
219 | return; | ||
220 | |||
219 | list_for_each_entry(p, &br->port_list, list) { | 221 | list_for_each_entry(p, &br->port_list, list) { |
220 | if (addr == br_mac_zero || | 222 | if (addr == br_mac_zero || |
221 | memcmp(p->dev->dev_addr, addr, ETH_ALEN) < 0) | 223 | memcmp(p->dev->dev_addr, addr, ETH_ALEN) < 0) |
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c index 77f5255e6915..772a140bfdf0 100644 --- a/net/bridge/br_stp_timer.c +++ b/net/bridge/br_stp_timer.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Lennert Buytenhek <buytenh@gnu.org> | 6 | * Lennert Buytenhek <buytenh@gnu.org> |
7 | * | 7 | * |
8 | * $Id: br_stp_timer.c,v 1.3 2000/05/05 02:17:17 davem Exp $ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig index 7beeefa0f9c0..909479794999 100644 --- a/net/bridge/netfilter/Kconfig +++ b/net/bridge/netfilter/Kconfig | |||
@@ -83,6 +83,15 @@ config BRIDGE_EBT_IP | |||
83 | 83 | ||
84 | To compile it as a module, choose M here. If unsure, say N. | 84 | To compile it as a module, choose M here. If unsure, say N. |
85 | 85 | ||
86 | config BRIDGE_EBT_IP6 | ||
87 | tristate "ebt: IP6 filter support" | ||
88 | depends on BRIDGE_NF_EBTABLES && IPV6 | ||
89 | help | ||
90 | This option adds the IP6 match, which allows basic IPV6 header field | ||
91 | filtering. | ||
92 | |||
93 | To compile it as a module, choose M here. If unsure, say N. | ||
94 | |||
86 | config BRIDGE_EBT_LIMIT | 95 | config BRIDGE_EBT_LIMIT |
87 | tristate "ebt: limit match support" | 96 | tristate "ebt: limit match support" |
88 | depends on BRIDGE_NF_EBTABLES | 97 | depends on BRIDGE_NF_EBTABLES |
@@ -221,7 +230,7 @@ config BRIDGE_EBT_NFLOG | |||
221 | either the old LOG target, the old ULOG target or nfnetlink_log | 230 | either the old LOG target, the old ULOG target or nfnetlink_log |
222 | as backend. | 231 | as backend. |
223 | 232 | ||
224 | This option adds the ulog watcher, that you can use in any rule | 233 | This option adds the nflog watcher, that you can use in any rule |
225 | in any ebtables table. | 234 | in any ebtables table. |
226 | 235 | ||
227 | To compile it as a module, choose M here. If unsure, say N. | 236 | To compile it as a module, choose M here. If unsure, say N. |
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile index 83715d73a503..0718699540b0 100644 --- a/net/bridge/netfilter/Makefile +++ b/net/bridge/netfilter/Makefile | |||
@@ -14,6 +14,7 @@ obj-$(CONFIG_BRIDGE_EBT_802_3) += ebt_802_3.o | |||
14 | obj-$(CONFIG_BRIDGE_EBT_AMONG) += ebt_among.o | 14 | obj-$(CONFIG_BRIDGE_EBT_AMONG) += ebt_among.o |
15 | obj-$(CONFIG_BRIDGE_EBT_ARP) += ebt_arp.o | 15 | obj-$(CONFIG_BRIDGE_EBT_ARP) += ebt_arp.o |
16 | obj-$(CONFIG_BRIDGE_EBT_IP) += ebt_ip.o | 16 | obj-$(CONFIG_BRIDGE_EBT_IP) += ebt_ip.o |
17 | obj-$(CONFIG_BRIDGE_EBT_IP6) += ebt_ip6.o | ||
17 | obj-$(CONFIG_BRIDGE_EBT_LIMIT) += ebt_limit.o | 18 | obj-$(CONFIG_BRIDGE_EBT_LIMIT) += ebt_limit.o |
18 | obj-$(CONFIG_BRIDGE_EBT_MARK) += ebt_mark_m.o | 19 | obj-$(CONFIG_BRIDGE_EBT_MARK) += ebt_mark_m.o |
19 | obj-$(CONFIG_BRIDGE_EBT_PKTTYPE) += ebt_pkttype.o | 20 | obj-$(CONFIG_BRIDGE_EBT_PKTTYPE) += ebt_pkttype.o |
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c new file mode 100644 index 000000000000..36efb3a75249 --- /dev/null +++ b/net/bridge/netfilter/ebt_ip6.c | |||
@@ -0,0 +1,144 @@ | |||
1 | /* | ||
2 | * ebt_ip6 | ||
3 | * | ||
4 | * Authors: | ||
5 | * Manohar Castelino <manohar.r.castelino@intel.com> | ||
6 | * Kuo-Lang Tseng <kuo-lang.tseng@intel.com> | ||
7 | * Jan Engelhardt <jengelh@computergmbh.de> | ||
8 | * | ||
9 | * Summary: | ||
10 | * This is just a modification of the IPv4 code written by | ||
11 | * Bart De Schuymer <bdschuym@pandora.be> | ||
12 | * with the changes required to support IPv6 | ||
13 | * | ||
14 | * Jan, 2008 | ||
15 | */ | ||
16 | |||
17 | #include <linux/netfilter_bridge/ebtables.h> | ||
18 | #include <linux/netfilter_bridge/ebt_ip6.h> | ||
19 | #include <linux/ipv6.h> | ||
20 | #include <net/ipv6.h> | ||
21 | #include <linux/in.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <net/dsfield.h> | ||
24 | |||
25 | struct tcpudphdr { | ||
26 | __be16 src; | ||
27 | __be16 dst; | ||
28 | }; | ||
29 | |||
30 | static int ebt_filter_ip6(const struct sk_buff *skb, | ||
31 | const struct net_device *in, | ||
32 | const struct net_device *out, const void *data, | ||
33 | unsigned int datalen) | ||
34 | { | ||
35 | const struct ebt_ip6_info *info = (struct ebt_ip6_info *)data; | ||
36 | const struct ipv6hdr *ih6; | ||
37 | struct ipv6hdr _ip6h; | ||
38 | const struct tcpudphdr *pptr; | ||
39 | struct tcpudphdr _ports; | ||
40 | struct in6_addr tmp_addr; | ||
41 | int i; | ||
42 | |||
43 | ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h); | ||
44 | if (ih6 == NULL) | ||
45 | return EBT_NOMATCH; | ||
46 | if (info->bitmask & EBT_IP6_TCLASS && | ||
47 | FWINV(info->tclass != ipv6_get_dsfield(ih6), EBT_IP6_TCLASS)) | ||
48 | return EBT_NOMATCH; | ||
49 | for (i = 0; i < 4; i++) | ||
50 | tmp_addr.in6_u.u6_addr32[i] = ih6->saddr.in6_u.u6_addr32[i] & | ||
51 | info->smsk.in6_u.u6_addr32[i]; | ||
52 | if (info->bitmask & EBT_IP6_SOURCE && | ||
53 | FWINV((ipv6_addr_cmp(&tmp_addr, &info->saddr) != 0), | ||
54 | EBT_IP6_SOURCE)) | ||
55 | return EBT_NOMATCH; | ||
56 | for (i = 0; i < 4; i++) | ||
57 | tmp_addr.in6_u.u6_addr32[i] = ih6->daddr.in6_u.u6_addr32[i] & | ||
58 | info->dmsk.in6_u.u6_addr32[i]; | ||
59 | if (info->bitmask & EBT_IP6_DEST && | ||
60 | FWINV((ipv6_addr_cmp(&tmp_addr, &info->daddr) != 0), EBT_IP6_DEST)) | ||
61 | return EBT_NOMATCH; | ||
62 | if (info->bitmask & EBT_IP6_PROTO) { | ||
63 | uint8_t nexthdr = ih6->nexthdr; | ||
64 | int offset_ph; | ||
65 | |||
66 | offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr); | ||
67 | if (offset_ph == -1) | ||
68 | return EBT_NOMATCH; | ||
69 | if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO)) | ||
70 | return EBT_NOMATCH; | ||
71 | if (!(info->bitmask & EBT_IP6_DPORT) && | ||
72 | !(info->bitmask & EBT_IP6_SPORT)) | ||
73 | return EBT_MATCH; | ||
74 | pptr = skb_header_pointer(skb, offset_ph, sizeof(_ports), | ||
75 | &_ports); | ||
76 | if (pptr == NULL) | ||
77 | return EBT_NOMATCH; | ||
78 | if (info->bitmask & EBT_IP6_DPORT) { | ||
79 | u32 dst = ntohs(pptr->dst); | ||
80 | if (FWINV(dst < info->dport[0] || | ||
81 | dst > info->dport[1], EBT_IP6_DPORT)) | ||
82 | return EBT_NOMATCH; | ||
83 | } | ||
84 | if (info->bitmask & EBT_IP6_SPORT) { | ||
85 | u32 src = ntohs(pptr->src); | ||
86 | if (FWINV(src < info->sport[0] || | ||
87 | src > info->sport[1], EBT_IP6_SPORT)) | ||
88 | return EBT_NOMATCH; | ||
89 | } | ||
90 | return EBT_MATCH; | ||
91 | } | ||
92 | return EBT_MATCH; | ||
93 | } | ||
94 | |||
95 | static int ebt_ip6_check(const char *tablename, unsigned int hookmask, | ||
96 | const struct ebt_entry *e, void *data, unsigned int datalen) | ||
97 | { | ||
98 | struct ebt_ip6_info *info = (struct ebt_ip6_info *)data; | ||
99 | |||
100 | if (datalen != EBT_ALIGN(sizeof(struct ebt_ip6_info))) | ||
101 | return -EINVAL; | ||
102 | if (e->ethproto != htons(ETH_P_IPV6) || e->invflags & EBT_IPROTO) | ||
103 | return -EINVAL; | ||
104 | if (info->bitmask & ~EBT_IP6_MASK || info->invflags & ~EBT_IP6_MASK) | ||
105 | return -EINVAL; | ||
106 | if (info->bitmask & (EBT_IP6_DPORT | EBT_IP6_SPORT)) { | ||
107 | if (info->invflags & EBT_IP6_PROTO) | ||
108 | return -EINVAL; | ||
109 | if (info->protocol != IPPROTO_TCP && | ||
110 | info->protocol != IPPROTO_UDP && | ||
111 | info->protocol != IPPROTO_UDPLITE && | ||
112 | info->protocol != IPPROTO_SCTP && | ||
113 | info->protocol != IPPROTO_DCCP) | ||
114 | return -EINVAL; | ||
115 | } | ||
116 | if (info->bitmask & EBT_IP6_DPORT && info->dport[0] > info->dport[1]) | ||
117 | return -EINVAL; | ||
118 | if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1]) | ||
119 | return -EINVAL; | ||
120 | return 0; | ||
121 | } | ||
122 | |||
123 | static struct ebt_match filter_ip6 = | ||
124 | { | ||
125 | .name = EBT_IP6_MATCH, | ||
126 | .match = ebt_filter_ip6, | ||
127 | .check = ebt_ip6_check, | ||
128 | .me = THIS_MODULE, | ||
129 | }; | ||
130 | |||
131 | static int __init ebt_ip6_init(void) | ||
132 | { | ||
133 | return ebt_register_match(&filter_ip6); | ||
134 | } | ||
135 | |||
136 | static void __exit ebt_ip6_fini(void) | ||
137 | { | ||
138 | ebt_unregister_match(&filter_ip6); | ||
139 | } | ||
140 | |||
141 | module_init(ebt_ip6_init); | ||
142 | module_exit(ebt_ip6_fini); | ||
143 | MODULE_DESCRIPTION("Ebtables: IPv6 protocol packet match"); | ||
144 | MODULE_LICENSE("GPL"); | ||
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c index 0b209e4aad0a..2f430d4ae911 100644 --- a/net/bridge/netfilter/ebt_log.c +++ b/net/bridge/netfilter/ebt_log.c | |||
@@ -18,6 +18,9 @@ | |||
18 | #include <linux/if_arp.h> | 18 | #include <linux/if_arp.h> |
19 | #include <linux/spinlock.h> | 19 | #include <linux/spinlock.h> |
20 | #include <net/netfilter/nf_log.h> | 20 | #include <net/netfilter/nf_log.h> |
21 | #include <linux/ipv6.h> | ||
22 | #include <net/ipv6.h> | ||
23 | #include <linux/in6.h> | ||
21 | 24 | ||
22 | static DEFINE_SPINLOCK(ebt_log_lock); | 25 | static DEFINE_SPINLOCK(ebt_log_lock); |
23 | 26 | ||
@@ -58,6 +61,27 @@ static void print_MAC(const unsigned char *p) | |||
58 | printk("%02x%c", *p, i == ETH_ALEN - 1 ? ' ':':'); | 61 | printk("%02x%c", *p, i == ETH_ALEN - 1 ? ' ':':'); |
59 | } | 62 | } |
60 | 63 | ||
64 | static void | ||
65 | print_ports(const struct sk_buff *skb, uint8_t protocol, int offset) | ||
66 | { | ||
67 | if (protocol == IPPROTO_TCP || | ||
68 | protocol == IPPROTO_UDP || | ||
69 | protocol == IPPROTO_UDPLITE || | ||
70 | protocol == IPPROTO_SCTP || | ||
71 | protocol == IPPROTO_DCCP) { | ||
72 | const struct tcpudphdr *pptr; | ||
73 | struct tcpudphdr _ports; | ||
74 | |||
75 | pptr = skb_header_pointer(skb, offset, | ||
76 | sizeof(_ports), &_ports); | ||
77 | if (pptr == NULL) { | ||
78 | printk(" INCOMPLETE TCP/UDP header"); | ||
79 | return; | ||
80 | } | ||
81 | printk(" SPT=%u DPT=%u", ntohs(pptr->src), ntohs(pptr->dst)); | ||
82 | } | ||
83 | } | ||
84 | |||
61 | #define myNIPQUAD(a) a[0], a[1], a[2], a[3] | 85 | #define myNIPQUAD(a) a[0], a[1], a[2], a[3] |
62 | static void | 86 | static void |
63 | ebt_log_packet(unsigned int pf, unsigned int hooknum, | 87 | ebt_log_packet(unsigned int pf, unsigned int hooknum, |
@@ -95,25 +119,35 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum, | |||
95 | printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u, IP " | 119 | printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u, IP " |
96 | "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr), | 120 | "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr), |
97 | NIPQUAD(ih->daddr), ih->tos, ih->protocol); | 121 | NIPQUAD(ih->daddr), ih->tos, ih->protocol); |
98 | if (ih->protocol == IPPROTO_TCP || | 122 | print_ports(skb, ih->protocol, ih->ihl*4); |
99 | ih->protocol == IPPROTO_UDP || | 123 | goto out; |
100 | ih->protocol == IPPROTO_UDPLITE || | 124 | } |
101 | ih->protocol == IPPROTO_SCTP || | 125 | |
102 | ih->protocol == IPPROTO_DCCP) { | 126 | #if defined(CONFIG_BRIDGE_EBT_IP6) || defined(CONFIG_BRIDGE_EBT_IP6_MODULE) |
103 | const struct tcpudphdr *pptr; | 127 | if ((bitmask & EBT_LOG_IP6) && eth_hdr(skb)->h_proto == |
104 | struct tcpudphdr _ports; | 128 | htons(ETH_P_IPV6)) { |
105 | 129 | const struct ipv6hdr *ih; | |
106 | pptr = skb_header_pointer(skb, ih->ihl*4, | 130 | struct ipv6hdr _iph; |
107 | sizeof(_ports), &_ports); | 131 | uint8_t nexthdr; |
108 | if (pptr == NULL) { | 132 | int offset_ph; |
109 | printk(" INCOMPLETE TCP/UDP header"); | 133 | |
110 | goto out; | 134 | ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); |
111 | } | 135 | if (ih == NULL) { |
112 | printk(" SPT=%u DPT=%u", ntohs(pptr->src), | 136 | printk(" INCOMPLETE IPv6 header"); |
113 | ntohs(pptr->dst)); | 137 | goto out; |
114 | } | 138 | } |
139 | printk(" IPv6 SRC=%x:%x:%x:%x:%x:%x:%x:%x " | ||
140 | "IPv6 DST=%x:%x:%x:%x:%x:%x:%x:%x, IPv6 " | ||
141 | "priority=0x%01X, Next Header=%d", NIP6(ih->saddr), | ||
142 | NIP6(ih->daddr), ih->priority, ih->nexthdr); | ||
143 | nexthdr = ih->nexthdr; | ||
144 | offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr); | ||
145 | if (offset_ph == -1) | ||
146 | goto out; | ||
147 | print_ports(skb, nexthdr, offset_ph); | ||
115 | goto out; | 148 | goto out; |
116 | } | 149 | } |
150 | #endif | ||
117 | 151 | ||
118 | if ((bitmask & EBT_LOG_ARP) && | 152 | if ((bitmask & EBT_LOG_ARP) && |
119 | ((eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) || | 153 | ((eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) || |
diff --git a/net/can/af_can.c b/net/can/af_can.c index 484bbf6dd032..8035fbf526ae 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c | |||
@@ -615,7 +615,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev, | |||
615 | struct can_frame *cf = (struct can_frame *)skb->data; | 615 | struct can_frame *cf = (struct can_frame *)skb->data; |
616 | int matches; | 616 | int matches; |
617 | 617 | ||
618 | if (dev->type != ARPHRD_CAN || dev_net(dev) != &init_net) { | 618 | if (dev->type != ARPHRD_CAN || !net_eq(dev_net(dev), &init_net)) { |
619 | kfree_skb(skb); | 619 | kfree_skb(skb); |
620 | return 0; | 620 | return 0; |
621 | } | 621 | } |
@@ -728,7 +728,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg, | |||
728 | struct net_device *dev = (struct net_device *)data; | 728 | struct net_device *dev = (struct net_device *)data; |
729 | struct dev_rcv_lists *d; | 729 | struct dev_rcv_lists *d; |
730 | 730 | ||
731 | if (dev_net(dev) != &init_net) | 731 | if (!net_eq(dev_net(dev), &init_net)) |
732 | return NOTIFY_DONE; | 732 | return NOTIFY_DONE; |
733 | 733 | ||
734 | if (dev->type != ARPHRD_CAN) | 734 | if (dev->type != ARPHRD_CAN) |
diff --git a/net/can/bcm.c b/net/can/bcm.c index 72c2ce904f83..d0dd382001e2 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
@@ -1303,7 +1303,7 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg, | |||
1303 | struct bcm_op *op; | 1303 | struct bcm_op *op; |
1304 | int notify_enodev = 0; | 1304 | int notify_enodev = 0; |
1305 | 1305 | ||
1306 | if (dev_net(dev) != &init_net) | 1306 | if (!net_eq(dev_net(dev), &init_net)) |
1307 | return NOTIFY_DONE; | 1307 | return NOTIFY_DONE; |
1308 | 1308 | ||
1309 | if (dev->type != ARPHRD_CAN) | 1309 | if (dev->type != ARPHRD_CAN) |
diff --git a/net/can/raw.c b/net/can/raw.c index 3e46ee36a1aa..6e0663faaf9f 100644 --- a/net/can/raw.c +++ b/net/can/raw.c | |||
@@ -210,7 +210,7 @@ static int raw_notifier(struct notifier_block *nb, | |||
210 | struct raw_sock *ro = container_of(nb, struct raw_sock, notifier); | 210 | struct raw_sock *ro = container_of(nb, struct raw_sock, notifier); |
211 | struct sock *sk = &ro->sk; | 211 | struct sock *sk = &ro->sk; |
212 | 212 | ||
213 | if (dev_net(dev) != &init_net) | 213 | if (!net_eq(dev_net(dev), &init_net)) |
214 | return NOTIFY_DONE; | 214 | return NOTIFY_DONE; |
215 | 215 | ||
216 | if (dev->type != ARPHRD_CAN) | 216 | if (dev->type != ARPHRD_CAN) |
diff --git a/net/compat.c b/net/compat.c index c823f6f290cb..6e1b03b51933 100644 --- a/net/compat.c +++ b/net/compat.c | |||
@@ -75,7 +75,7 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg) | |||
75 | 75 | ||
76 | /* I've named the args so it is easy to tell whose space the pointers are in. */ | 76 | /* I've named the args so it is easy to tell whose space the pointers are in. */ |
77 | int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, | 77 | int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, |
78 | char *kern_address, int mode) | 78 | struct sockaddr *kern_address, int mode) |
79 | { | 79 | { |
80 | int tot_len; | 80 | int tot_len; |
81 | 81 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index 821cb1628e5e..2eed17bcb2dd 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -90,6 +90,7 @@ | |||
90 | #include <linux/if_ether.h> | 90 | #include <linux/if_ether.h> |
91 | #include <linux/netdevice.h> | 91 | #include <linux/netdevice.h> |
92 | #include <linux/etherdevice.h> | 92 | #include <linux/etherdevice.h> |
93 | #include <linux/ethtool.h> | ||
93 | #include <linux/notifier.h> | 94 | #include <linux/notifier.h> |
94 | #include <linux/skbuff.h> | 95 | #include <linux/skbuff.h> |
95 | #include <net/net_namespace.h> | 96 | #include <net/net_namespace.h> |
@@ -120,6 +121,9 @@ | |||
120 | #include <linux/ctype.h> | 121 | #include <linux/ctype.h> |
121 | #include <linux/if_arp.h> | 122 | #include <linux/if_arp.h> |
122 | #include <linux/if_vlan.h> | 123 | #include <linux/if_vlan.h> |
124 | #include <linux/ip.h> | ||
125 | #include <linux/ipv6.h> | ||
126 | #include <linux/in.h> | ||
123 | 127 | ||
124 | #include "net-sysfs.h" | 128 | #include "net-sysfs.h" |
125 | 129 | ||
@@ -257,7 +261,7 @@ DEFINE_PER_CPU(struct softnet_data, softnet_data); | |||
257 | 261 | ||
258 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 262 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
259 | /* | 263 | /* |
260 | * register_netdevice() inits dev->_xmit_lock and sets lockdep class | 264 | * register_netdevice() inits txq->_xmit_lock and sets lockdep class |
261 | * according to dev->type | 265 | * according to dev->type |
262 | */ | 266 | */ |
263 | static const unsigned short netdev_lock_type[] = | 267 | static const unsigned short netdev_lock_type[] = |
@@ -961,6 +965,12 @@ void netdev_state_change(struct net_device *dev) | |||
961 | } | 965 | } |
962 | } | 966 | } |
963 | 967 | ||
968 | void netdev_bonding_change(struct net_device *dev) | ||
969 | { | ||
970 | call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev); | ||
971 | } | ||
972 | EXPORT_SYMBOL(netdev_bonding_change); | ||
973 | |||
964 | /** | 974 | /** |
965 | * dev_load - load a network module | 975 | * dev_load - load a network module |
966 | * @net: the applicable net namespace | 976 | * @net: the applicable net namespace |
@@ -1117,6 +1127,29 @@ int dev_close(struct net_device *dev) | |||
1117 | } | 1127 | } |
1118 | 1128 | ||
1119 | 1129 | ||
1130 | /** | ||
1131 | * dev_disable_lro - disable Large Receive Offload on a device | ||
1132 | * @dev: device | ||
1133 | * | ||
1134 | * Disable Large Receive Offload (LRO) on a net device. Must be | ||
1135 | * called under RTNL. This is needed if received packets may be | ||
1136 | * forwarded to another interface. | ||
1137 | */ | ||
1138 | void dev_disable_lro(struct net_device *dev) | ||
1139 | { | ||
1140 | if (dev->ethtool_ops && dev->ethtool_ops->get_flags && | ||
1141 | dev->ethtool_ops->set_flags) { | ||
1142 | u32 flags = dev->ethtool_ops->get_flags(dev); | ||
1143 | if (flags & ETH_FLAG_LRO) { | ||
1144 | flags &= ~ETH_FLAG_LRO; | ||
1145 | dev->ethtool_ops->set_flags(dev, flags); | ||
1146 | } | ||
1147 | } | ||
1148 | WARN_ON(dev->features & NETIF_F_LRO); | ||
1149 | } | ||
1150 | EXPORT_SYMBOL(dev_disable_lro); | ||
1151 | |||
1152 | |||
1120 | static int dev_boot_phase = 1; | 1153 | static int dev_boot_phase = 1; |
1121 | 1154 | ||
1122 | /* | 1155 | /* |
@@ -1290,16 +1323,18 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) | |||
1290 | } | 1323 | } |
1291 | 1324 | ||
1292 | 1325 | ||
1293 | void __netif_schedule(struct net_device *dev) | 1326 | void __netif_schedule(struct Qdisc *q) |
1294 | { | 1327 | { |
1295 | if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) { | 1328 | BUG_ON(q == &noop_qdisc); |
1296 | unsigned long flags; | 1329 | |
1330 | if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) { | ||
1297 | struct softnet_data *sd; | 1331 | struct softnet_data *sd; |
1332 | unsigned long flags; | ||
1298 | 1333 | ||
1299 | local_irq_save(flags); | 1334 | local_irq_save(flags); |
1300 | sd = &__get_cpu_var(softnet_data); | 1335 | sd = &__get_cpu_var(softnet_data); |
1301 | dev->next_sched = sd->output_queue; | 1336 | q->next_sched = sd->output_queue; |
1302 | sd->output_queue = dev; | 1337 | sd->output_queue = q; |
1303 | raise_softirq_irqoff(NET_TX_SOFTIRQ); | 1338 | raise_softirq_irqoff(NET_TX_SOFTIRQ); |
1304 | local_irq_restore(flags); | 1339 | local_irq_restore(flags); |
1305 | } | 1340 | } |
@@ -1566,7 +1601,8 @@ static int dev_gso_segment(struct sk_buff *skb) | |||
1566 | return 0; | 1601 | return 0; |
1567 | } | 1602 | } |
1568 | 1603 | ||
1569 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1604 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
1605 | struct netdev_queue *txq) | ||
1570 | { | 1606 | { |
1571 | if (likely(!skb->next)) { | 1607 | if (likely(!skb->next)) { |
1572 | if (!list_empty(&ptype_all)) | 1608 | if (!list_empty(&ptype_all)) |
@@ -1595,9 +1631,7 @@ gso: | |||
1595 | skb->next = nskb; | 1631 | skb->next = nskb; |
1596 | return rc; | 1632 | return rc; |
1597 | } | 1633 | } |
1598 | if (unlikely((netif_queue_stopped(dev) || | 1634 | if (unlikely(netif_tx_queue_stopped(txq) && skb->next)) |
1599 | netif_subqueue_stopped(dev, skb)) && | ||
1600 | skb->next)) | ||
1601 | return NETDEV_TX_BUSY; | 1635 | return NETDEV_TX_BUSY; |
1602 | } while (skb->next); | 1636 | } while (skb->next); |
1603 | 1637 | ||
@@ -1634,9 +1668,71 @@ out_kfree_skb: | |||
1634 | * --BLG | 1668 | * --BLG |
1635 | */ | 1669 | */ |
1636 | 1670 | ||
1671 | static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb) | ||
1672 | { | ||
1673 | u32 *addr, *ports, hash, ihl; | ||
1674 | u8 ip_proto; | ||
1675 | int alen; | ||
1676 | |||
1677 | switch (skb->protocol) { | ||
1678 | case __constant_htons(ETH_P_IP): | ||
1679 | ip_proto = ip_hdr(skb)->protocol; | ||
1680 | addr = &ip_hdr(skb)->saddr; | ||
1681 | ihl = ip_hdr(skb)->ihl; | ||
1682 | alen = 2; | ||
1683 | break; | ||
1684 | case __constant_htons(ETH_P_IPV6): | ||
1685 | ip_proto = ipv6_hdr(skb)->nexthdr; | ||
1686 | addr = &ipv6_hdr(skb)->saddr.s6_addr32[0]; | ||
1687 | ihl = (40 >> 2); | ||
1688 | alen = 8; | ||
1689 | break; | ||
1690 | default: | ||
1691 | return 0; | ||
1692 | } | ||
1693 | |||
1694 | ports = (u32 *) (skb_network_header(skb) + (ihl * 4)); | ||
1695 | |||
1696 | hash = 0; | ||
1697 | while (alen--) | ||
1698 | hash ^= *addr++; | ||
1699 | |||
1700 | switch (ip_proto) { | ||
1701 | case IPPROTO_TCP: | ||
1702 | case IPPROTO_UDP: | ||
1703 | case IPPROTO_DCCP: | ||
1704 | case IPPROTO_ESP: | ||
1705 | case IPPROTO_AH: | ||
1706 | case IPPROTO_SCTP: | ||
1707 | case IPPROTO_UDPLITE: | ||
1708 | hash ^= *ports; | ||
1709 | break; | ||
1710 | |||
1711 | default: | ||
1712 | break; | ||
1713 | } | ||
1714 | |||
1715 | return hash % dev->real_num_tx_queues; | ||
1716 | } | ||
1717 | |||
1718 | static struct netdev_queue *dev_pick_tx(struct net_device *dev, | ||
1719 | struct sk_buff *skb) | ||
1720 | { | ||
1721 | u16 queue_index = 0; | ||
1722 | |||
1723 | if (dev->select_queue) | ||
1724 | queue_index = dev->select_queue(dev, skb); | ||
1725 | else if (dev->real_num_tx_queues > 1) | ||
1726 | queue_index = simple_tx_hash(dev, skb); | ||
1727 | |||
1728 | skb_set_queue_mapping(skb, queue_index); | ||
1729 | return netdev_get_tx_queue(dev, queue_index); | ||
1730 | } | ||
1731 | |||
1637 | int dev_queue_xmit(struct sk_buff *skb) | 1732 | int dev_queue_xmit(struct sk_buff *skb) |
1638 | { | 1733 | { |
1639 | struct net_device *dev = skb->dev; | 1734 | struct net_device *dev = skb->dev; |
1735 | struct netdev_queue *txq; | ||
1640 | struct Qdisc *q; | 1736 | struct Qdisc *q; |
1641 | int rc = -ENOMEM; | 1737 | int rc = -ENOMEM; |
1642 | 1738 | ||
@@ -1669,44 +1765,29 @@ int dev_queue_xmit(struct sk_buff *skb) | |||
1669 | } | 1765 | } |
1670 | 1766 | ||
1671 | gso: | 1767 | gso: |
1672 | spin_lock_prefetch(&dev->queue_lock); | ||
1673 | |||
1674 | /* Disable soft irqs for various locks below. Also | 1768 | /* Disable soft irqs for various locks below. Also |
1675 | * stops preemption for RCU. | 1769 | * stops preemption for RCU. |
1676 | */ | 1770 | */ |
1677 | rcu_read_lock_bh(); | 1771 | rcu_read_lock_bh(); |
1678 | 1772 | ||
1679 | /* Updates of qdisc are serialized by queue_lock. | 1773 | txq = dev_pick_tx(dev, skb); |
1680 | * The struct Qdisc which is pointed to by qdisc is now a | 1774 | q = rcu_dereference(txq->qdisc); |
1681 | * rcu structure - it may be accessed without acquiring | ||
1682 | * a lock (but the structure may be stale.) The freeing of the | ||
1683 | * qdisc will be deferred until it's known that there are no | ||
1684 | * more references to it. | ||
1685 | * | ||
1686 | * If the qdisc has an enqueue function, we still need to | ||
1687 | * hold the queue_lock before calling it, since queue_lock | ||
1688 | * also serializes access to the device queue. | ||
1689 | */ | ||
1690 | 1775 | ||
1691 | q = rcu_dereference(dev->qdisc); | ||
1692 | #ifdef CONFIG_NET_CLS_ACT | 1776 | #ifdef CONFIG_NET_CLS_ACT |
1693 | skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); | 1777 | skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); |
1694 | #endif | 1778 | #endif |
1695 | if (q->enqueue) { | 1779 | if (q->enqueue) { |
1696 | /* Grab device queue */ | 1780 | spinlock_t *root_lock = qdisc_root_lock(q); |
1697 | spin_lock(&dev->queue_lock); | 1781 | |
1698 | q = dev->qdisc; | 1782 | spin_lock(root_lock); |
1699 | if (q->enqueue) { | 1783 | |
1700 | /* reset queue_mapping to zero */ | 1784 | rc = qdisc_enqueue_root(skb, q); |
1701 | skb_set_queue_mapping(skb, 0); | 1785 | qdisc_run(q); |
1702 | rc = q->enqueue(skb, q); | 1786 | |
1703 | qdisc_run(dev); | 1787 | spin_unlock(root_lock); |
1704 | spin_unlock(&dev->queue_lock); | 1788 | |
1705 | 1789 | rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc; | |
1706 | rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc; | 1790 | goto out; |
1707 | goto out; | ||
1708 | } | ||
1709 | spin_unlock(&dev->queue_lock); | ||
1710 | } | 1791 | } |
1711 | 1792 | ||
1712 | /* The device has no queue. Common case for software devices: | 1793 | /* The device has no queue. Common case for software devices: |
@@ -1724,19 +1805,18 @@ gso: | |||
1724 | if (dev->flags & IFF_UP) { | 1805 | if (dev->flags & IFF_UP) { |
1725 | int cpu = smp_processor_id(); /* ok because BHs are off */ | 1806 | int cpu = smp_processor_id(); /* ok because BHs are off */ |
1726 | 1807 | ||
1727 | if (dev->xmit_lock_owner != cpu) { | 1808 | if (txq->xmit_lock_owner != cpu) { |
1728 | 1809 | ||
1729 | HARD_TX_LOCK(dev, cpu); | 1810 | HARD_TX_LOCK(dev, txq, cpu); |
1730 | 1811 | ||
1731 | if (!netif_queue_stopped(dev) && | 1812 | if (!netif_tx_queue_stopped(txq)) { |
1732 | !netif_subqueue_stopped(dev, skb)) { | ||
1733 | rc = 0; | 1813 | rc = 0; |
1734 | if (!dev_hard_start_xmit(skb, dev)) { | 1814 | if (!dev_hard_start_xmit(skb, dev, txq)) { |
1735 | HARD_TX_UNLOCK(dev); | 1815 | HARD_TX_UNLOCK(dev, txq); |
1736 | goto out; | 1816 | goto out; |
1737 | } | 1817 | } |
1738 | } | 1818 | } |
1739 | HARD_TX_UNLOCK(dev); | 1819 | HARD_TX_UNLOCK(dev, txq); |
1740 | if (net_ratelimit()) | 1820 | if (net_ratelimit()) |
1741 | printk(KERN_CRIT "Virtual device %s asks to " | 1821 | printk(KERN_CRIT "Virtual device %s asks to " |
1742 | "queue packet!\n", dev->name); | 1822 | "queue packet!\n", dev->name); |
@@ -1880,7 +1960,7 @@ static void net_tx_action(struct softirq_action *h) | |||
1880 | } | 1960 | } |
1881 | 1961 | ||
1882 | if (sd->output_queue) { | 1962 | if (sd->output_queue) { |
1883 | struct net_device *head; | 1963 | struct Qdisc *head; |
1884 | 1964 | ||
1885 | local_irq_disable(); | 1965 | local_irq_disable(); |
1886 | head = sd->output_queue; | 1966 | head = sd->output_queue; |
@@ -1888,17 +1968,20 @@ static void net_tx_action(struct softirq_action *h) | |||
1888 | local_irq_enable(); | 1968 | local_irq_enable(); |
1889 | 1969 | ||
1890 | while (head) { | 1970 | while (head) { |
1891 | struct net_device *dev = head; | 1971 | struct Qdisc *q = head; |
1972 | spinlock_t *root_lock; | ||
1973 | |||
1892 | head = head->next_sched; | 1974 | head = head->next_sched; |
1893 | 1975 | ||
1894 | smp_mb__before_clear_bit(); | 1976 | smp_mb__before_clear_bit(); |
1895 | clear_bit(__LINK_STATE_SCHED, &dev->state); | 1977 | clear_bit(__QDISC_STATE_SCHED, &q->state); |
1896 | 1978 | ||
1897 | if (spin_trylock(&dev->queue_lock)) { | 1979 | root_lock = qdisc_root_lock(q); |
1898 | qdisc_run(dev); | 1980 | if (spin_trylock(root_lock)) { |
1899 | spin_unlock(&dev->queue_lock); | 1981 | qdisc_run(q); |
1982 | spin_unlock(root_lock); | ||
1900 | } else { | 1983 | } else { |
1901 | netif_schedule(dev); | 1984 | __netif_schedule(q); |
1902 | } | 1985 | } |
1903 | } | 1986 | } |
1904 | } | 1987 | } |
@@ -1979,10 +2062,11 @@ static inline struct sk_buff *handle_macvlan(struct sk_buff *skb, | |||
1979 | */ | 2062 | */ |
1980 | static int ing_filter(struct sk_buff *skb) | 2063 | static int ing_filter(struct sk_buff *skb) |
1981 | { | 2064 | { |
1982 | struct Qdisc *q; | ||
1983 | struct net_device *dev = skb->dev; | 2065 | struct net_device *dev = skb->dev; |
1984 | int result = TC_ACT_OK; | ||
1985 | u32 ttl = G_TC_RTTL(skb->tc_verd); | 2066 | u32 ttl = G_TC_RTTL(skb->tc_verd); |
2067 | struct netdev_queue *rxq; | ||
2068 | int result = TC_ACT_OK; | ||
2069 | struct Qdisc *q; | ||
1986 | 2070 | ||
1987 | if (MAX_RED_LOOP < ttl++) { | 2071 | if (MAX_RED_LOOP < ttl++) { |
1988 | printk(KERN_WARNING | 2072 | printk(KERN_WARNING |
@@ -1994,10 +2078,14 @@ static int ing_filter(struct sk_buff *skb) | |||
1994 | skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); | 2078 | skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); |
1995 | skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); | 2079 | skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); |
1996 | 2080 | ||
1997 | spin_lock(&dev->ingress_lock); | 2081 | rxq = &dev->rx_queue; |
1998 | if ((q = dev->qdisc_ingress) != NULL) | 2082 | |
1999 | result = q->enqueue(skb, q); | 2083 | q = rxq->qdisc; |
2000 | spin_unlock(&dev->ingress_lock); | 2084 | if (q) { |
2085 | spin_lock(qdisc_lock(q)); | ||
2086 | result = qdisc_enqueue_root(skb, q); | ||
2087 | spin_unlock(qdisc_lock(q)); | ||
2088 | } | ||
2001 | 2089 | ||
2002 | return result; | 2090 | return result; |
2003 | } | 2091 | } |
@@ -2006,7 +2094,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb, | |||
2006 | struct packet_type **pt_prev, | 2094 | struct packet_type **pt_prev, |
2007 | int *ret, struct net_device *orig_dev) | 2095 | int *ret, struct net_device *orig_dev) |
2008 | { | 2096 | { |
2009 | if (!skb->dev->qdisc_ingress) | 2097 | if (!skb->dev->rx_queue.qdisc) |
2010 | goto out; | 2098 | goto out; |
2011 | 2099 | ||
2012 | if (*pt_prev) { | 2100 | if (*pt_prev) { |
@@ -2030,6 +2118,33 @@ out: | |||
2030 | } | 2118 | } |
2031 | #endif | 2119 | #endif |
2032 | 2120 | ||
2121 | /* | ||
2122 | * netif_nit_deliver - deliver received packets to network taps | ||
2123 | * @skb: buffer | ||
2124 | * | ||
2125 | * This function is used to deliver incoming packets to network | ||
2126 | * taps. It should be used when the normal netif_receive_skb path | ||
2127 | * is bypassed, for example because of VLAN acceleration. | ||
2128 | */ | ||
2129 | void netif_nit_deliver(struct sk_buff *skb) | ||
2130 | { | ||
2131 | struct packet_type *ptype; | ||
2132 | |||
2133 | if (list_empty(&ptype_all)) | ||
2134 | return; | ||
2135 | |||
2136 | skb_reset_network_header(skb); | ||
2137 | skb_reset_transport_header(skb); | ||
2138 | skb->mac_len = skb->network_header - skb->mac_header; | ||
2139 | |||
2140 | rcu_read_lock(); | ||
2141 | list_for_each_entry_rcu(ptype, &ptype_all, list) { | ||
2142 | if (!ptype->dev || ptype->dev == skb->dev) | ||
2143 | deliver_skb(skb, ptype, skb->dev); | ||
2144 | } | ||
2145 | rcu_read_unlock(); | ||
2146 | } | ||
2147 | |||
2033 | /** | 2148 | /** |
2034 | * netif_receive_skb - process receive buffer from network | 2149 | * netif_receive_skb - process receive buffer from network |
2035 | * @skb: buffer to process | 2150 | * @skb: buffer to process |
@@ -2769,16 +2884,29 @@ int netdev_set_master(struct net_device *slave, struct net_device *master) | |||
2769 | return 0; | 2884 | return 0; |
2770 | } | 2885 | } |
2771 | 2886 | ||
2772 | static void __dev_set_promiscuity(struct net_device *dev, int inc) | 2887 | static int __dev_set_promiscuity(struct net_device *dev, int inc) |
2773 | { | 2888 | { |
2774 | unsigned short old_flags = dev->flags; | 2889 | unsigned short old_flags = dev->flags; |
2775 | 2890 | ||
2776 | ASSERT_RTNL(); | 2891 | ASSERT_RTNL(); |
2777 | 2892 | ||
2778 | if ((dev->promiscuity += inc) == 0) | 2893 | dev->flags |= IFF_PROMISC; |
2779 | dev->flags &= ~IFF_PROMISC; | 2894 | dev->promiscuity += inc; |
2780 | else | 2895 | if (dev->promiscuity == 0) { |
2781 | dev->flags |= IFF_PROMISC; | 2896 | /* |
2897 | * Avoid overflow. | ||
2898 | * If inc causes overflow, untouch promisc and return error. | ||
2899 | */ | ||
2900 | if (inc < 0) | ||
2901 | dev->flags &= ~IFF_PROMISC; | ||
2902 | else { | ||
2903 | dev->promiscuity -= inc; | ||
2904 | printk(KERN_WARNING "%s: promiscuity touches roof, " | ||
2905 | "set promiscuity failed, promiscuity feature " | ||
2906 | "of device might be broken.\n", dev->name); | ||
2907 | return -EOVERFLOW; | ||
2908 | } | ||
2909 | } | ||
2782 | if (dev->flags != old_flags) { | 2910 | if (dev->flags != old_flags) { |
2783 | printk(KERN_INFO "device %s %s promiscuous mode\n", | 2911 | printk(KERN_INFO "device %s %s promiscuous mode\n", |
2784 | dev->name, (dev->flags & IFF_PROMISC) ? "entered" : | 2912 | dev->name, (dev->flags & IFF_PROMISC) ? "entered" : |
@@ -2796,6 +2924,7 @@ static void __dev_set_promiscuity(struct net_device *dev, int inc) | |||
2796 | if (dev->change_rx_flags) | 2924 | if (dev->change_rx_flags) |
2797 | dev->change_rx_flags(dev, IFF_PROMISC); | 2925 | dev->change_rx_flags(dev, IFF_PROMISC); |
2798 | } | 2926 | } |
2927 | return 0; | ||
2799 | } | 2928 | } |
2800 | 2929 | ||
2801 | /** | 2930 | /** |
@@ -2807,14 +2936,19 @@ static void __dev_set_promiscuity(struct net_device *dev, int inc) | |||
2807 | * remains above zero the interface remains promiscuous. Once it hits zero | 2936 | * remains above zero the interface remains promiscuous. Once it hits zero |
2808 | * the device reverts back to normal filtering operation. A negative inc | 2937 | * the device reverts back to normal filtering operation. A negative inc |
2809 | * value is used to drop promiscuity on the device. | 2938 | * value is used to drop promiscuity on the device. |
2939 | * Return 0 if successful or a negative errno code on error. | ||
2810 | */ | 2940 | */ |
2811 | void dev_set_promiscuity(struct net_device *dev, int inc) | 2941 | int dev_set_promiscuity(struct net_device *dev, int inc) |
2812 | { | 2942 | { |
2813 | unsigned short old_flags = dev->flags; | 2943 | unsigned short old_flags = dev->flags; |
2944 | int err; | ||
2814 | 2945 | ||
2815 | __dev_set_promiscuity(dev, inc); | 2946 | err = __dev_set_promiscuity(dev, inc); |
2947 | if (err < 0) | ||
2948 | return err; | ||
2816 | if (dev->flags != old_flags) | 2949 | if (dev->flags != old_flags) |
2817 | dev_set_rx_mode(dev); | 2950 | dev_set_rx_mode(dev); |
2951 | return err; | ||
2818 | } | 2952 | } |
2819 | 2953 | ||
2820 | /** | 2954 | /** |
@@ -2827,22 +2961,38 @@ void dev_set_promiscuity(struct net_device *dev, int inc) | |||
2827 | * to all interfaces. Once it hits zero the device reverts back to normal | 2961 | * to all interfaces. Once it hits zero the device reverts back to normal |
2828 | * filtering operation. A negative @inc value is used to drop the counter | 2962 | * filtering operation. A negative @inc value is used to drop the counter |
2829 | * when releasing a resource needing all multicasts. | 2963 | * when releasing a resource needing all multicasts. |
2964 | * Return 0 if successful or a negative errno code on error. | ||
2830 | */ | 2965 | */ |
2831 | 2966 | ||
2832 | void dev_set_allmulti(struct net_device *dev, int inc) | 2967 | int dev_set_allmulti(struct net_device *dev, int inc) |
2833 | { | 2968 | { |
2834 | unsigned short old_flags = dev->flags; | 2969 | unsigned short old_flags = dev->flags; |
2835 | 2970 | ||
2836 | ASSERT_RTNL(); | 2971 | ASSERT_RTNL(); |
2837 | 2972 | ||
2838 | dev->flags |= IFF_ALLMULTI; | 2973 | dev->flags |= IFF_ALLMULTI; |
2839 | if ((dev->allmulti += inc) == 0) | 2974 | dev->allmulti += inc; |
2840 | dev->flags &= ~IFF_ALLMULTI; | 2975 | if (dev->allmulti == 0) { |
2976 | /* | ||
2977 | * Avoid overflow. | ||
2978 | * If inc causes overflow, untouch allmulti and return error. | ||
2979 | */ | ||
2980 | if (inc < 0) | ||
2981 | dev->flags &= ~IFF_ALLMULTI; | ||
2982 | else { | ||
2983 | dev->allmulti -= inc; | ||
2984 | printk(KERN_WARNING "%s: allmulti touches roof, " | ||
2985 | "set allmulti failed, allmulti feature of " | ||
2986 | "device might be broken.\n", dev->name); | ||
2987 | return -EOVERFLOW; | ||
2988 | } | ||
2989 | } | ||
2841 | if (dev->flags ^ old_flags) { | 2990 | if (dev->flags ^ old_flags) { |
2842 | if (dev->change_rx_flags) | 2991 | if (dev->change_rx_flags) |
2843 | dev->change_rx_flags(dev, IFF_ALLMULTI); | 2992 | dev->change_rx_flags(dev, IFF_ALLMULTI); |
2844 | dev_set_rx_mode(dev); | 2993 | dev_set_rx_mode(dev); |
2845 | } | 2994 | } |
2995 | return 0; | ||
2846 | } | 2996 | } |
2847 | 2997 | ||
2848 | /* | 2998 | /* |
@@ -2881,9 +3031,9 @@ void __dev_set_rx_mode(struct net_device *dev) | |||
2881 | 3031 | ||
2882 | void dev_set_rx_mode(struct net_device *dev) | 3032 | void dev_set_rx_mode(struct net_device *dev) |
2883 | { | 3033 | { |
2884 | netif_tx_lock_bh(dev); | 3034 | netif_addr_lock_bh(dev); |
2885 | __dev_set_rx_mode(dev); | 3035 | __dev_set_rx_mode(dev); |
2886 | netif_tx_unlock_bh(dev); | 3036 | netif_addr_unlock_bh(dev); |
2887 | } | 3037 | } |
2888 | 3038 | ||
2889 | int __dev_addr_delete(struct dev_addr_list **list, int *count, | 3039 | int __dev_addr_delete(struct dev_addr_list **list, int *count, |
@@ -2961,11 +3111,11 @@ int dev_unicast_delete(struct net_device *dev, void *addr, int alen) | |||
2961 | 3111 | ||
2962 | ASSERT_RTNL(); | 3112 | ASSERT_RTNL(); |
2963 | 3113 | ||
2964 | netif_tx_lock_bh(dev); | 3114 | netif_addr_lock_bh(dev); |
2965 | err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0); | 3115 | err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0); |
2966 | if (!err) | 3116 | if (!err) |
2967 | __dev_set_rx_mode(dev); | 3117 | __dev_set_rx_mode(dev); |
2968 | netif_tx_unlock_bh(dev); | 3118 | netif_addr_unlock_bh(dev); |
2969 | return err; | 3119 | return err; |
2970 | } | 3120 | } |
2971 | EXPORT_SYMBOL(dev_unicast_delete); | 3121 | EXPORT_SYMBOL(dev_unicast_delete); |
@@ -2987,11 +3137,11 @@ int dev_unicast_add(struct net_device *dev, void *addr, int alen) | |||
2987 | 3137 | ||
2988 | ASSERT_RTNL(); | 3138 | ASSERT_RTNL(); |
2989 | 3139 | ||
2990 | netif_tx_lock_bh(dev); | 3140 | netif_addr_lock_bh(dev); |
2991 | err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0); | 3141 | err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0); |
2992 | if (!err) | 3142 | if (!err) |
2993 | __dev_set_rx_mode(dev); | 3143 | __dev_set_rx_mode(dev); |
2994 | netif_tx_unlock_bh(dev); | 3144 | netif_addr_unlock_bh(dev); |
2995 | return err; | 3145 | return err; |
2996 | } | 3146 | } |
2997 | EXPORT_SYMBOL(dev_unicast_add); | 3147 | EXPORT_SYMBOL(dev_unicast_add); |
@@ -3058,12 +3208,12 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from) | |||
3058 | { | 3208 | { |
3059 | int err = 0; | 3209 | int err = 0; |
3060 | 3210 | ||
3061 | netif_tx_lock_bh(to); | 3211 | netif_addr_lock_bh(to); |
3062 | err = __dev_addr_sync(&to->uc_list, &to->uc_count, | 3212 | err = __dev_addr_sync(&to->uc_list, &to->uc_count, |
3063 | &from->uc_list, &from->uc_count); | 3213 | &from->uc_list, &from->uc_count); |
3064 | if (!err) | 3214 | if (!err) |
3065 | __dev_set_rx_mode(to); | 3215 | __dev_set_rx_mode(to); |
3066 | netif_tx_unlock_bh(to); | 3216 | netif_addr_unlock_bh(to); |
3067 | return err; | 3217 | return err; |
3068 | } | 3218 | } |
3069 | EXPORT_SYMBOL(dev_unicast_sync); | 3219 | EXPORT_SYMBOL(dev_unicast_sync); |
@@ -3079,15 +3229,15 @@ EXPORT_SYMBOL(dev_unicast_sync); | |||
3079 | */ | 3229 | */ |
3080 | void dev_unicast_unsync(struct net_device *to, struct net_device *from) | 3230 | void dev_unicast_unsync(struct net_device *to, struct net_device *from) |
3081 | { | 3231 | { |
3082 | netif_tx_lock_bh(from); | 3232 | netif_addr_lock_bh(from); |
3083 | netif_tx_lock_bh(to); | 3233 | netif_addr_lock(to); |
3084 | 3234 | ||
3085 | __dev_addr_unsync(&to->uc_list, &to->uc_count, | 3235 | __dev_addr_unsync(&to->uc_list, &to->uc_count, |
3086 | &from->uc_list, &from->uc_count); | 3236 | &from->uc_list, &from->uc_count); |
3087 | __dev_set_rx_mode(to); | 3237 | __dev_set_rx_mode(to); |
3088 | 3238 | ||
3089 | netif_tx_unlock_bh(to); | 3239 | netif_addr_unlock(to); |
3090 | netif_tx_unlock_bh(from); | 3240 | netif_addr_unlock_bh(from); |
3091 | } | 3241 | } |
3092 | EXPORT_SYMBOL(dev_unicast_unsync); | 3242 | EXPORT_SYMBOL(dev_unicast_unsync); |
3093 | 3243 | ||
@@ -3107,7 +3257,7 @@ static void __dev_addr_discard(struct dev_addr_list **list) | |||
3107 | 3257 | ||
3108 | static void dev_addr_discard(struct net_device *dev) | 3258 | static void dev_addr_discard(struct net_device *dev) |
3109 | { | 3259 | { |
3110 | netif_tx_lock_bh(dev); | 3260 | netif_addr_lock_bh(dev); |
3111 | 3261 | ||
3112 | __dev_addr_discard(&dev->uc_list); | 3262 | __dev_addr_discard(&dev->uc_list); |
3113 | dev->uc_count = 0; | 3263 | dev->uc_count = 0; |
@@ -3115,7 +3265,7 @@ static void dev_addr_discard(struct net_device *dev) | |||
3115 | __dev_addr_discard(&dev->mc_list); | 3265 | __dev_addr_discard(&dev->mc_list); |
3116 | dev->mc_count = 0; | 3266 | dev->mc_count = 0; |
3117 | 3267 | ||
3118 | netif_tx_unlock_bh(dev); | 3268 | netif_addr_unlock_bh(dev); |
3119 | } | 3269 | } |
3120 | 3270 | ||
3121 | unsigned dev_get_flags(const struct net_device *dev) | 3271 | unsigned dev_get_flags(const struct net_device *dev) |
@@ -3688,6 +3838,21 @@ static void rollback_registered(struct net_device *dev) | |||
3688 | dev_put(dev); | 3838 | dev_put(dev); |
3689 | } | 3839 | } |
3690 | 3840 | ||
3841 | static void __netdev_init_queue_locks_one(struct net_device *dev, | ||
3842 | struct netdev_queue *dev_queue, | ||
3843 | void *_unused) | ||
3844 | { | ||
3845 | spin_lock_init(&dev_queue->_xmit_lock); | ||
3846 | netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type); | ||
3847 | dev_queue->xmit_lock_owner = -1; | ||
3848 | } | ||
3849 | |||
3850 | static void netdev_init_queue_locks(struct net_device *dev) | ||
3851 | { | ||
3852 | netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL); | ||
3853 | __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL); | ||
3854 | } | ||
3855 | |||
3691 | /** | 3856 | /** |
3692 | * register_netdevice - register a network device | 3857 | * register_netdevice - register a network device |
3693 | * @dev: device to register | 3858 | * @dev: device to register |
@@ -3722,11 +3887,8 @@ int register_netdevice(struct net_device *dev) | |||
3722 | BUG_ON(!dev_net(dev)); | 3887 | BUG_ON(!dev_net(dev)); |
3723 | net = dev_net(dev); | 3888 | net = dev_net(dev); |
3724 | 3889 | ||
3725 | spin_lock_init(&dev->queue_lock); | 3890 | spin_lock_init(&dev->addr_list_lock); |
3726 | spin_lock_init(&dev->_xmit_lock); | 3891 | netdev_init_queue_locks(dev); |
3727 | netdev_set_lockdep_class(&dev->_xmit_lock, dev->type); | ||
3728 | dev->xmit_lock_owner = -1; | ||
3729 | spin_lock_init(&dev->ingress_lock); | ||
3730 | 3892 | ||
3731 | dev->iflink = -1; | 3893 | dev->iflink = -1; |
3732 | 3894 | ||
@@ -4007,6 +4169,19 @@ static struct net_device_stats *internal_stats(struct net_device *dev) | |||
4007 | return &dev->stats; | 4169 | return &dev->stats; |
4008 | } | 4170 | } |
4009 | 4171 | ||
4172 | static void netdev_init_one_queue(struct net_device *dev, | ||
4173 | struct netdev_queue *queue, | ||
4174 | void *_unused) | ||
4175 | { | ||
4176 | queue->dev = dev; | ||
4177 | } | ||
4178 | |||
4179 | static void netdev_init_queues(struct net_device *dev) | ||
4180 | { | ||
4181 | netdev_init_one_queue(dev, &dev->rx_queue, NULL); | ||
4182 | netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); | ||
4183 | } | ||
4184 | |||
4010 | /** | 4185 | /** |
4011 | * alloc_netdev_mq - allocate network device | 4186 | * alloc_netdev_mq - allocate network device |
4012 | * @sizeof_priv: size of private data to allocate space for | 4187 | * @sizeof_priv: size of private data to allocate space for |
@@ -4021,14 +4196,14 @@ static struct net_device_stats *internal_stats(struct net_device *dev) | |||
4021 | struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | 4196 | struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, |
4022 | void (*setup)(struct net_device *), unsigned int queue_count) | 4197 | void (*setup)(struct net_device *), unsigned int queue_count) |
4023 | { | 4198 | { |
4024 | void *p; | 4199 | struct netdev_queue *tx; |
4025 | struct net_device *dev; | 4200 | struct net_device *dev; |
4026 | int alloc_size; | 4201 | int alloc_size; |
4202 | void *p; | ||
4027 | 4203 | ||
4028 | BUG_ON(strlen(name) >= sizeof(dev->name)); | 4204 | BUG_ON(strlen(name) >= sizeof(dev->name)); |
4029 | 4205 | ||
4030 | alloc_size = sizeof(struct net_device) + | 4206 | alloc_size = sizeof(struct net_device); |
4031 | sizeof(struct net_device_subqueue) * (queue_count - 1); | ||
4032 | if (sizeof_priv) { | 4207 | if (sizeof_priv) { |
4033 | /* ensure 32-byte alignment of private area */ | 4208 | /* ensure 32-byte alignment of private area */ |
4034 | alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST; | 4209 | alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST; |
@@ -4043,22 +4218,33 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
4043 | return NULL; | 4218 | return NULL; |
4044 | } | 4219 | } |
4045 | 4220 | ||
4221 | tx = kzalloc(sizeof(struct netdev_queue) * queue_count, GFP_KERNEL); | ||
4222 | if (!tx) { | ||
4223 | printk(KERN_ERR "alloc_netdev: Unable to allocate " | ||
4224 | "tx qdiscs.\n"); | ||
4225 | kfree(p); | ||
4226 | return NULL; | ||
4227 | } | ||
4228 | |||
4046 | dev = (struct net_device *) | 4229 | dev = (struct net_device *) |
4047 | (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); | 4230 | (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); |
4048 | dev->padded = (char *)dev - (char *)p; | 4231 | dev->padded = (char *)dev - (char *)p; |
4049 | dev_net_set(dev, &init_net); | 4232 | dev_net_set(dev, &init_net); |
4050 | 4233 | ||
4234 | dev->_tx = tx; | ||
4235 | dev->num_tx_queues = queue_count; | ||
4236 | dev->real_num_tx_queues = queue_count; | ||
4237 | |||
4051 | if (sizeof_priv) { | 4238 | if (sizeof_priv) { |
4052 | dev->priv = ((char *)dev + | 4239 | dev->priv = ((char *)dev + |
4053 | ((sizeof(struct net_device) + | 4240 | ((sizeof(struct net_device) + NETDEV_ALIGN_CONST) |
4054 | (sizeof(struct net_device_subqueue) * | ||
4055 | (queue_count - 1)) + NETDEV_ALIGN_CONST) | ||
4056 | & ~NETDEV_ALIGN_CONST)); | 4241 | & ~NETDEV_ALIGN_CONST)); |
4057 | } | 4242 | } |
4058 | 4243 | ||
4059 | dev->egress_subqueue_count = queue_count; | ||
4060 | dev->gso_max_size = GSO_MAX_SIZE; | 4244 | dev->gso_max_size = GSO_MAX_SIZE; |
4061 | 4245 | ||
4246 | netdev_init_queues(dev); | ||
4247 | |||
4062 | dev->get_stats = internal_stats; | 4248 | dev->get_stats = internal_stats; |
4063 | netpoll_netdev_init(dev); | 4249 | netpoll_netdev_init(dev); |
4064 | setup(dev); | 4250 | setup(dev); |
@@ -4079,6 +4265,8 @@ void free_netdev(struct net_device *dev) | |||
4079 | { | 4265 | { |
4080 | release_net(dev_net(dev)); | 4266 | release_net(dev_net(dev)); |
4081 | 4267 | ||
4268 | kfree(dev->_tx); | ||
4269 | |||
4082 | /* Compatibility with error handling in drivers */ | 4270 | /* Compatibility with error handling in drivers */ |
4083 | if (dev->reg_state == NETREG_UNINITIALIZED) { | 4271 | if (dev->reg_state == NETREG_UNINITIALIZED) { |
4084 | kfree((char *)dev - dev->padded); | 4272 | kfree((char *)dev - dev->padded); |
@@ -4260,7 +4448,7 @@ static int dev_cpu_callback(struct notifier_block *nfb, | |||
4260 | void *ocpu) | 4448 | void *ocpu) |
4261 | { | 4449 | { |
4262 | struct sk_buff **list_skb; | 4450 | struct sk_buff **list_skb; |
4263 | struct net_device **list_net; | 4451 | struct Qdisc **list_net; |
4264 | struct sk_buff *skb; | 4452 | struct sk_buff *skb; |
4265 | unsigned int cpu, oldcpu = (unsigned long)ocpu; | 4453 | unsigned int cpu, oldcpu = (unsigned long)ocpu; |
4266 | struct softnet_data *sd, *oldsd; | 4454 | struct softnet_data *sd, *oldsd; |
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c index f8a3455f4493..5402b3b38e0d 100644 --- a/net/core/dev_mcast.c +++ b/net/core/dev_mcast.c | |||
@@ -72,7 +72,7 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl) | |||
72 | { | 72 | { |
73 | int err; | 73 | int err; |
74 | 74 | ||
75 | netif_tx_lock_bh(dev); | 75 | netif_addr_lock_bh(dev); |
76 | err = __dev_addr_delete(&dev->mc_list, &dev->mc_count, | 76 | err = __dev_addr_delete(&dev->mc_list, &dev->mc_count, |
77 | addr, alen, glbl); | 77 | addr, alen, glbl); |
78 | if (!err) { | 78 | if (!err) { |
@@ -83,7 +83,7 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl) | |||
83 | 83 | ||
84 | __dev_set_rx_mode(dev); | 84 | __dev_set_rx_mode(dev); |
85 | } | 85 | } |
86 | netif_tx_unlock_bh(dev); | 86 | netif_addr_unlock_bh(dev); |
87 | return err; | 87 | return err; |
88 | } | 88 | } |
89 | 89 | ||
@@ -95,11 +95,11 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl) | |||
95 | { | 95 | { |
96 | int err; | 96 | int err; |
97 | 97 | ||
98 | netif_tx_lock_bh(dev); | 98 | netif_addr_lock_bh(dev); |
99 | err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl); | 99 | err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl); |
100 | if (!err) | 100 | if (!err) |
101 | __dev_set_rx_mode(dev); | 101 | __dev_set_rx_mode(dev); |
102 | netif_tx_unlock_bh(dev); | 102 | netif_addr_unlock_bh(dev); |
103 | return err; | 103 | return err; |
104 | } | 104 | } |
105 | 105 | ||
@@ -119,12 +119,12 @@ int dev_mc_sync(struct net_device *to, struct net_device *from) | |||
119 | { | 119 | { |
120 | int err = 0; | 120 | int err = 0; |
121 | 121 | ||
122 | netif_tx_lock_bh(to); | 122 | netif_addr_lock_bh(to); |
123 | err = __dev_addr_sync(&to->mc_list, &to->mc_count, | 123 | err = __dev_addr_sync(&to->mc_list, &to->mc_count, |
124 | &from->mc_list, &from->mc_count); | 124 | &from->mc_list, &from->mc_count); |
125 | if (!err) | 125 | if (!err) |
126 | __dev_set_rx_mode(to); | 126 | __dev_set_rx_mode(to); |
127 | netif_tx_unlock_bh(to); | 127 | netif_addr_unlock_bh(to); |
128 | 128 | ||
129 | return err; | 129 | return err; |
130 | } | 130 | } |
@@ -143,15 +143,15 @@ EXPORT_SYMBOL(dev_mc_sync); | |||
143 | */ | 143 | */ |
144 | void dev_mc_unsync(struct net_device *to, struct net_device *from) | 144 | void dev_mc_unsync(struct net_device *to, struct net_device *from) |
145 | { | 145 | { |
146 | netif_tx_lock_bh(from); | 146 | netif_addr_lock_bh(from); |
147 | netif_tx_lock_bh(to); | 147 | netif_addr_lock(to); |
148 | 148 | ||
149 | __dev_addr_unsync(&to->mc_list, &to->mc_count, | 149 | __dev_addr_unsync(&to->mc_list, &to->mc_count, |
150 | &from->mc_list, &from->mc_count); | 150 | &from->mc_list, &from->mc_count); |
151 | __dev_set_rx_mode(to); | 151 | __dev_set_rx_mode(to); |
152 | 152 | ||
153 | netif_tx_unlock_bh(to); | 153 | netif_addr_unlock(to); |
154 | netif_tx_unlock_bh(from); | 154 | netif_addr_unlock_bh(from); |
155 | } | 155 | } |
156 | EXPORT_SYMBOL(dev_mc_unsync); | 156 | EXPORT_SYMBOL(dev_mc_unsync); |
157 | 157 | ||
@@ -164,7 +164,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v) | |||
164 | if (v == SEQ_START_TOKEN) | 164 | if (v == SEQ_START_TOKEN) |
165 | return 0; | 165 | return 0; |
166 | 166 | ||
167 | netif_tx_lock_bh(dev); | 167 | netif_addr_lock_bh(dev); |
168 | for (m = dev->mc_list; m; m = m->next) { | 168 | for (m = dev->mc_list; m; m = m->next) { |
169 | int i; | 169 | int i; |
170 | 170 | ||
@@ -176,7 +176,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v) | |||
176 | 176 | ||
177 | seq_putc(seq, '\n'); | 177 | seq_putc(seq, '\n'); |
178 | } | 178 | } |
179 | netif_tx_unlock_bh(dev); | 179 | netif_addr_unlock_bh(dev); |
180 | return 0; | 180 | return 0; |
181 | } | 181 | } |
182 | 182 | ||
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 0133b5ebd545..14ada537f895 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -209,6 +209,36 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) | |||
209 | return 0; | 209 | return 0; |
210 | } | 210 | } |
211 | 211 | ||
212 | static int ethtool_set_rxhash(struct net_device *dev, void __user *useraddr) | ||
213 | { | ||
214 | struct ethtool_rxnfc cmd; | ||
215 | |||
216 | if (!dev->ethtool_ops->set_rxhash) | ||
217 | return -EOPNOTSUPP; | ||
218 | |||
219 | if (copy_from_user(&cmd, useraddr, sizeof(cmd))) | ||
220 | return -EFAULT; | ||
221 | |||
222 | return dev->ethtool_ops->set_rxhash(dev, &cmd); | ||
223 | } | ||
224 | |||
225 | static int ethtool_get_rxhash(struct net_device *dev, void __user *useraddr) | ||
226 | { | ||
227 | struct ethtool_rxnfc info; | ||
228 | |||
229 | if (!dev->ethtool_ops->get_rxhash) | ||
230 | return -EOPNOTSUPP; | ||
231 | |||
232 | if (copy_from_user(&info, useraddr, sizeof(info))) | ||
233 | return -EFAULT; | ||
234 | |||
235 | dev->ethtool_ops->get_rxhash(dev, &info); | ||
236 | |||
237 | if (copy_to_user(useraddr, &info, sizeof(info))) | ||
238 | return -EFAULT; | ||
239 | return 0; | ||
240 | } | ||
241 | |||
212 | static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) | 242 | static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) |
213 | { | 243 | { |
214 | struct ethtool_regs regs; | 244 | struct ethtool_regs regs; |
@@ -826,6 +856,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
826 | case ETHTOOL_GGSO: | 856 | case ETHTOOL_GGSO: |
827 | case ETHTOOL_GFLAGS: | 857 | case ETHTOOL_GFLAGS: |
828 | case ETHTOOL_GPFLAGS: | 858 | case ETHTOOL_GPFLAGS: |
859 | case ETHTOOL_GRXFH: | ||
829 | break; | 860 | break; |
830 | default: | 861 | default: |
831 | if (!capable(CAP_NET_ADMIN)) | 862 | if (!capable(CAP_NET_ADMIN)) |
@@ -977,6 +1008,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
977 | rc = ethtool_set_value(dev, useraddr, | 1008 | rc = ethtool_set_value(dev, useraddr, |
978 | dev->ethtool_ops->set_priv_flags); | 1009 | dev->ethtool_ops->set_priv_flags); |
979 | break; | 1010 | break; |
1011 | case ETHTOOL_GRXFH: | ||
1012 | rc = ethtool_get_rxhash(dev, useraddr); | ||
1013 | break; | ||
1014 | case ETHTOOL_SRXFH: | ||
1015 | rc = ethtool_set_rxhash(dev, useraddr); | ||
1016 | break; | ||
980 | default: | 1017 | default: |
981 | rc = -EOPNOTSUPP; | 1018 | rc = -EOPNOTSUPP; |
982 | } | 1019 | } |
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 277a2302eb3a..79de3b14a8d1 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c | |||
@@ -69,7 +69,7 @@ static void rules_ops_put(struct fib_rules_ops *ops) | |||
69 | static void flush_route_cache(struct fib_rules_ops *ops) | 69 | static void flush_route_cache(struct fib_rules_ops *ops) |
70 | { | 70 | { |
71 | if (ops->flush_cache) | 71 | if (ops->flush_cache) |
72 | ops->flush_cache(); | 72 | ops->flush_cache(ops); |
73 | } | 73 | } |
74 | 74 | ||
75 | int fib_rules_register(struct fib_rules_ops *ops) | 75 | int fib_rules_register(struct fib_rules_ops *ops) |
diff --git a/net/core/iovec.c b/net/core/iovec.c index 755c37fdaee7..4c9c0121c9da 100644 --- a/net/core/iovec.c +++ b/net/core/iovec.c | |||
@@ -36,7 +36,7 @@ | |||
36 | * in any case. | 36 | * in any case. |
37 | */ | 37 | */ |
38 | 38 | ||
39 | int verify_iovec(struct msghdr *m, struct iovec *iov, char *address, int mode) | 39 | int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) |
40 | { | 40 | { |
41 | int size, err, ct; | 41 | int size, err, ct; |
42 | 42 | ||
diff --git a/net/core/link_watch.c b/net/core/link_watch.c index a5e372b9ec4d..bf8f7af699d7 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c | |||
@@ -77,10 +77,10 @@ static void rfc2863_policy(struct net_device *dev) | |||
77 | } | 77 | } |
78 | 78 | ||
79 | 79 | ||
80 | static int linkwatch_urgent_event(struct net_device *dev) | 80 | static bool linkwatch_urgent_event(struct net_device *dev) |
81 | { | 81 | { |
82 | return netif_running(dev) && netif_carrier_ok(dev) && | 82 | return netif_running(dev) && netif_carrier_ok(dev) && |
83 | dev->qdisc != dev->qdisc_sleeping; | 83 | qdisc_tx_changing(dev); |
84 | } | 84 | } |
85 | 85 | ||
86 | 86 | ||
@@ -180,10 +180,9 @@ static void __linkwatch_run_queue(int urgent_only) | |||
180 | 180 | ||
181 | rfc2863_policy(dev); | 181 | rfc2863_policy(dev); |
182 | if (dev->flags & IFF_UP) { | 182 | if (dev->flags & IFF_UP) { |
183 | if (netif_carrier_ok(dev)) { | 183 | if (netif_carrier_ok(dev)) |
184 | WARN_ON(dev->qdisc_sleeping == &noop_qdisc); | ||
185 | dev_activate(dev); | 184 | dev_activate(dev); |
186 | } else | 185 | else |
187 | dev_deactivate(dev); | 186 | dev_deactivate(dev); |
188 | 187 | ||
189 | netdev_state_change(dev); | 188 | netdev_state_change(dev); |
@@ -214,7 +213,7 @@ static void linkwatch_event(struct work_struct *dummy) | |||
214 | 213 | ||
215 | void linkwatch_fire_event(struct net_device *dev) | 214 | void linkwatch_fire_event(struct net_device *dev) |
216 | { | 215 | { |
217 | int urgent = linkwatch_urgent_event(dev); | 216 | bool urgent = linkwatch_urgent_event(dev); |
218 | 217 | ||
219 | if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { | 218 | if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { |
220 | dev_hold(dev); | 219 | dev_hold(dev); |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 65f01f71b3f3..f62c8af85d38 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -930,6 +930,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) | |||
930 | buff = neigh->arp_queue.next; | 930 | buff = neigh->arp_queue.next; |
931 | __skb_unlink(buff, &neigh->arp_queue); | 931 | __skb_unlink(buff, &neigh->arp_queue); |
932 | kfree_skb(buff); | 932 | kfree_skb(buff); |
933 | NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); | ||
933 | } | 934 | } |
934 | __skb_queue_tail(&neigh->arp_queue, skb); | 935 | __skb_queue_tail(&neigh->arp_queue, skb); |
935 | } | 936 | } |
@@ -2462,12 +2463,12 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v) | |||
2462 | struct neigh_statistics *st = v; | 2463 | struct neigh_statistics *st = v; |
2463 | 2464 | ||
2464 | if (v == SEQ_START_TOKEN) { | 2465 | if (v == SEQ_START_TOKEN) { |
2465 | seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n"); | 2466 | seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n"); |
2466 | return 0; | 2467 | return 0; |
2467 | } | 2468 | } |
2468 | 2469 | ||
2469 | seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx " | 2470 | seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx " |
2470 | "%08lx %08lx %08lx %08lx\n", | 2471 | "%08lx %08lx %08lx %08lx %08lx\n", |
2471 | atomic_read(&tbl->entries), | 2472 | atomic_read(&tbl->entries), |
2472 | 2473 | ||
2473 | st->allocs, | 2474 | st->allocs, |
@@ -2483,7 +2484,8 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v) | |||
2483 | st->rcv_probes_ucast, | 2484 | st->rcv_probes_ucast, |
2484 | 2485 | ||
2485 | st->periodic_gc_runs, | 2486 | st->periodic_gc_runs, |
2486 | st->forced_gc_runs | 2487 | st->forced_gc_runs, |
2488 | st->unres_discards | ||
2487 | ); | 2489 | ); |
2488 | 2490 | ||
2489 | return 0; | 2491 | return 0; |
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 90e2177af081..c1f4e0d428c0 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -242,11 +242,11 @@ static ssize_t netstat_show(const struct device *d, | |||
242 | offset % sizeof(unsigned long) != 0); | 242 | offset % sizeof(unsigned long) != 0); |
243 | 243 | ||
244 | read_lock(&dev_base_lock); | 244 | read_lock(&dev_base_lock); |
245 | if (dev_isalive(dev) && dev->get_stats && | 245 | if (dev_isalive(dev)) { |
246 | (stats = (*dev->get_stats)(dev))) | 246 | stats = dev->get_stats(dev); |
247 | ret = sprintf(buf, fmt_ulong, | 247 | ret = sprintf(buf, fmt_ulong, |
248 | *(unsigned long *)(((u8 *) stats) + offset)); | 248 | *(unsigned long *)(((u8 *) stats) + offset)); |
249 | 249 | } | |
250 | read_unlock(&dev_base_lock); | 250 | read_unlock(&dev_base_lock); |
251 | return ret; | 251 | return ret; |
252 | } | 252 | } |
@@ -318,7 +318,7 @@ static struct attribute_group netstat_group = { | |||
318 | .attrs = netstat_attrs, | 318 | .attrs = netstat_attrs, |
319 | }; | 319 | }; |
320 | 320 | ||
321 | #ifdef CONFIG_WIRELESS_EXT | 321 | #ifdef CONFIG_WIRELESS_EXT_SYSFS |
322 | /* helper function that does all the locking etc for wireless stats */ | 322 | /* helper function that does all the locking etc for wireless stats */ |
323 | static ssize_t wireless_show(struct device *d, char *buf, | 323 | static ssize_t wireless_show(struct device *d, char *buf, |
324 | ssize_t (*format)(const struct iw_statistics *, | 324 | ssize_t (*format)(const struct iw_statistics *, |
@@ -457,10 +457,9 @@ int netdev_register_kobject(struct net_device *net) | |||
457 | strlcpy(dev->bus_id, net->name, BUS_ID_SIZE); | 457 | strlcpy(dev->bus_id, net->name, BUS_ID_SIZE); |
458 | 458 | ||
459 | #ifdef CONFIG_SYSFS | 459 | #ifdef CONFIG_SYSFS |
460 | if (net->get_stats) | 460 | *groups++ = &netstat_group; |
461 | *groups++ = &netstat_group; | ||
462 | 461 | ||
463 | #ifdef CONFIG_WIRELESS_EXT | 462 | #ifdef CONFIG_WIRELESS_EXT_SYSFS |
464 | if (net->wireless_handlers && net->wireless_handlers->get_wireless_stats) | 463 | if (net->wireless_handlers && net->wireless_handlers->get_wireless_stats) |
465 | *groups++ = &wireless_group; | 464 | *groups++ = &wireless_group; |
466 | #endif | 465 | #endif |
@@ -469,6 +468,19 @@ int netdev_register_kobject(struct net_device *net) | |||
469 | return device_add(dev); | 468 | return device_add(dev); |
470 | } | 469 | } |
471 | 470 | ||
471 | int netdev_class_create_file(struct class_attribute *class_attr) | ||
472 | { | ||
473 | return class_create_file(&net_class, class_attr); | ||
474 | } | ||
475 | |||
476 | void netdev_class_remove_file(struct class_attribute *class_attr) | ||
477 | { | ||
478 | class_remove_file(&net_class, class_attr); | ||
479 | } | ||
480 | |||
481 | EXPORT_SYMBOL(netdev_class_create_file); | ||
482 | EXPORT_SYMBOL(netdev_class_remove_file); | ||
483 | |||
472 | void netdev_initialize_kobject(struct net_device *net) | 484 | void netdev_initialize_kobject(struct net_device *net) |
473 | { | 485 | { |
474 | struct device *device = &(net->dev); | 486 | struct device *device = &(net->dev); |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 8fb134da0346..c12720895ecf 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -58,25 +58,27 @@ static void queue_process(struct work_struct *work) | |||
58 | 58 | ||
59 | while ((skb = skb_dequeue(&npinfo->txq))) { | 59 | while ((skb = skb_dequeue(&npinfo->txq))) { |
60 | struct net_device *dev = skb->dev; | 60 | struct net_device *dev = skb->dev; |
61 | struct netdev_queue *txq; | ||
61 | 62 | ||
62 | if (!netif_device_present(dev) || !netif_running(dev)) { | 63 | if (!netif_device_present(dev) || !netif_running(dev)) { |
63 | __kfree_skb(skb); | 64 | __kfree_skb(skb); |
64 | continue; | 65 | continue; |
65 | } | 66 | } |
66 | 67 | ||
68 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | ||
69 | |||
67 | local_irq_save(flags); | 70 | local_irq_save(flags); |
68 | netif_tx_lock(dev); | 71 | __netif_tx_lock(txq, smp_processor_id()); |
69 | if ((netif_queue_stopped(dev) || | 72 | if (netif_tx_queue_stopped(txq) || |
70 | netif_subqueue_stopped(dev, skb)) || | 73 | dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { |
71 | dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { | ||
72 | skb_queue_head(&npinfo->txq, skb); | 74 | skb_queue_head(&npinfo->txq, skb); |
73 | netif_tx_unlock(dev); | 75 | __netif_tx_unlock(txq); |
74 | local_irq_restore(flags); | 76 | local_irq_restore(flags); |
75 | 77 | ||
76 | schedule_delayed_work(&npinfo->tx_work, HZ/10); | 78 | schedule_delayed_work(&npinfo->tx_work, HZ/10); |
77 | return; | 79 | return; |
78 | } | 80 | } |
79 | netif_tx_unlock(dev); | 81 | __netif_tx_unlock(txq); |
80 | local_irq_restore(flags); | 82 | local_irq_restore(flags); |
81 | } | 83 | } |
82 | } | 84 | } |
@@ -278,17 +280,19 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) | |||
278 | 280 | ||
279 | /* don't get messages out of order, and no recursion */ | 281 | /* don't get messages out of order, and no recursion */ |
280 | if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { | 282 | if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { |
283 | struct netdev_queue *txq; | ||
281 | unsigned long flags; | 284 | unsigned long flags; |
282 | 285 | ||
286 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | ||
287 | |||
283 | local_irq_save(flags); | 288 | local_irq_save(flags); |
284 | /* try until next clock tick */ | 289 | /* try until next clock tick */ |
285 | for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; | 290 | for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; |
286 | tries > 0; --tries) { | 291 | tries > 0; --tries) { |
287 | if (netif_tx_trylock(dev)) { | 292 | if (__netif_tx_trylock(txq)) { |
288 | if (!netif_queue_stopped(dev) && | 293 | if (!netif_tx_queue_stopped(txq)) |
289 | !netif_subqueue_stopped(dev, skb)) | ||
290 | status = dev->hard_start_xmit(skb, dev); | 294 | status = dev->hard_start_xmit(skb, dev); |
291 | netif_tx_unlock(dev); | 295 | __netif_tx_unlock(txq); |
292 | 296 | ||
293 | if (status == NETDEV_TX_OK) | 297 | if (status == NETDEV_TX_OK) |
294 | break; | 298 | break; |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index fdf537707e51..c7d484f7e1c4 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -1875,7 +1875,7 @@ static int pktgen_device_event(struct notifier_block *unused, | |||
1875 | { | 1875 | { |
1876 | struct net_device *dev = ptr; | 1876 | struct net_device *dev = ptr; |
1877 | 1877 | ||
1878 | if (dev_net(dev) != &init_net) | 1878 | if (!net_eq(dev_net(dev), &init_net)) |
1879 | return NOTIFY_DONE; | 1879 | return NOTIFY_DONE; |
1880 | 1880 | ||
1881 | /* It is OK that we do not hold the group lock right now, | 1881 | /* It is OK that we do not hold the group lock right now, |
@@ -2123,6 +2123,24 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) | |||
2123 | } | 2123 | } |
2124 | } | 2124 | } |
2125 | #endif | 2125 | #endif |
2126 | static void set_cur_queue_map(struct pktgen_dev *pkt_dev) | ||
2127 | { | ||
2128 | if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) { | ||
2129 | __u16 t; | ||
2130 | if (pkt_dev->flags & F_QUEUE_MAP_RND) { | ||
2131 | t = random32() % | ||
2132 | (pkt_dev->queue_map_max - | ||
2133 | pkt_dev->queue_map_min + 1) | ||
2134 | + pkt_dev->queue_map_min; | ||
2135 | } else { | ||
2136 | t = pkt_dev->cur_queue_map + 1; | ||
2137 | if (t > pkt_dev->queue_map_max) | ||
2138 | t = pkt_dev->queue_map_min; | ||
2139 | } | ||
2140 | pkt_dev->cur_queue_map = t; | ||
2141 | } | ||
2142 | } | ||
2143 | |||
2126 | /* Increment/randomize headers according to flags and current values | 2144 | /* Increment/randomize headers according to flags and current values |
2127 | * for IP src/dest, UDP src/dst port, MAC-Addr src/dst | 2145 | * for IP src/dest, UDP src/dst port, MAC-Addr src/dst |
2128 | */ | 2146 | */ |
@@ -2325,19 +2343,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) | |||
2325 | pkt_dev->cur_pkt_size = t; | 2343 | pkt_dev->cur_pkt_size = t; |
2326 | } | 2344 | } |
2327 | 2345 | ||
2328 | if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) { | 2346 | set_cur_queue_map(pkt_dev); |
2329 | __u16 t; | ||
2330 | if (pkt_dev->flags & F_QUEUE_MAP_RND) { | ||
2331 | t = random32() % | ||
2332 | (pkt_dev->queue_map_max - pkt_dev->queue_map_min + 1) | ||
2333 | + pkt_dev->queue_map_min; | ||
2334 | } else { | ||
2335 | t = pkt_dev->cur_queue_map + 1; | ||
2336 | if (t > pkt_dev->queue_map_max) | ||
2337 | t = pkt_dev->queue_map_min; | ||
2338 | } | ||
2339 | pkt_dev->cur_queue_map = t; | ||
2340 | } | ||
2341 | 2347 | ||
2342 | pkt_dev->flows[flow].count++; | 2348 | pkt_dev->flows[flow].count++; |
2343 | } | 2349 | } |
@@ -2458,7 +2464,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | |||
2458 | __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */ | 2464 | __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */ |
2459 | __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ | 2465 | __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ |
2460 | __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ | 2466 | __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ |
2461 | 2467 | u16 queue_map; | |
2462 | 2468 | ||
2463 | if (pkt_dev->nr_labels) | 2469 | if (pkt_dev->nr_labels) |
2464 | protocol = htons(ETH_P_MPLS_UC); | 2470 | protocol = htons(ETH_P_MPLS_UC); |
@@ -2469,6 +2475,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | |||
2469 | /* Update any of the values, used when we're incrementing various | 2475 | /* Update any of the values, used when we're incrementing various |
2470 | * fields. | 2476 | * fields. |
2471 | */ | 2477 | */ |
2478 | queue_map = pkt_dev->cur_queue_map; | ||
2472 | mod_cur_headers(pkt_dev); | 2479 | mod_cur_headers(pkt_dev); |
2473 | 2480 | ||
2474 | datalen = (odev->hard_header_len + 16) & ~0xf; | 2481 | datalen = (odev->hard_header_len + 16) & ~0xf; |
@@ -2507,7 +2514,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | |||
2507 | skb->network_header = skb->tail; | 2514 | skb->network_header = skb->tail; |
2508 | skb->transport_header = skb->network_header + sizeof(struct iphdr); | 2515 | skb->transport_header = skb->network_header + sizeof(struct iphdr); |
2509 | skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); | 2516 | skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); |
2510 | skb_set_queue_mapping(skb, pkt_dev->cur_queue_map); | 2517 | skb_set_queue_mapping(skb, queue_map); |
2511 | iph = ip_hdr(skb); | 2518 | iph = ip_hdr(skb); |
2512 | udph = udp_hdr(skb); | 2519 | udph = udp_hdr(skb); |
2513 | 2520 | ||
@@ -2797,6 +2804,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, | |||
2797 | __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */ | 2804 | __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */ |
2798 | __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ | 2805 | __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ |
2799 | __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ | 2806 | __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ |
2807 | u16 queue_map; | ||
2800 | 2808 | ||
2801 | if (pkt_dev->nr_labels) | 2809 | if (pkt_dev->nr_labels) |
2802 | protocol = htons(ETH_P_MPLS_UC); | 2810 | protocol = htons(ETH_P_MPLS_UC); |
@@ -2807,6 +2815,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, | |||
2807 | /* Update any of the values, used when we're incrementing various | 2815 | /* Update any of the values, used when we're incrementing various |
2808 | * fields. | 2816 | * fields. |
2809 | */ | 2817 | */ |
2818 | queue_map = pkt_dev->cur_queue_map; | ||
2810 | mod_cur_headers(pkt_dev); | 2819 | mod_cur_headers(pkt_dev); |
2811 | 2820 | ||
2812 | skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 + | 2821 | skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 + |
@@ -2844,7 +2853,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, | |||
2844 | skb->network_header = skb->tail; | 2853 | skb->network_header = skb->tail; |
2845 | skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); | 2854 | skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); |
2846 | skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr)); | 2855 | skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr)); |
2847 | skb_set_queue_mapping(skb, pkt_dev->cur_queue_map); | 2856 | skb_set_queue_mapping(skb, queue_map); |
2848 | iph = ipv6_hdr(skb); | 2857 | iph = ipv6_hdr(skb); |
2849 | udph = udp_hdr(skb); | 2858 | udph = udp_hdr(skb); |
2850 | 2859 | ||
@@ -3263,7 +3272,9 @@ static void pktgen_rem_thread(struct pktgen_thread *t) | |||
3263 | static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) | 3272 | static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) |
3264 | { | 3273 | { |
3265 | struct net_device *odev = NULL; | 3274 | struct net_device *odev = NULL; |
3275 | struct netdev_queue *txq; | ||
3266 | __u64 idle_start = 0; | 3276 | __u64 idle_start = 0; |
3277 | u16 queue_map; | ||
3267 | int ret; | 3278 | int ret; |
3268 | 3279 | ||
3269 | odev = pkt_dev->odev; | 3280 | odev = pkt_dev->odev; |
@@ -3285,9 +3296,15 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
3285 | } | 3296 | } |
3286 | } | 3297 | } |
3287 | 3298 | ||
3288 | if ((netif_queue_stopped(odev) || | 3299 | if (!pkt_dev->skb) { |
3289 | (pkt_dev->skb && | 3300 | set_cur_queue_map(pkt_dev); |
3290 | netif_subqueue_stopped(odev, pkt_dev->skb))) || | 3301 | queue_map = pkt_dev->cur_queue_map; |
3302 | } else { | ||
3303 | queue_map = skb_get_queue_mapping(pkt_dev->skb); | ||
3304 | } | ||
3305 | |||
3306 | txq = netdev_get_tx_queue(odev, queue_map); | ||
3307 | if (netif_tx_queue_stopped(txq) || | ||
3291 | need_resched()) { | 3308 | need_resched()) { |
3292 | idle_start = getCurUs(); | 3309 | idle_start = getCurUs(); |
3293 | 3310 | ||
@@ -3303,8 +3320,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
3303 | 3320 | ||
3304 | pkt_dev->idle_acc += getCurUs() - idle_start; | 3321 | pkt_dev->idle_acc += getCurUs() - idle_start; |
3305 | 3322 | ||
3306 | if (netif_queue_stopped(odev) || | 3323 | if (netif_tx_queue_stopped(txq)) { |
3307 | netif_subqueue_stopped(odev, pkt_dev->skb)) { | ||
3308 | pkt_dev->next_tx_us = getCurUs(); /* TODO */ | 3324 | pkt_dev->next_tx_us = getCurUs(); /* TODO */ |
3309 | pkt_dev->next_tx_ns = 0; | 3325 | pkt_dev->next_tx_ns = 0; |
3310 | goto out; /* Try the next interface */ | 3326 | goto out; /* Try the next interface */ |
@@ -3331,9 +3347,12 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
3331 | } | 3347 | } |
3332 | } | 3348 | } |
3333 | 3349 | ||
3334 | netif_tx_lock_bh(odev); | 3350 | /* fill_packet() might have changed the queue */ |
3335 | if (!netif_queue_stopped(odev) && | 3351 | queue_map = skb_get_queue_mapping(pkt_dev->skb); |
3336 | !netif_subqueue_stopped(odev, pkt_dev->skb)) { | 3352 | txq = netdev_get_tx_queue(odev, queue_map); |
3353 | |||
3354 | __netif_tx_lock_bh(txq); | ||
3355 | if (!netif_tx_queue_stopped(txq)) { | ||
3337 | 3356 | ||
3338 | atomic_inc(&(pkt_dev->skb->users)); | 3357 | atomic_inc(&(pkt_dev->skb->users)); |
3339 | retry_now: | 3358 | retry_now: |
@@ -3377,7 +3396,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
3377 | pkt_dev->next_tx_ns = 0; | 3396 | pkt_dev->next_tx_ns = 0; |
3378 | } | 3397 | } |
3379 | 3398 | ||
3380 | netif_tx_unlock_bh(odev); | 3399 | __netif_tx_unlock_bh(txq); |
3381 | 3400 | ||
3382 | /* If pkt_dev->count is zero, then run forever */ | 3401 | /* If pkt_dev->count is zero, then run forever */ |
3383 | if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { | 3402 | if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a9a77216310e..71edb8b36341 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -605,8 +605,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
605 | int type, u32 pid, u32 seq, u32 change, | 605 | int type, u32 pid, u32 seq, u32 change, |
606 | unsigned int flags) | 606 | unsigned int flags) |
607 | { | 607 | { |
608 | struct netdev_queue *txq; | ||
608 | struct ifinfomsg *ifm; | 609 | struct ifinfomsg *ifm; |
609 | struct nlmsghdr *nlh; | 610 | struct nlmsghdr *nlh; |
611 | struct net_device_stats *stats; | ||
612 | struct nlattr *attr; | ||
610 | 613 | ||
611 | nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); | 614 | nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); |
612 | if (nlh == NULL) | 615 | if (nlh == NULL) |
@@ -633,8 +636,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
633 | if (dev->master) | 636 | if (dev->master) |
634 | NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex); | 637 | NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex); |
635 | 638 | ||
636 | if (dev->qdisc_sleeping) | 639 | txq = netdev_get_tx_queue(dev, 0); |
637 | NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc_sleeping->ops->id); | 640 | if (txq->qdisc_sleeping) |
641 | NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id); | ||
638 | 642 | ||
639 | if (1) { | 643 | if (1) { |
640 | struct rtnl_link_ifmap map = { | 644 | struct rtnl_link_ifmap map = { |
@@ -653,19 +657,13 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
653 | NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast); | 657 | NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast); |
654 | } | 658 | } |
655 | 659 | ||
656 | if (dev->get_stats) { | 660 | attr = nla_reserve(skb, IFLA_STATS, |
657 | struct net_device_stats *stats = dev->get_stats(dev); | 661 | sizeof(struct rtnl_link_stats)); |
658 | if (stats) { | 662 | if (attr == NULL) |
659 | struct nlattr *attr; | 663 | goto nla_put_failure; |
660 | 664 | ||
661 | attr = nla_reserve(skb, IFLA_STATS, | 665 | stats = dev->get_stats(dev); |
662 | sizeof(struct rtnl_link_stats)); | 666 | copy_rtnl_link_stats(nla_data(attr), stats); |
663 | if (attr == NULL) | ||
664 | goto nla_put_failure; | ||
665 | |||
666 | copy_rtnl_link_stats(nla_data(attr), stats); | ||
667 | } | ||
668 | } | ||
669 | 667 | ||
670 | if (dev->rtnl_link_ops) { | 668 | if (dev->rtnl_link_ops) { |
671 | if (rtnl_link_fill(skb, dev) < 0) | 669 | if (rtnl_link_fill(skb, dev) < 0) |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 366621610e76..e4115672b6cf 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -4,8 +4,6 @@ | |||
4 | * Authors: Alan Cox <iiitac@pyr.swan.ac.uk> | 4 | * Authors: Alan Cox <iiitac@pyr.swan.ac.uk> |
5 | * Florian La Roche <rzsfl@rz.uni-sb.de> | 5 | * Florian La Roche <rzsfl@rz.uni-sb.de> |
6 | * | 6 | * |
7 | * Version: $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $ | ||
8 | * | ||
9 | * Fixes: | 7 | * Fixes: |
10 | * Alan Cox : Fixed the worst of the load | 8 | * Alan Cox : Fixed the worst of the load |
11 | * balancer bugs. | 9 | * balancer bugs. |
@@ -461,6 +459,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
461 | new->tc_verd = old->tc_verd; | 459 | new->tc_verd = old->tc_verd; |
462 | #endif | 460 | #endif |
463 | #endif | 461 | #endif |
462 | new->vlan_tci = old->vlan_tci; | ||
463 | |||
464 | skb_copy_secmark(new, old); | 464 | skb_copy_secmark(new, old); |
465 | } | 465 | } |
466 | 466 | ||
@@ -1282,114 +1282,83 @@ static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, | |||
1282 | return 0; | 1282 | return 0; |
1283 | } | 1283 | } |
1284 | 1284 | ||
1285 | /* | 1285 | static inline void __segment_seek(struct page **page, unsigned int *poff, |
1286 | * Map linear and fragment data from the skb to spd. Returns number of | 1286 | unsigned int *plen, unsigned int off) |
1287 | * pages mapped. | 1287 | { |
1288 | */ | 1288 | *poff += off; |
1289 | static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, | 1289 | *page += *poff / PAGE_SIZE; |
1290 | unsigned int *total_len, | 1290 | *poff = *poff % PAGE_SIZE; |
1291 | struct splice_pipe_desc *spd) | 1291 | *plen -= off; |
1292 | { | 1292 | } |
1293 | unsigned int nr_pages = spd->nr_pages; | 1293 | |
1294 | unsigned int poff, plen, len, toff, tlen; | 1294 | static inline int __splice_segment(struct page *page, unsigned int poff, |
1295 | int headlen, seg, error = 0; | 1295 | unsigned int plen, unsigned int *off, |
1296 | 1296 | unsigned int *len, struct sk_buff *skb, | |
1297 | toff = *offset; | 1297 | struct splice_pipe_desc *spd) |
1298 | tlen = *total_len; | 1298 | { |
1299 | if (!tlen) { | 1299 | if (!*len) |
1300 | error = 1; | 1300 | return 1; |
1301 | goto err; | 1301 | |
1302 | /* skip this segment if already processed */ | ||
1303 | if (*off >= plen) { | ||
1304 | *off -= plen; | ||
1305 | return 0; | ||
1302 | } | 1306 | } |
1303 | 1307 | ||
1304 | /* | 1308 | /* ignore any bits we already processed */ |
1305 | * if the offset is greater than the linear part, go directly to | 1309 | if (*off) { |
1306 | * the fragments. | 1310 | __segment_seek(&page, &poff, &plen, *off); |
1307 | */ | 1311 | *off = 0; |
1308 | headlen = skb_headlen(skb); | ||
1309 | if (toff >= headlen) { | ||
1310 | toff -= headlen; | ||
1311 | goto map_frag; | ||
1312 | } | 1312 | } |
1313 | 1313 | ||
1314 | /* | 1314 | do { |
1315 | * first map the linear region into the pages/partial map, skipping | 1315 | unsigned int flen = min(*len, plen); |
1316 | * any potential initial offset. | ||
1317 | */ | ||
1318 | len = 0; | ||
1319 | while (len < headlen) { | ||
1320 | void *p = skb->data + len; | ||
1321 | |||
1322 | poff = (unsigned long) p & (PAGE_SIZE - 1); | ||
1323 | plen = min_t(unsigned int, headlen - len, PAGE_SIZE - poff); | ||
1324 | len += plen; | ||
1325 | |||
1326 | if (toff) { | ||
1327 | if (plen <= toff) { | ||
1328 | toff -= plen; | ||
1329 | continue; | ||
1330 | } | ||
1331 | plen -= toff; | ||
1332 | poff += toff; | ||
1333 | toff = 0; | ||
1334 | } | ||
1335 | 1316 | ||
1336 | plen = min(plen, tlen); | 1317 | /* the linear region may spread across several pages */ |
1337 | if (!plen) | 1318 | flen = min_t(unsigned int, flen, PAGE_SIZE - poff); |
1338 | break; | ||
1339 | 1319 | ||
1340 | /* | 1320 | if (spd_fill_page(spd, page, flen, poff, skb)) |
1341 | * just jump directly to update and return, no point | 1321 | return 1; |
1342 | * in going over fragments when the output is full. | ||
1343 | */ | ||
1344 | error = spd_fill_page(spd, virt_to_page(p), plen, poff, skb); | ||
1345 | if (error) | ||
1346 | goto done; | ||
1347 | 1322 | ||
1348 | tlen -= plen; | 1323 | __segment_seek(&page, &poff, &plen, flen); |
1349 | } | 1324 | *len -= flen; |
1325 | |||
1326 | } while (*len && plen); | ||
1327 | |||
1328 | return 0; | ||
1329 | } | ||
1330 | |||
1331 | /* | ||
1332 | * Map linear and fragment data from the skb to spd. It reports failure if the | ||
1333 | * pipe is full or if we already spliced the requested length. | ||
1334 | */ | ||
1335 | static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, | ||
1336 | unsigned int *len, | ||
1337 | struct splice_pipe_desc *spd) | ||
1338 | { | ||
1339 | int seg; | ||
1340 | |||
1341 | /* | ||
1342 | * map the linear part | ||
1343 | */ | ||
1344 | if (__splice_segment(virt_to_page(skb->data), | ||
1345 | (unsigned long) skb->data & (PAGE_SIZE - 1), | ||
1346 | skb_headlen(skb), | ||
1347 | offset, len, skb, spd)) | ||
1348 | return 1; | ||
1350 | 1349 | ||
1351 | /* | 1350 | /* |
1352 | * then map the fragments | 1351 | * then map the fragments |
1353 | */ | 1352 | */ |
1354 | map_frag: | ||
1355 | for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { | 1353 | for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { |
1356 | const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; | 1354 | const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; |
1357 | 1355 | ||
1358 | plen = f->size; | 1356 | if (__splice_segment(f->page, f->page_offset, f->size, |
1359 | poff = f->page_offset; | 1357 | offset, len, skb, spd)) |
1360 | 1358 | return 1; | |
1361 | if (toff) { | ||
1362 | if (plen <= toff) { | ||
1363 | toff -= plen; | ||
1364 | continue; | ||
1365 | } | ||
1366 | plen -= toff; | ||
1367 | poff += toff; | ||
1368 | toff = 0; | ||
1369 | } | ||
1370 | |||
1371 | plen = min(plen, tlen); | ||
1372 | if (!plen) | ||
1373 | break; | ||
1374 | |||
1375 | error = spd_fill_page(spd, f->page, plen, poff, skb); | ||
1376 | if (error) | ||
1377 | break; | ||
1378 | |||
1379 | tlen -= plen; | ||
1380 | } | 1359 | } |
1381 | 1360 | ||
1382 | done: | 1361 | return 0; |
1383 | if (spd->nr_pages - nr_pages) { | ||
1384 | *offset = 0; | ||
1385 | *total_len = tlen; | ||
1386 | return 0; | ||
1387 | } | ||
1388 | err: | ||
1389 | /* update the offset to reflect the linear part skip, if any */ | ||
1390 | if (!error) | ||
1391 | *offset = toff; | ||
1392 | return error; | ||
1393 | } | 1362 | } |
1394 | 1363 | ||
1395 | /* | 1364 | /* |
@@ -2288,6 +2257,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) | |||
2288 | skb_copy_queue_mapping(nskb, skb); | 2257 | skb_copy_queue_mapping(nskb, skb); |
2289 | nskb->priority = skb->priority; | 2258 | nskb->priority = skb->priority; |
2290 | nskb->protocol = skb->protocol; | 2259 | nskb->protocol = skb->protocol; |
2260 | nskb->vlan_tci = skb->vlan_tci; | ||
2291 | nskb->dst = dst_clone(skb->dst); | 2261 | nskb->dst = dst_clone(skb->dst); |
2292 | memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); | 2262 | memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); |
2293 | nskb->pkt_type = skb->pkt_type; | 2263 | nskb->pkt_type = skb->pkt_type; |
@@ -2592,6 +2562,13 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) | |||
2592 | return true; | 2562 | return true; |
2593 | } | 2563 | } |
2594 | 2564 | ||
2565 | void __skb_warn_lro_forwarding(const struct sk_buff *skb) | ||
2566 | { | ||
2567 | if (net_ratelimit()) | ||
2568 | pr_warning("%s: received packets cannot be forwarded" | ||
2569 | " while LRO is enabled\n", skb->dev->name); | ||
2570 | } | ||
2571 | |||
2595 | EXPORT_SYMBOL(___pskb_trim); | 2572 | EXPORT_SYMBOL(___pskb_trim); |
2596 | EXPORT_SYMBOL(__kfree_skb); | 2573 | EXPORT_SYMBOL(__kfree_skb); |
2597 | EXPORT_SYMBOL(kfree_skb); | 2574 | EXPORT_SYMBOL(kfree_skb); |
@@ -2625,6 +2602,7 @@ EXPORT_SYMBOL(skb_seq_read); | |||
2625 | EXPORT_SYMBOL(skb_abort_seq_read); | 2602 | EXPORT_SYMBOL(skb_abort_seq_read); |
2626 | EXPORT_SYMBOL(skb_find_text); | 2603 | EXPORT_SYMBOL(skb_find_text); |
2627 | EXPORT_SYMBOL(skb_append_datato_frags); | 2604 | EXPORT_SYMBOL(skb_append_datato_frags); |
2605 | EXPORT_SYMBOL(__skb_warn_lro_forwarding); | ||
2628 | 2606 | ||
2629 | EXPORT_SYMBOL_GPL(skb_to_sgvec); | 2607 | EXPORT_SYMBOL_GPL(skb_to_sgvec); |
2630 | EXPORT_SYMBOL_GPL(skb_cow_data); | 2608 | EXPORT_SYMBOL_GPL(skb_cow_data); |
diff --git a/net/core/sock.c b/net/core/sock.c index 88094cb09c06..10a64d57078c 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -7,8 +7,6 @@ | |||
7 | * handler for protocols to use and generic option handler. | 7 | * handler for protocols to use and generic option handler. |
8 | * | 8 | * |
9 | * | 9 | * |
10 | * Version: $Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $ | ||
11 | * | ||
12 | * Authors: Ross Biro | 10 | * Authors: Ross Biro |
13 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
14 | * Florian La Roche, <flla@stud.uni-sb.de> | 12 | * Florian La Roche, <flla@stud.uni-sb.de> |
@@ -1068,7 +1066,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) | |||
1068 | * to be taken into account in all callers. -acme | 1066 | * to be taken into account in all callers. -acme |
1069 | */ | 1067 | */ |
1070 | sk_refcnt_debug_inc(newsk); | 1068 | sk_refcnt_debug_inc(newsk); |
1071 | newsk->sk_socket = NULL; | 1069 | sk_set_socket(newsk, NULL); |
1072 | newsk->sk_sleep = NULL; | 1070 | newsk->sk_sleep = NULL; |
1073 | 1071 | ||
1074 | if (newsk->sk_prot->sockets_allocated) | 1072 | if (newsk->sk_prot->sockets_allocated) |
@@ -1444,7 +1442,7 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind) | |||
1444 | /* Under pressure. */ | 1442 | /* Under pressure. */ |
1445 | if (allocated > prot->sysctl_mem[1]) | 1443 | if (allocated > prot->sysctl_mem[1]) |
1446 | if (prot->enter_memory_pressure) | 1444 | if (prot->enter_memory_pressure) |
1447 | prot->enter_memory_pressure(); | 1445 | prot->enter_memory_pressure(sk); |
1448 | 1446 | ||
1449 | /* Over hard limit. */ | 1447 | /* Over hard limit. */ |
1450 | if (allocated > prot->sysctl_mem[2]) | 1448 | if (allocated > prot->sysctl_mem[2]) |
@@ -1704,7 +1702,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
1704 | sk->sk_rcvbuf = sysctl_rmem_default; | 1702 | sk->sk_rcvbuf = sysctl_rmem_default; |
1705 | sk->sk_sndbuf = sysctl_wmem_default; | 1703 | sk->sk_sndbuf = sysctl_wmem_default; |
1706 | sk->sk_state = TCP_CLOSE; | 1704 | sk->sk_state = TCP_CLOSE; |
1707 | sk->sk_socket = sock; | 1705 | sk_set_socket(sk, sock); |
1708 | 1706 | ||
1709 | sock_set_flag(sk, SOCK_ZAPPED); | 1707 | sock_set_flag(sk, SOCK_ZAPPED); |
1710 | 1708 | ||
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 5fc801057244..a570e2af22cb 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -125,14 +125,6 @@ static struct ctl_table net_core_table[] = { | |||
125 | #endif /* CONFIG_XFRM */ | 125 | #endif /* CONFIG_XFRM */ |
126 | #endif /* CONFIG_NET */ | 126 | #endif /* CONFIG_NET */ |
127 | { | 127 | { |
128 | .ctl_name = NET_CORE_SOMAXCONN, | ||
129 | .procname = "somaxconn", | ||
130 | .data = &init_net.core.sysctl_somaxconn, | ||
131 | .maxlen = sizeof(int), | ||
132 | .mode = 0644, | ||
133 | .proc_handler = &proc_dointvec | ||
134 | }, | ||
135 | { | ||
136 | .ctl_name = NET_CORE_BUDGET, | 128 | .ctl_name = NET_CORE_BUDGET, |
137 | .procname = "netdev_budget", | 129 | .procname = "netdev_budget", |
138 | .data = &netdev_budget, | 130 | .data = &netdev_budget, |
@@ -151,6 +143,18 @@ static struct ctl_table net_core_table[] = { | |||
151 | { .ctl_name = 0 } | 143 | { .ctl_name = 0 } |
152 | }; | 144 | }; |
153 | 145 | ||
146 | static struct ctl_table netns_core_table[] = { | ||
147 | { | ||
148 | .ctl_name = NET_CORE_SOMAXCONN, | ||
149 | .procname = "somaxconn", | ||
150 | .data = &init_net.core.sysctl_somaxconn, | ||
151 | .maxlen = sizeof(int), | ||
152 | .mode = 0644, | ||
153 | .proc_handler = &proc_dointvec | ||
154 | }, | ||
155 | { .ctl_name = 0 } | ||
156 | }; | ||
157 | |||
154 | static __net_initdata struct ctl_path net_core_path[] = { | 158 | static __net_initdata struct ctl_path net_core_path[] = { |
155 | { .procname = "net", .ctl_name = CTL_NET, }, | 159 | { .procname = "net", .ctl_name = CTL_NET, }, |
156 | { .procname = "core", .ctl_name = NET_CORE, }, | 160 | { .procname = "core", .ctl_name = NET_CORE, }, |
@@ -159,23 +163,17 @@ static __net_initdata struct ctl_path net_core_path[] = { | |||
159 | 163 | ||
160 | static __net_init int sysctl_core_net_init(struct net *net) | 164 | static __net_init int sysctl_core_net_init(struct net *net) |
161 | { | 165 | { |
162 | struct ctl_table *tbl, *tmp; | 166 | struct ctl_table *tbl; |
163 | 167 | ||
164 | net->core.sysctl_somaxconn = SOMAXCONN; | 168 | net->core.sysctl_somaxconn = SOMAXCONN; |
165 | 169 | ||
166 | tbl = net_core_table; | 170 | tbl = netns_core_table; |
167 | if (net != &init_net) { | 171 | if (net != &init_net) { |
168 | tbl = kmemdup(tbl, sizeof(net_core_table), GFP_KERNEL); | 172 | tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL); |
169 | if (tbl == NULL) | 173 | if (tbl == NULL) |
170 | goto err_dup; | 174 | goto err_dup; |
171 | 175 | ||
172 | for (tmp = tbl; tmp->procname; tmp++) { | 176 | tbl[0].data = &net->core.sysctl_somaxconn; |
173 | if (tmp->data >= (void *)&init_net && | ||
174 | tmp->data < (void *)(&init_net + 1)) | ||
175 | tmp->data += (char *)net - (char *)&init_net; | ||
176 | else | ||
177 | tmp->mode &= ~0222; | ||
178 | } | ||
179 | } | 177 | } |
180 | 178 | ||
181 | net->core.sysctl_hdr = register_net_sysctl_table(net, | 179 | net->core.sysctl_hdr = register_net_sysctl_table(net, |
@@ -186,7 +184,7 @@ static __net_init int sysctl_core_net_init(struct net *net) | |||
186 | return 0; | 184 | return 0; |
187 | 185 | ||
188 | err_reg: | 186 | err_reg: |
189 | if (tbl != net_core_table) | 187 | if (tbl != netns_core_table) |
190 | kfree(tbl); | 188 | kfree(tbl); |
191 | err_dup: | 189 | err_dup: |
192 | return -ENOMEM; | 190 | return -ENOMEM; |
@@ -198,7 +196,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net) | |||
198 | 196 | ||
199 | tbl = net->core.sysctl_hdr->ctl_table_arg; | 197 | tbl = net->core.sysctl_hdr->ctl_table_arg; |
200 | unregister_net_sysctl_table(net->core.sysctl_hdr); | 198 | unregister_net_sysctl_table(net->core.sysctl_hdr); |
201 | BUG_ON(tbl == net_core_table); | 199 | BUG_ON(tbl == netns_core_table); |
202 | kfree(tbl); | 200 | kfree(tbl); |
203 | } | 201 | } |
204 | 202 | ||
@@ -209,6 +207,7 @@ static __net_initdata struct pernet_operations sysctl_core_ops = { | |||
209 | 207 | ||
210 | static __init int sysctl_core_init(void) | 208 | static __init int sysctl_core_init(void) |
211 | { | 209 | { |
210 | register_net_sysctl_rotable(net_core_path, net_core_table); | ||
212 | return register_pernet_subsys(&sysctl_core_ops); | 211 | return register_pernet_subsys(&sysctl_core_ops); |
213 | } | 212 | } |
214 | 213 | ||
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index a1929f33d703..f6756e0c9e69 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c | |||
@@ -794,7 +794,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
794 | { | 794 | { |
795 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); | 795 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); |
796 | enum ccid3_fback_type do_feedback = CCID3_FBACK_NONE; | 796 | enum ccid3_fback_type do_feedback = CCID3_FBACK_NONE; |
797 | const u32 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp; | 797 | const u64 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp; |
798 | const bool is_data_packet = dccp_data_packet(skb); | 798 | const bool is_data_packet = dccp_data_packet(skb); |
799 | 799 | ||
800 | if (unlikely(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)) { | 800 | if (unlikely(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)) { |
@@ -825,18 +825,16 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
825 | } | 825 | } |
826 | 826 | ||
827 | /* | 827 | /* |
828 | * Handle pending losses and otherwise check for new loss | 828 | * Perform loss detection and handle pending losses |
829 | */ | 829 | */ |
830 | if (tfrc_rx_hist_loss_pending(&hcrx->ccid3hcrx_hist) && | 830 | if (tfrc_rx_handle_loss(&hcrx->ccid3hcrx_hist, &hcrx->ccid3hcrx_li_hist, |
831 | tfrc_rx_handle_loss(&hcrx->ccid3hcrx_hist, | 831 | skb, ndp, ccid3_first_li, sk)) { |
832 | &hcrx->ccid3hcrx_li_hist, | ||
833 | skb, ndp, ccid3_first_li, sk) ) { | ||
834 | do_feedback = CCID3_FBACK_PARAM_CHANGE; | 832 | do_feedback = CCID3_FBACK_PARAM_CHANGE; |
835 | goto done_receiving; | 833 | goto done_receiving; |
836 | } | 834 | } |
837 | 835 | ||
838 | if (tfrc_rx_hist_new_loss_indicated(&hcrx->ccid3hcrx_hist, skb, ndp)) | 836 | if (tfrc_rx_hist_loss_pending(&hcrx->ccid3hcrx_hist)) |
839 | goto update_records; | 837 | return; /* done receiving */ |
840 | 838 | ||
841 | /* | 839 | /* |
842 | * Handle data packets: RTT sampling and monitoring p | 840 | * Handle data packets: RTT sampling and monitoring p |
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c index 849e181e698f..bcd6ac415bb9 100644 --- a/net/dccp/ccids/lib/loss_interval.c +++ b/net/dccp/ccids/lib/loss_interval.c | |||
@@ -90,14 +90,14 @@ u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb) | |||
90 | { | 90 | { |
91 | struct tfrc_loss_interval *cur = tfrc_lh_peek(lh); | 91 | struct tfrc_loss_interval *cur = tfrc_lh_peek(lh); |
92 | u32 old_i_mean = lh->i_mean; | 92 | u32 old_i_mean = lh->i_mean; |
93 | s64 length; | 93 | s64 len; |
94 | 94 | ||
95 | if (cur == NULL) /* not initialised */ | 95 | if (cur == NULL) /* not initialised */ |
96 | return 0; | 96 | return 0; |
97 | 97 | ||
98 | length = dccp_delta_seqno(cur->li_seqno, DCCP_SKB_CB(skb)->dccpd_seq); | 98 | len = dccp_delta_seqno(cur->li_seqno, DCCP_SKB_CB(skb)->dccpd_seq) + 1; |
99 | 99 | ||
100 | if (length - cur->li_length <= 0) /* duplicate or reordered */ | 100 | if (len - (s64)cur->li_length <= 0) /* duplicate or reordered */ |
101 | return 0; | 101 | return 0; |
102 | 102 | ||
103 | if (SUB16(dccp_hdr(skb)->dccph_ccval, cur->li_ccval) > 4) | 103 | if (SUB16(dccp_hdr(skb)->dccph_ccval, cur->li_ccval) > 4) |
@@ -114,7 +114,7 @@ u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb) | |||
114 | if (tfrc_lh_length(lh) == 1) /* due to RFC 3448, 6.3.1 */ | 114 | if (tfrc_lh_length(lh) == 1) /* due to RFC 3448, 6.3.1 */ |
115 | return 0; | 115 | return 0; |
116 | 116 | ||
117 | cur->li_length = length; | 117 | cur->li_length = len; |
118 | tfrc_lh_calc_i_mean(lh); | 118 | tfrc_lh_calc_i_mean(lh); |
119 | 119 | ||
120 | return (lh->i_mean < old_i_mean); | 120 | return (lh->i_mean < old_i_mean); |
@@ -159,7 +159,7 @@ int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh, | |||
159 | else { | 159 | else { |
160 | cur->li_length = dccp_delta_seqno(cur->li_seqno, new->li_seqno); | 160 | cur->li_length = dccp_delta_seqno(cur->li_seqno, new->li_seqno); |
161 | new->li_length = dccp_delta_seqno(new->li_seqno, | 161 | new->li_length = dccp_delta_seqno(new->li_seqno, |
162 | tfrc_rx_hist_last_rcv(rh)->tfrchrx_seqno); | 162 | tfrc_rx_hist_last_rcv(rh)->tfrchrx_seqno) + 1; |
163 | if (lh->counter > (2*LIH_SIZE)) | 163 | if (lh->counter > (2*LIH_SIZE)) |
164 | lh->counter -= LIH_SIZE; | 164 | lh->counter -= LIH_SIZE; |
165 | 165 | ||
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c index 20af1a693427..6cc108afdc3b 100644 --- a/net/dccp/ccids/lib/packet_history.c +++ b/net/dccp/ccids/lib/packet_history.c | |||
@@ -153,7 +153,7 @@ void tfrc_rx_packet_history_exit(void) | |||
153 | 153 | ||
154 | static inline void tfrc_rx_hist_entry_from_skb(struct tfrc_rx_hist_entry *entry, | 154 | static inline void tfrc_rx_hist_entry_from_skb(struct tfrc_rx_hist_entry *entry, |
155 | const struct sk_buff *skb, | 155 | const struct sk_buff *skb, |
156 | const u32 ndp) | 156 | const u64 ndp) |
157 | { | 157 | { |
158 | const struct dccp_hdr *dh = dccp_hdr(skb); | 158 | const struct dccp_hdr *dh = dccp_hdr(skb); |
159 | 159 | ||
@@ -166,7 +166,7 @@ static inline void tfrc_rx_hist_entry_from_skb(struct tfrc_rx_hist_entry *entry, | |||
166 | 166 | ||
167 | void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h, | 167 | void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h, |
168 | const struct sk_buff *skb, | 168 | const struct sk_buff *skb, |
169 | const u32 ndp) | 169 | const u64 ndp) |
170 | { | 170 | { |
171 | struct tfrc_rx_hist_entry *entry = tfrc_rx_hist_last_rcv(h); | 171 | struct tfrc_rx_hist_entry *entry = tfrc_rx_hist_last_rcv(h); |
172 | 172 | ||
@@ -206,31 +206,39 @@ static void tfrc_rx_hist_swap(struct tfrc_rx_hist *h, const u8 a, const u8 b) | |||
206 | * | 206 | * |
207 | * In the descriptions, `Si' refers to the sequence number of entry number i, | 207 | * In the descriptions, `Si' refers to the sequence number of entry number i, |
208 | * whose NDP count is `Ni' (lower case is used for variables). | 208 | * whose NDP count is `Ni' (lower case is used for variables). |
209 | * Note: All __after_loss functions expect that a test against duplicates has | 209 | * Note: All __xxx_loss functions expect that a test against duplicates has been |
210 | * been performed already: the seqno of the skb must not be less than the | 210 | * performed already: the seqno of the skb must not be less than the seqno |
211 | * seqno of loss_prev; and it must not equal that of any valid hist_entry. | 211 | * of loss_prev; and it must not equal that of any valid history entry. |
212 | */ | 212 | */ |
213 | static void __do_track_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u64 n1) | ||
214 | { | ||
215 | u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, | ||
216 | s1 = DCCP_SKB_CB(skb)->dccpd_seq; | ||
217 | |||
218 | if (!dccp_loss_free(s0, s1, n1)) { /* gap between S0 and S1 */ | ||
219 | h->loss_count = 1; | ||
220 | tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n1); | ||
221 | } | ||
222 | } | ||
223 | |||
213 | static void __one_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n2) | 224 | static void __one_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n2) |
214 | { | 225 | { |
215 | u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, | 226 | u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, |
216 | s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno, | 227 | s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno, |
217 | s2 = DCCP_SKB_CB(skb)->dccpd_seq; | 228 | s2 = DCCP_SKB_CB(skb)->dccpd_seq; |
218 | int n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp, | ||
219 | d12 = dccp_delta_seqno(s1, s2), d2; | ||
220 | 229 | ||
221 | if (d12 > 0) { /* S1 < S2 */ | 230 | if (likely(dccp_delta_seqno(s1, s2) > 0)) { /* S1 < S2 */ |
222 | h->loss_count = 2; | 231 | h->loss_count = 2; |
223 | tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n2); | 232 | tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n2); |
224 | return; | 233 | return; |
225 | } | 234 | } |
226 | 235 | ||
227 | /* S0 < S2 < S1 */ | 236 | /* S0 < S2 < S1 */ |
228 | d2 = dccp_delta_seqno(s0, s2); | ||
229 | 237 | ||
230 | if (d2 == 1 || n2 >= d2) { /* S2 is direct successor of S0 */ | 238 | if (dccp_loss_free(s0, s2, n2)) { |
231 | int d21 = -d12; | 239 | u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp; |
232 | 240 | ||
233 | if (d21 == 1 || n1 >= d21) { | 241 | if (dccp_loss_free(s2, s1, n1)) { |
234 | /* hole is filled: S0, S2, and S1 are consecutive */ | 242 | /* hole is filled: S0, S2, and S1 are consecutive */ |
235 | h->loss_count = 0; | 243 | h->loss_count = 0; |
236 | h->loss_start = tfrc_rx_hist_index(h, 1); | 244 | h->loss_start = tfrc_rx_hist_index(h, 1); |
@@ -238,9 +246,9 @@ static void __one_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n2 | |||
238 | /* gap between S2 and S1: just update loss_prev */ | 246 | /* gap between S2 and S1: just update loss_prev */ |
239 | tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n2); | 247 | tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n2); |
240 | 248 | ||
241 | } else { /* hole between S0 and S2 */ | 249 | } else { /* gap between S0 and S2 */ |
242 | /* | 250 | /* |
243 | * Reorder history to insert S2 between S0 and s1 | 251 | * Reorder history to insert S2 between S0 and S1 |
244 | */ | 252 | */ |
245 | tfrc_rx_hist_swap(h, 0, 3); | 253 | tfrc_rx_hist_swap(h, 0, 3); |
246 | h->loss_start = tfrc_rx_hist_index(h, 3); | 254 | h->loss_start = tfrc_rx_hist_index(h, 3); |
@@ -256,22 +264,18 @@ static int __two_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n3) | |||
256 | s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno, | 264 | s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno, |
257 | s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno, | 265 | s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno, |
258 | s3 = DCCP_SKB_CB(skb)->dccpd_seq; | 266 | s3 = DCCP_SKB_CB(skb)->dccpd_seq; |
259 | int n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp, | ||
260 | d23 = dccp_delta_seqno(s2, s3), d13, d3, d31; | ||
261 | 267 | ||
262 | if (d23 > 0) { /* S2 < S3 */ | 268 | if (likely(dccp_delta_seqno(s2, s3) > 0)) { /* S2 < S3 */ |
263 | h->loss_count = 3; | 269 | h->loss_count = 3; |
264 | tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 3), skb, n3); | 270 | tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 3), skb, n3); |
265 | return 1; | 271 | return 1; |
266 | } | 272 | } |
267 | 273 | ||
268 | /* S3 < S2 */ | 274 | /* S3 < S2 */ |
269 | d13 = dccp_delta_seqno(s1, s3); | ||
270 | 275 | ||
271 | if (d13 > 0) { | 276 | if (dccp_delta_seqno(s1, s3) > 0) { /* S1 < S3 < S2 */ |
272 | /* | 277 | /* |
273 | * The sequence number order is S1, S3, S2 | 278 | * Reorder history to insert S3 between S1 and S2 |
274 | * Reorder history to insert entry between S1 and S2 | ||
275 | */ | 279 | */ |
276 | tfrc_rx_hist_swap(h, 2, 3); | 280 | tfrc_rx_hist_swap(h, 2, 3); |
277 | tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n3); | 281 | tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n3); |
@@ -280,17 +284,15 @@ static int __two_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n3) | |||
280 | } | 284 | } |
281 | 285 | ||
282 | /* S0 < S3 < S1 */ | 286 | /* S0 < S3 < S1 */ |
283 | d31 = -d13; | ||
284 | d3 = dccp_delta_seqno(s0, s3); | ||
285 | 287 | ||
286 | if (d3 == 1 || n3 >= d3) { /* S3 is a successor of S0 */ | 288 | if (dccp_loss_free(s0, s3, n3)) { |
289 | u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp; | ||
287 | 290 | ||
288 | if (d31 == 1 || n1 >= d31) { | 291 | if (dccp_loss_free(s3, s1, n1)) { |
289 | /* hole between S0 and S1 filled by S3 */ | 292 | /* hole between S0 and S1 filled by S3 */ |
290 | int d2 = dccp_delta_seqno(s1, s2), | 293 | u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp; |
291 | n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp; | ||
292 | 294 | ||
293 | if (d2 == 1 || n2 >= d2) { | 295 | if (dccp_loss_free(s1, s2, n2)) { |
294 | /* entire hole filled by S0, S3, S1, S2 */ | 296 | /* entire hole filled by S0, S3, S1, S2 */ |
295 | h->loss_start = tfrc_rx_hist_index(h, 2); | 297 | h->loss_start = tfrc_rx_hist_index(h, 2); |
296 | h->loss_count = 0; | 298 | h->loss_count = 0; |
@@ -307,8 +309,8 @@ static int __two_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n3) | |||
307 | } | 309 | } |
308 | 310 | ||
309 | /* | 311 | /* |
310 | * The remaining case: S3 is not a successor of S0. | 312 | * The remaining case: S0 < S3 < S1 < S2; gap between S0 and S3 |
311 | * Sequence order is S0, S3, S1, S2; reorder to insert between S0 and S1 | 313 | * Reorder history to insert S3 between S0 and S1. |
312 | */ | 314 | */ |
313 | tfrc_rx_hist_swap(h, 0, 3); | 315 | tfrc_rx_hist_swap(h, 0, 3); |
314 | h->loss_start = tfrc_rx_hist_index(h, 3); | 316 | h->loss_start = tfrc_rx_hist_index(h, 3); |
@@ -318,33 +320,25 @@ static int __two_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n3) | |||
318 | return 1; | 320 | return 1; |
319 | } | 321 | } |
320 | 322 | ||
321 | /* return the signed modulo-2^48 sequence number distance from entry e1 to e2 */ | ||
322 | static s64 tfrc_rx_hist_delta_seqno(struct tfrc_rx_hist *h, u8 e1, u8 e2) | ||
323 | { | ||
324 | DCCP_BUG_ON(e1 > h->loss_count || e2 > h->loss_count); | ||
325 | |||
326 | return dccp_delta_seqno(tfrc_rx_hist_entry(h, e1)->tfrchrx_seqno, | ||
327 | tfrc_rx_hist_entry(h, e2)->tfrchrx_seqno); | ||
328 | } | ||
329 | |||
330 | /* recycle RX history records to continue loss detection if necessary */ | 323 | /* recycle RX history records to continue loss detection if necessary */ |
331 | static void __three_after_loss(struct tfrc_rx_hist *h) | 324 | static void __three_after_loss(struct tfrc_rx_hist *h) |
332 | { | 325 | { |
333 | /* | 326 | /* |
334 | * The distance between S0 and S1 is always greater than 1 and the NDP | 327 | * At this stage we know already that there is a gap between S0 and S1 |
335 | * count of S1 is smaller than this distance. Otherwise there would | 328 | * (since S0 was the highest sequence number received before detecting |
336 | * have been no loss. Hence it is only necessary to see whether there | 329 | * the loss). To recycle the loss record, it is thus only necessary to |
337 | * are further missing data packets between S1/S2 and S2/S3. | 330 | * check for other possible gaps between S1/S2 and between S2/S3. |
338 | */ | 331 | */ |
339 | int d2 = tfrc_rx_hist_delta_seqno(h, 1, 2), | 332 | u64 s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno, |
340 | d3 = tfrc_rx_hist_delta_seqno(h, 2, 3), | 333 | s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno, |
341 | n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp, | 334 | s3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_seqno; |
335 | u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp, | ||
342 | n3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_ndp; | 336 | n3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_ndp; |
343 | 337 | ||
344 | if (d2 == 1 || n2 >= d2) { /* S2 is successor to S1 */ | 338 | if (dccp_loss_free(s1, s2, n2)) { |
345 | 339 | ||
346 | if (d3 == 1 || n3 >= d3) { | 340 | if (dccp_loss_free(s2, s3, n3)) { |
347 | /* S3 is successor of S2: entire hole is filled */ | 341 | /* no gap between S2 and S3: entire hole is filled */ |
348 | h->loss_start = tfrc_rx_hist_index(h, 3); | 342 | h->loss_start = tfrc_rx_hist_index(h, 3); |
349 | h->loss_count = 0; | 343 | h->loss_count = 0; |
350 | } else { | 344 | } else { |
@@ -353,7 +347,7 @@ static void __three_after_loss(struct tfrc_rx_hist *h) | |||
353 | h->loss_count = 1; | 347 | h->loss_count = 1; |
354 | } | 348 | } |
355 | 349 | ||
356 | } else { /* gap between S1 and S2 */ | 350 | } else { /* gap between S1 and S2 */ |
357 | h->loss_start = tfrc_rx_hist_index(h, 1); | 351 | h->loss_start = tfrc_rx_hist_index(h, 1); |
358 | h->loss_count = 2; | 352 | h->loss_count = 2; |
359 | } | 353 | } |
@@ -370,15 +364,20 @@ static void __three_after_loss(struct tfrc_rx_hist *h) | |||
370 | * Chooses action according to pending loss, updates LI database when a new | 364 | * Chooses action according to pending loss, updates LI database when a new |
371 | * loss was detected, and does required post-processing. Returns 1 when caller | 365 | * loss was detected, and does required post-processing. Returns 1 when caller |
372 | * should send feedback, 0 otherwise. | 366 | * should send feedback, 0 otherwise. |
367 | * Since it also takes care of reordering during loss detection and updates the | ||
368 | * records accordingly, the caller should not perform any more RX history | ||
369 | * operations when loss_count is greater than 0 after calling this function. | ||
373 | */ | 370 | */ |
374 | int tfrc_rx_handle_loss(struct tfrc_rx_hist *h, | 371 | int tfrc_rx_handle_loss(struct tfrc_rx_hist *h, |
375 | struct tfrc_loss_hist *lh, | 372 | struct tfrc_loss_hist *lh, |
376 | struct sk_buff *skb, u32 ndp, | 373 | struct sk_buff *skb, const u64 ndp, |
377 | u32 (*calc_first_li)(struct sock *), struct sock *sk) | 374 | u32 (*calc_first_li)(struct sock *), struct sock *sk) |
378 | { | 375 | { |
379 | int is_new_loss = 0; | 376 | int is_new_loss = 0; |
380 | 377 | ||
381 | if (h->loss_count == 1) { | 378 | if (h->loss_count == 0) { |
379 | __do_track_loss(h, skb, ndp); | ||
380 | } else if (h->loss_count == 1) { | ||
382 | __one_after_loss(h, skb, ndp); | 381 | __one_after_loss(h, skb, ndp); |
383 | } else if (h->loss_count != 2) { | 382 | } else if (h->loss_count != 2) { |
384 | DCCP_BUG("invalid loss_count %d", h->loss_count); | 383 | DCCP_BUG("invalid loss_count %d", h->loss_count); |
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h index c7eeda49cb20..461cc91cce88 100644 --- a/net/dccp/ccids/lib/packet_history.h +++ b/net/dccp/ccids/lib/packet_history.h | |||
@@ -64,7 +64,7 @@ struct tfrc_rx_hist_entry { | |||
64 | u64 tfrchrx_seqno:48, | 64 | u64 tfrchrx_seqno:48, |
65 | tfrchrx_ccval:4, | 65 | tfrchrx_ccval:4, |
66 | tfrchrx_type:4; | 66 | tfrchrx_type:4; |
67 | u32 tfrchrx_ndp; /* In fact it is from 8 to 24 bits */ | 67 | u64 tfrchrx_ndp:48; |
68 | ktime_t tfrchrx_tstamp; | 68 | ktime_t tfrchrx_tstamp; |
69 | }; | 69 | }; |
70 | 70 | ||
@@ -118,41 +118,21 @@ static inline struct tfrc_rx_hist_entry * | |||
118 | return h->ring[h->loss_start]; | 118 | return h->ring[h->loss_start]; |
119 | } | 119 | } |
120 | 120 | ||
121 | /* initialise loss detection and disable RTT sampling */ | ||
122 | static inline void tfrc_rx_hist_loss_indicated(struct tfrc_rx_hist *h) | ||
123 | { | ||
124 | h->loss_count = 1; | ||
125 | } | ||
126 | |||
127 | /* indicate whether previously a packet was detected missing */ | 121 | /* indicate whether previously a packet was detected missing */ |
128 | static inline int tfrc_rx_hist_loss_pending(const struct tfrc_rx_hist *h) | 122 | static inline bool tfrc_rx_hist_loss_pending(const struct tfrc_rx_hist *h) |
129 | { | ||
130 | return h->loss_count; | ||
131 | } | ||
132 | |||
133 | /* any data packets missing between last reception and skb ? */ | ||
134 | static inline int tfrc_rx_hist_new_loss_indicated(struct tfrc_rx_hist *h, | ||
135 | const struct sk_buff *skb, | ||
136 | u32 ndp) | ||
137 | { | 123 | { |
138 | int delta = dccp_delta_seqno(tfrc_rx_hist_last_rcv(h)->tfrchrx_seqno, | 124 | return h->loss_count > 0; |
139 | DCCP_SKB_CB(skb)->dccpd_seq); | ||
140 | |||
141 | if (delta > 1 && ndp < delta) | ||
142 | tfrc_rx_hist_loss_indicated(h); | ||
143 | |||
144 | return tfrc_rx_hist_loss_pending(h); | ||
145 | } | 125 | } |
146 | 126 | ||
147 | extern void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h, | 127 | extern void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h, |
148 | const struct sk_buff *skb, const u32 ndp); | 128 | const struct sk_buff *skb, const u64 ndp); |
149 | 129 | ||
150 | extern int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb); | 130 | extern int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb); |
151 | 131 | ||
152 | struct tfrc_loss_hist; | 132 | struct tfrc_loss_hist; |
153 | extern int tfrc_rx_handle_loss(struct tfrc_rx_hist *h, | 133 | extern int tfrc_rx_handle_loss(struct tfrc_rx_hist *h, |
154 | struct tfrc_loss_hist *lh, | 134 | struct tfrc_loss_hist *lh, |
155 | struct sk_buff *skb, u32 ndp, | 135 | struct sk_buff *skb, const u64 ndp, |
156 | u32 (*first_li)(struct sock *sk), | 136 | u32 (*first_li)(struct sock *sk), |
157 | struct sock *sk); | 137 | struct sock *sk); |
158 | extern u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h, | 138 | extern u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h, |
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index f44d492d3b74..32617e0576cb 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h | |||
@@ -153,6 +153,21 @@ static inline u64 max48(const u64 seq1, const u64 seq2) | |||
153 | return after48(seq1, seq2) ? seq1 : seq2; | 153 | return after48(seq1, seq2) ? seq1 : seq2; |
154 | } | 154 | } |
155 | 155 | ||
156 | /** | ||
157 | * dccp_loss_free - Evaluates condition for data loss from RFC 4340, 7.7.1 | ||
158 | * @s1: start sequence number | ||
159 | * @s2: end sequence number | ||
160 | * @ndp: NDP count on packet with sequence number @s2 | ||
161 | * Returns true if the sequence range s1...s2 has no data loss. | ||
162 | */ | ||
163 | static inline bool dccp_loss_free(const u64 s1, const u64 s2, const u64 ndp) | ||
164 | { | ||
165 | s64 delta = dccp_delta_seqno(s1, s2); | ||
166 | |||
167 | BUG_TRAP(delta >= 0); | ||
168 | return (u64)delta <= ndp + 1; | ||
169 | } | ||
170 | |||
156 | enum { | 171 | enum { |
157 | DCCP_MIB_NUM = 0, | 172 | DCCP_MIB_NUM = 0, |
158 | DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */ | 173 | DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */ |
@@ -262,7 +277,7 @@ extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
262 | const struct dccp_hdr *dh, const unsigned len); | 277 | const struct dccp_hdr *dh, const unsigned len); |
263 | 278 | ||
264 | extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized); | 279 | extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized); |
265 | extern int dccp_destroy_sock(struct sock *sk); | 280 | extern void dccp_destroy_sock(struct sock *sk); |
266 | 281 | ||
267 | extern void dccp_close(struct sock *sk, long timeout); | 282 | extern void dccp_close(struct sock *sk, long timeout); |
268 | extern struct sk_buff *dccp_make_response(struct sock *sk, | 283 | extern struct sk_buff *dccp_make_response(struct sock *sk, |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 37d27bcb361f..2622ace17c46 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -205,17 +205,18 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) | |||
205 | struct sock *sk; | 205 | struct sock *sk; |
206 | __u64 seq; | 206 | __u64 seq; |
207 | int err; | 207 | int err; |
208 | struct net *net = dev_net(skb->dev); | ||
208 | 209 | ||
209 | if (skb->len < (iph->ihl << 2) + 8) { | 210 | if (skb->len < (iph->ihl << 2) + 8) { |
210 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | 211 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); |
211 | return; | 212 | return; |
212 | } | 213 | } |
213 | 214 | ||
214 | sk = inet_lookup(dev_net(skb->dev), &dccp_hashinfo, | 215 | sk = inet_lookup(net, &dccp_hashinfo, |
215 | iph->daddr, dh->dccph_dport, | 216 | iph->daddr, dh->dccph_dport, |
216 | iph->saddr, dh->dccph_sport, inet_iif(skb)); | 217 | iph->saddr, dh->dccph_sport, inet_iif(skb)); |
217 | if (sk == NULL) { | 218 | if (sk == NULL) { |
218 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | 219 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); |
219 | return; | 220 | return; |
220 | } | 221 | } |
221 | 222 | ||
@@ -229,7 +230,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) | |||
229 | * servers this needs to be solved differently. | 230 | * servers this needs to be solved differently. |
230 | */ | 231 | */ |
231 | if (sock_owned_by_user(sk)) | 232 | if (sock_owned_by_user(sk)) |
232 | NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); | 233 | NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); |
233 | 234 | ||
234 | if (sk->sk_state == DCCP_CLOSED) | 235 | if (sk->sk_state == DCCP_CLOSED) |
235 | goto out; | 236 | goto out; |
@@ -238,7 +239,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) | |||
238 | seq = dccp_hdr_seq(dh); | 239 | seq = dccp_hdr_seq(dh); |
239 | if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && | 240 | if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && |
240 | !between48(seq, dp->dccps_swl, dp->dccps_swh)) { | 241 | !between48(seq, dp->dccps_swl, dp->dccps_swh)) { |
241 | NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); | 242 | NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); |
242 | goto out; | 243 | goto out; |
243 | } | 244 | } |
244 | 245 | ||
@@ -285,7 +286,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) | |||
285 | BUG_TRAP(!req->sk); | 286 | BUG_TRAP(!req->sk); |
286 | 287 | ||
287 | if (seq != dccp_rsk(req)->dreq_iss) { | 288 | if (seq != dccp_rsk(req)->dreq_iss) { |
288 | NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); | 289 | NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); |
289 | goto out; | 290 | goto out; |
290 | } | 291 | } |
291 | /* | 292 | /* |
@@ -408,9 +409,9 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
408 | return newsk; | 409 | return newsk; |
409 | 410 | ||
410 | exit_overflow: | 411 | exit_overflow: |
411 | NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS); | 412 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); |
412 | exit: | 413 | exit: |
413 | NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS); | 414 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); |
414 | dst_release(dst); | 415 | dst_release(dst); |
415 | return NULL; | 416 | return NULL; |
416 | } | 417 | } |
@@ -464,7 +465,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk, | |||
464 | 465 | ||
465 | security_skb_classify_flow(skb, &fl); | 466 | security_skb_classify_flow(skb, &fl); |
466 | if (ip_route_output_flow(net, &rt, &fl, sk, 0)) { | 467 | if (ip_route_output_flow(net, &rt, &fl, sk, 0)) { |
467 | IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); | 468 | IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); |
468 | return NULL; | 469 | return NULL; |
469 | } | 470 | } |
470 | 471 | ||
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index f7fe2a572d7b..b74e8b2cbe55 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -93,8 +93,9 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
93 | struct sock *sk; | 93 | struct sock *sk; |
94 | int err; | 94 | int err; |
95 | __u64 seq; | 95 | __u64 seq; |
96 | struct net *net = dev_net(skb->dev); | ||
96 | 97 | ||
97 | sk = inet6_lookup(dev_net(skb->dev), &dccp_hashinfo, | 98 | sk = inet6_lookup(net, &dccp_hashinfo, |
98 | &hdr->daddr, dh->dccph_dport, | 99 | &hdr->daddr, dh->dccph_dport, |
99 | &hdr->saddr, dh->dccph_sport, inet6_iif(skb)); | 100 | &hdr->saddr, dh->dccph_sport, inet6_iif(skb)); |
100 | 101 | ||
@@ -110,7 +111,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
110 | 111 | ||
111 | bh_lock_sock(sk); | 112 | bh_lock_sock(sk); |
112 | if (sock_owned_by_user(sk)) | 113 | if (sock_owned_by_user(sk)) |
113 | NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); | 114 | NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); |
114 | 115 | ||
115 | if (sk->sk_state == DCCP_CLOSED) | 116 | if (sk->sk_state == DCCP_CLOSED) |
116 | goto out; | 117 | goto out; |
@@ -188,7 +189,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
188 | BUG_TRAP(req->sk == NULL); | 189 | BUG_TRAP(req->sk == NULL); |
189 | 190 | ||
190 | if (seq != dccp_rsk(req)->dreq_iss) { | 191 | if (seq != dccp_rsk(req)->dreq_iss) { |
191 | NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); | 192 | NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); |
192 | goto out; | 193 | goto out; |
193 | } | 194 | } |
194 | 195 | ||
@@ -629,9 +630,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, | |||
629 | return newsk; | 630 | return newsk; |
630 | 631 | ||
631 | out_overflow: | 632 | out_overflow: |
632 | NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS); | 633 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); |
633 | out: | 634 | out: |
634 | NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS); | 635 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); |
635 | if (opt != NULL && opt != np->opt) | 636 | if (opt != NULL && opt != np->opt) |
636 | sock_kfree_s(sk, opt, opt->tot_len); | 637 | sock_kfree_s(sk, opt, opt->tot_len); |
637 | dst_release(dst); | 638 | dst_release(dst); |
@@ -1091,10 +1092,10 @@ static int dccp_v6_init_sock(struct sock *sk) | |||
1091 | return err; | 1092 | return err; |
1092 | } | 1093 | } |
1093 | 1094 | ||
1094 | static int dccp_v6_destroy_sock(struct sock *sk) | 1095 | static void dccp_v6_destroy_sock(struct sock *sk) |
1095 | { | 1096 | { |
1096 | dccp_destroy_sock(sk); | 1097 | dccp_destroy_sock(sk); |
1097 | return inet6_destroy_sock(sk); | 1098 | inet6_destroy_sock(sk); |
1098 | } | 1099 | } |
1099 | 1100 | ||
1100 | static struct timewait_sock_ops dccp6_timewait_sock_ops = { | 1101 | static struct timewait_sock_ops dccp6_timewait_sock_ops = { |
diff --git a/net/dccp/options.c b/net/dccp/options.c index 43bc24e761d0..dc7c158a2f4b 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c | |||
@@ -124,12 +124,12 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, | |||
124 | mandatory = 1; | 124 | mandatory = 1; |
125 | break; | 125 | break; |
126 | case DCCPO_NDP_COUNT: | 126 | case DCCPO_NDP_COUNT: |
127 | if (len > 3) | 127 | if (len > 6) |
128 | goto out_invalid_option; | 128 | goto out_invalid_option; |
129 | 129 | ||
130 | opt_recv->dccpor_ndp = dccp_decode_value_var(value, len); | 130 | opt_recv->dccpor_ndp = dccp_decode_value_var(value, len); |
131 | dccp_pr_debug("%s rx opt: NDP count=%d\n", dccp_role(sk), | 131 | dccp_pr_debug("%s opt: NDP count=%llu\n", dccp_role(sk), |
132 | opt_recv->dccpor_ndp); | 132 | (unsigned long long)opt_recv->dccpor_ndp); |
133 | break; | 133 | break; |
134 | case DCCPO_CHANGE_L: | 134 | case DCCPO_CHANGE_L: |
135 | /* fall through */ | 135 | /* fall through */ |
@@ -307,9 +307,11 @@ static void dccp_encode_value_var(const u32 value, unsigned char *to, | |||
307 | *to++ = (value & 0xFF); | 307 | *to++ = (value & 0xFF); |
308 | } | 308 | } |
309 | 309 | ||
310 | static inline int dccp_ndp_len(const int ndp) | 310 | static inline u8 dccp_ndp_len(const u64 ndp) |
311 | { | 311 | { |
312 | return likely(ndp <= 0xFF) ? 1 : ndp <= 0xFFFF ? 2 : 3; | 312 | if (likely(ndp <= 0xFF)) |
313 | return 1; | ||
314 | return likely(ndp <= USHORT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6); | ||
313 | } | 315 | } |
314 | 316 | ||
315 | int dccp_insert_option(struct sock *sk, struct sk_buff *skb, | 317 | int dccp_insert_option(struct sock *sk, struct sk_buff *skb, |
@@ -336,7 +338,7 @@ EXPORT_SYMBOL_GPL(dccp_insert_option); | |||
336 | static int dccp_insert_option_ndp(struct sock *sk, struct sk_buff *skb) | 338 | static int dccp_insert_option_ndp(struct sock *sk, struct sk_buff *skb) |
337 | { | 339 | { |
338 | struct dccp_sock *dp = dccp_sk(sk); | 340 | struct dccp_sock *dp = dccp_sk(sk); |
339 | int ndp = dp->dccps_ndp_count; | 341 | u64 ndp = dp->dccps_ndp_count; |
340 | 342 | ||
341 | if (dccp_non_data_packet(skb)) | 343 | if (dccp_non_data_packet(skb)) |
342 | ++dp->dccps_ndp_count; | 344 | ++dp->dccps_ndp_count; |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 9dfe2470962c..a0b56009611f 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -237,7 +237,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) | |||
237 | 237 | ||
238 | EXPORT_SYMBOL_GPL(dccp_init_sock); | 238 | EXPORT_SYMBOL_GPL(dccp_init_sock); |
239 | 239 | ||
240 | int dccp_destroy_sock(struct sock *sk) | 240 | void dccp_destroy_sock(struct sock *sk) |
241 | { | 241 | { |
242 | struct dccp_sock *dp = dccp_sk(sk); | 242 | struct dccp_sock *dp = dccp_sk(sk); |
243 | struct dccp_minisock *dmsk = dccp_msk(sk); | 243 | struct dccp_minisock *dmsk = dccp_msk(sk); |
@@ -268,8 +268,6 @@ int dccp_destroy_sock(struct sock *sk) | |||
268 | 268 | ||
269 | /* clean up feature negotiation state */ | 269 | /* clean up feature negotiation state */ |
270 | dccp_feat_clean(dmsk); | 270 | dccp_feat_clean(dmsk); |
271 | |||
272 | return 0; | ||
273 | } | 271 | } |
274 | 272 | ||
275 | EXPORT_SYMBOL_GPL(dccp_destroy_sock); | 273 | EXPORT_SYMBOL_GPL(dccp_destroy_sock); |
diff --git a/net/dccp/timer.c b/net/dccp/timer.c index 8703a792b560..3608d5342ca2 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c | |||
@@ -224,7 +224,7 @@ static void dccp_delack_timer(unsigned long data) | |||
224 | if (sock_owned_by_user(sk)) { | 224 | if (sock_owned_by_user(sk)) { |
225 | /* Try again later. */ | 225 | /* Try again later. */ |
226 | icsk->icsk_ack.blocked = 1; | 226 | icsk->icsk_ack.blocked = 1; |
227 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED); | 227 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); |
228 | sk_reset_timer(sk, &icsk->icsk_delack_timer, | 228 | sk_reset_timer(sk, &icsk->icsk_delack_timer, |
229 | jiffies + TCP_DELACK_MIN); | 229 | jiffies + TCP_DELACK_MIN); |
230 | goto out; | 230 | goto out; |
@@ -254,7 +254,7 @@ static void dccp_delack_timer(unsigned long data) | |||
254 | icsk->icsk_ack.ato = TCP_ATO_MIN; | 254 | icsk->icsk_ack.ato = TCP_ATO_MIN; |
255 | } | 255 | } |
256 | dccp_send_ack(sk); | 256 | dccp_send_ack(sk); |
257 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS); | 257 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS); |
258 | } | 258 | } |
259 | out: | 259 | out: |
260 | bh_unlock_sock(sk); | 260 | bh_unlock_sock(sk); |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index fc2efe899e91..3c23ab33dbc0 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -451,7 +451,7 @@ static void dn_destruct(struct sock *sk) | |||
451 | 451 | ||
452 | static int dn_memory_pressure; | 452 | static int dn_memory_pressure; |
453 | 453 | ||
454 | static void dn_enter_memory_pressure(void) | 454 | static void dn_enter_memory_pressure(struct sock *sk) |
455 | { | 455 | { |
456 | if (!dn_memory_pressure) { | 456 | if (!dn_memory_pressure) { |
457 | dn_memory_pressure = 1; | 457 | dn_memory_pressure = 1; |
@@ -1719,6 +1719,8 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1719 | * See if there is data ready to read, sleep if there isn't | 1719 | * See if there is data ready to read, sleep if there isn't |
1720 | */ | 1720 | */ |
1721 | for(;;) { | 1721 | for(;;) { |
1722 | DEFINE_WAIT(wait); | ||
1723 | |||
1722 | if (sk->sk_err) | 1724 | if (sk->sk_err) |
1723 | goto out; | 1725 | goto out; |
1724 | 1726 | ||
@@ -1748,14 +1750,11 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1748 | goto out; | 1750 | goto out; |
1749 | } | 1751 | } |
1750 | 1752 | ||
1751 | set_bit(SOCK_ASYNC_WAITDATA, &sock->flags); | 1753 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); |
1752 | SOCK_SLEEP_PRE(sk) | 1754 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1753 | 1755 | sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); | |
1754 | if (!dn_data_ready(sk, queue, flags, target)) | 1756 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1755 | schedule(); | 1757 | finish_wait(sk->sk_sleep, &wait); |
1756 | |||
1757 | SOCK_SLEEP_POST(sk) | ||
1758 | clear_bit(SOCK_ASYNC_WAITDATA, &sock->flags); | ||
1759 | } | 1758 | } |
1760 | 1759 | ||
1761 | for(skb = queue->next; skb != (struct sk_buff *)queue; skb = nskb) { | 1760 | for(skb = queue->next; skb != (struct sk_buff *)queue; skb = nskb) { |
@@ -2002,18 +2001,19 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
2002 | * size. | 2001 | * size. |
2003 | */ | 2002 | */ |
2004 | if (dn_queue_too_long(scp, queue, flags)) { | 2003 | if (dn_queue_too_long(scp, queue, flags)) { |
2004 | DEFINE_WAIT(wait); | ||
2005 | |||
2005 | if (flags & MSG_DONTWAIT) { | 2006 | if (flags & MSG_DONTWAIT) { |
2006 | err = -EWOULDBLOCK; | 2007 | err = -EWOULDBLOCK; |
2007 | goto out; | 2008 | goto out; |
2008 | } | 2009 | } |
2009 | 2010 | ||
2010 | SOCK_SLEEP_PRE(sk) | 2011 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); |
2011 | 2012 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | |
2012 | if (dn_queue_too_long(scp, queue, flags)) | 2013 | sk_wait_event(sk, &timeo, |
2013 | schedule(); | 2014 | !dn_queue_too_long(scp, queue, flags)); |
2014 | 2015 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | |
2015 | SOCK_SLEEP_POST(sk) | 2016 | finish_wait(sk->sk_sleep, &wait); |
2016 | |||
2017 | continue; | 2017 | continue; |
2018 | } | 2018 | } |
2019 | 2019 | ||
@@ -2089,7 +2089,7 @@ static int dn_device_event(struct notifier_block *this, unsigned long event, | |||
2089 | { | 2089 | { |
2090 | struct net_device *dev = (struct net_device *)ptr; | 2090 | struct net_device *dev = (struct net_device *)ptr; |
2091 | 2091 | ||
2092 | if (dev_net(dev) != &init_net) | 2092 | if (!net_eq(dev_net(dev), &init_net)) |
2093 | return NOTIFY_DONE; | 2093 | return NOTIFY_DONE; |
2094 | 2094 | ||
2095 | switch(event) { | 2095 | switch(event) { |
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index f50e88bf2661..821bd1cdec04 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c | |||
@@ -580,7 +580,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type | |||
580 | struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr; | 580 | struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr; |
581 | unsigned char padlen = 0; | 581 | unsigned char padlen = 0; |
582 | 582 | ||
583 | if (dev_net(dev) != &init_net) | 583 | if (!net_eq(dev_net(dev), &init_net)) |
584 | goto dump_it; | 584 | goto dump_it; |
585 | 585 | ||
586 | if (dn == NULL) | 586 | if (dn == NULL) |
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index 5b7539b7fe0c..14fbca55e908 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c | |||
@@ -229,7 +229,7 @@ static u32 dn_fib_rule_default_pref(struct fib_rules_ops *ops) | |||
229 | return 0; | 229 | return 0; |
230 | } | 230 | } |
231 | 231 | ||
232 | static void dn_fib_rule_flush_cache(void) | 232 | static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops) |
233 | { | 233 | { |
234 | dn_rt_cache_flush(-1); | 234 | dn_rt_cache_flush(-1); |
235 | } | 235 | } |
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 7c9bb13b1539..8789d2bb1b06 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c | |||
@@ -573,9 +573,7 @@ static int econet_release(struct socket *sock) | |||
573 | 573 | ||
574 | sk->sk_state_change(sk); /* It is useless. Just for sanity. */ | 574 | sk->sk_state_change(sk); /* It is useless. Just for sanity. */ |
575 | 575 | ||
576 | sock->sk = NULL; | 576 | sock_orphan(sk); |
577 | sk->sk_socket = NULL; | ||
578 | sock_set_flag(sk, SOCK_DEAD); | ||
579 | 577 | ||
580 | /* Purge queues */ | 578 | /* Purge queues */ |
581 | 579 | ||
@@ -1064,7 +1062,7 @@ static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet | |||
1064 | struct sock *sk; | 1062 | struct sock *sk; |
1065 | struct ec_device *edev = dev->ec_ptr; | 1063 | struct ec_device *edev = dev->ec_ptr; |
1066 | 1064 | ||
1067 | if (dev_net(dev) != &init_net) | 1065 | if (!net_eq(dev_net(dev), &init_net)) |
1068 | goto drop; | 1066 | goto drop; |
1069 | 1067 | ||
1070 | if (skb->pkt_type == PACKET_OTHERHOST) | 1068 | if (skb->pkt_type == PACKET_OTHERHOST) |
@@ -1121,7 +1119,7 @@ static int econet_notifier(struct notifier_block *this, unsigned long msg, void | |||
1121 | struct net_device *dev = (struct net_device *)data; | 1119 | struct net_device *dev = (struct net_device *)data; |
1122 | struct ec_device *edev; | 1120 | struct ec_device *edev; |
1123 | 1121 | ||
1124 | if (dev_net(dev) != &init_net) | 1122 | if (!net_eq(dev_net(dev), &init_net)) |
1125 | return NOTIFY_DONE; | 1123 | return NOTIFY_DONE; |
1126 | 1124 | ||
1127 | switch (msg) { | 1125 | switch (msg) { |
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 200ee1e63728..69dbc342a464 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c | |||
@@ -391,7 +391,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
391 | 391 | ||
392 | wstats.updated = 0; | 392 | wstats.updated = 0; |
393 | if (rx_stats->mask & IEEE80211_STATMASK_RSSI) { | 393 | if (rx_stats->mask & IEEE80211_STATMASK_RSSI) { |
394 | wstats.level = rx_stats->rssi; | 394 | wstats.level = rx_stats->signal; |
395 | wstats.updated |= IW_QUAL_LEVEL_UPDATED; | 395 | wstats.updated |= IW_QUAL_LEVEL_UPDATED; |
396 | } else | 396 | } else |
397 | wstats.updated |= IW_QUAL_LEVEL_INVALID; | 397 | wstats.updated |= IW_QUAL_LEVEL_INVALID; |
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index d8b02603cbe5..d996547f7a62 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c | |||
@@ -542,90 +542,4 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
542 | return 1; | 542 | return 1; |
543 | } | 543 | } |
544 | 544 | ||
545 | /* Incoming 802.11 strucure is converted to a TXB | ||
546 | * a block of 802.11 fragment packets (stored as skbs) */ | ||
547 | int ieee80211_tx_frame(struct ieee80211_device *ieee, | ||
548 | struct ieee80211_hdr *frame, int hdr_len, int total_len, | ||
549 | int encrypt_mpdu) | ||
550 | { | ||
551 | struct ieee80211_txb *txb = NULL; | ||
552 | unsigned long flags; | ||
553 | struct net_device_stats *stats = &ieee->stats; | ||
554 | struct sk_buff *skb_frag; | ||
555 | int priority = -1; | ||
556 | int fraglen = total_len; | ||
557 | int headroom = ieee->tx_headroom; | ||
558 | struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx]; | ||
559 | |||
560 | spin_lock_irqsave(&ieee->lock, flags); | ||
561 | |||
562 | if (encrypt_mpdu && (!ieee->sec.encrypt || !crypt)) | ||
563 | encrypt_mpdu = 0; | ||
564 | |||
565 | /* If there is no driver handler to take the TXB, dont' bother | ||
566 | * creating it... */ | ||
567 | if (!ieee->hard_start_xmit) { | ||
568 | printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name); | ||
569 | goto success; | ||
570 | } | ||
571 | |||
572 | if (unlikely(total_len < 24)) { | ||
573 | printk(KERN_WARNING "%s: skb too small (%d).\n", | ||
574 | ieee->dev->name, total_len); | ||
575 | goto success; | ||
576 | } | ||
577 | |||
578 | if (encrypt_mpdu) { | ||
579 | frame->frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); | ||
580 | fraglen += crypt->ops->extra_mpdu_prefix_len + | ||
581 | crypt->ops->extra_mpdu_postfix_len; | ||
582 | headroom += crypt->ops->extra_mpdu_prefix_len; | ||
583 | } | ||
584 | |||
585 | /* When we allocate the TXB we allocate enough space for the reserve | ||
586 | * and full fragment bytes (bytes_per_frag doesn't include prefix, | ||
587 | * postfix, header, FCS, etc.) */ | ||
588 | txb = ieee80211_alloc_txb(1, fraglen, headroom, GFP_ATOMIC); | ||
589 | if (unlikely(!txb)) { | ||
590 | printk(KERN_WARNING "%s: Could not allocate TXB\n", | ||
591 | ieee->dev->name); | ||
592 | goto failed; | ||
593 | } | ||
594 | txb->encrypted = 0; | ||
595 | txb->payload_size = fraglen; | ||
596 | |||
597 | skb_frag = txb->fragments[0]; | ||
598 | |||
599 | memcpy(skb_put(skb_frag, total_len), frame, total_len); | ||
600 | |||
601 | if (ieee->config & | ||
602 | (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) | ||
603 | skb_put(skb_frag, 4); | ||
604 | |||
605 | /* To avoid overcomplicating things, we do the corner-case frame | ||
606 | * encryption in software. The only real situation where encryption is | ||
607 | * needed here is during software-based shared key authentication. */ | ||
608 | if (encrypt_mpdu) | ||
609 | ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len); | ||
610 | |||
611 | success: | ||
612 | spin_unlock_irqrestore(&ieee->lock, flags); | ||
613 | |||
614 | if (txb) { | ||
615 | if ((*ieee->hard_start_xmit) (txb, ieee->dev, priority) == 0) { | ||
616 | stats->tx_packets++; | ||
617 | stats->tx_bytes += txb->payload_size; | ||
618 | return 0; | ||
619 | } | ||
620 | ieee80211_txb_free(txb); | ||
621 | } | ||
622 | return 0; | ||
623 | |||
624 | failed: | ||
625 | spin_unlock_irqrestore(&ieee->lock, flags); | ||
626 | stats->tx_errors++; | ||
627 | return 1; | ||
628 | } | ||
629 | |||
630 | EXPORT_SYMBOL(ieee80211_tx_frame); | ||
631 | EXPORT_SYMBOL(ieee80211_txb_free); | 545 | EXPORT_SYMBOL(ieee80211_txb_free); |
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index 623489afa62c..973832dd7faf 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c | |||
@@ -43,8 +43,9 @@ static const char *ieee80211_modes[] = { | |||
43 | 43 | ||
44 | #define MAX_CUSTOM_LEN 64 | 44 | #define MAX_CUSTOM_LEN 64 |
45 | static char *ieee80211_translate_scan(struct ieee80211_device *ieee, | 45 | static char *ieee80211_translate_scan(struct ieee80211_device *ieee, |
46 | char *start, char *stop, | 46 | char *start, char *stop, |
47 | struct ieee80211_network *network) | 47 | struct ieee80211_network *network, |
48 | struct iw_request_info *info) | ||
48 | { | 49 | { |
49 | char custom[MAX_CUSTOM_LEN]; | 50 | char custom[MAX_CUSTOM_LEN]; |
50 | char *p; | 51 | char *p; |
@@ -57,7 +58,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee, | |||
57 | iwe.cmd = SIOCGIWAP; | 58 | iwe.cmd = SIOCGIWAP; |
58 | iwe.u.ap_addr.sa_family = ARPHRD_ETHER; | 59 | iwe.u.ap_addr.sa_family = ARPHRD_ETHER; |
59 | memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN); | 60 | memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN); |
60 | start = iwe_stream_add_event(start, stop, &iwe, IW_EV_ADDR_LEN); | 61 | start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN); |
61 | 62 | ||
62 | /* Remaining entries will be displayed in the order we provide them */ | 63 | /* Remaining entries will be displayed in the order we provide them */ |
63 | 64 | ||
@@ -66,17 +67,19 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee, | |||
66 | iwe.u.data.flags = 1; | 67 | iwe.u.data.flags = 1; |
67 | if (network->flags & NETWORK_EMPTY_ESSID) { | 68 | if (network->flags & NETWORK_EMPTY_ESSID) { |
68 | iwe.u.data.length = sizeof("<hidden>"); | 69 | iwe.u.data.length = sizeof("<hidden>"); |
69 | start = iwe_stream_add_point(start, stop, &iwe, "<hidden>"); | 70 | start = iwe_stream_add_point(info, start, stop, |
71 | &iwe, "<hidden>"); | ||
70 | } else { | 72 | } else { |
71 | iwe.u.data.length = min(network->ssid_len, (u8) 32); | 73 | iwe.u.data.length = min(network->ssid_len, (u8) 32); |
72 | start = iwe_stream_add_point(start, stop, &iwe, network->ssid); | 74 | start = iwe_stream_add_point(info, start, stop, |
75 | &iwe, network->ssid); | ||
73 | } | 76 | } |
74 | 77 | ||
75 | /* Add the protocol name */ | 78 | /* Add the protocol name */ |
76 | iwe.cmd = SIOCGIWNAME; | 79 | iwe.cmd = SIOCGIWNAME; |
77 | snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s", | 80 | snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s", |
78 | ieee80211_modes[network->mode]); | 81 | ieee80211_modes[network->mode]); |
79 | start = iwe_stream_add_event(start, stop, &iwe, IW_EV_CHAR_LEN); | 82 | start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN); |
80 | 83 | ||
81 | /* Add mode */ | 84 | /* Add mode */ |
82 | iwe.cmd = SIOCGIWMODE; | 85 | iwe.cmd = SIOCGIWMODE; |
@@ -86,7 +89,8 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee, | |||
86 | else | 89 | else |
87 | iwe.u.mode = IW_MODE_ADHOC; | 90 | iwe.u.mode = IW_MODE_ADHOC; |
88 | 91 | ||
89 | start = iwe_stream_add_event(start, stop, &iwe, IW_EV_UINT_LEN); | 92 | start = iwe_stream_add_event(info, start, stop, |
93 | &iwe, IW_EV_UINT_LEN); | ||
90 | } | 94 | } |
91 | 95 | ||
92 | /* Add channel and frequency */ | 96 | /* Add channel and frequency */ |
@@ -95,7 +99,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee, | |||
95 | iwe.u.freq.m = ieee80211_channel_to_freq(ieee, network->channel); | 99 | iwe.u.freq.m = ieee80211_channel_to_freq(ieee, network->channel); |
96 | iwe.u.freq.e = 6; | 100 | iwe.u.freq.e = 6; |
97 | iwe.u.freq.i = 0; | 101 | iwe.u.freq.i = 0; |
98 | start = iwe_stream_add_event(start, stop, &iwe, IW_EV_FREQ_LEN); | 102 | start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN); |
99 | 103 | ||
100 | /* Add encryption capability */ | 104 | /* Add encryption capability */ |
101 | iwe.cmd = SIOCGIWENCODE; | 105 | iwe.cmd = SIOCGIWENCODE; |
@@ -104,12 +108,13 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee, | |||
104 | else | 108 | else |
105 | iwe.u.data.flags = IW_ENCODE_DISABLED; | 109 | iwe.u.data.flags = IW_ENCODE_DISABLED; |
106 | iwe.u.data.length = 0; | 110 | iwe.u.data.length = 0; |
107 | start = iwe_stream_add_point(start, stop, &iwe, network->ssid); | 111 | start = iwe_stream_add_point(info, start, stop, |
112 | &iwe, network->ssid); | ||
108 | 113 | ||
109 | /* Add basic and extended rates */ | 114 | /* Add basic and extended rates */ |
110 | /* Rate : stuffing multiple values in a single event require a bit | 115 | /* Rate : stuffing multiple values in a single event require a bit |
111 | * more of magic - Jean II */ | 116 | * more of magic - Jean II */ |
112 | current_val = start + IW_EV_LCP_LEN; | 117 | current_val = start + iwe_stream_lcp_len(info); |
113 | iwe.cmd = SIOCGIWRATE; | 118 | iwe.cmd = SIOCGIWRATE; |
114 | /* Those two flags are ignored... */ | 119 | /* Those two flags are ignored... */ |
115 | iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; | 120 | iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; |
@@ -124,17 +129,19 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee, | |||
124 | /* Bit rate given in 500 kb/s units (+ 0x80) */ | 129 | /* Bit rate given in 500 kb/s units (+ 0x80) */ |
125 | iwe.u.bitrate.value = ((rate & 0x7f) * 500000); | 130 | iwe.u.bitrate.value = ((rate & 0x7f) * 500000); |
126 | /* Add new value to event */ | 131 | /* Add new value to event */ |
127 | current_val = iwe_stream_add_value(start, current_val, stop, &iwe, IW_EV_PARAM_LEN); | 132 | current_val = iwe_stream_add_value(info, start, current_val, |
133 | stop, &iwe, IW_EV_PARAM_LEN); | ||
128 | } | 134 | } |
129 | for (; j < network->rates_ex_len; j++) { | 135 | for (; j < network->rates_ex_len; j++) { |
130 | rate = network->rates_ex[j] & 0x7F; | 136 | rate = network->rates_ex[j] & 0x7F; |
131 | /* Bit rate given in 500 kb/s units (+ 0x80) */ | 137 | /* Bit rate given in 500 kb/s units (+ 0x80) */ |
132 | iwe.u.bitrate.value = ((rate & 0x7f) * 500000); | 138 | iwe.u.bitrate.value = ((rate & 0x7f) * 500000); |
133 | /* Add new value to event */ | 139 | /* Add new value to event */ |
134 | current_val = iwe_stream_add_value(start, current_val, stop, &iwe, IW_EV_PARAM_LEN); | 140 | current_val = iwe_stream_add_value(info, start, current_val, |
141 | stop, &iwe, IW_EV_PARAM_LEN); | ||
135 | } | 142 | } |
136 | /* Check if we added any rate */ | 143 | /* Check if we added any rate */ |
137 | if((current_val - start) > IW_EV_LCP_LEN) | 144 | if ((current_val - start) > iwe_stream_lcp_len(info)) |
138 | start = current_val; | 145 | start = current_val; |
139 | 146 | ||
140 | /* Add quality statistics */ | 147 | /* Add quality statistics */ |
@@ -181,14 +188,14 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee, | |||
181 | iwe.u.qual.level = network->stats.signal; | 188 | iwe.u.qual.level = network->stats.signal; |
182 | } | 189 | } |
183 | 190 | ||
184 | start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN); | 191 | start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN); |
185 | 192 | ||
186 | iwe.cmd = IWEVCUSTOM; | 193 | iwe.cmd = IWEVCUSTOM; |
187 | p = custom; | 194 | p = custom; |
188 | 195 | ||
189 | iwe.u.data.length = p - custom; | 196 | iwe.u.data.length = p - custom; |
190 | if (iwe.u.data.length) | 197 | if (iwe.u.data.length) |
191 | start = iwe_stream_add_point(start, stop, &iwe, custom); | 198 | start = iwe_stream_add_point(info, start, stop, &iwe, custom); |
192 | 199 | ||
193 | memset(&iwe, 0, sizeof(iwe)); | 200 | memset(&iwe, 0, sizeof(iwe)); |
194 | if (network->wpa_ie_len) { | 201 | if (network->wpa_ie_len) { |
@@ -196,7 +203,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee, | |||
196 | memcpy(buf, network->wpa_ie, network->wpa_ie_len); | 203 | memcpy(buf, network->wpa_ie, network->wpa_ie_len); |
197 | iwe.cmd = IWEVGENIE; | 204 | iwe.cmd = IWEVGENIE; |
198 | iwe.u.data.length = network->wpa_ie_len; | 205 | iwe.u.data.length = network->wpa_ie_len; |
199 | start = iwe_stream_add_point(start, stop, &iwe, buf); | 206 | start = iwe_stream_add_point(info, start, stop, &iwe, buf); |
200 | } | 207 | } |
201 | 208 | ||
202 | memset(&iwe, 0, sizeof(iwe)); | 209 | memset(&iwe, 0, sizeof(iwe)); |
@@ -205,7 +212,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee, | |||
205 | memcpy(buf, network->rsn_ie, network->rsn_ie_len); | 212 | memcpy(buf, network->rsn_ie, network->rsn_ie_len); |
206 | iwe.cmd = IWEVGENIE; | 213 | iwe.cmd = IWEVGENIE; |
207 | iwe.u.data.length = network->rsn_ie_len; | 214 | iwe.u.data.length = network->rsn_ie_len; |
208 | start = iwe_stream_add_point(start, stop, &iwe, buf); | 215 | start = iwe_stream_add_point(info, start, stop, &iwe, buf); |
209 | } | 216 | } |
210 | 217 | ||
211 | /* Add EXTRA: Age to display seconds since last beacon/probe response | 218 | /* Add EXTRA: Age to display seconds since last beacon/probe response |
@@ -217,7 +224,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee, | |||
217 | jiffies_to_msecs(jiffies - network->last_scanned)); | 224 | jiffies_to_msecs(jiffies - network->last_scanned)); |
218 | iwe.u.data.length = p - custom; | 225 | iwe.u.data.length = p - custom; |
219 | if (iwe.u.data.length) | 226 | if (iwe.u.data.length) |
220 | start = iwe_stream_add_point(start, stop, &iwe, custom); | 227 | start = iwe_stream_add_point(info, start, stop, &iwe, custom); |
221 | 228 | ||
222 | /* Add spectrum management information */ | 229 | /* Add spectrum management information */ |
223 | iwe.cmd = -1; | 230 | iwe.cmd = -1; |
@@ -238,7 +245,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee, | |||
238 | 245 | ||
239 | if (iwe.cmd == IWEVCUSTOM) { | 246 | if (iwe.cmd == IWEVCUSTOM) { |
240 | iwe.u.data.length = p - custom; | 247 | iwe.u.data.length = p - custom; |
241 | start = iwe_stream_add_point(start, stop, &iwe, custom); | 248 | start = iwe_stream_add_point(info, start, stop, &iwe, custom); |
242 | } | 249 | } |
243 | 250 | ||
244 | return start; | 251 | return start; |
@@ -272,7 +279,8 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee, | |||
272 | 279 | ||
273 | if (ieee->scan_age == 0 || | 280 | if (ieee->scan_age == 0 || |
274 | time_after(network->last_scanned + ieee->scan_age, jiffies)) | 281 | time_after(network->last_scanned + ieee->scan_age, jiffies)) |
275 | ev = ieee80211_translate_scan(ieee, ev, stop, network); | 282 | ev = ieee80211_translate_scan(ieee, ev, stop, network, |
283 | info); | ||
276 | else | 284 | else |
277 | IEEE80211_DEBUG_SCAN("Not showing network '%s (" | 285 | IEEE80211_DEBUG_SCAN("Not showing network '%s (" |
278 | "%s)' due to age (%dms).\n", | 286 | "%s)' due to age (%dms).\n", |
@@ -744,98 +752,9 @@ int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, | |||
744 | return 0; | 752 | return 0; |
745 | } | 753 | } |
746 | 754 | ||
747 | int ieee80211_wx_set_auth(struct net_device *dev, | ||
748 | struct iw_request_info *info, | ||
749 | union iwreq_data *wrqu, | ||
750 | char *extra) | ||
751 | { | ||
752 | struct ieee80211_device *ieee = netdev_priv(dev); | ||
753 | unsigned long flags; | ||
754 | int err = 0; | ||
755 | |||
756 | spin_lock_irqsave(&ieee->lock, flags); | ||
757 | |||
758 | switch (wrqu->param.flags & IW_AUTH_INDEX) { | ||
759 | case IW_AUTH_WPA_VERSION: | ||
760 | case IW_AUTH_CIPHER_PAIRWISE: | ||
761 | case IW_AUTH_CIPHER_GROUP: | ||
762 | case IW_AUTH_KEY_MGMT: | ||
763 | /* | ||
764 | * Host AP driver does not use these parameters and allows | ||
765 | * wpa_supplicant to control them internally. | ||
766 | */ | ||
767 | break; | ||
768 | case IW_AUTH_TKIP_COUNTERMEASURES: | ||
769 | break; /* FIXME */ | ||
770 | case IW_AUTH_DROP_UNENCRYPTED: | ||
771 | ieee->drop_unencrypted = !!wrqu->param.value; | ||
772 | break; | ||
773 | case IW_AUTH_80211_AUTH_ALG: | ||
774 | break; /* FIXME */ | ||
775 | case IW_AUTH_WPA_ENABLED: | ||
776 | ieee->privacy_invoked = ieee->wpa_enabled = !!wrqu->param.value; | ||
777 | break; | ||
778 | case IW_AUTH_RX_UNENCRYPTED_EAPOL: | ||
779 | ieee->ieee802_1x = !!wrqu->param.value; | ||
780 | break; | ||
781 | case IW_AUTH_PRIVACY_INVOKED: | ||
782 | ieee->privacy_invoked = !!wrqu->param.value; | ||
783 | break; | ||
784 | default: | ||
785 | err = -EOPNOTSUPP; | ||
786 | break; | ||
787 | } | ||
788 | spin_unlock_irqrestore(&ieee->lock, flags); | ||
789 | return err; | ||
790 | } | ||
791 | |||
792 | int ieee80211_wx_get_auth(struct net_device *dev, | ||
793 | struct iw_request_info *info, | ||
794 | union iwreq_data *wrqu, | ||
795 | char *extra) | ||
796 | { | ||
797 | struct ieee80211_device *ieee = netdev_priv(dev); | ||
798 | unsigned long flags; | ||
799 | int err = 0; | ||
800 | |||
801 | spin_lock_irqsave(&ieee->lock, flags); | ||
802 | |||
803 | switch (wrqu->param.flags & IW_AUTH_INDEX) { | ||
804 | case IW_AUTH_WPA_VERSION: | ||
805 | case IW_AUTH_CIPHER_PAIRWISE: | ||
806 | case IW_AUTH_CIPHER_GROUP: | ||
807 | case IW_AUTH_KEY_MGMT: | ||
808 | case IW_AUTH_TKIP_COUNTERMEASURES: /* FIXME */ | ||
809 | case IW_AUTH_80211_AUTH_ALG: /* FIXME */ | ||
810 | /* | ||
811 | * Host AP driver does not use these parameters and allows | ||
812 | * wpa_supplicant to control them internally. | ||
813 | */ | ||
814 | err = -EOPNOTSUPP; | ||
815 | break; | ||
816 | case IW_AUTH_DROP_UNENCRYPTED: | ||
817 | wrqu->param.value = ieee->drop_unencrypted; | ||
818 | break; | ||
819 | case IW_AUTH_WPA_ENABLED: | ||
820 | wrqu->param.value = ieee->wpa_enabled; | ||
821 | break; | ||
822 | case IW_AUTH_RX_UNENCRYPTED_EAPOL: | ||
823 | wrqu->param.value = ieee->ieee802_1x; | ||
824 | break; | ||
825 | default: | ||
826 | err = -EOPNOTSUPP; | ||
827 | break; | ||
828 | } | ||
829 | spin_unlock_irqrestore(&ieee->lock, flags); | ||
830 | return err; | ||
831 | } | ||
832 | |||
833 | EXPORT_SYMBOL(ieee80211_wx_set_encodeext); | 755 | EXPORT_SYMBOL(ieee80211_wx_set_encodeext); |
834 | EXPORT_SYMBOL(ieee80211_wx_get_encodeext); | 756 | EXPORT_SYMBOL(ieee80211_wx_get_encodeext); |
835 | 757 | ||
836 | EXPORT_SYMBOL(ieee80211_wx_get_scan); | 758 | EXPORT_SYMBOL(ieee80211_wx_get_scan); |
837 | EXPORT_SYMBOL(ieee80211_wx_set_encode); | 759 | EXPORT_SYMBOL(ieee80211_wx_set_encode); |
838 | EXPORT_SYMBOL(ieee80211_wx_get_encode); | 760 | EXPORT_SYMBOL(ieee80211_wx_get_encode); |
839 | |||
840 | EXPORT_SYMBOL_GPL(ieee80211_wx_set_auth); | ||
841 | EXPORT_SYMBOL_GPL(ieee80211_wx_get_auth); | ||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 24eca23c2db3..dd919d84285f 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * PF_INET protocol family socket handler. | 6 | * PF_INET protocol family socket handler. |
7 | * | 7 | * |
8 | * Version: $Id: af_inet.c,v 1.137 2002/02/01 22:01:03 davem Exp $ | ||
9 | * | ||
10 | * Authors: Ross Biro | 8 | * Authors: Ross Biro |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Florian La Roche, <flla@stud.uni-sb.de> | 10 | * Florian La Roche, <flla@stud.uni-sb.de> |
@@ -112,12 +110,11 @@ | |||
112 | #include <net/ipip.h> | 110 | #include <net/ipip.h> |
113 | #include <net/inet_common.h> | 111 | #include <net/inet_common.h> |
114 | #include <net/xfrm.h> | 112 | #include <net/xfrm.h> |
113 | #include <net/net_namespace.h> | ||
115 | #ifdef CONFIG_IP_MROUTE | 114 | #ifdef CONFIG_IP_MROUTE |
116 | #include <linux/mroute.h> | 115 | #include <linux/mroute.h> |
117 | #endif | 116 | #endif |
118 | 117 | ||
119 | DEFINE_SNMP_STAT(struct linux_mib, net_statistics) __read_mostly; | ||
120 | |||
121 | extern void ip_mc_drop_socket(struct sock *sk); | 118 | extern void ip_mc_drop_socket(struct sock *sk); |
122 | 119 | ||
123 | /* The inetsw table contains everything that inet_create needs to | 120 | /* The inetsw table contains everything that inet_create needs to |
@@ -1341,50 +1338,70 @@ static struct net_protocol icmp_protocol = { | |||
1341 | .netns_ok = 1, | 1338 | .netns_ok = 1, |
1342 | }; | 1339 | }; |
1343 | 1340 | ||
1344 | static int __init init_ipv4_mibs(void) | 1341 | static __net_init int ipv4_mib_init_net(struct net *net) |
1345 | { | 1342 | { |
1346 | if (snmp_mib_init((void **)net_statistics, | 1343 | if (snmp_mib_init((void **)net->mib.tcp_statistics, |
1347 | sizeof(struct linux_mib)) < 0) | ||
1348 | goto err_net_mib; | ||
1349 | if (snmp_mib_init((void **)ip_statistics, | ||
1350 | sizeof(struct ipstats_mib)) < 0) | ||
1351 | goto err_ip_mib; | ||
1352 | if (snmp_mib_init((void **)icmp_statistics, | ||
1353 | sizeof(struct icmp_mib)) < 0) | ||
1354 | goto err_icmp_mib; | ||
1355 | if (snmp_mib_init((void **)icmpmsg_statistics, | ||
1356 | sizeof(struct icmpmsg_mib)) < 0) | ||
1357 | goto err_icmpmsg_mib; | ||
1358 | if (snmp_mib_init((void **)tcp_statistics, | ||
1359 | sizeof(struct tcp_mib)) < 0) | 1344 | sizeof(struct tcp_mib)) < 0) |
1360 | goto err_tcp_mib; | 1345 | goto err_tcp_mib; |
1361 | if (snmp_mib_init((void **)udp_statistics, | 1346 | if (snmp_mib_init((void **)net->mib.ip_statistics, |
1347 | sizeof(struct ipstats_mib)) < 0) | ||
1348 | goto err_ip_mib; | ||
1349 | if (snmp_mib_init((void **)net->mib.net_statistics, | ||
1350 | sizeof(struct linux_mib)) < 0) | ||
1351 | goto err_net_mib; | ||
1352 | if (snmp_mib_init((void **)net->mib.udp_statistics, | ||
1362 | sizeof(struct udp_mib)) < 0) | 1353 | sizeof(struct udp_mib)) < 0) |
1363 | goto err_udp_mib; | 1354 | goto err_udp_mib; |
1364 | if (snmp_mib_init((void **)udplite_statistics, | 1355 | if (snmp_mib_init((void **)net->mib.udplite_statistics, |
1365 | sizeof(struct udp_mib)) < 0) | 1356 | sizeof(struct udp_mib)) < 0) |
1366 | goto err_udplite_mib; | 1357 | goto err_udplite_mib; |
1358 | if (snmp_mib_init((void **)net->mib.icmp_statistics, | ||
1359 | sizeof(struct icmp_mib)) < 0) | ||
1360 | goto err_icmp_mib; | ||
1361 | if (snmp_mib_init((void **)net->mib.icmpmsg_statistics, | ||
1362 | sizeof(struct icmpmsg_mib)) < 0) | ||
1363 | goto err_icmpmsg_mib; | ||
1367 | 1364 | ||
1368 | tcp_mib_init(); | 1365 | tcp_mib_init(net); |
1369 | |||
1370 | return 0; | 1366 | return 0; |
1371 | 1367 | ||
1372 | err_udplite_mib: | ||
1373 | snmp_mib_free((void **)udp_statistics); | ||
1374 | err_udp_mib: | ||
1375 | snmp_mib_free((void **)tcp_statistics); | ||
1376 | err_tcp_mib: | ||
1377 | snmp_mib_free((void **)icmpmsg_statistics); | ||
1378 | err_icmpmsg_mib: | 1368 | err_icmpmsg_mib: |
1379 | snmp_mib_free((void **)icmp_statistics); | 1369 | snmp_mib_free((void **)net->mib.icmp_statistics); |
1380 | err_icmp_mib: | 1370 | err_icmp_mib: |
1381 | snmp_mib_free((void **)ip_statistics); | 1371 | snmp_mib_free((void **)net->mib.udplite_statistics); |
1382 | err_ip_mib: | 1372 | err_udplite_mib: |
1383 | snmp_mib_free((void **)net_statistics); | 1373 | snmp_mib_free((void **)net->mib.udp_statistics); |
1374 | err_udp_mib: | ||
1375 | snmp_mib_free((void **)net->mib.net_statistics); | ||
1384 | err_net_mib: | 1376 | err_net_mib: |
1377 | snmp_mib_free((void **)net->mib.ip_statistics); | ||
1378 | err_ip_mib: | ||
1379 | snmp_mib_free((void **)net->mib.tcp_statistics); | ||
1380 | err_tcp_mib: | ||
1385 | return -ENOMEM; | 1381 | return -ENOMEM; |
1386 | } | 1382 | } |
1387 | 1383 | ||
1384 | static __net_exit void ipv4_mib_exit_net(struct net *net) | ||
1385 | { | ||
1386 | snmp_mib_free((void **)net->mib.icmpmsg_statistics); | ||
1387 | snmp_mib_free((void **)net->mib.icmp_statistics); | ||
1388 | snmp_mib_free((void **)net->mib.udplite_statistics); | ||
1389 | snmp_mib_free((void **)net->mib.udp_statistics); | ||
1390 | snmp_mib_free((void **)net->mib.net_statistics); | ||
1391 | snmp_mib_free((void **)net->mib.ip_statistics); | ||
1392 | snmp_mib_free((void **)net->mib.tcp_statistics); | ||
1393 | } | ||
1394 | |||
1395 | static __net_initdata struct pernet_operations ipv4_mib_ops = { | ||
1396 | .init = ipv4_mib_init_net, | ||
1397 | .exit = ipv4_mib_exit_net, | ||
1398 | }; | ||
1399 | |||
1400 | static int __init init_ipv4_mibs(void) | ||
1401 | { | ||
1402 | return register_pernet_subsys(&ipv4_mib_ops); | ||
1403 | } | ||
1404 | |||
1388 | static int ipv4_proc_init(void); | 1405 | static int ipv4_proc_init(void); |
1389 | 1406 | ||
1390 | /* | 1407 | /* |
@@ -1481,14 +1498,15 @@ static int __init inet_init(void) | |||
1481 | * Initialise the multicast router | 1498 | * Initialise the multicast router |
1482 | */ | 1499 | */ |
1483 | #if defined(CONFIG_IP_MROUTE) | 1500 | #if defined(CONFIG_IP_MROUTE) |
1484 | ip_mr_init(); | 1501 | if (ip_mr_init()) |
1502 | printk(KERN_CRIT "inet_init: Cannot init ipv4 mroute\n"); | ||
1485 | #endif | 1503 | #endif |
1486 | /* | 1504 | /* |
1487 | * Initialise per-cpu ipv4 mibs | 1505 | * Initialise per-cpu ipv4 mibs |
1488 | */ | 1506 | */ |
1489 | 1507 | ||
1490 | if (init_ipv4_mibs()) | 1508 | if (init_ipv4_mibs()) |
1491 | printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); ; | 1509 | printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); |
1492 | 1510 | ||
1493 | ipv4_proc_init(); | 1511 | ipv4_proc_init(); |
1494 | 1512 | ||
@@ -1560,5 +1578,4 @@ EXPORT_SYMBOL(inet_sock_destruct); | |||
1560 | EXPORT_SYMBOL(inet_stream_connect); | 1578 | EXPORT_SYMBOL(inet_stream_connect); |
1561 | EXPORT_SYMBOL(inet_stream_ops); | 1579 | EXPORT_SYMBOL(inet_stream_ops); |
1562 | EXPORT_SYMBOL(inet_unregister_protosw); | 1580 | EXPORT_SYMBOL(inet_unregister_protosw); |
1563 | EXPORT_SYMBOL(net_statistics); | ||
1564 | EXPORT_SYMBOL(sysctl_ip_nonlocal_bind); | 1581 | EXPORT_SYMBOL(sysctl_ip_nonlocal_bind); |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 9b539fa9fe18..b043eda60b04 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -1,7 +1,5 @@ | |||
1 | /* linux/net/ipv4/arp.c | 1 | /* linux/net/ipv4/arp.c |
2 | * | 2 | * |
3 | * Version: $Id: arp.c,v 1.99 2001/08/30 22:55:42 davem Exp $ | ||
4 | * | ||
5 | * Copyright (C) 1994 by Florian La Roche | 3 | * Copyright (C) 1994 by Florian La Roche |
6 | * | 4 | * |
7 | * This module implements the Address Resolution Protocol ARP (RFC 826), | 5 | * This module implements the Address Resolution Protocol ARP (RFC 826), |
@@ -423,11 +421,12 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev) | |||
423 | struct rtable *rt; | 421 | struct rtable *rt; |
424 | int flag = 0; | 422 | int flag = 0; |
425 | /*unsigned long now; */ | 423 | /*unsigned long now; */ |
424 | struct net *net = dev_net(dev); | ||
426 | 425 | ||
427 | if (ip_route_output_key(dev_net(dev), &rt, &fl) < 0) | 426 | if (ip_route_output_key(net, &rt, &fl) < 0) |
428 | return 1; | 427 | return 1; |
429 | if (rt->u.dst.dev != dev) { | 428 | if (rt->u.dst.dev != dev) { |
430 | NET_INC_STATS_BH(LINUX_MIB_ARPFILTER); | 429 | NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER); |
431 | flag = 1; | 430 | flag = 1; |
432 | } | 431 | } |
433 | ip_rt_put(rt); | 432 | ip_rt_put(rt); |
@@ -1199,7 +1198,7 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event, vo | |||
1199 | switch (event) { | 1198 | switch (event) { |
1200 | case NETDEV_CHANGEADDR: | 1199 | case NETDEV_CHANGEADDR: |
1201 | neigh_changeaddr(&arp_tbl, dev); | 1200 | neigh_changeaddr(&arp_tbl, dev); |
1202 | rt_cache_flush(0); | 1201 | rt_cache_flush(dev_net(dev), 0); |
1203 | break; | 1202 | break; |
1204 | default: | 1203 | default: |
1205 | break; | 1204 | break; |
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index 0c0c73f368ce..5e6c5a0f3fde 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c | |||
@@ -52,7 +52,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
52 | inet->sport, usin->sin_port, sk, 1); | 52 | inet->sport, usin->sin_port, sk, 1); |
53 | if (err) { | 53 | if (err) { |
54 | if (err == -ENETUNREACH) | 54 | if (err == -ENETUNREACH) |
55 | IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); | 55 | IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); |
56 | return err; | 56 | return err; |
57 | } | 57 | } |
58 | 58 | ||
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 79a7ef6209ff..2e667e2f90df 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * NET3 IP device support routines. | 2 | * NET3 IP device support routines. |
3 | * | 3 | * |
4 | * Version: $Id: devinet.c,v 1.44 2001/10/31 21:55:54 davem Exp $ | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License | 5 | * modify it under the terms of the GNU General Public License |
8 | * as published by the Free Software Foundation; either version | 6 | * as published by the Free Software Foundation; either version |
@@ -170,6 +168,8 @@ static struct in_device *inetdev_init(struct net_device *dev) | |||
170 | in_dev->dev = dev; | 168 | in_dev->dev = dev; |
171 | if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL) | 169 | if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL) |
172 | goto out_kfree; | 170 | goto out_kfree; |
171 | if (IPV4_DEVCONF(in_dev->cnf, FORWARDING)) | ||
172 | dev_disable_lro(dev); | ||
173 | /* Reference in_dev->dev */ | 173 | /* Reference in_dev->dev */ |
174 | dev_hold(dev); | 174 | dev_hold(dev); |
175 | /* Account for reference dev->ip_ptr (below) */ | 175 | /* Account for reference dev->ip_ptr (below) */ |
@@ -1013,7 +1013,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev) | |||
1013 | memcpy(old, ifa->ifa_label, IFNAMSIZ); | 1013 | memcpy(old, ifa->ifa_label, IFNAMSIZ); |
1014 | memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); | 1014 | memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); |
1015 | if (named++ == 0) | 1015 | if (named++ == 0) |
1016 | continue; | 1016 | goto skip; |
1017 | dot = strchr(old, ':'); | 1017 | dot = strchr(old, ':'); |
1018 | if (dot == NULL) { | 1018 | if (dot == NULL) { |
1019 | sprintf(old, ":%d", named); | 1019 | sprintf(old, ":%d", named); |
@@ -1024,6 +1024,8 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev) | |||
1024 | } else { | 1024 | } else { |
1025 | strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot); | 1025 | strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot); |
1026 | } | 1026 | } |
1027 | skip: | ||
1028 | rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); | ||
1027 | } | 1029 | } |
1028 | } | 1030 | } |
1029 | 1031 | ||
@@ -1241,6 +1243,8 @@ static void inet_forward_change(struct net *net) | |||
1241 | read_lock(&dev_base_lock); | 1243 | read_lock(&dev_base_lock); |
1242 | for_each_netdev(net, dev) { | 1244 | for_each_netdev(net, dev) { |
1243 | struct in_device *in_dev; | 1245 | struct in_device *in_dev; |
1246 | if (on) | ||
1247 | dev_disable_lro(dev); | ||
1244 | rcu_read_lock(); | 1248 | rcu_read_lock(); |
1245 | in_dev = __in_dev_get_rcu(dev); | 1249 | in_dev = __in_dev_get_rcu(dev); |
1246 | if (in_dev) | 1250 | if (in_dev) |
@@ -1248,8 +1252,6 @@ static void inet_forward_change(struct net *net) | |||
1248 | rcu_read_unlock(); | 1252 | rcu_read_unlock(); |
1249 | } | 1253 | } |
1250 | read_unlock(&dev_base_lock); | 1254 | read_unlock(&dev_base_lock); |
1251 | |||
1252 | rt_cache_flush(0); | ||
1253 | } | 1255 | } |
1254 | 1256 | ||
1255 | static int devinet_conf_proc(ctl_table *ctl, int write, | 1257 | static int devinet_conf_proc(ctl_table *ctl, int write, |
@@ -1335,10 +1337,19 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write, | |||
1335 | if (write && *valp != val) { | 1337 | if (write && *valp != val) { |
1336 | struct net *net = ctl->extra2; | 1338 | struct net *net = ctl->extra2; |
1337 | 1339 | ||
1338 | if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) | 1340 | if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) { |
1339 | inet_forward_change(net); | 1341 | rtnl_lock(); |
1340 | else if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) | 1342 | if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) { |
1341 | rt_cache_flush(0); | 1343 | inet_forward_change(net); |
1344 | } else if (*valp) { | ||
1345 | struct ipv4_devconf *cnf = ctl->extra1; | ||
1346 | struct in_device *idev = | ||
1347 | container_of(cnf, struct in_device, cnf); | ||
1348 | dev_disable_lro(idev->dev); | ||
1349 | } | ||
1350 | rtnl_unlock(); | ||
1351 | rt_cache_flush(net, 0); | ||
1352 | } | ||
1342 | } | 1353 | } |
1343 | 1354 | ||
1344 | return ret; | 1355 | return ret; |
@@ -1351,9 +1362,10 @@ int ipv4_doint_and_flush(ctl_table *ctl, int write, | |||
1351 | int *valp = ctl->data; | 1362 | int *valp = ctl->data; |
1352 | int val = *valp; | 1363 | int val = *valp; |
1353 | int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos); | 1364 | int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos); |
1365 | struct net *net = ctl->extra2; | ||
1354 | 1366 | ||
1355 | if (write && *valp != val) | 1367 | if (write && *valp != val) |
1356 | rt_cache_flush(0); | 1368 | rt_cache_flush(net, 0); |
1357 | 1369 | ||
1358 | return ret; | 1370 | return ret; |
1359 | } | 1371 | } |
@@ -1364,9 +1376,10 @@ int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen, | |||
1364 | { | 1376 | { |
1365 | int ret = devinet_conf_sysctl(table, name, nlen, oldval, oldlenp, | 1377 | int ret = devinet_conf_sysctl(table, name, nlen, oldval, oldlenp, |
1366 | newval, newlen); | 1378 | newval, newlen); |
1379 | struct net *net = table->extra2; | ||
1367 | 1380 | ||
1368 | if (ret == 1) | 1381 | if (ret == 1) |
1369 | rt_cache_flush(0); | 1382 | rt_cache_flush(net, 0); |
1370 | 1383 | ||
1371 | return ret; | 1384 | return ret; |
1372 | } | 1385 | } |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 0b2ac6a3d903..65c1503f8cc8 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * IPv4 Forwarding Information Base: FIB frontend. | 6 | * IPv4 Forwarding Information Base: FIB frontend. |
7 | * | 7 | * |
8 | * Version: $Id: fib_frontend.c,v 1.26 2001/10/31 21:55:54 davem Exp $ | ||
9 | * | ||
10 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> | 8 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> |
11 | * | 9 | * |
12 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
@@ -146,7 +144,7 @@ static void fib_flush(struct net *net) | |||
146 | } | 144 | } |
147 | 145 | ||
148 | if (flushed) | 146 | if (flushed) |
149 | rt_cache_flush(-1); | 147 | rt_cache_flush(net, -1); |
150 | } | 148 | } |
151 | 149 | ||
152 | /* | 150 | /* |
@@ -899,21 +897,22 @@ static void fib_disable_ip(struct net_device *dev, int force) | |||
899 | { | 897 | { |
900 | if (fib_sync_down_dev(dev, force)) | 898 | if (fib_sync_down_dev(dev, force)) |
901 | fib_flush(dev_net(dev)); | 899 | fib_flush(dev_net(dev)); |
902 | rt_cache_flush(0); | 900 | rt_cache_flush(dev_net(dev), 0); |
903 | arp_ifdown(dev); | 901 | arp_ifdown(dev); |
904 | } | 902 | } |
905 | 903 | ||
906 | static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) | 904 | static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) |
907 | { | 905 | { |
908 | struct in_ifaddr *ifa = (struct in_ifaddr*)ptr; | 906 | struct in_ifaddr *ifa = (struct in_ifaddr*)ptr; |
907 | struct net_device *dev = ifa->ifa_dev->dev; | ||
909 | 908 | ||
910 | switch (event) { | 909 | switch (event) { |
911 | case NETDEV_UP: | 910 | case NETDEV_UP: |
912 | fib_add_ifaddr(ifa); | 911 | fib_add_ifaddr(ifa); |
913 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 912 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
914 | fib_sync_up(ifa->ifa_dev->dev); | 913 | fib_sync_up(dev); |
915 | #endif | 914 | #endif |
916 | rt_cache_flush(-1); | 915 | rt_cache_flush(dev_net(dev), -1); |
917 | break; | 916 | break; |
918 | case NETDEV_DOWN: | 917 | case NETDEV_DOWN: |
919 | fib_del_ifaddr(ifa); | 918 | fib_del_ifaddr(ifa); |
@@ -921,9 +920,9 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
921 | /* Last address was deleted from this interface. | 920 | /* Last address was deleted from this interface. |
922 | Disable IP. | 921 | Disable IP. |
923 | */ | 922 | */ |
924 | fib_disable_ip(ifa->ifa_dev->dev, 1); | 923 | fib_disable_ip(dev, 1); |
925 | } else { | 924 | } else { |
926 | rt_cache_flush(-1); | 925 | rt_cache_flush(dev_net(dev), -1); |
927 | } | 926 | } |
928 | break; | 927 | break; |
929 | } | 928 | } |
@@ -951,14 +950,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo | |||
951 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 950 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
952 | fib_sync_up(dev); | 951 | fib_sync_up(dev); |
953 | #endif | 952 | #endif |
954 | rt_cache_flush(-1); | 953 | rt_cache_flush(dev_net(dev), -1); |
955 | break; | 954 | break; |
956 | case NETDEV_DOWN: | 955 | case NETDEV_DOWN: |
957 | fib_disable_ip(dev, 0); | 956 | fib_disable_ip(dev, 0); |
958 | break; | 957 | break; |
959 | case NETDEV_CHANGEMTU: | 958 | case NETDEV_CHANGEMTU: |
960 | case NETDEV_CHANGE: | 959 | case NETDEV_CHANGE: |
961 | rt_cache_flush(0); | 960 | rt_cache_flush(dev_net(dev), 0); |
962 | break; | 961 | break; |
963 | } | 962 | } |
964 | return NOTIFY_DONE; | 963 | return NOTIFY_DONE; |
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c index 2e2fc3376ac9..c8cac6c7f881 100644 --- a/net/ipv4/fib_hash.c +++ b/net/ipv4/fib_hash.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * IPv4 FIB: lookup engine and maintenance routines. | 6 | * IPv4 FIB: lookup engine and maintenance routines. |
7 | * | 7 | * |
8 | * Version: $Id: fib_hash.c,v 1.13 2001/10/31 21:55:54 davem Exp $ | ||
9 | * | ||
10 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> | 8 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> |
11 | * | 9 | * |
12 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
@@ -474,7 +472,7 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg) | |||
474 | 472 | ||
475 | fib_release_info(fi_drop); | 473 | fib_release_info(fi_drop); |
476 | if (state & FA_S_ACCESSED) | 474 | if (state & FA_S_ACCESSED) |
477 | rt_cache_flush(-1); | 475 | rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); |
478 | rtmsg_fib(RTM_NEWROUTE, key, fa, cfg->fc_dst_len, tb->tb_id, | 476 | rtmsg_fib(RTM_NEWROUTE, key, fa, cfg->fc_dst_len, tb->tb_id, |
479 | &cfg->fc_nlinfo, NLM_F_REPLACE); | 477 | &cfg->fc_nlinfo, NLM_F_REPLACE); |
480 | return 0; | 478 | return 0; |
@@ -534,7 +532,7 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg) | |||
534 | 532 | ||
535 | if (new_f) | 533 | if (new_f) |
536 | fz->fz_nent++; | 534 | fz->fz_nent++; |
537 | rt_cache_flush(-1); | 535 | rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); |
538 | 536 | ||
539 | rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len, tb->tb_id, | 537 | rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len, tb->tb_id, |
540 | &cfg->fc_nlinfo, 0); | 538 | &cfg->fc_nlinfo, 0); |
@@ -616,7 +614,7 @@ static int fn_hash_delete(struct fib_table *tb, struct fib_config *cfg) | |||
616 | write_unlock_bh(&fib_hash_lock); | 614 | write_unlock_bh(&fib_hash_lock); |
617 | 615 | ||
618 | if (fa->fa_state & FA_S_ACCESSED) | 616 | if (fa->fa_state & FA_S_ACCESSED) |
619 | rt_cache_flush(-1); | 617 | rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); |
620 | fn_free_alias(fa, f); | 618 | fn_free_alias(fa, f); |
621 | if (kill_fn) { | 619 | if (kill_fn) { |
622 | fn_free_node(f); | 620 | fn_free_node(f); |
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 1fb56876be54..6080d7120821 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c | |||
@@ -258,9 +258,9 @@ static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule) | |||
258 | + nla_total_size(4); /* flow */ | 258 | + nla_total_size(4); /* flow */ |
259 | } | 259 | } |
260 | 260 | ||
261 | static void fib4_rule_flush_cache(void) | 261 | static void fib4_rule_flush_cache(struct fib_rules_ops *ops) |
262 | { | 262 | { |
263 | rt_cache_flush(-1); | 263 | rt_cache_flush(ops->fro_net, -1); |
264 | } | 264 | } |
265 | 265 | ||
266 | static struct fib_rules_ops fib4_rules_ops_template = { | 266 | static struct fib_rules_ops fib4_rules_ops_template = { |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 0d4d72827e4b..ded2ae34eab1 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * IPv4 Forwarding Information Base: semantics. | 6 | * IPv4 Forwarding Information Base: semantics. |
7 | * | 7 | * |
8 | * Version: $Id: fib_semantics.c,v 1.19 2002/01/12 07:54:56 davem Exp $ | ||
9 | * | ||
10 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> | 8 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> |
11 | * | 9 | * |
12 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index e1600ad8fb0e..5cb72786a8af 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -22,8 +22,6 @@ | |||
22 | * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson | 22 | * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson |
23 | * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999 | 23 | * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999 |
24 | * | 24 | * |
25 | * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $ | ||
26 | * | ||
27 | * | 25 | * |
28 | * Code from fib_hash has been reused which includes the following header: | 26 | * Code from fib_hash has been reused which includes the following header: |
29 | * | 27 | * |
@@ -1273,7 +1271,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg) | |||
1273 | 1271 | ||
1274 | fib_release_info(fi_drop); | 1272 | fib_release_info(fi_drop); |
1275 | if (state & FA_S_ACCESSED) | 1273 | if (state & FA_S_ACCESSED) |
1276 | rt_cache_flush(-1); | 1274 | rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); |
1277 | rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, | 1275 | rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, |
1278 | tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE); | 1276 | tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE); |
1279 | 1277 | ||
@@ -1318,7 +1316,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg) | |||
1318 | list_add_tail_rcu(&new_fa->fa_list, | 1316 | list_add_tail_rcu(&new_fa->fa_list, |
1319 | (fa ? &fa->fa_list : fa_head)); | 1317 | (fa ? &fa->fa_list : fa_head)); |
1320 | 1318 | ||
1321 | rt_cache_flush(-1); | 1319 | rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); |
1322 | rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, | 1320 | rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, |
1323 | &cfg->fc_nlinfo, 0); | 1321 | &cfg->fc_nlinfo, 0); |
1324 | succeeded: | 1322 | succeeded: |
@@ -1661,7 +1659,7 @@ static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg) | |||
1661 | trie_leaf_remove(t, l); | 1659 | trie_leaf_remove(t, l); |
1662 | 1660 | ||
1663 | if (fa->fa_state & FA_S_ACCESSED) | 1661 | if (fa->fa_state & FA_S_ACCESSED) |
1664 | rt_cache_flush(-1); | 1662 | rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); |
1665 | 1663 | ||
1666 | fib_release_info(fa->fa_info); | 1664 | fib_release_info(fa->fa_info); |
1667 | alias_free_mem_rcu(fa); | 1665 | alias_free_mem_rcu(fa); |
@@ -2253,25 +2251,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v) | |||
2253 | 2251 | ||
2254 | static int fib_triestat_seq_open(struct inode *inode, struct file *file) | 2252 | static int fib_triestat_seq_open(struct inode *inode, struct file *file) |
2255 | { | 2253 | { |
2256 | int err; | 2254 | return single_open_net(inode, file, fib_triestat_seq_show); |
2257 | struct net *net; | ||
2258 | |||
2259 | net = get_proc_net(inode); | ||
2260 | if (net == NULL) | ||
2261 | return -ENXIO; | ||
2262 | err = single_open(file, fib_triestat_seq_show, net); | ||
2263 | if (err < 0) { | ||
2264 | put_net(net); | ||
2265 | return err; | ||
2266 | } | ||
2267 | return 0; | ||
2268 | } | ||
2269 | |||
2270 | static int fib_triestat_seq_release(struct inode *ino, struct file *f) | ||
2271 | { | ||
2272 | struct seq_file *seq = f->private_data; | ||
2273 | put_net(seq->private); | ||
2274 | return single_release(ino, f); | ||
2275 | } | 2255 | } |
2276 | 2256 | ||
2277 | static const struct file_operations fib_triestat_fops = { | 2257 | static const struct file_operations fib_triestat_fops = { |
@@ -2279,7 +2259,7 @@ static const struct file_operations fib_triestat_fops = { | |||
2279 | .open = fib_triestat_seq_open, | 2259 | .open = fib_triestat_seq_open, |
2280 | .read = seq_read, | 2260 | .read = seq_read, |
2281 | .llseek = seq_lseek, | 2261 | .llseek = seq_lseek, |
2282 | .release = fib_triestat_seq_release, | 2262 | .release = single_release_net, |
2283 | }; | 2263 | }; |
2284 | 2264 | ||
2285 | static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos) | 2265 | static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos) |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 87397351ddac..860558633b2c 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -3,8 +3,6 @@ | |||
3 | * | 3 | * |
4 | * Alan Cox, <alan@redhat.com> | 4 | * Alan Cox, <alan@redhat.com> |
5 | * | 5 | * |
6 | * Version: $Id: icmp.c,v 1.85 2002/02/01 22:01:03 davem Exp $ | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
10 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
@@ -113,12 +111,6 @@ struct icmp_bxm { | |||
113 | unsigned char optbuf[40]; | 111 | unsigned char optbuf[40]; |
114 | }; | 112 | }; |
115 | 113 | ||
116 | /* | ||
117 | * Statistics | ||
118 | */ | ||
119 | DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics) __read_mostly; | ||
120 | DEFINE_SNMP_STAT(struct icmpmsg_mib, icmpmsg_statistics) __read_mostly; | ||
121 | |||
122 | /* An array of errno for error messages from dest unreach. */ | 114 | /* An array of errno for error messages from dest unreach. */ |
123 | /* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */ | 115 | /* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */ |
124 | 116 | ||
@@ -298,10 +290,10 @@ out: | |||
298 | /* | 290 | /* |
299 | * Maintain the counters used in the SNMP statistics for outgoing ICMP | 291 | * Maintain the counters used in the SNMP statistics for outgoing ICMP |
300 | */ | 292 | */ |
301 | void icmp_out_count(unsigned char type) | 293 | void icmp_out_count(struct net *net, unsigned char type) |
302 | { | 294 | { |
303 | ICMPMSGOUT_INC_STATS(type); | 295 | ICMPMSGOUT_INC_STATS(net, type); |
304 | ICMP_INC_STATS(ICMP_MIB_OUTMSGS); | 296 | ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS); |
305 | } | 297 | } |
306 | 298 | ||
307 | /* | 299 | /* |
@@ -765,7 +757,7 @@ static void icmp_unreach(struct sk_buff *skb) | |||
765 | out: | 757 | out: |
766 | return; | 758 | return; |
767 | out_err: | 759 | out_err: |
768 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | 760 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); |
769 | goto out; | 761 | goto out; |
770 | } | 762 | } |
771 | 763 | ||
@@ -805,7 +797,7 @@ static void icmp_redirect(struct sk_buff *skb) | |||
805 | out: | 797 | out: |
806 | return; | 798 | return; |
807 | out_err: | 799 | out_err: |
808 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | 800 | ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS); |
809 | goto out; | 801 | goto out; |
810 | } | 802 | } |
811 | 803 | ||
@@ -876,7 +868,7 @@ static void icmp_timestamp(struct sk_buff *skb) | |||
876 | out: | 868 | out: |
877 | return; | 869 | return; |
878 | out_err: | 870 | out_err: |
879 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | 871 | ICMP_INC_STATS_BH(dev_net(skb->dst->dev), ICMP_MIB_INERRORS); |
880 | goto out; | 872 | goto out; |
881 | } | 873 | } |
882 | 874 | ||
@@ -975,6 +967,7 @@ int icmp_rcv(struct sk_buff *skb) | |||
975 | { | 967 | { |
976 | struct icmphdr *icmph; | 968 | struct icmphdr *icmph; |
977 | struct rtable *rt = skb->rtable; | 969 | struct rtable *rt = skb->rtable; |
970 | struct net *net = dev_net(rt->u.dst.dev); | ||
978 | 971 | ||
979 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { | 972 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { |
980 | int nh; | 973 | int nh; |
@@ -995,7 +988,7 @@ int icmp_rcv(struct sk_buff *skb) | |||
995 | skb_set_network_header(skb, nh); | 988 | skb_set_network_header(skb, nh); |
996 | } | 989 | } |
997 | 990 | ||
998 | ICMP_INC_STATS_BH(ICMP_MIB_INMSGS); | 991 | ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS); |
999 | 992 | ||
1000 | switch (skb->ip_summed) { | 993 | switch (skb->ip_summed) { |
1001 | case CHECKSUM_COMPLETE: | 994 | case CHECKSUM_COMPLETE: |
@@ -1013,7 +1006,7 @@ int icmp_rcv(struct sk_buff *skb) | |||
1013 | 1006 | ||
1014 | icmph = icmp_hdr(skb); | 1007 | icmph = icmp_hdr(skb); |
1015 | 1008 | ||
1016 | ICMPMSGIN_INC_STATS_BH(icmph->type); | 1009 | ICMPMSGIN_INC_STATS_BH(net, icmph->type); |
1017 | /* | 1010 | /* |
1018 | * 18 is the highest 'known' ICMP type. Anything else is a mystery | 1011 | * 18 is the highest 'known' ICMP type. Anything else is a mystery |
1019 | * | 1012 | * |
@@ -1029,9 +1022,6 @@ int icmp_rcv(struct sk_buff *skb) | |||
1029 | */ | 1022 | */ |
1030 | 1023 | ||
1031 | if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { | 1024 | if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { |
1032 | struct net *net; | ||
1033 | |||
1034 | net = dev_net(rt->u.dst.dev); | ||
1035 | /* | 1025 | /* |
1036 | * RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be | 1026 | * RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be |
1037 | * silently ignored (we let user decide with a sysctl). | 1027 | * silently ignored (we let user decide with a sysctl). |
@@ -1057,7 +1047,7 @@ drop: | |||
1057 | kfree_skb(skb); | 1047 | kfree_skb(skb); |
1058 | return 0; | 1048 | return 0; |
1059 | error: | 1049 | error: |
1060 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | 1050 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); |
1061 | goto drop; | 1051 | goto drop; |
1062 | } | 1052 | } |
1063 | 1053 | ||
@@ -1217,5 +1207,4 @@ int __init icmp_init(void) | |||
1217 | 1207 | ||
1218 | EXPORT_SYMBOL(icmp_err_convert); | 1208 | EXPORT_SYMBOL(icmp_err_convert); |
1219 | EXPORT_SYMBOL(icmp_send); | 1209 | EXPORT_SYMBOL(icmp_send); |
1220 | EXPORT_SYMBOL(icmp_statistics); | ||
1221 | EXPORT_SYMBOL(xrlim_allow); | 1210 | EXPORT_SYMBOL(xrlim_allow); |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 2769dc4a4c84..6203ece53606 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -8,8 +8,6 @@ | |||
8 | * the older version didn't come out right using gcc 2.5.8, the newer one | 8 | * the older version didn't come out right using gcc 2.5.8, the newer one |
9 | * seems to fall out with gcc 2.6.2. | 9 | * seems to fall out with gcc 2.6.2. |
10 | * | 10 | * |
11 | * Version: $Id: igmp.c,v 1.47 2002/02/01 22:01:03 davem Exp $ | ||
12 | * | ||
13 | * Authors: | 11 | * Authors: |
14 | * Alan Cox <Alan.Cox@linux.org> | 12 | * Alan Cox <Alan.Cox@linux.org> |
15 | * | 13 | * |
@@ -1198,7 +1196,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) | |||
1198 | 1196 | ||
1199 | ASSERT_RTNL(); | 1197 | ASSERT_RTNL(); |
1200 | 1198 | ||
1201 | if (dev_net(in_dev->dev) != &init_net) | 1199 | if (!net_eq(dev_net(in_dev->dev), &init_net)) |
1202 | return; | 1200 | return; |
1203 | 1201 | ||
1204 | for (im=in_dev->mc_list; im; im=im->next) { | 1202 | for (im=in_dev->mc_list; im; im=im->next) { |
@@ -1280,7 +1278,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) | |||
1280 | 1278 | ||
1281 | ASSERT_RTNL(); | 1279 | ASSERT_RTNL(); |
1282 | 1280 | ||
1283 | if (dev_net(in_dev->dev) != &init_net) | 1281 | if (!net_eq(dev_net(in_dev->dev), &init_net)) |
1284 | return; | 1282 | return; |
1285 | 1283 | ||
1286 | for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { | 1284 | for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { |
@@ -1310,7 +1308,7 @@ void ip_mc_down(struct in_device *in_dev) | |||
1310 | 1308 | ||
1311 | ASSERT_RTNL(); | 1309 | ASSERT_RTNL(); |
1312 | 1310 | ||
1313 | if (dev_net(in_dev->dev) != &init_net) | 1311 | if (!net_eq(dev_net(in_dev->dev), &init_net)) |
1314 | return; | 1312 | return; |
1315 | 1313 | ||
1316 | for (i=in_dev->mc_list; i; i=i->next) | 1314 | for (i=in_dev->mc_list; i; i=i->next) |
@@ -1333,7 +1331,7 @@ void ip_mc_init_dev(struct in_device *in_dev) | |||
1333 | { | 1331 | { |
1334 | ASSERT_RTNL(); | 1332 | ASSERT_RTNL(); |
1335 | 1333 | ||
1336 | if (dev_net(in_dev->dev) != &init_net) | 1334 | if (!net_eq(dev_net(in_dev->dev), &init_net)) |
1337 | return; | 1335 | return; |
1338 | 1336 | ||
1339 | in_dev->mc_tomb = NULL; | 1337 | in_dev->mc_tomb = NULL; |
@@ -1359,7 +1357,7 @@ void ip_mc_up(struct in_device *in_dev) | |||
1359 | 1357 | ||
1360 | ASSERT_RTNL(); | 1358 | ASSERT_RTNL(); |
1361 | 1359 | ||
1362 | if (dev_net(in_dev->dev) != &init_net) | 1360 | if (!net_eq(dev_net(in_dev->dev), &init_net)) |
1363 | return; | 1361 | return; |
1364 | 1362 | ||
1365 | ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); | 1363 | ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); |
@@ -1378,7 +1376,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev) | |||
1378 | 1376 | ||
1379 | ASSERT_RTNL(); | 1377 | ASSERT_RTNL(); |
1380 | 1378 | ||
1381 | if (dev_net(in_dev->dev) != &init_net) | 1379 | if (!net_eq(dev_net(in_dev->dev), &init_net)) |
1382 | return; | 1380 | return; |
1383 | 1381 | ||
1384 | /* Deactivate timers */ | 1382 | /* Deactivate timers */ |
@@ -1762,7 +1760,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr) | |||
1762 | if (!ipv4_is_multicast(addr)) | 1760 | if (!ipv4_is_multicast(addr)) |
1763 | return -EINVAL; | 1761 | return -EINVAL; |
1764 | 1762 | ||
1765 | if (sock_net(sk) != &init_net) | 1763 | if (!net_eq(sock_net(sk), &init_net)) |
1766 | return -EPROTONOSUPPORT; | 1764 | return -EPROTONOSUPPORT; |
1767 | 1765 | ||
1768 | rtnl_lock(); | 1766 | rtnl_lock(); |
@@ -1833,7 +1831,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) | |||
1833 | u32 ifindex; | 1831 | u32 ifindex; |
1834 | int ret = -EADDRNOTAVAIL; | 1832 | int ret = -EADDRNOTAVAIL; |
1835 | 1833 | ||
1836 | if (sock_net(sk) != &init_net) | 1834 | if (!net_eq(sock_net(sk), &init_net)) |
1837 | return -EPROTONOSUPPORT; | 1835 | return -EPROTONOSUPPORT; |
1838 | 1836 | ||
1839 | rtnl_lock(); | 1837 | rtnl_lock(); |
@@ -1881,7 +1879,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct | |||
1881 | if (!ipv4_is_multicast(addr)) | 1879 | if (!ipv4_is_multicast(addr)) |
1882 | return -EINVAL; | 1880 | return -EINVAL; |
1883 | 1881 | ||
1884 | if (sock_net(sk) != &init_net) | 1882 | if (!net_eq(sock_net(sk), &init_net)) |
1885 | return -EPROTONOSUPPORT; | 1883 | return -EPROTONOSUPPORT; |
1886 | 1884 | ||
1887 | rtnl_lock(); | 1885 | rtnl_lock(); |
@@ -2017,7 +2015,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex) | |||
2017 | msf->imsf_fmode != MCAST_EXCLUDE) | 2015 | msf->imsf_fmode != MCAST_EXCLUDE) |
2018 | return -EINVAL; | 2016 | return -EINVAL; |
2019 | 2017 | ||
2020 | if (sock_net(sk) != &init_net) | 2018 | if (!net_eq(sock_net(sk), &init_net)) |
2021 | return -EPROTONOSUPPORT; | 2019 | return -EPROTONOSUPPORT; |
2022 | 2020 | ||
2023 | rtnl_lock(); | 2021 | rtnl_lock(); |
@@ -2100,7 +2098,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, | |||
2100 | if (!ipv4_is_multicast(addr)) | 2098 | if (!ipv4_is_multicast(addr)) |
2101 | return -EINVAL; | 2099 | return -EINVAL; |
2102 | 2100 | ||
2103 | if (sock_net(sk) != &init_net) | 2101 | if (!net_eq(sock_net(sk), &init_net)) |
2104 | return -EPROTONOSUPPORT; | 2102 | return -EPROTONOSUPPORT; |
2105 | 2103 | ||
2106 | rtnl_lock(); | 2104 | rtnl_lock(); |
@@ -2165,7 +2163,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, | |||
2165 | if (!ipv4_is_multicast(addr)) | 2163 | if (!ipv4_is_multicast(addr)) |
2166 | return -EINVAL; | 2164 | return -EINVAL; |
2167 | 2165 | ||
2168 | if (sock_net(sk) != &init_net) | 2166 | if (!net_eq(sock_net(sk), &init_net)) |
2169 | return -EPROTONOSUPPORT; | 2167 | return -EPROTONOSUPPORT; |
2170 | 2168 | ||
2171 | rtnl_lock(); | 2169 | rtnl_lock(); |
@@ -2252,7 +2250,7 @@ void ip_mc_drop_socket(struct sock *sk) | |||
2252 | if (inet->mc_list == NULL) | 2250 | if (inet->mc_list == NULL) |
2253 | return; | 2251 | return; |
2254 | 2252 | ||
2255 | if (sock_net(sk) != &init_net) | 2253 | if (!net_eq(sock_net(sk), &init_net)) |
2256 | return; | 2254 | return; |
2257 | 2255 | ||
2258 | rtnl_lock(); | 2256 | rtnl_lock(); |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index ec834480abe7..bb81c958b744 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -103,7 +103,8 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) | |||
103 | rover = net_random() % remaining + low; | 103 | rover = net_random() % remaining + low; |
104 | 104 | ||
105 | do { | 105 | do { |
106 | head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)]; | 106 | head = &hashinfo->bhash[inet_bhashfn(net, rover, |
107 | hashinfo->bhash_size)]; | ||
107 | spin_lock(&head->lock); | 108 | spin_lock(&head->lock); |
108 | inet_bind_bucket_for_each(tb, node, &head->chain) | 109 | inet_bind_bucket_for_each(tb, node, &head->chain) |
109 | if (tb->ib_net == net && tb->port == rover) | 110 | if (tb->ib_net == net && tb->port == rover) |
@@ -130,7 +131,8 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) | |||
130 | */ | 131 | */ |
131 | snum = rover; | 132 | snum = rover; |
132 | } else { | 133 | } else { |
133 | head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)]; | 134 | head = &hashinfo->bhash[inet_bhashfn(net, snum, |
135 | hashinfo->bhash_size)]; | ||
134 | spin_lock(&head->lock); | 136 | spin_lock(&head->lock); |
135 | inet_bind_bucket_for_each(tb, node, &head->chain) | 137 | inet_bind_bucket_for_each(tb, node, &head->chain) |
136 | if (tb->ib_net == net && tb->port == snum) | 138 | if (tb->ib_net == net && tb->port == snum) |
@@ -336,15 +338,16 @@ struct dst_entry* inet_csk_route_req(struct sock *sk, | |||
336 | .uli_u = { .ports = | 338 | .uli_u = { .ports = |
337 | { .sport = inet_sk(sk)->sport, | 339 | { .sport = inet_sk(sk)->sport, |
338 | .dport = ireq->rmt_port } } }; | 340 | .dport = ireq->rmt_port } } }; |
341 | struct net *net = sock_net(sk); | ||
339 | 342 | ||
340 | security_req_classify_flow(req, &fl); | 343 | security_req_classify_flow(req, &fl); |
341 | if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) { | 344 | if (ip_route_output_flow(net, &rt, &fl, sk, 0)) { |
342 | IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); | 345 | IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); |
343 | return NULL; | 346 | return NULL; |
344 | } | 347 | } |
345 | if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) { | 348 | if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) { |
346 | ip_rt_put(rt); | 349 | ip_rt_put(rt); |
347 | IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); | 350 | IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); |
348 | return NULL; | 351 | return NULL; |
349 | } | 352 | } |
350 | return &rt->u.dst; | 353 | return &rt->u.dst; |
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index da97695e7096..c10036e7a463 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * inet_diag.c Module for monitoring INET transport protocols sockets. | 2 | * inet_diag.c Module for monitoring INET transport protocols sockets. |
3 | * | 3 | * |
4 | * Version: $Id: inet_diag.c,v 1.3 2002/02/01 22:01:04 davem Exp $ | ||
5 | * | ||
6 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> | 4 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> |
7 | * | 5 | * |
8 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 2023d37b2708..115f53722d20 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
@@ -70,7 +70,8 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, | |||
70 | static void __inet_put_port(struct sock *sk) | 70 | static void __inet_put_port(struct sock *sk) |
71 | { | 71 | { |
72 | struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; | 72 | struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; |
73 | const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size); | 73 | const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->num, |
74 | hashinfo->bhash_size); | ||
74 | struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; | 75 | struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; |
75 | struct inet_bind_bucket *tb; | 76 | struct inet_bind_bucket *tb; |
76 | 77 | ||
@@ -95,7 +96,8 @@ EXPORT_SYMBOL(inet_put_port); | |||
95 | void __inet_inherit_port(struct sock *sk, struct sock *child) | 96 | void __inet_inherit_port(struct sock *sk, struct sock *child) |
96 | { | 97 | { |
97 | struct inet_hashinfo *table = sk->sk_prot->h.hashinfo; | 98 | struct inet_hashinfo *table = sk->sk_prot->h.hashinfo; |
98 | const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size); | 99 | const int bhash = inet_bhashfn(sock_net(sk), inet_sk(child)->num, |
100 | table->bhash_size); | ||
99 | struct inet_bind_hashbucket *head = &table->bhash[bhash]; | 101 | struct inet_bind_hashbucket *head = &table->bhash[bhash]; |
100 | struct inet_bind_bucket *tb; | 102 | struct inet_bind_bucket *tb; |
101 | 103 | ||
@@ -192,7 +194,7 @@ struct sock *__inet_lookup_listener(struct net *net, | |||
192 | const struct hlist_head *head; | 194 | const struct hlist_head *head; |
193 | 195 | ||
194 | read_lock(&hashinfo->lhash_lock); | 196 | read_lock(&hashinfo->lhash_lock); |
195 | head = &hashinfo->listening_hash[inet_lhashfn(hnum)]; | 197 | head = &hashinfo->listening_hash[inet_lhashfn(net, hnum)]; |
196 | if (!hlist_empty(head)) { | 198 | if (!hlist_empty(head)) { |
197 | const struct inet_sock *inet = inet_sk((sk = __sk_head(head))); | 199 | const struct inet_sock *inet = inet_sk((sk = __sk_head(head))); |
198 | 200 | ||
@@ -225,7 +227,7 @@ struct sock * __inet_lookup_established(struct net *net, | |||
225 | /* Optimize here for direct hit, only listening connections can | 227 | /* Optimize here for direct hit, only listening connections can |
226 | * have wildcards anyways. | 228 | * have wildcards anyways. |
227 | */ | 229 | */ |
228 | unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport); | 230 | unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport); |
229 | struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); | 231 | struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); |
230 | rwlock_t *lock = inet_ehash_lockp(hashinfo, hash); | 232 | rwlock_t *lock = inet_ehash_lockp(hashinfo, hash); |
231 | 233 | ||
@@ -265,13 +267,13 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, | |||
265 | int dif = sk->sk_bound_dev_if; | 267 | int dif = sk->sk_bound_dev_if; |
266 | INET_ADDR_COOKIE(acookie, saddr, daddr) | 268 | INET_ADDR_COOKIE(acookie, saddr, daddr) |
267 | const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); | 269 | const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); |
268 | unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport); | 270 | struct net *net = sock_net(sk); |
271 | unsigned int hash = inet_ehashfn(net, daddr, lport, saddr, inet->dport); | ||
269 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); | 272 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); |
270 | rwlock_t *lock = inet_ehash_lockp(hinfo, hash); | 273 | rwlock_t *lock = inet_ehash_lockp(hinfo, hash); |
271 | struct sock *sk2; | 274 | struct sock *sk2; |
272 | const struct hlist_node *node; | 275 | const struct hlist_node *node; |
273 | struct inet_timewait_sock *tw; | 276 | struct inet_timewait_sock *tw; |
274 | struct net *net = sock_net(sk); | ||
275 | 277 | ||
276 | prefetch(head->chain.first); | 278 | prefetch(head->chain.first); |
277 | write_lock(lock); | 279 | write_lock(lock); |
@@ -310,11 +312,11 @@ unique: | |||
310 | 312 | ||
311 | if (twp) { | 313 | if (twp) { |
312 | *twp = tw; | 314 | *twp = tw; |
313 | NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); | 315 | NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED); |
314 | } else if (tw) { | 316 | } else if (tw) { |
315 | /* Silly. Should hash-dance instead... */ | 317 | /* Silly. Should hash-dance instead... */ |
316 | inet_twsk_deschedule(tw, death_row); | 318 | inet_twsk_deschedule(tw, death_row); |
317 | NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); | 319 | NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED); |
318 | 320 | ||
319 | inet_twsk_put(tw); | 321 | inet_twsk_put(tw); |
320 | } | 322 | } |
@@ -438,7 +440,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, | |||
438 | local_bh_disable(); | 440 | local_bh_disable(); |
439 | for (i = 1; i <= remaining; i++) { | 441 | for (i = 1; i <= remaining; i++) { |
440 | port = low + (i + offset) % remaining; | 442 | port = low + (i + offset) % remaining; |
441 | head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)]; | 443 | head = &hinfo->bhash[inet_bhashfn(net, port, |
444 | hinfo->bhash_size)]; | ||
442 | spin_lock(&head->lock); | 445 | spin_lock(&head->lock); |
443 | 446 | ||
444 | /* Does not bother with rcv_saddr checks, | 447 | /* Does not bother with rcv_saddr checks, |
@@ -493,7 +496,7 @@ ok: | |||
493 | goto out; | 496 | goto out; |
494 | } | 497 | } |
495 | 498 | ||
496 | head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)]; | 499 | head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)]; |
497 | tb = inet_csk(sk)->icsk_bind_hash; | 500 | tb = inet_csk(sk)->icsk_bind_hash; |
498 | spin_lock_bh(&head->lock); | 501 | spin_lock_bh(&head->lock); |
499 | if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { | 502 | if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { |
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index ce16e9ac24c1..75c2def8f9a0 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
@@ -32,7 +32,8 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw, | |||
32 | write_unlock(lock); | 32 | write_unlock(lock); |
33 | 33 | ||
34 | /* Disassociate with bind bucket. */ | 34 | /* Disassociate with bind bucket. */ |
35 | bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)]; | 35 | bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num, |
36 | hashinfo->bhash_size)]; | ||
36 | spin_lock(&bhead->lock); | 37 | spin_lock(&bhead->lock); |
37 | tb = tw->tw_tb; | 38 | tb = tw->tw_tb; |
38 | __hlist_del(&tw->tw_bind_node); | 39 | __hlist_del(&tw->tw_bind_node); |
@@ -81,7 +82,8 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, | |||
81 | Note, that any socket with inet->num != 0 MUST be bound in | 82 | Note, that any socket with inet->num != 0 MUST be bound in |
82 | binding cache, even if it is closed. | 83 | binding cache, even if it is closed. |
83 | */ | 84 | */ |
84 | bhead = &hashinfo->bhash[inet_bhashfn(inet->num, hashinfo->bhash_size)]; | 85 | bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->num, |
86 | hashinfo->bhash_size)]; | ||
85 | spin_lock(&bhead->lock); | 87 | spin_lock(&bhead->lock); |
86 | tw->tw_tb = icsk->icsk_bind_hash; | 88 | tw->tw_tb = icsk->icsk_bind_hash; |
87 | BUG_TRAP(icsk->icsk_bind_hash); | 89 | BUG_TRAP(icsk->icsk_bind_hash); |
@@ -158,6 +160,9 @@ rescan: | |||
158 | __inet_twsk_del_dead_node(tw); | 160 | __inet_twsk_del_dead_node(tw); |
159 | spin_unlock(&twdr->death_lock); | 161 | spin_unlock(&twdr->death_lock); |
160 | __inet_twsk_kill(tw, twdr->hashinfo); | 162 | __inet_twsk_kill(tw, twdr->hashinfo); |
163 | #ifdef CONFIG_NET_NS | ||
164 | NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED); | ||
165 | #endif | ||
161 | inet_twsk_put(tw); | 166 | inet_twsk_put(tw); |
162 | killed++; | 167 | killed++; |
163 | spin_lock(&twdr->death_lock); | 168 | spin_lock(&twdr->death_lock); |
@@ -176,8 +181,9 @@ rescan: | |||
176 | } | 181 | } |
177 | 182 | ||
178 | twdr->tw_count -= killed; | 183 | twdr->tw_count -= killed; |
179 | NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed); | 184 | #ifndef CONFIG_NET_NS |
180 | 185 | NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed); | |
186 | #endif | ||
181 | return ret; | 187 | return ret; |
182 | } | 188 | } |
183 | 189 | ||
@@ -370,6 +376,9 @@ void inet_twdr_twcal_tick(unsigned long data) | |||
370 | &twdr->twcal_row[slot]) { | 376 | &twdr->twcal_row[slot]) { |
371 | __inet_twsk_del_dead_node(tw); | 377 | __inet_twsk_del_dead_node(tw); |
372 | __inet_twsk_kill(tw, twdr->hashinfo); | 378 | __inet_twsk_kill(tw, twdr->hashinfo); |
379 | #ifdef CONFIG_NET_NS | ||
380 | NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED); | ||
381 | #endif | ||
373 | inet_twsk_put(tw); | 382 | inet_twsk_put(tw); |
374 | killed++; | 383 | killed++; |
375 | } | 384 | } |
@@ -393,7 +402,9 @@ void inet_twdr_twcal_tick(unsigned long data) | |||
393 | out: | 402 | out: |
394 | if ((twdr->tw_count -= killed) == 0) | 403 | if ((twdr->tw_count -= killed) == 0) |
395 | del_timer(&twdr->tw_timer); | 404 | del_timer(&twdr->tw_timer); |
396 | NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed); | 405 | #ifndef CONFIG_NET_NS |
406 | NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed); | ||
407 | #endif | ||
397 | spin_unlock(&twdr->death_lock); | 408 | spin_unlock(&twdr->death_lock); |
398 | } | 409 | } |
399 | 410 | ||
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index af995198f643..a456ceeac3f2 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
@@ -3,8 +3,6 @@ | |||
3 | * | 3 | * |
4 | * This source is covered by the GNU GPL, the same as all kernel sources. | 4 | * This source is covered by the GNU GPL, the same as all kernel sources. |
5 | * | 5 | * |
6 | * Version: $Id: inetpeer.c,v 1.7 2001/09/20 21:22:50 davem Exp $ | ||
7 | * | ||
8 | * Authors: Andrey V. Savochkin <saw@msu.ru> | 6 | * Authors: Andrey V. Savochkin <saw@msu.ru> |
9 | */ | 7 | */ |
10 | 8 | ||
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 4813c39b438b..450016b89a18 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * The IP forwarding functionality. | 6 | * The IP forwarding functionality. |
7 | * | 7 | * |
8 | * Version: $Id: ip_forward.c,v 1.48 2000/12/13 18:31:48 davem Exp $ | ||
9 | * | ||
10 | * Authors: see ip.c | 8 | * Authors: see ip.c |
11 | * | 9 | * |
12 | * Fixes: | 10 | * Fixes: |
@@ -44,7 +42,7 @@ static int ip_forward_finish(struct sk_buff *skb) | |||
44 | { | 42 | { |
45 | struct ip_options * opt = &(IPCB(skb)->opt); | 43 | struct ip_options * opt = &(IPCB(skb)->opt); |
46 | 44 | ||
47 | IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS); | 45 | IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); |
48 | 46 | ||
49 | if (unlikely(opt->optlen)) | 47 | if (unlikely(opt->optlen)) |
50 | ip_forward_options(skb); | 48 | ip_forward_options(skb); |
@@ -58,6 +56,9 @@ int ip_forward(struct sk_buff *skb) | |||
58 | struct rtable *rt; /* Route we use */ | 56 | struct rtable *rt; /* Route we use */ |
59 | struct ip_options * opt = &(IPCB(skb)->opt); | 57 | struct ip_options * opt = &(IPCB(skb)->opt); |
60 | 58 | ||
59 | if (skb_warn_if_lro(skb)) | ||
60 | goto drop; | ||
61 | |||
61 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb)) | 62 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb)) |
62 | goto drop; | 63 | goto drop; |
63 | 64 | ||
@@ -87,7 +88,7 @@ int ip_forward(struct sk_buff *skb) | |||
87 | 88 | ||
88 | if (unlikely(skb->len > dst_mtu(&rt->u.dst) && !skb_is_gso(skb) && | 89 | if (unlikely(skb->len > dst_mtu(&rt->u.dst) && !skb_is_gso(skb) && |
89 | (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) { | 90 | (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) { |
90 | IP_INC_STATS(IPSTATS_MIB_FRAGFAILS); | 91 | IP_INC_STATS(dev_net(rt->u.dst.dev), IPSTATS_MIB_FRAGFAILS); |
91 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, | 92 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, |
92 | htonl(dst_mtu(&rt->u.dst))); | 93 | htonl(dst_mtu(&rt->u.dst))); |
93 | goto drop; | 94 | goto drop; |
@@ -122,7 +123,7 @@ sr_failed: | |||
122 | 123 | ||
123 | too_many_hops: | 124 | too_many_hops: |
124 | /* Tell the sender its packet died... */ | 125 | /* Tell the sender its packet died... */ |
125 | IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 126 | IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_INHDRERRORS); |
126 | icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); | 127 | icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); |
127 | drop: | 128 | drop: |
128 | kfree_skb(skb); | 129 | kfree_skb(skb); |
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 37221f659159..38d38f058018 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * The IP fragmentation functionality. | 6 | * The IP fragmentation functionality. |
7 | * | 7 | * |
8 | * Version: $Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $ | ||
9 | * | ||
10 | * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG> | 8 | * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG> |
11 | * Alan Cox <Alan.Cox@linux.org> | 9 | * Alan Cox <Alan.Cox@linux.org> |
12 | * | 10 | * |
@@ -180,7 +178,7 @@ static void ip_evictor(struct net *net) | |||
180 | 178 | ||
181 | evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags); | 179 | evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags); |
182 | if (evicted) | 180 | if (evicted) |
183 | IP_ADD_STATS_BH(IPSTATS_MIB_REASMFAILS, evicted); | 181 | IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted); |
184 | } | 182 | } |
185 | 183 | ||
186 | /* | 184 | /* |
@@ -189,8 +187,10 @@ static void ip_evictor(struct net *net) | |||
189 | static void ip_expire(unsigned long arg) | 187 | static void ip_expire(unsigned long arg) |
190 | { | 188 | { |
191 | struct ipq *qp; | 189 | struct ipq *qp; |
190 | struct net *net; | ||
192 | 191 | ||
193 | qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); | 192 | qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); |
193 | net = container_of(qp->q.net, struct net, ipv4.frags); | ||
194 | 194 | ||
195 | spin_lock(&qp->q.lock); | 195 | spin_lock(&qp->q.lock); |
196 | 196 | ||
@@ -199,14 +199,12 @@ static void ip_expire(unsigned long arg) | |||
199 | 199 | ||
200 | ipq_kill(qp); | 200 | ipq_kill(qp); |
201 | 201 | ||
202 | IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT); | 202 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT); |
203 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 203 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); |
204 | 204 | ||
205 | if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { | 205 | if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { |
206 | struct sk_buff *head = qp->q.fragments; | 206 | struct sk_buff *head = qp->q.fragments; |
207 | struct net *net; | ||
208 | 207 | ||
209 | net = container_of(qp->q.net, struct net, ipv4.frags); | ||
210 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ | 208 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ |
211 | if ((head->dev = dev_get_by_index(net, qp->iif)) != NULL) { | 209 | if ((head->dev = dev_get_by_index(net, qp->iif)) != NULL) { |
212 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); | 210 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); |
@@ -263,7 +261,10 @@ static inline int ip_frag_too_far(struct ipq *qp) | |||
263 | rc = qp->q.fragments && (end - start) > max; | 261 | rc = qp->q.fragments && (end - start) > max; |
264 | 262 | ||
265 | if (rc) { | 263 | if (rc) { |
266 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 264 | struct net *net; |
265 | |||
266 | net = container_of(qp->q.net, struct net, ipv4.frags); | ||
267 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); | ||
267 | } | 268 | } |
268 | 269 | ||
269 | return rc; | 270 | return rc; |
@@ -547,7 +548,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
547 | iph = ip_hdr(head); | 548 | iph = ip_hdr(head); |
548 | iph->frag_off = 0; | 549 | iph->frag_off = 0; |
549 | iph->tot_len = htons(len); | 550 | iph->tot_len = htons(len); |
550 | IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS); | 551 | IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMOKS); |
551 | qp->q.fragments = NULL; | 552 | qp->q.fragments = NULL; |
552 | return 0; | 553 | return 0; |
553 | 554 | ||
@@ -562,7 +563,7 @@ out_oversize: | |||
562 | "Oversized IP packet from " NIPQUAD_FMT ".\n", | 563 | "Oversized IP packet from " NIPQUAD_FMT ".\n", |
563 | NIPQUAD(qp->saddr)); | 564 | NIPQUAD(qp->saddr)); |
564 | out_fail: | 565 | out_fail: |
565 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 566 | IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS); |
566 | return err; | 567 | return err; |
567 | } | 568 | } |
568 | 569 | ||
@@ -572,9 +573,9 @@ int ip_defrag(struct sk_buff *skb, u32 user) | |||
572 | struct ipq *qp; | 573 | struct ipq *qp; |
573 | struct net *net; | 574 | struct net *net; |
574 | 575 | ||
575 | IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS); | ||
576 | |||
577 | net = skb->dev ? dev_net(skb->dev) : dev_net(skb->dst->dev); | 576 | net = skb->dev ? dev_net(skb->dev) : dev_net(skb->dst->dev); |
577 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); | ||
578 | |||
578 | /* Start by cleaning up the memory. */ | 579 | /* Start by cleaning up the memory. */ |
579 | if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh) | 580 | if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh) |
580 | ip_evictor(net); | 581 | ip_evictor(net); |
@@ -592,7 +593,7 @@ int ip_defrag(struct sk_buff *skb, u32 user) | |||
592 | return ret; | 593 | return ret; |
593 | } | 594 | } |
594 | 595 | ||
595 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 596 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); |
596 | kfree_skb(skb); | 597 | kfree_skb(skb); |
597 | return -ENOMEM; | 598 | return -ENOMEM; |
598 | } | 599 | } |
@@ -600,7 +601,7 @@ int ip_defrag(struct sk_buff *skb, u32 user) | |||
600 | #ifdef CONFIG_SYSCTL | 601 | #ifdef CONFIG_SYSCTL |
601 | static int zero; | 602 | static int zero; |
602 | 603 | ||
603 | static struct ctl_table ip4_frags_ctl_table[] = { | 604 | static struct ctl_table ip4_frags_ns_ctl_table[] = { |
604 | { | 605 | { |
605 | .ctl_name = NET_IPV4_IPFRAG_HIGH_THRESH, | 606 | .ctl_name = NET_IPV4_IPFRAG_HIGH_THRESH, |
606 | .procname = "ipfrag_high_thresh", | 607 | .procname = "ipfrag_high_thresh", |
@@ -626,6 +627,10 @@ static struct ctl_table ip4_frags_ctl_table[] = { | |||
626 | .proc_handler = &proc_dointvec_jiffies, | 627 | .proc_handler = &proc_dointvec_jiffies, |
627 | .strategy = &sysctl_jiffies | 628 | .strategy = &sysctl_jiffies |
628 | }, | 629 | }, |
630 | { } | ||
631 | }; | ||
632 | |||
633 | static struct ctl_table ip4_frags_ctl_table[] = { | ||
629 | { | 634 | { |
630 | .ctl_name = NET_IPV4_IPFRAG_SECRET_INTERVAL, | 635 | .ctl_name = NET_IPV4_IPFRAG_SECRET_INTERVAL, |
631 | .procname = "ipfrag_secret_interval", | 636 | .procname = "ipfrag_secret_interval", |
@@ -646,22 +651,20 @@ static struct ctl_table ip4_frags_ctl_table[] = { | |||
646 | { } | 651 | { } |
647 | }; | 652 | }; |
648 | 653 | ||
649 | static int ip4_frags_ctl_register(struct net *net) | 654 | static int ip4_frags_ns_ctl_register(struct net *net) |
650 | { | 655 | { |
651 | struct ctl_table *table; | 656 | struct ctl_table *table; |
652 | struct ctl_table_header *hdr; | 657 | struct ctl_table_header *hdr; |
653 | 658 | ||
654 | table = ip4_frags_ctl_table; | 659 | table = ip4_frags_ns_ctl_table; |
655 | if (net != &init_net) { | 660 | if (net != &init_net) { |
656 | table = kmemdup(table, sizeof(ip4_frags_ctl_table), GFP_KERNEL); | 661 | table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL); |
657 | if (table == NULL) | 662 | if (table == NULL) |
658 | goto err_alloc; | 663 | goto err_alloc; |
659 | 664 | ||
660 | table[0].data = &net->ipv4.frags.high_thresh; | 665 | table[0].data = &net->ipv4.frags.high_thresh; |
661 | table[1].data = &net->ipv4.frags.low_thresh; | 666 | table[1].data = &net->ipv4.frags.low_thresh; |
662 | table[2].data = &net->ipv4.frags.timeout; | 667 | table[2].data = &net->ipv4.frags.timeout; |
663 | table[3].mode &= ~0222; | ||
664 | table[4].mode &= ~0222; | ||
665 | } | 668 | } |
666 | 669 | ||
667 | hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table); | 670 | hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table); |
@@ -678,7 +681,7 @@ err_alloc: | |||
678 | return -ENOMEM; | 681 | return -ENOMEM; |
679 | } | 682 | } |
680 | 683 | ||
681 | static void ip4_frags_ctl_unregister(struct net *net) | 684 | static void ip4_frags_ns_ctl_unregister(struct net *net) |
682 | { | 685 | { |
683 | struct ctl_table *table; | 686 | struct ctl_table *table; |
684 | 687 | ||
@@ -686,13 +689,22 @@ static void ip4_frags_ctl_unregister(struct net *net) | |||
686 | unregister_net_sysctl_table(net->ipv4.frags_hdr); | 689 | unregister_net_sysctl_table(net->ipv4.frags_hdr); |
687 | kfree(table); | 690 | kfree(table); |
688 | } | 691 | } |
692 | |||
693 | static void ip4_frags_ctl_register(void) | ||
694 | { | ||
695 | register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table); | ||
696 | } | ||
689 | #else | 697 | #else |
690 | static inline int ip4_frags_ctl_register(struct net *net) | 698 | static inline int ip4_frags_ns_ctl_register(struct net *net) |
691 | { | 699 | { |
692 | return 0; | 700 | return 0; |
693 | } | 701 | } |
694 | 702 | ||
695 | static inline void ip4_frags_ctl_unregister(struct net *net) | 703 | static inline void ip4_frags_ns_ctl_unregister(struct net *net) |
704 | { | ||
705 | } | ||
706 | |||
707 | static inline void ip4_frags_ctl_register(void) | ||
696 | { | 708 | { |
697 | } | 709 | } |
698 | #endif | 710 | #endif |
@@ -716,12 +728,12 @@ static int ipv4_frags_init_net(struct net *net) | |||
716 | 728 | ||
717 | inet_frags_init_net(&net->ipv4.frags); | 729 | inet_frags_init_net(&net->ipv4.frags); |
718 | 730 | ||
719 | return ip4_frags_ctl_register(net); | 731 | return ip4_frags_ns_ctl_register(net); |
720 | } | 732 | } |
721 | 733 | ||
722 | static void ipv4_frags_exit_net(struct net *net) | 734 | static void ipv4_frags_exit_net(struct net *net) |
723 | { | 735 | { |
724 | ip4_frags_ctl_unregister(net); | 736 | ip4_frags_ns_ctl_unregister(net); |
725 | inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); | 737 | inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); |
726 | } | 738 | } |
727 | 739 | ||
@@ -732,6 +744,7 @@ static struct pernet_operations ip4_frags_ops = { | |||
732 | 744 | ||
733 | void __init ipfrag_init(void) | 745 | void __init ipfrag_init(void) |
734 | { | 746 | { |
747 | ip4_frags_ctl_register(); | ||
735 | register_pernet_subsys(&ip4_frags_ops); | 748 | register_pernet_subsys(&ip4_frags_ops); |
736 | ip4_frags.hashfn = ip4_hashfn; | 749 | ip4_frags.hashfn = ip4_hashfn; |
737 | ip4_frags.constructor = ip4_frag_init; | 750 | ip4_frags.constructor = ip4_frag_init; |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 4342cba4ff82..2a61158ea722 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -473,6 +473,8 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
473 | read_lock(&ipgre_lock); | 473 | read_lock(&ipgre_lock); |
474 | if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev), | 474 | if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev), |
475 | iph->saddr, iph->daddr, key)) != NULL) { | 475 | iph->saddr, iph->daddr, key)) != NULL) { |
476 | struct net_device_stats *stats = &tunnel->dev->stats; | ||
477 | |||
476 | secpath_reset(skb); | 478 | secpath_reset(skb); |
477 | 479 | ||
478 | skb->protocol = *(__be16*)(h + 2); | 480 | skb->protocol = *(__be16*)(h + 2); |
@@ -497,28 +499,28 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
497 | /* Looped back packet, drop it! */ | 499 | /* Looped back packet, drop it! */ |
498 | if (skb->rtable->fl.iif == 0) | 500 | if (skb->rtable->fl.iif == 0) |
499 | goto drop; | 501 | goto drop; |
500 | tunnel->stat.multicast++; | 502 | stats->multicast++; |
501 | skb->pkt_type = PACKET_BROADCAST; | 503 | skb->pkt_type = PACKET_BROADCAST; |
502 | } | 504 | } |
503 | #endif | 505 | #endif |
504 | 506 | ||
505 | if (((flags&GRE_CSUM) && csum) || | 507 | if (((flags&GRE_CSUM) && csum) || |
506 | (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) { | 508 | (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) { |
507 | tunnel->stat.rx_crc_errors++; | 509 | stats->rx_crc_errors++; |
508 | tunnel->stat.rx_errors++; | 510 | stats->rx_errors++; |
509 | goto drop; | 511 | goto drop; |
510 | } | 512 | } |
511 | if (tunnel->parms.i_flags&GRE_SEQ) { | 513 | if (tunnel->parms.i_flags&GRE_SEQ) { |
512 | if (!(flags&GRE_SEQ) || | 514 | if (!(flags&GRE_SEQ) || |
513 | (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) { | 515 | (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) { |
514 | tunnel->stat.rx_fifo_errors++; | 516 | stats->rx_fifo_errors++; |
515 | tunnel->stat.rx_errors++; | 517 | stats->rx_errors++; |
516 | goto drop; | 518 | goto drop; |
517 | } | 519 | } |
518 | tunnel->i_seqno = seqno + 1; | 520 | tunnel->i_seqno = seqno + 1; |
519 | } | 521 | } |
520 | tunnel->stat.rx_packets++; | 522 | stats->rx_packets++; |
521 | tunnel->stat.rx_bytes += skb->len; | 523 | stats->rx_bytes += skb->len; |
522 | skb->dev = tunnel->dev; | 524 | skb->dev = tunnel->dev; |
523 | dst_release(skb->dst); | 525 | dst_release(skb->dst); |
524 | skb->dst = NULL; | 526 | skb->dst = NULL; |
@@ -540,7 +542,7 @@ drop_nolock: | |||
540 | static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 542 | static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) |
541 | { | 543 | { |
542 | struct ip_tunnel *tunnel = netdev_priv(dev); | 544 | struct ip_tunnel *tunnel = netdev_priv(dev); |
543 | struct net_device_stats *stats = &tunnel->stat; | 545 | struct net_device_stats *stats = &tunnel->dev->stats; |
544 | struct iphdr *old_iph = ip_hdr(skb); | 546 | struct iphdr *old_iph = ip_hdr(skb); |
545 | struct iphdr *tiph; | 547 | struct iphdr *tiph; |
546 | u8 tos; | 548 | u8 tos; |
@@ -554,7 +556,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
554 | int mtu; | 556 | int mtu; |
555 | 557 | ||
556 | if (tunnel->recursion++) { | 558 | if (tunnel->recursion++) { |
557 | tunnel->stat.collisions++; | 559 | stats->collisions++; |
558 | goto tx_error; | 560 | goto tx_error; |
559 | } | 561 | } |
560 | 562 | ||
@@ -570,7 +572,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
570 | /* NBMA tunnel */ | 572 | /* NBMA tunnel */ |
571 | 573 | ||
572 | if (skb->dst == NULL) { | 574 | if (skb->dst == NULL) { |
573 | tunnel->stat.tx_fifo_errors++; | 575 | stats->tx_fifo_errors++; |
574 | goto tx_error; | 576 | goto tx_error; |
575 | } | 577 | } |
576 | 578 | ||
@@ -621,7 +623,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
621 | .tos = RT_TOS(tos) } }, | 623 | .tos = RT_TOS(tos) } }, |
622 | .proto = IPPROTO_GRE }; | 624 | .proto = IPPROTO_GRE }; |
623 | if (ip_route_output_key(dev_net(dev), &rt, &fl)) { | 625 | if (ip_route_output_key(dev_net(dev), &rt, &fl)) { |
624 | tunnel->stat.tx_carrier_errors++; | 626 | stats->tx_carrier_errors++; |
625 | goto tx_error; | 627 | goto tx_error; |
626 | } | 628 | } |
627 | } | 629 | } |
@@ -629,7 +631,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
629 | 631 | ||
630 | if (tdev == dev) { | 632 | if (tdev == dev) { |
631 | ip_rt_put(rt); | 633 | ip_rt_put(rt); |
632 | tunnel->stat.collisions++; | 634 | stats->collisions++; |
633 | goto tx_error; | 635 | goto tx_error; |
634 | } | 636 | } |
635 | 637 | ||
@@ -954,11 +956,6 @@ done: | |||
954 | return err; | 956 | return err; |
955 | } | 957 | } |
956 | 958 | ||
957 | static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev) | ||
958 | { | ||
959 | return &(((struct ip_tunnel*)netdev_priv(dev))->stat); | ||
960 | } | ||
961 | |||
962 | static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) | 959 | static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) |
963 | { | 960 | { |
964 | struct ip_tunnel *tunnel = netdev_priv(dev); | 961 | struct ip_tunnel *tunnel = netdev_priv(dev); |
@@ -1084,7 +1081,6 @@ static void ipgre_tunnel_setup(struct net_device *dev) | |||
1084 | dev->uninit = ipgre_tunnel_uninit; | 1081 | dev->uninit = ipgre_tunnel_uninit; |
1085 | dev->destructor = free_netdev; | 1082 | dev->destructor = free_netdev; |
1086 | dev->hard_start_xmit = ipgre_tunnel_xmit; | 1083 | dev->hard_start_xmit = ipgre_tunnel_xmit; |
1087 | dev->get_stats = ipgre_tunnel_get_stats; | ||
1088 | dev->do_ioctl = ipgre_tunnel_ioctl; | 1084 | dev->do_ioctl = ipgre_tunnel_ioctl; |
1089 | dev->change_mtu = ipgre_tunnel_change_mtu; | 1085 | dev->change_mtu = ipgre_tunnel_change_mtu; |
1090 | 1086 | ||
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index ff77a4a7f9ec..e0bed56c51f1 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * The Internet Protocol (IP) module. | 6 | * The Internet Protocol (IP) module. |
7 | * | 7 | * |
8 | * Version: $Id: ip_input.c,v 1.55 2002/01/12 07:39:45 davem Exp $ | ||
9 | * | ||
10 | * Authors: Ross Biro | 8 | * Authors: Ross Biro |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Donald Becker, <becker@super.org> | 10 | * Donald Becker, <becker@super.org> |
@@ -147,12 +145,6 @@ | |||
147 | #include <linux/netlink.h> | 145 | #include <linux/netlink.h> |
148 | 146 | ||
149 | /* | 147 | /* |
150 | * SNMP management statistics | ||
151 | */ | ||
152 | |||
153 | DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics) __read_mostly; | ||
154 | |||
155 | /* | ||
156 | * Process Router Attention IP option | 148 | * Process Router Attention IP option |
157 | */ | 149 | */ |
158 | int ip_call_ra_chain(struct sk_buff *skb) | 150 | int ip_call_ra_chain(struct sk_buff *skb) |
@@ -232,16 +224,16 @@ static int ip_local_deliver_finish(struct sk_buff *skb) | |||
232 | protocol = -ret; | 224 | protocol = -ret; |
233 | goto resubmit; | 225 | goto resubmit; |
234 | } | 226 | } |
235 | IP_INC_STATS_BH(IPSTATS_MIB_INDELIVERS); | 227 | IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS); |
236 | } else { | 228 | } else { |
237 | if (!raw) { | 229 | if (!raw) { |
238 | if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { | 230 | if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { |
239 | IP_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS); | 231 | IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS); |
240 | icmp_send(skb, ICMP_DEST_UNREACH, | 232 | icmp_send(skb, ICMP_DEST_UNREACH, |
241 | ICMP_PROT_UNREACH, 0); | 233 | ICMP_PROT_UNREACH, 0); |
242 | } | 234 | } |
243 | } else | 235 | } else |
244 | IP_INC_STATS_BH(IPSTATS_MIB_INDELIVERS); | 236 | IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS); |
245 | kfree_skb(skb); | 237 | kfree_skb(skb); |
246 | } | 238 | } |
247 | } | 239 | } |
@@ -283,7 +275,7 @@ static inline int ip_rcv_options(struct sk_buff *skb) | |||
283 | --ANK (980813) | 275 | --ANK (980813) |
284 | */ | 276 | */ |
285 | if (skb_cow(skb, skb_headroom(skb))) { | 277 | if (skb_cow(skb, skb_headroom(skb))) { |
286 | IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS); | 278 | IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS); |
287 | goto drop; | 279 | goto drop; |
288 | } | 280 | } |
289 | 281 | ||
@@ -292,7 +284,7 @@ static inline int ip_rcv_options(struct sk_buff *skb) | |||
292 | opt->optlen = iph->ihl*4 - sizeof(struct iphdr); | 284 | opt->optlen = iph->ihl*4 - sizeof(struct iphdr); |
293 | 285 | ||
294 | if (ip_options_compile(dev_net(dev), opt, skb)) { | 286 | if (ip_options_compile(dev_net(dev), opt, skb)) { |
295 | IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 287 | IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS); |
296 | goto drop; | 288 | goto drop; |
297 | } | 289 | } |
298 | 290 | ||
@@ -336,9 +328,11 @@ static int ip_rcv_finish(struct sk_buff *skb) | |||
336 | skb->dev); | 328 | skb->dev); |
337 | if (unlikely(err)) { | 329 | if (unlikely(err)) { |
338 | if (err == -EHOSTUNREACH) | 330 | if (err == -EHOSTUNREACH) |
339 | IP_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS); | 331 | IP_INC_STATS_BH(dev_net(skb->dev), |
332 | IPSTATS_MIB_INADDRERRORS); | ||
340 | else if (err == -ENETUNREACH) | 333 | else if (err == -ENETUNREACH) |
341 | IP_INC_STATS_BH(IPSTATS_MIB_INNOROUTES); | 334 | IP_INC_STATS_BH(dev_net(skb->dev), |
335 | IPSTATS_MIB_INNOROUTES); | ||
342 | goto drop; | 336 | goto drop; |
343 | } | 337 | } |
344 | } | 338 | } |
@@ -359,9 +353,9 @@ static int ip_rcv_finish(struct sk_buff *skb) | |||
359 | 353 | ||
360 | rt = skb->rtable; | 354 | rt = skb->rtable; |
361 | if (rt->rt_type == RTN_MULTICAST) | 355 | if (rt->rt_type == RTN_MULTICAST) |
362 | IP_INC_STATS_BH(IPSTATS_MIB_INMCASTPKTS); | 356 | IP_INC_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCASTPKTS); |
363 | else if (rt->rt_type == RTN_BROADCAST) | 357 | else if (rt->rt_type == RTN_BROADCAST) |
364 | IP_INC_STATS_BH(IPSTATS_MIB_INBCASTPKTS); | 358 | IP_INC_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INBCASTPKTS); |
365 | 359 | ||
366 | return dst_input(skb); | 360 | return dst_input(skb); |
367 | 361 | ||
@@ -384,10 +378,10 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, | |||
384 | if (skb->pkt_type == PACKET_OTHERHOST) | 378 | if (skb->pkt_type == PACKET_OTHERHOST) |
385 | goto drop; | 379 | goto drop; |
386 | 380 | ||
387 | IP_INC_STATS_BH(IPSTATS_MIB_INRECEIVES); | 381 | IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INRECEIVES); |
388 | 382 | ||
389 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { | 383 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { |
390 | IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS); | 384 | IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS); |
391 | goto out; | 385 | goto out; |
392 | } | 386 | } |
393 | 387 | ||
@@ -420,7 +414,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, | |||
420 | 414 | ||
421 | len = ntohs(iph->tot_len); | 415 | len = ntohs(iph->tot_len); |
422 | if (skb->len < len) { | 416 | if (skb->len < len) { |
423 | IP_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS); | 417 | IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS); |
424 | goto drop; | 418 | goto drop; |
425 | } else if (len < (iph->ihl*4)) | 419 | } else if (len < (iph->ihl*4)) |
426 | goto inhdr_error; | 420 | goto inhdr_error; |
@@ -430,7 +424,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, | |||
430 | * Note this now means skb->len holds ntohs(iph->tot_len). | 424 | * Note this now means skb->len holds ntohs(iph->tot_len). |
431 | */ | 425 | */ |
432 | if (pskb_trim_rcsum(skb, len)) { | 426 | if (pskb_trim_rcsum(skb, len)) { |
433 | IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS); | 427 | IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS); |
434 | goto drop; | 428 | goto drop; |
435 | } | 429 | } |
436 | 430 | ||
@@ -441,11 +435,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, | |||
441 | ip_rcv_finish); | 435 | ip_rcv_finish); |
442 | 436 | ||
443 | inhdr_error: | 437 | inhdr_error: |
444 | IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 438 | IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS); |
445 | drop: | 439 | drop: |
446 | kfree_skb(skb); | 440 | kfree_skb(skb); |
447 | out: | 441 | out: |
448 | return NET_RX_DROP; | 442 | return NET_RX_DROP; |
449 | } | 443 | } |
450 | |||
451 | EXPORT_SYMBOL(ip_statistics); | ||
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 33126ad2cfdc..be3f18a7a40e 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * The options processing module for ip.c | 6 | * The options processing module for ip.c |
7 | * | 7 | * |
8 | * Version: $Id: ip_options.c,v 1.21 2001/09/01 00:31:50 davem Exp $ | ||
9 | * | ||
10 | * Authors: A.N.Kuznetsov | 8 | * Authors: A.N.Kuznetsov |
11 | * | 9 | * |
12 | */ | 10 | */ |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index e527628f56cf..465544f6281a 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * The Internet Protocol (IP) output module. | 6 | * The Internet Protocol (IP) output module. |
7 | * | 7 | * |
8 | * Version: $Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $ | ||
9 | * | ||
10 | * Authors: Ross Biro | 8 | * Authors: Ross Biro |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Donald Becker, <becker@super.org> | 10 | * Donald Becker, <becker@super.org> |
@@ -184,9 +182,9 @@ static inline int ip_finish_output2(struct sk_buff *skb) | |||
184 | unsigned int hh_len = LL_RESERVED_SPACE(dev); | 182 | unsigned int hh_len = LL_RESERVED_SPACE(dev); |
185 | 183 | ||
186 | if (rt->rt_type == RTN_MULTICAST) | 184 | if (rt->rt_type == RTN_MULTICAST) |
187 | IP_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS); | 185 | IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTMCASTPKTS); |
188 | else if (rt->rt_type == RTN_BROADCAST) | 186 | else if (rt->rt_type == RTN_BROADCAST) |
189 | IP_INC_STATS(IPSTATS_MIB_OUTBCASTPKTS); | 187 | IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTBCASTPKTS); |
190 | 188 | ||
191 | /* Be paranoid, rather than too clever. */ | 189 | /* Be paranoid, rather than too clever. */ |
192 | if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { | 190 | if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { |
@@ -246,7 +244,7 @@ int ip_mc_output(struct sk_buff *skb) | |||
246 | /* | 244 | /* |
247 | * If the indicated interface is up and running, send the packet. | 245 | * If the indicated interface is up and running, send the packet. |
248 | */ | 246 | */ |
249 | IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 247 | IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTREQUESTS); |
250 | 248 | ||
251 | skb->dev = dev; | 249 | skb->dev = dev; |
252 | skb->protocol = htons(ETH_P_IP); | 250 | skb->protocol = htons(ETH_P_IP); |
@@ -300,7 +298,7 @@ int ip_output(struct sk_buff *skb) | |||
300 | { | 298 | { |
301 | struct net_device *dev = skb->dst->dev; | 299 | struct net_device *dev = skb->dst->dev; |
302 | 300 | ||
303 | IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 301 | IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTREQUESTS); |
304 | 302 | ||
305 | skb->dev = dev; | 303 | skb->dev = dev; |
306 | skb->protocol = htons(ETH_P_IP); | 304 | skb->protocol = htons(ETH_P_IP); |
@@ -391,7 +389,7 @@ packet_routed: | |||
391 | return ip_local_out(skb); | 389 | return ip_local_out(skb); |
392 | 390 | ||
393 | no_route: | 391 | no_route: |
394 | IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES); | 392 | IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); |
395 | kfree_skb(skb); | 393 | kfree_skb(skb); |
396 | return -EHOSTUNREACH; | 394 | return -EHOSTUNREACH; |
397 | } | 395 | } |
@@ -453,7 +451,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*)) | |||
453 | iph = ip_hdr(skb); | 451 | iph = ip_hdr(skb); |
454 | 452 | ||
455 | if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) { | 453 | if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) { |
456 | IP_INC_STATS(IPSTATS_MIB_FRAGFAILS); | 454 | IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); |
457 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, | 455 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, |
458 | htonl(ip_skb_dst_mtu(skb))); | 456 | htonl(ip_skb_dst_mtu(skb))); |
459 | kfree_skb(skb); | 457 | kfree_skb(skb); |
@@ -544,7 +542,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*)) | |||
544 | err = output(skb); | 542 | err = output(skb); |
545 | 543 | ||
546 | if (!err) | 544 | if (!err) |
547 | IP_INC_STATS(IPSTATS_MIB_FRAGCREATES); | 545 | IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES); |
548 | if (err || !frag) | 546 | if (err || !frag) |
549 | break; | 547 | break; |
550 | 548 | ||
@@ -554,7 +552,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*)) | |||
554 | } | 552 | } |
555 | 553 | ||
556 | if (err == 0) { | 554 | if (err == 0) { |
557 | IP_INC_STATS(IPSTATS_MIB_FRAGOKS); | 555 | IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS); |
558 | return 0; | 556 | return 0; |
559 | } | 557 | } |
560 | 558 | ||
@@ -563,7 +561,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*)) | |||
563 | kfree_skb(frag); | 561 | kfree_skb(frag); |
564 | frag = skb; | 562 | frag = skb; |
565 | } | 563 | } |
566 | IP_INC_STATS(IPSTATS_MIB_FRAGFAILS); | 564 | IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); |
567 | return err; | 565 | return err; |
568 | } | 566 | } |
569 | 567 | ||
@@ -675,15 +673,15 @@ slow_path: | |||
675 | if (err) | 673 | if (err) |
676 | goto fail; | 674 | goto fail; |
677 | 675 | ||
678 | IP_INC_STATS(IPSTATS_MIB_FRAGCREATES); | 676 | IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES); |
679 | } | 677 | } |
680 | kfree_skb(skb); | 678 | kfree_skb(skb); |
681 | IP_INC_STATS(IPSTATS_MIB_FRAGOKS); | 679 | IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS); |
682 | return err; | 680 | return err; |
683 | 681 | ||
684 | fail: | 682 | fail: |
685 | kfree_skb(skb); | 683 | kfree_skb(skb); |
686 | IP_INC_STATS(IPSTATS_MIB_FRAGFAILS); | 684 | IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); |
687 | return err; | 685 | return err; |
688 | } | 686 | } |
689 | 687 | ||
@@ -1049,7 +1047,7 @@ alloc_new_skb: | |||
1049 | 1047 | ||
1050 | error: | 1048 | error: |
1051 | inet->cork.length -= length; | 1049 | inet->cork.length -= length; |
1052 | IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS); | 1050 | IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS); |
1053 | return err; | 1051 | return err; |
1054 | } | 1052 | } |
1055 | 1053 | ||
@@ -1191,7 +1189,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, | |||
1191 | 1189 | ||
1192 | error: | 1190 | error: |
1193 | inet->cork.length -= size; | 1191 | inet->cork.length -= size; |
1194 | IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS); | 1192 | IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS); |
1195 | return err; | 1193 | return err; |
1196 | } | 1194 | } |
1197 | 1195 | ||
@@ -1213,6 +1211,7 @@ int ip_push_pending_frames(struct sock *sk) | |||
1213 | struct sk_buff *skb, *tmp_skb; | 1211 | struct sk_buff *skb, *tmp_skb; |
1214 | struct sk_buff **tail_skb; | 1212 | struct sk_buff **tail_skb; |
1215 | struct inet_sock *inet = inet_sk(sk); | 1213 | struct inet_sock *inet = inet_sk(sk); |
1214 | struct net *net = sock_net(sk); | ||
1216 | struct ip_options *opt = NULL; | 1215 | struct ip_options *opt = NULL; |
1217 | struct rtable *rt = (struct rtable *)inet->cork.dst; | 1216 | struct rtable *rt = (struct rtable *)inet->cork.dst; |
1218 | struct iphdr *iph; | 1217 | struct iphdr *iph; |
@@ -1282,7 +1281,7 @@ int ip_push_pending_frames(struct sock *sk) | |||
1282 | skb->dst = dst_clone(&rt->u.dst); | 1281 | skb->dst = dst_clone(&rt->u.dst); |
1283 | 1282 | ||
1284 | if (iph->protocol == IPPROTO_ICMP) | 1283 | if (iph->protocol == IPPROTO_ICMP) |
1285 | icmp_out_count(((struct icmphdr *) | 1284 | icmp_out_count(net, ((struct icmphdr *) |
1286 | skb_transport_header(skb))->type); | 1285 | skb_transport_header(skb))->type); |
1287 | 1286 | ||
1288 | /* Netfilter gets whole the not fragmented skb. */ | 1287 | /* Netfilter gets whole the not fragmented skb. */ |
@@ -1299,7 +1298,7 @@ out: | |||
1299 | return err; | 1298 | return err; |
1300 | 1299 | ||
1301 | error: | 1300 | error: |
1302 | IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS); | 1301 | IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS); |
1303 | goto out; | 1302 | goto out; |
1304 | } | 1303 | } |
1305 | 1304 | ||
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index e0514e82308e..105d92a039b9 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * The IP to API glue. | 6 | * The IP to API glue. |
7 | * | 7 | * |
8 | * Version: $Id: ip_sockglue.c,v 1.62 2002/02/01 22:01:04 davem Exp $ | ||
9 | * | ||
10 | * Authors: see ip.c | 8 | * Authors: see ip.c |
11 | * | 9 | * |
12 | * Fixes: | 10 | * Fixes: |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index ed45037ce9be..42065fff46c4 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * $Id: ipconfig.c,v 1.46 2002/02/01 22:01:04 davem Exp $ | ||
3 | * | ||
4 | * Automatic Configuration of IP -- use DHCP, BOOTP, RARP, or | 2 | * Automatic Configuration of IP -- use DHCP, BOOTP, RARP, or |
5 | * user-supplied information to configure own IP address and routes. | 3 | * user-supplied information to configure own IP address and routes. |
6 | * | 4 | * |
@@ -434,7 +432,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
434 | unsigned char *sha, *tha; /* s for "source", t for "target" */ | 432 | unsigned char *sha, *tha; /* s for "source", t for "target" */ |
435 | struct ic_device *d; | 433 | struct ic_device *d; |
436 | 434 | ||
437 | if (dev_net(dev) != &init_net) | 435 | if (!net_eq(dev_net(dev), &init_net)) |
438 | goto drop; | 436 | goto drop; |
439 | 437 | ||
440 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) | 438 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) |
@@ -854,7 +852,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str | |||
854 | struct ic_device *d; | 852 | struct ic_device *d; |
855 | int len, ext_len; | 853 | int len, ext_len; |
856 | 854 | ||
857 | if (dev_net(dev) != &init_net) | 855 | if (!net_eq(dev_net(dev), &init_net)) |
858 | goto drop; | 856 | goto drop; |
859 | 857 | ||
860 | /* Perform verifications before taking the lock. */ | 858 | /* Perform verifications before taking the lock. */ |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index af5cb53da5cc..4c6d2caf9203 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Linux NET3: IP/IP protocol decoder. | 2 | * Linux NET3: IP/IP protocol decoder. |
3 | * | 3 | * |
4 | * Version: $Id: ipip.c,v 1.50 2001/10/02 02:22:36 davem Exp $ | ||
5 | * | ||
6 | * Authors: | 4 | * Authors: |
7 | * Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95 | 5 | * Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95 |
8 | * | 6 | * |
@@ -368,8 +366,8 @@ static int ipip_rcv(struct sk_buff *skb) | |||
368 | skb->protocol = htons(ETH_P_IP); | 366 | skb->protocol = htons(ETH_P_IP); |
369 | skb->pkt_type = PACKET_HOST; | 367 | skb->pkt_type = PACKET_HOST; |
370 | 368 | ||
371 | tunnel->stat.rx_packets++; | 369 | tunnel->dev->stats.rx_packets++; |
372 | tunnel->stat.rx_bytes += skb->len; | 370 | tunnel->dev->stats.rx_bytes += skb->len; |
373 | skb->dev = tunnel->dev; | 371 | skb->dev = tunnel->dev; |
374 | dst_release(skb->dst); | 372 | dst_release(skb->dst); |
375 | skb->dst = NULL; | 373 | skb->dst = NULL; |
@@ -392,7 +390,7 @@ static int ipip_rcv(struct sk_buff *skb) | |||
392 | static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 390 | static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) |
393 | { | 391 | { |
394 | struct ip_tunnel *tunnel = netdev_priv(dev); | 392 | struct ip_tunnel *tunnel = netdev_priv(dev); |
395 | struct net_device_stats *stats = &tunnel->stat; | 393 | struct net_device_stats *stats = &tunnel->dev->stats; |
396 | struct iphdr *tiph = &tunnel->parms.iph; | 394 | struct iphdr *tiph = &tunnel->parms.iph; |
397 | u8 tos = tunnel->parms.iph.tos; | 395 | u8 tos = tunnel->parms.iph.tos; |
398 | __be16 df = tiph->frag_off; | 396 | __be16 df = tiph->frag_off; |
@@ -405,7 +403,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
405 | int mtu; | 403 | int mtu; |
406 | 404 | ||
407 | if (tunnel->recursion++) { | 405 | if (tunnel->recursion++) { |
408 | tunnel->stat.collisions++; | 406 | stats->collisions++; |
409 | goto tx_error; | 407 | goto tx_error; |
410 | } | 408 | } |
411 | 409 | ||
@@ -418,7 +416,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
418 | if (!dst) { | 416 | if (!dst) { |
419 | /* NBMA tunnel */ | 417 | /* NBMA tunnel */ |
420 | if ((rt = skb->rtable) == NULL) { | 418 | if ((rt = skb->rtable) == NULL) { |
421 | tunnel->stat.tx_fifo_errors++; | 419 | stats->tx_fifo_errors++; |
422 | goto tx_error; | 420 | goto tx_error; |
423 | } | 421 | } |
424 | if ((dst = rt->rt_gateway) == 0) | 422 | if ((dst = rt->rt_gateway) == 0) |
@@ -433,7 +431,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
433 | .tos = RT_TOS(tos) } }, | 431 | .tos = RT_TOS(tos) } }, |
434 | .proto = IPPROTO_IPIP }; | 432 | .proto = IPPROTO_IPIP }; |
435 | if (ip_route_output_key(dev_net(dev), &rt, &fl)) { | 433 | if (ip_route_output_key(dev_net(dev), &rt, &fl)) { |
436 | tunnel->stat.tx_carrier_errors++; | 434 | stats->tx_carrier_errors++; |
437 | goto tx_error_icmp; | 435 | goto tx_error_icmp; |
438 | } | 436 | } |
439 | } | 437 | } |
@@ -441,7 +439,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
441 | 439 | ||
442 | if (tdev == dev) { | 440 | if (tdev == dev) { |
443 | ip_rt_put(rt); | 441 | ip_rt_put(rt); |
444 | tunnel->stat.collisions++; | 442 | stats->collisions++; |
445 | goto tx_error; | 443 | goto tx_error; |
446 | } | 444 | } |
447 | 445 | ||
@@ -451,7 +449,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
451 | mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; | 449 | mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; |
452 | 450 | ||
453 | if (mtu < 68) { | 451 | if (mtu < 68) { |
454 | tunnel->stat.collisions++; | 452 | stats->collisions++; |
455 | ip_rt_put(rt); | 453 | ip_rt_put(rt); |
456 | goto tx_error; | 454 | goto tx_error; |
457 | } | 455 | } |
@@ -685,11 +683,6 @@ done: | |||
685 | return err; | 683 | return err; |
686 | } | 684 | } |
687 | 685 | ||
688 | static struct net_device_stats *ipip_tunnel_get_stats(struct net_device *dev) | ||
689 | { | ||
690 | return &(((struct ip_tunnel*)netdev_priv(dev))->stat); | ||
691 | } | ||
692 | |||
693 | static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu) | 686 | static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu) |
694 | { | 687 | { |
695 | if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr)) | 688 | if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr)) |
@@ -702,7 +695,6 @@ static void ipip_tunnel_setup(struct net_device *dev) | |||
702 | { | 695 | { |
703 | dev->uninit = ipip_tunnel_uninit; | 696 | dev->uninit = ipip_tunnel_uninit; |
704 | dev->hard_start_xmit = ipip_tunnel_xmit; | 697 | dev->hard_start_xmit = ipip_tunnel_xmit; |
705 | dev->get_stats = ipip_tunnel_get_stats; | ||
706 | dev->do_ioctl = ipip_tunnel_ioctl; | 698 | dev->do_ioctl = ipip_tunnel_ioctl; |
707 | dev->change_mtu = ipip_tunnel_change_mtu; | 699 | dev->change_mtu = ipip_tunnel_change_mtu; |
708 | dev->destructor = free_netdev; | 700 | dev->destructor = free_netdev; |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 11700a4dcd95..c519b8d30eee 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -9,8 +9,6 @@ | |||
9 | * as published by the Free Software Foundation; either version | 9 | * as published by the Free Software Foundation; either version |
10 | * 2 of the License, or (at your option) any later version. | 10 | * 2 of the License, or (at your option) any later version. |
11 | * | 11 | * |
12 | * Version: $Id: ipmr.c,v 1.65 2001/10/31 21:55:54 davem Exp $ | ||
13 | * | ||
14 | * Fixes: | 12 | * Fixes: |
15 | * Michael Chastain : Incorrect size of copying. | 13 | * Michael Chastain : Incorrect size of copying. |
16 | * Alan Cox : Added the cache manager code | 14 | * Alan Cox : Added the cache manager code |
@@ -120,6 +118,31 @@ static struct timer_list ipmr_expire_timer; | |||
120 | 118 | ||
121 | /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ | 119 | /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ |
122 | 120 | ||
121 | static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) | ||
122 | { | ||
123 | dev_close(dev); | ||
124 | |||
125 | dev = __dev_get_by_name(&init_net, "tunl0"); | ||
126 | if (dev) { | ||
127 | struct ifreq ifr; | ||
128 | mm_segment_t oldfs; | ||
129 | struct ip_tunnel_parm p; | ||
130 | |||
131 | memset(&p, 0, sizeof(p)); | ||
132 | p.iph.daddr = v->vifc_rmt_addr.s_addr; | ||
133 | p.iph.saddr = v->vifc_lcl_addr.s_addr; | ||
134 | p.iph.version = 4; | ||
135 | p.iph.ihl = 5; | ||
136 | p.iph.protocol = IPPROTO_IPIP; | ||
137 | sprintf(p.name, "dvmrp%d", v->vifc_vifi); | ||
138 | ifr.ifr_ifru.ifru_data = (__force void __user *)&p; | ||
139 | |||
140 | oldfs = get_fs(); set_fs(KERNEL_DS); | ||
141 | dev->do_ioctl(dev, &ifr, SIOCDELTUNNEL); | ||
142 | set_fs(oldfs); | ||
143 | } | ||
144 | } | ||
145 | |||
123 | static | 146 | static |
124 | struct net_device *ipmr_new_tunnel(struct vifctl *v) | 147 | struct net_device *ipmr_new_tunnel(struct vifctl *v) |
125 | { | 148 | { |
@@ -161,6 +184,7 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v) | |||
161 | 184 | ||
162 | if (dev_open(dev)) | 185 | if (dev_open(dev)) |
163 | goto failure; | 186 | goto failure; |
187 | dev_hold(dev); | ||
164 | } | 188 | } |
165 | } | 189 | } |
166 | return dev; | 190 | return dev; |
@@ -181,26 +205,20 @@ static int reg_vif_num = -1; | |||
181 | static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) | 205 | static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) |
182 | { | 206 | { |
183 | read_lock(&mrt_lock); | 207 | read_lock(&mrt_lock); |
184 | ((struct net_device_stats*)netdev_priv(dev))->tx_bytes += skb->len; | 208 | dev->stats.tx_bytes += skb->len; |
185 | ((struct net_device_stats*)netdev_priv(dev))->tx_packets++; | 209 | dev->stats.tx_packets++; |
186 | ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT); | 210 | ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT); |
187 | read_unlock(&mrt_lock); | 211 | read_unlock(&mrt_lock); |
188 | kfree_skb(skb); | 212 | kfree_skb(skb); |
189 | return 0; | 213 | return 0; |
190 | } | 214 | } |
191 | 215 | ||
192 | static struct net_device_stats *reg_vif_get_stats(struct net_device *dev) | ||
193 | { | ||
194 | return (struct net_device_stats*)netdev_priv(dev); | ||
195 | } | ||
196 | |||
197 | static void reg_vif_setup(struct net_device *dev) | 216 | static void reg_vif_setup(struct net_device *dev) |
198 | { | 217 | { |
199 | dev->type = ARPHRD_PIMREG; | 218 | dev->type = ARPHRD_PIMREG; |
200 | dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; | 219 | dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; |
201 | dev->flags = IFF_NOARP; | 220 | dev->flags = IFF_NOARP; |
202 | dev->hard_start_xmit = reg_vif_xmit; | 221 | dev->hard_start_xmit = reg_vif_xmit; |
203 | dev->get_stats = reg_vif_get_stats; | ||
204 | dev->destructor = free_netdev; | 222 | dev->destructor = free_netdev; |
205 | } | 223 | } |
206 | 224 | ||
@@ -209,8 +227,7 @@ static struct net_device *ipmr_reg_vif(void) | |||
209 | struct net_device *dev; | 227 | struct net_device *dev; |
210 | struct in_device *in_dev; | 228 | struct in_device *in_dev; |
211 | 229 | ||
212 | dev = alloc_netdev(sizeof(struct net_device_stats), "pimreg", | 230 | dev = alloc_netdev(0, "pimreg", reg_vif_setup); |
213 | reg_vif_setup); | ||
214 | 231 | ||
215 | if (dev == NULL) | 232 | if (dev == NULL) |
216 | return NULL; | 233 | return NULL; |
@@ -234,6 +251,8 @@ static struct net_device *ipmr_reg_vif(void) | |||
234 | if (dev_open(dev)) | 251 | if (dev_open(dev)) |
235 | goto failure; | 252 | goto failure; |
236 | 253 | ||
254 | dev_hold(dev); | ||
255 | |||
237 | return dev; | 256 | return dev; |
238 | 257 | ||
239 | failure: | 258 | failure: |
@@ -248,9 +267,10 @@ failure: | |||
248 | 267 | ||
249 | /* | 268 | /* |
250 | * Delete a VIF entry | 269 | * Delete a VIF entry |
270 | * @notify: Set to 1, if the caller is a notifier_call | ||
251 | */ | 271 | */ |
252 | 272 | ||
253 | static int vif_delete(int vifi) | 273 | static int vif_delete(int vifi, int notify) |
254 | { | 274 | { |
255 | struct vif_device *v; | 275 | struct vif_device *v; |
256 | struct net_device *dev; | 276 | struct net_device *dev; |
@@ -293,7 +313,7 @@ static int vif_delete(int vifi) | |||
293 | ip_rt_multicast_event(in_dev); | 313 | ip_rt_multicast_event(in_dev); |
294 | } | 314 | } |
295 | 315 | ||
296 | if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER)) | 316 | if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify) |
297 | unregister_netdevice(dev); | 317 | unregister_netdevice(dev); |
298 | 318 | ||
299 | dev_put(dev); | 319 | dev_put(dev); |
@@ -398,6 +418,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock) | |||
398 | struct vif_device *v = &vif_table[vifi]; | 418 | struct vif_device *v = &vif_table[vifi]; |
399 | struct net_device *dev; | 419 | struct net_device *dev; |
400 | struct in_device *in_dev; | 420 | struct in_device *in_dev; |
421 | int err; | ||
401 | 422 | ||
402 | /* Is vif busy ? */ | 423 | /* Is vif busy ? */ |
403 | if (VIF_EXISTS(vifi)) | 424 | if (VIF_EXISTS(vifi)) |
@@ -415,18 +436,34 @@ static int vif_add(struct vifctl *vifc, int mrtsock) | |||
415 | dev = ipmr_reg_vif(); | 436 | dev = ipmr_reg_vif(); |
416 | if (!dev) | 437 | if (!dev) |
417 | return -ENOBUFS; | 438 | return -ENOBUFS; |
439 | err = dev_set_allmulti(dev, 1); | ||
440 | if (err) { | ||
441 | unregister_netdevice(dev); | ||
442 | dev_put(dev); | ||
443 | return err; | ||
444 | } | ||
418 | break; | 445 | break; |
419 | #endif | 446 | #endif |
420 | case VIFF_TUNNEL: | 447 | case VIFF_TUNNEL: |
421 | dev = ipmr_new_tunnel(vifc); | 448 | dev = ipmr_new_tunnel(vifc); |
422 | if (!dev) | 449 | if (!dev) |
423 | return -ENOBUFS; | 450 | return -ENOBUFS; |
451 | err = dev_set_allmulti(dev, 1); | ||
452 | if (err) { | ||
453 | ipmr_del_tunnel(dev, vifc); | ||
454 | dev_put(dev); | ||
455 | return err; | ||
456 | } | ||
424 | break; | 457 | break; |
425 | case 0: | 458 | case 0: |
426 | dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr); | 459 | dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr); |
427 | if (!dev) | 460 | if (!dev) |
428 | return -EADDRNOTAVAIL; | 461 | return -EADDRNOTAVAIL; |
429 | dev_put(dev); | 462 | err = dev_set_allmulti(dev, 1); |
463 | if (err) { | ||
464 | dev_put(dev); | ||
465 | return err; | ||
466 | } | ||
430 | break; | 467 | break; |
431 | default: | 468 | default: |
432 | return -EINVAL; | 469 | return -EINVAL; |
@@ -435,7 +472,6 @@ static int vif_add(struct vifctl *vifc, int mrtsock) | |||
435 | if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) | 472 | if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) |
436 | return -EADDRNOTAVAIL; | 473 | return -EADDRNOTAVAIL; |
437 | IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++; | 474 | IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++; |
438 | dev_set_allmulti(dev, +1); | ||
439 | ip_rt_multicast_event(in_dev); | 475 | ip_rt_multicast_event(in_dev); |
440 | 476 | ||
441 | /* | 477 | /* |
@@ -458,7 +494,6 @@ static int vif_add(struct vifctl *vifc, int mrtsock) | |||
458 | 494 | ||
459 | /* And finish update writing critical data */ | 495 | /* And finish update writing critical data */ |
460 | write_lock_bh(&mrt_lock); | 496 | write_lock_bh(&mrt_lock); |
461 | dev_hold(dev); | ||
462 | v->dev=dev; | 497 | v->dev=dev; |
463 | #ifdef CONFIG_IP_PIMSM | 498 | #ifdef CONFIG_IP_PIMSM |
464 | if (v->flags&VIFF_REGISTER) | 499 | if (v->flags&VIFF_REGISTER) |
@@ -805,7 +840,7 @@ static void mroute_clean_tables(struct sock *sk) | |||
805 | */ | 840 | */ |
806 | for (i=0; i<maxvif; i++) { | 841 | for (i=0; i<maxvif; i++) { |
807 | if (!(vif_table[i].flags&VIFF_STATIC)) | 842 | if (!(vif_table[i].flags&VIFF_STATIC)) |
808 | vif_delete(i); | 843 | vif_delete(i, 0); |
809 | } | 844 | } |
810 | 845 | ||
811 | /* | 846 | /* |
@@ -918,7 +953,7 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt | |||
918 | if (optname==MRT_ADD_VIF) { | 953 | if (optname==MRT_ADD_VIF) { |
919 | ret = vif_add(&vif, sk==mroute_socket); | 954 | ret = vif_add(&vif, sk==mroute_socket); |
920 | } else { | 955 | } else { |
921 | ret = vif_delete(vif.vifc_vifi); | 956 | ret = vif_delete(vif.vifc_vifi, 0); |
922 | } | 957 | } |
923 | rtnl_unlock(); | 958 | rtnl_unlock(); |
924 | return ret; | 959 | return ret; |
@@ -1089,7 +1124,7 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v | |||
1089 | struct vif_device *v; | 1124 | struct vif_device *v; |
1090 | int ct; | 1125 | int ct; |
1091 | 1126 | ||
1092 | if (dev_net(dev) != &init_net) | 1127 | if (!net_eq(dev_net(dev), &init_net)) |
1093 | return NOTIFY_DONE; | 1128 | return NOTIFY_DONE; |
1094 | 1129 | ||
1095 | if (event != NETDEV_UNREGISTER) | 1130 | if (event != NETDEV_UNREGISTER) |
@@ -1097,7 +1132,7 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v | |||
1097 | v=&vif_table[0]; | 1132 | v=&vif_table[0]; |
1098 | for (ct=0;ct<maxvif;ct++,v++) { | 1133 | for (ct=0;ct<maxvif;ct++,v++) { |
1099 | if (v->dev==dev) | 1134 | if (v->dev==dev) |
1100 | vif_delete(ct); | 1135 | vif_delete(ct, 1); |
1101 | } | 1136 | } |
1102 | return NOTIFY_DONE; | 1137 | return NOTIFY_DONE; |
1103 | } | 1138 | } |
@@ -1143,7 +1178,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb) | |||
1143 | { | 1178 | { |
1144 | struct ip_options * opt = &(IPCB(skb)->opt); | 1179 | struct ip_options * opt = &(IPCB(skb)->opt); |
1145 | 1180 | ||
1146 | IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS); | 1181 | IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); |
1147 | 1182 | ||
1148 | if (unlikely(opt->optlen)) | 1183 | if (unlikely(opt->optlen)) |
1149 | ip_forward_options(skb); | 1184 | ip_forward_options(skb); |
@@ -1170,8 +1205,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
1170 | if (vif->flags & VIFF_REGISTER) { | 1205 | if (vif->flags & VIFF_REGISTER) { |
1171 | vif->pkt_out++; | 1206 | vif->pkt_out++; |
1172 | vif->bytes_out+=skb->len; | 1207 | vif->bytes_out+=skb->len; |
1173 | ((struct net_device_stats*)netdev_priv(vif->dev))->tx_bytes += skb->len; | 1208 | vif->dev->stats.tx_bytes += skb->len; |
1174 | ((struct net_device_stats*)netdev_priv(vif->dev))->tx_packets++; | 1209 | vif->dev->stats.tx_packets++; |
1175 | ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); | 1210 | ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); |
1176 | kfree_skb(skb); | 1211 | kfree_skb(skb); |
1177 | return; | 1212 | return; |
@@ -1206,7 +1241,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
1206 | to blackhole. | 1241 | to blackhole. |
1207 | */ | 1242 | */ |
1208 | 1243 | ||
1209 | IP_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS); | 1244 | IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS); |
1210 | ip_rt_put(rt); | 1245 | ip_rt_put(rt); |
1211 | goto out_free; | 1246 | goto out_free; |
1212 | } | 1247 | } |
@@ -1230,8 +1265,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
1230 | if (vif->flags & VIFF_TUNNEL) { | 1265 | if (vif->flags & VIFF_TUNNEL) { |
1231 | ip_encap(skb, vif->local, vif->remote); | 1266 | ip_encap(skb, vif->local, vif->remote); |
1232 | /* FIXME: extra output firewall step used to be here. --RR */ | 1267 | /* FIXME: extra output firewall step used to be here. --RR */ |
1233 | ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_packets++; | 1268 | vif->dev->stats.tx_packets++; |
1234 | ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_bytes+=skb->len; | 1269 | vif->dev->stats.tx_bytes += skb->len; |
1235 | } | 1270 | } |
1236 | 1271 | ||
1237 | IPCB(skb)->flags |= IPSKB_FORWARDED; | 1272 | IPCB(skb)->flags |= IPSKB_FORWARDED; |
@@ -1487,8 +1522,8 @@ int pim_rcv_v1(struct sk_buff * skb) | |||
1487 | skb->pkt_type = PACKET_HOST; | 1522 | skb->pkt_type = PACKET_HOST; |
1488 | dst_release(skb->dst); | 1523 | dst_release(skb->dst); |
1489 | skb->dst = NULL; | 1524 | skb->dst = NULL; |
1490 | ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len; | 1525 | reg_dev->stats.rx_bytes += skb->len; |
1491 | ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++; | 1526 | reg_dev->stats.rx_packets++; |
1492 | nf_reset(skb); | 1527 | nf_reset(skb); |
1493 | netif_rx(skb); | 1528 | netif_rx(skb); |
1494 | dev_put(reg_dev); | 1529 | dev_put(reg_dev); |
@@ -1542,8 +1577,8 @@ static int pim_rcv(struct sk_buff * skb) | |||
1542 | skb->ip_summed = 0; | 1577 | skb->ip_summed = 0; |
1543 | skb->pkt_type = PACKET_HOST; | 1578 | skb->pkt_type = PACKET_HOST; |
1544 | dst_release(skb->dst); | 1579 | dst_release(skb->dst); |
1545 | ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len; | 1580 | reg_dev->stats.rx_bytes += skb->len; |
1546 | ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++; | 1581 | reg_dev->stats.rx_packets++; |
1547 | skb->dst = NULL; | 1582 | skb->dst = NULL; |
1548 | nf_reset(skb); | 1583 | nf_reset(skb); |
1549 | netif_rx(skb); | 1584 | netif_rx(skb); |
@@ -1887,16 +1922,36 @@ static struct net_protocol pim_protocol = { | |||
1887 | * Setup for IP multicast routing | 1922 | * Setup for IP multicast routing |
1888 | */ | 1923 | */ |
1889 | 1924 | ||
1890 | void __init ip_mr_init(void) | 1925 | int __init ip_mr_init(void) |
1891 | { | 1926 | { |
1927 | int err; | ||
1928 | |||
1892 | mrt_cachep = kmem_cache_create("ip_mrt_cache", | 1929 | mrt_cachep = kmem_cache_create("ip_mrt_cache", |
1893 | sizeof(struct mfc_cache), | 1930 | sizeof(struct mfc_cache), |
1894 | 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, | 1931 | 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
1895 | NULL); | 1932 | NULL); |
1933 | if (!mrt_cachep) | ||
1934 | return -ENOMEM; | ||
1935 | |||
1896 | setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); | 1936 | setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); |
1897 | register_netdevice_notifier(&ip_mr_notifier); | 1937 | err = register_netdevice_notifier(&ip_mr_notifier); |
1938 | if (err) | ||
1939 | goto reg_notif_fail; | ||
1898 | #ifdef CONFIG_PROC_FS | 1940 | #ifdef CONFIG_PROC_FS |
1899 | proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops); | 1941 | err = -ENOMEM; |
1900 | proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops); | 1942 | if (!proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops)) |
1943 | goto proc_vif_fail; | ||
1944 | if (!proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops)) | ||
1945 | goto proc_cache_fail; | ||
1901 | #endif | 1946 | #endif |
1947 | return 0; | ||
1948 | reg_notif_fail: | ||
1949 | kmem_cache_destroy(mrt_cachep); | ||
1950 | #ifdef CONFIG_PROC_FS | ||
1951 | proc_vif_fail: | ||
1952 | unregister_netdevice_notifier(&ip_mr_notifier); | ||
1953 | proc_cache_fail: | ||
1954 | proc_net_remove(&init_net, "ip_mr_vif"); | ||
1955 | #endif | ||
1956 | return err; | ||
1902 | } | 1957 | } |
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c index 535abe0c45e7..1f1897a1a702 100644 --- a/net/ipv4/ipvs/ip_vs_app.c +++ b/net/ipv4/ipvs/ip_vs_app.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * ip_vs_app.c: Application module support for IPVS | 2 | * ip_vs_app.c: Application module support for IPVS |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_app.c,v 1.17 2003/03/22 06:31:21 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 4 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
7 | * | 5 | * |
8 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c index 65f1ba112752..f8bdae47a77f 100644 --- a/net/ipv4/ipvs/ip_vs_conn.c +++ b/net/ipv4/ipvs/ip_vs_conn.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * high-performance and highly available server based on a | 5 | * high-performance and highly available server based on a |
6 | * cluster of servers. | 6 | * cluster of servers. |
7 | * | 7 | * |
8 | * Version: $Id: ip_vs_conn.c,v 1.31 2003/04/18 09:03:16 wensong Exp $ | ||
9 | * | ||
10 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 8 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
11 | * Peter Kese <peter.kese@ijs.si> | 9 | * Peter Kese <peter.kese@ijs.si> |
12 | * Julian Anastasov <ja@ssi.bg> | 10 | * Julian Anastasov <ja@ssi.bg> |
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c index 963981a9d501..a7879eafc3b5 100644 --- a/net/ipv4/ipvs/ip_vs_core.c +++ b/net/ipv4/ipvs/ip_vs_core.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * high-performance and highly available server based on a | 5 | * high-performance and highly available server based on a |
6 | * cluster of servers. | 6 | * cluster of servers. |
7 | * | 7 | * |
8 | * Version: $Id: ip_vs_core.c,v 1.34 2003/05/10 03:05:23 wensong Exp $ | ||
9 | * | ||
10 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 8 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
11 | * Peter Kese <peter.kese@ijs.si> | 9 | * Peter Kese <peter.kese@ijs.si> |
12 | * Julian Anastasov <ja@ssi.bg> | 10 | * Julian Anastasov <ja@ssi.bg> |
@@ -993,7 +991,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, | |||
993 | == sysctl_ip_vs_sync_threshold[0])) || | 991 | == sysctl_ip_vs_sync_threshold[0])) || |
994 | ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) && | 992 | ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) && |
995 | ((cp->state == IP_VS_TCP_S_FIN_WAIT) || | 993 | ((cp->state == IP_VS_TCP_S_FIN_WAIT) || |
996 | (cp->state == IP_VS_TCP_S_CLOSE))))) | 994 | (cp->state == IP_VS_TCP_S_CLOSE_WAIT) || |
995 | (cp->state == IP_VS_TCP_S_TIME_WAIT))))) | ||
997 | ip_vs_sync_conn(cp); | 996 | ip_vs_sync_conn(cp); |
998 | cp->old_state = cp->state; | 997 | cp->old_state = cp->state; |
999 | 998 | ||
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index 94c5767c8e01..9a5ace0b4dd6 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * high-performance and highly available server based on a | 5 | * high-performance and highly available server based on a |
6 | * cluster of servers. | 6 | * cluster of servers. |
7 | * | 7 | * |
8 | * Version: $Id: ip_vs_ctl.c,v 1.36 2003/06/08 09:31:19 wensong Exp $ | ||
9 | * | ||
10 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 8 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
11 | * Peter Kese <peter.kese@ijs.si> | 9 | * Peter Kese <peter.kese@ijs.si> |
12 | * Julian Anastasov <ja@ssi.bg> | 10 | * Julian Anastasov <ja@ssi.bg> |
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c index dcf5d46aaa5e..8afc1503ed20 100644 --- a/net/ipv4/ipvs/ip_vs_dh.c +++ b/net/ipv4/ipvs/ip_vs_dh.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * IPVS: Destination Hashing scheduling module | 2 | * IPVS: Destination Hashing scheduling module |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_dh.c,v 1.5 2002/09/15 08:14:08 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@gnuchina.org> | 4 | * Authors: Wensong Zhang <wensong@gnuchina.org> |
7 | * | 5 | * |
8 | * Inspired by the consistent hashing scheduler patch from | 6 | * Inspired by the consistent hashing scheduler patch from |
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c index dfa0d713c801..bc04eedd6dbb 100644 --- a/net/ipv4/ipvs/ip_vs_est.c +++ b/net/ipv4/ipvs/ip_vs_est.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * ip_vs_est.c: simple rate estimator for IPVS | 2 | * ip_vs_est.c: simple rate estimator for IPVS |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_est.c,v 1.4 2002/11/30 01:50:35 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 4 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
7 | * | 5 | * |
8 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c index 59aa166b7678..c1c758e4f733 100644 --- a/net/ipv4/ipvs/ip_vs_ftp.c +++ b/net/ipv4/ipvs/ip_vs_ftp.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * ip_vs_ftp.c: IPVS ftp application module | 2 | * ip_vs_ftp.c: IPVS ftp application module |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_ftp.c,v 1.13 2002/09/15 08:14:08 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 4 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
7 | * | 5 | * |
8 | * Changes: | 6 | * Changes: |
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c index 3888642706ad..0efa3db4b180 100644 --- a/net/ipv4/ipvs/ip_vs_lblc.c +++ b/net/ipv4/ipvs/ip_vs_lblc.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * IPVS: Locality-Based Least-Connection scheduling module | 2 | * IPVS: Locality-Based Least-Connection scheduling module |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_lblc.c,v 1.10 2002/09/15 08:14:08 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@gnuchina.org> | 4 | * Authors: Wensong Zhang <wensong@gnuchina.org> |
7 | * | 5 | * |
8 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c index daa260eb21cf..8e3bbeb45138 100644 --- a/net/ipv4/ipvs/ip_vs_lblcr.c +++ b/net/ipv4/ipvs/ip_vs_lblcr.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * IPVS: Locality-Based Least-Connection with Replication scheduler | 2 | * IPVS: Locality-Based Least-Connection with Replication scheduler |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_lblcr.c,v 1.11 2002/09/15 08:14:08 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@gnuchina.org> | 4 | * Authors: Wensong Zhang <wensong@gnuchina.org> |
7 | * | 5 | * |
8 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c index d88fef90a641..ac9f08e065d5 100644 --- a/net/ipv4/ipvs/ip_vs_lc.c +++ b/net/ipv4/ipvs/ip_vs_lc.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * IPVS: Least-Connection Scheduling module | 2 | * IPVS: Least-Connection Scheduling module |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_lc.c,v 1.10 2003/04/18 09:03:16 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 4 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
7 | * | 5 | * |
8 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c index bc2a9e5f2a7b..a46bf258d420 100644 --- a/net/ipv4/ipvs/ip_vs_nq.c +++ b/net/ipv4/ipvs/ip_vs_nq.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * IPVS: Never Queue scheduling module | 2 | * IPVS: Never Queue scheduling module |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_nq.c,v 1.2 2003/06/08 09:31:19 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 4 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
7 | * | 5 | * |
8 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c index 4b1c16cbb16b..876714f23d65 100644 --- a/net/ipv4/ipvs/ip_vs_proto.c +++ b/net/ipv4/ipvs/ip_vs_proto.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * ip_vs_proto.c: transport protocol load balancing support for IPVS | 2 | * ip_vs_proto.c: transport protocol load balancing support for IPVS |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_proto.c,v 1.2 2003/04/18 09:03:16 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 4 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
7 | * Julian Anastasov <ja@ssi.bg> | 5 | * Julian Anastasov <ja@ssi.bg> |
8 | * | 6 | * |
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah.c b/net/ipv4/ipvs/ip_vs_proto_ah.c index 4bf835e1d86d..73e0ea87c1f5 100644 --- a/net/ipv4/ipvs/ip_vs_proto_ah.c +++ b/net/ipv4/ipvs/ip_vs_proto_ah.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * ip_vs_proto_ah.c: AH IPSec load balancing support for IPVS | 2 | * ip_vs_proto_ah.c: AH IPSec load balancing support for IPVS |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_proto_ah.c,v 1.1 2003/07/04 15:04:37 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Julian Anastasov <ja@ssi.bg>, February 2002 | 4 | * Authors: Julian Anastasov <ja@ssi.bg>, February 2002 |
7 | * Wensong Zhang <wensong@linuxvirtualserver.org> | 5 | * Wensong Zhang <wensong@linuxvirtualserver.org> |
8 | * | 6 | * |
diff --git a/net/ipv4/ipvs/ip_vs_proto_esp.c b/net/ipv4/ipvs/ip_vs_proto_esp.c index db6a6b7b1a0b..21d70c8ffa54 100644 --- a/net/ipv4/ipvs/ip_vs_proto_esp.c +++ b/net/ipv4/ipvs/ip_vs_proto_esp.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * ip_vs_proto_esp.c: ESP IPSec load balancing support for IPVS | 2 | * ip_vs_proto_esp.c: ESP IPSec load balancing support for IPVS |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_proto_esp.c,v 1.1 2003/07/04 15:04:37 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Julian Anastasov <ja@ssi.bg>, February 2002 | 4 | * Authors: Julian Anastasov <ja@ssi.bg>, February 2002 |
7 | * Wensong Zhang <wensong@linuxvirtualserver.org> | 5 | * Wensong Zhang <wensong@linuxvirtualserver.org> |
8 | * | 6 | * |
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c index b83dc14b0a4d..d0ea467986a0 100644 --- a/net/ipv4/ipvs/ip_vs_proto_tcp.c +++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * ip_vs_proto_tcp.c: TCP load balancing support for IPVS | 2 | * ip_vs_proto_tcp.c: TCP load balancing support for IPVS |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_proto_tcp.c,v 1.3 2002/11/30 01:50:35 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 4 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
7 | * Julian Anastasov <ja@ssi.bg> | 5 | * Julian Anastasov <ja@ssi.bg> |
8 | * | 6 | * |
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c index 75771cb3cd6f..c6be5d56823f 100644 --- a/net/ipv4/ipvs/ip_vs_proto_udp.c +++ b/net/ipv4/ipvs/ip_vs_proto_udp.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * ip_vs_proto_udp.c: UDP load balancing support for IPVS | 2 | * ip_vs_proto_udp.c: UDP load balancing support for IPVS |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_proto_udp.c,v 1.3 2002/11/30 01:50:35 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 4 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
7 | * Julian Anastasov <ja@ssi.bg> | 5 | * Julian Anastasov <ja@ssi.bg> |
8 | * | 6 | * |
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c index 433f8a947924..c8db12d39e61 100644 --- a/net/ipv4/ipvs/ip_vs_rr.c +++ b/net/ipv4/ipvs/ip_vs_rr.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * IPVS: Round-Robin Scheduling module | 2 | * IPVS: Round-Robin Scheduling module |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_rr.c,v 1.9 2002/09/15 08:14:08 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 4 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
7 | * Peter Kese <peter.kese@ijs.si> | 5 | * Peter Kese <peter.kese@ijs.si> |
8 | * | 6 | * |
diff --git a/net/ipv4/ipvs/ip_vs_sched.c b/net/ipv4/ipvs/ip_vs_sched.c index 121a32b1b756..b64767309855 100644 --- a/net/ipv4/ipvs/ip_vs_sched.c +++ b/net/ipv4/ipvs/ip_vs_sched.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * high-performance and highly available server based on a | 5 | * high-performance and highly available server based on a |
6 | * cluster of servers. | 6 | * cluster of servers. |
7 | * | 7 | * |
8 | * Version: $Id: ip_vs_sched.c,v 1.13 2003/05/10 03:05:23 wensong Exp $ | ||
9 | * | ||
10 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 8 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
11 | * Peter Kese <peter.kese@ijs.si> | 9 | * Peter Kese <peter.kese@ijs.si> |
12 | * | 10 | * |
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c index dd7c128f9db3..2a7d31358181 100644 --- a/net/ipv4/ipvs/ip_vs_sed.c +++ b/net/ipv4/ipvs/ip_vs_sed.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * IPVS: Shortest Expected Delay scheduling module | 2 | * IPVS: Shortest Expected Delay scheduling module |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_sed.c,v 1.1 2003/05/10 03:06:08 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 4 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
7 | * | 5 | * |
8 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c index 1b25b00ef1e1..b8fdfac65001 100644 --- a/net/ipv4/ipvs/ip_vs_sh.c +++ b/net/ipv4/ipvs/ip_vs_sh.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * IPVS: Source Hashing scheduling module | 2 | * IPVS: Source Hashing scheduling module |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_sh.c,v 1.5 2002/09/15 08:14:08 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@gnuchina.org> | 4 | * Authors: Wensong Zhang <wensong@gnuchina.org> |
7 | * | 5 | * |
8 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c index eff54efe0351..45e9bd96c286 100644 --- a/net/ipv4/ipvs/ip_vs_sync.c +++ b/net/ipv4/ipvs/ip_vs_sync.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * high-performance and highly available server based on a | 5 | * high-performance and highly available server based on a |
6 | * cluster of servers. | 6 | * cluster of servers. |
7 | * | 7 | * |
8 | * Version: $Id: ip_vs_sync.c,v 1.13 2003/06/08 09:31:19 wensong Exp $ | ||
9 | * | ||
10 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 8 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
11 | * | 9 | * |
12 | * ip_vs_sync: sync connection info from master load balancer to backups | 10 | * ip_vs_sync: sync connection info from master load balancer to backups |
@@ -29,10 +27,12 @@ | |||
29 | #include <linux/in.h> | 27 | #include <linux/in.h> |
30 | #include <linux/igmp.h> /* for ip_mc_join_group */ | 28 | #include <linux/igmp.h> /* for ip_mc_join_group */ |
31 | #include <linux/udp.h> | 29 | #include <linux/udp.h> |
30 | #include <linux/err.h> | ||
31 | #include <linux/kthread.h> | ||
32 | #include <linux/wait.h> | ||
32 | 33 | ||
33 | #include <net/ip.h> | 34 | #include <net/ip.h> |
34 | #include <net/sock.h> | 35 | #include <net/sock.h> |
35 | #include <asm/uaccess.h> /* for get_fs and set_fs */ | ||
36 | 36 | ||
37 | #include <net/ip_vs.h> | 37 | #include <net/ip_vs.h> |
38 | 38 | ||
@@ -68,8 +68,8 @@ struct ip_vs_sync_conn_options { | |||
68 | }; | 68 | }; |
69 | 69 | ||
70 | struct ip_vs_sync_thread_data { | 70 | struct ip_vs_sync_thread_data { |
71 | struct completion *startup; | 71 | struct socket *sock; |
72 | int state; | 72 | char *buf; |
73 | }; | 73 | }; |
74 | 74 | ||
75 | #define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn)) | 75 | #define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn)) |
@@ -140,18 +140,19 @@ volatile int ip_vs_backup_syncid = 0; | |||
140 | char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN]; | 140 | char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN]; |
141 | char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; | 141 | char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; |
142 | 142 | ||
143 | /* sync daemon tasks */ | ||
144 | static struct task_struct *sync_master_thread; | ||
145 | static struct task_struct *sync_backup_thread; | ||
146 | |||
143 | /* multicast addr */ | 147 | /* multicast addr */ |
144 | static struct sockaddr_in mcast_addr; | 148 | static struct sockaddr_in mcast_addr = { |
149 | .sin_family = AF_INET, | ||
150 | .sin_port = __constant_htons(IP_VS_SYNC_PORT), | ||
151 | .sin_addr.s_addr = __constant_htonl(IP_VS_SYNC_GROUP), | ||
152 | }; | ||
145 | 153 | ||
146 | 154 | ||
147 | static inline void sb_queue_tail(struct ip_vs_sync_buff *sb) | 155 | static inline struct ip_vs_sync_buff *sb_dequeue(void) |
148 | { | ||
149 | spin_lock(&ip_vs_sync_lock); | ||
150 | list_add_tail(&sb->list, &ip_vs_sync_queue); | ||
151 | spin_unlock(&ip_vs_sync_lock); | ||
152 | } | ||
153 | |||
154 | static inline struct ip_vs_sync_buff * sb_dequeue(void) | ||
155 | { | 156 | { |
156 | struct ip_vs_sync_buff *sb; | 157 | struct ip_vs_sync_buff *sb; |
157 | 158 | ||
@@ -195,6 +196,16 @@ static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb) | |||
195 | kfree(sb); | 196 | kfree(sb); |
196 | } | 197 | } |
197 | 198 | ||
199 | static inline void sb_queue_tail(struct ip_vs_sync_buff *sb) | ||
200 | { | ||
201 | spin_lock(&ip_vs_sync_lock); | ||
202 | if (ip_vs_sync_state & IP_VS_STATE_MASTER) | ||
203 | list_add_tail(&sb->list, &ip_vs_sync_queue); | ||
204 | else | ||
205 | ip_vs_sync_buff_release(sb); | ||
206 | spin_unlock(&ip_vs_sync_lock); | ||
207 | } | ||
208 | |||
198 | /* | 209 | /* |
199 | * Get the current sync buffer if it has been created for more | 210 | * Get the current sync buffer if it has been created for more |
200 | * than the specified time or the specified time is zero. | 211 | * than the specified time or the specified time is zero. |
@@ -574,14 +585,17 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname) | |||
574 | static struct socket * make_send_sock(void) | 585 | static struct socket * make_send_sock(void) |
575 | { | 586 | { |
576 | struct socket *sock; | 587 | struct socket *sock; |
588 | int result; | ||
577 | 589 | ||
578 | /* First create a socket */ | 590 | /* First create a socket */ |
579 | if (sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock) < 0) { | 591 | result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); |
592 | if (result < 0) { | ||
580 | IP_VS_ERR("Error during creation of socket; terminating\n"); | 593 | IP_VS_ERR("Error during creation of socket; terminating\n"); |
581 | return NULL; | 594 | return ERR_PTR(result); |
582 | } | 595 | } |
583 | 596 | ||
584 | if (set_mcast_if(sock->sk, ip_vs_master_mcast_ifn) < 0) { | 597 | result = set_mcast_if(sock->sk, ip_vs_master_mcast_ifn); |
598 | if (result < 0) { | ||
585 | IP_VS_ERR("Error setting outbound mcast interface\n"); | 599 | IP_VS_ERR("Error setting outbound mcast interface\n"); |
586 | goto error; | 600 | goto error; |
587 | } | 601 | } |
@@ -589,14 +603,15 @@ static struct socket * make_send_sock(void) | |||
589 | set_mcast_loop(sock->sk, 0); | 603 | set_mcast_loop(sock->sk, 0); |
590 | set_mcast_ttl(sock->sk, 1); | 604 | set_mcast_ttl(sock->sk, 1); |
591 | 605 | ||
592 | if (bind_mcastif_addr(sock, ip_vs_master_mcast_ifn) < 0) { | 606 | result = bind_mcastif_addr(sock, ip_vs_master_mcast_ifn); |
607 | if (result < 0) { | ||
593 | IP_VS_ERR("Error binding address of the mcast interface\n"); | 608 | IP_VS_ERR("Error binding address of the mcast interface\n"); |
594 | goto error; | 609 | goto error; |
595 | } | 610 | } |
596 | 611 | ||
597 | if (sock->ops->connect(sock, | 612 | result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr, |
598 | (struct sockaddr*)&mcast_addr, | 613 | sizeof(struct sockaddr), 0); |
599 | sizeof(struct sockaddr), 0) < 0) { | 614 | if (result < 0) { |
600 | IP_VS_ERR("Error connecting to the multicast addr\n"); | 615 | IP_VS_ERR("Error connecting to the multicast addr\n"); |
601 | goto error; | 616 | goto error; |
602 | } | 617 | } |
@@ -605,7 +620,7 @@ static struct socket * make_send_sock(void) | |||
605 | 620 | ||
606 | error: | 621 | error: |
607 | sock_release(sock); | 622 | sock_release(sock); |
608 | return NULL; | 623 | return ERR_PTR(result); |
609 | } | 624 | } |
610 | 625 | ||
611 | 626 | ||
@@ -615,27 +630,30 @@ static struct socket * make_send_sock(void) | |||
615 | static struct socket * make_receive_sock(void) | 630 | static struct socket * make_receive_sock(void) |
616 | { | 631 | { |
617 | struct socket *sock; | 632 | struct socket *sock; |
633 | int result; | ||
618 | 634 | ||
619 | /* First create a socket */ | 635 | /* First create a socket */ |
620 | if (sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock) < 0) { | 636 | result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); |
637 | if (result < 0) { | ||
621 | IP_VS_ERR("Error during creation of socket; terminating\n"); | 638 | IP_VS_ERR("Error during creation of socket; terminating\n"); |
622 | return NULL; | 639 | return ERR_PTR(result); |
623 | } | 640 | } |
624 | 641 | ||
625 | /* it is equivalent to the REUSEADDR option in user-space */ | 642 | /* it is equivalent to the REUSEADDR option in user-space */ |
626 | sock->sk->sk_reuse = 1; | 643 | sock->sk->sk_reuse = 1; |
627 | 644 | ||
628 | if (sock->ops->bind(sock, | 645 | result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr, |
629 | (struct sockaddr*)&mcast_addr, | 646 | sizeof(struct sockaddr)); |
630 | sizeof(struct sockaddr)) < 0) { | 647 | if (result < 0) { |
631 | IP_VS_ERR("Error binding to the multicast addr\n"); | 648 | IP_VS_ERR("Error binding to the multicast addr\n"); |
632 | goto error; | 649 | goto error; |
633 | } | 650 | } |
634 | 651 | ||
635 | /* join the multicast group */ | 652 | /* join the multicast group */ |
636 | if (join_mcast_group(sock->sk, | 653 | result = join_mcast_group(sock->sk, |
637 | (struct in_addr*)&mcast_addr.sin_addr, | 654 | (struct in_addr *) &mcast_addr.sin_addr, |
638 | ip_vs_backup_mcast_ifn) < 0) { | 655 | ip_vs_backup_mcast_ifn); |
656 | if (result < 0) { | ||
639 | IP_VS_ERR("Error joining to the multicast group\n"); | 657 | IP_VS_ERR("Error joining to the multicast group\n"); |
640 | goto error; | 658 | goto error; |
641 | } | 659 | } |
@@ -644,7 +662,7 @@ static struct socket * make_receive_sock(void) | |||
644 | 662 | ||
645 | error: | 663 | error: |
646 | sock_release(sock); | 664 | sock_release(sock); |
647 | return NULL; | 665 | return ERR_PTR(result); |
648 | } | 666 | } |
649 | 667 | ||
650 | 668 | ||
@@ -702,44 +720,29 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen) | |||
702 | } | 720 | } |
703 | 721 | ||
704 | 722 | ||
705 | static DECLARE_WAIT_QUEUE_HEAD(sync_wait); | 723 | static int sync_thread_master(void *data) |
706 | static pid_t sync_master_pid = 0; | ||
707 | static pid_t sync_backup_pid = 0; | ||
708 | |||
709 | static DECLARE_WAIT_QUEUE_HEAD(stop_sync_wait); | ||
710 | static int stop_master_sync = 0; | ||
711 | static int stop_backup_sync = 0; | ||
712 | |||
713 | static void sync_master_loop(void) | ||
714 | { | 724 | { |
715 | struct socket *sock; | 725 | struct ip_vs_sync_thread_data *tinfo = data; |
716 | struct ip_vs_sync_buff *sb; | 726 | struct ip_vs_sync_buff *sb; |
717 | 727 | ||
718 | /* create the sending multicast socket */ | ||
719 | sock = make_send_sock(); | ||
720 | if (!sock) | ||
721 | return; | ||
722 | |||
723 | IP_VS_INFO("sync thread started: state = MASTER, mcast_ifn = %s, " | 728 | IP_VS_INFO("sync thread started: state = MASTER, mcast_ifn = %s, " |
724 | "syncid = %d\n", | 729 | "syncid = %d\n", |
725 | ip_vs_master_mcast_ifn, ip_vs_master_syncid); | 730 | ip_vs_master_mcast_ifn, ip_vs_master_syncid); |
726 | 731 | ||
727 | for (;;) { | 732 | while (!kthread_should_stop()) { |
728 | while ((sb=sb_dequeue())) { | 733 | while ((sb = sb_dequeue())) { |
729 | ip_vs_send_sync_msg(sock, sb->mesg); | 734 | ip_vs_send_sync_msg(tinfo->sock, sb->mesg); |
730 | ip_vs_sync_buff_release(sb); | 735 | ip_vs_sync_buff_release(sb); |
731 | } | 736 | } |
732 | 737 | ||
733 | /* check if entries stay in curr_sb for 2 seconds */ | 738 | /* check if entries stay in curr_sb for 2 seconds */ |
734 | if ((sb = get_curr_sync_buff(2*HZ))) { | 739 | sb = get_curr_sync_buff(2 * HZ); |
735 | ip_vs_send_sync_msg(sock, sb->mesg); | 740 | if (sb) { |
741 | ip_vs_send_sync_msg(tinfo->sock, sb->mesg); | ||
736 | ip_vs_sync_buff_release(sb); | 742 | ip_vs_sync_buff_release(sb); |
737 | } | 743 | } |
738 | 744 | ||
739 | if (stop_master_sync) | 745 | schedule_timeout_interruptible(HZ); |
740 | break; | ||
741 | |||
742 | msleep_interruptible(1000); | ||
743 | } | 746 | } |
744 | 747 | ||
745 | /* clean up the sync_buff queue */ | 748 | /* clean up the sync_buff queue */ |
@@ -753,267 +756,175 @@ static void sync_master_loop(void) | |||
753 | } | 756 | } |
754 | 757 | ||
755 | /* release the sending multicast socket */ | 758 | /* release the sending multicast socket */ |
756 | sock_release(sock); | 759 | sock_release(tinfo->sock); |
760 | kfree(tinfo); | ||
761 | |||
762 | return 0; | ||
757 | } | 763 | } |
758 | 764 | ||
759 | 765 | ||
760 | static void sync_backup_loop(void) | 766 | static int sync_thread_backup(void *data) |
761 | { | 767 | { |
762 | struct socket *sock; | 768 | struct ip_vs_sync_thread_data *tinfo = data; |
763 | char *buf; | ||
764 | int len; | 769 | int len; |
765 | 770 | ||
766 | if (!(buf = kmalloc(sync_recv_mesg_maxlen, GFP_ATOMIC))) { | ||
767 | IP_VS_ERR("sync_backup_loop: kmalloc error\n"); | ||
768 | return; | ||
769 | } | ||
770 | |||
771 | /* create the receiving multicast socket */ | ||
772 | sock = make_receive_sock(); | ||
773 | if (!sock) | ||
774 | goto out; | ||
775 | |||
776 | IP_VS_INFO("sync thread started: state = BACKUP, mcast_ifn = %s, " | 771 | IP_VS_INFO("sync thread started: state = BACKUP, mcast_ifn = %s, " |
777 | "syncid = %d\n", | 772 | "syncid = %d\n", |
778 | ip_vs_backup_mcast_ifn, ip_vs_backup_syncid); | 773 | ip_vs_backup_mcast_ifn, ip_vs_backup_syncid); |
779 | 774 | ||
780 | for (;;) { | 775 | while (!kthread_should_stop()) { |
781 | /* do you have data now? */ | 776 | wait_event_interruptible(*tinfo->sock->sk->sk_sleep, |
782 | while (!skb_queue_empty(&(sock->sk->sk_receive_queue))) { | 777 | !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) |
783 | if ((len = | 778 | || kthread_should_stop()); |
784 | ip_vs_receive(sock, buf, | 779 | |
785 | sync_recv_mesg_maxlen)) <= 0) { | 780 | /* do we have data now? */ |
781 | while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) { | ||
782 | len = ip_vs_receive(tinfo->sock, tinfo->buf, | ||
783 | sync_recv_mesg_maxlen); | ||
784 | if (len <= 0) { | ||
786 | IP_VS_ERR("receiving message error\n"); | 785 | IP_VS_ERR("receiving message error\n"); |
787 | break; | 786 | break; |
788 | } | 787 | } |
789 | /* disable bottom half, because it accessed the data | 788 | |
789 | /* disable bottom half, because it accesses the data | ||
790 | shared by softirq while getting/creating conns */ | 790 | shared by softirq while getting/creating conns */ |
791 | local_bh_disable(); | 791 | local_bh_disable(); |
792 | ip_vs_process_message(buf, len); | 792 | ip_vs_process_message(tinfo->buf, len); |
793 | local_bh_enable(); | 793 | local_bh_enable(); |
794 | } | 794 | } |
795 | |||
796 | if (stop_backup_sync) | ||
797 | break; | ||
798 | |||
799 | msleep_interruptible(1000); | ||
800 | } | 795 | } |
801 | 796 | ||
802 | /* release the sending multicast socket */ | 797 | /* release the sending multicast socket */ |
803 | sock_release(sock); | 798 | sock_release(tinfo->sock); |
799 | kfree(tinfo->buf); | ||
800 | kfree(tinfo); | ||
804 | 801 | ||
805 | out: | 802 | return 0; |
806 | kfree(buf); | ||
807 | } | 803 | } |
808 | 804 | ||
809 | 805 | ||
810 | static void set_sync_pid(int sync_state, pid_t sync_pid) | 806 | int start_sync_thread(int state, char *mcast_ifn, __u8 syncid) |
811 | { | ||
812 | if (sync_state == IP_VS_STATE_MASTER) | ||
813 | sync_master_pid = sync_pid; | ||
814 | else if (sync_state == IP_VS_STATE_BACKUP) | ||
815 | sync_backup_pid = sync_pid; | ||
816 | } | ||
817 | |||
818 | static void set_stop_sync(int sync_state, int set) | ||
819 | { | 807 | { |
820 | if (sync_state == IP_VS_STATE_MASTER) | 808 | struct ip_vs_sync_thread_data *tinfo; |
821 | stop_master_sync = set; | 809 | struct task_struct **realtask, *task; |
822 | else if (sync_state == IP_VS_STATE_BACKUP) | 810 | struct socket *sock; |
823 | stop_backup_sync = set; | 811 | char *name, *buf = NULL; |
824 | else { | 812 | int (*threadfn)(void *data); |
825 | stop_master_sync = set; | 813 | int result = -ENOMEM; |
826 | stop_backup_sync = set; | ||
827 | } | ||
828 | } | ||
829 | 814 | ||
830 | static int sync_thread(void *startup) | 815 | IP_VS_DBG(7, "%s: pid %d\n", __func__, task_pid_nr(current)); |
831 | { | 816 | IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", |
832 | DECLARE_WAITQUEUE(wait, current); | 817 | sizeof(struct ip_vs_sync_conn)); |
833 | mm_segment_t oldmm; | ||
834 | int state; | ||
835 | const char *name; | ||
836 | struct ip_vs_sync_thread_data *tinfo = startup; | ||
837 | 818 | ||
838 | /* increase the module use count */ | 819 | if (state == IP_VS_STATE_MASTER) { |
839 | ip_vs_use_count_inc(); | 820 | if (sync_master_thread) |
821 | return -EEXIST; | ||
840 | 822 | ||
841 | if (ip_vs_sync_state & IP_VS_STATE_MASTER && !sync_master_pid) { | 823 | strlcpy(ip_vs_master_mcast_ifn, mcast_ifn, |
842 | state = IP_VS_STATE_MASTER; | 824 | sizeof(ip_vs_master_mcast_ifn)); |
825 | ip_vs_master_syncid = syncid; | ||
826 | realtask = &sync_master_thread; | ||
843 | name = "ipvs_syncmaster"; | 827 | name = "ipvs_syncmaster"; |
844 | } else if (ip_vs_sync_state & IP_VS_STATE_BACKUP && !sync_backup_pid) { | 828 | threadfn = sync_thread_master; |
845 | state = IP_VS_STATE_BACKUP; | 829 | sock = make_send_sock(); |
830 | } else if (state == IP_VS_STATE_BACKUP) { | ||
831 | if (sync_backup_thread) | ||
832 | return -EEXIST; | ||
833 | |||
834 | strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn, | ||
835 | sizeof(ip_vs_backup_mcast_ifn)); | ||
836 | ip_vs_backup_syncid = syncid; | ||
837 | realtask = &sync_backup_thread; | ||
846 | name = "ipvs_syncbackup"; | 838 | name = "ipvs_syncbackup"; |
839 | threadfn = sync_thread_backup; | ||
840 | sock = make_receive_sock(); | ||
847 | } else { | 841 | } else { |
848 | IP_VS_BUG(); | ||
849 | ip_vs_use_count_dec(); | ||
850 | return -EINVAL; | 842 | return -EINVAL; |
851 | } | 843 | } |
852 | 844 | ||
853 | daemonize(name); | 845 | if (IS_ERR(sock)) { |
854 | 846 | result = PTR_ERR(sock); | |
855 | oldmm = get_fs(); | 847 | goto out; |
856 | set_fs(KERNEL_DS); | 848 | } |
857 | |||
858 | /* Block all signals */ | ||
859 | spin_lock_irq(¤t->sighand->siglock); | ||
860 | siginitsetinv(¤t->blocked, 0); | ||
861 | recalc_sigpending(); | ||
862 | spin_unlock_irq(¤t->sighand->siglock); | ||
863 | 849 | ||
864 | /* set the maximum length of sync message */ | ||
865 | set_sync_mesg_maxlen(state); | 850 | set_sync_mesg_maxlen(state); |
851 | if (state == IP_VS_STATE_BACKUP) { | ||
852 | buf = kmalloc(sync_recv_mesg_maxlen, GFP_KERNEL); | ||
853 | if (!buf) | ||
854 | goto outsocket; | ||
855 | } | ||
866 | 856 | ||
867 | /* set up multicast address */ | 857 | tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); |
868 | mcast_addr.sin_family = AF_INET; | 858 | if (!tinfo) |
869 | mcast_addr.sin_port = htons(IP_VS_SYNC_PORT); | 859 | goto outbuf; |
870 | mcast_addr.sin_addr.s_addr = htonl(IP_VS_SYNC_GROUP); | ||
871 | |||
872 | add_wait_queue(&sync_wait, &wait); | ||
873 | |||
874 | set_sync_pid(state, task_pid_nr(current)); | ||
875 | complete(tinfo->startup); | ||
876 | |||
877 | /* | ||
878 | * once we call the completion queue above, we should | ||
879 | * null out that reference, since its allocated on the | ||
880 | * stack of the creating kernel thread | ||
881 | */ | ||
882 | tinfo->startup = NULL; | ||
883 | |||
884 | /* processing master/backup loop here */ | ||
885 | if (state == IP_VS_STATE_MASTER) | ||
886 | sync_master_loop(); | ||
887 | else if (state == IP_VS_STATE_BACKUP) | ||
888 | sync_backup_loop(); | ||
889 | else IP_VS_BUG(); | ||
890 | |||
891 | remove_wait_queue(&sync_wait, &wait); | ||
892 | |||
893 | /* thread exits */ | ||
894 | |||
895 | /* | ||
896 | * If we weren't explicitly stopped, then we | ||
897 | * exited in error, and should undo our state | ||
898 | */ | ||
899 | if ((!stop_master_sync) && (!stop_backup_sync)) | ||
900 | ip_vs_sync_state -= tinfo->state; | ||
901 | 860 | ||
902 | set_sync_pid(state, 0); | 861 | tinfo->sock = sock; |
903 | IP_VS_INFO("sync thread stopped!\n"); | 862 | tinfo->buf = buf; |
904 | 863 | ||
905 | set_fs(oldmm); | 864 | task = kthread_run(threadfn, tinfo, name); |
865 | if (IS_ERR(task)) { | ||
866 | result = PTR_ERR(task); | ||
867 | goto outtinfo; | ||
868 | } | ||
906 | 869 | ||
907 | /* decrease the module use count */ | 870 | /* mark as active */ |
908 | ip_vs_use_count_dec(); | 871 | *realtask = task; |
872 | ip_vs_sync_state |= state; | ||
909 | 873 | ||
910 | set_stop_sync(state, 0); | 874 | /* increase the module use count */ |
911 | wake_up(&stop_sync_wait); | 875 | ip_vs_use_count_inc(); |
912 | 876 | ||
913 | /* | ||
914 | * we need to free the structure that was allocated | ||
915 | * for us in start_sync_thread | ||
916 | */ | ||
917 | kfree(tinfo); | ||
918 | return 0; | 877 | return 0; |
919 | } | ||
920 | |||
921 | |||
922 | static int fork_sync_thread(void *startup) | ||
923 | { | ||
924 | pid_t pid; | ||
925 | |||
926 | /* fork the sync thread here, then the parent process of the | ||
927 | sync thread is the init process after this thread exits. */ | ||
928 | repeat: | ||
929 | if ((pid = kernel_thread(sync_thread, startup, 0)) < 0) { | ||
930 | IP_VS_ERR("could not create sync_thread due to %d... " | ||
931 | "retrying.\n", pid); | ||
932 | msleep_interruptible(1000); | ||
933 | goto repeat; | ||
934 | } | ||
935 | 878 | ||
936 | return 0; | 879 | outtinfo: |
880 | kfree(tinfo); | ||
881 | outbuf: | ||
882 | kfree(buf); | ||
883 | outsocket: | ||
884 | sock_release(sock); | ||
885 | out: | ||
886 | return result; | ||
937 | } | 887 | } |
938 | 888 | ||
939 | 889 | ||
940 | int start_sync_thread(int state, char *mcast_ifn, __u8 syncid) | 890 | int stop_sync_thread(int state) |
941 | { | 891 | { |
942 | DECLARE_COMPLETION_ONSTACK(startup); | ||
943 | pid_t pid; | ||
944 | struct ip_vs_sync_thread_data *tinfo; | ||
945 | |||
946 | if ((state == IP_VS_STATE_MASTER && sync_master_pid) || | ||
947 | (state == IP_VS_STATE_BACKUP && sync_backup_pid)) | ||
948 | return -EEXIST; | ||
949 | |||
950 | /* | ||
951 | * Note that tinfo will be freed in sync_thread on exit | ||
952 | */ | ||
953 | tinfo = kmalloc(sizeof(struct ip_vs_sync_thread_data), GFP_KERNEL); | ||
954 | if (!tinfo) | ||
955 | return -ENOMEM; | ||
956 | |||
957 | IP_VS_DBG(7, "%s: pid %d\n", __func__, task_pid_nr(current)); | 892 | IP_VS_DBG(7, "%s: pid %d\n", __func__, task_pid_nr(current)); |
958 | IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n", | ||
959 | sizeof(struct ip_vs_sync_conn)); | ||
960 | 893 | ||
961 | ip_vs_sync_state |= state; | ||
962 | if (state == IP_VS_STATE_MASTER) { | 894 | if (state == IP_VS_STATE_MASTER) { |
963 | strlcpy(ip_vs_master_mcast_ifn, mcast_ifn, | 895 | if (!sync_master_thread) |
964 | sizeof(ip_vs_master_mcast_ifn)); | 896 | return -ESRCH; |
965 | ip_vs_master_syncid = syncid; | ||
966 | } else { | ||
967 | strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn, | ||
968 | sizeof(ip_vs_backup_mcast_ifn)); | ||
969 | ip_vs_backup_syncid = syncid; | ||
970 | } | ||
971 | |||
972 | tinfo->state = state; | ||
973 | tinfo->startup = &startup; | ||
974 | |||
975 | repeat: | ||
976 | if ((pid = kernel_thread(fork_sync_thread, tinfo, 0)) < 0) { | ||
977 | IP_VS_ERR("could not create fork_sync_thread due to %d... " | ||
978 | "retrying.\n", pid); | ||
979 | msleep_interruptible(1000); | ||
980 | goto repeat; | ||
981 | } | ||
982 | |||
983 | wait_for_completion(&startup); | ||
984 | |||
985 | return 0; | ||
986 | } | ||
987 | 897 | ||
898 | IP_VS_INFO("stopping master sync thread %d ...\n", | ||
899 | task_pid_nr(sync_master_thread)); | ||
988 | 900 | ||
989 | int stop_sync_thread(int state) | 901 | /* |
990 | { | 902 | * The lock synchronizes with sb_queue_tail(), so that we don't |
991 | DECLARE_WAITQUEUE(wait, current); | 903 | * add sync buffers to the queue, when we are already in |
904 | * progress of stopping the master sync daemon. | ||
905 | */ | ||
992 | 906 | ||
993 | if ((state == IP_VS_STATE_MASTER && !sync_master_pid) || | 907 | spin_lock(&ip_vs_sync_lock); |
994 | (state == IP_VS_STATE_BACKUP && !sync_backup_pid)) | 908 | ip_vs_sync_state &= ~IP_VS_STATE_MASTER; |
995 | return -ESRCH; | 909 | spin_unlock(&ip_vs_sync_lock); |
910 | kthread_stop(sync_master_thread); | ||
911 | sync_master_thread = NULL; | ||
912 | } else if (state == IP_VS_STATE_BACKUP) { | ||
913 | if (!sync_backup_thread) | ||
914 | return -ESRCH; | ||
915 | |||
916 | IP_VS_INFO("stopping backup sync thread %d ...\n", | ||
917 | task_pid_nr(sync_backup_thread)); | ||
918 | |||
919 | ip_vs_sync_state &= ~IP_VS_STATE_BACKUP; | ||
920 | kthread_stop(sync_backup_thread); | ||
921 | sync_backup_thread = NULL; | ||
922 | } else { | ||
923 | return -EINVAL; | ||
924 | } | ||
996 | 925 | ||
997 | IP_VS_DBG(7, "%s: pid %d\n", __func__, task_pid_nr(current)); | 926 | /* decrease the module use count */ |
998 | IP_VS_INFO("stopping sync thread %d ...\n", | 927 | ip_vs_use_count_dec(); |
999 | (state == IP_VS_STATE_MASTER) ? | ||
1000 | sync_master_pid : sync_backup_pid); | ||
1001 | |||
1002 | __set_current_state(TASK_UNINTERRUPTIBLE); | ||
1003 | add_wait_queue(&stop_sync_wait, &wait); | ||
1004 | set_stop_sync(state, 1); | ||
1005 | ip_vs_sync_state -= state; | ||
1006 | wake_up(&sync_wait); | ||
1007 | schedule(); | ||
1008 | __set_current_state(TASK_RUNNING); | ||
1009 | remove_wait_queue(&stop_sync_wait, &wait); | ||
1010 | |||
1011 | /* Note: no need to reap the sync thread, because its parent | ||
1012 | process is the init process */ | ||
1013 | |||
1014 | if ((state == IP_VS_STATE_MASTER && stop_master_sync) || | ||
1015 | (state == IP_VS_STATE_BACKUP && stop_backup_sync)) | ||
1016 | IP_VS_BUG(); | ||
1017 | 928 | ||
1018 | return 0; | 929 | return 0; |
1019 | } | 930 | } |
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c index 8a9d913261d8..772c3cb4eca1 100644 --- a/net/ipv4/ipvs/ip_vs_wlc.c +++ b/net/ipv4/ipvs/ip_vs_wlc.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * IPVS: Weighted Least-Connection Scheduling module | 2 | * IPVS: Weighted Least-Connection Scheduling module |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_wlc.c,v 1.13 2003/04/18 09:03:16 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 4 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
7 | * Peter Kese <peter.kese@ijs.si> | 5 | * Peter Kese <peter.kese@ijs.si> |
8 | * | 6 | * |
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c index 85c680add6df..1d6932d7dc97 100644 --- a/net/ipv4/ipvs/ip_vs_wrr.c +++ b/net/ipv4/ipvs/ip_vs_wrr.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * IPVS: Weighted Round-Robin Scheduling module | 2 | * IPVS: Weighted Round-Robin Scheduling module |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_wrr.c,v 1.12 2002/09/15 08:14:08 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 4 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
7 | * | 5 | * |
8 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c index f63006caea03..9892d4aca42e 100644 --- a/net/ipv4/ipvs/ip_vs_xmit.c +++ b/net/ipv4/ipvs/ip_vs_xmit.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * ip_vs_xmit.c: various packet transmitters for IPVS | 2 | * ip_vs_xmit.c: various packet transmitters for IPVS |
3 | * | 3 | * |
4 | * Version: $Id: ip_vs_xmit.c,v 1.2 2002/11/30 01:50:35 wensong Exp $ | ||
5 | * | ||
6 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 4 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
7 | * Julian Anastasov <ja@ssi.bg> | 5 | * Julian Anastasov <ja@ssi.bg> |
8 | * | 6 | * |
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 2767841a8cef..f23e60c93ef9 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig | |||
@@ -213,8 +213,7 @@ config IP_NF_TARGET_NETMAP | |||
213 | help | 213 | help |
214 | NETMAP is an implementation of static 1:1 NAT mapping of network | 214 | NETMAP is an implementation of static 1:1 NAT mapping of network |
215 | addresses. It maps the network address part, while keeping the host | 215 | addresses. It maps the network address part, while keeping the host |
216 | address part intact. It is similar to Fast NAT, except that | 216 | address part intact. |
217 | Netfilter's connection tracking doesn't work well with Fast NAT. | ||
218 | 217 | ||
219 | To compile it as a module, choose M here. If unsure, say N. | 218 | To compile it as a module, choose M here. If unsure, say N. |
220 | 219 | ||
@@ -365,6 +364,18 @@ config IP_NF_RAW | |||
365 | If you want to compile it as a module, say M here and read | 364 | If you want to compile it as a module, say M here and read |
366 | <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. | 365 | <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. |
367 | 366 | ||
367 | # security table for MAC policy | ||
368 | config IP_NF_SECURITY | ||
369 | tristate "Security table" | ||
370 | depends on IP_NF_IPTABLES | ||
371 | depends on SECURITY | ||
372 | default m if NETFILTER_ADVANCED=n | ||
373 | help | ||
374 | This option adds a `security' table to iptables, for use | ||
375 | with Mandatory Access Control (MAC) policy. | ||
376 | |||
377 | If unsure, say N. | ||
378 | |||
368 | # ARP tables | 379 | # ARP tables |
369 | config IP_NF_ARPTABLES | 380 | config IP_NF_ARPTABLES |
370 | tristate "ARP tables support" | 381 | tristate "ARP tables support" |
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index d9b92fbf5579..3f31291f37ce 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile | |||
@@ -42,6 +42,7 @@ obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o | |||
42 | obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o | 42 | obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o |
43 | obj-$(CONFIG_NF_NAT) += iptable_nat.o | 43 | obj-$(CONFIG_NF_NAT) += iptable_nat.o |
44 | obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o | 44 | obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o |
45 | obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o | ||
45 | 46 | ||
46 | # matches | 47 | # matches |
47 | obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o | 48 | obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o |
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 26a37cedcf2e..432ce9d1c11c 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c | |||
@@ -156,7 +156,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) | |||
156 | case IPQ_COPY_META: | 156 | case IPQ_COPY_META: |
157 | case IPQ_COPY_NONE: | 157 | case IPQ_COPY_NONE: |
158 | size = NLMSG_SPACE(sizeof(*pmsg)); | 158 | size = NLMSG_SPACE(sizeof(*pmsg)); |
159 | data_len = 0; | ||
160 | break; | 159 | break; |
161 | 160 | ||
162 | case IPQ_COPY_PACKET: | 161 | case IPQ_COPY_PACKET: |
@@ -224,8 +223,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) | |||
224 | return skb; | 223 | return skb; |
225 | 224 | ||
226 | nlmsg_failure: | 225 | nlmsg_failure: |
227 | if (skb) | ||
228 | kfree_skb(skb); | ||
229 | *errp = -EINVAL; | 226 | *errp = -EINVAL; |
230 | printk(KERN_ERR "ip_queue: error creating packet message\n"); | 227 | printk(KERN_ERR "ip_queue: error creating packet message\n"); |
231 | return NULL; | 228 | return NULL; |
@@ -480,7 +477,7 @@ ipq_rcv_dev_event(struct notifier_block *this, | |||
480 | { | 477 | { |
481 | struct net_device *dev = ptr; | 478 | struct net_device *dev = ptr; |
482 | 479 | ||
483 | if (dev_net(dev) != &init_net) | 480 | if (!net_eq(dev_net(dev), &init_net)) |
484 | return NOTIFY_DONE; | 481 | return NOTIFY_DONE; |
485 | 482 | ||
486 | /* Drop any packets associated with the downed device */ | 483 | /* Drop any packets associated with the downed device */ |
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index 84c26dd27d81..0841aefaa503 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c | |||
@@ -120,7 +120,7 @@ static int masq_device_event(struct notifier_block *this, | |||
120 | { | 120 | { |
121 | const struct net_device *dev = ptr; | 121 | const struct net_device *dev = ptr; |
122 | 122 | ||
123 | if (dev_net(dev) != &init_net) | 123 | if (!net_eq(dev_net(dev), &init_net)) |
124 | return NOTIFY_DONE; | 124 | return NOTIFY_DONE; |
125 | 125 | ||
126 | if (event == NETDEV_DOWN) { | 126 | if (event == NETDEV_DOWN) { |
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c new file mode 100644 index 000000000000..2b472ac2263a --- /dev/null +++ b/net/ipv4/netfilter/iptable_security.c | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * "security" table | ||
3 | * | ||
4 | * This is for use by Mandatory Access Control (MAC) security models, | ||
5 | * which need to be able to manage security policy in separate context | ||
6 | * to DAC. | ||
7 | * | ||
8 | * Based on iptable_mangle.c | ||
9 | * | ||
10 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling | ||
11 | * Copyright (C) 2000-2004 Netfilter Core Team <coreteam <at> netfilter.org> | ||
12 | * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris <at> redhat.com> | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License version 2 as | ||
16 | * published by the Free Software Foundation. | ||
17 | */ | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/netfilter_ipv4/ip_tables.h> | ||
20 | #include <net/ip.h> | ||
21 | |||
22 | MODULE_LICENSE("GPL"); | ||
23 | MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>"); | ||
24 | MODULE_DESCRIPTION("iptables security table, for MAC rules"); | ||
25 | |||
26 | #define SECURITY_VALID_HOOKS (1 << NF_INET_LOCAL_IN) | \ | ||
27 | (1 << NF_INET_FORWARD) | \ | ||
28 | (1 << NF_INET_LOCAL_OUT) | ||
29 | |||
30 | static struct | ||
31 | { | ||
32 | struct ipt_replace repl; | ||
33 | struct ipt_standard entries[3]; | ||
34 | struct ipt_error term; | ||
35 | } initial_table __initdata = { | ||
36 | .repl = { | ||
37 | .name = "security", | ||
38 | .valid_hooks = SECURITY_VALID_HOOKS, | ||
39 | .num_entries = 4, | ||
40 | .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error), | ||
41 | .hook_entry = { | ||
42 | [NF_INET_LOCAL_IN] = 0, | ||
43 | [NF_INET_FORWARD] = sizeof(struct ipt_standard), | ||
44 | [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2, | ||
45 | }, | ||
46 | .underflow = { | ||
47 | [NF_INET_LOCAL_IN] = 0, | ||
48 | [NF_INET_FORWARD] = sizeof(struct ipt_standard), | ||
49 | [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2, | ||
50 | }, | ||
51 | }, | ||
52 | .entries = { | ||
53 | IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */ | ||
54 | IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */ | ||
55 | IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ | ||
56 | }, | ||
57 | .term = IPT_ERROR_INIT, /* ERROR */ | ||
58 | }; | ||
59 | |||
60 | static struct xt_table security_table = { | ||
61 | .name = "security", | ||
62 | .valid_hooks = SECURITY_VALID_HOOKS, | ||
63 | .lock = __RW_LOCK_UNLOCKED(security_table.lock), | ||
64 | .me = THIS_MODULE, | ||
65 | .af = AF_INET, | ||
66 | }; | ||
67 | |||
68 | static unsigned int | ||
69 | ipt_local_in_hook(unsigned int hook, | ||
70 | struct sk_buff *skb, | ||
71 | const struct net_device *in, | ||
72 | const struct net_device *out, | ||
73 | int (*okfn)(struct sk_buff *)) | ||
74 | { | ||
75 | return ipt_do_table(skb, hook, in, out, | ||
76 | nf_local_in_net(in, out)->ipv4.iptable_security); | ||
77 | } | ||
78 | |||
79 | static unsigned int | ||
80 | ipt_forward_hook(unsigned int hook, | ||
81 | struct sk_buff *skb, | ||
82 | const struct net_device *in, | ||
83 | const struct net_device *out, | ||
84 | int (*okfn)(struct sk_buff *)) | ||
85 | { | ||
86 | return ipt_do_table(skb, hook, in, out, | ||
87 | nf_forward_net(in, out)->ipv4.iptable_security); | ||
88 | } | ||
89 | |||
90 | static unsigned int | ||
91 | ipt_local_out_hook(unsigned int hook, | ||
92 | struct sk_buff *skb, | ||
93 | const struct net_device *in, | ||
94 | const struct net_device *out, | ||
95 | int (*okfn)(struct sk_buff *)) | ||
96 | { | ||
97 | /* Somebody is playing with raw sockets. */ | ||
98 | if (skb->len < sizeof(struct iphdr) | ||
99 | || ip_hdrlen(skb) < sizeof(struct iphdr)) { | ||
100 | if (net_ratelimit()) | ||
101 | printk(KERN_INFO "iptable_security: ignoring short " | ||
102 | "SOCK_RAW packet.\n"); | ||
103 | return NF_ACCEPT; | ||
104 | } | ||
105 | return ipt_do_table(skb, hook, in, out, | ||
106 | nf_local_out_net(in, out)->ipv4.iptable_security); | ||
107 | } | ||
108 | |||
109 | static struct nf_hook_ops ipt_ops[] __read_mostly = { | ||
110 | { | ||
111 | .hook = ipt_local_in_hook, | ||
112 | .owner = THIS_MODULE, | ||
113 | .pf = PF_INET, | ||
114 | .hooknum = NF_INET_LOCAL_IN, | ||
115 | .priority = NF_IP_PRI_SECURITY, | ||
116 | }, | ||
117 | { | ||
118 | .hook = ipt_forward_hook, | ||
119 | .owner = THIS_MODULE, | ||
120 | .pf = PF_INET, | ||
121 | .hooknum = NF_INET_FORWARD, | ||
122 | .priority = NF_IP_PRI_SECURITY, | ||
123 | }, | ||
124 | { | ||
125 | .hook = ipt_local_out_hook, | ||
126 | .owner = THIS_MODULE, | ||
127 | .pf = PF_INET, | ||
128 | .hooknum = NF_INET_LOCAL_OUT, | ||
129 | .priority = NF_IP_PRI_SECURITY, | ||
130 | }, | ||
131 | }; | ||
132 | |||
133 | static int __net_init iptable_security_net_init(struct net *net) | ||
134 | { | ||
135 | net->ipv4.iptable_security = | ||
136 | ipt_register_table(net, &security_table, &initial_table.repl); | ||
137 | |||
138 | if (IS_ERR(net->ipv4.iptable_security)) | ||
139 | return PTR_ERR(net->ipv4.iptable_security); | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static void __net_exit iptable_security_net_exit(struct net *net) | ||
145 | { | ||
146 | ipt_unregister_table(net->ipv4.iptable_security); | ||
147 | } | ||
148 | |||
149 | static struct pernet_operations iptable_security_net_ops = { | ||
150 | .init = iptable_security_net_init, | ||
151 | .exit = iptable_security_net_exit, | ||
152 | }; | ||
153 | |||
154 | static int __init iptable_security_init(void) | ||
155 | { | ||
156 | int ret; | ||
157 | |||
158 | ret = register_pernet_subsys(&iptable_security_net_ops); | ||
159 | if (ret < 0) | ||
160 | return ret; | ||
161 | |||
162 | ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); | ||
163 | if (ret < 0) | ||
164 | goto cleanup_table; | ||
165 | |||
166 | return ret; | ||
167 | |||
168 | cleanup_table: | ||
169 | unregister_pernet_subsys(&iptable_security_net_ops); | ||
170 | return ret; | ||
171 | } | ||
172 | |||
173 | static void __exit iptable_security_fini(void) | ||
174 | { | ||
175 | nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); | ||
176 | unregister_pernet_subsys(&iptable_security_net_ops); | ||
177 | } | ||
178 | |||
179 | module_init(iptable_security_init); | ||
180 | module_exit(iptable_security_fini); | ||
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 78ab19accace..97791048fa9b 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c | |||
@@ -87,9 +87,8 @@ static int icmp_packet(struct nf_conn *ct, | |||
87 | means this will only run once even if count hits zero twice | 87 | means this will only run once even if count hits zero twice |
88 | (theoretically possible with SMP) */ | 88 | (theoretically possible with SMP) */ |
89 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { | 89 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { |
90 | if (atomic_dec_and_test(&ct->proto.icmp.count) | 90 | if (atomic_dec_and_test(&ct->proto.icmp.count)) |
91 | && del_timer(&ct->timeout)) | 91 | nf_ct_kill_acct(ct, ctinfo, skb); |
92 | ct->timeout.function((unsigned long)ct); | ||
93 | } else { | 92 | } else { |
94 | atomic_inc(&ct->proto.icmp.count); | 93 | atomic_inc(&ct->proto.icmp.count); |
95 | nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb); | 94 | nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb); |
diff --git a/net/ipv4/netfilter/nf_nat_proto_sctp.c b/net/ipv4/netfilter/nf_nat_proto_sctp.c index 82e4c0e286b8..65e470bc6123 100644 --- a/net/ipv4/netfilter/nf_nat_proto_sctp.c +++ b/net/ipv4/netfilter/nf_nat_proto_sctp.c | |||
@@ -36,7 +36,7 @@ sctp_manip_pkt(struct sk_buff *skb, | |||
36 | sctp_sctphdr_t *hdr; | 36 | sctp_sctphdr_t *hdr; |
37 | unsigned int hdroff = iphdroff + iph->ihl*4; | 37 | unsigned int hdroff = iphdroff + iph->ihl*4; |
38 | __be32 oldip, newip; | 38 | __be32 oldip, newip; |
39 | u32 crc32; | 39 | __be32 crc32; |
40 | 40 | ||
41 | if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) | 41 | if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) |
42 | return false; | 42 | return false; |
@@ -61,7 +61,7 @@ sctp_manip_pkt(struct sk_buff *skb, | |||
61 | crc32 = sctp_update_cksum((u8 *)skb->data, skb_headlen(skb), | 61 | crc32 = sctp_update_cksum((u8 *)skb->data, skb_headlen(skb), |
62 | crc32); | 62 | crc32); |
63 | crc32 = sctp_end_cksum(crc32); | 63 | crc32 = sctp_end_cksum(crc32); |
64 | hdr->checksum = htonl(crc32); | 64 | hdr->checksum = crc32; |
65 | 65 | ||
66 | return true; | 66 | return true; |
67 | } | 67 | } |
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 552169b41b16..834356ea99df 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -7,8 +7,6 @@ | |||
7 | * PROC file system. It is mainly used for debugging and | 7 | * PROC file system. It is mainly used for debugging and |
8 | * statistics. | 8 | * statistics. |
9 | * | 9 | * |
10 | * Version: $Id: proc.c,v 1.45 2001/05/16 16:45:35 davem Exp $ | ||
11 | * | ||
12 | * Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 10 | * Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
13 | * Gerald J. Heim, <heim@peanuts.informatik.uni-tuebingen.de> | 11 | * Gerald J. Heim, <heim@peanuts.informatik.uni-tuebingen.de> |
14 | * Fred Baumgarten, <dc6iq@insu1.etec.uni-karlsruhe.de> | 12 | * Fred Baumgarten, <dc6iq@insu1.etec.uni-karlsruhe.de> |
@@ -73,32 +71,7 @@ static int sockstat_seq_show(struct seq_file *seq, void *v) | |||
73 | 71 | ||
74 | static int sockstat_seq_open(struct inode *inode, struct file *file) | 72 | static int sockstat_seq_open(struct inode *inode, struct file *file) |
75 | { | 73 | { |
76 | int err; | 74 | return single_open_net(inode, file, sockstat_seq_show); |
77 | struct net *net; | ||
78 | |||
79 | err = -ENXIO; | ||
80 | net = get_proc_net(inode); | ||
81 | if (net == NULL) | ||
82 | goto err_net; | ||
83 | |||
84 | err = single_open(file, sockstat_seq_show, net); | ||
85 | if (err < 0) | ||
86 | goto err_open; | ||
87 | |||
88 | return 0; | ||
89 | |||
90 | err_open: | ||
91 | put_net(net); | ||
92 | err_net: | ||
93 | return err; | ||
94 | } | ||
95 | |||
96 | static int sockstat_seq_release(struct inode *inode, struct file *file) | ||
97 | { | ||
98 | struct net *net = ((struct seq_file *)file->private_data)->private; | ||
99 | |||
100 | put_net(net); | ||
101 | return single_release(inode, file); | ||
102 | } | 75 | } |
103 | 76 | ||
104 | static const struct file_operations sockstat_seq_fops = { | 77 | static const struct file_operations sockstat_seq_fops = { |
@@ -106,7 +79,7 @@ static const struct file_operations sockstat_seq_fops = { | |||
106 | .open = sockstat_seq_open, | 79 | .open = sockstat_seq_open, |
107 | .read = seq_read, | 80 | .read = seq_read, |
108 | .llseek = seq_lseek, | 81 | .llseek = seq_lseek, |
109 | .release = sockstat_seq_release, | 82 | .release = single_release_net, |
110 | }; | 83 | }; |
111 | 84 | ||
112 | /* snmp items */ | 85 | /* snmp items */ |
@@ -268,11 +241,12 @@ static void icmpmsg_put(struct seq_file *seq) | |||
268 | 241 | ||
269 | int j, i, count; | 242 | int j, i, count; |
270 | static int out[PERLINE]; | 243 | static int out[PERLINE]; |
244 | struct net *net = seq->private; | ||
271 | 245 | ||
272 | count = 0; | 246 | count = 0; |
273 | for (i = 0; i < ICMPMSG_MIB_MAX; i++) { | 247 | for (i = 0; i < ICMPMSG_MIB_MAX; i++) { |
274 | 248 | ||
275 | if (snmp_fold_field((void **) icmpmsg_statistics, i)) | 249 | if (snmp_fold_field((void **) net->mib.icmpmsg_statistics, i)) |
276 | out[count++] = i; | 250 | out[count++] = i; |
277 | if (count < PERLINE) | 251 | if (count < PERLINE) |
278 | continue; | 252 | continue; |
@@ -284,7 +258,7 @@ static void icmpmsg_put(struct seq_file *seq) | |||
284 | seq_printf(seq, "\nIcmpMsg: "); | 258 | seq_printf(seq, "\nIcmpMsg: "); |
285 | for (j = 0; j < PERLINE; ++j) | 259 | for (j = 0; j < PERLINE; ++j) |
286 | seq_printf(seq, " %lu", | 260 | seq_printf(seq, " %lu", |
287 | snmp_fold_field((void **) icmpmsg_statistics, | 261 | snmp_fold_field((void **) net->mib.icmpmsg_statistics, |
288 | out[j])); | 262 | out[j])); |
289 | seq_putc(seq, '\n'); | 263 | seq_putc(seq, '\n'); |
290 | } | 264 | } |
@@ -296,7 +270,7 @@ static void icmpmsg_put(struct seq_file *seq) | |||
296 | seq_printf(seq, "\nIcmpMsg:"); | 270 | seq_printf(seq, "\nIcmpMsg:"); |
297 | for (j = 0; j < count; ++j) | 271 | for (j = 0; j < count; ++j) |
298 | seq_printf(seq, " %lu", snmp_fold_field((void **) | 272 | seq_printf(seq, " %lu", snmp_fold_field((void **) |
299 | icmpmsg_statistics, out[j])); | 273 | net->mib.icmpmsg_statistics, out[j])); |
300 | } | 274 | } |
301 | 275 | ||
302 | #undef PERLINE | 276 | #undef PERLINE |
@@ -305,6 +279,7 @@ static void icmpmsg_put(struct seq_file *seq) | |||
305 | static void icmp_put(struct seq_file *seq) | 279 | static void icmp_put(struct seq_file *seq) |
306 | { | 280 | { |
307 | int i; | 281 | int i; |
282 | struct net *net = seq->private; | ||
308 | 283 | ||
309 | seq_puts(seq, "\nIcmp: InMsgs InErrors"); | 284 | seq_puts(seq, "\nIcmp: InMsgs InErrors"); |
310 | for (i=0; icmpmibmap[i].name != NULL; i++) | 285 | for (i=0; icmpmibmap[i].name != NULL; i++) |
@@ -313,18 +288,18 @@ static void icmp_put(struct seq_file *seq) | |||
313 | for (i=0; icmpmibmap[i].name != NULL; i++) | 288 | for (i=0; icmpmibmap[i].name != NULL; i++) |
314 | seq_printf(seq, " Out%s", icmpmibmap[i].name); | 289 | seq_printf(seq, " Out%s", icmpmibmap[i].name); |
315 | seq_printf(seq, "\nIcmp: %lu %lu", | 290 | seq_printf(seq, "\nIcmp: %lu %lu", |
316 | snmp_fold_field((void **) icmp_statistics, ICMP_MIB_INMSGS), | 291 | snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_INMSGS), |
317 | snmp_fold_field((void **) icmp_statistics, ICMP_MIB_INERRORS)); | 292 | snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_INERRORS)); |
318 | for (i=0; icmpmibmap[i].name != NULL; i++) | 293 | for (i=0; icmpmibmap[i].name != NULL; i++) |
319 | seq_printf(seq, " %lu", | 294 | seq_printf(seq, " %lu", |
320 | snmp_fold_field((void **) icmpmsg_statistics, | 295 | snmp_fold_field((void **) net->mib.icmpmsg_statistics, |
321 | icmpmibmap[i].index)); | 296 | icmpmibmap[i].index)); |
322 | seq_printf(seq, " %lu %lu", | 297 | seq_printf(seq, " %lu %lu", |
323 | snmp_fold_field((void **) icmp_statistics, ICMP_MIB_OUTMSGS), | 298 | snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS), |
324 | snmp_fold_field((void **) icmp_statistics, ICMP_MIB_OUTERRORS)); | 299 | snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS)); |
325 | for (i=0; icmpmibmap[i].name != NULL; i++) | 300 | for (i=0; icmpmibmap[i].name != NULL; i++) |
326 | seq_printf(seq, " %lu", | 301 | seq_printf(seq, " %lu", |
327 | snmp_fold_field((void **) icmpmsg_statistics, | 302 | snmp_fold_field((void **) net->mib.icmpmsg_statistics, |
328 | icmpmibmap[i].index | 0x100)); | 303 | icmpmibmap[i].index | 0x100)); |
329 | } | 304 | } |
330 | 305 | ||
@@ -334,6 +309,7 @@ static void icmp_put(struct seq_file *seq) | |||
334 | static int snmp_seq_show(struct seq_file *seq, void *v) | 309 | static int snmp_seq_show(struct seq_file *seq, void *v) |
335 | { | 310 | { |
336 | int i; | 311 | int i; |
312 | struct net *net = seq->private; | ||
337 | 313 | ||
338 | seq_puts(seq, "Ip: Forwarding DefaultTTL"); | 314 | seq_puts(seq, "Ip: Forwarding DefaultTTL"); |
339 | 315 | ||
@@ -341,12 +317,12 @@ static int snmp_seq_show(struct seq_file *seq, void *v) | |||
341 | seq_printf(seq, " %s", snmp4_ipstats_list[i].name); | 317 | seq_printf(seq, " %s", snmp4_ipstats_list[i].name); |
342 | 318 | ||
343 | seq_printf(seq, "\nIp: %d %d", | 319 | seq_printf(seq, "\nIp: %d %d", |
344 | IPV4_DEVCONF_ALL(&init_net, FORWARDING) ? 1 : 2, | 320 | IPV4_DEVCONF_ALL(net, FORWARDING) ? 1 : 2, |
345 | sysctl_ip_default_ttl); | 321 | sysctl_ip_default_ttl); |
346 | 322 | ||
347 | for (i = 0; snmp4_ipstats_list[i].name != NULL; i++) | 323 | for (i = 0; snmp4_ipstats_list[i].name != NULL; i++) |
348 | seq_printf(seq, " %lu", | 324 | seq_printf(seq, " %lu", |
349 | snmp_fold_field((void **)ip_statistics, | 325 | snmp_fold_field((void **)net->mib.ip_statistics, |
350 | snmp4_ipstats_list[i].entry)); | 326 | snmp4_ipstats_list[i].entry)); |
351 | 327 | ||
352 | icmp_put(seq); /* RFC 2011 compatibility */ | 328 | icmp_put(seq); /* RFC 2011 compatibility */ |
@@ -361,11 +337,11 @@ static int snmp_seq_show(struct seq_file *seq, void *v) | |||
361 | /* MaxConn field is signed, RFC 2012 */ | 337 | /* MaxConn field is signed, RFC 2012 */ |
362 | if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN) | 338 | if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN) |
363 | seq_printf(seq, " %ld", | 339 | seq_printf(seq, " %ld", |
364 | snmp_fold_field((void **)tcp_statistics, | 340 | snmp_fold_field((void **)net->mib.tcp_statistics, |
365 | snmp4_tcp_list[i].entry)); | 341 | snmp4_tcp_list[i].entry)); |
366 | else | 342 | else |
367 | seq_printf(seq, " %lu", | 343 | seq_printf(seq, " %lu", |
368 | snmp_fold_field((void **)tcp_statistics, | 344 | snmp_fold_field((void **)net->mib.tcp_statistics, |
369 | snmp4_tcp_list[i].entry)); | 345 | snmp4_tcp_list[i].entry)); |
370 | } | 346 | } |
371 | 347 | ||
@@ -376,7 +352,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v) | |||
376 | seq_puts(seq, "\nUdp:"); | 352 | seq_puts(seq, "\nUdp:"); |
377 | for (i = 0; snmp4_udp_list[i].name != NULL; i++) | 353 | for (i = 0; snmp4_udp_list[i].name != NULL; i++) |
378 | seq_printf(seq, " %lu", | 354 | seq_printf(seq, " %lu", |
379 | snmp_fold_field((void **)udp_statistics, | 355 | snmp_fold_field((void **)net->mib.udp_statistics, |
380 | snmp4_udp_list[i].entry)); | 356 | snmp4_udp_list[i].entry)); |
381 | 357 | ||
382 | /* the UDP and UDP-Lite MIBs are the same */ | 358 | /* the UDP and UDP-Lite MIBs are the same */ |
@@ -387,7 +363,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v) | |||
387 | seq_puts(seq, "\nUdpLite:"); | 363 | seq_puts(seq, "\nUdpLite:"); |
388 | for (i = 0; snmp4_udp_list[i].name != NULL; i++) | 364 | for (i = 0; snmp4_udp_list[i].name != NULL; i++) |
389 | seq_printf(seq, " %lu", | 365 | seq_printf(seq, " %lu", |
390 | snmp_fold_field((void **)udplite_statistics, | 366 | snmp_fold_field((void **)net->mib.udplite_statistics, |
391 | snmp4_udp_list[i].entry)); | 367 | snmp4_udp_list[i].entry)); |
392 | 368 | ||
393 | seq_putc(seq, '\n'); | 369 | seq_putc(seq, '\n'); |
@@ -396,7 +372,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v) | |||
396 | 372 | ||
397 | static int snmp_seq_open(struct inode *inode, struct file *file) | 373 | static int snmp_seq_open(struct inode *inode, struct file *file) |
398 | { | 374 | { |
399 | return single_open(file, snmp_seq_show, NULL); | 375 | return single_open_net(inode, file, snmp_seq_show); |
400 | } | 376 | } |
401 | 377 | ||
402 | static const struct file_operations snmp_seq_fops = { | 378 | static const struct file_operations snmp_seq_fops = { |
@@ -404,7 +380,7 @@ static const struct file_operations snmp_seq_fops = { | |||
404 | .open = snmp_seq_open, | 380 | .open = snmp_seq_open, |
405 | .read = seq_read, | 381 | .read = seq_read, |
406 | .llseek = seq_lseek, | 382 | .llseek = seq_lseek, |
407 | .release = single_release, | 383 | .release = single_release_net, |
408 | }; | 384 | }; |
409 | 385 | ||
410 | 386 | ||
@@ -415,6 +391,7 @@ static const struct file_operations snmp_seq_fops = { | |||
415 | static int netstat_seq_show(struct seq_file *seq, void *v) | 391 | static int netstat_seq_show(struct seq_file *seq, void *v) |
416 | { | 392 | { |
417 | int i; | 393 | int i; |
394 | struct net *net = seq->private; | ||
418 | 395 | ||
419 | seq_puts(seq, "TcpExt:"); | 396 | seq_puts(seq, "TcpExt:"); |
420 | for (i = 0; snmp4_net_list[i].name != NULL; i++) | 397 | for (i = 0; snmp4_net_list[i].name != NULL; i++) |
@@ -423,7 +400,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v) | |||
423 | seq_puts(seq, "\nTcpExt:"); | 400 | seq_puts(seq, "\nTcpExt:"); |
424 | for (i = 0; snmp4_net_list[i].name != NULL; i++) | 401 | for (i = 0; snmp4_net_list[i].name != NULL; i++) |
425 | seq_printf(seq, " %lu", | 402 | seq_printf(seq, " %lu", |
426 | snmp_fold_field((void **)net_statistics, | 403 | snmp_fold_field((void **)net->mib.net_statistics, |
427 | snmp4_net_list[i].entry)); | 404 | snmp4_net_list[i].entry)); |
428 | 405 | ||
429 | seq_puts(seq, "\nIpExt:"); | 406 | seq_puts(seq, "\nIpExt:"); |
@@ -433,7 +410,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v) | |||
433 | seq_puts(seq, "\nIpExt:"); | 410 | seq_puts(seq, "\nIpExt:"); |
434 | for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++) | 411 | for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++) |
435 | seq_printf(seq, " %lu", | 412 | seq_printf(seq, " %lu", |
436 | snmp_fold_field((void **)ip_statistics, | 413 | snmp_fold_field((void **)net->mib.ip_statistics, |
437 | snmp4_ipextstats_list[i].entry)); | 414 | snmp4_ipextstats_list[i].entry)); |
438 | 415 | ||
439 | seq_putc(seq, '\n'); | 416 | seq_putc(seq, '\n'); |
@@ -442,7 +419,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v) | |||
442 | 419 | ||
443 | static int netstat_seq_open(struct inode *inode, struct file *file) | 420 | static int netstat_seq_open(struct inode *inode, struct file *file) |
444 | { | 421 | { |
445 | return single_open(file, netstat_seq_show, NULL); | 422 | return single_open_net(inode, file, netstat_seq_show); |
446 | } | 423 | } |
447 | 424 | ||
448 | static const struct file_operations netstat_seq_fops = { | 425 | static const struct file_operations netstat_seq_fops = { |
@@ -450,18 +427,32 @@ static const struct file_operations netstat_seq_fops = { | |||
450 | .open = netstat_seq_open, | 427 | .open = netstat_seq_open, |
451 | .read = seq_read, | 428 | .read = seq_read, |
452 | .llseek = seq_lseek, | 429 | .llseek = seq_lseek, |
453 | .release = single_release, | 430 | .release = single_release_net, |
454 | }; | 431 | }; |
455 | 432 | ||
456 | static __net_init int ip_proc_init_net(struct net *net) | 433 | static __net_init int ip_proc_init_net(struct net *net) |
457 | { | 434 | { |
458 | if (!proc_net_fops_create(net, "sockstat", S_IRUGO, &sockstat_seq_fops)) | 435 | if (!proc_net_fops_create(net, "sockstat", S_IRUGO, &sockstat_seq_fops)) |
459 | return -ENOMEM; | 436 | goto out_sockstat; |
437 | if (!proc_net_fops_create(net, "netstat", S_IRUGO, &netstat_seq_fops)) | ||
438 | goto out_netstat; | ||
439 | if (!proc_net_fops_create(net, "snmp", S_IRUGO, &snmp_seq_fops)) | ||
440 | goto out_snmp; | ||
441 | |||
460 | return 0; | 442 | return 0; |
443 | |||
444 | out_snmp: | ||
445 | proc_net_remove(net, "netstat"); | ||
446 | out_netstat: | ||
447 | proc_net_remove(net, "sockstat"); | ||
448 | out_sockstat: | ||
449 | return -ENOMEM; | ||
461 | } | 450 | } |
462 | 451 | ||
463 | static __net_exit void ip_proc_exit_net(struct net *net) | 452 | static __net_exit void ip_proc_exit_net(struct net *net) |
464 | { | 453 | { |
454 | proc_net_remove(net, "snmp"); | ||
455 | proc_net_remove(net, "netstat"); | ||
465 | proc_net_remove(net, "sockstat"); | 456 | proc_net_remove(net, "sockstat"); |
466 | } | 457 | } |
467 | 458 | ||
@@ -472,24 +463,6 @@ static __net_initdata struct pernet_operations ip_proc_ops = { | |||
472 | 463 | ||
473 | int __init ip_misc_proc_init(void) | 464 | int __init ip_misc_proc_init(void) |
474 | { | 465 | { |
475 | int rc = 0; | 466 | return register_pernet_subsys(&ip_proc_ops); |
476 | |||
477 | if (register_pernet_subsys(&ip_proc_ops)) | ||
478 | goto out_pernet; | ||
479 | |||
480 | if (!proc_net_fops_create(&init_net, "netstat", S_IRUGO, &netstat_seq_fops)) | ||
481 | goto out_netstat; | ||
482 | |||
483 | if (!proc_net_fops_create(&init_net, "snmp", S_IRUGO, &snmp_seq_fops)) | ||
484 | goto out_snmp; | ||
485 | out: | ||
486 | return rc; | ||
487 | out_snmp: | ||
488 | proc_net_remove(&init_net, "netstat"); | ||
489 | out_netstat: | ||
490 | unregister_pernet_subsys(&ip_proc_ops); | ||
491 | out_pernet: | ||
492 | rc = -ENOMEM; | ||
493 | goto out; | ||
494 | } | 467 | } |
495 | 468 | ||
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c index 971ab9356e51..ea50da0649fd 100644 --- a/net/ipv4/protocol.c +++ b/net/ipv4/protocol.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * INET protocol dispatch tables. | 6 | * INET protocol dispatch tables. |
7 | * | 7 | * |
8 | * Version: $Id: protocol.c,v 1.14 2001/05/18 02:25:49 davem Exp $ | ||
9 | * | ||
10 | * Authors: Ross Biro | 8 | * Authors: Ross Biro |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * | 10 | * |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 37a1ecd9d600..cd975743bcd2 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * RAW - implementation of IP "raw" sockets. | 6 | * RAW - implementation of IP "raw" sockets. |
7 | * | 7 | * |
8 | * Version: $Id: raw.c,v 1.64 2002/02/01 22:01:04 davem Exp $ | ||
9 | * | ||
10 | * Authors: Ross Biro | 8 | * Authors: Ross Biro |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * | 10 | * |
@@ -322,6 +320,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, | |||
322 | unsigned int flags) | 320 | unsigned int flags) |
323 | { | 321 | { |
324 | struct inet_sock *inet = inet_sk(sk); | 322 | struct inet_sock *inet = inet_sk(sk); |
323 | struct net *net = sock_net(sk); | ||
325 | struct iphdr *iph; | 324 | struct iphdr *iph; |
326 | struct sk_buff *skb; | 325 | struct sk_buff *skb; |
327 | unsigned int iphlen; | 326 | unsigned int iphlen; |
@@ -370,7 +369,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, | |||
370 | iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); | 369 | iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); |
371 | } | 370 | } |
372 | if (iph->protocol == IPPROTO_ICMP) | 371 | if (iph->protocol == IPPROTO_ICMP) |
373 | icmp_out_count(((struct icmphdr *) | 372 | icmp_out_count(net, ((struct icmphdr *) |
374 | skb_transport_header(skb))->type); | 373 | skb_transport_header(skb))->type); |
375 | 374 | ||
376 | err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, | 375 | err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, |
@@ -386,7 +385,7 @@ error_fault: | |||
386 | err = -EFAULT; | 385 | err = -EFAULT; |
387 | kfree_skb(skb); | 386 | kfree_skb(skb); |
388 | error: | 387 | error: |
389 | IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS); | 388 | IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS); |
390 | return err; | 389 | return err; |
391 | } | 390 | } |
392 | 391 | ||
@@ -608,12 +607,11 @@ static void raw_close(struct sock *sk, long timeout) | |||
608 | sk_common_release(sk); | 607 | sk_common_release(sk); |
609 | } | 608 | } |
610 | 609 | ||
611 | static int raw_destroy(struct sock *sk) | 610 | static void raw_destroy(struct sock *sk) |
612 | { | 611 | { |
613 | lock_sock(sk); | 612 | lock_sock(sk); |
614 | ip_flush_pending_frames(sk); | 613 | ip_flush_pending_frames(sk); |
615 | release_sock(sk); | 614 | release_sock(sk); |
616 | return 0; | ||
617 | } | 615 | } |
618 | 616 | ||
619 | /* This gets rid of all the nasties in af_inet. -DaveM */ | 617 | /* This gets rid of all the nasties in af_inet. -DaveM */ |
@@ -947,7 +945,7 @@ static int raw_seq_show(struct seq_file *seq, void *v) | |||
947 | if (v == SEQ_START_TOKEN) | 945 | if (v == SEQ_START_TOKEN) |
948 | seq_printf(seq, " sl local_address rem_address st tx_queue " | 946 | seq_printf(seq, " sl local_address rem_address st tx_queue " |
949 | "rx_queue tr tm->when retrnsmt uid timeout " | 947 | "rx_queue tr tm->when retrnsmt uid timeout " |
950 | "inode drops\n"); | 948 | "inode ref pointer drops\n"); |
951 | else | 949 | else |
952 | raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); | 950 | raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); |
953 | return 0; | 951 | return 0; |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 96be336064fb..e4ab0ac94f92 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * ROUTE - implementation of the IP router. | 6 | * ROUTE - implementation of the IP router. |
7 | * | 7 | * |
8 | * Version: $Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $ | ||
9 | * | ||
10 | * Authors: Ross Biro | 8 | * Authors: Ross Biro |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Alan Cox, <gw4pts@gw4pts.ampr.org> | 10 | * Alan Cox, <gw4pts@gw4pts.ampr.org> |
@@ -134,7 +132,6 @@ static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ; | |||
134 | 132 | ||
135 | static void rt_worker_func(struct work_struct *work); | 133 | static void rt_worker_func(struct work_struct *work); |
136 | static DECLARE_DELAYED_WORK(expires_work, rt_worker_func); | 134 | static DECLARE_DELAYED_WORK(expires_work, rt_worker_func); |
137 | static struct timer_list rt_secret_timer; | ||
138 | 135 | ||
139 | /* | 136 | /* |
140 | * Interface to generic destination cache. | 137 | * Interface to generic destination cache. |
@@ -253,20 +250,25 @@ static inline void rt_hash_lock_init(void) | |||
253 | static struct rt_hash_bucket *rt_hash_table __read_mostly; | 250 | static struct rt_hash_bucket *rt_hash_table __read_mostly; |
254 | static unsigned rt_hash_mask __read_mostly; | 251 | static unsigned rt_hash_mask __read_mostly; |
255 | static unsigned int rt_hash_log __read_mostly; | 252 | static unsigned int rt_hash_log __read_mostly; |
256 | static atomic_t rt_genid __read_mostly; | ||
257 | 253 | ||
258 | static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); | 254 | static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); |
259 | #define RT_CACHE_STAT_INC(field) \ | 255 | #define RT_CACHE_STAT_INC(field) \ |
260 | (__raw_get_cpu_var(rt_cache_stat).field++) | 256 | (__raw_get_cpu_var(rt_cache_stat).field++) |
261 | 257 | ||
262 | static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx) | 258 | static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, |
259 | int genid) | ||
263 | { | 260 | { |
264 | return jhash_3words((__force u32)(__be32)(daddr), | 261 | return jhash_3words((__force u32)(__be32)(daddr), |
265 | (__force u32)(__be32)(saddr), | 262 | (__force u32)(__be32)(saddr), |
266 | idx, atomic_read(&rt_genid)) | 263 | idx, genid) |
267 | & rt_hash_mask; | 264 | & rt_hash_mask; |
268 | } | 265 | } |
269 | 266 | ||
267 | static inline int rt_genid(struct net *net) | ||
268 | { | ||
269 | return atomic_read(&net->ipv4.rt_genid); | ||
270 | } | ||
271 | |||
270 | #ifdef CONFIG_PROC_FS | 272 | #ifdef CONFIG_PROC_FS |
271 | struct rt_cache_iter_state { | 273 | struct rt_cache_iter_state { |
272 | struct seq_net_private p; | 274 | struct seq_net_private p; |
@@ -336,7 +338,7 @@ static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) | |||
336 | struct rt_cache_iter_state *st = seq->private; | 338 | struct rt_cache_iter_state *st = seq->private; |
337 | if (*pos) | 339 | if (*pos) |
338 | return rt_cache_get_idx(seq, *pos - 1); | 340 | return rt_cache_get_idx(seq, *pos - 1); |
339 | st->genid = atomic_read(&rt_genid); | 341 | st->genid = rt_genid(seq_file_net(seq)); |
340 | return SEQ_START_TOKEN; | 342 | return SEQ_START_TOKEN; |
341 | } | 343 | } |
342 | 344 | ||
@@ -683,6 +685,11 @@ static inline int compare_netns(struct rtable *rt1, struct rtable *rt2) | |||
683 | return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev); | 685 | return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev); |
684 | } | 686 | } |
685 | 687 | ||
688 | static inline int rt_is_expired(struct rtable *rth) | ||
689 | { | ||
690 | return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev)); | ||
691 | } | ||
692 | |||
686 | /* | 693 | /* |
687 | * Perform a full scan of hash table and free all entries. | 694 | * Perform a full scan of hash table and free all entries. |
688 | * Can be called by a softirq or a process. | 695 | * Can be called by a softirq or a process. |
@@ -692,6 +699,7 @@ static void rt_do_flush(int process_context) | |||
692 | { | 699 | { |
693 | unsigned int i; | 700 | unsigned int i; |
694 | struct rtable *rth, *next; | 701 | struct rtable *rth, *next; |
702 | struct rtable * tail; | ||
695 | 703 | ||
696 | for (i = 0; i <= rt_hash_mask; i++) { | 704 | for (i = 0; i <= rt_hash_mask; i++) { |
697 | if (process_context && need_resched()) | 705 | if (process_context && need_resched()) |
@@ -701,11 +709,39 @@ static void rt_do_flush(int process_context) | |||
701 | continue; | 709 | continue; |
702 | 710 | ||
703 | spin_lock_bh(rt_hash_lock_addr(i)); | 711 | spin_lock_bh(rt_hash_lock_addr(i)); |
712 | #ifdef CONFIG_NET_NS | ||
713 | { | ||
714 | struct rtable ** prev, * p; | ||
715 | |||
716 | rth = rt_hash_table[i].chain; | ||
717 | |||
718 | /* defer releasing the head of the list after spin_unlock */ | ||
719 | for (tail = rth; tail; tail = tail->u.dst.rt_next) | ||
720 | if (!rt_is_expired(tail)) | ||
721 | break; | ||
722 | if (rth != tail) | ||
723 | rt_hash_table[i].chain = tail; | ||
724 | |||
725 | /* call rt_free on entries after the tail requiring flush */ | ||
726 | prev = &rt_hash_table[i].chain; | ||
727 | for (p = *prev; p; p = next) { | ||
728 | next = p->u.dst.rt_next; | ||
729 | if (!rt_is_expired(p)) { | ||
730 | prev = &p->u.dst.rt_next; | ||
731 | } else { | ||
732 | *prev = next; | ||
733 | rt_free(p); | ||
734 | } | ||
735 | } | ||
736 | } | ||
737 | #else | ||
704 | rth = rt_hash_table[i].chain; | 738 | rth = rt_hash_table[i].chain; |
705 | rt_hash_table[i].chain = NULL; | 739 | rt_hash_table[i].chain = NULL; |
740 | tail = NULL; | ||
741 | #endif | ||
706 | spin_unlock_bh(rt_hash_lock_addr(i)); | 742 | spin_unlock_bh(rt_hash_lock_addr(i)); |
707 | 743 | ||
708 | for (; rth; rth = next) { | 744 | for (; rth != tail; rth = next) { |
709 | next = rth->u.dst.rt_next; | 745 | next = rth->u.dst.rt_next; |
710 | rt_free(rth); | 746 | rt_free(rth); |
711 | } | 747 | } |
@@ -738,7 +774,7 @@ static void rt_check_expire(void) | |||
738 | continue; | 774 | continue; |
739 | spin_lock_bh(rt_hash_lock_addr(i)); | 775 | spin_lock_bh(rt_hash_lock_addr(i)); |
740 | while ((rth = *rthp) != NULL) { | 776 | while ((rth = *rthp) != NULL) { |
741 | if (rth->rt_genid != atomic_read(&rt_genid)) { | 777 | if (rt_is_expired(rth)) { |
742 | *rthp = rth->u.dst.rt_next; | 778 | *rthp = rth->u.dst.rt_next; |
743 | rt_free(rth); | 779 | rt_free(rth); |
744 | continue; | 780 | continue; |
@@ -781,21 +817,21 @@ static void rt_worker_func(struct work_struct *work) | |||
781 | * many times (2^24) without giving recent rt_genid. | 817 | * many times (2^24) without giving recent rt_genid. |
782 | * Jenkins hash is strong enough that litle changes of rt_genid are OK. | 818 | * Jenkins hash is strong enough that litle changes of rt_genid are OK. |
783 | */ | 819 | */ |
784 | static void rt_cache_invalidate(void) | 820 | static void rt_cache_invalidate(struct net *net) |
785 | { | 821 | { |
786 | unsigned char shuffle; | 822 | unsigned char shuffle; |
787 | 823 | ||
788 | get_random_bytes(&shuffle, sizeof(shuffle)); | 824 | get_random_bytes(&shuffle, sizeof(shuffle)); |
789 | atomic_add(shuffle + 1U, &rt_genid); | 825 | atomic_add(shuffle + 1U, &net->ipv4.rt_genid); |
790 | } | 826 | } |
791 | 827 | ||
792 | /* | 828 | /* |
793 | * delay < 0 : invalidate cache (fast : entries will be deleted later) | 829 | * delay < 0 : invalidate cache (fast : entries will be deleted later) |
794 | * delay >= 0 : invalidate & flush cache (can be long) | 830 | * delay >= 0 : invalidate & flush cache (can be long) |
795 | */ | 831 | */ |
796 | void rt_cache_flush(int delay) | 832 | void rt_cache_flush(struct net *net, int delay) |
797 | { | 833 | { |
798 | rt_cache_invalidate(); | 834 | rt_cache_invalidate(net); |
799 | if (delay >= 0) | 835 | if (delay >= 0) |
800 | rt_do_flush(!in_softirq()); | 836 | rt_do_flush(!in_softirq()); |
801 | } | 837 | } |
@@ -803,10 +839,11 @@ void rt_cache_flush(int delay) | |||
803 | /* | 839 | /* |
804 | * We change rt_genid and let gc do the cleanup | 840 | * We change rt_genid and let gc do the cleanup |
805 | */ | 841 | */ |
806 | static void rt_secret_rebuild(unsigned long dummy) | 842 | static void rt_secret_rebuild(unsigned long __net) |
807 | { | 843 | { |
808 | rt_cache_invalidate(); | 844 | struct net *net = (struct net *)__net; |
809 | mod_timer(&rt_secret_timer, jiffies + ip_rt_secret_interval); | 845 | rt_cache_invalidate(net); |
846 | mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval); | ||
810 | } | 847 | } |
811 | 848 | ||
812 | /* | 849 | /* |
@@ -882,7 +919,7 @@ static int rt_garbage_collect(struct dst_ops *ops) | |||
882 | rthp = &rt_hash_table[k].chain; | 919 | rthp = &rt_hash_table[k].chain; |
883 | spin_lock_bh(rt_hash_lock_addr(k)); | 920 | spin_lock_bh(rt_hash_lock_addr(k)); |
884 | while ((rth = *rthp) != NULL) { | 921 | while ((rth = *rthp) != NULL) { |
885 | if (rth->rt_genid == atomic_read(&rt_genid) && | 922 | if (!rt_is_expired(rth) && |
886 | !rt_may_expire(rth, tmo, expire)) { | 923 | !rt_may_expire(rth, tmo, expire)) { |
887 | tmo >>= 1; | 924 | tmo >>= 1; |
888 | rthp = &rth->u.dst.rt_next; | 925 | rthp = &rth->u.dst.rt_next; |
@@ -964,7 +1001,7 @@ restart: | |||
964 | 1001 | ||
965 | spin_lock_bh(rt_hash_lock_addr(hash)); | 1002 | spin_lock_bh(rt_hash_lock_addr(hash)); |
966 | while ((rth = *rthp) != NULL) { | 1003 | while ((rth = *rthp) != NULL) { |
967 | if (rth->rt_genid != atomic_read(&rt_genid)) { | 1004 | if (rt_is_expired(rth)) { |
968 | *rthp = rth->u.dst.rt_next; | 1005 | *rthp = rth->u.dst.rt_next; |
969 | rt_free(rth); | 1006 | rt_free(rth); |
970 | continue; | 1007 | continue; |
@@ -1140,7 +1177,7 @@ static void rt_del(unsigned hash, struct rtable *rt) | |||
1140 | spin_lock_bh(rt_hash_lock_addr(hash)); | 1177 | spin_lock_bh(rt_hash_lock_addr(hash)); |
1141 | ip_rt_put(rt); | 1178 | ip_rt_put(rt); |
1142 | while ((aux = *rthp) != NULL) { | 1179 | while ((aux = *rthp) != NULL) { |
1143 | if (aux == rt || (aux->rt_genid != atomic_read(&rt_genid))) { | 1180 | if (aux == rt || rt_is_expired(aux)) { |
1144 | *rthp = aux->u.dst.rt_next; | 1181 | *rthp = aux->u.dst.rt_next; |
1145 | rt_free(aux); | 1182 | rt_free(aux); |
1146 | continue; | 1183 | continue; |
@@ -1182,7 +1219,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1182 | 1219 | ||
1183 | for (i = 0; i < 2; i++) { | 1220 | for (i = 0; i < 2; i++) { |
1184 | for (k = 0; k < 2; k++) { | 1221 | for (k = 0; k < 2; k++) { |
1185 | unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]); | 1222 | unsigned hash = rt_hash(daddr, skeys[i], ikeys[k], |
1223 | rt_genid(net)); | ||
1186 | 1224 | ||
1187 | rthp=&rt_hash_table[hash].chain; | 1225 | rthp=&rt_hash_table[hash].chain; |
1188 | 1226 | ||
@@ -1194,7 +1232,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1194 | rth->fl.fl4_src != skeys[i] || | 1232 | rth->fl.fl4_src != skeys[i] || |
1195 | rth->fl.oif != ikeys[k] || | 1233 | rth->fl.oif != ikeys[k] || |
1196 | rth->fl.iif != 0 || | 1234 | rth->fl.iif != 0 || |
1197 | rth->rt_genid != atomic_read(&rt_genid) || | 1235 | rt_is_expired(rth) || |
1198 | !net_eq(dev_net(rth->u.dst.dev), net)) { | 1236 | !net_eq(dev_net(rth->u.dst.dev), net)) { |
1199 | rthp = &rth->u.dst.rt_next; | 1237 | rthp = &rth->u.dst.rt_next; |
1200 | continue; | 1238 | continue; |
@@ -1233,7 +1271,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1233 | rt->u.dst.neighbour = NULL; | 1271 | rt->u.dst.neighbour = NULL; |
1234 | rt->u.dst.hh = NULL; | 1272 | rt->u.dst.hh = NULL; |
1235 | rt->u.dst.xfrm = NULL; | 1273 | rt->u.dst.xfrm = NULL; |
1236 | rt->rt_genid = atomic_read(&rt_genid); | 1274 | rt->rt_genid = rt_genid(net); |
1237 | rt->rt_flags |= RTCF_REDIRECTED; | 1275 | rt->rt_flags |= RTCF_REDIRECTED; |
1238 | 1276 | ||
1239 | /* Gateway is different ... */ | 1277 | /* Gateway is different ... */ |
@@ -1297,7 +1335,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) | |||
1297 | } else if ((rt->rt_flags & RTCF_REDIRECTED) || | 1335 | } else if ((rt->rt_flags & RTCF_REDIRECTED) || |
1298 | rt->u.dst.expires) { | 1336 | rt->u.dst.expires) { |
1299 | unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, | 1337 | unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, |
1300 | rt->fl.oif); | 1338 | rt->fl.oif, |
1339 | rt_genid(dev_net(dst->dev))); | ||
1301 | #if RT_CACHE_DEBUG >= 1 | 1340 | #if RT_CACHE_DEBUG >= 1 |
1302 | printk(KERN_DEBUG "ipv4_negative_advice: redirect to " | 1341 | printk(KERN_DEBUG "ipv4_negative_advice: redirect to " |
1303 | NIPQUAD_FMT "/%02x dropped\n", | 1342 | NIPQUAD_FMT "/%02x dropped\n", |
@@ -1390,7 +1429,8 @@ static int ip_error(struct sk_buff *skb) | |||
1390 | break; | 1429 | break; |
1391 | case ENETUNREACH: | 1430 | case ENETUNREACH: |
1392 | code = ICMP_NET_UNREACH; | 1431 | code = ICMP_NET_UNREACH; |
1393 | IP_INC_STATS_BH(IPSTATS_MIB_INNOROUTES); | 1432 | IP_INC_STATS_BH(dev_net(rt->u.dst.dev), |
1433 | IPSTATS_MIB_INNOROUTES); | ||
1394 | break; | 1434 | break; |
1395 | case EACCES: | 1435 | case EACCES: |
1396 | code = ICMP_PKT_FILTERED; | 1436 | code = ICMP_PKT_FILTERED; |
@@ -1446,7 +1486,8 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, | |||
1446 | 1486 | ||
1447 | for (k = 0; k < 2; k++) { | 1487 | for (k = 0; k < 2; k++) { |
1448 | for (i = 0; i < 2; i++) { | 1488 | for (i = 0; i < 2; i++) { |
1449 | unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]); | 1489 | unsigned hash = rt_hash(daddr, skeys[i], ikeys[k], |
1490 | rt_genid(net)); | ||
1450 | 1491 | ||
1451 | rcu_read_lock(); | 1492 | rcu_read_lock(); |
1452 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; | 1493 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; |
@@ -1461,7 +1502,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, | |||
1461 | rth->fl.iif != 0 || | 1502 | rth->fl.iif != 0 || |
1462 | dst_metric_locked(&rth->u.dst, RTAX_MTU) || | 1503 | dst_metric_locked(&rth->u.dst, RTAX_MTU) || |
1463 | !net_eq(dev_net(rth->u.dst.dev), net) || | 1504 | !net_eq(dev_net(rth->u.dst.dev), net) || |
1464 | rth->rt_genid != atomic_read(&rt_genid)) | 1505 | !rt_is_expired(rth)) |
1465 | continue; | 1506 | continue; |
1466 | 1507 | ||
1467 | if (new_mtu < 68 || new_mtu >= old_mtu) { | 1508 | if (new_mtu < 68 || new_mtu >= old_mtu) { |
@@ -1696,7 +1737,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1696 | rth->fl.oif = 0; | 1737 | rth->fl.oif = 0; |
1697 | rth->rt_gateway = daddr; | 1738 | rth->rt_gateway = daddr; |
1698 | rth->rt_spec_dst= spec_dst; | 1739 | rth->rt_spec_dst= spec_dst; |
1699 | rth->rt_genid = atomic_read(&rt_genid); | 1740 | rth->rt_genid = rt_genid(dev_net(dev)); |
1700 | rth->rt_flags = RTCF_MULTICAST; | 1741 | rth->rt_flags = RTCF_MULTICAST; |
1701 | rth->rt_type = RTN_MULTICAST; | 1742 | rth->rt_type = RTN_MULTICAST; |
1702 | if (our) { | 1743 | if (our) { |
@@ -1711,7 +1752,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1711 | RT_CACHE_STAT_INC(in_slow_mc); | 1752 | RT_CACHE_STAT_INC(in_slow_mc); |
1712 | 1753 | ||
1713 | in_dev_put(in_dev); | 1754 | in_dev_put(in_dev); |
1714 | hash = rt_hash(daddr, saddr, dev->ifindex); | 1755 | hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); |
1715 | return rt_intern_hash(hash, rth, &skb->rtable); | 1756 | return rt_intern_hash(hash, rth, &skb->rtable); |
1716 | 1757 | ||
1717 | e_nobufs: | 1758 | e_nobufs: |
@@ -1837,7 +1878,7 @@ static int __mkroute_input(struct sk_buff *skb, | |||
1837 | 1878 | ||
1838 | rth->u.dst.input = ip_forward; | 1879 | rth->u.dst.input = ip_forward; |
1839 | rth->u.dst.output = ip_output; | 1880 | rth->u.dst.output = ip_output; |
1840 | rth->rt_genid = atomic_read(&rt_genid); | 1881 | rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev)); |
1841 | 1882 | ||
1842 | rt_set_nexthop(rth, res, itag); | 1883 | rt_set_nexthop(rth, res, itag); |
1843 | 1884 | ||
@@ -1872,7 +1913,8 @@ static int ip_mkroute_input(struct sk_buff *skb, | |||
1872 | return err; | 1913 | return err; |
1873 | 1914 | ||
1874 | /* put it into the cache */ | 1915 | /* put it into the cache */ |
1875 | hash = rt_hash(daddr, saddr, fl->iif); | 1916 | hash = rt_hash(daddr, saddr, fl->iif, |
1917 | rt_genid(dev_net(rth->u.dst.dev))); | ||
1876 | return rt_intern_hash(hash, rth, &skb->rtable); | 1918 | return rt_intern_hash(hash, rth, &skb->rtable); |
1877 | } | 1919 | } |
1878 | 1920 | ||
@@ -1998,7 +2040,7 @@ local_input: | |||
1998 | goto e_nobufs; | 2040 | goto e_nobufs; |
1999 | 2041 | ||
2000 | rth->u.dst.output= ip_rt_bug; | 2042 | rth->u.dst.output= ip_rt_bug; |
2001 | rth->rt_genid = atomic_read(&rt_genid); | 2043 | rth->rt_genid = rt_genid(net); |
2002 | 2044 | ||
2003 | atomic_set(&rth->u.dst.__refcnt, 1); | 2045 | atomic_set(&rth->u.dst.__refcnt, 1); |
2004 | rth->u.dst.flags= DST_HOST; | 2046 | rth->u.dst.flags= DST_HOST; |
@@ -2028,7 +2070,7 @@ local_input: | |||
2028 | rth->rt_flags &= ~RTCF_LOCAL; | 2070 | rth->rt_flags &= ~RTCF_LOCAL; |
2029 | } | 2071 | } |
2030 | rth->rt_type = res.type; | 2072 | rth->rt_type = res.type; |
2031 | hash = rt_hash(daddr, saddr, fl.iif); | 2073 | hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); |
2032 | err = rt_intern_hash(hash, rth, &skb->rtable); | 2074 | err = rt_intern_hash(hash, rth, &skb->rtable); |
2033 | goto done; | 2075 | goto done; |
2034 | 2076 | ||
@@ -2079,7 +2121,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2079 | 2121 | ||
2080 | net = dev_net(dev); | 2122 | net = dev_net(dev); |
2081 | tos &= IPTOS_RT_MASK; | 2123 | tos &= IPTOS_RT_MASK; |
2082 | hash = rt_hash(daddr, saddr, iif); | 2124 | hash = rt_hash(daddr, saddr, iif, rt_genid(net)); |
2083 | 2125 | ||
2084 | rcu_read_lock(); | 2126 | rcu_read_lock(); |
2085 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; | 2127 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; |
@@ -2091,7 +2133,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2091 | (rth->fl.fl4_tos ^ tos)) == 0 && | 2133 | (rth->fl.fl4_tos ^ tos)) == 0 && |
2092 | rth->fl.mark == skb->mark && | 2134 | rth->fl.mark == skb->mark && |
2093 | net_eq(dev_net(rth->u.dst.dev), net) && | 2135 | net_eq(dev_net(rth->u.dst.dev), net) && |
2094 | rth->rt_genid == atomic_read(&rt_genid)) { | 2136 | !rt_is_expired(rth)) { |
2095 | dst_use(&rth->u.dst, jiffies); | 2137 | dst_use(&rth->u.dst, jiffies); |
2096 | RT_CACHE_STAT_INC(in_hit); | 2138 | RT_CACHE_STAT_INC(in_hit); |
2097 | rcu_read_unlock(); | 2139 | rcu_read_unlock(); |
@@ -2219,7 +2261,7 @@ static int __mkroute_output(struct rtable **result, | |||
2219 | rth->rt_spec_dst= fl->fl4_src; | 2261 | rth->rt_spec_dst= fl->fl4_src; |
2220 | 2262 | ||
2221 | rth->u.dst.output=ip_output; | 2263 | rth->u.dst.output=ip_output; |
2222 | rth->rt_genid = atomic_read(&rt_genid); | 2264 | rth->rt_genid = rt_genid(dev_net(dev_out)); |
2223 | 2265 | ||
2224 | RT_CACHE_STAT_INC(out_slow_tot); | 2266 | RT_CACHE_STAT_INC(out_slow_tot); |
2225 | 2267 | ||
@@ -2268,7 +2310,8 @@ static int ip_mkroute_output(struct rtable **rp, | |||
2268 | int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags); | 2310 | int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags); |
2269 | unsigned hash; | 2311 | unsigned hash; |
2270 | if (err == 0) { | 2312 | if (err == 0) { |
2271 | hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif); | 2313 | hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, |
2314 | rt_genid(dev_net(dev_out))); | ||
2272 | err = rt_intern_hash(hash, rth, rp); | 2315 | err = rt_intern_hash(hash, rth, rp); |
2273 | } | 2316 | } |
2274 | 2317 | ||
@@ -2480,7 +2523,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp, | |||
2480 | unsigned hash; | 2523 | unsigned hash; |
2481 | struct rtable *rth; | 2524 | struct rtable *rth; |
2482 | 2525 | ||
2483 | hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif); | 2526 | hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net)); |
2484 | 2527 | ||
2485 | rcu_read_lock_bh(); | 2528 | rcu_read_lock_bh(); |
2486 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; | 2529 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; |
@@ -2493,7 +2536,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp, | |||
2493 | !((rth->fl.fl4_tos ^ flp->fl4_tos) & | 2536 | !((rth->fl.fl4_tos ^ flp->fl4_tos) & |
2494 | (IPTOS_RT_MASK | RTO_ONLINK)) && | 2537 | (IPTOS_RT_MASK | RTO_ONLINK)) && |
2495 | net_eq(dev_net(rth->u.dst.dev), net) && | 2538 | net_eq(dev_net(rth->u.dst.dev), net) && |
2496 | rth->rt_genid == atomic_read(&rt_genid)) { | 2539 | !rt_is_expired(rth)) { |
2497 | dst_use(&rth->u.dst, jiffies); | 2540 | dst_use(&rth->u.dst, jiffies); |
2498 | RT_CACHE_STAT_INC(out_hit); | 2541 | RT_CACHE_STAT_INC(out_hit); |
2499 | rcu_read_unlock_bh(); | 2542 | rcu_read_unlock_bh(); |
@@ -2524,7 +2567,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = { | |||
2524 | }; | 2567 | }; |
2525 | 2568 | ||
2526 | 2569 | ||
2527 | static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp) | 2570 | static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp) |
2528 | { | 2571 | { |
2529 | struct rtable *ort = *rp; | 2572 | struct rtable *ort = *rp; |
2530 | struct rtable *rt = (struct rtable *) | 2573 | struct rtable *rt = (struct rtable *) |
@@ -2548,7 +2591,7 @@ static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp) | |||
2548 | rt->idev = ort->idev; | 2591 | rt->idev = ort->idev; |
2549 | if (rt->idev) | 2592 | if (rt->idev) |
2550 | in_dev_hold(rt->idev); | 2593 | in_dev_hold(rt->idev); |
2551 | rt->rt_genid = atomic_read(&rt_genid); | 2594 | rt->rt_genid = rt_genid(net); |
2552 | rt->rt_flags = ort->rt_flags; | 2595 | rt->rt_flags = ort->rt_flags; |
2553 | rt->rt_type = ort->rt_type; | 2596 | rt->rt_type = ort->rt_type; |
2554 | rt->rt_dst = ort->rt_dst; | 2597 | rt->rt_dst = ort->rt_dst; |
@@ -2584,7 +2627,7 @@ int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp, | |||
2584 | err = __xfrm_lookup((struct dst_entry **)rp, flp, sk, | 2627 | err = __xfrm_lookup((struct dst_entry **)rp, flp, sk, |
2585 | flags ? XFRM_LOOKUP_WAIT : 0); | 2628 | flags ? XFRM_LOOKUP_WAIT : 0); |
2586 | if (err == -EREMOTE) | 2629 | if (err == -EREMOTE) |
2587 | err = ipv4_dst_blackhole(rp, flp); | 2630 | err = ipv4_dst_blackhole(net, rp, flp); |
2588 | 2631 | ||
2589 | return err; | 2632 | return err; |
2590 | } | 2633 | } |
@@ -2803,7 +2846,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2803 | rt = rcu_dereference(rt->u.dst.rt_next), idx++) { | 2846 | rt = rcu_dereference(rt->u.dst.rt_next), idx++) { |
2804 | if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx) | 2847 | if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx) |
2805 | continue; | 2848 | continue; |
2806 | if (rt->rt_genid != atomic_read(&rt_genid)) | 2849 | if (rt_is_expired(rt)) |
2807 | continue; | 2850 | continue; |
2808 | skb->dst = dst_clone(&rt->u.dst); | 2851 | skb->dst = dst_clone(&rt->u.dst); |
2809 | if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid, | 2852 | if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid, |
@@ -2827,19 +2870,25 @@ done: | |||
2827 | 2870 | ||
2828 | void ip_rt_multicast_event(struct in_device *in_dev) | 2871 | void ip_rt_multicast_event(struct in_device *in_dev) |
2829 | { | 2872 | { |
2830 | rt_cache_flush(0); | 2873 | rt_cache_flush(dev_net(in_dev->dev), 0); |
2831 | } | 2874 | } |
2832 | 2875 | ||
2833 | #ifdef CONFIG_SYSCTL | 2876 | #ifdef CONFIG_SYSCTL |
2834 | static int flush_delay; | 2877 | static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write, |
2835 | |||
2836 | static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write, | ||
2837 | struct file *filp, void __user *buffer, | 2878 | struct file *filp, void __user *buffer, |
2838 | size_t *lenp, loff_t *ppos) | 2879 | size_t *lenp, loff_t *ppos) |
2839 | { | 2880 | { |
2840 | if (write) { | 2881 | if (write) { |
2841 | proc_dointvec(ctl, write, filp, buffer, lenp, ppos); | 2882 | int flush_delay; |
2842 | rt_cache_flush(flush_delay); | 2883 | ctl_table ctl; |
2884 | struct net *net; | ||
2885 | |||
2886 | memcpy(&ctl, __ctl, sizeof(ctl)); | ||
2887 | ctl.data = &flush_delay; | ||
2888 | proc_dointvec(&ctl, write, filp, buffer, lenp, ppos); | ||
2889 | |||
2890 | net = (struct net *)__ctl->extra1; | ||
2891 | rt_cache_flush(net, flush_delay); | ||
2843 | return 0; | 2892 | return 0; |
2844 | } | 2893 | } |
2845 | 2894 | ||
@@ -2855,25 +2904,18 @@ static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table, | |||
2855 | size_t newlen) | 2904 | size_t newlen) |
2856 | { | 2905 | { |
2857 | int delay; | 2906 | int delay; |
2907 | struct net *net; | ||
2858 | if (newlen != sizeof(int)) | 2908 | if (newlen != sizeof(int)) |
2859 | return -EINVAL; | 2909 | return -EINVAL; |
2860 | if (get_user(delay, (int __user *)newval)) | 2910 | if (get_user(delay, (int __user *)newval)) |
2861 | return -EFAULT; | 2911 | return -EFAULT; |
2862 | rt_cache_flush(delay); | 2912 | net = (struct net *)table->extra1; |
2913 | rt_cache_flush(net, delay); | ||
2863 | return 0; | 2914 | return 0; |
2864 | } | 2915 | } |
2865 | 2916 | ||
2866 | ctl_table ipv4_route_table[] = { | 2917 | ctl_table ipv4_route_table[] = { |
2867 | { | 2918 | { |
2868 | .ctl_name = NET_IPV4_ROUTE_FLUSH, | ||
2869 | .procname = "flush", | ||
2870 | .data = &flush_delay, | ||
2871 | .maxlen = sizeof(int), | ||
2872 | .mode = 0200, | ||
2873 | .proc_handler = &ipv4_sysctl_rtcache_flush, | ||
2874 | .strategy = &ipv4_sysctl_rtcache_flush_strategy, | ||
2875 | }, | ||
2876 | { | ||
2877 | .ctl_name = NET_IPV4_ROUTE_GC_THRESH, | 2919 | .ctl_name = NET_IPV4_ROUTE_GC_THRESH, |
2878 | .procname = "gc_thresh", | 2920 | .procname = "gc_thresh", |
2879 | .data = &ipv4_dst_ops.gc_thresh, | 2921 | .data = &ipv4_dst_ops.gc_thresh, |
@@ -3011,8 +3053,97 @@ ctl_table ipv4_route_table[] = { | |||
3011 | }, | 3053 | }, |
3012 | { .ctl_name = 0 } | 3054 | { .ctl_name = 0 } |
3013 | }; | 3055 | }; |
3056 | |||
3057 | static __net_initdata struct ctl_path ipv4_route_path[] = { | ||
3058 | { .procname = "net", .ctl_name = CTL_NET, }, | ||
3059 | { .procname = "ipv4", .ctl_name = NET_IPV4, }, | ||
3060 | { .procname = "route", .ctl_name = NET_IPV4_ROUTE, }, | ||
3061 | { }, | ||
3062 | }; | ||
3063 | |||
3064 | |||
3065 | static struct ctl_table ipv4_route_flush_table[] = { | ||
3066 | { | ||
3067 | .ctl_name = NET_IPV4_ROUTE_FLUSH, | ||
3068 | .procname = "flush", | ||
3069 | .maxlen = sizeof(int), | ||
3070 | .mode = 0200, | ||
3071 | .proc_handler = &ipv4_sysctl_rtcache_flush, | ||
3072 | .strategy = &ipv4_sysctl_rtcache_flush_strategy, | ||
3073 | }, | ||
3074 | { .ctl_name = 0 }, | ||
3075 | }; | ||
3076 | |||
3077 | static __net_init int sysctl_route_net_init(struct net *net) | ||
3078 | { | ||
3079 | struct ctl_table *tbl; | ||
3080 | |||
3081 | tbl = ipv4_route_flush_table; | ||
3082 | if (net != &init_net) { | ||
3083 | tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); | ||
3084 | if (tbl == NULL) | ||
3085 | goto err_dup; | ||
3086 | } | ||
3087 | tbl[0].extra1 = net; | ||
3088 | |||
3089 | net->ipv4.route_hdr = | ||
3090 | register_net_sysctl_table(net, ipv4_route_path, tbl); | ||
3091 | if (net->ipv4.route_hdr == NULL) | ||
3092 | goto err_reg; | ||
3093 | return 0; | ||
3094 | |||
3095 | err_reg: | ||
3096 | if (tbl != ipv4_route_flush_table) | ||
3097 | kfree(tbl); | ||
3098 | err_dup: | ||
3099 | return -ENOMEM; | ||
3100 | } | ||
3101 | |||
3102 | static __net_exit void sysctl_route_net_exit(struct net *net) | ||
3103 | { | ||
3104 | struct ctl_table *tbl; | ||
3105 | |||
3106 | tbl = net->ipv4.route_hdr->ctl_table_arg; | ||
3107 | unregister_net_sysctl_table(net->ipv4.route_hdr); | ||
3108 | BUG_ON(tbl == ipv4_route_flush_table); | ||
3109 | kfree(tbl); | ||
3110 | } | ||
3111 | |||
3112 | static __net_initdata struct pernet_operations sysctl_route_ops = { | ||
3113 | .init = sysctl_route_net_init, | ||
3114 | .exit = sysctl_route_net_exit, | ||
3115 | }; | ||
3014 | #endif | 3116 | #endif |
3015 | 3117 | ||
3118 | |||
3119 | static __net_init int rt_secret_timer_init(struct net *net) | ||
3120 | { | ||
3121 | atomic_set(&net->ipv4.rt_genid, | ||
3122 | (int) ((num_physpages ^ (num_physpages>>8)) ^ | ||
3123 | (jiffies ^ (jiffies >> 7)))); | ||
3124 | |||
3125 | net->ipv4.rt_secret_timer.function = rt_secret_rebuild; | ||
3126 | net->ipv4.rt_secret_timer.data = (unsigned long)net; | ||
3127 | init_timer_deferrable(&net->ipv4.rt_secret_timer); | ||
3128 | |||
3129 | net->ipv4.rt_secret_timer.expires = | ||
3130 | jiffies + net_random() % ip_rt_secret_interval + | ||
3131 | ip_rt_secret_interval; | ||
3132 | add_timer(&net->ipv4.rt_secret_timer); | ||
3133 | return 0; | ||
3134 | } | ||
3135 | |||
3136 | static __net_exit void rt_secret_timer_exit(struct net *net) | ||
3137 | { | ||
3138 | del_timer_sync(&net->ipv4.rt_secret_timer); | ||
3139 | } | ||
3140 | |||
3141 | static __net_initdata struct pernet_operations rt_secret_timer_ops = { | ||
3142 | .init = rt_secret_timer_init, | ||
3143 | .exit = rt_secret_timer_exit, | ||
3144 | }; | ||
3145 | |||
3146 | |||
3016 | #ifdef CONFIG_NET_CLS_ROUTE | 3147 | #ifdef CONFIG_NET_CLS_ROUTE |
3017 | struct ip_rt_acct *ip_rt_acct __read_mostly; | 3148 | struct ip_rt_acct *ip_rt_acct __read_mostly; |
3018 | #endif /* CONFIG_NET_CLS_ROUTE */ | 3149 | #endif /* CONFIG_NET_CLS_ROUTE */ |
@@ -3031,9 +3162,6 @@ int __init ip_rt_init(void) | |||
3031 | { | 3162 | { |
3032 | int rc = 0; | 3163 | int rc = 0; |
3033 | 3164 | ||
3034 | atomic_set(&rt_genid, (int) ((num_physpages ^ (num_physpages>>8)) ^ | ||
3035 | (jiffies ^ (jiffies >> 7)))); | ||
3036 | |||
3037 | #ifdef CONFIG_NET_CLS_ROUTE | 3165 | #ifdef CONFIG_NET_CLS_ROUTE |
3038 | ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct)); | 3166 | ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct)); |
3039 | if (!ip_rt_acct) | 3167 | if (!ip_rt_acct) |
@@ -3065,19 +3193,14 @@ int __init ip_rt_init(void) | |||
3065 | devinet_init(); | 3193 | devinet_init(); |
3066 | ip_fib_init(); | 3194 | ip_fib_init(); |
3067 | 3195 | ||
3068 | rt_secret_timer.function = rt_secret_rebuild; | ||
3069 | rt_secret_timer.data = 0; | ||
3070 | init_timer_deferrable(&rt_secret_timer); | ||
3071 | |||
3072 | /* All the timers, started at system startup tend | 3196 | /* All the timers, started at system startup tend |
3073 | to synchronize. Perturb it a bit. | 3197 | to synchronize. Perturb it a bit. |
3074 | */ | 3198 | */ |
3075 | schedule_delayed_work(&expires_work, | 3199 | schedule_delayed_work(&expires_work, |
3076 | net_random() % ip_rt_gc_interval + ip_rt_gc_interval); | 3200 | net_random() % ip_rt_gc_interval + ip_rt_gc_interval); |
3077 | 3201 | ||
3078 | rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval + | 3202 | if (register_pernet_subsys(&rt_secret_timer_ops)) |
3079 | ip_rt_secret_interval; | 3203 | printk(KERN_ERR "Unable to setup rt_secret_timer\n"); |
3080 | add_timer(&rt_secret_timer); | ||
3081 | 3204 | ||
3082 | if (ip_rt_proc_init()) | 3205 | if (ip_rt_proc_init()) |
3083 | printk(KERN_ERR "Unable to create route proc files\n"); | 3206 | printk(KERN_ERR "Unable to create route proc files\n"); |
@@ -3087,6 +3210,9 @@ int __init ip_rt_init(void) | |||
3087 | #endif | 3210 | #endif |
3088 | rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL); | 3211 | rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL); |
3089 | 3212 | ||
3213 | #ifdef CONFIG_SYSCTL | ||
3214 | register_pernet_subsys(&sysctl_route_ops); | ||
3215 | #endif | ||
3090 | return rc; | 3216 | return rc; |
3091 | } | 3217 | } |
3092 | 3218 | ||
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index d182a2a26291..51bc24d3b8a7 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -8,8 +8,6 @@ | |||
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
9 | * as published by the Free Software Foundation; either version | 9 | * as published by the Free Software Foundation; either version |
10 | * 2 of the License, or (at your option) any later version. | 10 | * 2 of the License, or (at your option) any later version. |
11 | * | ||
12 | * $Id: syncookies.c,v 1.18 2002/02/01 22:01:04 davem Exp $ | ||
13 | */ | 11 | */ |
14 | 12 | ||
15 | #include <linux/tcp.h> | 13 | #include <linux/tcp.h> |
@@ -175,7 +173,7 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) | |||
175 | ; | 173 | ; |
176 | *mssp = msstab[mssind] + 1; | 174 | *mssp = msstab[mssind] + 1; |
177 | 175 | ||
178 | NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT); | 176 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); |
179 | 177 | ||
180 | return secure_tcp_syn_cookie(iph->saddr, iph->daddr, | 178 | return secure_tcp_syn_cookie(iph->saddr, iph->daddr, |
181 | th->source, th->dest, ntohl(th->seq), | 179 | th->source, th->dest, ntohl(th->seq), |
@@ -271,11 +269,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
271 | 269 | ||
272 | if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) || | 270 | if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) || |
273 | (mss = cookie_check(skb, cookie)) == 0) { | 271 | (mss = cookie_check(skb, cookie)) == 0) { |
274 | NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED); | 272 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); |
275 | goto out; | 273 | goto out; |
276 | } | 274 | } |
277 | 275 | ||
278 | NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV); | 276 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); |
279 | 277 | ||
280 | /* check for timestamp cookie support */ | 278 | /* check for timestamp cookie support */ |
281 | memset(&tcp_opt, 0, sizeof(tcp_opt)); | 279 | memset(&tcp_opt, 0, sizeof(tcp_opt)); |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index c437f804ee38..14ef202a2254 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * sysctl_net_ipv4.c: sysctl interface to net IPV4 subsystem. | 2 | * sysctl_net_ipv4.c: sysctl interface to net IPV4 subsystem. |
3 | * | 3 | * |
4 | * $Id: sysctl_net_ipv4.c,v 1.50 2001/10/20 00:00:11 davem Exp $ | ||
5 | * | ||
6 | * Begun April 1, 1996, Mike Shaver. | 4 | * Begun April 1, 1996, Mike Shaver. |
7 | * Added /proc/sys/net/ipv4 directory entry (empty =) ). [MS] | 5 | * Added /proc/sys/net/ipv4 directory entry (empty =) ). [MS] |
8 | */ | 6 | */ |
@@ -795,7 +793,8 @@ static struct ctl_table ipv4_net_table[] = { | |||
795 | .data = &init_net.ipv4.sysctl_icmp_ratelimit, | 793 | .data = &init_net.ipv4.sysctl_icmp_ratelimit, |
796 | .maxlen = sizeof(int), | 794 | .maxlen = sizeof(int), |
797 | .mode = 0644, | 795 | .mode = 0644, |
798 | .proc_handler = &proc_dointvec | 796 | .proc_handler = &proc_dointvec_ms_jiffies, |
797 | .strategy = &sysctl_ms_jiffies | ||
799 | }, | 798 | }, |
800 | { | 799 | { |
801 | .ctl_name = NET_IPV4_ICMP_RATEMASK, | 800 | .ctl_name = NET_IPV4_ICMP_RATEMASK, |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1d723de18686..0b491bf03db4 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * Implementation of the Transmission Control Protocol(TCP). | 6 | * Implementation of the Transmission Control Protocol(TCP). |
7 | * | 7 | * |
8 | * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $ | ||
9 | * | ||
10 | * Authors: Ross Biro | 8 | * Authors: Ross Biro |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Mark Evans, <evansmp@uhura.aston.ac.uk> | 10 | * Mark Evans, <evansmp@uhura.aston.ac.uk> |
@@ -279,8 +277,6 @@ | |||
279 | 277 | ||
280 | int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; | 278 | int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; |
281 | 279 | ||
282 | DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly; | ||
283 | |||
284 | atomic_t tcp_orphan_count = ATOMIC_INIT(0); | 280 | atomic_t tcp_orphan_count = ATOMIC_INIT(0); |
285 | 281 | ||
286 | EXPORT_SYMBOL_GPL(tcp_orphan_count); | 282 | EXPORT_SYMBOL_GPL(tcp_orphan_count); |
@@ -318,10 +314,10 @@ int tcp_memory_pressure __read_mostly; | |||
318 | 314 | ||
319 | EXPORT_SYMBOL(tcp_memory_pressure); | 315 | EXPORT_SYMBOL(tcp_memory_pressure); |
320 | 316 | ||
321 | void tcp_enter_memory_pressure(void) | 317 | void tcp_enter_memory_pressure(struct sock *sk) |
322 | { | 318 | { |
323 | if (!tcp_memory_pressure) { | 319 | if (!tcp_memory_pressure) { |
324 | NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES); | 320 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES); |
325 | tcp_memory_pressure = 1; | 321 | tcp_memory_pressure = 1; |
326 | } | 322 | } |
327 | } | 323 | } |
@@ -346,8 +342,8 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
346 | return inet_csk_listen_poll(sk); | 342 | return inet_csk_listen_poll(sk); |
347 | 343 | ||
348 | /* Socket is not locked. We are protected from async events | 344 | /* Socket is not locked. We are protected from async events |
349 | by poll logic and correct handling of state changes | 345 | * by poll logic and correct handling of state changes |
350 | made by another threads is impossible in any case. | 346 | * made by other threads is impossible in any case. |
351 | */ | 347 | */ |
352 | 348 | ||
353 | mask = 0; | 349 | mask = 0; |
@@ -373,10 +369,10 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
373 | * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP | 369 | * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP |
374 | * if and only if shutdown has been made in both directions. | 370 | * if and only if shutdown has been made in both directions. |
375 | * Actually, it is interesting to look how Solaris and DUX | 371 | * Actually, it is interesting to look how Solaris and DUX |
376 | * solve this dilemma. I would prefer, if PULLHUP were maskable, | 372 | * solve this dilemma. I would prefer, if POLLHUP were maskable, |
377 | * then we could set it on SND_SHUTDOWN. BTW examples given | 373 | * then we could set it on SND_SHUTDOWN. BTW examples given |
378 | * in Stevens' books assume exactly this behaviour, it explains | 374 | * in Stevens' books assume exactly this behaviour, it explains |
379 | * why PULLHUP is incompatible with POLLOUT. --ANK | 375 | * why POLLHUP is incompatible with POLLOUT. --ANK |
380 | * | 376 | * |
381 | * NOTE. Check for TCP_CLOSE is added. The goal is to prevent | 377 | * NOTE. Check for TCP_CLOSE is added. The goal is to prevent |
382 | * blocking on fresh not-connected or disconnected socket. --ANK | 378 | * blocking on fresh not-connected or disconnected socket. --ANK |
@@ -651,7 +647,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) | |||
651 | } | 647 | } |
652 | __kfree_skb(skb); | 648 | __kfree_skb(skb); |
653 | } else { | 649 | } else { |
654 | sk->sk_prot->enter_memory_pressure(); | 650 | sk->sk_prot->enter_memory_pressure(sk); |
655 | sk_stream_moderate_sndbuf(sk); | 651 | sk_stream_moderate_sndbuf(sk); |
656 | } | 652 | } |
657 | return NULL; | 653 | return NULL; |
@@ -1155,7 +1151,7 @@ static void tcp_prequeue_process(struct sock *sk) | |||
1155 | struct sk_buff *skb; | 1151 | struct sk_buff *skb; |
1156 | struct tcp_sock *tp = tcp_sk(sk); | 1152 | struct tcp_sock *tp = tcp_sk(sk); |
1157 | 1153 | ||
1158 | NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED); | 1154 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED); |
1159 | 1155 | ||
1160 | /* RX process wants to run with disabled BHs, though it is not | 1156 | /* RX process wants to run with disabled BHs, though it is not |
1161 | * necessary */ | 1157 | * necessary */ |
@@ -1477,7 +1473,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1477 | /* __ Restore normal policy in scheduler __ */ | 1473 | /* __ Restore normal policy in scheduler __ */ |
1478 | 1474 | ||
1479 | if ((chunk = len - tp->ucopy.len) != 0) { | 1475 | if ((chunk = len - tp->ucopy.len) != 0) { |
1480 | NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); | 1476 | NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); |
1481 | len -= chunk; | 1477 | len -= chunk; |
1482 | copied += chunk; | 1478 | copied += chunk; |
1483 | } | 1479 | } |
@@ -1488,7 +1484,7 @@ do_prequeue: | |||
1488 | tcp_prequeue_process(sk); | 1484 | tcp_prequeue_process(sk); |
1489 | 1485 | ||
1490 | if ((chunk = len - tp->ucopy.len) != 0) { | 1486 | if ((chunk = len - tp->ucopy.len) != 0) { |
1491 | NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); | 1487 | NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); |
1492 | len -= chunk; | 1488 | len -= chunk; |
1493 | copied += chunk; | 1489 | copied += chunk; |
1494 | } | 1490 | } |
@@ -1603,7 +1599,7 @@ skip_copy: | |||
1603 | tcp_prequeue_process(sk); | 1599 | tcp_prequeue_process(sk); |
1604 | 1600 | ||
1605 | if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) { | 1601 | if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) { |
1606 | NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); | 1602 | NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); |
1607 | len -= chunk; | 1603 | len -= chunk; |
1608 | copied += chunk; | 1604 | copied += chunk; |
1609 | } | 1605 | } |
@@ -1670,12 +1666,12 @@ void tcp_set_state(struct sock *sk, int state) | |||
1670 | switch (state) { | 1666 | switch (state) { |
1671 | case TCP_ESTABLISHED: | 1667 | case TCP_ESTABLISHED: |
1672 | if (oldstate != TCP_ESTABLISHED) | 1668 | if (oldstate != TCP_ESTABLISHED) |
1673 | TCP_INC_STATS(TCP_MIB_CURRESTAB); | 1669 | TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); |
1674 | break; | 1670 | break; |
1675 | 1671 | ||
1676 | case TCP_CLOSE: | 1672 | case TCP_CLOSE: |
1677 | if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) | 1673 | if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) |
1678 | TCP_INC_STATS(TCP_MIB_ESTABRESETS); | 1674 | TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); |
1679 | 1675 | ||
1680 | sk->sk_prot->unhash(sk); | 1676 | sk->sk_prot->unhash(sk); |
1681 | if (inet_csk(sk)->icsk_bind_hash && | 1677 | if (inet_csk(sk)->icsk_bind_hash && |
@@ -1684,7 +1680,7 @@ void tcp_set_state(struct sock *sk, int state) | |||
1684 | /* fall through */ | 1680 | /* fall through */ |
1685 | default: | 1681 | default: |
1686 | if (oldstate==TCP_ESTABLISHED) | 1682 | if (oldstate==TCP_ESTABLISHED) |
1687 | TCP_DEC_STATS(TCP_MIB_CURRESTAB); | 1683 | TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); |
1688 | } | 1684 | } |
1689 | 1685 | ||
1690 | /* Change state AFTER socket is unhashed to avoid closed | 1686 | /* Change state AFTER socket is unhashed to avoid closed |
@@ -1795,13 +1791,13 @@ void tcp_close(struct sock *sk, long timeout) | |||
1795 | */ | 1791 | */ |
1796 | if (data_was_unread) { | 1792 | if (data_was_unread) { |
1797 | /* Unread data was tossed, zap the connection. */ | 1793 | /* Unread data was tossed, zap the connection. */ |
1798 | NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE); | 1794 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); |
1799 | tcp_set_state(sk, TCP_CLOSE); | 1795 | tcp_set_state(sk, TCP_CLOSE); |
1800 | tcp_send_active_reset(sk, GFP_KERNEL); | 1796 | tcp_send_active_reset(sk, GFP_KERNEL); |
1801 | } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { | 1797 | } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { |
1802 | /* Check zero linger _after_ checking for unread data. */ | 1798 | /* Check zero linger _after_ checking for unread data. */ |
1803 | sk->sk_prot->disconnect(sk, 0); | 1799 | sk->sk_prot->disconnect(sk, 0); |
1804 | NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA); | 1800 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA); |
1805 | } else if (tcp_close_state(sk)) { | 1801 | } else if (tcp_close_state(sk)) { |
1806 | /* We FIN if the application ate all the data before | 1802 | /* We FIN if the application ate all the data before |
1807 | * zapping the connection. | 1803 | * zapping the connection. |
@@ -1873,7 +1869,8 @@ adjudge_to_death: | |||
1873 | if (tp->linger2 < 0) { | 1869 | if (tp->linger2 < 0) { |
1874 | tcp_set_state(sk, TCP_CLOSE); | 1870 | tcp_set_state(sk, TCP_CLOSE); |
1875 | tcp_send_active_reset(sk, GFP_ATOMIC); | 1871 | tcp_send_active_reset(sk, GFP_ATOMIC); |
1876 | NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER); | 1872 | NET_INC_STATS_BH(sock_net(sk), |
1873 | LINUX_MIB_TCPABORTONLINGER); | ||
1877 | } else { | 1874 | } else { |
1878 | const int tmo = tcp_fin_time(sk); | 1875 | const int tmo = tcp_fin_time(sk); |
1879 | 1876 | ||
@@ -1895,7 +1892,8 @@ adjudge_to_death: | |||
1895 | "sockets\n"); | 1892 | "sockets\n"); |
1896 | tcp_set_state(sk, TCP_CLOSE); | 1893 | tcp_set_state(sk, TCP_CLOSE); |
1897 | tcp_send_active_reset(sk, GFP_ATOMIC); | 1894 | tcp_send_active_reset(sk, GFP_ATOMIC); |
1898 | NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY); | 1895 | NET_INC_STATS_BH(sock_net(sk), |
1896 | LINUX_MIB_TCPABORTONMEMORY); | ||
1899 | } | 1897 | } |
1900 | } | 1898 | } |
1901 | 1899 | ||
@@ -2590,12 +2588,69 @@ void __tcp_put_md5sig_pool(void) | |||
2590 | } | 2588 | } |
2591 | 2589 | ||
2592 | EXPORT_SYMBOL(__tcp_put_md5sig_pool); | 2590 | EXPORT_SYMBOL(__tcp_put_md5sig_pool); |
2591 | |||
2592 | int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, | ||
2593 | struct tcphdr *th) | ||
2594 | { | ||
2595 | struct scatterlist sg; | ||
2596 | int err; | ||
2597 | |||
2598 | __sum16 old_checksum = th->check; | ||
2599 | th->check = 0; | ||
2600 | /* options aren't included in the hash */ | ||
2601 | sg_init_one(&sg, th, sizeof(struct tcphdr)); | ||
2602 | err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr)); | ||
2603 | th->check = old_checksum; | ||
2604 | return err; | ||
2605 | } | ||
2606 | |||
2607 | EXPORT_SYMBOL(tcp_md5_hash_header); | ||
2608 | |||
2609 | int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, | ||
2610 | struct sk_buff *skb, unsigned header_len) | ||
2611 | { | ||
2612 | struct scatterlist sg; | ||
2613 | const struct tcphdr *tp = tcp_hdr(skb); | ||
2614 | struct hash_desc *desc = &hp->md5_desc; | ||
2615 | unsigned i; | ||
2616 | const unsigned head_data_len = skb_headlen(skb) > header_len ? | ||
2617 | skb_headlen(skb) - header_len : 0; | ||
2618 | const struct skb_shared_info *shi = skb_shinfo(skb); | ||
2619 | |||
2620 | sg_init_table(&sg, 1); | ||
2621 | |||
2622 | sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); | ||
2623 | if (crypto_hash_update(desc, &sg, head_data_len)) | ||
2624 | return 1; | ||
2625 | |||
2626 | for (i = 0; i < shi->nr_frags; ++i) { | ||
2627 | const struct skb_frag_struct *f = &shi->frags[i]; | ||
2628 | sg_set_page(&sg, f->page, f->size, f->page_offset); | ||
2629 | if (crypto_hash_update(desc, &sg, f->size)) | ||
2630 | return 1; | ||
2631 | } | ||
2632 | |||
2633 | return 0; | ||
2634 | } | ||
2635 | |||
2636 | EXPORT_SYMBOL(tcp_md5_hash_skb_data); | ||
2637 | |||
2638 | int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key) | ||
2639 | { | ||
2640 | struct scatterlist sg; | ||
2641 | |||
2642 | sg_init_one(&sg, key->key, key->keylen); | ||
2643 | return crypto_hash_update(&hp->md5_desc, &sg, key->keylen); | ||
2644 | } | ||
2645 | |||
2646 | EXPORT_SYMBOL(tcp_md5_hash_key); | ||
2647 | |||
2593 | #endif | 2648 | #endif |
2594 | 2649 | ||
2595 | void tcp_done(struct sock *sk) | 2650 | void tcp_done(struct sock *sk) |
2596 | { | 2651 | { |
2597 | if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) | 2652 | if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) |
2598 | TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); | 2653 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); |
2599 | 2654 | ||
2600 | tcp_set_state(sk, TCP_CLOSE); | 2655 | tcp_set_state(sk, TCP_CLOSE); |
2601 | tcp_clear_xmit_timers(sk); | 2656 | tcp_clear_xmit_timers(sk); |
@@ -2732,4 +2787,3 @@ EXPORT_SYMBOL(tcp_splice_read); | |||
2732 | EXPORT_SYMBOL(tcp_sendpage); | 2787 | EXPORT_SYMBOL(tcp_sendpage); |
2733 | EXPORT_SYMBOL(tcp_setsockopt); | 2788 | EXPORT_SYMBOL(tcp_setsockopt); |
2734 | EXPORT_SYMBOL(tcp_shutdown); | 2789 | EXPORT_SYMBOL(tcp_shutdown); |
2735 | EXPORT_SYMBOL(tcp_statistics); | ||
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index 2fbcc7d1b1a0..838d491dfda7 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * tcp_diag.c Module for monitoring TCP transport protocols sockets. | 2 | * tcp_diag.c Module for monitoring TCP transport protocols sockets. |
3 | * | 3 | * |
4 | * Version: $Id: tcp_diag.c,v 1.3 2002/02/01 22:01:04 davem Exp $ | ||
5 | * | ||
6 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> | 4 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> |
7 | * | 5 | * |
8 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index cad73b7dfef0..1f5e6049883e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * Implementation of the Transmission Control Protocol(TCP). | 6 | * Implementation of the Transmission Control Protocol(TCP). |
7 | * | 7 | * |
8 | * Version: $Id: tcp_input.c,v 1.243 2002/02/01 22:01:04 davem Exp $ | ||
9 | * | ||
10 | * Authors: Ross Biro | 8 | * Authors: Ross Biro |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Mark Evans, <evansmp@uhura.aston.ac.uk> | 10 | * Mark Evans, <evansmp@uhura.aston.ac.uk> |
@@ -604,7 +602,7 @@ static u32 tcp_rto_min(struct sock *sk) | |||
604 | u32 rto_min = TCP_RTO_MIN; | 602 | u32 rto_min = TCP_RTO_MIN; |
605 | 603 | ||
606 | if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) | 604 | if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) |
607 | rto_min = dst_metric(dst, RTAX_RTO_MIN); | 605 | rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN); |
608 | return rto_min; | 606 | return rto_min; |
609 | } | 607 | } |
610 | 608 | ||
@@ -731,6 +729,7 @@ void tcp_update_metrics(struct sock *sk) | |||
731 | if (dst && (dst->flags & DST_HOST)) { | 729 | if (dst && (dst->flags & DST_HOST)) { |
732 | const struct inet_connection_sock *icsk = inet_csk(sk); | 730 | const struct inet_connection_sock *icsk = inet_csk(sk); |
733 | int m; | 731 | int m; |
732 | unsigned long rtt; | ||
734 | 733 | ||
735 | if (icsk->icsk_backoff || !tp->srtt) { | 734 | if (icsk->icsk_backoff || !tp->srtt) { |
736 | /* This session failed to estimate rtt. Why? | 735 | /* This session failed to estimate rtt. Why? |
@@ -742,7 +741,8 @@ void tcp_update_metrics(struct sock *sk) | |||
742 | return; | 741 | return; |
743 | } | 742 | } |
744 | 743 | ||
745 | m = dst_metric(dst, RTAX_RTT) - tp->srtt; | 744 | rtt = dst_metric_rtt(dst, RTAX_RTT); |
745 | m = rtt - tp->srtt; | ||
746 | 746 | ||
747 | /* If newly calculated rtt larger than stored one, | 747 | /* If newly calculated rtt larger than stored one, |
748 | * store new one. Otherwise, use EWMA. Remember, | 748 | * store new one. Otherwise, use EWMA. Remember, |
@@ -750,12 +750,13 @@ void tcp_update_metrics(struct sock *sk) | |||
750 | */ | 750 | */ |
751 | if (!(dst_metric_locked(dst, RTAX_RTT))) { | 751 | if (!(dst_metric_locked(dst, RTAX_RTT))) { |
752 | if (m <= 0) | 752 | if (m <= 0) |
753 | dst->metrics[RTAX_RTT - 1] = tp->srtt; | 753 | set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt); |
754 | else | 754 | else |
755 | dst->metrics[RTAX_RTT - 1] -= (m >> 3); | 755 | set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3)); |
756 | } | 756 | } |
757 | 757 | ||
758 | if (!(dst_metric_locked(dst, RTAX_RTTVAR))) { | 758 | if (!(dst_metric_locked(dst, RTAX_RTTVAR))) { |
759 | unsigned long var; | ||
759 | if (m < 0) | 760 | if (m < 0) |
760 | m = -m; | 761 | m = -m; |
761 | 762 | ||
@@ -764,11 +765,13 @@ void tcp_update_metrics(struct sock *sk) | |||
764 | if (m < tp->mdev) | 765 | if (m < tp->mdev) |
765 | m = tp->mdev; | 766 | m = tp->mdev; |
766 | 767 | ||
767 | if (m >= dst_metric(dst, RTAX_RTTVAR)) | 768 | var = dst_metric_rtt(dst, RTAX_RTTVAR); |
768 | dst->metrics[RTAX_RTTVAR - 1] = m; | 769 | if (m >= var) |
770 | var = m; | ||
769 | else | 771 | else |
770 | dst->metrics[RTAX_RTTVAR-1] -= | 772 | var -= (var - m) >> 2; |
771 | (dst_metric(dst, RTAX_RTTVAR) - m)>>2; | 773 | |
774 | set_dst_metric_rtt(dst, RTAX_RTTVAR, var); | ||
772 | } | 775 | } |
773 | 776 | ||
774 | if (tp->snd_ssthresh >= 0xFFFF) { | 777 | if (tp->snd_ssthresh >= 0xFFFF) { |
@@ -899,7 +902,7 @@ static void tcp_init_metrics(struct sock *sk) | |||
899 | if (dst_metric(dst, RTAX_RTT) == 0) | 902 | if (dst_metric(dst, RTAX_RTT) == 0) |
900 | goto reset; | 903 | goto reset; |
901 | 904 | ||
902 | if (!tp->srtt && dst_metric(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3)) | 905 | if (!tp->srtt && dst_metric_rtt(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3)) |
903 | goto reset; | 906 | goto reset; |
904 | 907 | ||
905 | /* Initial rtt is determined from SYN,SYN-ACK. | 908 | /* Initial rtt is determined from SYN,SYN-ACK. |
@@ -916,12 +919,12 @@ static void tcp_init_metrics(struct sock *sk) | |||
916 | * to low value, and then abruptly stops to do it and starts to delay | 919 | * to low value, and then abruptly stops to do it and starts to delay |
917 | * ACKs, wait for troubles. | 920 | * ACKs, wait for troubles. |
918 | */ | 921 | */ |
919 | if (dst_metric(dst, RTAX_RTT) > tp->srtt) { | 922 | if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) { |
920 | tp->srtt = dst_metric(dst, RTAX_RTT); | 923 | tp->srtt = dst_metric_rtt(dst, RTAX_RTT); |
921 | tp->rtt_seq = tp->snd_nxt; | 924 | tp->rtt_seq = tp->snd_nxt; |
922 | } | 925 | } |
923 | if (dst_metric(dst, RTAX_RTTVAR) > tp->mdev) { | 926 | if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) { |
924 | tp->mdev = dst_metric(dst, RTAX_RTTVAR); | 927 | tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR); |
925 | tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk)); | 928 | tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk)); |
926 | } | 929 | } |
927 | tcp_set_rto(sk); | 930 | tcp_set_rto(sk); |
@@ -949,17 +952,21 @@ static void tcp_update_reordering(struct sock *sk, const int metric, | |||
949 | { | 952 | { |
950 | struct tcp_sock *tp = tcp_sk(sk); | 953 | struct tcp_sock *tp = tcp_sk(sk); |
951 | if (metric > tp->reordering) { | 954 | if (metric > tp->reordering) { |
955 | int mib_idx; | ||
956 | |||
952 | tp->reordering = min(TCP_MAX_REORDERING, metric); | 957 | tp->reordering = min(TCP_MAX_REORDERING, metric); |
953 | 958 | ||
954 | /* This exciting event is worth to be remembered. 8) */ | 959 | /* This exciting event is worth to be remembered. 8) */ |
955 | if (ts) | 960 | if (ts) |
956 | NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER); | 961 | mib_idx = LINUX_MIB_TCPTSREORDER; |
957 | else if (tcp_is_reno(tp)) | 962 | else if (tcp_is_reno(tp)) |
958 | NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER); | 963 | mib_idx = LINUX_MIB_TCPRENOREORDER; |
959 | else if (tcp_is_fack(tp)) | 964 | else if (tcp_is_fack(tp)) |
960 | NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER); | 965 | mib_idx = LINUX_MIB_TCPFACKREORDER; |
961 | else | 966 | else |
962 | NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER); | 967 | mib_idx = LINUX_MIB_TCPSACKREORDER; |
968 | |||
969 | NET_INC_STATS_BH(sock_net(sk), mib_idx); | ||
963 | #if FASTRETRANS_DEBUG > 1 | 970 | #if FASTRETRANS_DEBUG > 1 |
964 | printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n", | 971 | printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n", |
965 | tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, | 972 | tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, |
@@ -1155,7 +1162,7 @@ static void tcp_mark_lost_retrans(struct sock *sk) | |||
1155 | tp->lost_out += tcp_skb_pcount(skb); | 1162 | tp->lost_out += tcp_skb_pcount(skb); |
1156 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; | 1163 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; |
1157 | } | 1164 | } |
1158 | NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT); | 1165 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); |
1159 | } else { | 1166 | } else { |
1160 | if (before(ack_seq, new_low_seq)) | 1167 | if (before(ack_seq, new_low_seq)) |
1161 | new_low_seq = ack_seq; | 1168 | new_low_seq = ack_seq; |
@@ -1167,10 +1174,11 @@ static void tcp_mark_lost_retrans(struct sock *sk) | |||
1167 | tp->lost_retrans_low = new_low_seq; | 1174 | tp->lost_retrans_low = new_low_seq; |
1168 | } | 1175 | } |
1169 | 1176 | ||
1170 | static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb, | 1177 | static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb, |
1171 | struct tcp_sack_block_wire *sp, int num_sacks, | 1178 | struct tcp_sack_block_wire *sp, int num_sacks, |
1172 | u32 prior_snd_una) | 1179 | u32 prior_snd_una) |
1173 | { | 1180 | { |
1181 | struct tcp_sock *tp = tcp_sk(sk); | ||
1174 | u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq); | 1182 | u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq); |
1175 | u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq); | 1183 | u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq); |
1176 | int dup_sack = 0; | 1184 | int dup_sack = 0; |
@@ -1178,7 +1186,7 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb, | |||
1178 | if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { | 1186 | if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { |
1179 | dup_sack = 1; | 1187 | dup_sack = 1; |
1180 | tcp_dsack_seen(tp); | 1188 | tcp_dsack_seen(tp); |
1181 | NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV); | 1189 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV); |
1182 | } else if (num_sacks > 1) { | 1190 | } else if (num_sacks > 1) { |
1183 | u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq); | 1191 | u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq); |
1184 | u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq); | 1192 | u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq); |
@@ -1187,7 +1195,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb, | |||
1187 | !before(start_seq_0, start_seq_1)) { | 1195 | !before(start_seq_0, start_seq_1)) { |
1188 | dup_sack = 1; | 1196 | dup_sack = 1; |
1189 | tcp_dsack_seen(tp); | 1197 | tcp_dsack_seen(tp); |
1190 | NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV); | 1198 | NET_INC_STATS_BH(sock_net(sk), |
1199 | LINUX_MIB_TCPDSACKOFORECV); | ||
1191 | } | 1200 | } |
1192 | } | 1201 | } |
1193 | 1202 | ||
@@ -1414,10 +1423,10 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, | |||
1414 | unsigned char *ptr = (skb_transport_header(ack_skb) + | 1423 | unsigned char *ptr = (skb_transport_header(ack_skb) + |
1415 | TCP_SKB_CB(ack_skb)->sacked); | 1424 | TCP_SKB_CB(ack_skb)->sacked); |
1416 | struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); | 1425 | struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); |
1417 | struct tcp_sack_block sp[4]; | 1426 | struct tcp_sack_block sp[TCP_NUM_SACKS]; |
1418 | struct tcp_sack_block *cache; | 1427 | struct tcp_sack_block *cache; |
1419 | struct sk_buff *skb; | 1428 | struct sk_buff *skb; |
1420 | int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE) >> 3; | 1429 | int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3); |
1421 | int used_sacks; | 1430 | int used_sacks; |
1422 | int reord = tp->packets_out; | 1431 | int reord = tp->packets_out; |
1423 | int flag = 0; | 1432 | int flag = 0; |
@@ -1432,7 +1441,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, | |||
1432 | tcp_highest_sack_reset(sk); | 1441 | tcp_highest_sack_reset(sk); |
1433 | } | 1442 | } |
1434 | 1443 | ||
1435 | found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire, | 1444 | found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, |
1436 | num_sacks, prior_snd_una); | 1445 | num_sacks, prior_snd_una); |
1437 | if (found_dup_sack) | 1446 | if (found_dup_sack) |
1438 | flag |= FLAG_DSACKING_ACK; | 1447 | flag |= FLAG_DSACKING_ACK; |
@@ -1458,18 +1467,22 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, | |||
1458 | if (!tcp_is_sackblock_valid(tp, dup_sack, | 1467 | if (!tcp_is_sackblock_valid(tp, dup_sack, |
1459 | sp[used_sacks].start_seq, | 1468 | sp[used_sacks].start_seq, |
1460 | sp[used_sacks].end_seq)) { | 1469 | sp[used_sacks].end_seq)) { |
1470 | int mib_idx; | ||
1471 | |||
1461 | if (dup_sack) { | 1472 | if (dup_sack) { |
1462 | if (!tp->undo_marker) | 1473 | if (!tp->undo_marker) |
1463 | NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO); | 1474 | mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO; |
1464 | else | 1475 | else |
1465 | NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD); | 1476 | mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD; |
1466 | } else { | 1477 | } else { |
1467 | /* Don't count olds caused by ACK reordering */ | 1478 | /* Don't count olds caused by ACK reordering */ |
1468 | if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && | 1479 | if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && |
1469 | !after(sp[used_sacks].end_seq, tp->snd_una)) | 1480 | !after(sp[used_sacks].end_seq, tp->snd_una)) |
1470 | continue; | 1481 | continue; |
1471 | NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD); | 1482 | mib_idx = LINUX_MIB_TCPSACKDISCARD; |
1472 | } | 1483 | } |
1484 | |||
1485 | NET_INC_STATS_BH(sock_net(sk), mib_idx); | ||
1473 | if (i == 0) | 1486 | if (i == 0) |
1474 | first_sack_index = -1; | 1487 | first_sack_index = -1; |
1475 | continue; | 1488 | continue; |
@@ -1962,7 +1975,7 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag) | |||
1962 | { | 1975 | { |
1963 | if (flag & FLAG_SACK_RENEGING) { | 1976 | if (flag & FLAG_SACK_RENEGING) { |
1964 | struct inet_connection_sock *icsk = inet_csk(sk); | 1977 | struct inet_connection_sock *icsk = inet_csk(sk); |
1965 | NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING); | 1978 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); |
1966 | 1979 | ||
1967 | tcp_enter_loss(sk, 1); | 1980 | tcp_enter_loss(sk, 1); |
1968 | icsk->icsk_retransmits++; | 1981 | icsk->icsk_retransmits++; |
@@ -2382,15 +2395,19 @@ static int tcp_try_undo_recovery(struct sock *sk) | |||
2382 | struct tcp_sock *tp = tcp_sk(sk); | 2395 | struct tcp_sock *tp = tcp_sk(sk); |
2383 | 2396 | ||
2384 | if (tcp_may_undo(tp)) { | 2397 | if (tcp_may_undo(tp)) { |
2398 | int mib_idx; | ||
2399 | |||
2385 | /* Happy end! We did not retransmit anything | 2400 | /* Happy end! We did not retransmit anything |
2386 | * or our original transmission succeeded. | 2401 | * or our original transmission succeeded. |
2387 | */ | 2402 | */ |
2388 | DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); | 2403 | DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); |
2389 | tcp_undo_cwr(sk, 1); | 2404 | tcp_undo_cwr(sk, 1); |
2390 | if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) | 2405 | if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) |
2391 | NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); | 2406 | mib_idx = LINUX_MIB_TCPLOSSUNDO; |
2392 | else | 2407 | else |
2393 | NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO); | 2408 | mib_idx = LINUX_MIB_TCPFULLUNDO; |
2409 | |||
2410 | NET_INC_STATS_BH(sock_net(sk), mib_idx); | ||
2394 | tp->undo_marker = 0; | 2411 | tp->undo_marker = 0; |
2395 | } | 2412 | } |
2396 | if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { | 2413 | if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { |
@@ -2413,7 +2430,7 @@ static void tcp_try_undo_dsack(struct sock *sk) | |||
2413 | DBGUNDO(sk, "D-SACK"); | 2430 | DBGUNDO(sk, "D-SACK"); |
2414 | tcp_undo_cwr(sk, 1); | 2431 | tcp_undo_cwr(sk, 1); |
2415 | tp->undo_marker = 0; | 2432 | tp->undo_marker = 0; |
2416 | NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO); | 2433 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); |
2417 | } | 2434 | } |
2418 | } | 2435 | } |
2419 | 2436 | ||
@@ -2436,7 +2453,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked) | |||
2436 | 2453 | ||
2437 | DBGUNDO(sk, "Hoe"); | 2454 | DBGUNDO(sk, "Hoe"); |
2438 | tcp_undo_cwr(sk, 0); | 2455 | tcp_undo_cwr(sk, 0); |
2439 | NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO); | 2456 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); |
2440 | 2457 | ||
2441 | /* So... Do not make Hoe's retransmit yet. | 2458 | /* So... Do not make Hoe's retransmit yet. |
2442 | * If the first packet was delayed, the rest | 2459 | * If the first packet was delayed, the rest |
@@ -2465,7 +2482,7 @@ static int tcp_try_undo_loss(struct sock *sk) | |||
2465 | DBGUNDO(sk, "partial loss"); | 2482 | DBGUNDO(sk, "partial loss"); |
2466 | tp->lost_out = 0; | 2483 | tp->lost_out = 0; |
2467 | tcp_undo_cwr(sk, 1); | 2484 | tcp_undo_cwr(sk, 1); |
2468 | NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); | 2485 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); |
2469 | inet_csk(sk)->icsk_retransmits = 0; | 2486 | inet_csk(sk)->icsk_retransmits = 0; |
2470 | tp->undo_marker = 0; | 2487 | tp->undo_marker = 0; |
2471 | if (tcp_is_sack(tp)) | 2488 | if (tcp_is_sack(tp)) |
@@ -2562,7 +2579,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag) | |||
2562 | int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); | 2579 | int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); |
2563 | int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && | 2580 | int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && |
2564 | (tcp_fackets_out(tp) > tp->reordering)); | 2581 | (tcp_fackets_out(tp) > tp->reordering)); |
2565 | int fast_rexmit = 0; | 2582 | int fast_rexmit = 0, mib_idx; |
2566 | 2583 | ||
2567 | if (WARN_ON(!tp->packets_out && tp->sacked_out)) | 2584 | if (WARN_ON(!tp->packets_out && tp->sacked_out)) |
2568 | tp->sacked_out = 0; | 2585 | tp->sacked_out = 0; |
@@ -2584,7 +2601,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag) | |||
2584 | icsk->icsk_ca_state != TCP_CA_Open && | 2601 | icsk->icsk_ca_state != TCP_CA_Open && |
2585 | tp->fackets_out > tp->reordering) { | 2602 | tp->fackets_out > tp->reordering) { |
2586 | tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering); | 2603 | tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering); |
2587 | NET_INC_STATS_BH(LINUX_MIB_TCPLOSS); | 2604 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS); |
2588 | } | 2605 | } |
2589 | 2606 | ||
2590 | /* D. Check consistency of the current state. */ | 2607 | /* D. Check consistency of the current state. */ |
@@ -2685,9 +2702,11 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag) | |||
2685 | /* Otherwise enter Recovery state */ | 2702 | /* Otherwise enter Recovery state */ |
2686 | 2703 | ||
2687 | if (tcp_is_reno(tp)) | 2704 | if (tcp_is_reno(tp)) |
2688 | NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY); | 2705 | mib_idx = LINUX_MIB_TCPRENORECOVERY; |
2689 | else | 2706 | else |
2690 | NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY); | 2707 | mib_idx = LINUX_MIB_TCPSACKRECOVERY; |
2708 | |||
2709 | NET_INC_STATS_BH(sock_net(sk), mib_idx); | ||
2691 | 2710 | ||
2692 | tp->high_seq = tp->snd_nxt; | 2711 | tp->high_seq = tp->snd_nxt; |
2693 | tp->prior_ssthresh = 0; | 2712 | tp->prior_ssthresh = 0; |
@@ -3198,7 +3217,7 @@ static int tcp_process_frto(struct sock *sk, int flag) | |||
3198 | } | 3217 | } |
3199 | tp->frto_counter = 0; | 3218 | tp->frto_counter = 0; |
3200 | tp->undo_marker = 0; | 3219 | tp->undo_marker = 0; |
3201 | NET_INC_STATS_BH(LINUX_MIB_TCPSPURIOUSRTOS); | 3220 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); |
3202 | } | 3221 | } |
3203 | return 0; | 3222 | return 0; |
3204 | } | 3223 | } |
@@ -3251,12 +3270,12 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) | |||
3251 | 3270 | ||
3252 | tcp_ca_event(sk, CA_EVENT_FAST_ACK); | 3271 | tcp_ca_event(sk, CA_EVENT_FAST_ACK); |
3253 | 3272 | ||
3254 | NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS); | 3273 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); |
3255 | } else { | 3274 | } else { |
3256 | if (ack_seq != TCP_SKB_CB(skb)->end_seq) | 3275 | if (ack_seq != TCP_SKB_CB(skb)->end_seq) |
3257 | flag |= FLAG_DATA; | 3276 | flag |= FLAG_DATA; |
3258 | else | 3277 | else |
3259 | NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS); | 3278 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS); |
3260 | 3279 | ||
3261 | flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); | 3280 | flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); |
3262 | 3281 | ||
@@ -3450,6 +3469,43 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, | |||
3450 | return 1; | 3469 | return 1; |
3451 | } | 3470 | } |
3452 | 3471 | ||
3472 | #ifdef CONFIG_TCP_MD5SIG | ||
3473 | /* | ||
3474 | * Parse MD5 Signature option | ||
3475 | */ | ||
3476 | u8 *tcp_parse_md5sig_option(struct tcphdr *th) | ||
3477 | { | ||
3478 | int length = (th->doff << 2) - sizeof (*th); | ||
3479 | u8 *ptr = (u8*)(th + 1); | ||
3480 | |||
3481 | /* If the TCP option is too short, we can short cut */ | ||
3482 | if (length < TCPOLEN_MD5SIG) | ||
3483 | return NULL; | ||
3484 | |||
3485 | while (length > 0) { | ||
3486 | int opcode = *ptr++; | ||
3487 | int opsize; | ||
3488 | |||
3489 | switch(opcode) { | ||
3490 | case TCPOPT_EOL: | ||
3491 | return NULL; | ||
3492 | case TCPOPT_NOP: | ||
3493 | length--; | ||
3494 | continue; | ||
3495 | default: | ||
3496 | opsize = *ptr++; | ||
3497 | if (opsize < 2 || opsize > length) | ||
3498 | return NULL; | ||
3499 | if (opcode == TCPOPT_MD5SIG) | ||
3500 | return ptr; | ||
3501 | } | ||
3502 | ptr += opsize - 2; | ||
3503 | length -= opsize; | ||
3504 | } | ||
3505 | return NULL; | ||
3506 | } | ||
3507 | #endif | ||
3508 | |||
3453 | static inline void tcp_store_ts_recent(struct tcp_sock *tp) | 3509 | static inline void tcp_store_ts_recent(struct tcp_sock *tp) |
3454 | { | 3510 | { |
3455 | tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; | 3511 | tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; |
@@ -3662,26 +3718,33 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, | |||
3662 | return 0; | 3718 | return 0; |
3663 | } | 3719 | } |
3664 | 3720 | ||
3665 | static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq) | 3721 | static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) |
3666 | { | 3722 | { |
3723 | struct tcp_sock *tp = tcp_sk(sk); | ||
3724 | |||
3667 | if (tcp_is_sack(tp) && sysctl_tcp_dsack) { | 3725 | if (tcp_is_sack(tp) && sysctl_tcp_dsack) { |
3726 | int mib_idx; | ||
3727 | |||
3668 | if (before(seq, tp->rcv_nxt)) | 3728 | if (before(seq, tp->rcv_nxt)) |
3669 | NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT); | 3729 | mib_idx = LINUX_MIB_TCPDSACKOLDSENT; |
3670 | else | 3730 | else |
3671 | NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT); | 3731 | mib_idx = LINUX_MIB_TCPDSACKOFOSENT; |
3732 | |||
3733 | NET_INC_STATS_BH(sock_net(sk), mib_idx); | ||
3672 | 3734 | ||
3673 | tp->rx_opt.dsack = 1; | 3735 | tp->rx_opt.dsack = 1; |
3674 | tp->duplicate_sack[0].start_seq = seq; | 3736 | tp->duplicate_sack[0].start_seq = seq; |
3675 | tp->duplicate_sack[0].end_seq = end_seq; | 3737 | tp->duplicate_sack[0].end_seq = end_seq; |
3676 | tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1, | 3738 | tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + 1; |
3677 | 4 - tp->rx_opt.tstamp_ok); | ||
3678 | } | 3739 | } |
3679 | } | 3740 | } |
3680 | 3741 | ||
3681 | static void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq) | 3742 | static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) |
3682 | { | 3743 | { |
3744 | struct tcp_sock *tp = tcp_sk(sk); | ||
3745 | |||
3683 | if (!tp->rx_opt.dsack) | 3746 | if (!tp->rx_opt.dsack) |
3684 | tcp_dsack_set(tp, seq, end_seq); | 3747 | tcp_dsack_set(sk, seq, end_seq); |
3685 | else | 3748 | else |
3686 | tcp_sack_extend(tp->duplicate_sack, seq, end_seq); | 3749 | tcp_sack_extend(tp->duplicate_sack, seq, end_seq); |
3687 | } | 3750 | } |
@@ -3692,7 +3755,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) | |||
3692 | 3755 | ||
3693 | if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && | 3756 | if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && |
3694 | before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { | 3757 | before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { |
3695 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); | 3758 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); |
3696 | tcp_enter_quickack_mode(sk); | 3759 | tcp_enter_quickack_mode(sk); |
3697 | 3760 | ||
3698 | if (tcp_is_sack(tp) && sysctl_tcp_dsack) { | 3761 | if (tcp_is_sack(tp) && sysctl_tcp_dsack) { |
@@ -3700,7 +3763,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) | |||
3700 | 3763 | ||
3701 | if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) | 3764 | if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) |
3702 | end_seq = tp->rcv_nxt; | 3765 | end_seq = tp->rcv_nxt; |
3703 | tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, end_seq); | 3766 | tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); |
3704 | } | 3767 | } |
3705 | } | 3768 | } |
3706 | 3769 | ||
@@ -3727,9 +3790,8 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) | |||
3727 | * Decrease num_sacks. | 3790 | * Decrease num_sacks. |
3728 | */ | 3791 | */ |
3729 | tp->rx_opt.num_sacks--; | 3792 | tp->rx_opt.num_sacks--; |
3730 | tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + | 3793 | tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + |
3731 | tp->rx_opt.dsack, | 3794 | tp->rx_opt.dsack; |
3732 | 4 - tp->rx_opt.tstamp_ok); | ||
3733 | for (i = this_sack; i < tp->rx_opt.num_sacks; i++) | 3795 | for (i = this_sack; i < tp->rx_opt.num_sacks; i++) |
3734 | sp[i] = sp[i + 1]; | 3796 | sp[i] = sp[i + 1]; |
3735 | continue; | 3797 | continue; |
@@ -3779,7 +3841,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) | |||
3779 | * | 3841 | * |
3780 | * If the sack array is full, forget about the last one. | 3842 | * If the sack array is full, forget about the last one. |
3781 | */ | 3843 | */ |
3782 | if (this_sack >= 4) { | 3844 | if (this_sack >= TCP_NUM_SACKS) { |
3783 | this_sack--; | 3845 | this_sack--; |
3784 | tp->rx_opt.num_sacks--; | 3846 | tp->rx_opt.num_sacks--; |
3785 | sp--; | 3847 | sp--; |
@@ -3792,8 +3854,7 @@ new_sack: | |||
3792 | sp->start_seq = seq; | 3854 | sp->start_seq = seq; |
3793 | sp->end_seq = end_seq; | 3855 | sp->end_seq = end_seq; |
3794 | tp->rx_opt.num_sacks++; | 3856 | tp->rx_opt.num_sacks++; |
3795 | tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, | 3857 | tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; |
3796 | 4 - tp->rx_opt.tstamp_ok); | ||
3797 | } | 3858 | } |
3798 | 3859 | ||
3799 | /* RCV.NXT advances, some SACKs should be eaten. */ | 3860 | /* RCV.NXT advances, some SACKs should be eaten. */ |
@@ -3830,9 +3891,8 @@ static void tcp_sack_remove(struct tcp_sock *tp) | |||
3830 | } | 3891 | } |
3831 | if (num_sacks != tp->rx_opt.num_sacks) { | 3892 | if (num_sacks != tp->rx_opt.num_sacks) { |
3832 | tp->rx_opt.num_sacks = num_sacks; | 3893 | tp->rx_opt.num_sacks = num_sacks; |
3833 | tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + | 3894 | tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + |
3834 | tp->rx_opt.dsack, | 3895 | tp->rx_opt.dsack; |
3835 | 4 - tp->rx_opt.tstamp_ok); | ||
3836 | } | 3896 | } |
3837 | } | 3897 | } |
3838 | 3898 | ||
@@ -3853,7 +3913,7 @@ static void tcp_ofo_queue(struct sock *sk) | |||
3853 | __u32 dsack = dsack_high; | 3913 | __u32 dsack = dsack_high; |
3854 | if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) | 3914 | if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) |
3855 | dsack_high = TCP_SKB_CB(skb)->end_seq; | 3915 | dsack_high = TCP_SKB_CB(skb)->end_seq; |
3856 | tcp_dsack_extend(tp, TCP_SKB_CB(skb)->seq, dsack); | 3916 | tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); |
3857 | } | 3917 | } |
3858 | 3918 | ||
3859 | if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { | 3919 | if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { |
@@ -3911,8 +3971,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) | |||
3911 | 3971 | ||
3912 | if (tp->rx_opt.dsack) { | 3972 | if (tp->rx_opt.dsack) { |
3913 | tp->rx_opt.dsack = 0; | 3973 | tp->rx_opt.dsack = 0; |
3914 | tp->rx_opt.eff_sacks = min_t(unsigned int, tp->rx_opt.num_sacks, | 3974 | tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks; |
3915 | 4 - tp->rx_opt.tstamp_ok); | ||
3916 | } | 3975 | } |
3917 | 3976 | ||
3918 | /* Queue data for delivery to the user. | 3977 | /* Queue data for delivery to the user. |
@@ -3981,8 +4040,8 @@ queue_and_out: | |||
3981 | 4040 | ||
3982 | if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { | 4041 | if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { |
3983 | /* A retransmit, 2nd most common case. Force an immediate ack. */ | 4042 | /* A retransmit, 2nd most common case. Force an immediate ack. */ |
3984 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); | 4043 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); |
3985 | tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); | 4044 | tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); |
3986 | 4045 | ||
3987 | out_of_window: | 4046 | out_of_window: |
3988 | tcp_enter_quickack_mode(sk); | 4047 | tcp_enter_quickack_mode(sk); |
@@ -4004,7 +4063,7 @@ drop: | |||
4004 | tp->rcv_nxt, TCP_SKB_CB(skb)->seq, | 4063 | tp->rcv_nxt, TCP_SKB_CB(skb)->seq, |
4005 | TCP_SKB_CB(skb)->end_seq); | 4064 | TCP_SKB_CB(skb)->end_seq); |
4006 | 4065 | ||
4007 | tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); | 4066 | tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); |
4008 | 4067 | ||
4009 | /* If window is closed, drop tail of packet. But after | 4068 | /* If window is closed, drop tail of packet. But after |
4010 | * remembering D-SACK for its head made in previous line. | 4069 | * remembering D-SACK for its head made in previous line. |
@@ -4069,12 +4128,12 @@ drop: | |||
4069 | if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { | 4128 | if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { |
4070 | /* All the bits are present. Drop. */ | 4129 | /* All the bits are present. Drop. */ |
4071 | __kfree_skb(skb); | 4130 | __kfree_skb(skb); |
4072 | tcp_dsack_set(tp, seq, end_seq); | 4131 | tcp_dsack_set(sk, seq, end_seq); |
4073 | goto add_sack; | 4132 | goto add_sack; |
4074 | } | 4133 | } |
4075 | if (after(seq, TCP_SKB_CB(skb1)->seq)) { | 4134 | if (after(seq, TCP_SKB_CB(skb1)->seq)) { |
4076 | /* Partial overlap. */ | 4135 | /* Partial overlap. */ |
4077 | tcp_dsack_set(tp, seq, | 4136 | tcp_dsack_set(sk, seq, |
4078 | TCP_SKB_CB(skb1)->end_seq); | 4137 | TCP_SKB_CB(skb1)->end_seq); |
4079 | } else { | 4138 | } else { |
4080 | skb1 = skb1->prev; | 4139 | skb1 = skb1->prev; |
@@ -4087,12 +4146,12 @@ drop: | |||
4087 | (struct sk_buff *)&tp->out_of_order_queue && | 4146 | (struct sk_buff *)&tp->out_of_order_queue && |
4088 | after(end_seq, TCP_SKB_CB(skb1)->seq)) { | 4147 | after(end_seq, TCP_SKB_CB(skb1)->seq)) { |
4089 | if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { | 4148 | if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { |
4090 | tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, | 4149 | tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, |
4091 | end_seq); | 4150 | end_seq); |
4092 | break; | 4151 | break; |
4093 | } | 4152 | } |
4094 | __skb_unlink(skb1, &tp->out_of_order_queue); | 4153 | __skb_unlink(skb1, &tp->out_of_order_queue); |
4095 | tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, | 4154 | tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, |
4096 | TCP_SKB_CB(skb1)->end_seq); | 4155 | TCP_SKB_CB(skb1)->end_seq); |
4097 | __kfree_skb(skb1); | 4156 | __kfree_skb(skb1); |
4098 | } | 4157 | } |
@@ -4123,7 +4182,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
4123 | struct sk_buff *next = skb->next; | 4182 | struct sk_buff *next = skb->next; |
4124 | __skb_unlink(skb, list); | 4183 | __skb_unlink(skb, list); |
4125 | __kfree_skb(skb); | 4184 | __kfree_skb(skb); |
4126 | NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED); | 4185 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); |
4127 | skb = next; | 4186 | skb = next; |
4128 | continue; | 4187 | continue; |
4129 | } | 4188 | } |
@@ -4191,7 +4250,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
4191 | struct sk_buff *next = skb->next; | 4250 | struct sk_buff *next = skb->next; |
4192 | __skb_unlink(skb, list); | 4251 | __skb_unlink(skb, list); |
4193 | __kfree_skb(skb); | 4252 | __kfree_skb(skb); |
4194 | NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED); | 4253 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); |
4195 | skb = next; | 4254 | skb = next; |
4196 | if (skb == tail || | 4255 | if (skb == tail || |
4197 | tcp_hdr(skb)->syn || | 4256 | tcp_hdr(skb)->syn || |
@@ -4254,7 +4313,7 @@ static int tcp_prune_ofo_queue(struct sock *sk) | |||
4254 | int res = 0; | 4313 | int res = 0; |
4255 | 4314 | ||
4256 | if (!skb_queue_empty(&tp->out_of_order_queue)) { | 4315 | if (!skb_queue_empty(&tp->out_of_order_queue)) { |
4257 | NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED); | 4316 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); |
4258 | __skb_queue_purge(&tp->out_of_order_queue); | 4317 | __skb_queue_purge(&tp->out_of_order_queue); |
4259 | 4318 | ||
4260 | /* Reset SACK state. A conforming SACK implementation will | 4319 | /* Reset SACK state. A conforming SACK implementation will |
@@ -4283,7 +4342,7 @@ static int tcp_prune_queue(struct sock *sk) | |||
4283 | 4342 | ||
4284 | SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); | 4343 | SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); |
4285 | 4344 | ||
4286 | NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED); | 4345 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED); |
4287 | 4346 | ||
4288 | if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) | 4347 | if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) |
4289 | tcp_clamp_window(sk); | 4348 | tcp_clamp_window(sk); |
@@ -4312,7 +4371,7 @@ static int tcp_prune_queue(struct sock *sk) | |||
4312 | * drop receive data on the floor. It will get retransmitted | 4371 | * drop receive data on the floor. It will get retransmitted |
4313 | * and hopefully then we'll have sufficient space. | 4372 | * and hopefully then we'll have sufficient space. |
4314 | */ | 4373 | */ |
4315 | NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED); | 4374 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED); |
4316 | 4375 | ||
4317 | /* Massive buffer overcommit. */ | 4376 | /* Massive buffer overcommit. */ |
4318 | tp->pred_flags = 0; | 4377 | tp->pred_flags = 0; |
@@ -4742,7 +4801,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
4742 | tcp_data_snd_check(sk); | 4801 | tcp_data_snd_check(sk); |
4743 | return 0; | 4802 | return 0; |
4744 | } else { /* Header too small */ | 4803 | } else { /* Header too small */ |
4745 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | 4804 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); |
4746 | goto discard; | 4805 | goto discard; |
4747 | } | 4806 | } |
4748 | } else { | 4807 | } else { |
@@ -4779,7 +4838,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
4779 | 4838 | ||
4780 | __skb_pull(skb, tcp_header_len); | 4839 | __skb_pull(skb, tcp_header_len); |
4781 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; | 4840 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; |
4782 | NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER); | 4841 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); |
4783 | } | 4842 | } |
4784 | if (copied_early) | 4843 | if (copied_early) |
4785 | tcp_cleanup_rbuf(sk, skb->len); | 4844 | tcp_cleanup_rbuf(sk, skb->len); |
@@ -4802,7 +4861,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
4802 | if ((int)skb->truesize > sk->sk_forward_alloc) | 4861 | if ((int)skb->truesize > sk->sk_forward_alloc) |
4803 | goto step5; | 4862 | goto step5; |
4804 | 4863 | ||
4805 | NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS); | 4864 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); |
4806 | 4865 | ||
4807 | /* Bulk data transfer: receiver */ | 4866 | /* Bulk data transfer: receiver */ |
4808 | __skb_pull(skb, tcp_header_len); | 4867 | __skb_pull(skb, tcp_header_len); |
@@ -4846,7 +4905,7 @@ slow_path: | |||
4846 | if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && | 4905 | if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && |
4847 | tcp_paws_discard(sk, skb)) { | 4906 | tcp_paws_discard(sk, skb)) { |
4848 | if (!th->rst) { | 4907 | if (!th->rst) { |
4849 | NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); | 4908 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); |
4850 | tcp_send_dupack(sk, skb); | 4909 | tcp_send_dupack(sk, skb); |
4851 | goto discard; | 4910 | goto discard; |
4852 | } | 4911 | } |
@@ -4881,8 +4940,8 @@ slow_path: | |||
4881 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | 4940 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); |
4882 | 4941 | ||
4883 | if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { | 4942 | if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { |
4884 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | 4943 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); |
4885 | NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN); | 4944 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); |
4886 | tcp_reset(sk); | 4945 | tcp_reset(sk); |
4887 | return 1; | 4946 | return 1; |
4888 | } | 4947 | } |
@@ -4904,7 +4963,7 @@ step5: | |||
4904 | return 0; | 4963 | return 0; |
4905 | 4964 | ||
4906 | csum_error: | 4965 | csum_error: |
4907 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | 4966 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); |
4908 | 4967 | ||
4909 | discard: | 4968 | discard: |
4910 | __kfree_skb(skb); | 4969 | __kfree_skb(skb); |
@@ -4938,7 +4997,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
4938 | if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && | 4997 | if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && |
4939 | !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, | 4998 | !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, |
4940 | tcp_time_stamp)) { | 4999 | tcp_time_stamp)) { |
4941 | NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED); | 5000 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); |
4942 | goto reset_and_undo; | 5001 | goto reset_and_undo; |
4943 | } | 5002 | } |
4944 | 5003 | ||
@@ -5222,7 +5281,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5222 | if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && | 5281 | if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && |
5223 | tcp_paws_discard(sk, skb)) { | 5282 | tcp_paws_discard(sk, skb)) { |
5224 | if (!th->rst) { | 5283 | if (!th->rst) { |
5225 | NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); | 5284 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); |
5226 | tcp_send_dupack(sk, skb); | 5285 | tcp_send_dupack(sk, skb); |
5227 | goto discard; | 5286 | goto discard; |
5228 | } | 5287 | } |
@@ -5251,7 +5310,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5251 | * Check for a SYN in window. | 5310 | * Check for a SYN in window. |
5252 | */ | 5311 | */ |
5253 | if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { | 5312 | if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { |
5254 | NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN); | 5313 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); |
5255 | tcp_reset(sk); | 5314 | tcp_reset(sk); |
5256 | return 1; | 5315 | return 1; |
5257 | } | 5316 | } |
@@ -5333,7 +5392,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5333 | (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && | 5392 | (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && |
5334 | after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { | 5393 | after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { |
5335 | tcp_done(sk); | 5394 | tcp_done(sk); |
5336 | NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA); | 5395 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); |
5337 | return 1; | 5396 | return 1; |
5338 | } | 5397 | } |
5339 | 5398 | ||
@@ -5393,7 +5452,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5393 | if (sk->sk_shutdown & RCV_SHUTDOWN) { | 5452 | if (sk->sk_shutdown & RCV_SHUTDOWN) { |
5394 | if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && | 5453 | if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && |
5395 | after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { | 5454 | after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { |
5396 | NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA); | 5455 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); |
5397 | tcp_reset(sk); | 5456 | tcp_reset(sk); |
5398 | return 1; | 5457 | return 1; |
5399 | } | 5458 | } |
@@ -5422,6 +5481,9 @@ EXPORT_SYMBOL(sysctl_tcp_ecn); | |||
5422 | EXPORT_SYMBOL(sysctl_tcp_reordering); | 5481 | EXPORT_SYMBOL(sysctl_tcp_reordering); |
5423 | EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); | 5482 | EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); |
5424 | EXPORT_SYMBOL(tcp_parse_options); | 5483 | EXPORT_SYMBOL(tcp_parse_options); |
5484 | #ifdef CONFIG_TCP_MD5SIG | ||
5485 | EXPORT_SYMBOL(tcp_parse_md5sig_option); | ||
5486 | #endif | ||
5425 | EXPORT_SYMBOL(tcp_rcv_established); | 5487 | EXPORT_SYMBOL(tcp_rcv_established); |
5426 | EXPORT_SYMBOL(tcp_rcv_state_process); | 5488 | EXPORT_SYMBOL(tcp_rcv_state_process); |
5427 | EXPORT_SYMBOL(tcp_initialize_rcv_mss); | 5489 | EXPORT_SYMBOL(tcp_initialize_rcv_mss); |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ffe869ac1bcf..a82df6307567 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * Implementation of the Transmission Control Protocol(TCP). | 6 | * Implementation of the Transmission Control Protocol(TCP). |
7 | * | 7 | * |
8 | * Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $ | ||
9 | * | ||
10 | * IPv4 specific functions | 8 | * IPv4 specific functions |
11 | * | 9 | * |
12 | * | 10 | * |
@@ -89,10 +87,14 @@ int sysctl_tcp_low_latency __read_mostly; | |||
89 | #ifdef CONFIG_TCP_MD5SIG | 87 | #ifdef CONFIG_TCP_MD5SIG |
90 | static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, | 88 | static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, |
91 | __be32 addr); | 89 | __be32 addr); |
92 | static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | 90 | static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, |
93 | __be32 saddr, __be32 daddr, | 91 | __be32 daddr, __be32 saddr, struct tcphdr *th); |
94 | struct tcphdr *th, int protocol, | 92 | #else |
95 | unsigned int tcplen); | 93 | static inline |
94 | struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr) | ||
95 | { | ||
96 | return NULL; | ||
97 | } | ||
96 | #endif | 98 | #endif |
97 | 99 | ||
98 | struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { | 100 | struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { |
@@ -172,7 +174,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
172 | inet->sport, usin->sin_port, sk, 1); | 174 | inet->sport, usin->sin_port, sk, 1); |
173 | if (tmp < 0) { | 175 | if (tmp < 0) { |
174 | if (tmp == -ENETUNREACH) | 176 | if (tmp == -ENETUNREACH) |
175 | IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); | 177 | IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); |
176 | return tmp; | 178 | return tmp; |
177 | } | 179 | } |
178 | 180 | ||
@@ -340,16 +342,17 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) | |||
340 | struct sock *sk; | 342 | struct sock *sk; |
341 | __u32 seq; | 343 | __u32 seq; |
342 | int err; | 344 | int err; |
345 | struct net *net = dev_net(skb->dev); | ||
343 | 346 | ||
344 | if (skb->len < (iph->ihl << 2) + 8) { | 347 | if (skb->len < (iph->ihl << 2) + 8) { |
345 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | 348 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); |
346 | return; | 349 | return; |
347 | } | 350 | } |
348 | 351 | ||
349 | sk = inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->daddr, th->dest, | 352 | sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest, |
350 | iph->saddr, th->source, inet_iif(skb)); | 353 | iph->saddr, th->source, inet_iif(skb)); |
351 | if (!sk) { | 354 | if (!sk) { |
352 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | 355 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); |
353 | return; | 356 | return; |
354 | } | 357 | } |
355 | if (sk->sk_state == TCP_TIME_WAIT) { | 358 | if (sk->sk_state == TCP_TIME_WAIT) { |
@@ -362,7 +365,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) | |||
362 | * servers this needs to be solved differently. | 365 | * servers this needs to be solved differently. |
363 | */ | 366 | */ |
364 | if (sock_owned_by_user(sk)) | 367 | if (sock_owned_by_user(sk)) |
365 | NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); | 368 | NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); |
366 | 369 | ||
367 | if (sk->sk_state == TCP_CLOSE) | 370 | if (sk->sk_state == TCP_CLOSE) |
368 | goto out; | 371 | goto out; |
@@ -371,7 +374,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) | |||
371 | seq = ntohl(th->seq); | 374 | seq = ntohl(th->seq); |
372 | if (sk->sk_state != TCP_LISTEN && | 375 | if (sk->sk_state != TCP_LISTEN && |
373 | !between(seq, tp->snd_una, tp->snd_nxt)) { | 376 | !between(seq, tp->snd_una, tp->snd_nxt)) { |
374 | NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); | 377 | NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); |
375 | goto out; | 378 | goto out; |
376 | } | 379 | } |
377 | 380 | ||
@@ -418,7 +421,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) | |||
418 | BUG_TRAP(!req->sk); | 421 | BUG_TRAP(!req->sk); |
419 | 422 | ||
420 | if (seq != tcp_rsk(req)->snt_isn) { | 423 | if (seq != tcp_rsk(req)->snt_isn) { |
421 | NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); | 424 | NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); |
422 | goto out; | 425 | goto out; |
423 | } | 426 | } |
424 | 427 | ||
@@ -540,6 +543,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) | |||
540 | #ifdef CONFIG_TCP_MD5SIG | 543 | #ifdef CONFIG_TCP_MD5SIG |
541 | struct tcp_md5sig_key *key; | 544 | struct tcp_md5sig_key *key; |
542 | #endif | 545 | #endif |
546 | struct net *net; | ||
543 | 547 | ||
544 | /* Never send a reset in response to a reset. */ | 548 | /* Never send a reset in response to a reset. */ |
545 | if (th->rst) | 549 | if (th->rst) |
@@ -578,12 +582,9 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) | |||
578 | arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; | 582 | arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; |
579 | rep.th.doff = arg.iov[0].iov_len / 4; | 583 | rep.th.doff = arg.iov[0].iov_len / 4; |
580 | 584 | ||
581 | tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1], | 585 | tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1], |
582 | key, | 586 | key, ip_hdr(skb)->daddr, |
583 | ip_hdr(skb)->daddr, | 587 | ip_hdr(skb)->saddr, &rep.th); |
584 | ip_hdr(skb)->saddr, | ||
585 | &rep.th, IPPROTO_TCP, | ||
586 | arg.iov[0].iov_len); | ||
587 | } | 588 | } |
588 | #endif | 589 | #endif |
589 | arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, | 590 | arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, |
@@ -591,20 +592,21 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) | |||
591 | sizeof(struct tcphdr), IPPROTO_TCP, 0); | 592 | sizeof(struct tcphdr), IPPROTO_TCP, 0); |
592 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; | 593 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; |
593 | 594 | ||
594 | ip_send_reply(dev_net(skb->dst->dev)->ipv4.tcp_sock, skb, | 595 | net = dev_net(skb->dst->dev); |
596 | ip_send_reply(net->ipv4.tcp_sock, skb, | ||
595 | &arg, arg.iov[0].iov_len); | 597 | &arg, arg.iov[0].iov_len); |
596 | 598 | ||
597 | TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); | 599 | TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); |
598 | TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); | 600 | TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); |
599 | } | 601 | } |
600 | 602 | ||
601 | /* The code following below sending ACKs in SYN-RECV and TIME-WAIT states | 603 | /* The code following below sending ACKs in SYN-RECV and TIME-WAIT states |
602 | outside socket context is ugly, certainly. What can I do? | 604 | outside socket context is ugly, certainly. What can I do? |
603 | */ | 605 | */ |
604 | 606 | ||
605 | static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, | 607 | static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, |
606 | struct sk_buff *skb, u32 seq, u32 ack, | 608 | u32 win, u32 ts, int oif, |
607 | u32 win, u32 ts) | 609 | struct tcp_md5sig_key *key) |
608 | { | 610 | { |
609 | struct tcphdr *th = tcp_hdr(skb); | 611 | struct tcphdr *th = tcp_hdr(skb); |
610 | struct { | 612 | struct { |
@@ -616,10 +618,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, | |||
616 | ]; | 618 | ]; |
617 | } rep; | 619 | } rep; |
618 | struct ip_reply_arg arg; | 620 | struct ip_reply_arg arg; |
619 | #ifdef CONFIG_TCP_MD5SIG | 621 | struct net *net = dev_net(skb->dev); |
620 | struct tcp_md5sig_key *key; | ||
621 | struct tcp_md5sig_key tw_key; | ||
622 | #endif | ||
623 | 622 | ||
624 | memset(&rep.th, 0, sizeof(struct tcphdr)); | 623 | memset(&rep.th, 0, sizeof(struct tcphdr)); |
625 | memset(&arg, 0, sizeof(arg)); | 624 | memset(&arg, 0, sizeof(arg)); |
@@ -645,23 +644,6 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, | |||
645 | rep.th.window = htons(win); | 644 | rep.th.window = htons(win); |
646 | 645 | ||
647 | #ifdef CONFIG_TCP_MD5SIG | 646 | #ifdef CONFIG_TCP_MD5SIG |
648 | /* | ||
649 | * The SKB holds an imcoming packet, but may not have a valid ->sk | ||
650 | * pointer. This is especially the case when we're dealing with a | ||
651 | * TIME_WAIT ack, because the sk structure is long gone, and only | ||
652 | * the tcp_timewait_sock remains. So the md5 key is stashed in that | ||
653 | * structure, and we use it in preference. I believe that (twsk || | ||
654 | * skb->sk) holds true, but we program defensively. | ||
655 | */ | ||
656 | if (!twsk && skb->sk) { | ||
657 | key = tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr); | ||
658 | } else if (twsk && twsk->tw_md5_keylen) { | ||
659 | tw_key.key = twsk->tw_md5_key; | ||
660 | tw_key.keylen = twsk->tw_md5_keylen; | ||
661 | key = &tw_key; | ||
662 | } else | ||
663 | key = NULL; | ||
664 | |||
665 | if (key) { | 647 | if (key) { |
666 | int offset = (ts) ? 3 : 0; | 648 | int offset = (ts) ? 3 : 0; |
667 | 649 | ||
@@ -672,25 +654,22 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, | |||
672 | arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; | 654 | arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; |
673 | rep.th.doff = arg.iov[0].iov_len/4; | 655 | rep.th.doff = arg.iov[0].iov_len/4; |
674 | 656 | ||
675 | tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset], | 657 | tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset], |
676 | key, | 658 | key, ip_hdr(skb)->daddr, |
677 | ip_hdr(skb)->daddr, | 659 | ip_hdr(skb)->saddr, &rep.th); |
678 | ip_hdr(skb)->saddr, | ||
679 | &rep.th, IPPROTO_TCP, | ||
680 | arg.iov[0].iov_len); | ||
681 | } | 660 | } |
682 | #endif | 661 | #endif |
683 | arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, | 662 | arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, |
684 | ip_hdr(skb)->saddr, /* XXX */ | 663 | ip_hdr(skb)->saddr, /* XXX */ |
685 | arg.iov[0].iov_len, IPPROTO_TCP, 0); | 664 | arg.iov[0].iov_len, IPPROTO_TCP, 0); |
686 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; | 665 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; |
687 | if (twsk) | 666 | if (oif) |
688 | arg.bound_dev_if = twsk->tw_sk.tw_bound_dev_if; | 667 | arg.bound_dev_if = oif; |
689 | 668 | ||
690 | ip_send_reply(dev_net(skb->dev)->ipv4.tcp_sock, skb, | 669 | ip_send_reply(net->ipv4.tcp_sock, skb, |
691 | &arg, arg.iov[0].iov_len); | 670 | &arg, arg.iov[0].iov_len); |
692 | 671 | ||
693 | TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); | 672 | TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); |
694 | } | 673 | } |
695 | 674 | ||
696 | static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) | 675 | static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) |
@@ -698,9 +677,12 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) | |||
698 | struct inet_timewait_sock *tw = inet_twsk(sk); | 677 | struct inet_timewait_sock *tw = inet_twsk(sk); |
699 | struct tcp_timewait_sock *tcptw = tcp_twsk(sk); | 678 | struct tcp_timewait_sock *tcptw = tcp_twsk(sk); |
700 | 679 | ||
701 | tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, | 680 | tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, |
702 | tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, | 681 | tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, |
703 | tcptw->tw_ts_recent); | 682 | tcptw->tw_ts_recent, |
683 | tw->tw_bound_dev_if, | ||
684 | tcp_twsk_md5_key(tcptw) | ||
685 | ); | ||
704 | 686 | ||
705 | inet_twsk_put(tw); | 687 | inet_twsk_put(tw); |
706 | } | 688 | } |
@@ -708,9 +690,11 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) | |||
708 | static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, | 690 | static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, |
709 | struct request_sock *req) | 691 | struct request_sock *req) |
710 | { | 692 | { |
711 | tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, | 693 | tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, |
712 | tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, | 694 | tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, |
713 | req->ts_recent); | 695 | req->ts_recent, |
696 | 0, | ||
697 | tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr)); | ||
714 | } | 698 | } |
715 | 699 | ||
716 | /* | 700 | /* |
@@ -1000,32 +984,13 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, | |||
1000 | newkey, cmd.tcpm_keylen); | 984 | newkey, cmd.tcpm_keylen); |
1001 | } | 985 | } |
1002 | 986 | ||
1003 | static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | 987 | static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, |
1004 | __be32 saddr, __be32 daddr, | 988 | __be32 daddr, __be32 saddr, int nbytes) |
1005 | struct tcphdr *th, int protocol, | ||
1006 | unsigned int tcplen) | ||
1007 | { | 989 | { |
1008 | struct scatterlist sg[4]; | ||
1009 | __u16 data_len; | ||
1010 | int block = 0; | ||
1011 | __sum16 old_checksum; | ||
1012 | struct tcp_md5sig_pool *hp; | ||
1013 | struct tcp4_pseudohdr *bp; | 990 | struct tcp4_pseudohdr *bp; |
1014 | struct hash_desc *desc; | 991 | struct scatterlist sg; |
1015 | int err; | ||
1016 | unsigned int nbytes = 0; | ||
1017 | |||
1018 | /* | ||
1019 | * Okay, so RFC2385 is turned on for this connection, | ||
1020 | * so we need to generate the MD5 hash for the packet now. | ||
1021 | */ | ||
1022 | |||
1023 | hp = tcp_get_md5sig_pool(); | ||
1024 | if (!hp) | ||
1025 | goto clear_hash_noput; | ||
1026 | 992 | ||
1027 | bp = &hp->md5_blk.ip4; | 993 | bp = &hp->md5_blk.ip4; |
1028 | desc = &hp->md5_desc; | ||
1029 | 994 | ||
1030 | /* | 995 | /* |
1031 | * 1. the TCP pseudo-header (in the order: source IP address, | 996 | * 1. the TCP pseudo-header (in the order: source IP address, |
@@ -1035,86 +1000,96 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | |||
1035 | bp->saddr = saddr; | 1000 | bp->saddr = saddr; |
1036 | bp->daddr = daddr; | 1001 | bp->daddr = daddr; |
1037 | bp->pad = 0; | 1002 | bp->pad = 0; |
1038 | bp->protocol = protocol; | 1003 | bp->protocol = IPPROTO_TCP; |
1039 | bp->len = htons(tcplen); | 1004 | bp->len = cpu_to_be16(nbytes); |
1040 | |||
1041 | sg_init_table(sg, 4); | ||
1042 | |||
1043 | sg_set_buf(&sg[block++], bp, sizeof(*bp)); | ||
1044 | nbytes += sizeof(*bp); | ||
1045 | |||
1046 | /* 2. the TCP header, excluding options, and assuming a | ||
1047 | * checksum of zero/ | ||
1048 | */ | ||
1049 | old_checksum = th->check; | ||
1050 | th->check = 0; | ||
1051 | sg_set_buf(&sg[block++], th, sizeof(struct tcphdr)); | ||
1052 | nbytes += sizeof(struct tcphdr); | ||
1053 | 1005 | ||
1054 | /* 3. the TCP segment data (if any) */ | 1006 | sg_init_one(&sg, bp, sizeof(*bp)); |
1055 | data_len = tcplen - (th->doff << 2); | 1007 | return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp)); |
1056 | if (data_len > 0) { | 1008 | } |
1057 | unsigned char *data = (unsigned char *)th + (th->doff << 2); | ||
1058 | sg_set_buf(&sg[block++], data, data_len); | ||
1059 | nbytes += data_len; | ||
1060 | } | ||
1061 | 1009 | ||
1062 | /* 4. an independently-specified key or password, known to both | 1010 | static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, |
1063 | * TCPs and presumably connection-specific | 1011 | __be32 daddr, __be32 saddr, struct tcphdr *th) |
1064 | */ | 1012 | { |
1065 | sg_set_buf(&sg[block++], key->key, key->keylen); | 1013 | struct tcp_md5sig_pool *hp; |
1066 | nbytes += key->keylen; | 1014 | struct hash_desc *desc; |
1067 | 1015 | ||
1068 | sg_mark_end(&sg[block - 1]); | 1016 | hp = tcp_get_md5sig_pool(); |
1017 | if (!hp) | ||
1018 | goto clear_hash_noput; | ||
1019 | desc = &hp->md5_desc; | ||
1069 | 1020 | ||
1070 | /* Now store the Hash into the packet */ | 1021 | if (crypto_hash_init(desc)) |
1071 | err = crypto_hash_init(desc); | ||
1072 | if (err) | ||
1073 | goto clear_hash; | 1022 | goto clear_hash; |
1074 | err = crypto_hash_update(desc, sg, nbytes); | 1023 | if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2)) |
1075 | if (err) | ||
1076 | goto clear_hash; | 1024 | goto clear_hash; |
1077 | err = crypto_hash_final(desc, md5_hash); | 1025 | if (tcp_md5_hash_header(hp, th)) |
1078 | if (err) | 1026 | goto clear_hash; |
1027 | if (tcp_md5_hash_key(hp, key)) | ||
1028 | goto clear_hash; | ||
1029 | if (crypto_hash_final(desc, md5_hash)) | ||
1079 | goto clear_hash; | 1030 | goto clear_hash; |
1080 | 1031 | ||
1081 | /* Reset header, and free up the crypto */ | ||
1082 | tcp_put_md5sig_pool(); | 1032 | tcp_put_md5sig_pool(); |
1083 | th->check = old_checksum; | ||
1084 | |||
1085 | out: | ||
1086 | return 0; | 1033 | return 0; |
1034 | |||
1087 | clear_hash: | 1035 | clear_hash: |
1088 | tcp_put_md5sig_pool(); | 1036 | tcp_put_md5sig_pool(); |
1089 | clear_hash_noput: | 1037 | clear_hash_noput: |
1090 | memset(md5_hash, 0, 16); | 1038 | memset(md5_hash, 0, 16); |
1091 | goto out; | 1039 | return 1; |
1092 | } | 1040 | } |
1093 | 1041 | ||
1094 | int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | 1042 | int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key, |
1095 | struct sock *sk, | 1043 | struct sock *sk, struct request_sock *req, |
1096 | struct dst_entry *dst, | 1044 | struct sk_buff *skb) |
1097 | struct request_sock *req, | ||
1098 | struct tcphdr *th, int protocol, | ||
1099 | unsigned int tcplen) | ||
1100 | { | 1045 | { |
1046 | struct tcp_md5sig_pool *hp; | ||
1047 | struct hash_desc *desc; | ||
1048 | struct tcphdr *th = tcp_hdr(skb); | ||
1101 | __be32 saddr, daddr; | 1049 | __be32 saddr, daddr; |
1102 | 1050 | ||
1103 | if (sk) { | 1051 | if (sk) { |
1104 | saddr = inet_sk(sk)->saddr; | 1052 | saddr = inet_sk(sk)->saddr; |
1105 | daddr = inet_sk(sk)->daddr; | 1053 | daddr = inet_sk(sk)->daddr; |
1054 | } else if (req) { | ||
1055 | saddr = inet_rsk(req)->loc_addr; | ||
1056 | daddr = inet_rsk(req)->rmt_addr; | ||
1106 | } else { | 1057 | } else { |
1107 | struct rtable *rt = (struct rtable *)dst; | 1058 | const struct iphdr *iph = ip_hdr(skb); |
1108 | BUG_ON(!rt); | 1059 | saddr = iph->saddr; |
1109 | saddr = rt->rt_src; | 1060 | daddr = iph->daddr; |
1110 | daddr = rt->rt_dst; | ||
1111 | } | 1061 | } |
1112 | return tcp_v4_do_calc_md5_hash(md5_hash, key, | 1062 | |
1113 | saddr, daddr, | 1063 | hp = tcp_get_md5sig_pool(); |
1114 | th, protocol, tcplen); | 1064 | if (!hp) |
1065 | goto clear_hash_noput; | ||
1066 | desc = &hp->md5_desc; | ||
1067 | |||
1068 | if (crypto_hash_init(desc)) | ||
1069 | goto clear_hash; | ||
1070 | |||
1071 | if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len)) | ||
1072 | goto clear_hash; | ||
1073 | if (tcp_md5_hash_header(hp, th)) | ||
1074 | goto clear_hash; | ||
1075 | if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2)) | ||
1076 | goto clear_hash; | ||
1077 | if (tcp_md5_hash_key(hp, key)) | ||
1078 | goto clear_hash; | ||
1079 | if (crypto_hash_final(desc, md5_hash)) | ||
1080 | goto clear_hash; | ||
1081 | |||
1082 | tcp_put_md5sig_pool(); | ||
1083 | return 0; | ||
1084 | |||
1085 | clear_hash: | ||
1086 | tcp_put_md5sig_pool(); | ||
1087 | clear_hash_noput: | ||
1088 | memset(md5_hash, 0, 16); | ||
1089 | return 1; | ||
1115 | } | 1090 | } |
1116 | 1091 | ||
1117 | EXPORT_SYMBOL(tcp_v4_calc_md5_hash); | 1092 | EXPORT_SYMBOL(tcp_v4_md5_hash_skb); |
1118 | 1093 | ||
1119 | static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb) | 1094 | static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb) |
1120 | { | 1095 | { |
@@ -1130,52 +1105,12 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb) | |||
1130 | struct tcp_md5sig_key *hash_expected; | 1105 | struct tcp_md5sig_key *hash_expected; |
1131 | const struct iphdr *iph = ip_hdr(skb); | 1106 | const struct iphdr *iph = ip_hdr(skb); |
1132 | struct tcphdr *th = tcp_hdr(skb); | 1107 | struct tcphdr *th = tcp_hdr(skb); |
1133 | int length = (th->doff << 2) - sizeof(struct tcphdr); | ||
1134 | int genhash; | 1108 | int genhash; |
1135 | unsigned char *ptr; | ||
1136 | unsigned char newhash[16]; | 1109 | unsigned char newhash[16]; |
1137 | 1110 | ||
1138 | hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr); | 1111 | hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr); |
1112 | hash_location = tcp_parse_md5sig_option(th); | ||
1139 | 1113 | ||
1140 | /* | ||
1141 | * If the TCP option length is less than the TCP_MD5SIG | ||
1142 | * option length, then we can shortcut | ||
1143 | */ | ||
1144 | if (length < TCPOLEN_MD5SIG) { | ||
1145 | if (hash_expected) | ||
1146 | return 1; | ||
1147 | else | ||
1148 | return 0; | ||
1149 | } | ||
1150 | |||
1151 | /* Okay, we can't shortcut - we have to grub through the options */ | ||
1152 | ptr = (unsigned char *)(th + 1); | ||
1153 | while (length > 0) { | ||
1154 | int opcode = *ptr++; | ||
1155 | int opsize; | ||
1156 | |||
1157 | switch (opcode) { | ||
1158 | case TCPOPT_EOL: | ||
1159 | goto done_opts; | ||
1160 | case TCPOPT_NOP: | ||
1161 | length--; | ||
1162 | continue; | ||
1163 | default: | ||
1164 | opsize = *ptr++; | ||
1165 | if (opsize < 2) | ||
1166 | goto done_opts; | ||
1167 | if (opsize > length) | ||
1168 | goto done_opts; | ||
1169 | |||
1170 | if (opcode == TCPOPT_MD5SIG) { | ||
1171 | hash_location = ptr; | ||
1172 | goto done_opts; | ||
1173 | } | ||
1174 | } | ||
1175 | ptr += opsize-2; | ||
1176 | length -= opsize; | ||
1177 | } | ||
1178 | done_opts: | ||
1179 | /* We've parsed the options - do we have a hash? */ | 1114 | /* We've parsed the options - do we have a hash? */ |
1180 | if (!hash_expected && !hash_location) | 1115 | if (!hash_expected && !hash_location) |
1181 | return 0; | 1116 | return 0; |
@@ -1199,11 +1134,9 @@ done_opts: | |||
1199 | /* Okay, so this is hash_expected and hash_location - | 1134 | /* Okay, so this is hash_expected and hash_location - |
1200 | * so we need to calculate the checksum. | 1135 | * so we need to calculate the checksum. |
1201 | */ | 1136 | */ |
1202 | genhash = tcp_v4_do_calc_md5_hash(newhash, | 1137 | genhash = tcp_v4_md5_hash_skb(newhash, |
1203 | hash_expected, | 1138 | hash_expected, |
1204 | iph->saddr, iph->daddr, | 1139 | NULL, NULL, skb); |
1205 | th, sk->sk_protocol, | ||
1206 | skb->len); | ||
1207 | 1140 | ||
1208 | if (genhash || memcmp(hash_location, newhash, 16) != 0) { | 1141 | if (genhash || memcmp(hash_location, newhash, 16) != 0) { |
1209 | if (net_ratelimit()) { | 1142 | if (net_ratelimit()) { |
@@ -1347,7 +1280,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1347 | if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL && | 1280 | if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL && |
1348 | (s32)(peer->tcp_ts - req->ts_recent) > | 1281 | (s32)(peer->tcp_ts - req->ts_recent) > |
1349 | TCP_PAWS_WINDOW) { | 1282 | TCP_PAWS_WINDOW) { |
1350 | NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED); | 1283 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); |
1351 | goto drop_and_release; | 1284 | goto drop_and_release; |
1352 | } | 1285 | } |
1353 | } | 1286 | } |
@@ -1452,6 +1385,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1452 | if (newkey != NULL) | 1385 | if (newkey != NULL) |
1453 | tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr, | 1386 | tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr, |
1454 | newkey, key->keylen); | 1387 | newkey, key->keylen); |
1388 | newsk->sk_route_caps &= ~NETIF_F_GSO_MASK; | ||
1455 | } | 1389 | } |
1456 | #endif | 1390 | #endif |
1457 | 1391 | ||
@@ -1461,9 +1395,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1461 | return newsk; | 1395 | return newsk; |
1462 | 1396 | ||
1463 | exit_overflow: | 1397 | exit_overflow: |
1464 | NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS); | 1398 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); |
1465 | exit: | 1399 | exit: |
1466 | NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS); | 1400 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); |
1467 | dst_release(dst); | 1401 | dst_release(dst); |
1468 | return NULL; | 1402 | return NULL; |
1469 | } | 1403 | } |
@@ -1590,7 +1524,7 @@ discard: | |||
1590 | return 0; | 1524 | return 0; |
1591 | 1525 | ||
1592 | csum_err: | 1526 | csum_err: |
1593 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | 1527 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); |
1594 | goto discard; | 1528 | goto discard; |
1595 | } | 1529 | } |
1596 | 1530 | ||
@@ -1604,12 +1538,13 @@ int tcp_v4_rcv(struct sk_buff *skb) | |||
1604 | struct tcphdr *th; | 1538 | struct tcphdr *th; |
1605 | struct sock *sk; | 1539 | struct sock *sk; |
1606 | int ret; | 1540 | int ret; |
1541 | struct net *net = dev_net(skb->dev); | ||
1607 | 1542 | ||
1608 | if (skb->pkt_type != PACKET_HOST) | 1543 | if (skb->pkt_type != PACKET_HOST) |
1609 | goto discard_it; | 1544 | goto discard_it; |
1610 | 1545 | ||
1611 | /* Count it even if it's bad */ | 1546 | /* Count it even if it's bad */ |
1612 | TCP_INC_STATS_BH(TCP_MIB_INSEGS); | 1547 | TCP_INC_STATS_BH(net, TCP_MIB_INSEGS); |
1613 | 1548 | ||
1614 | if (!pskb_may_pull(skb, sizeof(struct tcphdr))) | 1549 | if (!pskb_may_pull(skb, sizeof(struct tcphdr))) |
1615 | goto discard_it; | 1550 | goto discard_it; |
@@ -1638,7 +1573,7 @@ int tcp_v4_rcv(struct sk_buff *skb) | |||
1638 | TCP_SKB_CB(skb)->flags = iph->tos; | 1573 | TCP_SKB_CB(skb)->flags = iph->tos; |
1639 | TCP_SKB_CB(skb)->sacked = 0; | 1574 | TCP_SKB_CB(skb)->sacked = 0; |
1640 | 1575 | ||
1641 | sk = __inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->saddr, | 1576 | sk = __inet_lookup(net, &tcp_hashinfo, iph->saddr, |
1642 | th->source, iph->daddr, th->dest, inet_iif(skb)); | 1577 | th->source, iph->daddr, th->dest, inet_iif(skb)); |
1643 | if (!sk) | 1578 | if (!sk) |
1644 | goto no_tcp_socket; | 1579 | goto no_tcp_socket; |
@@ -1685,7 +1620,7 @@ no_tcp_socket: | |||
1685 | 1620 | ||
1686 | if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { | 1621 | if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { |
1687 | bad_packet: | 1622 | bad_packet: |
1688 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | 1623 | TCP_INC_STATS_BH(net, TCP_MIB_INERRS); |
1689 | } else { | 1624 | } else { |
1690 | tcp_v4_send_reset(NULL, skb); | 1625 | tcp_v4_send_reset(NULL, skb); |
1691 | } | 1626 | } |
@@ -1706,7 +1641,7 @@ do_time_wait: | |||
1706 | } | 1641 | } |
1707 | 1642 | ||
1708 | if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { | 1643 | if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { |
1709 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | 1644 | TCP_INC_STATS_BH(net, TCP_MIB_INERRS); |
1710 | inet_twsk_put(inet_twsk(sk)); | 1645 | inet_twsk_put(inet_twsk(sk)); |
1711 | goto discard_it; | 1646 | goto discard_it; |
1712 | } | 1647 | } |
@@ -1814,7 +1749,7 @@ struct inet_connection_sock_af_ops ipv4_specific = { | |||
1814 | #ifdef CONFIG_TCP_MD5SIG | 1749 | #ifdef CONFIG_TCP_MD5SIG |
1815 | static struct tcp_sock_af_ops tcp_sock_ipv4_specific = { | 1750 | static struct tcp_sock_af_ops tcp_sock_ipv4_specific = { |
1816 | .md5_lookup = tcp_v4_md5_lookup, | 1751 | .md5_lookup = tcp_v4_md5_lookup, |
1817 | .calc_md5_hash = tcp_v4_calc_md5_hash, | 1752 | .calc_md5_hash = tcp_v4_md5_hash_skb, |
1818 | .md5_add = tcp_v4_md5_add_func, | 1753 | .md5_add = tcp_v4_md5_add_func, |
1819 | .md5_parse = tcp_v4_parse_md5_keys, | 1754 | .md5_parse = tcp_v4_parse_md5_keys, |
1820 | }; | 1755 | }; |
@@ -1871,7 +1806,7 @@ static int tcp_v4_init_sock(struct sock *sk) | |||
1871 | return 0; | 1806 | return 0; |
1872 | } | 1807 | } |
1873 | 1808 | ||
1874 | int tcp_v4_destroy_sock(struct sock *sk) | 1809 | void tcp_v4_destroy_sock(struct sock *sk) |
1875 | { | 1810 | { |
1876 | struct tcp_sock *tp = tcp_sk(sk); | 1811 | struct tcp_sock *tp = tcp_sk(sk); |
1877 | 1812 | ||
@@ -1915,8 +1850,6 @@ int tcp_v4_destroy_sock(struct sock *sk) | |||
1915 | } | 1850 | } |
1916 | 1851 | ||
1917 | atomic_dec(&tcp_sockets_allocated); | 1852 | atomic_dec(&tcp_sockets_allocated); |
1918 | |||
1919 | return 0; | ||
1920 | } | 1853 | } |
1921 | 1854 | ||
1922 | EXPORT_SYMBOL(tcp_v4_destroy_sock); | 1855 | EXPORT_SYMBOL(tcp_v4_destroy_sock); |
@@ -1959,8 +1892,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) | |||
1959 | req = req->dl_next; | 1892 | req = req->dl_next; |
1960 | while (1) { | 1893 | while (1) { |
1961 | while (req) { | 1894 | while (req) { |
1962 | if (req->rsk_ops->family == st->family && | 1895 | if (req->rsk_ops->family == st->family) { |
1963 | net_eq(sock_net(req->sk), net)) { | ||
1964 | cur = req; | 1896 | cur = req; |
1965 | goto out; | 1897 | goto out; |
1966 | } | 1898 | } |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 8245247a6ceb..204c42162660 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * Implementation of the Transmission Control Protocol(TCP). | 6 | * Implementation of the Transmission Control Protocol(TCP). |
7 | * | 7 | * |
8 | * Version: $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $ | ||
9 | * | ||
10 | * Authors: Ross Biro | 8 | * Authors: Ross Biro |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Mark Evans, <evansmp@uhura.aston.ac.uk> | 10 | * Mark Evans, <evansmp@uhura.aston.ac.uk> |
@@ -246,7 +244,7 @@ kill: | |||
246 | } | 244 | } |
247 | 245 | ||
248 | if (paws_reject) | 246 | if (paws_reject) |
249 | NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); | 247 | NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED); |
250 | 248 | ||
251 | if (!th->rst) { | 249 | if (!th->rst) { |
252 | /* In this case we must reset the TIMEWAIT timer. | 250 | /* In this case we must reset the TIMEWAIT timer. |
@@ -482,7 +480,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, | |||
482 | newtp->rx_opt.mss_clamp = req->mss; | 480 | newtp->rx_opt.mss_clamp = req->mss; |
483 | TCP_ECN_openreq_child(newtp, req); | 481 | TCP_ECN_openreq_child(newtp, req); |
484 | 482 | ||
485 | TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS); | 483 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS); |
486 | } | 484 | } |
487 | return newsk; | 485 | return newsk; |
488 | } | 486 | } |
@@ -613,7 +611,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, | |||
613 | if (!(flg & TCP_FLAG_RST)) | 611 | if (!(flg & TCP_FLAG_RST)) |
614 | req->rsk_ops->send_ack(skb, req); | 612 | req->rsk_ops->send_ack(skb, req); |
615 | if (paws_reject) | 613 | if (paws_reject) |
616 | NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); | 614 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); |
617 | return NULL; | 615 | return NULL; |
618 | } | 616 | } |
619 | 617 | ||
@@ -632,7 +630,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, | |||
632 | * "fourth, check the SYN bit" | 630 | * "fourth, check the SYN bit" |
633 | */ | 631 | */ |
634 | if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) { | 632 | if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) { |
635 | TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); | 633 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); |
636 | goto embryonic_reset; | 634 | goto embryonic_reset; |
637 | } | 635 | } |
638 | 636 | ||
@@ -697,7 +695,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, | |||
697 | } | 695 | } |
698 | 696 | ||
699 | embryonic_reset: | 697 | embryonic_reset: |
700 | NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS); | 698 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); |
701 | if (!(flg & TCP_FLAG_RST)) | 699 | if (!(flg & TCP_FLAG_RST)) |
702 | req->rsk_ops->send_reset(sk, skb); | 700 | req->rsk_ops->send_reset(sk, skb); |
703 | 701 | ||
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index ad993ecb4810..1fa683c0ba9b 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * Implementation of the Transmission Control Protocol(TCP). | 6 | * Implementation of the Transmission Control Protocol(TCP). |
7 | * | 7 | * |
8 | * Version: $Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $ | ||
9 | * | ||
10 | * Authors: Ross Biro | 8 | * Authors: Ross Biro |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Mark Evans, <evansmp@uhura.aston.ac.uk> | 10 | * Mark Evans, <evansmp@uhura.aston.ac.uk> |
@@ -347,28 +345,82 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) | |||
347 | TCP_SKB_CB(skb)->end_seq = seq; | 345 | TCP_SKB_CB(skb)->end_seq = seq; |
348 | } | 346 | } |
349 | 347 | ||
350 | static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp, | 348 | #define OPTION_SACK_ADVERTISE (1 << 0) |
351 | __u32 tstamp, __u8 **md5_hash) | 349 | #define OPTION_TS (1 << 1) |
352 | { | 350 | #define OPTION_MD5 (1 << 2) |
353 | if (tp->rx_opt.tstamp_ok) { | 351 | |
352 | struct tcp_out_options { | ||
353 | u8 options; /* bit field of OPTION_* */ | ||
354 | u8 ws; /* window scale, 0 to disable */ | ||
355 | u8 num_sack_blocks; /* number of SACK blocks to include */ | ||
356 | u16 mss; /* 0 to disable */ | ||
357 | __u32 tsval, tsecr; /* need to include OPTION_TS */ | ||
358 | }; | ||
359 | |||
360 | static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, | ||
361 | const struct tcp_out_options *opts, | ||
362 | __u8 **md5_hash) { | ||
363 | if (unlikely(OPTION_MD5 & opts->options)) { | ||
364 | *ptr++ = htonl((TCPOPT_NOP << 24) | | ||
365 | (TCPOPT_NOP << 16) | | ||
366 | (TCPOPT_MD5SIG << 8) | | ||
367 | TCPOLEN_MD5SIG); | ||
368 | *md5_hash = (__u8 *)ptr; | ||
369 | ptr += 4; | ||
370 | } else { | ||
371 | *md5_hash = NULL; | ||
372 | } | ||
373 | |||
374 | if (likely(OPTION_TS & opts->options)) { | ||
375 | if (unlikely(OPTION_SACK_ADVERTISE & opts->options)) { | ||
376 | *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | | ||
377 | (TCPOLEN_SACK_PERM << 16) | | ||
378 | (TCPOPT_TIMESTAMP << 8) | | ||
379 | TCPOLEN_TIMESTAMP); | ||
380 | } else { | ||
381 | *ptr++ = htonl((TCPOPT_NOP << 24) | | ||
382 | (TCPOPT_NOP << 16) | | ||
383 | (TCPOPT_TIMESTAMP << 8) | | ||
384 | TCPOLEN_TIMESTAMP); | ||
385 | } | ||
386 | *ptr++ = htonl(opts->tsval); | ||
387 | *ptr++ = htonl(opts->tsecr); | ||
388 | } | ||
389 | |||
390 | if (unlikely(opts->mss)) { | ||
391 | *ptr++ = htonl((TCPOPT_MSS << 24) | | ||
392 | (TCPOLEN_MSS << 16) | | ||
393 | opts->mss); | ||
394 | } | ||
395 | |||
396 | if (unlikely(OPTION_SACK_ADVERTISE & opts->options && | ||
397 | !(OPTION_TS & opts->options))) { | ||
354 | *ptr++ = htonl((TCPOPT_NOP << 24) | | 398 | *ptr++ = htonl((TCPOPT_NOP << 24) | |
355 | (TCPOPT_NOP << 16) | | 399 | (TCPOPT_NOP << 16) | |
356 | (TCPOPT_TIMESTAMP << 8) | | 400 | (TCPOPT_SACK_PERM << 8) | |
357 | TCPOLEN_TIMESTAMP); | 401 | TCPOLEN_SACK_PERM); |
358 | *ptr++ = htonl(tstamp); | 402 | } |
359 | *ptr++ = htonl(tp->rx_opt.ts_recent); | 403 | |
404 | if (unlikely(opts->ws)) { | ||
405 | *ptr++ = htonl((TCPOPT_NOP << 24) | | ||
406 | (TCPOPT_WINDOW << 16) | | ||
407 | (TCPOLEN_WINDOW << 8) | | ||
408 | opts->ws); | ||
360 | } | 409 | } |
361 | if (tp->rx_opt.eff_sacks) { | 410 | |
362 | struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks; | 411 | if (unlikely(opts->num_sack_blocks)) { |
412 | struct tcp_sack_block *sp = tp->rx_opt.dsack ? | ||
413 | tp->duplicate_sack : tp->selective_acks; | ||
363 | int this_sack; | 414 | int this_sack; |
364 | 415 | ||
365 | *ptr++ = htonl((TCPOPT_NOP << 24) | | 416 | *ptr++ = htonl((TCPOPT_NOP << 24) | |
366 | (TCPOPT_NOP << 16) | | 417 | (TCPOPT_NOP << 16) | |
367 | (TCPOPT_SACK << 8) | | 418 | (TCPOPT_SACK << 8) | |
368 | (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks * | 419 | (TCPOLEN_SACK_BASE + (opts->num_sack_blocks * |
369 | TCPOLEN_SACK_PERBLOCK))); | 420 | TCPOLEN_SACK_PERBLOCK))); |
370 | 421 | ||
371 | for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) { | 422 | for (this_sack = 0; this_sack < opts->num_sack_blocks; |
423 | ++this_sack) { | ||
372 | *ptr++ = htonl(sp[this_sack].start_seq); | 424 | *ptr++ = htonl(sp[this_sack].start_seq); |
373 | *ptr++ = htonl(sp[this_sack].end_seq); | 425 | *ptr++ = htonl(sp[this_sack].end_seq); |
374 | } | 426 | } |
@@ -378,81 +430,137 @@ static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp, | |||
378 | tp->rx_opt.eff_sacks--; | 430 | tp->rx_opt.eff_sacks--; |
379 | } | 431 | } |
380 | } | 432 | } |
433 | } | ||
434 | |||
435 | static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, | ||
436 | struct tcp_out_options *opts, | ||
437 | struct tcp_md5sig_key **md5) { | ||
438 | struct tcp_sock *tp = tcp_sk(sk); | ||
439 | unsigned size = 0; | ||
440 | |||
381 | #ifdef CONFIG_TCP_MD5SIG | 441 | #ifdef CONFIG_TCP_MD5SIG |
382 | if (md5_hash) { | 442 | *md5 = tp->af_specific->md5_lookup(sk, sk); |
383 | *ptr++ = htonl((TCPOPT_NOP << 24) | | 443 | if (*md5) { |
384 | (TCPOPT_NOP << 16) | | 444 | opts->options |= OPTION_MD5; |
385 | (TCPOPT_MD5SIG << 8) | | 445 | size += TCPOLEN_MD5SIG_ALIGNED; |
386 | TCPOLEN_MD5SIG); | ||
387 | *md5_hash = (__u8 *)ptr; | ||
388 | } | 446 | } |
447 | #else | ||
448 | *md5 = NULL; | ||
389 | #endif | 449 | #endif |
450 | |||
451 | /* We always get an MSS option. The option bytes which will be seen in | ||
452 | * normal data packets should timestamps be used, must be in the MSS | ||
453 | * advertised. But we subtract them from tp->mss_cache so that | ||
454 | * calculations in tcp_sendmsg are simpler etc. So account for this | ||
455 | * fact here if necessary. If we don't do this correctly, as a | ||
456 | * receiver we won't recognize data packets as being full sized when we | ||
457 | * should, and thus we won't abide by the delayed ACK rules correctly. | ||
458 | * SACKs don't matter, we never delay an ACK when we have any of those | ||
459 | * going out. */ | ||
460 | opts->mss = tcp_advertise_mss(sk); | ||
461 | size += TCPOLEN_MSS_ALIGNED; | ||
462 | |||
463 | if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { | ||
464 | opts->options |= OPTION_TS; | ||
465 | opts->tsval = TCP_SKB_CB(skb)->when; | ||
466 | opts->tsecr = tp->rx_opt.ts_recent; | ||
467 | size += TCPOLEN_TSTAMP_ALIGNED; | ||
468 | } | ||
469 | if (likely(sysctl_tcp_window_scaling)) { | ||
470 | opts->ws = tp->rx_opt.rcv_wscale; | ||
471 | size += TCPOLEN_WSCALE_ALIGNED; | ||
472 | } | ||
473 | if (likely(sysctl_tcp_sack)) { | ||
474 | opts->options |= OPTION_SACK_ADVERTISE; | ||
475 | if (unlikely(!OPTION_TS & opts->options)) | ||
476 | size += TCPOLEN_SACKPERM_ALIGNED; | ||
477 | } | ||
478 | |||
479 | return size; | ||
390 | } | 480 | } |
391 | 481 | ||
392 | /* Construct a tcp options header for a SYN or SYN_ACK packet. | 482 | static unsigned tcp_synack_options(struct sock *sk, |
393 | * If this is every changed make sure to change the definition of | 483 | struct request_sock *req, |
394 | * MAX_SYN_SIZE to match the new maximum number of options that you | 484 | unsigned mss, struct sk_buff *skb, |
395 | * can generate. | 485 | struct tcp_out_options *opts, |
396 | * | 486 | struct tcp_md5sig_key **md5) { |
397 | * Note - that with the RFC2385 TCP option, we make room for the | 487 | unsigned size = 0; |
398 | * 16 byte MD5 hash. This will be filled in later, so the pointer for the | 488 | struct inet_request_sock *ireq = inet_rsk(req); |
399 | * location to be filled is passed back up. | 489 | char doing_ts; |
400 | */ | 490 | |
401 | static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack, | ||
402 | int offer_wscale, int wscale, __u32 tstamp, | ||
403 | __u32 ts_recent, __u8 **md5_hash) | ||
404 | { | ||
405 | /* We always get an MSS option. | ||
406 | * The option bytes which will be seen in normal data | ||
407 | * packets should timestamps be used, must be in the MSS | ||
408 | * advertised. But we subtract them from tp->mss_cache so | ||
409 | * that calculations in tcp_sendmsg are simpler etc. | ||
410 | * So account for this fact here if necessary. If we | ||
411 | * don't do this correctly, as a receiver we won't | ||
412 | * recognize data packets as being full sized when we | ||
413 | * should, and thus we won't abide by the delayed ACK | ||
414 | * rules correctly. | ||
415 | * SACKs don't matter, we never delay an ACK when we | ||
416 | * have any of those going out. | ||
417 | */ | ||
418 | *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss); | ||
419 | if (ts) { | ||
420 | if (sack) | ||
421 | *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | | ||
422 | (TCPOLEN_SACK_PERM << 16) | | ||
423 | (TCPOPT_TIMESTAMP << 8) | | ||
424 | TCPOLEN_TIMESTAMP); | ||
425 | else | ||
426 | *ptr++ = htonl((TCPOPT_NOP << 24) | | ||
427 | (TCPOPT_NOP << 16) | | ||
428 | (TCPOPT_TIMESTAMP << 8) | | ||
429 | TCPOLEN_TIMESTAMP); | ||
430 | *ptr++ = htonl(tstamp); /* TSVAL */ | ||
431 | *ptr++ = htonl(ts_recent); /* TSECR */ | ||
432 | } else if (sack) | ||
433 | *ptr++ = htonl((TCPOPT_NOP << 24) | | ||
434 | (TCPOPT_NOP << 16) | | ||
435 | (TCPOPT_SACK_PERM << 8) | | ||
436 | TCPOLEN_SACK_PERM); | ||
437 | if (offer_wscale) | ||
438 | *ptr++ = htonl((TCPOPT_NOP << 24) | | ||
439 | (TCPOPT_WINDOW << 16) | | ||
440 | (TCPOLEN_WINDOW << 8) | | ||
441 | (wscale)); | ||
442 | #ifdef CONFIG_TCP_MD5SIG | 491 | #ifdef CONFIG_TCP_MD5SIG |
443 | /* | 492 | *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); |
444 | * If MD5 is enabled, then we set the option, and include the size | 493 | if (*md5) { |
445 | * (always 18). The actual MD5 hash is added just before the | 494 | opts->options |= OPTION_MD5; |
446 | * packet is sent. | 495 | size += TCPOLEN_MD5SIG_ALIGNED; |
447 | */ | ||
448 | if (md5_hash) { | ||
449 | *ptr++ = htonl((TCPOPT_NOP << 24) | | ||
450 | (TCPOPT_NOP << 16) | | ||
451 | (TCPOPT_MD5SIG << 8) | | ||
452 | TCPOLEN_MD5SIG); | ||
453 | *md5_hash = (__u8 *)ptr; | ||
454 | } | 496 | } |
497 | #else | ||
498 | *md5 = NULL; | ||
455 | #endif | 499 | #endif |
500 | |||
501 | /* we can't fit any SACK blocks in a packet with MD5 + TS | ||
502 | options. There was discussion about disabling SACK rather than TS in | ||
503 | order to fit in better with old, buggy kernels, but that was deemed | ||
504 | to be unnecessary. */ | ||
505 | doing_ts = ireq->tstamp_ok && !(*md5 && ireq->sack_ok); | ||
506 | |||
507 | opts->mss = mss; | ||
508 | size += TCPOLEN_MSS_ALIGNED; | ||
509 | |||
510 | if (likely(ireq->wscale_ok)) { | ||
511 | opts->ws = ireq->rcv_wscale; | ||
512 | size += TCPOLEN_WSCALE_ALIGNED; | ||
513 | } | ||
514 | if (likely(doing_ts)) { | ||
515 | opts->options |= OPTION_TS; | ||
516 | opts->tsval = TCP_SKB_CB(skb)->when; | ||
517 | opts->tsecr = req->ts_recent; | ||
518 | size += TCPOLEN_TSTAMP_ALIGNED; | ||
519 | } | ||
520 | if (likely(ireq->sack_ok)) { | ||
521 | opts->options |= OPTION_SACK_ADVERTISE; | ||
522 | if (unlikely(!doing_ts)) | ||
523 | size += TCPOLEN_SACKPERM_ALIGNED; | ||
524 | } | ||
525 | |||
526 | return size; | ||
527 | } | ||
528 | |||
529 | static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb, | ||
530 | struct tcp_out_options *opts, | ||
531 | struct tcp_md5sig_key **md5) { | ||
532 | struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; | ||
533 | struct tcp_sock *tp = tcp_sk(sk); | ||
534 | unsigned size = 0; | ||
535 | |||
536 | #ifdef CONFIG_TCP_MD5SIG | ||
537 | *md5 = tp->af_specific->md5_lookup(sk, sk); | ||
538 | if (unlikely(*md5)) { | ||
539 | opts->options |= OPTION_MD5; | ||
540 | size += TCPOLEN_MD5SIG_ALIGNED; | ||
541 | } | ||
542 | #else | ||
543 | *md5 = NULL; | ||
544 | #endif | ||
545 | |||
546 | if (likely(tp->rx_opt.tstamp_ok)) { | ||
547 | opts->options |= OPTION_TS; | ||
548 | opts->tsval = tcb ? tcb->when : 0; | ||
549 | opts->tsecr = tp->rx_opt.ts_recent; | ||
550 | size += TCPOLEN_TSTAMP_ALIGNED; | ||
551 | } | ||
552 | |||
553 | if (unlikely(tp->rx_opt.eff_sacks)) { | ||
554 | const unsigned remaining = MAX_TCP_OPTION_SPACE - size; | ||
555 | opts->num_sack_blocks = | ||
556 | min_t(unsigned, tp->rx_opt.eff_sacks, | ||
557 | (remaining - TCPOLEN_SACK_BASE_ALIGNED) / | ||
558 | TCPOLEN_SACK_PERBLOCK); | ||
559 | size += TCPOLEN_SACK_BASE_ALIGNED + | ||
560 | opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; | ||
561 | } | ||
562 | |||
563 | return size; | ||
456 | } | 564 | } |
457 | 565 | ||
458 | /* This routine actually transmits TCP packets queued in by | 566 | /* This routine actually transmits TCP packets queued in by |
@@ -473,13 +581,11 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
473 | struct inet_sock *inet; | 581 | struct inet_sock *inet; |
474 | struct tcp_sock *tp; | 582 | struct tcp_sock *tp; |
475 | struct tcp_skb_cb *tcb; | 583 | struct tcp_skb_cb *tcb; |
476 | int tcp_header_size; | 584 | struct tcp_out_options opts; |
477 | #ifdef CONFIG_TCP_MD5SIG | 585 | unsigned tcp_options_size, tcp_header_size; |
478 | struct tcp_md5sig_key *md5; | 586 | struct tcp_md5sig_key *md5; |
479 | __u8 *md5_hash_location; | 587 | __u8 *md5_hash_location; |
480 | #endif | ||
481 | struct tcphdr *th; | 588 | struct tcphdr *th; |
482 | int sysctl_flags; | ||
483 | int err; | 589 | int err; |
484 | 590 | ||
485 | BUG_ON(!skb || !tcp_skb_pcount(skb)); | 591 | BUG_ON(!skb || !tcp_skb_pcount(skb)); |
@@ -502,50 +608,18 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
502 | inet = inet_sk(sk); | 608 | inet = inet_sk(sk); |
503 | tp = tcp_sk(sk); | 609 | tp = tcp_sk(sk); |
504 | tcb = TCP_SKB_CB(skb); | 610 | tcb = TCP_SKB_CB(skb); |
505 | tcp_header_size = tp->tcp_header_len; | 611 | memset(&opts, 0, sizeof(opts)); |
506 | |||
507 | #define SYSCTL_FLAG_TSTAMPS 0x1 | ||
508 | #define SYSCTL_FLAG_WSCALE 0x2 | ||
509 | #define SYSCTL_FLAG_SACK 0x4 | ||
510 | 612 | ||
511 | sysctl_flags = 0; | 613 | if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) |
512 | if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) { | 614 | tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); |
513 | tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS; | 615 | else |
514 | if (sysctl_tcp_timestamps) { | 616 | tcp_options_size = tcp_established_options(sk, skb, &opts, |
515 | tcp_header_size += TCPOLEN_TSTAMP_ALIGNED; | 617 | &md5); |
516 | sysctl_flags |= SYSCTL_FLAG_TSTAMPS; | 618 | tcp_header_size = tcp_options_size + sizeof(struct tcphdr); |
517 | } | ||
518 | if (sysctl_tcp_window_scaling) { | ||
519 | tcp_header_size += TCPOLEN_WSCALE_ALIGNED; | ||
520 | sysctl_flags |= SYSCTL_FLAG_WSCALE; | ||
521 | } | ||
522 | if (sysctl_tcp_sack) { | ||
523 | sysctl_flags |= SYSCTL_FLAG_SACK; | ||
524 | if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS)) | ||
525 | tcp_header_size += TCPOLEN_SACKPERM_ALIGNED; | ||
526 | } | ||
527 | } else if (unlikely(tp->rx_opt.eff_sacks)) { | ||
528 | /* A SACK is 2 pad bytes, a 2 byte header, plus | ||
529 | * 2 32-bit sequence numbers for each SACK block. | ||
530 | */ | ||
531 | tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED + | ||
532 | (tp->rx_opt.eff_sacks * | ||
533 | TCPOLEN_SACK_PERBLOCK)); | ||
534 | } | ||
535 | 619 | ||
536 | if (tcp_packets_in_flight(tp) == 0) | 620 | if (tcp_packets_in_flight(tp) == 0) |
537 | tcp_ca_event(sk, CA_EVENT_TX_START); | 621 | tcp_ca_event(sk, CA_EVENT_TX_START); |
538 | 622 | ||
539 | #ifdef CONFIG_TCP_MD5SIG | ||
540 | /* | ||
541 | * Are we doing MD5 on this segment? If so - make | ||
542 | * room for it. | ||
543 | */ | ||
544 | md5 = tp->af_specific->md5_lookup(sk, sk); | ||
545 | if (md5) | ||
546 | tcp_header_size += TCPOLEN_MD5SIG_ALIGNED; | ||
547 | #endif | ||
548 | |||
549 | skb_push(skb, tcp_header_size); | 623 | skb_push(skb, tcp_header_size); |
550 | skb_reset_transport_header(skb); | 624 | skb_reset_transport_header(skb); |
551 | skb_set_owner_w(skb, sk); | 625 | skb_set_owner_w(skb, sk); |
@@ -576,39 +650,16 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
576 | th->urg = 1; | 650 | th->urg = 1; |
577 | } | 651 | } |
578 | 652 | ||
579 | if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) { | 653 | tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location); |
580 | tcp_syn_build_options((__be32 *)(th + 1), | 654 | if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0)) |
581 | tcp_advertise_mss(sk), | ||
582 | (sysctl_flags & SYSCTL_FLAG_TSTAMPS), | ||
583 | (sysctl_flags & SYSCTL_FLAG_SACK), | ||
584 | (sysctl_flags & SYSCTL_FLAG_WSCALE), | ||
585 | tp->rx_opt.rcv_wscale, | ||
586 | tcb->when, | ||
587 | tp->rx_opt.ts_recent, | ||
588 | |||
589 | #ifdef CONFIG_TCP_MD5SIG | ||
590 | md5 ? &md5_hash_location : | ||
591 | #endif | ||
592 | NULL); | ||
593 | } else { | ||
594 | tcp_build_and_update_options((__be32 *)(th + 1), | ||
595 | tp, tcb->when, | ||
596 | #ifdef CONFIG_TCP_MD5SIG | ||
597 | md5 ? &md5_hash_location : | ||
598 | #endif | ||
599 | NULL); | ||
600 | TCP_ECN_send(sk, skb, tcp_header_size); | 655 | TCP_ECN_send(sk, skb, tcp_header_size); |
601 | } | ||
602 | 656 | ||
603 | #ifdef CONFIG_TCP_MD5SIG | 657 | #ifdef CONFIG_TCP_MD5SIG |
604 | /* Calculate the MD5 hash, as we have all we need now */ | 658 | /* Calculate the MD5 hash, as we have all we need now */ |
605 | if (md5) { | 659 | if (md5) { |
660 | sk->sk_route_caps &= ~NETIF_F_GSO_MASK; | ||
606 | tp->af_specific->calc_md5_hash(md5_hash_location, | 661 | tp->af_specific->calc_md5_hash(md5_hash_location, |
607 | md5, | 662 | md5, sk, NULL, skb); |
608 | sk, NULL, NULL, | ||
609 | tcp_hdr(skb), | ||
610 | sk->sk_protocol, | ||
611 | skb->len); | ||
612 | } | 663 | } |
613 | #endif | 664 | #endif |
614 | 665 | ||
@@ -621,7 +672,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
621 | tcp_event_data_sent(tp, skb, sk); | 672 | tcp_event_data_sent(tp, skb, sk); |
622 | 673 | ||
623 | if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) | 674 | if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) |
624 | TCP_INC_STATS(TCP_MIB_OUTSEGS); | 675 | TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); |
625 | 676 | ||
626 | err = icsk->icsk_af_ops->queue_xmit(skb, 0); | 677 | err = icsk->icsk_af_ops->queue_xmit(skb, 0); |
627 | if (likely(err <= 0)) | 678 | if (likely(err <= 0)) |
@@ -630,10 +681,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
630 | tcp_enter_cwr(sk, 1); | 681 | tcp_enter_cwr(sk, 1); |
631 | 682 | ||
632 | return net_xmit_eval(err); | 683 | return net_xmit_eval(err); |
633 | |||
634 | #undef SYSCTL_FLAG_TSTAMPS | ||
635 | #undef SYSCTL_FLAG_WSCALE | ||
636 | #undef SYSCTL_FLAG_SACK | ||
637 | } | 684 | } |
638 | 685 | ||
639 | /* This routine just queue's the buffer | 686 | /* This routine just queue's the buffer |
@@ -974,6 +1021,9 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) | |||
974 | u32 mss_now; | 1021 | u32 mss_now; |
975 | u16 xmit_size_goal; | 1022 | u16 xmit_size_goal; |
976 | int doing_tso = 0; | 1023 | int doing_tso = 0; |
1024 | unsigned header_len; | ||
1025 | struct tcp_out_options opts; | ||
1026 | struct tcp_md5sig_key *md5; | ||
977 | 1027 | ||
978 | mss_now = tp->mss_cache; | 1028 | mss_now = tp->mss_cache; |
979 | 1029 | ||
@@ -986,14 +1036,16 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) | |||
986 | mss_now = tcp_sync_mss(sk, mtu); | 1036 | mss_now = tcp_sync_mss(sk, mtu); |
987 | } | 1037 | } |
988 | 1038 | ||
989 | if (tp->rx_opt.eff_sacks) | 1039 | header_len = tcp_established_options(sk, NULL, &opts, &md5) + |
990 | mss_now -= (TCPOLEN_SACK_BASE_ALIGNED + | 1040 | sizeof(struct tcphdr); |
991 | (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)); | 1041 | /* The mss_cache is sized based on tp->tcp_header_len, which assumes |
992 | 1042 | * some common options. If this is an odd packet (because we have SACK | |
993 | #ifdef CONFIG_TCP_MD5SIG | 1043 | * blocks etc) then our calculated header_len will be different, and |
994 | if (tp->af_specific->md5_lookup(sk, sk)) | 1044 | * we have to adjust mss_now correspondingly */ |
995 | mss_now -= TCPOLEN_MD5SIG_ALIGNED; | 1045 | if (header_len != tp->tcp_header_len) { |
996 | #endif | 1046 | int delta = (int) header_len - tp->tcp_header_len; |
1047 | mss_now -= delta; | ||
1048 | } | ||
997 | 1049 | ||
998 | xmit_size_goal = mss_now; | 1050 | xmit_size_goal = mss_now; |
999 | 1051 | ||
@@ -1913,7 +1965,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
1913 | 1965 | ||
1914 | if (err == 0) { | 1966 | if (err == 0) { |
1915 | /* Update global TCP statistics. */ | 1967 | /* Update global TCP statistics. */ |
1916 | TCP_INC_STATS(TCP_MIB_RETRANSSEGS); | 1968 | TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); |
1917 | 1969 | ||
1918 | tp->total_retrans++; | 1970 | tp->total_retrans++; |
1919 | 1971 | ||
@@ -1988,14 +2040,17 @@ void tcp_xmit_retransmit_queue(struct sock *sk) | |||
1988 | 2040 | ||
1989 | if (sacked & TCPCB_LOST) { | 2041 | if (sacked & TCPCB_LOST) { |
1990 | if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) { | 2042 | if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) { |
2043 | int mib_idx; | ||
2044 | |||
1991 | if (tcp_retransmit_skb(sk, skb)) { | 2045 | if (tcp_retransmit_skb(sk, skb)) { |
1992 | tp->retransmit_skb_hint = NULL; | 2046 | tp->retransmit_skb_hint = NULL; |
1993 | return; | 2047 | return; |
1994 | } | 2048 | } |
1995 | if (icsk->icsk_ca_state != TCP_CA_Loss) | 2049 | if (icsk->icsk_ca_state != TCP_CA_Loss) |
1996 | NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS); | 2050 | mib_idx = LINUX_MIB_TCPFASTRETRANS; |
1997 | else | 2051 | else |
1998 | NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS); | 2052 | mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; |
2053 | NET_INC_STATS_BH(sock_net(sk), mib_idx); | ||
1999 | 2054 | ||
2000 | if (skb == tcp_write_queue_head(sk)) | 2055 | if (skb == tcp_write_queue_head(sk)) |
2001 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, | 2056 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, |
@@ -2065,7 +2120,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) | |||
2065 | inet_csk(sk)->icsk_rto, | 2120 | inet_csk(sk)->icsk_rto, |
2066 | TCP_RTO_MAX); | 2121 | TCP_RTO_MAX); |
2067 | 2122 | ||
2068 | NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS); | 2123 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFORWARDRETRANS); |
2069 | } | 2124 | } |
2070 | } | 2125 | } |
2071 | 2126 | ||
@@ -2119,7 +2174,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) | |||
2119 | /* NOTE: No TCP options attached and we never retransmit this. */ | 2174 | /* NOTE: No TCP options attached and we never retransmit this. */ |
2120 | skb = alloc_skb(MAX_TCP_HEADER, priority); | 2175 | skb = alloc_skb(MAX_TCP_HEADER, priority); |
2121 | if (!skb) { | 2176 | if (!skb) { |
2122 | NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); | 2177 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); |
2123 | return; | 2178 | return; |
2124 | } | 2179 | } |
2125 | 2180 | ||
@@ -2130,9 +2185,9 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) | |||
2130 | /* Send it off. */ | 2185 | /* Send it off. */ |
2131 | TCP_SKB_CB(skb)->when = tcp_time_stamp; | 2186 | TCP_SKB_CB(skb)->when = tcp_time_stamp; |
2132 | if (tcp_transmit_skb(sk, skb, 0, priority)) | 2187 | if (tcp_transmit_skb(sk, skb, 0, priority)) |
2133 | NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); | 2188 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); |
2134 | 2189 | ||
2135 | TCP_INC_STATS(TCP_MIB_OUTRSTS); | 2190 | TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); |
2136 | } | 2191 | } |
2137 | 2192 | ||
2138 | /* WARNING: This routine must only be called when we have already sent | 2193 | /* WARNING: This routine must only be called when we have already sent |
@@ -2180,11 +2235,10 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2180 | struct tcp_sock *tp = tcp_sk(sk); | 2235 | struct tcp_sock *tp = tcp_sk(sk); |
2181 | struct tcphdr *th; | 2236 | struct tcphdr *th; |
2182 | int tcp_header_size; | 2237 | int tcp_header_size; |
2238 | struct tcp_out_options opts; | ||
2183 | struct sk_buff *skb; | 2239 | struct sk_buff *skb; |
2184 | #ifdef CONFIG_TCP_MD5SIG | ||
2185 | struct tcp_md5sig_key *md5; | 2240 | struct tcp_md5sig_key *md5; |
2186 | __u8 *md5_hash_location; | 2241 | __u8 *md5_hash_location; |
2187 | #endif | ||
2188 | 2242 | ||
2189 | skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); | 2243 | skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); |
2190 | if (skb == NULL) | 2244 | if (skb == NULL) |
@@ -2195,18 +2249,27 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2195 | 2249 | ||
2196 | skb->dst = dst_clone(dst); | 2250 | skb->dst = dst_clone(dst); |
2197 | 2251 | ||
2198 | tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS + | 2252 | if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ |
2199 | (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) + | 2253 | __u8 rcv_wscale; |
2200 | (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) + | 2254 | /* Set this up on the first call only */ |
2201 | /* SACK_PERM is in the place of NOP NOP of TS */ | 2255 | req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); |
2202 | ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0)); | 2256 | /* tcp_full_space because it is guaranteed to be the first packet */ |
2257 | tcp_select_initial_window(tcp_full_space(sk), | ||
2258 | dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), | ||
2259 | &req->rcv_wnd, | ||
2260 | &req->window_clamp, | ||
2261 | ireq->wscale_ok, | ||
2262 | &rcv_wscale); | ||
2263 | ireq->rcv_wscale = rcv_wscale; | ||
2264 | } | ||
2265 | |||
2266 | memset(&opts, 0, sizeof(opts)); | ||
2267 | TCP_SKB_CB(skb)->when = tcp_time_stamp; | ||
2268 | tcp_header_size = tcp_synack_options(sk, req, | ||
2269 | dst_metric(dst, RTAX_ADVMSS), | ||
2270 | skb, &opts, &md5) + | ||
2271 | sizeof(struct tcphdr); | ||
2203 | 2272 | ||
2204 | #ifdef CONFIG_TCP_MD5SIG | ||
2205 | /* Are we doing MD5 on this segment? If so - make room for it */ | ||
2206 | md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); | ||
2207 | if (md5) | ||
2208 | tcp_header_size += TCPOLEN_MD5SIG_ALIGNED; | ||
2209 | #endif | ||
2210 | skb_push(skb, tcp_header_size); | 2273 | skb_push(skb, tcp_header_size); |
2211 | skb_reset_transport_header(skb); | 2274 | skb_reset_transport_header(skb); |
2212 | 2275 | ||
@@ -2224,19 +2287,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2224 | TCPCB_FLAG_SYN | TCPCB_FLAG_ACK); | 2287 | TCPCB_FLAG_SYN | TCPCB_FLAG_ACK); |
2225 | th->seq = htonl(TCP_SKB_CB(skb)->seq); | 2288 | th->seq = htonl(TCP_SKB_CB(skb)->seq); |
2226 | th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); | 2289 | th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); |
2227 | if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ | ||
2228 | __u8 rcv_wscale; | ||
2229 | /* Set this up on the first call only */ | ||
2230 | req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); | ||
2231 | /* tcp_full_space because it is guaranteed to be the first packet */ | ||
2232 | tcp_select_initial_window(tcp_full_space(sk), | ||
2233 | dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), | ||
2234 | &req->rcv_wnd, | ||
2235 | &req->window_clamp, | ||
2236 | ireq->wscale_ok, | ||
2237 | &rcv_wscale); | ||
2238 | ireq->rcv_wscale = rcv_wscale; | ||
2239 | } | ||
2240 | 2290 | ||
2241 | /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ | 2291 | /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ |
2242 | th->window = htons(min(req->rcv_wnd, 65535U)); | 2292 | th->window = htons(min(req->rcv_wnd, 65535U)); |
@@ -2245,29 +2295,15 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2245 | TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); | 2295 | TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); |
2246 | else | 2296 | else |
2247 | #endif | 2297 | #endif |
2248 | TCP_SKB_CB(skb)->when = tcp_time_stamp; | 2298 | tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location); |
2249 | tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok, | ||
2250 | ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale, | ||
2251 | TCP_SKB_CB(skb)->when, | ||
2252 | req->ts_recent, | ||
2253 | ( | ||
2254 | #ifdef CONFIG_TCP_MD5SIG | ||
2255 | md5 ? &md5_hash_location : | ||
2256 | #endif | ||
2257 | NULL) | ||
2258 | ); | ||
2259 | |||
2260 | th->doff = (tcp_header_size >> 2); | 2299 | th->doff = (tcp_header_size >> 2); |
2261 | TCP_INC_STATS(TCP_MIB_OUTSEGS); | 2300 | TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); |
2262 | 2301 | ||
2263 | #ifdef CONFIG_TCP_MD5SIG | 2302 | #ifdef CONFIG_TCP_MD5SIG |
2264 | /* Okay, we have all we need - do the md5 hash if needed */ | 2303 | /* Okay, we have all we need - do the md5 hash if needed */ |
2265 | if (md5) { | 2304 | if (md5) { |
2266 | tp->af_specific->calc_md5_hash(md5_hash_location, | 2305 | tp->af_specific->calc_md5_hash(md5_hash_location, |
2267 | md5, | 2306 | md5, NULL, req, skb); |
2268 | NULL, dst, req, | ||
2269 | tcp_hdr(skb), sk->sk_protocol, | ||
2270 | skb->len); | ||
2271 | } | 2307 | } |
2272 | #endif | 2308 | #endif |
2273 | 2309 | ||
@@ -2367,7 +2403,7 @@ int tcp_connect(struct sock *sk) | |||
2367 | */ | 2403 | */ |
2368 | tp->snd_nxt = tp->write_seq; | 2404 | tp->snd_nxt = tp->write_seq; |
2369 | tp->pushed_seq = tp->write_seq; | 2405 | tp->pushed_seq = tp->write_seq; |
2370 | TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); | 2406 | TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); |
2371 | 2407 | ||
2372 | /* Timer for repeating the SYN until an answer. */ | 2408 | /* Timer for repeating the SYN until an answer. */ |
2373 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, | 2409 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 63ed9d6830e7..328e0cf42b3c 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * Implementation of the Transmission Control Protocol(TCP). | 6 | * Implementation of the Transmission Control Protocol(TCP). |
7 | * | 7 | * |
8 | * Version: $Id: tcp_timer.c,v 1.88 2002/02/01 22:01:04 davem Exp $ | ||
9 | * | ||
10 | * Authors: Ross Biro | 8 | * Authors: Ross Biro |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Mark Evans, <evansmp@uhura.aston.ac.uk> | 10 | * Mark Evans, <evansmp@uhura.aston.ac.uk> |
@@ -50,7 +48,7 @@ static void tcp_write_err(struct sock *sk) | |||
50 | sk->sk_error_report(sk); | 48 | sk->sk_error_report(sk); |
51 | 49 | ||
52 | tcp_done(sk); | 50 | tcp_done(sk); |
53 | NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT); | 51 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); |
54 | } | 52 | } |
55 | 53 | ||
56 | /* Do not allow orphaned sockets to eat all our resources. | 54 | /* Do not allow orphaned sockets to eat all our resources. |
@@ -91,7 +89,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset) | |||
91 | if (do_reset) | 89 | if (do_reset) |
92 | tcp_send_active_reset(sk, GFP_ATOMIC); | 90 | tcp_send_active_reset(sk, GFP_ATOMIC); |
93 | tcp_done(sk); | 91 | tcp_done(sk); |
94 | NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY); | 92 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); |
95 | return 1; | 93 | return 1; |
96 | } | 94 | } |
97 | return 0; | 95 | return 0; |
@@ -181,7 +179,7 @@ static void tcp_delack_timer(unsigned long data) | |||
181 | if (sock_owned_by_user(sk)) { | 179 | if (sock_owned_by_user(sk)) { |
182 | /* Try again later. */ | 180 | /* Try again later. */ |
183 | icsk->icsk_ack.blocked = 1; | 181 | icsk->icsk_ack.blocked = 1; |
184 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED); | 182 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); |
185 | sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN); | 183 | sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN); |
186 | goto out_unlock; | 184 | goto out_unlock; |
187 | } | 185 | } |
@@ -200,7 +198,7 @@ static void tcp_delack_timer(unsigned long data) | |||
200 | if (!skb_queue_empty(&tp->ucopy.prequeue)) { | 198 | if (!skb_queue_empty(&tp->ucopy.prequeue)) { |
201 | struct sk_buff *skb; | 199 | struct sk_buff *skb; |
202 | 200 | ||
203 | NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED); | 201 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED); |
204 | 202 | ||
205 | while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) | 203 | while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) |
206 | sk->sk_backlog_rcv(sk, skb); | 204 | sk->sk_backlog_rcv(sk, skb); |
@@ -220,7 +218,7 @@ static void tcp_delack_timer(unsigned long data) | |||
220 | icsk->icsk_ack.ato = TCP_ATO_MIN; | 218 | icsk->icsk_ack.ato = TCP_ATO_MIN; |
221 | } | 219 | } |
222 | tcp_send_ack(sk); | 220 | tcp_send_ack(sk); |
223 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS); | 221 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS); |
224 | } | 222 | } |
225 | TCP_CHECK_TIMER(sk); | 223 | TCP_CHECK_TIMER(sk); |
226 | 224 | ||
@@ -328,24 +326,27 @@ static void tcp_retransmit_timer(struct sock *sk) | |||
328 | goto out; | 326 | goto out; |
329 | 327 | ||
330 | if (icsk->icsk_retransmits == 0) { | 328 | if (icsk->icsk_retransmits == 0) { |
329 | int mib_idx; | ||
330 | |||
331 | if (icsk->icsk_ca_state == TCP_CA_Disorder || | 331 | if (icsk->icsk_ca_state == TCP_CA_Disorder || |
332 | icsk->icsk_ca_state == TCP_CA_Recovery) { | 332 | icsk->icsk_ca_state == TCP_CA_Recovery) { |
333 | if (tcp_is_sack(tp)) { | 333 | if (tcp_is_sack(tp)) { |
334 | if (icsk->icsk_ca_state == TCP_CA_Recovery) | 334 | if (icsk->icsk_ca_state == TCP_CA_Recovery) |
335 | NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL); | 335 | mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL; |
336 | else | 336 | else |
337 | NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES); | 337 | mib_idx = LINUX_MIB_TCPSACKFAILURES; |
338 | } else { | 338 | } else { |
339 | if (icsk->icsk_ca_state == TCP_CA_Recovery) | 339 | if (icsk->icsk_ca_state == TCP_CA_Recovery) |
340 | NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL); | 340 | mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL; |
341 | else | 341 | else |
342 | NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES); | 342 | mib_idx = LINUX_MIB_TCPRENOFAILURES; |
343 | } | 343 | } |
344 | } else if (icsk->icsk_ca_state == TCP_CA_Loss) { | 344 | } else if (icsk->icsk_ca_state == TCP_CA_Loss) { |
345 | NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES); | 345 | mib_idx = LINUX_MIB_TCPLOSSFAILURES; |
346 | } else { | 346 | } else { |
347 | NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS); | 347 | mib_idx = LINUX_MIB_TCPTIMEOUTS; |
348 | } | 348 | } |
349 | NET_INC_STATS_BH(sock_net(sk), mib_idx); | ||
349 | } | 350 | } |
350 | 351 | ||
351 | if (tcp_use_frto(sk)) { | 352 | if (tcp_use_frto(sk)) { |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 56fcda3694ba..a751770947a3 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * The User Datagram Protocol (UDP). | 6 | * The User Datagram Protocol (UDP). |
7 | * | 7 | * |
8 | * Version: $Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $ | ||
9 | * | ||
10 | * Authors: Ross Biro | 8 | * Authors: Ross Biro |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Arnt Gulbrandsen, <agulbra@nvg.unit.no> | 10 | * Arnt Gulbrandsen, <agulbra@nvg.unit.no> |
@@ -110,9 +108,6 @@ | |||
110 | * Snmp MIB for the UDP layer | 108 | * Snmp MIB for the UDP layer |
111 | */ | 109 | */ |
112 | 110 | ||
113 | DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly; | ||
114 | EXPORT_SYMBOL(udp_statistics); | ||
115 | |||
116 | DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly; | 111 | DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly; |
117 | EXPORT_SYMBOL(udp_stats_in6); | 112 | EXPORT_SYMBOL(udp_stats_in6); |
118 | 113 | ||
@@ -136,7 +131,7 @@ static inline int __udp_lib_lport_inuse(struct net *net, __u16 num, | |||
136 | struct sock *sk; | 131 | struct sock *sk; |
137 | struct hlist_node *node; | 132 | struct hlist_node *node; |
138 | 133 | ||
139 | sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)]) | 134 | sk_for_each(sk, node, &udptable[udp_hashfn(net, num)]) |
140 | if (net_eq(sock_net(sk), net) && sk->sk_hash == num) | 135 | if (net_eq(sock_net(sk), net) && sk->sk_hash == num) |
141 | return 1; | 136 | return 1; |
142 | return 0; | 137 | return 0; |
@@ -176,7 +171,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, | |||
176 | for (i = 0; i < UDP_HTABLE_SIZE; i++) { | 171 | for (i = 0; i < UDP_HTABLE_SIZE; i++) { |
177 | int size = 0; | 172 | int size = 0; |
178 | 173 | ||
179 | head = &udptable[rover & (UDP_HTABLE_SIZE - 1)]; | 174 | head = &udptable[udp_hashfn(net, rover)]; |
180 | if (hlist_empty(head)) | 175 | if (hlist_empty(head)) |
181 | goto gotit; | 176 | goto gotit; |
182 | 177 | ||
@@ -213,7 +208,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, | |||
213 | gotit: | 208 | gotit: |
214 | snum = rover; | 209 | snum = rover; |
215 | } else { | 210 | } else { |
216 | head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; | 211 | head = &udptable[udp_hashfn(net, snum)]; |
217 | 212 | ||
218 | sk_for_each(sk2, node, head) | 213 | sk_for_each(sk2, node, head) |
219 | if (sk2->sk_hash == snum && | 214 | if (sk2->sk_hash == snum && |
@@ -229,7 +224,7 @@ gotit: | |||
229 | inet_sk(sk)->num = snum; | 224 | inet_sk(sk)->num = snum; |
230 | sk->sk_hash = snum; | 225 | sk->sk_hash = snum; |
231 | if (sk_unhashed(sk)) { | 226 | if (sk_unhashed(sk)) { |
232 | head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; | 227 | head = &udptable[udp_hashfn(net, snum)]; |
233 | sk_add_node(sk, head); | 228 | sk_add_node(sk, head); |
234 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); | 229 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); |
235 | } | 230 | } |
@@ -266,7 +261,7 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, | |||
266 | int badness = -1; | 261 | int badness = -1; |
267 | 262 | ||
268 | read_lock(&udp_hash_lock); | 263 | read_lock(&udp_hash_lock); |
269 | sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) { | 264 | sk_for_each(sk, node, &udptable[udp_hashfn(net, hnum)]) { |
270 | struct inet_sock *inet = inet_sk(sk); | 265 | struct inet_sock *inet = inet_sk(sk); |
271 | 266 | ||
272 | if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && | 267 | if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && |
@@ -356,11 +351,12 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[]) | |||
356 | struct sock *sk; | 351 | struct sock *sk; |
357 | int harderr; | 352 | int harderr; |
358 | int err; | 353 | int err; |
354 | struct net *net = dev_net(skb->dev); | ||
359 | 355 | ||
360 | sk = __udp4_lib_lookup(dev_net(skb->dev), iph->daddr, uh->dest, | 356 | sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, |
361 | iph->saddr, uh->source, skb->dev->ifindex, udptable); | 357 | iph->saddr, uh->source, skb->dev->ifindex, udptable); |
362 | if (sk == NULL) { | 358 | if (sk == NULL) { |
363 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | 359 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); |
364 | return; /* No socket for error */ | 360 | return; /* No socket for error */ |
365 | } | 361 | } |
366 | 362 | ||
@@ -528,7 +524,8 @@ out: | |||
528 | up->len = 0; | 524 | up->len = 0; |
529 | up->pending = 0; | 525 | up->pending = 0; |
530 | if (!err) | 526 | if (!err) |
531 | UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite); | 527 | UDP_INC_STATS_USER(sock_net(sk), |
528 | UDP_MIB_OUTDATAGRAMS, is_udplite); | ||
532 | return err; | 529 | return err; |
533 | } | 530 | } |
534 | 531 | ||
@@ -656,11 +653,13 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
656 | .uli_u = { .ports = | 653 | .uli_u = { .ports = |
657 | { .sport = inet->sport, | 654 | { .sport = inet->sport, |
658 | .dport = dport } } }; | 655 | .dport = dport } } }; |
656 | struct net *net = sock_net(sk); | ||
657 | |||
659 | security_sk_classify_flow(sk, &fl); | 658 | security_sk_classify_flow(sk, &fl); |
660 | err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1); | 659 | err = ip_route_output_flow(net, &rt, &fl, sk, 1); |
661 | if (err) { | 660 | if (err) { |
662 | if (err == -ENETUNREACH) | 661 | if (err == -ENETUNREACH) |
663 | IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); | 662 | IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); |
664 | goto out; | 663 | goto out; |
665 | } | 664 | } |
666 | 665 | ||
@@ -727,7 +726,8 @@ out: | |||
727 | * seems like overkill. | 726 | * seems like overkill. |
728 | */ | 727 | */ |
729 | if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { | 728 | if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { |
730 | UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite); | 729 | UDP_INC_STATS_USER(sock_net(sk), |
730 | UDP_MIB_SNDBUFERRORS, is_udplite); | ||
731 | } | 731 | } |
732 | return err; | 732 | return err; |
733 | 733 | ||
@@ -890,7 +890,8 @@ try_again: | |||
890 | goto out_free; | 890 | goto out_free; |
891 | 891 | ||
892 | if (!peeked) | 892 | if (!peeked) |
893 | UDP_INC_STATS_USER(UDP_MIB_INDATAGRAMS, is_udplite); | 893 | UDP_INC_STATS_USER(sock_net(sk), |
894 | UDP_MIB_INDATAGRAMS, is_udplite); | ||
894 | 895 | ||
895 | sock_recv_timestamp(msg, sk, skb); | 896 | sock_recv_timestamp(msg, sk, skb); |
896 | 897 | ||
@@ -919,7 +920,7 @@ out: | |||
919 | csum_copy_err: | 920 | csum_copy_err: |
920 | lock_sock(sk); | 921 | lock_sock(sk); |
921 | if (!skb_kill_datagram(sk, skb, flags)) | 922 | if (!skb_kill_datagram(sk, skb, flags)) |
922 | UDP_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite); | 923 | UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
923 | release_sock(sk); | 924 | release_sock(sk); |
924 | 925 | ||
925 | if (noblock) | 926 | if (noblock) |
@@ -990,7 +991,8 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | |||
990 | 991 | ||
991 | ret = (*up->encap_rcv)(sk, skb); | 992 | ret = (*up->encap_rcv)(sk, skb); |
992 | if (ret <= 0) { | 993 | if (ret <= 0) { |
993 | UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, | 994 | UDP_INC_STATS_BH(sock_net(sk), |
995 | UDP_MIB_INDATAGRAMS, | ||
994 | is_udplite); | 996 | is_udplite); |
995 | return -ret; | 997 | return -ret; |
996 | } | 998 | } |
@@ -1042,15 +1044,18 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | |||
1042 | 1044 | ||
1043 | if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { | 1045 | if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { |
1044 | /* Note that an ENOMEM error is charged twice */ | 1046 | /* Note that an ENOMEM error is charged twice */ |
1045 | if (rc == -ENOMEM) | 1047 | if (rc == -ENOMEM) { |
1046 | UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite); | 1048 | UDP_INC_STATS_BH(sock_net(sk), |
1049 | UDP_MIB_RCVBUFERRORS, is_udplite); | ||
1050 | atomic_inc(&sk->sk_drops); | ||
1051 | } | ||
1047 | goto drop; | 1052 | goto drop; |
1048 | } | 1053 | } |
1049 | 1054 | ||
1050 | return 0; | 1055 | return 0; |
1051 | 1056 | ||
1052 | drop: | 1057 | drop: |
1053 | UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); | 1058 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
1054 | kfree_skb(skb); | 1059 | kfree_skb(skb); |
1055 | return -1; | 1060 | return -1; |
1056 | } | 1061 | } |
@@ -1061,7 +1066,7 @@ drop: | |||
1061 | * Note: called only from the BH handler context, | 1066 | * Note: called only from the BH handler context, |
1062 | * so we don't need to lock the hashes. | 1067 | * so we don't need to lock the hashes. |
1063 | */ | 1068 | */ |
1064 | static int __udp4_lib_mcast_deliver(struct sk_buff *skb, | 1069 | static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, |
1065 | struct udphdr *uh, | 1070 | struct udphdr *uh, |
1066 | __be32 saddr, __be32 daddr, | 1071 | __be32 saddr, __be32 daddr, |
1067 | struct hlist_head udptable[]) | 1072 | struct hlist_head udptable[]) |
@@ -1070,7 +1075,7 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb, | |||
1070 | int dif; | 1075 | int dif; |
1071 | 1076 | ||
1072 | read_lock(&udp_hash_lock); | 1077 | read_lock(&udp_hash_lock); |
1073 | sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]); | 1078 | sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]); |
1074 | dif = skb->dev->ifindex; | 1079 | dif = skb->dev->ifindex; |
1075 | sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); | 1080 | sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); |
1076 | if (sk) { | 1081 | if (sk) { |
@@ -1158,6 +1163,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], | |||
1158 | struct rtable *rt = (struct rtable*)skb->dst; | 1163 | struct rtable *rt = (struct rtable*)skb->dst; |
1159 | __be32 saddr = ip_hdr(skb)->saddr; | 1164 | __be32 saddr = ip_hdr(skb)->saddr; |
1160 | __be32 daddr = ip_hdr(skb)->daddr; | 1165 | __be32 daddr = ip_hdr(skb)->daddr; |
1166 | struct net *net = dev_net(skb->dev); | ||
1161 | 1167 | ||
1162 | /* | 1168 | /* |
1163 | * Validate the packet. | 1169 | * Validate the packet. |
@@ -1180,9 +1186,10 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], | |||
1180 | goto csum_error; | 1186 | goto csum_error; |
1181 | 1187 | ||
1182 | if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) | 1188 | if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) |
1183 | return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable); | 1189 | return __udp4_lib_mcast_deliver(net, skb, uh, |
1190 | saddr, daddr, udptable); | ||
1184 | 1191 | ||
1185 | sk = __udp4_lib_lookup(dev_net(skb->dev), saddr, uh->source, daddr, | 1192 | sk = __udp4_lib_lookup(net, saddr, uh->source, daddr, |
1186 | uh->dest, inet_iif(skb), udptable); | 1193 | uh->dest, inet_iif(skb), udptable); |
1187 | 1194 | ||
1188 | if (sk != NULL) { | 1195 | if (sk != NULL) { |
@@ -1211,7 +1218,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], | |||
1211 | if (udp_lib_checksum_complete(skb)) | 1218 | if (udp_lib_checksum_complete(skb)) |
1212 | goto csum_error; | 1219 | goto csum_error; |
1213 | 1220 | ||
1214 | UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); | 1221 | UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); |
1215 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); | 1222 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); |
1216 | 1223 | ||
1217 | /* | 1224 | /* |
@@ -1245,7 +1252,7 @@ csum_error: | |||
1245 | ntohs(uh->dest), | 1252 | ntohs(uh->dest), |
1246 | ulen); | 1253 | ulen); |
1247 | drop: | 1254 | drop: |
1248 | UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); | 1255 | UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); |
1249 | kfree_skb(skb); | 1256 | kfree_skb(skb); |
1250 | return 0; | 1257 | return 0; |
1251 | } | 1258 | } |
@@ -1255,12 +1262,11 @@ int udp_rcv(struct sk_buff *skb) | |||
1255 | return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP); | 1262 | return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP); |
1256 | } | 1263 | } |
1257 | 1264 | ||
1258 | int udp_destroy_sock(struct sock *sk) | 1265 | void udp_destroy_sock(struct sock *sk) |
1259 | { | 1266 | { |
1260 | lock_sock(sk); | 1267 | lock_sock(sk); |
1261 | udp_flush_pending_frames(sk); | 1268 | udp_flush_pending_frames(sk); |
1262 | release_sock(sk); | 1269 | release_sock(sk); |
1263 | return 0; | ||
1264 | } | 1270 | } |
1265 | 1271 | ||
1266 | /* | 1272 | /* |
@@ -1453,7 +1459,8 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
1453 | spin_lock_bh(&rcvq->lock); | 1459 | spin_lock_bh(&rcvq->lock); |
1454 | while ((skb = skb_peek(rcvq)) != NULL && | 1460 | while ((skb = skb_peek(rcvq)) != NULL && |
1455 | udp_lib_checksum_complete(skb)) { | 1461 | udp_lib_checksum_complete(skb)) { |
1456 | UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite); | 1462 | UDP_INC_STATS_BH(sock_net(sk), |
1463 | UDP_MIB_INERRORS, is_lite); | ||
1457 | __skb_unlink(skb, rcvq); | 1464 | __skb_unlink(skb, rcvq); |
1458 | kfree_skb(skb); | 1465 | kfree_skb(skb); |
1459 | } | 1466 | } |
@@ -1629,12 +1636,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, | |||
1629 | __u16 srcp = ntohs(inet->sport); | 1636 | __u16 srcp = ntohs(inet->sport); |
1630 | 1637 | ||
1631 | seq_printf(f, "%4d: %08X:%04X %08X:%04X" | 1638 | seq_printf(f, "%4d: %08X:%04X %08X:%04X" |
1632 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p%n", | 1639 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n", |
1633 | bucket, src, srcp, dest, destp, sp->sk_state, | 1640 | bucket, src, srcp, dest, destp, sp->sk_state, |
1634 | atomic_read(&sp->sk_wmem_alloc), | 1641 | atomic_read(&sp->sk_wmem_alloc), |
1635 | atomic_read(&sp->sk_rmem_alloc), | 1642 | atomic_read(&sp->sk_rmem_alloc), |
1636 | 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), | 1643 | 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), |
1637 | atomic_read(&sp->sk_refcnt), sp, len); | 1644 | atomic_read(&sp->sk_refcnt), sp, |
1645 | atomic_read(&sp->sk_drops), len); | ||
1638 | } | 1646 | } |
1639 | 1647 | ||
1640 | int udp4_seq_show(struct seq_file *seq, void *v) | 1648 | int udp4_seq_show(struct seq_file *seq, void *v) |
@@ -1643,7 +1651,7 @@ int udp4_seq_show(struct seq_file *seq, void *v) | |||
1643 | seq_printf(seq, "%-127s\n", | 1651 | seq_printf(seq, "%-127s\n", |
1644 | " sl local_address rem_address st tx_queue " | 1652 | " sl local_address rem_address st tx_queue " |
1645 | "rx_queue tr tm->when retrnsmt uid timeout " | 1653 | "rx_queue tr tm->when retrnsmt uid timeout " |
1646 | "inode"); | 1654 | "inode ref pointer drops"); |
1647 | else { | 1655 | else { |
1648 | struct udp_iter_state *state = seq->private; | 1656 | struct udp_iter_state *state = seq->private; |
1649 | int len; | 1657 | int len; |
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index 7288bf7977fb..2e9bad2fa1bc 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h | |||
@@ -26,7 +26,7 @@ extern int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
26 | extern int udp_sendpage(struct sock *sk, struct page *page, int offset, | 26 | extern int udp_sendpage(struct sock *sk, struct page *page, int offset, |
27 | size_t size, int flags); | 27 | size_t size, int flags); |
28 | extern int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); | 28 | extern int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); |
29 | extern int udp_destroy_sock(struct sock *sk); | 29 | extern void udp_destroy_sock(struct sock *sk); |
30 | 30 | ||
31 | #ifdef CONFIG_PROC_FS | 31 | #ifdef CONFIG_PROC_FS |
32 | extern int udp4_seq_show(struct seq_file *seq, void *v); | 32 | extern int udp4_seq_show(struct seq_file *seq, void *v); |
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 72ce26b6c4d3..3c807964da96 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * UDPLITE An implementation of the UDP-Lite protocol (RFC 3828). | 2 | * UDPLITE An implementation of the UDP-Lite protocol (RFC 3828). |
3 | * | 3 | * |
4 | * Version: $Id: udplite.c,v 1.25 2006/10/19 07:22:36 gerrit Exp $ | ||
5 | * | ||
6 | * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk> | 4 | * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk> |
7 | * | 5 | * |
8 | * Changes: | 6 | * Changes: |
@@ -13,7 +11,6 @@ | |||
13 | * 2 of the License, or (at your option) any later version. | 11 | * 2 of the License, or (at your option) any later version. |
14 | */ | 12 | */ |
15 | #include "udp_impl.h" | 13 | #include "udp_impl.h" |
16 | DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics) __read_mostly; | ||
17 | 14 | ||
18 | struct hlist_head udplite_hash[UDP_HTABLE_SIZE]; | 15 | struct hlist_head udplite_hash[UDP_HTABLE_SIZE]; |
19 | 16 | ||
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index ff61a5cdb0b3..580ae506c399 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -6,8 +6,6 @@ | |||
6 | * Pedro Roque <roque@di.fc.ul.pt> | 6 | * Pedro Roque <roque@di.fc.ul.pt> |
7 | * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> | 7 | * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> |
8 | * | 8 | * |
9 | * $Id: addrconf.c,v 1.69 2001/10/31 21:55:54 davem Exp $ | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
12 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
13 | * as published by the Free Software Foundation; either version | 11 | * as published by the Free Software Foundation; either version |
@@ -121,6 +119,7 @@ static void ipv6_regen_rndid(unsigned long data); | |||
121 | static int desync_factor = MAX_DESYNC_FACTOR * HZ; | 119 | static int desync_factor = MAX_DESYNC_FACTOR * HZ; |
122 | #endif | 120 | #endif |
123 | 121 | ||
122 | static int ipv6_generate_eui64(u8 *eui, struct net_device *dev); | ||
124 | static int ipv6_count_addresses(struct inet6_dev *idev); | 123 | static int ipv6_count_addresses(struct inet6_dev *idev); |
125 | 124 | ||
126 | /* | 125 | /* |
@@ -185,6 +184,8 @@ struct ipv6_devconf ipv6_devconf __read_mostly = { | |||
185 | #endif | 184 | #endif |
186 | .proxy_ndp = 0, | 185 | .proxy_ndp = 0, |
187 | .accept_source_route = 0, /* we do not accept RH0 by default. */ | 186 | .accept_source_route = 0, /* we do not accept RH0 by default. */ |
187 | .disable_ipv6 = 0, | ||
188 | .accept_dad = 1, | ||
188 | }; | 189 | }; |
189 | 190 | ||
190 | static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { | 191 | static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { |
@@ -217,6 +218,8 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { | |||
217 | #endif | 218 | #endif |
218 | .proxy_ndp = 0, | 219 | .proxy_ndp = 0, |
219 | .accept_source_route = 0, /* we do not accept RH0 by default. */ | 220 | .accept_source_route = 0, /* we do not accept RH0 by default. */ |
221 | .disable_ipv6 = 0, | ||
222 | .accept_dad = 1, | ||
220 | }; | 223 | }; |
221 | 224 | ||
222 | /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */ | 225 | /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */ |
@@ -226,9 +229,15 @@ const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_IN | |||
226 | const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; | 229 | const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; |
227 | 230 | ||
228 | /* Check if a valid qdisc is available */ | 231 | /* Check if a valid qdisc is available */ |
229 | static inline int addrconf_qdisc_ok(struct net_device *dev) | 232 | static inline bool addrconf_qdisc_ok(const struct net_device *dev) |
233 | { | ||
234 | return !qdisc_tx_is_noop(dev); | ||
235 | } | ||
236 | |||
237 | /* Check if a route is valid prefix route */ | ||
238 | static inline int addrconf_is_prefix_route(const struct rt6_info *rt) | ||
230 | { | 239 | { |
231 | return (dev->qdisc != &noop_qdisc); | 240 | return ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0); |
232 | } | 241 | } |
233 | 242 | ||
234 | static void addrconf_del_timer(struct inet6_ifaddr *ifp) | 243 | static void addrconf_del_timer(struct inet6_ifaddr *ifp) |
@@ -344,6 +353,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) | |||
344 | kfree(ndev); | 353 | kfree(ndev); |
345 | return NULL; | 354 | return NULL; |
346 | } | 355 | } |
356 | if (ndev->cnf.forwarding) | ||
357 | dev_disable_lro(dev); | ||
347 | /* We refer to the device */ | 358 | /* We refer to the device */ |
348 | dev_hold(dev); | 359 | dev_hold(dev); |
349 | 360 | ||
@@ -372,6 +383,9 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) | |||
372 | */ | 383 | */ |
373 | in6_dev_hold(ndev); | 384 | in6_dev_hold(ndev); |
374 | 385 | ||
386 | if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) | ||
387 | ndev->cnf.accept_dad = -1; | ||
388 | |||
375 | #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) | 389 | #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) |
376 | if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) { | 390 | if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) { |
377 | printk(KERN_INFO | 391 | printk(KERN_INFO |
@@ -438,6 +452,8 @@ static void dev_forward_change(struct inet6_dev *idev) | |||
438 | if (!idev) | 452 | if (!idev) |
439 | return; | 453 | return; |
440 | dev = idev->dev; | 454 | dev = idev->dev; |
455 | if (idev->cnf.forwarding) | ||
456 | dev_disable_lro(dev); | ||
441 | if (dev && (dev->flags & IFF_MULTICAST)) { | 457 | if (dev && (dev->flags & IFF_MULTICAST)) { |
442 | if (idev->cnf.forwarding) | 458 | if (idev->cnf.forwarding) |
443 | ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters); | 459 | ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters); |
@@ -483,12 +499,14 @@ static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) | |||
483 | if (p == &net->ipv6.devconf_dflt->forwarding) | 499 | if (p == &net->ipv6.devconf_dflt->forwarding) |
484 | return; | 500 | return; |
485 | 501 | ||
502 | rtnl_lock(); | ||
486 | if (p == &net->ipv6.devconf_all->forwarding) { | 503 | if (p == &net->ipv6.devconf_all->forwarding) { |
487 | __s32 newf = net->ipv6.devconf_all->forwarding; | 504 | __s32 newf = net->ipv6.devconf_all->forwarding; |
488 | net->ipv6.devconf_dflt->forwarding = newf; | 505 | net->ipv6.devconf_dflt->forwarding = newf; |
489 | addrconf_forward_change(net, newf); | 506 | addrconf_forward_change(net, newf); |
490 | } else if ((!*p) ^ (!old)) | 507 | } else if ((!*p) ^ (!old)) |
491 | dev_forward_change((struct inet6_dev *)table->extra1); | 508 | dev_forward_change((struct inet6_dev *)table->extra1); |
509 | rtnl_unlock(); | ||
492 | 510 | ||
493 | if (*p) | 511 | if (*p) |
494 | rt6_purge_dflt_routers(net); | 512 | rt6_purge_dflt_routers(net); |
@@ -568,6 +586,13 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
568 | struct rt6_info *rt; | 586 | struct rt6_info *rt; |
569 | int hash; | 587 | int hash; |
570 | int err = 0; | 588 | int err = 0; |
589 | int addr_type = ipv6_addr_type(addr); | ||
590 | |||
591 | if (addr_type == IPV6_ADDR_ANY || | ||
592 | addr_type & IPV6_ADDR_MULTICAST || | ||
593 | (!(idev->dev->flags & IFF_LOOPBACK) && | ||
594 | addr_type & IPV6_ADDR_LOOPBACK)) | ||
595 | return ERR_PTR(-EADDRNOTAVAIL); | ||
571 | 596 | ||
572 | rcu_read_lock_bh(); | 597 | rcu_read_lock_bh(); |
573 | if (idev->dead) { | 598 | if (idev->dead) { |
@@ -777,7 +802,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
777 | ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len); | 802 | ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len); |
778 | rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1); | 803 | rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1); |
779 | 804 | ||
780 | if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { | 805 | if (rt && addrconf_is_prefix_route(rt)) { |
781 | if (onlink == 0) { | 806 | if (onlink == 0) { |
782 | ip6_del_rt(rt); | 807 | ip6_del_rt(rt); |
783 | rt = NULL; | 808 | rt = NULL; |
@@ -958,7 +983,8 @@ static inline int ipv6_saddr_preferred(int type) | |||
958 | return 0; | 983 | return 0; |
959 | } | 984 | } |
960 | 985 | ||
961 | static int ipv6_get_saddr_eval(struct ipv6_saddr_score *score, | 986 | static int ipv6_get_saddr_eval(struct net *net, |
987 | struct ipv6_saddr_score *score, | ||
962 | struct ipv6_saddr_dst *dst, | 988 | struct ipv6_saddr_dst *dst, |
963 | int i) | 989 | int i) |
964 | { | 990 | { |
@@ -1037,7 +1063,8 @@ static int ipv6_get_saddr_eval(struct ipv6_saddr_score *score, | |||
1037 | break; | 1063 | break; |
1038 | case IPV6_SADDR_RULE_LABEL: | 1064 | case IPV6_SADDR_RULE_LABEL: |
1039 | /* Rule 6: Prefer matching label */ | 1065 | /* Rule 6: Prefer matching label */ |
1040 | ret = ipv6_addr_label(&score->ifa->addr, score->addr_type, | 1066 | ret = ipv6_addr_label(net, |
1067 | &score->ifa->addr, score->addr_type, | ||
1041 | score->ifa->idev->dev->ifindex) == dst->label; | 1068 | score->ifa->idev->dev->ifindex) == dst->label; |
1042 | break; | 1069 | break; |
1043 | #ifdef CONFIG_IPV6_PRIVACY | 1070 | #ifdef CONFIG_IPV6_PRIVACY |
@@ -1091,7 +1118,7 @@ int ipv6_dev_get_saddr(struct net_device *dst_dev, | |||
1091 | dst.addr = daddr; | 1118 | dst.addr = daddr; |
1092 | dst.ifindex = dst_dev ? dst_dev->ifindex : 0; | 1119 | dst.ifindex = dst_dev ? dst_dev->ifindex : 0; |
1093 | dst.scope = __ipv6_addr_src_scope(dst_type); | 1120 | dst.scope = __ipv6_addr_src_scope(dst_type); |
1094 | dst.label = ipv6_addr_label(daddr, dst_type, dst.ifindex); | 1121 | dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex); |
1095 | dst.prefs = prefs; | 1122 | dst.prefs = prefs; |
1096 | 1123 | ||
1097 | hiscore->rule = -1; | 1124 | hiscore->rule = -1; |
@@ -1159,8 +1186,8 @@ int ipv6_dev_get_saddr(struct net_device *dst_dev, | |||
1159 | for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) { | 1186 | for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) { |
1160 | int minihiscore, miniscore; | 1187 | int minihiscore, miniscore; |
1161 | 1188 | ||
1162 | minihiscore = ipv6_get_saddr_eval(hiscore, &dst, i); | 1189 | minihiscore = ipv6_get_saddr_eval(net, hiscore, &dst, i); |
1163 | miniscore = ipv6_get_saddr_eval(score, &dst, i); | 1190 | miniscore = ipv6_get_saddr_eval(net, score, &dst, i); |
1164 | 1191 | ||
1165 | if (minihiscore > miniscore) { | 1192 | if (minihiscore > miniscore) { |
1166 | if (i == IPV6_SADDR_RULE_SCOPE && | 1193 | if (i == IPV6_SADDR_RULE_SCOPE && |
@@ -1400,6 +1427,20 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp) | |||
1400 | 1427 | ||
1401 | void addrconf_dad_failure(struct inet6_ifaddr *ifp) | 1428 | void addrconf_dad_failure(struct inet6_ifaddr *ifp) |
1402 | { | 1429 | { |
1430 | struct inet6_dev *idev = ifp->idev; | ||
1431 | if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) { | ||
1432 | struct in6_addr addr; | ||
1433 | |||
1434 | addr.s6_addr32[0] = htonl(0xfe800000); | ||
1435 | addr.s6_addr32[1] = 0; | ||
1436 | |||
1437 | if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) && | ||
1438 | ipv6_addr_equal(&ifp->addr, &addr)) { | ||
1439 | /* DAD failed for link-local based on MAC address */ | ||
1440 | idev->cnf.disable_ipv6 = 1; | ||
1441 | } | ||
1442 | } | ||
1443 | |||
1403 | if (net_ratelimit()) | 1444 | if (net_ratelimit()) |
1404 | printk(KERN_INFO "%s: duplicate address detected!\n", ifp->idev->dev->name); | 1445 | printk(KERN_INFO "%s: duplicate address detected!\n", ifp->idev->dev->name); |
1405 | addrconf_dad_stop(ifp); | 1446 | addrconf_dad_stop(ifp); |
@@ -1788,7 +1829,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len) | |||
1788 | rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL, | 1829 | rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL, |
1789 | dev->ifindex, 1); | 1830 | dev->ifindex, 1); |
1790 | 1831 | ||
1791 | if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { | 1832 | if (rt && addrconf_is_prefix_route(rt)) { |
1792 | /* Autoconf prefix route */ | 1833 | /* Autoconf prefix route */ |
1793 | if (valid_lft == 0) { | 1834 | if (valid_lft == 0) { |
1794 | ip6_del_rt(rt); | 1835 | ip6_del_rt(rt); |
@@ -1822,6 +1863,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len) | |||
1822 | struct inet6_ifaddr * ifp; | 1863 | struct inet6_ifaddr * ifp; |
1823 | struct in6_addr addr; | 1864 | struct in6_addr addr; |
1824 | int create = 0, update_lft = 0; | 1865 | int create = 0, update_lft = 0; |
1866 | struct net *net = dev_net(dev); | ||
1825 | 1867 | ||
1826 | if (pinfo->prefix_len == 64) { | 1868 | if (pinfo->prefix_len == 64) { |
1827 | memcpy(&addr, &pinfo->prefix, 8); | 1869 | memcpy(&addr, &pinfo->prefix, 8); |
@@ -1840,7 +1882,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len) | |||
1840 | 1882 | ||
1841 | ok: | 1883 | ok: |
1842 | 1884 | ||
1843 | ifp = ipv6_get_ifaddr(dev_net(dev), &addr, dev, 1); | 1885 | ifp = ipv6_get_ifaddr(net, &addr, dev, 1); |
1844 | 1886 | ||
1845 | if (ifp == NULL && valid_lft) { | 1887 | if (ifp == NULL && valid_lft) { |
1846 | int max_addresses = in6_dev->cnf.max_addresses; | 1888 | int max_addresses = in6_dev->cnf.max_addresses; |
@@ -1848,7 +1890,7 @@ ok: | |||
1848 | 1890 | ||
1849 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 1891 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
1850 | if (in6_dev->cnf.optimistic_dad && | 1892 | if (in6_dev->cnf.optimistic_dad && |
1851 | !ipv6_devconf.forwarding) | 1893 | !net->ipv6.devconf_all->forwarding) |
1852 | addr_flags = IFA_F_OPTIMISTIC; | 1894 | addr_flags = IFA_F_OPTIMISTIC; |
1853 | #endif | 1895 | #endif |
1854 | 1896 | ||
@@ -2273,11 +2315,12 @@ static void init_loopback(struct net_device *dev) | |||
2273 | static void addrconf_add_linklocal(struct inet6_dev *idev, struct in6_addr *addr) | 2315 | static void addrconf_add_linklocal(struct inet6_dev *idev, struct in6_addr *addr) |
2274 | { | 2316 | { |
2275 | struct inet6_ifaddr * ifp; | 2317 | struct inet6_ifaddr * ifp; |
2318 | struct net *net = dev_net(idev->dev); | ||
2276 | u32 addr_flags = IFA_F_PERMANENT; | 2319 | u32 addr_flags = IFA_F_PERMANENT; |
2277 | 2320 | ||
2278 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 2321 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
2279 | if (idev->cnf.optimistic_dad && | 2322 | if (idev->cnf.optimistic_dad && |
2280 | !ipv6_devconf.forwarding) | 2323 | !net->ipv6.devconf_all->forwarding) |
2281 | addr_flags |= IFA_F_OPTIMISTIC; | 2324 | addr_flags |= IFA_F_OPTIMISTIC; |
2282 | #endif | 2325 | #endif |
2283 | 2326 | ||
@@ -2732,6 +2775,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) | |||
2732 | spin_lock_bh(&ifp->lock); | 2775 | spin_lock_bh(&ifp->lock); |
2733 | 2776 | ||
2734 | if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || | 2777 | if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || |
2778 | idev->cnf.accept_dad < 1 || | ||
2735 | !(ifp->flags&IFA_F_TENTATIVE) || | 2779 | !(ifp->flags&IFA_F_TENTATIVE) || |
2736 | ifp->flags & IFA_F_NODAD) { | 2780 | ifp->flags & IFA_F_NODAD) { |
2737 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC); | 2781 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC); |
@@ -2779,6 +2823,11 @@ static void addrconf_dad_timer(unsigned long data) | |||
2779 | read_unlock_bh(&idev->lock); | 2823 | read_unlock_bh(&idev->lock); |
2780 | goto out; | 2824 | goto out; |
2781 | } | 2825 | } |
2826 | if (idev->cnf.accept_dad > 1 && idev->cnf.disable_ipv6) { | ||
2827 | read_unlock_bh(&idev->lock); | ||
2828 | addrconf_dad_failure(ifp); | ||
2829 | return; | ||
2830 | } | ||
2782 | spin_lock_bh(&ifp->lock); | 2831 | spin_lock_bh(&ifp->lock); |
2783 | if (ifp->probes == 0) { | 2832 | if (ifp->probes == 0) { |
2784 | /* | 2833 | /* |
@@ -3638,6 +3687,8 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, | |||
3638 | #ifdef CONFIG_IPV6_MROUTE | 3687 | #ifdef CONFIG_IPV6_MROUTE |
3639 | array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding; | 3688 | array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding; |
3640 | #endif | 3689 | #endif |
3690 | array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6; | ||
3691 | array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad; | ||
3641 | } | 3692 | } |
3642 | 3693 | ||
3643 | static inline size_t inet6_if_nlmsg_size(void) | 3694 | static inline size_t inet6_if_nlmsg_size(void) |
@@ -4197,6 +4248,22 @@ static struct addrconf_sysctl_table | |||
4197 | }, | 4248 | }, |
4198 | #endif | 4249 | #endif |
4199 | { | 4250 | { |
4251 | .ctl_name = CTL_UNNUMBERED, | ||
4252 | .procname = "disable_ipv6", | ||
4253 | .data = &ipv6_devconf.disable_ipv6, | ||
4254 | .maxlen = sizeof(int), | ||
4255 | .mode = 0644, | ||
4256 | .proc_handler = &proc_dointvec, | ||
4257 | }, | ||
4258 | { | ||
4259 | .ctl_name = CTL_UNNUMBERED, | ||
4260 | .procname = "accept_dad", | ||
4261 | .data = &ipv6_devconf.accept_dad, | ||
4262 | .maxlen = sizeof(int), | ||
4263 | .mode = 0644, | ||
4264 | .proc_handler = &proc_dointvec, | ||
4265 | }, | ||
4266 | { | ||
4200 | .ctl_name = 0, /* sentinel */ | 4267 | .ctl_name = 0, /* sentinel */ |
4201 | } | 4268 | } |
4202 | }, | 4269 | }, |
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index 9bfa8846f262..08909039d87b 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c | |||
@@ -29,6 +29,9 @@ | |||
29 | */ | 29 | */ |
30 | struct ip6addrlbl_entry | 30 | struct ip6addrlbl_entry |
31 | { | 31 | { |
32 | #ifdef CONFIG_NET_NS | ||
33 | struct net *lbl_net; | ||
34 | #endif | ||
32 | struct in6_addr prefix; | 35 | struct in6_addr prefix; |
33 | int prefixlen; | 36 | int prefixlen; |
34 | int ifindex; | 37 | int ifindex; |
@@ -46,6 +49,16 @@ static struct ip6addrlbl_table | |||
46 | u32 seq; | 49 | u32 seq; |
47 | } ip6addrlbl_table; | 50 | } ip6addrlbl_table; |
48 | 51 | ||
52 | static inline | ||
53 | struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl) | ||
54 | { | ||
55 | #ifdef CONFIG_NET_NS | ||
56 | return lbl->lbl_net; | ||
57 | #else | ||
58 | return &init_net; | ||
59 | #endif | ||
60 | } | ||
61 | |||
49 | /* | 62 | /* |
50 | * Default policy table (RFC3484 + extensions) | 63 | * Default policy table (RFC3484 + extensions) |
51 | * | 64 | * |
@@ -65,7 +78,7 @@ static struct ip6addrlbl_table | |||
65 | 78 | ||
66 | #define IPV6_ADDR_LABEL_DEFAULT 0xffffffffUL | 79 | #define IPV6_ADDR_LABEL_DEFAULT 0xffffffffUL |
67 | 80 | ||
68 | static const __initdata struct ip6addrlbl_init_table | 81 | static const __net_initdata struct ip6addrlbl_init_table |
69 | { | 82 | { |
70 | const struct in6_addr *prefix; | 83 | const struct in6_addr *prefix; |
71 | int prefixlen; | 84 | int prefixlen; |
@@ -108,6 +121,9 @@ static const __initdata struct ip6addrlbl_init_table | |||
108 | /* Object management */ | 121 | /* Object management */ |
109 | static inline void ip6addrlbl_free(struct ip6addrlbl_entry *p) | 122 | static inline void ip6addrlbl_free(struct ip6addrlbl_entry *p) |
110 | { | 123 | { |
124 | #ifdef CONFIG_NET_NS | ||
125 | release_net(p->lbl_net); | ||
126 | #endif | ||
111 | kfree(p); | 127 | kfree(p); |
112 | } | 128 | } |
113 | 129 | ||
@@ -128,10 +144,13 @@ static inline void ip6addrlbl_put(struct ip6addrlbl_entry *p) | |||
128 | } | 144 | } |
129 | 145 | ||
130 | /* Find label */ | 146 | /* Find label */ |
131 | static int __ip6addrlbl_match(struct ip6addrlbl_entry *p, | 147 | static int __ip6addrlbl_match(struct net *net, |
148 | struct ip6addrlbl_entry *p, | ||
132 | const struct in6_addr *addr, | 149 | const struct in6_addr *addr, |
133 | int addrtype, int ifindex) | 150 | int addrtype, int ifindex) |
134 | { | 151 | { |
152 | if (!net_eq(ip6addrlbl_net(p), net)) | ||
153 | return 0; | ||
135 | if (p->ifindex && p->ifindex != ifindex) | 154 | if (p->ifindex && p->ifindex != ifindex) |
136 | return 0; | 155 | return 0; |
137 | if (p->addrtype && p->addrtype != addrtype) | 156 | if (p->addrtype && p->addrtype != addrtype) |
@@ -141,19 +160,21 @@ static int __ip6addrlbl_match(struct ip6addrlbl_entry *p, | |||
141 | return 1; | 160 | return 1; |
142 | } | 161 | } |
143 | 162 | ||
144 | static struct ip6addrlbl_entry *__ipv6_addr_label(const struct in6_addr *addr, | 163 | static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net, |
164 | const struct in6_addr *addr, | ||
145 | int type, int ifindex) | 165 | int type, int ifindex) |
146 | { | 166 | { |
147 | struct hlist_node *pos; | 167 | struct hlist_node *pos; |
148 | struct ip6addrlbl_entry *p; | 168 | struct ip6addrlbl_entry *p; |
149 | hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { | 169 | hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { |
150 | if (__ip6addrlbl_match(p, addr, type, ifindex)) | 170 | if (__ip6addrlbl_match(net, p, addr, type, ifindex)) |
151 | return p; | 171 | return p; |
152 | } | 172 | } |
153 | return NULL; | 173 | return NULL; |
154 | } | 174 | } |
155 | 175 | ||
156 | u32 ipv6_addr_label(const struct in6_addr *addr, int type, int ifindex) | 176 | u32 ipv6_addr_label(struct net *net, |
177 | const struct in6_addr *addr, int type, int ifindex) | ||
157 | { | 178 | { |
158 | u32 label; | 179 | u32 label; |
159 | struct ip6addrlbl_entry *p; | 180 | struct ip6addrlbl_entry *p; |
@@ -161,7 +182,7 @@ u32 ipv6_addr_label(const struct in6_addr *addr, int type, int ifindex) | |||
161 | type &= IPV6_ADDR_MAPPED | IPV6_ADDR_COMPATv4 | IPV6_ADDR_LOOPBACK; | 182 | type &= IPV6_ADDR_MAPPED | IPV6_ADDR_COMPATv4 | IPV6_ADDR_LOOPBACK; |
162 | 183 | ||
163 | rcu_read_lock(); | 184 | rcu_read_lock(); |
164 | p = __ipv6_addr_label(addr, type, ifindex); | 185 | p = __ipv6_addr_label(net, addr, type, ifindex); |
165 | label = p ? p->label : IPV6_ADDR_LABEL_DEFAULT; | 186 | label = p ? p->label : IPV6_ADDR_LABEL_DEFAULT; |
166 | rcu_read_unlock(); | 187 | rcu_read_unlock(); |
167 | 188 | ||
@@ -174,7 +195,8 @@ u32 ipv6_addr_label(const struct in6_addr *addr, int type, int ifindex) | |||
174 | } | 195 | } |
175 | 196 | ||
176 | /* allocate one entry */ | 197 | /* allocate one entry */ |
177 | static struct ip6addrlbl_entry *ip6addrlbl_alloc(const struct in6_addr *prefix, | 198 | static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net, |
199 | const struct in6_addr *prefix, | ||
178 | int prefixlen, int ifindex, | 200 | int prefixlen, int ifindex, |
179 | u32 label) | 201 | u32 label) |
180 | { | 202 | { |
@@ -216,6 +238,9 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(const struct in6_addr *prefix, | |||
216 | newp->addrtype = addrtype; | 238 | newp->addrtype = addrtype; |
217 | newp->label = label; | 239 | newp->label = label; |
218 | INIT_HLIST_NODE(&newp->list); | 240 | INIT_HLIST_NODE(&newp->list); |
241 | #ifdef CONFIG_NET_NS | ||
242 | newp->lbl_net = hold_net(net); | ||
243 | #endif | ||
219 | atomic_set(&newp->refcnt, 1); | 244 | atomic_set(&newp->refcnt, 1); |
220 | return newp; | 245 | return newp; |
221 | } | 246 | } |
@@ -237,6 +262,7 @@ static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace) | |||
237 | hlist_for_each_entry_safe(p, pos, n, | 262 | hlist_for_each_entry_safe(p, pos, n, |
238 | &ip6addrlbl_table.head, list) { | 263 | &ip6addrlbl_table.head, list) { |
239 | if (p->prefixlen == newp->prefixlen && | 264 | if (p->prefixlen == newp->prefixlen && |
265 | net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) && | ||
240 | p->ifindex == newp->ifindex && | 266 | p->ifindex == newp->ifindex && |
241 | ipv6_addr_equal(&p->prefix, &newp->prefix)) { | 267 | ipv6_addr_equal(&p->prefix, &newp->prefix)) { |
242 | if (!replace) { | 268 | if (!replace) { |
@@ -261,7 +287,8 @@ out: | |||
261 | } | 287 | } |
262 | 288 | ||
263 | /* add a label */ | 289 | /* add a label */ |
264 | static int ip6addrlbl_add(const struct in6_addr *prefix, int prefixlen, | 290 | static int ip6addrlbl_add(struct net *net, |
291 | const struct in6_addr *prefix, int prefixlen, | ||
265 | int ifindex, u32 label, int replace) | 292 | int ifindex, u32 label, int replace) |
266 | { | 293 | { |
267 | struct ip6addrlbl_entry *newp; | 294 | struct ip6addrlbl_entry *newp; |
@@ -274,7 +301,7 @@ static int ip6addrlbl_add(const struct in6_addr *prefix, int prefixlen, | |||
274 | (unsigned int)label, | 301 | (unsigned int)label, |
275 | replace); | 302 | replace); |
276 | 303 | ||
277 | newp = ip6addrlbl_alloc(prefix, prefixlen, ifindex, label); | 304 | newp = ip6addrlbl_alloc(net, prefix, prefixlen, ifindex, label); |
278 | if (IS_ERR(newp)) | 305 | if (IS_ERR(newp)) |
279 | return PTR_ERR(newp); | 306 | return PTR_ERR(newp); |
280 | spin_lock(&ip6addrlbl_table.lock); | 307 | spin_lock(&ip6addrlbl_table.lock); |
@@ -286,7 +313,8 @@ static int ip6addrlbl_add(const struct in6_addr *prefix, int prefixlen, | |||
286 | } | 313 | } |
287 | 314 | ||
288 | /* remove a label */ | 315 | /* remove a label */ |
289 | static int __ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen, | 316 | static int __ip6addrlbl_del(struct net *net, |
317 | const struct in6_addr *prefix, int prefixlen, | ||
290 | int ifindex) | 318 | int ifindex) |
291 | { | 319 | { |
292 | struct ip6addrlbl_entry *p = NULL; | 320 | struct ip6addrlbl_entry *p = NULL; |
@@ -300,6 +328,7 @@ static int __ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen, | |||
300 | 328 | ||
301 | hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) { | 329 | hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) { |
302 | if (p->prefixlen == prefixlen && | 330 | if (p->prefixlen == prefixlen && |
331 | net_eq(ip6addrlbl_net(p), net) && | ||
303 | p->ifindex == ifindex && | 332 | p->ifindex == ifindex && |
304 | ipv6_addr_equal(&p->prefix, prefix)) { | 333 | ipv6_addr_equal(&p->prefix, prefix)) { |
305 | hlist_del_rcu(&p->list); | 334 | hlist_del_rcu(&p->list); |
@@ -311,7 +340,8 @@ static int __ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen, | |||
311 | return ret; | 340 | return ret; |
312 | } | 341 | } |
313 | 342 | ||
314 | static int ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen, | 343 | static int ip6addrlbl_del(struct net *net, |
344 | const struct in6_addr *prefix, int prefixlen, | ||
315 | int ifindex) | 345 | int ifindex) |
316 | { | 346 | { |
317 | struct in6_addr prefix_buf; | 347 | struct in6_addr prefix_buf; |
@@ -324,13 +354,13 @@ static int ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen, | |||
324 | 354 | ||
325 | ipv6_addr_prefix(&prefix_buf, prefix, prefixlen); | 355 | ipv6_addr_prefix(&prefix_buf, prefix, prefixlen); |
326 | spin_lock(&ip6addrlbl_table.lock); | 356 | spin_lock(&ip6addrlbl_table.lock); |
327 | ret = __ip6addrlbl_del(&prefix_buf, prefixlen, ifindex); | 357 | ret = __ip6addrlbl_del(net, &prefix_buf, prefixlen, ifindex); |
328 | spin_unlock(&ip6addrlbl_table.lock); | 358 | spin_unlock(&ip6addrlbl_table.lock); |
329 | return ret; | 359 | return ret; |
330 | } | 360 | } |
331 | 361 | ||
332 | /* add default label */ | 362 | /* add default label */ |
333 | static __init int ip6addrlbl_init(void) | 363 | static int __net_init ip6addrlbl_net_init(struct net *net) |
334 | { | 364 | { |
335 | int err = 0; | 365 | int err = 0; |
336 | int i; | 366 | int i; |
@@ -338,7 +368,8 @@ static __init int ip6addrlbl_init(void) | |||
338 | ADDRLABEL(KERN_DEBUG "%s()\n", __func__); | 368 | ADDRLABEL(KERN_DEBUG "%s()\n", __func__); |
339 | 369 | ||
340 | for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) { | 370 | for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) { |
341 | int ret = ip6addrlbl_add(ip6addrlbl_init_table[i].prefix, | 371 | int ret = ip6addrlbl_add(net, |
372 | ip6addrlbl_init_table[i].prefix, | ||
342 | ip6addrlbl_init_table[i].prefixlen, | 373 | ip6addrlbl_init_table[i].prefixlen, |
343 | 0, | 374 | 0, |
344 | ip6addrlbl_init_table[i].label, 0); | 375 | ip6addrlbl_init_table[i].label, 0); |
@@ -349,11 +380,32 @@ static __init int ip6addrlbl_init(void) | |||
349 | return err; | 380 | return err; |
350 | } | 381 | } |
351 | 382 | ||
383 | static void __net_exit ip6addrlbl_net_exit(struct net *net) | ||
384 | { | ||
385 | struct ip6addrlbl_entry *p = NULL; | ||
386 | struct hlist_node *pos, *n; | ||
387 | |||
388 | /* Remove all labels belonging to the exiting net */ | ||
389 | spin_lock(&ip6addrlbl_table.lock); | ||
390 | hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) { | ||
391 | if (net_eq(ip6addrlbl_net(p), net)) { | ||
392 | hlist_del_rcu(&p->list); | ||
393 | ip6addrlbl_put(p); | ||
394 | } | ||
395 | } | ||
396 | spin_unlock(&ip6addrlbl_table.lock); | ||
397 | } | ||
398 | |||
399 | static struct pernet_operations ipv6_addr_label_ops = { | ||
400 | .init = ip6addrlbl_net_init, | ||
401 | .exit = ip6addrlbl_net_exit, | ||
402 | }; | ||
403 | |||
352 | int __init ipv6_addr_label_init(void) | 404 | int __init ipv6_addr_label_init(void) |
353 | { | 405 | { |
354 | spin_lock_init(&ip6addrlbl_table.lock); | 406 | spin_lock_init(&ip6addrlbl_table.lock); |
355 | 407 | ||
356 | return ip6addrlbl_init(); | 408 | return register_pernet_subsys(&ipv6_addr_label_ops); |
357 | } | 409 | } |
358 | 410 | ||
359 | static const struct nla_policy ifal_policy[IFAL_MAX+1] = { | 411 | static const struct nla_policy ifal_policy[IFAL_MAX+1] = { |
@@ -371,9 +423,6 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
371 | u32 label; | 423 | u32 label; |
372 | int err = 0; | 424 | int err = 0; |
373 | 425 | ||
374 | if (net != &init_net) | ||
375 | return 0; | ||
376 | |||
377 | err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); | 426 | err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); |
378 | if (err < 0) | 427 | if (err < 0) |
379 | return err; | 428 | return err; |
@@ -385,7 +434,7 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
385 | return -EINVAL; | 434 | return -EINVAL; |
386 | 435 | ||
387 | if (ifal->ifal_index && | 436 | if (ifal->ifal_index && |
388 | !__dev_get_by_index(&init_net, ifal->ifal_index)) | 437 | !__dev_get_by_index(net, ifal->ifal_index)) |
389 | return -EINVAL; | 438 | return -EINVAL; |
390 | 439 | ||
391 | if (!tb[IFAL_ADDRESS]) | 440 | if (!tb[IFAL_ADDRESS]) |
@@ -403,12 +452,12 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
403 | 452 | ||
404 | switch(nlh->nlmsg_type) { | 453 | switch(nlh->nlmsg_type) { |
405 | case RTM_NEWADDRLABEL: | 454 | case RTM_NEWADDRLABEL: |
406 | err = ip6addrlbl_add(pfx, ifal->ifal_prefixlen, | 455 | err = ip6addrlbl_add(net, pfx, ifal->ifal_prefixlen, |
407 | ifal->ifal_index, label, | 456 | ifal->ifal_index, label, |
408 | nlh->nlmsg_flags & NLM_F_REPLACE); | 457 | nlh->nlmsg_flags & NLM_F_REPLACE); |
409 | break; | 458 | break; |
410 | case RTM_DELADDRLABEL: | 459 | case RTM_DELADDRLABEL: |
411 | err = ip6addrlbl_del(pfx, ifal->ifal_prefixlen, | 460 | err = ip6addrlbl_del(net, pfx, ifal->ifal_prefixlen, |
412 | ifal->ifal_index); | 461 | ifal->ifal_index); |
413 | break; | 462 | break; |
414 | default: | 463 | default: |
@@ -458,12 +507,10 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
458 | int idx = 0, s_idx = cb->args[0]; | 507 | int idx = 0, s_idx = cb->args[0]; |
459 | int err; | 508 | int err; |
460 | 509 | ||
461 | if (net != &init_net) | ||
462 | return 0; | ||
463 | |||
464 | rcu_read_lock(); | 510 | rcu_read_lock(); |
465 | hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { | 511 | hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { |
466 | if (idx >= s_idx) { | 512 | if (idx >= s_idx && |
513 | net_eq(ip6addrlbl_net(p), net)) { | ||
467 | if ((err = ip6addrlbl_fill(skb, p, | 514 | if ((err = ip6addrlbl_fill(skb, p, |
468 | ip6addrlbl_table.seq, | 515 | ip6addrlbl_table.seq, |
469 | NETLINK_CB(cb->skb).pid, | 516 | NETLINK_CB(cb->skb).pid, |
@@ -499,9 +546,6 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh, | |||
499 | struct ip6addrlbl_entry *p; | 546 | struct ip6addrlbl_entry *p; |
500 | struct sk_buff *skb; | 547 | struct sk_buff *skb; |
501 | 548 | ||
502 | if (net != &init_net) | ||
503 | return 0; | ||
504 | |||
505 | err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); | 549 | err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); |
506 | if (err < 0) | 550 | if (err < 0) |
507 | return err; | 551 | return err; |
@@ -513,7 +557,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh, | |||
513 | return -EINVAL; | 557 | return -EINVAL; |
514 | 558 | ||
515 | if (ifal->ifal_index && | 559 | if (ifal->ifal_index && |
516 | !__dev_get_by_index(&init_net, ifal->ifal_index)) | 560 | !__dev_get_by_index(net, ifal->ifal_index)) |
517 | return -EINVAL; | 561 | return -EINVAL; |
518 | 562 | ||
519 | if (!tb[IFAL_ADDRESS]) | 563 | if (!tb[IFAL_ADDRESS]) |
@@ -524,7 +568,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh, | |||
524 | return -EINVAL; | 568 | return -EINVAL; |
525 | 569 | ||
526 | rcu_read_lock(); | 570 | rcu_read_lock(); |
527 | p = __ipv6_addr_label(addr, ipv6_addr_type(addr), ifal->ifal_index); | 571 | p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index); |
528 | if (p && ip6addrlbl_hold(p)) | 572 | if (p && ip6addrlbl_hold(p)) |
529 | p = NULL; | 573 | p = NULL; |
530 | lseq = ip6addrlbl_table.seq; | 574 | lseq = ip6addrlbl_table.seq; |
@@ -552,7 +596,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh, | |||
552 | goto out; | 596 | goto out; |
553 | } | 597 | } |
554 | 598 | ||
555 | err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid); | 599 | err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid); |
556 | out: | 600 | out: |
557 | return err; | 601 | return err; |
558 | } | 602 | } |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index e84b3fd17fb4..3d828bc4b1cf 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -7,8 +7,6 @@ | |||
7 | * | 7 | * |
8 | * Adapted from linux/net/ipv4/af_inet.c | 8 | * Adapted from linux/net/ipv4/af_inet.c |
9 | * | 9 | * |
10 | * $Id: af_inet6.c,v 1.66 2002/02/01 22:01:04 davem Exp $ | ||
11 | * | ||
12 | * Fixes: | 10 | * Fixes: |
13 | * piggy, Karl Knutson : Socket protocol table | 11 | * piggy, Karl Knutson : Socket protocol table |
14 | * Hideaki YOSHIFUJI : sin6_scope_id support | 12 | * Hideaki YOSHIFUJI : sin6_scope_id support |
@@ -61,9 +59,7 @@ | |||
61 | 59 | ||
62 | #include <asm/uaccess.h> | 60 | #include <asm/uaccess.h> |
63 | #include <asm/system.h> | 61 | #include <asm/system.h> |
64 | #ifdef CONFIG_IPV6_MROUTE | ||
65 | #include <linux/mroute6.h> | 62 | #include <linux/mroute6.h> |
66 | #endif | ||
67 | 63 | ||
68 | MODULE_AUTHOR("Cast of dozens"); | 64 | MODULE_AUTHOR("Cast of dozens"); |
69 | MODULE_DESCRIPTION("IPv6 protocol stack for Linux"); | 65 | MODULE_DESCRIPTION("IPv6 protocol stack for Linux"); |
@@ -373,7 +369,7 @@ int inet6_release(struct socket *sock) | |||
373 | 369 | ||
374 | EXPORT_SYMBOL(inet6_release); | 370 | EXPORT_SYMBOL(inet6_release); |
375 | 371 | ||
376 | int inet6_destroy_sock(struct sock *sk) | 372 | void inet6_destroy_sock(struct sock *sk) |
377 | { | 373 | { |
378 | struct ipv6_pinfo *np = inet6_sk(sk); | 374 | struct ipv6_pinfo *np = inet6_sk(sk); |
379 | struct sk_buff *skb; | 375 | struct sk_buff *skb; |
@@ -391,8 +387,6 @@ int inet6_destroy_sock(struct sock *sk) | |||
391 | 387 | ||
392 | if ((opt = xchg(&np->opt, NULL)) != NULL) | 388 | if ((opt = xchg(&np->opt, NULL)) != NULL) |
393 | sock_kfree_s(sk, opt, opt->tot_len); | 389 | sock_kfree_s(sk, opt, opt->tot_len); |
394 | |||
395 | return 0; | ||
396 | } | 390 | } |
397 | 391 | ||
398 | EXPORT_SYMBOL_GPL(inet6_destroy_sock); | 392 | EXPORT_SYMBOL_GPL(inet6_destroy_sock); |
@@ -956,9 +950,9 @@ static int __init inet6_init(void) | |||
956 | err = icmpv6_init(); | 950 | err = icmpv6_init(); |
957 | if (err) | 951 | if (err) |
958 | goto icmp_fail; | 952 | goto icmp_fail; |
959 | #ifdef CONFIG_IPV6_MROUTE | 953 | err = ip6_mr_init(); |
960 | ip6_mr_init(); | 954 | if (err) |
961 | #endif | 955 | goto ipmr_fail; |
962 | err = ndisc_init(); | 956 | err = ndisc_init(); |
963 | if (err) | 957 | if (err) |
964 | goto ndisc_fail; | 958 | goto ndisc_fail; |
@@ -1061,6 +1055,8 @@ netfilter_fail: | |||
1061 | igmp_fail: | 1055 | igmp_fail: |
1062 | ndisc_cleanup(); | 1056 | ndisc_cleanup(); |
1063 | ndisc_fail: | 1057 | ndisc_fail: |
1058 | ip6_mr_cleanup(); | ||
1059 | ipmr_fail: | ||
1064 | icmpv6_cleanup(); | 1060 | icmpv6_cleanup(); |
1065 | icmp_fail: | 1061 | icmp_fail: |
1066 | unregister_pernet_subsys(&inet6_net_ops); | 1062 | unregister_pernet_subsys(&inet6_net_ops); |
@@ -1115,6 +1111,7 @@ static void __exit inet6_exit(void) | |||
1115 | ipv6_netfilter_fini(); | 1111 | ipv6_netfilter_fini(); |
1116 | igmp6_cleanup(); | 1112 | igmp6_cleanup(); |
1117 | ndisc_cleanup(); | 1113 | ndisc_cleanup(); |
1114 | ip6_mr_cleanup(); | ||
1118 | icmpv6_cleanup(); | 1115 | icmpv6_cleanup(); |
1119 | rawv6_exit(); | 1116 | rawv6_exit(); |
1120 | 1117 | ||
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 4e1b29fabdf0..8336cd81cb4f 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c | |||
@@ -60,7 +60,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr) | |||
60 | struct inet6_dev *idev; | 60 | struct inet6_dev *idev; |
61 | struct ipv6_ac_socklist *pac; | 61 | struct ipv6_ac_socklist *pac; |
62 | struct net *net = sock_net(sk); | 62 | struct net *net = sock_net(sk); |
63 | int ishost = !ipv6_devconf.forwarding; | 63 | int ishost = !net->ipv6.devconf_all->forwarding; |
64 | int err = 0; | 64 | int err = 0; |
65 | 65 | ||
66 | if (!capable(CAP_NET_ADMIN)) | 66 | if (!capable(CAP_NET_ADMIN)) |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 0f0f94a40335..f7b535dec860 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Pedro Roque <roque@di.fc.ul.pt> | 6 | * Pedro Roque <roque@di.fc.ul.pt> |
7 | * | 7 | * |
8 | * $Id: datagram.c,v 1.24 2002/02/01 22:01:04 davem Exp $ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index dcf94fdfb863..837c830d6d8e 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c | |||
@@ -7,8 +7,6 @@ | |||
7 | * Andi Kleen <ak@muc.de> | 7 | * Andi Kleen <ak@muc.de> |
8 | * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> | 8 | * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> |
9 | * | 9 | * |
10 | * $Id: exthdrs.c,v 1.13 2001/06/19 15:58:56 davem Exp $ | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
13 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
14 | * as published by the Free Software Foundation; either version | 12 | * as published by the Free Software Foundation; either version |
@@ -321,7 +319,7 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb) | |||
321 | int n, i; | 319 | int n, i; |
322 | struct ipv6_rt_hdr *hdr; | 320 | struct ipv6_rt_hdr *hdr; |
323 | struct rt0_hdr *rthdr; | 321 | struct rt0_hdr *rthdr; |
324 | int accept_source_route = ipv6_devconf.accept_source_route; | 322 | int accept_source_route = dev_net(skb->dev)->ipv6.devconf_all->accept_source_route; |
325 | 323 | ||
326 | idev = in6_dev_get(skb->dev); | 324 | idev = in6_dev_get(skb->dev); |
327 | if (idev) { | 325 | if (idev) { |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index d42dd16d3487..abedf95fdf2d 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Pedro Roque <roque@di.fc.ul.pt> | 6 | * Pedro Roque <roque@di.fc.ul.pt> |
7 | * | 7 | * |
8 | * $Id: icmp.c,v 1.38 2002/02/08 03:57:19 davem Exp $ | ||
9 | * | ||
10 | * Based on net/ipv4/icmp.c | 8 | * Based on net/ipv4/icmp.c |
11 | * | 9 | * |
12 | * RFC 1885 | 10 | * RFC 1885 |
@@ -956,7 +954,8 @@ ctl_table ipv6_icmp_table_template[] = { | |||
956 | .data = &init_net.ipv6.sysctl.icmpv6_time, | 954 | .data = &init_net.ipv6.sysctl.icmpv6_time, |
957 | .maxlen = sizeof(int), | 955 | .maxlen = sizeof(int), |
958 | .mode = 0644, | 956 | .mode = 0644, |
959 | .proc_handler = &proc_dointvec | 957 | .proc_handler = &proc_dointvec_ms_jiffies, |
958 | .strategy = &sysctl_ms_jiffies | ||
960 | }, | 959 | }, |
961 | { .ctl_name = 0 }, | 960 | { .ctl_name = 0 }, |
962 | }; | 961 | }; |
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 580014aea4d6..00a8a5f9380c 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c | |||
@@ -68,7 +68,7 @@ struct sock *__inet6_lookup_established(struct net *net, | |||
68 | /* Optimize here for direct hit, only listening connections can | 68 | /* Optimize here for direct hit, only listening connections can |
69 | * have wildcards anyways. | 69 | * have wildcards anyways. |
70 | */ | 70 | */ |
71 | unsigned int hash = inet6_ehashfn(daddr, hnum, saddr, sport); | 71 | unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport); |
72 | struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); | 72 | struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); |
73 | rwlock_t *lock = inet_ehash_lockp(hashinfo, hash); | 73 | rwlock_t *lock = inet_ehash_lockp(hashinfo, hash); |
74 | 74 | ||
@@ -104,7 +104,8 @@ struct sock *inet6_lookup_listener(struct net *net, | |||
104 | int score, hiscore = 0; | 104 | int score, hiscore = 0; |
105 | 105 | ||
106 | read_lock(&hashinfo->lhash_lock); | 106 | read_lock(&hashinfo->lhash_lock); |
107 | sk_for_each(sk, node, &hashinfo->listening_hash[inet_lhashfn(hnum)]) { | 107 | sk_for_each(sk, node, |
108 | &hashinfo->listening_hash[inet_lhashfn(net, hnum)]) { | ||
108 | if (net_eq(sock_net(sk), net) && inet_sk(sk)->num == hnum && | 109 | if (net_eq(sock_net(sk), net) && inet_sk(sk)->num == hnum && |
109 | sk->sk_family == PF_INET6) { | 110 | sk->sk_family == PF_INET6) { |
110 | const struct ipv6_pinfo *np = inet6_sk(sk); | 111 | const struct ipv6_pinfo *np = inet6_sk(sk); |
@@ -165,14 +166,14 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, | |||
165 | const struct in6_addr *saddr = &np->daddr; | 166 | const struct in6_addr *saddr = &np->daddr; |
166 | const int dif = sk->sk_bound_dev_if; | 167 | const int dif = sk->sk_bound_dev_if; |
167 | const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); | 168 | const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); |
168 | const unsigned int hash = inet6_ehashfn(daddr, lport, saddr, | 169 | struct net *net = sock_net(sk); |
170 | const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr, | ||
169 | inet->dport); | 171 | inet->dport); |
170 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); | 172 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); |
171 | rwlock_t *lock = inet_ehash_lockp(hinfo, hash); | 173 | rwlock_t *lock = inet_ehash_lockp(hinfo, hash); |
172 | struct sock *sk2; | 174 | struct sock *sk2; |
173 | const struct hlist_node *node; | 175 | const struct hlist_node *node; |
174 | struct inet_timewait_sock *tw; | 176 | struct inet_timewait_sock *tw; |
175 | struct net *net = sock_net(sk); | ||
176 | 177 | ||
177 | prefetch(head->chain.first); | 178 | prefetch(head->chain.first); |
178 | write_lock(lock); | 179 | write_lock(lock); |
@@ -209,11 +210,11 @@ unique: | |||
209 | 210 | ||
210 | if (twp != NULL) { | 211 | if (twp != NULL) { |
211 | *twp = tw; | 212 | *twp = tw; |
212 | NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); | 213 | NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED); |
213 | } else if (tw != NULL) { | 214 | } else if (tw != NULL) { |
214 | /* Silly. Should hash-dance instead... */ | 215 | /* Silly. Should hash-dance instead... */ |
215 | inet_twsk_deschedule(tw, death_row); | 216 | inet_twsk_deschedule(tw, death_row); |
216 | NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); | 217 | NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED); |
217 | 218 | ||
218 | inet_twsk_put(tw); | 219 | inet_twsk_put(tw); |
219 | } | 220 | } |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 1ee4fa17c129..4de2b9efcacb 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Pedro Roque <roque@di.fc.ul.pt> | 6 | * Pedro Roque <roque@di.fc.ul.pt> |
7 | * | 7 | * |
8 | * $Id: ip6_fib.c,v 1.25 2001/10/31 21:55:55 davem Exp $ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 17eb48b8e329..7e14cccd0561 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
@@ -6,8 +6,6 @@ | |||
6 | * Pedro Roque <roque@di.fc.ul.pt> | 6 | * Pedro Roque <roque@di.fc.ul.pt> |
7 | * Ian P. Morris <I.P.Morris@soton.ac.uk> | 7 | * Ian P. Morris <I.P.Morris@soton.ac.uk> |
8 | * | 8 | * |
9 | * $Id: ip6_input.c,v 1.19 2000/12/13 18:31:50 davem Exp $ | ||
10 | * | ||
11 | * Based in linux/net/ipv4/ip_input.c | 9 | * Based in linux/net/ipv4/ip_input.c |
12 | * | 10 | * |
13 | * This program is free software; you can redistribute it and/or | 11 | * This program is free software; you can redistribute it and/or |
@@ -73,7 +71,8 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
73 | 71 | ||
74 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INRECEIVES); | 72 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INRECEIVES); |
75 | 73 | ||
76 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { | 74 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || |
75 | !idev || unlikely(idev->cnf.disable_ipv6)) { | ||
77 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS); | 76 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS); |
78 | rcu_read_unlock(); | 77 | rcu_read_unlock(); |
79 | goto out; | 78 | goto out; |
@@ -250,7 +249,7 @@ int ip6_mc_input(struct sk_buff *skb) | |||
250 | /* | 249 | /* |
251 | * IPv6 multicast router mode is now supported ;) | 250 | * IPv6 multicast router mode is now supported ;) |
252 | */ | 251 | */ |
253 | if (ipv6_devconf.mc_forwarding && | 252 | if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && |
254 | likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { | 253 | likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { |
255 | /* | 254 | /* |
256 | * Okay, we try to forward - split and duplicate | 255 | * Okay, we try to forward - split and duplicate |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 48cdce9c696c..6407c64ea4a5 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Pedro Roque <roque@di.fc.ul.pt> | 6 | * Pedro Roque <roque@di.fc.ul.pt> |
7 | * | 7 | * |
8 | * $Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $ | ||
9 | * | ||
10 | * Based on linux/net/ipv4/ip_output.c | 8 | * Based on linux/net/ipv4/ip_output.c |
11 | * | 9 | * |
12 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
@@ -175,6 +173,13 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb) | |||
175 | 173 | ||
176 | int ip6_output(struct sk_buff *skb) | 174 | int ip6_output(struct sk_buff *skb) |
177 | { | 175 | { |
176 | struct inet6_dev *idev = ip6_dst_idev(skb->dst); | ||
177 | if (unlikely(idev->cnf.disable_ipv6)) { | ||
178 | IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS); | ||
179 | kfree_skb(skb); | ||
180 | return 0; | ||
181 | } | ||
182 | |||
178 | if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || | 183 | if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || |
179 | dst_allfrag(skb->dst)) | 184 | dst_allfrag(skb->dst)) |
180 | return ip6_fragment(skb, ip6_output2); | 185 | return ip6_fragment(skb, ip6_output2); |
@@ -406,9 +411,12 @@ int ip6_forward(struct sk_buff *skb) | |||
406 | struct inet6_skb_parm *opt = IP6CB(skb); | 411 | struct inet6_skb_parm *opt = IP6CB(skb); |
407 | struct net *net = dev_net(dst->dev); | 412 | struct net *net = dev_net(dst->dev); |
408 | 413 | ||
409 | if (ipv6_devconf.forwarding == 0) | 414 | if (net->ipv6.devconf_all->forwarding == 0) |
410 | goto error; | 415 | goto error; |
411 | 416 | ||
417 | if (skb_warn_if_lro(skb)) | ||
418 | goto drop; | ||
419 | |||
412 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { | 420 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { |
413 | IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); | 421 | IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); |
414 | goto drop; | 422 | goto drop; |
@@ -450,7 +458,7 @@ int ip6_forward(struct sk_buff *skb) | |||
450 | } | 458 | } |
451 | 459 | ||
452 | /* XXX: idev->cnf.proxy_ndp? */ | 460 | /* XXX: idev->cnf.proxy_ndp? */ |
453 | if (ipv6_devconf.proxy_ndp && | 461 | if (net->ipv6.devconf_all->proxy_ndp && |
454 | pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) { | 462 | pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) { |
455 | int proxied = ip6_forward_proxy_check(skb); | 463 | int proxied = ip6_forward_proxy_check(skb); |
456 | if (proxied > 0) | 464 | if (proxied > 0) |
@@ -497,7 +505,8 @@ int ip6_forward(struct sk_buff *skb) | |||
497 | int addrtype = ipv6_addr_type(&hdr->saddr); | 505 | int addrtype = ipv6_addr_type(&hdr->saddr); |
498 | 506 | ||
499 | /* This check is security critical. */ | 507 | /* This check is security critical. */ |
500 | if (addrtype & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK)) | 508 | if (addrtype == IPV6_ADDR_ANY || |
509 | addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK)) | ||
501 | goto error; | 510 | goto error; |
502 | if (addrtype & IPV6_ADDR_LINKLOCAL) { | 511 | if (addrtype & IPV6_ADDR_LINKLOCAL) { |
503 | icmpv6_send(skb, ICMPV6_DEST_UNREACH, | 512 | icmpv6_send(skb, ICMPV6_DEST_UNREACH, |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 2bda3ba100b1..17c7b098cdb0 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -6,8 +6,6 @@ | |||
6 | * Ville Nuorvala <vnuorval@tcs.hut.fi> | 6 | * Ville Nuorvala <vnuorval@tcs.hut.fi> |
7 | * Yasuyuki Kozakai <kozakai@linux-ipv6.org> | 7 | * Yasuyuki Kozakai <kozakai@linux-ipv6.org> |
8 | * | 8 | * |
9 | * $Id$ | ||
10 | * | ||
11 | * Based on: | 9 | * Based on: |
12 | * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c | 10 | * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c |
13 | * | 11 | * |
@@ -711,7 +709,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, | |||
711 | } | 709 | } |
712 | 710 | ||
713 | if (!ip6_tnl_rcv_ctl(t)) { | 711 | if (!ip6_tnl_rcv_ctl(t)) { |
714 | t->stat.rx_dropped++; | 712 | t->dev->stats.rx_dropped++; |
715 | read_unlock(&ip6_tnl_lock); | 713 | read_unlock(&ip6_tnl_lock); |
716 | goto discard; | 714 | goto discard; |
717 | } | 715 | } |
@@ -728,8 +726,8 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, | |||
728 | 726 | ||
729 | dscp_ecn_decapsulate(t, ipv6h, skb); | 727 | dscp_ecn_decapsulate(t, ipv6h, skb); |
730 | 728 | ||
731 | t->stat.rx_packets++; | 729 | t->dev->stats.rx_packets++; |
732 | t->stat.rx_bytes += skb->len; | 730 | t->dev->stats.rx_bytes += skb->len; |
733 | netif_rx(skb); | 731 | netif_rx(skb); |
734 | read_unlock(&ip6_tnl_lock); | 732 | read_unlock(&ip6_tnl_lock); |
735 | return 0; | 733 | return 0; |
@@ -849,7 +847,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
849 | __u32 *pmtu) | 847 | __u32 *pmtu) |
850 | { | 848 | { |
851 | struct ip6_tnl *t = netdev_priv(dev); | 849 | struct ip6_tnl *t = netdev_priv(dev); |
852 | struct net_device_stats *stats = &t->stat; | 850 | struct net_device_stats *stats = &t->dev->stats; |
853 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); | 851 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); |
854 | struct ipv6_tel_txoption opt; | 852 | struct ipv6_tel_txoption opt; |
855 | struct dst_entry *dst; | 853 | struct dst_entry *dst; |
@@ -1043,11 +1041,11 @@ static int | |||
1043 | ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | 1041 | ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) |
1044 | { | 1042 | { |
1045 | struct ip6_tnl *t = netdev_priv(dev); | 1043 | struct ip6_tnl *t = netdev_priv(dev); |
1046 | struct net_device_stats *stats = &t->stat; | 1044 | struct net_device_stats *stats = &t->dev->stats; |
1047 | int ret; | 1045 | int ret; |
1048 | 1046 | ||
1049 | if (t->recursion++) { | 1047 | if (t->recursion++) { |
1050 | t->stat.collisions++; | 1048 | stats->collisions++; |
1051 | goto tx_err; | 1049 | goto tx_err; |
1052 | } | 1050 | } |
1053 | 1051 | ||
@@ -1289,19 +1287,6 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1289 | } | 1287 | } |
1290 | 1288 | ||
1291 | /** | 1289 | /** |
1292 | * ip6_tnl_get_stats - return the stats for tunnel device | ||
1293 | * @dev: virtual device associated with tunnel | ||
1294 | * | ||
1295 | * Return: stats for device | ||
1296 | **/ | ||
1297 | |||
1298 | static struct net_device_stats * | ||
1299 | ip6_tnl_get_stats(struct net_device *dev) | ||
1300 | { | ||
1301 | return &(((struct ip6_tnl *)netdev_priv(dev))->stat); | ||
1302 | } | ||
1303 | |||
1304 | /** | ||
1305 | * ip6_tnl_change_mtu - change mtu manually for tunnel device | 1290 | * ip6_tnl_change_mtu - change mtu manually for tunnel device |
1306 | * @dev: virtual device associated with tunnel | 1291 | * @dev: virtual device associated with tunnel |
1307 | * @new_mtu: the new mtu | 1292 | * @new_mtu: the new mtu |
@@ -1334,7 +1319,6 @@ static void ip6_tnl_dev_setup(struct net_device *dev) | |||
1334 | dev->uninit = ip6_tnl_dev_uninit; | 1319 | dev->uninit = ip6_tnl_dev_uninit; |
1335 | dev->destructor = free_netdev; | 1320 | dev->destructor = free_netdev; |
1336 | dev->hard_start_xmit = ip6_tnl_xmit; | 1321 | dev->hard_start_xmit = ip6_tnl_xmit; |
1337 | dev->get_stats = ip6_tnl_get_stats; | ||
1338 | dev->do_ioctl = ip6_tnl_ioctl; | 1322 | dev->do_ioctl = ip6_tnl_ioctl; |
1339 | dev->change_mtu = ip6_tnl_change_mtu; | 1323 | dev->change_mtu = ip6_tnl_change_mtu; |
1340 | 1324 | ||
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 14796181e8b5..095bc453ff4c 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -388,8 +388,8 @@ static int pim6_rcv(struct sk_buff *skb) | |||
388 | skb->ip_summed = 0; | 388 | skb->ip_summed = 0; |
389 | skb->pkt_type = PACKET_HOST; | 389 | skb->pkt_type = PACKET_HOST; |
390 | dst_release(skb->dst); | 390 | dst_release(skb->dst); |
391 | ((struct net_device_stats *)netdev_priv(reg_dev))->rx_bytes += skb->len; | 391 | reg_dev->stats.rx_bytes += skb->len; |
392 | ((struct net_device_stats *)netdev_priv(reg_dev))->rx_packets++; | 392 | reg_dev->stats.rx_packets++; |
393 | skb->dst = NULL; | 393 | skb->dst = NULL; |
394 | nf_reset(skb); | 394 | nf_reset(skb); |
395 | netif_rx(skb); | 395 | netif_rx(skb); |
@@ -409,26 +409,20 @@ static struct inet6_protocol pim6_protocol = { | |||
409 | static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) | 409 | static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) |
410 | { | 410 | { |
411 | read_lock(&mrt_lock); | 411 | read_lock(&mrt_lock); |
412 | ((struct net_device_stats *)netdev_priv(dev))->tx_bytes += skb->len; | 412 | dev->stats.tx_bytes += skb->len; |
413 | ((struct net_device_stats *)netdev_priv(dev))->tx_packets++; | 413 | dev->stats.tx_packets++; |
414 | ip6mr_cache_report(skb, reg_vif_num, MRT6MSG_WHOLEPKT); | 414 | ip6mr_cache_report(skb, reg_vif_num, MRT6MSG_WHOLEPKT); |
415 | read_unlock(&mrt_lock); | 415 | read_unlock(&mrt_lock); |
416 | kfree_skb(skb); | 416 | kfree_skb(skb); |
417 | return 0; | 417 | return 0; |
418 | } | 418 | } |
419 | 419 | ||
420 | static struct net_device_stats *reg_vif_get_stats(struct net_device *dev) | ||
421 | { | ||
422 | return (struct net_device_stats *)netdev_priv(dev); | ||
423 | } | ||
424 | |||
425 | static void reg_vif_setup(struct net_device *dev) | 420 | static void reg_vif_setup(struct net_device *dev) |
426 | { | 421 | { |
427 | dev->type = ARPHRD_PIMREG; | 422 | dev->type = ARPHRD_PIMREG; |
428 | dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8; | 423 | dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8; |
429 | dev->flags = IFF_NOARP; | 424 | dev->flags = IFF_NOARP; |
430 | dev->hard_start_xmit = reg_vif_xmit; | 425 | dev->hard_start_xmit = reg_vif_xmit; |
431 | dev->get_stats = reg_vif_get_stats; | ||
432 | dev->destructor = free_netdev; | 426 | dev->destructor = free_netdev; |
433 | } | 427 | } |
434 | 428 | ||
@@ -436,9 +430,7 @@ static struct net_device *ip6mr_reg_vif(void) | |||
436 | { | 430 | { |
437 | struct net_device *dev; | 431 | struct net_device *dev; |
438 | 432 | ||
439 | dev = alloc_netdev(sizeof(struct net_device_stats), "pim6reg", | 433 | dev = alloc_netdev(0, "pim6reg", reg_vif_setup); |
440 | reg_vif_setup); | ||
441 | |||
442 | if (dev == NULL) | 434 | if (dev == NULL) |
443 | return NULL; | 435 | return NULL; |
444 | 436 | ||
@@ -451,6 +443,7 @@ static struct net_device *ip6mr_reg_vif(void) | |||
451 | if (dev_open(dev)) | 443 | if (dev_open(dev)) |
452 | goto failure; | 444 | goto failure; |
453 | 445 | ||
446 | dev_hold(dev); | ||
454 | return dev; | 447 | return dev; |
455 | 448 | ||
456 | failure: | 449 | failure: |
@@ -603,6 +596,7 @@ static int mif6_add(struct mif6ctl *vifc, int mrtsock) | |||
603 | int vifi = vifc->mif6c_mifi; | 596 | int vifi = vifc->mif6c_mifi; |
604 | struct mif_device *v = &vif6_table[vifi]; | 597 | struct mif_device *v = &vif6_table[vifi]; |
605 | struct net_device *dev; | 598 | struct net_device *dev; |
599 | int err; | ||
606 | 600 | ||
607 | /* Is vif busy ? */ | 601 | /* Is vif busy ? */ |
608 | if (MIF_EXISTS(vifi)) | 602 | if (MIF_EXISTS(vifi)) |
@@ -620,20 +614,28 @@ static int mif6_add(struct mif6ctl *vifc, int mrtsock) | |||
620 | dev = ip6mr_reg_vif(); | 614 | dev = ip6mr_reg_vif(); |
621 | if (!dev) | 615 | if (!dev) |
622 | return -ENOBUFS; | 616 | return -ENOBUFS; |
617 | err = dev_set_allmulti(dev, 1); | ||
618 | if (err) { | ||
619 | unregister_netdevice(dev); | ||
620 | dev_put(dev); | ||
621 | return err; | ||
622 | } | ||
623 | break; | 623 | break; |
624 | #endif | 624 | #endif |
625 | case 0: | 625 | case 0: |
626 | dev = dev_get_by_index(&init_net, vifc->mif6c_pifi); | 626 | dev = dev_get_by_index(&init_net, vifc->mif6c_pifi); |
627 | if (!dev) | 627 | if (!dev) |
628 | return -EADDRNOTAVAIL; | 628 | return -EADDRNOTAVAIL; |
629 | dev_put(dev); | 629 | err = dev_set_allmulti(dev, 1); |
630 | if (err) { | ||
631 | dev_put(dev); | ||
632 | return err; | ||
633 | } | ||
630 | break; | 634 | break; |
631 | default: | 635 | default: |
632 | return -EINVAL; | 636 | return -EINVAL; |
633 | } | 637 | } |
634 | 638 | ||
635 | dev_set_allmulti(dev, 1); | ||
636 | |||
637 | /* | 639 | /* |
638 | * Fill in the VIF structures | 640 | * Fill in the VIF structures |
639 | */ | 641 | */ |
@@ -652,7 +654,6 @@ static int mif6_add(struct mif6ctl *vifc, int mrtsock) | |||
652 | 654 | ||
653 | /* And finish update writing critical data */ | 655 | /* And finish update writing critical data */ |
654 | write_lock_bh(&mrt_lock); | 656 | write_lock_bh(&mrt_lock); |
655 | dev_hold(dev); | ||
656 | v->dev = dev; | 657 | v->dev = dev; |
657 | #ifdef CONFIG_IPV6_PIMSM_V2 | 658 | #ifdef CONFIG_IPV6_PIMSM_V2 |
658 | if (v->flags & MIFF_REGISTER) | 659 | if (v->flags & MIFF_REGISTER) |
@@ -934,7 +935,7 @@ static int ip6mr_device_event(struct notifier_block *this, | |||
934 | struct mif_device *v; | 935 | struct mif_device *v; |
935 | int ct; | 936 | int ct; |
936 | 937 | ||
937 | if (dev_net(dev) != &init_net) | 938 | if (!net_eq(dev_net(dev), &init_net)) |
938 | return NOTIFY_DONE; | 939 | return NOTIFY_DONE; |
939 | 940 | ||
940 | if (event != NETDEV_UNREGISTER) | 941 | if (event != NETDEV_UNREGISTER) |
@@ -956,23 +957,51 @@ static struct notifier_block ip6_mr_notifier = { | |||
956 | * Setup for IP multicast routing | 957 | * Setup for IP multicast routing |
957 | */ | 958 | */ |
958 | 959 | ||
959 | void __init ip6_mr_init(void) | 960 | int __init ip6_mr_init(void) |
960 | { | 961 | { |
962 | int err; | ||
963 | |||
961 | mrt_cachep = kmem_cache_create("ip6_mrt_cache", | 964 | mrt_cachep = kmem_cache_create("ip6_mrt_cache", |
962 | sizeof(struct mfc6_cache), | 965 | sizeof(struct mfc6_cache), |
963 | 0, SLAB_HWCACHE_ALIGN, | 966 | 0, SLAB_HWCACHE_ALIGN, |
964 | NULL); | 967 | NULL); |
965 | if (!mrt_cachep) | 968 | if (!mrt_cachep) |
966 | panic("cannot allocate ip6_mrt_cache"); | 969 | return -ENOMEM; |
967 | 970 | ||
968 | setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); | 971 | setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); |
969 | register_netdevice_notifier(&ip6_mr_notifier); | 972 | err = register_netdevice_notifier(&ip6_mr_notifier); |
973 | if (err) | ||
974 | goto reg_notif_fail; | ||
975 | #ifdef CONFIG_PROC_FS | ||
976 | err = -ENOMEM; | ||
977 | if (!proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops)) | ||
978 | goto proc_vif_fail; | ||
979 | if (!proc_net_fops_create(&init_net, "ip6_mr_cache", | ||
980 | 0, &ip6mr_mfc_fops)) | ||
981 | goto proc_cache_fail; | ||
982 | #endif | ||
983 | return 0; | ||
984 | reg_notif_fail: | ||
985 | kmem_cache_destroy(mrt_cachep); | ||
970 | #ifdef CONFIG_PROC_FS | 986 | #ifdef CONFIG_PROC_FS |
971 | proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops); | 987 | proc_vif_fail: |
972 | proc_net_fops_create(&init_net, "ip6_mr_cache", 0, &ip6mr_mfc_fops); | 988 | unregister_netdevice_notifier(&ip6_mr_notifier); |
989 | proc_cache_fail: | ||
990 | proc_net_remove(&init_net, "ip6_mr_vif"); | ||
973 | #endif | 991 | #endif |
992 | return err; | ||
974 | } | 993 | } |
975 | 994 | ||
995 | void ip6_mr_cleanup(void) | ||
996 | { | ||
997 | #ifdef CONFIG_PROC_FS | ||
998 | proc_net_remove(&init_net, "ip6_mr_cache"); | ||
999 | proc_net_remove(&init_net, "ip6_mr_vif"); | ||
1000 | #endif | ||
1001 | unregister_netdevice_notifier(&ip6_mr_notifier); | ||
1002 | del_timer(&ipmr_expire_timer); | ||
1003 | kmem_cache_destroy(mrt_cachep); | ||
1004 | } | ||
976 | 1005 | ||
977 | static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock) | 1006 | static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock) |
978 | { | 1007 | { |
@@ -1248,7 +1277,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int | |||
1248 | 1277 | ||
1249 | #endif | 1278 | #endif |
1250 | /* | 1279 | /* |
1251 | * Spurious command, or MRT_VERSION which you cannot | 1280 | * Spurious command, or MRT6_VERSION which you cannot |
1252 | * set. | 1281 | * set. |
1253 | */ | 1282 | */ |
1254 | default: | 1283 | default: |
@@ -1377,8 +1406,8 @@ static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi) | |||
1377 | if (vif->flags & MIFF_REGISTER) { | 1406 | if (vif->flags & MIFF_REGISTER) { |
1378 | vif->pkt_out++; | 1407 | vif->pkt_out++; |
1379 | vif->bytes_out += skb->len; | 1408 | vif->bytes_out += skb->len; |
1380 | ((struct net_device_stats *)netdev_priv(vif->dev))->tx_bytes += skb->len; | 1409 | vif->dev->stats.tx_bytes += skb->len; |
1381 | ((struct net_device_stats *)netdev_priv(vif->dev))->tx_packets++; | 1410 | vif->dev->stats.tx_packets++; |
1382 | ip6mr_cache_report(skb, vifi, MRT6MSG_WHOLEPKT); | 1411 | ip6mr_cache_report(skb, vifi, MRT6MSG_WHOLEPKT); |
1383 | kfree_skb(skb); | 1412 | kfree_skb(skb); |
1384 | return 0; | 1413 | return 0; |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 86e28a75267f..ea33b26512c2 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -7,8 +7,6 @@ | |||
7 | * | 7 | * |
8 | * Based on linux/net/ipv4/ip_sockglue.c | 8 | * Based on linux/net/ipv4/ip_sockglue.c |
9 | * | 9 | * |
10 | * $Id: ipv6_sockglue.c,v 1.41 2002/02/01 22:01:04 davem Exp $ | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
13 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
14 | * as published by the Free Software Foundation; either version | 12 | * as published by the Free Software Foundation; either version |
@@ -61,7 +59,7 @@ DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly; | |||
61 | struct ip6_ra_chain *ip6_ra_chain; | 59 | struct ip6_ra_chain *ip6_ra_chain; |
62 | DEFINE_RWLOCK(ip6_ra_lock); | 60 | DEFINE_RWLOCK(ip6_ra_lock); |
63 | 61 | ||
64 | int ip6_ra_control(struct sock *sk, int sel, void (*destructor)(struct sock *)) | 62 | int ip6_ra_control(struct sock *sk, int sel) |
65 | { | 63 | { |
66 | struct ip6_ra_chain *ra, *new_ra, **rap; | 64 | struct ip6_ra_chain *ra, *new_ra, **rap; |
67 | 65 | ||
@@ -83,8 +81,6 @@ int ip6_ra_control(struct sock *sk, int sel, void (*destructor)(struct sock *)) | |||
83 | *rap = ra->next; | 81 | *rap = ra->next; |
84 | write_unlock_bh(&ip6_ra_lock); | 82 | write_unlock_bh(&ip6_ra_lock); |
85 | 83 | ||
86 | if (ra->destructor) | ||
87 | ra->destructor(sk); | ||
88 | sock_put(sk); | 84 | sock_put(sk); |
89 | kfree(ra); | 85 | kfree(ra); |
90 | return 0; | 86 | return 0; |
@@ -96,7 +92,6 @@ int ip6_ra_control(struct sock *sk, int sel, void (*destructor)(struct sock *)) | |||
96 | } | 92 | } |
97 | new_ra->sk = sk; | 93 | new_ra->sk = sk; |
98 | new_ra->sel = sel; | 94 | new_ra->sel = sel; |
99 | new_ra->destructor = destructor; | ||
100 | new_ra->next = ra; | 95 | new_ra->next = ra; |
101 | *rap = new_ra; | 96 | *rap = new_ra; |
102 | sock_hold(sk); | 97 | sock_hold(sk); |
@@ -634,7 +629,7 @@ done: | |||
634 | case IPV6_ROUTER_ALERT: | 629 | case IPV6_ROUTER_ALERT: |
635 | if (optlen < sizeof(int)) | 630 | if (optlen < sizeof(int)) |
636 | goto e_inval; | 631 | goto e_inval; |
637 | retv = ip6_ra_control(sk, val, NULL); | 632 | retv = ip6_ra_control(sk, val); |
638 | break; | 633 | break; |
639 | case IPV6_MTU_DISCOVER: | 634 | case IPV6_MTU_DISCOVER: |
640 | if (optlen < sizeof(int)) | 635 | if (optlen < sizeof(int)) |
@@ -1043,7 +1038,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1043 | dst_release(dst); | 1038 | dst_release(dst); |
1044 | } | 1039 | } |
1045 | if (val < 0) | 1040 | if (val < 0) |
1046 | val = ipv6_devconf.hop_limit; | 1041 | val = sock_net(sk)->ipv6.devconf_all->hop_limit; |
1047 | break; | 1042 | break; |
1048 | } | 1043 | } |
1049 | 1044 | ||
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index fd632dd7f98d..e7c03bcc2788 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Pedro Roque <roque@di.fc.ul.pt> | 6 | * Pedro Roque <roque@di.fc.ul.pt> |
7 | * | 7 | * |
8 | * $Id: mcast.c,v 1.40 2002/02/08 03:57:19 davem Exp $ | ||
9 | * | ||
10 | * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c | 8 | * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c |
11 | * | 9 | * |
12 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
@@ -153,7 +151,7 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, | |||
153 | #define IGMP6_UNSOLICITED_IVAL (10*HZ) | 151 | #define IGMP6_UNSOLICITED_IVAL (10*HZ) |
154 | #define MLD_QRV_DEFAULT 2 | 152 | #define MLD_QRV_DEFAULT 2 |
155 | 153 | ||
156 | #define MLD_V1_SEEN(idev) (ipv6_devconf.force_mld_version == 1 || \ | 154 | #define MLD_V1_SEEN(idev) (dev_net((idev)->dev)->ipv6.devconf_all->force_mld_version == 1 || \ |
157 | (idev)->cnf.force_mld_version == 1 || \ | 155 | (idev)->cnf.force_mld_version == 1 || \ |
158 | ((idev)->mc_v1_seen && \ | 156 | ((idev)->mc_v1_seen && \ |
159 | time_before(jiffies, (idev)->mc_v1_seen))) | 157 | time_before(jiffies, (idev)->mc_v1_seen))) |
@@ -164,7 +162,6 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, | |||
164 | ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \ | 162 | ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \ |
165 | (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp)))) | 163 | (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp)))) |
166 | 164 | ||
167 | #define MLDV2_QQIC(value) MLDV2_EXP(0x80, 4, 3, value) | ||
168 | #define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value) | 165 | #define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value) |
169 | 166 | ||
170 | #define IPV6_MLD_MAX_MSF 64 | 167 | #define IPV6_MLD_MAX_MSF 64 |
@@ -370,10 +367,6 @@ int ip6_mc_source(int add, int omode, struct sock *sk, | |||
370 | int pmclocked = 0; | 367 | int pmclocked = 0; |
371 | int err; | 368 | int err; |
372 | 369 | ||
373 | if (pgsr->gsr_group.ss_family != AF_INET6 || | ||
374 | pgsr->gsr_source.ss_family != AF_INET6) | ||
375 | return -EINVAL; | ||
376 | |||
377 | source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr; | 370 | source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr; |
378 | group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr; | 371 | group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr; |
379 | 372 | ||
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 282fdb31f8ed..beb48e3f038a 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -784,15 +784,17 @@ static void ndisc_recv_ns(struct sk_buff *skb) | |||
784 | 784 | ||
785 | idev = ifp->idev; | 785 | idev = ifp->idev; |
786 | } else { | 786 | } else { |
787 | struct net *net = dev_net(dev); | ||
788 | |||
787 | idev = in6_dev_get(dev); | 789 | idev = in6_dev_get(dev); |
788 | if (!idev) { | 790 | if (!idev) { |
789 | /* XXX: count this drop? */ | 791 | /* XXX: count this drop? */ |
790 | return; | 792 | return; |
791 | } | 793 | } |
792 | 794 | ||
793 | if (ipv6_chk_acast_addr(dev_net(dev), dev, &msg->target) || | 795 | if (ipv6_chk_acast_addr(net, dev, &msg->target) || |
794 | (idev->cnf.forwarding && | 796 | (idev->cnf.forwarding && |
795 | (ipv6_devconf.proxy_ndp || idev->cnf.proxy_ndp) && | 797 | (net->ipv6.devconf_all->proxy_ndp || idev->cnf.proxy_ndp) && |
796 | (is_router = pndisc_is_router(&msg->target, dev)) >= 0)) { | 798 | (is_router = pndisc_is_router(&msg->target, dev)) >= 0)) { |
797 | if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) && | 799 | if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) && |
798 | skb->pkt_type != PACKET_HOST && | 800 | skb->pkt_type != PACKET_HOST && |
@@ -921,6 +923,7 @@ static void ndisc_recv_na(struct sk_buff *skb) | |||
921 | 923 | ||
922 | if (neigh) { | 924 | if (neigh) { |
923 | u8 old_flags = neigh->flags; | 925 | u8 old_flags = neigh->flags; |
926 | struct net *net = dev_net(dev); | ||
924 | 927 | ||
925 | if (neigh->nud_state & NUD_FAILED) | 928 | if (neigh->nud_state & NUD_FAILED) |
926 | goto out; | 929 | goto out; |
@@ -931,8 +934,8 @@ static void ndisc_recv_na(struct sk_buff *skb) | |||
931 | * has already sent a NA to us. | 934 | * has already sent a NA to us. |
932 | */ | 935 | */ |
933 | if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) && | 936 | if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) && |
934 | ipv6_devconf.forwarding && ipv6_devconf.proxy_ndp && | 937 | net->ipv6.devconf_all->forwarding && net->ipv6.devconf_all->proxy_ndp && |
935 | pneigh_lookup(&nd_tbl, dev_net(dev), &msg->target, dev, 0)) { | 938 | pneigh_lookup(&nd_tbl, net, &msg->target, dev, 0)) { |
936 | /* XXX: idev->cnf.prixy_ndp */ | 939 | /* XXX: idev->cnf.prixy_ndp */ |
937 | goto out; | 940 | goto out; |
938 | } | 941 | } |
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 6cae5475737e..689dec899c57 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig | |||
@@ -208,5 +208,17 @@ config IP6_NF_RAW | |||
208 | If you want to compile it as a module, say M here and read | 208 | If you want to compile it as a module, say M here and read |
209 | <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. | 209 | <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. |
210 | 210 | ||
211 | # security table for MAC policy | ||
212 | config IP6_NF_SECURITY | ||
213 | tristate "Security table" | ||
214 | depends on IP6_NF_IPTABLES | ||
215 | depends on SECURITY | ||
216 | default m if NETFILTER_ADVANCED=n | ||
217 | help | ||
218 | This option adds a `security' table to iptables, for use | ||
219 | with Mandatory Access Control (MAC) policy. | ||
220 | |||
221 | If unsure, say N. | ||
222 | |||
211 | endmenu | 223 | endmenu |
212 | 224 | ||
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index fbf2c14ed887..3f17c948eefb 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile | |||
@@ -8,6 +8,7 @@ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o | |||
8 | obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o | 8 | obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o |
9 | obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o | 9 | obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o |
10 | obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o | 10 | obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o |
11 | obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o | ||
11 | 12 | ||
12 | # objects for l3 independent conntrack | 13 | # objects for l3 independent conntrack |
13 | nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o | 14 | nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o |
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 2eff3ae8977d..5859c046cbc4 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c | |||
@@ -159,7 +159,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) | |||
159 | case IPQ_COPY_META: | 159 | case IPQ_COPY_META: |
160 | case IPQ_COPY_NONE: | 160 | case IPQ_COPY_NONE: |
161 | size = NLMSG_SPACE(sizeof(*pmsg)); | 161 | size = NLMSG_SPACE(sizeof(*pmsg)); |
162 | data_len = 0; | ||
163 | break; | 162 | break; |
164 | 163 | ||
165 | case IPQ_COPY_PACKET: | 164 | case IPQ_COPY_PACKET: |
@@ -226,8 +225,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) | |||
226 | return skb; | 225 | return skb; |
227 | 226 | ||
228 | nlmsg_failure: | 227 | nlmsg_failure: |
229 | if (skb) | ||
230 | kfree_skb(skb); | ||
231 | *errp = -EINVAL; | 228 | *errp = -EINVAL; |
232 | printk(KERN_ERR "ip6_queue: error creating packet message\n"); | 229 | printk(KERN_ERR "ip6_queue: error creating packet message\n"); |
233 | return NULL; | 230 | return NULL; |
@@ -483,7 +480,7 @@ ipq_rcv_dev_event(struct notifier_block *this, | |||
483 | { | 480 | { |
484 | struct net_device *dev = ptr; | 481 | struct net_device *dev = ptr; |
485 | 482 | ||
486 | if (dev_net(dev) != &init_net) | 483 | if (!net_eq(dev_net(dev), &init_net)) |
487 | return NOTIFY_DONE; | 484 | return NOTIFY_DONE; |
488 | 485 | ||
489 | /* Drop any packets associated with the downed device */ | 486 | /* Drop any packets associated with the downed device */ |
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c index f979e48b469b..55a2c290bad4 100644 --- a/net/ipv6/netfilter/ip6table_filter.c +++ b/net/ipv6/netfilter/ip6table_filter.c | |||
@@ -61,13 +61,25 @@ static struct xt_table packet_filter = { | |||
61 | 61 | ||
62 | /* The work comes in here from netfilter.c. */ | 62 | /* The work comes in here from netfilter.c. */ |
63 | static unsigned int | 63 | static unsigned int |
64 | ip6t_hook(unsigned int hook, | 64 | ip6t_local_in_hook(unsigned int hook, |
65 | struct sk_buff *skb, | 65 | struct sk_buff *skb, |
66 | const struct net_device *in, | 66 | const struct net_device *in, |
67 | const struct net_device *out, | 67 | const struct net_device *out, |
68 | int (*okfn)(struct sk_buff *)) | 68 | int (*okfn)(struct sk_buff *)) |
69 | { | ||
70 | return ip6t_do_table(skb, hook, in, out, | ||
71 | nf_local_in_net(in, out)->ipv6.ip6table_filter); | ||
72 | } | ||
73 | |||
74 | static unsigned int | ||
75 | ip6t_forward_hook(unsigned int hook, | ||
76 | struct sk_buff *skb, | ||
77 | const struct net_device *in, | ||
78 | const struct net_device *out, | ||
79 | int (*okfn)(struct sk_buff *)) | ||
69 | { | 80 | { |
70 | return ip6t_do_table(skb, hook, in, out, init_net.ipv6.ip6table_filter); | 81 | return ip6t_do_table(skb, hook, in, out, |
82 | nf_forward_net(in, out)->ipv6.ip6table_filter); | ||
71 | } | 83 | } |
72 | 84 | ||
73 | static unsigned int | 85 | static unsigned int |
@@ -87,19 +99,20 @@ ip6t_local_out_hook(unsigned int hook, | |||
87 | } | 99 | } |
88 | #endif | 100 | #endif |
89 | 101 | ||
90 | return ip6t_do_table(skb, hook, in, out, init_net.ipv6.ip6table_filter); | 102 | return ip6t_do_table(skb, hook, in, out, |
103 | nf_local_out_net(in, out)->ipv6.ip6table_filter); | ||
91 | } | 104 | } |
92 | 105 | ||
93 | static struct nf_hook_ops ip6t_ops[] __read_mostly = { | 106 | static struct nf_hook_ops ip6t_ops[] __read_mostly = { |
94 | { | 107 | { |
95 | .hook = ip6t_hook, | 108 | .hook = ip6t_local_in_hook, |
96 | .owner = THIS_MODULE, | 109 | .owner = THIS_MODULE, |
97 | .pf = PF_INET6, | 110 | .pf = PF_INET6, |
98 | .hooknum = NF_INET_LOCAL_IN, | 111 | .hooknum = NF_INET_LOCAL_IN, |
99 | .priority = NF_IP6_PRI_FILTER, | 112 | .priority = NF_IP6_PRI_FILTER, |
100 | }, | 113 | }, |
101 | { | 114 | { |
102 | .hook = ip6t_hook, | 115 | .hook = ip6t_forward_hook, |
103 | .owner = THIS_MODULE, | 116 | .owner = THIS_MODULE, |
104 | .pf = PF_INET6, | 117 | .pf = PF_INET6, |
105 | .hooknum = NF_INET_FORWARD, | 118 | .hooknum = NF_INET_FORWARD, |
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c new file mode 100644 index 000000000000..a07abee30497 --- /dev/null +++ b/net/ipv6/netfilter/ip6table_security.c | |||
@@ -0,0 +1,172 @@ | |||
1 | /* | ||
2 | * "security" table for IPv6 | ||
3 | * | ||
4 | * This is for use by Mandatory Access Control (MAC) security models, | ||
5 | * which need to be able to manage security policy in separate context | ||
6 | * to DAC. | ||
7 | * | ||
8 | * Based on iptable_mangle.c | ||
9 | * | ||
10 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling | ||
11 | * Copyright (C) 2000-2004 Netfilter Core Team <coreteam <at> netfilter.org> | ||
12 | * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris <at> redhat.com> | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License version 2 as | ||
16 | * published by the Free Software Foundation. | ||
17 | */ | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/netfilter_ipv6/ip6_tables.h> | ||
20 | |||
21 | MODULE_LICENSE("GPL"); | ||
22 | MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>"); | ||
23 | MODULE_DESCRIPTION("ip6tables security table, for MAC rules"); | ||
24 | |||
25 | #define SECURITY_VALID_HOOKS (1 << NF_INET_LOCAL_IN) | \ | ||
26 | (1 << NF_INET_FORWARD) | \ | ||
27 | (1 << NF_INET_LOCAL_OUT) | ||
28 | |||
29 | static struct | ||
30 | { | ||
31 | struct ip6t_replace repl; | ||
32 | struct ip6t_standard entries[3]; | ||
33 | struct ip6t_error term; | ||
34 | } initial_table __initdata = { | ||
35 | .repl = { | ||
36 | .name = "security", | ||
37 | .valid_hooks = SECURITY_VALID_HOOKS, | ||
38 | .num_entries = 4, | ||
39 | .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error), | ||
40 | .hook_entry = { | ||
41 | [NF_INET_LOCAL_IN] = 0, | ||
42 | [NF_INET_FORWARD] = sizeof(struct ip6t_standard), | ||
43 | [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2, | ||
44 | }, | ||
45 | .underflow = { | ||
46 | [NF_INET_LOCAL_IN] = 0, | ||
47 | [NF_INET_FORWARD] = sizeof(struct ip6t_standard), | ||
48 | [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2, | ||
49 | }, | ||
50 | }, | ||
51 | .entries = { | ||
52 | IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */ | ||
53 | IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */ | ||
54 | IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ | ||
55 | }, | ||
56 | .term = IP6T_ERROR_INIT, /* ERROR */ | ||
57 | }; | ||
58 | |||
59 | static struct xt_table security_table = { | ||
60 | .name = "security", | ||
61 | .valid_hooks = SECURITY_VALID_HOOKS, | ||
62 | .lock = __RW_LOCK_UNLOCKED(security_table.lock), | ||
63 | .me = THIS_MODULE, | ||
64 | .af = AF_INET6, | ||
65 | }; | ||
66 | |||
67 | static unsigned int | ||
68 | ip6t_local_in_hook(unsigned int hook, | ||
69 | struct sk_buff *skb, | ||
70 | const struct net_device *in, | ||
71 | const struct net_device *out, | ||
72 | int (*okfn)(struct sk_buff *)) | ||
73 | { | ||
74 | return ip6t_do_table(skb, hook, in, out, | ||
75 | nf_local_in_net(in, out)->ipv6.ip6table_security); | ||
76 | } | ||
77 | |||
78 | static unsigned int | ||
79 | ip6t_forward_hook(unsigned int hook, | ||
80 | struct sk_buff *skb, | ||
81 | const struct net_device *in, | ||
82 | const struct net_device *out, | ||
83 | int (*okfn)(struct sk_buff *)) | ||
84 | { | ||
85 | return ip6t_do_table(skb, hook, in, out, | ||
86 | nf_forward_net(in, out)->ipv6.ip6table_security); | ||
87 | } | ||
88 | |||
89 | static unsigned int | ||
90 | ip6t_local_out_hook(unsigned int hook, | ||
91 | struct sk_buff *skb, | ||
92 | const struct net_device *in, | ||
93 | const struct net_device *out, | ||
94 | int (*okfn)(struct sk_buff *)) | ||
95 | { | ||
96 | /* TBD: handle short packets via raw socket */ | ||
97 | return ip6t_do_table(skb, hook, in, out, | ||
98 | nf_local_out_net(in, out)->ipv6.ip6table_security); | ||
99 | } | ||
100 | |||
101 | static struct nf_hook_ops ip6t_ops[] __read_mostly = { | ||
102 | { | ||
103 | .hook = ip6t_local_in_hook, | ||
104 | .owner = THIS_MODULE, | ||
105 | .pf = PF_INET6, | ||
106 | .hooknum = NF_INET_LOCAL_IN, | ||
107 | .priority = NF_IP6_PRI_SECURITY, | ||
108 | }, | ||
109 | { | ||
110 | .hook = ip6t_forward_hook, | ||
111 | .owner = THIS_MODULE, | ||
112 | .pf = PF_INET6, | ||
113 | .hooknum = NF_INET_FORWARD, | ||
114 | .priority = NF_IP6_PRI_SECURITY, | ||
115 | }, | ||
116 | { | ||
117 | .hook = ip6t_local_out_hook, | ||
118 | .owner = THIS_MODULE, | ||
119 | .pf = PF_INET6, | ||
120 | .hooknum = NF_INET_LOCAL_OUT, | ||
121 | .priority = NF_IP6_PRI_SECURITY, | ||
122 | }, | ||
123 | }; | ||
124 | |||
125 | static int __net_init ip6table_security_net_init(struct net *net) | ||
126 | { | ||
127 | net->ipv6.ip6table_security = | ||
128 | ip6t_register_table(net, &security_table, &initial_table.repl); | ||
129 | |||
130 | if (IS_ERR(net->ipv6.ip6table_security)) | ||
131 | return PTR_ERR(net->ipv6.ip6table_security); | ||
132 | |||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | static void __net_exit ip6table_security_net_exit(struct net *net) | ||
137 | { | ||
138 | ip6t_unregister_table(net->ipv6.ip6table_security); | ||
139 | } | ||
140 | |||
141 | static struct pernet_operations ip6table_security_net_ops = { | ||
142 | .init = ip6table_security_net_init, | ||
143 | .exit = ip6table_security_net_exit, | ||
144 | }; | ||
145 | |||
146 | static int __init ip6table_security_init(void) | ||
147 | { | ||
148 | int ret; | ||
149 | |||
150 | ret = register_pernet_subsys(&ip6table_security_net_ops); | ||
151 | if (ret < 0) | ||
152 | return ret; | ||
153 | |||
154 | ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); | ||
155 | if (ret < 0) | ||
156 | goto cleanup_table; | ||
157 | |||
158 | return ret; | ||
159 | |||
160 | cleanup_table: | ||
161 | unregister_pernet_subsys(&ip6table_security_net_ops); | ||
162 | return ret; | ||
163 | } | ||
164 | |||
165 | static void __exit ip6table_security_fini(void) | ||
166 | { | ||
167 | nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); | ||
168 | unregister_pernet_subsys(&ip6table_security_net_ops); | ||
169 | } | ||
170 | |||
171 | module_init(ip6table_security_init); | ||
172 | module_exit(ip6table_security_fini); | ||
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index ee713b03e9ec..14d47d833545 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | |||
@@ -89,9 +89,8 @@ static int icmpv6_packet(struct nf_conn *ct, | |||
89 | means this will only run once even if count hits zero twice | 89 | means this will only run once even if count hits zero twice |
90 | (theoretically possible with SMP) */ | 90 | (theoretically possible with SMP) */ |
91 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { | 91 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { |
92 | if (atomic_dec_and_test(&ct->proto.icmp.count) | 92 | if (atomic_dec_and_test(&ct->proto.icmp.count)) |
93 | && del_timer(&ct->timeout)) | 93 | nf_ct_kill_acct(ct, ctinfo, skb); |
94 | ct->timeout.function((unsigned long)ct); | ||
95 | } else { | 94 | } else { |
96 | atomic_inc(&ct->proto.icmp.count); | 95 | atomic_inc(&ct->proto.icmp.count); |
97 | nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb); | 96 | nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb); |
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index df0736a4cafa..f82f6074cf85 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
@@ -7,8 +7,6 @@ | |||
7 | * PROC file system. This is very similar to the IPv4 version, | 7 | * PROC file system. This is very similar to the IPv4 version, |
8 | * except it reports the sockets in the INET6 address family. | 8 | * except it reports the sockets in the INET6 address family. |
9 | * | 9 | * |
10 | * Version: $Id: proc.c,v 1.17 2002/02/01 22:01:04 davem Exp $ | ||
11 | * | ||
12 | * Authors: David S. Miller (davem@caip.rutgers.edu) | 10 | * Authors: David S. Miller (davem@caip.rutgers.edu) |
13 | * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> | 11 | * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> |
14 | * | 12 | * |
@@ -185,32 +183,7 @@ static int snmp6_seq_show(struct seq_file *seq, void *v) | |||
185 | 183 | ||
186 | static int sockstat6_seq_open(struct inode *inode, struct file *file) | 184 | static int sockstat6_seq_open(struct inode *inode, struct file *file) |
187 | { | 185 | { |
188 | int err; | 186 | return single_open_net(inode, file, sockstat6_seq_show); |
189 | struct net *net; | ||
190 | |||
191 | err = -ENXIO; | ||
192 | net = get_proc_net(inode); | ||
193 | if (net == NULL) | ||
194 | goto err_net; | ||
195 | |||
196 | err = single_open(file, sockstat6_seq_show, net); | ||
197 | if (err < 0) | ||
198 | goto err_open; | ||
199 | |||
200 | return 0; | ||
201 | |||
202 | err_open: | ||
203 | put_net(net); | ||
204 | err_net: | ||
205 | return err; | ||
206 | } | ||
207 | |||
208 | static int sockstat6_seq_release(struct inode *inode, struct file *file) | ||
209 | { | ||
210 | struct net *net = ((struct seq_file *)file->private_data)->private; | ||
211 | |||
212 | put_net(net); | ||
213 | return single_release(inode, file); | ||
214 | } | 187 | } |
215 | 188 | ||
216 | static const struct file_operations sockstat6_seq_fops = { | 189 | static const struct file_operations sockstat6_seq_fops = { |
@@ -218,7 +191,7 @@ static const struct file_operations sockstat6_seq_fops = { | |||
218 | .open = sockstat6_seq_open, | 191 | .open = sockstat6_seq_open, |
219 | .read = seq_read, | 192 | .read = seq_read, |
220 | .llseek = seq_lseek, | 193 | .llseek = seq_lseek, |
221 | .release = sockstat6_seq_release, | 194 | .release = single_release_net, |
222 | }; | 195 | }; |
223 | 196 | ||
224 | static int snmp6_seq_open(struct inode *inode, struct file *file) | 197 | static int snmp6_seq_open(struct inode *inode, struct file *file) |
@@ -241,7 +214,7 @@ int snmp6_register_dev(struct inet6_dev *idev) | |||
241 | if (!idev || !idev->dev) | 214 | if (!idev || !idev->dev) |
242 | return -EINVAL; | 215 | return -EINVAL; |
243 | 216 | ||
244 | if (dev_net(idev->dev) != &init_net) | 217 | if (!net_eq(dev_net(idev->dev), &init_net)) |
245 | return 0; | 218 | return 0; |
246 | 219 | ||
247 | if (!proc_net_devsnmp6) | 220 | if (!proc_net_devsnmp6) |
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c index f929f47b925e..9ab789159913 100644 --- a/net/ipv6/protocol.c +++ b/net/ipv6/protocol.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * PF_INET6 protocol dispatch tables. | 6 | * PF_INET6 protocol dispatch tables. |
7 | * | 7 | * |
8 | * Version: $Id: protocol.c,v 1.10 2001/05/18 02:25:49 davem Exp $ | ||
9 | * | ||
10 | * Authors: Pedro Roque <roque@di.fc.ul.pt> | 8 | * Authors: Pedro Roque <roque@di.fc.ul.pt> |
11 | * | 9 | * |
12 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 3aee12310d94..01d47674f7e5 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -7,8 +7,6 @@ | |||
7 | * | 7 | * |
8 | * Adapted from linux/net/ipv4/raw.c | 8 | * Adapted from linux/net/ipv4/raw.c |
9 | * | 9 | * |
10 | * $Id: raw.c,v 1.51 2002/02/01 22:01:04 davem Exp $ | ||
11 | * | ||
12 | * Fixes: | 10 | * Fixes: |
13 | * Hideaki YOSHIFUJI : sin6_scope_id support | 11 | * Hideaki YOSHIFUJI : sin6_scope_id support |
14 | * YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance) | 12 | * YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance) |
@@ -1159,18 +1157,18 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
1159 | static void rawv6_close(struct sock *sk, long timeout) | 1157 | static void rawv6_close(struct sock *sk, long timeout) |
1160 | { | 1158 | { |
1161 | if (inet_sk(sk)->num == IPPROTO_RAW) | 1159 | if (inet_sk(sk)->num == IPPROTO_RAW) |
1162 | ip6_ra_control(sk, -1, NULL); | 1160 | ip6_ra_control(sk, -1); |
1163 | ip6mr_sk_done(sk); | 1161 | ip6mr_sk_done(sk); |
1164 | sk_common_release(sk); | 1162 | sk_common_release(sk); |
1165 | } | 1163 | } |
1166 | 1164 | ||
1167 | static int raw6_destroy(struct sock *sk) | 1165 | static void raw6_destroy(struct sock *sk) |
1168 | { | 1166 | { |
1169 | lock_sock(sk); | 1167 | lock_sock(sk); |
1170 | ip6_flush_pending_frames(sk); | 1168 | ip6_flush_pending_frames(sk); |
1171 | release_sock(sk); | 1169 | release_sock(sk); |
1172 | 1170 | ||
1173 | return inet6_destroy_sock(sk); | 1171 | inet6_destroy_sock(sk); |
1174 | } | 1172 | } |
1175 | 1173 | ||
1176 | static int rawv6_init_sk(struct sock *sk) | 1174 | static int rawv6_init_sk(struct sock *sk) |
@@ -1253,7 +1251,7 @@ static int raw6_seq_show(struct seq_file *seq, void *v) | |||
1253 | "local_address " | 1251 | "local_address " |
1254 | "remote_address " | 1252 | "remote_address " |
1255 | "st tx_queue rx_queue tr tm->when retrnsmt" | 1253 | "st tx_queue rx_queue tr tm->when retrnsmt" |
1256 | " uid timeout inode drops\n"); | 1254 | " uid timeout inode ref pointer drops\n"); |
1257 | else | 1255 | else |
1258 | raw6_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); | 1256 | raw6_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); |
1259 | return 0; | 1257 | return 0; |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index a60d7d129713..6ab957ec2dd6 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Pedro Roque <roque@di.fc.ul.pt> | 6 | * Pedro Roque <roque@di.fc.ul.pt> |
7 | * | 7 | * |
8 | * $Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $ | ||
9 | * | ||
10 | * Based on: net/ipv4/ip_fragment.c | 8 | * Based on: net/ipv4/ip_fragment.c |
11 | * | 9 | * |
12 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
@@ -634,7 +632,7 @@ static struct inet6_protocol frag_protocol = | |||
634 | }; | 632 | }; |
635 | 633 | ||
636 | #ifdef CONFIG_SYSCTL | 634 | #ifdef CONFIG_SYSCTL |
637 | static struct ctl_table ip6_frags_ctl_table[] = { | 635 | static struct ctl_table ip6_frags_ns_ctl_table[] = { |
638 | { | 636 | { |
639 | .ctl_name = NET_IPV6_IP6FRAG_HIGH_THRESH, | 637 | .ctl_name = NET_IPV6_IP6FRAG_HIGH_THRESH, |
640 | .procname = "ip6frag_high_thresh", | 638 | .procname = "ip6frag_high_thresh", |
@@ -660,6 +658,10 @@ static struct ctl_table ip6_frags_ctl_table[] = { | |||
660 | .proc_handler = &proc_dointvec_jiffies, | 658 | .proc_handler = &proc_dointvec_jiffies, |
661 | .strategy = &sysctl_jiffies, | 659 | .strategy = &sysctl_jiffies, |
662 | }, | 660 | }, |
661 | { } | ||
662 | }; | ||
663 | |||
664 | static struct ctl_table ip6_frags_ctl_table[] = { | ||
663 | { | 665 | { |
664 | .ctl_name = NET_IPV6_IP6FRAG_SECRET_INTERVAL, | 666 | .ctl_name = NET_IPV6_IP6FRAG_SECRET_INTERVAL, |
665 | .procname = "ip6frag_secret_interval", | 667 | .procname = "ip6frag_secret_interval", |
@@ -672,21 +674,20 @@ static struct ctl_table ip6_frags_ctl_table[] = { | |||
672 | { } | 674 | { } |
673 | }; | 675 | }; |
674 | 676 | ||
675 | static int ip6_frags_sysctl_register(struct net *net) | 677 | static int ip6_frags_ns_sysctl_register(struct net *net) |
676 | { | 678 | { |
677 | struct ctl_table *table; | 679 | struct ctl_table *table; |
678 | struct ctl_table_header *hdr; | 680 | struct ctl_table_header *hdr; |
679 | 681 | ||
680 | table = ip6_frags_ctl_table; | 682 | table = ip6_frags_ns_ctl_table; |
681 | if (net != &init_net) { | 683 | if (net != &init_net) { |
682 | table = kmemdup(table, sizeof(ip6_frags_ctl_table), GFP_KERNEL); | 684 | table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL); |
683 | if (table == NULL) | 685 | if (table == NULL) |
684 | goto err_alloc; | 686 | goto err_alloc; |
685 | 687 | ||
686 | table[0].data = &net->ipv6.frags.high_thresh; | 688 | table[0].data = &net->ipv6.frags.high_thresh; |
687 | table[1].data = &net->ipv6.frags.low_thresh; | 689 | table[1].data = &net->ipv6.frags.low_thresh; |
688 | table[2].data = &net->ipv6.frags.timeout; | 690 | table[2].data = &net->ipv6.frags.timeout; |
689 | table[3].mode &= ~0222; | ||
690 | } | 691 | } |
691 | 692 | ||
692 | hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table); | 693 | hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table); |
@@ -703,7 +704,7 @@ err_alloc: | |||
703 | return -ENOMEM; | 704 | return -ENOMEM; |
704 | } | 705 | } |
705 | 706 | ||
706 | static void ip6_frags_sysctl_unregister(struct net *net) | 707 | static void ip6_frags_ns_sysctl_unregister(struct net *net) |
707 | { | 708 | { |
708 | struct ctl_table *table; | 709 | struct ctl_table *table; |
709 | 710 | ||
@@ -711,13 +712,36 @@ static void ip6_frags_sysctl_unregister(struct net *net) | |||
711 | unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr); | 712 | unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr); |
712 | kfree(table); | 713 | kfree(table); |
713 | } | 714 | } |
715 | |||
716 | static struct ctl_table_header *ip6_ctl_header; | ||
717 | |||
718 | static int ip6_frags_sysctl_register(void) | ||
719 | { | ||
720 | ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path, | ||
721 | ip6_frags_ctl_table); | ||
722 | return ip6_ctl_header == NULL ? -ENOMEM : 0; | ||
723 | } | ||
724 | |||
725 | static void ip6_frags_sysctl_unregister(void) | ||
726 | { | ||
727 | unregister_net_sysctl_table(ip6_ctl_header); | ||
728 | } | ||
714 | #else | 729 | #else |
715 | static inline int ip6_frags_sysctl_register(struct net *net) | 730 | static inline int ip6_frags_ns_sysctl_register(struct net *net) |
716 | { | 731 | { |
717 | return 0; | 732 | return 0; |
718 | } | 733 | } |
719 | 734 | ||
720 | static inline void ip6_frags_sysctl_unregister(struct net *net) | 735 | static inline void ip6_frags_ns_sysctl_unregister(struct net *net) |
736 | { | ||
737 | } | ||
738 | |||
739 | static inline int ip6_frags_sysctl_register(void) | ||
740 | { | ||
741 | return 0; | ||
742 | } | ||
743 | |||
744 | static inline void ip6_frags_sysctl_unregister(void) | ||
721 | { | 745 | { |
722 | } | 746 | } |
723 | #endif | 747 | #endif |
@@ -730,12 +754,12 @@ static int ipv6_frags_init_net(struct net *net) | |||
730 | 754 | ||
731 | inet_frags_init_net(&net->ipv6.frags); | 755 | inet_frags_init_net(&net->ipv6.frags); |
732 | 756 | ||
733 | return ip6_frags_sysctl_register(net); | 757 | return ip6_frags_ns_sysctl_register(net); |
734 | } | 758 | } |
735 | 759 | ||
736 | static void ipv6_frags_exit_net(struct net *net) | 760 | static void ipv6_frags_exit_net(struct net *net) |
737 | { | 761 | { |
738 | ip6_frags_sysctl_unregister(net); | 762 | ip6_frags_ns_sysctl_unregister(net); |
739 | inet_frags_exit_net(&net->ipv6.frags, &ip6_frags); | 763 | inet_frags_exit_net(&net->ipv6.frags, &ip6_frags); |
740 | } | 764 | } |
741 | 765 | ||
@@ -752,7 +776,13 @@ int __init ipv6_frag_init(void) | |||
752 | if (ret) | 776 | if (ret) |
753 | goto out; | 777 | goto out; |
754 | 778 | ||
755 | register_pernet_subsys(&ip6_frags_ops); | 779 | ret = ip6_frags_sysctl_register(); |
780 | if (ret) | ||
781 | goto err_sysctl; | ||
782 | |||
783 | ret = register_pernet_subsys(&ip6_frags_ops); | ||
784 | if (ret) | ||
785 | goto err_pernet; | ||
756 | 786 | ||
757 | ip6_frags.hashfn = ip6_hashfn; | 787 | ip6_frags.hashfn = ip6_hashfn; |
758 | ip6_frags.constructor = ip6_frag_init; | 788 | ip6_frags.constructor = ip6_frag_init; |
@@ -765,11 +795,18 @@ int __init ipv6_frag_init(void) | |||
765 | inet_frags_init(&ip6_frags); | 795 | inet_frags_init(&ip6_frags); |
766 | out: | 796 | out: |
767 | return ret; | 797 | return ret; |
798 | |||
799 | err_pernet: | ||
800 | ip6_frags_sysctl_unregister(); | ||
801 | err_sysctl: | ||
802 | inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT); | ||
803 | goto out; | ||
768 | } | 804 | } |
769 | 805 | ||
770 | void ipv6_frag_exit(void) | 806 | void ipv6_frag_exit(void) |
771 | { | 807 | { |
772 | inet_frags_fini(&ip6_frags); | 808 | inet_frags_fini(&ip6_frags); |
809 | ip6_frags_sysctl_unregister(); | ||
773 | unregister_pernet_subsys(&ip6_frags_ops); | 810 | unregister_pernet_subsys(&ip6_frags_ops); |
774 | inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT); | 811 | inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT); |
775 | } | 812 | } |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 7ff687020fa9..615b328de251 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Pedro Roque <roque@di.fc.ul.pt> | 6 | * Pedro Roque <roque@di.fc.ul.pt> |
7 | * | 7 | * |
8 | * $Id: route.c,v 1.56 2001/10/31 21:55:55 davem Exp $ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
12 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
@@ -230,7 +228,7 @@ static __inline__ int rt6_check_expired(const struct rt6_info *rt) | |||
230 | static inline int rt6_need_strict(struct in6_addr *daddr) | 228 | static inline int rt6_need_strict(struct in6_addr *daddr) |
231 | { | 229 | { |
232 | return (ipv6_addr_type(daddr) & | 230 | return (ipv6_addr_type(daddr) & |
233 | (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)); | 231 | (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)); |
234 | } | 232 | } |
235 | 233 | ||
236 | /* | 234 | /* |
@@ -239,15 +237,20 @@ static inline int rt6_need_strict(struct in6_addr *daddr) | |||
239 | 237 | ||
240 | static inline struct rt6_info *rt6_device_match(struct net *net, | 238 | static inline struct rt6_info *rt6_device_match(struct net *net, |
241 | struct rt6_info *rt, | 239 | struct rt6_info *rt, |
240 | struct in6_addr *saddr, | ||
242 | int oif, | 241 | int oif, |
243 | int flags) | 242 | int flags) |
244 | { | 243 | { |
245 | struct rt6_info *local = NULL; | 244 | struct rt6_info *local = NULL; |
246 | struct rt6_info *sprt; | 245 | struct rt6_info *sprt; |
247 | 246 | ||
248 | if (oif) { | 247 | if (!oif && ipv6_addr_any(saddr)) |
249 | for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) { | 248 | goto out; |
250 | struct net_device *dev = sprt->rt6i_dev; | 249 | |
250 | for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) { | ||
251 | struct net_device *dev = sprt->rt6i_dev; | ||
252 | |||
253 | if (oif) { | ||
251 | if (dev->ifindex == oif) | 254 | if (dev->ifindex == oif) |
252 | return sprt; | 255 | return sprt; |
253 | if (dev->flags & IFF_LOOPBACK) { | 256 | if (dev->flags & IFF_LOOPBACK) { |
@@ -261,14 +264,21 @@ static inline struct rt6_info *rt6_device_match(struct net *net, | |||
261 | } | 264 | } |
262 | local = sprt; | 265 | local = sprt; |
263 | } | 266 | } |
267 | } else { | ||
268 | if (ipv6_chk_addr(net, saddr, dev, | ||
269 | flags & RT6_LOOKUP_F_IFACE)) | ||
270 | return sprt; | ||
264 | } | 271 | } |
272 | } | ||
265 | 273 | ||
274 | if (oif) { | ||
266 | if (local) | 275 | if (local) |
267 | return local; | 276 | return local; |
268 | 277 | ||
269 | if (flags & RT6_LOOKUP_F_IFACE) | 278 | if (flags & RT6_LOOKUP_F_IFACE) |
270 | return net->ipv6.ip6_null_entry; | 279 | return net->ipv6.ip6_null_entry; |
271 | } | 280 | } |
281 | out: | ||
272 | return rt; | 282 | return rt; |
273 | } | 283 | } |
274 | 284 | ||
@@ -541,7 +551,7 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net, | |||
541 | fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); | 551 | fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); |
542 | restart: | 552 | restart: |
543 | rt = fn->leaf; | 553 | rt = fn->leaf; |
544 | rt = rt6_device_match(net, rt, fl->oif, flags); | 554 | rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags); |
545 | BACKTRACK(net, &fl->fl6_src); | 555 | BACKTRACK(net, &fl->fl6_src); |
546 | out: | 556 | out: |
547 | dst_use(&rt->u.dst, jiffies); | 557 | dst_use(&rt->u.dst, jiffies); |
@@ -666,7 +676,7 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, | |||
666 | int strict = 0; | 676 | int strict = 0; |
667 | int attempts = 3; | 677 | int attempts = 3; |
668 | int err; | 678 | int err; |
669 | int reachable = ipv6_devconf.forwarding ? 0 : RT6_LOOKUP_F_REACHABLE; | 679 | int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE; |
670 | 680 | ||
671 | strict |= flags & RT6_LOOKUP_F_IFACE; | 681 | strict |= flags & RT6_LOOKUP_F_IFACE; |
672 | 682 | ||
@@ -1048,7 +1058,7 @@ int ip6_dst_hoplimit(struct dst_entry *dst) | |||
1048 | hoplimit = idev->cnf.hop_limit; | 1058 | hoplimit = idev->cnf.hop_limit; |
1049 | in6_dev_put(idev); | 1059 | in6_dev_put(idev); |
1050 | } else | 1060 | } else |
1051 | hoplimit = ipv6_devconf.hop_limit; | 1061 | hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit; |
1052 | } | 1062 | } |
1053 | return hoplimit; | 1063 | return hoplimit; |
1054 | } | 1064 | } |
@@ -2406,26 +2416,7 @@ static int ipv6_route_show(struct seq_file *m, void *v) | |||
2406 | 2416 | ||
2407 | static int ipv6_route_open(struct inode *inode, struct file *file) | 2417 | static int ipv6_route_open(struct inode *inode, struct file *file) |
2408 | { | 2418 | { |
2409 | int err; | 2419 | return single_open_net(inode, file, ipv6_route_show); |
2410 | struct net *net = get_proc_net(inode); | ||
2411 | if (!net) | ||
2412 | return -ENXIO; | ||
2413 | |||
2414 | err = single_open(file, ipv6_route_show, net); | ||
2415 | if (err < 0) { | ||
2416 | put_net(net); | ||
2417 | return err; | ||
2418 | } | ||
2419 | |||
2420 | return 0; | ||
2421 | } | ||
2422 | |||
2423 | static int ipv6_route_release(struct inode *inode, struct file *file) | ||
2424 | { | ||
2425 | struct seq_file *seq = file->private_data; | ||
2426 | struct net *net = seq->private; | ||
2427 | put_net(net); | ||
2428 | return single_release(inode, file); | ||
2429 | } | 2420 | } |
2430 | 2421 | ||
2431 | static const struct file_operations ipv6_route_proc_fops = { | 2422 | static const struct file_operations ipv6_route_proc_fops = { |
@@ -2433,7 +2424,7 @@ static const struct file_operations ipv6_route_proc_fops = { | |||
2433 | .open = ipv6_route_open, | 2424 | .open = ipv6_route_open, |
2434 | .read = seq_read, | 2425 | .read = seq_read, |
2435 | .llseek = seq_lseek, | 2426 | .llseek = seq_lseek, |
2436 | .release = ipv6_route_release, | 2427 | .release = single_release_net, |
2437 | }; | 2428 | }; |
2438 | 2429 | ||
2439 | static int rt6_stats_seq_show(struct seq_file *seq, void *v) | 2430 | static int rt6_stats_seq_show(struct seq_file *seq, void *v) |
@@ -2453,26 +2444,7 @@ static int rt6_stats_seq_show(struct seq_file *seq, void *v) | |||
2453 | 2444 | ||
2454 | static int rt6_stats_seq_open(struct inode *inode, struct file *file) | 2445 | static int rt6_stats_seq_open(struct inode *inode, struct file *file) |
2455 | { | 2446 | { |
2456 | int err; | 2447 | return single_open_net(inode, file, rt6_stats_seq_show); |
2457 | struct net *net = get_proc_net(inode); | ||
2458 | if (!net) | ||
2459 | return -ENXIO; | ||
2460 | |||
2461 | err = single_open(file, rt6_stats_seq_show, net); | ||
2462 | if (err < 0) { | ||
2463 | put_net(net); | ||
2464 | return err; | ||
2465 | } | ||
2466 | |||
2467 | return 0; | ||
2468 | } | ||
2469 | |||
2470 | static int rt6_stats_seq_release(struct inode *inode, struct file *file) | ||
2471 | { | ||
2472 | struct seq_file *seq = file->private_data; | ||
2473 | struct net *net = (struct net *)seq->private; | ||
2474 | put_net(net); | ||
2475 | return single_release(inode, file); | ||
2476 | } | 2448 | } |
2477 | 2449 | ||
2478 | static const struct file_operations rt6_stats_seq_fops = { | 2450 | static const struct file_operations rt6_stats_seq_fops = { |
@@ -2480,7 +2452,7 @@ static const struct file_operations rt6_stats_seq_fops = { | |||
2480 | .open = rt6_stats_seq_open, | 2452 | .open = rt6_stats_seq_open, |
2481 | .read = seq_read, | 2453 | .read = seq_read, |
2482 | .llseek = seq_lseek, | 2454 | .llseek = seq_lseek, |
2483 | .release = rt6_stats_seq_release, | 2455 | .release = single_release_net, |
2484 | }; | 2456 | }; |
2485 | #endif /* CONFIG_PROC_FS */ | 2457 | #endif /* CONFIG_PROC_FS */ |
2486 | 2458 | ||
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 32e871a6c25a..b7a50e968506 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -6,8 +6,6 @@ | |||
6 | * Pedro Roque <roque@di.fc.ul.pt> | 6 | * Pedro Roque <roque@di.fc.ul.pt> |
7 | * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> | 7 | * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> |
8 | * | 8 | * |
9 | * $Id: sit.c,v 1.53 2001/09/25 05:09:53 davem Exp $ | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
12 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
13 | * as published by the Free Software Foundation; either version | 11 | * as published by the Free Software Foundation; either version |
@@ -493,13 +491,13 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
493 | 491 | ||
494 | if ((tunnel->dev->priv_flags & IFF_ISATAP) && | 492 | if ((tunnel->dev->priv_flags & IFF_ISATAP) && |
495 | !isatap_chksrc(skb, iph, tunnel)) { | 493 | !isatap_chksrc(skb, iph, tunnel)) { |
496 | tunnel->stat.rx_errors++; | 494 | tunnel->dev->stats.rx_errors++; |
497 | read_unlock(&ipip6_lock); | 495 | read_unlock(&ipip6_lock); |
498 | kfree_skb(skb); | 496 | kfree_skb(skb); |
499 | return 0; | 497 | return 0; |
500 | } | 498 | } |
501 | tunnel->stat.rx_packets++; | 499 | tunnel->dev->stats.rx_packets++; |
502 | tunnel->stat.rx_bytes += skb->len; | 500 | tunnel->dev->stats.rx_bytes += skb->len; |
503 | skb->dev = tunnel->dev; | 501 | skb->dev = tunnel->dev; |
504 | dst_release(skb->dst); | 502 | dst_release(skb->dst); |
505 | skb->dst = NULL; | 503 | skb->dst = NULL; |
@@ -539,7 +537,7 @@ static inline __be32 try_6to4(struct in6_addr *v6dst) | |||
539 | static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 537 | static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) |
540 | { | 538 | { |
541 | struct ip_tunnel *tunnel = netdev_priv(dev); | 539 | struct ip_tunnel *tunnel = netdev_priv(dev); |
542 | struct net_device_stats *stats = &tunnel->stat; | 540 | struct net_device_stats *stats = &tunnel->dev->stats; |
543 | struct iphdr *tiph = &tunnel->parms.iph; | 541 | struct iphdr *tiph = &tunnel->parms.iph; |
544 | struct ipv6hdr *iph6 = ipv6_hdr(skb); | 542 | struct ipv6hdr *iph6 = ipv6_hdr(skb); |
545 | u8 tos = tunnel->parms.iph.tos; | 543 | u8 tos = tunnel->parms.iph.tos; |
@@ -553,7 +551,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
553 | int addr_type; | 551 | int addr_type; |
554 | 552 | ||
555 | if (tunnel->recursion++) { | 553 | if (tunnel->recursion++) { |
556 | tunnel->stat.collisions++; | 554 | stats->collisions++; |
557 | goto tx_error; | 555 | goto tx_error; |
558 | } | 556 | } |
559 | 557 | ||
@@ -620,20 +618,20 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
620 | .oif = tunnel->parms.link, | 618 | .oif = tunnel->parms.link, |
621 | .proto = IPPROTO_IPV6 }; | 619 | .proto = IPPROTO_IPV6 }; |
622 | if (ip_route_output_key(dev_net(dev), &rt, &fl)) { | 620 | if (ip_route_output_key(dev_net(dev), &rt, &fl)) { |
623 | tunnel->stat.tx_carrier_errors++; | 621 | stats->tx_carrier_errors++; |
624 | goto tx_error_icmp; | 622 | goto tx_error_icmp; |
625 | } | 623 | } |
626 | } | 624 | } |
627 | if (rt->rt_type != RTN_UNICAST) { | 625 | if (rt->rt_type != RTN_UNICAST) { |
628 | ip_rt_put(rt); | 626 | ip_rt_put(rt); |
629 | tunnel->stat.tx_carrier_errors++; | 627 | stats->tx_carrier_errors++; |
630 | goto tx_error_icmp; | 628 | goto tx_error_icmp; |
631 | } | 629 | } |
632 | tdev = rt->u.dst.dev; | 630 | tdev = rt->u.dst.dev; |
633 | 631 | ||
634 | if (tdev == dev) { | 632 | if (tdev == dev) { |
635 | ip_rt_put(rt); | 633 | ip_rt_put(rt); |
636 | tunnel->stat.collisions++; | 634 | stats->collisions++; |
637 | goto tx_error; | 635 | goto tx_error; |
638 | } | 636 | } |
639 | 637 | ||
@@ -643,7 +641,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
643 | mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; | 641 | mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; |
644 | 642 | ||
645 | if (mtu < 68) { | 643 | if (mtu < 68) { |
646 | tunnel->stat.collisions++; | 644 | stats->collisions++; |
647 | ip_rt_put(rt); | 645 | ip_rt_put(rt); |
648 | goto tx_error; | 646 | goto tx_error; |
649 | } | 647 | } |
@@ -920,11 +918,6 @@ done: | |||
920 | return err; | 918 | return err; |
921 | } | 919 | } |
922 | 920 | ||
923 | static struct net_device_stats *ipip6_tunnel_get_stats(struct net_device *dev) | ||
924 | { | ||
925 | return &(((struct ip_tunnel*)netdev_priv(dev))->stat); | ||
926 | } | ||
927 | |||
928 | static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) | 921 | static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) |
929 | { | 922 | { |
930 | if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr)) | 923 | if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr)) |
@@ -938,7 +931,6 @@ static void ipip6_tunnel_setup(struct net_device *dev) | |||
938 | dev->uninit = ipip6_tunnel_uninit; | 931 | dev->uninit = ipip6_tunnel_uninit; |
939 | dev->destructor = free_netdev; | 932 | dev->destructor = free_netdev; |
940 | dev->hard_start_xmit = ipip6_tunnel_xmit; | 933 | dev->hard_start_xmit = ipip6_tunnel_xmit; |
941 | dev->get_stats = ipip6_tunnel_get_stats; | ||
942 | dev->do_ioctl = ipip6_tunnel_ioctl; | 934 | dev->do_ioctl = ipip6_tunnel_ioctl; |
943 | dev->change_mtu = ipip6_tunnel_change_mtu; | 935 | dev->change_mtu = ipip6_tunnel_change_mtu; |
944 | 936 | ||
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 3ecc1157994e..6a68eeb7bbf8 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c | |||
@@ -137,7 +137,7 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) | |||
137 | ; | 137 | ; |
138 | *mssp = msstab[mssind] + 1; | 138 | *mssp = msstab[mssind] + 1; |
139 | 139 | ||
140 | NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT); | 140 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); |
141 | 141 | ||
142 | return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source, | 142 | return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source, |
143 | th->dest, ntohl(th->seq), | 143 | th->dest, ntohl(th->seq), |
@@ -177,11 +177,11 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
177 | 177 | ||
178 | if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) || | 178 | if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) || |
179 | (mss = cookie_check(skb, cookie)) == 0) { | 179 | (mss = cookie_check(skb, cookie)) == 0) { |
180 | NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED); | 180 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); |
181 | goto out; | 181 | goto out; |
182 | } | 182 | } |
183 | 183 | ||
184 | NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV); | 184 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); |
185 | 185 | ||
186 | /* check for timestamp cookie support */ | 186 | /* check for timestamp cookie support */ |
187 | memset(&tcp_opt, 0, sizeof(tcp_opt)); | 187 | memset(&tcp_opt, 0, sizeof(tcp_opt)); |
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index 3804dcbbfab0..5c99274558bf 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c | |||
@@ -37,6 +37,10 @@ static ctl_table ipv6_table_template[] = { | |||
37 | .mode = 0644, | 37 | .mode = 0644, |
38 | .proc_handler = &proc_dointvec | 38 | .proc_handler = &proc_dointvec |
39 | }, | 39 | }, |
40 | { .ctl_name = 0 } | ||
41 | }; | ||
42 | |||
43 | static ctl_table ipv6_table[] = { | ||
40 | { | 44 | { |
41 | .ctl_name = NET_IPV6_MLD_MAX_MSF, | 45 | .ctl_name = NET_IPV6_MLD_MAX_MSF, |
42 | .procname = "mld_max_msf", | 46 | .procname = "mld_max_msf", |
@@ -80,12 +84,6 @@ static int ipv6_sysctl_net_init(struct net *net) | |||
80 | 84 | ||
81 | ipv6_table[2].data = &net->ipv6.sysctl.bindv6only; | 85 | ipv6_table[2].data = &net->ipv6.sysctl.bindv6only; |
82 | 86 | ||
83 | /* We don't want this value to be per namespace, it should be global | ||
84 | to all namespaces, so make it read-only when we are not in the | ||
85 | init network namespace */ | ||
86 | if (net != &init_net) | ||
87 | ipv6_table[3].mode = 0444; | ||
88 | |||
89 | net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path, | 87 | net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path, |
90 | ipv6_table); | 88 | ipv6_table); |
91 | if (!net->ipv6.sysctl.table) | 89 | if (!net->ipv6.sysctl.table) |
@@ -126,12 +124,29 @@ static struct pernet_operations ipv6_sysctl_net_ops = { | |||
126 | .exit = ipv6_sysctl_net_exit, | 124 | .exit = ipv6_sysctl_net_exit, |
127 | }; | 125 | }; |
128 | 126 | ||
127 | static struct ctl_table_header *ip6_header; | ||
128 | |||
129 | int ipv6_sysctl_register(void) | 129 | int ipv6_sysctl_register(void) |
130 | { | 130 | { |
131 | return register_pernet_subsys(&ipv6_sysctl_net_ops); | 131 | int err = -ENOMEM;; |
132 | |||
133 | ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table); | ||
134 | if (ip6_header == NULL) | ||
135 | goto out; | ||
136 | |||
137 | err = register_pernet_subsys(&ipv6_sysctl_net_ops); | ||
138 | if (err) | ||
139 | goto err_pernet; | ||
140 | out: | ||
141 | return err; | ||
142 | |||
143 | err_pernet: | ||
144 | unregister_net_sysctl_table(ip6_header); | ||
145 | goto out; | ||
132 | } | 146 | } |
133 | 147 | ||
134 | void ipv6_sysctl_unregister(void) | 148 | void ipv6_sysctl_unregister(void) |
135 | { | 149 | { |
150 | unregister_net_sysctl_table(ip6_header); | ||
136 | unregister_pernet_subsys(&ipv6_sysctl_net_ops); | 151 | unregister_pernet_subsys(&ipv6_sysctl_net_ops); |
137 | } | 152 | } |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 40ea9c36d24b..ae45f9835014 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * Authors: | 5 | * Authors: |
6 | * Pedro Roque <roque@di.fc.ul.pt> | 6 | * Pedro Roque <roque@di.fc.ul.pt> |
7 | * | 7 | * |
8 | * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $ | ||
9 | * | ||
10 | * Based on: | 8 | * Based on: |
11 | * linux/net/ipv4/tcp.c | 9 | * linux/net/ipv4/tcp.c |
12 | * linux/net/ipv4/tcp_input.c | 10 | * linux/net/ipv4/tcp_input.c |
@@ -72,8 +70,6 @@ | |||
72 | 70 | ||
73 | static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); | 71 | static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); |
74 | static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req); | 72 | static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req); |
75 | static void tcp_v6_send_check(struct sock *sk, int len, | ||
76 | struct sk_buff *skb); | ||
77 | 73 | ||
78 | static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); | 74 | static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); |
79 | 75 | ||
@@ -82,6 +78,12 @@ static struct inet_connection_sock_af_ops ipv6_specific; | |||
82 | #ifdef CONFIG_TCP_MD5SIG | 78 | #ifdef CONFIG_TCP_MD5SIG |
83 | static struct tcp_sock_af_ops tcp_sock_ipv6_specific; | 79 | static struct tcp_sock_af_ops tcp_sock_ipv6_specific; |
84 | static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; | 80 | static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; |
81 | #else | ||
82 | static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, | ||
83 | struct in6_addr *addr) | ||
84 | { | ||
85 | return NULL; | ||
86 | } | ||
85 | #endif | 87 | #endif |
86 | 88 | ||
87 | static void tcp_v6_hash(struct sock *sk) | 89 | static void tcp_v6_hash(struct sock *sk) |
@@ -321,8 +323,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
321 | int err; | 323 | int err; |
322 | struct tcp_sock *tp; | 324 | struct tcp_sock *tp; |
323 | __u32 seq; | 325 | __u32 seq; |
326 | struct net *net = dev_net(skb->dev); | ||
324 | 327 | ||
325 | sk = inet6_lookup(dev_net(skb->dev), &tcp_hashinfo, &hdr->daddr, | 328 | sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr, |
326 | th->dest, &hdr->saddr, th->source, skb->dev->ifindex); | 329 | th->dest, &hdr->saddr, th->source, skb->dev->ifindex); |
327 | 330 | ||
328 | if (sk == NULL) { | 331 | if (sk == NULL) { |
@@ -337,7 +340,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
337 | 340 | ||
338 | bh_lock_sock(sk); | 341 | bh_lock_sock(sk); |
339 | if (sock_owned_by_user(sk)) | 342 | if (sock_owned_by_user(sk)) |
340 | NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); | 343 | NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); |
341 | 344 | ||
342 | if (sk->sk_state == TCP_CLOSE) | 345 | if (sk->sk_state == TCP_CLOSE) |
343 | goto out; | 346 | goto out; |
@@ -346,7 +349,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
346 | seq = ntohl(th->seq); | 349 | seq = ntohl(th->seq); |
347 | if (sk->sk_state != TCP_LISTEN && | 350 | if (sk->sk_state != TCP_LISTEN && |
348 | !between(seq, tp->snd_una, tp->snd_nxt)) { | 351 | !between(seq, tp->snd_una, tp->snd_nxt)) { |
349 | NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); | 352 | NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); |
350 | goto out; | 353 | goto out; |
351 | } | 354 | } |
352 | 355 | ||
@@ -421,7 +424,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
421 | BUG_TRAP(req->sk == NULL); | 424 | BUG_TRAP(req->sk == NULL); |
422 | 425 | ||
423 | if (seq != tcp_rsk(req)->snt_isn) { | 426 | if (seq != tcp_rsk(req)->snt_isn) { |
424 | NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); | 427 | NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); |
425 | goto out; | 428 | goto out; |
426 | } | 429 | } |
427 | 430 | ||
@@ -733,109 +736,105 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval, | |||
733 | return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen); | 736 | return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen); |
734 | } | 737 | } |
735 | 738 | ||
736 | static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | 739 | static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, |
737 | struct in6_addr *saddr, | 740 | struct in6_addr *daddr, |
738 | struct in6_addr *daddr, | 741 | struct in6_addr *saddr, int nbytes) |
739 | struct tcphdr *th, int protocol, | ||
740 | unsigned int tcplen) | ||
741 | { | 742 | { |
742 | struct scatterlist sg[4]; | ||
743 | __u16 data_len; | ||
744 | int block = 0; | ||
745 | __sum16 cksum; | ||
746 | struct tcp_md5sig_pool *hp; | ||
747 | struct tcp6_pseudohdr *bp; | 743 | struct tcp6_pseudohdr *bp; |
748 | struct hash_desc *desc; | 744 | struct scatterlist sg; |
749 | int err; | ||
750 | unsigned int nbytes = 0; | ||
751 | 745 | ||
752 | hp = tcp_get_md5sig_pool(); | ||
753 | if (!hp) { | ||
754 | printk(KERN_WARNING "%s(): hash pool not found...\n", __func__); | ||
755 | goto clear_hash_noput; | ||
756 | } | ||
757 | bp = &hp->md5_blk.ip6; | 746 | bp = &hp->md5_blk.ip6; |
758 | desc = &hp->md5_desc; | ||
759 | |||
760 | /* 1. TCP pseudo-header (RFC2460) */ | 747 | /* 1. TCP pseudo-header (RFC2460) */ |
761 | ipv6_addr_copy(&bp->saddr, saddr); | 748 | ipv6_addr_copy(&bp->saddr, saddr); |
762 | ipv6_addr_copy(&bp->daddr, daddr); | 749 | ipv6_addr_copy(&bp->daddr, daddr); |
763 | bp->len = htonl(tcplen); | 750 | bp->protocol = cpu_to_be32(IPPROTO_TCP); |
764 | bp->protocol = htonl(protocol); | 751 | bp->len = cpu_to_be16(nbytes); |
765 | |||
766 | sg_init_table(sg, 4); | ||
767 | |||
768 | sg_set_buf(&sg[block++], bp, sizeof(*bp)); | ||
769 | nbytes += sizeof(*bp); | ||
770 | 752 | ||
771 | /* 2. TCP header, excluding options */ | 753 | sg_init_one(&sg, bp, sizeof(*bp)); |
772 | cksum = th->check; | 754 | return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp)); |
773 | th->check = 0; | 755 | } |
774 | sg_set_buf(&sg[block++], th, sizeof(*th)); | ||
775 | nbytes += sizeof(*th); | ||
776 | |||
777 | /* 3. TCP segment data (if any) */ | ||
778 | data_len = tcplen - (th->doff << 2); | ||
779 | if (data_len > 0) { | ||
780 | u8 *data = (u8 *)th + (th->doff << 2); | ||
781 | sg_set_buf(&sg[block++], data, data_len); | ||
782 | nbytes += data_len; | ||
783 | } | ||
784 | 756 | ||
785 | /* 4. shared key */ | 757 | static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, |
786 | sg_set_buf(&sg[block++], key->key, key->keylen); | 758 | struct in6_addr *daddr, struct in6_addr *saddr, |
787 | nbytes += key->keylen; | 759 | struct tcphdr *th) |
760 | { | ||
761 | struct tcp_md5sig_pool *hp; | ||
762 | struct hash_desc *desc; | ||
788 | 763 | ||
789 | sg_mark_end(&sg[block - 1]); | 764 | hp = tcp_get_md5sig_pool(); |
765 | if (!hp) | ||
766 | goto clear_hash_noput; | ||
767 | desc = &hp->md5_desc; | ||
790 | 768 | ||
791 | /* Now store the hash into the packet */ | 769 | if (crypto_hash_init(desc)) |
792 | err = crypto_hash_init(desc); | ||
793 | if (err) { | ||
794 | printk(KERN_WARNING "%s(): hash_init failed\n", __func__); | ||
795 | goto clear_hash; | 770 | goto clear_hash; |
796 | } | 771 | if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2)) |
797 | err = crypto_hash_update(desc, sg, nbytes); | ||
798 | if (err) { | ||
799 | printk(KERN_WARNING "%s(): hash_update failed\n", __func__); | ||
800 | goto clear_hash; | 772 | goto clear_hash; |
801 | } | 773 | if (tcp_md5_hash_header(hp, th)) |
802 | err = crypto_hash_final(desc, md5_hash); | 774 | goto clear_hash; |
803 | if (err) { | 775 | if (tcp_md5_hash_key(hp, key)) |
804 | printk(KERN_WARNING "%s(): hash_final failed\n", __func__); | 776 | goto clear_hash; |
777 | if (crypto_hash_final(desc, md5_hash)) | ||
805 | goto clear_hash; | 778 | goto clear_hash; |
806 | } | ||
807 | 779 | ||
808 | /* Reset header, and free up the crypto */ | ||
809 | tcp_put_md5sig_pool(); | 780 | tcp_put_md5sig_pool(); |
810 | th->check = cksum; | ||
811 | out: | ||
812 | return 0; | 781 | return 0; |
782 | |||
813 | clear_hash: | 783 | clear_hash: |
814 | tcp_put_md5sig_pool(); | 784 | tcp_put_md5sig_pool(); |
815 | clear_hash_noput: | 785 | clear_hash_noput: |
816 | memset(md5_hash, 0, 16); | 786 | memset(md5_hash, 0, 16); |
817 | goto out; | 787 | return 1; |
818 | } | 788 | } |
819 | 789 | ||
820 | static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | 790 | static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key, |
821 | struct sock *sk, | 791 | struct sock *sk, struct request_sock *req, |
822 | struct dst_entry *dst, | 792 | struct sk_buff *skb) |
823 | struct request_sock *req, | ||
824 | struct tcphdr *th, int protocol, | ||
825 | unsigned int tcplen) | ||
826 | { | 793 | { |
827 | struct in6_addr *saddr, *daddr; | 794 | struct in6_addr *saddr, *daddr; |
795 | struct tcp_md5sig_pool *hp; | ||
796 | struct hash_desc *desc; | ||
797 | struct tcphdr *th = tcp_hdr(skb); | ||
828 | 798 | ||
829 | if (sk) { | 799 | if (sk) { |
830 | saddr = &inet6_sk(sk)->saddr; | 800 | saddr = &inet6_sk(sk)->saddr; |
831 | daddr = &inet6_sk(sk)->daddr; | 801 | daddr = &inet6_sk(sk)->daddr; |
832 | } else { | 802 | } else if (req) { |
833 | saddr = &inet6_rsk(req)->loc_addr; | 803 | saddr = &inet6_rsk(req)->loc_addr; |
834 | daddr = &inet6_rsk(req)->rmt_addr; | 804 | daddr = &inet6_rsk(req)->rmt_addr; |
805 | } else { | ||
806 | struct ipv6hdr *ip6h = ipv6_hdr(skb); | ||
807 | saddr = &ip6h->saddr; | ||
808 | daddr = &ip6h->daddr; | ||
835 | } | 809 | } |
836 | return tcp_v6_do_calc_md5_hash(md5_hash, key, | 810 | |
837 | saddr, daddr, | 811 | hp = tcp_get_md5sig_pool(); |
838 | th, protocol, tcplen); | 812 | if (!hp) |
813 | goto clear_hash_noput; | ||
814 | desc = &hp->md5_desc; | ||
815 | |||
816 | if (crypto_hash_init(desc)) | ||
817 | goto clear_hash; | ||
818 | |||
819 | if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len)) | ||
820 | goto clear_hash; | ||
821 | if (tcp_md5_hash_header(hp, th)) | ||
822 | goto clear_hash; | ||
823 | if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2)) | ||
824 | goto clear_hash; | ||
825 | if (tcp_md5_hash_key(hp, key)) | ||
826 | goto clear_hash; | ||
827 | if (crypto_hash_final(desc, md5_hash)) | ||
828 | goto clear_hash; | ||
829 | |||
830 | tcp_put_md5sig_pool(); | ||
831 | return 0; | ||
832 | |||
833 | clear_hash: | ||
834 | tcp_put_md5sig_pool(); | ||
835 | clear_hash_noput: | ||
836 | memset(md5_hash, 0, 16); | ||
837 | return 1; | ||
839 | } | 838 | } |
840 | 839 | ||
841 | static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) | 840 | static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) |
@@ -844,43 +843,12 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) | |||
844 | struct tcp_md5sig_key *hash_expected; | 843 | struct tcp_md5sig_key *hash_expected; |
845 | struct ipv6hdr *ip6h = ipv6_hdr(skb); | 844 | struct ipv6hdr *ip6h = ipv6_hdr(skb); |
846 | struct tcphdr *th = tcp_hdr(skb); | 845 | struct tcphdr *th = tcp_hdr(skb); |
847 | int length = (th->doff << 2) - sizeof (*th); | ||
848 | int genhash; | 846 | int genhash; |
849 | u8 *ptr; | ||
850 | u8 newhash[16]; | 847 | u8 newhash[16]; |
851 | 848 | ||
852 | hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr); | 849 | hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr); |
850 | hash_location = tcp_parse_md5sig_option(th); | ||
853 | 851 | ||
854 | /* If the TCP option is too short, we can short cut */ | ||
855 | if (length < TCPOLEN_MD5SIG) | ||
856 | return hash_expected ? 1 : 0; | ||
857 | |||
858 | /* parse options */ | ||
859 | ptr = (u8*)(th + 1); | ||
860 | while (length > 0) { | ||
861 | int opcode = *ptr++; | ||
862 | int opsize; | ||
863 | |||
864 | switch(opcode) { | ||
865 | case TCPOPT_EOL: | ||
866 | goto done_opts; | ||
867 | case TCPOPT_NOP: | ||
868 | length--; | ||
869 | continue; | ||
870 | default: | ||
871 | opsize = *ptr++; | ||
872 | if (opsize < 2 || opsize > length) | ||
873 | goto done_opts; | ||
874 | if (opcode == TCPOPT_MD5SIG) { | ||
875 | hash_location = ptr; | ||
876 | goto done_opts; | ||
877 | } | ||
878 | } | ||
879 | ptr += opsize - 2; | ||
880 | length -= opsize; | ||
881 | } | ||
882 | |||
883 | done_opts: | ||
884 | /* do we have a hash as expected? */ | 852 | /* do we have a hash as expected? */ |
885 | if (!hash_expected) { | 853 | if (!hash_expected) { |
886 | if (!hash_location) | 854 | if (!hash_location) |
@@ -907,11 +875,10 @@ done_opts: | |||
907 | } | 875 | } |
908 | 876 | ||
909 | /* check the signature */ | 877 | /* check the signature */ |
910 | genhash = tcp_v6_do_calc_md5_hash(newhash, | 878 | genhash = tcp_v6_md5_hash_skb(newhash, |
911 | hash_expected, | 879 | hash_expected, |
912 | &ip6h->saddr, &ip6h->daddr, | 880 | NULL, NULL, skb); |
913 | th, sk->sk_protocol, | 881 | |
914 | skb->len); | ||
915 | if (genhash || memcmp(hash_location, newhash, 16) != 0) { | 882 | if (genhash || memcmp(hash_location, newhash, 16) != 0) { |
916 | if (net_ratelimit()) { | 883 | if (net_ratelimit()) { |
917 | printk(KERN_INFO "MD5 Hash %s for " | 884 | printk(KERN_INFO "MD5 Hash %s for " |
@@ -1048,10 +1015,9 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) | |||
1048 | (TCPOPT_NOP << 16) | | 1015 | (TCPOPT_NOP << 16) | |
1049 | (TCPOPT_MD5SIG << 8) | | 1016 | (TCPOPT_MD5SIG << 8) | |
1050 | TCPOLEN_MD5SIG); | 1017 | TCPOLEN_MD5SIG); |
1051 | tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key, | 1018 | tcp_v6_md5_hash_hdr((__u8 *)&opt[1], key, |
1052 | &ipv6_hdr(skb)->daddr, | 1019 | &ipv6_hdr(skb)->daddr, |
1053 | &ipv6_hdr(skb)->saddr, | 1020 | &ipv6_hdr(skb)->saddr, t1); |
1054 | t1, IPPROTO_TCP, tot_len); | ||
1055 | } | 1021 | } |
1056 | #endif | 1022 | #endif |
1057 | 1023 | ||
@@ -1079,8 +1045,8 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) | |||
1079 | 1045 | ||
1080 | if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { | 1046 | if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { |
1081 | ip6_xmit(ctl_sk, buff, &fl, NULL, 0); | 1047 | ip6_xmit(ctl_sk, buff, &fl, NULL, 0); |
1082 | TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); | 1048 | TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); |
1083 | TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); | 1049 | TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); |
1084 | return; | 1050 | return; |
1085 | } | 1051 | } |
1086 | } | 1052 | } |
@@ -1088,8 +1054,8 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) | |||
1088 | kfree_skb(buff); | 1054 | kfree_skb(buff); |
1089 | } | 1055 | } |
1090 | 1056 | ||
1091 | static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, | 1057 | static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts, |
1092 | struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts) | 1058 | struct tcp_md5sig_key *key) |
1093 | { | 1059 | { |
1094 | struct tcphdr *th = tcp_hdr(skb), *t1; | 1060 | struct tcphdr *th = tcp_hdr(skb), *t1; |
1095 | struct sk_buff *buff; | 1061 | struct sk_buff *buff; |
@@ -1098,22 +1064,6 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, | |||
1098 | struct sock *ctl_sk = net->ipv6.tcp_sk; | 1064 | struct sock *ctl_sk = net->ipv6.tcp_sk; |
1099 | unsigned int tot_len = sizeof(struct tcphdr); | 1065 | unsigned int tot_len = sizeof(struct tcphdr); |
1100 | __be32 *topt; | 1066 | __be32 *topt; |
1101 | #ifdef CONFIG_TCP_MD5SIG | ||
1102 | struct tcp_md5sig_key *key; | ||
1103 | struct tcp_md5sig_key tw_key; | ||
1104 | #endif | ||
1105 | |||
1106 | #ifdef CONFIG_TCP_MD5SIG | ||
1107 | if (!tw && skb->sk) { | ||
1108 | key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr); | ||
1109 | } else if (tw && tw->tw_md5_keylen) { | ||
1110 | tw_key.key = tw->tw_md5_key; | ||
1111 | tw_key.keylen = tw->tw_md5_keylen; | ||
1112 | key = &tw_key; | ||
1113 | } else { | ||
1114 | key = NULL; | ||
1115 | } | ||
1116 | #endif | ||
1117 | 1067 | ||
1118 | if (ts) | 1068 | if (ts) |
1119 | tot_len += TCPOLEN_TSTAMP_ALIGNED; | 1069 | tot_len += TCPOLEN_TSTAMP_ALIGNED; |
@@ -1154,10 +1104,9 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, | |||
1154 | if (key) { | 1104 | if (key) { |
1155 | *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | | 1105 | *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | |
1156 | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); | 1106 | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); |
1157 | tcp_v6_do_calc_md5_hash((__u8 *)topt, key, | 1107 | tcp_v6_md5_hash_hdr((__u8 *)topt, key, |
1158 | &ipv6_hdr(skb)->daddr, | 1108 | &ipv6_hdr(skb)->daddr, |
1159 | &ipv6_hdr(skb)->saddr, | 1109 | &ipv6_hdr(skb)->saddr, t1); |
1160 | t1, IPPROTO_TCP, tot_len); | ||
1161 | } | 1110 | } |
1162 | #endif | 1111 | #endif |
1163 | 1112 | ||
@@ -1180,7 +1129,7 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, | |||
1180 | if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) { | 1129 | if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) { |
1181 | if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { | 1130 | if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { |
1182 | ip6_xmit(ctl_sk, buff, &fl, NULL, 0); | 1131 | ip6_xmit(ctl_sk, buff, &fl, NULL, 0); |
1183 | TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); | 1132 | TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); |
1184 | return; | 1133 | return; |
1185 | } | 1134 | } |
1186 | } | 1135 | } |
@@ -1193,16 +1142,17 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) | |||
1193 | struct inet_timewait_sock *tw = inet_twsk(sk); | 1142 | struct inet_timewait_sock *tw = inet_twsk(sk); |
1194 | struct tcp_timewait_sock *tcptw = tcp_twsk(sk); | 1143 | struct tcp_timewait_sock *tcptw = tcp_twsk(sk); |
1195 | 1144 | ||
1196 | tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, | 1145 | tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, |
1197 | tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, | 1146 | tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, |
1198 | tcptw->tw_ts_recent); | 1147 | tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw)); |
1199 | 1148 | ||
1200 | inet_twsk_put(tw); | 1149 | inet_twsk_put(tw); |
1201 | } | 1150 | } |
1202 | 1151 | ||
1203 | static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) | 1152 | static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) |
1204 | { | 1153 | { |
1205 | tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent); | 1154 | tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent, |
1155 | tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr)); | ||
1206 | } | 1156 | } |
1207 | 1157 | ||
1208 | 1158 | ||
@@ -1538,9 +1488,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1538 | return newsk; | 1488 | return newsk; |
1539 | 1489 | ||
1540 | out_overflow: | 1490 | out_overflow: |
1541 | NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS); | 1491 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); |
1542 | out: | 1492 | out: |
1543 | NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS); | 1493 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); |
1544 | if (opt && opt != np->opt) | 1494 | if (opt && opt != np->opt) |
1545 | sock_kfree_s(sk, opt, opt->tot_len); | 1495 | sock_kfree_s(sk, opt, opt->tot_len); |
1546 | dst_release(dst); | 1496 | dst_release(dst); |
@@ -1669,7 +1619,7 @@ discard: | |||
1669 | kfree_skb(skb); | 1619 | kfree_skb(skb); |
1670 | return 0; | 1620 | return 0; |
1671 | csum_err: | 1621 | csum_err: |
1672 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | 1622 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); |
1673 | goto discard; | 1623 | goto discard; |
1674 | 1624 | ||
1675 | 1625 | ||
@@ -1707,6 +1657,7 @@ static int tcp_v6_rcv(struct sk_buff *skb) | |||
1707 | struct tcphdr *th; | 1657 | struct tcphdr *th; |
1708 | struct sock *sk; | 1658 | struct sock *sk; |
1709 | int ret; | 1659 | int ret; |
1660 | struct net *net = dev_net(skb->dev); | ||
1710 | 1661 | ||
1711 | if (skb->pkt_type != PACKET_HOST) | 1662 | if (skb->pkt_type != PACKET_HOST) |
1712 | goto discard_it; | 1663 | goto discard_it; |
@@ -1714,7 +1665,7 @@ static int tcp_v6_rcv(struct sk_buff *skb) | |||
1714 | /* | 1665 | /* |
1715 | * Count it even if it's bad. | 1666 | * Count it even if it's bad. |
1716 | */ | 1667 | */ |
1717 | TCP_INC_STATS_BH(TCP_MIB_INSEGS); | 1668 | TCP_INC_STATS_BH(net, TCP_MIB_INSEGS); |
1718 | 1669 | ||
1719 | if (!pskb_may_pull(skb, sizeof(struct tcphdr))) | 1670 | if (!pskb_may_pull(skb, sizeof(struct tcphdr))) |
1720 | goto discard_it; | 1671 | goto discard_it; |
@@ -1738,7 +1689,7 @@ static int tcp_v6_rcv(struct sk_buff *skb) | |||
1738 | TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb)); | 1689 | TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb)); |
1739 | TCP_SKB_CB(skb)->sacked = 0; | 1690 | TCP_SKB_CB(skb)->sacked = 0; |
1740 | 1691 | ||
1741 | sk = __inet6_lookup(dev_net(skb->dev), &tcp_hashinfo, | 1692 | sk = __inet6_lookup(net, &tcp_hashinfo, |
1742 | &ipv6_hdr(skb)->saddr, th->source, | 1693 | &ipv6_hdr(skb)->saddr, th->source, |
1743 | &ipv6_hdr(skb)->daddr, ntohs(th->dest), | 1694 | &ipv6_hdr(skb)->daddr, ntohs(th->dest), |
1744 | inet6_iif(skb)); | 1695 | inet6_iif(skb)); |
@@ -1786,7 +1737,7 @@ no_tcp_socket: | |||
1786 | 1737 | ||
1787 | if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { | 1738 | if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { |
1788 | bad_packet: | 1739 | bad_packet: |
1789 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | 1740 | TCP_INC_STATS_BH(net, TCP_MIB_INERRS); |
1790 | } else { | 1741 | } else { |
1791 | tcp_v6_send_reset(NULL, skb); | 1742 | tcp_v6_send_reset(NULL, skb); |
1792 | } | 1743 | } |
@@ -1811,7 +1762,7 @@ do_time_wait: | |||
1811 | } | 1762 | } |
1812 | 1763 | ||
1813 | if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { | 1764 | if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { |
1814 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | 1765 | TCP_INC_STATS_BH(net, TCP_MIB_INERRS); |
1815 | inet_twsk_put(inet_twsk(sk)); | 1766 | inet_twsk_put(inet_twsk(sk)); |
1816 | goto discard_it; | 1767 | goto discard_it; |
1817 | } | 1768 | } |
@@ -1871,7 +1822,7 @@ static struct inet_connection_sock_af_ops ipv6_specific = { | |||
1871 | #ifdef CONFIG_TCP_MD5SIG | 1822 | #ifdef CONFIG_TCP_MD5SIG |
1872 | static struct tcp_sock_af_ops tcp_sock_ipv6_specific = { | 1823 | static struct tcp_sock_af_ops tcp_sock_ipv6_specific = { |
1873 | .md5_lookup = tcp_v6_md5_lookup, | 1824 | .md5_lookup = tcp_v6_md5_lookup, |
1874 | .calc_md5_hash = tcp_v6_calc_md5_hash, | 1825 | .calc_md5_hash = tcp_v6_md5_hash_skb, |
1875 | .md5_add = tcp_v6_md5_add_func, | 1826 | .md5_add = tcp_v6_md5_add_func, |
1876 | .md5_parse = tcp_v6_parse_md5_keys, | 1827 | .md5_parse = tcp_v6_parse_md5_keys, |
1877 | }; | 1828 | }; |
@@ -1903,7 +1854,7 @@ static struct inet_connection_sock_af_ops ipv6_mapped = { | |||
1903 | #ifdef CONFIG_TCP_MD5SIG | 1854 | #ifdef CONFIG_TCP_MD5SIG |
1904 | static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { | 1855 | static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { |
1905 | .md5_lookup = tcp_v4_md5_lookup, | 1856 | .md5_lookup = tcp_v4_md5_lookup, |
1906 | .calc_md5_hash = tcp_v4_calc_md5_hash, | 1857 | .calc_md5_hash = tcp_v4_md5_hash_skb, |
1907 | .md5_add = tcp_v6_md5_add_func, | 1858 | .md5_add = tcp_v6_md5_add_func, |
1908 | .md5_parse = tcp_v6_parse_md5_keys, | 1859 | .md5_parse = tcp_v6_parse_md5_keys, |
1909 | }; | 1860 | }; |
@@ -1960,7 +1911,7 @@ static int tcp_v6_init_sock(struct sock *sk) | |||
1960 | return 0; | 1911 | return 0; |
1961 | } | 1912 | } |
1962 | 1913 | ||
1963 | static int tcp_v6_destroy_sock(struct sock *sk) | 1914 | static void tcp_v6_destroy_sock(struct sock *sk) |
1964 | { | 1915 | { |
1965 | #ifdef CONFIG_TCP_MD5SIG | 1916 | #ifdef CONFIG_TCP_MD5SIG |
1966 | /* Clean up the MD5 key list */ | 1917 | /* Clean up the MD5 key list */ |
@@ -1968,7 +1919,7 @@ static int tcp_v6_destroy_sock(struct sock *sk) | |||
1968 | tcp_v6_clear_md5_list(sk); | 1919 | tcp_v6_clear_md5_list(sk); |
1969 | #endif | 1920 | #endif |
1970 | tcp_v4_destroy_sock(sk); | 1921 | tcp_v4_destroy_sock(sk); |
1971 | return inet6_destroy_sock(sk); | 1922 | inet6_destroy_sock(sk); |
1972 | } | 1923 | } |
1973 | 1924 | ||
1974 | #ifdef CONFIG_PROC_FS | 1925 | #ifdef CONFIG_PROC_FS |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index dd309626ae9a..d1477b350f76 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -7,8 +7,6 @@ | |||
7 | * | 7 | * |
8 | * Based on linux/ipv4/udp.c | 8 | * Based on linux/ipv4/udp.c |
9 | * | 9 | * |
10 | * $Id: udp.c,v 1.65 2002/02/01 22:01:04 davem Exp $ | ||
11 | * | ||
12 | * Fixes: | 10 | * Fixes: |
13 | * Hideaki YOSHIFUJI : sin6_scope_id support | 11 | * Hideaki YOSHIFUJI : sin6_scope_id support |
14 | * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which | 12 | * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which |
@@ -67,7 +65,7 @@ static struct sock *__udp6_lib_lookup(struct net *net, | |||
67 | int badness = -1; | 65 | int badness = -1; |
68 | 66 | ||
69 | read_lock(&udp_hash_lock); | 67 | read_lock(&udp_hash_lock); |
70 | sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) { | 68 | sk_for_each(sk, node, &udptable[udp_hashfn(net, hnum)]) { |
71 | struct inet_sock *inet = inet_sk(sk); | 69 | struct inet_sock *inet = inet_sk(sk); |
72 | 70 | ||
73 | if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && | 71 | if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && |
@@ -168,7 +166,8 @@ try_again: | |||
168 | goto out_free; | 166 | goto out_free; |
169 | 167 | ||
170 | if (!peeked) | 168 | if (!peeked) |
171 | UDP6_INC_STATS_USER(UDP_MIB_INDATAGRAMS, is_udplite); | 169 | UDP6_INC_STATS_USER(sock_net(sk), |
170 | UDP_MIB_INDATAGRAMS, is_udplite); | ||
172 | 171 | ||
173 | sock_recv_timestamp(msg, sk, skb); | 172 | sock_recv_timestamp(msg, sk, skb); |
174 | 173 | ||
@@ -215,7 +214,7 @@ out: | |||
215 | csum_copy_err: | 214 | csum_copy_err: |
216 | lock_sock(sk); | 215 | lock_sock(sk); |
217 | if (!skb_kill_datagram(sk, skb, flags)) | 216 | if (!skb_kill_datagram(sk, skb, flags)) |
218 | UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite); | 217 | UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
219 | release_sock(sk); | 218 | release_sock(sk); |
220 | 219 | ||
221 | if (flags & MSG_DONTWAIT) | 220 | if (flags & MSG_DONTWAIT) |
@@ -299,14 +298,17 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | |||
299 | 298 | ||
300 | if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { | 299 | if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { |
301 | /* Note that an ENOMEM error is charged twice */ | 300 | /* Note that an ENOMEM error is charged twice */ |
302 | if (rc == -ENOMEM) | 301 | if (rc == -ENOMEM) { |
303 | UDP6_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite); | 302 | UDP6_INC_STATS_BH(sock_net(sk), |
303 | UDP_MIB_RCVBUFERRORS, is_udplite); | ||
304 | atomic_inc(&sk->sk_drops); | ||
305 | } | ||
304 | goto drop; | 306 | goto drop; |
305 | } | 307 | } |
306 | 308 | ||
307 | return 0; | 309 | return 0; |
308 | drop: | 310 | drop: |
309 | UDP6_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); | 311 | UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
310 | kfree_skb(skb); | 312 | kfree_skb(skb); |
311 | return -1; | 313 | return -1; |
312 | } | 314 | } |
@@ -355,15 +357,16 @@ static struct sock *udp_v6_mcast_next(struct sock *sk, | |||
355 | * Note: called only from the BH handler context, | 357 | * Note: called only from the BH handler context, |
356 | * so we don't need to lock the hashes. | 358 | * so we don't need to lock the hashes. |
357 | */ | 359 | */ |
358 | static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr, | 360 | static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, |
359 | struct in6_addr *daddr, struct hlist_head udptable[]) | 361 | struct in6_addr *saddr, struct in6_addr *daddr, |
362 | struct hlist_head udptable[]) | ||
360 | { | 363 | { |
361 | struct sock *sk, *sk2; | 364 | struct sock *sk, *sk2; |
362 | const struct udphdr *uh = udp_hdr(skb); | 365 | const struct udphdr *uh = udp_hdr(skb); |
363 | int dif; | 366 | int dif; |
364 | 367 | ||
365 | read_lock(&udp_hash_lock); | 368 | read_lock(&udp_hash_lock); |
366 | sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]); | 369 | sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]); |
367 | dif = inet6_iif(skb); | 370 | dif = inet6_iif(skb); |
368 | sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); | 371 | sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); |
369 | if (!sk) { | 372 | if (!sk) { |
@@ -437,6 +440,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], | |||
437 | struct net_device *dev = skb->dev; | 440 | struct net_device *dev = skb->dev; |
438 | struct in6_addr *saddr, *daddr; | 441 | struct in6_addr *saddr, *daddr; |
439 | u32 ulen = 0; | 442 | u32 ulen = 0; |
443 | struct net *net = dev_net(skb->dev); | ||
440 | 444 | ||
441 | if (!pskb_may_pull(skb, sizeof(struct udphdr))) | 445 | if (!pskb_may_pull(skb, sizeof(struct udphdr))) |
442 | goto short_packet; | 446 | goto short_packet; |
@@ -475,7 +479,8 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], | |||
475 | * Multicast receive code | 479 | * Multicast receive code |
476 | */ | 480 | */ |
477 | if (ipv6_addr_is_multicast(daddr)) | 481 | if (ipv6_addr_is_multicast(daddr)) |
478 | return __udp6_lib_mcast_deliver(skb, saddr, daddr, udptable); | 482 | return __udp6_lib_mcast_deliver(net, skb, |
483 | saddr, daddr, udptable); | ||
479 | 484 | ||
480 | /* Unicast */ | 485 | /* Unicast */ |
481 | 486 | ||
@@ -483,7 +488,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], | |||
483 | * check socket cache ... must talk to Alan about his plans | 488 | * check socket cache ... must talk to Alan about his plans |
484 | * for sock caches... i'll skip this for now. | 489 | * for sock caches... i'll skip this for now. |
485 | */ | 490 | */ |
486 | sk = __udp6_lib_lookup(dev_net(skb->dev), saddr, uh->source, | 491 | sk = __udp6_lib_lookup(net, saddr, uh->source, |
487 | daddr, uh->dest, inet6_iif(skb), udptable); | 492 | daddr, uh->dest, inet6_iif(skb), udptable); |
488 | 493 | ||
489 | if (sk == NULL) { | 494 | if (sk == NULL) { |
@@ -492,7 +497,8 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], | |||
492 | 497 | ||
493 | if (udp_lib_checksum_complete(skb)) | 498 | if (udp_lib_checksum_complete(skb)) |
494 | goto discard; | 499 | goto discard; |
495 | UDP6_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); | 500 | UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, |
501 | proto == IPPROTO_UDPLITE); | ||
496 | 502 | ||
497 | icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev); | 503 | icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev); |
498 | 504 | ||
@@ -517,7 +523,7 @@ short_packet: | |||
517 | ulen, skb->len); | 523 | ulen, skb->len); |
518 | 524 | ||
519 | discard: | 525 | discard: |
520 | UDP6_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); | 526 | UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); |
521 | kfree_skb(skb); | 527 | kfree_skb(skb); |
522 | return 0; | 528 | return 0; |
523 | } | 529 | } |
@@ -587,7 +593,8 @@ out: | |||
587 | up->len = 0; | 593 | up->len = 0; |
588 | up->pending = 0; | 594 | up->pending = 0; |
589 | if (!err) | 595 | if (!err) |
590 | UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite); | 596 | UDP6_INC_STATS_USER(sock_net(sk), |
597 | UDP_MIB_OUTDATAGRAMS, is_udplite); | ||
591 | return err; | 598 | return err; |
592 | } | 599 | } |
593 | 600 | ||
@@ -869,7 +876,8 @@ out: | |||
869 | * seems like overkill. | 876 | * seems like overkill. |
870 | */ | 877 | */ |
871 | if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { | 878 | if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { |
872 | UDP6_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite); | 879 | UDP6_INC_STATS_USER(sock_net(sk), |
880 | UDP_MIB_SNDBUFERRORS, is_udplite); | ||
873 | } | 881 | } |
874 | return err; | 882 | return err; |
875 | 883 | ||
@@ -881,15 +889,13 @@ do_confirm: | |||
881 | goto out; | 889 | goto out; |
882 | } | 890 | } |
883 | 891 | ||
884 | int udpv6_destroy_sock(struct sock *sk) | 892 | void udpv6_destroy_sock(struct sock *sk) |
885 | { | 893 | { |
886 | lock_sock(sk); | 894 | lock_sock(sk); |
887 | udp_v6_flush_pending_frames(sk); | 895 | udp_v6_flush_pending_frames(sk); |
888 | release_sock(sk); | 896 | release_sock(sk); |
889 | 897 | ||
890 | inet6_destroy_sock(sk); | 898 | inet6_destroy_sock(sk); |
891 | |||
892 | return 0; | ||
893 | } | 899 | } |
894 | 900 | ||
895 | /* | 901 | /* |
@@ -955,7 +961,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket | |||
955 | srcp = ntohs(inet->sport); | 961 | srcp = ntohs(inet->sport); |
956 | seq_printf(seq, | 962 | seq_printf(seq, |
957 | "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " | 963 | "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " |
958 | "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p\n", | 964 | "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", |
959 | bucket, | 965 | bucket, |
960 | src->s6_addr32[0], src->s6_addr32[1], | 966 | src->s6_addr32[0], src->s6_addr32[1], |
961 | src->s6_addr32[2], src->s6_addr32[3], srcp, | 967 | src->s6_addr32[2], src->s6_addr32[3], srcp, |
@@ -967,7 +973,8 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket | |||
967 | 0, 0L, 0, | 973 | 0, 0L, 0, |
968 | sock_i_uid(sp), 0, | 974 | sock_i_uid(sp), 0, |
969 | sock_i_ino(sp), | 975 | sock_i_ino(sp), |
970 | atomic_read(&sp->sk_refcnt), sp); | 976 | atomic_read(&sp->sk_refcnt), sp, |
977 | atomic_read(&sp->sk_drops)); | ||
971 | } | 978 | } |
972 | 979 | ||
973 | int udp6_seq_show(struct seq_file *seq, void *v) | 980 | int udp6_seq_show(struct seq_file *seq, void *v) |
@@ -978,7 +985,7 @@ int udp6_seq_show(struct seq_file *seq, void *v) | |||
978 | "local_address " | 985 | "local_address " |
979 | "remote_address " | 986 | "remote_address " |
980 | "st tx_queue rx_queue tr tm->when retrnsmt" | 987 | "st tx_queue rx_queue tr tm->when retrnsmt" |
981 | " uid timeout inode\n"); | 988 | " uid timeout inode ref pointer drops\n"); |
982 | else | 989 | else |
983 | udp6_sock_seq_show(seq, v, ((struct udp_iter_state *)seq->private)->bucket); | 990 | udp6_sock_seq_show(seq, v, ((struct udp_iter_state *)seq->private)->bucket); |
984 | return 0; | 991 | return 0; |
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index 321b81a4d418..92dd7da766d8 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h | |||
@@ -29,7 +29,7 @@ extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
29 | struct msghdr *msg, size_t len, | 29 | struct msghdr *msg, size_t len, |
30 | int noblock, int flags, int *addr_len); | 30 | int noblock, int flags, int *addr_len); |
31 | extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); | 31 | extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); |
32 | extern int udpv6_destroy_sock(struct sock *sk); | 32 | extern void udpv6_destroy_sock(struct sock *sk); |
33 | 33 | ||
34 | #ifdef CONFIG_PROC_FS | 34 | #ifdef CONFIG_PROC_FS |
35 | extern int udp6_seq_show(struct seq_file *seq, void *v); | 35 | extern int udp6_seq_show(struct seq_file *seq, void *v); |
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index 491efd00a866..f6cdcb348e05 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c | |||
@@ -2,8 +2,6 @@ | |||
2 | * UDPLITEv6 An implementation of the UDP-Lite protocol over IPv6. | 2 | * UDPLITEv6 An implementation of the UDP-Lite protocol over IPv6. |
3 | * See also net/ipv4/udplite.c | 3 | * See also net/ipv4/udplite.c |
4 | * | 4 | * |
5 | * Version: $Id: udplite.c,v 1.9 2006/10/19 08:28:10 gerrit Exp $ | ||
6 | * | ||
7 | * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk> | 5 | * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk> |
8 | * | 6 | * |
9 | * Changes: | 7 | * Changes: |
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 81ae8735f5e3..b6e70f92e7fb 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c | |||
@@ -335,7 +335,7 @@ static int ipxitf_device_event(struct notifier_block *notifier, | |||
335 | struct net_device *dev = ptr; | 335 | struct net_device *dev = ptr; |
336 | struct ipx_interface *i, *tmp; | 336 | struct ipx_interface *i, *tmp; |
337 | 337 | ||
338 | if (dev_net(dev) != &init_net) | 338 | if (!net_eq(dev_net(dev), &init_net)) |
339 | return NOTIFY_DONE; | 339 | return NOTIFY_DONE; |
340 | 340 | ||
341 | if (event != NETDEV_DOWN && event != NETDEV_UP) | 341 | if (event != NETDEV_DOWN && event != NETDEV_UP) |
@@ -1636,7 +1636,7 @@ static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty | |||
1636 | u16 ipx_pktsize; | 1636 | u16 ipx_pktsize; |
1637 | int rc = 0; | 1637 | int rc = 0; |
1638 | 1638 | ||
1639 | if (dev_net(dev) != &init_net) | 1639 | if (!net_eq(dev_net(dev), &init_net)) |
1640 | goto drop; | 1640 | goto drop; |
1641 | 1641 | ||
1642 | /* Not ours */ | 1642 | /* Not ours */ |
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index 90894534f3cc..f17b65af9c9b 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c | |||
@@ -1326,7 +1326,7 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, | |||
1326 | int command; | 1326 | int command; |
1327 | __u8 control; | 1327 | __u8 control; |
1328 | 1328 | ||
1329 | if (dev_net(dev) != &init_net) | 1329 | if (!net_eq(dev_net(dev), &init_net)) |
1330 | goto out; | 1330 | goto out; |
1331 | 1331 | ||
1332 | /* FIXME: should we get our own field? */ | 1332 | /* FIXME: should we get our own field? */ |
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index e84a70dd346b..6d8ae03c14f5 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c | |||
@@ -631,8 +631,8 @@ dev_irnet_poll(struct file * file, | |||
631 | * This is the way pppd configure us and control us while the PPP | 631 | * This is the way pppd configure us and control us while the PPP |
632 | * instance is active. | 632 | * instance is active. |
633 | */ | 633 | */ |
634 | static int | 634 | static long |
635 | dev_irnet_ioctl(struct inode * inode, | 635 | dev_irnet_ioctl( |
636 | struct file * file, | 636 | struct file * file, |
637 | unsigned int cmd, | 637 | unsigned int cmd, |
638 | unsigned long arg) | 638 | unsigned long arg) |
@@ -663,6 +663,7 @@ dev_irnet_ioctl(struct inode * inode, | |||
663 | { | 663 | { |
664 | DEBUG(FS_INFO, "Entering PPP discipline.\n"); | 664 | DEBUG(FS_INFO, "Entering PPP discipline.\n"); |
665 | /* PPP channel setup (ap->chan in configued in dev_irnet_open())*/ | 665 | /* PPP channel setup (ap->chan in configued in dev_irnet_open())*/ |
666 | lock_kernel(); | ||
666 | err = ppp_register_channel(&ap->chan); | 667 | err = ppp_register_channel(&ap->chan); |
667 | if(err == 0) | 668 | if(err == 0) |
668 | { | 669 | { |
@@ -675,12 +676,14 @@ dev_irnet_ioctl(struct inode * inode, | |||
675 | } | 676 | } |
676 | else | 677 | else |
677 | DERROR(FS_ERROR, "Can't setup PPP channel...\n"); | 678 | DERROR(FS_ERROR, "Can't setup PPP channel...\n"); |
679 | unlock_kernel(); | ||
678 | } | 680 | } |
679 | else | 681 | else |
680 | { | 682 | { |
681 | /* In theory, should be N_TTY */ | 683 | /* In theory, should be N_TTY */ |
682 | DEBUG(FS_INFO, "Exiting PPP discipline.\n"); | 684 | DEBUG(FS_INFO, "Exiting PPP discipline.\n"); |
683 | /* Disconnect from the generic PPP layer */ | 685 | /* Disconnect from the generic PPP layer */ |
686 | lock_kernel(); | ||
684 | if(ap->ppp_open) | 687 | if(ap->ppp_open) |
685 | { | 688 | { |
686 | ap->ppp_open = 0; | 689 | ap->ppp_open = 0; |
@@ -689,24 +692,20 @@ dev_irnet_ioctl(struct inode * inode, | |||
689 | else | 692 | else |
690 | DERROR(FS_ERROR, "Channel not registered !\n"); | 693 | DERROR(FS_ERROR, "Channel not registered !\n"); |
691 | err = 0; | 694 | err = 0; |
695 | unlock_kernel(); | ||
692 | } | 696 | } |
693 | break; | 697 | break; |
694 | 698 | ||
695 | /* Query PPP channel and unit number */ | 699 | /* Query PPP channel and unit number */ |
696 | case PPPIOCGCHAN: | 700 | case PPPIOCGCHAN: |
697 | if(!ap->ppp_open) | 701 | if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan), |
698 | break; | 702 | (int __user *)argp)) |
699 | if(put_user(ppp_channel_index(&ap->chan), (int __user *)argp)) | 703 | err = 0; |
700 | break; | ||
701 | DEBUG(FS_INFO, "Query channel.\n"); | ||
702 | err = 0; | ||
703 | break; | 704 | break; |
704 | case PPPIOCGUNIT: | 705 | case PPPIOCGUNIT: |
705 | if(!ap->ppp_open) | 706 | lock_kernel(); |
706 | break; | 707 | if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan), |
707 | if(put_user(ppp_unit_number(&ap->chan), (int __user *)argp)) | 708 | (int __user *)argp)) |
708 | break; | ||
709 | DEBUG(FS_INFO, "Query unit number.\n"); | ||
710 | err = 0; | 709 | err = 0; |
711 | break; | 710 | break; |
712 | 711 | ||
@@ -726,34 +725,39 @@ dev_irnet_ioctl(struct inode * inode, | |||
726 | DEBUG(FS_INFO, "Standard PPP ioctl.\n"); | 725 | DEBUG(FS_INFO, "Standard PPP ioctl.\n"); |
727 | if(!capable(CAP_NET_ADMIN)) | 726 | if(!capable(CAP_NET_ADMIN)) |
728 | err = -EPERM; | 727 | err = -EPERM; |
729 | else | 728 | else { |
729 | lock_kernel(); | ||
730 | err = ppp_irnet_ioctl(&ap->chan, cmd, arg); | 730 | err = ppp_irnet_ioctl(&ap->chan, cmd, arg); |
731 | unlock_kernel(); | ||
732 | } | ||
731 | break; | 733 | break; |
732 | 734 | ||
733 | /* TTY IOCTLs : Pretend that we are a tty, to keep pppd happy */ | 735 | /* TTY IOCTLs : Pretend that we are a tty, to keep pppd happy */ |
734 | /* Get termios */ | 736 | /* Get termios */ |
735 | case TCGETS: | 737 | case TCGETS: |
736 | DEBUG(FS_INFO, "Get termios.\n"); | 738 | DEBUG(FS_INFO, "Get termios.\n"); |
739 | lock_kernel(); | ||
737 | #ifndef TCGETS2 | 740 | #ifndef TCGETS2 |
738 | if(kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios)) | 741 | if(!kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios)) |
739 | break; | 742 | err = 0; |
740 | #else | 743 | #else |
741 | if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios)) | 744 | if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios)) |
742 | break; | 745 | err = 0; |
743 | #endif | 746 | #endif |
744 | err = 0; | 747 | unlock_kernel(); |
745 | break; | 748 | break; |
746 | /* Set termios */ | 749 | /* Set termios */ |
747 | case TCSETSF: | 750 | case TCSETSF: |
748 | DEBUG(FS_INFO, "Set termios.\n"); | 751 | DEBUG(FS_INFO, "Set termios.\n"); |
752 | lock_kernel(); | ||
749 | #ifndef TCGETS2 | 753 | #ifndef TCGETS2 |
750 | if(user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp)) | 754 | if(!user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp)) |
751 | break; | 755 | err = 0; |
752 | #else | 756 | #else |
753 | if(user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp)) | 757 | if(!user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp)) |
754 | break; | 758 | err = 0; |
755 | #endif | 759 | #endif |
756 | err = 0; | 760 | unlock_kernel(); |
757 | break; | 761 | break; |
758 | 762 | ||
759 | /* Set DTR/RTS */ | 763 | /* Set DTR/RTS */ |
@@ -776,7 +780,9 @@ dev_irnet_ioctl(struct inode * inode, | |||
776 | * We should also worry that we don't accept junk here and that | 780 | * We should also worry that we don't accept junk here and that |
777 | * we get rid of our own buffers */ | 781 | * we get rid of our own buffers */ |
778 | #ifdef FLUSH_TO_PPP | 782 | #ifdef FLUSH_TO_PPP |
783 | lock_kernel(); | ||
779 | ppp_output_wakeup(&ap->chan); | 784 | ppp_output_wakeup(&ap->chan); |
785 | unlock_kernel(); | ||
780 | #endif /* FLUSH_TO_PPP */ | 786 | #endif /* FLUSH_TO_PPP */ |
781 | err = 0; | 787 | err = 0; |
782 | break; | 788 | break; |
@@ -791,7 +797,7 @@ dev_irnet_ioctl(struct inode * inode, | |||
791 | 797 | ||
792 | default: | 798 | default: |
793 | DERROR(FS_ERROR, "Unsupported ioctl (0x%X)\n", cmd); | 799 | DERROR(FS_ERROR, "Unsupported ioctl (0x%X)\n", cmd); |
794 | err = -ENOIOCTLCMD; | 800 | err = -ENOTTY; |
795 | } | 801 | } |
796 | 802 | ||
797 | DEXIT(FS_TRACE, " - err = 0x%X\n", err); | 803 | DEXIT(FS_TRACE, " - err = 0x%X\n", err); |
diff --git a/net/irda/irnet/irnet_ppp.h b/net/irda/irnet/irnet_ppp.h index d2beb7df8f7f..d9f8bd4ebd05 100644 --- a/net/irda/irnet/irnet_ppp.h +++ b/net/irda/irnet/irnet_ppp.h | |||
@@ -76,9 +76,8 @@ static ssize_t | |||
76 | static unsigned int | 76 | static unsigned int |
77 | dev_irnet_poll(struct file *, | 77 | dev_irnet_poll(struct file *, |
78 | poll_table *); | 78 | poll_table *); |
79 | static int | 79 | static long |
80 | dev_irnet_ioctl(struct inode *, | 80 | dev_irnet_ioctl(struct file *, |
81 | struct file *, | ||
82 | unsigned int, | 81 | unsigned int, |
83 | unsigned long); | 82 | unsigned long); |
84 | /* ------------------------ PPP INTERFACE ------------------------ */ | 83 | /* ------------------------ PPP INTERFACE ------------------------ */ |
@@ -102,7 +101,7 @@ static struct file_operations irnet_device_fops = | |||
102 | .read = dev_irnet_read, | 101 | .read = dev_irnet_read, |
103 | .write = dev_irnet_write, | 102 | .write = dev_irnet_write, |
104 | .poll = dev_irnet_poll, | 103 | .poll = dev_irnet_poll, |
105 | .ioctl = dev_irnet_ioctl, | 104 | .unlocked_ioctl = dev_irnet_ioctl, |
106 | .open = dev_irnet_open, | 105 | .open = dev_irnet_open, |
107 | .release = dev_irnet_close | 106 | .release = dev_irnet_close |
108 | /* Also : llseek, readdir, mmap, flush, fsync, fasync, lock, readv, writev */ | 107 | /* Also : llseek, readdir, mmap, flush, fsync, fasync, lock, readv, writev */ |
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index bda71015885c..29f7baa25110 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -644,6 +644,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
644 | } | 644 | } |
645 | 645 | ||
646 | txmsg.class = 0; | 646 | txmsg.class = 0; |
647 | memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len); | ||
647 | txmsg.tag = iucv->send_tag++; | 648 | txmsg.tag = iucv->send_tag++; |
648 | memcpy(skb->cb, &txmsg.tag, 4); | 649 | memcpy(skb->cb, &txmsg.tag, 4); |
649 | skb_queue_tail(&iucv->send_skb_q, skb); | 650 | skb_queue_tail(&iucv->send_skb_q, skb); |
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index cc34ac769a3c..265b1b289a32 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -474,14 +474,14 @@ static void iucv_setmask_mp(void) | |||
474 | { | 474 | { |
475 | int cpu; | 475 | int cpu; |
476 | 476 | ||
477 | preempt_disable(); | 477 | get_online_cpus(); |
478 | for_each_online_cpu(cpu) | 478 | for_each_online_cpu(cpu) |
479 | /* Enable all cpus with a declared buffer. */ | 479 | /* Enable all cpus with a declared buffer. */ |
480 | if (cpu_isset(cpu, iucv_buffer_cpumask) && | 480 | if (cpu_isset(cpu, iucv_buffer_cpumask) && |
481 | !cpu_isset(cpu, iucv_irq_cpumask)) | 481 | !cpu_isset(cpu, iucv_irq_cpumask)) |
482 | smp_call_function_single(cpu, iucv_allow_cpu, | 482 | smp_call_function_single(cpu, iucv_allow_cpu, |
483 | NULL, 1); | 483 | NULL, 1); |
484 | preempt_enable(); | 484 | put_online_cpus(); |
485 | } | 485 | } |
486 | 486 | ||
487 | /** | 487 | /** |
@@ -521,16 +521,18 @@ static int iucv_enable(void) | |||
521 | goto out; | 521 | goto out; |
522 | /* Declare per cpu buffers. */ | 522 | /* Declare per cpu buffers. */ |
523 | rc = -EIO; | 523 | rc = -EIO; |
524 | preempt_disable(); | 524 | get_online_cpus(); |
525 | for_each_online_cpu(cpu) | 525 | for_each_online_cpu(cpu) |
526 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); | 526 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); |
527 | preempt_enable(); | 527 | preempt_enable(); |
528 | if (cpus_empty(iucv_buffer_cpumask)) | 528 | if (cpus_empty(iucv_buffer_cpumask)) |
529 | /* No cpu could declare an iucv buffer. */ | 529 | /* No cpu could declare an iucv buffer. */ |
530 | goto out_path; | 530 | goto out_path; |
531 | put_online_cpus(); | ||
531 | return 0; | 532 | return 0; |
532 | 533 | ||
533 | out_path: | 534 | out_path: |
535 | put_online_cpus(); | ||
534 | kfree(iucv_path_table); | 536 | kfree(iucv_path_table); |
535 | out: | 537 | out: |
536 | return rc; | 538 | return rc; |
@@ -564,8 +566,11 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | |||
564 | return NOTIFY_BAD; | 566 | return NOTIFY_BAD; |
565 | iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), | 567 | iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), |
566 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 568 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); |
567 | if (!iucv_param[cpu]) | 569 | if (!iucv_param[cpu]) { |
570 | kfree(iucv_irq_data[cpu]); | ||
571 | iucv_irq_data[cpu] = NULL; | ||
568 | return NOTIFY_BAD; | 572 | return NOTIFY_BAD; |
573 | } | ||
569 | break; | 574 | break; |
570 | case CPU_UP_CANCELED: | 575 | case CPU_UP_CANCELED: |
571 | case CPU_UP_CANCELED_FROZEN: | 576 | case CPU_UP_CANCELED_FROZEN: |
@@ -598,7 +603,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | |||
598 | return NOTIFY_OK; | 603 | return NOTIFY_OK; |
599 | } | 604 | } |
600 | 605 | ||
601 | static struct notifier_block __cpuinitdata iucv_cpu_notifier = { | 606 | static struct notifier_block __refdata iucv_cpu_notifier = { |
602 | .notifier_call = iucv_cpu_notify, | 607 | .notifier_call = iucv_cpu_notify, |
603 | }; | 608 | }; |
604 | 609 | ||
diff --git a/net/key/af_key.c b/net/key/af_key.c index 7470e367272b..f0fc46c8038d 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -579,25 +579,43 @@ static uint8_t pfkey_proto_from_xfrm(uint8_t proto) | |||
579 | return (proto ? proto : IPSEC_PROTO_ANY); | 579 | return (proto ? proto : IPSEC_PROTO_ANY); |
580 | } | 580 | } |
581 | 581 | ||
582 | static int pfkey_sadb_addr2xfrm_addr(struct sadb_address *addr, | 582 | static inline int pfkey_sockaddr_len(sa_family_t family) |
583 | xfrm_address_t *xaddr) | ||
584 | { | 583 | { |
585 | switch (((struct sockaddr*)(addr + 1))->sa_family) { | 584 | switch (family) { |
585 | case AF_INET: | ||
586 | return sizeof(struct sockaddr_in); | ||
587 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
588 | case AF_INET6: | ||
589 | return sizeof(struct sockaddr_in6); | ||
590 | #endif | ||
591 | } | ||
592 | return 0; | ||
593 | } | ||
594 | |||
595 | static | ||
596 | int pfkey_sockaddr_extract(const struct sockaddr *sa, xfrm_address_t *xaddr) | ||
597 | { | ||
598 | switch (sa->sa_family) { | ||
586 | case AF_INET: | 599 | case AF_INET: |
587 | xaddr->a4 = | 600 | xaddr->a4 = |
588 | ((struct sockaddr_in *)(addr + 1))->sin_addr.s_addr; | 601 | ((struct sockaddr_in *)sa)->sin_addr.s_addr; |
589 | return AF_INET; | 602 | return AF_INET; |
590 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 603 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
591 | case AF_INET6: | 604 | case AF_INET6: |
592 | memcpy(xaddr->a6, | 605 | memcpy(xaddr->a6, |
593 | &((struct sockaddr_in6 *)(addr + 1))->sin6_addr, | 606 | &((struct sockaddr_in6 *)sa)->sin6_addr, |
594 | sizeof(struct in6_addr)); | 607 | sizeof(struct in6_addr)); |
595 | return AF_INET6; | 608 | return AF_INET6; |
596 | #endif | 609 | #endif |
597 | default: | ||
598 | return 0; | ||
599 | } | 610 | } |
600 | /* NOTREACHED */ | 611 | return 0; |
612 | } | ||
613 | |||
614 | static | ||
615 | int pfkey_sadb_addr2xfrm_addr(struct sadb_address *addr, xfrm_address_t *xaddr) | ||
616 | { | ||
617 | return pfkey_sockaddr_extract((struct sockaddr *)(addr + 1), | ||
618 | xaddr); | ||
601 | } | 619 | } |
602 | 620 | ||
603 | static struct xfrm_state *pfkey_xfrm_state_lookup(struct sadb_msg *hdr, void **ext_hdrs) | 621 | static struct xfrm_state *pfkey_xfrm_state_lookup(struct sadb_msg *hdr, void **ext_hdrs) |
@@ -642,20 +660,11 @@ static struct xfrm_state *pfkey_xfrm_state_lookup(struct sadb_msg *hdr, void ** | |||
642 | } | 660 | } |
643 | 661 | ||
644 | #define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1))) | 662 | #define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1))) |
663 | |||
645 | static int | 664 | static int |
646 | pfkey_sockaddr_size(sa_family_t family) | 665 | pfkey_sockaddr_size(sa_family_t family) |
647 | { | 666 | { |
648 | switch (family) { | 667 | return PFKEY_ALIGN8(pfkey_sockaddr_len(family)); |
649 | case AF_INET: | ||
650 | return PFKEY_ALIGN8(sizeof(struct sockaddr_in)); | ||
651 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
652 | case AF_INET6: | ||
653 | return PFKEY_ALIGN8(sizeof(struct sockaddr_in6)); | ||
654 | #endif | ||
655 | default: | ||
656 | return 0; | ||
657 | } | ||
658 | /* NOTREACHED */ | ||
659 | } | 668 | } |
660 | 669 | ||
661 | static inline int pfkey_mode_from_xfrm(int mode) | 670 | static inline int pfkey_mode_from_xfrm(int mode) |
@@ -687,6 +696,36 @@ static inline int pfkey_mode_to_xfrm(int mode) | |||
687 | } | 696 | } |
688 | } | 697 | } |
689 | 698 | ||
699 | static unsigned int pfkey_sockaddr_fill(xfrm_address_t *xaddr, __be16 port, | ||
700 | struct sockaddr *sa, | ||
701 | unsigned short family) | ||
702 | { | ||
703 | switch (family) { | ||
704 | case AF_INET: | ||
705 | { | ||
706 | struct sockaddr_in *sin = (struct sockaddr_in *)sa; | ||
707 | sin->sin_family = AF_INET; | ||
708 | sin->sin_port = port; | ||
709 | sin->sin_addr.s_addr = xaddr->a4; | ||
710 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
711 | return 32; | ||
712 | } | ||
713 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
714 | case AF_INET6: | ||
715 | { | ||
716 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa; | ||
717 | sin6->sin6_family = AF_INET6; | ||
718 | sin6->sin6_port = port; | ||
719 | sin6->sin6_flowinfo = 0; | ||
720 | ipv6_addr_copy(&sin6->sin6_addr, (struct in6_addr *)xaddr->a6); | ||
721 | sin6->sin6_scope_id = 0; | ||
722 | return 128; | ||
723 | } | ||
724 | #endif | ||
725 | } | ||
726 | return 0; | ||
727 | } | ||
728 | |||
690 | static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x, | 729 | static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x, |
691 | int add_keys, int hsc) | 730 | int add_keys, int hsc) |
692 | { | 731 | { |
@@ -697,13 +736,9 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x, | |||
697 | struct sadb_address *addr; | 736 | struct sadb_address *addr; |
698 | struct sadb_key *key; | 737 | struct sadb_key *key; |
699 | struct sadb_x_sa2 *sa2; | 738 | struct sadb_x_sa2 *sa2; |
700 | struct sockaddr_in *sin; | ||
701 | struct sadb_x_sec_ctx *sec_ctx; | 739 | struct sadb_x_sec_ctx *sec_ctx; |
702 | struct xfrm_sec_ctx *xfrm_ctx; | 740 | struct xfrm_sec_ctx *xfrm_ctx; |
703 | int ctx_size = 0; | 741 | int ctx_size = 0; |
704 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
705 | struct sockaddr_in6 *sin6; | ||
706 | #endif | ||
707 | int size; | 742 | int size; |
708 | int auth_key_size = 0; | 743 | int auth_key_size = 0; |
709 | int encrypt_key_size = 0; | 744 | int encrypt_key_size = 0; |
@@ -732,14 +767,7 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x, | |||
732 | } | 767 | } |
733 | 768 | ||
734 | /* identity & sensitivity */ | 769 | /* identity & sensitivity */ |
735 | 770 | if (xfrm_addr_cmp(&x->sel.saddr, &x->props.saddr, x->props.family)) | |
736 | if ((x->props.family == AF_INET && | ||
737 | x->sel.saddr.a4 != x->props.saddr.a4) | ||
738 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
739 | || (x->props.family == AF_INET6 && | ||
740 | memcmp (x->sel.saddr.a6, x->props.saddr.a6, sizeof (struct in6_addr))) | ||
741 | #endif | ||
742 | ) | ||
743 | size += sizeof(struct sadb_address) + sockaddr_size; | 771 | size += sizeof(struct sadb_address) + sockaddr_size; |
744 | 772 | ||
745 | if (add_keys) { | 773 | if (add_keys) { |
@@ -861,29 +889,12 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x, | |||
861 | protocol's number." - RFC2367 */ | 889 | protocol's number." - RFC2367 */ |
862 | addr->sadb_address_proto = 0; | 890 | addr->sadb_address_proto = 0; |
863 | addr->sadb_address_reserved = 0; | 891 | addr->sadb_address_reserved = 0; |
864 | if (x->props.family == AF_INET) { | ||
865 | addr->sadb_address_prefixlen = 32; | ||
866 | 892 | ||
867 | sin = (struct sockaddr_in *) (addr + 1); | 893 | addr->sadb_address_prefixlen = |
868 | sin->sin_family = AF_INET; | 894 | pfkey_sockaddr_fill(&x->props.saddr, 0, |
869 | sin->sin_addr.s_addr = x->props.saddr.a4; | 895 | (struct sockaddr *) (addr + 1), |
870 | sin->sin_port = 0; | 896 | x->props.family); |
871 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | 897 | if (!addr->sadb_address_prefixlen) |
872 | } | ||
873 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
874 | else if (x->props.family == AF_INET6) { | ||
875 | addr->sadb_address_prefixlen = 128; | ||
876 | |||
877 | sin6 = (struct sockaddr_in6 *) (addr + 1); | ||
878 | sin6->sin6_family = AF_INET6; | ||
879 | sin6->sin6_port = 0; | ||
880 | sin6->sin6_flowinfo = 0; | ||
881 | memcpy(&sin6->sin6_addr, x->props.saddr.a6, | ||
882 | sizeof(struct in6_addr)); | ||
883 | sin6->sin6_scope_id = 0; | ||
884 | } | ||
885 | #endif | ||
886 | else | ||
887 | BUG(); | 898 | BUG(); |
888 | 899 | ||
889 | /* dst address */ | 900 | /* dst address */ |
@@ -894,70 +905,32 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x, | |||
894 | sizeof(uint64_t); | 905 | sizeof(uint64_t); |
895 | addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; | 906 | addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; |
896 | addr->sadb_address_proto = 0; | 907 | addr->sadb_address_proto = 0; |
897 | addr->sadb_address_prefixlen = 32; /* XXX */ | ||
898 | addr->sadb_address_reserved = 0; | 908 | addr->sadb_address_reserved = 0; |
899 | if (x->props.family == AF_INET) { | ||
900 | sin = (struct sockaddr_in *) (addr + 1); | ||
901 | sin->sin_family = AF_INET; | ||
902 | sin->sin_addr.s_addr = x->id.daddr.a4; | ||
903 | sin->sin_port = 0; | ||
904 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
905 | 909 | ||
906 | if (x->sel.saddr.a4 != x->props.saddr.a4) { | 910 | addr->sadb_address_prefixlen = |
907 | addr = (struct sadb_address*) skb_put(skb, | 911 | pfkey_sockaddr_fill(&x->id.daddr, 0, |
908 | sizeof(struct sadb_address)+sockaddr_size); | 912 | (struct sockaddr *) (addr + 1), |
909 | addr->sadb_address_len = | 913 | x->props.family); |
910 | (sizeof(struct sadb_address)+sockaddr_size)/ | 914 | if (!addr->sadb_address_prefixlen) |
911 | sizeof(uint64_t); | 915 | BUG(); |
912 | addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY; | ||
913 | addr->sadb_address_proto = | ||
914 | pfkey_proto_from_xfrm(x->sel.proto); | ||
915 | addr->sadb_address_prefixlen = x->sel.prefixlen_s; | ||
916 | addr->sadb_address_reserved = 0; | ||
917 | |||
918 | sin = (struct sockaddr_in *) (addr + 1); | ||
919 | sin->sin_family = AF_INET; | ||
920 | sin->sin_addr.s_addr = x->sel.saddr.a4; | ||
921 | sin->sin_port = x->sel.sport; | ||
922 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
923 | } | ||
924 | } | ||
925 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
926 | else if (x->props.family == AF_INET6) { | ||
927 | addr->sadb_address_prefixlen = 128; | ||
928 | 916 | ||
929 | sin6 = (struct sockaddr_in6 *) (addr + 1); | 917 | if (xfrm_addr_cmp(&x->sel.saddr, &x->props.saddr, |
930 | sin6->sin6_family = AF_INET6; | 918 | x->props.family)) { |
931 | sin6->sin6_port = 0; | 919 | addr = (struct sadb_address*) skb_put(skb, |
932 | sin6->sin6_flowinfo = 0; | 920 | sizeof(struct sadb_address)+sockaddr_size); |
933 | memcpy(&sin6->sin6_addr, x->id.daddr.a6, sizeof(struct in6_addr)); | 921 | addr->sadb_address_len = |
934 | sin6->sin6_scope_id = 0; | 922 | (sizeof(struct sadb_address)+sockaddr_size)/ |
923 | sizeof(uint64_t); | ||
924 | addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY; | ||
925 | addr->sadb_address_proto = | ||
926 | pfkey_proto_from_xfrm(x->sel.proto); | ||
927 | addr->sadb_address_prefixlen = x->sel.prefixlen_s; | ||
928 | addr->sadb_address_reserved = 0; | ||
935 | 929 | ||
936 | if (memcmp (x->sel.saddr.a6, x->props.saddr.a6, | 930 | pfkey_sockaddr_fill(&x->sel.saddr, x->sel.sport, |
937 | sizeof(struct in6_addr))) { | 931 | (struct sockaddr *) (addr + 1), |
938 | addr = (struct sadb_address *) skb_put(skb, | 932 | x->props.family); |
939 | sizeof(struct sadb_address)+sockaddr_size); | ||
940 | addr->sadb_address_len = | ||
941 | (sizeof(struct sadb_address)+sockaddr_size)/ | ||
942 | sizeof(uint64_t); | ||
943 | addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY; | ||
944 | addr->sadb_address_proto = | ||
945 | pfkey_proto_from_xfrm(x->sel.proto); | ||
946 | addr->sadb_address_prefixlen = x->sel.prefixlen_s; | ||
947 | addr->sadb_address_reserved = 0; | ||
948 | |||
949 | sin6 = (struct sockaddr_in6 *) (addr + 1); | ||
950 | sin6->sin6_family = AF_INET6; | ||
951 | sin6->sin6_port = x->sel.sport; | ||
952 | sin6->sin6_flowinfo = 0; | ||
953 | memcpy(&sin6->sin6_addr, x->sel.saddr.a6, | ||
954 | sizeof(struct in6_addr)); | ||
955 | sin6->sin6_scope_id = 0; | ||
956 | } | ||
957 | } | 933 | } |
958 | #endif | ||
959 | else | ||
960 | BUG(); | ||
961 | 934 | ||
962 | /* auth key */ | 935 | /* auth key */ |
963 | if (add_keys && auth_key_size) { | 936 | if (add_keys && auth_key_size) { |
@@ -1853,10 +1826,6 @@ static int | |||
1853 | parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) | 1826 | parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) |
1854 | { | 1827 | { |
1855 | struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr; | 1828 | struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr; |
1856 | struct sockaddr_in *sin; | ||
1857 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
1858 | struct sockaddr_in6 *sin6; | ||
1859 | #endif | ||
1860 | int mode; | 1829 | int mode; |
1861 | 1830 | ||
1862 | if (xp->xfrm_nr >= XFRM_MAX_DEPTH) | 1831 | if (xp->xfrm_nr >= XFRM_MAX_DEPTH) |
@@ -1881,31 +1850,19 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) | |||
1881 | 1850 | ||
1882 | /* addresses present only in tunnel mode */ | 1851 | /* addresses present only in tunnel mode */ |
1883 | if (t->mode == XFRM_MODE_TUNNEL) { | 1852 | if (t->mode == XFRM_MODE_TUNNEL) { |
1884 | struct sockaddr *sa; | 1853 | u8 *sa = (u8 *) (rq + 1); |
1885 | sa = (struct sockaddr *)(rq+1); | 1854 | int family, socklen; |
1886 | switch(sa->sa_family) { | 1855 | |
1887 | case AF_INET: | 1856 | family = pfkey_sockaddr_extract((struct sockaddr *)sa, |
1888 | sin = (struct sockaddr_in*)sa; | 1857 | &t->saddr); |
1889 | t->saddr.a4 = sin->sin_addr.s_addr; | 1858 | if (!family) |
1890 | sin++; | ||
1891 | if (sin->sin_family != AF_INET) | ||
1892 | return -EINVAL; | ||
1893 | t->id.daddr.a4 = sin->sin_addr.s_addr; | ||
1894 | break; | ||
1895 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
1896 | case AF_INET6: | ||
1897 | sin6 = (struct sockaddr_in6*)sa; | ||
1898 | memcpy(t->saddr.a6, &sin6->sin6_addr, sizeof(struct in6_addr)); | ||
1899 | sin6++; | ||
1900 | if (sin6->sin6_family != AF_INET6) | ||
1901 | return -EINVAL; | ||
1902 | memcpy(t->id.daddr.a6, &sin6->sin6_addr, sizeof(struct in6_addr)); | ||
1903 | break; | ||
1904 | #endif | ||
1905 | default: | ||
1906 | return -EINVAL; | 1859 | return -EINVAL; |
1907 | } | 1860 | |
1908 | t->encap_family = sa->sa_family; | 1861 | socklen = pfkey_sockaddr_len(family); |
1862 | if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen), | ||
1863 | &t->id.daddr) != family) | ||
1864 | return -EINVAL; | ||
1865 | t->encap_family = family; | ||
1909 | } else | 1866 | } else |
1910 | t->encap_family = xp->family; | 1867 | t->encap_family = xp->family; |
1911 | 1868 | ||
@@ -1952,9 +1909,7 @@ static int pfkey_xfrm_policy2msg_size(struct xfrm_policy *xp) | |||
1952 | 1909 | ||
1953 | for (i=0; i<xp->xfrm_nr; i++) { | 1910 | for (i=0; i<xp->xfrm_nr; i++) { |
1954 | t = xp->xfrm_vec + i; | 1911 | t = xp->xfrm_vec + i; |
1955 | socklen += (t->encap_family == AF_INET ? | 1912 | socklen += pfkey_sockaddr_len(t->encap_family); |
1956 | sizeof(struct sockaddr_in) : | ||
1957 | sizeof(struct sockaddr_in6)); | ||
1958 | } | 1913 | } |
1959 | 1914 | ||
1960 | return sizeof(struct sadb_msg) + | 1915 | return sizeof(struct sadb_msg) + |
@@ -1987,18 +1942,12 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in | |||
1987 | struct sadb_address *addr; | 1942 | struct sadb_address *addr; |
1988 | struct sadb_lifetime *lifetime; | 1943 | struct sadb_lifetime *lifetime; |
1989 | struct sadb_x_policy *pol; | 1944 | struct sadb_x_policy *pol; |
1990 | struct sockaddr_in *sin; | ||
1991 | struct sadb_x_sec_ctx *sec_ctx; | 1945 | struct sadb_x_sec_ctx *sec_ctx; |
1992 | struct xfrm_sec_ctx *xfrm_ctx; | 1946 | struct xfrm_sec_ctx *xfrm_ctx; |
1993 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
1994 | struct sockaddr_in6 *sin6; | ||
1995 | #endif | ||
1996 | int i; | 1947 | int i; |
1997 | int size; | 1948 | int size; |
1998 | int sockaddr_size = pfkey_sockaddr_size(xp->family); | 1949 | int sockaddr_size = pfkey_sockaddr_size(xp->family); |
1999 | int socklen = (xp->family == AF_INET ? | 1950 | int socklen = pfkey_sockaddr_len(xp->family); |
2000 | sizeof(struct sockaddr_in) : | ||
2001 | sizeof(struct sockaddr_in6)); | ||
2002 | 1951 | ||
2003 | size = pfkey_xfrm_policy2msg_size(xp); | 1952 | size = pfkey_xfrm_policy2msg_size(xp); |
2004 | 1953 | ||
@@ -2016,26 +1965,10 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in | |||
2016 | addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto); | 1965 | addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto); |
2017 | addr->sadb_address_prefixlen = xp->selector.prefixlen_s; | 1966 | addr->sadb_address_prefixlen = xp->selector.prefixlen_s; |
2018 | addr->sadb_address_reserved = 0; | 1967 | addr->sadb_address_reserved = 0; |
2019 | /* src address */ | 1968 | if (!pfkey_sockaddr_fill(&xp->selector.saddr, |
2020 | if (xp->family == AF_INET) { | 1969 | xp->selector.sport, |
2021 | sin = (struct sockaddr_in *) (addr + 1); | 1970 | (struct sockaddr *) (addr + 1), |
2022 | sin->sin_family = AF_INET; | 1971 | xp->family)) |
2023 | sin->sin_addr.s_addr = xp->selector.saddr.a4; | ||
2024 | sin->sin_port = xp->selector.sport; | ||
2025 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
2026 | } | ||
2027 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
2028 | else if (xp->family == AF_INET6) { | ||
2029 | sin6 = (struct sockaddr_in6 *) (addr + 1); | ||
2030 | sin6->sin6_family = AF_INET6; | ||
2031 | sin6->sin6_port = xp->selector.sport; | ||
2032 | sin6->sin6_flowinfo = 0; | ||
2033 | memcpy(&sin6->sin6_addr, xp->selector.saddr.a6, | ||
2034 | sizeof(struct in6_addr)); | ||
2035 | sin6->sin6_scope_id = 0; | ||
2036 | } | ||
2037 | #endif | ||
2038 | else | ||
2039 | BUG(); | 1972 | BUG(); |
2040 | 1973 | ||
2041 | /* dst address */ | 1974 | /* dst address */ |
@@ -2048,26 +1981,10 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in | |||
2048 | addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto); | 1981 | addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto); |
2049 | addr->sadb_address_prefixlen = xp->selector.prefixlen_d; | 1982 | addr->sadb_address_prefixlen = xp->selector.prefixlen_d; |
2050 | addr->sadb_address_reserved = 0; | 1983 | addr->sadb_address_reserved = 0; |
2051 | if (xp->family == AF_INET) { | 1984 | |
2052 | sin = (struct sockaddr_in *) (addr + 1); | 1985 | pfkey_sockaddr_fill(&xp->selector.daddr, xp->selector.dport, |
2053 | sin->sin_family = AF_INET; | 1986 | (struct sockaddr *) (addr + 1), |
2054 | sin->sin_addr.s_addr = xp->selector.daddr.a4; | 1987 | xp->family); |
2055 | sin->sin_port = xp->selector.dport; | ||
2056 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
2057 | } | ||
2058 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
2059 | else if (xp->family == AF_INET6) { | ||
2060 | sin6 = (struct sockaddr_in6 *) (addr + 1); | ||
2061 | sin6->sin6_family = AF_INET6; | ||
2062 | sin6->sin6_port = xp->selector.dport; | ||
2063 | sin6->sin6_flowinfo = 0; | ||
2064 | memcpy(&sin6->sin6_addr, xp->selector.daddr.a6, | ||
2065 | sizeof(struct in6_addr)); | ||
2066 | sin6->sin6_scope_id = 0; | ||
2067 | } | ||
2068 | #endif | ||
2069 | else | ||
2070 | BUG(); | ||
2071 | 1988 | ||
2072 | /* hard time */ | 1989 | /* hard time */ |
2073 | lifetime = (struct sadb_lifetime *) skb_put(skb, | 1990 | lifetime = (struct sadb_lifetime *) skb_put(skb, |
@@ -2121,12 +2038,13 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in | |||
2121 | int mode; | 2038 | int mode; |
2122 | 2039 | ||
2123 | req_size = sizeof(struct sadb_x_ipsecrequest); | 2040 | req_size = sizeof(struct sadb_x_ipsecrequest); |
2124 | if (t->mode == XFRM_MODE_TUNNEL) | 2041 | if (t->mode == XFRM_MODE_TUNNEL) { |
2125 | req_size += ((t->encap_family == AF_INET ? | 2042 | socklen = pfkey_sockaddr_len(t->encap_family); |
2126 | sizeof(struct sockaddr_in) : | 2043 | req_size += socklen * 2; |
2127 | sizeof(struct sockaddr_in6)) * 2); | 2044 | } else { |
2128 | else | ||
2129 | size -= 2*socklen; | 2045 | size -= 2*socklen; |
2046 | socklen = 0; | ||
2047 | } | ||
2130 | rq = (void*)skb_put(skb, req_size); | 2048 | rq = (void*)skb_put(skb, req_size); |
2131 | pol->sadb_x_policy_len += req_size/8; | 2049 | pol->sadb_x_policy_len += req_size/8; |
2132 | memset(rq, 0, sizeof(*rq)); | 2050 | memset(rq, 0, sizeof(*rq)); |
@@ -2141,42 +2059,15 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in | |||
2141 | if (t->optional) | 2059 | if (t->optional) |
2142 | rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE; | 2060 | rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE; |
2143 | rq->sadb_x_ipsecrequest_reqid = t->reqid; | 2061 | rq->sadb_x_ipsecrequest_reqid = t->reqid; |
2062 | |||
2144 | if (t->mode == XFRM_MODE_TUNNEL) { | 2063 | if (t->mode == XFRM_MODE_TUNNEL) { |
2145 | switch (t->encap_family) { | 2064 | u8 *sa = (void *)(rq + 1); |
2146 | case AF_INET: | 2065 | pfkey_sockaddr_fill(&t->saddr, 0, |
2147 | sin = (void*)(rq+1); | 2066 | (struct sockaddr *)sa, |
2148 | sin->sin_family = AF_INET; | 2067 | t->encap_family); |
2149 | sin->sin_addr.s_addr = t->saddr.a4; | 2068 | pfkey_sockaddr_fill(&t->id.daddr, 0, |
2150 | sin->sin_port = 0; | 2069 | (struct sockaddr *) (sa + socklen), |
2151 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | 2070 | t->encap_family); |
2152 | sin++; | ||
2153 | sin->sin_family = AF_INET; | ||
2154 | sin->sin_addr.s_addr = t->id.daddr.a4; | ||
2155 | sin->sin_port = 0; | ||
2156 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
2157 | break; | ||
2158 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
2159 | case AF_INET6: | ||
2160 | sin6 = (void*)(rq+1); | ||
2161 | sin6->sin6_family = AF_INET6; | ||
2162 | sin6->sin6_port = 0; | ||
2163 | sin6->sin6_flowinfo = 0; | ||
2164 | memcpy(&sin6->sin6_addr, t->saddr.a6, | ||
2165 | sizeof(struct in6_addr)); | ||
2166 | sin6->sin6_scope_id = 0; | ||
2167 | |||
2168 | sin6++; | ||
2169 | sin6->sin6_family = AF_INET6; | ||
2170 | sin6->sin6_port = 0; | ||
2171 | sin6->sin6_flowinfo = 0; | ||
2172 | memcpy(&sin6->sin6_addr, t->id.daddr.a6, | ||
2173 | sizeof(struct in6_addr)); | ||
2174 | sin6->sin6_scope_id = 0; | ||
2175 | break; | ||
2176 | #endif | ||
2177 | default: | ||
2178 | break; | ||
2179 | } | ||
2180 | } | 2071 | } |
2181 | } | 2072 | } |
2182 | 2073 | ||
@@ -2459,61 +2350,31 @@ out: | |||
2459 | #ifdef CONFIG_NET_KEY_MIGRATE | 2350 | #ifdef CONFIG_NET_KEY_MIGRATE |
2460 | static int pfkey_sockaddr_pair_size(sa_family_t family) | 2351 | static int pfkey_sockaddr_pair_size(sa_family_t family) |
2461 | { | 2352 | { |
2462 | switch (family) { | 2353 | return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2); |
2463 | case AF_INET: | ||
2464 | return PFKEY_ALIGN8(sizeof(struct sockaddr_in) * 2); | ||
2465 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
2466 | case AF_INET6: | ||
2467 | return PFKEY_ALIGN8(sizeof(struct sockaddr_in6) * 2); | ||
2468 | #endif | ||
2469 | default: | ||
2470 | return 0; | ||
2471 | } | ||
2472 | /* NOTREACHED */ | ||
2473 | } | 2354 | } |
2474 | 2355 | ||
2475 | static int parse_sockaddr_pair(struct sadb_x_ipsecrequest *rq, | 2356 | static int parse_sockaddr_pair(struct sadb_x_ipsecrequest *rq, |
2476 | xfrm_address_t *saddr, xfrm_address_t *daddr, | 2357 | xfrm_address_t *saddr, xfrm_address_t *daddr, |
2477 | u16 *family) | 2358 | u16 *family) |
2478 | { | 2359 | { |
2479 | struct sockaddr *sa = (struct sockaddr *)(rq + 1); | 2360 | u8 *sa = (u8 *) (rq + 1); |
2361 | int af, socklen; | ||
2362 | |||
2480 | if (rq->sadb_x_ipsecrequest_len < | 2363 | if (rq->sadb_x_ipsecrequest_len < |
2481 | pfkey_sockaddr_pair_size(sa->sa_family)) | 2364 | pfkey_sockaddr_pair_size(((struct sockaddr *)sa)->sa_family)) |
2482 | return -EINVAL; | 2365 | return -EINVAL; |
2483 | 2366 | ||
2484 | switch (sa->sa_family) { | 2367 | af = pfkey_sockaddr_extract((struct sockaddr *) sa, |
2485 | case AF_INET: | 2368 | saddr); |
2486 | { | 2369 | if (!af) |
2487 | struct sockaddr_in *sin; | ||
2488 | sin = (struct sockaddr_in *)sa; | ||
2489 | if ((sin+1)->sin_family != AF_INET) | ||
2490 | return -EINVAL; | ||
2491 | memcpy(&saddr->a4, &sin->sin_addr, sizeof(saddr->a4)); | ||
2492 | sin++; | ||
2493 | memcpy(&daddr->a4, &sin->sin_addr, sizeof(daddr->a4)); | ||
2494 | *family = AF_INET; | ||
2495 | break; | ||
2496 | } | ||
2497 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
2498 | case AF_INET6: | ||
2499 | { | ||
2500 | struct sockaddr_in6 *sin6; | ||
2501 | sin6 = (struct sockaddr_in6 *)sa; | ||
2502 | if ((sin6+1)->sin6_family != AF_INET6) | ||
2503 | return -EINVAL; | ||
2504 | memcpy(&saddr->a6, &sin6->sin6_addr, | ||
2505 | sizeof(saddr->a6)); | ||
2506 | sin6++; | ||
2507 | memcpy(&daddr->a6, &sin6->sin6_addr, | ||
2508 | sizeof(daddr->a6)); | ||
2509 | *family = AF_INET6; | ||
2510 | break; | ||
2511 | } | ||
2512 | #endif | ||
2513 | default: | ||
2514 | return -EINVAL; | 2370 | return -EINVAL; |
2515 | } | ||
2516 | 2371 | ||
2372 | socklen = pfkey_sockaddr_len(af); | ||
2373 | if (pfkey_sockaddr_extract((struct sockaddr *) (sa + socklen), | ||
2374 | daddr) != af) | ||
2375 | return -EINVAL; | ||
2376 | |||
2377 | *family = af; | ||
2517 | return 0; | 2378 | return 0; |
2518 | } | 2379 | } |
2519 | 2380 | ||
@@ -3094,10 +2955,6 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct | |||
3094 | struct sadb_msg *hdr; | 2955 | struct sadb_msg *hdr; |
3095 | struct sadb_address *addr; | 2956 | struct sadb_address *addr; |
3096 | struct sadb_x_policy *pol; | 2957 | struct sadb_x_policy *pol; |
3097 | struct sockaddr_in *sin; | ||
3098 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
3099 | struct sockaddr_in6 *sin6; | ||
3100 | #endif | ||
3101 | int sockaddr_size; | 2958 | int sockaddr_size; |
3102 | int size; | 2959 | int size; |
3103 | struct sadb_x_sec_ctx *sec_ctx; | 2960 | struct sadb_x_sec_ctx *sec_ctx; |
@@ -3146,29 +3003,11 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct | |||
3146 | addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; | 3003 | addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; |
3147 | addr->sadb_address_proto = 0; | 3004 | addr->sadb_address_proto = 0; |
3148 | addr->sadb_address_reserved = 0; | 3005 | addr->sadb_address_reserved = 0; |
3149 | if (x->props.family == AF_INET) { | 3006 | addr->sadb_address_prefixlen = |
3150 | addr->sadb_address_prefixlen = 32; | 3007 | pfkey_sockaddr_fill(&x->props.saddr, 0, |
3151 | 3008 | (struct sockaddr *) (addr + 1), | |
3152 | sin = (struct sockaddr_in *) (addr + 1); | 3009 | x->props.family); |
3153 | sin->sin_family = AF_INET; | 3010 | if (!addr->sadb_address_prefixlen) |
3154 | sin->sin_addr.s_addr = x->props.saddr.a4; | ||
3155 | sin->sin_port = 0; | ||
3156 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
3157 | } | ||
3158 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
3159 | else if (x->props.family == AF_INET6) { | ||
3160 | addr->sadb_address_prefixlen = 128; | ||
3161 | |||
3162 | sin6 = (struct sockaddr_in6 *) (addr + 1); | ||
3163 | sin6->sin6_family = AF_INET6; | ||
3164 | sin6->sin6_port = 0; | ||
3165 | sin6->sin6_flowinfo = 0; | ||
3166 | memcpy(&sin6->sin6_addr, | ||
3167 | x->props.saddr.a6, sizeof(struct in6_addr)); | ||
3168 | sin6->sin6_scope_id = 0; | ||
3169 | } | ||
3170 | #endif | ||
3171 | else | ||
3172 | BUG(); | 3011 | BUG(); |
3173 | 3012 | ||
3174 | /* dst address */ | 3013 | /* dst address */ |
@@ -3180,29 +3019,11 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct | |||
3180 | addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; | 3019 | addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; |
3181 | addr->sadb_address_proto = 0; | 3020 | addr->sadb_address_proto = 0; |
3182 | addr->sadb_address_reserved = 0; | 3021 | addr->sadb_address_reserved = 0; |
3183 | if (x->props.family == AF_INET) { | 3022 | addr->sadb_address_prefixlen = |
3184 | addr->sadb_address_prefixlen = 32; | 3023 | pfkey_sockaddr_fill(&x->id.daddr, 0, |
3185 | 3024 | (struct sockaddr *) (addr + 1), | |
3186 | sin = (struct sockaddr_in *) (addr + 1); | 3025 | x->props.family); |
3187 | sin->sin_family = AF_INET; | 3026 | if (!addr->sadb_address_prefixlen) |
3188 | sin->sin_addr.s_addr = x->id.daddr.a4; | ||
3189 | sin->sin_port = 0; | ||
3190 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
3191 | } | ||
3192 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
3193 | else if (x->props.family == AF_INET6) { | ||
3194 | addr->sadb_address_prefixlen = 128; | ||
3195 | |||
3196 | sin6 = (struct sockaddr_in6 *) (addr + 1); | ||
3197 | sin6->sin6_family = AF_INET6; | ||
3198 | sin6->sin6_port = 0; | ||
3199 | sin6->sin6_flowinfo = 0; | ||
3200 | memcpy(&sin6->sin6_addr, | ||
3201 | x->id.daddr.a6, sizeof(struct in6_addr)); | ||
3202 | sin6->sin6_scope_id = 0; | ||
3203 | } | ||
3204 | #endif | ||
3205 | else | ||
3206 | BUG(); | 3027 | BUG(); |
3207 | 3028 | ||
3208 | pol = (struct sadb_x_policy *) skb_put(skb, sizeof(struct sadb_x_policy)); | 3029 | pol = (struct sadb_x_policy *) skb_put(skb, sizeof(struct sadb_x_policy)); |
@@ -3328,10 +3149,6 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, | |||
3328 | struct sadb_sa *sa; | 3149 | struct sadb_sa *sa; |
3329 | struct sadb_address *addr; | 3150 | struct sadb_address *addr; |
3330 | struct sadb_x_nat_t_port *n_port; | 3151 | struct sadb_x_nat_t_port *n_port; |
3331 | struct sockaddr_in *sin; | ||
3332 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
3333 | struct sockaddr_in6 *sin6; | ||
3334 | #endif | ||
3335 | int sockaddr_size; | 3152 | int sockaddr_size; |
3336 | int size; | 3153 | int size; |
3337 | __u8 satype = (x->id.proto == IPPROTO_ESP ? SADB_SATYPE_ESP : 0); | 3154 | __u8 satype = (x->id.proto == IPPROTO_ESP ? SADB_SATYPE_ESP : 0); |
@@ -3395,29 +3212,11 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, | |||
3395 | addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; | 3212 | addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; |
3396 | addr->sadb_address_proto = 0; | 3213 | addr->sadb_address_proto = 0; |
3397 | addr->sadb_address_reserved = 0; | 3214 | addr->sadb_address_reserved = 0; |
3398 | if (x->props.family == AF_INET) { | 3215 | addr->sadb_address_prefixlen = |
3399 | addr->sadb_address_prefixlen = 32; | 3216 | pfkey_sockaddr_fill(&x->props.saddr, 0, |
3400 | 3217 | (struct sockaddr *) (addr + 1), | |
3401 | sin = (struct sockaddr_in *) (addr + 1); | 3218 | x->props.family); |
3402 | sin->sin_family = AF_INET; | 3219 | if (!addr->sadb_address_prefixlen) |
3403 | sin->sin_addr.s_addr = x->props.saddr.a4; | ||
3404 | sin->sin_port = 0; | ||
3405 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
3406 | } | ||
3407 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
3408 | else if (x->props.family == AF_INET6) { | ||
3409 | addr->sadb_address_prefixlen = 128; | ||
3410 | |||
3411 | sin6 = (struct sockaddr_in6 *) (addr + 1); | ||
3412 | sin6->sin6_family = AF_INET6; | ||
3413 | sin6->sin6_port = 0; | ||
3414 | sin6->sin6_flowinfo = 0; | ||
3415 | memcpy(&sin6->sin6_addr, | ||
3416 | x->props.saddr.a6, sizeof(struct in6_addr)); | ||
3417 | sin6->sin6_scope_id = 0; | ||
3418 | } | ||
3419 | #endif | ||
3420 | else | ||
3421 | BUG(); | 3220 | BUG(); |
3422 | 3221 | ||
3423 | /* NAT_T_SPORT (old port) */ | 3222 | /* NAT_T_SPORT (old port) */ |
@@ -3436,28 +3235,11 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, | |||
3436 | addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; | 3235 | addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; |
3437 | addr->sadb_address_proto = 0; | 3236 | addr->sadb_address_proto = 0; |
3438 | addr->sadb_address_reserved = 0; | 3237 | addr->sadb_address_reserved = 0; |
3439 | if (x->props.family == AF_INET) { | 3238 | addr->sadb_address_prefixlen = |
3440 | addr->sadb_address_prefixlen = 32; | 3239 | pfkey_sockaddr_fill(ipaddr, 0, |
3441 | 3240 | (struct sockaddr *) (addr + 1), | |
3442 | sin = (struct sockaddr_in *) (addr + 1); | 3241 | x->props.family); |
3443 | sin->sin_family = AF_INET; | 3242 | if (!addr->sadb_address_prefixlen) |
3444 | sin->sin_addr.s_addr = ipaddr->a4; | ||
3445 | sin->sin_port = 0; | ||
3446 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
3447 | } | ||
3448 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
3449 | else if (x->props.family == AF_INET6) { | ||
3450 | addr->sadb_address_prefixlen = 128; | ||
3451 | |||
3452 | sin6 = (struct sockaddr_in6 *) (addr + 1); | ||
3453 | sin6->sin6_family = AF_INET6; | ||
3454 | sin6->sin6_port = 0; | ||
3455 | sin6->sin6_flowinfo = 0; | ||
3456 | memcpy(&sin6->sin6_addr, &ipaddr->a6, sizeof(struct in6_addr)); | ||
3457 | sin6->sin6_scope_id = 0; | ||
3458 | } | ||
3459 | #endif | ||
3460 | else | ||
3461 | BUG(); | 3243 | BUG(); |
3462 | 3244 | ||
3463 | /* NAT_T_DPORT (new port) */ | 3245 | /* NAT_T_DPORT (new port) */ |
@@ -3475,10 +3257,6 @@ static int set_sadb_address(struct sk_buff *skb, int sasize, int type, | |||
3475 | struct xfrm_selector *sel) | 3257 | struct xfrm_selector *sel) |
3476 | { | 3258 | { |
3477 | struct sadb_address *addr; | 3259 | struct sadb_address *addr; |
3478 | struct sockaddr_in *sin; | ||
3479 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
3480 | struct sockaddr_in6 *sin6; | ||
3481 | #endif | ||
3482 | addr = (struct sadb_address *)skb_put(skb, sizeof(struct sadb_address) + sasize); | 3260 | addr = (struct sadb_address *)skb_put(skb, sizeof(struct sadb_address) + sasize); |
3483 | addr->sadb_address_len = (sizeof(struct sadb_address) + sasize)/8; | 3261 | addr->sadb_address_len = (sizeof(struct sadb_address) + sasize)/8; |
3484 | addr->sadb_address_exttype = type; | 3262 | addr->sadb_address_exttype = type; |
@@ -3487,50 +3265,16 @@ static int set_sadb_address(struct sk_buff *skb, int sasize, int type, | |||
3487 | 3265 | ||
3488 | switch (type) { | 3266 | switch (type) { |
3489 | case SADB_EXT_ADDRESS_SRC: | 3267 | case SADB_EXT_ADDRESS_SRC: |
3490 | if (sel->family == AF_INET) { | 3268 | addr->sadb_address_prefixlen = sel->prefixlen_s; |
3491 | addr->sadb_address_prefixlen = sel->prefixlen_s; | 3269 | pfkey_sockaddr_fill(&sel->saddr, 0, |
3492 | sin = (struct sockaddr_in *)(addr + 1); | 3270 | (struct sockaddr *)(addr + 1), |
3493 | sin->sin_family = AF_INET; | 3271 | sel->family); |
3494 | memcpy(&sin->sin_addr.s_addr, &sel->saddr, | ||
3495 | sizeof(sin->sin_addr.s_addr)); | ||
3496 | sin->sin_port = 0; | ||
3497 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
3498 | } | ||
3499 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
3500 | else if (sel->family == AF_INET6) { | ||
3501 | addr->sadb_address_prefixlen = sel->prefixlen_s; | ||
3502 | sin6 = (struct sockaddr_in6 *)(addr + 1); | ||
3503 | sin6->sin6_family = AF_INET6; | ||
3504 | sin6->sin6_port = 0; | ||
3505 | sin6->sin6_flowinfo = 0; | ||
3506 | sin6->sin6_scope_id = 0; | ||
3507 | memcpy(&sin6->sin6_addr.s6_addr, &sel->saddr, | ||
3508 | sizeof(sin6->sin6_addr.s6_addr)); | ||
3509 | } | ||
3510 | #endif | ||
3511 | break; | 3272 | break; |
3512 | case SADB_EXT_ADDRESS_DST: | 3273 | case SADB_EXT_ADDRESS_DST: |
3513 | if (sel->family == AF_INET) { | 3274 | addr->sadb_address_prefixlen = sel->prefixlen_d; |
3514 | addr->sadb_address_prefixlen = sel->prefixlen_d; | 3275 | pfkey_sockaddr_fill(&sel->daddr, 0, |
3515 | sin = (struct sockaddr_in *)(addr + 1); | 3276 | (struct sockaddr *)(addr + 1), |
3516 | sin->sin_family = AF_INET; | 3277 | sel->family); |
3517 | memcpy(&sin->sin_addr.s_addr, &sel->daddr, | ||
3518 | sizeof(sin->sin_addr.s_addr)); | ||
3519 | sin->sin_port = 0; | ||
3520 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
3521 | } | ||
3522 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
3523 | else if (sel->family == AF_INET6) { | ||
3524 | addr->sadb_address_prefixlen = sel->prefixlen_d; | ||
3525 | sin6 = (struct sockaddr_in6 *)(addr + 1); | ||
3526 | sin6->sin6_family = AF_INET6; | ||
3527 | sin6->sin6_port = 0; | ||
3528 | sin6->sin6_flowinfo = 0; | ||
3529 | sin6->sin6_scope_id = 0; | ||
3530 | memcpy(&sin6->sin6_addr.s6_addr, &sel->daddr, | ||
3531 | sizeof(sin6->sin6_addr.s6_addr)); | ||
3532 | } | ||
3533 | #endif | ||
3534 | break; | 3278 | break; |
3535 | default: | 3279 | default: |
3536 | return -EINVAL; | 3280 | return -EINVAL; |
@@ -3545,10 +3289,8 @@ static int set_ipsecrequest(struct sk_buff *skb, | |||
3545 | xfrm_address_t *src, xfrm_address_t *dst) | 3289 | xfrm_address_t *src, xfrm_address_t *dst) |
3546 | { | 3290 | { |
3547 | struct sadb_x_ipsecrequest *rq; | 3291 | struct sadb_x_ipsecrequest *rq; |
3548 | struct sockaddr_in *sin; | 3292 | u8 *sa; |
3549 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 3293 | int socklen = pfkey_sockaddr_len(family); |
3550 | struct sockaddr_in6 *sin6; | ||
3551 | #endif | ||
3552 | int size_req; | 3294 | int size_req; |
3553 | 3295 | ||
3554 | size_req = sizeof(struct sadb_x_ipsecrequest) + | 3296 | size_req = sizeof(struct sadb_x_ipsecrequest) + |
@@ -3562,38 +3304,10 @@ static int set_ipsecrequest(struct sk_buff *skb, | |||
3562 | rq->sadb_x_ipsecrequest_level = level; | 3304 | rq->sadb_x_ipsecrequest_level = level; |
3563 | rq->sadb_x_ipsecrequest_reqid = reqid; | 3305 | rq->sadb_x_ipsecrequest_reqid = reqid; |
3564 | 3306 | ||
3565 | switch (family) { | 3307 | sa = (u8 *) (rq + 1); |
3566 | case AF_INET: | 3308 | if (!pfkey_sockaddr_fill(src, 0, (struct sockaddr *)sa, family) || |
3567 | sin = (struct sockaddr_in *)(rq + 1); | 3309 | !pfkey_sockaddr_fill(dst, 0, (struct sockaddr *)(sa + socklen), family)) |
3568 | sin->sin_family = AF_INET; | ||
3569 | memcpy(&sin->sin_addr.s_addr, src, | ||
3570 | sizeof(sin->sin_addr.s_addr)); | ||
3571 | sin++; | ||
3572 | sin->sin_family = AF_INET; | ||
3573 | memcpy(&sin->sin_addr.s_addr, dst, | ||
3574 | sizeof(sin->sin_addr.s_addr)); | ||
3575 | break; | ||
3576 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
3577 | case AF_INET6: | ||
3578 | sin6 = (struct sockaddr_in6 *)(rq + 1); | ||
3579 | sin6->sin6_family = AF_INET6; | ||
3580 | sin6->sin6_port = 0; | ||
3581 | sin6->sin6_flowinfo = 0; | ||
3582 | sin6->sin6_scope_id = 0; | ||
3583 | memcpy(&sin6->sin6_addr.s6_addr, src, | ||
3584 | sizeof(sin6->sin6_addr.s6_addr)); | ||
3585 | sin6++; | ||
3586 | sin6->sin6_family = AF_INET6; | ||
3587 | sin6->sin6_port = 0; | ||
3588 | sin6->sin6_flowinfo = 0; | ||
3589 | sin6->sin6_scope_id = 0; | ||
3590 | memcpy(&sin6->sin6_addr.s6_addr, dst, | ||
3591 | sizeof(sin6->sin6_addr.s6_addr)); | ||
3592 | break; | ||
3593 | #endif | ||
3594 | default: | ||
3595 | return -EINVAL; | 3310 | return -EINVAL; |
3596 | } | ||
3597 | 3311 | ||
3598 | return 0; | 3312 | return 0; |
3599 | } | 3313 | } |
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 97101dcde4c0..5bcc452a247f 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -128,10 +128,8 @@ static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock) | |||
128 | 128 | ||
129 | static void llc_ui_sk_init(struct socket *sock, struct sock *sk) | 129 | static void llc_ui_sk_init(struct socket *sock, struct sock *sk) |
130 | { | 130 | { |
131 | sock_graft(sk, sock); | ||
131 | sk->sk_type = sock->type; | 132 | sk->sk_type = sock->type; |
132 | sk->sk_sleep = &sock->wait; | ||
133 | sk->sk_socket = sock; | ||
134 | sock->sk = sk; | ||
135 | sock->ops = &llc_ui_ops; | 133 | sock->ops = &llc_ui_ops; |
136 | } | 134 | } |
137 | 135 | ||
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index 1c45f172991e..57ad974e4d94 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c | |||
@@ -150,7 +150,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, | |||
150 | int (*rcv)(struct sk_buff *, struct net_device *, | 150 | int (*rcv)(struct sk_buff *, struct net_device *, |
151 | struct packet_type *, struct net_device *); | 151 | struct packet_type *, struct net_device *); |
152 | 152 | ||
153 | if (dev_net(dev) != &init_net) | 153 | if (!net_eq(dev_net(dev), &init_net)) |
154 | goto drop; | 154 | goto drop; |
155 | 155 | ||
156 | /* | 156 | /* |
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index a24b459dd45a..80d693392b0f 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig | |||
@@ -7,7 +7,6 @@ config MAC80211 | |||
7 | select CRC32 | 7 | select CRC32 |
8 | select WIRELESS_EXT | 8 | select WIRELESS_EXT |
9 | select CFG80211 | 9 | select CFG80211 |
10 | select NET_SCH_FIFO | ||
11 | ---help--- | 10 | ---help--- |
12 | This option enables the hardware independent IEEE 802.11 | 11 | This option enables the hardware independent IEEE 802.11 |
13 | networking stack. | 12 | networking stack. |
@@ -15,6 +14,14 @@ config MAC80211 | |||
15 | menu "Rate control algorithm selection" | 14 | menu "Rate control algorithm selection" |
16 | depends on MAC80211 != n | 15 | depends on MAC80211 != n |
17 | 16 | ||
17 | config MAC80211_RC_PID | ||
18 | bool "PID controller based rate control algorithm" if EMBEDDED | ||
19 | default y | ||
20 | ---help--- | ||
21 | This option enables a TX rate control algorithm for | ||
22 | mac80211 that uses a PID controller to select the TX | ||
23 | rate. | ||
24 | |||
18 | choice | 25 | choice |
19 | prompt "Default rate control algorithm" | 26 | prompt "Default rate control algorithm" |
20 | default MAC80211_RC_DEFAULT_PID | 27 | default MAC80211_RC_DEFAULT_PID |
@@ -26,40 +33,19 @@ choice | |||
26 | 33 | ||
27 | config MAC80211_RC_DEFAULT_PID | 34 | config MAC80211_RC_DEFAULT_PID |
28 | bool "PID controller based rate control algorithm" | 35 | bool "PID controller based rate control algorithm" |
29 | select MAC80211_RC_PID | 36 | depends on MAC80211_RC_PID |
30 | ---help--- | 37 | ---help--- |
31 | Select the PID controller based rate control as the | 38 | Select the PID controller based rate control as the |
32 | default rate control algorithm. You should choose | 39 | default rate control algorithm. You should choose |
33 | this unless you know what you are doing. | 40 | this unless you know what you are doing. |
34 | 41 | ||
35 | config MAC80211_RC_DEFAULT_NONE | ||
36 | bool "No default algorithm" | ||
37 | depends on EMBEDDED | ||
38 | help | ||
39 | Selecting this option will select no default algorithm | ||
40 | and allow you to not build any. Do not choose this | ||
41 | option unless you know your driver comes with another | ||
42 | suitable algorithm. | ||
43 | endchoice | 42 | endchoice |
44 | 43 | ||
45 | comment "Selecting 'y' for an algorithm will" | ||
46 | comment "build the algorithm into mac80211." | ||
47 | |||
48 | config MAC80211_RC_DEFAULT | 44 | config MAC80211_RC_DEFAULT |
49 | string | 45 | string |
50 | default "pid" if MAC80211_RC_DEFAULT_PID | 46 | default "pid" if MAC80211_RC_DEFAULT_PID |
51 | default "" | 47 | default "" |
52 | 48 | ||
53 | config MAC80211_RC_PID | ||
54 | tristate "PID controller based rate control algorithm" | ||
55 | ---help--- | ||
56 | This option enables a TX rate control algorithm for | ||
57 | mac80211 that uses a PID controller to select the TX | ||
58 | rate. | ||
59 | |||
60 | Say Y or M unless you're sure you want to use a | ||
61 | different rate control algorithm. | ||
62 | |||
63 | endmenu | 49 | endmenu |
64 | 50 | ||
65 | config MAC80211_MESH | 51 | config MAC80211_MESH |
@@ -89,10 +75,16 @@ config MAC80211_DEBUGFS | |||
89 | 75 | ||
90 | Say N unless you know you need this. | 76 | Say N unless you know you need this. |
91 | 77 | ||
78 | menuconfig MAC80211_DEBUG_MENU | ||
79 | bool "Select mac80211 debugging features" | ||
80 | depends on MAC80211 | ||
81 | ---help--- | ||
82 | This option collects various mac80211 debug settings. | ||
83 | |||
92 | config MAC80211_DEBUG_PACKET_ALIGNMENT | 84 | config MAC80211_DEBUG_PACKET_ALIGNMENT |
93 | bool "Enable packet alignment debugging" | 85 | bool "Enable packet alignment debugging" |
94 | depends on MAC80211 | 86 | depends on MAC80211_DEBUG_MENU |
95 | help | 87 | ---help--- |
96 | This option is recommended for driver authors and strongly | 88 | This option is recommended for driver authors and strongly |
97 | discouraged for everybody else, it will trigger a warning | 89 | discouraged for everybody else, it will trigger a warning |
98 | when a driver hands mac80211 a buffer that is aligned in | 90 | when a driver hands mac80211 a buffer that is aligned in |
@@ -101,33 +93,95 @@ config MAC80211_DEBUG_PACKET_ALIGNMENT | |||
101 | 93 | ||
102 | Say N unless you're writing a mac80211 based driver. | 94 | Say N unless you're writing a mac80211 based driver. |
103 | 95 | ||
104 | config MAC80211_DEBUG | 96 | config MAC80211_NOINLINE |
105 | bool "Enable debugging output" | 97 | bool "Do not inline TX/RX handlers" |
106 | depends on MAC80211 | 98 | depends on MAC80211_DEBUG_MENU |
107 | ---help--- | 99 | ---help--- |
108 | This option will enable debug tracing output for the | 100 | This option affects code generation in mac80211, when |
109 | ieee80211 network stack. | 101 | selected some functions are marked "noinline" to allow |
102 | easier debugging of problems in the transmit and receive | ||
103 | paths. | ||
104 | |||
105 | This option increases code size a bit and inserts a lot | ||
106 | of function calls in the code, but is otherwise safe to | ||
107 | enable. | ||
110 | 108 | ||
111 | If you are not trying to debug or develop the ieee80211 | 109 | If unsure, say N unless you expect to be finding problems |
112 | subsystem, you most likely want to say N here. | 110 | in mac80211. |
111 | |||
112 | config MAC80211_VERBOSE_DEBUG | ||
113 | bool "Verbose debugging output" | ||
114 | depends on MAC80211_DEBUG_MENU | ||
115 | ---help--- | ||
116 | Selecting this option causes mac80211 to print out | ||
117 | many debugging messages. It should not be selected | ||
118 | on production systems as some of the messages are | ||
119 | remotely triggerable. | ||
120 | |||
121 | Do not select this option. | ||
113 | 122 | ||
114 | config MAC80211_HT_DEBUG | 123 | config MAC80211_HT_DEBUG |
115 | bool "Enable HT debugging output" | 124 | bool "Verbose HT debugging" |
116 | depends on MAC80211_DEBUG | 125 | depends on MAC80211_DEBUG_MENU |
117 | ---help--- | 126 | ---help--- |
118 | This option enables 802.11n High Throughput features | 127 | This option enables 802.11n High Throughput features |
119 | debug tracing output. | 128 | debug tracing output. |
120 | 129 | ||
121 | If you are not trying to debug of develop the ieee80211 | 130 | It should not be selected on production systems as some |
122 | subsystem, you most likely want to say N here. | 131 | of the messages are remotely triggerable. |
123 | 132 | ||
124 | config MAC80211_VERBOSE_DEBUG | 133 | Do not select this option. |
125 | bool "Verbose debugging output" | 134 | |
126 | depends on MAC80211_DEBUG | 135 | config MAC80211_TKIP_DEBUG |
136 | bool "Verbose TKIP debugging" | ||
137 | depends on MAC80211_DEBUG_MENU | ||
138 | ---help--- | ||
139 | Selecting this option causes mac80211 to print out | ||
140 | very verbose TKIP debugging messages. It should not | ||
141 | be selected on production systems as those messages | ||
142 | are remotely triggerable. | ||
143 | |||
144 | Do not select this option. | ||
145 | |||
146 | config MAC80211_IBSS_DEBUG | ||
147 | bool "Verbose IBSS debugging" | ||
148 | depends on MAC80211_DEBUG_MENU | ||
149 | ---help--- | ||
150 | Selecting this option causes mac80211 to print out | ||
151 | very verbose IBSS debugging messages. It should not | ||
152 | be selected on production systems as those messages | ||
153 | are remotely triggerable. | ||
154 | |||
155 | Do not select this option. | ||
156 | |||
157 | config MAC80211_VERBOSE_PS_DEBUG | ||
158 | bool "Verbose powersave mode debugging" | ||
159 | depends on MAC80211_DEBUG_MENU | ||
160 | ---help--- | ||
161 | Selecting this option causes mac80211 to print out very | ||
162 | verbose power save mode debugging messages (when mac80211 | ||
163 | is an AP and has power saving stations.) | ||
164 | It should not be selected on production systems as those | ||
165 | messages are remotely triggerable. | ||
166 | |||
167 | Do not select this option. | ||
168 | |||
169 | config MAC80211_VERBOSE_MPL_DEBUG | ||
170 | bool "Verbose mesh peer link debugging" | ||
171 | depends on MAC80211_DEBUG_MENU | ||
172 | depends on MAC80211_MESH | ||
173 | ---help--- | ||
174 | Selecting this option causes mac80211 to print out very | ||
175 | verbose mesh peer link debugging messages (when mac80211 | ||
176 | is taking part in a mesh network). | ||
177 | It should not be selected on production systems as those | ||
178 | messages are remotely triggerable. | ||
179 | |||
180 | Do not select this option. | ||
127 | 181 | ||
128 | config MAC80211_LOWTX_FRAME_DUMP | 182 | config MAC80211_LOWTX_FRAME_DUMP |
129 | bool "Debug frame dumping" | 183 | bool "Debug frame dumping" |
130 | depends on MAC80211_DEBUG | 184 | depends on MAC80211_DEBUG_MENU |
131 | ---help--- | 185 | ---help--- |
132 | Selecting this option will cause the stack to | 186 | Selecting this option will cause the stack to |
133 | print a message for each frame that is handed | 187 | print a message for each frame that is handed |
@@ -138,30 +192,20 @@ config MAC80211_LOWTX_FRAME_DUMP | |||
138 | If unsure, say N and insert the debugging code | 192 | If unsure, say N and insert the debugging code |
139 | you require into the driver you are debugging. | 193 | you require into the driver you are debugging. |
140 | 194 | ||
141 | config TKIP_DEBUG | ||
142 | bool "TKIP debugging" | ||
143 | depends on MAC80211_DEBUG | ||
144 | |||
145 | config MAC80211_DEBUG_COUNTERS | 195 | config MAC80211_DEBUG_COUNTERS |
146 | bool "Extra statistics for TX/RX debugging" | 196 | bool "Extra statistics for TX/RX debugging" |
147 | depends on MAC80211_DEBUG | 197 | depends on MAC80211_DEBUG_MENU |
148 | 198 | depends on MAC80211_DEBUGFS | |
149 | config MAC80211_IBSS_DEBUG | ||
150 | bool "Support for IBSS testing" | ||
151 | depends on MAC80211_DEBUG | ||
152 | ---help--- | 199 | ---help--- |
153 | Say Y here if you intend to debug the IBSS code. | 200 | Selecting this option causes mac80211 to keep additional |
201 | and very verbose statistics about TX and RX handler use | ||
202 | and show them in debugfs. | ||
154 | 203 | ||
155 | config MAC80211_VERBOSE_PS_DEBUG | 204 | If unsure, say N. |
156 | bool "Verbose powersave mode debugging" | ||
157 | depends on MAC80211_DEBUG | ||
158 | ---help--- | ||
159 | Say Y here to print out verbose powersave | ||
160 | mode debug messages. | ||
161 | 205 | ||
162 | config MAC80211_VERBOSE_MPL_DEBUG | 206 | config MAC80211_VERBOSE_SPECT_MGMT_DEBUG |
163 | bool "Verbose mesh peer link debugging" | 207 | bool "Verbose Spectrum Management (IEEE 802.11h)debugging" |
164 | depends on MAC80211_DEBUG && MAC80211_MESH | 208 | depends on MAC80211_DEBUG_MENU |
165 | ---help--- | 209 | ---help--- |
166 | Say Y here to print out verbose mesh peer link | 210 | Say Y here to print out verbose Spectrum Management (IEEE 802.11h) |
167 | debug messages. | 211 | debug messages. |
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index 4e5847fd316c..a169b0201d61 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile | |||
@@ -1,13 +1,5 @@ | |||
1 | obj-$(CONFIG_MAC80211) += mac80211.o | 1 | obj-$(CONFIG_MAC80211) += mac80211.o |
2 | 2 | ||
3 | # objects for PID algorithm | ||
4 | rc80211_pid-y := rc80211_pid_algo.o | ||
5 | rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o | ||
6 | |||
7 | # build helper for PID algorithm | ||
8 | rc-pid-y := $(rc80211_pid-y) | ||
9 | rc-pid-m := rc80211_pid.o | ||
10 | |||
11 | # mac80211 objects | 3 | # mac80211 objects |
12 | mac80211-y := \ | 4 | mac80211-y := \ |
13 | main.o \ | 5 | main.o \ |
@@ -26,10 +18,10 @@ mac80211-y := \ | |||
26 | tx.o \ | 18 | tx.o \ |
27 | key.o \ | 19 | key.o \ |
28 | util.o \ | 20 | util.o \ |
21 | wme.o \ | ||
29 | event.o | 22 | event.o |
30 | 23 | ||
31 | mac80211-$(CONFIG_MAC80211_LEDS) += led.o | 24 | mac80211-$(CONFIG_MAC80211_LEDS) += led.o |
32 | mac80211-$(CONFIG_NET_SCHED) += wme.o | ||
33 | mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ | 25 | mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ |
34 | debugfs.o \ | 26 | debugfs.o \ |
35 | debugfs_sta.o \ | 27 | debugfs_sta.o \ |
@@ -42,10 +34,8 @@ mac80211-$(CONFIG_MAC80211_MESH) += \ | |||
42 | mesh_plink.o \ | 34 | mesh_plink.o \ |
43 | mesh_hwmp.o | 35 | mesh_hwmp.o |
44 | 36 | ||
37 | # objects for PID algorithm | ||
38 | rc80211_pid-y := rc80211_pid_algo.o | ||
39 | rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o | ||
45 | 40 | ||
46 | # Build rate control algorithm(s) | 41 | mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y) |
47 | CFLAGS_rc80211_pid_algo.o += -DRC80211_PID_COMPILE | ||
48 | mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc-pid-$(CONFIG_MAC80211_RC_PID)) | ||
49 | |||
50 | # Modular rate algorithms are assigned to mac80211-m - make separate modules | ||
51 | obj-m += $(mac80211-m) | ||
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c index 59f1691f62c8..a87cb3ba2df6 100644 --- a/net/mac80211/aes_ccm.c +++ b/net/mac80211/aes_ccm.c | |||
@@ -16,31 +16,28 @@ | |||
16 | #include "key.h" | 16 | #include "key.h" |
17 | #include "aes_ccm.h" | 17 | #include "aes_ccm.h" |
18 | 18 | ||
19 | 19 | static void aes_ccm_prepare(struct crypto_cipher *tfm, u8 *scratch, u8 *a) | |
20 | static void ieee80211_aes_encrypt(struct crypto_cipher *tfm, | ||
21 | const u8 pt[16], u8 ct[16]) | ||
22 | { | ||
23 | crypto_cipher_encrypt_one(tfm, ct, pt); | ||
24 | } | ||
25 | |||
26 | |||
27 | static inline void aes_ccm_prepare(struct crypto_cipher *tfm, u8 *b_0, u8 *aad, | ||
28 | u8 *b, u8 *s_0, u8 *a) | ||
29 | { | 20 | { |
30 | int i; | 21 | int i; |
22 | u8 *b_0, *aad, *b, *s_0; | ||
31 | 23 | ||
32 | ieee80211_aes_encrypt(tfm, b_0, b); | 24 | b_0 = scratch + 3 * AES_BLOCK_LEN; |
25 | aad = scratch + 4 * AES_BLOCK_LEN; | ||
26 | b = scratch; | ||
27 | s_0 = scratch + AES_BLOCK_LEN; | ||
28 | |||
29 | crypto_cipher_encrypt_one(tfm, b, b_0); | ||
33 | 30 | ||
34 | /* Extra Authenticate-only data (always two AES blocks) */ | 31 | /* Extra Authenticate-only data (always two AES blocks) */ |
35 | for (i = 0; i < AES_BLOCK_LEN; i++) | 32 | for (i = 0; i < AES_BLOCK_LEN; i++) |
36 | aad[i] ^= b[i]; | 33 | aad[i] ^= b[i]; |
37 | ieee80211_aes_encrypt(tfm, aad, b); | 34 | crypto_cipher_encrypt_one(tfm, b, aad); |
38 | 35 | ||
39 | aad += AES_BLOCK_LEN; | 36 | aad += AES_BLOCK_LEN; |
40 | 37 | ||
41 | for (i = 0; i < AES_BLOCK_LEN; i++) | 38 | for (i = 0; i < AES_BLOCK_LEN; i++) |
42 | aad[i] ^= b[i]; | 39 | aad[i] ^= b[i]; |
43 | ieee80211_aes_encrypt(tfm, aad, a); | 40 | crypto_cipher_encrypt_one(tfm, a, aad); |
44 | 41 | ||
45 | /* Mask out bits from auth-only-b_0 */ | 42 | /* Mask out bits from auth-only-b_0 */ |
46 | b_0[0] &= 0x07; | 43 | b_0[0] &= 0x07; |
@@ -48,24 +45,26 @@ static inline void aes_ccm_prepare(struct crypto_cipher *tfm, u8 *b_0, u8 *aad, | |||
48 | /* S_0 is used to encrypt T (= MIC) */ | 45 | /* S_0 is used to encrypt T (= MIC) */ |
49 | b_0[14] = 0; | 46 | b_0[14] = 0; |
50 | b_0[15] = 0; | 47 | b_0[15] = 0; |
51 | ieee80211_aes_encrypt(tfm, b_0, s_0); | 48 | crypto_cipher_encrypt_one(tfm, s_0, b_0); |
52 | } | 49 | } |
53 | 50 | ||
54 | 51 | ||
55 | void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, | 52 | void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, |
56 | u8 *b_0, u8 *aad, u8 *data, size_t data_len, | 53 | u8 *data, size_t data_len, |
57 | u8 *cdata, u8 *mic) | 54 | u8 *cdata, u8 *mic) |
58 | { | 55 | { |
59 | int i, j, last_len, num_blocks; | 56 | int i, j, last_len, num_blocks; |
60 | u8 *pos, *cpos, *b, *s_0, *e; | 57 | u8 *pos, *cpos, *b, *s_0, *e, *b_0, *aad; |
61 | 58 | ||
62 | b = scratch; | 59 | b = scratch; |
63 | s_0 = scratch + AES_BLOCK_LEN; | 60 | s_0 = scratch + AES_BLOCK_LEN; |
64 | e = scratch + 2 * AES_BLOCK_LEN; | 61 | e = scratch + 2 * AES_BLOCK_LEN; |
62 | b_0 = scratch + 3 * AES_BLOCK_LEN; | ||
63 | aad = scratch + 4 * AES_BLOCK_LEN; | ||
65 | 64 | ||
66 | num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); | 65 | num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); |
67 | last_len = data_len % AES_BLOCK_LEN; | 66 | last_len = data_len % AES_BLOCK_LEN; |
68 | aes_ccm_prepare(tfm, b_0, aad, b, s_0, b); | 67 | aes_ccm_prepare(tfm, scratch, b); |
69 | 68 | ||
70 | /* Process payload blocks */ | 69 | /* Process payload blocks */ |
71 | pos = data; | 70 | pos = data; |
@@ -77,11 +76,11 @@ void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, | |||
77 | /* Authentication followed by encryption */ | 76 | /* Authentication followed by encryption */ |
78 | for (i = 0; i < blen; i++) | 77 | for (i = 0; i < blen; i++) |
79 | b[i] ^= pos[i]; | 78 | b[i] ^= pos[i]; |
80 | ieee80211_aes_encrypt(tfm, b, b); | 79 | crypto_cipher_encrypt_one(tfm, b, b); |
81 | 80 | ||
82 | b_0[14] = (j >> 8) & 0xff; | 81 | b_0[14] = (j >> 8) & 0xff; |
83 | b_0[15] = j & 0xff; | 82 | b_0[15] = j & 0xff; |
84 | ieee80211_aes_encrypt(tfm, b_0, e); | 83 | crypto_cipher_encrypt_one(tfm, e, b_0); |
85 | for (i = 0; i < blen; i++) | 84 | for (i = 0; i < blen; i++) |
86 | *cpos++ = *pos++ ^ e[i]; | 85 | *cpos++ = *pos++ ^ e[i]; |
87 | } | 86 | } |
@@ -92,19 +91,20 @@ void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, | |||
92 | 91 | ||
93 | 92 | ||
94 | int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch, | 93 | int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch, |
95 | u8 *b_0, u8 *aad, u8 *cdata, size_t data_len, | 94 | u8 *cdata, size_t data_len, u8 *mic, u8 *data) |
96 | u8 *mic, u8 *data) | ||
97 | { | 95 | { |
98 | int i, j, last_len, num_blocks; | 96 | int i, j, last_len, num_blocks; |
99 | u8 *pos, *cpos, *b, *s_0, *a; | 97 | u8 *pos, *cpos, *b, *s_0, *a, *b_0, *aad; |
100 | 98 | ||
101 | b = scratch; | 99 | b = scratch; |
102 | s_0 = scratch + AES_BLOCK_LEN; | 100 | s_0 = scratch + AES_BLOCK_LEN; |
103 | a = scratch + 2 * AES_BLOCK_LEN; | 101 | a = scratch + 2 * AES_BLOCK_LEN; |
102 | b_0 = scratch + 3 * AES_BLOCK_LEN; | ||
103 | aad = scratch + 4 * AES_BLOCK_LEN; | ||
104 | 104 | ||
105 | num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); | 105 | num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); |
106 | last_len = data_len % AES_BLOCK_LEN; | 106 | last_len = data_len % AES_BLOCK_LEN; |
107 | aes_ccm_prepare(tfm, b_0, aad, b, s_0, a); | 107 | aes_ccm_prepare(tfm, scratch, a); |
108 | 108 | ||
109 | /* Process payload blocks */ | 109 | /* Process payload blocks */ |
110 | cpos = cdata; | 110 | cpos = cdata; |
@@ -116,13 +116,12 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch, | |||
116 | /* Decryption followed by authentication */ | 116 | /* Decryption followed by authentication */ |
117 | b_0[14] = (j >> 8) & 0xff; | 117 | b_0[14] = (j >> 8) & 0xff; |
118 | b_0[15] = j & 0xff; | 118 | b_0[15] = j & 0xff; |
119 | ieee80211_aes_encrypt(tfm, b_0, b); | 119 | crypto_cipher_encrypt_one(tfm, b, b_0); |
120 | for (i = 0; i < blen; i++) { | 120 | for (i = 0; i < blen; i++) { |
121 | *pos = *cpos++ ^ b[i]; | 121 | *pos = *cpos++ ^ b[i]; |
122 | a[i] ^= *pos++; | 122 | a[i] ^= *pos++; |
123 | } | 123 | } |
124 | 124 | crypto_cipher_encrypt_one(tfm, a, a); | |
125 | ieee80211_aes_encrypt(tfm, a, a); | ||
126 | } | 125 | } |
127 | 126 | ||
128 | for (i = 0; i < CCMP_MIC_LEN; i++) { | 127 | for (i = 0; i < CCMP_MIC_LEN; i++) { |
@@ -134,7 +133,7 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch, | |||
134 | } | 133 | } |
135 | 134 | ||
136 | 135 | ||
137 | struct crypto_cipher * ieee80211_aes_key_setup_encrypt(const u8 key[]) | 136 | struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[]) |
138 | { | 137 | { |
139 | struct crypto_cipher *tfm; | 138 | struct crypto_cipher *tfm; |
140 | 139 | ||
diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h index 885f19030b29..6e7820ef3448 100644 --- a/net/mac80211/aes_ccm.h +++ b/net/mac80211/aes_ccm.h | |||
@@ -14,12 +14,12 @@ | |||
14 | 14 | ||
15 | #define AES_BLOCK_LEN 16 | 15 | #define AES_BLOCK_LEN 16 |
16 | 16 | ||
17 | struct crypto_cipher * ieee80211_aes_key_setup_encrypt(const u8 key[]); | 17 | struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[]); |
18 | void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, | 18 | void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, |
19 | u8 *b_0, u8 *aad, u8 *data, size_t data_len, | 19 | u8 *data, size_t data_len, |
20 | u8 *cdata, u8 *mic); | 20 | u8 *cdata, u8 *mic); |
21 | int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch, | 21 | int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch, |
22 | u8 *b_0, u8 *aad, u8 *cdata, size_t data_len, | 22 | u8 *cdata, size_t data_len, |
23 | u8 *mic, u8 *data); | 23 | u8 *mic, u8 *data); |
24 | void ieee80211_aes_key_free(struct crypto_cipher *tfm); | 24 | void ieee80211_aes_key_free(struct crypto_cipher *tfm); |
25 | 25 | ||
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index a9fce4afdf21..8e7ba0e62cf5 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -50,14 +50,11 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name, | |||
50 | struct ieee80211_sub_if_data *sdata; | 50 | struct ieee80211_sub_if_data *sdata; |
51 | int err; | 51 | int err; |
52 | 52 | ||
53 | if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED)) | ||
54 | return -ENODEV; | ||
55 | |||
56 | itype = nl80211_type_to_mac80211_type(type); | 53 | itype = nl80211_type_to_mac80211_type(type); |
57 | if (itype == IEEE80211_IF_TYPE_INVALID) | 54 | if (itype == IEEE80211_IF_TYPE_INVALID) |
58 | return -EINVAL; | 55 | return -EINVAL; |
59 | 56 | ||
60 | err = ieee80211_if_add(local->mdev, name, &dev, itype, params); | 57 | err = ieee80211_if_add(local, name, &dev, itype, params); |
61 | if (err || itype != IEEE80211_IF_TYPE_MNTR || !flags) | 58 | if (err || itype != IEEE80211_IF_TYPE_MNTR || !flags) |
62 | return err; | 59 | return err; |
63 | 60 | ||
@@ -68,54 +65,41 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name, | |||
68 | 65 | ||
69 | static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) | 66 | static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) |
70 | { | 67 | { |
71 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
72 | struct net_device *dev; | 68 | struct net_device *dev; |
73 | char *name; | ||
74 | |||
75 | if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED)) | ||
76 | return -ENODEV; | ||
77 | 69 | ||
78 | /* we're under RTNL */ | 70 | /* we're under RTNL */ |
79 | dev = __dev_get_by_index(&init_net, ifindex); | 71 | dev = __dev_get_by_index(&init_net, ifindex); |
80 | if (!dev) | 72 | if (!dev) |
81 | return 0; | 73 | return -ENODEV; |
82 | 74 | ||
83 | name = dev->name; | 75 | ieee80211_if_remove(dev); |
84 | 76 | ||
85 | return ieee80211_if_remove(local->mdev, name, -1); | 77 | return 0; |
86 | } | 78 | } |
87 | 79 | ||
88 | static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex, | 80 | static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex, |
89 | enum nl80211_iftype type, u32 *flags, | 81 | enum nl80211_iftype type, u32 *flags, |
90 | struct vif_params *params) | 82 | struct vif_params *params) |
91 | { | 83 | { |
92 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
93 | struct net_device *dev; | 84 | struct net_device *dev; |
94 | enum ieee80211_if_types itype; | 85 | enum ieee80211_if_types itype; |
95 | struct ieee80211_sub_if_data *sdata; | 86 | struct ieee80211_sub_if_data *sdata; |
96 | 87 | int ret; | |
97 | if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED)) | ||
98 | return -ENODEV; | ||
99 | 88 | ||
100 | /* we're under RTNL */ | 89 | /* we're under RTNL */ |
101 | dev = __dev_get_by_index(&init_net, ifindex); | 90 | dev = __dev_get_by_index(&init_net, ifindex); |
102 | if (!dev) | 91 | if (!dev) |
103 | return -ENODEV; | 92 | return -ENODEV; |
104 | 93 | ||
105 | if (netif_running(dev)) | ||
106 | return -EBUSY; | ||
107 | |||
108 | itype = nl80211_type_to_mac80211_type(type); | 94 | itype = nl80211_type_to_mac80211_type(type); |
109 | if (itype == IEEE80211_IF_TYPE_INVALID) | 95 | if (itype == IEEE80211_IF_TYPE_INVALID) |
110 | return -EINVAL; | 96 | return -EINVAL; |
111 | 97 | ||
112 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 98 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
113 | 99 | ||
114 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) | 100 | ret = ieee80211_if_change_type(sdata, itype); |
115 | return -EOPNOTSUPP; | 101 | if (ret) |
116 | 102 | return ret; | |
117 | ieee80211_if_reinit(dev); | ||
118 | ieee80211_if_set_type(dev, itype); | ||
119 | 103 | ||
120 | if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len) | 104 | if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len) |
121 | ieee80211_if_sta_set_mesh_id(&sdata->u.sta, | 105 | ieee80211_if_sta_set_mesh_id(&sdata->u.sta, |
@@ -256,8 +240,8 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, | |||
256 | case ALG_TKIP: | 240 | case ALG_TKIP: |
257 | params.cipher = WLAN_CIPHER_SUITE_TKIP; | 241 | params.cipher = WLAN_CIPHER_SUITE_TKIP; |
258 | 242 | ||
259 | iv32 = key->u.tkip.iv32; | 243 | iv32 = key->u.tkip.tx.iv32; |
260 | iv16 = key->u.tkip.iv16; | 244 | iv16 = key->u.tkip.tx.iv16; |
261 | 245 | ||
262 | if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && | 246 | if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && |
263 | sdata->local->ops->get_tkip_seq) | 247 | sdata->local->ops->get_tkip_seq) |
@@ -485,7 +469,7 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata, | |||
485 | 469 | ||
486 | kfree(old); | 470 | kfree(old); |
487 | 471 | ||
488 | return ieee80211_if_config_beacon(sdata->dev); | 472 | return ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); |
489 | } | 473 | } |
490 | 474 | ||
491 | static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, | 475 | static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, |
@@ -539,7 +523,7 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev) | |||
539 | synchronize_rcu(); | 523 | synchronize_rcu(); |
540 | kfree(old); | 524 | kfree(old); |
541 | 525 | ||
542 | return ieee80211_if_config_beacon(dev); | 526 | return ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); |
543 | } | 527 | } |
544 | 528 | ||
545 | /* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */ | 529 | /* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */ |
@@ -602,6 +586,7 @@ static void sta_apply_parameters(struct ieee80211_local *local, | |||
602 | */ | 586 | */ |
603 | 587 | ||
604 | if (params->station_flags & STATION_FLAG_CHANGED) { | 588 | if (params->station_flags & STATION_FLAG_CHANGED) { |
589 | spin_lock_bh(&sta->lock); | ||
605 | sta->flags &= ~WLAN_STA_AUTHORIZED; | 590 | sta->flags &= ~WLAN_STA_AUTHORIZED; |
606 | if (params->station_flags & STATION_FLAG_AUTHORIZED) | 591 | if (params->station_flags & STATION_FLAG_AUTHORIZED) |
607 | sta->flags |= WLAN_STA_AUTHORIZED; | 592 | sta->flags |= WLAN_STA_AUTHORIZED; |
@@ -613,6 +598,7 @@ static void sta_apply_parameters(struct ieee80211_local *local, | |||
613 | sta->flags &= ~WLAN_STA_WME; | 598 | sta->flags &= ~WLAN_STA_WME; |
614 | if (params->station_flags & STATION_FLAG_WME) | 599 | if (params->station_flags & STATION_FLAG_WME) |
615 | sta->flags |= WLAN_STA_WME; | 600 | sta->flags |= WLAN_STA_WME; |
601 | spin_unlock_bh(&sta->lock); | ||
616 | } | 602 | } |
617 | 603 | ||
618 | /* | 604 | /* |
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 1cccbfd781f6..ee509f1109e2 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c | |||
@@ -70,16 +70,6 @@ DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s", | |||
70 | 70 | ||
71 | /* statistics stuff */ | 71 | /* statistics stuff */ |
72 | 72 | ||
73 | static inline int rtnl_lock_local(struct ieee80211_local *local) | ||
74 | { | ||
75 | rtnl_lock(); | ||
76 | if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED)) { | ||
77 | rtnl_unlock(); | ||
78 | return -ENODEV; | ||
79 | } | ||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | #define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \ | 73 | #define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \ |
84 | DEBUGFS_READONLY_FILE(stats_ ##name, buflen, fmt, ##value) | 74 | DEBUGFS_READONLY_FILE(stats_ ##name, buflen, fmt, ##value) |
85 | 75 | ||
@@ -96,10 +86,7 @@ static ssize_t format_devstat_counter(struct ieee80211_local *local, | |||
96 | if (!local->ops->get_stats) | 86 | if (!local->ops->get_stats) |
97 | return -EOPNOTSUPP; | 87 | return -EOPNOTSUPP; |
98 | 88 | ||
99 | res = rtnl_lock_local(local); | 89 | rtnl_lock(); |
100 | if (res) | ||
101 | return res; | ||
102 | |||
103 | res = local->ops->get_stats(local_to_hw(local), &stats); | 90 | res = local->ops->get_stats(local_to_hw(local), &stats); |
104 | rtnl_unlock(); | 91 | rtnl_unlock(); |
105 | if (!res) | 92 | if (!res) |
@@ -197,45 +184,6 @@ DEBUGFS_STATS_FILE(rx_handlers_fragments, 20, "%u", | |||
197 | DEBUGFS_STATS_FILE(tx_status_drop, 20, "%u", | 184 | DEBUGFS_STATS_FILE(tx_status_drop, 20, "%u", |
198 | local->tx_status_drop); | 185 | local->tx_status_drop); |
199 | 186 | ||
200 | static ssize_t stats_wme_rx_queue_read(struct file *file, | ||
201 | char __user *userbuf, | ||
202 | size_t count, loff_t *ppos) | ||
203 | { | ||
204 | struct ieee80211_local *local = file->private_data; | ||
205 | char buf[NUM_RX_DATA_QUEUES*15], *p = buf; | ||
206 | int i; | ||
207 | |||
208 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) | ||
209 | p += scnprintf(p, sizeof(buf)+buf-p, | ||
210 | "%u\n", local->wme_rx_queue[i]); | ||
211 | |||
212 | return simple_read_from_buffer(userbuf, count, ppos, buf, p-buf); | ||
213 | } | ||
214 | |||
215 | static const struct file_operations stats_wme_rx_queue_ops = { | ||
216 | .read = stats_wme_rx_queue_read, | ||
217 | .open = mac80211_open_file_generic, | ||
218 | }; | ||
219 | |||
220 | static ssize_t stats_wme_tx_queue_read(struct file *file, | ||
221 | char __user *userbuf, | ||
222 | size_t count, loff_t *ppos) | ||
223 | { | ||
224 | struct ieee80211_local *local = file->private_data; | ||
225 | char buf[NUM_TX_DATA_QUEUES*15], *p = buf; | ||
226 | int i; | ||
227 | |||
228 | for (i = 0; i < NUM_TX_DATA_QUEUES; i++) | ||
229 | p += scnprintf(p, sizeof(buf)+buf-p, | ||
230 | "%u\n", local->wme_tx_queue[i]); | ||
231 | |||
232 | return simple_read_from_buffer(userbuf, count, ppos, buf, p-buf); | ||
233 | } | ||
234 | |||
235 | static const struct file_operations stats_wme_tx_queue_ops = { | ||
236 | .read = stats_wme_tx_queue_read, | ||
237 | .open = mac80211_open_file_generic, | ||
238 | }; | ||
239 | #endif | 187 | #endif |
240 | 188 | ||
241 | DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount); | 189 | DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount); |
@@ -303,8 +251,6 @@ void debugfs_hw_add(struct ieee80211_local *local) | |||
303 | DEBUGFS_STATS_ADD(rx_expand_skb_head2); | 251 | DEBUGFS_STATS_ADD(rx_expand_skb_head2); |
304 | DEBUGFS_STATS_ADD(rx_handlers_fragments); | 252 | DEBUGFS_STATS_ADD(rx_handlers_fragments); |
305 | DEBUGFS_STATS_ADD(tx_status_drop); | 253 | DEBUGFS_STATS_ADD(tx_status_drop); |
306 | DEBUGFS_STATS_ADD(wme_tx_queue); | ||
307 | DEBUGFS_STATS_ADD(wme_rx_queue); | ||
308 | #endif | 254 | #endif |
309 | DEBUGFS_STATS_ADD(dot11ACKFailureCount); | 255 | DEBUGFS_STATS_ADD(dot11ACKFailureCount); |
310 | DEBUGFS_STATS_ADD(dot11RTSFailureCount); | 256 | DEBUGFS_STATS_ADD(dot11RTSFailureCount); |
@@ -356,8 +302,6 @@ void debugfs_hw_del(struct ieee80211_local *local) | |||
356 | DEBUGFS_STATS_DEL(rx_expand_skb_head2); | 302 | DEBUGFS_STATS_DEL(rx_expand_skb_head2); |
357 | DEBUGFS_STATS_DEL(rx_handlers_fragments); | 303 | DEBUGFS_STATS_DEL(rx_handlers_fragments); |
358 | DEBUGFS_STATS_DEL(tx_status_drop); | 304 | DEBUGFS_STATS_DEL(tx_status_drop); |
359 | DEBUGFS_STATS_DEL(wme_tx_queue); | ||
360 | DEBUGFS_STATS_DEL(wme_rx_queue); | ||
361 | #endif | 305 | #endif |
362 | DEBUGFS_STATS_DEL(dot11ACKFailureCount); | 306 | DEBUGFS_STATS_DEL(dot11ACKFailureCount); |
363 | DEBUGFS_STATS_DEL(dot11RTSFailureCount); | 307 | DEBUGFS_STATS_DEL(dot11RTSFailureCount); |
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index 19efc3a6a932..7439b63df5d0 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c | |||
@@ -97,8 +97,8 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf, | |||
97 | break; | 97 | break; |
98 | case ALG_TKIP: | 98 | case ALG_TKIP: |
99 | len = scnprintf(buf, sizeof(buf), "%08x %04x\n", | 99 | len = scnprintf(buf, sizeof(buf), "%08x %04x\n", |
100 | key->u.tkip.iv32, | 100 | key->u.tkip.tx.iv32, |
101 | key->u.tkip.iv16); | 101 | key->u.tkip.tx.iv16); |
102 | break; | 102 | break; |
103 | case ALG_CCMP: | 103 | case ALG_CCMP: |
104 | tpn = key->u.ccmp.tx_pn; | 104 | tpn = key->u.ccmp.tx_pn; |
@@ -128,8 +128,8 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf, | |||
128 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) | 128 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) |
129 | p += scnprintf(p, sizeof(buf)+buf-p, | 129 | p += scnprintf(p, sizeof(buf)+buf-p, |
130 | "%08x %04x\n", | 130 | "%08x %04x\n", |
131 | key->u.tkip.iv32_rx[i], | 131 | key->u.tkip.rx[i].iv32, |
132 | key->u.tkip.iv16_rx[i]); | 132 | key->u.tkip.rx[i].iv16); |
133 | len = p - buf; | 133 | len = p - buf; |
134 | break; | 134 | break; |
135 | case ALG_CCMP: | 135 | case ALG_CCMP: |
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index e3326d046944..475f89a8aee1 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c | |||
@@ -155,8 +155,9 @@ static const struct file_operations name##_ops = { \ | |||
155 | __IEEE80211_IF_WFILE(name) | 155 | __IEEE80211_IF_WFILE(name) |
156 | 156 | ||
157 | /* common attributes */ | 157 | /* common attributes */ |
158 | IEEE80211_IF_FILE(channel_use, channel_use, DEC); | ||
159 | IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC); | 158 | IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC); |
159 | IEEE80211_IF_FILE(force_unicast_rateidx, force_unicast_rateidx, DEC); | ||
160 | IEEE80211_IF_FILE(max_ratectrl_rateidx, max_ratectrl_rateidx, DEC); | ||
160 | 161 | ||
161 | /* STA/IBSS attributes */ | 162 | /* STA/IBSS attributes */ |
162 | IEEE80211_IF_FILE(state, u.sta.state, DEC); | 163 | IEEE80211_IF_FILE(state, u.sta.state, DEC); |
@@ -192,8 +193,6 @@ __IEEE80211_IF_FILE(flags); | |||
192 | IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); | 193 | IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); |
193 | IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); | 194 | IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); |
194 | IEEE80211_IF_FILE(num_beacons, u.ap.num_beacons, DEC); | 195 | IEEE80211_IF_FILE(num_beacons, u.ap.num_beacons, DEC); |
195 | IEEE80211_IF_FILE(force_unicast_rateidx, u.ap.force_unicast_rateidx, DEC); | ||
196 | IEEE80211_IF_FILE(max_ratectrl_rateidx, u.ap.max_ratectrl_rateidx, DEC); | ||
197 | 196 | ||
198 | static ssize_t ieee80211_if_fmt_num_buffered_multicast( | 197 | static ssize_t ieee80211_if_fmt_num_buffered_multicast( |
199 | const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) | 198 | const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) |
@@ -248,8 +247,10 @@ IEEE80211_IF_WFILE(min_discovery_timeout, | |||
248 | 247 | ||
249 | static void add_sta_files(struct ieee80211_sub_if_data *sdata) | 248 | static void add_sta_files(struct ieee80211_sub_if_data *sdata) |
250 | { | 249 | { |
251 | DEBUGFS_ADD(channel_use, sta); | ||
252 | DEBUGFS_ADD(drop_unencrypted, sta); | 250 | DEBUGFS_ADD(drop_unencrypted, sta); |
251 | DEBUGFS_ADD(force_unicast_rateidx, ap); | ||
252 | DEBUGFS_ADD(max_ratectrl_rateidx, ap); | ||
253 | |||
253 | DEBUGFS_ADD(state, sta); | 254 | DEBUGFS_ADD(state, sta); |
254 | DEBUGFS_ADD(bssid, sta); | 255 | DEBUGFS_ADD(bssid, sta); |
255 | DEBUGFS_ADD(prev_bssid, sta); | 256 | DEBUGFS_ADD(prev_bssid, sta); |
@@ -269,27 +270,30 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata) | |||
269 | 270 | ||
270 | static void add_ap_files(struct ieee80211_sub_if_data *sdata) | 271 | static void add_ap_files(struct ieee80211_sub_if_data *sdata) |
271 | { | 272 | { |
272 | DEBUGFS_ADD(channel_use, ap); | ||
273 | DEBUGFS_ADD(drop_unencrypted, ap); | 273 | DEBUGFS_ADD(drop_unencrypted, ap); |
274 | DEBUGFS_ADD(force_unicast_rateidx, ap); | ||
275 | DEBUGFS_ADD(max_ratectrl_rateidx, ap); | ||
276 | |||
274 | DEBUGFS_ADD(num_sta_ps, ap); | 277 | DEBUGFS_ADD(num_sta_ps, ap); |
275 | DEBUGFS_ADD(dtim_count, ap); | 278 | DEBUGFS_ADD(dtim_count, ap); |
276 | DEBUGFS_ADD(num_beacons, ap); | 279 | DEBUGFS_ADD(num_beacons, ap); |
277 | DEBUGFS_ADD(force_unicast_rateidx, ap); | ||
278 | DEBUGFS_ADD(max_ratectrl_rateidx, ap); | ||
279 | DEBUGFS_ADD(num_buffered_multicast, ap); | 280 | DEBUGFS_ADD(num_buffered_multicast, ap); |
280 | } | 281 | } |
281 | 282 | ||
282 | static void add_wds_files(struct ieee80211_sub_if_data *sdata) | 283 | static void add_wds_files(struct ieee80211_sub_if_data *sdata) |
283 | { | 284 | { |
284 | DEBUGFS_ADD(channel_use, wds); | ||
285 | DEBUGFS_ADD(drop_unencrypted, wds); | 285 | DEBUGFS_ADD(drop_unencrypted, wds); |
286 | DEBUGFS_ADD(force_unicast_rateidx, ap); | ||
287 | DEBUGFS_ADD(max_ratectrl_rateidx, ap); | ||
288 | |||
286 | DEBUGFS_ADD(peer, wds); | 289 | DEBUGFS_ADD(peer, wds); |
287 | } | 290 | } |
288 | 291 | ||
289 | static void add_vlan_files(struct ieee80211_sub_if_data *sdata) | 292 | static void add_vlan_files(struct ieee80211_sub_if_data *sdata) |
290 | { | 293 | { |
291 | DEBUGFS_ADD(channel_use, vlan); | ||
292 | DEBUGFS_ADD(drop_unencrypted, vlan); | 294 | DEBUGFS_ADD(drop_unencrypted, vlan); |
295 | DEBUGFS_ADD(force_unicast_rateidx, ap); | ||
296 | DEBUGFS_ADD(max_ratectrl_rateidx, ap); | ||
293 | } | 297 | } |
294 | 298 | ||
295 | static void add_monitor_files(struct ieee80211_sub_if_data *sdata) | 299 | static void add_monitor_files(struct ieee80211_sub_if_data *sdata) |
@@ -376,8 +380,10 @@ static void add_files(struct ieee80211_sub_if_data *sdata) | |||
376 | 380 | ||
377 | static void del_sta_files(struct ieee80211_sub_if_data *sdata) | 381 | static void del_sta_files(struct ieee80211_sub_if_data *sdata) |
378 | { | 382 | { |
379 | DEBUGFS_DEL(channel_use, sta); | ||
380 | DEBUGFS_DEL(drop_unencrypted, sta); | 383 | DEBUGFS_DEL(drop_unencrypted, sta); |
384 | DEBUGFS_DEL(force_unicast_rateidx, ap); | ||
385 | DEBUGFS_DEL(max_ratectrl_rateidx, ap); | ||
386 | |||
381 | DEBUGFS_DEL(state, sta); | 387 | DEBUGFS_DEL(state, sta); |
382 | DEBUGFS_DEL(bssid, sta); | 388 | DEBUGFS_DEL(bssid, sta); |
383 | DEBUGFS_DEL(prev_bssid, sta); | 389 | DEBUGFS_DEL(prev_bssid, sta); |
@@ -397,27 +403,30 @@ static void del_sta_files(struct ieee80211_sub_if_data *sdata) | |||
397 | 403 | ||
398 | static void del_ap_files(struct ieee80211_sub_if_data *sdata) | 404 | static void del_ap_files(struct ieee80211_sub_if_data *sdata) |
399 | { | 405 | { |
400 | DEBUGFS_DEL(channel_use, ap); | ||
401 | DEBUGFS_DEL(drop_unencrypted, ap); | 406 | DEBUGFS_DEL(drop_unencrypted, ap); |
407 | DEBUGFS_DEL(force_unicast_rateidx, ap); | ||
408 | DEBUGFS_DEL(max_ratectrl_rateidx, ap); | ||
409 | |||
402 | DEBUGFS_DEL(num_sta_ps, ap); | 410 | DEBUGFS_DEL(num_sta_ps, ap); |
403 | DEBUGFS_DEL(dtim_count, ap); | 411 | DEBUGFS_DEL(dtim_count, ap); |
404 | DEBUGFS_DEL(num_beacons, ap); | 412 | DEBUGFS_DEL(num_beacons, ap); |
405 | DEBUGFS_DEL(force_unicast_rateidx, ap); | ||
406 | DEBUGFS_DEL(max_ratectrl_rateidx, ap); | ||
407 | DEBUGFS_DEL(num_buffered_multicast, ap); | 413 | DEBUGFS_DEL(num_buffered_multicast, ap); |
408 | } | 414 | } |
409 | 415 | ||
410 | static void del_wds_files(struct ieee80211_sub_if_data *sdata) | 416 | static void del_wds_files(struct ieee80211_sub_if_data *sdata) |
411 | { | 417 | { |
412 | DEBUGFS_DEL(channel_use, wds); | ||
413 | DEBUGFS_DEL(drop_unencrypted, wds); | 418 | DEBUGFS_DEL(drop_unencrypted, wds); |
419 | DEBUGFS_DEL(force_unicast_rateidx, ap); | ||
420 | DEBUGFS_DEL(max_ratectrl_rateidx, ap); | ||
421 | |||
414 | DEBUGFS_DEL(peer, wds); | 422 | DEBUGFS_DEL(peer, wds); |
415 | } | 423 | } |
416 | 424 | ||
417 | static void del_vlan_files(struct ieee80211_sub_if_data *sdata) | 425 | static void del_vlan_files(struct ieee80211_sub_if_data *sdata) |
418 | { | 426 | { |
419 | DEBUGFS_DEL(channel_use, vlan); | ||
420 | DEBUGFS_DEL(drop_unencrypted, vlan); | 427 | DEBUGFS_DEL(drop_unencrypted, vlan); |
428 | DEBUGFS_DEL(force_unicast_rateidx, ap); | ||
429 | DEBUGFS_DEL(max_ratectrl_rateidx, ap); | ||
421 | } | 430 | } |
422 | 431 | ||
423 | static void del_monitor_files(struct ieee80211_sub_if_data *sdata) | 432 | static void del_monitor_files(struct ieee80211_sub_if_data *sdata) |
@@ -467,12 +476,12 @@ static void del_mesh_config(struct ieee80211_sub_if_data *sdata) | |||
467 | } | 476 | } |
468 | #endif | 477 | #endif |
469 | 478 | ||
470 | static void del_files(struct ieee80211_sub_if_data *sdata, int type) | 479 | static void del_files(struct ieee80211_sub_if_data *sdata) |
471 | { | 480 | { |
472 | if (!sdata->debugfsdir) | 481 | if (!sdata->debugfsdir) |
473 | return; | 482 | return; |
474 | 483 | ||
475 | switch (type) { | 484 | switch (sdata->vif.type) { |
476 | case IEEE80211_IF_TYPE_MESH_POINT: | 485 | case IEEE80211_IF_TYPE_MESH_POINT: |
477 | #ifdef CONFIG_MAC80211_MESH | 486 | #ifdef CONFIG_MAC80211_MESH |
478 | del_mesh_stats(sdata); | 487 | del_mesh_stats(sdata); |
@@ -512,29 +521,23 @@ void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata) | |||
512 | sprintf(buf, "netdev:%s", sdata->dev->name); | 521 | sprintf(buf, "netdev:%s", sdata->dev->name); |
513 | sdata->debugfsdir = debugfs_create_dir(buf, | 522 | sdata->debugfsdir = debugfs_create_dir(buf, |
514 | sdata->local->hw.wiphy->debugfsdir); | 523 | sdata->local->hw.wiphy->debugfsdir); |
524 | add_files(sdata); | ||
515 | } | 525 | } |
516 | 526 | ||
517 | void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) | 527 | void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) |
518 | { | 528 | { |
519 | del_files(sdata, sdata->vif.type); | 529 | del_files(sdata); |
520 | debugfs_remove(sdata->debugfsdir); | 530 | debugfs_remove(sdata->debugfsdir); |
521 | sdata->debugfsdir = NULL; | 531 | sdata->debugfsdir = NULL; |
522 | } | 532 | } |
523 | 533 | ||
524 | void ieee80211_debugfs_change_if_type(struct ieee80211_sub_if_data *sdata, | 534 | static int netdev_notify(struct notifier_block *nb, |
525 | int oldtype) | ||
526 | { | ||
527 | del_files(sdata, oldtype); | ||
528 | add_files(sdata); | ||
529 | } | ||
530 | |||
531 | static int netdev_notify(struct notifier_block * nb, | ||
532 | unsigned long state, | 535 | unsigned long state, |
533 | void *ndev) | 536 | void *ndev) |
534 | { | 537 | { |
535 | struct net_device *dev = ndev; | 538 | struct net_device *dev = ndev; |
536 | struct dentry *dir; | 539 | struct dentry *dir; |
537 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 540 | struct ieee80211_sub_if_data *sdata; |
538 | char buf[10+IFNAMSIZ]; | 541 | char buf[10+IFNAMSIZ]; |
539 | 542 | ||
540 | if (state != NETDEV_CHANGENAME) | 543 | if (state != NETDEV_CHANGENAME) |
@@ -546,6 +549,8 @@ static int netdev_notify(struct notifier_block * nb, | |||
546 | if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid) | 549 | if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid) |
547 | return 0; | 550 | return 0; |
548 | 551 | ||
552 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
553 | |||
549 | sprintf(buf, "netdev:%s", dev->name); | 554 | sprintf(buf, "netdev:%s", dev->name); |
550 | dir = sdata->debugfsdir; | 555 | dir = sdata->debugfsdir; |
551 | if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf)) | 556 | if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf)) |
diff --git a/net/mac80211/debugfs_netdev.h b/net/mac80211/debugfs_netdev.h index a690071fde8a..7af731f0b731 100644 --- a/net/mac80211/debugfs_netdev.h +++ b/net/mac80211/debugfs_netdev.h | |||
@@ -6,8 +6,6 @@ | |||
6 | #ifdef CONFIG_MAC80211_DEBUGFS | 6 | #ifdef CONFIG_MAC80211_DEBUGFS |
7 | void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata); | 7 | void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata); |
8 | void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata); | 8 | void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata); |
9 | void ieee80211_debugfs_change_if_type(struct ieee80211_sub_if_data *sdata, | ||
10 | int oldtype); | ||
11 | void ieee80211_debugfs_netdev_init(void); | 9 | void ieee80211_debugfs_netdev_init(void); |
12 | void ieee80211_debugfs_netdev_exit(void); | 10 | void ieee80211_debugfs_netdev_exit(void); |
13 | #else | 11 | #else |
@@ -17,9 +15,6 @@ static inline void ieee80211_debugfs_add_netdev( | |||
17 | static inline void ieee80211_debugfs_remove_netdev( | 15 | static inline void ieee80211_debugfs_remove_netdev( |
18 | struct ieee80211_sub_if_data *sdata) | 16 | struct ieee80211_sub_if_data *sdata) |
19 | {} | 17 | {} |
20 | static inline void ieee80211_debugfs_change_if_type( | ||
21 | struct ieee80211_sub_if_data *sdata, int oldtype) | ||
22 | {} | ||
23 | static inline void ieee80211_debugfs_netdev_init(void) | 18 | static inline void ieee80211_debugfs_netdev_init(void) |
24 | {} | 19 | {} |
25 | 20 | ||
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 6d47a1d31b37..79a062782d52 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c | |||
@@ -63,10 +63,9 @@ STA_FILE(tx_fragments, tx_fragments, LU); | |||
63 | STA_FILE(tx_filtered, tx_filtered_count, LU); | 63 | STA_FILE(tx_filtered, tx_filtered_count, LU); |
64 | STA_FILE(tx_retry_failed, tx_retry_failed, LU); | 64 | STA_FILE(tx_retry_failed, tx_retry_failed, LU); |
65 | STA_FILE(tx_retry_count, tx_retry_count, LU); | 65 | STA_FILE(tx_retry_count, tx_retry_count, LU); |
66 | STA_FILE(last_rssi, last_rssi, D); | ||
67 | STA_FILE(last_signal, last_signal, D); | 66 | STA_FILE(last_signal, last_signal, D); |
67 | STA_FILE(last_qual, last_qual, D); | ||
68 | STA_FILE(last_noise, last_noise, D); | 68 | STA_FILE(last_noise, last_noise, D); |
69 | STA_FILE(channel_use, channel_use, D); | ||
70 | STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); | 69 | STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); |
71 | 70 | ||
72 | static ssize_t sta_flags_read(struct file *file, char __user *userbuf, | 71 | static ssize_t sta_flags_read(struct file *file, char __user *userbuf, |
@@ -74,14 +73,15 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf, | |||
74 | { | 73 | { |
75 | char buf[100]; | 74 | char buf[100]; |
76 | struct sta_info *sta = file->private_data; | 75 | struct sta_info *sta = file->private_data; |
76 | u32 staflags = get_sta_flags(sta); | ||
77 | int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s", | 77 | int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s", |
78 | sta->flags & WLAN_STA_AUTH ? "AUTH\n" : "", | 78 | staflags & WLAN_STA_AUTH ? "AUTH\n" : "", |
79 | sta->flags & WLAN_STA_ASSOC ? "ASSOC\n" : "", | 79 | staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "", |
80 | sta->flags & WLAN_STA_PS ? "PS\n" : "", | 80 | staflags & WLAN_STA_PS ? "PS\n" : "", |
81 | sta->flags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "", | 81 | staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "", |
82 | sta->flags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "", | 82 | staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "", |
83 | sta->flags & WLAN_STA_WME ? "WME\n" : "", | 83 | staflags & WLAN_STA_WME ? "WME\n" : "", |
84 | sta->flags & WLAN_STA_WDS ? "WDS\n" : ""); | 84 | staflags & WLAN_STA_WDS ? "WDS\n" : ""); |
85 | return simple_read_from_buffer(userbuf, count, ppos, buf, res); | 85 | return simple_read_from_buffer(userbuf, count, ppos, buf, res); |
86 | } | 86 | } |
87 | STA_OPS(flags); | 87 | STA_OPS(flags); |
@@ -123,36 +123,6 @@ static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf, | |||
123 | } | 123 | } |
124 | STA_OPS(last_seq_ctrl); | 124 | STA_OPS(last_seq_ctrl); |
125 | 125 | ||
126 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | ||
127 | static ssize_t sta_wme_rx_queue_read(struct file *file, char __user *userbuf, | ||
128 | size_t count, loff_t *ppos) | ||
129 | { | ||
130 | char buf[15*NUM_RX_DATA_QUEUES], *p = buf; | ||
131 | int i; | ||
132 | struct sta_info *sta = file->private_data; | ||
133 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) | ||
134 | p += scnprintf(p, sizeof(buf)+buf-p, "%u ", | ||
135 | sta->wme_rx_queue[i]); | ||
136 | p += scnprintf(p, sizeof(buf)+buf-p, "\n"); | ||
137 | return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | ||
138 | } | ||
139 | STA_OPS(wme_rx_queue); | ||
140 | |||
141 | static ssize_t sta_wme_tx_queue_read(struct file *file, char __user *userbuf, | ||
142 | size_t count, loff_t *ppos) | ||
143 | { | ||
144 | char buf[15*NUM_TX_DATA_QUEUES], *p = buf; | ||
145 | int i; | ||
146 | struct sta_info *sta = file->private_data; | ||
147 | for (i = 0; i < NUM_TX_DATA_QUEUES; i++) | ||
148 | p += scnprintf(p, sizeof(buf)+buf-p, "%u ", | ||
149 | sta->wme_tx_queue[i]); | ||
150 | p += scnprintf(p, sizeof(buf)+buf-p, "\n"); | ||
151 | return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | ||
152 | } | ||
153 | STA_OPS(wme_tx_queue); | ||
154 | #endif | ||
155 | |||
156 | static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, | 126 | static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, |
157 | size_t count, loff_t *ppos) | 127 | size_t count, loff_t *ppos) |
158 | { | 128 | { |
@@ -293,10 +263,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) | |||
293 | DEBUGFS_ADD(num_ps_buf_frames); | 263 | DEBUGFS_ADD(num_ps_buf_frames); |
294 | DEBUGFS_ADD(inactive_ms); | 264 | DEBUGFS_ADD(inactive_ms); |
295 | DEBUGFS_ADD(last_seq_ctrl); | 265 | DEBUGFS_ADD(last_seq_ctrl); |
296 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | ||
297 | DEBUGFS_ADD(wme_rx_queue); | ||
298 | DEBUGFS_ADD(wme_tx_queue); | ||
299 | #endif | ||
300 | DEBUGFS_ADD(agg_status); | 266 | DEBUGFS_ADD(agg_status); |
301 | } | 267 | } |
302 | 268 | ||
@@ -306,10 +272,6 @@ void ieee80211_sta_debugfs_remove(struct sta_info *sta) | |||
306 | DEBUGFS_DEL(num_ps_buf_frames); | 272 | DEBUGFS_DEL(num_ps_buf_frames); |
307 | DEBUGFS_DEL(inactive_ms); | 273 | DEBUGFS_DEL(inactive_ms); |
308 | DEBUGFS_DEL(last_seq_ctrl); | 274 | DEBUGFS_DEL(last_seq_ctrl); |
309 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | ||
310 | DEBUGFS_DEL(wme_rx_queue); | ||
311 | DEBUGFS_DEL(wme_tx_queue); | ||
312 | #endif | ||
313 | DEBUGFS_DEL(agg_status); | 275 | DEBUGFS_DEL(agg_status); |
314 | 276 | ||
315 | debugfs_remove(sta->debugfs.dir); | 277 | debugfs_remove(sta->debugfs.dir); |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 006486b26726..a4f9a832722a 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -2,6 +2,7 @@ | |||
2 | * Copyright 2002-2005, Instant802 Networks, Inc. | 2 | * Copyright 2002-2005, Instant802 Networks, Inc. |
3 | * Copyright 2005, Devicescape Software, Inc. | 3 | * Copyright 2005, Devicescape Software, Inc. |
4 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | 4 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> |
5 | * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net> | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
@@ -23,6 +24,8 @@ | |||
23 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
24 | #include <linux/etherdevice.h> | 25 | #include <linux/etherdevice.h> |
25 | #include <net/wireless.h> | 26 | #include <net/wireless.h> |
27 | #include <net/iw_handler.h> | ||
28 | #include <net/mac80211.h> | ||
26 | #include "key.h" | 29 | #include "key.h" |
27 | #include "sta_info.h" | 30 | #include "sta_info.h" |
28 | 31 | ||
@@ -82,7 +85,7 @@ struct ieee80211_sta_bss { | |||
82 | u16 capability; /* host byte order */ | 85 | u16 capability; /* host byte order */ |
83 | enum ieee80211_band band; | 86 | enum ieee80211_band band; |
84 | int freq; | 87 | int freq; |
85 | int rssi, signal, noise; | 88 | int signal, noise, qual; |
86 | u8 *wpa_ie; | 89 | u8 *wpa_ie; |
87 | size_t wpa_ie_len; | 90 | size_t wpa_ie_len; |
88 | u8 *rsn_ie; | 91 | u8 *rsn_ie; |
@@ -91,6 +94,8 @@ struct ieee80211_sta_bss { | |||
91 | size_t wmm_ie_len; | 94 | size_t wmm_ie_len; |
92 | u8 *ht_ie; | 95 | u8 *ht_ie; |
93 | size_t ht_ie_len; | 96 | size_t ht_ie_len; |
97 | u8 *ht_add_ie; | ||
98 | size_t ht_add_ie_len; | ||
94 | #ifdef CONFIG_MAC80211_MESH | 99 | #ifdef CONFIG_MAC80211_MESH |
95 | u8 *mesh_id; | 100 | u8 *mesh_id; |
96 | size_t mesh_id_len; | 101 | size_t mesh_id_len; |
@@ -147,7 +152,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result; | |||
147 | #define IEEE80211_TX_UNICAST BIT(1) | 152 | #define IEEE80211_TX_UNICAST BIT(1) |
148 | #define IEEE80211_TX_PS_BUFFERED BIT(2) | 153 | #define IEEE80211_TX_PS_BUFFERED BIT(2) |
149 | #define IEEE80211_TX_PROBE_LAST_FRAG BIT(3) | 154 | #define IEEE80211_TX_PROBE_LAST_FRAG BIT(3) |
150 | #define IEEE80211_TX_INJECTED BIT(4) | ||
151 | 155 | ||
152 | struct ieee80211_tx_data { | 156 | struct ieee80211_tx_data { |
153 | struct sk_buff *skb; | 157 | struct sk_buff *skb; |
@@ -157,13 +161,12 @@ struct ieee80211_tx_data { | |||
157 | struct sta_info *sta; | 161 | struct sta_info *sta; |
158 | struct ieee80211_key *key; | 162 | struct ieee80211_key *key; |
159 | 163 | ||
160 | struct ieee80211_tx_control *control; | ||
161 | struct ieee80211_channel *channel; | 164 | struct ieee80211_channel *channel; |
162 | struct ieee80211_rate *rate; | 165 | s8 rate_idx; |
163 | /* use this rate (if set) for last fragment; rate can | 166 | /* use this rate (if set) for last fragment; rate can |
164 | * be set to lower rate for the first fragments, e.g., | 167 | * be set to lower rate for the first fragments, e.g., |
165 | * when using CTS protection with IEEE 802.11g. */ | 168 | * when using CTS protection with IEEE 802.11g. */ |
166 | struct ieee80211_rate *last_frag_rate; | 169 | s8 last_frag_rate_idx; |
167 | 170 | ||
168 | /* Extra fragments (in addition to the first fragment | 171 | /* Extra fragments (in addition to the first fragment |
169 | * in skb) */ | 172 | * in skb) */ |
@@ -202,32 +205,16 @@ struct ieee80211_rx_data { | |||
202 | unsigned int flags; | 205 | unsigned int flags; |
203 | int sent_ps_buffered; | 206 | int sent_ps_buffered; |
204 | int queue; | 207 | int queue; |
205 | int load; | ||
206 | u32 tkip_iv32; | 208 | u32 tkip_iv32; |
207 | u16 tkip_iv16; | 209 | u16 tkip_iv16; |
208 | }; | 210 | }; |
209 | 211 | ||
210 | /* flags used in struct ieee80211_tx_packet_data.flags */ | ||
211 | #define IEEE80211_TXPD_REQ_TX_STATUS BIT(0) | ||
212 | #define IEEE80211_TXPD_DO_NOT_ENCRYPT BIT(1) | ||
213 | #define IEEE80211_TXPD_REQUEUE BIT(2) | ||
214 | #define IEEE80211_TXPD_EAPOL_FRAME BIT(3) | ||
215 | #define IEEE80211_TXPD_AMPDU BIT(4) | ||
216 | /* Stored in sk_buff->cb */ | ||
217 | struct ieee80211_tx_packet_data { | ||
218 | int ifindex; | ||
219 | unsigned long jiffies; | ||
220 | unsigned int flags; | ||
221 | u8 queue; | ||
222 | }; | ||
223 | |||
224 | struct ieee80211_tx_stored_packet { | 212 | struct ieee80211_tx_stored_packet { |
225 | struct ieee80211_tx_control control; | ||
226 | struct sk_buff *skb; | 213 | struct sk_buff *skb; |
227 | struct sk_buff **extra_frag; | 214 | struct sk_buff **extra_frag; |
228 | struct ieee80211_rate *last_frag_rate; | 215 | s8 last_frag_rate_idx; |
229 | int num_extra_frag; | 216 | int num_extra_frag; |
230 | unsigned int last_frag_rate_ctrl_probe; | 217 | bool last_frag_rate_ctrl_probe; |
231 | }; | 218 | }; |
232 | 219 | ||
233 | struct beacon_data { | 220 | struct beacon_data { |
@@ -251,8 +238,6 @@ struct ieee80211_if_ap { | |||
251 | struct sk_buff_head ps_bc_buf; | 238 | struct sk_buff_head ps_bc_buf; |
252 | atomic_t num_sta_ps; /* number of stations in PS mode */ | 239 | atomic_t num_sta_ps; /* number of stations in PS mode */ |
253 | int dtim_count; | 240 | int dtim_count; |
254 | int force_unicast_rateidx; /* forced TX rateidx for unicast frames */ | ||
255 | int max_ratectrl_rateidx; /* max TX rateidx for rate control */ | ||
256 | int num_beacons; /* number of TXed beacon frames for this BSS */ | 241 | int num_beacons; /* number of TXed beacon frames for this BSS */ |
257 | }; | 242 | }; |
258 | 243 | ||
@@ -262,7 +247,6 @@ struct ieee80211_if_wds { | |||
262 | }; | 247 | }; |
263 | 248 | ||
264 | struct ieee80211_if_vlan { | 249 | struct ieee80211_if_vlan { |
265 | struct ieee80211_sub_if_data *ap; | ||
266 | struct list_head list; | 250 | struct list_head list; |
267 | }; | 251 | }; |
268 | 252 | ||
@@ -436,8 +420,6 @@ struct ieee80211_sub_if_data { | |||
436 | */ | 420 | */ |
437 | u64 basic_rates; | 421 | u64 basic_rates; |
438 | 422 | ||
439 | u16 sequence; | ||
440 | |||
441 | /* Fragment table for host-based reassembly */ | 423 | /* Fragment table for host-based reassembly */ |
442 | struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; | 424 | struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; |
443 | unsigned int fragment_next; | 425 | unsigned int fragment_next; |
@@ -446,16 +428,18 @@ struct ieee80211_sub_if_data { | |||
446 | struct ieee80211_key *keys[NUM_DEFAULT_KEYS]; | 428 | struct ieee80211_key *keys[NUM_DEFAULT_KEYS]; |
447 | struct ieee80211_key *default_key; | 429 | struct ieee80211_key *default_key; |
448 | 430 | ||
431 | /* BSS configuration for this interface. */ | ||
432 | struct ieee80211_bss_conf bss_conf; | ||
433 | |||
449 | /* | 434 | /* |
450 | * BSS configuration for this interface. | 435 | * AP this belongs to: self in AP mode and |
451 | * | 436 | * corresponding AP in VLAN mode, NULL for |
452 | * FIXME: I feel bad putting this here when we already have a | 437 | * all others (might be needed later in IBSS) |
453 | * bss pointer, but the bss pointer is just wrong when | ||
454 | * you have multiple virtual STA mode interfaces... | ||
455 | * This needs to be fixed. | ||
456 | */ | 438 | */ |
457 | struct ieee80211_bss_conf bss_conf; | 439 | struct ieee80211_if_ap *bss; |
458 | struct ieee80211_if_ap *bss; /* BSS that this device belongs to */ | 440 | |
441 | int force_unicast_rateidx; /* forced TX rateidx for unicast frames */ | ||
442 | int max_ratectrl_rateidx; /* max TX rateidx for rate control */ | ||
459 | 443 | ||
460 | union { | 444 | union { |
461 | struct ieee80211_if_ap ap; | 445 | struct ieee80211_if_ap ap; |
@@ -464,14 +448,11 @@ struct ieee80211_sub_if_data { | |||
464 | struct ieee80211_if_sta sta; | 448 | struct ieee80211_if_sta sta; |
465 | u32 mntr_flags; | 449 | u32 mntr_flags; |
466 | } u; | 450 | } u; |
467 | int channel_use; | ||
468 | int channel_use_raw; | ||
469 | 451 | ||
470 | #ifdef CONFIG_MAC80211_DEBUGFS | 452 | #ifdef CONFIG_MAC80211_DEBUGFS |
471 | struct dentry *debugfsdir; | 453 | struct dentry *debugfsdir; |
472 | union { | 454 | union { |
473 | struct { | 455 | struct { |
474 | struct dentry *channel_use; | ||
475 | struct dentry *drop_unencrypted; | 456 | struct dentry *drop_unencrypted; |
476 | struct dentry *state; | 457 | struct dentry *state; |
477 | struct dentry *bssid; | 458 | struct dentry *bssid; |
@@ -490,7 +471,6 @@ struct ieee80211_sub_if_data { | |||
490 | struct dentry *num_beacons_sta; | 471 | struct dentry *num_beacons_sta; |
491 | } sta; | 472 | } sta; |
492 | struct { | 473 | struct { |
493 | struct dentry *channel_use; | ||
494 | struct dentry *drop_unencrypted; | 474 | struct dentry *drop_unencrypted; |
495 | struct dentry *num_sta_ps; | 475 | struct dentry *num_sta_ps; |
496 | struct dentry *dtim_count; | 476 | struct dentry *dtim_count; |
@@ -500,12 +480,10 @@ struct ieee80211_sub_if_data { | |||
500 | struct dentry *num_buffered_multicast; | 480 | struct dentry *num_buffered_multicast; |
501 | } ap; | 481 | } ap; |
502 | struct { | 482 | struct { |
503 | struct dentry *channel_use; | ||
504 | struct dentry *drop_unencrypted; | 483 | struct dentry *drop_unencrypted; |
505 | struct dentry *peer; | 484 | struct dentry *peer; |
506 | } wds; | 485 | } wds; |
507 | struct { | 486 | struct { |
508 | struct dentry *channel_use; | ||
509 | struct dentry *drop_unencrypted; | 487 | struct dentry *drop_unencrypted; |
510 | } vlan; | 488 | } vlan; |
511 | struct { | 489 | struct { |
@@ -553,8 +531,6 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p) | |||
553 | return container_of(p, struct ieee80211_sub_if_data, vif); | 531 | return container_of(p, struct ieee80211_sub_if_data, vif); |
554 | } | 532 | } |
555 | 533 | ||
556 | #define IEEE80211_DEV_TO_SUB_IF(dev) netdev_priv(dev) | ||
557 | |||
558 | enum { | 534 | enum { |
559 | IEEE80211_RX_MSG = 1, | 535 | IEEE80211_RX_MSG = 1, |
560 | IEEE80211_TX_STATUS_MSG = 2, | 536 | IEEE80211_TX_STATUS_MSG = 2, |
@@ -562,6 +538,9 @@ enum { | |||
562 | IEEE80211_ADDBA_MSG = 4, | 538 | IEEE80211_ADDBA_MSG = 4, |
563 | }; | 539 | }; |
564 | 540 | ||
541 | /* maximum number of hardware queues we support. */ | ||
542 | #define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES) | ||
543 | |||
565 | struct ieee80211_local { | 544 | struct ieee80211_local { |
566 | /* embed the driver visible part. | 545 | /* embed the driver visible part. |
567 | * don't cast (use the static inlines below), but we keep | 546 | * don't cast (use the static inlines below), but we keep |
@@ -570,6 +549,8 @@ struct ieee80211_local { | |||
570 | 549 | ||
571 | const struct ieee80211_ops *ops; | 550 | const struct ieee80211_ops *ops; |
572 | 551 | ||
552 | unsigned long queue_pool[BITS_TO_LONGS(QD_MAX_QUEUES)]; | ||
553 | |||
573 | struct net_device *mdev; /* wmaster# - "master" 802.11 device */ | 554 | struct net_device *mdev; /* wmaster# - "master" 802.11 device */ |
574 | int open_count; | 555 | int open_count; |
575 | int monitors, cooked_mntrs; | 556 | int monitors, cooked_mntrs; |
@@ -581,12 +562,6 @@ struct ieee80211_local { | |||
581 | bool tim_in_locked_section; /* see ieee80211_beacon_get() */ | 562 | bool tim_in_locked_section; /* see ieee80211_beacon_get() */ |
582 | int tx_headroom; /* required headroom for hardware/radiotap */ | 563 | int tx_headroom; /* required headroom for hardware/radiotap */ |
583 | 564 | ||
584 | enum { | ||
585 | IEEE80211_DEV_UNINITIALIZED = 0, | ||
586 | IEEE80211_DEV_REGISTERED, | ||
587 | IEEE80211_DEV_UNREGISTERED, | ||
588 | } reg_state; | ||
589 | |||
590 | /* Tasklet and skb queue to process calls from IRQ mode. All frames | 565 | /* Tasklet and skb queue to process calls from IRQ mode. All frames |
591 | * added to skb_queue will be processed, but frames in | 566 | * added to skb_queue will be processed, but frames in |
592 | * skb_queue_unreliable may be dropped if the total length of these | 567 | * skb_queue_unreliable may be dropped if the total length of these |
@@ -610,8 +585,8 @@ struct ieee80211_local { | |||
610 | struct sta_info *sta_hash[STA_HASH_SIZE]; | 585 | struct sta_info *sta_hash[STA_HASH_SIZE]; |
611 | struct timer_list sta_cleanup; | 586 | struct timer_list sta_cleanup; |
612 | 587 | ||
613 | unsigned long state[NUM_TX_DATA_QUEUES_AMPDU]; | 588 | unsigned long queues_pending[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)]; |
614 | struct ieee80211_tx_stored_packet pending_packet[NUM_TX_DATA_QUEUES_AMPDU]; | 589 | struct ieee80211_tx_stored_packet pending_packet[IEEE80211_MAX_QUEUES]; |
615 | struct tasklet_struct tx_pending_tasklet; | 590 | struct tasklet_struct tx_pending_tasklet; |
616 | 591 | ||
617 | /* number of interfaces with corresponding IFF_ flags */ | 592 | /* number of interfaces with corresponding IFF_ flags */ |
@@ -677,9 +652,6 @@ struct ieee80211_local { | |||
677 | assoc_led_name[32], radio_led_name[32]; | 652 | assoc_led_name[32], radio_led_name[32]; |
678 | #endif | 653 | #endif |
679 | 654 | ||
680 | u32 channel_use; | ||
681 | u32 channel_use_raw; | ||
682 | |||
683 | #ifdef CONFIG_MAC80211_DEBUGFS | 655 | #ifdef CONFIG_MAC80211_DEBUGFS |
684 | struct work_struct sta_debugfs_add; | 656 | struct work_struct sta_debugfs_add; |
685 | #endif | 657 | #endif |
@@ -705,8 +677,6 @@ struct ieee80211_local { | |||
705 | unsigned int rx_expand_skb_head2; | 677 | unsigned int rx_expand_skb_head2; |
706 | unsigned int rx_handlers_fragments; | 678 | unsigned int rx_handlers_fragments; |
707 | unsigned int tx_status_drop; | 679 | unsigned int tx_status_drop; |
708 | unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES]; | ||
709 | unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES]; | ||
710 | #define I802_DEBUG_INC(c) (c)++ | 680 | #define I802_DEBUG_INC(c) (c)++ |
711 | #else /* CONFIG_MAC80211_DEBUG_COUNTERS */ | 681 | #else /* CONFIG_MAC80211_DEBUG_COUNTERS */ |
712 | #define I802_DEBUG_INC(c) do { } while (0) | 682 | #define I802_DEBUG_INC(c) do { } while (0) |
@@ -764,8 +734,6 @@ struct ieee80211_local { | |||
764 | struct dentry *rx_expand_skb_head2; | 734 | struct dentry *rx_expand_skb_head2; |
765 | struct dentry *rx_handlers_fragments; | 735 | struct dentry *rx_handlers_fragments; |
766 | struct dentry *tx_status_drop; | 736 | struct dentry *tx_status_drop; |
767 | struct dentry *wme_tx_queue; | ||
768 | struct dentry *wme_rx_queue; | ||
769 | #endif | 737 | #endif |
770 | struct dentry *dot11ACKFailureCount; | 738 | struct dentry *dot11ACKFailureCount; |
771 | struct dentry *dot11RTSFailureCount; | 739 | struct dentry *dot11RTSFailureCount; |
@@ -778,6 +746,16 @@ struct ieee80211_local { | |||
778 | #endif | 746 | #endif |
779 | }; | 747 | }; |
780 | 748 | ||
749 | static inline struct ieee80211_sub_if_data * | ||
750 | IEEE80211_DEV_TO_SUB_IF(struct net_device *dev) | ||
751 | { | ||
752 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
753 | |||
754 | BUG_ON(!local || local->mdev == dev); | ||
755 | |||
756 | return netdev_priv(dev); | ||
757 | } | ||
758 | |||
781 | /* this struct represents 802.11n's RA/TID combination */ | 759 | /* this struct represents 802.11n's RA/TID combination */ |
782 | struct ieee80211_ra_tid { | 760 | struct ieee80211_ra_tid { |
783 | u8 ra[ETH_ALEN]; | 761 | u8 ra[ETH_ALEN]; |
@@ -809,6 +787,10 @@ struct ieee802_11_elems { | |||
809 | u8 *preq; | 787 | u8 *preq; |
810 | u8 *prep; | 788 | u8 *prep; |
811 | u8 *perr; | 789 | u8 *perr; |
790 | u8 *ch_switch_elem; | ||
791 | u8 *country_elem; | ||
792 | u8 *pwr_constr_elem; | ||
793 | u8 *quiet_elem; /* first quite element */ | ||
812 | 794 | ||
813 | /* length of them, respectively */ | 795 | /* length of them, respectively */ |
814 | u8 ssid_len; | 796 | u8 ssid_len; |
@@ -833,6 +815,11 @@ struct ieee802_11_elems { | |||
833 | u8 preq_len; | 815 | u8 preq_len; |
834 | u8 prep_len; | 816 | u8 prep_len; |
835 | u8 perr_len; | 817 | u8 perr_len; |
818 | u8 ch_switch_elem_len; | ||
819 | u8 country_elem_len; | ||
820 | u8 pwr_constr_elem_len; | ||
821 | u8 quiet_elem_len; | ||
822 | u8 num_of_quiet_elem; /* can be more the one */ | ||
836 | }; | 823 | }; |
837 | 824 | ||
838 | static inline struct ieee80211_local *hw_to_local( | 825 | static inline struct ieee80211_local *hw_to_local( |
@@ -847,11 +834,6 @@ static inline struct ieee80211_hw *local_to_hw( | |||
847 | return &local->hw; | 834 | return &local->hw; |
848 | } | 835 | } |
849 | 836 | ||
850 | enum ieee80211_link_state_t { | ||
851 | IEEE80211_LINK_STATE_XOFF = 0, | ||
852 | IEEE80211_LINK_STATE_PENDING, | ||
853 | }; | ||
854 | |||
855 | struct sta_attribute { | 837 | struct sta_attribute { |
856 | struct attribute attr; | 838 | struct attribute attr; |
857 | ssize_t (*show)(const struct sta_info *, char *buf); | 839 | ssize_t (*show)(const struct sta_info *, char *buf); |
@@ -867,39 +849,16 @@ static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) | |||
867 | 849 | ||
868 | /* ieee80211.c */ | 850 | /* ieee80211.c */ |
869 | int ieee80211_hw_config(struct ieee80211_local *local); | 851 | int ieee80211_hw_config(struct ieee80211_local *local); |
870 | int ieee80211_if_config(struct net_device *dev); | 852 | int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed); |
871 | int ieee80211_if_config_beacon(struct net_device *dev); | ||
872 | void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); | 853 | void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); |
873 | void ieee80211_if_setup(struct net_device *dev); | ||
874 | u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht, | 854 | u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht, |
875 | struct ieee80211_ht_info *req_ht_cap, | 855 | struct ieee80211_ht_info *req_ht_cap, |
876 | struct ieee80211_ht_bss_info *req_bss_cap); | 856 | struct ieee80211_ht_bss_info *req_bss_cap); |
877 | 857 | ||
878 | /* ieee80211_ioctl.c */ | 858 | /* ieee80211_ioctl.c */ |
879 | extern const struct iw_handler_def ieee80211_iw_handler_def; | 859 | extern const struct iw_handler_def ieee80211_iw_handler_def; |
880 | |||
881 | |||
882 | /* Least common multiple of the used rates (in 100 kbps). This is used to | ||
883 | * calculate rate_inv values for each rate so that only integers are needed. */ | ||
884 | #define CHAN_UTIL_RATE_LCM 95040 | ||
885 | /* 1 usec is 1/8 * (95040/10) = 1188 */ | ||
886 | #define CHAN_UTIL_PER_USEC 1188 | ||
887 | /* Amount of bits to shift the result right to scale the total utilization | ||
888 | * to values that will not wrap around 32-bit integers. */ | ||
889 | #define CHAN_UTIL_SHIFT 9 | ||
890 | /* Theoretical maximum of channel utilization counter in 10 ms (stat_time=1): | ||
891 | * (CHAN_UTIL_PER_USEC * 10000) >> CHAN_UTIL_SHIFT = 23203. So dividing the | ||
892 | * raw value with about 23 should give utilization in 10th of a percentage | ||
893 | * (1/1000). However, utilization is only estimated and not all intervals | ||
894 | * between frames etc. are calculated. 18 seems to give numbers that are closer | ||
895 | * to the real maximum. */ | ||
896 | #define CHAN_UTIL_PER_10MS 18 | ||
897 | #define CHAN_UTIL_HDR_LONG (202 * CHAN_UTIL_PER_USEC) | ||
898 | #define CHAN_UTIL_HDR_SHORT (40 * CHAN_UTIL_PER_USEC) | ||
899 | |||
900 | |||
901 | /* ieee80211_ioctl.c */ | ||
902 | int ieee80211_set_freq(struct net_device *dev, int freq); | 860 | int ieee80211_set_freq(struct net_device *dev, int freq); |
861 | |||
903 | /* ieee80211_sta.c */ | 862 | /* ieee80211_sta.c */ |
904 | void ieee80211_sta_timer(unsigned long data); | 863 | void ieee80211_sta_timer(unsigned long data); |
905 | void ieee80211_sta_work(struct work_struct *work); | 864 | void ieee80211_sta_work(struct work_struct *work); |
@@ -912,21 +871,23 @@ int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid); | |||
912 | int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len); | 871 | int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len); |
913 | void ieee80211_sta_req_auth(struct net_device *dev, | 872 | void ieee80211_sta_req_auth(struct net_device *dev, |
914 | struct ieee80211_if_sta *ifsta); | 873 | struct ieee80211_if_sta *ifsta); |
915 | int ieee80211_sta_scan_results(struct net_device *dev, char *buf, size_t len); | 874 | int ieee80211_sta_scan_results(struct net_device *dev, |
875 | struct iw_request_info *info, | ||
876 | char *buf, size_t len); | ||
916 | ieee80211_rx_result ieee80211_sta_rx_scan( | 877 | ieee80211_rx_result ieee80211_sta_rx_scan( |
917 | struct net_device *dev, struct sk_buff *skb, | 878 | struct net_device *dev, struct sk_buff *skb, |
918 | struct ieee80211_rx_status *rx_status); | 879 | struct ieee80211_rx_status *rx_status); |
919 | void ieee80211_rx_bss_list_init(struct net_device *dev); | 880 | void ieee80211_rx_bss_list_init(struct ieee80211_local *local); |
920 | void ieee80211_rx_bss_list_deinit(struct net_device *dev); | 881 | void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local); |
921 | int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len); | 882 | int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len); |
922 | struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev, | 883 | struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev, |
923 | struct sk_buff *skb, u8 *bssid, | 884 | struct sk_buff *skb, u8 *bssid, |
924 | u8 *addr); | 885 | u8 *addr, u64 supp_rates); |
925 | int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason); | 886 | int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason); |
926 | int ieee80211_sta_disassociate(struct net_device *dev, u16 reason); | 887 | int ieee80211_sta_disassociate(struct net_device *dev, u16 reason); |
927 | void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | 888 | void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, |
928 | u32 changed); | 889 | u32 changed); |
929 | void ieee80211_reset_erp_info(struct net_device *dev); | 890 | u32 ieee80211_reset_erp_info(struct net_device *dev); |
930 | int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie, | 891 | int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie, |
931 | struct ieee80211_ht_info *ht_info); | 892 | struct ieee80211_ht_info *ht_info); |
932 | int ieee80211_ht_addt_info_ie_to_ht_bss_info( | 893 | int ieee80211_ht_addt_info_ie_to_ht_bss_info( |
@@ -937,10 +898,10 @@ void ieee80211_send_addba_request(struct net_device *dev, const u8 *da, | |||
937 | u16 agg_size, u16 timeout); | 898 | u16 agg_size, u16 timeout); |
938 | void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, | 899 | void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, |
939 | u16 initiator, u16 reason_code); | 900 | u16 initiator, u16 reason_code); |
901 | void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn); | ||
940 | 902 | ||
941 | void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da, | 903 | void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da, |
942 | u16 tid, u16 initiator, u16 reason); | 904 | u16 tid, u16 initiator, u16 reason); |
943 | void sta_rx_agg_session_timer_expired(unsigned long data); | ||
944 | void sta_addba_resp_timer_expired(unsigned long data); | 905 | void sta_addba_resp_timer_expired(unsigned long data); |
945 | void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr); | 906 | void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr); |
946 | u64 ieee80211_sta_get_rates(struct ieee80211_local *local, | 907 | u64 ieee80211_sta_get_rates(struct ieee80211_local *local, |
@@ -958,17 +919,15 @@ static inline void ieee80211_start_mesh(struct net_device *dev) | |||
958 | {} | 919 | {} |
959 | #endif | 920 | #endif |
960 | 921 | ||
961 | /* ieee80211_iface.c */ | 922 | /* interface handling */ |
962 | int ieee80211_if_add(struct net_device *dev, const char *name, | 923 | void ieee80211_if_setup(struct net_device *dev); |
963 | struct net_device **new_dev, int type, | 924 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, |
925 | struct net_device **new_dev, enum ieee80211_if_types type, | ||
964 | struct vif_params *params); | 926 | struct vif_params *params); |
965 | void ieee80211_if_set_type(struct net_device *dev, int type); | 927 | int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, |
966 | void ieee80211_if_reinit(struct net_device *dev); | 928 | enum ieee80211_if_types type); |
967 | void __ieee80211_if_del(struct ieee80211_local *local, | 929 | void ieee80211_if_remove(struct net_device *dev); |
968 | struct ieee80211_sub_if_data *sdata); | 930 | void ieee80211_remove_interfaces(struct ieee80211_local *local); |
969 | int ieee80211_if_remove(struct net_device *dev, const char *name, int id); | ||
970 | void ieee80211_if_free(struct net_device *dev); | ||
971 | void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata); | ||
972 | 931 | ||
973 | /* tx handling */ | 932 | /* tx handling */ |
974 | void ieee80211_clear_tx_pending(struct ieee80211_local *local); | 933 | void ieee80211_clear_tx_pending(struct ieee80211_local *local); |
@@ -988,4 +947,10 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, | |||
988 | void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, | 947 | void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, |
989 | struct ieee80211_hdr *hdr); | 948 | struct ieee80211_hdr *hdr); |
990 | 949 | ||
950 | #ifdef CONFIG_MAC80211_NOINLINE | ||
951 | #define debug_noinline noinline | ||
952 | #else | ||
953 | #define debug_noinline | ||
954 | #endif | ||
955 | |||
991 | #endif /* IEEE80211_I_H */ | 956 | #endif /* IEEE80211_I_H */ |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 06e88a5a036d..610ed1d9893a 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * Copyright 2002-2005, Instant802 Networks, Inc. | 2 | * Copyright 2002-2005, Instant802 Networks, Inc. |
3 | * Copyright 2005-2006, Devicescape Software, Inc. | 3 | * Copyright 2005-2006, Devicescape Software, Inc. |
4 | * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> | 4 | * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> |
5 | * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
@@ -17,38 +18,164 @@ | |||
17 | #include "debugfs_netdev.h" | 18 | #include "debugfs_netdev.h" |
18 | #include "mesh.h" | 19 | #include "mesh.h" |
19 | 20 | ||
20 | void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata) | 21 | /* |
22 | * Called when the netdev is removed or, by the code below, before | ||
23 | * the interface type changes. | ||
24 | */ | ||
25 | static void ieee80211_teardown_sdata(struct net_device *dev) | ||
21 | { | 26 | { |
27 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
28 | struct ieee80211_local *local = sdata->local; | ||
29 | struct beacon_data *beacon; | ||
30 | struct sk_buff *skb; | ||
31 | int flushed; | ||
22 | int i; | 32 | int i; |
23 | 33 | ||
24 | /* Default values for sub-interface parameters */ | 34 | ieee80211_debugfs_remove_netdev(sdata); |
25 | sdata->drop_unencrypted = 0; | 35 | |
36 | /* free extra data */ | ||
37 | ieee80211_free_keys(sdata); | ||
38 | |||
26 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) | 39 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) |
27 | skb_queue_head_init(&sdata->fragments[i].skb_list); | 40 | __skb_queue_purge(&sdata->fragments[i].skb_list); |
41 | sdata->fragment_next = 0; | ||
28 | 42 | ||
29 | INIT_LIST_HEAD(&sdata->key_list); | 43 | switch (sdata->vif.type) { |
44 | case IEEE80211_IF_TYPE_AP: | ||
45 | beacon = sdata->u.ap.beacon; | ||
46 | rcu_assign_pointer(sdata->u.ap.beacon, NULL); | ||
47 | synchronize_rcu(); | ||
48 | kfree(beacon); | ||
49 | |||
50 | while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) { | ||
51 | local->total_ps_buffered--; | ||
52 | dev_kfree_skb(skb); | ||
53 | } | ||
54 | |||
55 | break; | ||
56 | case IEEE80211_IF_TYPE_MESH_POINT: | ||
57 | /* Allow compiler to elide mesh_rmc_free call. */ | ||
58 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
59 | mesh_rmc_free(dev); | ||
60 | /* fall through */ | ||
61 | case IEEE80211_IF_TYPE_STA: | ||
62 | case IEEE80211_IF_TYPE_IBSS: | ||
63 | kfree(sdata->u.sta.extra_ie); | ||
64 | kfree(sdata->u.sta.assocreq_ies); | ||
65 | kfree(sdata->u.sta.assocresp_ies); | ||
66 | kfree_skb(sdata->u.sta.probe_resp); | ||
67 | break; | ||
68 | case IEEE80211_IF_TYPE_WDS: | ||
69 | case IEEE80211_IF_TYPE_VLAN: | ||
70 | case IEEE80211_IF_TYPE_MNTR: | ||
71 | break; | ||
72 | case IEEE80211_IF_TYPE_INVALID: | ||
73 | BUG(); | ||
74 | break; | ||
75 | } | ||
76 | |||
77 | flushed = sta_info_flush(local, sdata); | ||
78 | WARN_ON(flushed); | ||
30 | } | 79 | } |
31 | 80 | ||
32 | static void ieee80211_if_sdata_deinit(struct ieee80211_sub_if_data *sdata) | 81 | /* |
82 | * Helper function to initialise an interface to a specific type. | ||
83 | */ | ||
84 | static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, | ||
85 | enum ieee80211_if_types type) | ||
33 | { | 86 | { |
34 | int i; | 87 | struct ieee80211_if_sta *ifsta; |
35 | 88 | ||
36 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { | 89 | /* clear type-dependent union */ |
37 | __skb_queue_purge(&sdata->fragments[i].skb_list); | 90 | memset(&sdata->u, 0, sizeof(sdata->u)); |
91 | |||
92 | /* and set some type-dependent values */ | ||
93 | sdata->vif.type = type; | ||
94 | |||
95 | /* only monitor differs */ | ||
96 | sdata->dev->type = ARPHRD_ETHER; | ||
97 | |||
98 | switch (type) { | ||
99 | case IEEE80211_IF_TYPE_AP: | ||
100 | skb_queue_head_init(&sdata->u.ap.ps_bc_buf); | ||
101 | INIT_LIST_HEAD(&sdata->u.ap.vlans); | ||
102 | break; | ||
103 | case IEEE80211_IF_TYPE_MESH_POINT: | ||
104 | case IEEE80211_IF_TYPE_STA: | ||
105 | case IEEE80211_IF_TYPE_IBSS: | ||
106 | ifsta = &sdata->u.sta; | ||
107 | INIT_WORK(&ifsta->work, ieee80211_sta_work); | ||
108 | setup_timer(&ifsta->timer, ieee80211_sta_timer, | ||
109 | (unsigned long) sdata); | ||
110 | skb_queue_head_init(&ifsta->skb_queue); | ||
111 | |||
112 | ifsta->capab = WLAN_CAPABILITY_ESS; | ||
113 | ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN | | ||
114 | IEEE80211_AUTH_ALG_SHARED_KEY; | ||
115 | ifsta->flags |= IEEE80211_STA_CREATE_IBSS | | ||
116 | IEEE80211_STA_AUTO_BSSID_SEL | | ||
117 | IEEE80211_STA_AUTO_CHANNEL_SEL; | ||
118 | if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4) | ||
119 | ifsta->flags |= IEEE80211_STA_WMM_ENABLED; | ||
120 | |||
121 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
122 | ieee80211_mesh_init_sdata(sdata); | ||
123 | break; | ||
124 | case IEEE80211_IF_TYPE_MNTR: | ||
125 | sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP; | ||
126 | sdata->dev->hard_start_xmit = ieee80211_monitor_start_xmit; | ||
127 | sdata->u.mntr_flags = MONITOR_FLAG_CONTROL | | ||
128 | MONITOR_FLAG_OTHER_BSS; | ||
129 | break; | ||
130 | case IEEE80211_IF_TYPE_WDS: | ||
131 | case IEEE80211_IF_TYPE_VLAN: | ||
132 | break; | ||
133 | case IEEE80211_IF_TYPE_INVALID: | ||
134 | BUG(); | ||
135 | break; | ||
38 | } | 136 | } |
137 | |||
138 | ieee80211_debugfs_add_netdev(sdata); | ||
139 | } | ||
140 | |||
141 | int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | ||
142 | enum ieee80211_if_types type) | ||
143 | { | ||
144 | ASSERT_RTNL(); | ||
145 | |||
146 | if (type == sdata->vif.type) | ||
147 | return 0; | ||
148 | |||
149 | /* | ||
150 | * We could, here, on changes between IBSS/STA/MESH modes, | ||
151 | * invoke an MLME function instead that disassociates etc. | ||
152 | * and goes into the requested mode. | ||
153 | */ | ||
154 | |||
155 | if (netif_running(sdata->dev)) | ||
156 | return -EBUSY; | ||
157 | |||
158 | /* Purge and reset type-dependent state. */ | ||
159 | ieee80211_teardown_sdata(sdata->dev); | ||
160 | ieee80211_setup_sdata(sdata, type); | ||
161 | |||
162 | /* reset some values that shouldn't be kept across type changes */ | ||
163 | sdata->basic_rates = 0; | ||
164 | sdata->drop_unencrypted = 0; | ||
165 | |||
166 | return 0; | ||
39 | } | 167 | } |
40 | 168 | ||
41 | /* Must be called with rtnl lock held. */ | 169 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, |
42 | int ieee80211_if_add(struct net_device *dev, const char *name, | 170 | struct net_device **new_dev, enum ieee80211_if_types type, |
43 | struct net_device **new_dev, int type, | ||
44 | struct vif_params *params) | 171 | struct vif_params *params) |
45 | { | 172 | { |
46 | struct net_device *ndev; | 173 | struct net_device *ndev; |
47 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
48 | struct ieee80211_sub_if_data *sdata = NULL; | 174 | struct ieee80211_sub_if_data *sdata = NULL; |
49 | int ret; | 175 | int ret, i; |
50 | 176 | ||
51 | ASSERT_RTNL(); | 177 | ASSERT_RTNL(); |
178 | |||
52 | ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size, | 179 | ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size, |
53 | name, ieee80211_if_setup); | 180 | name, ieee80211_if_setup); |
54 | if (!ndev) | 181 | if (!ndev) |
@@ -68,26 +195,33 @@ int ieee80211_if_add(struct net_device *dev, const char *name, | |||
68 | goto fail; | 195 | goto fail; |
69 | 196 | ||
70 | memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); | 197 | memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); |
71 | ndev->base_addr = dev->base_addr; | ||
72 | ndev->irq = dev->irq; | ||
73 | ndev->mem_start = dev->mem_start; | ||
74 | ndev->mem_end = dev->mem_end; | ||
75 | SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); | 198 | SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); |
76 | 199 | ||
77 | sdata = IEEE80211_DEV_TO_SUB_IF(ndev); | 200 | /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ |
201 | sdata = netdev_priv(ndev); | ||
78 | ndev->ieee80211_ptr = &sdata->wdev; | 202 | ndev->ieee80211_ptr = &sdata->wdev; |
203 | |||
204 | /* initialise type-independent data */ | ||
79 | sdata->wdev.wiphy = local->hw.wiphy; | 205 | sdata->wdev.wiphy = local->hw.wiphy; |
80 | sdata->vif.type = IEEE80211_IF_TYPE_AP; | ||
81 | sdata->dev = ndev; | ||
82 | sdata->local = local; | 206 | sdata->local = local; |
83 | ieee80211_if_sdata_init(sdata); | 207 | sdata->dev = ndev; |
208 | |||
209 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) | ||
210 | skb_queue_head_init(&sdata->fragments[i].skb_list); | ||
211 | |||
212 | INIT_LIST_HEAD(&sdata->key_list); | ||
213 | |||
214 | sdata->force_unicast_rateidx = -1; | ||
215 | sdata->max_ratectrl_rateidx = -1; | ||
216 | |||
217 | /* setup type-dependent data */ | ||
218 | ieee80211_setup_sdata(sdata, type); | ||
84 | 219 | ||
85 | ret = register_netdevice(ndev); | 220 | ret = register_netdevice(ndev); |
86 | if (ret) | 221 | if (ret) |
87 | goto fail; | 222 | goto fail; |
88 | 223 | ||
89 | ieee80211_debugfs_add_netdev(sdata); | 224 | ndev->uninit = ieee80211_teardown_sdata; |
90 | ieee80211_if_set_type(ndev, type); | ||
91 | 225 | ||
92 | if (ieee80211_vif_is_mesh(&sdata->vif) && | 226 | if (ieee80211_vif_is_mesh(&sdata->vif) && |
93 | params && params->mesh_id_len) | 227 | params && params->mesh_id_len) |
@@ -95,11 +229,6 @@ int ieee80211_if_add(struct net_device *dev, const char *name, | |||
95 | params->mesh_id_len, | 229 | params->mesh_id_len, |
96 | params->mesh_id); | 230 | params->mesh_id); |
97 | 231 | ||
98 | /* we're under RTNL so all this is fine */ | ||
99 | if (unlikely(local->reg_state == IEEE80211_DEV_UNREGISTERED)) { | ||
100 | __ieee80211_if_del(local, sdata); | ||
101 | return -ENODEV; | ||
102 | } | ||
103 | list_add_tail_rcu(&sdata->list, &local->interfaces); | 232 | list_add_tail_rcu(&sdata->list, &local->interfaces); |
104 | 233 | ||
105 | if (new_dev) | 234 | if (new_dev) |
@@ -107,217 +236,34 @@ int ieee80211_if_add(struct net_device *dev, const char *name, | |||
107 | 236 | ||
108 | return 0; | 237 | return 0; |
109 | 238 | ||
110 | fail: | 239 | fail: |
111 | free_netdev(ndev); | 240 | free_netdev(ndev); |
112 | return ret; | 241 | return ret; |
113 | } | 242 | } |
114 | 243 | ||
115 | void ieee80211_if_set_type(struct net_device *dev, int type) | 244 | void ieee80211_if_remove(struct net_device *dev) |
116 | { | ||
117 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
118 | int oldtype = sdata->vif.type; | ||
119 | |||
120 | /* | ||
121 | * We need to call this function on the master interface | ||
122 | * which already has a hard_start_xmit routine assigned | ||
123 | * which must not be changed. | ||
124 | */ | ||
125 | if (dev != sdata->local->mdev) | ||
126 | dev->hard_start_xmit = ieee80211_subif_start_xmit; | ||
127 | |||
128 | /* | ||
129 | * Called even when register_netdevice fails, it would | ||
130 | * oops if assigned before initialising the rest. | ||
131 | */ | ||
132 | dev->uninit = ieee80211_if_reinit; | ||
133 | |||
134 | /* most have no BSS pointer */ | ||
135 | sdata->bss = NULL; | ||
136 | sdata->vif.type = type; | ||
137 | |||
138 | sdata->basic_rates = 0; | ||
139 | |||
140 | switch (type) { | ||
141 | case IEEE80211_IF_TYPE_WDS: | ||
142 | /* nothing special */ | ||
143 | break; | ||
144 | case IEEE80211_IF_TYPE_VLAN: | ||
145 | sdata->u.vlan.ap = NULL; | ||
146 | break; | ||
147 | case IEEE80211_IF_TYPE_AP: | ||
148 | sdata->u.ap.force_unicast_rateidx = -1; | ||
149 | sdata->u.ap.max_ratectrl_rateidx = -1; | ||
150 | skb_queue_head_init(&sdata->u.ap.ps_bc_buf); | ||
151 | sdata->bss = &sdata->u.ap; | ||
152 | INIT_LIST_HEAD(&sdata->u.ap.vlans); | ||
153 | break; | ||
154 | case IEEE80211_IF_TYPE_MESH_POINT: | ||
155 | case IEEE80211_IF_TYPE_STA: | ||
156 | case IEEE80211_IF_TYPE_IBSS: { | ||
157 | struct ieee80211_sub_if_data *msdata; | ||
158 | struct ieee80211_if_sta *ifsta; | ||
159 | |||
160 | ifsta = &sdata->u.sta; | ||
161 | INIT_WORK(&ifsta->work, ieee80211_sta_work); | ||
162 | setup_timer(&ifsta->timer, ieee80211_sta_timer, | ||
163 | (unsigned long) sdata); | ||
164 | skb_queue_head_init(&ifsta->skb_queue); | ||
165 | |||
166 | ifsta->capab = WLAN_CAPABILITY_ESS; | ||
167 | ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN | | ||
168 | IEEE80211_AUTH_ALG_SHARED_KEY; | ||
169 | ifsta->flags |= IEEE80211_STA_CREATE_IBSS | | ||
170 | IEEE80211_STA_WMM_ENABLED | | ||
171 | IEEE80211_STA_AUTO_BSSID_SEL | | ||
172 | IEEE80211_STA_AUTO_CHANNEL_SEL; | ||
173 | |||
174 | msdata = IEEE80211_DEV_TO_SUB_IF(sdata->local->mdev); | ||
175 | sdata->bss = &msdata->u.ap; | ||
176 | |||
177 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
178 | ieee80211_mesh_init_sdata(sdata); | ||
179 | break; | ||
180 | } | ||
181 | case IEEE80211_IF_TYPE_MNTR: | ||
182 | dev->type = ARPHRD_IEEE80211_RADIOTAP; | ||
183 | dev->hard_start_xmit = ieee80211_monitor_start_xmit; | ||
184 | sdata->u.mntr_flags = MONITOR_FLAG_CONTROL | | ||
185 | MONITOR_FLAG_OTHER_BSS; | ||
186 | break; | ||
187 | default: | ||
188 | printk(KERN_WARNING "%s: %s: Unknown interface type 0x%x", | ||
189 | dev->name, __func__, type); | ||
190 | } | ||
191 | ieee80211_debugfs_change_if_type(sdata, oldtype); | ||
192 | } | ||
193 | |||
194 | /* Must be called with rtnl lock held. */ | ||
195 | void ieee80211_if_reinit(struct net_device *dev) | ||
196 | { | 245 | { |
197 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
198 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 246 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
199 | struct sk_buff *skb; | ||
200 | int flushed; | ||
201 | 247 | ||
202 | ASSERT_RTNL(); | 248 | ASSERT_RTNL(); |
203 | 249 | ||
204 | ieee80211_free_keys(sdata); | 250 | list_del_rcu(&sdata->list); |
205 | 251 | synchronize_rcu(); | |
206 | ieee80211_if_sdata_deinit(sdata); | ||
207 | |||
208 | /* Need to handle mesh specially to allow eliding the function call */ | ||
209 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
210 | mesh_rmc_free(dev); | ||
211 | |||
212 | switch (sdata->vif.type) { | ||
213 | case IEEE80211_IF_TYPE_INVALID: | ||
214 | /* cannot happen */ | ||
215 | WARN_ON(1); | ||
216 | break; | ||
217 | case IEEE80211_IF_TYPE_AP: { | ||
218 | /* Remove all virtual interfaces that use this BSS | ||
219 | * as their sdata->bss */ | ||
220 | struct ieee80211_sub_if_data *tsdata, *n; | ||
221 | struct beacon_data *beacon; | ||
222 | |||
223 | list_for_each_entry_safe(tsdata, n, &local->interfaces, list) { | ||
224 | if (tsdata != sdata && tsdata->bss == &sdata->u.ap) { | ||
225 | printk(KERN_DEBUG "%s: removing virtual " | ||
226 | "interface %s because its BSS interface" | ||
227 | " is being removed\n", | ||
228 | sdata->dev->name, tsdata->dev->name); | ||
229 | list_del_rcu(&tsdata->list); | ||
230 | /* | ||
231 | * We have lots of time and can afford | ||
232 | * to sync for each interface | ||
233 | */ | ||
234 | synchronize_rcu(); | ||
235 | __ieee80211_if_del(local, tsdata); | ||
236 | } | ||
237 | } | ||
238 | |||
239 | beacon = sdata->u.ap.beacon; | ||
240 | rcu_assign_pointer(sdata->u.ap.beacon, NULL); | ||
241 | synchronize_rcu(); | ||
242 | kfree(beacon); | ||
243 | |||
244 | while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) { | ||
245 | local->total_ps_buffered--; | ||
246 | dev_kfree_skb(skb); | ||
247 | } | ||
248 | |||
249 | break; | ||
250 | } | ||
251 | case IEEE80211_IF_TYPE_WDS: | ||
252 | /* nothing to do */ | ||
253 | break; | ||
254 | case IEEE80211_IF_TYPE_MESH_POINT: | ||
255 | case IEEE80211_IF_TYPE_STA: | ||
256 | case IEEE80211_IF_TYPE_IBSS: | ||
257 | kfree(sdata->u.sta.extra_ie); | ||
258 | sdata->u.sta.extra_ie = NULL; | ||
259 | kfree(sdata->u.sta.assocreq_ies); | ||
260 | sdata->u.sta.assocreq_ies = NULL; | ||
261 | kfree(sdata->u.sta.assocresp_ies); | ||
262 | sdata->u.sta.assocresp_ies = NULL; | ||
263 | if (sdata->u.sta.probe_resp) { | ||
264 | dev_kfree_skb(sdata->u.sta.probe_resp); | ||
265 | sdata->u.sta.probe_resp = NULL; | ||
266 | } | ||
267 | |||
268 | break; | ||
269 | case IEEE80211_IF_TYPE_MNTR: | ||
270 | dev->type = ARPHRD_ETHER; | ||
271 | break; | ||
272 | case IEEE80211_IF_TYPE_VLAN: | ||
273 | sdata->u.vlan.ap = NULL; | ||
274 | break; | ||
275 | } | ||
276 | |||
277 | flushed = sta_info_flush(local, sdata); | ||
278 | WARN_ON(flushed); | ||
279 | |||
280 | memset(&sdata->u, 0, sizeof(sdata->u)); | ||
281 | ieee80211_if_sdata_init(sdata); | ||
282 | } | ||
283 | |||
284 | /* Must be called with rtnl lock held. */ | ||
285 | void __ieee80211_if_del(struct ieee80211_local *local, | ||
286 | struct ieee80211_sub_if_data *sdata) | ||
287 | { | ||
288 | struct net_device *dev = sdata->dev; | ||
289 | |||
290 | ieee80211_debugfs_remove_netdev(sdata); | ||
291 | unregister_netdevice(dev); | 252 | unregister_netdevice(dev); |
292 | /* Except master interface, the net_device will be freed by | ||
293 | * net_device->destructor (i. e. ieee80211_if_free). */ | ||
294 | } | 253 | } |
295 | 254 | ||
296 | /* Must be called with rtnl lock held. */ | 255 | /* |
297 | int ieee80211_if_remove(struct net_device *dev, const char *name, int id) | 256 | * Remove all interfaces, may only be called at hardware unregistration |
257 | * time because it doesn't do RCU-safe list removals. | ||
258 | */ | ||
259 | void ieee80211_remove_interfaces(struct ieee80211_local *local) | ||
298 | { | 260 | { |
299 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 261 | struct ieee80211_sub_if_data *sdata, *tmp; |
300 | struct ieee80211_sub_if_data *sdata, *n; | ||
301 | 262 | ||
302 | ASSERT_RTNL(); | 263 | ASSERT_RTNL(); |
303 | 264 | ||
304 | list_for_each_entry_safe(sdata, n, &local->interfaces, list) { | 265 | list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { |
305 | if ((sdata->vif.type == id || id == -1) && | 266 | list_del(&sdata->list); |
306 | strcmp(name, sdata->dev->name) == 0 && | 267 | unregister_netdevice(sdata->dev); |
307 | sdata->dev != local->mdev) { | ||
308 | list_del_rcu(&sdata->list); | ||
309 | synchronize_rcu(); | ||
310 | __ieee80211_if_del(local, sdata); | ||
311 | return 0; | ||
312 | } | ||
313 | } | 268 | } |
314 | return -ENODEV; | ||
315 | } | ||
316 | |||
317 | void ieee80211_if_free(struct net_device *dev) | ||
318 | { | ||
319 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
320 | |||
321 | ieee80211_if_sdata_deinit(sdata); | ||
322 | free_netdev(dev); | ||
323 | } | 269 | } |
diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 220e83be3ef4..6597c779e35a 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c | |||
@@ -321,8 +321,15 @@ void ieee80211_key_link(struct ieee80211_key *key, | |||
321 | * some hardware cannot handle TKIP with QoS, so | 321 | * some hardware cannot handle TKIP with QoS, so |
322 | * we indicate whether QoS could be in use. | 322 | * we indicate whether QoS could be in use. |
323 | */ | 323 | */ |
324 | if (sta->flags & WLAN_STA_WME) | 324 | if (test_sta_flags(sta, WLAN_STA_WME)) |
325 | key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA; | 325 | key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA; |
326 | |||
327 | /* | ||
328 | * This key is for a specific sta interface, | ||
329 | * inform the driver that it should try to store | ||
330 | * this key as pairwise key. | ||
331 | */ | ||
332 | key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE; | ||
326 | } else { | 333 | } else { |
327 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { | 334 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { |
328 | struct sta_info *ap; | 335 | struct sta_info *ap; |
@@ -335,7 +342,7 @@ void ieee80211_key_link(struct ieee80211_key *key, | |||
335 | /* same here, the AP could be using QoS */ | 342 | /* same here, the AP could be using QoS */ |
336 | ap = sta_info_get(key->local, key->sdata->u.sta.bssid); | 343 | ap = sta_info_get(key->local, key->sdata->u.sta.bssid); |
337 | if (ap) { | 344 | if (ap) { |
338 | if (ap->flags & WLAN_STA_WME) | 345 | if (test_sta_flags(ap, WLAN_STA_WME)) |
339 | key->conf.flags |= | 346 | key->conf.flags |= |
340 | IEEE80211_KEY_FLAG_WMM_STA; | 347 | IEEE80211_KEY_FLAG_WMM_STA; |
341 | } | 348 | } |
diff --git a/net/mac80211/key.h b/net/mac80211/key.h index f52c3df1fe9a..425816e0996c 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h | |||
@@ -16,31 +16,18 @@ | |||
16 | #include <linux/rcupdate.h> | 16 | #include <linux/rcupdate.h> |
17 | #include <net/mac80211.h> | 17 | #include <net/mac80211.h> |
18 | 18 | ||
19 | /* ALG_TKIP | 19 | #define WEP_IV_LEN 4 |
20 | * struct ieee80211_key::key is encoded as a 256-bit (32 byte) data block: | 20 | #define WEP_ICV_LEN 4 |
21 | * Temporal Encryption Key (128 bits) | 21 | #define ALG_TKIP_KEY_LEN 32 |
22 | * Temporal Authenticator Tx MIC Key (64 bits) | 22 | #define ALG_CCMP_KEY_LEN 16 |
23 | * Temporal Authenticator Rx MIC Key (64 bits) | 23 | #define CCMP_HDR_LEN 8 |
24 | */ | 24 | #define CCMP_MIC_LEN 8 |
25 | 25 | #define CCMP_TK_LEN 16 | |
26 | #define WEP_IV_LEN 4 | 26 | #define CCMP_PN_LEN 6 |
27 | #define WEP_ICV_LEN 4 | 27 | #define TKIP_IV_LEN 8 |
28 | 28 | #define TKIP_ICV_LEN 4 | |
29 | #define ALG_TKIP_KEY_LEN 32 | 29 | |
30 | /* Starting offsets for each key */ | 30 | #define NUM_RX_DATA_QUEUES 17 |
31 | #define ALG_TKIP_TEMP_ENCR_KEY 0 | ||
32 | #define ALG_TKIP_TEMP_AUTH_TX_MIC_KEY 16 | ||
33 | #define ALG_TKIP_TEMP_AUTH_RX_MIC_KEY 24 | ||
34 | #define TKIP_IV_LEN 8 | ||
35 | #define TKIP_ICV_LEN 4 | ||
36 | |||
37 | #define ALG_CCMP_KEY_LEN 16 | ||
38 | #define CCMP_HDR_LEN 8 | ||
39 | #define CCMP_MIC_LEN 8 | ||
40 | #define CCMP_TK_LEN 16 | ||
41 | #define CCMP_PN_LEN 6 | ||
42 | |||
43 | #define NUM_RX_DATA_QUEUES 17 | ||
44 | 31 | ||
45 | struct ieee80211_local; | 32 | struct ieee80211_local; |
46 | struct ieee80211_sub_if_data; | 33 | struct ieee80211_sub_if_data; |
@@ -69,6 +56,13 @@ enum ieee80211_internal_key_flags { | |||
69 | KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5), | 56 | KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5), |
70 | }; | 57 | }; |
71 | 58 | ||
59 | struct tkip_ctx { | ||
60 | u32 iv32; | ||
61 | u16 iv16; | ||
62 | u16 p1k[5]; | ||
63 | int initialized; | ||
64 | }; | ||
65 | |||
72 | struct ieee80211_key { | 66 | struct ieee80211_key { |
73 | struct ieee80211_local *local; | 67 | struct ieee80211_local *local; |
74 | struct ieee80211_sub_if_data *sdata; | 68 | struct ieee80211_sub_if_data *sdata; |
@@ -85,16 +79,10 @@ struct ieee80211_key { | |||
85 | union { | 79 | union { |
86 | struct { | 80 | struct { |
87 | /* last used TSC */ | 81 | /* last used TSC */ |
88 | u32 iv32; | 82 | struct tkip_ctx tx; |
89 | u16 iv16; | ||
90 | u16 p1k[5]; | ||
91 | int tx_initialized; | ||
92 | 83 | ||
93 | /* last received RSC */ | 84 | /* last received RSC */ |
94 | u32 iv32_rx[NUM_RX_DATA_QUEUES]; | 85 | struct tkip_ctx rx[NUM_RX_DATA_QUEUES]; |
95 | u16 iv16_rx[NUM_RX_DATA_QUEUES]; | ||
96 | u16 p1k_rx[NUM_RX_DATA_QUEUES][5]; | ||
97 | int rx_initialized[NUM_RX_DATA_QUEUES]; | ||
98 | } tkip; | 86 | } tkip; |
99 | struct { | 87 | struct { |
100 | u8 tx_pn[6]; | 88 | u8 tx_pn[6]; |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index df0836ff1a20..f1a83d450ea0 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -35,8 +35,6 @@ | |||
35 | #include "debugfs.h" | 35 | #include "debugfs.h" |
36 | #include "debugfs_netdev.h" | 36 | #include "debugfs_netdev.h" |
37 | 37 | ||
38 | #define SUPP_MCS_SET_LEN 16 | ||
39 | |||
40 | /* | 38 | /* |
41 | * For seeing transmitted packets on monitor interfaces | 39 | * For seeing transmitted packets on monitor interfaces |
42 | * we have a radiotap header too. | 40 | * we have a radiotap header too. |
@@ -107,12 +105,18 @@ static int ieee80211_master_open(struct net_device *dev) | |||
107 | 105 | ||
108 | /* we hold the RTNL here so can safely walk the list */ | 106 | /* we hold the RTNL here so can safely walk the list */ |
109 | list_for_each_entry(sdata, &local->interfaces, list) { | 107 | list_for_each_entry(sdata, &local->interfaces, list) { |
110 | if (sdata->dev != dev && netif_running(sdata->dev)) { | 108 | if (netif_running(sdata->dev)) { |
111 | res = 0; | 109 | res = 0; |
112 | break; | 110 | break; |
113 | } | 111 | } |
114 | } | 112 | } |
115 | return res; | 113 | |
114 | if (res) | ||
115 | return res; | ||
116 | |||
117 | netif_tx_start_all_queues(local->mdev); | ||
118 | |||
119 | return 0; | ||
116 | } | 120 | } |
117 | 121 | ||
118 | static int ieee80211_master_stop(struct net_device *dev) | 122 | static int ieee80211_master_stop(struct net_device *dev) |
@@ -122,7 +126,7 @@ static int ieee80211_master_stop(struct net_device *dev) | |||
122 | 126 | ||
123 | /* we hold the RTNL here so can safely walk the list */ | 127 | /* we hold the RTNL here so can safely walk the list */ |
124 | list_for_each_entry(sdata, &local->interfaces, list) | 128 | list_for_each_entry(sdata, &local->interfaces, list) |
125 | if (sdata->dev != dev && netif_running(sdata->dev)) | 129 | if (netif_running(sdata->dev)) |
126 | dev_close(sdata->dev); | 130 | dev_close(sdata->dev); |
127 | 131 | ||
128 | return 0; | 132 | return 0; |
@@ -147,9 +151,7 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) | |||
147 | /* FIX: what would be proper limits for MTU? | 151 | /* FIX: what would be proper limits for MTU? |
148 | * This interface uses 802.3 frames. */ | 152 | * This interface uses 802.3 frames. */ |
149 | if (new_mtu < 256 || | 153 | if (new_mtu < 256 || |
150 | new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) { | 154 | new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) { |
151 | printk(KERN_WARNING "%s: invalid MTU %d\n", | ||
152 | dev->name, new_mtu); | ||
153 | return -EINVAL; | 155 | return -EINVAL; |
154 | } | 156 | } |
155 | 157 | ||
@@ -180,10 +182,11 @@ static int ieee80211_open(struct net_device *dev) | |||
180 | { | 182 | { |
181 | struct ieee80211_sub_if_data *sdata, *nsdata; | 183 | struct ieee80211_sub_if_data *sdata, *nsdata; |
182 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 184 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
185 | struct sta_info *sta; | ||
183 | struct ieee80211_if_init_conf conf; | 186 | struct ieee80211_if_init_conf conf; |
187 | u32 changed = 0; | ||
184 | int res; | 188 | int res; |
185 | bool need_hw_reconfig = 0; | 189 | bool need_hw_reconfig = 0; |
186 | struct sta_info *sta; | ||
187 | 190 | ||
188 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 191 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
189 | 192 | ||
@@ -191,7 +194,7 @@ static int ieee80211_open(struct net_device *dev) | |||
191 | list_for_each_entry(nsdata, &local->interfaces, list) { | 194 | list_for_each_entry(nsdata, &local->interfaces, list) { |
192 | struct net_device *ndev = nsdata->dev; | 195 | struct net_device *ndev = nsdata->dev; |
193 | 196 | ||
194 | if (ndev != dev && ndev != local->mdev && netif_running(ndev)) { | 197 | if (ndev != dev && netif_running(ndev)) { |
195 | /* | 198 | /* |
196 | * Allow only a single IBSS interface to be up at any | 199 | * Allow only a single IBSS interface to be up at any |
197 | * time. This is restricted because beacon distribution | 200 | * time. This is restricted because beacon distribution |
@@ -207,30 +210,6 @@ static int ieee80211_open(struct net_device *dev) | |||
207 | return -EBUSY; | 210 | return -EBUSY; |
208 | 211 | ||
209 | /* | 212 | /* |
210 | * Disallow multiple IBSS/STA mode interfaces. | ||
211 | * | ||
212 | * This is a technical restriction, it is possible although | ||
213 | * most likely not IEEE 802.11 compliant to have multiple | ||
214 | * STAs with just a single hardware (the TSF timer will not | ||
215 | * be adjusted properly.) | ||
216 | * | ||
217 | * However, because mac80211 uses the master device's BSS | ||
218 | * information for each STA/IBSS interface, doing this will | ||
219 | * currently corrupt that BSS information completely, unless, | ||
220 | * a not very useful case, both STAs are associated to the | ||
221 | * same BSS. | ||
222 | * | ||
223 | * To remove this restriction, the BSS information needs to | ||
224 | * be embedded in the STA/IBSS mode sdata instead of using | ||
225 | * the master device's BSS structure. | ||
226 | */ | ||
227 | if ((sdata->vif.type == IEEE80211_IF_TYPE_STA || | ||
228 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) && | ||
229 | (nsdata->vif.type == IEEE80211_IF_TYPE_STA || | ||
230 | nsdata->vif.type == IEEE80211_IF_TYPE_IBSS)) | ||
231 | return -EBUSY; | ||
232 | |||
233 | /* | ||
234 | * The remaining checks are only performed for interfaces | 213 | * The remaining checks are only performed for interfaces |
235 | * with the same MAC address. | 214 | * with the same MAC address. |
236 | */ | 215 | */ |
@@ -249,7 +228,7 @@ static int ieee80211_open(struct net_device *dev) | |||
249 | */ | 228 | */ |
250 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN && | 229 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN && |
251 | nsdata->vif.type == IEEE80211_IF_TYPE_AP) | 230 | nsdata->vif.type == IEEE80211_IF_TYPE_AP) |
252 | sdata->u.vlan.ap = nsdata; | 231 | sdata->bss = &nsdata->u.ap; |
253 | } | 232 | } |
254 | } | 233 | } |
255 | 234 | ||
@@ -259,10 +238,13 @@ static int ieee80211_open(struct net_device *dev) | |||
259 | return -ENOLINK; | 238 | return -ENOLINK; |
260 | break; | 239 | break; |
261 | case IEEE80211_IF_TYPE_VLAN: | 240 | case IEEE80211_IF_TYPE_VLAN: |
262 | if (!sdata->u.vlan.ap) | 241 | if (!sdata->bss) |
263 | return -ENOLINK; | 242 | return -ENOLINK; |
243 | list_add(&sdata->u.vlan.list, &sdata->bss->vlans); | ||
264 | break; | 244 | break; |
265 | case IEEE80211_IF_TYPE_AP: | 245 | case IEEE80211_IF_TYPE_AP: |
246 | sdata->bss = &sdata->u.ap; | ||
247 | break; | ||
266 | case IEEE80211_IF_TYPE_STA: | 248 | case IEEE80211_IF_TYPE_STA: |
267 | case IEEE80211_IF_TYPE_MNTR: | 249 | case IEEE80211_IF_TYPE_MNTR: |
268 | case IEEE80211_IF_TYPE_IBSS: | 250 | case IEEE80211_IF_TYPE_IBSS: |
@@ -280,14 +262,13 @@ static int ieee80211_open(struct net_device *dev) | |||
280 | if (local->ops->start) | 262 | if (local->ops->start) |
281 | res = local->ops->start(local_to_hw(local)); | 263 | res = local->ops->start(local_to_hw(local)); |
282 | if (res) | 264 | if (res) |
283 | return res; | 265 | goto err_del_bss; |
284 | need_hw_reconfig = 1; | 266 | need_hw_reconfig = 1; |
285 | ieee80211_led_radio(local, local->hw.conf.radio_enabled); | 267 | ieee80211_led_radio(local, local->hw.conf.radio_enabled); |
286 | } | 268 | } |
287 | 269 | ||
288 | switch (sdata->vif.type) { | 270 | switch (sdata->vif.type) { |
289 | case IEEE80211_IF_TYPE_VLAN: | 271 | case IEEE80211_IF_TYPE_VLAN: |
290 | list_add(&sdata->u.vlan.list, &sdata->u.vlan.ap->u.ap.vlans); | ||
291 | /* no need to tell driver */ | 272 | /* no need to tell driver */ |
292 | break; | 273 | break; |
293 | case IEEE80211_IF_TYPE_MNTR: | 274 | case IEEE80211_IF_TYPE_MNTR: |
@@ -310,9 +291,9 @@ static int ieee80211_open(struct net_device *dev) | |||
310 | if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) | 291 | if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) |
311 | local->fif_other_bss++; | 292 | local->fif_other_bss++; |
312 | 293 | ||
313 | netif_tx_lock_bh(local->mdev); | 294 | netif_addr_lock_bh(local->mdev); |
314 | ieee80211_configure_filter(local); | 295 | ieee80211_configure_filter(local); |
315 | netif_tx_unlock_bh(local->mdev); | 296 | netif_addr_unlock_bh(local->mdev); |
316 | break; | 297 | break; |
317 | case IEEE80211_IF_TYPE_STA: | 298 | case IEEE80211_IF_TYPE_STA: |
318 | case IEEE80211_IF_TYPE_IBSS: | 299 | case IEEE80211_IF_TYPE_IBSS: |
@@ -326,8 +307,10 @@ static int ieee80211_open(struct net_device *dev) | |||
326 | if (res) | 307 | if (res) |
327 | goto err_stop; | 308 | goto err_stop; |
328 | 309 | ||
329 | ieee80211_if_config(dev); | 310 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
330 | ieee80211_reset_erp_info(dev); | 311 | ieee80211_start_mesh(sdata->dev); |
312 | changed |= ieee80211_reset_erp_info(dev); | ||
313 | ieee80211_bss_info_change_notify(sdata, changed); | ||
331 | ieee80211_enable_keys(sdata); | 314 | ieee80211_enable_keys(sdata); |
332 | 315 | ||
333 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA && | 316 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA && |
@@ -346,6 +329,7 @@ static int ieee80211_open(struct net_device *dev) | |||
346 | goto err_del_interface; | 329 | goto err_del_interface; |
347 | } | 330 | } |
348 | 331 | ||
332 | /* no locking required since STA is not live yet */ | ||
349 | sta->flags |= WLAN_STA_AUTHORIZED; | 333 | sta->flags |= WLAN_STA_AUTHORIZED; |
350 | 334 | ||
351 | res = sta_info_insert(sta); | 335 | res = sta_info_insert(sta); |
@@ -385,13 +369,13 @@ static int ieee80211_open(struct net_device *dev) | |||
385 | * yet be effective. Trigger execution of ieee80211_sta_work | 369 | * yet be effective. Trigger execution of ieee80211_sta_work |
386 | * to fix this. | 370 | * to fix this. |
387 | */ | 371 | */ |
388 | if(sdata->vif.type == IEEE80211_IF_TYPE_STA || | 372 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || |
389 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | 373 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { |
390 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 374 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
391 | queue_work(local->hw.workqueue, &ifsta->work); | 375 | queue_work(local->hw.workqueue, &ifsta->work); |
392 | } | 376 | } |
393 | 377 | ||
394 | netif_start_queue(dev); | 378 | netif_tx_start_all_queues(dev); |
395 | 379 | ||
396 | return 0; | 380 | return 0; |
397 | err_del_interface: | 381 | err_del_interface: |
@@ -399,6 +383,10 @@ static int ieee80211_open(struct net_device *dev) | |||
399 | err_stop: | 383 | err_stop: |
400 | if (!local->open_count && local->ops->stop) | 384 | if (!local->open_count && local->ops->stop) |
401 | local->ops->stop(local_to_hw(local)); | 385 | local->ops->stop(local_to_hw(local)); |
386 | err_del_bss: | ||
387 | sdata->bss = NULL; | ||
388 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) | ||
389 | list_del(&sdata->u.vlan.list); | ||
402 | return res; | 390 | return res; |
403 | } | 391 | } |
404 | 392 | ||
@@ -412,7 +400,7 @@ static int ieee80211_stop(struct net_device *dev) | |||
412 | /* | 400 | /* |
413 | * Stop TX on this interface first. | 401 | * Stop TX on this interface first. |
414 | */ | 402 | */ |
415 | netif_stop_queue(dev); | 403 | netif_tx_stop_all_queues(dev); |
416 | 404 | ||
417 | /* | 405 | /* |
418 | * Now delete all active aggregation sessions. | 406 | * Now delete all active aggregation sessions. |
@@ -481,7 +469,6 @@ static int ieee80211_stop(struct net_device *dev) | |||
481 | switch (sdata->vif.type) { | 469 | switch (sdata->vif.type) { |
482 | case IEEE80211_IF_TYPE_VLAN: | 470 | case IEEE80211_IF_TYPE_VLAN: |
483 | list_del(&sdata->u.vlan.list); | 471 | list_del(&sdata->u.vlan.list); |
484 | sdata->u.vlan.ap = NULL; | ||
485 | /* no need to tell driver */ | 472 | /* no need to tell driver */ |
486 | break; | 473 | break; |
487 | case IEEE80211_IF_TYPE_MNTR: | 474 | case IEEE80211_IF_TYPE_MNTR: |
@@ -503,9 +490,9 @@ static int ieee80211_stop(struct net_device *dev) | |||
503 | if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) | 490 | if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) |
504 | local->fif_other_bss--; | 491 | local->fif_other_bss--; |
505 | 492 | ||
506 | netif_tx_lock_bh(local->mdev); | 493 | netif_addr_lock_bh(local->mdev); |
507 | ieee80211_configure_filter(local); | 494 | ieee80211_configure_filter(local); |
508 | netif_tx_unlock_bh(local->mdev); | 495 | netif_addr_unlock_bh(local->mdev); |
509 | break; | 496 | break; |
510 | case IEEE80211_IF_TYPE_MESH_POINT: | 497 | case IEEE80211_IF_TYPE_MESH_POINT: |
511 | case IEEE80211_IF_TYPE_STA: | 498 | case IEEE80211_IF_TYPE_STA: |
@@ -544,6 +531,8 @@ static int ieee80211_stop(struct net_device *dev) | |||
544 | local->ops->remove_interface(local_to_hw(local), &conf); | 531 | local->ops->remove_interface(local_to_hw(local), &conf); |
545 | } | 532 | } |
546 | 533 | ||
534 | sdata->bss = NULL; | ||
535 | |||
547 | if (local->open_count == 0) { | 536 | if (local->open_count == 0) { |
548 | if (netif_running(local->mdev)) | 537 | if (netif_running(local->mdev)) |
549 | dev_close(local->mdev); | 538 | dev_close(local->mdev); |
@@ -584,17 +573,19 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) | |||
584 | 573 | ||
585 | sta = sta_info_get(local, ra); | 574 | sta = sta_info_get(local, ra); |
586 | if (!sta) { | 575 | if (!sta) { |
576 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
587 | printk(KERN_DEBUG "Could not find the station\n"); | 577 | printk(KERN_DEBUG "Could not find the station\n"); |
588 | rcu_read_unlock(); | 578 | #endif |
589 | return -ENOENT; | 579 | ret = -ENOENT; |
580 | goto exit; | ||
590 | } | 581 | } |
591 | 582 | ||
592 | spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); | 583 | spin_lock_bh(&sta->lock); |
593 | 584 | ||
594 | /* we have tried too many times, receiver does not want A-MPDU */ | 585 | /* we have tried too many times, receiver does not want A-MPDU */ |
595 | if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { | 586 | if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { |
596 | ret = -EBUSY; | 587 | ret = -EBUSY; |
597 | goto start_ba_exit; | 588 | goto err_unlock_sta; |
598 | } | 589 | } |
599 | 590 | ||
600 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 591 | state = &sta->ampdu_mlme.tid_state_tx[tid]; |
@@ -605,18 +596,20 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) | |||
605 | "idle on tid %u\n", tid); | 596 | "idle on tid %u\n", tid); |
606 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 597 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
607 | ret = -EAGAIN; | 598 | ret = -EAGAIN; |
608 | goto start_ba_exit; | 599 | goto err_unlock_sta; |
609 | } | 600 | } |
610 | 601 | ||
611 | /* prepare A-MPDU MLME for Tx aggregation */ | 602 | /* prepare A-MPDU MLME for Tx aggregation */ |
612 | sta->ampdu_mlme.tid_tx[tid] = | 603 | sta->ampdu_mlme.tid_tx[tid] = |
613 | kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); | 604 | kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); |
614 | if (!sta->ampdu_mlme.tid_tx[tid]) { | 605 | if (!sta->ampdu_mlme.tid_tx[tid]) { |
606 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
615 | if (net_ratelimit()) | 607 | if (net_ratelimit()) |
616 | printk(KERN_ERR "allocate tx mlme to tid %d failed\n", | 608 | printk(KERN_ERR "allocate tx mlme to tid %d failed\n", |
617 | tid); | 609 | tid); |
610 | #endif | ||
618 | ret = -ENOMEM; | 611 | ret = -ENOMEM; |
619 | goto start_ba_exit; | 612 | goto err_unlock_sta; |
620 | } | 613 | } |
621 | /* Tx timer */ | 614 | /* Tx timer */ |
622 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = | 615 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = |
@@ -625,10 +618,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) | |||
625 | (unsigned long)&sta->timer_to_tid[tid]; | 618 | (unsigned long)&sta->timer_to_tid[tid]; |
626 | init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | 619 | init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); |
627 | 620 | ||
628 | /* ensure that TX flow won't interrupt us | ||
629 | * until the end of the call to requeue function */ | ||
630 | spin_lock_bh(&local->mdev->queue_lock); | ||
631 | |||
632 | /* create a new queue for this aggregation */ | 621 | /* create a new queue for this aggregation */ |
633 | ret = ieee80211_ht_agg_queue_add(local, sta, tid); | 622 | ret = ieee80211_ht_agg_queue_add(local, sta, tid); |
634 | 623 | ||
@@ -639,7 +628,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) | |||
639 | printk(KERN_DEBUG "BA request denied - queue unavailable for" | 628 | printk(KERN_DEBUG "BA request denied - queue unavailable for" |
640 | " tid %d\n", tid); | 629 | " tid %d\n", tid); |
641 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 630 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
642 | goto start_ba_err; | 631 | goto err_unlock_queue; |
643 | } | 632 | } |
644 | sdata = sta->sdata; | 633 | sdata = sta->sdata; |
645 | 634 | ||
@@ -655,18 +644,18 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) | |||
655 | /* No need to requeue the packets in the agg queue, since we | 644 | /* No need to requeue the packets in the agg queue, since we |
656 | * held the tx lock: no packet could be enqueued to the newly | 645 | * held the tx lock: no packet could be enqueued to the newly |
657 | * allocated queue */ | 646 | * allocated queue */ |
658 | ieee80211_ht_agg_queue_remove(local, sta, tid, 0); | 647 | ieee80211_ht_agg_queue_remove(local, sta, tid, 0); |
659 | #ifdef CONFIG_MAC80211_HT_DEBUG | 648 | #ifdef CONFIG_MAC80211_HT_DEBUG |
660 | printk(KERN_DEBUG "BA request denied - HW unavailable for" | 649 | printk(KERN_DEBUG "BA request denied - HW unavailable for" |
661 | " tid %d\n", tid); | 650 | " tid %d\n", tid); |
662 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 651 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
663 | *state = HT_AGG_STATE_IDLE; | 652 | *state = HT_AGG_STATE_IDLE; |
664 | goto start_ba_err; | 653 | goto err_unlock_queue; |
665 | } | 654 | } |
666 | 655 | ||
667 | /* Will put all the packets in the new SW queue */ | 656 | /* Will put all the packets in the new SW queue */ |
668 | ieee80211_requeue(local, ieee802_1d_to_ac[tid]); | 657 | ieee80211_requeue(local, ieee802_1d_to_ac[tid]); |
669 | spin_unlock_bh(&local->mdev->queue_lock); | 658 | spin_unlock_bh(&sta->lock); |
670 | 659 | ||
671 | /* send an addBA request */ | 660 | /* send an addBA request */ |
672 | sta->ampdu_mlme.dialog_token_allocator++; | 661 | sta->ampdu_mlme.dialog_token_allocator++; |
@@ -674,25 +663,27 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) | |||
674 | sta->ampdu_mlme.dialog_token_allocator; | 663 | sta->ampdu_mlme.dialog_token_allocator; |
675 | sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; | 664 | sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; |
676 | 665 | ||
666 | |||
677 | ieee80211_send_addba_request(sta->sdata->dev, ra, tid, | 667 | ieee80211_send_addba_request(sta->sdata->dev, ra, tid, |
678 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, | 668 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, |
679 | sta->ampdu_mlme.tid_tx[tid]->ssn, | 669 | sta->ampdu_mlme.tid_tx[tid]->ssn, |
680 | 0x40, 5000); | 670 | 0x40, 5000); |
681 | |||
682 | /* activate the timer for the recipient's addBA response */ | 671 | /* activate the timer for the recipient's addBA response */ |
683 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = | 672 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = |
684 | jiffies + ADDBA_RESP_INTERVAL; | 673 | jiffies + ADDBA_RESP_INTERVAL; |
685 | add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | 674 | add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); |
675 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
686 | printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); | 676 | printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); |
687 | goto start_ba_exit; | 677 | #endif |
678 | goto exit; | ||
688 | 679 | ||
689 | start_ba_err: | 680 | err_unlock_queue: |
690 | kfree(sta->ampdu_mlme.tid_tx[tid]); | 681 | kfree(sta->ampdu_mlme.tid_tx[tid]); |
691 | sta->ampdu_mlme.tid_tx[tid] = NULL; | 682 | sta->ampdu_mlme.tid_tx[tid] = NULL; |
692 | spin_unlock_bh(&local->mdev->queue_lock); | ||
693 | ret = -EBUSY; | 683 | ret = -EBUSY; |
694 | start_ba_exit: | 684 | err_unlock_sta: |
695 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | 685 | spin_unlock_bh(&sta->lock); |
686 | exit: | ||
696 | rcu_read_unlock(); | 687 | rcu_read_unlock(); |
697 | return ret; | 688 | return ret; |
698 | } | 689 | } |
@@ -720,7 +711,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw, | |||
720 | 711 | ||
721 | /* check if the TID is in aggregation */ | 712 | /* check if the TID is in aggregation */ |
722 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 713 | state = &sta->ampdu_mlme.tid_state_tx[tid]; |
723 | spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); | 714 | spin_lock_bh(&sta->lock); |
724 | 715 | ||
725 | if (*state != HT_AGG_STATE_OPERATIONAL) { | 716 | if (*state != HT_AGG_STATE_OPERATIONAL) { |
726 | ret = -ENOENT; | 717 | ret = -ENOENT; |
@@ -750,7 +741,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw, | |||
750 | } | 741 | } |
751 | 742 | ||
752 | stop_BA_exit: | 743 | stop_BA_exit: |
753 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | 744 | spin_unlock_bh(&sta->lock); |
754 | rcu_read_unlock(); | 745 | rcu_read_unlock(); |
755 | return ret; | 746 | return ret; |
756 | } | 747 | } |
@@ -764,8 +755,10 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) | |||
764 | DECLARE_MAC_BUF(mac); | 755 | DECLARE_MAC_BUF(mac); |
765 | 756 | ||
766 | if (tid >= STA_TID_NUM) { | 757 | if (tid >= STA_TID_NUM) { |
758 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
767 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", | 759 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", |
768 | tid, STA_TID_NUM); | 760 | tid, STA_TID_NUM); |
761 | #endif | ||
769 | return; | 762 | return; |
770 | } | 763 | } |
771 | 764 | ||
@@ -773,18 +766,22 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) | |||
773 | sta = sta_info_get(local, ra); | 766 | sta = sta_info_get(local, ra); |
774 | if (!sta) { | 767 | if (!sta) { |
775 | rcu_read_unlock(); | 768 | rcu_read_unlock(); |
769 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
776 | printk(KERN_DEBUG "Could not find station: %s\n", | 770 | printk(KERN_DEBUG "Could not find station: %s\n", |
777 | print_mac(mac, ra)); | 771 | print_mac(mac, ra)); |
772 | #endif | ||
778 | return; | 773 | return; |
779 | } | 774 | } |
780 | 775 | ||
781 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 776 | state = &sta->ampdu_mlme.tid_state_tx[tid]; |
782 | spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); | 777 | spin_lock_bh(&sta->lock); |
783 | 778 | ||
784 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | 779 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { |
780 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
785 | printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", | 781 | printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", |
786 | *state); | 782 | *state); |
787 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | 783 | #endif |
784 | spin_unlock_bh(&sta->lock); | ||
788 | rcu_read_unlock(); | 785 | rcu_read_unlock(); |
789 | return; | 786 | return; |
790 | } | 787 | } |
@@ -794,10 +791,12 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) | |||
794 | *state |= HT_ADDBA_DRV_READY_MSK; | 791 | *state |= HT_ADDBA_DRV_READY_MSK; |
795 | 792 | ||
796 | if (*state == HT_AGG_STATE_OPERATIONAL) { | 793 | if (*state == HT_AGG_STATE_OPERATIONAL) { |
794 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
797 | printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); | 795 | printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); |
796 | #endif | ||
798 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | 797 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); |
799 | } | 798 | } |
800 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | 799 | spin_unlock_bh(&sta->lock); |
801 | rcu_read_unlock(); | 800 | rcu_read_unlock(); |
802 | } | 801 | } |
803 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); | 802 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); |
@@ -811,8 +810,10 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid) | |||
811 | DECLARE_MAC_BUF(mac); | 810 | DECLARE_MAC_BUF(mac); |
812 | 811 | ||
813 | if (tid >= STA_TID_NUM) { | 812 | if (tid >= STA_TID_NUM) { |
813 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
814 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", | 814 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", |
815 | tid, STA_TID_NUM); | 815 | tid, STA_TID_NUM); |
816 | #endif | ||
816 | return; | 817 | return; |
817 | } | 818 | } |
818 | 819 | ||
@@ -824,17 +825,23 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid) | |||
824 | rcu_read_lock(); | 825 | rcu_read_lock(); |
825 | sta = sta_info_get(local, ra); | 826 | sta = sta_info_get(local, ra); |
826 | if (!sta) { | 827 | if (!sta) { |
828 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
827 | printk(KERN_DEBUG "Could not find station: %s\n", | 829 | printk(KERN_DEBUG "Could not find station: %s\n", |
828 | print_mac(mac, ra)); | 830 | print_mac(mac, ra)); |
831 | #endif | ||
829 | rcu_read_unlock(); | 832 | rcu_read_unlock(); |
830 | return; | 833 | return; |
831 | } | 834 | } |
832 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 835 | state = &sta->ampdu_mlme.tid_state_tx[tid]; |
833 | 836 | ||
834 | spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); | 837 | /* NOTE: no need to use sta->lock in this state check, as |
838 | * ieee80211_stop_tx_ba_session will let only one stop call to | ||
839 | * pass through per sta/tid | ||
840 | */ | ||
835 | if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { | 841 | if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { |
842 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
836 | printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); | 843 | printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); |
837 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | 844 | #endif |
838 | rcu_read_unlock(); | 845 | rcu_read_unlock(); |
839 | return; | 846 | return; |
840 | } | 847 | } |
@@ -845,23 +852,20 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid) | |||
845 | 852 | ||
846 | agg_queue = sta->tid_to_tx_q[tid]; | 853 | agg_queue = sta->tid_to_tx_q[tid]; |
847 | 854 | ||
848 | /* avoid ordering issues: we are the only one that can modify | ||
849 | * the content of the qdiscs */ | ||
850 | spin_lock_bh(&local->mdev->queue_lock); | ||
851 | /* remove the queue for this aggregation */ | ||
852 | ieee80211_ht_agg_queue_remove(local, sta, tid, 1); | 855 | ieee80211_ht_agg_queue_remove(local, sta, tid, 1); |
853 | spin_unlock_bh(&local->mdev->queue_lock); | ||
854 | 856 | ||
855 | /* we just requeued the all the frames that were in the removed | 857 | /* We just requeued the all the frames that were in the |
856 | * queue, and since we might miss a softirq we do netif_schedule. | 858 | * removed queue, and since we might miss a softirq we do |
857 | * ieee80211_wake_queue is not used here as this queue is not | 859 | * netif_schedule_queue. ieee80211_wake_queue is not used |
858 | * necessarily stopped */ | 860 | * here as this queue is not necessarily stopped |
859 | netif_schedule(local->mdev); | 861 | */ |
862 | netif_schedule_queue(netdev_get_tx_queue(local->mdev, agg_queue)); | ||
863 | spin_lock_bh(&sta->lock); | ||
860 | *state = HT_AGG_STATE_IDLE; | 864 | *state = HT_AGG_STATE_IDLE; |
861 | sta->ampdu_mlme.addba_req_num[tid] = 0; | 865 | sta->ampdu_mlme.addba_req_num[tid] = 0; |
862 | kfree(sta->ampdu_mlme.tid_tx[tid]); | 866 | kfree(sta->ampdu_mlme.tid_tx[tid]); |
863 | sta->ampdu_mlme.tid_tx[tid] = NULL; | 867 | sta->ampdu_mlme.tid_tx[tid] = NULL; |
864 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | 868 | spin_unlock_bh(&sta->lock); |
865 | 869 | ||
866 | rcu_read_unlock(); | 870 | rcu_read_unlock(); |
867 | } | 871 | } |
@@ -875,9 +879,11 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, | |||
875 | struct sk_buff *skb = dev_alloc_skb(0); | 879 | struct sk_buff *skb = dev_alloc_skb(0); |
876 | 880 | ||
877 | if (unlikely(!skb)) { | 881 | if (unlikely(!skb)) { |
882 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
878 | if (net_ratelimit()) | 883 | if (net_ratelimit()) |
879 | printk(KERN_WARNING "%s: Not enough memory, " | 884 | printk(KERN_WARNING "%s: Not enough memory, " |
880 | "dropping start BA session", skb->dev->name); | 885 | "dropping start BA session", skb->dev->name); |
886 | #endif | ||
881 | return; | 887 | return; |
882 | } | 888 | } |
883 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | 889 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; |
@@ -898,9 +904,11 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, | |||
898 | struct sk_buff *skb = dev_alloc_skb(0); | 904 | struct sk_buff *skb = dev_alloc_skb(0); |
899 | 905 | ||
900 | if (unlikely(!skb)) { | 906 | if (unlikely(!skb)) { |
907 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
901 | if (net_ratelimit()) | 908 | if (net_ratelimit()) |
902 | printk(KERN_WARNING "%s: Not enough memory, " | 909 | printk(KERN_WARNING "%s: Not enough memory, " |
903 | "dropping stop BA session", skb->dev->name); | 910 | "dropping stop BA session", skb->dev->name); |
911 | #endif | ||
904 | return; | 912 | return; |
905 | } | 913 | } |
906 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | 914 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; |
@@ -951,7 +959,6 @@ static const struct header_ops ieee80211_header_ops = { | |||
951 | .cache_update = eth_header_cache_update, | 959 | .cache_update = eth_header_cache_update, |
952 | }; | 960 | }; |
953 | 961 | ||
954 | /* Must not be called for mdev */ | ||
955 | void ieee80211_if_setup(struct net_device *dev) | 962 | void ieee80211_if_setup(struct net_device *dev) |
956 | { | 963 | { |
957 | ether_setup(dev); | 964 | ether_setup(dev); |
@@ -961,67 +968,52 @@ void ieee80211_if_setup(struct net_device *dev) | |||
961 | dev->change_mtu = ieee80211_change_mtu; | 968 | dev->change_mtu = ieee80211_change_mtu; |
962 | dev->open = ieee80211_open; | 969 | dev->open = ieee80211_open; |
963 | dev->stop = ieee80211_stop; | 970 | dev->stop = ieee80211_stop; |
964 | dev->destructor = ieee80211_if_free; | 971 | dev->destructor = free_netdev; |
965 | } | 972 | } |
966 | 973 | ||
967 | /* everything else */ | 974 | /* everything else */ |
968 | 975 | ||
969 | static int __ieee80211_if_config(struct net_device *dev, | 976 | int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed) |
970 | struct sk_buff *beacon, | ||
971 | struct ieee80211_tx_control *control) | ||
972 | { | 977 | { |
973 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 978 | struct ieee80211_local *local = sdata->local; |
974 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
975 | struct ieee80211_if_conf conf; | 979 | struct ieee80211_if_conf conf; |
976 | 980 | ||
977 | if (!local->ops->config_interface || !netif_running(dev)) | 981 | if (WARN_ON(!netif_running(sdata->dev))) |
982 | return 0; | ||
983 | |||
984 | if (!local->ops->config_interface) | ||
978 | return 0; | 985 | return 0; |
979 | 986 | ||
980 | memset(&conf, 0, sizeof(conf)); | 987 | memset(&conf, 0, sizeof(conf)); |
981 | conf.type = sdata->vif.type; | 988 | conf.changed = changed; |
989 | |||
982 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | 990 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || |
983 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | 991 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { |
984 | conf.bssid = sdata->u.sta.bssid; | 992 | conf.bssid = sdata->u.sta.bssid; |
985 | conf.ssid = sdata->u.sta.ssid; | 993 | conf.ssid = sdata->u.sta.ssid; |
986 | conf.ssid_len = sdata->u.sta.ssid_len; | 994 | conf.ssid_len = sdata->u.sta.ssid_len; |
987 | } else if (ieee80211_vif_is_mesh(&sdata->vif)) { | ||
988 | conf.beacon = beacon; | ||
989 | conf.beacon_control = control; | ||
990 | ieee80211_start_mesh(dev); | ||
991 | } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { | 995 | } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { |
996 | conf.bssid = sdata->dev->dev_addr; | ||
992 | conf.ssid = sdata->u.ap.ssid; | 997 | conf.ssid = sdata->u.ap.ssid; |
993 | conf.ssid_len = sdata->u.ap.ssid_len; | 998 | conf.ssid_len = sdata->u.ap.ssid_len; |
994 | conf.beacon = beacon; | 999 | } else if (ieee80211_vif_is_mesh(&sdata->vif)) { |
995 | conf.beacon_control = control; | 1000 | u8 zero[ETH_ALEN] = { 0 }; |
1001 | conf.bssid = zero; | ||
1002 | conf.ssid = zero; | ||
1003 | conf.ssid_len = 0; | ||
1004 | } else { | ||
1005 | WARN_ON(1); | ||
1006 | return -EINVAL; | ||
996 | } | 1007 | } |
997 | return local->ops->config_interface(local_to_hw(local), | ||
998 | &sdata->vif, &conf); | ||
999 | } | ||
1000 | 1008 | ||
1001 | int ieee80211_if_config(struct net_device *dev) | 1009 | if (WARN_ON(!conf.bssid && (changed & IEEE80211_IFCC_BSSID))) |
1002 | { | 1010 | return -EINVAL; |
1003 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1004 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1005 | if (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT && | ||
1006 | (local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE)) | ||
1007 | return ieee80211_if_config_beacon(dev); | ||
1008 | return __ieee80211_if_config(dev, NULL, NULL); | ||
1009 | } | ||
1010 | 1011 | ||
1011 | int ieee80211_if_config_beacon(struct net_device *dev) | 1012 | if (WARN_ON(!conf.ssid && (changed & IEEE80211_IFCC_SSID))) |
1012 | { | 1013 | return -EINVAL; |
1013 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1014 | struct ieee80211_tx_control control; | ||
1015 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1016 | struct sk_buff *skb; | ||
1017 | 1014 | ||
1018 | if (!(local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE)) | 1015 | return local->ops->config_interface(local_to_hw(local), |
1019 | return 0; | 1016 | &sdata->vif, &conf); |
1020 | skb = ieee80211_beacon_get(local_to_hw(local), &sdata->vif, | ||
1021 | &control); | ||
1022 | if (!skb) | ||
1023 | return -ENOMEM; | ||
1024 | return __ieee80211_if_config(dev, skb, &control); | ||
1025 | } | 1017 | } |
1026 | 1018 | ||
1027 | int ieee80211_hw_config(struct ieee80211_local *local) | 1019 | int ieee80211_hw_config(struct ieee80211_local *local) |
@@ -1068,56 +1060,84 @@ u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht, | |||
1068 | struct ieee80211_supported_band *sband; | 1060 | struct ieee80211_supported_band *sband; |
1069 | struct ieee80211_ht_info ht_conf; | 1061 | struct ieee80211_ht_info ht_conf; |
1070 | struct ieee80211_ht_bss_info ht_bss_conf; | 1062 | struct ieee80211_ht_bss_info ht_bss_conf; |
1071 | int i; | ||
1072 | u32 changed = 0; | 1063 | u32 changed = 0; |
1064 | int i; | ||
1065 | u8 max_tx_streams = IEEE80211_HT_CAP_MAX_STREAMS; | ||
1066 | u8 tx_mcs_set_cap; | ||
1073 | 1067 | ||
1074 | sband = local->hw.wiphy->bands[conf->channel->band]; | 1068 | sband = local->hw.wiphy->bands[conf->channel->band]; |
1075 | 1069 | ||
1070 | memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info)); | ||
1071 | memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info)); | ||
1072 | |||
1076 | /* HT is not supported */ | 1073 | /* HT is not supported */ |
1077 | if (!sband->ht_info.ht_supported) { | 1074 | if (!sband->ht_info.ht_supported) { |
1078 | conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE; | 1075 | conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE; |
1079 | return 0; | 1076 | goto out; |
1080 | } | 1077 | } |
1081 | 1078 | ||
1082 | memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info)); | 1079 | /* disable HT */ |
1083 | memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info)); | 1080 | if (!enable_ht) { |
1084 | 1081 | if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) | |
1085 | if (enable_ht) { | ||
1086 | if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)) | ||
1087 | changed |= BSS_CHANGED_HT; | 1082 | changed |= BSS_CHANGED_HT; |
1083 | conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE; | ||
1084 | conf->ht_conf.ht_supported = 0; | ||
1085 | goto out; | ||
1086 | } | ||
1088 | 1087 | ||
1089 | conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE; | ||
1090 | ht_conf.ht_supported = 1; | ||
1091 | 1088 | ||
1092 | ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap; | 1089 | if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)) |
1093 | ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS); | 1090 | changed |= BSS_CHANGED_HT; |
1094 | ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS; | ||
1095 | 1091 | ||
1096 | for (i = 0; i < SUPP_MCS_SET_LEN; i++) | 1092 | conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE; |
1097 | ht_conf.supp_mcs_set[i] = | 1093 | ht_conf.ht_supported = 1; |
1098 | sband->ht_info.supp_mcs_set[i] & | ||
1099 | req_ht_cap->supp_mcs_set[i]; | ||
1100 | 1094 | ||
1101 | ht_bss_conf.primary_channel = req_bss_cap->primary_channel; | 1095 | ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap; |
1102 | ht_bss_conf.bss_cap = req_bss_cap->bss_cap; | 1096 | ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS); |
1103 | ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode; | 1097 | ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS; |
1098 | ht_bss_conf.primary_channel = req_bss_cap->primary_channel; | ||
1099 | ht_bss_conf.bss_cap = req_bss_cap->bss_cap; | ||
1100 | ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode; | ||
1104 | 1101 | ||
1105 | ht_conf.ampdu_factor = req_ht_cap->ampdu_factor; | 1102 | ht_conf.ampdu_factor = req_ht_cap->ampdu_factor; |
1106 | ht_conf.ampdu_density = req_ht_cap->ampdu_density; | 1103 | ht_conf.ampdu_density = req_ht_cap->ampdu_density; |
1107 | 1104 | ||
1108 | /* if bss configuration changed store the new one */ | 1105 | /* Bits 96-100 */ |
1109 | if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) || | 1106 | tx_mcs_set_cap = sband->ht_info.supp_mcs_set[12]; |
1110 | memcmp(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf))) { | 1107 | |
1111 | changed |= BSS_CHANGED_HT; | 1108 | /* configure suppoerted Tx MCS according to requested MCS |
1112 | memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf)); | 1109 | * (based in most cases on Rx capabilities of peer) and self |
1113 | memcpy(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf)); | 1110 | * Tx MCS capabilities (as defined by low level driver HW |
1114 | } | 1111 | * Tx capabilities) */ |
1115 | } else { | 1112 | if (!(tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_DEFINED)) |
1116 | if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) | 1113 | goto check_changed; |
1117 | changed |= BSS_CHANGED_HT; | ||
1118 | conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE; | ||
1119 | } | ||
1120 | 1114 | ||
1115 | /* Counting from 0 therfore + 1 */ | ||
1116 | if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_RX_DIFF) | ||
1117 | max_tx_streams = ((tx_mcs_set_cap & | ||
1118 | IEEE80211_HT_CAP_MCS_TX_STREAMS) >> 2) + 1; | ||
1119 | |||
1120 | for (i = 0; i < max_tx_streams; i++) | ||
1121 | ht_conf.supp_mcs_set[i] = | ||
1122 | sband->ht_info.supp_mcs_set[i] & | ||
1123 | req_ht_cap->supp_mcs_set[i]; | ||
1124 | |||
1125 | if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_UEQM) | ||
1126 | for (i = IEEE80211_SUPP_MCS_SET_UEQM; | ||
1127 | i < IEEE80211_SUPP_MCS_SET_LEN; i++) | ||
1128 | ht_conf.supp_mcs_set[i] = | ||
1129 | sband->ht_info.supp_mcs_set[i] & | ||
1130 | req_ht_cap->supp_mcs_set[i]; | ||
1131 | |||
1132 | check_changed: | ||
1133 | /* if bss configuration changed store the new one */ | ||
1134 | if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) || | ||
1135 | memcmp(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf))) { | ||
1136 | changed |= BSS_CHANGED_HT; | ||
1137 | memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf)); | ||
1138 | memcpy(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf)); | ||
1139 | } | ||
1140 | out: | ||
1121 | return changed; | 1141 | return changed; |
1122 | } | 1142 | } |
1123 | 1143 | ||
@@ -1136,50 +1156,30 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | |||
1136 | changed); | 1156 | changed); |
1137 | } | 1157 | } |
1138 | 1158 | ||
1139 | void ieee80211_reset_erp_info(struct net_device *dev) | 1159 | u32 ieee80211_reset_erp_info(struct net_device *dev) |
1140 | { | 1160 | { |
1141 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1161 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1142 | 1162 | ||
1143 | sdata->bss_conf.use_cts_prot = 0; | 1163 | sdata->bss_conf.use_cts_prot = 0; |
1144 | sdata->bss_conf.use_short_preamble = 0; | 1164 | sdata->bss_conf.use_short_preamble = 0; |
1145 | ieee80211_bss_info_change_notify(sdata, | 1165 | return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE; |
1146 | BSS_CHANGED_ERP_CTS_PROT | | ||
1147 | BSS_CHANGED_ERP_PREAMBLE); | ||
1148 | } | 1166 | } |
1149 | 1167 | ||
1150 | void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, | 1168 | void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, |
1151 | struct sk_buff *skb, | 1169 | struct sk_buff *skb) |
1152 | struct ieee80211_tx_status *status) | ||
1153 | { | 1170 | { |
1154 | struct ieee80211_local *local = hw_to_local(hw); | 1171 | struct ieee80211_local *local = hw_to_local(hw); |
1155 | struct ieee80211_tx_status *saved; | 1172 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1156 | int tmp; | 1173 | int tmp; |
1157 | 1174 | ||
1158 | skb->dev = local->mdev; | 1175 | skb->dev = local->mdev; |
1159 | saved = kmalloc(sizeof(struct ieee80211_tx_status), GFP_ATOMIC); | ||
1160 | if (unlikely(!saved)) { | ||
1161 | if (net_ratelimit()) | ||
1162 | printk(KERN_WARNING "%s: Not enough memory, " | ||
1163 | "dropping tx status", skb->dev->name); | ||
1164 | /* should be dev_kfree_skb_irq, but due to this function being | ||
1165 | * named _irqsafe instead of just _irq we can't be sure that | ||
1166 | * people won't call it from non-irq contexts */ | ||
1167 | dev_kfree_skb_any(skb); | ||
1168 | return; | ||
1169 | } | ||
1170 | memcpy(saved, status, sizeof(struct ieee80211_tx_status)); | ||
1171 | /* copy pointer to saved status into skb->cb for use by tasklet */ | ||
1172 | memcpy(skb->cb, &saved, sizeof(saved)); | ||
1173 | |||
1174 | skb->pkt_type = IEEE80211_TX_STATUS_MSG; | 1176 | skb->pkt_type = IEEE80211_TX_STATUS_MSG; |
1175 | skb_queue_tail(status->control.flags & IEEE80211_TXCTL_REQ_TX_STATUS ? | 1177 | skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ? |
1176 | &local->skb_queue : &local->skb_queue_unreliable, skb); | 1178 | &local->skb_queue : &local->skb_queue_unreliable, skb); |
1177 | tmp = skb_queue_len(&local->skb_queue) + | 1179 | tmp = skb_queue_len(&local->skb_queue) + |
1178 | skb_queue_len(&local->skb_queue_unreliable); | 1180 | skb_queue_len(&local->skb_queue_unreliable); |
1179 | while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT && | 1181 | while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT && |
1180 | (skb = skb_dequeue(&local->skb_queue_unreliable))) { | 1182 | (skb = skb_dequeue(&local->skb_queue_unreliable))) { |
1181 | memcpy(&saved, skb->cb, sizeof(saved)); | ||
1182 | kfree(saved); | ||
1183 | dev_kfree_skb_irq(skb); | 1183 | dev_kfree_skb_irq(skb); |
1184 | tmp--; | 1184 | tmp--; |
1185 | I802_DEBUG_INC(local->tx_status_drop); | 1185 | I802_DEBUG_INC(local->tx_status_drop); |
@@ -1193,7 +1193,6 @@ static void ieee80211_tasklet_handler(unsigned long data) | |||
1193 | struct ieee80211_local *local = (struct ieee80211_local *) data; | 1193 | struct ieee80211_local *local = (struct ieee80211_local *) data; |
1194 | struct sk_buff *skb; | 1194 | struct sk_buff *skb; |
1195 | struct ieee80211_rx_status rx_status; | 1195 | struct ieee80211_rx_status rx_status; |
1196 | struct ieee80211_tx_status *tx_status; | ||
1197 | struct ieee80211_ra_tid *ra_tid; | 1196 | struct ieee80211_ra_tid *ra_tid; |
1198 | 1197 | ||
1199 | while ((skb = skb_dequeue(&local->skb_queue)) || | 1198 | while ((skb = skb_dequeue(&local->skb_queue)) || |
@@ -1208,12 +1207,8 @@ static void ieee80211_tasklet_handler(unsigned long data) | |||
1208 | __ieee80211_rx(local_to_hw(local), skb, &rx_status); | 1207 | __ieee80211_rx(local_to_hw(local), skb, &rx_status); |
1209 | break; | 1208 | break; |
1210 | case IEEE80211_TX_STATUS_MSG: | 1209 | case IEEE80211_TX_STATUS_MSG: |
1211 | /* get pointer to saved status out of skb->cb */ | ||
1212 | memcpy(&tx_status, skb->cb, sizeof(tx_status)); | ||
1213 | skb->pkt_type = 0; | 1210 | skb->pkt_type = 0; |
1214 | ieee80211_tx_status(local_to_hw(local), | 1211 | ieee80211_tx_status(local_to_hw(local), skb); |
1215 | skb, tx_status); | ||
1216 | kfree(tx_status); | ||
1217 | break; | 1212 | break; |
1218 | case IEEE80211_DELBA_MSG: | 1213 | case IEEE80211_DELBA_MSG: |
1219 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | 1214 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; |
@@ -1227,9 +1222,8 @@ static void ieee80211_tasklet_handler(unsigned long data) | |||
1227 | ra_tid->ra, ra_tid->tid); | 1222 | ra_tid->ra, ra_tid->tid); |
1228 | dev_kfree_skb(skb); | 1223 | dev_kfree_skb(skb); |
1229 | break ; | 1224 | break ; |
1230 | default: /* should never get here! */ | 1225 | default: |
1231 | printk(KERN_ERR "%s: Unknown message type (%d)\n", | 1226 | WARN_ON(1); |
1232 | wiphy_name(local->hw.wiphy), skb->pkt_type); | ||
1233 | dev_kfree_skb(skb); | 1227 | dev_kfree_skb(skb); |
1234 | break; | 1228 | break; |
1235 | } | 1229 | } |
@@ -1242,24 +1236,15 @@ static void ieee80211_tasklet_handler(unsigned long data) | |||
1242 | * Also, tx_packet_data in cb is restored from tx_control. */ | 1236 | * Also, tx_packet_data in cb is restored from tx_control. */ |
1243 | static void ieee80211_remove_tx_extra(struct ieee80211_local *local, | 1237 | static void ieee80211_remove_tx_extra(struct ieee80211_local *local, |
1244 | struct ieee80211_key *key, | 1238 | struct ieee80211_key *key, |
1245 | struct sk_buff *skb, | 1239 | struct sk_buff *skb) |
1246 | struct ieee80211_tx_control *control) | ||
1247 | { | 1240 | { |
1248 | int hdrlen, iv_len, mic_len; | 1241 | int hdrlen, iv_len, mic_len; |
1249 | struct ieee80211_tx_packet_data *pkt_data; | 1242 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1250 | 1243 | ||
1251 | pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; | 1244 | info->flags &= IEEE80211_TX_CTL_REQ_TX_STATUS | |
1252 | pkt_data->ifindex = vif_to_sdata(control->vif)->dev->ifindex; | 1245 | IEEE80211_TX_CTL_DO_NOT_ENCRYPT | |
1253 | pkt_data->flags = 0; | 1246 | IEEE80211_TX_CTL_REQUEUE | |
1254 | if (control->flags & IEEE80211_TXCTL_REQ_TX_STATUS) | 1247 | IEEE80211_TX_CTL_EAPOL_FRAME; |
1255 | pkt_data->flags |= IEEE80211_TXPD_REQ_TX_STATUS; | ||
1256 | if (control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT) | ||
1257 | pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT; | ||
1258 | if (control->flags & IEEE80211_TXCTL_REQUEUE) | ||
1259 | pkt_data->flags |= IEEE80211_TXPD_REQUEUE; | ||
1260 | if (control->flags & IEEE80211_TXCTL_EAPOL_FRAME) | ||
1261 | pkt_data->flags |= IEEE80211_TXPD_EAPOL_FRAME; | ||
1262 | pkt_data->queue = control->queue; | ||
1263 | 1248 | ||
1264 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | 1249 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); |
1265 | 1250 | ||
@@ -1306,9 +1291,10 @@ no_key: | |||
1306 | 1291 | ||
1307 | static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | 1292 | static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, |
1308 | struct sta_info *sta, | 1293 | struct sta_info *sta, |
1309 | struct sk_buff *skb, | 1294 | struct sk_buff *skb) |
1310 | struct ieee80211_tx_status *status) | ||
1311 | { | 1295 | { |
1296 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
1297 | |||
1312 | sta->tx_filtered_count++; | 1298 | sta->tx_filtered_count++; |
1313 | 1299 | ||
1314 | /* | 1300 | /* |
@@ -1316,7 +1302,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | |||
1316 | * packet. If the STA went to power save mode, this will happen | 1302 | * packet. If the STA went to power save mode, this will happen |
1317 | * when it wakes up for the next time. | 1303 | * when it wakes up for the next time. |
1318 | */ | 1304 | */ |
1319 | sta->flags |= WLAN_STA_CLEAR_PS_FILT; | 1305 | set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT); |
1320 | 1306 | ||
1321 | /* | 1307 | /* |
1322 | * This code races in the following way: | 1308 | * This code races in the following way: |
@@ -1348,84 +1334,89 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | |||
1348 | * can be unknown, for example with different interrupt status | 1334 | * can be unknown, for example with different interrupt status |
1349 | * bits. | 1335 | * bits. |
1350 | */ | 1336 | */ |
1351 | if (sta->flags & WLAN_STA_PS && | 1337 | if (test_sta_flags(sta, WLAN_STA_PS) && |
1352 | skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { | 1338 | skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { |
1353 | ieee80211_remove_tx_extra(local, sta->key, skb, | 1339 | ieee80211_remove_tx_extra(local, sta->key, skb); |
1354 | &status->control); | ||
1355 | skb_queue_tail(&sta->tx_filtered, skb); | 1340 | skb_queue_tail(&sta->tx_filtered, skb); |
1356 | return; | 1341 | return; |
1357 | } | 1342 | } |
1358 | 1343 | ||
1359 | if (!(sta->flags & WLAN_STA_PS) && | 1344 | if (!test_sta_flags(sta, WLAN_STA_PS) && |
1360 | !(status->control.flags & IEEE80211_TXCTL_REQUEUE)) { | 1345 | !(info->flags & IEEE80211_TX_CTL_REQUEUE)) { |
1361 | /* Software retry the packet once */ | 1346 | /* Software retry the packet once */ |
1362 | status->control.flags |= IEEE80211_TXCTL_REQUEUE; | 1347 | info->flags |= IEEE80211_TX_CTL_REQUEUE; |
1363 | ieee80211_remove_tx_extra(local, sta->key, skb, | 1348 | ieee80211_remove_tx_extra(local, sta->key, skb); |
1364 | &status->control); | ||
1365 | dev_queue_xmit(skb); | 1349 | dev_queue_xmit(skb); |
1366 | return; | 1350 | return; |
1367 | } | 1351 | } |
1368 | 1352 | ||
1353 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
1369 | if (net_ratelimit()) | 1354 | if (net_ratelimit()) |
1370 | printk(KERN_DEBUG "%s: dropped TX filtered frame, " | 1355 | printk(KERN_DEBUG "%s: dropped TX filtered frame, " |
1371 | "queue_len=%d PS=%d @%lu\n", | 1356 | "queue_len=%d PS=%d @%lu\n", |
1372 | wiphy_name(local->hw.wiphy), | 1357 | wiphy_name(local->hw.wiphy), |
1373 | skb_queue_len(&sta->tx_filtered), | 1358 | skb_queue_len(&sta->tx_filtered), |
1374 | !!(sta->flags & WLAN_STA_PS), jiffies); | 1359 | !!test_sta_flags(sta, WLAN_STA_PS), jiffies); |
1360 | #endif | ||
1375 | dev_kfree_skb(skb); | 1361 | dev_kfree_skb(skb); |
1376 | } | 1362 | } |
1377 | 1363 | ||
1378 | void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, | 1364 | void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) |
1379 | struct ieee80211_tx_status *status) | ||
1380 | { | 1365 | { |
1381 | struct sk_buff *skb2; | 1366 | struct sk_buff *skb2; |
1382 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 1367 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
1383 | struct ieee80211_local *local = hw_to_local(hw); | 1368 | struct ieee80211_local *local = hw_to_local(hw); |
1369 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
1384 | u16 frag, type; | 1370 | u16 frag, type; |
1371 | __le16 fc; | ||
1385 | struct ieee80211_tx_status_rtap_hdr *rthdr; | 1372 | struct ieee80211_tx_status_rtap_hdr *rthdr; |
1386 | struct ieee80211_sub_if_data *sdata; | 1373 | struct ieee80211_sub_if_data *sdata; |
1387 | struct net_device *prev_dev = NULL; | 1374 | struct net_device *prev_dev = NULL; |
1388 | 1375 | struct sta_info *sta; | |
1389 | if (!status) { | ||
1390 | printk(KERN_ERR | ||
1391 | "%s: ieee80211_tx_status called with NULL status\n", | ||
1392 | wiphy_name(local->hw.wiphy)); | ||
1393 | dev_kfree_skb(skb); | ||
1394 | return; | ||
1395 | } | ||
1396 | 1376 | ||
1397 | rcu_read_lock(); | 1377 | rcu_read_lock(); |
1398 | 1378 | ||
1399 | if (status->excessive_retries) { | 1379 | if (info->status.excessive_retries) { |
1400 | struct sta_info *sta; | ||
1401 | sta = sta_info_get(local, hdr->addr1); | 1380 | sta = sta_info_get(local, hdr->addr1); |
1402 | if (sta) { | 1381 | if (sta) { |
1403 | if (sta->flags & WLAN_STA_PS) { | 1382 | if (test_sta_flags(sta, WLAN_STA_PS)) { |
1404 | /* | 1383 | /* |
1405 | * The STA is in power save mode, so assume | 1384 | * The STA is in power save mode, so assume |
1406 | * that this TX packet failed because of that. | 1385 | * that this TX packet failed because of that. |
1407 | */ | 1386 | */ |
1408 | status->excessive_retries = 0; | 1387 | ieee80211_handle_filtered_frame(local, sta, skb); |
1409 | status->flags |= IEEE80211_TX_STATUS_TX_FILTERED; | ||
1410 | ieee80211_handle_filtered_frame(local, sta, | ||
1411 | skb, status); | ||
1412 | rcu_read_unlock(); | 1388 | rcu_read_unlock(); |
1413 | return; | 1389 | return; |
1414 | } | 1390 | } |
1415 | } | 1391 | } |
1416 | } | 1392 | } |
1417 | 1393 | ||
1418 | if (status->flags & IEEE80211_TX_STATUS_TX_FILTERED) { | 1394 | fc = hdr->frame_control; |
1419 | struct sta_info *sta; | 1395 | |
1396 | if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) && | ||
1397 | (ieee80211_is_data_qos(fc))) { | ||
1398 | u16 tid, ssn; | ||
1399 | u8 *qc; | ||
1420 | sta = sta_info_get(local, hdr->addr1); | 1400 | sta = sta_info_get(local, hdr->addr1); |
1421 | if (sta) { | 1401 | if (sta) { |
1422 | ieee80211_handle_filtered_frame(local, sta, skb, | 1402 | qc = ieee80211_get_qos_ctl(hdr); |
1423 | status); | 1403 | tid = qc[0] & 0xf; |
1404 | ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) | ||
1405 | & IEEE80211_SCTL_SEQ); | ||
1406 | ieee80211_send_bar(sta->sdata->dev, hdr->addr1, | ||
1407 | tid, ssn); | ||
1408 | } | ||
1409 | } | ||
1410 | |||
1411 | if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) { | ||
1412 | sta = sta_info_get(local, hdr->addr1); | ||
1413 | if (sta) { | ||
1414 | ieee80211_handle_filtered_frame(local, sta, skb); | ||
1424 | rcu_read_unlock(); | 1415 | rcu_read_unlock(); |
1425 | return; | 1416 | return; |
1426 | } | 1417 | } |
1427 | } else | 1418 | } else |
1428 | rate_control_tx_status(local->mdev, skb, status); | 1419 | rate_control_tx_status(local->mdev, skb); |
1429 | 1420 | ||
1430 | rcu_read_unlock(); | 1421 | rcu_read_unlock(); |
1431 | 1422 | ||
@@ -1439,14 +1430,14 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
1439 | frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; | 1430 | frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; |
1440 | type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE; | 1431 | type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE; |
1441 | 1432 | ||
1442 | if (status->flags & IEEE80211_TX_STATUS_ACK) { | 1433 | if (info->flags & IEEE80211_TX_STAT_ACK) { |
1443 | if (frag == 0) { | 1434 | if (frag == 0) { |
1444 | local->dot11TransmittedFrameCount++; | 1435 | local->dot11TransmittedFrameCount++; |
1445 | if (is_multicast_ether_addr(hdr->addr1)) | 1436 | if (is_multicast_ether_addr(hdr->addr1)) |
1446 | local->dot11MulticastTransmittedFrameCount++; | 1437 | local->dot11MulticastTransmittedFrameCount++; |
1447 | if (status->retry_count > 0) | 1438 | if (info->status.retry_count > 0) |
1448 | local->dot11RetryCount++; | 1439 | local->dot11RetryCount++; |
1449 | if (status->retry_count > 1) | 1440 | if (info->status.retry_count > 1) |
1450 | local->dot11MultipleRetryCount++; | 1441 | local->dot11MultipleRetryCount++; |
1451 | } | 1442 | } |
1452 | 1443 | ||
@@ -1483,7 +1474,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
1483 | return; | 1474 | return; |
1484 | } | 1475 | } |
1485 | 1476 | ||
1486 | rthdr = (struct ieee80211_tx_status_rtap_hdr*) | 1477 | rthdr = (struct ieee80211_tx_status_rtap_hdr *) |
1487 | skb_push(skb, sizeof(*rthdr)); | 1478 | skb_push(skb, sizeof(*rthdr)); |
1488 | 1479 | ||
1489 | memset(rthdr, 0, sizeof(*rthdr)); | 1480 | memset(rthdr, 0, sizeof(*rthdr)); |
@@ -1492,17 +1483,17 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
1492 | cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) | | 1483 | cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) | |
1493 | (1 << IEEE80211_RADIOTAP_DATA_RETRIES)); | 1484 | (1 << IEEE80211_RADIOTAP_DATA_RETRIES)); |
1494 | 1485 | ||
1495 | if (!(status->flags & IEEE80211_TX_STATUS_ACK) && | 1486 | if (!(info->flags & IEEE80211_TX_STAT_ACK) && |
1496 | !is_multicast_ether_addr(hdr->addr1)) | 1487 | !is_multicast_ether_addr(hdr->addr1)) |
1497 | rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL); | 1488 | rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL); |
1498 | 1489 | ||
1499 | if ((status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS) && | 1490 | if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) && |
1500 | (status->control.flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) | 1491 | (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)) |
1501 | rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS); | 1492 | rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS); |
1502 | else if (status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS) | 1493 | else if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) |
1503 | rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS); | 1494 | rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS); |
1504 | 1495 | ||
1505 | rthdr->data_retries = status->retry_count; | 1496 | rthdr->data_retries = info->status.retry_count; |
1506 | 1497 | ||
1507 | /* XXX: is this sufficient for BPF? */ | 1498 | /* XXX: is this sufficient for BPF? */ |
1508 | skb_set_mac_header(skb, 0); | 1499 | skb_set_mac_header(skb, 0); |
@@ -1628,7 +1619,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
1628 | int result; | 1619 | int result; |
1629 | enum ieee80211_band band; | 1620 | enum ieee80211_band band; |
1630 | struct net_device *mdev; | 1621 | struct net_device *mdev; |
1631 | struct ieee80211_sub_if_data *sdata; | 1622 | struct wireless_dev *mwdev; |
1632 | 1623 | ||
1633 | /* | 1624 | /* |
1634 | * generic code guarantees at least one band, | 1625 | * generic code guarantees at least one band, |
@@ -1652,19 +1643,30 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
1652 | if (result < 0) | 1643 | if (result < 0) |
1653 | return result; | 1644 | return result; |
1654 | 1645 | ||
1655 | /* for now, mdev needs sub_if_data :/ */ | 1646 | /* |
1656 | mdev = alloc_netdev(sizeof(struct ieee80211_sub_if_data), | 1647 | * We use the number of queues for feature tests (QoS, HT) internally |
1657 | "wmaster%d", ether_setup); | 1648 | * so restrict them appropriately. |
1649 | */ | ||
1650 | if (hw->queues > IEEE80211_MAX_QUEUES) | ||
1651 | hw->queues = IEEE80211_MAX_QUEUES; | ||
1652 | if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES) | ||
1653 | hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES; | ||
1654 | if (hw->queues < 4) | ||
1655 | hw->ampdu_queues = 0; | ||
1656 | |||
1657 | mdev = alloc_netdev_mq(sizeof(struct wireless_dev), | ||
1658 | "wmaster%d", ether_setup, | ||
1659 | ieee80211_num_queues(hw)); | ||
1658 | if (!mdev) | 1660 | if (!mdev) |
1659 | goto fail_mdev_alloc; | 1661 | goto fail_mdev_alloc; |
1660 | 1662 | ||
1661 | sdata = IEEE80211_DEV_TO_SUB_IF(mdev); | 1663 | mwdev = netdev_priv(mdev); |
1662 | mdev->ieee80211_ptr = &sdata->wdev; | 1664 | mdev->ieee80211_ptr = mwdev; |
1663 | sdata->wdev.wiphy = local->hw.wiphy; | 1665 | mwdev->wiphy = local->hw.wiphy; |
1664 | 1666 | ||
1665 | local->mdev = mdev; | 1667 | local->mdev = mdev; |
1666 | 1668 | ||
1667 | ieee80211_rx_bss_list_init(mdev); | 1669 | ieee80211_rx_bss_list_init(local); |
1668 | 1670 | ||
1669 | mdev->hard_start_xmit = ieee80211_master_start_xmit; | 1671 | mdev->hard_start_xmit = ieee80211_master_start_xmit; |
1670 | mdev->open = ieee80211_master_open; | 1672 | mdev->open = ieee80211_master_open; |
@@ -1673,18 +1675,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
1673 | mdev->header_ops = &ieee80211_header_ops; | 1675 | mdev->header_ops = &ieee80211_header_ops; |
1674 | mdev->set_multicast_list = ieee80211_master_set_multicast_list; | 1676 | mdev->set_multicast_list = ieee80211_master_set_multicast_list; |
1675 | 1677 | ||
1676 | sdata->vif.type = IEEE80211_IF_TYPE_AP; | ||
1677 | sdata->dev = mdev; | ||
1678 | sdata->local = local; | ||
1679 | sdata->u.ap.force_unicast_rateidx = -1; | ||
1680 | sdata->u.ap.max_ratectrl_rateidx = -1; | ||
1681 | ieee80211_if_sdata_init(sdata); | ||
1682 | |||
1683 | /* no RCU needed since we're still during init phase */ | ||
1684 | list_add_tail(&sdata->list, &local->interfaces); | ||
1685 | |||
1686 | name = wiphy_dev(local->hw.wiphy)->driver->name; | 1678 | name = wiphy_dev(local->hw.wiphy)->driver->name; |
1687 | local->hw.workqueue = create_singlethread_workqueue(name); | 1679 | local->hw.workqueue = create_freezeable_workqueue(name); |
1688 | if (!local->hw.workqueue) { | 1680 | if (!local->hw.workqueue) { |
1689 | result = -ENOMEM; | 1681 | result = -ENOMEM; |
1690 | goto fail_workqueue; | 1682 | goto fail_workqueue; |
@@ -1700,15 +1692,16 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
1700 | 1692 | ||
1701 | debugfs_hw_add(local); | 1693 | debugfs_hw_add(local); |
1702 | 1694 | ||
1703 | local->hw.conf.beacon_int = 1000; | 1695 | if (local->hw.conf.beacon_int < 10) |
1696 | local->hw.conf.beacon_int = 100; | ||
1704 | 1697 | ||
1705 | local->wstats_flags |= local->hw.max_rssi ? | 1698 | local->wstats_flags |= local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC | |
1706 | IW_QUAL_LEVEL_UPDATED : IW_QUAL_LEVEL_INVALID; | 1699 | IEEE80211_HW_SIGNAL_DB | |
1707 | local->wstats_flags |= local->hw.max_signal ? | 1700 | IEEE80211_HW_SIGNAL_DBM) ? |
1708 | IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID; | 1701 | IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID; |
1709 | local->wstats_flags |= local->hw.max_noise ? | 1702 | local->wstats_flags |= local->hw.flags & IEEE80211_HW_NOISE_DBM ? |
1710 | IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID; | 1703 | IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID; |
1711 | if (local->hw.max_rssi < 0 || local->hw.max_noise < 0) | 1704 | if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) |
1712 | local->wstats_flags |= IW_QUAL_DBM; | 1705 | local->wstats_flags |= IW_QUAL_DBM; |
1713 | 1706 | ||
1714 | result = sta_info_start(local); | 1707 | result = sta_info_start(local); |
@@ -1727,9 +1720,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
1727 | if (result < 0) | 1720 | if (result < 0) |
1728 | goto fail_dev; | 1721 | goto fail_dev; |
1729 | 1722 | ||
1730 | ieee80211_debugfs_add_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev)); | ||
1731 | ieee80211_if_set_type(local->mdev, IEEE80211_IF_TYPE_AP); | ||
1732 | |||
1733 | result = ieee80211_init_rate_ctrl_alg(local, | 1723 | result = ieee80211_init_rate_ctrl_alg(local, |
1734 | hw->rate_control_algorithm); | 1724 | hw->rate_control_algorithm); |
1735 | if (result < 0) { | 1725 | if (result < 0) { |
@@ -1746,16 +1736,15 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
1746 | goto fail_wep; | 1736 | goto fail_wep; |
1747 | } | 1737 | } |
1748 | 1738 | ||
1749 | ieee80211_install_qdisc(local->mdev); | 1739 | local->mdev->select_queue = ieee80211_select_queue; |
1750 | 1740 | ||
1751 | /* add one default STA interface */ | 1741 | /* add one default STA interface */ |
1752 | result = ieee80211_if_add(local->mdev, "wlan%d", NULL, | 1742 | result = ieee80211_if_add(local, "wlan%d", NULL, |
1753 | IEEE80211_IF_TYPE_STA, NULL); | 1743 | IEEE80211_IF_TYPE_STA, NULL); |
1754 | if (result) | 1744 | if (result) |
1755 | printk(KERN_WARNING "%s: Failed to add default virtual iface\n", | 1745 | printk(KERN_WARNING "%s: Failed to add default virtual iface\n", |
1756 | wiphy_name(local->hw.wiphy)); | 1746 | wiphy_name(local->hw.wiphy)); |
1757 | 1747 | ||
1758 | local->reg_state = IEEE80211_DEV_REGISTERED; | ||
1759 | rtnl_unlock(); | 1748 | rtnl_unlock(); |
1760 | 1749 | ||
1761 | ieee80211_led_init(local); | 1750 | ieee80211_led_init(local); |
@@ -1765,7 +1754,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
1765 | fail_wep: | 1754 | fail_wep: |
1766 | rate_control_deinitialize(local); | 1755 | rate_control_deinitialize(local); |
1767 | fail_rate: | 1756 | fail_rate: |
1768 | ieee80211_debugfs_remove_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev)); | ||
1769 | unregister_netdevice(local->mdev); | 1757 | unregister_netdevice(local->mdev); |
1770 | local->mdev = NULL; | 1758 | local->mdev = NULL; |
1771 | fail_dev: | 1759 | fail_dev: |
@@ -1775,10 +1763,8 @@ fail_sta_info: | |||
1775 | debugfs_hw_del(local); | 1763 | debugfs_hw_del(local); |
1776 | destroy_workqueue(local->hw.workqueue); | 1764 | destroy_workqueue(local->hw.workqueue); |
1777 | fail_workqueue: | 1765 | fail_workqueue: |
1778 | if (local->mdev != NULL) { | 1766 | if (local->mdev) |
1779 | ieee80211_if_free(local->mdev); | 1767 | free_netdev(local->mdev); |
1780 | local->mdev = NULL; | ||
1781 | } | ||
1782 | fail_mdev_alloc: | 1768 | fail_mdev_alloc: |
1783 | wiphy_unregister(local->hw.wiphy); | 1769 | wiphy_unregister(local->hw.wiphy); |
1784 | return result; | 1770 | return result; |
@@ -1788,42 +1774,27 @@ EXPORT_SYMBOL(ieee80211_register_hw); | |||
1788 | void ieee80211_unregister_hw(struct ieee80211_hw *hw) | 1774 | void ieee80211_unregister_hw(struct ieee80211_hw *hw) |
1789 | { | 1775 | { |
1790 | struct ieee80211_local *local = hw_to_local(hw); | 1776 | struct ieee80211_local *local = hw_to_local(hw); |
1791 | struct ieee80211_sub_if_data *sdata, *tmp; | ||
1792 | 1777 | ||
1793 | tasklet_kill(&local->tx_pending_tasklet); | 1778 | tasklet_kill(&local->tx_pending_tasklet); |
1794 | tasklet_kill(&local->tasklet); | 1779 | tasklet_kill(&local->tasklet); |
1795 | 1780 | ||
1796 | rtnl_lock(); | 1781 | rtnl_lock(); |
1797 | 1782 | ||
1798 | BUG_ON(local->reg_state != IEEE80211_DEV_REGISTERED); | ||
1799 | |||
1800 | local->reg_state = IEEE80211_DEV_UNREGISTERED; | ||
1801 | |||
1802 | /* | 1783 | /* |
1803 | * At this point, interface list manipulations are fine | 1784 | * At this point, interface list manipulations are fine |
1804 | * because the driver cannot be handing us frames any | 1785 | * because the driver cannot be handing us frames any |
1805 | * more and the tasklet is killed. | 1786 | * more and the tasklet is killed. |
1806 | */ | 1787 | */ |
1807 | 1788 | ||
1808 | /* | 1789 | /* First, we remove all virtual interfaces. */ |
1809 | * First, we remove all non-master interfaces. Do this because they | 1790 | ieee80211_remove_interfaces(local); |
1810 | * may have bss pointer dependency on the master, and when we free | ||
1811 | * the master these would be freed as well, breaking our list | ||
1812 | * iteration completely. | ||
1813 | */ | ||
1814 | list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { | ||
1815 | if (sdata->dev == local->mdev) | ||
1816 | continue; | ||
1817 | list_del(&sdata->list); | ||
1818 | __ieee80211_if_del(local, sdata); | ||
1819 | } | ||
1820 | 1791 | ||
1821 | /* then, finally, remove the master interface */ | 1792 | /* then, finally, remove the master interface */ |
1822 | __ieee80211_if_del(local, IEEE80211_DEV_TO_SUB_IF(local->mdev)); | 1793 | unregister_netdevice(local->mdev); |
1823 | 1794 | ||
1824 | rtnl_unlock(); | 1795 | rtnl_unlock(); |
1825 | 1796 | ||
1826 | ieee80211_rx_bss_list_deinit(local->mdev); | 1797 | ieee80211_rx_bss_list_deinit(local); |
1827 | ieee80211_clear_tx_pending(local); | 1798 | ieee80211_clear_tx_pending(local); |
1828 | sta_info_stop(local); | 1799 | sta_info_stop(local); |
1829 | rate_control_deinitialize(local); | 1800 | rate_control_deinitialize(local); |
@@ -1840,8 +1811,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
1840 | wiphy_unregister(local->hw.wiphy); | 1811 | wiphy_unregister(local->hw.wiphy); |
1841 | ieee80211_wep_free(local); | 1812 | ieee80211_wep_free(local); |
1842 | ieee80211_led_exit(local); | 1813 | ieee80211_led_exit(local); |
1843 | ieee80211_if_free(local->mdev); | 1814 | free_netdev(local->mdev); |
1844 | local->mdev = NULL; | ||
1845 | } | 1815 | } |
1846 | EXPORT_SYMBOL(ieee80211_unregister_hw); | 1816 | EXPORT_SYMBOL(ieee80211_unregister_hw); |
1847 | 1817 | ||
@@ -1858,27 +1828,17 @@ static int __init ieee80211_init(void) | |||
1858 | struct sk_buff *skb; | 1828 | struct sk_buff *skb; |
1859 | int ret; | 1829 | int ret; |
1860 | 1830 | ||
1861 | BUILD_BUG_ON(sizeof(struct ieee80211_tx_packet_data) > sizeof(skb->cb)); | 1831 | BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb)); |
1832 | BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) + | ||
1833 | IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb)); | ||
1862 | 1834 | ||
1863 | ret = rc80211_pid_init(); | 1835 | ret = rc80211_pid_init(); |
1864 | if (ret) | 1836 | if (ret) |
1865 | goto out; | 1837 | return ret; |
1866 | |||
1867 | ret = ieee80211_wme_register(); | ||
1868 | if (ret) { | ||
1869 | printk(KERN_DEBUG "ieee80211_init: failed to " | ||
1870 | "initialize WME (err=%d)\n", ret); | ||
1871 | goto out_cleanup_pid; | ||
1872 | } | ||
1873 | 1838 | ||
1874 | ieee80211_debugfs_netdev_init(); | 1839 | ieee80211_debugfs_netdev_init(); |
1875 | 1840 | ||
1876 | return 0; | 1841 | return 0; |
1877 | |||
1878 | out_cleanup_pid: | ||
1879 | rc80211_pid_exit(); | ||
1880 | out: | ||
1881 | return ret; | ||
1882 | } | 1842 | } |
1883 | 1843 | ||
1884 | static void __exit ieee80211_exit(void) | 1844 | static void __exit ieee80211_exit(void) |
@@ -1894,7 +1854,6 @@ static void __exit ieee80211_exit(void) | |||
1894 | if (mesh_allocated) | 1854 | if (mesh_allocated) |
1895 | ieee80211s_stop(); | 1855 | ieee80211s_stop(); |
1896 | 1856 | ||
1897 | ieee80211_wme_unregister(); | ||
1898 | ieee80211_debugfs_netdev_exit(); | 1857 | ieee80211_debugfs_netdev_exit(); |
1899 | } | 1858 | } |
1900 | 1859 | ||
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 697ef67f96b6..b5933b271491 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -315,6 +315,13 @@ struct mesh_table *mesh_table_alloc(int size_order) | |||
315 | return newtbl; | 315 | return newtbl; |
316 | } | 316 | } |
317 | 317 | ||
318 | static void __mesh_table_free(struct mesh_table *tbl) | ||
319 | { | ||
320 | kfree(tbl->hash_buckets); | ||
321 | kfree(tbl->hashwlock); | ||
322 | kfree(tbl); | ||
323 | } | ||
324 | |||
318 | void mesh_table_free(struct mesh_table *tbl, bool free_leafs) | 325 | void mesh_table_free(struct mesh_table *tbl, bool free_leafs) |
319 | { | 326 | { |
320 | struct hlist_head *mesh_hash; | 327 | struct hlist_head *mesh_hash; |
@@ -330,9 +337,7 @@ void mesh_table_free(struct mesh_table *tbl, bool free_leafs) | |||
330 | } | 337 | } |
331 | spin_unlock(&tbl->hashwlock[i]); | 338 | spin_unlock(&tbl->hashwlock[i]); |
332 | } | 339 | } |
333 | kfree(tbl->hash_buckets); | 340 | __mesh_table_free(tbl); |
334 | kfree(tbl->hashwlock); | ||
335 | kfree(tbl); | ||
336 | } | 341 | } |
337 | 342 | ||
338 | static void ieee80211_mesh_path_timer(unsigned long data) | 343 | static void ieee80211_mesh_path_timer(unsigned long data) |
@@ -349,21 +354,16 @@ struct mesh_table *mesh_table_grow(struct mesh_table *tbl) | |||
349 | { | 354 | { |
350 | struct mesh_table *newtbl; | 355 | struct mesh_table *newtbl; |
351 | struct hlist_head *oldhash; | 356 | struct hlist_head *oldhash; |
352 | struct hlist_node *p; | 357 | struct hlist_node *p, *q; |
353 | int err = 0; | ||
354 | int i; | 358 | int i; |
355 | 359 | ||
356 | if (atomic_read(&tbl->entries) | 360 | if (atomic_read(&tbl->entries) |
357 | < tbl->mean_chain_len * (tbl->hash_mask + 1)) { | 361 | < tbl->mean_chain_len * (tbl->hash_mask + 1)) |
358 | err = -EPERM; | ||
359 | goto endgrow; | 362 | goto endgrow; |
360 | } | ||
361 | 363 | ||
362 | newtbl = mesh_table_alloc(tbl->size_order + 1); | 364 | newtbl = mesh_table_alloc(tbl->size_order + 1); |
363 | if (!newtbl) { | 365 | if (!newtbl) |
364 | err = -ENOMEM; | ||
365 | goto endgrow; | 366 | goto endgrow; |
366 | } | ||
367 | 367 | ||
368 | newtbl->free_node = tbl->free_node; | 368 | newtbl->free_node = tbl->free_node; |
369 | newtbl->mean_chain_len = tbl->mean_chain_len; | 369 | newtbl->mean_chain_len = tbl->mean_chain_len; |
@@ -373,13 +373,19 @@ struct mesh_table *mesh_table_grow(struct mesh_table *tbl) | |||
373 | oldhash = tbl->hash_buckets; | 373 | oldhash = tbl->hash_buckets; |
374 | for (i = 0; i <= tbl->hash_mask; i++) | 374 | for (i = 0; i <= tbl->hash_mask; i++) |
375 | hlist_for_each(p, &oldhash[i]) | 375 | hlist_for_each(p, &oldhash[i]) |
376 | tbl->copy_node(p, newtbl); | 376 | if (tbl->copy_node(p, newtbl) < 0) |
377 | goto errcopy; | ||
377 | 378 | ||
379 | return newtbl; | ||
380 | |||
381 | errcopy: | ||
382 | for (i = 0; i <= newtbl->hash_mask; i++) { | ||
383 | hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) | ||
384 | tbl->free_node(p, 0); | ||
385 | } | ||
386 | __mesh_table_free(tbl); | ||
378 | endgrow: | 387 | endgrow: |
379 | if (err) | 388 | return NULL; |
380 | return NULL; | ||
381 | else | ||
382 | return newtbl; | ||
383 | } | 389 | } |
384 | 390 | ||
385 | /** | 391 | /** |
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 2e161f6d8288..669eafafe497 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h | |||
@@ -109,7 +109,7 @@ struct mesh_table { | |||
109 | __u32 hash_rnd; /* Used for hash generation */ | 109 | __u32 hash_rnd; /* Used for hash generation */ |
110 | atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ | 110 | atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ |
111 | void (*free_node) (struct hlist_node *p, bool free_leafs); | 111 | void (*free_node) (struct hlist_node *p, bool free_leafs); |
112 | void (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl); | 112 | int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl); |
113 | int size_order; | 113 | int size_order; |
114 | int mean_chain_len; | 114 | int mean_chain_len; |
115 | }; | 115 | }; |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index af0cd1e3e213..7fa149e230e6 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -26,7 +26,7 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae) | |||
26 | { | 26 | { |
27 | if (ae) | 27 | if (ae) |
28 | offset += 6; | 28 | offset += 6; |
29 | return le32_to_cpu(get_unaligned((__le32 *) (preq_elem + offset))); | 29 | return get_unaligned_le32(preq_elem + offset); |
30 | } | 30 | } |
31 | 31 | ||
32 | /* HWMP IE processing macros */ | 32 | /* HWMP IE processing macros */ |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 99c2d360888e..5f88a2e6ee50 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -158,19 +158,14 @@ int mesh_path_add(u8 *dst, struct net_device *dev) | |||
158 | if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0) | 158 | if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0) |
159 | return -ENOSPC; | 159 | return -ENOSPC; |
160 | 160 | ||
161 | err = -ENOMEM; | ||
161 | new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL); | 162 | new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL); |
162 | if (!new_mpath) { | 163 | if (!new_mpath) |
163 | atomic_dec(&sdata->u.sta.mpaths); | 164 | goto err_path_alloc; |
164 | err = -ENOMEM; | 165 | |
165 | goto endadd2; | ||
166 | } | ||
167 | new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); | 166 | new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); |
168 | if (!new_node) { | 167 | if (!new_node) |
169 | kfree(new_mpath); | 168 | goto err_node_alloc; |
170 | atomic_dec(&sdata->u.sta.mpaths); | ||
171 | err = -ENOMEM; | ||
172 | goto endadd2; | ||
173 | } | ||
174 | 169 | ||
175 | read_lock(&pathtbl_resize_lock); | 170 | read_lock(&pathtbl_resize_lock); |
176 | memcpy(new_mpath->dst, dst, ETH_ALEN); | 171 | memcpy(new_mpath->dst, dst, ETH_ALEN); |
@@ -189,16 +184,11 @@ int mesh_path_add(u8 *dst, struct net_device *dev) | |||
189 | 184 | ||
190 | spin_lock(&mesh_paths->hashwlock[hash_idx]); | 185 | spin_lock(&mesh_paths->hashwlock[hash_idx]); |
191 | 186 | ||
187 | err = -EEXIST; | ||
192 | hlist_for_each_entry(node, n, bucket, list) { | 188 | hlist_for_each_entry(node, n, bucket, list) { |
193 | mpath = node->mpath; | 189 | mpath = node->mpath; |
194 | if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) | 190 | if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) == 0) |
195 | == 0) { | 191 | goto err_exists; |
196 | err = -EEXIST; | ||
197 | atomic_dec(&sdata->u.sta.mpaths); | ||
198 | kfree(new_node); | ||
199 | kfree(new_mpath); | ||
200 | goto endadd; | ||
201 | } | ||
202 | } | 192 | } |
203 | 193 | ||
204 | hlist_add_head_rcu(&new_node->list, bucket); | 194 | hlist_add_head_rcu(&new_node->list, bucket); |
@@ -206,10 +196,9 @@ int mesh_path_add(u8 *dst, struct net_device *dev) | |||
206 | mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) | 196 | mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) |
207 | grow = 1; | 197 | grow = 1; |
208 | 198 | ||
209 | endadd: | ||
210 | spin_unlock(&mesh_paths->hashwlock[hash_idx]); | 199 | spin_unlock(&mesh_paths->hashwlock[hash_idx]); |
211 | read_unlock(&pathtbl_resize_lock); | 200 | read_unlock(&pathtbl_resize_lock); |
212 | if (!err && grow) { | 201 | if (grow) { |
213 | struct mesh_table *oldtbl, *newtbl; | 202 | struct mesh_table *oldtbl, *newtbl; |
214 | 203 | ||
215 | write_lock(&pathtbl_resize_lock); | 204 | write_lock(&pathtbl_resize_lock); |
@@ -217,7 +206,7 @@ endadd: | |||
217 | newtbl = mesh_table_grow(mesh_paths); | 206 | newtbl = mesh_table_grow(mesh_paths); |
218 | if (!newtbl) { | 207 | if (!newtbl) { |
219 | write_unlock(&pathtbl_resize_lock); | 208 | write_unlock(&pathtbl_resize_lock); |
220 | return -ENOMEM; | 209 | return 0; |
221 | } | 210 | } |
222 | rcu_assign_pointer(mesh_paths, newtbl); | 211 | rcu_assign_pointer(mesh_paths, newtbl); |
223 | write_unlock(&pathtbl_resize_lock); | 212 | write_unlock(&pathtbl_resize_lock); |
@@ -225,7 +214,16 @@ endadd: | |||
225 | synchronize_rcu(); | 214 | synchronize_rcu(); |
226 | mesh_table_free(oldtbl, false); | 215 | mesh_table_free(oldtbl, false); |
227 | } | 216 | } |
228 | endadd2: | 217 | return 0; |
218 | |||
219 | err_exists: | ||
220 | spin_unlock(&mesh_paths->hashwlock[hash_idx]); | ||
221 | read_unlock(&pathtbl_resize_lock); | ||
222 | kfree(new_node); | ||
223 | err_node_alloc: | ||
224 | kfree(new_mpath); | ||
225 | err_path_alloc: | ||
226 | atomic_dec(&sdata->u.sta.mpaths); | ||
229 | return err; | 227 | return err; |
230 | } | 228 | } |
231 | 229 | ||
@@ -264,7 +262,6 @@ void mesh_plink_broken(struct sta_info *sta) | |||
264 | } | 262 | } |
265 | rcu_read_unlock(); | 263 | rcu_read_unlock(); |
266 | } | 264 | } |
267 | EXPORT_SYMBOL(mesh_plink_broken); | ||
268 | 265 | ||
269 | /** | 266 | /** |
270 | * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches | 267 | * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches |
@@ -460,25 +457,28 @@ static void mesh_path_node_free(struct hlist_node *p, bool free_leafs) | |||
460 | struct mpath_node *node = hlist_entry(p, struct mpath_node, list); | 457 | struct mpath_node *node = hlist_entry(p, struct mpath_node, list); |
461 | mpath = node->mpath; | 458 | mpath = node->mpath; |
462 | hlist_del_rcu(p); | 459 | hlist_del_rcu(p); |
463 | synchronize_rcu(); | ||
464 | if (free_leafs) | 460 | if (free_leafs) |
465 | kfree(mpath); | 461 | kfree(mpath); |
466 | kfree(node); | 462 | kfree(node); |
467 | } | 463 | } |
468 | 464 | ||
469 | static void mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) | 465 | static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) |
470 | { | 466 | { |
471 | struct mesh_path *mpath; | 467 | struct mesh_path *mpath; |
472 | struct mpath_node *node, *new_node; | 468 | struct mpath_node *node, *new_node; |
473 | u32 hash_idx; | 469 | u32 hash_idx; |
474 | 470 | ||
471 | new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); | ||
472 | if (new_node == NULL) | ||
473 | return -ENOMEM; | ||
474 | |||
475 | node = hlist_entry(p, struct mpath_node, list); | 475 | node = hlist_entry(p, struct mpath_node, list); |
476 | mpath = node->mpath; | 476 | mpath = node->mpath; |
477 | new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); | ||
478 | new_node->mpath = mpath; | 477 | new_node->mpath = mpath; |
479 | hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl); | 478 | hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl); |
480 | hlist_add_head(&new_node->list, | 479 | hlist_add_head(&new_node->list, |
481 | &newtbl->hash_buckets[hash_idx]); | 480 | &newtbl->hash_buckets[hash_idx]); |
481 | return 0; | ||
482 | } | 482 | } |
483 | 483 | ||
484 | int mesh_pathtbl_init(void) | 484 | int mesh_pathtbl_init(void) |
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 37f0c2b94ae7..9efeb1f07025 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c | |||
@@ -79,7 +79,7 @@ void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) | |||
79 | * | 79 | * |
80 | * @sta: mes peer link to restart | 80 | * @sta: mes peer link to restart |
81 | * | 81 | * |
82 | * Locking: this function must be called holding sta->plink_lock | 82 | * Locking: this function must be called holding sta->lock |
83 | */ | 83 | */ |
84 | static inline void mesh_plink_fsm_restart(struct sta_info *sta) | 84 | static inline void mesh_plink_fsm_restart(struct sta_info *sta) |
85 | { | 85 | { |
@@ -105,7 +105,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, | |||
105 | if (!sta) | 105 | if (!sta) |
106 | return NULL; | 106 | return NULL; |
107 | 107 | ||
108 | sta->flags |= WLAN_STA_AUTHORIZED; | 108 | sta->flags = WLAN_STA_AUTHORIZED; |
109 | sta->supp_rates[local->hw.conf.channel->band] = rates; | 109 | sta->supp_rates[local->hw.conf.channel->band] = rates; |
110 | 110 | ||
111 | return sta; | 111 | return sta; |
@@ -118,7 +118,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, | |||
118 | * | 118 | * |
119 | * All mesh paths with this peer as next hop will be flushed | 119 | * All mesh paths with this peer as next hop will be flushed |
120 | * | 120 | * |
121 | * Locking: the caller must hold sta->plink_lock | 121 | * Locking: the caller must hold sta->lock |
122 | */ | 122 | */ |
123 | static void __mesh_plink_deactivate(struct sta_info *sta) | 123 | static void __mesh_plink_deactivate(struct sta_info *sta) |
124 | { | 124 | { |
@@ -139,9 +139,9 @@ static void __mesh_plink_deactivate(struct sta_info *sta) | |||
139 | */ | 139 | */ |
140 | void mesh_plink_deactivate(struct sta_info *sta) | 140 | void mesh_plink_deactivate(struct sta_info *sta) |
141 | { | 141 | { |
142 | spin_lock_bh(&sta->plink_lock); | 142 | spin_lock_bh(&sta->lock); |
143 | __mesh_plink_deactivate(sta); | 143 | __mesh_plink_deactivate(sta); |
144 | spin_unlock_bh(&sta->plink_lock); | 144 | spin_unlock_bh(&sta->lock); |
145 | } | 145 | } |
146 | 146 | ||
147 | static int mesh_plink_frame_tx(struct net_device *dev, | 147 | static int mesh_plink_frame_tx(struct net_device *dev, |
@@ -270,10 +270,10 @@ static void mesh_plink_timer(unsigned long data) | |||
270 | */ | 270 | */ |
271 | sta = (struct sta_info *) data; | 271 | sta = (struct sta_info *) data; |
272 | 272 | ||
273 | spin_lock_bh(&sta->plink_lock); | 273 | spin_lock_bh(&sta->lock); |
274 | if (sta->ignore_plink_timer) { | 274 | if (sta->ignore_plink_timer) { |
275 | sta->ignore_plink_timer = false; | 275 | sta->ignore_plink_timer = false; |
276 | spin_unlock_bh(&sta->plink_lock); | 276 | spin_unlock_bh(&sta->lock); |
277 | return; | 277 | return; |
278 | } | 278 | } |
279 | mpl_dbg("Mesh plink timer for %s fired on state %d\n", | 279 | mpl_dbg("Mesh plink timer for %s fired on state %d\n", |
@@ -298,7 +298,7 @@ static void mesh_plink_timer(unsigned long data) | |||
298 | rand % sta->plink_timeout; | 298 | rand % sta->plink_timeout; |
299 | ++sta->plink_retries; | 299 | ++sta->plink_retries; |
300 | mod_plink_timer(sta, sta->plink_timeout); | 300 | mod_plink_timer(sta, sta->plink_timeout); |
301 | spin_unlock_bh(&sta->plink_lock); | 301 | spin_unlock_bh(&sta->lock); |
302 | mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, | 302 | mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, |
303 | 0, 0); | 303 | 0, 0); |
304 | break; | 304 | break; |
@@ -311,7 +311,7 @@ static void mesh_plink_timer(unsigned long data) | |||
311 | reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT); | 311 | reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT); |
312 | sta->plink_state = PLINK_HOLDING; | 312 | sta->plink_state = PLINK_HOLDING; |
313 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); | 313 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); |
314 | spin_unlock_bh(&sta->plink_lock); | 314 | spin_unlock_bh(&sta->lock); |
315 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid, | 315 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid, |
316 | reason); | 316 | reason); |
317 | break; | 317 | break; |
@@ -319,10 +319,10 @@ static void mesh_plink_timer(unsigned long data) | |||
319 | /* holding timer */ | 319 | /* holding timer */ |
320 | del_timer(&sta->plink_timer); | 320 | del_timer(&sta->plink_timer); |
321 | mesh_plink_fsm_restart(sta); | 321 | mesh_plink_fsm_restart(sta); |
322 | spin_unlock_bh(&sta->plink_lock); | 322 | spin_unlock_bh(&sta->lock); |
323 | break; | 323 | break; |
324 | default: | 324 | default: |
325 | spin_unlock_bh(&sta->plink_lock); | 325 | spin_unlock_bh(&sta->lock); |
326 | break; | 326 | break; |
327 | } | 327 | } |
328 | } | 328 | } |
@@ -344,16 +344,16 @@ int mesh_plink_open(struct sta_info *sta) | |||
344 | DECLARE_MAC_BUF(mac); | 344 | DECLARE_MAC_BUF(mac); |
345 | #endif | 345 | #endif |
346 | 346 | ||
347 | spin_lock_bh(&sta->plink_lock); | 347 | spin_lock_bh(&sta->lock); |
348 | get_random_bytes(&llid, 2); | 348 | get_random_bytes(&llid, 2); |
349 | sta->llid = llid; | 349 | sta->llid = llid; |
350 | if (sta->plink_state != PLINK_LISTEN) { | 350 | if (sta->plink_state != PLINK_LISTEN) { |
351 | spin_unlock_bh(&sta->plink_lock); | 351 | spin_unlock_bh(&sta->lock); |
352 | return -EBUSY; | 352 | return -EBUSY; |
353 | } | 353 | } |
354 | sta->plink_state = PLINK_OPN_SNT; | 354 | sta->plink_state = PLINK_OPN_SNT; |
355 | mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); | 355 | mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); |
356 | spin_unlock_bh(&sta->plink_lock); | 356 | spin_unlock_bh(&sta->lock); |
357 | mpl_dbg("Mesh plink: starting establishment with %s\n", | 357 | mpl_dbg("Mesh plink: starting establishment with %s\n", |
358 | print_mac(mac, sta->addr)); | 358 | print_mac(mac, sta->addr)); |
359 | 359 | ||
@@ -367,10 +367,10 @@ void mesh_plink_block(struct sta_info *sta) | |||
367 | DECLARE_MAC_BUF(mac); | 367 | DECLARE_MAC_BUF(mac); |
368 | #endif | 368 | #endif |
369 | 369 | ||
370 | spin_lock_bh(&sta->plink_lock); | 370 | spin_lock_bh(&sta->lock); |
371 | __mesh_plink_deactivate(sta); | 371 | __mesh_plink_deactivate(sta); |
372 | sta->plink_state = PLINK_BLOCKED; | 372 | sta->plink_state = PLINK_BLOCKED; |
373 | spin_unlock_bh(&sta->plink_lock); | 373 | spin_unlock_bh(&sta->lock); |
374 | } | 374 | } |
375 | 375 | ||
376 | int mesh_plink_close(struct sta_info *sta) | 376 | int mesh_plink_close(struct sta_info *sta) |
@@ -383,14 +383,14 @@ int mesh_plink_close(struct sta_info *sta) | |||
383 | 383 | ||
384 | mpl_dbg("Mesh plink: closing link with %s\n", | 384 | mpl_dbg("Mesh plink: closing link with %s\n", |
385 | print_mac(mac, sta->addr)); | 385 | print_mac(mac, sta->addr)); |
386 | spin_lock_bh(&sta->plink_lock); | 386 | spin_lock_bh(&sta->lock); |
387 | sta->reason = cpu_to_le16(MESH_LINK_CANCELLED); | 387 | sta->reason = cpu_to_le16(MESH_LINK_CANCELLED); |
388 | reason = sta->reason; | 388 | reason = sta->reason; |
389 | 389 | ||
390 | if (sta->plink_state == PLINK_LISTEN || | 390 | if (sta->plink_state == PLINK_LISTEN || |
391 | sta->plink_state == PLINK_BLOCKED) { | 391 | sta->plink_state == PLINK_BLOCKED) { |
392 | mesh_plink_fsm_restart(sta); | 392 | mesh_plink_fsm_restart(sta); |
393 | spin_unlock_bh(&sta->plink_lock); | 393 | spin_unlock_bh(&sta->lock); |
394 | return 0; | 394 | return 0; |
395 | } else if (sta->plink_state == PLINK_ESTAB) { | 395 | } else if (sta->plink_state == PLINK_ESTAB) { |
396 | __mesh_plink_deactivate(sta); | 396 | __mesh_plink_deactivate(sta); |
@@ -402,7 +402,7 @@ int mesh_plink_close(struct sta_info *sta) | |||
402 | sta->plink_state = PLINK_HOLDING; | 402 | sta->plink_state = PLINK_HOLDING; |
403 | llid = sta->llid; | 403 | llid = sta->llid; |
404 | plid = sta->plid; | 404 | plid = sta->plid; |
405 | spin_unlock_bh(&sta->plink_lock); | 405 | spin_unlock_bh(&sta->lock); |
406 | mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid, | 406 | mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid, |
407 | plid, reason); | 407 | plid, reason); |
408 | return 0; | 408 | return 0; |
@@ -490,7 +490,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
490 | /* avoid warning */ | 490 | /* avoid warning */ |
491 | break; | 491 | break; |
492 | } | 492 | } |
493 | spin_lock_bh(&sta->plink_lock); | 493 | spin_lock_bh(&sta->lock); |
494 | } else if (!sta) { | 494 | } else if (!sta) { |
495 | /* ftype == PLINK_OPEN */ | 495 | /* ftype == PLINK_OPEN */ |
496 | u64 rates; | 496 | u64 rates; |
@@ -512,9 +512,9 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
512 | return; | 512 | return; |
513 | } | 513 | } |
514 | event = OPN_ACPT; | 514 | event = OPN_ACPT; |
515 | spin_lock_bh(&sta->plink_lock); | 515 | spin_lock_bh(&sta->lock); |
516 | } else { | 516 | } else { |
517 | spin_lock_bh(&sta->plink_lock); | 517 | spin_lock_bh(&sta->lock); |
518 | switch (ftype) { | 518 | switch (ftype) { |
519 | case PLINK_OPEN: | 519 | case PLINK_OPEN: |
520 | if (!mesh_plink_free_count(sdata) || | 520 | if (!mesh_plink_free_count(sdata) || |
@@ -551,7 +551,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
551 | break; | 551 | break; |
552 | default: | 552 | default: |
553 | mpl_dbg("Mesh plink: unknown frame subtype\n"); | 553 | mpl_dbg("Mesh plink: unknown frame subtype\n"); |
554 | spin_unlock_bh(&sta->plink_lock); | 554 | spin_unlock_bh(&sta->lock); |
555 | rcu_read_unlock(); | 555 | rcu_read_unlock(); |
556 | return; | 556 | return; |
557 | } | 557 | } |
@@ -568,7 +568,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
568 | switch (event) { | 568 | switch (event) { |
569 | case CLS_ACPT: | 569 | case CLS_ACPT: |
570 | mesh_plink_fsm_restart(sta); | 570 | mesh_plink_fsm_restart(sta); |
571 | spin_unlock_bh(&sta->plink_lock); | 571 | spin_unlock_bh(&sta->lock); |
572 | break; | 572 | break; |
573 | case OPN_ACPT: | 573 | case OPN_ACPT: |
574 | sta->plink_state = PLINK_OPN_RCVD; | 574 | sta->plink_state = PLINK_OPN_RCVD; |
@@ -576,14 +576,14 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
576 | get_random_bytes(&llid, 2); | 576 | get_random_bytes(&llid, 2); |
577 | sta->llid = llid; | 577 | sta->llid = llid; |
578 | mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); | 578 | mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); |
579 | spin_unlock_bh(&sta->plink_lock); | 579 | spin_unlock_bh(&sta->lock); |
580 | mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, | 580 | mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, |
581 | 0, 0); | 581 | 0, 0); |
582 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, | 582 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, |
583 | llid, plid, 0); | 583 | llid, plid, 0); |
584 | break; | 584 | break; |
585 | default: | 585 | default: |
586 | spin_unlock_bh(&sta->plink_lock); | 586 | spin_unlock_bh(&sta->lock); |
587 | break; | 587 | break; |
588 | } | 588 | } |
589 | break; | 589 | break; |
@@ -603,7 +603,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
603 | sta->ignore_plink_timer = true; | 603 | sta->ignore_plink_timer = true; |
604 | 604 | ||
605 | llid = sta->llid; | 605 | llid = sta->llid; |
606 | spin_unlock_bh(&sta->plink_lock); | 606 | spin_unlock_bh(&sta->lock); |
607 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | 607 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, |
608 | plid, reason); | 608 | plid, reason); |
609 | break; | 609 | break; |
@@ -612,7 +612,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
612 | sta->plink_state = PLINK_OPN_RCVD; | 612 | sta->plink_state = PLINK_OPN_RCVD; |
613 | sta->plid = plid; | 613 | sta->plid = plid; |
614 | llid = sta->llid; | 614 | llid = sta->llid; |
615 | spin_unlock_bh(&sta->plink_lock); | 615 | spin_unlock_bh(&sta->lock); |
616 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | 616 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, |
617 | plid, 0); | 617 | plid, 0); |
618 | break; | 618 | break; |
@@ -622,10 +622,10 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
622 | dot11MeshConfirmTimeout(sdata))) | 622 | dot11MeshConfirmTimeout(sdata))) |
623 | sta->ignore_plink_timer = true; | 623 | sta->ignore_plink_timer = true; |
624 | 624 | ||
625 | spin_unlock_bh(&sta->plink_lock); | 625 | spin_unlock_bh(&sta->lock); |
626 | break; | 626 | break; |
627 | default: | 627 | default: |
628 | spin_unlock_bh(&sta->plink_lock); | 628 | spin_unlock_bh(&sta->lock); |
629 | break; | 629 | break; |
630 | } | 630 | } |
631 | break; | 631 | break; |
@@ -645,13 +645,13 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
645 | sta->ignore_plink_timer = true; | 645 | sta->ignore_plink_timer = true; |
646 | 646 | ||
647 | llid = sta->llid; | 647 | llid = sta->llid; |
648 | spin_unlock_bh(&sta->plink_lock); | 648 | spin_unlock_bh(&sta->lock); |
649 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | 649 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, |
650 | plid, reason); | 650 | plid, reason); |
651 | break; | 651 | break; |
652 | case OPN_ACPT: | 652 | case OPN_ACPT: |
653 | llid = sta->llid; | 653 | llid = sta->llid; |
654 | spin_unlock_bh(&sta->plink_lock); | 654 | spin_unlock_bh(&sta->lock); |
655 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | 655 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, |
656 | plid, 0); | 656 | plid, 0); |
657 | break; | 657 | break; |
@@ -659,12 +659,12 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
659 | del_timer(&sta->plink_timer); | 659 | del_timer(&sta->plink_timer); |
660 | sta->plink_state = PLINK_ESTAB; | 660 | sta->plink_state = PLINK_ESTAB; |
661 | mesh_plink_inc_estab_count(sdata); | 661 | mesh_plink_inc_estab_count(sdata); |
662 | spin_unlock_bh(&sta->plink_lock); | 662 | spin_unlock_bh(&sta->lock); |
663 | mpl_dbg("Mesh plink with %s ESTABLISHED\n", | 663 | mpl_dbg("Mesh plink with %s ESTABLISHED\n", |
664 | print_mac(mac, sta->addr)); | 664 | print_mac(mac, sta->addr)); |
665 | break; | 665 | break; |
666 | default: | 666 | default: |
667 | spin_unlock_bh(&sta->plink_lock); | 667 | spin_unlock_bh(&sta->lock); |
668 | break; | 668 | break; |
669 | } | 669 | } |
670 | break; | 670 | break; |
@@ -684,7 +684,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
684 | sta->ignore_plink_timer = true; | 684 | sta->ignore_plink_timer = true; |
685 | 685 | ||
686 | llid = sta->llid; | 686 | llid = sta->llid; |
687 | spin_unlock_bh(&sta->plink_lock); | 687 | spin_unlock_bh(&sta->lock); |
688 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | 688 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, |
689 | plid, reason); | 689 | plid, reason); |
690 | break; | 690 | break; |
@@ -692,14 +692,14 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
692 | del_timer(&sta->plink_timer); | 692 | del_timer(&sta->plink_timer); |
693 | sta->plink_state = PLINK_ESTAB; | 693 | sta->plink_state = PLINK_ESTAB; |
694 | mesh_plink_inc_estab_count(sdata); | 694 | mesh_plink_inc_estab_count(sdata); |
695 | spin_unlock_bh(&sta->plink_lock); | 695 | spin_unlock_bh(&sta->lock); |
696 | mpl_dbg("Mesh plink with %s ESTABLISHED\n", | 696 | mpl_dbg("Mesh plink with %s ESTABLISHED\n", |
697 | print_mac(mac, sta->addr)); | 697 | print_mac(mac, sta->addr)); |
698 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | 698 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, |
699 | plid, 0); | 699 | plid, 0); |
700 | break; | 700 | break; |
701 | default: | 701 | default: |
702 | spin_unlock_bh(&sta->plink_lock); | 702 | spin_unlock_bh(&sta->lock); |
703 | break; | 703 | break; |
704 | } | 704 | } |
705 | break; | 705 | break; |
@@ -713,18 +713,18 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
713 | sta->plink_state = PLINK_HOLDING; | 713 | sta->plink_state = PLINK_HOLDING; |
714 | llid = sta->llid; | 714 | llid = sta->llid; |
715 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); | 715 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); |
716 | spin_unlock_bh(&sta->plink_lock); | 716 | spin_unlock_bh(&sta->lock); |
717 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | 717 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, |
718 | plid, reason); | 718 | plid, reason); |
719 | break; | 719 | break; |
720 | case OPN_ACPT: | 720 | case OPN_ACPT: |
721 | llid = sta->llid; | 721 | llid = sta->llid; |
722 | spin_unlock_bh(&sta->plink_lock); | 722 | spin_unlock_bh(&sta->lock); |
723 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | 723 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, |
724 | plid, 0); | 724 | plid, 0); |
725 | break; | 725 | break; |
726 | default: | 726 | default: |
727 | spin_unlock_bh(&sta->plink_lock); | 727 | spin_unlock_bh(&sta->lock); |
728 | break; | 728 | break; |
729 | } | 729 | } |
730 | break; | 730 | break; |
@@ -734,7 +734,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
734 | if (del_timer(&sta->plink_timer)) | 734 | if (del_timer(&sta->plink_timer)) |
735 | sta->ignore_plink_timer = 1; | 735 | sta->ignore_plink_timer = 1; |
736 | mesh_plink_fsm_restart(sta); | 736 | mesh_plink_fsm_restart(sta); |
737 | spin_unlock_bh(&sta->plink_lock); | 737 | spin_unlock_bh(&sta->lock); |
738 | break; | 738 | break; |
739 | case OPN_ACPT: | 739 | case OPN_ACPT: |
740 | case CNF_ACPT: | 740 | case CNF_ACPT: |
@@ -742,19 +742,19 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
742 | case CNF_RJCT: | 742 | case CNF_RJCT: |
743 | llid = sta->llid; | 743 | llid = sta->llid; |
744 | reason = sta->reason; | 744 | reason = sta->reason; |
745 | spin_unlock_bh(&sta->plink_lock); | 745 | spin_unlock_bh(&sta->lock); |
746 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | 746 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, |
747 | plid, reason); | 747 | plid, reason); |
748 | break; | 748 | break; |
749 | default: | 749 | default: |
750 | spin_unlock_bh(&sta->plink_lock); | 750 | spin_unlock_bh(&sta->lock); |
751 | } | 751 | } |
752 | break; | 752 | break; |
753 | default: | 753 | default: |
754 | /* should not get here, PLINK_BLOCKED is dealt with at the | 754 | /* should not get here, PLINK_BLOCKED is dealt with at the |
755 | * beggining of the function | 755 | * beggining of the function |
756 | */ | 756 | */ |
757 | spin_unlock_bh(&sta->plink_lock); | 757 | spin_unlock_bh(&sta->lock); |
758 | break; | 758 | break; |
759 | } | 759 | } |
760 | 760 | ||
diff --git a/net/mac80211/michael.c b/net/mac80211/michael.c index 0f844f7895f1..408649bd4702 100644 --- a/net/mac80211/michael.c +++ b/net/mac80211/michael.c | |||
@@ -6,85 +6,68 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | |||
10 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/bitops.h> | ||
11 | #include <linux/ieee80211.h> | ||
12 | #include <asm/unaligned.h> | ||
11 | 13 | ||
12 | #include "michael.h" | 14 | #include "michael.h" |
13 | 15 | ||
14 | static inline u32 rotr(u32 val, int bits) | 16 | static void michael_block(struct michael_mic_ctx *mctx, u32 val) |
15 | { | ||
16 | return (val >> bits) | (val << (32 - bits)); | ||
17 | } | ||
18 | |||
19 | |||
20 | static inline u32 rotl(u32 val, int bits) | ||
21 | { | ||
22 | return (val << bits) | (val >> (32 - bits)); | ||
23 | } | ||
24 | |||
25 | |||
26 | static inline u32 xswap(u32 val) | ||
27 | { | ||
28 | return ((val & 0xff00ff00) >> 8) | ((val & 0x00ff00ff) << 8); | ||
29 | } | ||
30 | |||
31 | |||
32 | #define michael_block(l, r) \ | ||
33 | do { \ | ||
34 | r ^= rotl(l, 17); \ | ||
35 | l += r; \ | ||
36 | r ^= xswap(l); \ | ||
37 | l += r; \ | ||
38 | r ^= rotl(l, 3); \ | ||
39 | l += r; \ | ||
40 | r ^= rotr(l, 2); \ | ||
41 | l += r; \ | ||
42 | } while (0) | ||
43 | |||
44 | |||
45 | static inline u32 michael_get32(u8 *data) | ||
46 | { | 17 | { |
47 | return data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); | 18 | mctx->l ^= val; |
19 | mctx->r ^= rol32(mctx->l, 17); | ||
20 | mctx->l += mctx->r; | ||
21 | mctx->r ^= ((mctx->l & 0xff00ff00) >> 8) | | ||
22 | ((mctx->l & 0x00ff00ff) << 8); | ||
23 | mctx->l += mctx->r; | ||
24 | mctx->r ^= rol32(mctx->l, 3); | ||
25 | mctx->l += mctx->r; | ||
26 | mctx->r ^= ror32(mctx->l, 2); | ||
27 | mctx->l += mctx->r; | ||
48 | } | 28 | } |
49 | 29 | ||
50 | 30 | static void michael_mic_hdr(struct michael_mic_ctx *mctx, const u8 *key, | |
51 | static inline void michael_put32(u32 val, u8 *data) | 31 | struct ieee80211_hdr *hdr) |
52 | { | 32 | { |
53 | data[0] = val & 0xff; | 33 | u8 *da, *sa, tid; |
54 | data[1] = (val >> 8) & 0xff; | 34 | |
55 | data[2] = (val >> 16) & 0xff; | 35 | da = ieee80211_get_DA(hdr); |
56 | data[3] = (val >> 24) & 0xff; | 36 | sa = ieee80211_get_SA(hdr); |
37 | if (ieee80211_is_data_qos(hdr->frame_control)) | ||
38 | tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; | ||
39 | else | ||
40 | tid = 0; | ||
41 | |||
42 | mctx->l = get_unaligned_le32(key); | ||
43 | mctx->r = get_unaligned_le32(key + 4); | ||
44 | |||
45 | /* | ||
46 | * A pseudo header (DA, SA, Priority, 0, 0, 0) is used in Michael MIC | ||
47 | * calculation, but it is _not_ transmitted | ||
48 | */ | ||
49 | michael_block(mctx, get_unaligned_le32(da)); | ||
50 | michael_block(mctx, get_unaligned_le16(&da[4]) | | ||
51 | (get_unaligned_le16(sa) << 16)); | ||
52 | michael_block(mctx, get_unaligned_le32(&sa[2])); | ||
53 | michael_block(mctx, tid); | ||
57 | } | 54 | } |
58 | 55 | ||
59 | 56 | void michael_mic(const u8 *key, struct ieee80211_hdr *hdr, | |
60 | void michael_mic(u8 *key, u8 *da, u8 *sa, u8 priority, | 57 | const u8 *data, size_t data_len, u8 *mic) |
61 | u8 *data, size_t data_len, u8 *mic) | ||
62 | { | 58 | { |
63 | u32 l, r, val; | 59 | u32 val; |
64 | size_t block, blocks, left; | 60 | size_t block, blocks, left; |
61 | struct michael_mic_ctx mctx; | ||
65 | 62 | ||
66 | l = michael_get32(key); | 63 | michael_mic_hdr(&mctx, key, hdr); |
67 | r = michael_get32(key + 4); | ||
68 | |||
69 | /* A pseudo header (DA, SA, Priority, 0, 0, 0) is used in Michael MIC | ||
70 | * calculation, but it is _not_ transmitted */ | ||
71 | l ^= michael_get32(da); | ||
72 | michael_block(l, r); | ||
73 | l ^= da[4] | (da[5] << 8) | (sa[0] << 16) | (sa[1] << 24); | ||
74 | michael_block(l, r); | ||
75 | l ^= michael_get32(&sa[2]); | ||
76 | michael_block(l, r); | ||
77 | l ^= priority; | ||
78 | michael_block(l, r); | ||
79 | 64 | ||
80 | /* Real data */ | 65 | /* Real data */ |
81 | blocks = data_len / 4; | 66 | blocks = data_len / 4; |
82 | left = data_len % 4; | 67 | left = data_len % 4; |
83 | 68 | ||
84 | for (block = 0; block < blocks; block++) { | 69 | for (block = 0; block < blocks; block++) |
85 | l ^= michael_get32(&data[block * 4]); | 70 | michael_block(&mctx, get_unaligned_le32(&data[block * 4])); |
86 | michael_block(l, r); | ||
87 | } | ||
88 | 71 | ||
89 | /* Partial block of 0..3 bytes and padding: 0x5a + 4..7 zeros to make | 72 | /* Partial block of 0..3 bytes and padding: 0x5a + 4..7 zeros to make |
90 | * total length a multiple of 4. */ | 73 | * total length a multiple of 4. */ |
@@ -94,11 +77,10 @@ void michael_mic(u8 *key, u8 *da, u8 *sa, u8 priority, | |||
94 | left--; | 77 | left--; |
95 | val |= data[blocks * 4 + left]; | 78 | val |= data[blocks * 4 + left]; |
96 | } | 79 | } |
97 | l ^= val; | ||
98 | michael_block(l, r); | ||
99 | /* last block is zero, so l ^ 0 = l */ | ||
100 | michael_block(l, r); | ||
101 | 80 | ||
102 | michael_put32(l, mic); | 81 | michael_block(&mctx, val); |
103 | michael_put32(r, mic + 4); | 82 | michael_block(&mctx, 0); |
83 | |||
84 | put_unaligned_le32(mctx.l, mic); | ||
85 | put_unaligned_le32(mctx.r, mic + 4); | ||
104 | } | 86 | } |
diff --git a/net/mac80211/michael.h b/net/mac80211/michael.h index 2e6aebabeea1..3b848dad9587 100644 --- a/net/mac80211/michael.h +++ b/net/mac80211/michael.h | |||
@@ -14,7 +14,11 @@ | |||
14 | 14 | ||
15 | #define MICHAEL_MIC_LEN 8 | 15 | #define MICHAEL_MIC_LEN 8 |
16 | 16 | ||
17 | void michael_mic(u8 *key, u8 *da, u8 *sa, u8 priority, | 17 | struct michael_mic_ctx { |
18 | u8 *data, size_t data_len, u8 *mic); | 18 | u32 l, r; |
19 | }; | ||
20 | |||
21 | void michael_mic(const u8 *key, struct ieee80211_hdr *hdr, | ||
22 | const u8 *data, size_t data_len, u8 *mic); | ||
19 | 23 | ||
20 | #endif /* MICHAEL_H */ | 24 | #endif /* MICHAEL_H */ |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index b404537c0bcd..d7c371e36bf0 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -78,7 +78,7 @@ static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, | |||
78 | static struct ieee80211_sta_bss * | 78 | static struct ieee80211_sta_bss * |
79 | ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq, | 79 | ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq, |
80 | u8 *ssid, u8 ssid_len); | 80 | u8 *ssid, u8 ssid_len); |
81 | static void ieee80211_rx_bss_put(struct net_device *dev, | 81 | static void ieee80211_rx_bss_put(struct ieee80211_local *local, |
82 | struct ieee80211_sta_bss *bss); | 82 | struct ieee80211_sta_bss *bss); |
83 | static int ieee80211_sta_find_ibss(struct net_device *dev, | 83 | static int ieee80211_sta_find_ibss(struct net_device *dev, |
84 | struct ieee80211_if_sta *ifsta); | 84 | struct ieee80211_if_sta *ifsta); |
@@ -87,6 +87,7 @@ static int ieee80211_sta_start_scan(struct net_device *dev, | |||
87 | u8 *ssid, size_t ssid_len); | 87 | u8 *ssid, size_t ssid_len); |
88 | static int ieee80211_sta_config_auth(struct net_device *dev, | 88 | static int ieee80211_sta_config_auth(struct net_device *dev, |
89 | struct ieee80211_if_sta *ifsta); | 89 | struct ieee80211_if_sta *ifsta); |
90 | static void sta_rx_agg_session_timer_expired(unsigned long data); | ||
90 | 91 | ||
91 | 92 | ||
92 | void ieee802_11_parse_elems(u8 *start, size_t len, | 93 | void ieee802_11_parse_elems(u8 *start, size_t len, |
@@ -203,6 +204,25 @@ void ieee802_11_parse_elems(u8 *start, size_t len, | |||
203 | elems->perr = pos; | 204 | elems->perr = pos; |
204 | elems->perr_len = elen; | 205 | elems->perr_len = elen; |
205 | break; | 206 | break; |
207 | case WLAN_EID_CHANNEL_SWITCH: | ||
208 | elems->ch_switch_elem = pos; | ||
209 | elems->ch_switch_elem_len = elen; | ||
210 | break; | ||
211 | case WLAN_EID_QUIET: | ||
212 | if (!elems->quiet_elem) { | ||
213 | elems->quiet_elem = pos; | ||
214 | elems->quiet_elem_len = elen; | ||
215 | } | ||
216 | elems->num_of_quiet_elem++; | ||
217 | break; | ||
218 | case WLAN_EID_COUNTRY: | ||
219 | elems->country_elem = pos; | ||
220 | elems->country_elem_len = elen; | ||
221 | break; | ||
222 | case WLAN_EID_PWR_CONSTRAINT: | ||
223 | elems->pwr_constr_elem = pos; | ||
224 | elems->pwr_constr_elem_len = elen; | ||
225 | break; | ||
206 | default: | 226 | default: |
207 | break; | 227 | break; |
208 | } | 228 | } |
@@ -256,19 +276,8 @@ static void ieee80211_sta_def_wmm_params(struct net_device *dev, | |||
256 | qparam.cw_max = 1023; | 276 | qparam.cw_max = 1023; |
257 | qparam.txop = 0; | 277 | qparam.txop = 0; |
258 | 278 | ||
259 | for (i = IEEE80211_TX_QUEUE_DATA0; i < NUM_TX_DATA_QUEUES; i++) | 279 | for (i = 0; i < local_to_hw(local)->queues; i++) |
260 | local->ops->conf_tx(local_to_hw(local), | 280 | local->ops->conf_tx(local_to_hw(local), i, &qparam); |
261 | i + IEEE80211_TX_QUEUE_DATA0, | ||
262 | &qparam); | ||
263 | |||
264 | if (ibss) { | ||
265 | /* IBSS uses different parameters for Beacon sending */ | ||
266 | qparam.cw_min++; | ||
267 | qparam.cw_min *= 2; | ||
268 | qparam.cw_min--; | ||
269 | local->ops->conf_tx(local_to_hw(local), | ||
270 | IEEE80211_TX_QUEUE_BEACON, &qparam); | ||
271 | } | ||
272 | } | 281 | } |
273 | } | 282 | } |
274 | 283 | ||
@@ -282,6 +291,12 @@ static void ieee80211_sta_wmm_params(struct net_device *dev, | |||
282 | int count; | 291 | int count; |
283 | u8 *pos; | 292 | u8 *pos; |
284 | 293 | ||
294 | if (!(ifsta->flags & IEEE80211_STA_WMM_ENABLED)) | ||
295 | return; | ||
296 | |||
297 | if (!wmm_param) | ||
298 | return; | ||
299 | |||
285 | if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) | 300 | if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) |
286 | return; | 301 | return; |
287 | count = wmm_param[6] & 0x0f; | 302 | count = wmm_param[6] & 0x0f; |
@@ -305,37 +320,33 @@ static void ieee80211_sta_wmm_params(struct net_device *dev, | |||
305 | 320 | ||
306 | switch (aci) { | 321 | switch (aci) { |
307 | case 1: | 322 | case 1: |
308 | queue = IEEE80211_TX_QUEUE_DATA3; | 323 | queue = 3; |
309 | if (acm) { | 324 | if (acm) |
310 | local->wmm_acm |= BIT(0) | BIT(3); | 325 | local->wmm_acm |= BIT(0) | BIT(3); |
311 | } | ||
312 | break; | 326 | break; |
313 | case 2: | 327 | case 2: |
314 | queue = IEEE80211_TX_QUEUE_DATA1; | 328 | queue = 1; |
315 | if (acm) { | 329 | if (acm) |
316 | local->wmm_acm |= BIT(4) | BIT(5); | 330 | local->wmm_acm |= BIT(4) | BIT(5); |
317 | } | ||
318 | break; | 331 | break; |
319 | case 3: | 332 | case 3: |
320 | queue = IEEE80211_TX_QUEUE_DATA0; | 333 | queue = 0; |
321 | if (acm) { | 334 | if (acm) |
322 | local->wmm_acm |= BIT(6) | BIT(7); | 335 | local->wmm_acm |= BIT(6) | BIT(7); |
323 | } | ||
324 | break; | 336 | break; |
325 | case 0: | 337 | case 0: |
326 | default: | 338 | default: |
327 | queue = IEEE80211_TX_QUEUE_DATA2; | 339 | queue = 2; |
328 | if (acm) { | 340 | if (acm) |
329 | local->wmm_acm |= BIT(1) | BIT(2); | 341 | local->wmm_acm |= BIT(1) | BIT(2); |
330 | } | ||
331 | break; | 342 | break; |
332 | } | 343 | } |
333 | 344 | ||
334 | params.aifs = pos[0] & 0x0f; | 345 | params.aifs = pos[0] & 0x0f; |
335 | params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); | 346 | params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); |
336 | params.cw_min = ecw2cw(pos[1] & 0x0f); | 347 | params.cw_min = ecw2cw(pos[1] & 0x0f); |
337 | params.txop = pos[2] | (pos[3] << 8); | 348 | params.txop = get_unaligned_le16(pos + 2); |
338 | #ifdef CONFIG_MAC80211_DEBUG | 349 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
339 | printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " | 350 | printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " |
340 | "cWmin=%d cWmax=%d txop=%d\n", | 351 | "cWmin=%d cWmax=%d txop=%d\n", |
341 | dev->name, queue, aci, acm, params.aifs, params.cw_min, | 352 | dev->name, queue, aci, acm, params.aifs, params.cw_min, |
@@ -355,11 +366,14 @@ static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata, | |||
355 | bool use_short_preamble) | 366 | bool use_short_preamble) |
356 | { | 367 | { |
357 | struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf; | 368 | struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf; |
369 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
358 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 370 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
359 | DECLARE_MAC_BUF(mac); | 371 | DECLARE_MAC_BUF(mac); |
372 | #endif | ||
360 | u32 changed = 0; | 373 | u32 changed = 0; |
361 | 374 | ||
362 | if (use_protection != bss_conf->use_cts_prot) { | 375 | if (use_protection != bss_conf->use_cts_prot) { |
376 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
363 | if (net_ratelimit()) { | 377 | if (net_ratelimit()) { |
364 | printk(KERN_DEBUG "%s: CTS protection %s (BSSID=" | 378 | printk(KERN_DEBUG "%s: CTS protection %s (BSSID=" |
365 | "%s)\n", | 379 | "%s)\n", |
@@ -367,11 +381,13 @@ static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata, | |||
367 | use_protection ? "enabled" : "disabled", | 381 | use_protection ? "enabled" : "disabled", |
368 | print_mac(mac, ifsta->bssid)); | 382 | print_mac(mac, ifsta->bssid)); |
369 | } | 383 | } |
384 | #endif | ||
370 | bss_conf->use_cts_prot = use_protection; | 385 | bss_conf->use_cts_prot = use_protection; |
371 | changed |= BSS_CHANGED_ERP_CTS_PROT; | 386 | changed |= BSS_CHANGED_ERP_CTS_PROT; |
372 | } | 387 | } |
373 | 388 | ||
374 | if (use_short_preamble != bss_conf->use_short_preamble) { | 389 | if (use_short_preamble != bss_conf->use_short_preamble) { |
390 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
375 | if (net_ratelimit()) { | 391 | if (net_ratelimit()) { |
376 | printk(KERN_DEBUG "%s: switched to %s barker preamble" | 392 | printk(KERN_DEBUG "%s: switched to %s barker preamble" |
377 | " (BSSID=%s)\n", | 393 | " (BSSID=%s)\n", |
@@ -379,6 +395,7 @@ static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata, | |||
379 | use_short_preamble ? "short" : "long", | 395 | use_short_preamble ? "short" : "long", |
380 | print_mac(mac, ifsta->bssid)); | 396 | print_mac(mac, ifsta->bssid)); |
381 | } | 397 | } |
398 | #endif | ||
382 | bss_conf->use_short_preamble = use_short_preamble; | 399 | bss_conf->use_short_preamble = use_short_preamble; |
383 | changed |= BSS_CHANGED_ERP_PREAMBLE; | 400 | changed |= BSS_CHANGED_ERP_PREAMBLE; |
384 | } | 401 | } |
@@ -537,7 +554,7 @@ static void ieee80211_set_associated(struct net_device *dev, | |||
537 | 554 | ||
538 | changed |= ieee80211_handle_bss_capability(sdata, bss); | 555 | changed |= ieee80211_handle_bss_capability(sdata, bss); |
539 | 556 | ||
540 | ieee80211_rx_bss_put(dev, bss); | 557 | ieee80211_rx_bss_put(local, bss); |
541 | } | 558 | } |
542 | 559 | ||
543 | if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) { | 560 | if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) { |
@@ -555,7 +572,7 @@ static void ieee80211_set_associated(struct net_device *dev, | |||
555 | netif_carrier_off(dev); | 572 | netif_carrier_off(dev); |
556 | ieee80211_sta_tear_down_BA_sessions(dev, ifsta->bssid); | 573 | ieee80211_sta_tear_down_BA_sessions(dev, ifsta->bssid); |
557 | ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; | 574 | ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; |
558 | ieee80211_reset_erp_info(dev); | 575 | changed |= ieee80211_reset_erp_info(dev); |
559 | 576 | ||
560 | sdata->bss_conf.assoc_ht = 0; | 577 | sdata->bss_conf.assoc_ht = 0; |
561 | sdata->bss_conf.ht_conf = NULL; | 578 | sdata->bss_conf.ht_conf = NULL; |
@@ -589,7 +606,7 @@ void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb, | |||
589 | int encrypt) | 606 | int encrypt) |
590 | { | 607 | { |
591 | struct ieee80211_sub_if_data *sdata; | 608 | struct ieee80211_sub_if_data *sdata; |
592 | struct ieee80211_tx_packet_data *pkt_data; | 609 | struct ieee80211_tx_info *info; |
593 | 610 | ||
594 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 611 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
595 | skb->dev = sdata->local->mdev; | 612 | skb->dev = sdata->local->mdev; |
@@ -597,11 +614,11 @@ void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb, | |||
597 | skb_set_network_header(skb, 0); | 614 | skb_set_network_header(skb, 0); |
598 | skb_set_transport_header(skb, 0); | 615 | skb_set_transport_header(skb, 0); |
599 | 616 | ||
600 | pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; | 617 | info = IEEE80211_SKB_CB(skb); |
601 | memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data)); | 618 | memset(info, 0, sizeof(struct ieee80211_tx_info)); |
602 | pkt_data->ifindex = sdata->dev->ifindex; | 619 | info->control.ifindex = sdata->dev->ifindex; |
603 | if (!encrypt) | 620 | if (!encrypt) |
604 | pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT; | 621 | info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT; |
605 | 622 | ||
606 | dev_queue_xmit(skb); | 623 | dev_queue_xmit(skb); |
607 | } | 624 | } |
@@ -730,9 +747,8 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
730 | if (bss) { | 747 | if (bss) { |
731 | if (bss->capability & WLAN_CAPABILITY_PRIVACY) | 748 | if (bss->capability & WLAN_CAPABILITY_PRIVACY) |
732 | capab |= WLAN_CAPABILITY_PRIVACY; | 749 | capab |= WLAN_CAPABILITY_PRIVACY; |
733 | if (bss->wmm_ie) { | 750 | if (bss->wmm_ie) |
734 | wmm = 1; | 751 | wmm = 1; |
735 | } | ||
736 | 752 | ||
737 | /* get all rates supported by the device and the AP as | 753 | /* get all rates supported by the device and the AP as |
738 | * some APs don't like getting a superset of their rates | 754 | * some APs don't like getting a superset of their rates |
@@ -740,7 +756,11 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
740 | * b-only mode) */ | 756 | * b-only mode) */ |
741 | rates_len = ieee80211_compatible_rates(bss, sband, &rates); | 757 | rates_len = ieee80211_compatible_rates(bss, sband, &rates); |
742 | 758 | ||
743 | ieee80211_rx_bss_put(dev, bss); | 759 | if ((bss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT) && |
760 | (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)) | ||
761 | capab |= WLAN_CAPABILITY_SPECTRUM_MGMT; | ||
762 | |||
763 | ieee80211_rx_bss_put(local, bss); | ||
744 | } else { | 764 | } else { |
745 | rates = ~0; | 765 | rates = ~0; |
746 | rates_len = sband->n_bitrates; | 766 | rates_len = sband->n_bitrates; |
@@ -807,6 +827,26 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
807 | } | 827 | } |
808 | } | 828 | } |
809 | 829 | ||
830 | if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) { | ||
831 | /* 1. power capabilities */ | ||
832 | pos = skb_put(skb, 4); | ||
833 | *pos++ = WLAN_EID_PWR_CAPABILITY; | ||
834 | *pos++ = 2; | ||
835 | *pos++ = 0; /* min tx power */ | ||
836 | *pos++ = local->hw.conf.channel->max_power; /* max tx power */ | ||
837 | |||
838 | /* 2. supported channels */ | ||
839 | /* TODO: get this in reg domain format */ | ||
840 | pos = skb_put(skb, 2 * sband->n_channels + 2); | ||
841 | *pos++ = WLAN_EID_SUPPORTED_CHANNELS; | ||
842 | *pos++ = 2 * sband->n_channels; | ||
843 | for (i = 0; i < sband->n_channels; i++) { | ||
844 | *pos++ = ieee80211_frequency_to_channel( | ||
845 | sband->channels[i].center_freq); | ||
846 | *pos++ = 1; /* one channel in the subband*/ | ||
847 | } | ||
848 | } | ||
849 | |||
810 | if (ifsta->extra_ie) { | 850 | if (ifsta->extra_ie) { |
811 | pos = skb_put(skb, ifsta->extra_ie_len); | 851 | pos = skb_put(skb, ifsta->extra_ie_len); |
812 | memcpy(pos, ifsta->extra_ie, ifsta->extra_ie_len); | 852 | memcpy(pos, ifsta->extra_ie, ifsta->extra_ie_len); |
@@ -824,9 +864,32 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
824 | *pos++ = 1; /* WME ver */ | 864 | *pos++ = 1; /* WME ver */ |
825 | *pos++ = 0; | 865 | *pos++ = 0; |
826 | } | 866 | } |
867 | |||
827 | /* wmm support is a must to HT */ | 868 | /* wmm support is a must to HT */ |
828 | if (wmm && sband->ht_info.ht_supported) { | 869 | if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) && |
829 | __le16 tmp = cpu_to_le16(sband->ht_info.cap); | 870 | sband->ht_info.ht_supported && bss->ht_add_ie) { |
871 | struct ieee80211_ht_addt_info *ht_add_info = | ||
872 | (struct ieee80211_ht_addt_info *)bss->ht_add_ie; | ||
873 | u16 cap = sband->ht_info.cap; | ||
874 | __le16 tmp; | ||
875 | u32 flags = local->hw.conf.channel->flags; | ||
876 | |||
877 | switch (ht_add_info->ht_param & IEEE80211_HT_IE_CHA_SEC_OFFSET) { | ||
878 | case IEEE80211_HT_IE_CHA_SEC_ABOVE: | ||
879 | if (flags & IEEE80211_CHAN_NO_FAT_ABOVE) { | ||
880 | cap &= ~IEEE80211_HT_CAP_SUP_WIDTH; | ||
881 | cap &= ~IEEE80211_HT_CAP_SGI_40; | ||
882 | } | ||
883 | break; | ||
884 | case IEEE80211_HT_IE_CHA_SEC_BELOW: | ||
885 | if (flags & IEEE80211_CHAN_NO_FAT_BELOW) { | ||
886 | cap &= ~IEEE80211_HT_CAP_SUP_WIDTH; | ||
887 | cap &= ~IEEE80211_HT_CAP_SGI_40; | ||
888 | } | ||
889 | break; | ||
890 | } | ||
891 | |||
892 | tmp = cpu_to_le16(cap); | ||
830 | pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2); | 893 | pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2); |
831 | *pos++ = WLAN_EID_HT_CAPABILITY; | 894 | *pos++ = WLAN_EID_HT_CAPABILITY; |
832 | *pos++ = sizeof(struct ieee80211_ht_cap); | 895 | *pos++ = sizeof(struct ieee80211_ht_cap); |
@@ -929,7 +992,7 @@ static int ieee80211_privacy_mismatch(struct net_device *dev, | |||
929 | wep_privacy = !!ieee80211_sta_wep_configured(dev); | 992 | wep_privacy = !!ieee80211_sta_wep_configured(dev); |
930 | privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED); | 993 | privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED); |
931 | 994 | ||
932 | ieee80211_rx_bss_put(dev, bss); | 995 | ieee80211_rx_bss_put(local, bss); |
933 | 996 | ||
934 | if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked)) | 997 | if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked)) |
935 | return 0; | 998 | return 0; |
@@ -1121,14 +1184,10 @@ static void ieee80211_auth_challenge(struct net_device *dev, | |||
1121 | u8 *pos; | 1184 | u8 *pos; |
1122 | struct ieee802_11_elems elems; | 1185 | struct ieee802_11_elems elems; |
1123 | 1186 | ||
1124 | printk(KERN_DEBUG "%s: replying to auth challenge\n", dev->name); | ||
1125 | pos = mgmt->u.auth.variable; | 1187 | pos = mgmt->u.auth.variable; |
1126 | ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); | 1188 | ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); |
1127 | if (!elems.challenge) { | 1189 | if (!elems.challenge) |
1128 | printk(KERN_DEBUG "%s: no challenge IE in shared key auth " | ||
1129 | "frame\n", dev->name); | ||
1130 | return; | 1190 | return; |
1131 | } | ||
1132 | ieee80211_send_auth(dev, ifsta, 3, elems.challenge - 2, | 1191 | ieee80211_send_auth(dev, ifsta, 3, elems.challenge - 2, |
1133 | elems.challenge_len + 2, 1); | 1192 | elems.challenge_len + 2, 1); |
1134 | } | 1193 | } |
@@ -1144,8 +1203,8 @@ static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid, | |||
1144 | struct ieee80211_mgmt *mgmt; | 1203 | struct ieee80211_mgmt *mgmt; |
1145 | u16 capab; | 1204 | u16 capab; |
1146 | 1205 | ||
1147 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 + | 1206 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); |
1148 | sizeof(mgmt->u.action.u.addba_resp)); | 1207 | |
1149 | if (!skb) { | 1208 | if (!skb) { |
1150 | printk(KERN_DEBUG "%s: failed to allocate buffer " | 1209 | printk(KERN_DEBUG "%s: failed to allocate buffer " |
1151 | "for addba resp frame\n", dev->name); | 1210 | "for addba resp frame\n", dev->name); |
@@ -1193,9 +1252,7 @@ void ieee80211_send_addba_request(struct net_device *dev, const u8 *da, | |||
1193 | struct ieee80211_mgmt *mgmt; | 1252 | struct ieee80211_mgmt *mgmt; |
1194 | u16 capab; | 1253 | u16 capab; |
1195 | 1254 | ||
1196 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 + | 1255 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); |
1197 | sizeof(mgmt->u.action.u.addba_req)); | ||
1198 | |||
1199 | 1256 | ||
1200 | if (!skb) { | 1257 | if (!skb) { |
1201 | printk(KERN_ERR "%s: failed to allocate buffer " | 1258 | printk(KERN_ERR "%s: failed to allocate buffer " |
@@ -1296,7 +1353,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev, | |||
1296 | 1353 | ||
1297 | 1354 | ||
1298 | /* examine state machine */ | 1355 | /* examine state machine */ |
1299 | spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); | 1356 | spin_lock_bh(&sta->lock); |
1300 | 1357 | ||
1301 | if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { | 1358 | if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { |
1302 | #ifdef CONFIG_MAC80211_HT_DEBUG | 1359 | #ifdef CONFIG_MAC80211_HT_DEBUG |
@@ -1312,9 +1369,11 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev, | |||
1312 | sta->ampdu_mlme.tid_rx[tid] = | 1369 | sta->ampdu_mlme.tid_rx[tid] = |
1313 | kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); | 1370 | kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); |
1314 | if (!sta->ampdu_mlme.tid_rx[tid]) { | 1371 | if (!sta->ampdu_mlme.tid_rx[tid]) { |
1372 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1315 | if (net_ratelimit()) | 1373 | if (net_ratelimit()) |
1316 | printk(KERN_ERR "allocate rx mlme to tid %d failed\n", | 1374 | printk(KERN_ERR "allocate rx mlme to tid %d failed\n", |
1317 | tid); | 1375 | tid); |
1376 | #endif | ||
1318 | goto end; | 1377 | goto end; |
1319 | } | 1378 | } |
1320 | /* rx timer */ | 1379 | /* rx timer */ |
@@ -1330,9 +1389,11 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev, | |||
1330 | tid_agg_rx->reorder_buf = | 1389 | tid_agg_rx->reorder_buf = |
1331 | kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC); | 1390 | kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC); |
1332 | if (!tid_agg_rx->reorder_buf) { | 1391 | if (!tid_agg_rx->reorder_buf) { |
1392 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1333 | if (net_ratelimit()) | 1393 | if (net_ratelimit()) |
1334 | printk(KERN_ERR "can not allocate reordering buffer " | 1394 | printk(KERN_ERR "can not allocate reordering buffer " |
1335 | "to tid %d\n", tid); | 1395 | "to tid %d\n", tid); |
1396 | #endif | ||
1336 | kfree(sta->ampdu_mlme.tid_rx[tid]); | 1397 | kfree(sta->ampdu_mlme.tid_rx[tid]); |
1337 | goto end; | 1398 | goto end; |
1338 | } | 1399 | } |
@@ -1363,7 +1424,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev, | |||
1363 | tid_agg_rx->stored_mpdu_num = 0; | 1424 | tid_agg_rx->stored_mpdu_num = 0; |
1364 | status = WLAN_STATUS_SUCCESS; | 1425 | status = WLAN_STATUS_SUCCESS; |
1365 | end: | 1426 | end: |
1366 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); | 1427 | spin_unlock_bh(&sta->lock); |
1367 | 1428 | ||
1368 | end_no_lock: | 1429 | end_no_lock: |
1369 | ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid, | 1430 | ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid, |
@@ -1395,18 +1456,16 @@ static void ieee80211_sta_process_addba_resp(struct net_device *dev, | |||
1395 | 1456 | ||
1396 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 1457 | state = &sta->ampdu_mlme.tid_state_tx[tid]; |
1397 | 1458 | ||
1398 | spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); | 1459 | spin_lock_bh(&sta->lock); |
1399 | 1460 | ||
1400 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | 1461 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { |
1401 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | 1462 | spin_unlock_bh(&sta->lock); |
1402 | printk(KERN_DEBUG "state not HT_ADDBA_REQUESTED_MSK:" | ||
1403 | "%d\n", *state); | ||
1404 | goto addba_resp_exit; | 1463 | goto addba_resp_exit; |
1405 | } | 1464 | } |
1406 | 1465 | ||
1407 | if (mgmt->u.action.u.addba_resp.dialog_token != | 1466 | if (mgmt->u.action.u.addba_resp.dialog_token != |
1408 | sta->ampdu_mlme.tid_tx[tid]->dialog_token) { | 1467 | sta->ampdu_mlme.tid_tx[tid]->dialog_token) { |
1409 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | 1468 | spin_unlock_bh(&sta->lock); |
1410 | #ifdef CONFIG_MAC80211_HT_DEBUG | 1469 | #ifdef CONFIG_MAC80211_HT_DEBUG |
1411 | printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); | 1470 | printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); |
1412 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 1471 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
@@ -1419,26 +1478,18 @@ static void ieee80211_sta_process_addba_resp(struct net_device *dev, | |||
1419 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 1478 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
1420 | if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) | 1479 | if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) |
1421 | == WLAN_STATUS_SUCCESS) { | 1480 | == WLAN_STATUS_SUCCESS) { |
1422 | if (*state & HT_ADDBA_RECEIVED_MSK) | ||
1423 | printk(KERN_DEBUG "double addBA response\n"); | ||
1424 | |||
1425 | *state |= HT_ADDBA_RECEIVED_MSK; | 1481 | *state |= HT_ADDBA_RECEIVED_MSK; |
1426 | sta->ampdu_mlme.addba_req_num[tid] = 0; | 1482 | sta->ampdu_mlme.addba_req_num[tid] = 0; |
1427 | 1483 | ||
1428 | if (*state == HT_AGG_STATE_OPERATIONAL) { | 1484 | if (*state == HT_AGG_STATE_OPERATIONAL) |
1429 | printk(KERN_DEBUG "Aggregation on for tid %d \n", tid); | ||
1430 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | 1485 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); |
1431 | } | ||
1432 | 1486 | ||
1433 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | 1487 | spin_unlock_bh(&sta->lock); |
1434 | printk(KERN_DEBUG "recipient accepted agg: tid %d \n", tid); | ||
1435 | } else { | 1488 | } else { |
1436 | printk(KERN_DEBUG "recipient rejected agg: tid %d \n", tid); | ||
1437 | |||
1438 | sta->ampdu_mlme.addba_req_num[tid]++; | 1489 | sta->ampdu_mlme.addba_req_num[tid]++; |
1439 | /* this will allow the state check in stop_BA_session */ | 1490 | /* this will allow the state check in stop_BA_session */ |
1440 | *state = HT_AGG_STATE_OPERATIONAL; | 1491 | *state = HT_AGG_STATE_OPERATIONAL; |
1441 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | 1492 | spin_unlock_bh(&sta->lock); |
1442 | ieee80211_stop_tx_ba_session(hw, sta->addr, tid, | 1493 | ieee80211_stop_tx_ba_session(hw, sta->addr, tid, |
1443 | WLAN_BACK_INITIATOR); | 1494 | WLAN_BACK_INITIATOR); |
1444 | } | 1495 | } |
@@ -1457,8 +1508,7 @@ void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, | |||
1457 | struct ieee80211_mgmt *mgmt; | 1508 | struct ieee80211_mgmt *mgmt; |
1458 | u16 params; | 1509 | u16 params; |
1459 | 1510 | ||
1460 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 + | 1511 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); |
1461 | sizeof(mgmt->u.action.u.delba)); | ||
1462 | 1512 | ||
1463 | if (!skb) { | 1513 | if (!skb) { |
1464 | printk(KERN_ERR "%s: failed to allocate buffer " | 1514 | printk(KERN_ERR "%s: failed to allocate buffer " |
@@ -1491,6 +1541,35 @@ void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, | |||
1491 | ieee80211_sta_tx(dev, skb, 0); | 1541 | ieee80211_sta_tx(dev, skb, 0); |
1492 | } | 1542 | } |
1493 | 1543 | ||
1544 | void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn) | ||
1545 | { | ||
1546 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1547 | struct sk_buff *skb; | ||
1548 | struct ieee80211_bar *bar; | ||
1549 | u16 bar_control = 0; | ||
1550 | |||
1551 | skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); | ||
1552 | if (!skb) { | ||
1553 | printk(KERN_ERR "%s: failed to allocate buffer for " | ||
1554 | "bar frame\n", dev->name); | ||
1555 | return; | ||
1556 | } | ||
1557 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
1558 | bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar)); | ||
1559 | memset(bar, 0, sizeof(*bar)); | ||
1560 | bar->frame_control = IEEE80211_FC(IEEE80211_FTYPE_CTL, | ||
1561 | IEEE80211_STYPE_BACK_REQ); | ||
1562 | memcpy(bar->ra, ra, ETH_ALEN); | ||
1563 | memcpy(bar->ta, dev->dev_addr, ETH_ALEN); | ||
1564 | bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; | ||
1565 | bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; | ||
1566 | bar_control |= (u16)(tid << 12); | ||
1567 | bar->control = cpu_to_le16(bar_control); | ||
1568 | bar->start_seq_num = cpu_to_le16(ssn); | ||
1569 | |||
1570 | ieee80211_sta_tx(dev, skb, 0); | ||
1571 | } | ||
1572 | |||
1494 | void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid, | 1573 | void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid, |
1495 | u16 initiator, u16 reason) | 1574 | u16 initiator, u16 reason) |
1496 | { | 1575 | { |
@@ -1509,17 +1588,17 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid, | |||
1509 | } | 1588 | } |
1510 | 1589 | ||
1511 | /* check if TID is in operational state */ | 1590 | /* check if TID is in operational state */ |
1512 | spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); | 1591 | spin_lock_bh(&sta->lock); |
1513 | if (sta->ampdu_mlme.tid_state_rx[tid] | 1592 | if (sta->ampdu_mlme.tid_state_rx[tid] |
1514 | != HT_AGG_STATE_OPERATIONAL) { | 1593 | != HT_AGG_STATE_OPERATIONAL) { |
1515 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); | 1594 | spin_unlock_bh(&sta->lock); |
1516 | rcu_read_unlock(); | 1595 | rcu_read_unlock(); |
1517 | return; | 1596 | return; |
1518 | } | 1597 | } |
1519 | sta->ampdu_mlme.tid_state_rx[tid] = | 1598 | sta->ampdu_mlme.tid_state_rx[tid] = |
1520 | HT_AGG_STATE_REQ_STOP_BA_MSK | | 1599 | HT_AGG_STATE_REQ_STOP_BA_MSK | |
1521 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); | 1600 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); |
1522 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); | 1601 | spin_unlock_bh(&sta->lock); |
1523 | 1602 | ||
1524 | /* stop HW Rx aggregation. ampdu_action existence | 1603 | /* stop HW Rx aggregation. ampdu_action existence |
1525 | * already verified in session init so we add the BUG_ON */ | 1604 | * already verified in session init so we add the BUG_ON */ |
@@ -1534,7 +1613,7 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid, | |||
1534 | ra, tid, NULL); | 1613 | ra, tid, NULL); |
1535 | if (ret) | 1614 | if (ret) |
1536 | printk(KERN_DEBUG "HW problem - can not stop rx " | 1615 | printk(KERN_DEBUG "HW problem - can not stop rx " |
1537 | "aggergation for tid %d\n", tid); | 1616 | "aggregation for tid %d\n", tid); |
1538 | 1617 | ||
1539 | /* shutdown timer has not expired */ | 1618 | /* shutdown timer has not expired */ |
1540 | if (initiator != WLAN_BACK_TIMER) | 1619 | if (initiator != WLAN_BACK_TIMER) |
@@ -1596,10 +1675,10 @@ static void ieee80211_sta_process_delba(struct net_device *dev, | |||
1596 | ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid, | 1675 | ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid, |
1597 | WLAN_BACK_INITIATOR, 0); | 1676 | WLAN_BACK_INITIATOR, 0); |
1598 | else { /* WLAN_BACK_RECIPIENT */ | 1677 | else { /* WLAN_BACK_RECIPIENT */ |
1599 | spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); | 1678 | spin_lock_bh(&sta->lock); |
1600 | sta->ampdu_mlme.tid_state_tx[tid] = | 1679 | sta->ampdu_mlme.tid_state_tx[tid] = |
1601 | HT_AGG_STATE_OPERATIONAL; | 1680 | HT_AGG_STATE_OPERATIONAL; |
1602 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | 1681 | spin_unlock_bh(&sta->lock); |
1603 | ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid, | 1682 | ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid, |
1604 | WLAN_BACK_RECIPIENT); | 1683 | WLAN_BACK_RECIPIENT); |
1605 | } | 1684 | } |
@@ -1636,20 +1715,24 @@ void sta_addba_resp_timer_expired(unsigned long data) | |||
1636 | 1715 | ||
1637 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 1716 | state = &sta->ampdu_mlme.tid_state_tx[tid]; |
1638 | /* check if the TID waits for addBA response */ | 1717 | /* check if the TID waits for addBA response */ |
1639 | spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); | 1718 | spin_lock_bh(&sta->lock); |
1640 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | 1719 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { |
1641 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | 1720 | spin_unlock_bh(&sta->lock); |
1642 | *state = HT_AGG_STATE_IDLE; | 1721 | *state = HT_AGG_STATE_IDLE; |
1722 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1643 | printk(KERN_DEBUG "timer expired on tid %d but we are not " | 1723 | printk(KERN_DEBUG "timer expired on tid %d but we are not " |
1644 | "expecting addBA response there", tid); | 1724 | "expecting addBA response there", tid); |
1725 | #endif | ||
1645 | goto timer_expired_exit; | 1726 | goto timer_expired_exit; |
1646 | } | 1727 | } |
1647 | 1728 | ||
1729 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1648 | printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); | 1730 | printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); |
1731 | #endif | ||
1649 | 1732 | ||
1650 | /* go through the state check in stop_BA_session */ | 1733 | /* go through the state check in stop_BA_session */ |
1651 | *state = HT_AGG_STATE_OPERATIONAL; | 1734 | *state = HT_AGG_STATE_OPERATIONAL; |
1652 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | 1735 | spin_unlock_bh(&sta->lock); |
1653 | ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid, | 1736 | ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid, |
1654 | WLAN_BACK_INITIATOR); | 1737 | WLAN_BACK_INITIATOR); |
1655 | 1738 | ||
@@ -1662,7 +1745,7 @@ timer_expired_exit: | |||
1662 | * resetting it after each frame that arrives from the originator. | 1745 | * resetting it after each frame that arrives from the originator. |
1663 | * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed. | 1746 | * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed. |
1664 | */ | 1747 | */ |
1665 | void sta_rx_agg_session_timer_expired(unsigned long data) | 1748 | static void sta_rx_agg_session_timer_expired(unsigned long data) |
1666 | { | 1749 | { |
1667 | /* not an elegant detour, but there is no choice as the timer passes | 1750 | /* not an elegant detour, but there is no choice as the timer passes |
1668 | * only one argument, and various sta_info are needed here, so init | 1751 | * only one argument, and various sta_info are needed here, so init |
@@ -1673,7 +1756,9 @@ void sta_rx_agg_session_timer_expired(unsigned long data) | |||
1673 | struct sta_info *sta = container_of(timer_to_id, struct sta_info, | 1756 | struct sta_info *sta = container_of(timer_to_id, struct sta_info, |
1674 | timer_to_tid[0]); | 1757 | timer_to_tid[0]); |
1675 | 1758 | ||
1759 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1676 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); | 1760 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); |
1761 | #endif | ||
1677 | ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr, | 1762 | ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr, |
1678 | (u16)*ptid, WLAN_BACK_TIMER, | 1763 | (u16)*ptid, WLAN_BACK_TIMER, |
1679 | WLAN_REASON_QSTA_TIMEOUT); | 1764 | WLAN_REASON_QSTA_TIMEOUT); |
@@ -1693,6 +1778,71 @@ void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr) | |||
1693 | } | 1778 | } |
1694 | } | 1779 | } |
1695 | 1780 | ||
1781 | static void ieee80211_send_refuse_measurement_request(struct net_device *dev, | ||
1782 | struct ieee80211_msrment_ie *request_ie, | ||
1783 | const u8 *da, const u8 *bssid, | ||
1784 | u8 dialog_token) | ||
1785 | { | ||
1786 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1787 | struct sk_buff *skb; | ||
1788 | struct ieee80211_mgmt *msr_report; | ||
1789 | |||
1790 | skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom + | ||
1791 | sizeof(struct ieee80211_msrment_ie)); | ||
1792 | |||
1793 | if (!skb) { | ||
1794 | printk(KERN_ERR "%s: failed to allocate buffer for " | ||
1795 | "measurement report frame\n", dev->name); | ||
1796 | return; | ||
1797 | } | ||
1798 | |||
1799 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
1800 | msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24); | ||
1801 | memset(msr_report, 0, 24); | ||
1802 | memcpy(msr_report->da, da, ETH_ALEN); | ||
1803 | memcpy(msr_report->sa, dev->dev_addr, ETH_ALEN); | ||
1804 | memcpy(msr_report->bssid, bssid, ETH_ALEN); | ||
1805 | msr_report->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
1806 | IEEE80211_STYPE_ACTION); | ||
1807 | |||
1808 | skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement)); | ||
1809 | msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT; | ||
1810 | msr_report->u.action.u.measurement.action_code = | ||
1811 | WLAN_ACTION_SPCT_MSR_RPRT; | ||
1812 | msr_report->u.action.u.measurement.dialog_token = dialog_token; | ||
1813 | |||
1814 | msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT; | ||
1815 | msr_report->u.action.u.measurement.length = | ||
1816 | sizeof(struct ieee80211_msrment_ie); | ||
1817 | |||
1818 | memset(&msr_report->u.action.u.measurement.msr_elem, 0, | ||
1819 | sizeof(struct ieee80211_msrment_ie)); | ||
1820 | msr_report->u.action.u.measurement.msr_elem.token = request_ie->token; | ||
1821 | msr_report->u.action.u.measurement.msr_elem.mode |= | ||
1822 | IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED; | ||
1823 | msr_report->u.action.u.measurement.msr_elem.type = request_ie->type; | ||
1824 | |||
1825 | ieee80211_sta_tx(dev, skb, 0); | ||
1826 | } | ||
1827 | |||
1828 | static void ieee80211_sta_process_measurement_req(struct net_device *dev, | ||
1829 | struct ieee80211_mgmt *mgmt, | ||
1830 | size_t len) | ||
1831 | { | ||
1832 | /* | ||
1833 | * Ignoring measurement request is spec violation. | ||
1834 | * Mandatory measurements must be reported optional | ||
1835 | * measurements might be refused or reported incapable | ||
1836 | * For now just refuse | ||
1837 | * TODO: Answer basic measurement as unmeasured | ||
1838 | */ | ||
1839 | ieee80211_send_refuse_measurement_request(dev, | ||
1840 | &mgmt->u.action.u.measurement.msr_elem, | ||
1841 | mgmt->sa, mgmt->bssid, | ||
1842 | mgmt->u.action.u.measurement.dialog_token); | ||
1843 | } | ||
1844 | |||
1845 | |||
1696 | static void ieee80211_rx_mgmt_auth(struct net_device *dev, | 1846 | static void ieee80211_rx_mgmt_auth(struct net_device *dev, |
1697 | struct ieee80211_if_sta *ifsta, | 1847 | struct ieee80211_if_sta *ifsta, |
1698 | struct ieee80211_mgmt *mgmt, | 1848 | struct ieee80211_mgmt *mgmt, |
@@ -1703,73 +1853,41 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev, | |||
1703 | DECLARE_MAC_BUF(mac); | 1853 | DECLARE_MAC_BUF(mac); |
1704 | 1854 | ||
1705 | if (ifsta->state != IEEE80211_AUTHENTICATE && | 1855 | if (ifsta->state != IEEE80211_AUTHENTICATE && |
1706 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS) { | 1856 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS) |
1707 | printk(KERN_DEBUG "%s: authentication frame received from " | ||
1708 | "%s, but not in authenticate state - ignored\n", | ||
1709 | dev->name, print_mac(mac, mgmt->sa)); | ||
1710 | return; | 1857 | return; |
1711 | } | ||
1712 | 1858 | ||
1713 | if (len < 24 + 6) { | 1859 | if (len < 24 + 6) |
1714 | printk(KERN_DEBUG "%s: too short (%zd) authentication frame " | ||
1715 | "received from %s - ignored\n", | ||
1716 | dev->name, len, print_mac(mac, mgmt->sa)); | ||
1717 | return; | 1860 | return; |
1718 | } | ||
1719 | 1861 | ||
1720 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | 1862 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && |
1721 | memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { | 1863 | memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) |
1722 | printk(KERN_DEBUG "%s: authentication frame received from " | ||
1723 | "unknown AP (SA=%s BSSID=%s) - " | ||
1724 | "ignored\n", dev->name, print_mac(mac, mgmt->sa), | ||
1725 | print_mac(mac, mgmt->bssid)); | ||
1726 | return; | 1864 | return; |
1727 | } | ||
1728 | 1865 | ||
1729 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | 1866 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && |
1730 | memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) { | 1867 | memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) |
1731 | printk(KERN_DEBUG "%s: authentication frame received from " | ||
1732 | "unknown BSSID (SA=%s BSSID=%s) - " | ||
1733 | "ignored\n", dev->name, print_mac(mac, mgmt->sa), | ||
1734 | print_mac(mac, mgmt->bssid)); | ||
1735 | return; | 1868 | return; |
1736 | } | ||
1737 | 1869 | ||
1738 | auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); | 1870 | auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); |
1739 | auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); | 1871 | auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); |
1740 | status_code = le16_to_cpu(mgmt->u.auth.status_code); | 1872 | status_code = le16_to_cpu(mgmt->u.auth.status_code); |
1741 | 1873 | ||
1742 | printk(KERN_DEBUG "%s: RX authentication from %s (alg=%d " | ||
1743 | "transaction=%d status=%d)\n", | ||
1744 | dev->name, print_mac(mac, mgmt->sa), auth_alg, | ||
1745 | auth_transaction, status_code); | ||
1746 | |||
1747 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | 1874 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { |
1748 | /* IEEE 802.11 standard does not require authentication in IBSS | 1875 | /* |
1876 | * IEEE 802.11 standard does not require authentication in IBSS | ||
1749 | * networks and most implementations do not seem to use it. | 1877 | * networks and most implementations do not seem to use it. |
1750 | * However, try to reply to authentication attempts if someone | 1878 | * However, try to reply to authentication attempts if someone |
1751 | * has actually implemented this. | 1879 | * has actually implemented this. |
1752 | * TODO: Could implement shared key authentication. */ | 1880 | */ |
1753 | if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) { | 1881 | if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) |
1754 | printk(KERN_DEBUG "%s: unexpected IBSS authentication " | ||
1755 | "frame (alg=%d transaction=%d)\n", | ||
1756 | dev->name, auth_alg, auth_transaction); | ||
1757 | return; | 1882 | return; |
1758 | } | ||
1759 | ieee80211_send_auth(dev, ifsta, 2, NULL, 0, 0); | 1883 | ieee80211_send_auth(dev, ifsta, 2, NULL, 0, 0); |
1760 | } | 1884 | } |
1761 | 1885 | ||
1762 | if (auth_alg != ifsta->auth_alg || | 1886 | if (auth_alg != ifsta->auth_alg || |
1763 | auth_transaction != ifsta->auth_transaction) { | 1887 | auth_transaction != ifsta->auth_transaction) |
1764 | printk(KERN_DEBUG "%s: unexpected authentication frame " | ||
1765 | "(alg=%d transaction=%d)\n", | ||
1766 | dev->name, auth_alg, auth_transaction); | ||
1767 | return; | 1888 | return; |
1768 | } | ||
1769 | 1889 | ||
1770 | if (status_code != WLAN_STATUS_SUCCESS) { | 1890 | if (status_code != WLAN_STATUS_SUCCESS) { |
1771 | printk(KERN_DEBUG "%s: AP denied authentication (auth_alg=%d " | ||
1772 | "code=%d)\n", dev->name, ifsta->auth_alg, status_code); | ||
1773 | if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) { | 1891 | if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) { |
1774 | u8 algs[3]; | 1892 | u8 algs[3]; |
1775 | const int num_algs = ARRAY_SIZE(algs); | 1893 | const int num_algs = ARRAY_SIZE(algs); |
@@ -1798,9 +1916,6 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev, | |||
1798 | !ieee80211_sta_wep_configured(dev)) | 1916 | !ieee80211_sta_wep_configured(dev)) |
1799 | continue; | 1917 | continue; |
1800 | ifsta->auth_alg = algs[pos]; | 1918 | ifsta->auth_alg = algs[pos]; |
1801 | printk(KERN_DEBUG "%s: set auth_alg=%d for " | ||
1802 | "next try\n", | ||
1803 | dev->name, ifsta->auth_alg); | ||
1804 | break; | 1919 | break; |
1805 | } | 1920 | } |
1806 | } | 1921 | } |
@@ -1830,30 +1945,16 @@ static void ieee80211_rx_mgmt_deauth(struct net_device *dev, | |||
1830 | u16 reason_code; | 1945 | u16 reason_code; |
1831 | DECLARE_MAC_BUF(mac); | 1946 | DECLARE_MAC_BUF(mac); |
1832 | 1947 | ||
1833 | if (len < 24 + 2) { | 1948 | if (len < 24 + 2) |
1834 | printk(KERN_DEBUG "%s: too short (%zd) deauthentication frame " | ||
1835 | "received from %s - ignored\n", | ||
1836 | dev->name, len, print_mac(mac, mgmt->sa)); | ||
1837 | return; | 1949 | return; |
1838 | } | ||
1839 | 1950 | ||
1840 | if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { | 1951 | if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN)) |
1841 | printk(KERN_DEBUG "%s: deauthentication frame received from " | ||
1842 | "unknown AP (SA=%s BSSID=%s) - " | ||
1843 | "ignored\n", dev->name, print_mac(mac, mgmt->sa), | ||
1844 | print_mac(mac, mgmt->bssid)); | ||
1845 | return; | 1952 | return; |
1846 | } | ||
1847 | 1953 | ||
1848 | reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); | 1954 | reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); |
1849 | 1955 | ||
1850 | printk(KERN_DEBUG "%s: RX deauthentication from %s" | 1956 | if (ifsta->flags & IEEE80211_STA_AUTHENTICATED) |
1851 | " (reason=%d)\n", | ||
1852 | dev->name, print_mac(mac, mgmt->sa), reason_code); | ||
1853 | |||
1854 | if (ifsta->flags & IEEE80211_STA_AUTHENTICATED) { | ||
1855 | printk(KERN_DEBUG "%s: deauthenticated\n", dev->name); | 1957 | printk(KERN_DEBUG "%s: deauthenticated\n", dev->name); |
1856 | } | ||
1857 | 1958 | ||
1858 | if (ifsta->state == IEEE80211_AUTHENTICATE || | 1959 | if (ifsta->state == IEEE80211_AUTHENTICATE || |
1859 | ifsta->state == IEEE80211_ASSOCIATE || | 1960 | ifsta->state == IEEE80211_ASSOCIATE || |
@@ -1876,27 +1977,14 @@ static void ieee80211_rx_mgmt_disassoc(struct net_device *dev, | |||
1876 | u16 reason_code; | 1977 | u16 reason_code; |
1877 | DECLARE_MAC_BUF(mac); | 1978 | DECLARE_MAC_BUF(mac); |
1878 | 1979 | ||
1879 | if (len < 24 + 2) { | 1980 | if (len < 24 + 2) |
1880 | printk(KERN_DEBUG "%s: too short (%zd) disassociation frame " | ||
1881 | "received from %s - ignored\n", | ||
1882 | dev->name, len, print_mac(mac, mgmt->sa)); | ||
1883 | return; | 1981 | return; |
1884 | } | ||
1885 | 1982 | ||
1886 | if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { | 1983 | if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN)) |
1887 | printk(KERN_DEBUG "%s: disassociation frame received from " | ||
1888 | "unknown AP (SA=%s BSSID=%s) - " | ||
1889 | "ignored\n", dev->name, print_mac(mac, mgmt->sa), | ||
1890 | print_mac(mac, mgmt->bssid)); | ||
1891 | return; | 1984 | return; |
1892 | } | ||
1893 | 1985 | ||
1894 | reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); | 1986 | reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); |
1895 | 1987 | ||
1896 | printk(KERN_DEBUG "%s: RX disassociation from %s" | ||
1897 | " (reason=%d)\n", | ||
1898 | dev->name, print_mac(mac, mgmt->sa), reason_code); | ||
1899 | |||
1900 | if (ifsta->flags & IEEE80211_STA_ASSOCIATED) | 1988 | if (ifsta->flags & IEEE80211_STA_ASSOCIATED) |
1901 | printk(KERN_DEBUG "%s: disassociated\n", dev->name); | 1989 | printk(KERN_DEBUG "%s: disassociated\n", dev->name); |
1902 | 1990 | ||
@@ -1932,27 +2020,14 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
1932 | /* AssocResp and ReassocResp have identical structure, so process both | 2020 | /* AssocResp and ReassocResp have identical structure, so process both |
1933 | * of them in this function. */ | 2021 | * of them in this function. */ |
1934 | 2022 | ||
1935 | if (ifsta->state != IEEE80211_ASSOCIATE) { | 2023 | if (ifsta->state != IEEE80211_ASSOCIATE) |
1936 | printk(KERN_DEBUG "%s: association frame received from " | ||
1937 | "%s, but not in associate state - ignored\n", | ||
1938 | dev->name, print_mac(mac, mgmt->sa)); | ||
1939 | return; | 2024 | return; |
1940 | } | ||
1941 | 2025 | ||
1942 | if (len < 24 + 6) { | 2026 | if (len < 24 + 6) |
1943 | printk(KERN_DEBUG "%s: too short (%zd) association frame " | ||
1944 | "received from %s - ignored\n", | ||
1945 | dev->name, len, print_mac(mac, mgmt->sa)); | ||
1946 | return; | 2027 | return; |
1947 | } | ||
1948 | 2028 | ||
1949 | if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { | 2029 | if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) |
1950 | printk(KERN_DEBUG "%s: association frame received from " | ||
1951 | "unknown AP (SA=%s BSSID=%s) - " | ||
1952 | "ignored\n", dev->name, print_mac(mac, mgmt->sa), | ||
1953 | print_mac(mac, mgmt->bssid)); | ||
1954 | return; | 2030 | return; |
1955 | } | ||
1956 | 2031 | ||
1957 | capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); | 2032 | capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); |
1958 | status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); | 2033 | status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); |
@@ -2016,10 +2091,10 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2016 | local->hw.conf.channel->center_freq, | 2091 | local->hw.conf.channel->center_freq, |
2017 | ifsta->ssid, ifsta->ssid_len); | 2092 | ifsta->ssid, ifsta->ssid_len); |
2018 | if (bss) { | 2093 | if (bss) { |
2019 | sta->last_rssi = bss->rssi; | ||
2020 | sta->last_signal = bss->signal; | 2094 | sta->last_signal = bss->signal; |
2095 | sta->last_qual = bss->qual; | ||
2021 | sta->last_noise = bss->noise; | 2096 | sta->last_noise = bss->noise; |
2022 | ieee80211_rx_bss_put(dev, bss); | 2097 | ieee80211_rx_bss_put(local, bss); |
2023 | } | 2098 | } |
2024 | 2099 | ||
2025 | err = sta_info_insert(sta); | 2100 | err = sta_info_insert(sta); |
@@ -2041,8 +2116,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2041 | * to between the sta_info_alloc() and sta_info_insert() above. | 2116 | * to between the sta_info_alloc() and sta_info_insert() above. |
2042 | */ | 2117 | */ |
2043 | 2118 | ||
2044 | sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP | | 2119 | set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP | |
2045 | WLAN_STA_AUTHORIZED; | 2120 | WLAN_STA_AUTHORIZED); |
2046 | 2121 | ||
2047 | rates = 0; | 2122 | rates = 0; |
2048 | basic_rates = 0; | 2123 | basic_rates = 0; |
@@ -2086,7 +2161,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2086 | else | 2161 | else |
2087 | sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; | 2162 | sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; |
2088 | 2163 | ||
2089 | if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param) { | 2164 | if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param && |
2165 | (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) { | ||
2090 | struct ieee80211_ht_bss_info bss_info; | 2166 | struct ieee80211_ht_bss_info bss_info; |
2091 | ieee80211_ht_cap_ie_to_ht_info( | 2167 | ieee80211_ht_cap_ie_to_ht_info( |
2092 | (struct ieee80211_ht_cap *) | 2168 | (struct ieee80211_ht_cap *) |
@@ -2099,8 +2175,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2099 | 2175 | ||
2100 | rate_control_rate_init(sta, local); | 2176 | rate_control_rate_init(sta, local); |
2101 | 2177 | ||
2102 | if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) { | 2178 | if (elems.wmm_param) { |
2103 | sta->flags |= WLAN_STA_WME; | 2179 | set_sta_flags(sta, WLAN_STA_WME); |
2104 | rcu_read_unlock(); | 2180 | rcu_read_unlock(); |
2105 | ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, | 2181 | ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, |
2106 | elems.wmm_param_len); | 2182 | elems.wmm_param_len); |
@@ -2136,10 +2212,9 @@ static void __ieee80211_rx_bss_hash_add(struct net_device *dev, | |||
2136 | 2212 | ||
2137 | 2213 | ||
2138 | /* Caller must hold local->sta_bss_lock */ | 2214 | /* Caller must hold local->sta_bss_lock */ |
2139 | static void __ieee80211_rx_bss_hash_del(struct net_device *dev, | 2215 | static void __ieee80211_rx_bss_hash_del(struct ieee80211_local *local, |
2140 | struct ieee80211_sta_bss *bss) | 2216 | struct ieee80211_sta_bss *bss) |
2141 | { | 2217 | { |
2142 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2143 | struct ieee80211_sta_bss *b, *prev = NULL; | 2218 | struct ieee80211_sta_bss *b, *prev = NULL; |
2144 | b = local->sta_bss_hash[STA_HASH(bss->bssid)]; | 2219 | b = local->sta_bss_hash[STA_HASH(bss->bssid)]; |
2145 | while (b) { | 2220 | while (b) { |
@@ -2284,45 +2359,42 @@ static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss) | |||
2284 | kfree(bss->rsn_ie); | 2359 | kfree(bss->rsn_ie); |
2285 | kfree(bss->wmm_ie); | 2360 | kfree(bss->wmm_ie); |
2286 | kfree(bss->ht_ie); | 2361 | kfree(bss->ht_ie); |
2362 | kfree(bss->ht_add_ie); | ||
2287 | kfree(bss_mesh_id(bss)); | 2363 | kfree(bss_mesh_id(bss)); |
2288 | kfree(bss_mesh_cfg(bss)); | 2364 | kfree(bss_mesh_cfg(bss)); |
2289 | kfree(bss); | 2365 | kfree(bss); |
2290 | } | 2366 | } |
2291 | 2367 | ||
2292 | 2368 | ||
2293 | static void ieee80211_rx_bss_put(struct net_device *dev, | 2369 | static void ieee80211_rx_bss_put(struct ieee80211_local *local, |
2294 | struct ieee80211_sta_bss *bss) | 2370 | struct ieee80211_sta_bss *bss) |
2295 | { | 2371 | { |
2296 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2297 | |||
2298 | local_bh_disable(); | 2372 | local_bh_disable(); |
2299 | if (!atomic_dec_and_lock(&bss->users, &local->sta_bss_lock)) { | 2373 | if (!atomic_dec_and_lock(&bss->users, &local->sta_bss_lock)) { |
2300 | local_bh_enable(); | 2374 | local_bh_enable(); |
2301 | return; | 2375 | return; |
2302 | } | 2376 | } |
2303 | 2377 | ||
2304 | __ieee80211_rx_bss_hash_del(dev, bss); | 2378 | __ieee80211_rx_bss_hash_del(local, bss); |
2305 | list_del(&bss->list); | 2379 | list_del(&bss->list); |
2306 | spin_unlock_bh(&local->sta_bss_lock); | 2380 | spin_unlock_bh(&local->sta_bss_lock); |
2307 | ieee80211_rx_bss_free(bss); | 2381 | ieee80211_rx_bss_free(bss); |
2308 | } | 2382 | } |
2309 | 2383 | ||
2310 | 2384 | ||
2311 | void ieee80211_rx_bss_list_init(struct net_device *dev) | 2385 | void ieee80211_rx_bss_list_init(struct ieee80211_local *local) |
2312 | { | 2386 | { |
2313 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2314 | spin_lock_init(&local->sta_bss_lock); | 2387 | spin_lock_init(&local->sta_bss_lock); |
2315 | INIT_LIST_HEAD(&local->sta_bss_list); | 2388 | INIT_LIST_HEAD(&local->sta_bss_list); |
2316 | } | 2389 | } |
2317 | 2390 | ||
2318 | 2391 | ||
2319 | void ieee80211_rx_bss_list_deinit(struct net_device *dev) | 2392 | void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local) |
2320 | { | 2393 | { |
2321 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2322 | struct ieee80211_sta_bss *bss, *tmp; | 2394 | struct ieee80211_sta_bss *bss, *tmp; |
2323 | 2395 | ||
2324 | list_for_each_entry_safe(bss, tmp, &local->sta_bss_list, list) | 2396 | list_for_each_entry_safe(bss, tmp, &local->sta_bss_list, list) |
2325 | ieee80211_rx_bss_put(dev, bss); | 2397 | ieee80211_rx_bss_put(local, bss); |
2326 | } | 2398 | } |
2327 | 2399 | ||
2328 | 2400 | ||
@@ -2334,8 +2406,6 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, | |||
2334 | int res, rates, i, j; | 2406 | int res, rates, i, j; |
2335 | struct sk_buff *skb; | 2407 | struct sk_buff *skb; |
2336 | struct ieee80211_mgmt *mgmt; | 2408 | struct ieee80211_mgmt *mgmt; |
2337 | struct ieee80211_tx_control control; | ||
2338 | struct rate_selection ratesel; | ||
2339 | u8 *pos; | 2409 | u8 *pos; |
2340 | struct ieee80211_sub_if_data *sdata; | 2410 | struct ieee80211_sub_if_data *sdata; |
2341 | struct ieee80211_supported_band *sband; | 2411 | struct ieee80211_supported_band *sband; |
@@ -2353,7 +2423,7 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, | |||
2353 | local->ops->reset_tsf(local_to_hw(local)); | 2423 | local->ops->reset_tsf(local_to_hw(local)); |
2354 | } | 2424 | } |
2355 | memcpy(ifsta->bssid, bss->bssid, ETH_ALEN); | 2425 | memcpy(ifsta->bssid, bss->bssid, ETH_ALEN); |
2356 | res = ieee80211_if_config(dev); | 2426 | res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID); |
2357 | if (res) | 2427 | if (res) |
2358 | return res; | 2428 | return res; |
2359 | 2429 | ||
@@ -2367,24 +2437,22 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, | |||
2367 | if (res) | 2437 | if (res) |
2368 | return res; | 2438 | return res; |
2369 | 2439 | ||
2370 | /* Set beacon template */ | 2440 | /* Build IBSS probe response */ |
2371 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); | 2441 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); |
2372 | do { | 2442 | if (skb) { |
2373 | if (!skb) | ||
2374 | break; | ||
2375 | |||
2376 | skb_reserve(skb, local->hw.extra_tx_headroom); | 2443 | skb_reserve(skb, local->hw.extra_tx_headroom); |
2377 | 2444 | ||
2378 | mgmt = (struct ieee80211_mgmt *) | 2445 | mgmt = (struct ieee80211_mgmt *) |
2379 | skb_put(skb, 24 + sizeof(mgmt->u.beacon)); | 2446 | skb_put(skb, 24 + sizeof(mgmt->u.beacon)); |
2380 | memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); | 2447 | memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); |
2381 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 2448 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, |
2382 | IEEE80211_STYPE_BEACON); | 2449 | IEEE80211_STYPE_PROBE_RESP); |
2383 | memset(mgmt->da, 0xff, ETH_ALEN); | 2450 | memset(mgmt->da, 0xff, ETH_ALEN); |
2384 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 2451 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); |
2385 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | 2452 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); |
2386 | mgmt->u.beacon.beacon_int = | 2453 | mgmt->u.beacon.beacon_int = |
2387 | cpu_to_le16(local->hw.conf.beacon_int); | 2454 | cpu_to_le16(local->hw.conf.beacon_int); |
2455 | mgmt->u.beacon.timestamp = cpu_to_le64(bss->timestamp); | ||
2388 | mgmt->u.beacon.capab_info = cpu_to_le16(bss->capability); | 2456 | mgmt->u.beacon.capab_info = cpu_to_le16(bss->capability); |
2389 | 2457 | ||
2390 | pos = skb_put(skb, 2 + ifsta->ssid_len); | 2458 | pos = skb_put(skb, 2 + ifsta->ssid_len); |
@@ -2422,60 +2490,22 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, | |||
2422 | memcpy(pos, &bss->supp_rates[8], rates); | 2490 | memcpy(pos, &bss->supp_rates[8], rates); |
2423 | } | 2491 | } |
2424 | 2492 | ||
2425 | memset(&control, 0, sizeof(control)); | 2493 | ifsta->probe_resp = skb; |
2426 | rate_control_get_rate(dev, sband, skb, &ratesel); | ||
2427 | if (!ratesel.rate) { | ||
2428 | printk(KERN_DEBUG "%s: Failed to determine TX rate " | ||
2429 | "for IBSS beacon\n", dev->name); | ||
2430 | break; | ||
2431 | } | ||
2432 | control.vif = &sdata->vif; | ||
2433 | control.tx_rate = ratesel.rate; | ||
2434 | if (sdata->bss_conf.use_short_preamble && | ||
2435 | ratesel.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) | ||
2436 | control.flags |= IEEE80211_TXCTL_SHORT_PREAMBLE; | ||
2437 | control.antenna_sel_tx = local->hw.conf.antenna_sel_tx; | ||
2438 | control.flags |= IEEE80211_TXCTL_NO_ACK; | ||
2439 | control.retry_limit = 1; | ||
2440 | |||
2441 | ifsta->probe_resp = skb_copy(skb, GFP_ATOMIC); | ||
2442 | if (ifsta->probe_resp) { | ||
2443 | mgmt = (struct ieee80211_mgmt *) | ||
2444 | ifsta->probe_resp->data; | ||
2445 | mgmt->frame_control = | ||
2446 | IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
2447 | IEEE80211_STYPE_PROBE_RESP); | ||
2448 | } else { | ||
2449 | printk(KERN_DEBUG "%s: Could not allocate ProbeResp " | ||
2450 | "template for IBSS\n", dev->name); | ||
2451 | } | ||
2452 | |||
2453 | if (local->ops->beacon_update && | ||
2454 | local->ops->beacon_update(local_to_hw(local), | ||
2455 | skb, &control) == 0) { | ||
2456 | printk(KERN_DEBUG "%s: Configured IBSS beacon " | ||
2457 | "template\n", dev->name); | ||
2458 | skb = NULL; | ||
2459 | } | ||
2460 | |||
2461 | rates = 0; | ||
2462 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
2463 | for (i = 0; i < bss->supp_rates_len; i++) { | ||
2464 | int bitrate = (bss->supp_rates[i] & 0x7f) * 5; | ||
2465 | for (j = 0; j < sband->n_bitrates; j++) | ||
2466 | if (sband->bitrates[j].bitrate == bitrate) | ||
2467 | rates |= BIT(j); | ||
2468 | } | ||
2469 | ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates; | ||
2470 | 2494 | ||
2471 | ieee80211_sta_def_wmm_params(dev, bss, 1); | 2495 | ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); |
2472 | } while (0); | 2496 | } |
2473 | 2497 | ||
2474 | if (skb) { | 2498 | rates = 0; |
2475 | printk(KERN_DEBUG "%s: Failed to configure IBSS beacon " | 2499 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
2476 | "template\n", dev->name); | 2500 | for (i = 0; i < bss->supp_rates_len; i++) { |
2477 | dev_kfree_skb(skb); | 2501 | int bitrate = (bss->supp_rates[i] & 0x7f) * 5; |
2502 | for (j = 0; j < sband->n_bitrates; j++) | ||
2503 | if (sband->bitrates[j].bitrate == bitrate) | ||
2504 | rates |= BIT(j); | ||
2478 | } | 2505 | } |
2506 | ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates; | ||
2507 | |||
2508 | ieee80211_sta_def_wmm_params(dev, bss, 1); | ||
2479 | 2509 | ||
2480 | ifsta->state = IEEE80211_IBSS_JOINED; | 2510 | ifsta->state = IEEE80211_IBSS_JOINED; |
2481 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); | 2511 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); |
@@ -2528,11 +2558,10 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2528 | struct ieee80211_mgmt *mgmt, | 2558 | struct ieee80211_mgmt *mgmt, |
2529 | size_t len, | 2559 | size_t len, |
2530 | struct ieee80211_rx_status *rx_status, | 2560 | struct ieee80211_rx_status *rx_status, |
2561 | struct ieee802_11_elems *elems, | ||
2531 | int beacon) | 2562 | int beacon) |
2532 | { | 2563 | { |
2533 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2564 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
2534 | struct ieee802_11_elems elems; | ||
2535 | size_t baselen; | ||
2536 | int freq, clen; | 2565 | int freq, clen; |
2537 | struct ieee80211_sta_bss *bss; | 2566 | struct ieee80211_sta_bss *bss; |
2538 | struct sta_info *sta; | 2567 | struct sta_info *sta; |
@@ -2545,35 +2574,24 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2545 | if (!beacon && memcmp(mgmt->da, dev->dev_addr, ETH_ALEN)) | 2574 | if (!beacon && memcmp(mgmt->da, dev->dev_addr, ETH_ALEN)) |
2546 | return; /* ignore ProbeResp to foreign address */ | 2575 | return; /* ignore ProbeResp to foreign address */ |
2547 | 2576 | ||
2548 | #if 0 | ||
2549 | printk(KERN_DEBUG "%s: RX %s from %s to %s\n", | ||
2550 | dev->name, beacon ? "Beacon" : "Probe Response", | ||
2551 | print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da)); | ||
2552 | #endif | ||
2553 | |||
2554 | baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; | ||
2555 | if (baselen > len) | ||
2556 | return; | ||
2557 | |||
2558 | beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); | 2577 | beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); |
2559 | ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); | ||
2560 | 2578 | ||
2561 | if (ieee80211_vif_is_mesh(&sdata->vif) && elems.mesh_id && | 2579 | if (ieee80211_vif_is_mesh(&sdata->vif) && elems->mesh_id && |
2562 | elems.mesh_config && mesh_matches_local(&elems, dev)) { | 2580 | elems->mesh_config && mesh_matches_local(elems, dev)) { |
2563 | u64 rates = ieee80211_sta_get_rates(local, &elems, | 2581 | u64 rates = ieee80211_sta_get_rates(local, elems, |
2564 | rx_status->band); | 2582 | rx_status->band); |
2565 | 2583 | ||
2566 | mesh_neighbour_update(mgmt->sa, rates, dev, | 2584 | mesh_neighbour_update(mgmt->sa, rates, dev, |
2567 | mesh_peer_accepts_plinks(&elems, dev)); | 2585 | mesh_peer_accepts_plinks(elems, dev)); |
2568 | } | 2586 | } |
2569 | 2587 | ||
2570 | rcu_read_lock(); | 2588 | rcu_read_lock(); |
2571 | 2589 | ||
2572 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems.supp_rates && | 2590 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems->supp_rates && |
2573 | memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 && | 2591 | memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 && |
2574 | (sta = sta_info_get(local, mgmt->sa))) { | 2592 | (sta = sta_info_get(local, mgmt->sa))) { |
2575 | u64 prev_rates; | 2593 | u64 prev_rates; |
2576 | u64 supp_rates = ieee80211_sta_get_rates(local, &elems, | 2594 | u64 supp_rates = ieee80211_sta_get_rates(local, elems, |
2577 | rx_status->band); | 2595 | rx_status->band); |
2578 | 2596 | ||
2579 | prev_rates = sta->supp_rates[rx_status->band]; | 2597 | prev_rates = sta->supp_rates[rx_status->band]; |
@@ -2585,21 +2603,12 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2585 | sta->supp_rates[rx_status->band] = | 2603 | sta->supp_rates[rx_status->band] = |
2586 | sdata->u.sta.supp_rates_bits[rx_status->band]; | 2604 | sdata->u.sta.supp_rates_bits[rx_status->band]; |
2587 | } | 2605 | } |
2588 | if (sta->supp_rates[rx_status->band] != prev_rates) { | ||
2589 | printk(KERN_DEBUG "%s: updated supp_rates set for " | ||
2590 | "%s based on beacon info (0x%llx & 0x%llx -> " | ||
2591 | "0x%llx)\n", | ||
2592 | dev->name, print_mac(mac, sta->addr), | ||
2593 | (unsigned long long) prev_rates, | ||
2594 | (unsigned long long) supp_rates, | ||
2595 | (unsigned long long) sta->supp_rates[rx_status->band]); | ||
2596 | } | ||
2597 | } | 2606 | } |
2598 | 2607 | ||
2599 | rcu_read_unlock(); | 2608 | rcu_read_unlock(); |
2600 | 2609 | ||
2601 | if (elems.ds_params && elems.ds_params_len == 1) | 2610 | if (elems->ds_params && elems->ds_params_len == 1) |
2602 | freq = ieee80211_channel_to_frequency(elems.ds_params[0]); | 2611 | freq = ieee80211_channel_to_frequency(elems->ds_params[0]); |
2603 | else | 2612 | else |
2604 | freq = rx_status->freq; | 2613 | freq = rx_status->freq; |
2605 | 2614 | ||
@@ -2609,23 +2618,23 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2609 | return; | 2618 | return; |
2610 | 2619 | ||
2611 | #ifdef CONFIG_MAC80211_MESH | 2620 | #ifdef CONFIG_MAC80211_MESH |
2612 | if (elems.mesh_config) | 2621 | if (elems->mesh_config) |
2613 | bss = ieee80211_rx_mesh_bss_get(dev, elems.mesh_id, | 2622 | bss = ieee80211_rx_mesh_bss_get(dev, elems->mesh_id, |
2614 | elems.mesh_id_len, elems.mesh_config, freq); | 2623 | elems->mesh_id_len, elems->mesh_config, freq); |
2615 | else | 2624 | else |
2616 | #endif | 2625 | #endif |
2617 | bss = ieee80211_rx_bss_get(dev, mgmt->bssid, freq, | 2626 | bss = ieee80211_rx_bss_get(dev, mgmt->bssid, freq, |
2618 | elems.ssid, elems.ssid_len); | 2627 | elems->ssid, elems->ssid_len); |
2619 | if (!bss) { | 2628 | if (!bss) { |
2620 | #ifdef CONFIG_MAC80211_MESH | 2629 | #ifdef CONFIG_MAC80211_MESH |
2621 | if (elems.mesh_config) | 2630 | if (elems->mesh_config) |
2622 | bss = ieee80211_rx_mesh_bss_add(dev, elems.mesh_id, | 2631 | bss = ieee80211_rx_mesh_bss_add(dev, elems->mesh_id, |
2623 | elems.mesh_id_len, elems.mesh_config, | 2632 | elems->mesh_id_len, elems->mesh_config, |
2624 | elems.mesh_config_len, freq); | 2633 | elems->mesh_config_len, freq); |
2625 | else | 2634 | else |
2626 | #endif | 2635 | #endif |
2627 | bss = ieee80211_rx_bss_add(dev, mgmt->bssid, freq, | 2636 | bss = ieee80211_rx_bss_add(dev, mgmt->bssid, freq, |
2628 | elems.ssid, elems.ssid_len); | 2637 | elems->ssid, elems->ssid_len); |
2629 | if (!bss) | 2638 | if (!bss) |
2630 | return; | 2639 | return; |
2631 | } else { | 2640 | } else { |
@@ -2638,46 +2647,66 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2638 | } | 2647 | } |
2639 | 2648 | ||
2640 | /* save the ERP value so that it is available at association time */ | 2649 | /* save the ERP value so that it is available at association time */ |
2641 | if (elems.erp_info && elems.erp_info_len >= 1) { | 2650 | if (elems->erp_info && elems->erp_info_len >= 1) { |
2642 | bss->erp_value = elems.erp_info[0]; | 2651 | bss->erp_value = elems->erp_info[0]; |
2643 | bss->has_erp_value = 1; | 2652 | bss->has_erp_value = 1; |
2644 | } | 2653 | } |
2645 | 2654 | ||
2646 | if (elems.ht_cap_elem && | 2655 | if (elems->ht_cap_elem && |
2647 | (!bss->ht_ie || bss->ht_ie_len != elems.ht_cap_elem_len || | 2656 | (!bss->ht_ie || bss->ht_ie_len != elems->ht_cap_elem_len || |
2648 | memcmp(bss->ht_ie, elems.ht_cap_elem, elems.ht_cap_elem_len))) { | 2657 | memcmp(bss->ht_ie, elems->ht_cap_elem, elems->ht_cap_elem_len))) { |
2649 | kfree(bss->ht_ie); | 2658 | kfree(bss->ht_ie); |
2650 | bss->ht_ie = kmalloc(elems.ht_cap_elem_len + 2, GFP_ATOMIC); | 2659 | bss->ht_ie = kmalloc(elems->ht_cap_elem_len + 2, GFP_ATOMIC); |
2651 | if (bss->ht_ie) { | 2660 | if (bss->ht_ie) { |
2652 | memcpy(bss->ht_ie, elems.ht_cap_elem - 2, | 2661 | memcpy(bss->ht_ie, elems->ht_cap_elem - 2, |
2653 | elems.ht_cap_elem_len + 2); | 2662 | elems->ht_cap_elem_len + 2); |
2654 | bss->ht_ie_len = elems.ht_cap_elem_len + 2; | 2663 | bss->ht_ie_len = elems->ht_cap_elem_len + 2; |
2655 | } else | 2664 | } else |
2656 | bss->ht_ie_len = 0; | 2665 | bss->ht_ie_len = 0; |
2657 | } else if (!elems.ht_cap_elem && bss->ht_ie) { | 2666 | } else if (!elems->ht_cap_elem && bss->ht_ie) { |
2658 | kfree(bss->ht_ie); | 2667 | kfree(bss->ht_ie); |
2659 | bss->ht_ie = NULL; | 2668 | bss->ht_ie = NULL; |
2660 | bss->ht_ie_len = 0; | 2669 | bss->ht_ie_len = 0; |
2661 | } | 2670 | } |
2662 | 2671 | ||
2672 | if (elems->ht_info_elem && | ||
2673 | (!bss->ht_add_ie || | ||
2674 | bss->ht_add_ie_len != elems->ht_info_elem_len || | ||
2675 | memcmp(bss->ht_add_ie, elems->ht_info_elem, | ||
2676 | elems->ht_info_elem_len))) { | ||
2677 | kfree(bss->ht_add_ie); | ||
2678 | bss->ht_add_ie = | ||
2679 | kmalloc(elems->ht_info_elem_len + 2, GFP_ATOMIC); | ||
2680 | if (bss->ht_add_ie) { | ||
2681 | memcpy(bss->ht_add_ie, elems->ht_info_elem - 2, | ||
2682 | elems->ht_info_elem_len + 2); | ||
2683 | bss->ht_add_ie_len = elems->ht_info_elem_len + 2; | ||
2684 | } else | ||
2685 | bss->ht_add_ie_len = 0; | ||
2686 | } else if (!elems->ht_info_elem && bss->ht_add_ie) { | ||
2687 | kfree(bss->ht_add_ie); | ||
2688 | bss->ht_add_ie = NULL; | ||
2689 | bss->ht_add_ie_len = 0; | ||
2690 | } | ||
2691 | |||
2663 | bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int); | 2692 | bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int); |
2664 | bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info); | 2693 | bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info); |
2665 | 2694 | ||
2666 | bss->supp_rates_len = 0; | 2695 | bss->supp_rates_len = 0; |
2667 | if (elems.supp_rates) { | 2696 | if (elems->supp_rates) { |
2668 | clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; | 2697 | clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; |
2669 | if (clen > elems.supp_rates_len) | 2698 | if (clen > elems->supp_rates_len) |
2670 | clen = elems.supp_rates_len; | 2699 | clen = elems->supp_rates_len; |
2671 | memcpy(&bss->supp_rates[bss->supp_rates_len], elems.supp_rates, | 2700 | memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates, |
2672 | clen); | 2701 | clen); |
2673 | bss->supp_rates_len += clen; | 2702 | bss->supp_rates_len += clen; |
2674 | } | 2703 | } |
2675 | if (elems.ext_supp_rates) { | 2704 | if (elems->ext_supp_rates) { |
2676 | clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; | 2705 | clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; |
2677 | if (clen > elems.ext_supp_rates_len) | 2706 | if (clen > elems->ext_supp_rates_len) |
2678 | clen = elems.ext_supp_rates_len; | 2707 | clen = elems->ext_supp_rates_len; |
2679 | memcpy(&bss->supp_rates[bss->supp_rates_len], | 2708 | memcpy(&bss->supp_rates[bss->supp_rates_len], |
2680 | elems.ext_supp_rates, clen); | 2709 | elems->ext_supp_rates, clen); |
2681 | bss->supp_rates_len += clen; | 2710 | bss->supp_rates_len += clen; |
2682 | } | 2711 | } |
2683 | 2712 | ||
@@ -2685,9 +2714,9 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2685 | 2714 | ||
2686 | bss->timestamp = beacon_timestamp; | 2715 | bss->timestamp = beacon_timestamp; |
2687 | bss->last_update = jiffies; | 2716 | bss->last_update = jiffies; |
2688 | bss->rssi = rx_status->ssi; | ||
2689 | bss->signal = rx_status->signal; | 2717 | bss->signal = rx_status->signal; |
2690 | bss->noise = rx_status->noise; | 2718 | bss->noise = rx_status->noise; |
2719 | bss->qual = rx_status->qual; | ||
2691 | if (!beacon && !bss->probe_resp) | 2720 | if (!beacon && !bss->probe_resp) |
2692 | bss->probe_resp = true; | 2721 | bss->probe_resp = true; |
2693 | 2722 | ||
@@ -2697,37 +2726,37 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2697 | */ | 2726 | */ |
2698 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | 2727 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && |
2699 | bss->probe_resp && beacon) { | 2728 | bss->probe_resp && beacon) { |
2700 | ieee80211_rx_bss_put(dev, bss); | 2729 | ieee80211_rx_bss_put(local, bss); |
2701 | return; | 2730 | return; |
2702 | } | 2731 | } |
2703 | 2732 | ||
2704 | if (elems.wpa && | 2733 | if (elems->wpa && |
2705 | (!bss->wpa_ie || bss->wpa_ie_len != elems.wpa_len || | 2734 | (!bss->wpa_ie || bss->wpa_ie_len != elems->wpa_len || |
2706 | memcmp(bss->wpa_ie, elems.wpa, elems.wpa_len))) { | 2735 | memcmp(bss->wpa_ie, elems->wpa, elems->wpa_len))) { |
2707 | kfree(bss->wpa_ie); | 2736 | kfree(bss->wpa_ie); |
2708 | bss->wpa_ie = kmalloc(elems.wpa_len + 2, GFP_ATOMIC); | 2737 | bss->wpa_ie = kmalloc(elems->wpa_len + 2, GFP_ATOMIC); |
2709 | if (bss->wpa_ie) { | 2738 | if (bss->wpa_ie) { |
2710 | memcpy(bss->wpa_ie, elems.wpa - 2, elems.wpa_len + 2); | 2739 | memcpy(bss->wpa_ie, elems->wpa - 2, elems->wpa_len + 2); |
2711 | bss->wpa_ie_len = elems.wpa_len + 2; | 2740 | bss->wpa_ie_len = elems->wpa_len + 2; |
2712 | } else | 2741 | } else |
2713 | bss->wpa_ie_len = 0; | 2742 | bss->wpa_ie_len = 0; |
2714 | } else if (!elems.wpa && bss->wpa_ie) { | 2743 | } else if (!elems->wpa && bss->wpa_ie) { |
2715 | kfree(bss->wpa_ie); | 2744 | kfree(bss->wpa_ie); |
2716 | bss->wpa_ie = NULL; | 2745 | bss->wpa_ie = NULL; |
2717 | bss->wpa_ie_len = 0; | 2746 | bss->wpa_ie_len = 0; |
2718 | } | 2747 | } |
2719 | 2748 | ||
2720 | if (elems.rsn && | 2749 | if (elems->rsn && |
2721 | (!bss->rsn_ie || bss->rsn_ie_len != elems.rsn_len || | 2750 | (!bss->rsn_ie || bss->rsn_ie_len != elems->rsn_len || |
2722 | memcmp(bss->rsn_ie, elems.rsn, elems.rsn_len))) { | 2751 | memcmp(bss->rsn_ie, elems->rsn, elems->rsn_len))) { |
2723 | kfree(bss->rsn_ie); | 2752 | kfree(bss->rsn_ie); |
2724 | bss->rsn_ie = kmalloc(elems.rsn_len + 2, GFP_ATOMIC); | 2753 | bss->rsn_ie = kmalloc(elems->rsn_len + 2, GFP_ATOMIC); |
2725 | if (bss->rsn_ie) { | 2754 | if (bss->rsn_ie) { |
2726 | memcpy(bss->rsn_ie, elems.rsn - 2, elems.rsn_len + 2); | 2755 | memcpy(bss->rsn_ie, elems->rsn - 2, elems->rsn_len + 2); |
2727 | bss->rsn_ie_len = elems.rsn_len + 2; | 2756 | bss->rsn_ie_len = elems->rsn_len + 2; |
2728 | } else | 2757 | } else |
2729 | bss->rsn_ie_len = 0; | 2758 | bss->rsn_ie_len = 0; |
2730 | } else if (!elems.rsn && bss->rsn_ie) { | 2759 | } else if (!elems->rsn && bss->rsn_ie) { |
2731 | kfree(bss->rsn_ie); | 2760 | kfree(bss->rsn_ie); |
2732 | bss->rsn_ie = NULL; | 2761 | bss->rsn_ie = NULL; |
2733 | bss->rsn_ie_len = 0; | 2762 | bss->rsn_ie_len = 0; |
@@ -2747,20 +2776,21 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2747 | * inclusion of the WMM Parameters in beacons, however, is optional. | 2776 | * inclusion of the WMM Parameters in beacons, however, is optional. |
2748 | */ | 2777 | */ |
2749 | 2778 | ||
2750 | if (elems.wmm_param && | 2779 | if (elems->wmm_param && |
2751 | (!bss->wmm_ie || bss->wmm_ie_len != elems.wmm_param_len || | 2780 | (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_param_len || |
2752 | memcmp(bss->wmm_ie, elems.wmm_param, elems.wmm_param_len))) { | 2781 | memcmp(bss->wmm_ie, elems->wmm_param, elems->wmm_param_len))) { |
2753 | kfree(bss->wmm_ie); | 2782 | kfree(bss->wmm_ie); |
2754 | bss->wmm_ie = kmalloc(elems.wmm_param_len + 2, GFP_ATOMIC); | 2783 | bss->wmm_ie = kmalloc(elems->wmm_param_len + 2, GFP_ATOMIC); |
2755 | if (bss->wmm_ie) { | 2784 | if (bss->wmm_ie) { |
2756 | memcpy(bss->wmm_ie, elems.wmm_param - 2, | 2785 | memcpy(bss->wmm_ie, elems->wmm_param - 2, |
2757 | elems.wmm_param_len + 2); | 2786 | elems->wmm_param_len + 2); |
2758 | bss->wmm_ie_len = elems.wmm_param_len + 2; | 2787 | bss->wmm_ie_len = elems->wmm_param_len + 2; |
2759 | } else | 2788 | } else |
2760 | bss->wmm_ie_len = 0; | 2789 | bss->wmm_ie_len = 0; |
2761 | } else if (elems.wmm_info && | 2790 | } else if (elems->wmm_info && |
2762 | (!bss->wmm_ie || bss->wmm_ie_len != elems.wmm_info_len || | 2791 | (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_info_len || |
2763 | memcmp(bss->wmm_ie, elems.wmm_info, elems.wmm_info_len))) { | 2792 | memcmp(bss->wmm_ie, elems->wmm_info, |
2793 | elems->wmm_info_len))) { | ||
2764 | /* As for certain AP's Fifth bit is not set in WMM IE in | 2794 | /* As for certain AP's Fifth bit is not set in WMM IE in |
2765 | * beacon frames.So while parsing the beacon frame the | 2795 | * beacon frames.So while parsing the beacon frame the |
2766 | * wmm_info structure is used instead of wmm_param. | 2796 | * wmm_info structure is used instead of wmm_param. |
@@ -2770,14 +2800,14 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2770 | * n-band association. | 2800 | * n-band association. |
2771 | */ | 2801 | */ |
2772 | kfree(bss->wmm_ie); | 2802 | kfree(bss->wmm_ie); |
2773 | bss->wmm_ie = kmalloc(elems.wmm_info_len + 2, GFP_ATOMIC); | 2803 | bss->wmm_ie = kmalloc(elems->wmm_info_len + 2, GFP_ATOMIC); |
2774 | if (bss->wmm_ie) { | 2804 | if (bss->wmm_ie) { |
2775 | memcpy(bss->wmm_ie, elems.wmm_info - 2, | 2805 | memcpy(bss->wmm_ie, elems->wmm_info - 2, |
2776 | elems.wmm_info_len + 2); | 2806 | elems->wmm_info_len + 2); |
2777 | bss->wmm_ie_len = elems.wmm_info_len + 2; | 2807 | bss->wmm_ie_len = elems->wmm_info_len + 2; |
2778 | } else | 2808 | } else |
2779 | bss->wmm_ie_len = 0; | 2809 | bss->wmm_ie_len = 0; |
2780 | } else if (!elems.wmm_param && !elems.wmm_info && bss->wmm_ie) { | 2810 | } else if (!elems->wmm_param && !elems->wmm_info && bss->wmm_ie) { |
2781 | kfree(bss->wmm_ie); | 2811 | kfree(bss->wmm_ie); |
2782 | bss->wmm_ie = NULL; | 2812 | bss->wmm_ie = NULL; |
2783 | bss->wmm_ie_len = 0; | 2813 | bss->wmm_ie_len = 0; |
@@ -2788,8 +2818,9 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2788 | !local->sta_sw_scanning && !local->sta_hw_scanning && | 2818 | !local->sta_sw_scanning && !local->sta_hw_scanning && |
2789 | bss->capability & WLAN_CAPABILITY_IBSS && | 2819 | bss->capability & WLAN_CAPABILITY_IBSS && |
2790 | bss->freq == local->oper_channel->center_freq && | 2820 | bss->freq == local->oper_channel->center_freq && |
2791 | elems.ssid_len == sdata->u.sta.ssid_len && | 2821 | elems->ssid_len == sdata->u.sta.ssid_len && |
2792 | memcmp(elems.ssid, sdata->u.sta.ssid, sdata->u.sta.ssid_len) == 0) { | 2822 | memcmp(elems->ssid, sdata->u.sta.ssid, |
2823 | sdata->u.sta.ssid_len) == 0) { | ||
2793 | if (rx_status->flag & RX_FLAG_TSFT) { | 2824 | if (rx_status->flag & RX_FLAG_TSFT) { |
2794 | /* in order for correct IBSS merging we need mactime | 2825 | /* in order for correct IBSS merging we need mactime |
2795 | * | 2826 | * |
@@ -2827,18 +2858,18 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2827 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ | 2858 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ |
2828 | if (beacon_timestamp > rx_timestamp) { | 2859 | if (beacon_timestamp > rx_timestamp) { |
2829 | #ifndef CONFIG_MAC80211_IBSS_DEBUG | 2860 | #ifndef CONFIG_MAC80211_IBSS_DEBUG |
2830 | if (net_ratelimit()) | 2861 | printk(KERN_DEBUG "%s: beacon TSF higher than " |
2862 | "local TSF - IBSS merge with BSSID %s\n", | ||
2863 | dev->name, print_mac(mac, mgmt->bssid)); | ||
2831 | #endif | 2864 | #endif |
2832 | printk(KERN_DEBUG "%s: beacon TSF higher than " | ||
2833 | "local TSF - IBSS merge with BSSID %s\n", | ||
2834 | dev->name, print_mac(mac, mgmt->bssid)); | ||
2835 | ieee80211_sta_join_ibss(dev, &sdata->u.sta, bss); | 2865 | ieee80211_sta_join_ibss(dev, &sdata->u.sta, bss); |
2836 | ieee80211_ibss_add_sta(dev, NULL, | 2866 | ieee80211_ibss_add_sta(dev, NULL, |
2837 | mgmt->bssid, mgmt->sa); | 2867 | mgmt->bssid, mgmt->sa, |
2868 | BIT(rx_status->rate_idx)); | ||
2838 | } | 2869 | } |
2839 | } | 2870 | } |
2840 | 2871 | ||
2841 | ieee80211_rx_bss_put(dev, bss); | 2872 | ieee80211_rx_bss_put(local, bss); |
2842 | } | 2873 | } |
2843 | 2874 | ||
2844 | 2875 | ||
@@ -2847,7 +2878,17 @@ static void ieee80211_rx_mgmt_probe_resp(struct net_device *dev, | |||
2847 | size_t len, | 2878 | size_t len, |
2848 | struct ieee80211_rx_status *rx_status) | 2879 | struct ieee80211_rx_status *rx_status) |
2849 | { | 2880 | { |
2850 | ieee80211_rx_bss_info(dev, mgmt, len, rx_status, 0); | 2881 | size_t baselen; |
2882 | struct ieee802_11_elems elems; | ||
2883 | |||
2884 | baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; | ||
2885 | if (baselen > len) | ||
2886 | return; | ||
2887 | |||
2888 | ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, | ||
2889 | &elems); | ||
2890 | |||
2891 | ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 0); | ||
2851 | } | 2892 | } |
2852 | 2893 | ||
2853 | 2894 | ||
@@ -2864,7 +2905,14 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev, | |||
2864 | struct ieee80211_conf *conf = &local->hw.conf; | 2905 | struct ieee80211_conf *conf = &local->hw.conf; |
2865 | u32 changed = 0; | 2906 | u32 changed = 0; |
2866 | 2907 | ||
2867 | ieee80211_rx_bss_info(dev, mgmt, len, rx_status, 1); | 2908 | /* Process beacon from the current BSS */ |
2909 | baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; | ||
2910 | if (baselen > len) | ||
2911 | return; | ||
2912 | |||
2913 | ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); | ||
2914 | |||
2915 | ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 1); | ||
2868 | 2916 | ||
2869 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 2917 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
2870 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | 2918 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) |
@@ -2875,17 +2923,8 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev, | |||
2875 | memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) | 2923 | memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) |
2876 | return; | 2924 | return; |
2877 | 2925 | ||
2878 | /* Process beacon from the current BSS */ | 2926 | ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, |
2879 | baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; | 2927 | elems.wmm_param_len); |
2880 | if (baselen > len) | ||
2881 | return; | ||
2882 | |||
2883 | ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); | ||
2884 | |||
2885 | if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) { | ||
2886 | ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, | ||
2887 | elems.wmm_param_len); | ||
2888 | } | ||
2889 | 2928 | ||
2890 | /* Do not send changes to driver if we are scanning. This removes | 2929 | /* Do not send changes to driver if we are scanning. This removes |
2891 | * requirement that driver's bss_info_changed function needs to be | 2930 | * requirement that driver's bss_info_changed function needs to be |
@@ -2962,11 +3001,11 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, | |||
2962 | pos = mgmt->u.probe_req.variable; | 3001 | pos = mgmt->u.probe_req.variable; |
2963 | if (pos[0] != WLAN_EID_SSID || | 3002 | if (pos[0] != WLAN_EID_SSID || |
2964 | pos + 2 + pos[1] > end) { | 3003 | pos + 2 + pos[1] > end) { |
2965 | if (net_ratelimit()) { | 3004 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
2966 | printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " | 3005 | printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " |
2967 | "from %s\n", | 3006 | "from %s\n", |
2968 | dev->name, print_mac(mac, mgmt->sa)); | 3007 | dev->name, print_mac(mac, mgmt->sa)); |
2969 | } | 3008 | #endif |
2970 | return; | 3009 | return; |
2971 | } | 3010 | } |
2972 | if (pos[1] != 0 && | 3011 | if (pos[1] != 0 && |
@@ -2997,11 +3036,24 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev, | |||
2997 | struct ieee80211_rx_status *rx_status) | 3036 | struct ieee80211_rx_status *rx_status) |
2998 | { | 3037 | { |
2999 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 3038 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
3039 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
3000 | 3040 | ||
3001 | if (len < IEEE80211_MIN_ACTION_SIZE) | 3041 | if (len < IEEE80211_MIN_ACTION_SIZE) |
3002 | return; | 3042 | return; |
3003 | 3043 | ||
3004 | switch (mgmt->u.action.category) { | 3044 | switch (mgmt->u.action.category) { |
3045 | case WLAN_CATEGORY_SPECTRUM_MGMT: | ||
3046 | if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) | ||
3047 | break; | ||
3048 | switch (mgmt->u.action.u.chan_switch.action_code) { | ||
3049 | case WLAN_ACTION_SPCT_MSR_REQ: | ||
3050 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
3051 | sizeof(mgmt->u.action.u.measurement))) | ||
3052 | break; | ||
3053 | ieee80211_sta_process_measurement_req(dev, mgmt, len); | ||
3054 | break; | ||
3055 | } | ||
3056 | break; | ||
3005 | case WLAN_CATEGORY_BACK: | 3057 | case WLAN_CATEGORY_BACK: |
3006 | switch (mgmt->u.action.u.addba_req.action_code) { | 3058 | switch (mgmt->u.action.u.addba_req.action_code) { |
3007 | case WLAN_ACTION_ADDBA_REQ: | 3059 | case WLAN_ACTION_ADDBA_REQ: |
@@ -3022,11 +3074,6 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev, | |||
3022 | break; | 3074 | break; |
3023 | ieee80211_sta_process_delba(dev, mgmt, len); | 3075 | ieee80211_sta_process_delba(dev, mgmt, len); |
3024 | break; | 3076 | break; |
3025 | default: | ||
3026 | if (net_ratelimit()) | ||
3027 | printk(KERN_DEBUG "%s: Rx unknown A-MPDU action\n", | ||
3028 | dev->name); | ||
3029 | break; | ||
3030 | } | 3077 | } |
3031 | break; | 3078 | break; |
3032 | case PLINK_CATEGORY: | 3079 | case PLINK_CATEGORY: |
@@ -3037,11 +3084,6 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev, | |||
3037 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 3084 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
3038 | mesh_rx_path_sel_frame(dev, mgmt, len); | 3085 | mesh_rx_path_sel_frame(dev, mgmt, len); |
3039 | break; | 3086 | break; |
3040 | default: | ||
3041 | if (net_ratelimit()) | ||
3042 | printk(KERN_DEBUG "%s: Rx unknown action frame - " | ||
3043 | "category=%d\n", dev->name, mgmt->u.action.category); | ||
3044 | break; | ||
3045 | } | 3087 | } |
3046 | } | 3088 | } |
3047 | 3089 | ||
@@ -3077,11 +3119,6 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb, | |||
3077 | skb_queue_tail(&ifsta->skb_queue, skb); | 3119 | skb_queue_tail(&ifsta->skb_queue, skb); |
3078 | queue_work(local->hw.workqueue, &ifsta->work); | 3120 | queue_work(local->hw.workqueue, &ifsta->work); |
3079 | return; | 3121 | return; |
3080 | default: | ||
3081 | printk(KERN_DEBUG "%s: received unknown management frame - " | ||
3082 | "stype=%d\n", dev->name, | ||
3083 | (fc & IEEE80211_FCTL_STYPE) >> 4); | ||
3084 | break; | ||
3085 | } | 3122 | } |
3086 | 3123 | ||
3087 | fail: | 3124 | fail: |
@@ -3145,33 +3182,32 @@ ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb, | |||
3145 | struct ieee80211_rx_status *rx_status) | 3182 | struct ieee80211_rx_status *rx_status) |
3146 | { | 3183 | { |
3147 | struct ieee80211_mgmt *mgmt; | 3184 | struct ieee80211_mgmt *mgmt; |
3148 | u16 fc; | 3185 | __le16 fc; |
3149 | 3186 | ||
3150 | if (skb->len < 2) | 3187 | if (skb->len < 2) |
3151 | return RX_DROP_UNUSABLE; | 3188 | return RX_DROP_UNUSABLE; |
3152 | 3189 | ||
3153 | mgmt = (struct ieee80211_mgmt *) skb->data; | 3190 | mgmt = (struct ieee80211_mgmt *) skb->data; |
3154 | fc = le16_to_cpu(mgmt->frame_control); | 3191 | fc = mgmt->frame_control; |
3155 | 3192 | ||
3156 | if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) | 3193 | if (ieee80211_is_ctl(fc)) |
3157 | return RX_CONTINUE; | 3194 | return RX_CONTINUE; |
3158 | 3195 | ||
3159 | if (skb->len < 24) | 3196 | if (skb->len < 24) |
3160 | return RX_DROP_MONITOR; | 3197 | return RX_DROP_MONITOR; |
3161 | 3198 | ||
3162 | if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { | 3199 | if (ieee80211_is_probe_resp(fc)) { |
3163 | if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP) { | 3200 | ieee80211_rx_mgmt_probe_resp(dev, mgmt, skb->len, rx_status); |
3164 | ieee80211_rx_mgmt_probe_resp(dev, mgmt, | 3201 | dev_kfree_skb(skb); |
3165 | skb->len, rx_status); | 3202 | return RX_QUEUED; |
3166 | dev_kfree_skb(skb); | 3203 | } |
3167 | return RX_QUEUED; | 3204 | |
3168 | } else if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) { | 3205 | if (ieee80211_is_beacon(fc)) { |
3169 | ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, | 3206 | ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, rx_status); |
3170 | rx_status); | 3207 | dev_kfree_skb(skb); |
3171 | dev_kfree_skb(skb); | 3208 | return RX_QUEUED; |
3172 | return RX_QUEUED; | ||
3173 | } | ||
3174 | } | 3209 | } |
3210 | |||
3175 | return RX_CONTINUE; | 3211 | return RX_CONTINUE; |
3176 | } | 3212 | } |
3177 | 3213 | ||
@@ -3211,8 +3247,10 @@ static void ieee80211_sta_expire(struct net_device *dev, unsigned long exp_time) | |||
3211 | spin_lock_irqsave(&local->sta_lock, flags); | 3247 | spin_lock_irqsave(&local->sta_lock, flags); |
3212 | list_for_each_entry_safe(sta, tmp, &local->sta_list, list) | 3248 | list_for_each_entry_safe(sta, tmp, &local->sta_list, list) |
3213 | if (time_after(jiffies, sta->last_rx + exp_time)) { | 3249 | if (time_after(jiffies, sta->last_rx + exp_time)) { |
3250 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | ||
3214 | printk(KERN_DEBUG "%s: expiring inactive STA %s\n", | 3251 | printk(KERN_DEBUG "%s: expiring inactive STA %s\n", |
3215 | dev->name, print_mac(mac, sta->addr)); | 3252 | dev->name, print_mac(mac, sta->addr)); |
3253 | #endif | ||
3216 | __sta_info_unlink(&sta); | 3254 | __sta_info_unlink(&sta); |
3217 | if (sta) | 3255 | if (sta) |
3218 | list_add(&sta->list, &tmp_list); | 3256 | list_add(&sta->list, &tmp_list); |
@@ -3251,7 +3289,7 @@ static void ieee80211_mesh_housekeeping(struct net_device *dev, | |||
3251 | 3289 | ||
3252 | free_plinks = mesh_plink_availables(sdata); | 3290 | free_plinks = mesh_plink_availables(sdata); |
3253 | if (free_plinks != sdata->u.sta.accepting_plinks) | 3291 | if (free_plinks != sdata->u.sta.accepting_plinks) |
3254 | ieee80211_if_config_beacon(dev); | 3292 | ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); |
3255 | 3293 | ||
3256 | mod_timer(&ifsta->timer, jiffies + | 3294 | mod_timer(&ifsta->timer, jiffies + |
3257 | IEEE80211_MESH_HOUSEKEEPING_INTERVAL); | 3295 | IEEE80211_MESH_HOUSEKEEPING_INTERVAL); |
@@ -3295,13 +3333,10 @@ void ieee80211_sta_work(struct work_struct *work) | |||
3295 | if (local->sta_sw_scanning || local->sta_hw_scanning) | 3333 | if (local->sta_sw_scanning || local->sta_hw_scanning) |
3296 | return; | 3334 | return; |
3297 | 3335 | ||
3298 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA && | 3336 | if (WARN_ON(sdata->vif.type != IEEE80211_IF_TYPE_STA && |
3299 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | 3337 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS && |
3300 | sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) { | 3338 | sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)) |
3301 | printk(KERN_DEBUG "%s: ieee80211_sta_work: non-STA interface " | ||
3302 | "(type=%d)\n", dev->name, sdata->vif.type); | ||
3303 | return; | 3339 | return; |
3304 | } | ||
3305 | ifsta = &sdata->u.sta; | 3340 | ifsta = &sdata->u.sta; |
3306 | 3341 | ||
3307 | while ((skb = skb_dequeue(&ifsta->skb_queue))) | 3342 | while ((skb = skb_dequeue(&ifsta->skb_queue))) |
@@ -3355,8 +3390,7 @@ void ieee80211_sta_work(struct work_struct *work) | |||
3355 | break; | 3390 | break; |
3356 | #endif | 3391 | #endif |
3357 | default: | 3392 | default: |
3358 | printk(KERN_DEBUG "ieee80211_sta_work: Unknown state %d\n", | 3393 | WARN_ON(1); |
3359 | ifsta->state); | ||
3360 | break; | 3394 | break; |
3361 | } | 3395 | } |
3362 | 3396 | ||
@@ -3391,8 +3425,6 @@ static void ieee80211_sta_reset_auth(struct net_device *dev, | |||
3391 | ifsta->auth_alg = WLAN_AUTH_LEAP; | 3425 | ifsta->auth_alg = WLAN_AUTH_LEAP; |
3392 | else | 3426 | else |
3393 | ifsta->auth_alg = WLAN_AUTH_OPEN; | 3427 | ifsta->auth_alg = WLAN_AUTH_OPEN; |
3394 | printk(KERN_DEBUG "%s: Initial auth_alg=%d\n", dev->name, | ||
3395 | ifsta->auth_alg); | ||
3396 | ifsta->auth_transaction = -1; | 3428 | ifsta->auth_transaction = -1; |
3397 | ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; | 3429 | ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; |
3398 | ifsta->auth_tries = ifsta->assoc_tries = 0; | 3430 | ifsta->auth_tries = ifsta->assoc_tries = 0; |
@@ -3481,9 +3513,9 @@ static int ieee80211_sta_config_auth(struct net_device *dev, | |||
3481 | !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len)) | 3513 | !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len)) |
3482 | continue; | 3514 | continue; |
3483 | 3515 | ||
3484 | if (!selected || top_rssi < bss->rssi) { | 3516 | if (!selected || top_rssi < bss->signal) { |
3485 | selected = bss; | 3517 | selected = bss; |
3486 | top_rssi = bss->rssi; | 3518 | top_rssi = bss->signal; |
3487 | } | 3519 | } |
3488 | } | 3520 | } |
3489 | if (selected) | 3521 | if (selected) |
@@ -3497,7 +3529,7 @@ static int ieee80211_sta_config_auth(struct net_device *dev, | |||
3497 | selected->ssid_len); | 3529 | selected->ssid_len); |
3498 | ieee80211_sta_set_bssid(dev, selected->bssid); | 3530 | ieee80211_sta_set_bssid(dev, selected->bssid); |
3499 | ieee80211_sta_def_wmm_params(dev, selected, 0); | 3531 | ieee80211_sta_def_wmm_params(dev, selected, 0); |
3500 | ieee80211_rx_bss_put(dev, selected); | 3532 | ieee80211_rx_bss_put(local, selected); |
3501 | ifsta->state = IEEE80211_AUTHENTICATE; | 3533 | ifsta->state = IEEE80211_AUTHENTICATE; |
3502 | ieee80211_sta_reset_auth(dev, ifsta); | 3534 | ieee80211_sta_reset_auth(dev, ifsta); |
3503 | return 0; | 3535 | return 0; |
@@ -3556,14 +3588,16 @@ static int ieee80211_sta_create_ibss(struct net_device *dev, | |||
3556 | sband = local->hw.wiphy->bands[bss->band]; | 3588 | sband = local->hw.wiphy->bands[bss->band]; |
3557 | 3589 | ||
3558 | if (local->hw.conf.beacon_int == 0) | 3590 | if (local->hw.conf.beacon_int == 0) |
3559 | local->hw.conf.beacon_int = 10000; | 3591 | local->hw.conf.beacon_int = 100; |
3560 | bss->beacon_int = local->hw.conf.beacon_int; | 3592 | bss->beacon_int = local->hw.conf.beacon_int; |
3561 | bss->last_update = jiffies; | 3593 | bss->last_update = jiffies; |
3562 | bss->capability = WLAN_CAPABILITY_IBSS; | 3594 | bss->capability = WLAN_CAPABILITY_IBSS; |
3563 | if (sdata->default_key) { | 3595 | |
3596 | if (sdata->default_key) | ||
3564 | bss->capability |= WLAN_CAPABILITY_PRIVACY; | 3597 | bss->capability |= WLAN_CAPABILITY_PRIVACY; |
3565 | } else | 3598 | else |
3566 | sdata->drop_unencrypted = 0; | 3599 | sdata->drop_unencrypted = 0; |
3600 | |||
3567 | bss->supp_rates_len = sband->n_bitrates; | 3601 | bss->supp_rates_len = sband->n_bitrates; |
3568 | pos = bss->supp_rates; | 3602 | pos = bss->supp_rates; |
3569 | for (i = 0; i < sband->n_bitrates; i++) { | 3603 | for (i = 0; i < sband->n_bitrates; i++) { |
@@ -3572,7 +3606,7 @@ static int ieee80211_sta_create_ibss(struct net_device *dev, | |||
3572 | } | 3606 | } |
3573 | 3607 | ||
3574 | ret = ieee80211_sta_join_ibss(dev, ifsta, bss); | 3608 | ret = ieee80211_sta_join_ibss(dev, ifsta, bss); |
3575 | ieee80211_rx_bss_put(dev, bss); | 3609 | ieee80211_rx_bss_put(local, bss); |
3576 | return ret; | 3610 | return ret; |
3577 | } | 3611 | } |
3578 | 3612 | ||
@@ -3628,7 +3662,7 @@ static int ieee80211_sta_find_ibss(struct net_device *dev, | |||
3628 | " based on configured SSID\n", | 3662 | " based on configured SSID\n", |
3629 | dev->name, print_mac(mac, bssid)); | 3663 | dev->name, print_mac(mac, bssid)); |
3630 | ret = ieee80211_sta_join_ibss(dev, ifsta, bss); | 3664 | ret = ieee80211_sta_join_ibss(dev, ifsta, bss); |
3631 | ieee80211_rx_bss_put(dev, bss); | 3665 | ieee80211_rx_bss_put(local, bss); |
3632 | return ret; | 3666 | return ret; |
3633 | } | 3667 | } |
3634 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 3668 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
@@ -3679,28 +3713,45 @@ int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len) | |||
3679 | { | 3713 | { |
3680 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 3714 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
3681 | struct ieee80211_if_sta *ifsta; | 3715 | struct ieee80211_if_sta *ifsta; |
3716 | int res; | ||
3682 | 3717 | ||
3683 | if (len > IEEE80211_MAX_SSID_LEN) | 3718 | if (len > IEEE80211_MAX_SSID_LEN) |
3684 | return -EINVAL; | 3719 | return -EINVAL; |
3685 | 3720 | ||
3686 | ifsta = &sdata->u.sta; | 3721 | ifsta = &sdata->u.sta; |
3687 | 3722 | ||
3688 | if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0) | 3723 | if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0) { |
3724 | memset(ifsta->ssid, 0, sizeof(ifsta->ssid)); | ||
3725 | memcpy(ifsta->ssid, ssid, len); | ||
3726 | ifsta->ssid_len = len; | ||
3689 | ifsta->flags &= ~IEEE80211_STA_PREV_BSSID_SET; | 3727 | ifsta->flags &= ~IEEE80211_STA_PREV_BSSID_SET; |
3690 | memcpy(ifsta->ssid, ssid, len); | 3728 | |
3691 | memset(ifsta->ssid + len, 0, IEEE80211_MAX_SSID_LEN - len); | 3729 | res = 0; |
3692 | ifsta->ssid_len = len; | 3730 | /* |
3731 | * Hack! MLME code needs to be cleaned up to have different | ||
3732 | * entry points for configuration and internal selection change | ||
3733 | */ | ||
3734 | if (netif_running(sdata->dev)) | ||
3735 | res = ieee80211_if_config(sdata, IEEE80211_IFCC_SSID); | ||
3736 | if (res) { | ||
3737 | printk(KERN_DEBUG "%s: Failed to config new SSID to " | ||
3738 | "the low-level driver\n", dev->name); | ||
3739 | return res; | ||
3740 | } | ||
3741 | } | ||
3693 | 3742 | ||
3694 | if (len) | 3743 | if (len) |
3695 | ifsta->flags |= IEEE80211_STA_SSID_SET; | 3744 | ifsta->flags |= IEEE80211_STA_SSID_SET; |
3696 | else | 3745 | else |
3697 | ifsta->flags &= ~IEEE80211_STA_SSID_SET; | 3746 | ifsta->flags &= ~IEEE80211_STA_SSID_SET; |
3747 | |||
3698 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && | 3748 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && |
3699 | !(ifsta->flags & IEEE80211_STA_BSSID_SET)) { | 3749 | !(ifsta->flags & IEEE80211_STA_BSSID_SET)) { |
3700 | ifsta->ibss_join_req = jiffies; | 3750 | ifsta->ibss_join_req = jiffies; |
3701 | ifsta->state = IEEE80211_IBSS_SEARCH; | 3751 | ifsta->state = IEEE80211_IBSS_SEARCH; |
3702 | return ieee80211_sta_find_ibss(dev, ifsta); | 3752 | return ieee80211_sta_find_ibss(dev, ifsta); |
3703 | } | 3753 | } |
3754 | |||
3704 | return 0; | 3755 | return 0; |
3705 | } | 3756 | } |
3706 | 3757 | ||
@@ -3726,7 +3777,12 @@ int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid) | |||
3726 | 3777 | ||
3727 | if (memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) { | 3778 | if (memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) { |
3728 | memcpy(ifsta->bssid, bssid, ETH_ALEN); | 3779 | memcpy(ifsta->bssid, bssid, ETH_ALEN); |
3729 | res = ieee80211_if_config(dev); | 3780 | res = 0; |
3781 | /* | ||
3782 | * Hack! See also ieee80211_sta_set_ssid. | ||
3783 | */ | ||
3784 | if (netif_running(sdata->dev)) | ||
3785 | res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID); | ||
3730 | if (res) { | 3786 | if (res) { |
3731 | printk(KERN_DEBUG "%s: Failed to config new BSSID to " | 3787 | printk(KERN_DEBUG "%s: Failed to config new BSSID to " |
3732 | "the low-level driver\n", dev->name); | 3788 | "the low-level driver\n", dev->name); |
@@ -3749,7 +3805,7 @@ static void ieee80211_send_nullfunc(struct ieee80211_local *local, | |||
3749 | { | 3805 | { |
3750 | struct sk_buff *skb; | 3806 | struct sk_buff *skb; |
3751 | struct ieee80211_hdr *nullfunc; | 3807 | struct ieee80211_hdr *nullfunc; |
3752 | u16 fc; | 3808 | __le16 fc; |
3753 | 3809 | ||
3754 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24); | 3810 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24); |
3755 | if (!skb) { | 3811 | if (!skb) { |
@@ -3761,11 +3817,11 @@ static void ieee80211_send_nullfunc(struct ieee80211_local *local, | |||
3761 | 3817 | ||
3762 | nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24); | 3818 | nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24); |
3763 | memset(nullfunc, 0, 24); | 3819 | memset(nullfunc, 0, 24); |
3764 | fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | | 3820 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | |
3765 | IEEE80211_FCTL_TODS; | 3821 | IEEE80211_FCTL_TODS); |
3766 | if (powersave) | 3822 | if (powersave) |
3767 | fc |= IEEE80211_FCTL_PM; | 3823 | fc |= cpu_to_le16(IEEE80211_FCTL_PM); |
3768 | nullfunc->frame_control = cpu_to_le16(fc); | 3824 | nullfunc->frame_control = fc; |
3769 | memcpy(nullfunc->addr1, sdata->u.sta.bssid, ETH_ALEN); | 3825 | memcpy(nullfunc->addr1, sdata->u.sta.bssid, ETH_ALEN); |
3770 | memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN); | 3826 | memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN); |
3771 | memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN); | 3827 | memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN); |
@@ -3813,6 +3869,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw) | |||
3813 | 3869 | ||
3814 | 3870 | ||
3815 | netif_tx_lock_bh(local->mdev); | 3871 | netif_tx_lock_bh(local->mdev); |
3872 | netif_addr_lock(local->mdev); | ||
3816 | local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC; | 3873 | local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC; |
3817 | local->ops->configure_filter(local_to_hw(local), | 3874 | local->ops->configure_filter(local_to_hw(local), |
3818 | FIF_BCN_PRBRESP_PROMISC, | 3875 | FIF_BCN_PRBRESP_PROMISC, |
@@ -3820,15 +3877,11 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw) | |||
3820 | local->mdev->mc_count, | 3877 | local->mdev->mc_count, |
3821 | local->mdev->mc_list); | 3878 | local->mdev->mc_list); |
3822 | 3879 | ||
3880 | netif_addr_unlock(local->mdev); | ||
3823 | netif_tx_unlock_bh(local->mdev); | 3881 | netif_tx_unlock_bh(local->mdev); |
3824 | 3882 | ||
3825 | rcu_read_lock(); | 3883 | rcu_read_lock(); |
3826 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | 3884 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
3827 | |||
3828 | /* No need to wake the master device. */ | ||
3829 | if (sdata->dev == local->mdev) | ||
3830 | continue; | ||
3831 | |||
3832 | /* Tell AP we're back */ | 3885 | /* Tell AP we're back */ |
3833 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA && | 3886 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA && |
3834 | sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) | 3887 | sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) |
@@ -3994,12 +4047,6 @@ static int ieee80211_sta_start_scan(struct net_device *dev, | |||
3994 | 4047 | ||
3995 | rcu_read_lock(); | 4048 | rcu_read_lock(); |
3996 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | 4049 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
3997 | |||
3998 | /* Don't stop the master interface, otherwise we can't transmit | ||
3999 | * probes! */ | ||
4000 | if (sdata->dev == local->mdev) | ||
4001 | continue; | ||
4002 | |||
4003 | netif_stop_queue(sdata->dev); | 4050 | netif_stop_queue(sdata->dev); |
4004 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA && | 4051 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA && |
4005 | (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED)) | 4052 | (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED)) |
@@ -4017,14 +4064,14 @@ static int ieee80211_sta_start_scan(struct net_device *dev, | |||
4017 | local->scan_band = IEEE80211_BAND_2GHZ; | 4064 | local->scan_band = IEEE80211_BAND_2GHZ; |
4018 | local->scan_dev = dev; | 4065 | local->scan_dev = dev; |
4019 | 4066 | ||
4020 | netif_tx_lock_bh(local->mdev); | 4067 | netif_addr_lock_bh(local->mdev); |
4021 | local->filter_flags |= FIF_BCN_PRBRESP_PROMISC; | 4068 | local->filter_flags |= FIF_BCN_PRBRESP_PROMISC; |
4022 | local->ops->configure_filter(local_to_hw(local), | 4069 | local->ops->configure_filter(local_to_hw(local), |
4023 | FIF_BCN_PRBRESP_PROMISC, | 4070 | FIF_BCN_PRBRESP_PROMISC, |
4024 | &local->filter_flags, | 4071 | &local->filter_flags, |
4025 | local->mdev->mc_count, | 4072 | local->mdev->mc_count, |
4026 | local->mdev->mc_list); | 4073 | local->mdev->mc_list); |
4027 | netif_tx_unlock_bh(local->mdev); | 4074 | netif_addr_unlock_bh(local->mdev); |
4028 | 4075 | ||
4029 | /* TODO: start scan as soon as all nullfunc frames are ACKed */ | 4076 | /* TODO: start scan as soon as all nullfunc frames are ACKed */ |
4030 | queue_delayed_work(local->hw.workqueue, &local->scan_work, | 4077 | queue_delayed_work(local->hw.workqueue, &local->scan_work, |
@@ -4059,6 +4106,7 @@ int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len) | |||
4059 | 4106 | ||
4060 | static char * | 4107 | static char * |
4061 | ieee80211_sta_scan_result(struct net_device *dev, | 4108 | ieee80211_sta_scan_result(struct net_device *dev, |
4109 | struct iw_request_info *info, | ||
4062 | struct ieee80211_sta_bss *bss, | 4110 | struct ieee80211_sta_bss *bss, |
4063 | char *current_ev, char *end_buf) | 4111 | char *current_ev, char *end_buf) |
4064 | { | 4112 | { |
@@ -4073,7 +4121,7 @@ ieee80211_sta_scan_result(struct net_device *dev, | |||
4073 | iwe.cmd = SIOCGIWAP; | 4121 | iwe.cmd = SIOCGIWAP; |
4074 | iwe.u.ap_addr.sa_family = ARPHRD_ETHER; | 4122 | iwe.u.ap_addr.sa_family = ARPHRD_ETHER; |
4075 | memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN); | 4123 | memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN); |
4076 | current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, | 4124 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, |
4077 | IW_EV_ADDR_LEN); | 4125 | IW_EV_ADDR_LEN); |
4078 | 4126 | ||
4079 | memset(&iwe, 0, sizeof(iwe)); | 4127 | memset(&iwe, 0, sizeof(iwe)); |
@@ -4081,13 +4129,13 @@ ieee80211_sta_scan_result(struct net_device *dev, | |||
4081 | if (bss_mesh_cfg(bss)) { | 4129 | if (bss_mesh_cfg(bss)) { |
4082 | iwe.u.data.length = bss_mesh_id_len(bss); | 4130 | iwe.u.data.length = bss_mesh_id_len(bss); |
4083 | iwe.u.data.flags = 1; | 4131 | iwe.u.data.flags = 1; |
4084 | current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, | 4132 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, |
4085 | bss_mesh_id(bss)); | 4133 | &iwe, bss_mesh_id(bss)); |
4086 | } else { | 4134 | } else { |
4087 | iwe.u.data.length = bss->ssid_len; | 4135 | iwe.u.data.length = bss->ssid_len; |
4088 | iwe.u.data.flags = 1; | 4136 | iwe.u.data.flags = 1; |
4089 | current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, | 4137 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, |
4090 | bss->ssid); | 4138 | &iwe, bss->ssid); |
4091 | } | 4139 | } |
4092 | 4140 | ||
4093 | if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS) | 4141 | if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS) |
@@ -4100,30 +4148,30 @@ ieee80211_sta_scan_result(struct net_device *dev, | |||
4100 | iwe.u.mode = IW_MODE_MASTER; | 4148 | iwe.u.mode = IW_MODE_MASTER; |
4101 | else | 4149 | else |
4102 | iwe.u.mode = IW_MODE_ADHOC; | 4150 | iwe.u.mode = IW_MODE_ADHOC; |
4103 | current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, | 4151 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, |
4104 | IW_EV_UINT_LEN); | 4152 | &iwe, IW_EV_UINT_LEN); |
4105 | } | 4153 | } |
4106 | 4154 | ||
4107 | memset(&iwe, 0, sizeof(iwe)); | 4155 | memset(&iwe, 0, sizeof(iwe)); |
4108 | iwe.cmd = SIOCGIWFREQ; | 4156 | iwe.cmd = SIOCGIWFREQ; |
4109 | iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq); | 4157 | iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq); |
4110 | iwe.u.freq.e = 0; | 4158 | iwe.u.freq.e = 0; |
4111 | current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, | 4159 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, |
4112 | IW_EV_FREQ_LEN); | 4160 | IW_EV_FREQ_LEN); |
4113 | 4161 | ||
4114 | memset(&iwe, 0, sizeof(iwe)); | 4162 | memset(&iwe, 0, sizeof(iwe)); |
4115 | iwe.cmd = SIOCGIWFREQ; | 4163 | iwe.cmd = SIOCGIWFREQ; |
4116 | iwe.u.freq.m = bss->freq; | 4164 | iwe.u.freq.m = bss->freq; |
4117 | iwe.u.freq.e = 6; | 4165 | iwe.u.freq.e = 6; |
4118 | current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, | 4166 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, |
4119 | IW_EV_FREQ_LEN); | 4167 | IW_EV_FREQ_LEN); |
4120 | memset(&iwe, 0, sizeof(iwe)); | 4168 | memset(&iwe, 0, sizeof(iwe)); |
4121 | iwe.cmd = IWEVQUAL; | 4169 | iwe.cmd = IWEVQUAL; |
4122 | iwe.u.qual.qual = bss->signal; | 4170 | iwe.u.qual.qual = bss->qual; |
4123 | iwe.u.qual.level = bss->rssi; | 4171 | iwe.u.qual.level = bss->signal; |
4124 | iwe.u.qual.noise = bss->noise; | 4172 | iwe.u.qual.noise = bss->noise; |
4125 | iwe.u.qual.updated = local->wstats_flags; | 4173 | iwe.u.qual.updated = local->wstats_flags; |
4126 | current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, | 4174 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, |
4127 | IW_EV_QUAL_LEN); | 4175 | IW_EV_QUAL_LEN); |
4128 | 4176 | ||
4129 | memset(&iwe, 0, sizeof(iwe)); | 4177 | memset(&iwe, 0, sizeof(iwe)); |
@@ -4133,27 +4181,36 @@ ieee80211_sta_scan_result(struct net_device *dev, | |||
4133 | else | 4181 | else |
4134 | iwe.u.data.flags = IW_ENCODE_DISABLED; | 4182 | iwe.u.data.flags = IW_ENCODE_DISABLED; |
4135 | iwe.u.data.length = 0; | 4183 | iwe.u.data.length = 0; |
4136 | current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, ""); | 4184 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, |
4185 | &iwe, ""); | ||
4137 | 4186 | ||
4138 | if (bss && bss->wpa_ie) { | 4187 | if (bss && bss->wpa_ie) { |
4139 | memset(&iwe, 0, sizeof(iwe)); | 4188 | memset(&iwe, 0, sizeof(iwe)); |
4140 | iwe.cmd = IWEVGENIE; | 4189 | iwe.cmd = IWEVGENIE; |
4141 | iwe.u.data.length = bss->wpa_ie_len; | 4190 | iwe.u.data.length = bss->wpa_ie_len; |
4142 | current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, | 4191 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, |
4143 | bss->wpa_ie); | 4192 | &iwe, bss->wpa_ie); |
4144 | } | 4193 | } |
4145 | 4194 | ||
4146 | if (bss && bss->rsn_ie) { | 4195 | if (bss && bss->rsn_ie) { |
4147 | memset(&iwe, 0, sizeof(iwe)); | 4196 | memset(&iwe, 0, sizeof(iwe)); |
4148 | iwe.cmd = IWEVGENIE; | 4197 | iwe.cmd = IWEVGENIE; |
4149 | iwe.u.data.length = bss->rsn_ie_len; | 4198 | iwe.u.data.length = bss->rsn_ie_len; |
4150 | current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, | 4199 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, |
4151 | bss->rsn_ie); | 4200 | &iwe, bss->rsn_ie); |
4201 | } | ||
4202 | |||
4203 | if (bss && bss->ht_ie) { | ||
4204 | memset(&iwe, 0, sizeof(iwe)); | ||
4205 | iwe.cmd = IWEVGENIE; | ||
4206 | iwe.u.data.length = bss->ht_ie_len; | ||
4207 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
4208 | &iwe, bss->ht_ie); | ||
4152 | } | 4209 | } |
4153 | 4210 | ||
4154 | if (bss && bss->supp_rates_len > 0) { | 4211 | if (bss && bss->supp_rates_len > 0) { |
4155 | /* display all supported rates in readable format */ | 4212 | /* display all supported rates in readable format */ |
4156 | char *p = current_ev + IW_EV_LCP_LEN; | 4213 | char *p = current_ev + iwe_stream_lcp_len(info); |
4157 | int i; | 4214 | int i; |
4158 | 4215 | ||
4159 | memset(&iwe, 0, sizeof(iwe)); | 4216 | memset(&iwe, 0, sizeof(iwe)); |
@@ -4164,7 +4221,7 @@ ieee80211_sta_scan_result(struct net_device *dev, | |||
4164 | for (i = 0; i < bss->supp_rates_len; i++) { | 4221 | for (i = 0; i < bss->supp_rates_len; i++) { |
4165 | iwe.u.bitrate.value = ((bss->supp_rates[i] & | 4222 | iwe.u.bitrate.value = ((bss->supp_rates[i] & |
4166 | 0x7f) * 500000); | 4223 | 0x7f) * 500000); |
4167 | p = iwe_stream_add_value(current_ev, p, | 4224 | p = iwe_stream_add_value(info, current_ev, p, |
4168 | end_buf, &iwe, IW_EV_PARAM_LEN); | 4225 | end_buf, &iwe, IW_EV_PARAM_LEN); |
4169 | } | 4226 | } |
4170 | current_ev = p; | 4227 | current_ev = p; |
@@ -4178,8 +4235,16 @@ ieee80211_sta_scan_result(struct net_device *dev, | |||
4178 | iwe.cmd = IWEVCUSTOM; | 4235 | iwe.cmd = IWEVCUSTOM; |
4179 | sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp)); | 4236 | sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp)); |
4180 | iwe.u.data.length = strlen(buf); | 4237 | iwe.u.data.length = strlen(buf); |
4181 | current_ev = iwe_stream_add_point(current_ev, end_buf, | 4238 | current_ev = iwe_stream_add_point(info, current_ev, |
4239 | end_buf, | ||
4182 | &iwe, buf); | 4240 | &iwe, buf); |
4241 | memset(&iwe, 0, sizeof(iwe)); | ||
4242 | iwe.cmd = IWEVCUSTOM; | ||
4243 | sprintf(buf, " Last beacon: %dms ago", | ||
4244 | jiffies_to_msecs(jiffies - bss->last_update)); | ||
4245 | iwe.u.data.length = strlen(buf); | ||
4246 | current_ev = iwe_stream_add_point(info, current_ev, | ||
4247 | end_buf, &iwe, buf); | ||
4183 | kfree(buf); | 4248 | kfree(buf); |
4184 | } | 4249 | } |
4185 | } | 4250 | } |
@@ -4193,31 +4258,36 @@ ieee80211_sta_scan_result(struct net_device *dev, | |||
4193 | iwe.cmd = IWEVCUSTOM; | 4258 | iwe.cmd = IWEVCUSTOM; |
4194 | sprintf(buf, "Mesh network (version %d)", cfg[0]); | 4259 | sprintf(buf, "Mesh network (version %d)", cfg[0]); |
4195 | iwe.u.data.length = strlen(buf); | 4260 | iwe.u.data.length = strlen(buf); |
4196 | current_ev = iwe_stream_add_point(current_ev, end_buf, | 4261 | current_ev = iwe_stream_add_point(info, current_ev, |
4262 | end_buf, | ||
4197 | &iwe, buf); | 4263 | &iwe, buf); |
4198 | sprintf(buf, "Path Selection Protocol ID: " | 4264 | sprintf(buf, "Path Selection Protocol ID: " |
4199 | "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3], | 4265 | "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3], |
4200 | cfg[4]); | 4266 | cfg[4]); |
4201 | iwe.u.data.length = strlen(buf); | 4267 | iwe.u.data.length = strlen(buf); |
4202 | current_ev = iwe_stream_add_point(current_ev, end_buf, | 4268 | current_ev = iwe_stream_add_point(info, current_ev, |
4269 | end_buf, | ||
4203 | &iwe, buf); | 4270 | &iwe, buf); |
4204 | sprintf(buf, "Path Selection Metric ID: " | 4271 | sprintf(buf, "Path Selection Metric ID: " |
4205 | "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7], | 4272 | "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7], |
4206 | cfg[8]); | 4273 | cfg[8]); |
4207 | iwe.u.data.length = strlen(buf); | 4274 | iwe.u.data.length = strlen(buf); |
4208 | current_ev = iwe_stream_add_point(current_ev, end_buf, | 4275 | current_ev = iwe_stream_add_point(info, current_ev, |
4276 | end_buf, | ||
4209 | &iwe, buf); | 4277 | &iwe, buf); |
4210 | sprintf(buf, "Congestion Control Mode ID: " | 4278 | sprintf(buf, "Congestion Control Mode ID: " |
4211 | "0x%02X%02X%02X%02X", cfg[9], cfg[10], | 4279 | "0x%02X%02X%02X%02X", cfg[9], cfg[10], |
4212 | cfg[11], cfg[12]); | 4280 | cfg[11], cfg[12]); |
4213 | iwe.u.data.length = strlen(buf); | 4281 | iwe.u.data.length = strlen(buf); |
4214 | current_ev = iwe_stream_add_point(current_ev, end_buf, | 4282 | current_ev = iwe_stream_add_point(info, current_ev, |
4283 | end_buf, | ||
4215 | &iwe, buf); | 4284 | &iwe, buf); |
4216 | sprintf(buf, "Channel Precedence: " | 4285 | sprintf(buf, "Channel Precedence: " |
4217 | "0x%02X%02X%02X%02X", cfg[13], cfg[14], | 4286 | "0x%02X%02X%02X%02X", cfg[13], cfg[14], |
4218 | cfg[15], cfg[16]); | 4287 | cfg[15], cfg[16]); |
4219 | iwe.u.data.length = strlen(buf); | 4288 | iwe.u.data.length = strlen(buf); |
4220 | current_ev = iwe_stream_add_point(current_ev, end_buf, | 4289 | current_ev = iwe_stream_add_point(info, current_ev, |
4290 | end_buf, | ||
4221 | &iwe, buf); | 4291 | &iwe, buf); |
4222 | kfree(buf); | 4292 | kfree(buf); |
4223 | } | 4293 | } |
@@ -4227,7 +4297,9 @@ ieee80211_sta_scan_result(struct net_device *dev, | |||
4227 | } | 4297 | } |
4228 | 4298 | ||
4229 | 4299 | ||
4230 | int ieee80211_sta_scan_results(struct net_device *dev, char *buf, size_t len) | 4300 | int ieee80211_sta_scan_results(struct net_device *dev, |
4301 | struct iw_request_info *info, | ||
4302 | char *buf, size_t len) | ||
4231 | { | 4303 | { |
4232 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 4304 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
4233 | char *current_ev = buf; | 4305 | char *current_ev = buf; |
@@ -4240,8 +4312,8 @@ int ieee80211_sta_scan_results(struct net_device *dev, char *buf, size_t len) | |||
4240 | spin_unlock_bh(&local->sta_bss_lock); | 4312 | spin_unlock_bh(&local->sta_bss_lock); |
4241 | return -E2BIG; | 4313 | return -E2BIG; |
4242 | } | 4314 | } |
4243 | current_ev = ieee80211_sta_scan_result(dev, bss, current_ev, | 4315 | current_ev = ieee80211_sta_scan_result(dev, info, bss, |
4244 | end_buf); | 4316 | current_ev, end_buf); |
4245 | } | 4317 | } |
4246 | spin_unlock_bh(&local->sta_bss_lock); | 4318 | spin_unlock_bh(&local->sta_bss_lock); |
4247 | return current_ev - buf; | 4319 | return current_ev - buf; |
@@ -4252,6 +4324,7 @@ int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len) | |||
4252 | { | 4324 | { |
4253 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 4325 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
4254 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 4326 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
4327 | |||
4255 | kfree(ifsta->extra_ie); | 4328 | kfree(ifsta->extra_ie); |
4256 | if (len == 0) { | 4329 | if (len == 0) { |
4257 | ifsta->extra_ie = NULL; | 4330 | ifsta->extra_ie = NULL; |
@@ -4269,14 +4342,15 @@ int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len) | |||
4269 | } | 4342 | } |
4270 | 4343 | ||
4271 | 4344 | ||
4272 | struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev, | 4345 | struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev, |
4273 | struct sk_buff *skb, u8 *bssid, | 4346 | struct sk_buff *skb, u8 *bssid, |
4274 | u8 *addr) | 4347 | u8 *addr, u64 supp_rates) |
4275 | { | 4348 | { |
4276 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 4349 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
4277 | struct sta_info *sta; | 4350 | struct sta_info *sta; |
4278 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 4351 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
4279 | DECLARE_MAC_BUF(mac); | 4352 | DECLARE_MAC_BUF(mac); |
4353 | int band = local->hw.conf.channel->band; | ||
4280 | 4354 | ||
4281 | /* TODO: Could consider removing the least recently used entry and | 4355 | /* TODO: Could consider removing the least recently used entry and |
4282 | * allow new one to be added. */ | 4356 | * allow new one to be added. */ |
@@ -4288,17 +4362,24 @@ struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev, | |||
4288 | return NULL; | 4362 | return NULL; |
4289 | } | 4363 | } |
4290 | 4364 | ||
4365 | if (compare_ether_addr(bssid, sdata->u.sta.bssid)) | ||
4366 | return NULL; | ||
4367 | |||
4368 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
4291 | printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n", | 4369 | printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n", |
4292 | wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name); | 4370 | wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name); |
4371 | #endif | ||
4293 | 4372 | ||
4294 | sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); | 4373 | sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); |
4295 | if (!sta) | 4374 | if (!sta) |
4296 | return NULL; | 4375 | return NULL; |
4297 | 4376 | ||
4298 | sta->flags |= WLAN_STA_AUTHORIZED; | 4377 | set_sta_flags(sta, WLAN_STA_AUTHORIZED); |
4299 | 4378 | ||
4300 | sta->supp_rates[local->hw.conf.channel->band] = | 4379 | if (supp_rates) |
4301 | sdata->u.sta.supp_rates_bits[local->hw.conf.channel->band]; | 4380 | sta->supp_rates[band] = supp_rates; |
4381 | else | ||
4382 | sta->supp_rates[band] = sdata->u.sta.supp_rates_bits[band]; | ||
4302 | 4383 | ||
4303 | rate_control_rate_init(sta, local); | 4384 | rate_control_rate_init(sta, local); |
4304 | 4385 | ||
@@ -4314,7 +4395,7 @@ int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason) | |||
4314 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 4395 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
4315 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 4396 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
4316 | 4397 | ||
4317 | printk(KERN_DEBUG "%s: deauthenticate(reason=%d)\n", | 4398 | printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n", |
4318 | dev->name, reason); | 4399 | dev->name, reason); |
4319 | 4400 | ||
4320 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA && | 4401 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA && |
@@ -4332,7 +4413,7 @@ int ieee80211_sta_disassociate(struct net_device *dev, u16 reason) | |||
4332 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 4413 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
4333 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 4414 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
4334 | 4415 | ||
4335 | printk(KERN_DEBUG "%s: disassociate(reason=%d)\n", | 4416 | printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n", |
4336 | dev->name, reason); | 4417 | dev->name, reason); |
4337 | 4418 | ||
4338 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | 4419 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) |
@@ -4356,12 +4437,10 @@ void ieee80211_notify_mac(struct ieee80211_hw *hw, | |||
4356 | case IEEE80211_NOTIFY_RE_ASSOC: | 4437 | case IEEE80211_NOTIFY_RE_ASSOC: |
4357 | rcu_read_lock(); | 4438 | rcu_read_lock(); |
4358 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | 4439 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
4440 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | ||
4441 | continue; | ||
4359 | 4442 | ||
4360 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { | 4443 | ieee80211_sta_req_auth(sdata->dev, &sdata->u.sta); |
4361 | ieee80211_sta_req_auth(sdata->dev, | ||
4362 | &sdata->u.sta); | ||
4363 | } | ||
4364 | |||
4365 | } | 4444 | } |
4366 | rcu_read_unlock(); | 4445 | rcu_read_unlock(); |
4367 | break; | 4446 | break; |
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 841df93807fc..0388c090dfe9 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c | |||
@@ -176,20 +176,24 @@ void rate_control_get_rate(struct net_device *dev, | |||
176 | rcu_read_lock(); | 176 | rcu_read_lock(); |
177 | sta = sta_info_get(local, hdr->addr1); | 177 | sta = sta_info_get(local, hdr->addr1); |
178 | 178 | ||
179 | memset(sel, 0, sizeof(struct rate_selection)); | 179 | sel->rate_idx = -1; |
180 | sel->nonerp_idx = -1; | ||
181 | sel->probe_idx = -1; | ||
180 | 182 | ||
181 | ref->ops->get_rate(ref->priv, dev, sband, skb, sel); | 183 | ref->ops->get_rate(ref->priv, dev, sband, skb, sel); |
182 | 184 | ||
185 | BUG_ON(sel->rate_idx < 0); | ||
186 | |||
183 | /* Select a non-ERP backup rate. */ | 187 | /* Select a non-ERP backup rate. */ |
184 | if (!sel->nonerp) { | 188 | if (sel->nonerp_idx < 0) { |
185 | for (i = 0; i < sband->n_bitrates; i++) { | 189 | for (i = 0; i < sband->n_bitrates; i++) { |
186 | struct ieee80211_rate *rate = &sband->bitrates[i]; | 190 | struct ieee80211_rate *rate = &sband->bitrates[i]; |
187 | if (sel->rate->bitrate < rate->bitrate) | 191 | if (sband->bitrates[sel->rate_idx].bitrate < rate->bitrate) |
188 | break; | 192 | break; |
189 | 193 | ||
190 | if (rate_supported(sta, sband->band, i) && | 194 | if (rate_supported(sta, sband->band, i) && |
191 | !(rate->flags & IEEE80211_RATE_ERP_G)) | 195 | !(rate->flags & IEEE80211_RATE_ERP_G)) |
192 | sel->nonerp = rate; | 196 | sel->nonerp_idx = i; |
193 | } | 197 | } |
194 | } | 198 | } |
195 | 199 | ||
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index 5b45f33cb766..ede7ab56f65b 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h | |||
@@ -19,22 +19,22 @@ | |||
19 | #include "ieee80211_i.h" | 19 | #include "ieee80211_i.h" |
20 | #include "sta_info.h" | 20 | #include "sta_info.h" |
21 | 21 | ||
22 | /* TODO: kdoc */ | 22 | /** |
23 | * struct rate_selection - rate selection for rate control algos | ||
24 | * @rate: selected transmission rate index | ||
25 | * @nonerp: Non-ERP rate to use instead if ERP cannot be used | ||
26 | * @probe: rate for probing (or -1) | ||
27 | * | ||
28 | */ | ||
23 | struct rate_selection { | 29 | struct rate_selection { |
24 | /* Selected transmission rate */ | 30 | s8 rate_idx, nonerp_idx, probe_idx; |
25 | struct ieee80211_rate *rate; | ||
26 | /* Non-ERP rate to use if mac80211 decides it cannot use an ERP rate */ | ||
27 | struct ieee80211_rate *nonerp; | ||
28 | /* probe with this rate, or NULL for no probing */ | ||
29 | struct ieee80211_rate *probe; | ||
30 | }; | 31 | }; |
31 | 32 | ||
32 | struct rate_control_ops { | 33 | struct rate_control_ops { |
33 | struct module *module; | 34 | struct module *module; |
34 | const char *name; | 35 | const char *name; |
35 | void (*tx_status)(void *priv, struct net_device *dev, | 36 | void (*tx_status)(void *priv, struct net_device *dev, |
36 | struct sk_buff *skb, | 37 | struct sk_buff *skb); |
37 | struct ieee80211_tx_status *status); | ||
38 | void (*get_rate)(void *priv, struct net_device *dev, | 38 | void (*get_rate)(void *priv, struct net_device *dev, |
39 | struct ieee80211_supported_band *band, | 39 | struct ieee80211_supported_band *band, |
40 | struct sk_buff *skb, | 40 | struct sk_buff *skb, |
@@ -76,13 +76,12 @@ struct rate_control_ref *rate_control_get(struct rate_control_ref *ref); | |||
76 | void rate_control_put(struct rate_control_ref *ref); | 76 | void rate_control_put(struct rate_control_ref *ref); |
77 | 77 | ||
78 | static inline void rate_control_tx_status(struct net_device *dev, | 78 | static inline void rate_control_tx_status(struct net_device *dev, |
79 | struct sk_buff *skb, | 79 | struct sk_buff *skb) |
80 | struct ieee80211_tx_status *status) | ||
81 | { | 80 | { |
82 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 81 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
83 | struct rate_control_ref *ref = local->rate_ctrl; | 82 | struct rate_control_ref *ref = local->rate_ctrl; |
84 | 83 | ||
85 | ref->ops->tx_status(ref->priv, dev, skb, status); | 84 | ref->ops->tx_status(ref->priv, dev, skb); |
86 | } | 85 | } |
87 | 86 | ||
88 | 87 | ||
@@ -138,7 +137,7 @@ static inline int rate_supported(struct sta_info *sta, | |||
138 | return (sta == NULL || sta->supp_rates[band] & BIT(index)); | 137 | return (sta == NULL || sta->supp_rates[band] & BIT(index)); |
139 | } | 138 | } |
140 | 139 | ||
141 | static inline int | 140 | static inline s8 |
142 | rate_lowest_index(struct ieee80211_local *local, | 141 | rate_lowest_index(struct ieee80211_local *local, |
143 | struct ieee80211_supported_band *sband, | 142 | struct ieee80211_supported_band *sband, |
144 | struct sta_info *sta) | 143 | struct sta_info *sta) |
@@ -155,14 +154,6 @@ rate_lowest_index(struct ieee80211_local *local, | |||
155 | return 0; | 154 | return 0; |
156 | } | 155 | } |
157 | 156 | ||
158 | static inline struct ieee80211_rate * | ||
159 | rate_lowest(struct ieee80211_local *local, | ||
160 | struct ieee80211_supported_band *sband, | ||
161 | struct sta_info *sta) | ||
162 | { | ||
163 | return &sband->bitrates[rate_lowest_index(local, sband, sta)]; | ||
164 | } | ||
165 | |||
166 | 157 | ||
167 | /* functions for rate control related to a device */ | 158 | /* functions for rate control related to a device */ |
168 | int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, | 159 | int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, |
@@ -171,9 +162,7 @@ void rate_control_deinitialize(struct ieee80211_local *local); | |||
171 | 162 | ||
172 | 163 | ||
173 | /* Rate control algorithms */ | 164 | /* Rate control algorithms */ |
174 | #if defined(RC80211_PID_COMPILE) || \ | 165 | #ifdef CONFIG_MAC80211_RC_PID |
175 | (defined(CONFIG_MAC80211_RC_PID) && \ | ||
176 | !defined(CONFIG_MAC80211_RC_PID_MODULE)) | ||
177 | extern int rc80211_pid_init(void); | 166 | extern int rc80211_pid_init(void); |
178 | extern void rc80211_pid_exit(void); | 167 | extern void rc80211_pid_exit(void); |
179 | #else | 168 | #else |
diff --git a/net/mac80211/rc80211_pid.h b/net/mac80211/rc80211_pid.h index 4ea7b97d1af1..0a9135b974b5 100644 --- a/net/mac80211/rc80211_pid.h +++ b/net/mac80211/rc80211_pid.h | |||
@@ -61,7 +61,7 @@ enum rc_pid_event_type { | |||
61 | union rc_pid_event_data { | 61 | union rc_pid_event_data { |
62 | /* RC_PID_EVENT_TX_STATUS */ | 62 | /* RC_PID_EVENT_TX_STATUS */ |
63 | struct { | 63 | struct { |
64 | struct ieee80211_tx_status tx_status; | 64 | struct ieee80211_tx_info tx_status; |
65 | }; | 65 | }; |
66 | /* RC_PID_EVENT_TYPE_RATE_CHANGE */ | 66 | /* RC_PID_EVENT_TYPE_RATE_CHANGE */ |
67 | /* RC_PID_EVENT_TYPE_TX_RATE */ | 67 | /* RC_PID_EVENT_TYPE_TX_RATE */ |
@@ -156,7 +156,7 @@ struct rc_pid_debugfs_entries { | |||
156 | }; | 156 | }; |
157 | 157 | ||
158 | void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf, | 158 | void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf, |
159 | struct ieee80211_tx_status *stat); | 159 | struct ieee80211_tx_info *stat); |
160 | 160 | ||
161 | void rate_control_pid_event_rate_change(struct rc_pid_event_buffer *buf, | 161 | void rate_control_pid_event_rate_change(struct rc_pid_event_buffer *buf, |
162 | int index, int rate); | 162 | int index, int rate); |
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c index bcd27c1d7594..a914ba73ccf5 100644 --- a/net/mac80211/rc80211_pid_algo.c +++ b/net/mac80211/rc80211_pid_algo.c | |||
@@ -237,8 +237,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo, | |||
237 | } | 237 | } |
238 | 238 | ||
239 | static void rate_control_pid_tx_status(void *priv, struct net_device *dev, | 239 | static void rate_control_pid_tx_status(void *priv, struct net_device *dev, |
240 | struct sk_buff *skb, | 240 | struct sk_buff *skb) |
241 | struct ieee80211_tx_status *status) | ||
242 | { | 241 | { |
243 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 242 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
244 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 243 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
@@ -248,6 +247,7 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev, | |||
248 | struct rc_pid_sta_info *spinfo; | 247 | struct rc_pid_sta_info *spinfo; |
249 | unsigned long period; | 248 | unsigned long period; |
250 | struct ieee80211_supported_band *sband; | 249 | struct ieee80211_supported_band *sband; |
250 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
251 | 251 | ||
252 | rcu_read_lock(); | 252 | rcu_read_lock(); |
253 | 253 | ||
@@ -259,35 +259,35 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev, | |||
259 | 259 | ||
260 | /* Don't update the state if we're not controlling the rate. */ | 260 | /* Don't update the state if we're not controlling the rate. */ |
261 | sdata = sta->sdata; | 261 | sdata = sta->sdata; |
262 | if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) { | 262 | if (sdata->force_unicast_rateidx > -1) { |
263 | sta->txrate_idx = sdata->bss->max_ratectrl_rateidx; | 263 | sta->txrate_idx = sdata->max_ratectrl_rateidx; |
264 | goto unlock; | 264 | goto unlock; |
265 | } | 265 | } |
266 | 266 | ||
267 | /* Ignore all frames that were sent with a different rate than the rate | 267 | /* Ignore all frames that were sent with a different rate than the rate |
268 | * we currently advise mac80211 to use. */ | 268 | * we currently advise mac80211 to use. */ |
269 | if (status->control.tx_rate != &sband->bitrates[sta->txrate_idx]) | 269 | if (info->tx_rate_idx != sta->txrate_idx) |
270 | goto unlock; | 270 | goto unlock; |
271 | 271 | ||
272 | spinfo = sta->rate_ctrl_priv; | 272 | spinfo = sta->rate_ctrl_priv; |
273 | spinfo->tx_num_xmit++; | 273 | spinfo->tx_num_xmit++; |
274 | 274 | ||
275 | #ifdef CONFIG_MAC80211_DEBUGFS | 275 | #ifdef CONFIG_MAC80211_DEBUGFS |
276 | rate_control_pid_event_tx_status(&spinfo->events, status); | 276 | rate_control_pid_event_tx_status(&spinfo->events, info); |
277 | #endif | 277 | #endif |
278 | 278 | ||
279 | /* We count frames that totally failed to be transmitted as two bad | 279 | /* We count frames that totally failed to be transmitted as two bad |
280 | * frames, those that made it out but had some retries as one good and | 280 | * frames, those that made it out but had some retries as one good and |
281 | * one bad frame. */ | 281 | * one bad frame. */ |
282 | if (status->excessive_retries) { | 282 | if (info->status.excessive_retries) { |
283 | spinfo->tx_num_failed += 2; | 283 | spinfo->tx_num_failed += 2; |
284 | spinfo->tx_num_xmit++; | 284 | spinfo->tx_num_xmit++; |
285 | } else if (status->retry_count) { | 285 | } else if (info->status.retry_count) { |
286 | spinfo->tx_num_failed++; | 286 | spinfo->tx_num_failed++; |
287 | spinfo->tx_num_xmit++; | 287 | spinfo->tx_num_xmit++; |
288 | } | 288 | } |
289 | 289 | ||
290 | if (status->excessive_retries) { | 290 | if (info->status.excessive_retries) { |
291 | sta->tx_retry_failed++; | 291 | sta->tx_retry_failed++; |
292 | sta->tx_num_consecutive_failures++; | 292 | sta->tx_num_consecutive_failures++; |
293 | sta->tx_num_mpdu_fail++; | 293 | sta->tx_num_mpdu_fail++; |
@@ -295,8 +295,8 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev, | |||
295 | sta->tx_num_consecutive_failures = 0; | 295 | sta->tx_num_consecutive_failures = 0; |
296 | sta->tx_num_mpdu_ok++; | 296 | sta->tx_num_mpdu_ok++; |
297 | } | 297 | } |
298 | sta->tx_retry_count += status->retry_count; | 298 | sta->tx_retry_count += info->status.retry_count; |
299 | sta->tx_num_mpdu_fail += status->retry_count; | 299 | sta->tx_num_mpdu_fail += info->status.retry_count; |
300 | 300 | ||
301 | /* Update PID controller state. */ | 301 | /* Update PID controller state. */ |
302 | period = (HZ * pinfo->sampling_period + 500) / 1000; | 302 | period = (HZ * pinfo->sampling_period + 500) / 1000; |
@@ -330,15 +330,15 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev, | |||
330 | fc = le16_to_cpu(hdr->frame_control); | 330 | fc = le16_to_cpu(hdr->frame_control); |
331 | if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || | 331 | if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || |
332 | is_multicast_ether_addr(hdr->addr1) || !sta) { | 332 | is_multicast_ether_addr(hdr->addr1) || !sta) { |
333 | sel->rate = rate_lowest(local, sband, sta); | 333 | sel->rate_idx = rate_lowest_index(local, sband, sta); |
334 | rcu_read_unlock(); | 334 | rcu_read_unlock(); |
335 | return; | 335 | return; |
336 | } | 336 | } |
337 | 337 | ||
338 | /* If a forced rate is in effect, select it. */ | 338 | /* If a forced rate is in effect, select it. */ |
339 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 339 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
340 | if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) | 340 | if (sdata->force_unicast_rateidx > -1) |
341 | sta->txrate_idx = sdata->bss->force_unicast_rateidx; | 341 | sta->txrate_idx = sdata->force_unicast_rateidx; |
342 | 342 | ||
343 | rateidx = sta->txrate_idx; | 343 | rateidx = sta->txrate_idx; |
344 | 344 | ||
@@ -349,7 +349,7 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev, | |||
349 | 349 | ||
350 | rcu_read_unlock(); | 350 | rcu_read_unlock(); |
351 | 351 | ||
352 | sel->rate = &sband->bitrates[rateidx]; | 352 | sel->rate_idx = rateidx; |
353 | 353 | ||
354 | #ifdef CONFIG_MAC80211_DEBUGFS | 354 | #ifdef CONFIG_MAC80211_DEBUGFS |
355 | rate_control_pid_event_tx_rate( | 355 | rate_control_pid_event_tx_rate( |
@@ -535,11 +535,6 @@ static struct rate_control_ops mac80211_rcpid = { | |||
535 | #endif | 535 | #endif |
536 | }; | 536 | }; |
537 | 537 | ||
538 | MODULE_DESCRIPTION("PID controller based rate control algorithm"); | ||
539 | MODULE_AUTHOR("Stefano Brivio"); | ||
540 | MODULE_AUTHOR("Mattias Nissler"); | ||
541 | MODULE_LICENSE("GPL"); | ||
542 | |||
543 | int __init rc80211_pid_init(void) | 538 | int __init rc80211_pid_init(void) |
544 | { | 539 | { |
545 | return ieee80211_rate_control_register(&mac80211_rcpid); | 540 | return ieee80211_rate_control_register(&mac80211_rcpid); |
@@ -549,8 +544,3 @@ void rc80211_pid_exit(void) | |||
549 | { | 544 | { |
550 | ieee80211_rate_control_unregister(&mac80211_rcpid); | 545 | ieee80211_rate_control_unregister(&mac80211_rcpid); |
551 | } | 546 | } |
552 | |||
553 | #ifdef CONFIG_MAC80211_RC_PID_MODULE | ||
554 | module_init(rc80211_pid_init); | ||
555 | module_exit(rc80211_pid_exit); | ||
556 | #endif | ||
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c index ff5c380f3c13..8121d3bc6835 100644 --- a/net/mac80211/rc80211_pid_debugfs.c +++ b/net/mac80211/rc80211_pid_debugfs.c | |||
@@ -39,11 +39,11 @@ static void rate_control_pid_event(struct rc_pid_event_buffer *buf, | |||
39 | } | 39 | } |
40 | 40 | ||
41 | void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf, | 41 | void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf, |
42 | struct ieee80211_tx_status *stat) | 42 | struct ieee80211_tx_info *stat) |
43 | { | 43 | { |
44 | union rc_pid_event_data evd; | 44 | union rc_pid_event_data evd; |
45 | 45 | ||
46 | memcpy(&evd.tx_status, stat, sizeof(struct ieee80211_tx_status)); | 46 | memcpy(&evd.tx_status, stat, sizeof(struct ieee80211_tx_info)); |
47 | rate_control_pid_event(buf, RC_PID_EVENT_TYPE_TX_STATUS, &evd); | 47 | rate_control_pid_event(buf, RC_PID_EVENT_TYPE_TX_STATUS, &evd); |
48 | } | 48 | } |
49 | 49 | ||
@@ -167,8 +167,8 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf, | |||
167 | switch (ev->type) { | 167 | switch (ev->type) { |
168 | case RC_PID_EVENT_TYPE_TX_STATUS: | 168 | case RC_PID_EVENT_TYPE_TX_STATUS: |
169 | p += snprintf(pb + p, length - p, "tx_status %u %u", | 169 | p += snprintf(pb + p, length - p, "tx_status %u %u", |
170 | ev->data.tx_status.excessive_retries, | 170 | ev->data.tx_status.status.excessive_retries, |
171 | ev->data.tx_status.retry_count); | 171 | ev->data.tx_status.status.retry_count); |
172 | break; | 172 | break; |
173 | case RC_PID_EVENT_TYPE_RATE_CHANGE: | 173 | case RC_PID_EVENT_TYPE_RATE_CHANGE: |
174 | p += snprintf(pb + p, length - p, "rate_change %d %d", | 174 | p += snprintf(pb + p, length - p, "rate_change %d %d", |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 0941e5d6a522..6d9ae67c27ca 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -61,22 +61,147 @@ static inline int should_drop_frame(struct ieee80211_rx_status *status, | |||
61 | int present_fcs_len, | 61 | int present_fcs_len, |
62 | int radiotap_len) | 62 | int radiotap_len) |
63 | { | 63 | { |
64 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 64 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
65 | 65 | ||
66 | if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) | 66 | if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) |
67 | return 1; | 67 | return 1; |
68 | if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len)) | 68 | if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len)) |
69 | return 1; | 69 | return 1; |
70 | if (((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == | 70 | if (ieee80211_is_ctl(hdr->frame_control) && |
71 | cpu_to_le16(IEEE80211_FTYPE_CTL)) && | 71 | !ieee80211_is_pspoll(hdr->frame_control) && |
72 | ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE)) != | 72 | !ieee80211_is_back_req(hdr->frame_control)) |
73 | cpu_to_le16(IEEE80211_STYPE_PSPOLL)) && | ||
74 | ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE)) != | ||
75 | cpu_to_le16(IEEE80211_STYPE_BACK_REQ))) | ||
76 | return 1; | 73 | return 1; |
77 | return 0; | 74 | return 0; |
78 | } | 75 | } |
79 | 76 | ||
77 | static int | ||
78 | ieee80211_rx_radiotap_len(struct ieee80211_local *local, | ||
79 | struct ieee80211_rx_status *status) | ||
80 | { | ||
81 | int len; | ||
82 | |||
83 | /* always present fields */ | ||
84 | len = sizeof(struct ieee80211_radiotap_header) + 9; | ||
85 | |||
86 | if (status->flag & RX_FLAG_TSFT) | ||
87 | len += 8; | ||
88 | if (local->hw.flags & IEEE80211_HW_SIGNAL_DB || | ||
89 | local->hw.flags & IEEE80211_HW_SIGNAL_DBM) | ||
90 | len += 1; | ||
91 | if (local->hw.flags & IEEE80211_HW_NOISE_DBM) | ||
92 | len += 1; | ||
93 | |||
94 | if (len & 1) /* padding for RX_FLAGS if necessary */ | ||
95 | len++; | ||
96 | |||
97 | /* make sure radiotap starts at a naturally aligned address */ | ||
98 | if (len % 8) | ||
99 | len = roundup(len, 8); | ||
100 | |||
101 | return len; | ||
102 | } | ||
103 | |||
104 | /** | ||
105 | * ieee80211_add_rx_radiotap_header - add radiotap header | ||
106 | * | ||
107 | * add a radiotap header containing all the fields which the hardware provided. | ||
108 | */ | ||
109 | static void | ||
110 | ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | ||
111 | struct sk_buff *skb, | ||
112 | struct ieee80211_rx_status *status, | ||
113 | struct ieee80211_rate *rate, | ||
114 | int rtap_len) | ||
115 | { | ||
116 | struct ieee80211_radiotap_header *rthdr; | ||
117 | unsigned char *pos; | ||
118 | |||
119 | rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); | ||
120 | memset(rthdr, 0, rtap_len); | ||
121 | |||
122 | /* radiotap header, set always present flags */ | ||
123 | rthdr->it_present = | ||
124 | cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | | ||
125 | (1 << IEEE80211_RADIOTAP_RATE) | | ||
126 | (1 << IEEE80211_RADIOTAP_CHANNEL) | | ||
127 | (1 << IEEE80211_RADIOTAP_ANTENNA) | | ||
128 | (1 << IEEE80211_RADIOTAP_RX_FLAGS)); | ||
129 | rthdr->it_len = cpu_to_le16(rtap_len); | ||
130 | |||
131 | pos = (unsigned char *)(rthdr+1); | ||
132 | |||
133 | /* the order of the following fields is important */ | ||
134 | |||
135 | /* IEEE80211_RADIOTAP_TSFT */ | ||
136 | if (status->flag & RX_FLAG_TSFT) { | ||
137 | *(__le64 *)pos = cpu_to_le64(status->mactime); | ||
138 | rthdr->it_present |= | ||
139 | cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); | ||
140 | pos += 8; | ||
141 | } | ||
142 | |||
143 | /* IEEE80211_RADIOTAP_FLAGS */ | ||
144 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) | ||
145 | *pos |= IEEE80211_RADIOTAP_F_FCS; | ||
146 | pos++; | ||
147 | |||
148 | /* IEEE80211_RADIOTAP_RATE */ | ||
149 | *pos = rate->bitrate / 5; | ||
150 | pos++; | ||
151 | |||
152 | /* IEEE80211_RADIOTAP_CHANNEL */ | ||
153 | *(__le16 *)pos = cpu_to_le16(status->freq); | ||
154 | pos += 2; | ||
155 | if (status->band == IEEE80211_BAND_5GHZ) | ||
156 | *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM | | ||
157 | IEEE80211_CHAN_5GHZ); | ||
158 | else | ||
159 | *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_DYN | | ||
160 | IEEE80211_CHAN_2GHZ); | ||
161 | pos += 2; | ||
162 | |||
163 | /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ | ||
164 | if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) { | ||
165 | *pos = status->signal; | ||
166 | rthdr->it_present |= | ||
167 | cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); | ||
168 | pos++; | ||
169 | } | ||
170 | |||
171 | /* IEEE80211_RADIOTAP_DBM_ANTNOISE */ | ||
172 | if (local->hw.flags & IEEE80211_HW_NOISE_DBM) { | ||
173 | *pos = status->noise; | ||
174 | rthdr->it_present |= | ||
175 | cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE); | ||
176 | pos++; | ||
177 | } | ||
178 | |||
179 | /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ | ||
180 | |||
181 | /* IEEE80211_RADIOTAP_ANTENNA */ | ||
182 | *pos = status->antenna; | ||
183 | pos++; | ||
184 | |||
185 | /* IEEE80211_RADIOTAP_DB_ANTSIGNAL */ | ||
186 | if (local->hw.flags & IEEE80211_HW_SIGNAL_DB) { | ||
187 | *pos = status->signal; | ||
188 | rthdr->it_present |= | ||
189 | cpu_to_le32(1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL); | ||
190 | pos++; | ||
191 | } | ||
192 | |||
193 | /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ | ||
194 | |||
195 | /* IEEE80211_RADIOTAP_RX_FLAGS */ | ||
196 | /* ensure 2 byte alignment for the 2 byte field as required */ | ||
197 | if ((pos - (unsigned char *)rthdr) & 1) | ||
198 | pos++; | ||
199 | /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */ | ||
200 | if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) | ||
201 | *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS); | ||
202 | pos += 2; | ||
203 | } | ||
204 | |||
80 | /* | 205 | /* |
81 | * This function copies a received frame to all monitor interfaces and | 206 | * This function copies a received frame to all monitor interfaces and |
82 | * returns a cleaned-up SKB that no longer includes the FCS nor the | 207 | * returns a cleaned-up SKB that no longer includes the FCS nor the |
@@ -89,17 +214,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
89 | { | 214 | { |
90 | struct ieee80211_sub_if_data *sdata; | 215 | struct ieee80211_sub_if_data *sdata; |
91 | int needed_headroom = 0; | 216 | int needed_headroom = 0; |
92 | struct ieee80211_radiotap_header *rthdr; | ||
93 | __le64 *rttsft = NULL; | ||
94 | struct ieee80211_rtap_fixed_data { | ||
95 | u8 flags; | ||
96 | u8 rate; | ||
97 | __le16 chan_freq; | ||
98 | __le16 chan_flags; | ||
99 | u8 antsignal; | ||
100 | u8 padding_for_rxflags; | ||
101 | __le16 rx_flags; | ||
102 | } __attribute__ ((packed)) *rtfixed; | ||
103 | struct sk_buff *skb, *skb2; | 217 | struct sk_buff *skb, *skb2; |
104 | struct net_device *prev_dev = NULL; | 218 | struct net_device *prev_dev = NULL; |
105 | int present_fcs_len = 0; | 219 | int present_fcs_len = 0; |
@@ -116,8 +230,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
116 | if (status->flag & RX_FLAG_RADIOTAP) | 230 | if (status->flag & RX_FLAG_RADIOTAP) |
117 | rtap_len = ieee80211_get_radiotap_len(origskb->data); | 231 | rtap_len = ieee80211_get_radiotap_len(origskb->data); |
118 | else | 232 | else |
119 | /* room for radiotap header, always present fields and TSFT */ | 233 | /* room for the radiotap header based on driver features */ |
120 | needed_headroom = sizeof(*rthdr) + sizeof(*rtfixed) + 8; | 234 | needed_headroom = ieee80211_rx_radiotap_len(local, status); |
121 | 235 | ||
122 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) | 236 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) |
123 | present_fcs_len = FCS_LEN; | 237 | present_fcs_len = FCS_LEN; |
@@ -163,55 +277,9 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
163 | } | 277 | } |
164 | 278 | ||
165 | /* if necessary, prepend radiotap information */ | 279 | /* if necessary, prepend radiotap information */ |
166 | if (!(status->flag & RX_FLAG_RADIOTAP)) { | 280 | if (!(status->flag & RX_FLAG_RADIOTAP)) |
167 | rtfixed = (void *) skb_push(skb, sizeof(*rtfixed)); | 281 | ieee80211_add_rx_radiotap_header(local, skb, status, rate, |
168 | rtap_len = sizeof(*rthdr) + sizeof(*rtfixed); | 282 | needed_headroom); |
169 | if (status->flag & RX_FLAG_TSFT) { | ||
170 | rttsft = (void *) skb_push(skb, sizeof(*rttsft)); | ||
171 | rtap_len += 8; | ||
172 | } | ||
173 | rthdr = (void *) skb_push(skb, sizeof(*rthdr)); | ||
174 | memset(rthdr, 0, sizeof(*rthdr)); | ||
175 | memset(rtfixed, 0, sizeof(*rtfixed)); | ||
176 | rthdr->it_present = | ||
177 | cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | | ||
178 | (1 << IEEE80211_RADIOTAP_RATE) | | ||
179 | (1 << IEEE80211_RADIOTAP_CHANNEL) | | ||
180 | (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) | | ||
181 | (1 << IEEE80211_RADIOTAP_RX_FLAGS)); | ||
182 | rtfixed->flags = 0; | ||
183 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) | ||
184 | rtfixed->flags |= IEEE80211_RADIOTAP_F_FCS; | ||
185 | |||
186 | if (rttsft) { | ||
187 | *rttsft = cpu_to_le64(status->mactime); | ||
188 | rthdr->it_present |= | ||
189 | cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); | ||
190 | } | ||
191 | |||
192 | /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */ | ||
193 | rtfixed->rx_flags = 0; | ||
194 | if (status->flag & | ||
195 | (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) | ||
196 | rtfixed->rx_flags |= | ||
197 | cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS); | ||
198 | |||
199 | rtfixed->rate = rate->bitrate / 5; | ||
200 | |||
201 | rtfixed->chan_freq = cpu_to_le16(status->freq); | ||
202 | |||
203 | if (status->band == IEEE80211_BAND_5GHZ) | ||
204 | rtfixed->chan_flags = | ||
205 | cpu_to_le16(IEEE80211_CHAN_OFDM | | ||
206 | IEEE80211_CHAN_5GHZ); | ||
207 | else | ||
208 | rtfixed->chan_flags = | ||
209 | cpu_to_le16(IEEE80211_CHAN_DYN | | ||
210 | IEEE80211_CHAN_2GHZ); | ||
211 | |||
212 | rtfixed->antsignal = status->ssi; | ||
213 | rthdr->it_len = cpu_to_le16(rtap_len); | ||
214 | } | ||
215 | 283 | ||
216 | skb_reset_mac_header(skb); | 284 | skb_reset_mac_header(skb); |
217 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 285 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -253,33 +321,33 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
253 | 321 | ||
254 | static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) | 322 | static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) |
255 | { | 323 | { |
256 | u8 *data = rx->skb->data; | 324 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
257 | int tid; | 325 | int tid; |
258 | 326 | ||
259 | /* does the frame have a qos control field? */ | 327 | /* does the frame have a qos control field? */ |
260 | if (WLAN_FC_IS_QOS_DATA(rx->fc)) { | 328 | if (ieee80211_is_data_qos(hdr->frame_control)) { |
261 | u8 *qc = data + ieee80211_get_hdrlen(rx->fc) - QOS_CONTROL_LEN; | 329 | u8 *qc = ieee80211_get_qos_ctl(hdr); |
262 | /* frame has qos control */ | 330 | /* frame has qos control */ |
263 | tid = qc[0] & QOS_CONTROL_TID_MASK; | 331 | tid = *qc & IEEE80211_QOS_CTL_TID_MASK; |
264 | if (qc[0] & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) | 332 | if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) |
265 | rx->flags |= IEEE80211_RX_AMSDU; | 333 | rx->flags |= IEEE80211_RX_AMSDU; |
266 | else | 334 | else |
267 | rx->flags &= ~IEEE80211_RX_AMSDU; | 335 | rx->flags &= ~IEEE80211_RX_AMSDU; |
268 | } else { | 336 | } else { |
269 | if (unlikely((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)) { | 337 | /* |
270 | /* Separate TID for management frames */ | 338 | * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): |
271 | tid = NUM_RX_DATA_QUEUES - 1; | 339 | * |
272 | } else { | 340 | * Sequence numbers for management frames, QoS data |
273 | /* no qos control present */ | 341 | * frames with a broadcast/multicast address in the |
274 | tid = 0; /* 802.1d - Best Effort */ | 342 | * Address 1 field, and all non-QoS data frames sent |
275 | } | 343 | * by QoS STAs are assigned using an additional single |
344 | * modulo-4096 counter, [...] | ||
345 | * | ||
346 | * We also use that counter for non-QoS STAs. | ||
347 | */ | ||
348 | tid = NUM_RX_DATA_QUEUES - 1; | ||
276 | } | 349 | } |
277 | 350 | ||
278 | I802_DEBUG_INC(rx->local->wme_rx_queue[tid]); | ||
279 | /* only a debug counter, sta might not be assigned properly yet */ | ||
280 | if (rx->sta) | ||
281 | I802_DEBUG_INC(rx->sta->wme_rx_queue[tid]); | ||
282 | |||
283 | rx->queue = tid; | 351 | rx->queue = tid; |
284 | /* Set skb->priority to 1d tag if highest order bit of TID is not set. | 352 | /* Set skb->priority to 1d tag if highest order bit of TID is not set. |
285 | * For now, set skb->priority to 0 for other cases. */ | 353 | * For now, set skb->priority to 0 for other cases. */ |
@@ -289,9 +357,10 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) | |||
289 | static void ieee80211_verify_ip_alignment(struct ieee80211_rx_data *rx) | 357 | static void ieee80211_verify_ip_alignment(struct ieee80211_rx_data *rx) |
290 | { | 358 | { |
291 | #ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT | 359 | #ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT |
360 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; | ||
292 | int hdrlen; | 361 | int hdrlen; |
293 | 362 | ||
294 | if (!WLAN_FC_DATA_PRESENT(rx->fc)) | 363 | if (!ieee80211_is_data_present(hdr->frame_control)) |
295 | return; | 364 | return; |
296 | 365 | ||
297 | /* | 366 | /* |
@@ -313,7 +382,7 @@ static void ieee80211_verify_ip_alignment(struct ieee80211_rx_data *rx) | |||
313 | * header and the payload is not supported, the driver is required | 382 | * header and the payload is not supported, the driver is required |
314 | * to move the 802.11 header further back in that case. | 383 | * to move the 802.11 header further back in that case. |
315 | */ | 384 | */ |
316 | hdrlen = ieee80211_get_hdrlen(rx->fc); | 385 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
317 | if (rx->flags & IEEE80211_RX_AMSDU) | 386 | if (rx->flags & IEEE80211_RX_AMSDU) |
318 | hdrlen += ETH_HLEN; | 387 | hdrlen += ETH_HLEN; |
319 | WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3); | 388 | WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3); |
@@ -321,51 +390,9 @@ static void ieee80211_verify_ip_alignment(struct ieee80211_rx_data *rx) | |||
321 | } | 390 | } |
322 | 391 | ||
323 | 392 | ||
324 | static u32 ieee80211_rx_load_stats(struct ieee80211_local *local, | ||
325 | struct sk_buff *skb, | ||
326 | struct ieee80211_rx_status *status, | ||
327 | struct ieee80211_rate *rate) | ||
328 | { | ||
329 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
330 | u32 load = 0, hdrtime; | ||
331 | |||
332 | /* Estimate total channel use caused by this frame */ | ||
333 | |||
334 | /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values, | ||
335 | * 1 usec = 1/8 * (1080 / 10) = 13.5 */ | ||
336 | |||
337 | if (status->band == IEEE80211_BAND_5GHZ || | ||
338 | (status->band == IEEE80211_BAND_5GHZ && | ||
339 | rate->flags & IEEE80211_RATE_ERP_G)) | ||
340 | hdrtime = CHAN_UTIL_HDR_SHORT; | ||
341 | else | ||
342 | hdrtime = CHAN_UTIL_HDR_LONG; | ||
343 | |||
344 | load = hdrtime; | ||
345 | if (!is_multicast_ether_addr(hdr->addr1)) | ||
346 | load += hdrtime; | ||
347 | |||
348 | /* TODO: optimise again */ | ||
349 | load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate; | ||
350 | |||
351 | /* Divide channel_use by 8 to avoid wrapping around the counter */ | ||
352 | load >>= CHAN_UTIL_SHIFT; | ||
353 | |||
354 | return load; | ||
355 | } | ||
356 | |||
357 | /* rx handlers */ | 393 | /* rx handlers */ |
358 | 394 | ||
359 | static ieee80211_rx_result | 395 | static ieee80211_rx_result debug_noinline |
360 | ieee80211_rx_h_if_stats(struct ieee80211_rx_data *rx) | ||
361 | { | ||
362 | if (rx->sta) | ||
363 | rx->sta->channel_use_raw += rx->load; | ||
364 | rx->sdata->channel_use_raw += rx->load; | ||
365 | return RX_CONTINUE; | ||
366 | } | ||
367 | |||
368 | static ieee80211_rx_result | ||
369 | ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) | 396 | ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) |
370 | { | 397 | { |
371 | struct ieee80211_local *local = rx->local; | 398 | struct ieee80211_local *local = rx->local; |
@@ -394,14 +421,11 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) | |||
394 | static ieee80211_rx_result | 421 | static ieee80211_rx_result |
395 | ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | 422 | ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) |
396 | { | 423 | { |
397 | int hdrlen = ieee80211_get_hdrlen(rx->fc); | 424 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
398 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; | 425 | unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); |
399 | 426 | ||
400 | #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l)) | 427 | if (ieee80211_is_data(hdr->frame_control)) { |
401 | 428 | if (!ieee80211_has_a4(hdr->frame_control)) | |
402 | if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) { | ||
403 | if (!((rx->fc & IEEE80211_FCTL_FROMDS) && | ||
404 | (rx->fc & IEEE80211_FCTL_TODS))) | ||
405 | return RX_DROP_MONITOR; | 429 | return RX_DROP_MONITOR; |
406 | if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0) | 430 | if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0) |
407 | return RX_DROP_MONITOR; | 431 | return RX_DROP_MONITOR; |
@@ -414,27 +438,30 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | |||
414 | if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) { | 438 | if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) { |
415 | struct ieee80211_mgmt *mgmt; | 439 | struct ieee80211_mgmt *mgmt; |
416 | 440 | ||
417 | if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT) | 441 | if (!ieee80211_is_mgmt(hdr->frame_control)) |
418 | return RX_DROP_MONITOR; | 442 | return RX_DROP_MONITOR; |
419 | 443 | ||
420 | switch (rx->fc & IEEE80211_FCTL_STYPE) { | 444 | if (ieee80211_is_action(hdr->frame_control)) { |
421 | case IEEE80211_STYPE_ACTION: | ||
422 | mgmt = (struct ieee80211_mgmt *)hdr; | 445 | mgmt = (struct ieee80211_mgmt *)hdr; |
423 | if (mgmt->u.action.category != PLINK_CATEGORY) | 446 | if (mgmt->u.action.category != PLINK_CATEGORY) |
424 | return RX_DROP_MONITOR; | 447 | return RX_DROP_MONITOR; |
425 | /* fall through on else */ | ||
426 | case IEEE80211_STYPE_PROBE_REQ: | ||
427 | case IEEE80211_STYPE_PROBE_RESP: | ||
428 | case IEEE80211_STYPE_BEACON: | ||
429 | return RX_CONTINUE; | 448 | return RX_CONTINUE; |
430 | break; | ||
431 | default: | ||
432 | return RX_DROP_MONITOR; | ||
433 | } | 449 | } |
434 | 450 | ||
435 | } else if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && | 451 | if (ieee80211_is_probe_req(hdr->frame_control) || |
436 | is_multicast_ether_addr(hdr->addr1) && | 452 | ieee80211_is_probe_resp(hdr->frame_control) || |
437 | mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->dev)) | 453 | ieee80211_is_beacon(hdr->frame_control)) |
454 | return RX_CONTINUE; | ||
455 | |||
456 | return RX_DROP_MONITOR; | ||
457 | |||
458 | } | ||
459 | |||
460 | #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l)) | ||
461 | |||
462 | if (ieee80211_is_data(hdr->frame_control) && | ||
463 | is_multicast_ether_addr(hdr->addr1) && | ||
464 | mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->dev)) | ||
438 | return RX_DROP_MONITOR; | 465 | return RX_DROP_MONITOR; |
439 | #undef msh_h_get | 466 | #undef msh_h_get |
440 | 467 | ||
@@ -442,16 +469,14 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | |||
442 | } | 469 | } |
443 | 470 | ||
444 | 471 | ||
445 | static ieee80211_rx_result | 472 | static ieee80211_rx_result debug_noinline |
446 | ieee80211_rx_h_check(struct ieee80211_rx_data *rx) | 473 | ieee80211_rx_h_check(struct ieee80211_rx_data *rx) |
447 | { | 474 | { |
448 | struct ieee80211_hdr *hdr; | 475 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
449 | |||
450 | hdr = (struct ieee80211_hdr *) rx->skb->data; | ||
451 | 476 | ||
452 | /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */ | 477 | /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */ |
453 | if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { | 478 | if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { |
454 | if (unlikely(rx->fc & IEEE80211_FCTL_RETRY && | 479 | if (unlikely(ieee80211_has_retry(hdr->frame_control) && |
455 | rx->sta->last_seq_ctrl[rx->queue] == | 480 | rx->sta->last_seq_ctrl[rx->queue] == |
456 | hdr->seq_ctrl)) { | 481 | hdr->seq_ctrl)) { |
457 | if (rx->flags & IEEE80211_RX_RA_MATCH) { | 482 | if (rx->flags & IEEE80211_RX_RA_MATCH) { |
@@ -480,15 +505,14 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) | |||
480 | if (ieee80211_vif_is_mesh(&rx->sdata->vif)) | 505 | if (ieee80211_vif_is_mesh(&rx->sdata->vif)) |
481 | return ieee80211_rx_mesh_check(rx); | 506 | return ieee80211_rx_mesh_check(rx); |
482 | 507 | ||
483 | if (unlikely(((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA || | 508 | if (unlikely((ieee80211_is_data(hdr->frame_control) || |
484 | ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL && | 509 | ieee80211_is_pspoll(hdr->frame_control)) && |
485 | (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)) && | ||
486 | rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | 510 | rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && |
487 | (!rx->sta || !(rx->sta->flags & WLAN_STA_ASSOC)))) { | 511 | (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) { |
488 | if ((!(rx->fc & IEEE80211_FCTL_FROMDS) && | 512 | if ((!ieee80211_has_fromds(hdr->frame_control) && |
489 | !(rx->fc & IEEE80211_FCTL_TODS) && | 513 | !ieee80211_has_tods(hdr->frame_control) && |
490 | (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) | 514 | ieee80211_is_data(hdr->frame_control)) || |
491 | || !(rx->flags & IEEE80211_RX_RA_MATCH)) { | 515 | !(rx->flags & IEEE80211_RX_RA_MATCH)) { |
492 | /* Drop IBSS frames and frames for other hosts | 516 | /* Drop IBSS frames and frames for other hosts |
493 | * silently. */ | 517 | * silently. */ |
494 | return RX_DROP_MONITOR; | 518 | return RX_DROP_MONITOR; |
@@ -501,10 +525,10 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) | |||
501 | } | 525 | } |
502 | 526 | ||
503 | 527 | ||
504 | static ieee80211_rx_result | 528 | static ieee80211_rx_result debug_noinline |
505 | ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | 529 | ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) |
506 | { | 530 | { |
507 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; | 531 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
508 | int keyidx; | 532 | int keyidx; |
509 | int hdrlen; | 533 | int hdrlen; |
510 | ieee80211_rx_result result = RX_DROP_UNUSABLE; | 534 | ieee80211_rx_result result = RX_DROP_UNUSABLE; |
@@ -536,7 +560,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
536 | * possible. | 560 | * possible. |
537 | */ | 561 | */ |
538 | 562 | ||
539 | if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) | 563 | if (!ieee80211_has_protected(hdr->frame_control)) |
540 | return RX_CONTINUE; | 564 | return RX_CONTINUE; |
541 | 565 | ||
542 | /* | 566 | /* |
@@ -565,7 +589,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
565 | (rx->status->flag & RX_FLAG_IV_STRIPPED)) | 589 | (rx->status->flag & RX_FLAG_IV_STRIPPED)) |
566 | return RX_CONTINUE; | 590 | return RX_CONTINUE; |
567 | 591 | ||
568 | hdrlen = ieee80211_get_hdrlen(rx->fc); | 592 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
569 | 593 | ||
570 | if (rx->skb->len < 8 + hdrlen) | 594 | if (rx->skb->len < 8 + hdrlen) |
571 | return RX_DROP_UNUSABLE; /* TODO: count this? */ | 595 | return RX_DROP_UNUSABLE; /* TODO: count this? */ |
@@ -592,17 +616,12 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
592 | rx->key->tx_rx_count++; | 616 | rx->key->tx_rx_count++; |
593 | /* TODO: add threshold stuff again */ | 617 | /* TODO: add threshold stuff again */ |
594 | } else { | 618 | } else { |
595 | #ifdef CONFIG_MAC80211_DEBUG | ||
596 | if (net_ratelimit()) | ||
597 | printk(KERN_DEBUG "%s: RX protected frame," | ||
598 | " but have no key\n", rx->dev->name); | ||
599 | #endif /* CONFIG_MAC80211_DEBUG */ | ||
600 | return RX_DROP_MONITOR; | 619 | return RX_DROP_MONITOR; |
601 | } | 620 | } |
602 | 621 | ||
603 | /* Check for weak IVs if possible */ | 622 | /* Check for weak IVs if possible */ |
604 | if (rx->sta && rx->key->conf.alg == ALG_WEP && | 623 | if (rx->sta && rx->key->conf.alg == ALG_WEP && |
605 | ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && | 624 | ieee80211_is_data(hdr->frame_control) && |
606 | (!(rx->status->flag & RX_FLAG_IV_STRIPPED) || | 625 | (!(rx->status->flag & RX_FLAG_IV_STRIPPED) || |
607 | !(rx->status->flag & RX_FLAG_DECRYPTED)) && | 626 | !(rx->status->flag & RX_FLAG_DECRYPTED)) && |
608 | ieee80211_wep_is_weak_iv(rx->skb, rx->key)) | 627 | ieee80211_wep_is_weak_iv(rx->skb, rx->key)) |
@@ -633,10 +652,8 @@ static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta) | |||
633 | 652 | ||
634 | sdata = sta->sdata; | 653 | sdata = sta->sdata; |
635 | 654 | ||
636 | if (sdata->bss) | 655 | atomic_inc(&sdata->bss->num_sta_ps); |
637 | atomic_inc(&sdata->bss->num_sta_ps); | 656 | set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL); |
638 | sta->flags |= WLAN_STA_PS; | ||
639 | sta->flags &= ~WLAN_STA_PSPOLL; | ||
640 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 657 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
641 | printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n", | 658 | printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n", |
642 | dev->name, print_mac(mac, sta->addr), sta->aid); | 659 | dev->name, print_mac(mac, sta->addr), sta->aid); |
@@ -649,15 +666,14 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta) | |||
649 | struct sk_buff *skb; | 666 | struct sk_buff *skb; |
650 | int sent = 0; | 667 | int sent = 0; |
651 | struct ieee80211_sub_if_data *sdata; | 668 | struct ieee80211_sub_if_data *sdata; |
652 | struct ieee80211_tx_packet_data *pkt_data; | 669 | struct ieee80211_tx_info *info; |
653 | DECLARE_MAC_BUF(mac); | 670 | DECLARE_MAC_BUF(mac); |
654 | 671 | ||
655 | sdata = sta->sdata; | 672 | sdata = sta->sdata; |
656 | 673 | ||
657 | if (sdata->bss) | 674 | atomic_dec(&sdata->bss->num_sta_ps); |
658 | atomic_dec(&sdata->bss->num_sta_ps); | ||
659 | 675 | ||
660 | sta->flags &= ~(WLAN_STA_PS | WLAN_STA_PSPOLL); | 676 | clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL); |
661 | 677 | ||
662 | if (!skb_queue_empty(&sta->ps_tx_buf)) | 678 | if (!skb_queue_empty(&sta->ps_tx_buf)) |
663 | sta_info_clear_tim_bit(sta); | 679 | sta_info_clear_tim_bit(sta); |
@@ -669,13 +685,13 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta) | |||
669 | 685 | ||
670 | /* Send all buffered frames to the station */ | 686 | /* Send all buffered frames to the station */ |
671 | while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { | 687 | while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { |
672 | pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; | 688 | info = IEEE80211_SKB_CB(skb); |
673 | sent++; | 689 | sent++; |
674 | pkt_data->flags |= IEEE80211_TXPD_REQUEUE; | 690 | info->flags |= IEEE80211_TX_CTL_REQUEUE; |
675 | dev_queue_xmit(skb); | 691 | dev_queue_xmit(skb); |
676 | } | 692 | } |
677 | while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { | 693 | while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { |
678 | pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; | 694 | info = IEEE80211_SKB_CB(skb); |
679 | local->total_ps_buffered--; | 695 | local->total_ps_buffered--; |
680 | sent++; | 696 | sent++; |
681 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 697 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
@@ -683,19 +699,19 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta) | |||
683 | "since STA not sleeping anymore\n", dev->name, | 699 | "since STA not sleeping anymore\n", dev->name, |
684 | print_mac(mac, sta->addr), sta->aid); | 700 | print_mac(mac, sta->addr), sta->aid); |
685 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 701 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
686 | pkt_data->flags |= IEEE80211_TXPD_REQUEUE; | 702 | info->flags |= IEEE80211_TX_CTL_REQUEUE; |
687 | dev_queue_xmit(skb); | 703 | dev_queue_xmit(skb); |
688 | } | 704 | } |
689 | 705 | ||
690 | return sent; | 706 | return sent; |
691 | } | 707 | } |
692 | 708 | ||
693 | static ieee80211_rx_result | 709 | static ieee80211_rx_result debug_noinline |
694 | ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | 710 | ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) |
695 | { | 711 | { |
696 | struct sta_info *sta = rx->sta; | 712 | struct sta_info *sta = rx->sta; |
697 | struct net_device *dev = rx->dev; | 713 | struct net_device *dev = rx->dev; |
698 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; | 714 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
699 | 715 | ||
700 | if (!sta) | 716 | if (!sta) |
701 | return RX_CONTINUE; | 717 | return RX_CONTINUE; |
@@ -725,24 +741,26 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
725 | 741 | ||
726 | sta->rx_fragments++; | 742 | sta->rx_fragments++; |
727 | sta->rx_bytes += rx->skb->len; | 743 | sta->rx_bytes += rx->skb->len; |
728 | sta->last_rssi = rx->status->ssi; | ||
729 | sta->last_signal = rx->status->signal; | 744 | sta->last_signal = rx->status->signal; |
745 | sta->last_qual = rx->status->qual; | ||
730 | sta->last_noise = rx->status->noise; | 746 | sta->last_noise = rx->status->noise; |
731 | 747 | ||
732 | if (!(rx->fc & IEEE80211_FCTL_MOREFRAGS)) { | 748 | if (!ieee80211_has_morefrags(hdr->frame_control) && |
749 | (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP || | ||
750 | rx->sdata->vif.type == IEEE80211_IF_TYPE_VLAN)) { | ||
733 | /* Change STA power saving mode only in the end of a frame | 751 | /* Change STA power saving mode only in the end of a frame |
734 | * exchange sequence */ | 752 | * exchange sequence */ |
735 | if ((sta->flags & WLAN_STA_PS) && !(rx->fc & IEEE80211_FCTL_PM)) | 753 | if (test_sta_flags(sta, WLAN_STA_PS) && |
754 | !ieee80211_has_pm(hdr->frame_control)) | ||
736 | rx->sent_ps_buffered += ap_sta_ps_end(dev, sta); | 755 | rx->sent_ps_buffered += ap_sta_ps_end(dev, sta); |
737 | else if (!(sta->flags & WLAN_STA_PS) && | 756 | else if (!test_sta_flags(sta, WLAN_STA_PS) && |
738 | (rx->fc & IEEE80211_FCTL_PM)) | 757 | ieee80211_has_pm(hdr->frame_control)) |
739 | ap_sta_ps_start(dev, sta); | 758 | ap_sta_ps_start(dev, sta); |
740 | } | 759 | } |
741 | 760 | ||
742 | /* Drop data::nullfunc frames silently, since they are used only to | 761 | /* Drop data::nullfunc frames silently, since they are used only to |
743 | * control station power saving mode. */ | 762 | * control station power saving mode. */ |
744 | if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && | 763 | if (ieee80211_is_nullfunc(hdr->frame_control)) { |
745 | (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_NULLFUNC) { | ||
746 | I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); | 764 | I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); |
747 | /* Update counter and free packet here to avoid counting this | 765 | /* Update counter and free packet here to avoid counting this |
748 | * as a dropped packed. */ | 766 | * as a dropped packed. */ |
@@ -768,7 +786,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, | |||
768 | sdata->fragment_next = 0; | 786 | sdata->fragment_next = 0; |
769 | 787 | ||
770 | if (!skb_queue_empty(&entry->skb_list)) { | 788 | if (!skb_queue_empty(&entry->skb_list)) { |
771 | #ifdef CONFIG_MAC80211_DEBUG | 789 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
772 | struct ieee80211_hdr *hdr = | 790 | struct ieee80211_hdr *hdr = |
773 | (struct ieee80211_hdr *) entry->skb_list.next->data; | 791 | (struct ieee80211_hdr *) entry->skb_list.next->data; |
774 | DECLARE_MAC_BUF(mac); | 792 | DECLARE_MAC_BUF(mac); |
@@ -780,7 +798,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, | |||
780 | jiffies - entry->first_frag_time, entry->seq, | 798 | jiffies - entry->first_frag_time, entry->seq, |
781 | entry->last_frag, print_mac(mac, hdr->addr1), | 799 | entry->last_frag, print_mac(mac, hdr->addr1), |
782 | print_mac(mac2, hdr->addr2)); | 800 | print_mac(mac2, hdr->addr2)); |
783 | #endif /* CONFIG_MAC80211_DEBUG */ | 801 | #endif |
784 | __skb_queue_purge(&entry->skb_list); | 802 | __skb_queue_purge(&entry->skb_list); |
785 | } | 803 | } |
786 | 804 | ||
@@ -837,7 +855,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, | |||
837 | return NULL; | 855 | return NULL; |
838 | } | 856 | } |
839 | 857 | ||
840 | static ieee80211_rx_result | 858 | static ieee80211_rx_result debug_noinline |
841 | ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | 859 | ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) |
842 | { | 860 | { |
843 | struct ieee80211_hdr *hdr; | 861 | struct ieee80211_hdr *hdr; |
@@ -901,18 +919,8 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
901 | break; | 919 | break; |
902 | } | 920 | } |
903 | rpn = rx->key->u.ccmp.rx_pn[rx->queue]; | 921 | rpn = rx->key->u.ccmp.rx_pn[rx->queue]; |
904 | if (memcmp(pn, rpn, CCMP_PN_LEN) != 0) { | 922 | if (memcmp(pn, rpn, CCMP_PN_LEN)) |
905 | if (net_ratelimit()) | ||
906 | printk(KERN_DEBUG "%s: defrag: CCMP PN not " | ||
907 | "sequential A2=%s" | ||
908 | " PN=%02x%02x%02x%02x%02x%02x " | ||
909 | "(expected %02x%02x%02x%02x%02x%02x)\n", | ||
910 | rx->dev->name, print_mac(mac, hdr->addr2), | ||
911 | rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], | ||
912 | rpn[5], pn[0], pn[1], pn[2], pn[3], | ||
913 | pn[4], pn[5]); | ||
914 | return RX_DROP_UNUSABLE; | 923 | return RX_DROP_UNUSABLE; |
915 | } | ||
916 | memcpy(entry->last_pn, pn, CCMP_PN_LEN); | 924 | memcpy(entry->last_pn, pn, CCMP_PN_LEN); |
917 | } | 925 | } |
918 | 926 | ||
@@ -953,7 +961,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
953 | return RX_CONTINUE; | 961 | return RX_CONTINUE; |
954 | } | 962 | } |
955 | 963 | ||
956 | static ieee80211_rx_result | 964 | static ieee80211_rx_result debug_noinline |
957 | ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) | 965 | ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) |
958 | { | 966 | { |
959 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); | 967 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); |
@@ -988,7 +996,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) | |||
988 | * Tell TX path to send one frame even though the STA may | 996 | * Tell TX path to send one frame even though the STA may |
989 | * still remain is PS mode after this frame exchange. | 997 | * still remain is PS mode after this frame exchange. |
990 | */ | 998 | */ |
991 | rx->sta->flags |= WLAN_STA_PSPOLL; | 999 | set_sta_flags(rx->sta, WLAN_STA_PSPOLL); |
992 | 1000 | ||
993 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 1001 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
994 | printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n", | 1002 | printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n", |
@@ -1016,7 +1024,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) | |||
1016 | * have nothing buffered for it? | 1024 | * have nothing buffered for it? |
1017 | */ | 1025 | */ |
1018 | printk(KERN_DEBUG "%s: STA %s sent PS Poll even " | 1026 | printk(KERN_DEBUG "%s: STA %s sent PS Poll even " |
1019 | "though there is no buffered frames for it\n", | 1027 | "though there are no buffered frames for it\n", |
1020 | rx->dev->name, print_mac(mac, rx->sta->addr)); | 1028 | rx->dev->name, print_mac(mac, rx->sta->addr)); |
1021 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 1029 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
1022 | } | 1030 | } |
@@ -1028,22 +1036,22 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) | |||
1028 | return RX_QUEUED; | 1036 | return RX_QUEUED; |
1029 | } | 1037 | } |
1030 | 1038 | ||
1031 | static ieee80211_rx_result | 1039 | static ieee80211_rx_result debug_noinline |
1032 | ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx) | 1040 | ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx) |
1033 | { | 1041 | { |
1034 | u16 fc = rx->fc; | ||
1035 | u8 *data = rx->skb->data; | 1042 | u8 *data = rx->skb->data; |
1036 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) data; | 1043 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data; |
1037 | 1044 | ||
1038 | if (!WLAN_FC_IS_QOS_DATA(fc)) | 1045 | if (!ieee80211_is_data_qos(hdr->frame_control)) |
1039 | return RX_CONTINUE; | 1046 | return RX_CONTINUE; |
1040 | 1047 | ||
1041 | /* remove the qos control field, update frame type and meta-data */ | 1048 | /* remove the qos control field, update frame type and meta-data */ |
1042 | memmove(data + 2, data, ieee80211_get_hdrlen(fc) - 2); | 1049 | memmove(data + IEEE80211_QOS_CTL_LEN, data, |
1043 | hdr = (struct ieee80211_hdr *) skb_pull(rx->skb, 2); | 1050 | ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN); |
1051 | hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN); | ||
1044 | /* change frame type to non QOS */ | 1052 | /* change frame type to non QOS */ |
1045 | rx->fc = fc &= ~IEEE80211_STYPE_QOS_DATA; | 1053 | rx->fc &= ~IEEE80211_STYPE_QOS_DATA; |
1046 | hdr->frame_control = cpu_to_le16(fc); | 1054 | hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); |
1047 | 1055 | ||
1048 | return RX_CONTINUE; | 1056 | return RX_CONTINUE; |
1049 | } | 1057 | } |
@@ -1051,14 +1059,9 @@ ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx) | |||
1051 | static int | 1059 | static int |
1052 | ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) | 1060 | ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) |
1053 | { | 1061 | { |
1054 | if (unlikely(!rx->sta || !(rx->sta->flags & WLAN_STA_AUTHORIZED))) { | 1062 | if (unlikely(!rx->sta || |
1055 | #ifdef CONFIG_MAC80211_DEBUG | 1063 | !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED))) |
1056 | if (net_ratelimit()) | ||
1057 | printk(KERN_DEBUG "%s: dropped frame " | ||
1058 | "(unauthorized port)\n", rx->dev->name); | ||
1059 | #endif /* CONFIG_MAC80211_DEBUG */ | ||
1060 | return -EACCES; | 1064 | return -EACCES; |
1061 | } | ||
1062 | 1065 | ||
1063 | return 0; | 1066 | return 0; |
1064 | } | 1067 | } |
@@ -1138,16 +1141,8 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx) | |||
1138 | memcpy(src, hdr->addr2, ETH_ALEN); | 1141 | memcpy(src, hdr->addr2, ETH_ALEN); |
1139 | 1142 | ||
1140 | if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP && | 1143 | if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP && |
1141 | sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) { | 1144 | sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) |
1142 | if (net_ratelimit()) | ||
1143 | printk(KERN_DEBUG "%s: dropped ToDS frame " | ||
1144 | "(BSSID=%s SA=%s DA=%s)\n", | ||
1145 | dev->name, | ||
1146 | print_mac(mac, hdr->addr1), | ||
1147 | print_mac(mac2, hdr->addr2), | ||
1148 | print_mac(mac3, hdr->addr3)); | ||
1149 | return -1; | 1145 | return -1; |
1150 | } | ||
1151 | break; | 1146 | break; |
1152 | case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): | 1147 | case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): |
1153 | /* RA TA DA SA */ | 1148 | /* RA TA DA SA */ |
@@ -1155,17 +1150,8 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx) | |||
1155 | memcpy(src, hdr->addr4, ETH_ALEN); | 1150 | memcpy(src, hdr->addr4, ETH_ALEN); |
1156 | 1151 | ||
1157 | if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS && | 1152 | if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS && |
1158 | sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)) { | 1153 | sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)) |
1159 | if (net_ratelimit()) | ||
1160 | printk(KERN_DEBUG "%s: dropped FromDS&ToDS " | ||
1161 | "frame (RA=%s TA=%s DA=%s SA=%s)\n", | ||
1162 | rx->dev->name, | ||
1163 | print_mac(mac, hdr->addr1), | ||
1164 | print_mac(mac2, hdr->addr2), | ||
1165 | print_mac(mac3, hdr->addr3), | ||
1166 | print_mac(mac4, hdr->addr4)); | ||
1167 | return -1; | 1154 | return -1; |
1168 | } | ||
1169 | break; | 1155 | break; |
1170 | case IEEE80211_FCTL_FROMDS: | 1156 | case IEEE80211_FCTL_FROMDS: |
1171 | /* DA BSSID SA */ | 1157 | /* DA BSSID SA */ |
@@ -1182,27 +1168,13 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx) | |||
1182 | memcpy(dst, hdr->addr1, ETH_ALEN); | 1168 | memcpy(dst, hdr->addr1, ETH_ALEN); |
1183 | memcpy(src, hdr->addr2, ETH_ALEN); | 1169 | memcpy(src, hdr->addr2, ETH_ALEN); |
1184 | 1170 | ||
1185 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS) { | 1171 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS) |
1186 | if (net_ratelimit()) { | ||
1187 | printk(KERN_DEBUG "%s: dropped IBSS frame " | ||
1188 | "(DA=%s SA=%s BSSID=%s)\n", | ||
1189 | dev->name, | ||
1190 | print_mac(mac, hdr->addr1), | ||
1191 | print_mac(mac2, hdr->addr2), | ||
1192 | print_mac(mac3, hdr->addr3)); | ||
1193 | } | ||
1194 | return -1; | 1172 | return -1; |
1195 | } | ||
1196 | break; | 1173 | break; |
1197 | } | 1174 | } |
1198 | 1175 | ||
1199 | if (unlikely(skb->len - hdrlen < 8)) { | 1176 | if (unlikely(skb->len - hdrlen < 8)) |
1200 | if (net_ratelimit()) { | ||
1201 | printk(KERN_DEBUG "%s: RX too short data frame " | ||
1202 | "payload\n", dev->name); | ||
1203 | } | ||
1204 | return -1; | 1177 | return -1; |
1205 | } | ||
1206 | 1178 | ||
1207 | payload = skb->data + hdrlen; | 1179 | payload = skb->data + hdrlen; |
1208 | ethertype = (payload[6] << 8) | payload[7]; | 1180 | ethertype = (payload[6] << 8) | payload[7]; |
@@ -1345,7 +1317,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) | |||
1345 | } | 1317 | } |
1346 | } | 1318 | } |
1347 | 1319 | ||
1348 | static ieee80211_rx_result | 1320 | static ieee80211_rx_result debug_noinline |
1349 | ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) | 1321 | ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) |
1350 | { | 1322 | { |
1351 | struct net_device *dev = rx->dev; | 1323 | struct net_device *dev = rx->dev; |
@@ -1394,10 +1366,8 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) | |||
1394 | 1366 | ||
1395 | padding = ((4 - subframe_len) & 0x3); | 1367 | padding = ((4 - subframe_len) & 0x3); |
1396 | /* the last MSDU has no padding */ | 1368 | /* the last MSDU has no padding */ |
1397 | if (subframe_len > remaining) { | 1369 | if (subframe_len > remaining) |
1398 | printk(KERN_DEBUG "%s: wrong buffer size\n", dev->name); | ||
1399 | return RX_DROP_UNUSABLE; | 1370 | return RX_DROP_UNUSABLE; |
1400 | } | ||
1401 | 1371 | ||
1402 | skb_pull(skb, sizeof(struct ethhdr)); | 1372 | skb_pull(skb, sizeof(struct ethhdr)); |
1403 | /* if last subframe reuse skb */ | 1373 | /* if last subframe reuse skb */ |
@@ -1418,8 +1388,6 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) | |||
1418 | eth = (struct ethhdr *) skb_pull(skb, ntohs(len) + | 1388 | eth = (struct ethhdr *) skb_pull(skb, ntohs(len) + |
1419 | padding); | 1389 | padding); |
1420 | if (!eth) { | 1390 | if (!eth) { |
1421 | printk(KERN_DEBUG "%s: wrong buffer size\n", | ||
1422 | dev->name); | ||
1423 | dev_kfree_skb(frame); | 1391 | dev_kfree_skb(frame); |
1424 | return RX_DROP_UNUSABLE; | 1392 | return RX_DROP_UNUSABLE; |
1425 | } | 1393 | } |
@@ -1462,7 +1430,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) | |||
1462 | return RX_QUEUED; | 1430 | return RX_QUEUED; |
1463 | } | 1431 | } |
1464 | 1432 | ||
1465 | static ieee80211_rx_result | 1433 | static ieee80211_rx_result debug_noinline |
1466 | ieee80211_rx_h_data(struct ieee80211_rx_data *rx) | 1434 | ieee80211_rx_h_data(struct ieee80211_rx_data *rx) |
1467 | { | 1435 | { |
1468 | struct net_device *dev = rx->dev; | 1436 | struct net_device *dev = rx->dev; |
@@ -1493,21 +1461,21 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx) | |||
1493 | return RX_QUEUED; | 1461 | return RX_QUEUED; |
1494 | } | 1462 | } |
1495 | 1463 | ||
1496 | static ieee80211_rx_result | 1464 | static ieee80211_rx_result debug_noinline |
1497 | ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) | 1465 | ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) |
1498 | { | 1466 | { |
1499 | struct ieee80211_local *local = rx->local; | 1467 | struct ieee80211_local *local = rx->local; |
1500 | struct ieee80211_hw *hw = &local->hw; | 1468 | struct ieee80211_hw *hw = &local->hw; |
1501 | struct sk_buff *skb = rx->skb; | 1469 | struct sk_buff *skb = rx->skb; |
1502 | struct ieee80211_bar *bar = (struct ieee80211_bar *) skb->data; | 1470 | struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; |
1503 | struct tid_ampdu_rx *tid_agg_rx; | 1471 | struct tid_ampdu_rx *tid_agg_rx; |
1504 | u16 start_seq_num; | 1472 | u16 start_seq_num; |
1505 | u16 tid; | 1473 | u16 tid; |
1506 | 1474 | ||
1507 | if (likely((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL)) | 1475 | if (likely(!ieee80211_is_ctl(bar->frame_control))) |
1508 | return RX_CONTINUE; | 1476 | return RX_CONTINUE; |
1509 | 1477 | ||
1510 | if ((rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BACK_REQ) { | 1478 | if (ieee80211_is_back_req(bar->frame_control)) { |
1511 | if (!rx->sta) | 1479 | if (!rx->sta) |
1512 | return RX_CONTINUE; | 1480 | return RX_CONTINUE; |
1513 | tid = le16_to_cpu(bar->control) >> 12; | 1481 | tid = le16_to_cpu(bar->control) >> 12; |
@@ -1537,7 +1505,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) | |||
1537 | return RX_CONTINUE; | 1505 | return RX_CONTINUE; |
1538 | } | 1506 | } |
1539 | 1507 | ||
1540 | static ieee80211_rx_result | 1508 | static ieee80211_rx_result debug_noinline |
1541 | ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) | 1509 | ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) |
1542 | { | 1510 | { |
1543 | struct ieee80211_sub_if_data *sdata; | 1511 | struct ieee80211_sub_if_data *sdata; |
@@ -1561,41 +1529,27 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev, | |||
1561 | struct ieee80211_hdr *hdr, | 1529 | struct ieee80211_hdr *hdr, |
1562 | struct ieee80211_rx_data *rx) | 1530 | struct ieee80211_rx_data *rx) |
1563 | { | 1531 | { |
1564 | int keyidx, hdrlen; | 1532 | int keyidx; |
1533 | unsigned int hdrlen; | ||
1565 | DECLARE_MAC_BUF(mac); | 1534 | DECLARE_MAC_BUF(mac); |
1566 | DECLARE_MAC_BUF(mac2); | 1535 | DECLARE_MAC_BUF(mac2); |
1567 | 1536 | ||
1568 | hdrlen = ieee80211_get_hdrlen_from_skb(rx->skb); | 1537 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
1569 | if (rx->skb->len >= hdrlen + 4) | 1538 | if (rx->skb->len >= hdrlen + 4) |
1570 | keyidx = rx->skb->data[hdrlen + 3] >> 6; | 1539 | keyidx = rx->skb->data[hdrlen + 3] >> 6; |
1571 | else | 1540 | else |
1572 | keyidx = -1; | 1541 | keyidx = -1; |
1573 | 1542 | ||
1574 | if (net_ratelimit()) | ||
1575 | printk(KERN_DEBUG "%s: TKIP hwaccel reported Michael MIC " | ||
1576 | "failure from %s to %s keyidx=%d\n", | ||
1577 | dev->name, print_mac(mac, hdr->addr2), | ||
1578 | print_mac(mac2, hdr->addr1), keyidx); | ||
1579 | |||
1580 | if (!rx->sta) { | 1543 | if (!rx->sta) { |
1581 | /* | 1544 | /* |
1582 | * Some hardware seem to generate incorrect Michael MIC | 1545 | * Some hardware seem to generate incorrect Michael MIC |
1583 | * reports; ignore them to avoid triggering countermeasures. | 1546 | * reports; ignore them to avoid triggering countermeasures. |
1584 | */ | 1547 | */ |
1585 | if (net_ratelimit()) | ||
1586 | printk(KERN_DEBUG "%s: ignored spurious Michael MIC " | ||
1587 | "error for unknown address %s\n", | ||
1588 | dev->name, print_mac(mac, hdr->addr2)); | ||
1589 | goto ignore; | 1548 | goto ignore; |
1590 | } | 1549 | } |
1591 | 1550 | ||
1592 | if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) { | 1551 | if (!ieee80211_has_protected(hdr->frame_control)) |
1593 | if (net_ratelimit()) | ||
1594 | printk(KERN_DEBUG "%s: ignored spurious Michael MIC " | ||
1595 | "error for a frame with no PROTECTED flag (src " | ||
1596 | "%s)\n", dev->name, print_mac(mac, hdr->addr2)); | ||
1597 | goto ignore; | 1552 | goto ignore; |
1598 | } | ||
1599 | 1553 | ||
1600 | if (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP && keyidx) { | 1554 | if (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP && keyidx) { |
1601 | /* | 1555 | /* |
@@ -1604,24 +1558,12 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev, | |||
1604 | * group keys and only the AP is sending real multicast | 1558 | * group keys and only the AP is sending real multicast |
1605 | * frames in the BSS. | 1559 | * frames in the BSS. |
1606 | */ | 1560 | */ |
1607 | if (net_ratelimit()) | ||
1608 | printk(KERN_DEBUG "%s: ignored Michael MIC error for " | ||
1609 | "a frame with non-zero keyidx (%d)" | ||
1610 | " (src %s)\n", dev->name, keyidx, | ||
1611 | print_mac(mac, hdr->addr2)); | ||
1612 | goto ignore; | 1561 | goto ignore; |
1613 | } | 1562 | } |
1614 | 1563 | ||
1615 | if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && | 1564 | if (!ieee80211_is_data(hdr->frame_control) && |
1616 | ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || | 1565 | !ieee80211_is_auth(hdr->frame_control)) |
1617 | (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) { | ||
1618 | if (net_ratelimit()) | ||
1619 | printk(KERN_DEBUG "%s: ignored spurious Michael MIC " | ||
1620 | "error for a frame that cannot be encrypted " | ||
1621 | "(fc=0x%04x) (src %s)\n", | ||
1622 | dev->name, rx->fc, print_mac(mac, hdr->addr2)); | ||
1623 | goto ignore; | 1566 | goto ignore; |
1624 | } | ||
1625 | 1567 | ||
1626 | mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr); | 1568 | mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr); |
1627 | ignore: | 1569 | ignore: |
@@ -1710,67 +1652,57 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx) | |||
1710 | dev_kfree_skb(skb); | 1652 | dev_kfree_skb(skb); |
1711 | } | 1653 | } |
1712 | 1654 | ||
1713 | typedef ieee80211_rx_result (*ieee80211_rx_handler)(struct ieee80211_rx_data *); | ||
1714 | static ieee80211_rx_handler ieee80211_rx_handlers[] = | ||
1715 | { | ||
1716 | ieee80211_rx_h_if_stats, | ||
1717 | ieee80211_rx_h_passive_scan, | ||
1718 | ieee80211_rx_h_check, | ||
1719 | ieee80211_rx_h_decrypt, | ||
1720 | ieee80211_rx_h_sta_process, | ||
1721 | ieee80211_rx_h_defragment, | ||
1722 | ieee80211_rx_h_ps_poll, | ||
1723 | ieee80211_rx_h_michael_mic_verify, | ||
1724 | /* this must be after decryption - so header is counted in MPDU mic | ||
1725 | * must be before pae and data, so QOS_DATA format frames | ||
1726 | * are not passed to user space by these functions | ||
1727 | */ | ||
1728 | ieee80211_rx_h_remove_qos_control, | ||
1729 | ieee80211_rx_h_amsdu, | ||
1730 | ieee80211_rx_h_data, | ||
1731 | ieee80211_rx_h_ctrl, | ||
1732 | ieee80211_rx_h_mgmt, | ||
1733 | NULL | ||
1734 | }; | ||
1735 | 1655 | ||
1736 | static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, | 1656 | static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, |
1737 | struct ieee80211_rx_data *rx, | 1657 | struct ieee80211_rx_data *rx, |
1738 | struct sk_buff *skb) | 1658 | struct sk_buff *skb) |
1739 | { | 1659 | { |
1740 | ieee80211_rx_handler *handler; | ||
1741 | ieee80211_rx_result res = RX_DROP_MONITOR; | 1660 | ieee80211_rx_result res = RX_DROP_MONITOR; |
1742 | 1661 | ||
1743 | rx->skb = skb; | 1662 | rx->skb = skb; |
1744 | rx->sdata = sdata; | 1663 | rx->sdata = sdata; |
1745 | rx->dev = sdata->dev; | 1664 | rx->dev = sdata->dev; |
1746 | 1665 | ||
1747 | for (handler = ieee80211_rx_handlers; *handler != NULL; handler++) { | 1666 | #define CALL_RXH(rxh) \ |
1748 | res = (*handler)(rx); | 1667 | res = rxh(rx); \ |
1749 | 1668 | if (res != RX_CONTINUE) \ | |
1750 | switch (res) { | 1669 | goto rxh_done; |
1751 | case RX_CONTINUE: | 1670 | |
1752 | continue; | 1671 | CALL_RXH(ieee80211_rx_h_passive_scan) |
1753 | case RX_DROP_UNUSABLE: | 1672 | CALL_RXH(ieee80211_rx_h_check) |
1754 | case RX_DROP_MONITOR: | 1673 | CALL_RXH(ieee80211_rx_h_decrypt) |
1755 | I802_DEBUG_INC(sdata->local->rx_handlers_drop); | 1674 | CALL_RXH(ieee80211_rx_h_sta_process) |
1756 | if (rx->sta) | 1675 | CALL_RXH(ieee80211_rx_h_defragment) |
1757 | rx->sta->rx_dropped++; | 1676 | CALL_RXH(ieee80211_rx_h_ps_poll) |
1758 | break; | 1677 | CALL_RXH(ieee80211_rx_h_michael_mic_verify) |
1759 | case RX_QUEUED: | 1678 | /* must be after MMIC verify so header is counted in MPDU mic */ |
1760 | I802_DEBUG_INC(sdata->local->rx_handlers_queued); | 1679 | CALL_RXH(ieee80211_rx_h_remove_qos_control) |
1761 | break; | 1680 | CALL_RXH(ieee80211_rx_h_amsdu) |
1762 | } | 1681 | CALL_RXH(ieee80211_rx_h_data) |
1763 | break; | 1682 | CALL_RXH(ieee80211_rx_h_ctrl) |
1764 | } | 1683 | CALL_RXH(ieee80211_rx_h_mgmt) |
1765 | 1684 | ||
1685 | #undef CALL_RXH | ||
1686 | |||
1687 | rxh_done: | ||
1766 | switch (res) { | 1688 | switch (res) { |
1767 | case RX_CONTINUE: | ||
1768 | case RX_DROP_MONITOR: | 1689 | case RX_DROP_MONITOR: |
1690 | I802_DEBUG_INC(sdata->local->rx_handlers_drop); | ||
1691 | if (rx->sta) | ||
1692 | rx->sta->rx_dropped++; | ||
1693 | /* fall through */ | ||
1694 | case RX_CONTINUE: | ||
1769 | ieee80211_rx_cooked_monitor(rx); | 1695 | ieee80211_rx_cooked_monitor(rx); |
1770 | break; | 1696 | break; |
1771 | case RX_DROP_UNUSABLE: | 1697 | case RX_DROP_UNUSABLE: |
1698 | I802_DEBUG_INC(sdata->local->rx_handlers_drop); | ||
1699 | if (rx->sta) | ||
1700 | rx->sta->rx_dropped++; | ||
1772 | dev_kfree_skb(rx->skb); | 1701 | dev_kfree_skb(rx->skb); |
1773 | break; | 1702 | break; |
1703 | case RX_QUEUED: | ||
1704 | I802_DEBUG_INC(sdata->local->rx_handlers_queued); | ||
1705 | break; | ||
1774 | } | 1706 | } |
1775 | } | 1707 | } |
1776 | 1708 | ||
@@ -1801,9 +1733,13 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
1801 | case IEEE80211_IF_TYPE_IBSS: | 1733 | case IEEE80211_IF_TYPE_IBSS: |
1802 | if (!bssid) | 1734 | if (!bssid) |
1803 | return 0; | 1735 | return 0; |
1804 | if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && | 1736 | if (ieee80211_is_beacon(hdr->frame_control)) { |
1805 | (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) | 1737 | if (!rx->sta) |
1738 | rx->sta = ieee80211_ibss_add_sta(sdata->dev, | ||
1739 | rx->skb, bssid, hdr->addr2, | ||
1740 | BIT(rx->status->rate_idx)); | ||
1806 | return 1; | 1741 | return 1; |
1742 | } | ||
1807 | else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { | 1743 | else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { |
1808 | if (!(rx->flags & IEEE80211_RX_IN_SCAN)) | 1744 | if (!(rx->flags & IEEE80211_RX_IN_SCAN)) |
1809 | return 0; | 1745 | return 0; |
@@ -1816,7 +1752,8 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
1816 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | 1752 | rx->flags &= ~IEEE80211_RX_RA_MATCH; |
1817 | } else if (!rx->sta) | 1753 | } else if (!rx->sta) |
1818 | rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb, | 1754 | rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb, |
1819 | bssid, hdr->addr2); | 1755 | bssid, hdr->addr2, |
1756 | BIT(rx->status->rate_idx)); | ||
1820 | break; | 1757 | break; |
1821 | case IEEE80211_IF_TYPE_MESH_POINT: | 1758 | case IEEE80211_IF_TYPE_MESH_POINT: |
1822 | if (!multicast && | 1759 | if (!multicast && |
@@ -1840,15 +1777,9 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
1840 | return 0; | 1777 | return 0; |
1841 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | 1778 | rx->flags &= ~IEEE80211_RX_RA_MATCH; |
1842 | } | 1779 | } |
1843 | if (sdata->dev == sdata->local->mdev && | ||
1844 | !(rx->flags & IEEE80211_RX_IN_SCAN)) | ||
1845 | /* do not receive anything via | ||
1846 | * master device when not scanning */ | ||
1847 | return 0; | ||
1848 | break; | 1780 | break; |
1849 | case IEEE80211_IF_TYPE_WDS: | 1781 | case IEEE80211_IF_TYPE_WDS: |
1850 | if (bssid || | 1782 | if (bssid || !ieee80211_is_data(hdr->frame_control)) |
1851 | (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) | ||
1852 | return 0; | 1783 | return 0; |
1853 | if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) | 1784 | if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) |
1854 | return 0; | 1785 | return 0; |
@@ -1872,7 +1803,6 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
1872 | static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | 1803 | static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, |
1873 | struct sk_buff *skb, | 1804 | struct sk_buff *skb, |
1874 | struct ieee80211_rx_status *status, | 1805 | struct ieee80211_rx_status *status, |
1875 | u32 load, | ||
1876 | struct ieee80211_rate *rate) | 1806 | struct ieee80211_rate *rate) |
1877 | { | 1807 | { |
1878 | struct ieee80211_local *local = hw_to_local(hw); | 1808 | struct ieee80211_local *local = hw_to_local(hw); |
@@ -1891,7 +1821,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
1891 | rx.local = local; | 1821 | rx.local = local; |
1892 | 1822 | ||
1893 | rx.status = status; | 1823 | rx.status = status; |
1894 | rx.load = load; | ||
1895 | rx.rate = rate; | 1824 | rx.rate = rate; |
1896 | rx.fc = le16_to_cpu(hdr->frame_control); | 1825 | rx.fc = le16_to_cpu(hdr->frame_control); |
1897 | type = rx.fc & IEEE80211_FCTL_FTYPE; | 1826 | type = rx.fc & IEEE80211_FCTL_FTYPE; |
@@ -2000,7 +1929,6 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | |||
2000 | struct ieee80211_rx_status status; | 1929 | struct ieee80211_rx_status status; |
2001 | u16 head_seq_num, buf_size; | 1930 | u16 head_seq_num, buf_size; |
2002 | int index; | 1931 | int index; |
2003 | u32 pkt_load; | ||
2004 | struct ieee80211_supported_band *sband; | 1932 | struct ieee80211_supported_band *sband; |
2005 | struct ieee80211_rate *rate; | 1933 | struct ieee80211_rate *rate; |
2006 | 1934 | ||
@@ -2035,12 +1963,9 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | |||
2035 | sizeof(status)); | 1963 | sizeof(status)); |
2036 | sband = local->hw.wiphy->bands[status.band]; | 1964 | sband = local->hw.wiphy->bands[status.band]; |
2037 | rate = &sband->bitrates[status.rate_idx]; | 1965 | rate = &sband->bitrates[status.rate_idx]; |
2038 | pkt_load = ieee80211_rx_load_stats(local, | ||
2039 | tid_agg_rx->reorder_buf[index], | ||
2040 | &status, rate); | ||
2041 | __ieee80211_rx_handle_packet(hw, | 1966 | __ieee80211_rx_handle_packet(hw, |
2042 | tid_agg_rx->reorder_buf[index], | 1967 | tid_agg_rx->reorder_buf[index], |
2043 | &status, pkt_load, rate); | 1968 | &status, rate); |
2044 | tid_agg_rx->stored_mpdu_num--; | 1969 | tid_agg_rx->stored_mpdu_num--; |
2045 | tid_agg_rx->reorder_buf[index] = NULL; | 1970 | tid_agg_rx->reorder_buf[index] = NULL; |
2046 | } | 1971 | } |
@@ -2082,11 +2007,8 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | |||
2082 | sizeof(status)); | 2007 | sizeof(status)); |
2083 | sband = local->hw.wiphy->bands[status.band]; | 2008 | sband = local->hw.wiphy->bands[status.band]; |
2084 | rate = &sband->bitrates[status.rate_idx]; | 2009 | rate = &sband->bitrates[status.rate_idx]; |
2085 | pkt_load = ieee80211_rx_load_stats(local, | ||
2086 | tid_agg_rx->reorder_buf[index], | ||
2087 | &status, rate); | ||
2088 | __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index], | 2010 | __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index], |
2089 | &status, pkt_load, rate); | 2011 | &status, rate); |
2090 | tid_agg_rx->stored_mpdu_num--; | 2012 | tid_agg_rx->stored_mpdu_num--; |
2091 | tid_agg_rx->reorder_buf[index] = NULL; | 2013 | tid_agg_rx->reorder_buf[index] = NULL; |
2092 | tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); | 2014 | tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); |
@@ -2103,32 +2025,29 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local, | |||
2103 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 2025 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
2104 | struct sta_info *sta; | 2026 | struct sta_info *sta; |
2105 | struct tid_ampdu_rx *tid_agg_rx; | 2027 | struct tid_ampdu_rx *tid_agg_rx; |
2106 | u16 fc, sc; | 2028 | u16 sc; |
2107 | u16 mpdu_seq_num; | 2029 | u16 mpdu_seq_num; |
2108 | u8 ret = 0, *qc; | 2030 | u8 ret = 0; |
2109 | int tid; | 2031 | int tid; |
2110 | 2032 | ||
2111 | sta = sta_info_get(local, hdr->addr2); | 2033 | sta = sta_info_get(local, hdr->addr2); |
2112 | if (!sta) | 2034 | if (!sta) |
2113 | return ret; | 2035 | return ret; |
2114 | 2036 | ||
2115 | fc = le16_to_cpu(hdr->frame_control); | ||
2116 | |||
2117 | /* filter the QoS data rx stream according to | 2037 | /* filter the QoS data rx stream according to |
2118 | * STA/TID and check if this STA/TID is on aggregation */ | 2038 | * STA/TID and check if this STA/TID is on aggregation */ |
2119 | if (!WLAN_FC_IS_QOS_DATA(fc)) | 2039 | if (!ieee80211_is_data_qos(hdr->frame_control)) |
2120 | goto end_reorder; | 2040 | goto end_reorder; |
2121 | 2041 | ||
2122 | qc = skb->data + ieee80211_get_hdrlen(fc) - QOS_CONTROL_LEN; | 2042 | tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; |
2123 | tid = qc[0] & QOS_CONTROL_TID_MASK; | ||
2124 | 2043 | ||
2125 | if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) | 2044 | if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) |
2126 | goto end_reorder; | 2045 | goto end_reorder; |
2127 | 2046 | ||
2128 | tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; | 2047 | tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; |
2129 | 2048 | ||
2130 | /* null data frames are excluded */ | 2049 | /* qos null data frames are excluded */ |
2131 | if (unlikely(fc & IEEE80211_STYPE_NULLFUNC)) | 2050 | if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) |
2132 | goto end_reorder; | 2051 | goto end_reorder; |
2133 | 2052 | ||
2134 | /* new un-ordered ampdu frame - process it */ | 2053 | /* new un-ordered ampdu frame - process it */ |
@@ -2165,7 +2084,6 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
2165 | struct ieee80211_rx_status *status) | 2084 | struct ieee80211_rx_status *status) |
2166 | { | 2085 | { |
2167 | struct ieee80211_local *local = hw_to_local(hw); | 2086 | struct ieee80211_local *local = hw_to_local(hw); |
2168 | u32 pkt_load; | ||
2169 | struct ieee80211_rate *rate = NULL; | 2087 | struct ieee80211_rate *rate = NULL; |
2170 | struct ieee80211_supported_band *sband; | 2088 | struct ieee80211_supported_band *sband; |
2171 | 2089 | ||
@@ -2205,11 +2123,8 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
2205 | return; | 2123 | return; |
2206 | } | 2124 | } |
2207 | 2125 | ||
2208 | pkt_load = ieee80211_rx_load_stats(local, skb, status, rate); | ||
2209 | local->channel_use_raw += pkt_load; | ||
2210 | |||
2211 | if (!ieee80211_rx_reorder_ampdu(local, skb)) | 2126 | if (!ieee80211_rx_reorder_ampdu(local, skb)) |
2212 | __ieee80211_rx_handle_packet(hw, skb, status, pkt_load, rate); | 2127 | __ieee80211_rx_handle_packet(hw, skb, status, rate); |
2213 | 2128 | ||
2214 | rcu_read_unlock(); | 2129 | rcu_read_unlock(); |
2215 | } | 2130 | } |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 7d4fe4a52929..f2ba653b9d69 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -135,6 +135,7 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx, | |||
135 | /** | 135 | /** |
136 | * __sta_info_free - internal STA free helper | 136 | * __sta_info_free - internal STA free helper |
137 | * | 137 | * |
138 | * @local: pointer to the global information | ||
138 | * @sta: STA info to free | 139 | * @sta: STA info to free |
139 | * | 140 | * |
140 | * This function must undo everything done by sta_info_alloc() | 141 | * This function must undo everything done by sta_info_alloc() |
@@ -202,14 +203,12 @@ void sta_info_destroy(struct sta_info *sta) | |||
202 | dev_kfree_skb_any(skb); | 203 | dev_kfree_skb_any(skb); |
203 | 204 | ||
204 | for (i = 0; i < STA_TID_NUM; i++) { | 205 | for (i = 0; i < STA_TID_NUM; i++) { |
205 | spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); | 206 | spin_lock_bh(&sta->lock); |
206 | if (sta->ampdu_mlme.tid_rx[i]) | 207 | if (sta->ampdu_mlme.tid_rx[i]) |
207 | del_timer_sync(&sta->ampdu_mlme.tid_rx[i]->session_timer); | 208 | del_timer_sync(&sta->ampdu_mlme.tid_rx[i]->session_timer); |
208 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); | ||
209 | spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
210 | if (sta->ampdu_mlme.tid_tx[i]) | 209 | if (sta->ampdu_mlme.tid_tx[i]) |
211 | del_timer_sync(&sta->ampdu_mlme.tid_tx[i]->addba_resp_timer); | 210 | del_timer_sync(&sta->ampdu_mlme.tid_tx[i]->addba_resp_timer); |
212 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | 211 | spin_unlock_bh(&sta->lock); |
213 | } | 212 | } |
214 | 213 | ||
215 | __sta_info_free(local, sta); | 214 | __sta_info_free(local, sta); |
@@ -236,6 +235,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
236 | if (!sta) | 235 | if (!sta) |
237 | return NULL; | 236 | return NULL; |
238 | 237 | ||
238 | spin_lock_init(&sta->lock); | ||
239 | spin_lock_init(&sta->flaglock); | ||
240 | |||
239 | memcpy(sta->addr, addr, ETH_ALEN); | 241 | memcpy(sta->addr, addr, ETH_ALEN); |
240 | sta->local = local; | 242 | sta->local = local; |
241 | sta->sdata = sdata; | 243 | sta->sdata = sdata; |
@@ -249,15 +251,13 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
249 | return NULL; | 251 | return NULL; |
250 | } | 252 | } |
251 | 253 | ||
252 | spin_lock_init(&sta->ampdu_mlme.ampdu_rx); | ||
253 | spin_lock_init(&sta->ampdu_mlme.ampdu_tx); | ||
254 | for (i = 0; i < STA_TID_NUM; i++) { | 254 | for (i = 0; i < STA_TID_NUM; i++) { |
255 | /* timer_to_tid must be initialized with identity mapping to | 255 | /* timer_to_tid must be initialized with identity mapping to |
256 | * enable session_timer's data differentiation. refer to | 256 | * enable session_timer's data differentiation. refer to |
257 | * sta_rx_agg_session_timer_expired for useage */ | 257 | * sta_rx_agg_session_timer_expired for useage */ |
258 | sta->timer_to_tid[i] = i; | 258 | sta->timer_to_tid[i] = i; |
259 | /* tid to tx queue: initialize according to HW (0 is valid) */ | 259 | /* tid to tx queue: initialize according to HW (0 is valid) */ |
260 | sta->tid_to_tx_q[i] = local->hw.queues; | 260 | sta->tid_to_tx_q[i] = ieee80211_num_queues(&local->hw); |
261 | /* rx */ | 261 | /* rx */ |
262 | sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE; | 262 | sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE; |
263 | sta->ampdu_mlme.tid_rx[i] = NULL; | 263 | sta->ampdu_mlme.tid_rx[i] = NULL; |
@@ -276,7 +276,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
276 | 276 | ||
277 | #ifdef CONFIG_MAC80211_MESH | 277 | #ifdef CONFIG_MAC80211_MESH |
278 | sta->plink_state = PLINK_LISTEN; | 278 | sta->plink_state = PLINK_LISTEN; |
279 | spin_lock_init(&sta->plink_lock); | ||
280 | init_timer(&sta->plink_timer); | 279 | init_timer(&sta->plink_timer); |
281 | #endif | 280 | #endif |
282 | 281 | ||
@@ -321,7 +320,9 @@ int sta_info_insert(struct sta_info *sta) | |||
321 | /* notify driver */ | 320 | /* notify driver */ |
322 | if (local->ops->sta_notify) { | 321 | if (local->ops->sta_notify) { |
323 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) | 322 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) |
324 | sdata = sdata->u.vlan.ap; | 323 | sdata = container_of(sdata->bss, |
324 | struct ieee80211_sub_if_data, | ||
325 | u.ap); | ||
325 | 326 | ||
326 | local->ops->sta_notify(local_to_hw(local), &sdata->vif, | 327 | local->ops->sta_notify(local_to_hw(local), &sdata->vif, |
327 | STA_NOTIFY_ADD, sta->addr); | 328 | STA_NOTIFY_ADD, sta->addr); |
@@ -376,8 +377,10 @@ static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid) | |||
376 | static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss, | 377 | static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss, |
377 | struct sta_info *sta) | 378 | struct sta_info *sta) |
378 | { | 379 | { |
379 | if (bss) | 380 | BUG_ON(!bss); |
380 | __bss_tim_set(bss, sta->aid); | 381 | |
382 | __bss_tim_set(bss, sta->aid); | ||
383 | |||
381 | if (sta->local->ops->set_tim) { | 384 | if (sta->local->ops->set_tim) { |
382 | sta->local->tim_in_locked_section = true; | 385 | sta->local->tim_in_locked_section = true; |
383 | sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 1); | 386 | sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 1); |
@@ -389,6 +392,8 @@ void sta_info_set_tim_bit(struct sta_info *sta) | |||
389 | { | 392 | { |
390 | unsigned long flags; | 393 | unsigned long flags; |
391 | 394 | ||
395 | BUG_ON(!sta->sdata->bss); | ||
396 | |||
392 | spin_lock_irqsave(&sta->local->sta_lock, flags); | 397 | spin_lock_irqsave(&sta->local->sta_lock, flags); |
393 | __sta_info_set_tim_bit(sta->sdata->bss, sta); | 398 | __sta_info_set_tim_bit(sta->sdata->bss, sta); |
394 | spin_unlock_irqrestore(&sta->local->sta_lock, flags); | 399 | spin_unlock_irqrestore(&sta->local->sta_lock, flags); |
@@ -397,8 +402,10 @@ void sta_info_set_tim_bit(struct sta_info *sta) | |||
397 | static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss, | 402 | static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss, |
398 | struct sta_info *sta) | 403 | struct sta_info *sta) |
399 | { | 404 | { |
400 | if (bss) | 405 | BUG_ON(!bss); |
401 | __bss_tim_clear(bss, sta->aid); | 406 | |
407 | __bss_tim_clear(bss, sta->aid); | ||
408 | |||
402 | if (sta->local->ops->set_tim) { | 409 | if (sta->local->ops->set_tim) { |
403 | sta->local->tim_in_locked_section = true; | 410 | sta->local->tim_in_locked_section = true; |
404 | sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 0); | 411 | sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 0); |
@@ -410,6 +417,8 @@ void sta_info_clear_tim_bit(struct sta_info *sta) | |||
410 | { | 417 | { |
411 | unsigned long flags; | 418 | unsigned long flags; |
412 | 419 | ||
420 | BUG_ON(!sta->sdata->bss); | ||
421 | |||
413 | spin_lock_irqsave(&sta->local->sta_lock, flags); | 422 | spin_lock_irqsave(&sta->local->sta_lock, flags); |
414 | __sta_info_clear_tim_bit(sta->sdata->bss, sta); | 423 | __sta_info_clear_tim_bit(sta->sdata->bss, sta); |
415 | spin_unlock_irqrestore(&sta->local->sta_lock, flags); | 424 | spin_unlock_irqrestore(&sta->local->sta_lock, flags); |
@@ -437,10 +446,10 @@ void __sta_info_unlink(struct sta_info **sta) | |||
437 | 446 | ||
438 | list_del(&(*sta)->list); | 447 | list_del(&(*sta)->list); |
439 | 448 | ||
440 | if ((*sta)->flags & WLAN_STA_PS) { | 449 | if (test_and_clear_sta_flags(*sta, WLAN_STA_PS)) { |
441 | (*sta)->flags &= ~WLAN_STA_PS; | 450 | BUG_ON(!sdata->bss); |
442 | if (sdata->bss) | 451 | |
443 | atomic_dec(&sdata->bss->num_sta_ps); | 452 | atomic_dec(&sdata->bss->num_sta_ps); |
444 | __sta_info_clear_tim_bit(sdata->bss, *sta); | 453 | __sta_info_clear_tim_bit(sdata->bss, *sta); |
445 | } | 454 | } |
446 | 455 | ||
@@ -448,7 +457,9 @@ void __sta_info_unlink(struct sta_info **sta) | |||
448 | 457 | ||
449 | if (local->ops->sta_notify) { | 458 | if (local->ops->sta_notify) { |
450 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) | 459 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) |
451 | sdata = sdata->u.vlan.ap; | 460 | sdata = container_of(sdata->bss, |
461 | struct ieee80211_sub_if_data, | ||
462 | u.ap); | ||
452 | 463 | ||
453 | local->ops->sta_notify(local_to_hw(local), &sdata->vif, | 464 | local->ops->sta_notify(local_to_hw(local), &sdata->vif, |
454 | STA_NOTIFY_REMOVE, (*sta)->addr); | 465 | STA_NOTIFY_REMOVE, (*sta)->addr); |
@@ -515,20 +526,20 @@ static inline int sta_info_buffer_expired(struct ieee80211_local *local, | |||
515 | struct sta_info *sta, | 526 | struct sta_info *sta, |
516 | struct sk_buff *skb) | 527 | struct sk_buff *skb) |
517 | { | 528 | { |
518 | struct ieee80211_tx_packet_data *pkt_data; | 529 | struct ieee80211_tx_info *info; |
519 | int timeout; | 530 | int timeout; |
520 | 531 | ||
521 | if (!skb) | 532 | if (!skb) |
522 | return 0; | 533 | return 0; |
523 | 534 | ||
524 | pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; | 535 | info = IEEE80211_SKB_CB(skb); |
525 | 536 | ||
526 | /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */ | 537 | /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */ |
527 | timeout = (sta->listen_interval * local->hw.conf.beacon_int * 32 / | 538 | timeout = (sta->listen_interval * local->hw.conf.beacon_int * 32 / |
528 | 15625) * HZ; | 539 | 15625) * HZ; |
529 | if (timeout < STA_TX_BUFFER_EXPIRE) | 540 | if (timeout < STA_TX_BUFFER_EXPIRE) |
530 | timeout = STA_TX_BUFFER_EXPIRE; | 541 | timeout = STA_TX_BUFFER_EXPIRE; |
531 | return time_after(jiffies, pkt_data->jiffies + timeout); | 542 | return time_after(jiffies, info->control.jiffies + timeout); |
532 | } | 543 | } |
533 | 544 | ||
534 | 545 | ||
@@ -557,8 +568,10 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local, | |||
557 | 568 | ||
558 | sdata = sta->sdata; | 569 | sdata = sta->sdata; |
559 | local->total_ps_buffered--; | 570 | local->total_ps_buffered--; |
571 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | ||
560 | printk(KERN_DEBUG "Buffered frame expired (STA " | 572 | printk(KERN_DEBUG "Buffered frame expired (STA " |
561 | "%s)\n", print_mac(mac, sta->addr)); | 573 | "%s)\n", print_mac(mac, sta->addr)); |
574 | #endif | ||
562 | dev_kfree_skb(skb); | 575 | dev_kfree_skb(skb); |
563 | 576 | ||
564 | if (skb_queue_empty(&sta->ps_tx_buf)) | 577 | if (skb_queue_empty(&sta->ps_tx_buf)) |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index f8c95bc9659c..109db787ccb7 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -32,7 +32,7 @@ | |||
32 | * @WLAN_STA_WDS: Station is one of our WDS peers. | 32 | * @WLAN_STA_WDS: Station is one of our WDS peers. |
33 | * @WLAN_STA_PSPOLL: Station has just PS-polled us. | 33 | * @WLAN_STA_PSPOLL: Station has just PS-polled us. |
34 | * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the | 34 | * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the |
35 | * IEEE80211_TXCTL_CLEAR_PS_FILT control flag) when the next | 35 | * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next |
36 | * frame to this station is transmitted. | 36 | * frame to this station is transmitted. |
37 | */ | 37 | */ |
38 | enum ieee80211_sta_info_flags { | 38 | enum ieee80211_sta_info_flags { |
@@ -129,23 +129,19 @@ enum plink_state { | |||
129 | * | 129 | * |
130 | * @tid_state_rx: TID's state in Rx session state machine. | 130 | * @tid_state_rx: TID's state in Rx session state machine. |
131 | * @tid_rx: aggregation info for Rx per TID | 131 | * @tid_rx: aggregation info for Rx per TID |
132 | * @ampdu_rx: for locking sections in aggregation Rx flow | ||
133 | * @tid_state_tx: TID's state in Tx session state machine. | 132 | * @tid_state_tx: TID's state in Tx session state machine. |
134 | * @tid_tx: aggregation info for Tx per TID | 133 | * @tid_tx: aggregation info for Tx per TID |
135 | * @addba_req_num: number of times addBA request has been sent. | 134 | * @addba_req_num: number of times addBA request has been sent. |
136 | * @ampdu_tx: for locking sectionsi in aggregation Tx flow | ||
137 | * @dialog_token_allocator: dialog token enumerator for each new session; | 135 | * @dialog_token_allocator: dialog token enumerator for each new session; |
138 | */ | 136 | */ |
139 | struct sta_ampdu_mlme { | 137 | struct sta_ampdu_mlme { |
140 | /* rx */ | 138 | /* rx */ |
141 | u8 tid_state_rx[STA_TID_NUM]; | 139 | u8 tid_state_rx[STA_TID_NUM]; |
142 | struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; | 140 | struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; |
143 | spinlock_t ampdu_rx; | ||
144 | /* tx */ | 141 | /* tx */ |
145 | u8 tid_state_tx[STA_TID_NUM]; | 142 | u8 tid_state_tx[STA_TID_NUM]; |
146 | struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; | 143 | struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; |
147 | u8 addba_req_num[STA_TID_NUM]; | 144 | u8 addba_req_num[STA_TID_NUM]; |
148 | spinlock_t ampdu_tx; | ||
149 | u8 dialog_token_allocator; | 145 | u8 dialog_token_allocator; |
150 | }; | 146 | }; |
151 | 147 | ||
@@ -164,9 +160,20 @@ struct sta_ampdu_mlme { | |||
164 | * @list: global linked list entry | 160 | * @list: global linked list entry |
165 | * @hnext: hash table linked list pointer | 161 | * @hnext: hash table linked list pointer |
166 | * @local: pointer to the global information | 162 | * @local: pointer to the global information |
163 | * @sdata: TBD | ||
164 | * @key: TBD | ||
165 | * @rate_ctrl: TBD | ||
166 | * @rate_ctrl_priv: TBD | ||
167 | * @lock: used for locking all fields that require locking, see comments | ||
168 | * in the header file. | ||
169 | * @flaglock: spinlock for flags accesses | ||
170 | * @ht_info: HT capabilities of this STA | ||
171 | * @supp_rates: Bitmap of supported rates (per band) | ||
167 | * @addr: MAC address of this STA | 172 | * @addr: MAC address of this STA |
168 | * @aid: STA's unique AID (1..2007, 0 = not assigned yet), | 173 | * @aid: STA's unique AID (1..2007, 0 = not assigned yet), |
169 | * only used in AP (and IBSS?) mode | 174 | * only used in AP (and IBSS?) mode |
175 | * @listen_interval: TBD | ||
176 | * @pin_status: TBD | ||
170 | * @flags: STA flags, see &enum ieee80211_sta_info_flags | 177 | * @flags: STA flags, see &enum ieee80211_sta_info_flags |
171 | * @ps_tx_buf: buffer of frames to transmit to this station | 178 | * @ps_tx_buf: buffer of frames to transmit to this station |
172 | * when it leaves power saving state | 179 | * when it leaves power saving state |
@@ -175,8 +182,41 @@ struct sta_ampdu_mlme { | |||
175 | * power saving state | 182 | * power saving state |
176 | * @rx_packets: Number of MSDUs received from this STA | 183 | * @rx_packets: Number of MSDUs received from this STA |
177 | * @rx_bytes: Number of bytes received from this STA | 184 | * @rx_bytes: Number of bytes received from this STA |
178 | * @supp_rates: Bitmap of supported rates (per band) | 185 | * @wep_weak_iv_count: TBD |
179 | * @ht_info: HT capabilities of this STA | 186 | * @last_rx: TBD |
187 | * @num_duplicates: number of duplicate frames received from this STA | ||
188 | * @rx_fragments: number of received MPDUs | ||
189 | * @rx_dropped: number of dropped MPDUs from this STA | ||
190 | * @last_signal: signal of last received frame from this STA | ||
191 | * @last_qual: qual of last received frame from this STA | ||
192 | * @last_noise: noise of last received frame from this STA | ||
193 | * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) | ||
194 | * @wme_rx_queue: TBD | ||
195 | * @tx_filtered_count: TBD | ||
196 | * @tx_retry_failed: TBD | ||
197 | * @tx_retry_count: TBD | ||
198 | * @tx_num_consecutive_failures: TBD | ||
199 | * @tx_num_mpdu_ok: TBD | ||
200 | * @tx_num_mpdu_fail: TBD | ||
201 | * @fail_avg: moving percentage of failed MSDUs | ||
202 | * @tx_packets: number of RX/TX MSDUs | ||
203 | * @tx_bytes: TBD | ||
204 | * @tx_fragments: number of transmitted MPDUs | ||
205 | * @txrate_idx: TBD | ||
206 | * @last_txrate_idx: TBD | ||
207 | * @wme_tx_queue: TBD | ||
208 | * @ampdu_mlme: TBD | ||
209 | * @timer_to_tid: identity mapping to ID timers | ||
210 | * @tid_to_tx_q: map tid to tx queue | ||
211 | * @llid: Local link ID | ||
212 | * @plid: Peer link ID | ||
213 | * @reason: Cancel reason on PLINK_HOLDING state | ||
214 | * @plink_retries: Retries in establishment | ||
215 | * @ignore_plink_timer: TBD | ||
216 | * @plink_state plink_state: TBD | ||
217 | * @plink_timeout: TBD | ||
218 | * @plink_timer: TBD | ||
219 | * @debugfs: debug filesystem info | ||
180 | */ | 220 | */ |
181 | struct sta_info { | 221 | struct sta_info { |
182 | /* General information, mostly static */ | 222 | /* General information, mostly static */ |
@@ -187,6 +227,8 @@ struct sta_info { | |||
187 | struct ieee80211_key *key; | 227 | struct ieee80211_key *key; |
188 | struct rate_control_ref *rate_ctrl; | 228 | struct rate_control_ref *rate_ctrl; |
189 | void *rate_ctrl_priv; | 229 | void *rate_ctrl_priv; |
230 | spinlock_t lock; | ||
231 | spinlock_t flaglock; | ||
190 | struct ieee80211_ht_info ht_info; | 232 | struct ieee80211_ht_info ht_info; |
191 | u64 supp_rates[IEEE80211_NUM_BANDS]; | 233 | u64 supp_rates[IEEE80211_NUM_BANDS]; |
192 | u8 addr[ETH_ALEN]; | 234 | u8 addr[ETH_ALEN]; |
@@ -199,7 +241,10 @@ struct sta_info { | |||
199 | */ | 241 | */ |
200 | u8 pin_status; | 242 | u8 pin_status; |
201 | 243 | ||
202 | /* frequently updated information, needs locking? */ | 244 | /* |
245 | * frequently updated, locked with own spinlock (flaglock), | ||
246 | * use the accessors defined below | ||
247 | */ | ||
203 | u32 flags; | 248 | u32 flags; |
204 | 249 | ||
205 | /* | 250 | /* |
@@ -213,14 +258,12 @@ struct sta_info { | |||
213 | unsigned long rx_packets, rx_bytes; | 258 | unsigned long rx_packets, rx_bytes; |
214 | unsigned long wep_weak_iv_count; | 259 | unsigned long wep_weak_iv_count; |
215 | unsigned long last_rx; | 260 | unsigned long last_rx; |
216 | unsigned long num_duplicates; /* number of duplicate frames received | 261 | unsigned long num_duplicates; |
217 | * from this STA */ | 262 | unsigned long rx_fragments; |
218 | unsigned long rx_fragments; /* number of received MPDUs */ | 263 | unsigned long rx_dropped; |
219 | unsigned long rx_dropped; /* number of dropped MPDUs from this STA */ | 264 | int last_signal; |
220 | int last_rssi; /* RSSI of last received frame from this STA */ | 265 | int last_qual; |
221 | int last_signal; /* signal of last received frame from this STA */ | 266 | int last_noise; |
222 | int last_noise; /* noise of last received frame from this STA */ | ||
223 | /* last received seq/frag number from this STA (per RX queue) */ | ||
224 | __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; | 267 | __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; |
225 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | 268 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS |
226 | unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES]; | 269 | unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES]; |
@@ -237,42 +280,36 @@ struct sta_info { | |||
237 | unsigned int fail_avg; | 280 | unsigned int fail_avg; |
238 | 281 | ||
239 | /* Updated from TX path only, no locking requirements */ | 282 | /* Updated from TX path only, no locking requirements */ |
240 | unsigned long tx_packets; /* number of RX/TX MSDUs */ | 283 | unsigned long tx_packets; |
241 | unsigned long tx_bytes; | 284 | unsigned long tx_bytes; |
242 | unsigned long tx_fragments; /* number of transmitted MPDUs */ | 285 | unsigned long tx_fragments; |
243 | int txrate_idx; | 286 | int txrate_idx; |
244 | int last_txrate_idx; | 287 | int last_txrate_idx; |
288 | u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; | ||
245 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | 289 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS |
246 | unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES]; | 290 | unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES]; |
247 | #endif | 291 | #endif |
248 | 292 | ||
249 | /* Debug counters, no locking doesn't matter */ | ||
250 | int channel_use; | ||
251 | int channel_use_raw; | ||
252 | |||
253 | /* | 293 | /* |
254 | * Aggregation information, comes with own locking. | 294 | * Aggregation information, locked with lock. |
255 | */ | 295 | */ |
256 | struct sta_ampdu_mlme ampdu_mlme; | 296 | struct sta_ampdu_mlme ampdu_mlme; |
257 | u8 timer_to_tid[STA_TID_NUM]; /* identity mapping to ID timers */ | 297 | u8 timer_to_tid[STA_TID_NUM]; |
258 | u8 tid_to_tx_q[STA_TID_NUM]; /* map tid to tx queue */ | 298 | u8 tid_to_tx_q[STA_TID_NUM]; |
259 | 299 | ||
260 | #ifdef CONFIG_MAC80211_MESH | 300 | #ifdef CONFIG_MAC80211_MESH |
261 | /* | 301 | /* |
262 | * Mesh peer link attributes | 302 | * Mesh peer link attributes |
263 | * TODO: move to a sub-structure that is referenced with pointer? | 303 | * TODO: move to a sub-structure that is referenced with pointer? |
264 | */ | 304 | */ |
265 | __le16 llid; /* Local link ID */ | 305 | __le16 llid; |
266 | __le16 plid; /* Peer link ID */ | 306 | __le16 plid; |
267 | __le16 reason; /* Cancel reason on PLINK_HOLDING state */ | 307 | __le16 reason; |
268 | u8 plink_retries; /* Retries in establishment */ | 308 | u8 plink_retries; |
269 | bool ignore_plink_timer; | 309 | bool ignore_plink_timer; |
270 | enum plink_state plink_state; | 310 | enum plink_state plink_state; |
271 | u32 plink_timeout; | 311 | u32 plink_timeout; |
272 | struct timer_list plink_timer; | 312 | struct timer_list plink_timer; |
273 | spinlock_t plink_lock; /* For peer_state reads / updates and other | ||
274 | updates in the structure. Ensures robust | ||
275 | transitions for the peerlink FSM */ | ||
276 | #endif | 313 | #endif |
277 | 314 | ||
278 | #ifdef CONFIG_MAC80211_DEBUGFS | 315 | #ifdef CONFIG_MAC80211_DEBUGFS |
@@ -299,6 +336,73 @@ static inline enum plink_state sta_plink_state(struct sta_info *sta) | |||
299 | return PLINK_LISTEN; | 336 | return PLINK_LISTEN; |
300 | } | 337 | } |
301 | 338 | ||
339 | static inline void set_sta_flags(struct sta_info *sta, const u32 flags) | ||
340 | { | ||
341 | unsigned long irqfl; | ||
342 | |||
343 | spin_lock_irqsave(&sta->flaglock, irqfl); | ||
344 | sta->flags |= flags; | ||
345 | spin_unlock_irqrestore(&sta->flaglock, irqfl); | ||
346 | } | ||
347 | |||
348 | static inline void clear_sta_flags(struct sta_info *sta, const u32 flags) | ||
349 | { | ||
350 | unsigned long irqfl; | ||
351 | |||
352 | spin_lock_irqsave(&sta->flaglock, irqfl); | ||
353 | sta->flags &= ~flags; | ||
354 | spin_unlock_irqrestore(&sta->flaglock, irqfl); | ||
355 | } | ||
356 | |||
357 | static inline void set_and_clear_sta_flags(struct sta_info *sta, | ||
358 | const u32 set, const u32 clear) | ||
359 | { | ||
360 | unsigned long irqfl; | ||
361 | |||
362 | spin_lock_irqsave(&sta->flaglock, irqfl); | ||
363 | sta->flags |= set; | ||
364 | sta->flags &= ~clear; | ||
365 | spin_unlock_irqrestore(&sta->flaglock, irqfl); | ||
366 | } | ||
367 | |||
368 | static inline u32 test_sta_flags(struct sta_info *sta, const u32 flags) | ||
369 | { | ||
370 | u32 ret; | ||
371 | unsigned long irqfl; | ||
372 | |||
373 | spin_lock_irqsave(&sta->flaglock, irqfl); | ||
374 | ret = sta->flags & flags; | ||
375 | spin_unlock_irqrestore(&sta->flaglock, irqfl); | ||
376 | |||
377 | return ret; | ||
378 | } | ||
379 | |||
380 | static inline u32 test_and_clear_sta_flags(struct sta_info *sta, | ||
381 | const u32 flags) | ||
382 | { | ||
383 | u32 ret; | ||
384 | unsigned long irqfl; | ||
385 | |||
386 | spin_lock_irqsave(&sta->flaglock, irqfl); | ||
387 | ret = sta->flags & flags; | ||
388 | sta->flags &= ~flags; | ||
389 | spin_unlock_irqrestore(&sta->flaglock, irqfl); | ||
390 | |||
391 | return ret; | ||
392 | } | ||
393 | |||
394 | static inline u32 get_sta_flags(struct sta_info *sta) | ||
395 | { | ||
396 | u32 ret; | ||
397 | unsigned long irqfl; | ||
398 | |||
399 | spin_lock_irqsave(&sta->flaglock, irqfl); | ||
400 | ret = sta->flags; | ||
401 | spin_unlock_irqrestore(&sta->flaglock, irqfl); | ||
402 | |||
403 | return ret; | ||
404 | } | ||
405 | |||
302 | 406 | ||
303 | /* Maximum number of concurrently registered stations */ | 407 | /* Maximum number of concurrently registered stations */ |
304 | #define MAX_STA_COUNT 2007 | 408 | #define MAX_STA_COUNT 2007 |
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c index 09093da24af6..995f7af3d25e 100644 --- a/net/mac80211/tkip.c +++ b/net/mac80211/tkip.c | |||
@@ -6,25 +6,23 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | |||
10 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/bitops.h> | ||
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <linux/netdevice.h> | 12 | #include <linux/netdevice.h> |
13 | #include <asm/unaligned.h> | ||
13 | 14 | ||
14 | #include <net/mac80211.h> | 15 | #include <net/mac80211.h> |
15 | #include "key.h" | 16 | #include "key.h" |
16 | #include "tkip.h" | 17 | #include "tkip.h" |
17 | #include "wep.h" | 18 | #include "wep.h" |
18 | 19 | ||
19 | |||
20 | /* TKIP key mixing functions */ | ||
21 | |||
22 | |||
23 | #define PHASE1_LOOP_COUNT 8 | 20 | #define PHASE1_LOOP_COUNT 8 |
24 | 21 | ||
25 | 22 | /* | |
26 | /* 2-byte by 2-byte subset of the full AES S-box table; second part of this | 23 | * 2-byte by 2-byte subset of the full AES S-box table; second part of this |
27 | * table is identical to first part but byte-swapped */ | 24 | * table is identical to first part but byte-swapped |
25 | */ | ||
28 | static const u16 tkip_sbox[256] = | 26 | static const u16 tkip_sbox[256] = |
29 | { | 27 | { |
30 | 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, | 28 | 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, |
@@ -61,84 +59,54 @@ static const u16 tkip_sbox[256] = | |||
61 | 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, | 59 | 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, |
62 | }; | 60 | }; |
63 | 61 | ||
64 | 62 | static u16 tkipS(u16 val) | |
65 | static inline u16 Mk16(u8 x, u8 y) | ||
66 | { | 63 | { |
67 | return ((u16) x << 8) | (u16) y; | 64 | return tkip_sbox[val & 0xff] ^ swab16(tkip_sbox[val >> 8]); |
68 | } | 65 | } |
69 | 66 | ||
70 | 67 | static u8 *write_tkip_iv(u8 *pos, u16 iv16) | |
71 | static inline u8 Hi8(u16 v) | ||
72 | { | ||
73 | return v >> 8; | ||
74 | } | ||
75 | |||
76 | |||
77 | static inline u8 Lo8(u16 v) | ||
78 | { | ||
79 | return v & 0xff; | ||
80 | } | ||
81 | |||
82 | |||
83 | static inline u16 Hi16(u32 v) | ||
84 | { | ||
85 | return v >> 16; | ||
86 | } | ||
87 | |||
88 | |||
89 | static inline u16 Lo16(u32 v) | ||
90 | { | ||
91 | return v & 0xffff; | ||
92 | } | ||
93 | |||
94 | |||
95 | static inline u16 RotR1(u16 v) | ||
96 | { | ||
97 | return (v >> 1) | ((v & 0x0001) << 15); | ||
98 | } | ||
99 | |||
100 | |||
101 | static inline u16 tkip_S(u16 val) | ||
102 | { | 68 | { |
103 | u16 a = tkip_sbox[Hi8(val)]; | 69 | *pos++ = iv16 >> 8; |
104 | 70 | *pos++ = ((iv16 >> 8) | 0x20) & 0x7f; | |
105 | return tkip_sbox[Lo8(val)] ^ Hi8(a) ^ (Lo8(a) << 8); | 71 | *pos++ = iv16 & 0xFF; |
72 | return pos; | ||
106 | } | 73 | } |
107 | 74 | ||
108 | 75 | /* | |
109 | 76 | * P1K := Phase1(TA, TK, TSC) | |
110 | /* P1K := Phase1(TA, TK, TSC) | ||
111 | * TA = transmitter address (48 bits) | 77 | * TA = transmitter address (48 bits) |
112 | * TK = dot11DefaultKeyValue or dot11KeyMappingValue (128 bits) | 78 | * TK = dot11DefaultKeyValue or dot11KeyMappingValue (128 bits) |
113 | * TSC = TKIP sequence counter (48 bits, only 32 msb bits used) | 79 | * TSC = TKIP sequence counter (48 bits, only 32 msb bits used) |
114 | * P1K: 80 bits | 80 | * P1K: 80 bits |
115 | */ | 81 | */ |
116 | static void tkip_mixing_phase1(const u8 *ta, const u8 *tk, u32 tsc_IV32, | 82 | static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx, |
117 | u16 *p1k) | 83 | const u8 *ta, u32 tsc_IV32) |
118 | { | 84 | { |
119 | int i, j; | 85 | int i, j; |
86 | u16 *p1k = ctx->p1k; | ||
120 | 87 | ||
121 | p1k[0] = Lo16(tsc_IV32); | 88 | p1k[0] = tsc_IV32 & 0xFFFF; |
122 | p1k[1] = Hi16(tsc_IV32); | 89 | p1k[1] = tsc_IV32 >> 16; |
123 | p1k[2] = Mk16(ta[1], ta[0]); | 90 | p1k[2] = get_unaligned_le16(ta + 0); |
124 | p1k[3] = Mk16(ta[3], ta[2]); | 91 | p1k[3] = get_unaligned_le16(ta + 2); |
125 | p1k[4] = Mk16(ta[5], ta[4]); | 92 | p1k[4] = get_unaligned_le16(ta + 4); |
126 | 93 | ||
127 | for (i = 0; i < PHASE1_LOOP_COUNT; i++) { | 94 | for (i = 0; i < PHASE1_LOOP_COUNT; i++) { |
128 | j = 2 * (i & 1); | 95 | j = 2 * (i & 1); |
129 | p1k[0] += tkip_S(p1k[4] ^ Mk16(tk[ 1 + j], tk[ 0 + j])); | 96 | p1k[0] += tkipS(p1k[4] ^ get_unaligned_le16(tk + 0 + j)); |
130 | p1k[1] += tkip_S(p1k[0] ^ Mk16(tk[ 5 + j], tk[ 4 + j])); | 97 | p1k[1] += tkipS(p1k[0] ^ get_unaligned_le16(tk + 4 + j)); |
131 | p1k[2] += tkip_S(p1k[1] ^ Mk16(tk[ 9 + j], tk[ 8 + j])); | 98 | p1k[2] += tkipS(p1k[1] ^ get_unaligned_le16(tk + 8 + j)); |
132 | p1k[3] += tkip_S(p1k[2] ^ Mk16(tk[13 + j], tk[12 + j])); | 99 | p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j)); |
133 | p1k[4] += tkip_S(p1k[3] ^ Mk16(tk[ 1 + j], tk[ 0 + j])) + i; | 100 | p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i; |
134 | } | 101 | } |
102 | ctx->initialized = 1; | ||
135 | } | 103 | } |
136 | 104 | ||
137 | 105 | static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx, | |
138 | static void tkip_mixing_phase2(const u16 *p1k, const u8 *tk, u16 tsc_IV16, | 106 | u16 tsc_IV16, u8 *rc4key) |
139 | u8 *rc4key) | ||
140 | { | 107 | { |
141 | u16 ppk[6]; | 108 | u16 ppk[6]; |
109 | const u16 *p1k = ctx->p1k; | ||
142 | int i; | 110 | int i; |
143 | 111 | ||
144 | ppk[0] = p1k[0]; | 112 | ppk[0] = p1k[0]; |
@@ -148,70 +116,35 @@ static void tkip_mixing_phase2(const u16 *p1k, const u8 *tk, u16 tsc_IV16, | |||
148 | ppk[4] = p1k[4]; | 116 | ppk[4] = p1k[4]; |
149 | ppk[5] = p1k[4] + tsc_IV16; | 117 | ppk[5] = p1k[4] + tsc_IV16; |
150 | 118 | ||
151 | ppk[0] += tkip_S(ppk[5] ^ Mk16(tk[ 1], tk[ 0])); | 119 | ppk[0] += tkipS(ppk[5] ^ get_unaligned_le16(tk + 0)); |
152 | ppk[1] += tkip_S(ppk[0] ^ Mk16(tk[ 3], tk[ 2])); | 120 | ppk[1] += tkipS(ppk[0] ^ get_unaligned_le16(tk + 2)); |
153 | ppk[2] += tkip_S(ppk[1] ^ Mk16(tk[ 5], tk[ 4])); | 121 | ppk[2] += tkipS(ppk[1] ^ get_unaligned_le16(tk + 4)); |
154 | ppk[3] += tkip_S(ppk[2] ^ Mk16(tk[ 7], tk[ 6])); | 122 | ppk[3] += tkipS(ppk[2] ^ get_unaligned_le16(tk + 6)); |
155 | ppk[4] += tkip_S(ppk[3] ^ Mk16(tk[ 9], tk[ 8])); | 123 | ppk[4] += tkipS(ppk[3] ^ get_unaligned_le16(tk + 8)); |
156 | ppk[5] += tkip_S(ppk[4] ^ Mk16(tk[11], tk[10])); | 124 | ppk[5] += tkipS(ppk[4] ^ get_unaligned_le16(tk + 10)); |
157 | ppk[0] += RotR1(ppk[5] ^ Mk16(tk[13], tk[12])); | 125 | ppk[0] += ror16(ppk[5] ^ get_unaligned_le16(tk + 12), 1); |
158 | ppk[1] += RotR1(ppk[0] ^ Mk16(tk[15], tk[14])); | 126 | ppk[1] += ror16(ppk[0] ^ get_unaligned_le16(tk + 14), 1); |
159 | ppk[2] += RotR1(ppk[1]); | 127 | ppk[2] += ror16(ppk[1], 1); |
160 | ppk[3] += RotR1(ppk[2]); | 128 | ppk[3] += ror16(ppk[2], 1); |
161 | ppk[4] += RotR1(ppk[3]); | 129 | ppk[4] += ror16(ppk[3], 1); |
162 | ppk[5] += RotR1(ppk[4]); | 130 | ppk[5] += ror16(ppk[4], 1); |
163 | 131 | ||
164 | rc4key[0] = Hi8(tsc_IV16); | 132 | rc4key = write_tkip_iv(rc4key, tsc_IV16); |
165 | rc4key[1] = (Hi8(tsc_IV16) | 0x20) & 0x7f; | 133 | *rc4key++ = ((ppk[5] ^ get_unaligned_le16(tk)) >> 1) & 0xFF; |
166 | rc4key[2] = Lo8(tsc_IV16); | 134 | |
167 | rc4key[3] = Lo8((ppk[5] ^ Mk16(tk[1], tk[0])) >> 1); | 135 | for (i = 0; i < 6; i++) |
168 | 136 | put_unaligned_le16(ppk[i], rc4key + 2 * i); | |
169 | for (i = 0; i < 6; i++) { | ||
170 | rc4key[4 + 2 * i] = Lo8(ppk[i]); | ||
171 | rc4key[5 + 2 * i] = Hi8(ppk[i]); | ||
172 | } | ||
173 | } | 137 | } |
174 | 138 | ||
175 | |||
176 | /* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets | 139 | /* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets |
177 | * of the IV. Returns pointer to the octet following IVs (i.e., beginning of | 140 | * of the IV. Returns pointer to the octet following IVs (i.e., beginning of |
178 | * the packet payload). */ | 141 | * the packet payload). */ |
179 | u8 * ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, | 142 | u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16) |
180 | u8 iv0, u8 iv1, u8 iv2) | ||
181 | { | 143 | { |
182 | *pos++ = iv0; | 144 | pos = write_tkip_iv(pos, iv16); |
183 | *pos++ = iv1; | ||
184 | *pos++ = iv2; | ||
185 | *pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */; | 145 | *pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */; |
186 | *pos++ = key->u.tkip.iv32 & 0xff; | 146 | put_unaligned_le32(key->u.tkip.tx.iv32, pos); |
187 | *pos++ = (key->u.tkip.iv32 >> 8) & 0xff; | 147 | return pos + 4; |
188 | *pos++ = (key->u.tkip.iv32 >> 16) & 0xff; | ||
189 | *pos++ = (key->u.tkip.iv32 >> 24) & 0xff; | ||
190 | return pos; | ||
191 | } | ||
192 | |||
193 | |||
194 | void ieee80211_tkip_gen_phase1key(struct ieee80211_key *key, u8 *ta, | ||
195 | u16 *phase1key) | ||
196 | { | ||
197 | tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], | ||
198 | key->u.tkip.iv32, phase1key); | ||
199 | } | ||
200 | |||
201 | void ieee80211_tkip_gen_rc4key(struct ieee80211_key *key, u8 *ta, | ||
202 | u8 *rc4key) | ||
203 | { | ||
204 | /* Calculate per-packet key */ | ||
205 | if (key->u.tkip.iv16 == 0 || !key->u.tkip.tx_initialized) { | ||
206 | /* IV16 wrapped around - perform TKIP phase 1 */ | ||
207 | tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], | ||
208 | key->u.tkip.iv32, key->u.tkip.p1k); | ||
209 | key->u.tkip.tx_initialized = 1; | ||
210 | } | ||
211 | |||
212 | tkip_mixing_phase2(key->u.tkip.p1k, | ||
213 | &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], | ||
214 | key->u.tkip.iv16, rc4key); | ||
215 | } | 148 | } |
216 | 149 | ||
217 | void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf, | 150 | void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf, |
@@ -220,48 +153,44 @@ void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf, | |||
220 | { | 153 | { |
221 | struct ieee80211_key *key = (struct ieee80211_key *) | 154 | struct ieee80211_key *key = (struct ieee80211_key *) |
222 | container_of(keyconf, struct ieee80211_key, conf); | 155 | container_of(keyconf, struct ieee80211_key, conf); |
223 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 156 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
224 | u8 *data = (u8 *) hdr; | 157 | u8 *data; |
225 | u16 fc = le16_to_cpu(hdr->frame_control); | 158 | const u8 *tk; |
226 | int hdr_len = ieee80211_get_hdrlen(fc); | 159 | struct tkip_ctx *ctx; |
227 | u8 *ta = hdr->addr2; | ||
228 | u16 iv16; | 160 | u16 iv16; |
229 | u32 iv32; | 161 | u32 iv32; |
230 | 162 | ||
231 | iv16 = data[hdr_len] << 8; | 163 | data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control); |
232 | iv16 += data[hdr_len + 2]; | 164 | iv16 = data[2] | (data[0] << 8); |
233 | iv32 = data[hdr_len + 4] | (data[hdr_len + 5] << 8) | | 165 | iv32 = get_unaligned_le32(&data[4]); |
234 | (data[hdr_len + 6] << 16) | (data[hdr_len + 7] << 24); | 166 | |
167 | tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; | ||
168 | ctx = &key->u.tkip.tx; | ||
235 | 169 | ||
236 | #ifdef CONFIG_TKIP_DEBUG | 170 | #ifdef CONFIG_MAC80211_TKIP_DEBUG |
237 | printk(KERN_DEBUG "TKIP encrypt: iv16 = 0x%04x, iv32 = 0x%08x\n", | 171 | printk(KERN_DEBUG "TKIP encrypt: iv16 = 0x%04x, iv32 = 0x%08x\n", |
238 | iv16, iv32); | 172 | iv16, iv32); |
239 | 173 | ||
240 | if (iv32 != key->u.tkip.iv32) { | 174 | if (iv32 != ctx->iv32) { |
241 | printk(KERN_DEBUG "skb: iv32 = 0x%08x key: iv32 = 0x%08x\n", | 175 | printk(KERN_DEBUG "skb: iv32 = 0x%08x key: iv32 = 0x%08x\n", |
242 | iv32, key->u.tkip.iv32); | 176 | iv32, ctx->iv32); |
243 | printk(KERN_DEBUG "Wrap around of iv16 in the middle of a " | 177 | printk(KERN_DEBUG "Wrap around of iv16 in the middle of a " |
244 | "fragmented packet\n"); | 178 | "fragmented packet\n"); |
245 | } | 179 | } |
246 | #endif /* CONFIG_TKIP_DEBUG */ | 180 | #endif |
247 | 181 | ||
248 | /* Update the p1k only when the iv16 in the packet wraps around, this | 182 | /* Update the p1k only when the iv16 in the packet wraps around, this |
249 | * might occur after the wrap around of iv16 in the key in case of | 183 | * might occur after the wrap around of iv16 in the key in case of |
250 | * fragmented packets. */ | 184 | * fragmented packets. */ |
251 | if (iv16 == 0 || !key->u.tkip.tx_initialized) { | 185 | if (iv16 == 0 || !ctx->initialized) |
252 | /* IV16 wrapped around - perform TKIP phase 1 */ | 186 | tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32); |
253 | tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], | ||
254 | iv32, key->u.tkip.p1k); | ||
255 | key->u.tkip.tx_initialized = 1; | ||
256 | } | ||
257 | 187 | ||
258 | if (type == IEEE80211_TKIP_P1_KEY) { | 188 | if (type == IEEE80211_TKIP_P1_KEY) { |
259 | memcpy(outkey, key->u.tkip.p1k, sizeof(u16) * 5); | 189 | memcpy(outkey, ctx->p1k, sizeof(u16) * 5); |
260 | return; | 190 | return; |
261 | } | 191 | } |
262 | 192 | ||
263 | tkip_mixing_phase2(key->u.tkip.p1k, | 193 | tkip_mixing_phase2(tk, ctx, iv16, outkey); |
264 | &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], iv16, outkey); | ||
265 | } | 194 | } |
266 | EXPORT_SYMBOL(ieee80211_get_tkip_key); | 195 | EXPORT_SYMBOL(ieee80211_get_tkip_key); |
267 | 196 | ||
@@ -275,13 +204,19 @@ void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, | |||
275 | u8 *pos, size_t payload_len, u8 *ta) | 204 | u8 *pos, size_t payload_len, u8 *ta) |
276 | { | 205 | { |
277 | u8 rc4key[16]; | 206 | u8 rc4key[16]; |
207 | struct tkip_ctx *ctx = &key->u.tkip.tx; | ||
208 | const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; | ||
209 | |||
210 | /* Calculate per-packet key */ | ||
211 | if (ctx->iv16 == 0 || !ctx->initialized) | ||
212 | tkip_mixing_phase1(tk, ctx, ta, ctx->iv32); | ||
213 | |||
214 | tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key); | ||
278 | 215 | ||
279 | ieee80211_tkip_gen_rc4key(key, ta, rc4key); | 216 | pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16); |
280 | pos = ieee80211_tkip_add_iv(pos, key, rc4key[0], rc4key[1], rc4key[2]); | ||
281 | ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len); | 217 | ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len); |
282 | } | 218 | } |
283 | 219 | ||
284 | |||
285 | /* Decrypt packet payload with TKIP using @key. @pos is a pointer to the | 220 | /* Decrypt packet payload with TKIP using @key. @pos is a pointer to the |
286 | * beginning of the buffer containing IEEE 802.11 header payload, i.e., | 221 | * beginning of the buffer containing IEEE 802.11 header payload, i.e., |
287 | * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the | 222 | * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the |
@@ -296,15 +231,16 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, | |||
296 | u32 iv16; | 231 | u32 iv16; |
297 | u8 rc4key[16], keyid, *pos = payload; | 232 | u8 rc4key[16], keyid, *pos = payload; |
298 | int res; | 233 | int res; |
234 | const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; | ||
299 | 235 | ||
300 | if (payload_len < 12) | 236 | if (payload_len < 12) |
301 | return -1; | 237 | return -1; |
302 | 238 | ||
303 | iv16 = (pos[0] << 8) | pos[2]; | 239 | iv16 = (pos[0] << 8) | pos[2]; |
304 | keyid = pos[3]; | 240 | keyid = pos[3]; |
305 | iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24); | 241 | iv32 = get_unaligned_le32(pos + 4); |
306 | pos += 8; | 242 | pos += 8; |
307 | #ifdef CONFIG_TKIP_DEBUG | 243 | #ifdef CONFIG_MAC80211_TKIP_DEBUG |
308 | { | 244 | { |
309 | int i; | 245 | int i; |
310 | printk(KERN_DEBUG "TKIP decrypt: data(len=%zd)", payload_len); | 246 | printk(KERN_DEBUG "TKIP decrypt: data(len=%zd)", payload_len); |
@@ -314,7 +250,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, | |||
314 | printk(KERN_DEBUG "TKIP decrypt: iv16=%04x iv32=%08x\n", | 250 | printk(KERN_DEBUG "TKIP decrypt: iv16=%04x iv32=%08x\n", |
315 | iv16, iv32); | 251 | iv16, iv32); |
316 | } | 252 | } |
317 | #endif /* CONFIG_TKIP_DEBUG */ | 253 | #endif |
318 | 254 | ||
319 | if (!(keyid & (1 << 5))) | 255 | if (!(keyid & (1 << 5))) |
320 | return TKIP_DECRYPT_NO_EXT_IV; | 256 | return TKIP_DECRYPT_NO_EXT_IV; |
@@ -322,50 +258,48 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, | |||
322 | if ((keyid >> 6) != key->conf.keyidx) | 258 | if ((keyid >> 6) != key->conf.keyidx) |
323 | return TKIP_DECRYPT_INVALID_KEYIDX; | 259 | return TKIP_DECRYPT_INVALID_KEYIDX; |
324 | 260 | ||
325 | if (key->u.tkip.rx_initialized[queue] && | 261 | if (key->u.tkip.rx[queue].initialized && |
326 | (iv32 < key->u.tkip.iv32_rx[queue] || | 262 | (iv32 < key->u.tkip.rx[queue].iv32 || |
327 | (iv32 == key->u.tkip.iv32_rx[queue] && | 263 | (iv32 == key->u.tkip.rx[queue].iv32 && |
328 | iv16 <= key->u.tkip.iv16_rx[queue]))) { | 264 | iv16 <= key->u.tkip.rx[queue].iv16))) { |
329 | #ifdef CONFIG_TKIP_DEBUG | 265 | #ifdef CONFIG_MAC80211_TKIP_DEBUG |
330 | DECLARE_MAC_BUF(mac); | 266 | DECLARE_MAC_BUF(mac); |
331 | printk(KERN_DEBUG "TKIP replay detected for RX frame from " | 267 | printk(KERN_DEBUG "TKIP replay detected for RX frame from " |
332 | "%s (RX IV (%04x,%02x) <= prev. IV (%04x,%02x)\n", | 268 | "%s (RX IV (%04x,%02x) <= prev. IV (%04x,%02x)\n", |
333 | print_mac(mac, ta), | 269 | print_mac(mac, ta), |
334 | iv32, iv16, key->u.tkip.iv32_rx[queue], | 270 | iv32, iv16, key->u.tkip.rx[queue].iv32, |
335 | key->u.tkip.iv16_rx[queue]); | 271 | key->u.tkip.rx[queue].iv16); |
336 | #endif /* CONFIG_TKIP_DEBUG */ | 272 | #endif |
337 | return TKIP_DECRYPT_REPLAY; | 273 | return TKIP_DECRYPT_REPLAY; |
338 | } | 274 | } |
339 | 275 | ||
340 | if (only_iv) { | 276 | if (only_iv) { |
341 | res = TKIP_DECRYPT_OK; | 277 | res = TKIP_DECRYPT_OK; |
342 | key->u.tkip.rx_initialized[queue] = 1; | 278 | key->u.tkip.rx[queue].initialized = 1; |
343 | goto done; | 279 | goto done; |
344 | } | 280 | } |
345 | 281 | ||
346 | if (!key->u.tkip.rx_initialized[queue] || | 282 | if (!key->u.tkip.rx[queue].initialized || |
347 | key->u.tkip.iv32_rx[queue] != iv32) { | 283 | key->u.tkip.rx[queue].iv32 != iv32) { |
348 | key->u.tkip.rx_initialized[queue] = 1; | ||
349 | /* IV16 wrapped around - perform TKIP phase 1 */ | 284 | /* IV16 wrapped around - perform TKIP phase 1 */ |
350 | tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], | 285 | tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32); |
351 | iv32, key->u.tkip.p1k_rx[queue]); | 286 | #ifdef CONFIG_MAC80211_TKIP_DEBUG |
352 | #ifdef CONFIG_TKIP_DEBUG | ||
353 | { | 287 | { |
354 | int i; | 288 | int i; |
289 | u8 key_offset = NL80211_TKIP_DATA_OFFSET_ENCR_KEY; | ||
355 | DECLARE_MAC_BUF(mac); | 290 | DECLARE_MAC_BUF(mac); |
356 | printk(KERN_DEBUG "TKIP decrypt: Phase1 TA=%s" | 291 | printk(KERN_DEBUG "TKIP decrypt: Phase1 TA=%s" |
357 | " TK=", print_mac(mac, ta)); | 292 | " TK=", print_mac(mac, ta)); |
358 | for (i = 0; i < 16; i++) | 293 | for (i = 0; i < 16; i++) |
359 | printk("%02x ", | 294 | printk("%02x ", |
360 | key->conf.key[ | 295 | key->conf.key[key_offset + i]); |
361 | ALG_TKIP_TEMP_ENCR_KEY + i]); | ||
362 | printk("\n"); | 296 | printk("\n"); |
363 | printk(KERN_DEBUG "TKIP decrypt: P1K="); | 297 | printk(KERN_DEBUG "TKIP decrypt: P1K="); |
364 | for (i = 0; i < 5; i++) | 298 | for (i = 0; i < 5; i++) |
365 | printk("%04x ", key->u.tkip.p1k_rx[queue][i]); | 299 | printk("%04x ", key->u.tkip.rx[queue].p1k[i]); |
366 | printk("\n"); | 300 | printk("\n"); |
367 | } | 301 | } |
368 | #endif /* CONFIG_TKIP_DEBUG */ | 302 | #endif |
369 | if (key->local->ops->update_tkip_key && | 303 | if (key->local->ops->update_tkip_key && |
370 | key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { | 304 | key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { |
371 | u8 bcast[ETH_ALEN] = | 305 | u8 bcast[ETH_ALEN] = |
@@ -377,14 +311,12 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, | |||
377 | 311 | ||
378 | key->local->ops->update_tkip_key( | 312 | key->local->ops->update_tkip_key( |
379 | local_to_hw(key->local), &key->conf, | 313 | local_to_hw(key->local), &key->conf, |
380 | sta_addr, iv32, key->u.tkip.p1k_rx[queue]); | 314 | sta_addr, iv32, key->u.tkip.rx[queue].p1k); |
381 | } | 315 | } |
382 | } | 316 | } |
383 | 317 | ||
384 | tkip_mixing_phase2(key->u.tkip.p1k_rx[queue], | 318 | tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key); |
385 | &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], | 319 | #ifdef CONFIG_MAC80211_TKIP_DEBUG |
386 | iv16, rc4key); | ||
387 | #ifdef CONFIG_TKIP_DEBUG | ||
388 | { | 320 | { |
389 | int i; | 321 | int i; |
390 | printk(KERN_DEBUG "TKIP decrypt: Phase2 rc4key="); | 322 | printk(KERN_DEBUG "TKIP decrypt: Phase2 rc4key="); |
@@ -392,7 +324,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, | |||
392 | printk("%02x ", rc4key[i]); | 324 | printk("%02x ", rc4key[i]); |
393 | printk("\n"); | 325 | printk("\n"); |
394 | } | 326 | } |
395 | #endif /* CONFIG_TKIP_DEBUG */ | 327 | #endif |
396 | 328 | ||
397 | res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12); | 329 | res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12); |
398 | done: | 330 | done: |
@@ -409,5 +341,3 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, | |||
409 | 341 | ||
410 | return res; | 342 | return res; |
411 | } | 343 | } |
412 | |||
413 | |||
diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h index b7c2ee763d9d..d4714383f5fc 100644 --- a/net/mac80211/tkip.h +++ b/net/mac80211/tkip.h | |||
@@ -13,12 +13,8 @@ | |||
13 | #include <linux/crypto.h> | 13 | #include <linux/crypto.h> |
14 | #include "key.h" | 14 | #include "key.h" |
15 | 15 | ||
16 | u8 * ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, | 16 | u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16); |
17 | u8 iv0, u8 iv1, u8 iv2); | 17 | |
18 | void ieee80211_tkip_gen_phase1key(struct ieee80211_key *key, u8 *ta, | ||
19 | u16 *phase1key); | ||
20 | void ieee80211_tkip_gen_rc4key(struct ieee80211_key *key, u8 *ta, | ||
21 | u8 *rc4key); | ||
22 | void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, | 18 | void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, |
23 | struct ieee80211_key *key, | 19 | struct ieee80211_key *key, |
24 | u8 *pos, size_t payload_len, u8 *ta); | 20 | u8 *pos, size_t payload_len, u8 *ta); |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index c80d5899f279..0fbadd8b983c 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -38,23 +38,12 @@ | |||
38 | 38 | ||
39 | /* misc utils */ | 39 | /* misc utils */ |
40 | 40 | ||
41 | static inline void ieee80211_include_sequence(struct ieee80211_sub_if_data *sdata, | ||
42 | struct ieee80211_hdr *hdr) | ||
43 | { | ||
44 | /* Set the sequence number for this frame. */ | ||
45 | hdr->seq_ctrl = cpu_to_le16(sdata->sequence); | ||
46 | |||
47 | /* Increase the sequence number. */ | ||
48 | sdata->sequence = (sdata->sequence + 0x10) & IEEE80211_SCTL_SEQ; | ||
49 | } | ||
50 | |||
51 | #ifdef CONFIG_MAC80211_LOWTX_FRAME_DUMP | 41 | #ifdef CONFIG_MAC80211_LOWTX_FRAME_DUMP |
52 | static void ieee80211_dump_frame(const char *ifname, const char *title, | 42 | static void ieee80211_dump_frame(const char *ifname, const char *title, |
53 | const struct sk_buff *skb) | 43 | const struct sk_buff *skb) |
54 | { | 44 | { |
55 | const struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 45 | const struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
56 | u16 fc; | 46 | unsigned int hdrlen; |
57 | int hdrlen; | ||
58 | DECLARE_MAC_BUF(mac); | 47 | DECLARE_MAC_BUF(mac); |
59 | 48 | ||
60 | printk(KERN_DEBUG "%s: %s (len=%d)", ifname, title, skb->len); | 49 | printk(KERN_DEBUG "%s: %s (len=%d)", ifname, title, skb->len); |
@@ -63,13 +52,12 @@ static void ieee80211_dump_frame(const char *ifname, const char *title, | |||
63 | return; | 52 | return; |
64 | } | 53 | } |
65 | 54 | ||
66 | fc = le16_to_cpu(hdr->frame_control); | 55 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
67 | hdrlen = ieee80211_get_hdrlen(fc); | ||
68 | if (hdrlen > skb->len) | 56 | if (hdrlen > skb->len) |
69 | hdrlen = skb->len; | 57 | hdrlen = skb->len; |
70 | if (hdrlen >= 4) | 58 | if (hdrlen >= 4) |
71 | printk(" FC=0x%04x DUR=0x%04x", | 59 | printk(" FC=0x%04x DUR=0x%04x", |
72 | fc, le16_to_cpu(hdr->duration_id)); | 60 | le16_to_cpu(hdr->frame_control), le16_to_cpu(hdr->duration_id)); |
73 | if (hdrlen >= 10) | 61 | if (hdrlen >= 10) |
74 | printk(" A1=%s", print_mac(mac, hdr->addr1)); | 62 | printk(" A1=%s", print_mac(mac, hdr->addr1)); |
75 | if (hdrlen >= 16) | 63 | if (hdrlen >= 16) |
@@ -87,15 +75,16 @@ static inline void ieee80211_dump_frame(const char *ifname, const char *title, | |||
87 | } | 75 | } |
88 | #endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */ | 76 | #endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */ |
89 | 77 | ||
90 | static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, | 78 | static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, |
91 | int next_frag_len) | 79 | int next_frag_len) |
92 | { | 80 | { |
93 | int rate, mrate, erp, dur, i; | 81 | int rate, mrate, erp, dur, i; |
94 | struct ieee80211_rate *txrate = tx->rate; | 82 | struct ieee80211_rate *txrate; |
95 | struct ieee80211_local *local = tx->local; | 83 | struct ieee80211_local *local = tx->local; |
96 | struct ieee80211_supported_band *sband; | 84 | struct ieee80211_supported_band *sband; |
97 | 85 | ||
98 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 86 | sband = local->hw.wiphy->bands[tx->channel->band]; |
87 | txrate = &sband->bitrates[tx->rate_idx]; | ||
99 | 88 | ||
100 | erp = 0; | 89 | erp = 0; |
101 | if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) | 90 | if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) |
@@ -139,7 +128,7 @@ static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, | |||
139 | 128 | ||
140 | /* data/mgmt */ | 129 | /* data/mgmt */ |
141 | if (0 /* FIX: data/mgmt during CFP */) | 130 | if (0 /* FIX: data/mgmt during CFP */) |
142 | return 32768; | 131 | return cpu_to_le16(32768); |
143 | 132 | ||
144 | if (group_addr) /* Group address as the destination - no ACK */ | 133 | if (group_addr) /* Group address as the destination - no ACK */ |
145 | return 0; | 134 | return 0; |
@@ -209,19 +198,7 @@ static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, | |||
209 | tx->sdata->bss_conf.use_short_preamble); | 198 | tx->sdata->bss_conf.use_short_preamble); |
210 | } | 199 | } |
211 | 200 | ||
212 | return dur; | 201 | return cpu_to_le16(dur); |
213 | } | ||
214 | |||
215 | static inline int __ieee80211_queue_stopped(const struct ieee80211_local *local, | ||
216 | int queue) | ||
217 | { | ||
218 | return test_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]); | ||
219 | } | ||
220 | |||
221 | static inline int __ieee80211_queue_pending(const struct ieee80211_local *local, | ||
222 | int queue) | ||
223 | { | ||
224 | return test_bit(IEEE80211_LINK_STATE_PENDING, &local->state[queue]); | ||
225 | } | 202 | } |
226 | 203 | ||
227 | static int inline is_ieee80211_device(struct net_device *dev, | 204 | static int inline is_ieee80211_device(struct net_device *dev, |
@@ -233,16 +210,16 @@ static int inline is_ieee80211_device(struct net_device *dev, | |||
233 | 210 | ||
234 | /* tx handlers */ | 211 | /* tx handlers */ |
235 | 212 | ||
236 | static ieee80211_tx_result | 213 | static ieee80211_tx_result debug_noinline |
237 | ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) | 214 | ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) |
238 | { | 215 | { |
239 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 216 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
240 | struct sk_buff *skb = tx->skb; | 217 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
241 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
242 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | 218 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ |
219 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
243 | u32 sta_flags; | 220 | u32 sta_flags; |
244 | 221 | ||
245 | if (unlikely(tx->flags & IEEE80211_TX_INJECTED)) | 222 | if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) |
246 | return TX_CONTINUE; | 223 | return TX_CONTINUE; |
247 | 224 | ||
248 | if (unlikely(tx->local->sta_sw_scanning) && | 225 | if (unlikely(tx->local->sta_sw_scanning) && |
@@ -256,7 +233,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) | |||
256 | if (tx->flags & IEEE80211_TX_PS_BUFFERED) | 233 | if (tx->flags & IEEE80211_TX_PS_BUFFERED) |
257 | return TX_CONTINUE; | 234 | return TX_CONTINUE; |
258 | 235 | ||
259 | sta_flags = tx->sta ? tx->sta->flags : 0; | 236 | sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0; |
260 | 237 | ||
261 | if (likely(tx->flags & IEEE80211_TX_UNICAST)) { | 238 | if (likely(tx->flags & IEEE80211_TX_UNICAST)) { |
262 | if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && | 239 | if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && |
@@ -287,17 +264,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) | |||
287 | return TX_CONTINUE; | 264 | return TX_CONTINUE; |
288 | } | 265 | } |
289 | 266 | ||
290 | static ieee80211_tx_result | ||
291 | ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) | ||
292 | { | ||
293 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | ||
294 | |||
295 | if (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)) >= 24) | ||
296 | ieee80211_include_sequence(tx->sdata, hdr); | ||
297 | |||
298 | return TX_CONTINUE; | ||
299 | } | ||
300 | |||
301 | /* This function is called whenever the AP is about to exceed the maximum limit | 267 | /* This function is called whenever the AP is about to exceed the maximum limit |
302 | * of buffered frames for power saving STAs. This situation should not really | 268 | * of buffered frames for power saving STAs. This situation should not really |
303 | * happen often during normal operation, so dropping the oldest buffered packet | 269 | * happen often during normal operation, so dropping the oldest buffered packet |
@@ -316,8 +282,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local) | |||
316 | 282 | ||
317 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | 283 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
318 | struct ieee80211_if_ap *ap; | 284 | struct ieee80211_if_ap *ap; |
319 | if (sdata->dev == local->mdev || | 285 | if (sdata->vif.type != IEEE80211_IF_TYPE_AP) |
320 | sdata->vif.type != IEEE80211_IF_TYPE_AP) | ||
321 | continue; | 286 | continue; |
322 | ap = &sdata->u.ap; | 287 | ap = &sdata->u.ap; |
323 | skb = skb_dequeue(&ap->ps_bc_buf); | 288 | skb = skb_dequeue(&ap->ps_bc_buf); |
@@ -340,13 +305,17 @@ static void purge_old_ps_buffers(struct ieee80211_local *local) | |||
340 | rcu_read_unlock(); | 305 | rcu_read_unlock(); |
341 | 306 | ||
342 | local->total_ps_buffered = total; | 307 | local->total_ps_buffered = total; |
308 | #ifdef MAC80211_VERBOSE_PS_DEBUG | ||
343 | printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n", | 309 | printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n", |
344 | wiphy_name(local->hw.wiphy), purged); | 310 | wiphy_name(local->hw.wiphy), purged); |
311 | #endif | ||
345 | } | 312 | } |
346 | 313 | ||
347 | static ieee80211_tx_result | 314 | static ieee80211_tx_result |
348 | ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | 315 | ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) |
349 | { | 316 | { |
317 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
318 | |||
350 | /* | 319 | /* |
351 | * broadcast/multicast frame | 320 | * broadcast/multicast frame |
352 | * | 321 | * |
@@ -355,8 +324,12 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | |||
355 | * This is done either by the hardware or us. | 324 | * This is done either by the hardware or us. |
356 | */ | 325 | */ |
357 | 326 | ||
358 | /* not AP/IBSS or ordered frame */ | 327 | /* powersaving STAs only in AP/VLAN mode */ |
359 | if (!tx->sdata->bss || (tx->fc & IEEE80211_FCTL_ORDER)) | 328 | if (!tx->sdata->bss) |
329 | return TX_CONTINUE; | ||
330 | |||
331 | /* no buffering for ordered frames */ | ||
332 | if (tx->fc & IEEE80211_FCTL_ORDER) | ||
360 | return TX_CONTINUE; | 333 | return TX_CONTINUE; |
361 | 334 | ||
362 | /* no stations in PS mode */ | 335 | /* no stations in PS mode */ |
@@ -369,11 +342,13 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | |||
369 | purge_old_ps_buffers(tx->local); | 342 | purge_old_ps_buffers(tx->local); |
370 | if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= | 343 | if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= |
371 | AP_MAX_BC_BUFFER) { | 344 | AP_MAX_BC_BUFFER) { |
345 | #ifdef MAC80211_VERBOSE_PS_DEBUG | ||
372 | if (net_ratelimit()) { | 346 | if (net_ratelimit()) { |
373 | printk(KERN_DEBUG "%s: BC TX buffer full - " | 347 | printk(KERN_DEBUG "%s: BC TX buffer full - " |
374 | "dropping the oldest frame\n", | 348 | "dropping the oldest frame\n", |
375 | tx->dev->name); | 349 | tx->dev->name); |
376 | } | 350 | } |
351 | #endif | ||
377 | dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); | 352 | dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); |
378 | } else | 353 | } else |
379 | tx->local->total_ps_buffered++; | 354 | tx->local->total_ps_buffered++; |
@@ -382,7 +357,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | |||
382 | } | 357 | } |
383 | 358 | ||
384 | /* buffered in hardware */ | 359 | /* buffered in hardware */ |
385 | tx->control->flags |= IEEE80211_TXCTL_SEND_AFTER_DTIM; | 360 | info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; |
386 | 361 | ||
387 | return TX_CONTINUE; | 362 | return TX_CONTINUE; |
388 | } | 363 | } |
@@ -391,6 +366,8 @@ static ieee80211_tx_result | |||
391 | ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | 366 | ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) |
392 | { | 367 | { |
393 | struct sta_info *sta = tx->sta; | 368 | struct sta_info *sta = tx->sta; |
369 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
370 | u32 staflags; | ||
394 | DECLARE_MAC_BUF(mac); | 371 | DECLARE_MAC_BUF(mac); |
395 | 372 | ||
396 | if (unlikely(!sta || | 373 | if (unlikely(!sta || |
@@ -398,9 +375,10 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
398 | (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP))) | 375 | (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP))) |
399 | return TX_CONTINUE; | 376 | return TX_CONTINUE; |
400 | 377 | ||
401 | if (unlikely((sta->flags & WLAN_STA_PS) && | 378 | staflags = get_sta_flags(sta); |
402 | !(sta->flags & WLAN_STA_PSPOLL))) { | 379 | |
403 | struct ieee80211_tx_packet_data *pkt_data; | 380 | if (unlikely((staflags & WLAN_STA_PS) && |
381 | !(staflags & WLAN_STA_PSPOLL))) { | ||
404 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 382 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
405 | printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " | 383 | printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " |
406 | "before %d)\n", | 384 | "before %d)\n", |
@@ -411,11 +389,13 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
411 | purge_old_ps_buffers(tx->local); | 389 | purge_old_ps_buffers(tx->local); |
412 | if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) { | 390 | if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) { |
413 | struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf); | 391 | struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf); |
392 | #ifdef MAC80211_VERBOSE_PS_DEBUG | ||
414 | if (net_ratelimit()) { | 393 | if (net_ratelimit()) { |
415 | printk(KERN_DEBUG "%s: STA %s TX " | 394 | printk(KERN_DEBUG "%s: STA %s TX " |
416 | "buffer full - dropping oldest frame\n", | 395 | "buffer full - dropping oldest frame\n", |
417 | tx->dev->name, print_mac(mac, sta->addr)); | 396 | tx->dev->name, print_mac(mac, sta->addr)); |
418 | } | 397 | } |
398 | #endif | ||
419 | dev_kfree_skb(old); | 399 | dev_kfree_skb(old); |
420 | } else | 400 | } else |
421 | tx->local->total_ps_buffered++; | 401 | tx->local->total_ps_buffered++; |
@@ -424,24 +404,23 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
424 | if (skb_queue_empty(&sta->ps_tx_buf)) | 404 | if (skb_queue_empty(&sta->ps_tx_buf)) |
425 | sta_info_set_tim_bit(sta); | 405 | sta_info_set_tim_bit(sta); |
426 | 406 | ||
427 | pkt_data = (struct ieee80211_tx_packet_data *)tx->skb->cb; | 407 | info->control.jiffies = jiffies; |
428 | pkt_data->jiffies = jiffies; | ||
429 | skb_queue_tail(&sta->ps_tx_buf, tx->skb); | 408 | skb_queue_tail(&sta->ps_tx_buf, tx->skb); |
430 | return TX_QUEUED; | 409 | return TX_QUEUED; |
431 | } | 410 | } |
432 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 411 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
433 | else if (unlikely(sta->flags & WLAN_STA_PS)) { | 412 | else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) { |
434 | printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll " | 413 | printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll " |
435 | "set -> send frame\n", tx->dev->name, | 414 | "set -> send frame\n", tx->dev->name, |
436 | print_mac(mac, sta->addr)); | 415 | print_mac(mac, sta->addr)); |
437 | } | 416 | } |
438 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 417 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
439 | sta->flags &= ~WLAN_STA_PSPOLL; | 418 | clear_sta_flags(sta, WLAN_STA_PSPOLL); |
440 | 419 | ||
441 | return TX_CONTINUE; | 420 | return TX_CONTINUE; |
442 | } | 421 | } |
443 | 422 | ||
444 | static ieee80211_tx_result | 423 | static ieee80211_tx_result debug_noinline |
445 | ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) | 424 | ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) |
446 | { | 425 | { |
447 | if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) | 426 | if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) |
@@ -453,21 +432,22 @@ ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) | |||
453 | return ieee80211_tx_h_multicast_ps_buf(tx); | 432 | return ieee80211_tx_h_multicast_ps_buf(tx); |
454 | } | 433 | } |
455 | 434 | ||
456 | static ieee80211_tx_result | 435 | static ieee80211_tx_result debug_noinline |
457 | ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) | 436 | ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) |
458 | { | 437 | { |
459 | struct ieee80211_key *key; | 438 | struct ieee80211_key *key; |
439 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
460 | u16 fc = tx->fc; | 440 | u16 fc = tx->fc; |
461 | 441 | ||
462 | if (unlikely(tx->control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) | 442 | if (unlikely(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT)) |
463 | tx->key = NULL; | 443 | tx->key = NULL; |
464 | else if (tx->sta && (key = rcu_dereference(tx->sta->key))) | 444 | else if (tx->sta && (key = rcu_dereference(tx->sta->key))) |
465 | tx->key = key; | 445 | tx->key = key; |
466 | else if ((key = rcu_dereference(tx->sdata->default_key))) | 446 | else if ((key = rcu_dereference(tx->sdata->default_key))) |
467 | tx->key = key; | 447 | tx->key = key; |
468 | else if (tx->sdata->drop_unencrypted && | 448 | else if (tx->sdata->drop_unencrypted && |
469 | !(tx->control->flags & IEEE80211_TXCTL_EAPOL_FRAME) && | 449 | !(info->flags & IEEE80211_TX_CTL_EAPOL_FRAME) && |
470 | !(tx->flags & IEEE80211_TX_INJECTED)) { | 450 | !(info->flags & IEEE80211_TX_CTL_INJECTED)) { |
471 | I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); | 451 | I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); |
472 | return TX_DROP; | 452 | return TX_DROP; |
473 | } else | 453 | } else |
@@ -496,15 +476,197 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) | |||
496 | } | 476 | } |
497 | 477 | ||
498 | if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) | 478 | if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) |
499 | tx->control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; | 479 | info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT; |
500 | 480 | ||
501 | return TX_CONTINUE; | 481 | return TX_CONTINUE; |
502 | } | 482 | } |
503 | 483 | ||
504 | static ieee80211_tx_result | 484 | static ieee80211_tx_result debug_noinline |
485 | ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) | ||
486 | { | ||
487 | struct rate_selection rsel; | ||
488 | struct ieee80211_supported_band *sband; | ||
489 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
490 | |||
491 | sband = tx->local->hw.wiphy->bands[tx->channel->band]; | ||
492 | |||
493 | if (likely(tx->rate_idx < 0)) { | ||
494 | rate_control_get_rate(tx->dev, sband, tx->skb, &rsel); | ||
495 | tx->rate_idx = rsel.rate_idx; | ||
496 | if (unlikely(rsel.probe_idx >= 0)) { | ||
497 | info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; | ||
498 | tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG; | ||
499 | info->control.alt_retry_rate_idx = tx->rate_idx; | ||
500 | tx->rate_idx = rsel.probe_idx; | ||
501 | } else | ||
502 | info->control.alt_retry_rate_idx = -1; | ||
503 | |||
504 | if (unlikely(tx->rate_idx < 0)) | ||
505 | return TX_DROP; | ||
506 | } else | ||
507 | info->control.alt_retry_rate_idx = -1; | ||
508 | |||
509 | if (tx->sdata->bss_conf.use_cts_prot && | ||
510 | (tx->flags & IEEE80211_TX_FRAGMENTED) && (rsel.nonerp_idx >= 0)) { | ||
511 | tx->last_frag_rate_idx = tx->rate_idx; | ||
512 | if (rsel.probe_idx >= 0) | ||
513 | tx->flags &= ~IEEE80211_TX_PROBE_LAST_FRAG; | ||
514 | else | ||
515 | tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG; | ||
516 | tx->rate_idx = rsel.nonerp_idx; | ||
517 | info->tx_rate_idx = rsel.nonerp_idx; | ||
518 | info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; | ||
519 | } else { | ||
520 | tx->last_frag_rate_idx = tx->rate_idx; | ||
521 | info->tx_rate_idx = tx->rate_idx; | ||
522 | } | ||
523 | info->tx_rate_idx = tx->rate_idx; | ||
524 | |||
525 | return TX_CONTINUE; | ||
526 | } | ||
527 | |||
528 | static ieee80211_tx_result debug_noinline | ||
529 | ieee80211_tx_h_misc(struct ieee80211_tx_data *tx) | ||
530 | { | ||
531 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | ||
532 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
533 | struct ieee80211_supported_band *sband; | ||
534 | |||
535 | sband = tx->local->hw.wiphy->bands[tx->channel->band]; | ||
536 | |||
537 | if (tx->sta) | ||
538 | info->control.aid = tx->sta->aid; | ||
539 | |||
540 | if (!info->control.retry_limit) { | ||
541 | if (!is_multicast_ether_addr(hdr->addr1)) { | ||
542 | int len = min_t(int, tx->skb->len + FCS_LEN, | ||
543 | tx->local->fragmentation_threshold); | ||
544 | if (len > tx->local->rts_threshold | ||
545 | && tx->local->rts_threshold < | ||
546 | IEEE80211_MAX_RTS_THRESHOLD) { | ||
547 | info->flags |= IEEE80211_TX_CTL_USE_RTS_CTS; | ||
548 | info->flags |= | ||
549 | IEEE80211_TX_CTL_LONG_RETRY_LIMIT; | ||
550 | info->control.retry_limit = | ||
551 | tx->local->long_retry_limit; | ||
552 | } else { | ||
553 | info->control.retry_limit = | ||
554 | tx->local->short_retry_limit; | ||
555 | } | ||
556 | } else { | ||
557 | info->control.retry_limit = 1; | ||
558 | } | ||
559 | } | ||
560 | |||
561 | if (tx->flags & IEEE80211_TX_FRAGMENTED) { | ||
562 | /* Do not use multiple retry rates when sending fragmented | ||
563 | * frames. | ||
564 | * TODO: The last fragment could still use multiple retry | ||
565 | * rates. */ | ||
566 | info->control.alt_retry_rate_idx = -1; | ||
567 | } | ||
568 | |||
569 | /* Use CTS protection for unicast frames sent using extended rates if | ||
570 | * there are associated non-ERP stations and RTS/CTS is not configured | ||
571 | * for the frame. */ | ||
572 | if ((tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) && | ||
573 | (sband->bitrates[tx->rate_idx].flags & IEEE80211_RATE_ERP_G) && | ||
574 | (tx->flags & IEEE80211_TX_UNICAST) && | ||
575 | tx->sdata->bss_conf.use_cts_prot && | ||
576 | !(info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)) | ||
577 | info->flags |= IEEE80211_TX_CTL_USE_CTS_PROTECT; | ||
578 | |||
579 | /* Transmit data frames using short preambles if the driver supports | ||
580 | * short preambles at the selected rate and short preambles are | ||
581 | * available on the network at the current point in time. */ | ||
582 | if (ieee80211_is_data(hdr->frame_control) && | ||
583 | (sband->bitrates[tx->rate_idx].flags & IEEE80211_RATE_SHORT_PREAMBLE) && | ||
584 | tx->sdata->bss_conf.use_short_preamble && | ||
585 | (!tx->sta || test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE))) { | ||
586 | info->flags |= IEEE80211_TX_CTL_SHORT_PREAMBLE; | ||
587 | } | ||
588 | |||
589 | if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) || | ||
590 | (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)) { | ||
591 | struct ieee80211_rate *rate; | ||
592 | s8 baserate = -1; | ||
593 | int idx; | ||
594 | |||
595 | /* Do not use multiple retry rates when using RTS/CTS */ | ||
596 | info->control.alt_retry_rate_idx = -1; | ||
597 | |||
598 | /* Use min(data rate, max base rate) as CTS/RTS rate */ | ||
599 | rate = &sband->bitrates[tx->rate_idx]; | ||
600 | |||
601 | for (idx = 0; idx < sband->n_bitrates; idx++) { | ||
602 | if (sband->bitrates[idx].bitrate > rate->bitrate) | ||
603 | continue; | ||
604 | if (tx->sdata->basic_rates & BIT(idx) && | ||
605 | (baserate < 0 || | ||
606 | (sband->bitrates[baserate].bitrate | ||
607 | < sband->bitrates[idx].bitrate))) | ||
608 | baserate = idx; | ||
609 | } | ||
610 | |||
611 | if (baserate >= 0) | ||
612 | info->control.rts_cts_rate_idx = baserate; | ||
613 | else | ||
614 | info->control.rts_cts_rate_idx = 0; | ||
615 | } | ||
616 | |||
617 | if (tx->sta) | ||
618 | info->control.aid = tx->sta->aid; | ||
619 | |||
620 | return TX_CONTINUE; | ||
621 | } | ||
622 | |||
623 | static ieee80211_tx_result debug_noinline | ||
624 | ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) | ||
625 | { | ||
626 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
627 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | ||
628 | u16 *seq; | ||
629 | u8 *qc; | ||
630 | int tid; | ||
631 | |||
632 | /* only for injected frames */ | ||
633 | if (unlikely(ieee80211_is_ctl(hdr->frame_control))) | ||
634 | return TX_CONTINUE; | ||
635 | |||
636 | if (ieee80211_hdrlen(hdr->frame_control) < 24) | ||
637 | return TX_CONTINUE; | ||
638 | |||
639 | if (!ieee80211_is_data_qos(hdr->frame_control)) { | ||
640 | info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; | ||
641 | return TX_CONTINUE; | ||
642 | } | ||
643 | |||
644 | /* | ||
645 | * This should be true for injected/management frames only, for | ||
646 | * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ | ||
647 | * above since they are not QoS-data frames. | ||
648 | */ | ||
649 | if (!tx->sta) | ||
650 | return TX_CONTINUE; | ||
651 | |||
652 | /* include per-STA, per-TID sequence counter */ | ||
653 | |||
654 | qc = ieee80211_get_qos_ctl(hdr); | ||
655 | tid = *qc & IEEE80211_QOS_CTL_TID_MASK; | ||
656 | seq = &tx->sta->tid_seq[tid]; | ||
657 | |||
658 | hdr->seq_ctrl = cpu_to_le16(*seq); | ||
659 | |||
660 | /* Increase the sequence number. */ | ||
661 | *seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ; | ||
662 | |||
663 | return TX_CONTINUE; | ||
664 | } | ||
665 | |||
666 | static ieee80211_tx_result debug_noinline | ||
505 | ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) | 667 | ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) |
506 | { | 668 | { |
507 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; | 669 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
508 | size_t hdrlen, per_fragm, num_fragm, payload_len, left; | 670 | size_t hdrlen, per_fragm, num_fragm, payload_len, left; |
509 | struct sk_buff **frags, *first, *frag; | 671 | struct sk_buff **frags, *first, *frag; |
510 | int i; | 672 | int i; |
@@ -515,9 +677,19 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) | |||
515 | if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) | 677 | if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) |
516 | return TX_CONTINUE; | 678 | return TX_CONTINUE; |
517 | 679 | ||
680 | /* | ||
681 | * Warn when submitting a fragmented A-MPDU frame and drop it. | ||
682 | * This scenario is handled in __ieee80211_tx_prepare but extra | ||
683 | * caution taken here as fragmented ampdu may cause Tx stop. | ||
684 | */ | ||
685 | if (WARN_ON(tx->flags & IEEE80211_TX_CTL_AMPDU || | ||
686 | skb_get_queue_mapping(tx->skb) >= | ||
687 | ieee80211_num_regular_queues(&tx->local->hw))) | ||
688 | return TX_DROP; | ||
689 | |||
518 | first = tx->skb; | 690 | first = tx->skb; |
519 | 691 | ||
520 | hdrlen = ieee80211_get_hdrlen(tx->fc); | 692 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
521 | payload_len = first->len - hdrlen; | 693 | payload_len = first->len - hdrlen; |
522 | per_fragm = frag_threshold - hdrlen - FCS_LEN; | 694 | per_fragm = frag_threshold - hdrlen - FCS_LEN; |
523 | num_fragm = DIV_ROUND_UP(payload_len, per_fragm); | 695 | num_fragm = DIV_ROUND_UP(payload_len, per_fragm); |
@@ -558,6 +730,8 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) | |||
558 | fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG)); | 730 | fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG)); |
559 | copylen = left > per_fragm ? per_fragm : left; | 731 | copylen = left > per_fragm ? per_fragm : left; |
560 | memcpy(skb_put(frag, copylen), pos, copylen); | 732 | memcpy(skb_put(frag, copylen), pos, copylen); |
733 | memcpy(frag->cb, first->cb, sizeof(frag->cb)); | ||
734 | skb_copy_queue_mapping(frag, first); | ||
561 | 735 | ||
562 | pos += copylen; | 736 | pos += copylen; |
563 | left -= copylen; | 737 | left -= copylen; |
@@ -570,7 +744,6 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) | |||
570 | return TX_CONTINUE; | 744 | return TX_CONTINUE; |
571 | 745 | ||
572 | fail: | 746 | fail: |
573 | printk(KERN_DEBUG "%s: failed to fragment frame\n", tx->dev->name); | ||
574 | if (frags) { | 747 | if (frags) { |
575 | for (i = 0; i < num_fragm - 1; i++) | 748 | for (i = 0; i < num_fragm - 1; i++) |
576 | if (frags[i]) | 749 | if (frags[i]) |
@@ -581,7 +754,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) | |||
581 | return TX_DROP; | 754 | return TX_DROP; |
582 | } | 755 | } |
583 | 756 | ||
584 | static ieee80211_tx_result | 757 | static ieee80211_tx_result debug_noinline |
585 | ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) | 758 | ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) |
586 | { | 759 | { |
587 | if (!tx->key) | 760 | if (!tx->key) |
@@ -601,236 +774,57 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) | |||
601 | return TX_DROP; | 774 | return TX_DROP; |
602 | } | 775 | } |
603 | 776 | ||
604 | static ieee80211_tx_result | 777 | static ieee80211_tx_result debug_noinline |
605 | ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) | 778 | ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx) |
606 | { | 779 | { |
607 | struct rate_selection rsel; | 780 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
608 | struct ieee80211_supported_band *sband; | 781 | int next_len, i; |
609 | 782 | int group_addr = is_multicast_ether_addr(hdr->addr1); | |
610 | sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band]; | ||
611 | |||
612 | if (likely(!tx->rate)) { | ||
613 | rate_control_get_rate(tx->dev, sband, tx->skb, &rsel); | ||
614 | tx->rate = rsel.rate; | ||
615 | if (unlikely(rsel.probe)) { | ||
616 | tx->control->flags |= | ||
617 | IEEE80211_TXCTL_RATE_CTRL_PROBE; | ||
618 | tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG; | ||
619 | tx->control->alt_retry_rate = tx->rate; | ||
620 | tx->rate = rsel.probe; | ||
621 | } else | ||
622 | tx->control->alt_retry_rate = NULL; | ||
623 | |||
624 | if (!tx->rate) | ||
625 | return TX_DROP; | ||
626 | } else | ||
627 | tx->control->alt_retry_rate = NULL; | ||
628 | 783 | ||
629 | if (tx->sdata->bss_conf.use_cts_prot && | 784 | if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) { |
630 | (tx->flags & IEEE80211_TX_FRAGMENTED) && rsel.nonerp) { | 785 | hdr->duration_id = ieee80211_duration(tx, group_addr, 0); |
631 | tx->last_frag_rate = tx->rate; | 786 | return TX_CONTINUE; |
632 | if (rsel.probe) | ||
633 | tx->flags &= ~IEEE80211_TX_PROBE_LAST_FRAG; | ||
634 | else | ||
635 | tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG; | ||
636 | tx->rate = rsel.nonerp; | ||
637 | tx->control->tx_rate = rsel.nonerp; | ||
638 | tx->control->flags &= ~IEEE80211_TXCTL_RATE_CTRL_PROBE; | ||
639 | } else { | ||
640 | tx->last_frag_rate = tx->rate; | ||
641 | tx->control->tx_rate = tx->rate; | ||
642 | } | 787 | } |
643 | tx->control->tx_rate = tx->rate; | ||
644 | |||
645 | return TX_CONTINUE; | ||
646 | } | ||
647 | 788 | ||
648 | static ieee80211_tx_result | 789 | hdr->duration_id = ieee80211_duration(tx, group_addr, |
649 | ieee80211_tx_h_misc(struct ieee80211_tx_data *tx) | 790 | tx->extra_frag[0]->len); |
650 | { | ||
651 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; | ||
652 | u16 fc = le16_to_cpu(hdr->frame_control); | ||
653 | u16 dur; | ||
654 | struct ieee80211_tx_control *control = tx->control; | ||
655 | 791 | ||
656 | if (!control->retry_limit) { | 792 | for (i = 0; i < tx->num_extra_frag; i++) { |
657 | if (!is_multicast_ether_addr(hdr->addr1)) { | 793 | if (i + 1 < tx->num_extra_frag) { |
658 | if (tx->skb->len + FCS_LEN > tx->local->rts_threshold | 794 | next_len = tx->extra_frag[i + 1]->len; |
659 | && tx->local->rts_threshold < | ||
660 | IEEE80211_MAX_RTS_THRESHOLD) { | ||
661 | control->flags |= | ||
662 | IEEE80211_TXCTL_USE_RTS_CTS; | ||
663 | control->flags |= | ||
664 | IEEE80211_TXCTL_LONG_RETRY_LIMIT; | ||
665 | control->retry_limit = | ||
666 | tx->local->long_retry_limit; | ||
667 | } else { | ||
668 | control->retry_limit = | ||
669 | tx->local->short_retry_limit; | ||
670 | } | ||
671 | } else { | 795 | } else { |
672 | control->retry_limit = 1; | 796 | next_len = 0; |
673 | } | 797 | tx->rate_idx = tx->last_frag_rate_idx; |
674 | } | ||
675 | |||
676 | if (tx->flags & IEEE80211_TX_FRAGMENTED) { | ||
677 | /* Do not use multiple retry rates when sending fragmented | ||
678 | * frames. | ||
679 | * TODO: The last fragment could still use multiple retry | ||
680 | * rates. */ | ||
681 | control->alt_retry_rate = NULL; | ||
682 | } | ||
683 | |||
684 | /* Use CTS protection for unicast frames sent using extended rates if | ||
685 | * there are associated non-ERP stations and RTS/CTS is not configured | ||
686 | * for the frame. */ | ||
687 | if ((tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) && | ||
688 | (tx->rate->flags & IEEE80211_RATE_ERP_G) && | ||
689 | (tx->flags & IEEE80211_TX_UNICAST) && | ||
690 | tx->sdata->bss_conf.use_cts_prot && | ||
691 | !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS)) | ||
692 | control->flags |= IEEE80211_TXCTL_USE_CTS_PROTECT; | ||
693 | |||
694 | /* Transmit data frames using short preambles if the driver supports | ||
695 | * short preambles at the selected rate and short preambles are | ||
696 | * available on the network at the current point in time. */ | ||
697 | if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && | ||
698 | (tx->rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) && | ||
699 | tx->sdata->bss_conf.use_short_preamble && | ||
700 | (!tx->sta || (tx->sta->flags & WLAN_STA_SHORT_PREAMBLE))) { | ||
701 | tx->control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE; | ||
702 | } | ||
703 | |||
704 | /* Setup duration field for the first fragment of the frame. Duration | ||
705 | * for remaining fragments will be updated when they are being sent | ||
706 | * to low-level driver in ieee80211_tx(). */ | ||
707 | dur = ieee80211_duration(tx, is_multicast_ether_addr(hdr->addr1), | ||
708 | (tx->flags & IEEE80211_TX_FRAGMENTED) ? | ||
709 | tx->extra_frag[0]->len : 0); | ||
710 | hdr->duration_id = cpu_to_le16(dur); | ||
711 | |||
712 | if ((control->flags & IEEE80211_TXCTL_USE_RTS_CTS) || | ||
713 | (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { | ||
714 | struct ieee80211_supported_band *sband; | ||
715 | struct ieee80211_rate *rate, *baserate; | ||
716 | int idx; | ||
717 | |||
718 | sband = tx->local->hw.wiphy->bands[ | ||
719 | tx->local->hw.conf.channel->band]; | ||
720 | |||
721 | /* Do not use multiple retry rates when using RTS/CTS */ | ||
722 | control->alt_retry_rate = NULL; | ||
723 | |||
724 | /* Use min(data rate, max base rate) as CTS/RTS rate */ | ||
725 | rate = tx->rate; | ||
726 | baserate = NULL; | ||
727 | |||
728 | for (idx = 0; idx < sband->n_bitrates; idx++) { | ||
729 | if (sband->bitrates[idx].bitrate > rate->bitrate) | ||
730 | continue; | ||
731 | if (tx->sdata->basic_rates & BIT(idx) && | ||
732 | (!baserate || | ||
733 | (baserate->bitrate < sband->bitrates[idx].bitrate))) | ||
734 | baserate = &sband->bitrates[idx]; | ||
735 | } | 798 | } |
736 | 799 | ||
737 | if (baserate) | 800 | hdr = (struct ieee80211_hdr *)tx->extra_frag[i]->data; |
738 | control->rts_cts_rate = baserate; | 801 | hdr->duration_id = ieee80211_duration(tx, 0, next_len); |
739 | else | ||
740 | control->rts_cts_rate = &sband->bitrates[0]; | ||
741 | } | ||
742 | |||
743 | if (tx->sta) { | ||
744 | control->aid = tx->sta->aid; | ||
745 | tx->sta->tx_packets++; | ||
746 | tx->sta->tx_fragments++; | ||
747 | tx->sta->tx_bytes += tx->skb->len; | ||
748 | if (tx->extra_frag) { | ||
749 | int i; | ||
750 | tx->sta->tx_fragments += tx->num_extra_frag; | ||
751 | for (i = 0; i < tx->num_extra_frag; i++) { | ||
752 | tx->sta->tx_bytes += | ||
753 | tx->extra_frag[i]->len; | ||
754 | } | ||
755 | } | ||
756 | } | 802 | } |
757 | 803 | ||
758 | return TX_CONTINUE; | 804 | return TX_CONTINUE; |
759 | } | 805 | } |
760 | 806 | ||
761 | static ieee80211_tx_result | 807 | static ieee80211_tx_result debug_noinline |
762 | ieee80211_tx_h_load_stats(struct ieee80211_tx_data *tx) | 808 | ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) |
763 | { | 809 | { |
764 | struct ieee80211_local *local = tx->local; | 810 | int i; |
765 | struct sk_buff *skb = tx->skb; | ||
766 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
767 | u32 load = 0, hdrtime; | ||
768 | struct ieee80211_rate *rate = tx->rate; | ||
769 | |||
770 | /* TODO: this could be part of tx_status handling, so that the number | ||
771 | * of retries would be known; TX rate should in that case be stored | ||
772 | * somewhere with the packet */ | ||
773 | |||
774 | /* Estimate total channel use caused by this frame */ | ||
775 | |||
776 | /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values, | ||
777 | * 1 usec = 1/8 * (1080 / 10) = 13.5 */ | ||
778 | |||
779 | if (tx->channel->band == IEEE80211_BAND_5GHZ || | ||
780 | (tx->channel->band == IEEE80211_BAND_2GHZ && | ||
781 | rate->flags & IEEE80211_RATE_ERP_G)) | ||
782 | hdrtime = CHAN_UTIL_HDR_SHORT; | ||
783 | else | ||
784 | hdrtime = CHAN_UTIL_HDR_LONG; | ||
785 | |||
786 | load = hdrtime; | ||
787 | if (!is_multicast_ether_addr(hdr->addr1)) | ||
788 | load += hdrtime; | ||
789 | |||
790 | if (tx->control->flags & IEEE80211_TXCTL_USE_RTS_CTS) | ||
791 | load += 2 * hdrtime; | ||
792 | else if (tx->control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) | ||
793 | load += hdrtime; | ||
794 | 811 | ||
795 | /* TODO: optimise again */ | 812 | if (!tx->sta) |
796 | load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate; | 813 | return TX_CONTINUE; |
797 | 814 | ||
815 | tx->sta->tx_packets++; | ||
816 | tx->sta->tx_fragments++; | ||
817 | tx->sta->tx_bytes += tx->skb->len; | ||
798 | if (tx->extra_frag) { | 818 | if (tx->extra_frag) { |
799 | int i; | 819 | tx->sta->tx_fragments += tx->num_extra_frag; |
800 | for (i = 0; i < tx->num_extra_frag; i++) { | 820 | for (i = 0; i < tx->num_extra_frag; i++) |
801 | load += 2 * hdrtime; | 821 | tx->sta->tx_bytes += tx->extra_frag[i]->len; |
802 | load += tx->extra_frag[i]->len * | ||
803 | tx->rate->bitrate; | ||
804 | } | ||
805 | } | 822 | } |
806 | 823 | ||
807 | /* Divide channel_use by 8 to avoid wrapping around the counter */ | ||
808 | load >>= CHAN_UTIL_SHIFT; | ||
809 | local->channel_use_raw += load; | ||
810 | if (tx->sta) | ||
811 | tx->sta->channel_use_raw += load; | ||
812 | tx->sdata->channel_use_raw += load; | ||
813 | |||
814 | return TX_CONTINUE; | 824 | return TX_CONTINUE; |
815 | } | 825 | } |
816 | 826 | ||
817 | 827 | ||
818 | typedef ieee80211_tx_result (*ieee80211_tx_handler)(struct ieee80211_tx_data *); | ||
819 | static ieee80211_tx_handler ieee80211_tx_handlers[] = | ||
820 | { | ||
821 | ieee80211_tx_h_check_assoc, | ||
822 | ieee80211_tx_h_sequence, | ||
823 | ieee80211_tx_h_ps_buf, | ||
824 | ieee80211_tx_h_select_key, | ||
825 | ieee80211_tx_h_michael_mic_add, | ||
826 | ieee80211_tx_h_fragment, | ||
827 | ieee80211_tx_h_encrypt, | ||
828 | ieee80211_tx_h_rate_ctrl, | ||
829 | ieee80211_tx_h_misc, | ||
830 | ieee80211_tx_h_load_stats, | ||
831 | NULL | ||
832 | }; | ||
833 | |||
834 | /* actual transmit path */ | 828 | /* actual transmit path */ |
835 | 829 | ||
836 | /* | 830 | /* |
@@ -854,12 +848,12 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, | |||
854 | (struct ieee80211_radiotap_header *) skb->data; | 848 | (struct ieee80211_radiotap_header *) skb->data; |
855 | struct ieee80211_supported_band *sband; | 849 | struct ieee80211_supported_band *sband; |
856 | int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); | 850 | int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); |
857 | struct ieee80211_tx_control *control = tx->control; | 851 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
858 | 852 | ||
859 | sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band]; | 853 | sband = tx->local->hw.wiphy->bands[tx->channel->band]; |
860 | 854 | ||
861 | control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; | 855 | info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT; |
862 | tx->flags |= IEEE80211_TX_INJECTED; | 856 | info->flags |= IEEE80211_TX_CTL_INJECTED; |
863 | tx->flags &= ~IEEE80211_TX_FRAGMENTED; | 857 | tx->flags &= ~IEEE80211_TX_FRAGMENTED; |
864 | 858 | ||
865 | /* | 859 | /* |
@@ -896,7 +890,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, | |||
896 | r = &sband->bitrates[i]; | 890 | r = &sband->bitrates[i]; |
897 | 891 | ||
898 | if (r->bitrate == target_rate) { | 892 | if (r->bitrate == target_rate) { |
899 | tx->rate = r; | 893 | tx->rate_idx = i; |
900 | break; | 894 | break; |
901 | } | 895 | } |
902 | } | 896 | } |
@@ -907,7 +901,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, | |||
907 | * radiotap uses 0 for 1st ant, mac80211 is 1 for | 901 | * radiotap uses 0 for 1st ant, mac80211 is 1 for |
908 | * 1st ant | 902 | * 1st ant |
909 | */ | 903 | */ |
910 | control->antenna_sel_tx = (*iterator.this_arg) + 1; | 904 | info->antenna_sel_tx = (*iterator.this_arg) + 1; |
911 | break; | 905 | break; |
912 | 906 | ||
913 | #if 0 | 907 | #if 0 |
@@ -931,8 +925,8 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, | |||
931 | skb_trim(skb, skb->len - FCS_LEN); | 925 | skb_trim(skb, skb->len - FCS_LEN); |
932 | } | 926 | } |
933 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) | 927 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) |
934 | control->flags &= | 928 | info->flags &= |
935 | ~IEEE80211_TXCTL_DO_NOT_ENCRYPT; | 929 | ~IEEE80211_TX_CTL_DO_NOT_ENCRYPT; |
936 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) | 930 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) |
937 | tx->flags |= IEEE80211_TX_FRAGMENTED; | 931 | tx->flags |= IEEE80211_TX_FRAGMENTED; |
938 | break; | 932 | break; |
@@ -967,12 +961,12 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, | |||
967 | static ieee80211_tx_result | 961 | static ieee80211_tx_result |
968 | __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | 962 | __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, |
969 | struct sk_buff *skb, | 963 | struct sk_buff *skb, |
970 | struct net_device *dev, | 964 | struct net_device *dev) |
971 | struct ieee80211_tx_control *control) | ||
972 | { | 965 | { |
973 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 966 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
974 | struct ieee80211_hdr *hdr; | 967 | struct ieee80211_hdr *hdr; |
975 | struct ieee80211_sub_if_data *sdata; | 968 | struct ieee80211_sub_if_data *sdata; |
969 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
976 | 970 | ||
977 | int hdrlen; | 971 | int hdrlen; |
978 | 972 | ||
@@ -981,7 +975,9 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
981 | tx->dev = dev; /* use original interface */ | 975 | tx->dev = dev; /* use original interface */ |
982 | tx->local = local; | 976 | tx->local = local; |
983 | tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 977 | tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
984 | tx->control = control; | 978 | tx->channel = local->hw.conf.channel; |
979 | tx->rate_idx = -1; | ||
980 | tx->last_frag_rate_idx = -1; | ||
985 | /* | 981 | /* |
986 | * Set this flag (used below to indicate "automatic fragmentation"), | 982 | * Set this flag (used below to indicate "automatic fragmentation"), |
987 | * it will be cleared/left by radiotap as desired. | 983 | * it will be cleared/left by radiotap as desired. |
@@ -1008,34 +1004,33 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
1008 | 1004 | ||
1009 | if (is_multicast_ether_addr(hdr->addr1)) { | 1005 | if (is_multicast_ether_addr(hdr->addr1)) { |
1010 | tx->flags &= ~IEEE80211_TX_UNICAST; | 1006 | tx->flags &= ~IEEE80211_TX_UNICAST; |
1011 | control->flags |= IEEE80211_TXCTL_NO_ACK; | 1007 | info->flags |= IEEE80211_TX_CTL_NO_ACK; |
1012 | } else { | 1008 | } else { |
1013 | tx->flags |= IEEE80211_TX_UNICAST; | 1009 | tx->flags |= IEEE80211_TX_UNICAST; |
1014 | control->flags &= ~IEEE80211_TXCTL_NO_ACK; | 1010 | info->flags &= ~IEEE80211_TX_CTL_NO_ACK; |
1015 | } | 1011 | } |
1016 | 1012 | ||
1017 | if (tx->flags & IEEE80211_TX_FRAGMENTED) { | 1013 | if (tx->flags & IEEE80211_TX_FRAGMENTED) { |
1018 | if ((tx->flags & IEEE80211_TX_UNICAST) && | 1014 | if ((tx->flags & IEEE80211_TX_UNICAST) && |
1019 | skb->len + FCS_LEN > local->fragmentation_threshold && | 1015 | skb->len + FCS_LEN > local->fragmentation_threshold && |
1020 | !local->ops->set_frag_threshold) | 1016 | !local->ops->set_frag_threshold && |
1017 | !(info->flags & IEEE80211_TX_CTL_AMPDU)) | ||
1021 | tx->flags |= IEEE80211_TX_FRAGMENTED; | 1018 | tx->flags |= IEEE80211_TX_FRAGMENTED; |
1022 | else | 1019 | else |
1023 | tx->flags &= ~IEEE80211_TX_FRAGMENTED; | 1020 | tx->flags &= ~IEEE80211_TX_FRAGMENTED; |
1024 | } | 1021 | } |
1025 | 1022 | ||
1026 | if (!tx->sta) | 1023 | if (!tx->sta) |
1027 | control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; | 1024 | info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; |
1028 | else if (tx->sta->flags & WLAN_STA_CLEAR_PS_FILT) { | 1025 | else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT)) |
1029 | control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; | 1026 | info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; |
1030 | tx->sta->flags &= ~WLAN_STA_CLEAR_PS_FILT; | ||
1031 | } | ||
1032 | 1027 | ||
1033 | hdrlen = ieee80211_get_hdrlen(tx->fc); | 1028 | hdrlen = ieee80211_get_hdrlen(tx->fc); |
1034 | if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { | 1029 | if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { |
1035 | u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; | 1030 | u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; |
1036 | tx->ethertype = (pos[0] << 8) | pos[1]; | 1031 | tx->ethertype = (pos[0] << 8) | pos[1]; |
1037 | } | 1032 | } |
1038 | control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT; | 1033 | info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT; |
1039 | 1034 | ||
1040 | return TX_CONTINUE; | 1035 | return TX_CONTINUE; |
1041 | } | 1036 | } |
@@ -1045,14 +1040,12 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
1045 | */ | 1040 | */ |
1046 | static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | 1041 | static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx, |
1047 | struct sk_buff *skb, | 1042 | struct sk_buff *skb, |
1048 | struct net_device *mdev, | 1043 | struct net_device *mdev) |
1049 | struct ieee80211_tx_control *control) | ||
1050 | { | 1044 | { |
1051 | struct ieee80211_tx_packet_data *pkt_data; | 1045 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1052 | struct net_device *dev; | 1046 | struct net_device *dev; |
1053 | 1047 | ||
1054 | pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; | 1048 | dev = dev_get_by_index(&init_net, info->control.ifindex); |
1055 | dev = dev_get_by_index(&init_net, pkt_data->ifindex); | ||
1056 | if (unlikely(dev && !is_ieee80211_device(dev, mdev))) { | 1049 | if (unlikely(dev && !is_ieee80211_device(dev, mdev))) { |
1057 | dev_put(dev); | 1050 | dev_put(dev); |
1058 | dev = NULL; | 1051 | dev = NULL; |
@@ -1060,7 +1053,7 @@ static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
1060 | if (unlikely(!dev)) | 1053 | if (unlikely(!dev)) |
1061 | return -ENODEV; | 1054 | return -ENODEV; |
1062 | /* initialises tx with control */ | 1055 | /* initialises tx with control */ |
1063 | __ieee80211_tx_prepare(tx, skb, dev, control); | 1056 | __ieee80211_tx_prepare(tx, skb, dev); |
1064 | dev_put(dev); | 1057 | dev_put(dev); |
1065 | return 0; | 1058 | return 0; |
1066 | } | 1059 | } |
@@ -1068,50 +1061,49 @@ static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
1068 | static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, | 1061 | static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, |
1069 | struct ieee80211_tx_data *tx) | 1062 | struct ieee80211_tx_data *tx) |
1070 | { | 1063 | { |
1071 | struct ieee80211_tx_control *control = tx->control; | 1064 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1072 | int ret, i; | 1065 | int ret, i; |
1073 | 1066 | ||
1074 | if (!ieee80211_qdisc_installed(local->mdev) && | 1067 | if (netif_subqueue_stopped(local->mdev, skb)) |
1075 | __ieee80211_queue_stopped(local, 0)) { | ||
1076 | netif_stop_queue(local->mdev); | ||
1077 | return IEEE80211_TX_AGAIN; | 1068 | return IEEE80211_TX_AGAIN; |
1078 | } | 1069 | |
1079 | if (skb) { | 1070 | if (skb) { |
1080 | ieee80211_dump_frame(wiphy_name(local->hw.wiphy), | 1071 | ieee80211_dump_frame(wiphy_name(local->hw.wiphy), |
1081 | "TX to low-level driver", skb); | 1072 | "TX to low-level driver", skb); |
1082 | ret = local->ops->tx(local_to_hw(local), skb, control); | 1073 | ret = local->ops->tx(local_to_hw(local), skb); |
1083 | if (ret) | 1074 | if (ret) |
1084 | return IEEE80211_TX_AGAIN; | 1075 | return IEEE80211_TX_AGAIN; |
1085 | local->mdev->trans_start = jiffies; | 1076 | local->mdev->trans_start = jiffies; |
1086 | ieee80211_led_tx(local, 1); | 1077 | ieee80211_led_tx(local, 1); |
1087 | } | 1078 | } |
1088 | if (tx->extra_frag) { | 1079 | if (tx->extra_frag) { |
1089 | control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS | | ||
1090 | IEEE80211_TXCTL_USE_CTS_PROTECT | | ||
1091 | IEEE80211_TXCTL_CLEAR_PS_FILT | | ||
1092 | IEEE80211_TXCTL_FIRST_FRAGMENT); | ||
1093 | for (i = 0; i < tx->num_extra_frag; i++) { | 1080 | for (i = 0; i < tx->num_extra_frag; i++) { |
1094 | if (!tx->extra_frag[i]) | 1081 | if (!tx->extra_frag[i]) |
1095 | continue; | 1082 | continue; |
1096 | if (__ieee80211_queue_stopped(local, control->queue)) | 1083 | info = IEEE80211_SKB_CB(tx->extra_frag[i]); |
1084 | info->flags &= ~(IEEE80211_TX_CTL_USE_RTS_CTS | | ||
1085 | IEEE80211_TX_CTL_USE_CTS_PROTECT | | ||
1086 | IEEE80211_TX_CTL_CLEAR_PS_FILT | | ||
1087 | IEEE80211_TX_CTL_FIRST_FRAGMENT); | ||
1088 | if (netif_subqueue_stopped(local->mdev, | ||
1089 | tx->extra_frag[i])) | ||
1097 | return IEEE80211_TX_FRAG_AGAIN; | 1090 | return IEEE80211_TX_FRAG_AGAIN; |
1098 | if (i == tx->num_extra_frag) { | 1091 | if (i == tx->num_extra_frag) { |
1099 | control->tx_rate = tx->last_frag_rate; | 1092 | info->tx_rate_idx = tx->last_frag_rate_idx; |
1100 | 1093 | ||
1101 | if (tx->flags & IEEE80211_TX_PROBE_LAST_FRAG) | 1094 | if (tx->flags & IEEE80211_TX_PROBE_LAST_FRAG) |
1102 | control->flags |= | 1095 | info->flags |= |
1103 | IEEE80211_TXCTL_RATE_CTRL_PROBE; | 1096 | IEEE80211_TX_CTL_RATE_CTRL_PROBE; |
1104 | else | 1097 | else |
1105 | control->flags &= | 1098 | info->flags &= |
1106 | ~IEEE80211_TXCTL_RATE_CTRL_PROBE; | 1099 | ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; |
1107 | } | 1100 | } |
1108 | 1101 | ||
1109 | ieee80211_dump_frame(wiphy_name(local->hw.wiphy), | 1102 | ieee80211_dump_frame(wiphy_name(local->hw.wiphy), |
1110 | "TX to low-level driver", | 1103 | "TX to low-level driver", |
1111 | tx->extra_frag[i]); | 1104 | tx->extra_frag[i]); |
1112 | ret = local->ops->tx(local_to_hw(local), | 1105 | ret = local->ops->tx(local_to_hw(local), |
1113 | tx->extra_frag[i], | 1106 | tx->extra_frag[i]); |
1114 | control); | ||
1115 | if (ret) | 1107 | if (ret) |
1116 | return IEEE80211_TX_FRAG_AGAIN; | 1108 | return IEEE80211_TX_FRAG_AGAIN; |
1117 | local->mdev->trans_start = jiffies; | 1109 | local->mdev->trans_start = jiffies; |
@@ -1124,17 +1116,65 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, | |||
1124 | return IEEE80211_TX_OK; | 1116 | return IEEE80211_TX_OK; |
1125 | } | 1117 | } |
1126 | 1118 | ||
1127 | static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb, | 1119 | /* |
1128 | struct ieee80211_tx_control *control) | 1120 | * Invoke TX handlers, return 0 on success and non-zero if the |
1121 | * frame was dropped or queued. | ||
1122 | */ | ||
1123 | static int invoke_tx_handlers(struct ieee80211_tx_data *tx) | ||
1124 | { | ||
1125 | struct sk_buff *skb = tx->skb; | ||
1126 | ieee80211_tx_result res = TX_DROP; | ||
1127 | int i; | ||
1128 | |||
1129 | #define CALL_TXH(txh) \ | ||
1130 | res = txh(tx); \ | ||
1131 | if (res != TX_CONTINUE) \ | ||
1132 | goto txh_done; | ||
1133 | |||
1134 | CALL_TXH(ieee80211_tx_h_check_assoc) | ||
1135 | CALL_TXH(ieee80211_tx_h_ps_buf) | ||
1136 | CALL_TXH(ieee80211_tx_h_select_key) | ||
1137 | CALL_TXH(ieee80211_tx_h_michael_mic_add) | ||
1138 | CALL_TXH(ieee80211_tx_h_rate_ctrl) | ||
1139 | CALL_TXH(ieee80211_tx_h_misc) | ||
1140 | CALL_TXH(ieee80211_tx_h_sequence) | ||
1141 | CALL_TXH(ieee80211_tx_h_fragment) | ||
1142 | /* handlers after fragment must be aware of tx info fragmentation! */ | ||
1143 | CALL_TXH(ieee80211_tx_h_encrypt) | ||
1144 | CALL_TXH(ieee80211_tx_h_calculate_duration) | ||
1145 | CALL_TXH(ieee80211_tx_h_stats) | ||
1146 | #undef CALL_TXH | ||
1147 | |||
1148 | txh_done: | ||
1149 | if (unlikely(res == TX_DROP)) { | ||
1150 | I802_DEBUG_INC(tx->local->tx_handlers_drop); | ||
1151 | dev_kfree_skb(skb); | ||
1152 | for (i = 0; i < tx->num_extra_frag; i++) | ||
1153 | if (tx->extra_frag[i]) | ||
1154 | dev_kfree_skb(tx->extra_frag[i]); | ||
1155 | kfree(tx->extra_frag); | ||
1156 | return -1; | ||
1157 | } else if (unlikely(res == TX_QUEUED)) { | ||
1158 | I802_DEBUG_INC(tx->local->tx_handlers_queued); | ||
1159 | return -1; | ||
1160 | } | ||
1161 | |||
1162 | return 0; | ||
1163 | } | ||
1164 | |||
1165 | static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) | ||
1129 | { | 1166 | { |
1130 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1167 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1131 | struct sta_info *sta; | 1168 | struct sta_info *sta; |
1132 | ieee80211_tx_handler *handler; | ||
1133 | struct ieee80211_tx_data tx; | 1169 | struct ieee80211_tx_data tx; |
1134 | ieee80211_tx_result res = TX_DROP, res_prepare; | 1170 | ieee80211_tx_result res_prepare; |
1135 | int ret, i, retries = 0; | 1171 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1172 | int ret, i; | ||
1173 | u16 queue; | ||
1174 | |||
1175 | queue = skb_get_queue_mapping(skb); | ||
1136 | 1176 | ||
1137 | WARN_ON(__ieee80211_queue_pending(local, control->queue)); | 1177 | WARN_ON(test_bit(queue, local->queues_pending)); |
1138 | 1178 | ||
1139 | if (unlikely(skb->len < 10)) { | 1179 | if (unlikely(skb->len < 10)) { |
1140 | dev_kfree_skb(skb); | 1180 | dev_kfree_skb(skb); |
@@ -1144,7 +1184,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb, | |||
1144 | rcu_read_lock(); | 1184 | rcu_read_lock(); |
1145 | 1185 | ||
1146 | /* initialises tx */ | 1186 | /* initialises tx */ |
1147 | res_prepare = __ieee80211_tx_prepare(&tx, skb, dev, control); | 1187 | res_prepare = __ieee80211_tx_prepare(&tx, skb, dev); |
1148 | 1188 | ||
1149 | if (res_prepare == TX_DROP) { | 1189 | if (res_prepare == TX_DROP) { |
1150 | dev_kfree_skb(skb); | 1190 | dev_kfree_skb(skb); |
@@ -1154,86 +1194,53 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb, | |||
1154 | 1194 | ||
1155 | sta = tx.sta; | 1195 | sta = tx.sta; |
1156 | tx.channel = local->hw.conf.channel; | 1196 | tx.channel = local->hw.conf.channel; |
1197 | info->band = tx.channel->band; | ||
1157 | 1198 | ||
1158 | for (handler = ieee80211_tx_handlers; *handler != NULL; | 1199 | if (invoke_tx_handlers(&tx)) |
1159 | handler++) { | 1200 | goto out; |
1160 | res = (*handler)(&tx); | ||
1161 | if (res != TX_CONTINUE) | ||
1162 | break; | ||
1163 | } | ||
1164 | |||
1165 | skb = tx.skb; /* handlers are allowed to change skb */ | ||
1166 | |||
1167 | if (unlikely(res == TX_DROP)) { | ||
1168 | I802_DEBUG_INC(local->tx_handlers_drop); | ||
1169 | goto drop; | ||
1170 | } | ||
1171 | |||
1172 | if (unlikely(res == TX_QUEUED)) { | ||
1173 | I802_DEBUG_INC(local->tx_handlers_queued); | ||
1174 | rcu_read_unlock(); | ||
1175 | return 0; | ||
1176 | } | ||
1177 | |||
1178 | if (tx.extra_frag) { | ||
1179 | for (i = 0; i < tx.num_extra_frag; i++) { | ||
1180 | int next_len, dur; | ||
1181 | struct ieee80211_hdr *hdr = | ||
1182 | (struct ieee80211_hdr *) | ||
1183 | tx.extra_frag[i]->data; | ||
1184 | |||
1185 | if (i + 1 < tx.num_extra_frag) { | ||
1186 | next_len = tx.extra_frag[i + 1]->len; | ||
1187 | } else { | ||
1188 | next_len = 0; | ||
1189 | tx.rate = tx.last_frag_rate; | ||
1190 | } | ||
1191 | dur = ieee80211_duration(&tx, 0, next_len); | ||
1192 | hdr->duration_id = cpu_to_le16(dur); | ||
1193 | } | ||
1194 | } | ||
1195 | 1201 | ||
1196 | retry: | 1202 | retry: |
1197 | ret = __ieee80211_tx(local, skb, &tx); | 1203 | ret = __ieee80211_tx(local, skb, &tx); |
1198 | if (ret) { | 1204 | if (ret) { |
1199 | struct ieee80211_tx_stored_packet *store = | 1205 | struct ieee80211_tx_stored_packet *store; |
1200 | &local->pending_packet[control->queue]; | 1206 | |
1207 | /* | ||
1208 | * Since there are no fragmented frames on A-MPDU | ||
1209 | * queues, there's no reason for a driver to reject | ||
1210 | * a frame there, warn and drop it. | ||
1211 | */ | ||
1212 | if (WARN_ON(queue >= ieee80211_num_regular_queues(&local->hw))) | ||
1213 | goto drop; | ||
1214 | |||
1215 | store = &local->pending_packet[queue]; | ||
1201 | 1216 | ||
1202 | if (ret == IEEE80211_TX_FRAG_AGAIN) | 1217 | if (ret == IEEE80211_TX_FRAG_AGAIN) |
1203 | skb = NULL; | 1218 | skb = NULL; |
1204 | set_bit(IEEE80211_LINK_STATE_PENDING, | 1219 | set_bit(queue, local->queues_pending); |
1205 | &local->state[control->queue]); | ||
1206 | smp_mb(); | 1220 | smp_mb(); |
1207 | /* When the driver gets out of buffers during sending of | 1221 | /* |
1208 | * fragments and calls ieee80211_stop_queue, there is | 1222 | * When the driver gets out of buffers during sending of |
1209 | * a small window between IEEE80211_LINK_STATE_XOFF and | 1223 | * fragments and calls ieee80211_stop_queue, the netif |
1210 | * IEEE80211_LINK_STATE_PENDING flags are set. If a buffer | 1224 | * subqueue is stopped. There is, however, a small window |
1225 | * in which the PENDING bit is not yet set. If a buffer | ||
1211 | * gets available in that window (i.e. driver calls | 1226 | * gets available in that window (i.e. driver calls |
1212 | * ieee80211_wake_queue), we would end up with ieee80211_tx | 1227 | * ieee80211_wake_queue), we would end up with ieee80211_tx |
1213 | * called with IEEE80211_LINK_STATE_PENDING. Prevent this by | 1228 | * called with the PENDING bit still set. Prevent this by |
1214 | * continuing transmitting here when that situation is | 1229 | * continuing transmitting here when that situation is |
1215 | * possible to have happened. */ | 1230 | * possible to have happened. |
1216 | if (!__ieee80211_queue_stopped(local, control->queue)) { | 1231 | */ |
1217 | clear_bit(IEEE80211_LINK_STATE_PENDING, | 1232 | if (!__netif_subqueue_stopped(local->mdev, queue)) { |
1218 | &local->state[control->queue]); | 1233 | clear_bit(queue, local->queues_pending); |
1219 | retries++; | ||
1220 | /* | ||
1221 | * Driver bug, it's rejecting packets but | ||
1222 | * not stopping queues. | ||
1223 | */ | ||
1224 | if (WARN_ON_ONCE(retries > 5)) | ||
1225 | goto drop; | ||
1226 | goto retry; | 1234 | goto retry; |
1227 | } | 1235 | } |
1228 | memcpy(&store->control, control, | ||
1229 | sizeof(struct ieee80211_tx_control)); | ||
1230 | store->skb = skb; | 1236 | store->skb = skb; |
1231 | store->extra_frag = tx.extra_frag; | 1237 | store->extra_frag = tx.extra_frag; |
1232 | store->num_extra_frag = tx.num_extra_frag; | 1238 | store->num_extra_frag = tx.num_extra_frag; |
1233 | store->last_frag_rate = tx.last_frag_rate; | 1239 | store->last_frag_rate_idx = tx.last_frag_rate_idx; |
1234 | store->last_frag_rate_ctrl_probe = | 1240 | store->last_frag_rate_ctrl_probe = |
1235 | !!(tx.flags & IEEE80211_TX_PROBE_LAST_FRAG); | 1241 | !!(tx.flags & IEEE80211_TX_PROBE_LAST_FRAG); |
1236 | } | 1242 | } |
1243 | out: | ||
1237 | rcu_read_unlock(); | 1244 | rcu_read_unlock(); |
1238 | return 0; | 1245 | return 0; |
1239 | 1246 | ||
@@ -1250,24 +1257,57 @@ retry: | |||
1250 | 1257 | ||
1251 | /* device xmit handlers */ | 1258 | /* device xmit handlers */ |
1252 | 1259 | ||
1260 | static int ieee80211_skb_resize(struct ieee80211_local *local, | ||
1261 | struct sk_buff *skb, | ||
1262 | int head_need, bool may_encrypt) | ||
1263 | { | ||
1264 | int tail_need = 0; | ||
1265 | |||
1266 | /* | ||
1267 | * This could be optimised, devices that do full hardware | ||
1268 | * crypto (including TKIP MMIC) need no tailroom... But we | ||
1269 | * have no drivers for such devices currently. | ||
1270 | */ | ||
1271 | if (may_encrypt) { | ||
1272 | tail_need = IEEE80211_ENCRYPT_TAILROOM; | ||
1273 | tail_need -= skb_tailroom(skb); | ||
1274 | tail_need = max_t(int, tail_need, 0); | ||
1275 | } | ||
1276 | |||
1277 | if (head_need || tail_need) { | ||
1278 | /* Sorry. Can't account for this any more */ | ||
1279 | skb_orphan(skb); | ||
1280 | } | ||
1281 | |||
1282 | if (skb_header_cloned(skb)) | ||
1283 | I802_DEBUG_INC(local->tx_expand_skb_head_cloned); | ||
1284 | else | ||
1285 | I802_DEBUG_INC(local->tx_expand_skb_head); | ||
1286 | |||
1287 | if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) { | ||
1288 | printk(KERN_DEBUG "%s: failed to reallocate TX buffer\n", | ||
1289 | wiphy_name(local->hw.wiphy)); | ||
1290 | return -ENOMEM; | ||
1291 | } | ||
1292 | |||
1293 | /* update truesize too */ | ||
1294 | skb->truesize += head_need + tail_need; | ||
1295 | |||
1296 | return 0; | ||
1297 | } | ||
1298 | |||
1253 | int ieee80211_master_start_xmit(struct sk_buff *skb, | 1299 | int ieee80211_master_start_xmit(struct sk_buff *skb, |
1254 | struct net_device *dev) | 1300 | struct net_device *dev) |
1255 | { | 1301 | { |
1256 | struct ieee80211_tx_control control; | 1302 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1257 | struct ieee80211_tx_packet_data *pkt_data; | ||
1258 | struct net_device *odev = NULL; | 1303 | struct net_device *odev = NULL; |
1259 | struct ieee80211_sub_if_data *osdata; | 1304 | struct ieee80211_sub_if_data *osdata; |
1260 | int headroom; | 1305 | int headroom; |
1306 | bool may_encrypt; | ||
1261 | int ret; | 1307 | int ret; |
1262 | 1308 | ||
1263 | /* | 1309 | if (info->control.ifindex) |
1264 | * copy control out of the skb so other people can use skb->cb | 1310 | odev = dev_get_by_index(&init_net, info->control.ifindex); |
1265 | */ | ||
1266 | pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; | ||
1267 | memset(&control, 0, sizeof(struct ieee80211_tx_control)); | ||
1268 | |||
1269 | if (pkt_data->ifindex) | ||
1270 | odev = dev_get_by_index(&init_net, pkt_data->ifindex); | ||
1271 | if (unlikely(odev && !is_ieee80211_device(odev, dev))) { | 1311 | if (unlikely(odev && !is_ieee80211_device(odev, dev))) { |
1272 | dev_put(odev); | 1312 | dev_put(odev); |
1273 | odev = NULL; | 1313 | odev = NULL; |
@@ -1280,32 +1320,25 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, | |||
1280 | dev_kfree_skb(skb); | 1320 | dev_kfree_skb(skb); |
1281 | return 0; | 1321 | return 0; |
1282 | } | 1322 | } |
1323 | |||
1283 | osdata = IEEE80211_DEV_TO_SUB_IF(odev); | 1324 | osdata = IEEE80211_DEV_TO_SUB_IF(odev); |
1284 | 1325 | ||
1285 | headroom = osdata->local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM; | 1326 | may_encrypt = !(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT); |
1286 | if (skb_headroom(skb) < headroom) { | 1327 | |
1287 | if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { | 1328 | headroom = osdata->local->tx_headroom; |
1288 | dev_kfree_skb(skb); | 1329 | if (may_encrypt) |
1289 | dev_put(odev); | 1330 | headroom += IEEE80211_ENCRYPT_HEADROOM; |
1290 | return 0; | 1331 | headroom -= skb_headroom(skb); |
1291 | } | 1332 | headroom = max_t(int, 0, headroom); |
1333 | |||
1334 | if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) { | ||
1335 | dev_kfree_skb(skb); | ||
1336 | dev_put(odev); | ||
1337 | return 0; | ||
1292 | } | 1338 | } |
1293 | 1339 | ||
1294 | control.vif = &osdata->vif; | 1340 | info->control.vif = &osdata->vif; |
1295 | control.type = osdata->vif.type; | 1341 | ret = ieee80211_tx(odev, skb); |
1296 | if (pkt_data->flags & IEEE80211_TXPD_REQ_TX_STATUS) | ||
1297 | control.flags |= IEEE80211_TXCTL_REQ_TX_STATUS; | ||
1298 | if (pkt_data->flags & IEEE80211_TXPD_DO_NOT_ENCRYPT) | ||
1299 | control.flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; | ||
1300 | if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) | ||
1301 | control.flags |= IEEE80211_TXCTL_REQUEUE; | ||
1302 | if (pkt_data->flags & IEEE80211_TXPD_EAPOL_FRAME) | ||
1303 | control.flags |= IEEE80211_TXCTL_EAPOL_FRAME; | ||
1304 | if (pkt_data->flags & IEEE80211_TXPD_AMPDU) | ||
1305 | control.flags |= IEEE80211_TXCTL_AMPDU; | ||
1306 | control.queue = pkt_data->queue; | ||
1307 | |||
1308 | ret = ieee80211_tx(odev, skb, &control); | ||
1309 | dev_put(odev); | 1342 | dev_put(odev); |
1310 | 1343 | ||
1311 | return ret; | 1344 | return ret; |
@@ -1315,7 +1348,7 @@ int ieee80211_monitor_start_xmit(struct sk_buff *skb, | |||
1315 | struct net_device *dev) | 1348 | struct net_device *dev) |
1316 | { | 1349 | { |
1317 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1350 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1318 | struct ieee80211_tx_packet_data *pkt_data; | 1351 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1319 | struct ieee80211_radiotap_header *prthdr = | 1352 | struct ieee80211_radiotap_header *prthdr = |
1320 | (struct ieee80211_radiotap_header *)skb->data; | 1353 | (struct ieee80211_radiotap_header *)skb->data; |
1321 | u16 len_rthdr; | 1354 | u16 len_rthdr; |
@@ -1337,12 +1370,12 @@ int ieee80211_monitor_start_xmit(struct sk_buff *skb, | |||
1337 | 1370 | ||
1338 | skb->dev = local->mdev; | 1371 | skb->dev = local->mdev; |
1339 | 1372 | ||
1340 | pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; | ||
1341 | memset(pkt_data, 0, sizeof(*pkt_data)); | ||
1342 | /* needed because we set skb device to master */ | 1373 | /* needed because we set skb device to master */ |
1343 | pkt_data->ifindex = dev->ifindex; | 1374 | info->control.ifindex = dev->ifindex; |
1344 | 1375 | ||
1345 | pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT; | 1376 | info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT; |
1377 | /* Interfaces should always request a status report */ | ||
1378 | info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; | ||
1346 | 1379 | ||
1347 | /* | 1380 | /* |
1348 | * fix up the pointers accounting for the radiotap | 1381 | * fix up the pointers accounting for the radiotap |
@@ -1386,10 +1419,11 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1386 | struct net_device *dev) | 1419 | struct net_device *dev) |
1387 | { | 1420 | { |
1388 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1421 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1389 | struct ieee80211_tx_packet_data *pkt_data; | 1422 | struct ieee80211_tx_info *info; |
1390 | struct ieee80211_sub_if_data *sdata; | 1423 | struct ieee80211_sub_if_data *sdata; |
1391 | int ret = 1, head_need; | 1424 | int ret = 1, head_need; |
1392 | u16 ethertype, hdrlen, meshhdrlen = 0, fc; | 1425 | u16 ethertype, hdrlen, meshhdrlen = 0; |
1426 | __le16 fc; | ||
1393 | struct ieee80211_hdr hdr; | 1427 | struct ieee80211_hdr hdr; |
1394 | struct ieee80211s_hdr mesh_hdr; | 1428 | struct ieee80211s_hdr mesh_hdr; |
1395 | const u8 *encaps_data; | 1429 | const u8 *encaps_data; |
@@ -1400,8 +1434,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1400 | 1434 | ||
1401 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1435 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1402 | if (unlikely(skb->len < ETH_HLEN)) { | 1436 | if (unlikely(skb->len < ETH_HLEN)) { |
1403 | printk(KERN_DEBUG "%s: short skb (len=%d)\n", | ||
1404 | dev->name, skb->len); | ||
1405 | ret = 0; | 1437 | ret = 0; |
1406 | goto fail; | 1438 | goto fail; |
1407 | } | 1439 | } |
@@ -1412,12 +1444,12 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1412 | /* convert Ethernet header to proper 802.11 header (based on | 1444 | /* convert Ethernet header to proper 802.11 header (based on |
1413 | * operation mode) */ | 1445 | * operation mode) */ |
1414 | ethertype = (skb->data[12] << 8) | skb->data[13]; | 1446 | ethertype = (skb->data[12] << 8) | skb->data[13]; |
1415 | fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA; | 1447 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); |
1416 | 1448 | ||
1417 | switch (sdata->vif.type) { | 1449 | switch (sdata->vif.type) { |
1418 | case IEEE80211_IF_TYPE_AP: | 1450 | case IEEE80211_IF_TYPE_AP: |
1419 | case IEEE80211_IF_TYPE_VLAN: | 1451 | case IEEE80211_IF_TYPE_VLAN: |
1420 | fc |= IEEE80211_FCTL_FROMDS; | 1452 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); |
1421 | /* DA BSSID SA */ | 1453 | /* DA BSSID SA */ |
1422 | memcpy(hdr.addr1, skb->data, ETH_ALEN); | 1454 | memcpy(hdr.addr1, skb->data, ETH_ALEN); |
1423 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); | 1455 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); |
@@ -1425,7 +1457,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1425 | hdrlen = 24; | 1457 | hdrlen = 24; |
1426 | break; | 1458 | break; |
1427 | case IEEE80211_IF_TYPE_WDS: | 1459 | case IEEE80211_IF_TYPE_WDS: |
1428 | fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; | 1460 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); |
1429 | /* RA TA DA SA */ | 1461 | /* RA TA DA SA */ |
1430 | memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); | 1462 | memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); |
1431 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); | 1463 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); |
@@ -1435,7 +1467,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1435 | break; | 1467 | break; |
1436 | #ifdef CONFIG_MAC80211_MESH | 1468 | #ifdef CONFIG_MAC80211_MESH |
1437 | case IEEE80211_IF_TYPE_MESH_POINT: | 1469 | case IEEE80211_IF_TYPE_MESH_POINT: |
1438 | fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; | 1470 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); |
1439 | /* RA TA DA SA */ | 1471 | /* RA TA DA SA */ |
1440 | if (is_multicast_ether_addr(skb->data)) | 1472 | if (is_multicast_ether_addr(skb->data)) |
1441 | memcpy(hdr.addr1, skb->data, ETH_ALEN); | 1473 | memcpy(hdr.addr1, skb->data, ETH_ALEN); |
@@ -1465,7 +1497,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1465 | break; | 1497 | break; |
1466 | #endif | 1498 | #endif |
1467 | case IEEE80211_IF_TYPE_STA: | 1499 | case IEEE80211_IF_TYPE_STA: |
1468 | fc |= IEEE80211_FCTL_TODS; | 1500 | fc |= cpu_to_le16(IEEE80211_FCTL_TODS); |
1469 | /* BSSID SA DA */ | 1501 | /* BSSID SA DA */ |
1470 | memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN); | 1502 | memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN); |
1471 | memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); | 1503 | memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); |
@@ -1493,13 +1525,14 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1493 | rcu_read_lock(); | 1525 | rcu_read_lock(); |
1494 | sta = sta_info_get(local, hdr.addr1); | 1526 | sta = sta_info_get(local, hdr.addr1); |
1495 | if (sta) | 1527 | if (sta) |
1496 | sta_flags = sta->flags; | 1528 | sta_flags = get_sta_flags(sta); |
1497 | rcu_read_unlock(); | 1529 | rcu_read_unlock(); |
1498 | } | 1530 | } |
1499 | 1531 | ||
1500 | /* receiver is QoS enabled, use a QoS type frame */ | 1532 | /* receiver and we are QoS enabled, use a QoS type frame */ |
1501 | if (sta_flags & WLAN_STA_WME) { | 1533 | if (sta_flags & WLAN_STA_WME && |
1502 | fc |= IEEE80211_STYPE_QOS_DATA; | 1534 | ieee80211_num_regular_queues(&local->hw) >= 4) { |
1535 | fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); | ||
1503 | hdrlen += 2; | 1536 | hdrlen += 2; |
1504 | } | 1537 | } |
1505 | 1538 | ||
@@ -1527,7 +1560,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1527 | goto fail; | 1560 | goto fail; |
1528 | } | 1561 | } |
1529 | 1562 | ||
1530 | hdr.frame_control = cpu_to_le16(fc); | 1563 | hdr.frame_control = fc; |
1531 | hdr.duration_id = 0; | 1564 | hdr.duration_id = 0; |
1532 | hdr.seq_ctrl = 0; | 1565 | hdr.seq_ctrl = 0; |
1533 | 1566 | ||
@@ -1562,32 +1595,26 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1562 | * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and | 1595 | * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and |
1563 | * alloc_skb() (net/core/skbuff.c) | 1596 | * alloc_skb() (net/core/skbuff.c) |
1564 | */ | 1597 | */ |
1565 | head_need = hdrlen + encaps_len + meshhdrlen + local->tx_headroom; | 1598 | head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb); |
1566 | head_need -= skb_headroom(skb); | ||
1567 | 1599 | ||
1568 | /* We are going to modify skb data, so make a copy of it if happens to | 1600 | /* |
1569 | * be cloned. This could happen, e.g., with Linux bridge code passing | 1601 | * So we need to modify the skb header and hence need a copy of |
1570 | * us broadcast frames. */ | 1602 | * that. The head_need variable above doesn't, so far, include |
1603 | * the needed header space that we don't need right away. If we | ||
1604 | * can, then we don't reallocate right now but only after the | ||
1605 | * frame arrives at the master device (if it does...) | ||
1606 | * | ||
1607 | * If we cannot, however, then we will reallocate to include all | ||
1608 | * the ever needed space. Also, if we need to reallocate it anyway, | ||
1609 | * make it big enough for everything we may ever need. | ||
1610 | */ | ||
1571 | 1611 | ||
1572 | if (head_need > 0 || skb_cloned(skb)) { | 1612 | if (head_need > 0 || skb_cloned(skb)) { |
1573 | #if 0 | 1613 | head_need += IEEE80211_ENCRYPT_HEADROOM; |
1574 | printk(KERN_DEBUG "%s: need to reallocate buffer for %d bytes " | 1614 | head_need += local->tx_headroom; |
1575 | "of headroom\n", dev->name, head_need); | 1615 | head_need = max_t(int, 0, head_need); |
1576 | #endif | 1616 | if (ieee80211_skb_resize(local, skb, head_need, true)) |
1577 | |||
1578 | if (skb_cloned(skb)) | ||
1579 | I802_DEBUG_INC(local->tx_expand_skb_head_cloned); | ||
1580 | else | ||
1581 | I802_DEBUG_INC(local->tx_expand_skb_head); | ||
1582 | /* Since we have to reallocate the buffer, make sure that there | ||
1583 | * is enough room for possible WEP IV/ICV and TKIP (8 bytes | ||
1584 | * before payload and 12 after). */ | ||
1585 | if (pskb_expand_head(skb, (head_need > 0 ? head_need + 8 : 8), | ||
1586 | 12, GFP_ATOMIC)) { | ||
1587 | printk(KERN_DEBUG "%s: failed to reallocate TX buffer" | ||
1588 | "\n", dev->name); | ||
1589 | goto fail; | 1617 | goto fail; |
1590 | } | ||
1591 | } | 1618 | } |
1592 | 1619 | ||
1593 | if (encaps_data) { | 1620 | if (encaps_data) { |
@@ -1602,7 +1629,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1602 | h_pos += meshhdrlen; | 1629 | h_pos += meshhdrlen; |
1603 | } | 1630 | } |
1604 | 1631 | ||
1605 | if (fc & IEEE80211_STYPE_QOS_DATA) { | 1632 | if (ieee80211_is_data_qos(fc)) { |
1606 | __le16 *qos_control; | 1633 | __le16 *qos_control; |
1607 | 1634 | ||
1608 | qos_control = (__le16*) skb_push(skb, 2); | 1635 | qos_control = (__le16*) skb_push(skb, 2); |
@@ -1618,11 +1645,14 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1618 | nh_pos += hdrlen; | 1645 | nh_pos += hdrlen; |
1619 | h_pos += hdrlen; | 1646 | h_pos += hdrlen; |
1620 | 1647 | ||
1621 | pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; | 1648 | info = IEEE80211_SKB_CB(skb); |
1622 | memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data)); | 1649 | memset(info, 0, sizeof(*info)); |
1623 | pkt_data->ifindex = dev->ifindex; | 1650 | info->control.ifindex = dev->ifindex; |
1624 | if (ethertype == ETH_P_PAE) | 1651 | if (ethertype == ETH_P_PAE) |
1625 | pkt_data->flags |= IEEE80211_TXPD_EAPOL_FRAME; | 1652 | info->flags |= IEEE80211_TX_CTL_EAPOL_FRAME; |
1653 | |||
1654 | /* Interfaces should always request a status report */ | ||
1655 | info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; | ||
1626 | 1656 | ||
1627 | skb->dev = local->mdev; | 1657 | skb->dev = local->mdev; |
1628 | dev->stats.tx_packets++; | 1658 | dev->stats.tx_packets++; |
@@ -1647,46 +1677,55 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1647 | return ret; | 1677 | return ret; |
1648 | } | 1678 | } |
1649 | 1679 | ||
1650 | /* helper functions for pending packets for when queues are stopped */ | ||
1651 | 1680 | ||
1681 | /* | ||
1682 | * ieee80211_clear_tx_pending may not be called in a context where | ||
1683 | * it is possible that it packets could come in again. | ||
1684 | */ | ||
1652 | void ieee80211_clear_tx_pending(struct ieee80211_local *local) | 1685 | void ieee80211_clear_tx_pending(struct ieee80211_local *local) |
1653 | { | 1686 | { |
1654 | int i, j; | 1687 | int i, j; |
1655 | struct ieee80211_tx_stored_packet *store; | 1688 | struct ieee80211_tx_stored_packet *store; |
1656 | 1689 | ||
1657 | for (i = 0; i < local->hw.queues; i++) { | 1690 | for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) { |
1658 | if (!__ieee80211_queue_pending(local, i)) | 1691 | if (!test_bit(i, local->queues_pending)) |
1659 | continue; | 1692 | continue; |
1660 | store = &local->pending_packet[i]; | 1693 | store = &local->pending_packet[i]; |
1661 | kfree_skb(store->skb); | 1694 | kfree_skb(store->skb); |
1662 | for (j = 0; j < store->num_extra_frag; j++) | 1695 | for (j = 0; j < store->num_extra_frag; j++) |
1663 | kfree_skb(store->extra_frag[j]); | 1696 | kfree_skb(store->extra_frag[j]); |
1664 | kfree(store->extra_frag); | 1697 | kfree(store->extra_frag); |
1665 | clear_bit(IEEE80211_LINK_STATE_PENDING, &local->state[i]); | 1698 | clear_bit(i, local->queues_pending); |
1666 | } | 1699 | } |
1667 | } | 1700 | } |
1668 | 1701 | ||
1702 | /* | ||
1703 | * Transmit all pending packets. Called from tasklet, locks master device | ||
1704 | * TX lock so that no new packets can come in. | ||
1705 | */ | ||
1669 | void ieee80211_tx_pending(unsigned long data) | 1706 | void ieee80211_tx_pending(unsigned long data) |
1670 | { | 1707 | { |
1671 | struct ieee80211_local *local = (struct ieee80211_local *)data; | 1708 | struct ieee80211_local *local = (struct ieee80211_local *)data; |
1672 | struct net_device *dev = local->mdev; | 1709 | struct net_device *dev = local->mdev; |
1673 | struct ieee80211_tx_stored_packet *store; | 1710 | struct ieee80211_tx_stored_packet *store; |
1674 | struct ieee80211_tx_data tx; | 1711 | struct ieee80211_tx_data tx; |
1675 | int i, ret, reschedule = 0; | 1712 | int i, ret; |
1676 | 1713 | ||
1677 | netif_tx_lock_bh(dev); | 1714 | netif_tx_lock_bh(dev); |
1678 | for (i = 0; i < local->hw.queues; i++) { | 1715 | for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) { |
1679 | if (__ieee80211_queue_stopped(local, i)) | 1716 | /* Check that this queue is ok */ |
1717 | if (__netif_subqueue_stopped(local->mdev, i)) | ||
1680 | continue; | 1718 | continue; |
1681 | if (!__ieee80211_queue_pending(local, i)) { | 1719 | |
1682 | reschedule = 1; | 1720 | if (!test_bit(i, local->queues_pending)) { |
1721 | ieee80211_wake_queue(&local->hw, i); | ||
1683 | continue; | 1722 | continue; |
1684 | } | 1723 | } |
1724 | |||
1685 | store = &local->pending_packet[i]; | 1725 | store = &local->pending_packet[i]; |
1686 | tx.control = &store->control; | ||
1687 | tx.extra_frag = store->extra_frag; | 1726 | tx.extra_frag = store->extra_frag; |
1688 | tx.num_extra_frag = store->num_extra_frag; | 1727 | tx.num_extra_frag = store->num_extra_frag; |
1689 | tx.last_frag_rate = store->last_frag_rate; | 1728 | tx.last_frag_rate_idx = store->last_frag_rate_idx; |
1690 | tx.flags = 0; | 1729 | tx.flags = 0; |
1691 | if (store->last_frag_rate_ctrl_probe) | 1730 | if (store->last_frag_rate_ctrl_probe) |
1692 | tx.flags |= IEEE80211_TX_PROBE_LAST_FRAG; | 1731 | tx.flags |= IEEE80211_TX_PROBE_LAST_FRAG; |
@@ -1695,19 +1734,11 @@ void ieee80211_tx_pending(unsigned long data) | |||
1695 | if (ret == IEEE80211_TX_FRAG_AGAIN) | 1734 | if (ret == IEEE80211_TX_FRAG_AGAIN) |
1696 | store->skb = NULL; | 1735 | store->skb = NULL; |
1697 | } else { | 1736 | } else { |
1698 | clear_bit(IEEE80211_LINK_STATE_PENDING, | 1737 | clear_bit(i, local->queues_pending); |
1699 | &local->state[i]); | 1738 | ieee80211_wake_queue(&local->hw, i); |
1700 | reschedule = 1; | ||
1701 | } | 1739 | } |
1702 | } | 1740 | } |
1703 | netif_tx_unlock_bh(dev); | 1741 | netif_tx_unlock_bh(dev); |
1704 | if (reschedule) { | ||
1705 | if (!ieee80211_qdisc_installed(dev)) { | ||
1706 | if (!__ieee80211_queue_stopped(local, 0)) | ||
1707 | netif_wake_queue(dev); | ||
1708 | } else | ||
1709 | netif_schedule(dev); | ||
1710 | } | ||
1711 | } | 1742 | } |
1712 | 1743 | ||
1713 | /* functions for drivers to get certain frames */ | 1744 | /* functions for drivers to get certain frames */ |
@@ -1776,23 +1807,24 @@ static void ieee80211_beacon_add_tim(struct ieee80211_local *local, | |||
1776 | } | 1807 | } |
1777 | 1808 | ||
1778 | struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | 1809 | struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, |
1779 | struct ieee80211_vif *vif, | 1810 | struct ieee80211_vif *vif) |
1780 | struct ieee80211_tx_control *control) | ||
1781 | { | 1811 | { |
1782 | struct ieee80211_local *local = hw_to_local(hw); | 1812 | struct ieee80211_local *local = hw_to_local(hw); |
1783 | struct sk_buff *skb; | 1813 | struct sk_buff *skb = NULL; |
1814 | struct ieee80211_tx_info *info; | ||
1784 | struct net_device *bdev; | 1815 | struct net_device *bdev; |
1785 | struct ieee80211_sub_if_data *sdata = NULL; | 1816 | struct ieee80211_sub_if_data *sdata = NULL; |
1786 | struct ieee80211_if_ap *ap = NULL; | 1817 | struct ieee80211_if_ap *ap = NULL; |
1818 | struct ieee80211_if_sta *ifsta = NULL; | ||
1787 | struct rate_selection rsel; | 1819 | struct rate_selection rsel; |
1788 | struct beacon_data *beacon; | 1820 | struct beacon_data *beacon; |
1789 | struct ieee80211_supported_band *sband; | 1821 | struct ieee80211_supported_band *sband; |
1790 | struct ieee80211_mgmt *mgmt; | 1822 | struct ieee80211_mgmt *mgmt; |
1791 | int *num_beacons; | 1823 | int *num_beacons; |
1792 | bool err = true; | 1824 | enum ieee80211_band band = local->hw.conf.channel->band; |
1793 | u8 *pos; | 1825 | u8 *pos; |
1794 | 1826 | ||
1795 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 1827 | sband = local->hw.wiphy->bands[band]; |
1796 | 1828 | ||
1797 | rcu_read_lock(); | 1829 | rcu_read_lock(); |
1798 | 1830 | ||
@@ -1817,9 +1849,6 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1817 | memcpy(skb_put(skb, beacon->head_len), beacon->head, | 1849 | memcpy(skb_put(skb, beacon->head_len), beacon->head, |
1818 | beacon->head_len); | 1850 | beacon->head_len); |
1819 | 1851 | ||
1820 | ieee80211_include_sequence(sdata, | ||
1821 | (struct ieee80211_hdr *)skb->data); | ||
1822 | |||
1823 | /* | 1852 | /* |
1824 | * Not very nice, but we want to allow the driver to call | 1853 | * Not very nice, but we want to allow the driver to call |
1825 | * ieee80211_beacon_get() as a response to the set_tim() | 1854 | * ieee80211_beacon_get() as a response to the set_tim() |
@@ -1842,9 +1871,24 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1842 | beacon->tail, beacon->tail_len); | 1871 | beacon->tail, beacon->tail_len); |
1843 | 1872 | ||
1844 | num_beacons = &ap->num_beacons; | 1873 | num_beacons = &ap->num_beacons; |
1874 | } else | ||
1875 | goto out; | ||
1876 | } else if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | ||
1877 | struct ieee80211_hdr *hdr; | ||
1878 | ifsta = &sdata->u.sta; | ||
1845 | 1879 | ||
1846 | err = false; | 1880 | if (!ifsta->probe_resp) |
1847 | } | 1881 | goto out; |
1882 | |||
1883 | skb = skb_copy(ifsta->probe_resp, GFP_ATOMIC); | ||
1884 | if (!skb) | ||
1885 | goto out; | ||
1886 | |||
1887 | hdr = (struct ieee80211_hdr *) skb->data; | ||
1888 | hdr->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
1889 | IEEE80211_STYPE_BEACON); | ||
1890 | |||
1891 | num_beacons = &ifsta->num_beacons; | ||
1848 | } else if (ieee80211_vif_is_mesh(&sdata->vif)) { | 1892 | } else if (ieee80211_vif_is_mesh(&sdata->vif)) { |
1849 | /* headroom, head length, tail length and maximum TIM length */ | 1893 | /* headroom, head length, tail length and maximum TIM length */ |
1850 | skb = dev_alloc_skb(local->tx_headroom + 400); | 1894 | skb = dev_alloc_skb(local->tx_headroom + 400); |
@@ -1855,8 +1899,8 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1855 | mgmt = (struct ieee80211_mgmt *) | 1899 | mgmt = (struct ieee80211_mgmt *) |
1856 | skb_put(skb, 24 + sizeof(mgmt->u.beacon)); | 1900 | skb_put(skb, 24 + sizeof(mgmt->u.beacon)); |
1857 | memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); | 1901 | memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); |
1858 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 1902 | mgmt->frame_control = |
1859 | IEEE80211_STYPE_BEACON); | 1903 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); |
1860 | memset(mgmt->da, 0xff, ETH_ALEN); | 1904 | memset(mgmt->da, 0xff, ETH_ALEN); |
1861 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | 1905 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
1862 | /* BSSID is left zeroed, wildcard value */ | 1906 | /* BSSID is left zeroed, wildcard value */ |
@@ -1871,44 +1915,41 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1871 | mesh_mgmt_ies_add(skb, sdata->dev); | 1915 | mesh_mgmt_ies_add(skb, sdata->dev); |
1872 | 1916 | ||
1873 | num_beacons = &sdata->u.sta.num_beacons; | 1917 | num_beacons = &sdata->u.sta.num_beacons; |
1874 | 1918 | } else { | |
1875 | err = false; | 1919 | WARN_ON(1); |
1920 | goto out; | ||
1876 | } | 1921 | } |
1877 | 1922 | ||
1878 | if (err) { | 1923 | info = IEEE80211_SKB_CB(skb); |
1879 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 1924 | |
1880 | if (net_ratelimit()) | 1925 | info->band = band; |
1881 | printk(KERN_DEBUG "no beacon data avail for %s\n", | 1926 | rate_control_get_rate(local->mdev, sband, skb, &rsel); |
1882 | bdev->name); | 1927 | |
1883 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | 1928 | if (unlikely(rsel.rate_idx < 0)) { |
1929 | if (net_ratelimit()) { | ||
1930 | printk(KERN_DEBUG "%s: ieee80211_beacon_get: " | ||
1931 | "no rate found\n", | ||
1932 | wiphy_name(local->hw.wiphy)); | ||
1933 | } | ||
1934 | dev_kfree_skb(skb); | ||
1884 | skb = NULL; | 1935 | skb = NULL; |
1885 | goto out; | 1936 | goto out; |
1886 | } | 1937 | } |
1887 | 1938 | ||
1888 | if (control) { | 1939 | info->control.vif = vif; |
1889 | rate_control_get_rate(local->mdev, sband, skb, &rsel); | 1940 | info->tx_rate_idx = rsel.rate_idx; |
1890 | if (!rsel.rate) { | 1941 | |
1891 | if (net_ratelimit()) { | 1942 | info->flags |= IEEE80211_TX_CTL_NO_ACK; |
1892 | printk(KERN_DEBUG "%s: ieee80211_beacon_get: " | 1943 | info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT; |
1893 | "no rate found\n", | 1944 | info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; |
1894 | wiphy_name(local->hw.wiphy)); | 1945 | info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; |
1895 | } | 1946 | if (sdata->bss_conf.use_short_preamble && |
1896 | dev_kfree_skb(skb); | 1947 | sband->bitrates[rsel.rate_idx].flags & IEEE80211_RATE_SHORT_PREAMBLE) |
1897 | skb = NULL; | 1948 | info->flags |= IEEE80211_TX_CTL_SHORT_PREAMBLE; |
1898 | goto out; | 1949 | |
1899 | } | 1950 | info->antenna_sel_tx = local->hw.conf.antenna_sel_tx; |
1951 | info->control.retry_limit = 1; | ||
1900 | 1952 | ||
1901 | control->vif = vif; | ||
1902 | control->tx_rate = rsel.rate; | ||
1903 | if (sdata->bss_conf.use_short_preamble && | ||
1904 | rsel.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) | ||
1905 | control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE; | ||
1906 | control->antenna_sel_tx = local->hw.conf.antenna_sel_tx; | ||
1907 | control->flags |= IEEE80211_TXCTL_NO_ACK; | ||
1908 | control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; | ||
1909 | control->retry_limit = 1; | ||
1910 | control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; | ||
1911 | } | ||
1912 | (*num_beacons)++; | 1953 | (*num_beacons)++; |
1913 | out: | 1954 | out: |
1914 | rcu_read_unlock(); | 1955 | rcu_read_unlock(); |
@@ -1918,14 +1959,13 @@ EXPORT_SYMBOL(ieee80211_beacon_get); | |||
1918 | 1959 | ||
1919 | void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | 1960 | void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
1920 | const void *frame, size_t frame_len, | 1961 | const void *frame, size_t frame_len, |
1921 | const struct ieee80211_tx_control *frame_txctl, | 1962 | const struct ieee80211_tx_info *frame_txctl, |
1922 | struct ieee80211_rts *rts) | 1963 | struct ieee80211_rts *rts) |
1923 | { | 1964 | { |
1924 | const struct ieee80211_hdr *hdr = frame; | 1965 | const struct ieee80211_hdr *hdr = frame; |
1925 | u16 fctl; | ||
1926 | 1966 | ||
1927 | fctl = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS; | 1967 | rts->frame_control = |
1928 | rts->frame_control = cpu_to_le16(fctl); | 1968 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); |
1929 | rts->duration = ieee80211_rts_duration(hw, vif, frame_len, | 1969 | rts->duration = ieee80211_rts_duration(hw, vif, frame_len, |
1930 | frame_txctl); | 1970 | frame_txctl); |
1931 | memcpy(rts->ra, hdr->addr1, sizeof(rts->ra)); | 1971 | memcpy(rts->ra, hdr->addr1, sizeof(rts->ra)); |
@@ -1935,14 +1975,13 @@ EXPORT_SYMBOL(ieee80211_rts_get); | |||
1935 | 1975 | ||
1936 | void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | 1976 | void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
1937 | const void *frame, size_t frame_len, | 1977 | const void *frame, size_t frame_len, |
1938 | const struct ieee80211_tx_control *frame_txctl, | 1978 | const struct ieee80211_tx_info *frame_txctl, |
1939 | struct ieee80211_cts *cts) | 1979 | struct ieee80211_cts *cts) |
1940 | { | 1980 | { |
1941 | const struct ieee80211_hdr *hdr = frame; | 1981 | const struct ieee80211_hdr *hdr = frame; |
1942 | u16 fctl; | ||
1943 | 1982 | ||
1944 | fctl = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS; | 1983 | cts->frame_control = |
1945 | cts->frame_control = cpu_to_le16(fctl); | 1984 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); |
1946 | cts->duration = ieee80211_ctstoself_duration(hw, vif, | 1985 | cts->duration = ieee80211_ctstoself_duration(hw, vif, |
1947 | frame_len, frame_txctl); | 1986 | frame_len, frame_txctl); |
1948 | memcpy(cts->ra, hdr->addr1, sizeof(cts->ra)); | 1987 | memcpy(cts->ra, hdr->addr1, sizeof(cts->ra)); |
@@ -1951,23 +1990,21 @@ EXPORT_SYMBOL(ieee80211_ctstoself_get); | |||
1951 | 1990 | ||
1952 | struct sk_buff * | 1991 | struct sk_buff * |
1953 | ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | 1992 | ieee80211_get_buffered_bc(struct ieee80211_hw *hw, |
1954 | struct ieee80211_vif *vif, | 1993 | struct ieee80211_vif *vif) |
1955 | struct ieee80211_tx_control *control) | ||
1956 | { | 1994 | { |
1957 | struct ieee80211_local *local = hw_to_local(hw); | 1995 | struct ieee80211_local *local = hw_to_local(hw); |
1958 | struct sk_buff *skb; | 1996 | struct sk_buff *skb = NULL; |
1959 | struct sta_info *sta; | 1997 | struct sta_info *sta; |
1960 | ieee80211_tx_handler *handler; | ||
1961 | struct ieee80211_tx_data tx; | 1998 | struct ieee80211_tx_data tx; |
1962 | ieee80211_tx_result res = TX_DROP; | ||
1963 | struct net_device *bdev; | 1999 | struct net_device *bdev; |
1964 | struct ieee80211_sub_if_data *sdata; | 2000 | struct ieee80211_sub_if_data *sdata; |
1965 | struct ieee80211_if_ap *bss = NULL; | 2001 | struct ieee80211_if_ap *bss = NULL; |
1966 | struct beacon_data *beacon; | 2002 | struct beacon_data *beacon; |
2003 | struct ieee80211_tx_info *info; | ||
1967 | 2004 | ||
1968 | sdata = vif_to_sdata(vif); | 2005 | sdata = vif_to_sdata(vif); |
1969 | bdev = sdata->dev; | 2006 | bdev = sdata->dev; |
1970 | 2007 | bss = &sdata->u.ap; | |
1971 | 2008 | ||
1972 | if (!bss) | 2009 | if (!bss) |
1973 | return NULL; | 2010 | return NULL; |
@@ -1975,19 +2012,16 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
1975 | rcu_read_lock(); | 2012 | rcu_read_lock(); |
1976 | beacon = rcu_dereference(bss->beacon); | 2013 | beacon = rcu_dereference(bss->beacon); |
1977 | 2014 | ||
1978 | if (sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon || | 2015 | if (sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon || !beacon->head) |
1979 | !beacon->head) { | 2016 | goto out; |
1980 | rcu_read_unlock(); | ||
1981 | return NULL; | ||
1982 | } | ||
1983 | 2017 | ||
1984 | if (bss->dtim_count != 0) | 2018 | if (bss->dtim_count != 0) |
1985 | return NULL; /* send buffered bc/mc only after DTIM beacon */ | 2019 | goto out; /* send buffered bc/mc only after DTIM beacon */ |
1986 | memset(control, 0, sizeof(*control)); | 2020 | |
1987 | while (1) { | 2021 | while (1) { |
1988 | skb = skb_dequeue(&bss->ps_bc_buf); | 2022 | skb = skb_dequeue(&bss->ps_bc_buf); |
1989 | if (!skb) | 2023 | if (!skb) |
1990 | return NULL; | 2024 | goto out; |
1991 | local->total_ps_buffered--; | 2025 | local->total_ps_buffered--; |
1992 | 2026 | ||
1993 | if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) { | 2027 | if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) { |
@@ -2000,30 +2034,21 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
2000 | cpu_to_le16(IEEE80211_FCTL_MOREDATA); | 2034 | cpu_to_le16(IEEE80211_FCTL_MOREDATA); |
2001 | } | 2035 | } |
2002 | 2036 | ||
2003 | if (!ieee80211_tx_prepare(&tx, skb, local->mdev, control)) | 2037 | if (!ieee80211_tx_prepare(&tx, skb, local->mdev)) |
2004 | break; | 2038 | break; |
2005 | dev_kfree_skb_any(skb); | 2039 | dev_kfree_skb_any(skb); |
2006 | } | 2040 | } |
2041 | |||
2042 | info = IEEE80211_SKB_CB(skb); | ||
2043 | |||
2007 | sta = tx.sta; | 2044 | sta = tx.sta; |
2008 | tx.flags |= IEEE80211_TX_PS_BUFFERED; | 2045 | tx.flags |= IEEE80211_TX_PS_BUFFERED; |
2009 | tx.channel = local->hw.conf.channel; | 2046 | tx.channel = local->hw.conf.channel; |
2047 | info->band = tx.channel->band; | ||
2010 | 2048 | ||
2011 | for (handler = ieee80211_tx_handlers; *handler != NULL; handler++) { | 2049 | if (invoke_tx_handlers(&tx)) |
2012 | res = (*handler)(&tx); | ||
2013 | if (res == TX_DROP || res == TX_QUEUED) | ||
2014 | break; | ||
2015 | } | ||
2016 | skb = tx.skb; /* handlers are allowed to change skb */ | ||
2017 | |||
2018 | if (res == TX_DROP) { | ||
2019 | I802_DEBUG_INC(local->tx_handlers_drop); | ||
2020 | dev_kfree_skb(skb); | ||
2021 | skb = NULL; | ||
2022 | } else if (res == TX_QUEUED) { | ||
2023 | I802_DEBUG_INC(local->tx_handlers_queued); | ||
2024 | skb = NULL; | 2050 | skb = NULL; |
2025 | } | 2051 | out: |
2026 | |||
2027 | rcu_read_unlock(); | 2052 | rcu_read_unlock(); |
2028 | 2053 | ||
2029 | return skb; | 2054 | return skb; |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 4e97b266f907..19f85e1b3695 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -45,38 +45,37 @@ const unsigned char bridge_tunnel_header[] __aligned(2) = | |||
45 | u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, | 45 | u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, |
46 | enum ieee80211_if_types type) | 46 | enum ieee80211_if_types type) |
47 | { | 47 | { |
48 | u16 fc; | 48 | __le16 fc = hdr->frame_control; |
49 | 49 | ||
50 | /* drop ACK/CTS frames and incorrect hdr len (ctrl) */ | 50 | /* drop ACK/CTS frames and incorrect hdr len (ctrl) */ |
51 | if (len < 16) | 51 | if (len < 16) |
52 | return NULL; | 52 | return NULL; |
53 | 53 | ||
54 | fc = le16_to_cpu(hdr->frame_control); | 54 | if (ieee80211_is_data(fc)) { |
55 | |||
56 | switch (fc & IEEE80211_FCTL_FTYPE) { | ||
57 | case IEEE80211_FTYPE_DATA: | ||
58 | if (len < 24) /* drop incorrect hdr len (data) */ | 55 | if (len < 24) /* drop incorrect hdr len (data) */ |
59 | return NULL; | 56 | return NULL; |
60 | switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { | 57 | |
61 | case IEEE80211_FCTL_TODS: | 58 | if (ieee80211_has_a4(fc)) |
62 | return hdr->addr1; | ||
63 | case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): | ||
64 | return NULL; | 59 | return NULL; |
65 | case IEEE80211_FCTL_FROMDS: | 60 | if (ieee80211_has_tods(fc)) |
61 | return hdr->addr1; | ||
62 | if (ieee80211_has_fromds(fc)) | ||
66 | return hdr->addr2; | 63 | return hdr->addr2; |
67 | case 0: | 64 | |
68 | return hdr->addr3; | 65 | return hdr->addr3; |
69 | } | 66 | } |
70 | break; | 67 | |
71 | case IEEE80211_FTYPE_MGMT: | 68 | if (ieee80211_is_mgmt(fc)) { |
72 | if (len < 24) /* drop incorrect hdr len (mgmt) */ | 69 | if (len < 24) /* drop incorrect hdr len (mgmt) */ |
73 | return NULL; | 70 | return NULL; |
74 | return hdr->addr3; | 71 | return hdr->addr3; |
75 | case IEEE80211_FTYPE_CTL: | 72 | } |
76 | if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL) | 73 | |
74 | if (ieee80211_is_ctl(fc)) { | ||
75 | if(ieee80211_is_pspoll(fc)) | ||
77 | return hdr->addr1; | 76 | return hdr->addr1; |
78 | else if ((fc & IEEE80211_FCTL_STYPE) == | 77 | |
79 | IEEE80211_STYPE_BACK_REQ) { | 78 | if (ieee80211_is_back_req(fc)) { |
80 | switch (type) { | 79 | switch (type) { |
81 | case IEEE80211_IF_TYPE_STA: | 80 | case IEEE80211_IF_TYPE_STA: |
82 | return hdr->addr2; | 81 | return hdr->addr2; |
@@ -84,11 +83,9 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, | |||
84 | case IEEE80211_IF_TYPE_VLAN: | 83 | case IEEE80211_IF_TYPE_VLAN: |
85 | return hdr->addr1; | 84 | return hdr->addr1; |
86 | default: | 85 | default: |
87 | return NULL; | 86 | break; /* fall through to the return */ |
88 | } | 87 | } |
89 | } | 88 | } |
90 | else | ||
91 | return NULL; | ||
92 | } | 89 | } |
93 | 90 | ||
94 | return NULL; | 91 | return NULL; |
@@ -133,14 +130,46 @@ int ieee80211_get_hdrlen(u16 fc) | |||
133 | } | 130 | } |
134 | EXPORT_SYMBOL(ieee80211_get_hdrlen); | 131 | EXPORT_SYMBOL(ieee80211_get_hdrlen); |
135 | 132 | ||
136 | int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb) | 133 | unsigned int ieee80211_hdrlen(__le16 fc) |
137 | { | 134 | { |
138 | const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *) skb->data; | 135 | unsigned int hdrlen = 24; |
139 | int hdrlen; | 136 | |
137 | if (ieee80211_is_data(fc)) { | ||
138 | if (ieee80211_has_a4(fc)) | ||
139 | hdrlen = 30; | ||
140 | if (ieee80211_is_data_qos(fc)) | ||
141 | hdrlen += IEEE80211_QOS_CTL_LEN; | ||
142 | goto out; | ||
143 | } | ||
144 | |||
145 | if (ieee80211_is_ctl(fc)) { | ||
146 | /* | ||
147 | * ACK and CTS are 10 bytes, all others 16. To see how | ||
148 | * to get this condition consider | ||
149 | * subtype mask: 0b0000000011110000 (0x00F0) | ||
150 | * ACK subtype: 0b0000000011010000 (0x00D0) | ||
151 | * CTS subtype: 0b0000000011000000 (0x00C0) | ||
152 | * bits that matter: ^^^ (0x00E0) | ||
153 | * value of those: 0b0000000011000000 (0x00C0) | ||
154 | */ | ||
155 | if ((fc & cpu_to_le16(0x00E0)) == cpu_to_le16(0x00C0)) | ||
156 | hdrlen = 10; | ||
157 | else | ||
158 | hdrlen = 16; | ||
159 | } | ||
160 | out: | ||
161 | return hdrlen; | ||
162 | } | ||
163 | EXPORT_SYMBOL(ieee80211_hdrlen); | ||
164 | |||
165 | unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb) | ||
166 | { | ||
167 | const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)skb->data; | ||
168 | unsigned int hdrlen; | ||
140 | 169 | ||
141 | if (unlikely(skb->len < 10)) | 170 | if (unlikely(skb->len < 10)) |
142 | return 0; | 171 | return 0; |
143 | hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)); | 172 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
144 | if (unlikely(hdrlen > skb->len)) | 173 | if (unlikely(hdrlen > skb->len)) |
145 | return 0; | 174 | return 0; |
146 | return hdrlen; | 175 | return hdrlen; |
@@ -258,7 +287,7 @@ EXPORT_SYMBOL(ieee80211_generic_frame_duration); | |||
258 | 287 | ||
259 | __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, | 288 | __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, |
260 | struct ieee80211_vif *vif, size_t frame_len, | 289 | struct ieee80211_vif *vif, size_t frame_len, |
261 | const struct ieee80211_tx_control *frame_txctl) | 290 | const struct ieee80211_tx_info *frame_txctl) |
262 | { | 291 | { |
263 | struct ieee80211_local *local = hw_to_local(hw); | 292 | struct ieee80211_local *local = hw_to_local(hw); |
264 | struct ieee80211_rate *rate; | 293 | struct ieee80211_rate *rate; |
@@ -266,10 +295,13 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, | |||
266 | bool short_preamble; | 295 | bool short_preamble; |
267 | int erp; | 296 | int erp; |
268 | u16 dur; | 297 | u16 dur; |
298 | struct ieee80211_supported_band *sband; | ||
299 | |||
300 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
269 | 301 | ||
270 | short_preamble = sdata->bss_conf.use_short_preamble; | 302 | short_preamble = sdata->bss_conf.use_short_preamble; |
271 | 303 | ||
272 | rate = frame_txctl->rts_cts_rate; | 304 | rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; |
273 | 305 | ||
274 | erp = 0; | 306 | erp = 0; |
275 | if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) | 307 | if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) |
@@ -292,7 +324,7 @@ EXPORT_SYMBOL(ieee80211_rts_duration); | |||
292 | __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, | 324 | __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, |
293 | struct ieee80211_vif *vif, | 325 | struct ieee80211_vif *vif, |
294 | size_t frame_len, | 326 | size_t frame_len, |
295 | const struct ieee80211_tx_control *frame_txctl) | 327 | const struct ieee80211_tx_info *frame_txctl) |
296 | { | 328 | { |
297 | struct ieee80211_local *local = hw_to_local(hw); | 329 | struct ieee80211_local *local = hw_to_local(hw); |
298 | struct ieee80211_rate *rate; | 330 | struct ieee80211_rate *rate; |
@@ -300,10 +332,13 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, | |||
300 | bool short_preamble; | 332 | bool short_preamble; |
301 | int erp; | 333 | int erp; |
302 | u16 dur; | 334 | u16 dur; |
335 | struct ieee80211_supported_band *sband; | ||
336 | |||
337 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
303 | 338 | ||
304 | short_preamble = sdata->bss_conf.use_short_preamble; | 339 | short_preamble = sdata->bss_conf.use_short_preamble; |
305 | 340 | ||
306 | rate = frame_txctl->rts_cts_rate; | 341 | rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; |
307 | erp = 0; | 342 | erp = 0; |
308 | if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) | 343 | if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) |
309 | erp = rate->flags & IEEE80211_RATE_ERP_G; | 344 | erp = rate->flags & IEEE80211_RATE_ERP_G; |
@@ -311,7 +346,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, | |||
311 | /* Data frame duration */ | 346 | /* Data frame duration */ |
312 | dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, | 347 | dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, |
313 | erp, short_preamble); | 348 | erp, short_preamble); |
314 | if (!(frame_txctl->flags & IEEE80211_TXCTL_NO_ACK)) { | 349 | if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) { |
315 | /* ACK duration */ | 350 | /* ACK duration */ |
316 | dur += ieee80211_frame_duration(local, 10, rate->bitrate, | 351 | dur += ieee80211_frame_duration(local, 10, rate->bitrate, |
317 | erp, short_preamble); | 352 | erp, short_preamble); |
@@ -325,17 +360,10 @@ void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) | |||
325 | { | 360 | { |
326 | struct ieee80211_local *local = hw_to_local(hw); | 361 | struct ieee80211_local *local = hw_to_local(hw); |
327 | 362 | ||
328 | if (test_and_clear_bit(IEEE80211_LINK_STATE_XOFF, | 363 | if (test_bit(queue, local->queues_pending)) { |
329 | &local->state[queue])) { | 364 | tasklet_schedule(&local->tx_pending_tasklet); |
330 | if (test_bit(IEEE80211_LINK_STATE_PENDING, | 365 | } else { |
331 | &local->state[queue])) | 366 | netif_wake_subqueue(local->mdev, queue); |
332 | tasklet_schedule(&local->tx_pending_tasklet); | ||
333 | else | ||
334 | if (!ieee80211_qdisc_installed(local->mdev)) { | ||
335 | if (queue == 0) | ||
336 | netif_wake_queue(local->mdev); | ||
337 | } else | ||
338 | __netif_schedule(local->mdev); | ||
339 | } | 367 | } |
340 | } | 368 | } |
341 | EXPORT_SYMBOL(ieee80211_wake_queue); | 369 | EXPORT_SYMBOL(ieee80211_wake_queue); |
@@ -344,29 +372,15 @@ void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue) | |||
344 | { | 372 | { |
345 | struct ieee80211_local *local = hw_to_local(hw); | 373 | struct ieee80211_local *local = hw_to_local(hw); |
346 | 374 | ||
347 | if (!ieee80211_qdisc_installed(local->mdev) && queue == 0) | 375 | netif_stop_subqueue(local->mdev, queue); |
348 | netif_stop_queue(local->mdev); | ||
349 | set_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]); | ||
350 | } | 376 | } |
351 | EXPORT_SYMBOL(ieee80211_stop_queue); | 377 | EXPORT_SYMBOL(ieee80211_stop_queue); |
352 | 378 | ||
353 | void ieee80211_start_queues(struct ieee80211_hw *hw) | ||
354 | { | ||
355 | struct ieee80211_local *local = hw_to_local(hw); | ||
356 | int i; | ||
357 | |||
358 | for (i = 0; i < local->hw.queues; i++) | ||
359 | clear_bit(IEEE80211_LINK_STATE_XOFF, &local->state[i]); | ||
360 | if (!ieee80211_qdisc_installed(local->mdev)) | ||
361 | netif_start_queue(local->mdev); | ||
362 | } | ||
363 | EXPORT_SYMBOL(ieee80211_start_queues); | ||
364 | |||
365 | void ieee80211_stop_queues(struct ieee80211_hw *hw) | 379 | void ieee80211_stop_queues(struct ieee80211_hw *hw) |
366 | { | 380 | { |
367 | int i; | 381 | int i; |
368 | 382 | ||
369 | for (i = 0; i < hw->queues; i++) | 383 | for (i = 0; i < ieee80211_num_queues(hw); i++) |
370 | ieee80211_stop_queue(hw, i); | 384 | ieee80211_stop_queue(hw, i); |
371 | } | 385 | } |
372 | EXPORT_SYMBOL(ieee80211_stop_queues); | 386 | EXPORT_SYMBOL(ieee80211_stop_queues); |
@@ -375,7 +389,7 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw) | |||
375 | { | 389 | { |
376 | int i; | 390 | int i; |
377 | 391 | ||
378 | for (i = 0; i < hw->queues; i++) | 392 | for (i = 0; i < hw->queues + hw->ampdu_queues; i++) |
379 | ieee80211_wake_queue(hw, i); | 393 | ieee80211_wake_queue(hw, i); |
380 | } | 394 | } |
381 | EXPORT_SYMBOL(ieee80211_wake_queues); | 395 | EXPORT_SYMBOL(ieee80211_wake_queues); |
@@ -404,8 +418,6 @@ void ieee80211_iterate_active_interfaces( | |||
404 | case IEEE80211_IF_TYPE_MESH_POINT: | 418 | case IEEE80211_IF_TYPE_MESH_POINT: |
405 | break; | 419 | break; |
406 | } | 420 | } |
407 | if (sdata->dev == local->mdev) | ||
408 | continue; | ||
409 | if (netif_running(sdata->dev)) | 421 | if (netif_running(sdata->dev)) |
410 | iterator(data, sdata->dev->dev_addr, | 422 | iterator(data, sdata->dev->dev_addr, |
411 | &sdata->vif); | 423 | &sdata->vif); |
@@ -439,8 +451,6 @@ void ieee80211_iterate_active_interfaces_atomic( | |||
439 | case IEEE80211_IF_TYPE_MESH_POINT: | 451 | case IEEE80211_IF_TYPE_MESH_POINT: |
440 | break; | 452 | break; |
441 | } | 453 | } |
442 | if (sdata->dev == local->mdev) | ||
443 | continue; | ||
444 | if (netif_running(sdata->dev)) | 454 | if (netif_running(sdata->dev)) |
445 | iterator(data, sdata->dev->dev_addr, | 455 | iterator(data, sdata->dev->dev_addr, |
446 | &sdata->vif); | 456 | &sdata->vif); |
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index affcecd78c10..872d2fcd1a5b 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c | |||
@@ -84,24 +84,17 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local, | |||
84 | struct sk_buff *skb, | 84 | struct sk_buff *skb, |
85 | struct ieee80211_key *key) | 85 | struct ieee80211_key *key) |
86 | { | 86 | { |
87 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 87 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
88 | u16 fc; | 88 | unsigned int hdrlen; |
89 | int hdrlen; | ||
90 | u8 *newhdr; | 89 | u8 *newhdr; |
91 | 90 | ||
92 | fc = le16_to_cpu(hdr->frame_control); | 91 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); |
93 | fc |= IEEE80211_FCTL_PROTECTED; | ||
94 | hdr->frame_control = cpu_to_le16(fc); | ||
95 | 92 | ||
96 | if ((skb_headroom(skb) < WEP_IV_LEN || | 93 | if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN || |
97 | skb_tailroom(skb) < WEP_ICV_LEN)) { | 94 | skb_headroom(skb) < WEP_IV_LEN)) |
98 | I802_DEBUG_INC(local->tx_expand_skb_head); | 95 | return NULL; |
99 | if (unlikely(pskb_expand_head(skb, WEP_IV_LEN, WEP_ICV_LEN, | ||
100 | GFP_ATOMIC))) | ||
101 | return NULL; | ||
102 | } | ||
103 | 96 | ||
104 | hdrlen = ieee80211_get_hdrlen(fc); | 97 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
105 | newhdr = skb_push(skb, WEP_IV_LEN); | 98 | newhdr = skb_push(skb, WEP_IV_LEN); |
106 | memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen); | 99 | memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen); |
107 | ieee80211_wep_get_iv(local, key, newhdr + hdrlen); | 100 | ieee80211_wep_get_iv(local, key, newhdr + hdrlen); |
@@ -113,12 +106,10 @@ static void ieee80211_wep_remove_iv(struct ieee80211_local *local, | |||
113 | struct sk_buff *skb, | 106 | struct sk_buff *skb, |
114 | struct ieee80211_key *key) | 107 | struct ieee80211_key *key) |
115 | { | 108 | { |
116 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 109 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
117 | u16 fc; | 110 | unsigned int hdrlen; |
118 | int hdrlen; | ||
119 | 111 | ||
120 | fc = le16_to_cpu(hdr->frame_control); | 112 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
121 | hdrlen = ieee80211_get_hdrlen(fc); | ||
122 | memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen); | 113 | memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen); |
123 | skb_pull(skb, WEP_IV_LEN); | 114 | skb_pull(skb, WEP_IV_LEN); |
124 | } | 115 | } |
@@ -228,17 +219,15 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, | |||
228 | u32 klen; | 219 | u32 klen; |
229 | u8 *rc4key; | 220 | u8 *rc4key; |
230 | u8 keyidx; | 221 | u8 keyidx; |
231 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 222 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
232 | u16 fc; | 223 | unsigned int hdrlen; |
233 | int hdrlen; | ||
234 | size_t len; | 224 | size_t len; |
235 | int ret = 0; | 225 | int ret = 0; |
236 | 226 | ||
237 | fc = le16_to_cpu(hdr->frame_control); | 227 | if (!ieee80211_has_protected(hdr->frame_control)) |
238 | if (!(fc & IEEE80211_FCTL_PROTECTED)) | ||
239 | return -1; | 228 | return -1; |
240 | 229 | ||
241 | hdrlen = ieee80211_get_hdrlen(fc); | 230 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
242 | 231 | ||
243 | if (skb->len < 8 + hdrlen) | 232 | if (skb->len < 8 + hdrlen) |
244 | return -1; | 233 | return -1; |
@@ -264,11 +253,8 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, | |||
264 | 253 | ||
265 | if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen, | 254 | if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen, |
266 | skb->data + hdrlen + WEP_IV_LEN, | 255 | skb->data + hdrlen + WEP_IV_LEN, |
267 | len)) { | 256 | len)) |
268 | if (net_ratelimit()) | ||
269 | printk(KERN_DEBUG "WEP decrypt failed (ICV)\n"); | ||
270 | ret = -1; | 257 | ret = -1; |
271 | } | ||
272 | 258 | ||
273 | kfree(rc4key); | 259 | kfree(rc4key); |
274 | 260 | ||
@@ -285,17 +271,15 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, | |||
285 | 271 | ||
286 | u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key) | 272 | u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key) |
287 | { | 273 | { |
288 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 274 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
289 | u16 fc; | 275 | unsigned int hdrlen; |
290 | int hdrlen; | ||
291 | u8 *ivpos; | 276 | u8 *ivpos; |
292 | u32 iv; | 277 | u32 iv; |
293 | 278 | ||
294 | fc = le16_to_cpu(hdr->frame_control); | 279 | if (!ieee80211_has_protected(hdr->frame_control)) |
295 | if (!(fc & IEEE80211_FCTL_PROTECTED)) | ||
296 | return NULL; | 280 | return NULL; |
297 | 281 | ||
298 | hdrlen = ieee80211_get_hdrlen(fc); | 282 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
299 | ivpos = skb->data + hdrlen; | 283 | ivpos = skb->data + hdrlen; |
300 | iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2]; | 284 | iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2]; |
301 | 285 | ||
@@ -314,14 +298,8 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) | |||
314 | return RX_CONTINUE; | 298 | return RX_CONTINUE; |
315 | 299 | ||
316 | if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { | 300 | if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { |
317 | if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) { | 301 | if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) |
318 | #ifdef CONFIG_MAC80211_DEBUG | ||
319 | if (net_ratelimit()) | ||
320 | printk(KERN_DEBUG "%s: RX WEP frame, decrypt " | ||
321 | "failed\n", rx->dev->name); | ||
322 | #endif /* CONFIG_MAC80211_DEBUG */ | ||
323 | return RX_DROP_UNUSABLE; | 302 | return RX_DROP_UNUSABLE; |
324 | } | ||
325 | } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) { | 303 | } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) { |
326 | ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); | 304 | ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); |
327 | /* remove ICV */ | 305 | /* remove ICV */ |
@@ -333,11 +311,16 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) | |||
333 | 311 | ||
334 | static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) | 312 | static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) |
335 | { | 313 | { |
314 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
315 | |||
316 | info->control.iv_len = WEP_IV_LEN; | ||
317 | info->control.icv_len = WEP_ICV_LEN; | ||
318 | |||
336 | if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { | 319 | if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { |
337 | if (ieee80211_wep_encrypt(tx->local, skb, tx->key)) | 320 | if (ieee80211_wep_encrypt(tx->local, skb, tx->key)) |
338 | return -1; | 321 | return -1; |
339 | } else { | 322 | } else { |
340 | tx->control->key_idx = tx->key->conf.hw_key_idx; | 323 | info->control.hw_key = &tx->key->conf; |
341 | if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) { | 324 | if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) { |
342 | if (!ieee80211_wep_add_iv(tx->local, skb, tx->key)) | 325 | if (!ieee80211_wep_add_iv(tx->local, skb, tx->key)) |
343 | return -1; | 326 | return -1; |
@@ -349,8 +332,6 @@ static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) | |||
349 | ieee80211_tx_result | 332 | ieee80211_tx_result |
350 | ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) | 333 | ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) |
351 | { | 334 | { |
352 | tx->control->iv_len = WEP_IV_LEN; | ||
353 | tx->control->icv_len = WEP_ICV_LEN; | ||
354 | ieee80211_tx_set_protected(tx); | 335 | ieee80211_tx_set_protected(tx); |
355 | 336 | ||
356 | if (wep_encrypt_skb(tx, tx->skb) < 0) { | 337 | if (wep_encrypt_skb(tx, tx->skb) < 0) { |
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h index 363779c50658..e587172115b8 100644 --- a/net/mac80211/wep.h +++ b/net/mac80211/wep.h | |||
@@ -26,7 +26,7 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb, | |||
26 | struct ieee80211_key *key); | 26 | struct ieee80211_key *key); |
27 | int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, | 27 | int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, |
28 | struct ieee80211_key *key); | 28 | struct ieee80211_key *key); |
29 | u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); | 29 | u8 *ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); |
30 | 30 | ||
31 | ieee80211_rx_result | 31 | ieee80211_rx_result |
32 | ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx); | 32 | ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx); |
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c index e8404212ad57..34fa8ed1e784 100644 --- a/net/mac80211/wext.c +++ b/net/mac80211/wext.c | |||
@@ -142,7 +142,39 @@ static int ieee80211_ioctl_giwname(struct net_device *dev, | |||
142 | struct iw_request_info *info, | 142 | struct iw_request_info *info, |
143 | char *name, char *extra) | 143 | char *name, char *extra) |
144 | { | 144 | { |
145 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
146 | struct ieee80211_supported_band *sband; | ||
147 | u8 is_ht = 0, is_a = 0, is_b = 0, is_g = 0; | ||
148 | |||
149 | |||
150 | sband = local->hw.wiphy->bands[IEEE80211_BAND_5GHZ]; | ||
151 | if (sband) { | ||
152 | is_a = 1; | ||
153 | is_ht |= sband->ht_info.ht_supported; | ||
154 | } | ||
155 | |||
156 | sband = local->hw.wiphy->bands[IEEE80211_BAND_2GHZ]; | ||
157 | if (sband) { | ||
158 | int i; | ||
159 | /* Check for mandatory rates */ | ||
160 | for (i = 0; i < sband->n_bitrates; i++) { | ||
161 | if (sband->bitrates[i].bitrate == 10) | ||
162 | is_b = 1; | ||
163 | if (sband->bitrates[i].bitrate == 60) | ||
164 | is_g = 1; | ||
165 | } | ||
166 | is_ht |= sband->ht_info.ht_supported; | ||
167 | } | ||
168 | |||
145 | strcpy(name, "IEEE 802.11"); | 169 | strcpy(name, "IEEE 802.11"); |
170 | if (is_a) | ||
171 | strcat(name, "a"); | ||
172 | if (is_b) | ||
173 | strcat(name, "b"); | ||
174 | if (is_g) | ||
175 | strcat(name, "g"); | ||
176 | if (is_ht) | ||
177 | strcat(name, "n"); | ||
146 | 178 | ||
147 | return 0; | 179 | return 0; |
148 | } | 180 | } |
@@ -176,14 +208,26 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev, | |||
176 | range->num_encoding_sizes = 2; | 208 | range->num_encoding_sizes = 2; |
177 | range->max_encoding_tokens = NUM_DEFAULT_KEYS; | 209 | range->max_encoding_tokens = NUM_DEFAULT_KEYS; |
178 | 210 | ||
179 | range->max_qual.qual = local->hw.max_signal; | 211 | if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC || |
180 | range->max_qual.level = local->hw.max_rssi; | 212 | local->hw.flags & IEEE80211_HW_SIGNAL_DB) |
181 | range->max_qual.noise = local->hw.max_noise; | 213 | range->max_qual.level = local->hw.max_signal; |
214 | else if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) | ||
215 | range->max_qual.level = -110; | ||
216 | else | ||
217 | range->max_qual.level = 0; | ||
218 | |||
219 | if (local->hw.flags & IEEE80211_HW_NOISE_DBM) | ||
220 | range->max_qual.noise = -110; | ||
221 | else | ||
222 | range->max_qual.noise = 0; | ||
223 | |||
224 | range->max_qual.qual = 100; | ||
182 | range->max_qual.updated = local->wstats_flags; | 225 | range->max_qual.updated = local->wstats_flags; |
183 | 226 | ||
184 | range->avg_qual.qual = local->hw.max_signal/2; | 227 | range->avg_qual.qual = 50; |
185 | range->avg_qual.level = 0; | 228 | /* not always true but better than nothing */ |
186 | range->avg_qual.noise = 0; | 229 | range->avg_qual.level = range->max_qual.level / 2; |
230 | range->avg_qual.noise = range->max_qual.noise / 2; | ||
187 | range->avg_qual.updated = local->wstats_flags; | 231 | range->avg_qual.updated = local->wstats_flags; |
188 | 232 | ||
189 | range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | | 233 | range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | |
@@ -252,15 +296,7 @@ static int ieee80211_ioctl_siwmode(struct net_device *dev, | |||
252 | return -EINVAL; | 296 | return -EINVAL; |
253 | } | 297 | } |
254 | 298 | ||
255 | if (type == sdata->vif.type) | 299 | return ieee80211_if_change_type(sdata, type); |
256 | return 0; | ||
257 | if (netif_running(dev)) | ||
258 | return -EBUSY; | ||
259 | |||
260 | ieee80211_if_reinit(dev); | ||
261 | ieee80211_if_set_type(dev, type); | ||
262 | |||
263 | return 0; | ||
264 | } | 300 | } |
265 | 301 | ||
266 | 302 | ||
@@ -408,7 +444,7 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev, | |||
408 | memset(sdata->u.ap.ssid + len, 0, | 444 | memset(sdata->u.ap.ssid + len, 0, |
409 | IEEE80211_MAX_SSID_LEN - len); | 445 | IEEE80211_MAX_SSID_LEN - len); |
410 | sdata->u.ap.ssid_len = len; | 446 | sdata->u.ap.ssid_len = len; |
411 | return ieee80211_if_config(dev); | 447 | return ieee80211_if_config(sdata, IEEE80211_IFCC_SSID); |
412 | } | 448 | } |
413 | return -EOPNOTSUPP; | 449 | return -EOPNOTSUPP; |
414 | } | 450 | } |
@@ -562,7 +598,7 @@ static int ieee80211_ioctl_giwscan(struct net_device *dev, | |||
562 | if (local->sta_sw_scanning || local->sta_hw_scanning) | 598 | if (local->sta_sw_scanning || local->sta_hw_scanning) |
563 | return -EAGAIN; | 599 | return -EAGAIN; |
564 | 600 | ||
565 | res = ieee80211_sta_scan_results(dev, extra, data->length); | 601 | res = ieee80211_sta_scan_results(dev, info, extra, data->length); |
566 | if (res >= 0) { | 602 | if (res >= 0) { |
567 | data->length = res; | 603 | data->length = res; |
568 | return 0; | 604 | return 0; |
@@ -583,16 +619,14 @@ static int ieee80211_ioctl_siwrate(struct net_device *dev, | |||
583 | struct ieee80211_supported_band *sband; | 619 | struct ieee80211_supported_band *sband; |
584 | 620 | ||
585 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 621 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
586 | if (!sdata->bss) | ||
587 | return -ENODEV; | ||
588 | 622 | ||
589 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 623 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
590 | 624 | ||
591 | /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates | 625 | /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates |
592 | * target_rate = X, rate->fixed = 1 means only rate X | 626 | * target_rate = X, rate->fixed = 1 means only rate X |
593 | * target_rate = X, rate->fixed = 0 means all rates <= X */ | 627 | * target_rate = X, rate->fixed = 0 means all rates <= X */ |
594 | sdata->bss->max_ratectrl_rateidx = -1; | 628 | sdata->max_ratectrl_rateidx = -1; |
595 | sdata->bss->force_unicast_rateidx = -1; | 629 | sdata->force_unicast_rateidx = -1; |
596 | if (rate->value < 0) | 630 | if (rate->value < 0) |
597 | return 0; | 631 | return 0; |
598 | 632 | ||
@@ -601,9 +635,9 @@ static int ieee80211_ioctl_siwrate(struct net_device *dev, | |||
601 | int this_rate = brate->bitrate; | 635 | int this_rate = brate->bitrate; |
602 | 636 | ||
603 | if (target_rate == this_rate) { | 637 | if (target_rate == this_rate) { |
604 | sdata->bss->max_ratectrl_rateidx = i; | 638 | sdata->max_ratectrl_rateidx = i; |
605 | if (rate->fixed) | 639 | if (rate->fixed) |
606 | sdata->bss->force_unicast_rateidx = i; | 640 | sdata->force_unicast_rateidx = i; |
607 | err = 0; | 641 | err = 0; |
608 | break; | 642 | break; |
609 | } | 643 | } |
@@ -716,6 +750,9 @@ static int ieee80211_ioctl_siwrts(struct net_device *dev, | |||
716 | 750 | ||
717 | if (rts->disabled) | 751 | if (rts->disabled) |
718 | local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; | 752 | local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; |
753 | else if (!rts->fixed) | ||
754 | /* if the rts value is not fixed, then take default */ | ||
755 | local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; | ||
719 | else if (rts->value < 0 || rts->value > IEEE80211_MAX_RTS_THRESHOLD) | 756 | else if (rts->value < 0 || rts->value > IEEE80211_MAX_RTS_THRESHOLD) |
720 | return -EINVAL; | 757 | return -EINVAL; |
721 | else | 758 | else |
@@ -753,6 +790,8 @@ static int ieee80211_ioctl_siwfrag(struct net_device *dev, | |||
753 | 790 | ||
754 | if (frag->disabled) | 791 | if (frag->disabled) |
755 | local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD; | 792 | local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD; |
793 | else if (!frag->fixed) | ||
794 | local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD; | ||
756 | else if (frag->value < 256 || | 795 | else if (frag->value < 256 || |
757 | frag->value > IEEE80211_MAX_FRAG_THRESHOLD) | 796 | frag->value > IEEE80211_MAX_FRAG_THRESHOLD) |
758 | return -EINVAL; | 797 | return -EINVAL; |
@@ -944,6 +983,58 @@ static int ieee80211_ioctl_giwencode(struct net_device *dev, | |||
944 | erq->length = sdata->keys[idx]->conf.keylen; | 983 | erq->length = sdata->keys[idx]->conf.keylen; |
945 | erq->flags |= IW_ENCODE_ENABLED; | 984 | erq->flags |= IW_ENCODE_ENABLED; |
946 | 985 | ||
986 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { | ||
987 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
988 | switch (ifsta->auth_alg) { | ||
989 | case WLAN_AUTH_OPEN: | ||
990 | case WLAN_AUTH_LEAP: | ||
991 | erq->flags |= IW_ENCODE_OPEN; | ||
992 | break; | ||
993 | case WLAN_AUTH_SHARED_KEY: | ||
994 | erq->flags |= IW_ENCODE_RESTRICTED; | ||
995 | break; | ||
996 | } | ||
997 | } | ||
998 | |||
999 | return 0; | ||
1000 | } | ||
1001 | |||
1002 | static int ieee80211_ioctl_siwpower(struct net_device *dev, | ||
1003 | struct iw_request_info *info, | ||
1004 | struct iw_param *wrq, | ||
1005 | char *extra) | ||
1006 | { | ||
1007 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1008 | struct ieee80211_conf *conf = &local->hw.conf; | ||
1009 | |||
1010 | if (wrq->disabled) { | ||
1011 | conf->flags &= ~IEEE80211_CONF_PS; | ||
1012 | return ieee80211_hw_config(local); | ||
1013 | } | ||
1014 | |||
1015 | switch (wrq->flags & IW_POWER_MODE) { | ||
1016 | case IW_POWER_ON: /* If not specified */ | ||
1017 | case IW_POWER_MODE: /* If set all mask */ | ||
1018 | case IW_POWER_ALL_R: /* If explicitely state all */ | ||
1019 | conf->flags |= IEEE80211_CONF_PS; | ||
1020 | break; | ||
1021 | default: /* Otherwise we don't support it */ | ||
1022 | return -EINVAL; | ||
1023 | } | ||
1024 | |||
1025 | return ieee80211_hw_config(local); | ||
1026 | } | ||
1027 | |||
1028 | static int ieee80211_ioctl_giwpower(struct net_device *dev, | ||
1029 | struct iw_request_info *info, | ||
1030 | union iwreq_data *wrqu, | ||
1031 | char *extra) | ||
1032 | { | ||
1033 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1034 | struct ieee80211_conf *conf = &local->hw.conf; | ||
1035 | |||
1036 | wrqu->power.disabled = !(conf->flags & IEEE80211_CONF_PS); | ||
1037 | |||
947 | return 0; | 1038 | return 0; |
948 | } | 1039 | } |
949 | 1040 | ||
@@ -1015,8 +1106,8 @@ static struct iw_statistics *ieee80211_get_wireless_stats(struct net_device *dev | |||
1015 | wstats->qual.noise = 0; | 1106 | wstats->qual.noise = 0; |
1016 | wstats->qual.updated = IW_QUAL_ALL_INVALID; | 1107 | wstats->qual.updated = IW_QUAL_ALL_INVALID; |
1017 | } else { | 1108 | } else { |
1018 | wstats->qual.level = sta->last_rssi; | 1109 | wstats->qual.level = sta->last_signal; |
1019 | wstats->qual.qual = sta->last_signal; | 1110 | wstats->qual.qual = sta->last_qual; |
1020 | wstats->qual.noise = sta->last_noise; | 1111 | wstats->qual.noise = sta->last_noise; |
1021 | wstats->qual.updated = local->wstats_flags; | 1112 | wstats->qual.updated = local->wstats_flags; |
1022 | } | 1113 | } |
@@ -1149,8 +1240,8 @@ static const iw_handler ieee80211_handler[] = | |||
1149 | (iw_handler) ieee80211_ioctl_giwretry, /* SIOCGIWRETRY */ | 1240 | (iw_handler) ieee80211_ioctl_giwretry, /* SIOCGIWRETRY */ |
1150 | (iw_handler) ieee80211_ioctl_siwencode, /* SIOCSIWENCODE */ | 1241 | (iw_handler) ieee80211_ioctl_siwencode, /* SIOCSIWENCODE */ |
1151 | (iw_handler) ieee80211_ioctl_giwencode, /* SIOCGIWENCODE */ | 1242 | (iw_handler) ieee80211_ioctl_giwencode, /* SIOCGIWENCODE */ |
1152 | (iw_handler) NULL, /* SIOCSIWPOWER */ | 1243 | (iw_handler) ieee80211_ioctl_siwpower, /* SIOCSIWPOWER */ |
1153 | (iw_handler) NULL, /* SIOCGIWPOWER */ | 1244 | (iw_handler) ieee80211_ioctl_giwpower, /* SIOCGIWPOWER */ |
1154 | (iw_handler) NULL, /* -- hole -- */ | 1245 | (iw_handler) NULL, /* -- hole -- */ |
1155 | (iw_handler) NULL, /* -- hole -- */ | 1246 | (iw_handler) NULL, /* -- hole -- */ |
1156 | (iw_handler) ieee80211_ioctl_siwgenie, /* SIOCSIWGENIE */ | 1247 | (iw_handler) ieee80211_ioctl_siwgenie, /* SIOCSIWGENIE */ |
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 5d09e8698b57..07edda0b8a5c 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c | |||
@@ -18,61 +18,42 @@ | |||
18 | #include "ieee80211_i.h" | 18 | #include "ieee80211_i.h" |
19 | #include "wme.h" | 19 | #include "wme.h" |
20 | 20 | ||
21 | /* maximum number of hardware queues we support. */ | 21 | /* Default mapping in classifier to work with default |
22 | #define TC_80211_MAX_QUEUES 16 | 22 | * queue setup. |
23 | 23 | */ | |
24 | const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; | 24 | const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; |
25 | 25 | ||
26 | struct ieee80211_sched_data | ||
27 | { | ||
28 | unsigned long qdisc_pool[BITS_TO_LONGS(TC_80211_MAX_QUEUES)]; | ||
29 | struct tcf_proto *filter_list; | ||
30 | struct Qdisc *queues[TC_80211_MAX_QUEUES]; | ||
31 | struct sk_buff_head requeued[TC_80211_MAX_QUEUES]; | ||
32 | }; | ||
33 | |||
34 | static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0}; | 26 | static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0}; |
35 | 27 | ||
36 | /* given a data frame determine the 802.1p/1d tag to use */ | 28 | /* Given a data frame determine the 802.1p/1d tag to use. */ |
37 | static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd) | 29 | static unsigned int classify_1d(struct sk_buff *skb) |
38 | { | 30 | { |
39 | struct iphdr *ip; | 31 | unsigned int dscp; |
40 | int dscp; | ||
41 | int offset; | ||
42 | |||
43 | struct ieee80211_sched_data *q = qdisc_priv(qd); | ||
44 | struct tcf_result res = { -1, 0 }; | ||
45 | |||
46 | /* if there is a user set filter list, call out to that */ | ||
47 | if (q->filter_list) { | ||
48 | tc_classify(skb, q->filter_list, &res); | ||
49 | if (res.class != -1) | ||
50 | return res.class; | ||
51 | } | ||
52 | 32 | ||
53 | /* skb->priority values from 256->263 are magic values to | 33 | /* skb->priority values from 256->263 are magic values to |
54 | * directly indicate a specific 802.1d priority. | 34 | * directly indicate a specific 802.1d priority. This is used |
55 | * This is used to allow 802.1d priority to be passed directly in | 35 | * to allow 802.1d priority to be passed directly in from VLAN |
56 | * from VLAN tags, etc. */ | 36 | * tags, etc. |
37 | */ | ||
57 | if (skb->priority >= 256 && skb->priority <= 263) | 38 | if (skb->priority >= 256 && skb->priority <= 263) |
58 | return skb->priority - 256; | 39 | return skb->priority - 256; |
59 | 40 | ||
60 | /* check there is a valid IP header present */ | 41 | switch (skb->protocol) { |
61 | offset = ieee80211_get_hdrlen_from_skb(skb); | 42 | case __constant_htons(ETH_P_IP): |
62 | if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) || | 43 | dscp = ip_hdr(skb)->tos & 0xfc; |
63 | memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr))) | 44 | break; |
64 | return 0; | ||
65 | 45 | ||
66 | ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr)); | 46 | default: |
47 | return 0; | ||
48 | } | ||
67 | 49 | ||
68 | dscp = ip->tos & 0xfc; | ||
69 | if (dscp & 0x1c) | 50 | if (dscp & 0x1c) |
70 | return 0; | 51 | return 0; |
71 | return dscp >> 5; | 52 | return dscp >> 5; |
72 | } | 53 | } |
73 | 54 | ||
74 | 55 | ||
75 | static inline int wme_downgrade_ac(struct sk_buff *skb) | 56 | static int wme_downgrade_ac(struct sk_buff *skb) |
76 | { | 57 | { |
77 | switch (skb->priority) { | 58 | switch (skb->priority) { |
78 | case 6: | 59 | case 6: |
@@ -93,43 +74,38 @@ static inline int wme_downgrade_ac(struct sk_buff *skb) | |||
93 | } | 74 | } |
94 | 75 | ||
95 | 76 | ||
96 | /* positive return value indicates which queue to use | 77 | /* Indicate which queue to use. */ |
97 | * negative return value indicates to drop the frame */ | 78 | static u16 classify80211(struct sk_buff *skb, struct net_device *dev) |
98 | static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd) | ||
99 | { | 79 | { |
100 | struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); | 80 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
101 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 81 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
102 | unsigned short fc = le16_to_cpu(hdr->frame_control); | ||
103 | int qos; | ||
104 | 82 | ||
105 | /* see if frame is data or non data frame */ | 83 | if (!ieee80211_is_data(hdr->frame_control)) { |
106 | if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) { | ||
107 | /* management frames go on AC_VO queue, but are sent | 84 | /* management frames go on AC_VO queue, but are sent |
108 | * without QoS control fields */ | 85 | * without QoS control fields */ |
109 | return IEEE80211_TX_QUEUE_DATA0; | 86 | return 0; |
110 | } | 87 | } |
111 | 88 | ||
112 | if (0 /* injected */) { | 89 | if (0 /* injected */) { |
113 | /* use AC from radiotap */ | 90 | /* use AC from radiotap */ |
114 | } | 91 | } |
115 | 92 | ||
116 | /* is this a QoS frame? */ | 93 | if (!ieee80211_is_data_qos(hdr->frame_control)) { |
117 | qos = fc & IEEE80211_STYPE_QOS_DATA; | ||
118 | |||
119 | if (!qos) { | ||
120 | skb->priority = 0; /* required for correct WPA/11i MIC */ | 94 | skb->priority = 0; /* required for correct WPA/11i MIC */ |
121 | return ieee802_1d_to_ac[skb->priority]; | 95 | return ieee802_1d_to_ac[skb->priority]; |
122 | } | 96 | } |
123 | 97 | ||
124 | /* use the data classifier to determine what 802.1d tag the | 98 | /* use the data classifier to determine what 802.1d tag the |
125 | * data frame has */ | 99 | * data frame has */ |
126 | skb->priority = classify_1d(skb, qd); | 100 | skb->priority = classify_1d(skb); |
127 | 101 | ||
128 | /* in case we are a client verify acm is not set for this ac */ | 102 | /* in case we are a client verify acm is not set for this ac */ |
129 | while (unlikely(local->wmm_acm & BIT(skb->priority))) { | 103 | while (unlikely(local->wmm_acm & BIT(skb->priority))) { |
130 | if (wme_downgrade_ac(skb)) { | 104 | if (wme_downgrade_ac(skb)) { |
131 | /* No AC with lower priority has acm=0, drop packet. */ | 105 | /* The old code would drop the packet in this |
132 | return -1; | 106 | * case. |
107 | */ | ||
108 | return 0; | ||
133 | } | 109 | } |
134 | } | 110 | } |
135 | 111 | ||
@@ -137,55 +113,52 @@ static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd) | |||
137 | return ieee802_1d_to_ac[skb->priority]; | 113 | return ieee802_1d_to_ac[skb->priority]; |
138 | } | 114 | } |
139 | 115 | ||
140 | 116 | u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb) | |
141 | static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd) | ||
142 | { | 117 | { |
143 | struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); | ||
144 | struct ieee80211_sched_data *q = qdisc_priv(qd); | ||
145 | struct ieee80211_tx_packet_data *pkt_data = | ||
146 | (struct ieee80211_tx_packet_data *) skb->cb; | ||
147 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 118 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
148 | unsigned short fc = le16_to_cpu(hdr->frame_control); | 119 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
149 | struct Qdisc *qdisc; | 120 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
150 | int err, queue; | ||
151 | struct sta_info *sta; | 121 | struct sta_info *sta; |
122 | u16 queue; | ||
152 | u8 tid; | 123 | u8 tid; |
153 | 124 | ||
154 | if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) { | 125 | queue = classify80211(skb, dev); |
155 | queue = pkt_data->queue; | 126 | if (unlikely(queue >= local->hw.queues)) |
127 | queue = local->hw.queues - 1; | ||
128 | |||
129 | if (info->flags & IEEE80211_TX_CTL_REQUEUE) { | ||
156 | rcu_read_lock(); | 130 | rcu_read_lock(); |
157 | sta = sta_info_get(local, hdr->addr1); | 131 | sta = sta_info_get(local, hdr->addr1); |
158 | tid = skb->priority & QOS_CONTROL_TAG1D_MASK; | 132 | tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; |
159 | if (sta) { | 133 | if (sta) { |
134 | struct ieee80211_hw *hw = &local->hw; | ||
160 | int ampdu_queue = sta->tid_to_tx_q[tid]; | 135 | int ampdu_queue = sta->tid_to_tx_q[tid]; |
161 | if ((ampdu_queue < local->hw.queues) && | 136 | |
162 | test_bit(ampdu_queue, q->qdisc_pool)) { | 137 | if ((ampdu_queue < ieee80211_num_queues(hw)) && |
138 | test_bit(ampdu_queue, local->queue_pool)) { | ||
163 | queue = ampdu_queue; | 139 | queue = ampdu_queue; |
164 | pkt_data->flags |= IEEE80211_TXPD_AMPDU; | 140 | info->flags |= IEEE80211_TX_CTL_AMPDU; |
165 | } else { | 141 | } else { |
166 | pkt_data->flags &= ~IEEE80211_TXPD_AMPDU; | 142 | info->flags &= ~IEEE80211_TX_CTL_AMPDU; |
167 | } | 143 | } |
168 | } | 144 | } |
169 | rcu_read_unlock(); | 145 | rcu_read_unlock(); |
170 | skb_queue_tail(&q->requeued[queue], skb); | ||
171 | qd->q.qlen++; | ||
172 | return 0; | ||
173 | } | ||
174 | 146 | ||
175 | queue = classify80211(skb, qd); | 147 | return queue; |
148 | } | ||
176 | 149 | ||
177 | /* now we know the 1d priority, fill in the QoS header if there is one | 150 | /* Now we know the 1d priority, fill in the QoS header if |
151 | * there is one. | ||
178 | */ | 152 | */ |
179 | if (WLAN_FC_IS_QOS_DATA(fc)) { | 153 | if (ieee80211_is_data_qos(hdr->frame_control)) { |
180 | u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2; | 154 | u8 *p = ieee80211_get_qos_ctl(hdr); |
181 | u8 ack_policy = 0; | 155 | u8 ack_policy = 0; |
182 | tid = skb->priority & QOS_CONTROL_TAG1D_MASK; | 156 | tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; |
183 | if (local->wifi_wme_noack_test) | 157 | if (local->wifi_wme_noack_test) |
184 | ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK << | 158 | ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK << |
185 | QOS_CONTROL_ACK_POLICY_SHIFT; | 159 | QOS_CONTROL_ACK_POLICY_SHIFT; |
186 | /* qos header is 2 bytes, second reserved */ | 160 | /* qos header is 2 bytes, second reserved */ |
187 | *p = ack_policy | tid; | 161 | *p++ = ack_policy | tid; |
188 | p++; | ||
189 | *p = 0; | 162 | *p = 0; |
190 | 163 | ||
191 | rcu_read_lock(); | 164 | rcu_read_lock(); |
@@ -193,475 +166,37 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd) | |||
193 | sta = sta_info_get(local, hdr->addr1); | 166 | sta = sta_info_get(local, hdr->addr1); |
194 | if (sta) { | 167 | if (sta) { |
195 | int ampdu_queue = sta->tid_to_tx_q[tid]; | 168 | int ampdu_queue = sta->tid_to_tx_q[tid]; |
196 | if ((ampdu_queue < local->hw.queues) && | 169 | struct ieee80211_hw *hw = &local->hw; |
197 | test_bit(ampdu_queue, q->qdisc_pool)) { | 170 | |
171 | if ((ampdu_queue < ieee80211_num_queues(hw)) && | ||
172 | test_bit(ampdu_queue, local->queue_pool)) { | ||
198 | queue = ampdu_queue; | 173 | queue = ampdu_queue; |
199 | pkt_data->flags |= IEEE80211_TXPD_AMPDU; | 174 | info->flags |= IEEE80211_TX_CTL_AMPDU; |
200 | } else { | 175 | } else { |
201 | pkt_data->flags &= ~IEEE80211_TXPD_AMPDU; | 176 | info->flags &= ~IEEE80211_TX_CTL_AMPDU; |
202 | } | 177 | } |
203 | } | 178 | } |
204 | 179 | ||
205 | rcu_read_unlock(); | 180 | rcu_read_unlock(); |
206 | } | 181 | } |
207 | 182 | ||
208 | if (unlikely(queue >= local->hw.queues)) { | ||
209 | #if 0 | ||
210 | if (net_ratelimit()) { | ||
211 | printk(KERN_DEBUG "%s - queue=%d (hw does not " | ||
212 | "support) -> %d\n", | ||
213 | __func__, queue, local->hw.queues - 1); | ||
214 | } | ||
215 | #endif | ||
216 | queue = local->hw.queues - 1; | ||
217 | } | ||
218 | |||
219 | if (unlikely(queue < 0)) { | ||
220 | kfree_skb(skb); | ||
221 | err = NET_XMIT_DROP; | ||
222 | } else { | ||
223 | tid = skb->priority & QOS_CONTROL_TAG1D_MASK; | ||
224 | pkt_data->queue = (unsigned int) queue; | ||
225 | qdisc = q->queues[queue]; | ||
226 | err = qdisc->enqueue(skb, qdisc); | ||
227 | if (err == NET_XMIT_SUCCESS) { | ||
228 | qd->q.qlen++; | ||
229 | qd->bstats.bytes += skb->len; | ||
230 | qd->bstats.packets++; | ||
231 | return NET_XMIT_SUCCESS; | ||
232 | } | ||
233 | } | ||
234 | qd->qstats.drops++; | ||
235 | return err; | ||
236 | } | ||
237 | |||
238 | |||
239 | /* TODO: clean up the cases where master_hard_start_xmit | ||
240 | * returns non 0 - it shouldn't ever do that. Once done we | ||
241 | * can remove this function */ | ||
242 | static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd) | ||
243 | { | ||
244 | struct ieee80211_sched_data *q = qdisc_priv(qd); | ||
245 | struct ieee80211_tx_packet_data *pkt_data = | ||
246 | (struct ieee80211_tx_packet_data *) skb->cb; | ||
247 | struct Qdisc *qdisc; | ||
248 | int err; | ||
249 | |||
250 | /* we recorded which queue to use earlier! */ | ||
251 | qdisc = q->queues[pkt_data->queue]; | ||
252 | |||
253 | if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) { | ||
254 | qd->q.qlen++; | ||
255 | return 0; | ||
256 | } | ||
257 | qd->qstats.drops++; | ||
258 | return err; | ||
259 | } | ||
260 | |||
261 | |||
262 | static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd) | ||
263 | { | ||
264 | struct ieee80211_sched_data *q = qdisc_priv(qd); | ||
265 | struct net_device *dev = qd->dev; | ||
266 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
267 | struct ieee80211_hw *hw = &local->hw; | ||
268 | struct sk_buff *skb; | ||
269 | struct Qdisc *qdisc; | ||
270 | int queue; | ||
271 | |||
272 | /* check all the h/w queues in numeric/priority order */ | ||
273 | for (queue = 0; queue < hw->queues; queue++) { | ||
274 | /* see if there is room in this hardware queue */ | ||
275 | if ((test_bit(IEEE80211_LINK_STATE_XOFF, | ||
276 | &local->state[queue])) || | ||
277 | (test_bit(IEEE80211_LINK_STATE_PENDING, | ||
278 | &local->state[queue])) || | ||
279 | (!test_bit(queue, q->qdisc_pool))) | ||
280 | continue; | ||
281 | |||
282 | /* there is space - try and get a frame */ | ||
283 | skb = skb_dequeue(&q->requeued[queue]); | ||
284 | if (skb) { | ||
285 | qd->q.qlen--; | ||
286 | return skb; | ||
287 | } | ||
288 | |||
289 | qdisc = q->queues[queue]; | ||
290 | skb = qdisc->dequeue(qdisc); | ||
291 | if (skb) { | ||
292 | qd->q.qlen--; | ||
293 | return skb; | ||
294 | } | ||
295 | } | ||
296 | /* returning a NULL here when all the h/w queues are full means we | ||
297 | * never need to call netif_stop_queue in the driver */ | ||
298 | return NULL; | ||
299 | } | ||
300 | |||
301 | |||
302 | static void wme_qdiscop_reset(struct Qdisc* qd) | ||
303 | { | ||
304 | struct ieee80211_sched_data *q = qdisc_priv(qd); | ||
305 | struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); | ||
306 | struct ieee80211_hw *hw = &local->hw; | ||
307 | int queue; | ||
308 | |||
309 | /* QUESTION: should we have some hardware flush functionality here? */ | ||
310 | |||
311 | for (queue = 0; queue < hw->queues; queue++) { | ||
312 | skb_queue_purge(&q->requeued[queue]); | ||
313 | qdisc_reset(q->queues[queue]); | ||
314 | } | ||
315 | qd->q.qlen = 0; | ||
316 | } | ||
317 | |||
318 | |||
319 | static void wme_qdiscop_destroy(struct Qdisc* qd) | ||
320 | { | ||
321 | struct ieee80211_sched_data *q = qdisc_priv(qd); | ||
322 | struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); | ||
323 | struct ieee80211_hw *hw = &local->hw; | ||
324 | int queue; | ||
325 | |||
326 | tcf_destroy_chain(&q->filter_list); | ||
327 | |||
328 | for (queue=0; queue < hw->queues; queue++) { | ||
329 | skb_queue_purge(&q->requeued[queue]); | ||
330 | qdisc_destroy(q->queues[queue]); | ||
331 | q->queues[queue] = &noop_qdisc; | ||
332 | } | ||
333 | } | ||
334 | |||
335 | |||
336 | /* called whenever parameters are updated on existing qdisc */ | ||
337 | static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt) | ||
338 | { | ||
339 | /* struct ieee80211_sched_data *q = qdisc_priv(qd); | ||
340 | */ | ||
341 | /* check our options block is the right size */ | ||
342 | /* copy any options to our local structure */ | ||
343 | /* Ignore options block for now - always use static mapping | ||
344 | struct tc_ieee80211_qopt *qopt = nla_data(opt); | ||
345 | |||
346 | if (opt->nla_len < nla_attr_size(sizeof(*qopt))) | ||
347 | return -EINVAL; | ||
348 | memcpy(q->tag2queue, qopt->tag2queue, sizeof(qopt->tag2queue)); | ||
349 | */ | ||
350 | return 0; | ||
351 | } | ||
352 | |||
353 | |||
354 | /* called during initial creation of qdisc on device */ | ||
355 | static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt) | ||
356 | { | ||
357 | struct ieee80211_sched_data *q = qdisc_priv(qd); | ||
358 | struct net_device *dev = qd->dev; | ||
359 | struct ieee80211_local *local; | ||
360 | int queues; | ||
361 | int err = 0, i; | ||
362 | |||
363 | /* check that device is a mac80211 device */ | ||
364 | if (!dev->ieee80211_ptr || | ||
365 | dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid) | ||
366 | return -EINVAL; | ||
367 | |||
368 | /* check this device is an ieee80211 master type device */ | ||
369 | if (dev->type != ARPHRD_IEEE80211) | ||
370 | return -EINVAL; | ||
371 | |||
372 | /* check that there is no qdisc currently attached to device | ||
373 | * this ensures that we will be the root qdisc. (I can't find a better | ||
374 | * way to test this explicitly) */ | ||
375 | if (dev->qdisc_sleeping != &noop_qdisc) | ||
376 | return -EINVAL; | ||
377 | |||
378 | if (qd->flags & TCQ_F_INGRESS) | ||
379 | return -EINVAL; | ||
380 | |||
381 | local = wdev_priv(dev->ieee80211_ptr); | ||
382 | queues = local->hw.queues; | ||
383 | |||
384 | /* if options were passed in, set them */ | ||
385 | if (opt) { | ||
386 | err = wme_qdiscop_tune(qd, opt); | ||
387 | } | ||
388 | |||
389 | /* create child queues */ | ||
390 | for (i = 0; i < queues; i++) { | ||
391 | skb_queue_head_init(&q->requeued[i]); | ||
392 | q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops, | ||
393 | qd->handle); | ||
394 | if (!q->queues[i]) { | ||
395 | q->queues[i] = &noop_qdisc; | ||
396 | printk(KERN_ERR "%s child qdisc %i creation failed\n", | ||
397 | dev->name, i); | ||
398 | } | ||
399 | } | ||
400 | |||
401 | /* reserve all legacy QoS queues */ | ||
402 | for (i = 0; i < min(IEEE80211_TX_QUEUE_DATA4, queues); i++) | ||
403 | set_bit(i, q->qdisc_pool); | ||
404 | |||
405 | return err; | ||
406 | } | ||
407 | |||
408 | static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb) | ||
409 | { | ||
410 | /* struct ieee80211_sched_data *q = qdisc_priv(qd); | ||
411 | unsigned char *p = skb->tail; | ||
412 | struct tc_ieee80211_qopt opt; | ||
413 | |||
414 | memcpy(&opt.tag2queue, q->tag2queue, TC_80211_MAX_TAG + 1); | ||
415 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); | ||
416 | */ return skb->len; | ||
417 | /* | ||
418 | nla_put_failure: | ||
419 | skb_trim(skb, p - skb->data);*/ | ||
420 | return -1; | ||
421 | } | ||
422 | |||
423 | |||
424 | static int wme_classop_graft(struct Qdisc *qd, unsigned long arg, | ||
425 | struct Qdisc *new, struct Qdisc **old) | ||
426 | { | ||
427 | struct ieee80211_sched_data *q = qdisc_priv(qd); | ||
428 | struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); | ||
429 | struct ieee80211_hw *hw = &local->hw; | ||
430 | unsigned long queue = arg - 1; | ||
431 | |||
432 | if (queue >= hw->queues) | ||
433 | return -EINVAL; | ||
434 | |||
435 | if (!new) | ||
436 | new = &noop_qdisc; | ||
437 | |||
438 | sch_tree_lock(qd); | ||
439 | *old = q->queues[queue]; | ||
440 | q->queues[queue] = new; | ||
441 | qdisc_reset(*old); | ||
442 | sch_tree_unlock(qd); | ||
443 | |||
444 | return 0; | ||
445 | } | ||
446 | |||
447 | |||
448 | static struct Qdisc * | ||
449 | wme_classop_leaf(struct Qdisc *qd, unsigned long arg) | ||
450 | { | ||
451 | struct ieee80211_sched_data *q = qdisc_priv(qd); | ||
452 | struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); | ||
453 | struct ieee80211_hw *hw = &local->hw; | ||
454 | unsigned long queue = arg - 1; | ||
455 | |||
456 | if (queue >= hw->queues) | ||
457 | return NULL; | ||
458 | |||
459 | return q->queues[queue]; | ||
460 | } | ||
461 | |||
462 | |||
463 | static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid) | ||
464 | { | ||
465 | struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); | ||
466 | struct ieee80211_hw *hw = &local->hw; | ||
467 | unsigned long queue = TC_H_MIN(classid); | ||
468 | |||
469 | if (queue - 1 >= hw->queues) | ||
470 | return 0; | ||
471 | |||
472 | return queue; | 183 | return queue; |
473 | } | 184 | } |
474 | 185 | ||
475 | |||
476 | static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent, | ||
477 | u32 classid) | ||
478 | { | ||
479 | return wme_classop_get(qd, classid); | ||
480 | } | ||
481 | |||
482 | |||
483 | static void wme_classop_put(struct Qdisc *q, unsigned long cl) | ||
484 | { | ||
485 | } | ||
486 | |||
487 | |||
488 | static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent, | ||
489 | struct nlattr **tca, unsigned long *arg) | ||
490 | { | ||
491 | unsigned long cl = *arg; | ||
492 | struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); | ||
493 | struct ieee80211_hw *hw = &local->hw; | ||
494 | |||
495 | if (cl - 1 > hw->queues) | ||
496 | return -ENOENT; | ||
497 | |||
498 | /* TODO: put code to program hardware queue parameters here, | ||
499 | * to allow programming from tc command line */ | ||
500 | |||
501 | return 0; | ||
502 | } | ||
503 | |||
504 | |||
505 | /* we don't support deleting hardware queues | ||
506 | * when we add WMM-SA support - TSPECs may be deleted here */ | ||
507 | static int wme_classop_delete(struct Qdisc *qd, unsigned long cl) | ||
508 | { | ||
509 | struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); | ||
510 | struct ieee80211_hw *hw = &local->hw; | ||
511 | |||
512 | if (cl - 1 > hw->queues) | ||
513 | return -ENOENT; | ||
514 | return 0; | ||
515 | } | ||
516 | |||
517 | |||
518 | static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl, | ||
519 | struct sk_buff *skb, struct tcmsg *tcm) | ||
520 | { | ||
521 | struct ieee80211_sched_data *q = qdisc_priv(qd); | ||
522 | struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); | ||
523 | struct ieee80211_hw *hw = &local->hw; | ||
524 | |||
525 | if (cl - 1 > hw->queues) | ||
526 | return -ENOENT; | ||
527 | tcm->tcm_handle = TC_H_MIN(cl); | ||
528 | tcm->tcm_parent = qd->handle; | ||
529 | tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */ | ||
530 | return 0; | ||
531 | } | ||
532 | |||
533 | |||
534 | static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg) | ||
535 | { | ||
536 | struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); | ||
537 | struct ieee80211_hw *hw = &local->hw; | ||
538 | int queue; | ||
539 | |||
540 | if (arg->stop) | ||
541 | return; | ||
542 | |||
543 | for (queue = 0; queue < hw->queues; queue++) { | ||
544 | if (arg->count < arg->skip) { | ||
545 | arg->count++; | ||
546 | continue; | ||
547 | } | ||
548 | /* we should return classids for our internal queues here | ||
549 | * as well as the external ones */ | ||
550 | if (arg->fn(qd, queue+1, arg) < 0) { | ||
551 | arg->stop = 1; | ||
552 | break; | ||
553 | } | ||
554 | arg->count++; | ||
555 | } | ||
556 | } | ||
557 | |||
558 | |||
559 | static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd, | ||
560 | unsigned long cl) | ||
561 | { | ||
562 | struct ieee80211_sched_data *q = qdisc_priv(qd); | ||
563 | |||
564 | if (cl) | ||
565 | return NULL; | ||
566 | |||
567 | return &q->filter_list; | ||
568 | } | ||
569 | |||
570 | |||
571 | /* this qdisc is classful (i.e. has classes, some of which may have leaf qdiscs attached) | ||
572 | * - these are the operations on the classes */ | ||
573 | static const struct Qdisc_class_ops class_ops = | ||
574 | { | ||
575 | .graft = wme_classop_graft, | ||
576 | .leaf = wme_classop_leaf, | ||
577 | |||
578 | .get = wme_classop_get, | ||
579 | .put = wme_classop_put, | ||
580 | .change = wme_classop_change, | ||
581 | .delete = wme_classop_delete, | ||
582 | .walk = wme_classop_walk, | ||
583 | |||
584 | .tcf_chain = wme_classop_find_tcf, | ||
585 | .bind_tcf = wme_classop_bind, | ||
586 | .unbind_tcf = wme_classop_put, | ||
587 | |||
588 | .dump = wme_classop_dump_class, | ||
589 | }; | ||
590 | |||
591 | |||
592 | /* queueing discipline operations */ | ||
593 | static struct Qdisc_ops wme_qdisc_ops __read_mostly = | ||
594 | { | ||
595 | .next = NULL, | ||
596 | .cl_ops = &class_ops, | ||
597 | .id = "ieee80211", | ||
598 | .priv_size = sizeof(struct ieee80211_sched_data), | ||
599 | |||
600 | .enqueue = wme_qdiscop_enqueue, | ||
601 | .dequeue = wme_qdiscop_dequeue, | ||
602 | .requeue = wme_qdiscop_requeue, | ||
603 | .drop = NULL, /* drop not needed since we are always the root qdisc */ | ||
604 | |||
605 | .init = wme_qdiscop_init, | ||
606 | .reset = wme_qdiscop_reset, | ||
607 | .destroy = wme_qdiscop_destroy, | ||
608 | .change = wme_qdiscop_tune, | ||
609 | |||
610 | .dump = wme_qdiscop_dump, | ||
611 | }; | ||
612 | |||
613 | |||
614 | void ieee80211_install_qdisc(struct net_device *dev) | ||
615 | { | ||
616 | struct Qdisc *qdisc; | ||
617 | |||
618 | qdisc = qdisc_create_dflt(dev, &wme_qdisc_ops, TC_H_ROOT); | ||
619 | if (!qdisc) { | ||
620 | printk(KERN_ERR "%s: qdisc installation failed\n", dev->name); | ||
621 | return; | ||
622 | } | ||
623 | |||
624 | /* same handle as would be allocated by qdisc_alloc_handle() */ | ||
625 | qdisc->handle = 0x80010000; | ||
626 | |||
627 | qdisc_lock_tree(dev); | ||
628 | list_add_tail(&qdisc->list, &dev->qdisc_list); | ||
629 | dev->qdisc_sleeping = qdisc; | ||
630 | qdisc_unlock_tree(dev); | ||
631 | } | ||
632 | |||
633 | |||
634 | int ieee80211_qdisc_installed(struct net_device *dev) | ||
635 | { | ||
636 | return dev->qdisc_sleeping->ops == &wme_qdisc_ops; | ||
637 | } | ||
638 | |||
639 | |||
640 | int ieee80211_wme_register(void) | ||
641 | { | ||
642 | return register_qdisc(&wme_qdisc_ops); | ||
643 | } | ||
644 | |||
645 | |||
646 | void ieee80211_wme_unregister(void) | ||
647 | { | ||
648 | unregister_qdisc(&wme_qdisc_ops); | ||
649 | } | ||
650 | |||
651 | int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, | 186 | int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, |
652 | struct sta_info *sta, u16 tid) | 187 | struct sta_info *sta, u16 tid) |
653 | { | 188 | { |
654 | int i; | 189 | int i; |
655 | struct ieee80211_sched_data *q = | ||
656 | qdisc_priv(local->mdev->qdisc_sleeping); | ||
657 | DECLARE_MAC_BUF(mac); | ||
658 | 190 | ||
659 | /* prepare the filter and save it for the SW queue | 191 | /* prepare the filter and save it for the SW queue |
660 | * matching the recieved HW queue */ | 192 | * matching the received HW queue */ |
193 | |||
194 | if (!local->hw.ampdu_queues) | ||
195 | return -EPERM; | ||
661 | 196 | ||
662 | /* try to get a Qdisc from the pool */ | 197 | /* try to get a Qdisc from the pool */ |
663 | for (i = IEEE80211_TX_QUEUE_BEACON; i < local->hw.queues; i++) | 198 | for (i = local->hw.queues; i < ieee80211_num_queues(&local->hw); i++) |
664 | if (!test_and_set_bit(i, q->qdisc_pool)) { | 199 | if (!test_and_set_bit(i, local->queue_pool)) { |
665 | ieee80211_stop_queue(local_to_hw(local), i); | 200 | ieee80211_stop_queue(local_to_hw(local), i); |
666 | sta->tid_to_tx_q[tid] = i; | 201 | sta->tid_to_tx_q[tid] = i; |
667 | 202 | ||
@@ -670,11 +205,13 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, | |||
670 | * on the previous queue | 205 | * on the previous queue |
671 | * since HT is strict in order */ | 206 | * since HT is strict in order */ |
672 | #ifdef CONFIG_MAC80211_HT_DEBUG | 207 | #ifdef CONFIG_MAC80211_HT_DEBUG |
673 | if (net_ratelimit()) | 208 | if (net_ratelimit()) { |
209 | DECLARE_MAC_BUF(mac); | ||
674 | printk(KERN_DEBUG "allocated aggregation queue" | 210 | printk(KERN_DEBUG "allocated aggregation queue" |
675 | " %d tid %d addr %s pool=0x%lX\n", | 211 | " %d tid %d addr %s pool=0x%lX\n", |
676 | i, tid, print_mac(mac, sta->addr), | 212 | i, tid, print_mac(mac, sta->addr), |
677 | q->qdisc_pool[0]); | 213 | local->queue_pool[0]); |
214 | } | ||
678 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 215 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
679 | return 0; | 216 | return 0; |
680 | } | 217 | } |
@@ -683,44 +220,79 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, | |||
683 | } | 220 | } |
684 | 221 | ||
685 | /** | 222 | /** |
686 | * the caller needs to hold local->mdev->queue_lock | 223 | * the caller needs to hold netdev_get_tx_queue(local->mdev, X)->lock |
687 | */ | 224 | */ |
688 | void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local, | 225 | void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local, |
689 | struct sta_info *sta, u16 tid, | 226 | struct sta_info *sta, u16 tid, |
690 | u8 requeue) | 227 | u8 requeue) |
691 | { | 228 | { |
692 | struct ieee80211_sched_data *q = | ||
693 | qdisc_priv(local->mdev->qdisc_sleeping); | ||
694 | int agg_queue = sta->tid_to_tx_q[tid]; | 229 | int agg_queue = sta->tid_to_tx_q[tid]; |
230 | struct ieee80211_hw *hw = &local->hw; | ||
695 | 231 | ||
696 | /* return the qdisc to the pool */ | 232 | /* return the qdisc to the pool */ |
697 | clear_bit(agg_queue, q->qdisc_pool); | 233 | clear_bit(agg_queue, local->queue_pool); |
698 | sta->tid_to_tx_q[tid] = local->hw.queues; | 234 | sta->tid_to_tx_q[tid] = ieee80211_num_queues(hw); |
699 | 235 | ||
700 | if (requeue) | 236 | if (requeue) { |
701 | ieee80211_requeue(local, agg_queue); | 237 | ieee80211_requeue(local, agg_queue); |
702 | else | 238 | } else { |
703 | q->queues[agg_queue]->ops->reset(q->queues[agg_queue]); | 239 | struct netdev_queue *txq; |
240 | spinlock_t *root_lock; | ||
241 | |||
242 | txq = netdev_get_tx_queue(local->mdev, agg_queue); | ||
243 | root_lock = qdisc_root_lock(txq->qdisc); | ||
244 | |||
245 | spin_lock_bh(root_lock); | ||
246 | qdisc_reset(txq->qdisc); | ||
247 | spin_unlock_bh(root_lock); | ||
248 | } | ||
704 | } | 249 | } |
705 | 250 | ||
706 | void ieee80211_requeue(struct ieee80211_local *local, int queue) | 251 | void ieee80211_requeue(struct ieee80211_local *local, int queue) |
707 | { | 252 | { |
708 | struct Qdisc *root_qd = local->mdev->qdisc_sleeping; | 253 | struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue); |
709 | struct ieee80211_sched_data *q = qdisc_priv(root_qd); | 254 | struct sk_buff_head list; |
710 | struct Qdisc *qdisc = q->queues[queue]; | 255 | spinlock_t *root_lock; |
711 | struct sk_buff *skb = NULL; | 256 | struct Qdisc *qdisc; |
712 | u32 len; | 257 | u32 len; |
713 | 258 | ||
259 | rcu_read_lock_bh(); | ||
260 | |||
261 | qdisc = rcu_dereference(txq->qdisc); | ||
714 | if (!qdisc || !qdisc->dequeue) | 262 | if (!qdisc || !qdisc->dequeue) |
715 | return; | 263 | goto out_unlock; |
264 | |||
265 | skb_queue_head_init(&list); | ||
716 | 266 | ||
717 | printk(KERN_DEBUG "requeue: qlen = %d\n", qdisc->q.qlen); | 267 | root_lock = qdisc_root_lock(qdisc); |
268 | spin_lock(root_lock); | ||
718 | for (len = qdisc->q.qlen; len > 0; len--) { | 269 | for (len = qdisc->q.qlen; len > 0; len--) { |
719 | skb = qdisc->dequeue(qdisc); | 270 | struct sk_buff *skb = qdisc->dequeue(qdisc); |
720 | root_qd->q.qlen--; | 271 | |
721 | /* packet will be classified again and */ | ||
722 | /* skb->packet_data->queue will be overridden if needed */ | ||
723 | if (skb) | 272 | if (skb) |
724 | wme_qdiscop_enqueue(skb, root_qd); | 273 | __skb_queue_tail(&list, skb); |
274 | } | ||
275 | spin_unlock(root_lock); | ||
276 | |||
277 | for (len = list.qlen; len > 0; len--) { | ||
278 | struct sk_buff *skb = __skb_dequeue(&list); | ||
279 | u16 new_queue; | ||
280 | |||
281 | BUG_ON(!skb); | ||
282 | new_queue = ieee80211_select_queue(local->mdev, skb); | ||
283 | skb_set_queue_mapping(skb, new_queue); | ||
284 | |||
285 | txq = netdev_get_tx_queue(local->mdev, new_queue); | ||
286 | |||
287 | |||
288 | qdisc = rcu_dereference(txq->qdisc); | ||
289 | root_lock = qdisc_root_lock(qdisc); | ||
290 | |||
291 | spin_lock(root_lock); | ||
292 | qdisc_enqueue_root(skb, qdisc); | ||
293 | spin_unlock(root_lock); | ||
725 | } | 294 | } |
295 | |||
296 | out_unlock: | ||
297 | rcu_read_unlock_bh(); | ||
726 | } | 298 | } |
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h index fcc6b05508cc..04de28c071a6 100644 --- a/net/mac80211/wme.h +++ b/net/mac80211/wme.h | |||
@@ -19,57 +19,16 @@ | |||
19 | #define QOS_CONTROL_ACK_POLICY_NORMAL 0 | 19 | #define QOS_CONTROL_ACK_POLICY_NORMAL 0 |
20 | #define QOS_CONTROL_ACK_POLICY_NOACK 1 | 20 | #define QOS_CONTROL_ACK_POLICY_NOACK 1 |
21 | 21 | ||
22 | #define QOS_CONTROL_TID_MASK 0x0f | ||
23 | #define QOS_CONTROL_ACK_POLICY_SHIFT 5 | 22 | #define QOS_CONTROL_ACK_POLICY_SHIFT 5 |
24 | 23 | ||
25 | #define QOS_CONTROL_TAG1D_MASK 0x07 | ||
26 | |||
27 | extern const int ieee802_1d_to_ac[8]; | 24 | extern const int ieee802_1d_to_ac[8]; |
28 | 25 | ||
29 | static inline int WLAN_FC_IS_QOS_DATA(u16 fc) | 26 | u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb); |
30 | { | ||
31 | return (fc & 0x8C) == 0x88; | ||
32 | } | ||
33 | |||
34 | #ifdef CONFIG_NET_SCHED | ||
35 | void ieee80211_install_qdisc(struct net_device *dev); | ||
36 | int ieee80211_qdisc_installed(struct net_device *dev); | ||
37 | int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, | 27 | int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, |
38 | struct sta_info *sta, u16 tid); | 28 | struct sta_info *sta, u16 tid); |
39 | void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local, | 29 | void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local, |
40 | struct sta_info *sta, u16 tid, | 30 | struct sta_info *sta, u16 tid, |
41 | u8 requeue); | 31 | u8 requeue); |
42 | void ieee80211_requeue(struct ieee80211_local *local, int queue); | 32 | void ieee80211_requeue(struct ieee80211_local *local, int queue); |
43 | int ieee80211_wme_register(void); | ||
44 | void ieee80211_wme_unregister(void); | ||
45 | #else | ||
46 | static inline void ieee80211_install_qdisc(struct net_device *dev) | ||
47 | { | ||
48 | } | ||
49 | static inline int ieee80211_qdisc_installed(struct net_device *dev) | ||
50 | { | ||
51 | return 0; | ||
52 | } | ||
53 | static inline int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, | ||
54 | struct sta_info *sta, u16 tid) | ||
55 | { | ||
56 | return -EAGAIN; | ||
57 | } | ||
58 | static inline void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local, | ||
59 | struct sta_info *sta, u16 tid, | ||
60 | u8 requeue) | ||
61 | { | ||
62 | } | ||
63 | static inline void ieee80211_requeue(struct ieee80211_local *local, int queue) | ||
64 | { | ||
65 | } | ||
66 | static inline int ieee80211_wme_register(void) | ||
67 | { | ||
68 | return 0; | ||
69 | } | ||
70 | static inline void ieee80211_wme_unregister(void) | ||
71 | { | ||
72 | } | ||
73 | #endif /* CONFIG_NET_SCHED */ | ||
74 | 33 | ||
75 | #endif /* _WME_H */ | 34 | #endif /* _WME_H */ |
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 45709ada8fee..2f33df0dcccf 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
13 | #include <linux/compiler.h> | 13 | #include <linux/compiler.h> |
14 | #include <linux/ieee80211.h> | ||
15 | #include <asm/unaligned.h> | ||
14 | #include <net/mac80211.h> | 16 | #include <net/mac80211.h> |
15 | 17 | ||
16 | #include "ieee80211_i.h" | 18 | #include "ieee80211_i.h" |
@@ -19,76 +21,30 @@ | |||
19 | #include "aes_ccm.h" | 21 | #include "aes_ccm.h" |
20 | #include "wpa.h" | 22 | #include "wpa.h" |
21 | 23 | ||
22 | static int ieee80211_get_hdr_info(const struct sk_buff *skb, u8 **sa, u8 **da, | ||
23 | u8 *qos_tid, u8 **data, size_t *data_len) | ||
24 | { | ||
25 | struct ieee80211_hdr *hdr; | ||
26 | size_t hdrlen; | ||
27 | u16 fc; | ||
28 | int a4_included; | ||
29 | u8 *pos; | ||
30 | |||
31 | hdr = (struct ieee80211_hdr *) skb->data; | ||
32 | fc = le16_to_cpu(hdr->frame_control); | ||
33 | |||
34 | hdrlen = 24; | ||
35 | if ((fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) == | ||
36 | (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { | ||
37 | hdrlen += ETH_ALEN; | ||
38 | *sa = hdr->addr4; | ||
39 | *da = hdr->addr3; | ||
40 | } else if (fc & IEEE80211_FCTL_FROMDS) { | ||
41 | *sa = hdr->addr3; | ||
42 | *da = hdr->addr1; | ||
43 | } else if (fc & IEEE80211_FCTL_TODS) { | ||
44 | *sa = hdr->addr2; | ||
45 | *da = hdr->addr3; | ||
46 | } else { | ||
47 | *sa = hdr->addr2; | ||
48 | *da = hdr->addr1; | ||
49 | } | ||
50 | |||
51 | if (fc & 0x80) | ||
52 | hdrlen += 2; | ||
53 | |||
54 | *data = skb->data + hdrlen; | ||
55 | *data_len = skb->len - hdrlen; | ||
56 | |||
57 | a4_included = (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == | ||
58 | (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); | ||
59 | if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && | ||
60 | fc & IEEE80211_STYPE_QOS_DATA) { | ||
61 | pos = (u8 *) &hdr->addr4; | ||
62 | if (a4_included) | ||
63 | pos += 6; | ||
64 | *qos_tid = pos[0] & 0x0f; | ||
65 | *qos_tid |= 0x80; /* qos_included flag */ | ||
66 | } else | ||
67 | *qos_tid = 0; | ||
68 | |||
69 | return skb->len < hdrlen ? -1 : 0; | ||
70 | } | ||
71 | |||
72 | |||
73 | ieee80211_tx_result | 24 | ieee80211_tx_result |
74 | ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) | 25 | ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) |
75 | { | 26 | { |
76 | u8 *data, *sa, *da, *key, *mic, qos_tid; | 27 | u8 *data, *key, *mic, key_offset; |
77 | size_t data_len; | 28 | size_t data_len; |
78 | u16 fc; | 29 | unsigned int hdrlen; |
30 | struct ieee80211_hdr *hdr; | ||
79 | struct sk_buff *skb = tx->skb; | 31 | struct sk_buff *skb = tx->skb; |
80 | int authenticator; | 32 | int authenticator; |
81 | int wpa_test = 0; | 33 | int wpa_test = 0; |
34 | int tail; | ||
82 | 35 | ||
83 | fc = tx->fc; | 36 | hdr = (struct ieee80211_hdr *)skb->data; |
84 | |||
85 | if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 || | 37 | if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 || |
86 | !WLAN_FC_DATA_PRESENT(fc)) | 38 | !ieee80211_is_data_present(hdr->frame_control)) |
87 | return TX_CONTINUE; | 39 | return TX_CONTINUE; |
88 | 40 | ||
89 | if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len)) | 41 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
42 | if (skb->len < hdrlen) | ||
90 | return TX_DROP; | 43 | return TX_DROP; |
91 | 44 | ||
45 | data = skb->data + hdrlen; | ||
46 | data_len = skb->len - hdrlen; | ||
47 | |||
92 | if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && | 48 | if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && |
93 | !(tx->flags & IEEE80211_TX_FRAGMENTED) && | 49 | !(tx->flags & IEEE80211_TX_FRAGMENTED) && |
94 | !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) && | 50 | !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) && |
@@ -98,26 +54,27 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) | |||
98 | return TX_CONTINUE; | 54 | return TX_CONTINUE; |
99 | } | 55 | } |
100 | 56 | ||
101 | if (skb_tailroom(skb) < MICHAEL_MIC_LEN) { | 57 | tail = MICHAEL_MIC_LEN; |
102 | I802_DEBUG_INC(tx->local->tx_expand_skb_head); | 58 | if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) |
103 | if (unlikely(pskb_expand_head(skb, TKIP_IV_LEN, | 59 | tail += TKIP_ICV_LEN; |
104 | MICHAEL_MIC_LEN + TKIP_ICV_LEN, | 60 | |
105 | GFP_ATOMIC))) { | 61 | if (WARN_ON(skb_tailroom(skb) < tail || |
106 | printk(KERN_DEBUG "%s: failed to allocate more memory " | 62 | skb_headroom(skb) < TKIP_IV_LEN)) |
107 | "for Michael MIC\n", tx->dev->name); | 63 | return TX_DROP; |
108 | return TX_DROP; | ||
109 | } | ||
110 | } | ||
111 | 64 | ||
112 | #if 0 | 65 | #if 0 |
113 | authenticator = fc & IEEE80211_FCTL_FROMDS; /* FIX */ | 66 | authenticator = fc & IEEE80211_FCTL_FROMDS; /* FIX */ |
114 | #else | 67 | #else |
115 | authenticator = 1; | 68 | authenticator = 1; |
116 | #endif | 69 | #endif |
117 | key = &tx->key->conf.key[authenticator ? ALG_TKIP_TEMP_AUTH_TX_MIC_KEY : | 70 | /* At this point we know we're using ALG_TKIP. To get the MIC key |
118 | ALG_TKIP_TEMP_AUTH_RX_MIC_KEY]; | 71 | * we now will rely on the offset from the ieee80211_key_conf::key */ |
72 | key_offset = authenticator ? | ||
73 | NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY : | ||
74 | NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY; | ||
75 | key = &tx->key->conf.key[key_offset]; | ||
119 | mic = skb_put(skb, MICHAEL_MIC_LEN); | 76 | mic = skb_put(skb, MICHAEL_MIC_LEN); |
120 | michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); | 77 | michael_mic(key, hdr, data, data_len, mic); |
121 | 78 | ||
122 | return TX_CONTINUE; | 79 | return TX_CONTINUE; |
123 | } | 80 | } |
@@ -126,47 +83,50 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) | |||
126 | ieee80211_rx_result | 83 | ieee80211_rx_result |
127 | ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) | 84 | ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) |
128 | { | 85 | { |
129 | u8 *data, *sa, *da, *key = NULL, qos_tid; | 86 | u8 *data, *key = NULL, key_offset; |
130 | size_t data_len; | 87 | size_t data_len; |
131 | u16 fc; | 88 | unsigned int hdrlen; |
89 | struct ieee80211_hdr *hdr; | ||
132 | u8 mic[MICHAEL_MIC_LEN]; | 90 | u8 mic[MICHAEL_MIC_LEN]; |
133 | struct sk_buff *skb = rx->skb; | 91 | struct sk_buff *skb = rx->skb; |
134 | int authenticator = 1, wpa_test = 0; | 92 | int authenticator = 1, wpa_test = 0; |
135 | DECLARE_MAC_BUF(mac); | 93 | DECLARE_MAC_BUF(mac); |
136 | 94 | ||
137 | fc = rx->fc; | ||
138 | |||
139 | /* | 95 | /* |
140 | * No way to verify the MIC if the hardware stripped it | 96 | * No way to verify the MIC if the hardware stripped it |
141 | */ | 97 | */ |
142 | if (rx->status->flag & RX_FLAG_MMIC_STRIPPED) | 98 | if (rx->status->flag & RX_FLAG_MMIC_STRIPPED) |
143 | return RX_CONTINUE; | 99 | return RX_CONTINUE; |
144 | 100 | ||
101 | hdr = (struct ieee80211_hdr *)skb->data; | ||
145 | if (!rx->key || rx->key->conf.alg != ALG_TKIP || | 102 | if (!rx->key || rx->key->conf.alg != ALG_TKIP || |
146 | !(rx->fc & IEEE80211_FCTL_PROTECTED) || !WLAN_FC_DATA_PRESENT(fc)) | 103 | !ieee80211_has_protected(hdr->frame_control) || |
104 | !ieee80211_is_data_present(hdr->frame_control)) | ||
147 | return RX_CONTINUE; | 105 | return RX_CONTINUE; |
148 | 106 | ||
149 | if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len) | 107 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
150 | || data_len < MICHAEL_MIC_LEN) | 108 | if (skb->len < hdrlen + MICHAEL_MIC_LEN) |
151 | return RX_DROP_UNUSABLE; | 109 | return RX_DROP_UNUSABLE; |
152 | 110 | ||
153 | data_len -= MICHAEL_MIC_LEN; | 111 | data = skb->data + hdrlen; |
112 | data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; | ||
154 | 113 | ||
155 | #if 0 | 114 | #if 0 |
156 | authenticator = fc & IEEE80211_FCTL_TODS; /* FIX */ | 115 | authenticator = fc & IEEE80211_FCTL_TODS; /* FIX */ |
157 | #else | 116 | #else |
158 | authenticator = 1; | 117 | authenticator = 1; |
159 | #endif | 118 | #endif |
160 | key = &rx->key->conf.key[authenticator ? ALG_TKIP_TEMP_AUTH_RX_MIC_KEY : | 119 | /* At this point we know we're using ALG_TKIP. To get the MIC key |
161 | ALG_TKIP_TEMP_AUTH_TX_MIC_KEY]; | 120 | * we now will rely on the offset from the ieee80211_key_conf::key */ |
162 | michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); | 121 | key_offset = authenticator ? |
122 | NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY : | ||
123 | NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY; | ||
124 | key = &rx->key->conf.key[key_offset]; | ||
125 | michael_mic(key, hdr, data, data_len, mic); | ||
163 | if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) { | 126 | if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) { |
164 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | 127 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) |
165 | return RX_DROP_UNUSABLE; | 128 | return RX_DROP_UNUSABLE; |
166 | 129 | ||
167 | printk(KERN_DEBUG "%s: invalid Michael MIC in data frame from " | ||
168 | "%s\n", rx->dev->name, print_mac(mac, sa)); | ||
169 | |||
170 | mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx, | 130 | mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx, |
171 | (void *) skb->data); | 131 | (void *) skb->data); |
172 | return RX_DROP_UNUSABLE; | 132 | return RX_DROP_UNUSABLE; |
@@ -176,59 +136,58 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) | |||
176 | skb_trim(skb, skb->len - MICHAEL_MIC_LEN); | 136 | skb_trim(skb, skb->len - MICHAEL_MIC_LEN); |
177 | 137 | ||
178 | /* update IV in key information to be able to detect replays */ | 138 | /* update IV in key information to be able to detect replays */ |
179 | rx->key->u.tkip.iv32_rx[rx->queue] = rx->tkip_iv32; | 139 | rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32; |
180 | rx->key->u.tkip.iv16_rx[rx->queue] = rx->tkip_iv16; | 140 | rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16; |
181 | 141 | ||
182 | return RX_CONTINUE; | 142 | return RX_CONTINUE; |
183 | } | 143 | } |
184 | 144 | ||
185 | 145 | ||
186 | static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, | 146 | static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) |
187 | struct sk_buff *skb, int test) | ||
188 | { | 147 | { |
189 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 148 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
190 | struct ieee80211_key *key = tx->key; | 149 | struct ieee80211_key *key = tx->key; |
191 | int hdrlen, len, tailneed; | 150 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
192 | u16 fc; | 151 | unsigned int hdrlen; |
152 | int len, tail; | ||
193 | u8 *pos; | 153 | u8 *pos; |
194 | 154 | ||
195 | fc = le16_to_cpu(hdr->frame_control); | 155 | info->control.icv_len = TKIP_ICV_LEN; |
196 | hdrlen = ieee80211_get_hdrlen(fc); | 156 | info->control.iv_len = TKIP_IV_LEN; |
157 | |||
158 | if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && | ||
159 | !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { | ||
160 | /* hwaccel - with no need for preallocated room for IV/ICV */ | ||
161 | info->control.hw_key = &tx->key->conf; | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | ||
197 | len = skb->len - hdrlen; | 166 | len = skb->len - hdrlen; |
198 | 167 | ||
199 | if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) | 168 | if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) |
200 | tailneed = 0; | 169 | tail = 0; |
201 | else | 170 | else |
202 | tailneed = TKIP_ICV_LEN; | 171 | tail = TKIP_ICV_LEN; |
203 | 172 | ||
204 | if ((skb_headroom(skb) < TKIP_IV_LEN || | 173 | if (WARN_ON(skb_tailroom(skb) < tail || |
205 | skb_tailroom(skb) < tailneed)) { | 174 | skb_headroom(skb) < TKIP_IV_LEN)) |
206 | I802_DEBUG_INC(tx->local->tx_expand_skb_head); | 175 | return -1; |
207 | if (unlikely(pskb_expand_head(skb, TKIP_IV_LEN, tailneed, | ||
208 | GFP_ATOMIC))) | ||
209 | return -1; | ||
210 | } | ||
211 | 176 | ||
212 | pos = skb_push(skb, TKIP_IV_LEN); | 177 | pos = skb_push(skb, TKIP_IV_LEN); |
213 | memmove(pos, pos + TKIP_IV_LEN, hdrlen); | 178 | memmove(pos, pos + TKIP_IV_LEN, hdrlen); |
214 | pos += hdrlen; | 179 | pos += hdrlen; |
215 | 180 | ||
216 | /* Increase IV for the frame */ | 181 | /* Increase IV for the frame */ |
217 | key->u.tkip.iv16++; | 182 | key->u.tkip.tx.iv16++; |
218 | if (key->u.tkip.iv16 == 0) | 183 | if (key->u.tkip.tx.iv16 == 0) |
219 | key->u.tkip.iv32++; | 184 | key->u.tkip.tx.iv32++; |
220 | 185 | ||
221 | if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { | 186 | if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { |
222 | hdr = (struct ieee80211_hdr *)skb->data; | ||
223 | |||
224 | /* hwaccel - with preallocated room for IV */ | 187 | /* hwaccel - with preallocated room for IV */ |
225 | ieee80211_tkip_add_iv(pos, key, | 188 | ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16); |
226 | (u8) (key->u.tkip.iv16 >> 8), | ||
227 | (u8) (((key->u.tkip.iv16 >> 8) | 0x20) & | ||
228 | 0x7f), | ||
229 | (u8) key->u.tkip.iv16); | ||
230 | 189 | ||
231 | tx->control->key_idx = tx->key->conf.hw_key_idx; | 190 | info->control.hw_key = &tx->key->conf; |
232 | return 0; | 191 | return 0; |
233 | } | 192 | } |
234 | 193 | ||
@@ -246,28 +205,16 @@ ieee80211_tx_result | |||
246 | ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) | 205 | ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) |
247 | { | 206 | { |
248 | struct sk_buff *skb = tx->skb; | 207 | struct sk_buff *skb = tx->skb; |
249 | int wpa_test = 0, test = 0; | ||
250 | 208 | ||
251 | tx->control->icv_len = TKIP_ICV_LEN; | ||
252 | tx->control->iv_len = TKIP_IV_LEN; | ||
253 | ieee80211_tx_set_protected(tx); | 209 | ieee80211_tx_set_protected(tx); |
254 | 210 | ||
255 | if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && | 211 | if (tkip_encrypt_skb(tx, skb) < 0) |
256 | !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) && | ||
257 | !wpa_test) { | ||
258 | /* hwaccel - with no need for preallocated room for IV/ICV */ | ||
259 | tx->control->key_idx = tx->key->conf.hw_key_idx; | ||
260 | return TX_CONTINUE; | ||
261 | } | ||
262 | |||
263 | if (tkip_encrypt_skb(tx, skb, test) < 0) | ||
264 | return TX_DROP; | 212 | return TX_DROP; |
265 | 213 | ||
266 | if (tx->extra_frag) { | 214 | if (tx->extra_frag) { |
267 | int i; | 215 | int i; |
268 | for (i = 0; i < tx->num_extra_frag; i++) { | 216 | for (i = 0; i < tx->num_extra_frag; i++) { |
269 | if (tkip_encrypt_skb(tx, tx->extra_frag[i], test) | 217 | if (tkip_encrypt_skb(tx, tx->extra_frag[i]) < 0) |
270 | < 0) | ||
271 | return TX_DROP; | 218 | return TX_DROP; |
272 | } | 219 | } |
273 | } | 220 | } |
@@ -280,16 +227,14 @@ ieee80211_rx_result | |||
280 | ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) | 227 | ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) |
281 | { | 228 | { |
282 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; | 229 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; |
283 | u16 fc; | ||
284 | int hdrlen, res, hwaccel = 0, wpa_test = 0; | 230 | int hdrlen, res, hwaccel = 0, wpa_test = 0; |
285 | struct ieee80211_key *key = rx->key; | 231 | struct ieee80211_key *key = rx->key; |
286 | struct sk_buff *skb = rx->skb; | 232 | struct sk_buff *skb = rx->skb; |
287 | DECLARE_MAC_BUF(mac); | 233 | DECLARE_MAC_BUF(mac); |
288 | 234 | ||
289 | fc = le16_to_cpu(hdr->frame_control); | 235 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
290 | hdrlen = ieee80211_get_hdrlen(fc); | ||
291 | 236 | ||
292 | if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) | 237 | if (!ieee80211_is_data(hdr->frame_control)) |
293 | return RX_CONTINUE; | 238 | return RX_CONTINUE; |
294 | 239 | ||
295 | if (!rx->sta || skb->len - hdrlen < 12) | 240 | if (!rx->sta || skb->len - hdrlen < 12) |
@@ -315,15 +260,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) | |||
315 | hdr->addr1, hwaccel, rx->queue, | 260 | hdr->addr1, hwaccel, rx->queue, |
316 | &rx->tkip_iv32, | 261 | &rx->tkip_iv32, |
317 | &rx->tkip_iv16); | 262 | &rx->tkip_iv16); |
318 | if (res != TKIP_DECRYPT_OK || wpa_test) { | 263 | if (res != TKIP_DECRYPT_OK || wpa_test) |
319 | #ifdef CONFIG_MAC80211_DEBUG | ||
320 | if (net_ratelimit()) | ||
321 | printk(KERN_DEBUG "%s: TKIP decrypt failed for RX " | ||
322 | "frame from %s (res=%d)\n", rx->dev->name, | ||
323 | print_mac(mac, rx->sta->addr), res); | ||
324 | #endif /* CONFIG_MAC80211_DEBUG */ | ||
325 | return RX_DROP_UNUSABLE; | 264 | return RX_DROP_UNUSABLE; |
326 | } | ||
327 | 265 | ||
328 | /* Trim ICV */ | 266 | /* Trim ICV */ |
329 | skb_trim(skb, skb->len - TKIP_ICV_LEN); | 267 | skb_trim(skb, skb->len - TKIP_ICV_LEN); |
@@ -336,70 +274,68 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) | |||
336 | } | 274 | } |
337 | 275 | ||
338 | 276 | ||
339 | static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad, | 277 | static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch, |
340 | int encrypted) | 278 | int encrypted) |
341 | { | 279 | { |
342 | u16 fc; | 280 | __le16 mask_fc; |
343 | int a4_included, qos_included; | 281 | int a4_included; |
344 | u8 qos_tid, *fc_pos, *data, *sa, *da; | 282 | u8 qos_tid; |
345 | int len_a; | 283 | u8 *b_0, *aad; |
346 | size_t data_len; | 284 | u16 data_len, len_a; |
347 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 285 | unsigned int hdrlen; |
286 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
348 | 287 | ||
349 | fc_pos = (u8 *) &hdr->frame_control; | 288 | b_0 = scratch + 3 * AES_BLOCK_LEN; |
350 | fc = fc_pos[0] ^ (fc_pos[1] << 8); | 289 | aad = scratch + 4 * AES_BLOCK_LEN; |
351 | a4_included = (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == | 290 | |
352 | (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); | 291 | /* |
353 | 292 | * Mask FC: zero subtype b4 b5 b6 | |
354 | ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len); | 293 | * Retry, PwrMgt, MoreData; set Protected |
355 | data_len -= CCMP_HDR_LEN + (encrypted ? CCMP_MIC_LEN : 0); | 294 | */ |
356 | if (qos_tid & 0x80) { | 295 | mask_fc = hdr->frame_control; |
357 | qos_included = 1; | 296 | mask_fc &= ~cpu_to_le16(0x0070 | IEEE80211_FCTL_RETRY | |
358 | qos_tid &= 0x0f; | 297 | IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA); |
359 | } else | 298 | mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); |
360 | qos_included = 0; | 299 | |
361 | /* First block, b_0 */ | 300 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
301 | len_a = hdrlen - 2; | ||
302 | a4_included = ieee80211_has_a4(hdr->frame_control); | ||
303 | |||
304 | if (ieee80211_is_data_qos(hdr->frame_control)) | ||
305 | qos_tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; | ||
306 | else | ||
307 | qos_tid = 0; | ||
308 | |||
309 | data_len = skb->len - hdrlen - CCMP_HDR_LEN; | ||
310 | if (encrypted) | ||
311 | data_len -= CCMP_MIC_LEN; | ||
362 | 312 | ||
313 | /* First block, b_0 */ | ||
363 | b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */ | 314 | b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */ |
364 | /* Nonce: QoS Priority | A2 | PN */ | 315 | /* Nonce: QoS Priority | A2 | PN */ |
365 | b_0[1] = qos_tid; | 316 | b_0[1] = qos_tid; |
366 | memcpy(&b_0[2], hdr->addr2, 6); | 317 | memcpy(&b_0[2], hdr->addr2, ETH_ALEN); |
367 | memcpy(&b_0[8], pn, CCMP_PN_LEN); | 318 | memcpy(&b_0[8], pn, CCMP_PN_LEN); |
368 | /* l(m) */ | 319 | /* l(m) */ |
369 | b_0[14] = (data_len >> 8) & 0xff; | 320 | put_unaligned_be16(data_len, &b_0[14]); |
370 | b_0[15] = data_len & 0xff; | ||
371 | |||
372 | 321 | ||
373 | /* AAD (extra authenticate-only data) / masked 802.11 header | 322 | /* AAD (extra authenticate-only data) / masked 802.11 header |
374 | * FC | A1 | A2 | A3 | SC | [A4] | [QC] */ | 323 | * FC | A1 | A2 | A3 | SC | [A4] | [QC] */ |
375 | 324 | put_unaligned_be16(len_a, &aad[0]); | |
376 | len_a = a4_included ? 28 : 22; | 325 | put_unaligned(mask_fc, (__le16 *)&aad[2]); |
377 | if (qos_included) | 326 | memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN); |
378 | len_a += 2; | ||
379 | |||
380 | aad[0] = 0; /* (len_a >> 8) & 0xff; */ | ||
381 | aad[1] = len_a & 0xff; | ||
382 | /* Mask FC: zero subtype b4 b5 b6 */ | ||
383 | aad[2] = fc_pos[0] & ~(BIT(4) | BIT(5) | BIT(6)); | ||
384 | /* Retry, PwrMgt, MoreData; set Protected */ | ||
385 | aad[3] = (fc_pos[1] & ~(BIT(3) | BIT(4) | BIT(5))) | BIT(6); | ||
386 | memcpy(&aad[4], &hdr->addr1, 18); | ||
387 | 327 | ||
388 | /* Mask Seq#, leave Frag# */ | 328 | /* Mask Seq#, leave Frag# */ |
389 | aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f; | 329 | aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f; |
390 | aad[23] = 0; | 330 | aad[23] = 0; |
331 | |||
391 | if (a4_included) { | 332 | if (a4_included) { |
392 | memcpy(&aad[24], hdr->addr4, 6); | 333 | memcpy(&aad[24], hdr->addr4, ETH_ALEN); |
393 | aad[30] = 0; | 334 | aad[30] = qos_tid; |
394 | aad[31] = 0; | 335 | aad[31] = 0; |
395 | } else | 336 | } else { |
396 | memset(&aad[24], 0, 8); | 337 | memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN); |
397 | if (qos_included) { | 338 | aad[24] = qos_tid; |
398 | u8 *dpos = &aad[a4_included ? 30 : 24]; | ||
399 | |||
400 | /* Mask QoS Control field */ | ||
401 | dpos[0] = qos_tid; | ||
402 | dpos[1] = 0; | ||
403 | } | 339 | } |
404 | } | 340 | } |
405 | 341 | ||
@@ -429,36 +365,37 @@ static inline int ccmp_hdr2pn(u8 *pn, u8 *hdr) | |||
429 | } | 365 | } |
430 | 366 | ||
431 | 367 | ||
432 | static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, | 368 | static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) |
433 | struct sk_buff *skb, int test) | ||
434 | { | 369 | { |
435 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 370 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
436 | struct ieee80211_key *key = tx->key; | 371 | struct ieee80211_key *key = tx->key; |
437 | int hdrlen, len, tailneed; | 372 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
438 | u16 fc; | 373 | int hdrlen, len, tail; |
439 | u8 *pos, *pn, *b_0, *aad, *scratch; | 374 | u8 *pos, *pn; |
440 | int i; | 375 | int i; |
441 | 376 | ||
442 | scratch = key->u.ccmp.tx_crypto_buf; | 377 | info->control.icv_len = CCMP_MIC_LEN; |
443 | b_0 = scratch + 3 * AES_BLOCK_LEN; | 378 | info->control.iv_len = CCMP_HDR_LEN; |
444 | aad = scratch + 4 * AES_BLOCK_LEN; | ||
445 | 379 | ||
446 | fc = le16_to_cpu(hdr->frame_control); | 380 | if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && |
447 | hdrlen = ieee80211_get_hdrlen(fc); | 381 | !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { |
382 | /* hwaccel - with no need for preallocated room for CCMP " | ||
383 | * header or MIC fields */ | ||
384 | info->control.hw_key = &tx->key->conf; | ||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | ||
448 | len = skb->len - hdrlen; | 389 | len = skb->len - hdrlen; |
449 | 390 | ||
450 | if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) | 391 | if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) |
451 | tailneed = 0; | 392 | tail = 0; |
452 | else | 393 | else |
453 | tailneed = CCMP_MIC_LEN; | 394 | tail = CCMP_MIC_LEN; |
454 | 395 | ||
455 | if ((skb_headroom(skb) < CCMP_HDR_LEN || | 396 | if (WARN_ON(skb_tailroom(skb) < tail || |
456 | skb_tailroom(skb) < tailneed)) { | 397 | skb_headroom(skb) < CCMP_HDR_LEN)) |
457 | I802_DEBUG_INC(tx->local->tx_expand_skb_head); | 398 | return -1; |
458 | if (unlikely(pskb_expand_head(skb, CCMP_HDR_LEN, tailneed, | ||
459 | GFP_ATOMIC))) | ||
460 | return -1; | ||
461 | } | ||
462 | 399 | ||
463 | pos = skb_push(skb, CCMP_HDR_LEN); | 400 | pos = skb_push(skb, CCMP_HDR_LEN); |
464 | memmove(pos, pos + CCMP_HDR_LEN, hdrlen); | 401 | memmove(pos, pos + CCMP_HDR_LEN, hdrlen); |
@@ -478,13 +415,13 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, | |||
478 | 415 | ||
479 | if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { | 416 | if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { |
480 | /* hwaccel - with preallocated room for CCMP header */ | 417 | /* hwaccel - with preallocated room for CCMP header */ |
481 | tx->control->key_idx = key->conf.hw_key_idx; | 418 | info->control.hw_key = &tx->key->conf; |
482 | return 0; | 419 | return 0; |
483 | } | 420 | } |
484 | 421 | ||
485 | pos += CCMP_HDR_LEN; | 422 | pos += CCMP_HDR_LEN; |
486 | ccmp_special_blocks(skb, pn, b_0, aad, 0); | 423 | ccmp_special_blocks(skb, pn, key->u.ccmp.tx_crypto_buf, 0); |
487 | ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, scratch, b_0, aad, pos, len, | 424 | ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, key->u.ccmp.tx_crypto_buf, pos, len, |
488 | pos, skb_put(skb, CCMP_MIC_LEN)); | 425 | pos, skb_put(skb, CCMP_MIC_LEN)); |
489 | 426 | ||
490 | return 0; | 427 | return 0; |
@@ -495,28 +432,16 @@ ieee80211_tx_result | |||
495 | ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) | 432 | ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) |
496 | { | 433 | { |
497 | struct sk_buff *skb = tx->skb; | 434 | struct sk_buff *skb = tx->skb; |
498 | int test = 0; | ||
499 | 435 | ||
500 | tx->control->icv_len = CCMP_MIC_LEN; | ||
501 | tx->control->iv_len = CCMP_HDR_LEN; | ||
502 | ieee80211_tx_set_protected(tx); | 436 | ieee80211_tx_set_protected(tx); |
503 | 437 | ||
504 | if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && | 438 | if (ccmp_encrypt_skb(tx, skb) < 0) |
505 | !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { | ||
506 | /* hwaccel - with no need for preallocated room for CCMP " | ||
507 | * header or MIC fields */ | ||
508 | tx->control->key_idx = tx->key->conf.hw_key_idx; | ||
509 | return TX_CONTINUE; | ||
510 | } | ||
511 | |||
512 | if (ccmp_encrypt_skb(tx, skb, test) < 0) | ||
513 | return TX_DROP; | 439 | return TX_DROP; |
514 | 440 | ||
515 | if (tx->extra_frag) { | 441 | if (tx->extra_frag) { |
516 | int i; | 442 | int i; |
517 | for (i = 0; i < tx->num_extra_frag; i++) { | 443 | for (i = 0; i < tx->num_extra_frag; i++) { |
518 | if (ccmp_encrypt_skb(tx, tx->extra_frag[i], test) | 444 | if (ccmp_encrypt_skb(tx, tx->extra_frag[i]) < 0) |
519 | < 0) | ||
520 | return TX_DROP; | 445 | return TX_DROP; |
521 | } | 446 | } |
522 | } | 447 | } |
@@ -528,8 +453,7 @@ ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) | |||
528 | ieee80211_rx_result | 453 | ieee80211_rx_result |
529 | ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) | 454 | ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) |
530 | { | 455 | { |
531 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; | 456 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
532 | u16 fc; | ||
533 | int hdrlen; | 457 | int hdrlen; |
534 | struct ieee80211_key *key = rx->key; | 458 | struct ieee80211_key *key = rx->key; |
535 | struct sk_buff *skb = rx->skb; | 459 | struct sk_buff *skb = rx->skb; |
@@ -537,10 +461,9 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) | |||
537 | int data_len; | 461 | int data_len; |
538 | DECLARE_MAC_BUF(mac); | 462 | DECLARE_MAC_BUF(mac); |
539 | 463 | ||
540 | fc = le16_to_cpu(hdr->frame_control); | 464 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
541 | hdrlen = ieee80211_get_hdrlen(fc); | ||
542 | 465 | ||
543 | if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) | 466 | if (!ieee80211_is_data(hdr->frame_control)) |
544 | return RX_CONTINUE; | 467 | return RX_CONTINUE; |
545 | 468 | ||
546 | data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN; | 469 | data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN; |
@@ -554,41 +477,19 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) | |||
554 | (void) ccmp_hdr2pn(pn, skb->data + hdrlen); | 477 | (void) ccmp_hdr2pn(pn, skb->data + hdrlen); |
555 | 478 | ||
556 | if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) { | 479 | if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) { |
557 | #ifdef CONFIG_MAC80211_DEBUG | ||
558 | u8 *ppn = key->u.ccmp.rx_pn[rx->queue]; | ||
559 | |||
560 | printk(KERN_DEBUG "%s: CCMP replay detected for RX frame from " | ||
561 | "%s (RX PN %02x%02x%02x%02x%02x%02x <= prev. PN " | ||
562 | "%02x%02x%02x%02x%02x%02x)\n", rx->dev->name, | ||
563 | print_mac(mac, rx->sta->addr), | ||
564 | pn[0], pn[1], pn[2], pn[3], pn[4], pn[5], | ||
565 | ppn[0], ppn[1], ppn[2], ppn[3], ppn[4], ppn[5]); | ||
566 | #endif /* CONFIG_MAC80211_DEBUG */ | ||
567 | key->u.ccmp.replays++; | 480 | key->u.ccmp.replays++; |
568 | return RX_DROP_UNUSABLE; | 481 | return RX_DROP_UNUSABLE; |
569 | } | 482 | } |
570 | 483 | ||
571 | if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { | 484 | if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { |
572 | /* hardware didn't decrypt/verify MIC */ | 485 | /* hardware didn't decrypt/verify MIC */ |
573 | u8 *scratch, *b_0, *aad; | 486 | ccmp_special_blocks(skb, pn, key->u.ccmp.rx_crypto_buf, 1); |
574 | |||
575 | scratch = key->u.ccmp.rx_crypto_buf; | ||
576 | b_0 = scratch + 3 * AES_BLOCK_LEN; | ||
577 | aad = scratch + 4 * AES_BLOCK_LEN; | ||
578 | |||
579 | ccmp_special_blocks(skb, pn, b_0, aad, 1); | ||
580 | 487 | ||
581 | if (ieee80211_aes_ccm_decrypt( | 488 | if (ieee80211_aes_ccm_decrypt( |
582 | key->u.ccmp.tfm, scratch, b_0, aad, | 489 | key->u.ccmp.tfm, key->u.ccmp.rx_crypto_buf, |
583 | skb->data + hdrlen + CCMP_HDR_LEN, data_len, | 490 | skb->data + hdrlen + CCMP_HDR_LEN, data_len, |
584 | skb->data + skb->len - CCMP_MIC_LEN, | 491 | skb->data + skb->len - CCMP_MIC_LEN, |
585 | skb->data + hdrlen + CCMP_HDR_LEN)) { | 492 | skb->data + hdrlen + CCMP_HDR_LEN)) { |
586 | #ifdef CONFIG_MAC80211_DEBUG | ||
587 | if (net_ratelimit()) | ||
588 | printk(KERN_DEBUG "%s: CCMP decrypt failed " | ||
589 | "for RX frame from %s\n", rx->dev->name, | ||
590 | print_mac(mac, rx->sta->addr)); | ||
591 | #endif /* CONFIG_MAC80211_DEBUG */ | ||
592 | return RX_DROP_UNUSABLE; | 493 | return RX_DROP_UNUSABLE; |
593 | } | 494 | } |
594 | } | 495 | } |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index aa8d80c35e28..316c7af1d2b1 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -33,9 +33,8 @@ config NF_CONNTRACK | |||
33 | into connections. | 33 | into connections. |
34 | 34 | ||
35 | This is required to do Masquerading or other kinds of Network | 35 | This is required to do Masquerading or other kinds of Network |
36 | Address Translation (except for Fast NAT). It can also be used to | 36 | Address Translation. It can also be used to enhance packet |
37 | enhance packet filtering (see `Connection state match support' | 37 | filtering (see `Connection state match support' below). |
38 | below). | ||
39 | 38 | ||
40 | To compile it as a module, choose M here. If unsure, say N. | 39 | To compile it as a module, choose M here. If unsure, say N. |
41 | 40 | ||
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 662c1ccfee26..28d03e64200b 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -464,7 +464,8 @@ static noinline int early_drop(unsigned int hash) | |||
464 | } | 464 | } |
465 | 465 | ||
466 | struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, | 466 | struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, |
467 | const struct nf_conntrack_tuple *repl) | 467 | const struct nf_conntrack_tuple *repl, |
468 | gfp_t gfp) | ||
468 | { | 469 | { |
469 | struct nf_conn *ct = NULL; | 470 | struct nf_conn *ct = NULL; |
470 | 471 | ||
@@ -489,7 +490,7 @@ struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, | |||
489 | } | 490 | } |
490 | } | 491 | } |
491 | 492 | ||
492 | ct = kmem_cache_zalloc(nf_conntrack_cachep, GFP_ATOMIC); | 493 | ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp); |
493 | if (ct == NULL) { | 494 | if (ct == NULL) { |
494 | pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); | 495 | pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); |
495 | atomic_dec(&nf_conntrack_count); | 496 | atomic_dec(&nf_conntrack_count); |
@@ -542,7 +543,7 @@ init_conntrack(const struct nf_conntrack_tuple *tuple, | |||
542 | return NULL; | 543 | return NULL; |
543 | } | 544 | } |
544 | 545 | ||
545 | ct = nf_conntrack_alloc(tuple, &repl_tuple); | 546 | ct = nf_conntrack_alloc(tuple, &repl_tuple, GFP_ATOMIC); |
546 | if (ct == NULL || IS_ERR(ct)) { | 547 | if (ct == NULL || IS_ERR(ct)) { |
547 | pr_debug("Can't allocate conntrack.\n"); | 548 | pr_debug("Can't allocate conntrack.\n"); |
548 | return (struct nf_conntrack_tuple_hash *)ct; | 549 | return (struct nf_conntrack_tuple_hash *)ct; |
@@ -847,6 +848,28 @@ acct: | |||
847 | } | 848 | } |
848 | EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); | 849 | EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); |
849 | 850 | ||
851 | bool __nf_ct_kill_acct(struct nf_conn *ct, | ||
852 | enum ip_conntrack_info ctinfo, | ||
853 | const struct sk_buff *skb, | ||
854 | int do_acct) | ||
855 | { | ||
856 | #ifdef CONFIG_NF_CT_ACCT | ||
857 | if (do_acct) { | ||
858 | spin_lock_bh(&nf_conntrack_lock); | ||
859 | ct->counters[CTINFO2DIR(ctinfo)].packets++; | ||
860 | ct->counters[CTINFO2DIR(ctinfo)].bytes += | ||
861 | skb->len - skb_network_offset(skb); | ||
862 | spin_unlock_bh(&nf_conntrack_lock); | ||
863 | } | ||
864 | #endif | ||
865 | if (del_timer(&ct->timeout)) { | ||
866 | ct->timeout.function((unsigned long)ct); | ||
867 | return true; | ||
868 | } | ||
869 | return false; | ||
870 | } | ||
871 | EXPORT_SYMBOL_GPL(__nf_ct_kill_acct); | ||
872 | |||
850 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 873 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
851 | 874 | ||
852 | #include <linux/netfilter/nfnetlink.h> | 875 | #include <linux/netfilter/nfnetlink.h> |
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index 8a3f8b34e466..3469bc71a385 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c | |||
@@ -95,13 +95,11 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) | |||
95 | newlen = newoff + t->len; | 95 | newlen = newoff + t->len; |
96 | rcu_read_unlock(); | 96 | rcu_read_unlock(); |
97 | 97 | ||
98 | if (newlen >= ksize(ct->ext)) { | 98 | new = krealloc(ct->ext, newlen, gfp); |
99 | new = kmalloc(newlen, gfp); | 99 | if (!new) |
100 | if (!new) | 100 | return NULL; |
101 | return NULL; | ||
102 | |||
103 | memcpy(new, ct->ext, ct->ext->len); | ||
104 | 101 | ||
102 | if (new != ct->ext) { | ||
105 | for (i = 0; i < NF_CT_EXT_NUM; i++) { | 103 | for (i = 0; i < NF_CT_EXT_NUM; i++) { |
106 | if (!nf_ct_ext_exist(ct, i)) | 104 | if (!nf_ct_ext_exist(ct, i)) |
107 | continue; | 105 | continue; |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 077bcd228799..95a7967731f9 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * (C) 2001 by Jay Schulist <jschlst@samba.org> | 4 | * (C) 2001 by Jay Schulist <jschlst@samba.org> |
5 | * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> | 5 | * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> |
6 | * (C) 2003 by Patrick Mchardy <kaber@trash.net> | 6 | * (C) 2003 by Patrick Mchardy <kaber@trash.net> |
7 | * (C) 2005-2007 by Pablo Neira Ayuso <pablo@netfilter.org> | 7 | * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org> |
8 | * | 8 | * |
9 | * Initial connection tracking via netlink development funded and | 9 | * Initial connection tracking via netlink development funded and |
10 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) | 10 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) |
@@ -476,14 +476,14 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, | |||
476 | if (ctnetlink_dump_id(skb, ct) < 0) | 476 | if (ctnetlink_dump_id(skb, ct) < 0) |
477 | goto nla_put_failure; | 477 | goto nla_put_failure; |
478 | 478 | ||
479 | if (ctnetlink_dump_status(skb, ct) < 0) | ||
480 | goto nla_put_failure; | ||
481 | |||
479 | if (events & IPCT_DESTROY) { | 482 | if (events & IPCT_DESTROY) { |
480 | if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || | 483 | if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || |
481 | ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0) | 484 | ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0) |
482 | goto nla_put_failure; | 485 | goto nla_put_failure; |
483 | } else { | 486 | } else { |
484 | if (ctnetlink_dump_status(skb, ct) < 0) | ||
485 | goto nla_put_failure; | ||
486 | |||
487 | if (ctnetlink_dump_timeout(skb, ct) < 0) | 487 | if (ctnetlink_dump_timeout(skb, ct) < 0) |
488 | goto nla_put_failure; | 488 | goto nla_put_failure; |
489 | 489 | ||
@@ -813,9 +813,8 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
813 | return -ENOENT; | 813 | return -ENOENT; |
814 | } | 814 | } |
815 | } | 815 | } |
816 | if (del_timer(&ct->timeout)) | ||
817 | ct->timeout.function((unsigned long)ct); | ||
818 | 816 | ||
817 | nf_ct_kill(ct); | ||
819 | nf_ct_put(ct); | 818 | nf_ct_put(ct); |
820 | 819 | ||
821 | return 0; | 820 | return 0; |
@@ -892,20 +891,19 @@ ctnetlink_change_status(struct nf_conn *ct, struct nlattr *cda[]) | |||
892 | 891 | ||
893 | if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) | 892 | if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) |
894 | /* unchangeable */ | 893 | /* unchangeable */ |
895 | return -EINVAL; | 894 | return -EBUSY; |
896 | 895 | ||
897 | if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) | 896 | if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) |
898 | /* SEEN_REPLY bit can only be set */ | 897 | /* SEEN_REPLY bit can only be set */ |
899 | return -EINVAL; | 898 | return -EBUSY; |
900 | |||
901 | 899 | ||
902 | if (d & IPS_ASSURED && !(status & IPS_ASSURED)) | 900 | if (d & IPS_ASSURED && !(status & IPS_ASSURED)) |
903 | /* ASSURED bit can only be set */ | 901 | /* ASSURED bit can only be set */ |
904 | return -EINVAL; | 902 | return -EBUSY; |
905 | 903 | ||
906 | if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { | 904 | if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { |
907 | #ifndef CONFIG_NF_NAT_NEEDED | 905 | #ifndef CONFIG_NF_NAT_NEEDED |
908 | return -EINVAL; | 906 | return -EOPNOTSUPP; |
909 | #else | 907 | #else |
910 | struct nf_nat_range range; | 908 | struct nf_nat_range range; |
911 | 909 | ||
@@ -946,7 +944,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[]) | |||
946 | 944 | ||
947 | /* don't change helper of sibling connections */ | 945 | /* don't change helper of sibling connections */ |
948 | if (ct->master) | 946 | if (ct->master) |
949 | return -EINVAL; | 947 | return -EBUSY; |
950 | 948 | ||
951 | err = ctnetlink_parse_help(cda[CTA_HELP], &helpname); | 949 | err = ctnetlink_parse_help(cda[CTA_HELP], &helpname); |
952 | if (err < 0) | 950 | if (err < 0) |
@@ -964,7 +962,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[]) | |||
964 | 962 | ||
965 | helper = __nf_conntrack_helper_find_byname(helpname); | 963 | helper = __nf_conntrack_helper_find_byname(helpname); |
966 | if (helper == NULL) | 964 | if (helper == NULL) |
967 | return -EINVAL; | 965 | return -EOPNOTSUPP; |
968 | 966 | ||
969 | if (help) { | 967 | if (help) { |
970 | if (help->helper == helper) | 968 | if (help->helper == helper) |
@@ -1131,7 +1129,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[], | |||
1131 | struct nf_conn_help *help; | 1129 | struct nf_conn_help *help; |
1132 | struct nf_conntrack_helper *helper; | 1130 | struct nf_conntrack_helper *helper; |
1133 | 1131 | ||
1134 | ct = nf_conntrack_alloc(otuple, rtuple); | 1132 | ct = nf_conntrack_alloc(otuple, rtuple, GFP_KERNEL); |
1135 | if (ct == NULL || IS_ERR(ct)) | 1133 | if (ct == NULL || IS_ERR(ct)) |
1136 | return -ENOMEM; | 1134 | return -ENOMEM; |
1137 | 1135 | ||
@@ -1259,12 +1257,12 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
1259 | if (!(nlh->nlmsg_flags & NLM_F_EXCL)) { | 1257 | if (!(nlh->nlmsg_flags & NLM_F_EXCL)) { |
1260 | /* we only allow nat config for new conntracks */ | 1258 | /* we only allow nat config for new conntracks */ |
1261 | if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { | 1259 | if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { |
1262 | err = -EINVAL; | 1260 | err = -EOPNOTSUPP; |
1263 | goto out_unlock; | 1261 | goto out_unlock; |
1264 | } | 1262 | } |
1265 | /* can't link an existing conntrack to a master */ | 1263 | /* can't link an existing conntrack to a master */ |
1266 | if (cda[CTA_TUPLE_MASTER]) { | 1264 | if (cda[CTA_TUPLE_MASTER]) { |
1267 | err = -EINVAL; | 1265 | err = -EOPNOTSUPP; |
1268 | goto out_unlock; | 1266 | goto out_unlock; |
1269 | } | 1267 | } |
1270 | err = ctnetlink_change_conntrack(nf_ct_tuplehash_to_ctrack(h), | 1268 | err = ctnetlink_change_conntrack(nf_ct_tuplehash_to_ctrack(h), |
@@ -1609,7 +1607,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | |||
1609 | h = __nf_conntrack_helper_find_byname(name); | 1607 | h = __nf_conntrack_helper_find_byname(name); |
1610 | if (!h) { | 1608 | if (!h) { |
1611 | spin_unlock_bh(&nf_conntrack_lock); | 1609 | spin_unlock_bh(&nf_conntrack_lock); |
1612 | return -EINVAL; | 1610 | return -EOPNOTSUPP; |
1613 | } | 1611 | } |
1614 | for (i = 0; i < nf_ct_expect_hsize; i++) { | 1612 | for (i = 0; i < nf_ct_expect_hsize; i++) { |
1615 | hlist_for_each_entry_safe(exp, n, next, | 1613 | hlist_for_each_entry_safe(exp, n, next, |
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index afb4a1861d2c..e7866dd3cde6 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c | |||
@@ -475,8 +475,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, | |||
475 | if (type == DCCP_PKT_RESET && | 475 | if (type == DCCP_PKT_RESET && |
476 | !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { | 476 | !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { |
477 | /* Tear down connection immediately if only reply is a RESET */ | 477 | /* Tear down connection immediately if only reply is a RESET */ |
478 | if (del_timer(&ct->timeout)) | 478 | nf_ct_kill_acct(ct, ctinfo, skb); |
479 | ct->timeout.function((unsigned long)ct); | ||
480 | return NF_ACCEPT; | 479 | return NF_ACCEPT; |
481 | } | 480 | } |
482 | 481 | ||
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index cbf2e27a22b2..41183a4d2d62 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c | |||
@@ -463,6 +463,82 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb, | |||
463 | return true; | 463 | return true; |
464 | } | 464 | } |
465 | 465 | ||
466 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
467 | |||
468 | #include <linux/netfilter/nfnetlink.h> | ||
469 | #include <linux/netfilter/nfnetlink_conntrack.h> | ||
470 | |||
471 | static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, | ||
472 | const struct nf_conn *ct) | ||
473 | { | ||
474 | struct nlattr *nest_parms; | ||
475 | |||
476 | read_lock_bh(&sctp_lock); | ||
477 | nest_parms = nla_nest_start(skb, CTA_PROTOINFO_SCTP | NLA_F_NESTED); | ||
478 | if (!nest_parms) | ||
479 | goto nla_put_failure; | ||
480 | |||
481 | NLA_PUT_U8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state); | ||
482 | |||
483 | NLA_PUT_BE32(skb, | ||
484 | CTA_PROTOINFO_SCTP_VTAG_ORIGINAL, | ||
485 | htonl(ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL])); | ||
486 | |||
487 | NLA_PUT_BE32(skb, | ||
488 | CTA_PROTOINFO_SCTP_VTAG_REPLY, | ||
489 | htonl(ct->proto.sctp.vtag[IP_CT_DIR_REPLY])); | ||
490 | |||
491 | read_unlock_bh(&sctp_lock); | ||
492 | |||
493 | nla_nest_end(skb, nest_parms); | ||
494 | |||
495 | return 0; | ||
496 | |||
497 | nla_put_failure: | ||
498 | read_unlock_bh(&sctp_lock); | ||
499 | return -1; | ||
500 | } | ||
501 | |||
502 | static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = { | ||
503 | [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 }, | ||
504 | [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 }, | ||
505 | [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 }, | ||
506 | }; | ||
507 | |||
508 | static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct) | ||
509 | { | ||
510 | struct nlattr *attr = cda[CTA_PROTOINFO_SCTP]; | ||
511 | struct nlattr *tb[CTA_PROTOINFO_SCTP_MAX+1]; | ||
512 | int err; | ||
513 | |||
514 | /* updates may not contain the internal protocol info, skip parsing */ | ||
515 | if (!attr) | ||
516 | return 0; | ||
517 | |||
518 | err = nla_parse_nested(tb, | ||
519 | CTA_PROTOINFO_SCTP_MAX, | ||
520 | attr, | ||
521 | sctp_nla_policy); | ||
522 | if (err < 0) | ||
523 | return err; | ||
524 | |||
525 | if (!tb[CTA_PROTOINFO_SCTP_STATE] || | ||
526 | !tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] || | ||
527 | !tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]) | ||
528 | return -EINVAL; | ||
529 | |||
530 | write_lock_bh(&sctp_lock); | ||
531 | ct->proto.sctp.state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]); | ||
532 | ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = | ||
533 | ntohl(nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL])); | ||
534 | ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = | ||
535 | ntohl(nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_REPLY])); | ||
536 | write_unlock_bh(&sctp_lock); | ||
537 | |||
538 | return 0; | ||
539 | } | ||
540 | #endif | ||
541 | |||
466 | #ifdef CONFIG_SYSCTL | 542 | #ifdef CONFIG_SYSCTL |
467 | static unsigned int sctp_sysctl_table_users; | 543 | static unsigned int sctp_sysctl_table_users; |
468 | static struct ctl_table_header *sctp_sysctl_header; | 544 | static struct ctl_table_header *sctp_sysctl_header; |
@@ -591,6 +667,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = { | |||
591 | .new = sctp_new, | 667 | .new = sctp_new, |
592 | .me = THIS_MODULE, | 668 | .me = THIS_MODULE, |
593 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 669 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
670 | .to_nlattr = sctp_to_nlattr, | ||
671 | .from_nlattr = nlattr_to_sctp, | ||
594 | .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, | 672 | .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, |
595 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, | 673 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, |
596 | .nla_policy = nf_ct_port_nla_policy, | 674 | .nla_policy = nf_ct_port_nla_policy, |
@@ -617,6 +695,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = { | |||
617 | .new = sctp_new, | 695 | .new = sctp_new, |
618 | .me = THIS_MODULE, | 696 | .me = THIS_MODULE, |
619 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 697 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
698 | .to_nlattr = sctp_to_nlattr, | ||
699 | .from_nlattr = nlattr_to_sctp, | ||
620 | .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, | 700 | .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, |
621 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, | 701 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, |
622 | .nla_policy = nf_ct_port_nla_policy, | 702 | .nla_policy = nf_ct_port_nla_policy, |
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index dd28fb239a60..420a10d8eb1e 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
@@ -844,14 +844,13 @@ static int tcp_packet(struct nf_conn *ct, | |||
844 | /* Attempt to reopen a closed/aborted connection. | 844 | /* Attempt to reopen a closed/aborted connection. |
845 | * Delete this connection and look up again. */ | 845 | * Delete this connection and look up again. */ |
846 | write_unlock_bh(&tcp_lock); | 846 | write_unlock_bh(&tcp_lock); |
847 | |||
847 | /* Only repeat if we can actually remove the timer. | 848 | /* Only repeat if we can actually remove the timer. |
848 | * Destruction may already be in progress in process | 849 | * Destruction may already be in progress in process |
849 | * context and we must give it a chance to terminate. | 850 | * context and we must give it a chance to terminate. |
850 | */ | 851 | */ |
851 | if (del_timer(&ct->timeout)) { | 852 | if (nf_ct_kill(ct)) |
852 | ct->timeout.function((unsigned long)ct); | ||
853 | return -NF_REPEAT; | 853 | return -NF_REPEAT; |
854 | } | ||
855 | return -NF_DROP; | 854 | return -NF_DROP; |
856 | } | 855 | } |
857 | /* Fall through */ | 856 | /* Fall through */ |
@@ -884,8 +883,7 @@ static int tcp_packet(struct nf_conn *ct, | |||
884 | if (LOG_INVALID(IPPROTO_TCP)) | 883 | if (LOG_INVALID(IPPROTO_TCP)) |
885 | nf_log_packet(pf, 0, skb, NULL, NULL, NULL, | 884 | nf_log_packet(pf, 0, skb, NULL, NULL, NULL, |
886 | "nf_ct_tcp: killing out of sync session "); | 885 | "nf_ct_tcp: killing out of sync session "); |
887 | if (del_timer(&ct->timeout)) | 886 | nf_ct_kill(ct); |
888 | ct->timeout.function((unsigned long)ct); | ||
889 | return -NF_DROP; | 887 | return -NF_DROP; |
890 | } | 888 | } |
891 | ct->proto.tcp.last_index = index; | 889 | ct->proto.tcp.last_index = index; |
@@ -968,8 +966,7 @@ static int tcp_packet(struct nf_conn *ct, | |||
968 | problem case, so we can delete the conntrack | 966 | problem case, so we can delete the conntrack |
969 | immediately. --RR */ | 967 | immediately. --RR */ |
970 | if (th->rst) { | 968 | if (th->rst) { |
971 | if (del_timer(&ct->timeout)) | 969 | nf_ct_kill_acct(ct, ctinfo, skb); |
972 | ct->timeout.function((unsigned long)ct); | ||
973 | return NF_ACCEPT; | 970 | return NF_ACCEPT; |
974 | } | 971 | } |
975 | } else if (!test_bit(IPS_ASSURED_BIT, &ct->status) | 972 | } else if (!test_bit(IPS_ASSURED_BIT, &ct->status) |
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c index 69d699f95f4c..01489681fa96 100644 --- a/net/netfilter/nf_sockopt.c +++ b/net/netfilter/nf_sockopt.c | |||
@@ -65,7 +65,7 @@ static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, int pf, | |||
65 | { | 65 | { |
66 | struct nf_sockopt_ops *ops; | 66 | struct nf_sockopt_ops *ops; |
67 | 67 | ||
68 | if (sock_net(sk) != &init_net) | 68 | if (!net_eq(sock_net(sk), &init_net)) |
69 | return ERR_PTR(-ENOPROTOOPT); | 69 | return ERR_PTR(-ENOPROTOOPT); |
70 | 70 | ||
71 | if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) | 71 | if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 3447025ce068..8c860112ce05 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -243,7 +243,6 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, | |||
243 | switch ((enum nfqnl_config_mode)queue->copy_mode) { | 243 | switch ((enum nfqnl_config_mode)queue->copy_mode) { |
244 | case NFQNL_COPY_META: | 244 | case NFQNL_COPY_META: |
245 | case NFQNL_COPY_NONE: | 245 | case NFQNL_COPY_NONE: |
246 | data_len = 0; | ||
247 | break; | 246 | break; |
248 | 247 | ||
249 | case NFQNL_COPY_PACKET: | 248 | case NFQNL_COPY_PACKET: |
@@ -556,7 +555,7 @@ nfqnl_rcv_dev_event(struct notifier_block *this, | |||
556 | { | 555 | { |
557 | struct net_device *dev = ptr; | 556 | struct net_device *dev = ptr; |
558 | 557 | ||
559 | if (dev_net(dev) != &init_net) | 558 | if (!net_eq(dev_net(dev), &init_net)) |
560 | return NOTIFY_DONE; | 559 | return NOTIFY_DONE; |
561 | 560 | ||
562 | /* Drop any packets associated with the downed device */ | 561 | /* Drop any packets associated with the downed device */ |
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c index 211189eb2b67..76ca1f2421eb 100644 --- a/net/netfilter/xt_CONNSECMARK.c +++ b/net/netfilter/xt_CONNSECMARK.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> | 8 | * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> |
9 | * by Henrik Nordstrom <hno@marasystems.com> | 9 | * by Henrik Nordstrom <hno@marasystems.com> |
10 | * | 10 | * |
11 | * (C) 2006 Red Hat, Inc., James Morris <jmorris@redhat.com> | 11 | * (C) 2006,2008 Red Hat, Inc., James Morris <jmorris@redhat.com> |
12 | * | 12 | * |
13 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
14 | * it under the terms of the GNU General Public License version 2 as | 14 | * it under the terms of the GNU General Public License version 2 as |
@@ -94,6 +94,12 @@ connsecmark_tg_check(const char *tablename, const void *entry, | |||
94 | { | 94 | { |
95 | const struct xt_connsecmark_target_info *info = targinfo; | 95 | const struct xt_connsecmark_target_info *info = targinfo; |
96 | 96 | ||
97 | if (strcmp(tablename, "mangle") && strcmp(tablename, "security")) { | ||
98 | printk(KERN_INFO PFX "target only valid in the \'mangle\' " | ||
99 | "or \'security\' tables, not \'%s\'.\n", tablename); | ||
100 | return false; | ||
101 | } | ||
102 | |||
97 | switch (info->mode) { | 103 | switch (info->mode) { |
98 | case CONNSECMARK_SAVE: | 104 | case CONNSECMARK_SAVE: |
99 | case CONNSECMARK_RESTORE: | 105 | case CONNSECMARK_RESTORE: |
@@ -126,7 +132,6 @@ static struct xt_target connsecmark_tg_reg[] __read_mostly = { | |||
126 | .destroy = connsecmark_tg_destroy, | 132 | .destroy = connsecmark_tg_destroy, |
127 | .target = connsecmark_tg, | 133 | .target = connsecmark_tg, |
128 | .targetsize = sizeof(struct xt_connsecmark_target_info), | 134 | .targetsize = sizeof(struct xt_connsecmark_target_info), |
129 | .table = "mangle", | ||
130 | .me = THIS_MODULE, | 135 | .me = THIS_MODULE, |
131 | }, | 136 | }, |
132 | { | 137 | { |
@@ -136,7 +141,6 @@ static struct xt_target connsecmark_tg_reg[] __read_mostly = { | |||
136 | .destroy = connsecmark_tg_destroy, | 141 | .destroy = connsecmark_tg_destroy, |
137 | .target = connsecmark_tg, | 142 | .target = connsecmark_tg, |
138 | .targetsize = sizeof(struct xt_connsecmark_target_info), | 143 | .targetsize = sizeof(struct xt_connsecmark_target_info), |
139 | .table = "mangle", | ||
140 | .me = THIS_MODULE, | 144 | .me = THIS_MODULE, |
141 | }, | 145 | }, |
142 | }; | 146 | }; |
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c index c0284856ccd4..94f87ee7552b 100644 --- a/net/netfilter/xt_SECMARK.c +++ b/net/netfilter/xt_SECMARK.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Based on the nfmark match by: | 5 | * Based on the nfmark match by: |
6 | * (C) 1999-2001 Marc Boucher <marc@mbsi.ca> | 6 | * (C) 1999-2001 Marc Boucher <marc@mbsi.ca> |
7 | * | 7 | * |
8 | * (C) 2006 Red Hat, Inc., James Morris <jmorris@redhat.com> | 8 | * (C) 2006,2008 Red Hat, Inc., James Morris <jmorris@redhat.com> |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
@@ -89,6 +89,12 @@ secmark_tg_check(const char *tablename, const void *entry, | |||
89 | { | 89 | { |
90 | struct xt_secmark_target_info *info = targinfo; | 90 | struct xt_secmark_target_info *info = targinfo; |
91 | 91 | ||
92 | if (strcmp(tablename, "mangle") && strcmp(tablename, "security")) { | ||
93 | printk(KERN_INFO PFX "target only valid in the \'mangle\' " | ||
94 | "or \'security\' tables, not \'%s\'.\n", tablename); | ||
95 | return false; | ||
96 | } | ||
97 | |||
92 | if (mode && mode != info->mode) { | 98 | if (mode && mode != info->mode) { |
93 | printk(KERN_INFO PFX "mode already set to %hu cannot mix with " | 99 | printk(KERN_INFO PFX "mode already set to %hu cannot mix with " |
94 | "rules for mode %hu\n", mode, info->mode); | 100 | "rules for mode %hu\n", mode, info->mode); |
@@ -127,7 +133,6 @@ static struct xt_target secmark_tg_reg[] __read_mostly = { | |||
127 | .destroy = secmark_tg_destroy, | 133 | .destroy = secmark_tg_destroy, |
128 | .target = secmark_tg, | 134 | .target = secmark_tg, |
129 | .targetsize = sizeof(struct xt_secmark_target_info), | 135 | .targetsize = sizeof(struct xt_secmark_target_info), |
130 | .table = "mangle", | ||
131 | .me = THIS_MODULE, | 136 | .me = THIS_MODULE, |
132 | }, | 137 | }, |
133 | { | 138 | { |
@@ -137,7 +142,6 @@ static struct xt_target secmark_tg_reg[] __read_mostly = { | |||
137 | .destroy = secmark_tg_destroy, | 142 | .destroy = secmark_tg_destroy, |
138 | .target = secmark_tg, | 143 | .target = secmark_tg, |
139 | .targetsize = sizeof(struct xt_secmark_target_info), | 144 | .targetsize = sizeof(struct xt_secmark_target_info), |
140 | .table = "mangle", | ||
141 | .me = THIS_MODULE, | 145 | .me = THIS_MODULE, |
142 | }, | 146 | }, |
143 | }; | 147 | }; |
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c index 72f694d947f4..4903182a062b 100644 --- a/net/netfilter/xt_string.c +++ b/net/netfilter/xt_string.c | |||
@@ -29,12 +29,16 @@ string_mt(const struct sk_buff *skb, const struct net_device *in, | |||
29 | { | 29 | { |
30 | const struct xt_string_info *conf = matchinfo; | 30 | const struct xt_string_info *conf = matchinfo; |
31 | struct ts_state state; | 31 | struct ts_state state; |
32 | int invert; | ||
32 | 33 | ||
33 | memset(&state, 0, sizeof(struct ts_state)); | 34 | memset(&state, 0, sizeof(struct ts_state)); |
34 | 35 | ||
36 | invert = (match->revision == 0 ? conf->u.v0.invert : | ||
37 | conf->u.v1.flags & XT_STRING_FLAG_INVERT); | ||
38 | |||
35 | return (skb_find_text((struct sk_buff *)skb, conf->from_offset, | 39 | return (skb_find_text((struct sk_buff *)skb, conf->from_offset, |
36 | conf->to_offset, conf->config, &state) | 40 | conf->to_offset, conf->config, &state) |
37 | != UINT_MAX) ^ conf->invert; | 41 | != UINT_MAX) ^ invert; |
38 | } | 42 | } |
39 | 43 | ||
40 | #define STRING_TEXT_PRIV(m) ((struct xt_string_info *)(m)) | 44 | #define STRING_TEXT_PRIV(m) ((struct xt_string_info *)(m)) |
@@ -46,6 +50,7 @@ string_mt_check(const char *tablename, const void *ip, | |||
46 | { | 50 | { |
47 | struct xt_string_info *conf = matchinfo; | 51 | struct xt_string_info *conf = matchinfo; |
48 | struct ts_config *ts_conf; | 52 | struct ts_config *ts_conf; |
53 | int flags = TS_AUTOLOAD; | ||
49 | 54 | ||
50 | /* Damn, can't handle this case properly with iptables... */ | 55 | /* Damn, can't handle this case properly with iptables... */ |
51 | if (conf->from_offset > conf->to_offset) | 56 | if (conf->from_offset > conf->to_offset) |
@@ -54,8 +59,15 @@ string_mt_check(const char *tablename, const void *ip, | |||
54 | return false; | 59 | return false; |
55 | if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE) | 60 | if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE) |
56 | return false; | 61 | return false; |
62 | if (match->revision == 1) { | ||
63 | if (conf->u.v1.flags & | ||
64 | ~(XT_STRING_FLAG_IGNORECASE | XT_STRING_FLAG_INVERT)) | ||
65 | return false; | ||
66 | if (conf->u.v1.flags & XT_STRING_FLAG_IGNORECASE) | ||
67 | flags |= TS_IGNORECASE; | ||
68 | } | ||
57 | ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen, | 69 | ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen, |
58 | GFP_KERNEL, TS_AUTOLOAD); | 70 | GFP_KERNEL, flags); |
59 | if (IS_ERR(ts_conf)) | 71 | if (IS_ERR(ts_conf)) |
60 | return false; | 72 | return false; |
61 | 73 | ||
@@ -72,6 +84,17 @@ static void string_mt_destroy(const struct xt_match *match, void *matchinfo) | |||
72 | static struct xt_match string_mt_reg[] __read_mostly = { | 84 | static struct xt_match string_mt_reg[] __read_mostly = { |
73 | { | 85 | { |
74 | .name = "string", | 86 | .name = "string", |
87 | .revision = 0, | ||
88 | .family = AF_INET, | ||
89 | .checkentry = string_mt_check, | ||
90 | .match = string_mt, | ||
91 | .destroy = string_mt_destroy, | ||
92 | .matchsize = sizeof(struct xt_string_info), | ||
93 | .me = THIS_MODULE | ||
94 | }, | ||
95 | { | ||
96 | .name = "string", | ||
97 | .revision = 1, | ||
75 | .family = AF_INET, | 98 | .family = AF_INET, |
76 | .checkentry = string_mt_check, | 99 | .checkentry = string_mt_check, |
77 | .match = string_mt, | 100 | .match = string_mt, |
@@ -81,6 +104,17 @@ static struct xt_match string_mt_reg[] __read_mostly = { | |||
81 | }, | 104 | }, |
82 | { | 105 | { |
83 | .name = "string", | 106 | .name = "string", |
107 | .revision = 0, | ||
108 | .family = AF_INET6, | ||
109 | .checkentry = string_mt_check, | ||
110 | .match = string_mt, | ||
111 | .destroy = string_mt_destroy, | ||
112 | .matchsize = sizeof(struct xt_string_info), | ||
113 | .me = THIS_MODULE | ||
114 | }, | ||
115 | { | ||
116 | .name = "string", | ||
117 | .revision = 1, | ||
84 | .family = AF_INET6, | 118 | .family = AF_INET6, |
85 | .checkentry = string_mt_check, | 119 | .checkentry = string_mt_check, |
86 | .match = string_mt, | 120 | .match = string_mt, |
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c index 9080c61b71a5..0aec318bf0ef 100644 --- a/net/netlabel/netlabel_cipso_v4.c +++ b/net/netlabel/netlabel_cipso_v4.c | |||
@@ -591,7 +591,7 @@ list_retry: | |||
591 | if (nlsze_mult < 4) { | 591 | if (nlsze_mult < 4) { |
592 | rcu_read_unlock(); | 592 | rcu_read_unlock(); |
593 | kfree_skb(ans_skb); | 593 | kfree_skb(ans_skb); |
594 | nlsze_mult++; | 594 | nlsze_mult *= 2; |
595 | goto list_start; | 595 | goto list_start; |
596 | } | 596 | } |
597 | list_failure_lock: | 597 | list_failure_lock: |
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 56f80872924e..921c118ead89 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
@@ -954,7 +954,7 @@ static int netlbl_unlhsh_netdev_handler(struct notifier_block *this, | |||
954 | struct net_device *dev = ptr; | 954 | struct net_device *dev = ptr; |
955 | struct netlbl_unlhsh_iface *iface = NULL; | 955 | struct netlbl_unlhsh_iface *iface = NULL; |
956 | 956 | ||
957 | if (dev_net(dev) != &init_net) | 957 | if (!net_eq(dev_net(dev), &init_net)) |
958 | return NOTIFY_DONE; | 958 | return NOTIFY_DONE; |
959 | 959 | ||
960 | /* XXX - should this be a check for NETDEV_DOWN or _UNREGISTER? */ | 960 | /* XXX - should this be a check for NETDEV_DOWN or _UNREGISTER? */ |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 349aba189558..98bfe277eab2 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -759,7 +759,7 @@ struct sock *netlink_getsockbyfilp(struct file *filp) | |||
759 | * 0: continue | 759 | * 0: continue |
760 | * 1: repeat lookup - reference dropped while waiting for socket memory. | 760 | * 1: repeat lookup - reference dropped while waiting for socket memory. |
761 | */ | 761 | */ |
762 | int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, | 762 | int netlink_attachskb(struct sock *sk, struct sk_buff *skb, |
763 | long *timeo, struct sock *ssk) | 763 | long *timeo, struct sock *ssk) |
764 | { | 764 | { |
765 | struct netlink_sock *nlk; | 765 | struct netlink_sock *nlk; |
@@ -892,7 +892,7 @@ retry: | |||
892 | return err; | 892 | return err; |
893 | } | 893 | } |
894 | 894 | ||
895 | err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk); | 895 | err = netlink_attachskb(sk, skb, &timeo, ssk); |
896 | if (err == 1) | 896 | if (err == 1) |
897 | goto retry; | 897 | goto retry; |
898 | if (err) | 898 | if (err) |
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 4bae8b998cab..fccc250f95f5 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
@@ -74,6 +74,18 @@ static const struct proto_ops nr_proto_ops; | |||
74 | */ | 74 | */ |
75 | static struct lock_class_key nr_netdev_xmit_lock_key; | 75 | static struct lock_class_key nr_netdev_xmit_lock_key; |
76 | 76 | ||
77 | static void nr_set_lockdep_one(struct net_device *dev, | ||
78 | struct netdev_queue *txq, | ||
79 | void *_unused) | ||
80 | { | ||
81 | lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key); | ||
82 | } | ||
83 | |||
84 | static void nr_set_lockdep_key(struct net_device *dev) | ||
85 | { | ||
86 | netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL); | ||
87 | } | ||
88 | |||
77 | /* | 89 | /* |
78 | * Socket removal during an interrupt is now safe. | 90 | * Socket removal during an interrupt is now safe. |
79 | */ | 91 | */ |
@@ -106,7 +118,7 @@ static int nr_device_event(struct notifier_block *this, unsigned long event, voi | |||
106 | { | 118 | { |
107 | struct net_device *dev = (struct net_device *)ptr; | 119 | struct net_device *dev = (struct net_device *)ptr; |
108 | 120 | ||
109 | if (dev_net(dev) != &init_net) | 121 | if (!net_eq(dev_net(dev), &init_net)) |
110 | return NOTIFY_DONE; | 122 | return NOTIFY_DONE; |
111 | 123 | ||
112 | if (event != NETDEV_DOWN) | 124 | if (event != NETDEV_DOWN) |
@@ -475,13 +487,11 @@ static struct sock *nr_make_new(struct sock *osk) | |||
475 | sock_init_data(NULL, sk); | 487 | sock_init_data(NULL, sk); |
476 | 488 | ||
477 | sk->sk_type = osk->sk_type; | 489 | sk->sk_type = osk->sk_type; |
478 | sk->sk_socket = osk->sk_socket; | ||
479 | sk->sk_priority = osk->sk_priority; | 490 | sk->sk_priority = osk->sk_priority; |
480 | sk->sk_protocol = osk->sk_protocol; | 491 | sk->sk_protocol = osk->sk_protocol; |
481 | sk->sk_rcvbuf = osk->sk_rcvbuf; | 492 | sk->sk_rcvbuf = osk->sk_rcvbuf; |
482 | sk->sk_sndbuf = osk->sk_sndbuf; | 493 | sk->sk_sndbuf = osk->sk_sndbuf; |
483 | sk->sk_state = TCP_ESTABLISHED; | 494 | sk->sk_state = TCP_ESTABLISHED; |
484 | sk->sk_sleep = osk->sk_sleep; | ||
485 | sock_copy_flags(sk, osk); | 495 | sock_copy_flags(sk, osk); |
486 | 496 | ||
487 | skb_queue_head_init(&nr->ack_queue); | 497 | skb_queue_head_init(&nr->ack_queue); |
@@ -538,11 +548,9 @@ static int nr_release(struct socket *sock) | |||
538 | sk->sk_state_change(sk); | 548 | sk->sk_state_change(sk); |
539 | sock_orphan(sk); | 549 | sock_orphan(sk); |
540 | sock_set_flag(sk, SOCK_DESTROY); | 550 | sock_set_flag(sk, SOCK_DESTROY); |
541 | sk->sk_socket = NULL; | ||
542 | break; | 551 | break; |
543 | 552 | ||
544 | default: | 553 | default: |
545 | sk->sk_socket = NULL; | ||
546 | break; | 554 | break; |
547 | } | 555 | } |
548 | 556 | ||
@@ -810,13 +818,11 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags) | |||
810 | goto out_release; | 818 | goto out_release; |
811 | 819 | ||
812 | newsk = skb->sk; | 820 | newsk = skb->sk; |
813 | newsk->sk_socket = newsock; | 821 | sock_graft(newsk, newsock); |
814 | newsk->sk_sleep = &newsock->wait; | ||
815 | 822 | ||
816 | /* Now attach up the new socket */ | 823 | /* Now attach up the new socket */ |
817 | kfree_skb(skb); | 824 | kfree_skb(skb); |
818 | sk_acceptq_removed(sk); | 825 | sk_acceptq_removed(sk); |
819 | newsock->sk = newsk; | ||
820 | 826 | ||
821 | out_release: | 827 | out_release: |
822 | release_sock(sk); | 828 | release_sock(sk); |
@@ -1436,7 +1442,7 @@ static int __init nr_proto_init(void) | |||
1436 | free_netdev(dev); | 1442 | free_netdev(dev); |
1437 | goto fail; | 1443 | goto fail; |
1438 | } | 1444 | } |
1439 | lockdep_set_class(&dev->_xmit_lock, &nr_netdev_xmit_lock_key); | 1445 | nr_set_lockdep_key(dev); |
1440 | dev_nr[i] = dev; | 1446 | dev_nr[i] = dev; |
1441 | } | 1447 | } |
1442 | 1448 | ||
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 2cee87da4441..d56cae112dc8 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * PACKET - implements raw packet sockets. | 6 | * PACKET - implements raw packet sockets. |
7 | * | 7 | * |
8 | * Version: $Id: af_packet.c,v 1.61 2002/02/08 03:57:19 davem Exp $ | ||
9 | * | ||
10 | * Authors: Ross Biro | 8 | * Authors: Ross Biro |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Alan Cox, <gw4pts@gw4pts.ampr.org> | 10 | * Alan Cox, <gw4pts@gw4pts.ampr.org> |
@@ -188,6 +186,9 @@ struct packet_sock { | |||
188 | unsigned int pg_vec_order; | 186 | unsigned int pg_vec_order; |
189 | unsigned int pg_vec_pages; | 187 | unsigned int pg_vec_pages; |
190 | unsigned int pg_vec_len; | 188 | unsigned int pg_vec_len; |
189 | enum tpacket_versions tp_version; | ||
190 | unsigned int tp_hdrlen; | ||
191 | unsigned int tp_reserve; | ||
191 | #endif | 192 | #endif |
192 | }; | 193 | }; |
193 | 194 | ||
@@ -203,14 +204,52 @@ struct packet_skb_cb { | |||
203 | 204 | ||
204 | #ifdef CONFIG_PACKET_MMAP | 205 | #ifdef CONFIG_PACKET_MMAP |
205 | 206 | ||
206 | static inline struct tpacket_hdr *packet_lookup_frame(struct packet_sock *po, unsigned int position) | 207 | static void *packet_lookup_frame(struct packet_sock *po, unsigned int position, |
208 | int status) | ||
207 | { | 209 | { |
208 | unsigned int pg_vec_pos, frame_offset; | 210 | unsigned int pg_vec_pos, frame_offset; |
211 | union { | ||
212 | struct tpacket_hdr *h1; | ||
213 | struct tpacket2_hdr *h2; | ||
214 | void *raw; | ||
215 | } h; | ||
209 | 216 | ||
210 | pg_vec_pos = position / po->frames_per_block; | 217 | pg_vec_pos = position / po->frames_per_block; |
211 | frame_offset = position % po->frames_per_block; | 218 | frame_offset = position % po->frames_per_block; |
212 | 219 | ||
213 | return (struct tpacket_hdr *)(po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size)); | 220 | h.raw = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size); |
221 | switch (po->tp_version) { | ||
222 | case TPACKET_V1: | ||
223 | if (status != h.h1->tp_status ? TP_STATUS_USER : | ||
224 | TP_STATUS_KERNEL) | ||
225 | return NULL; | ||
226 | break; | ||
227 | case TPACKET_V2: | ||
228 | if (status != h.h2->tp_status ? TP_STATUS_USER : | ||
229 | TP_STATUS_KERNEL) | ||
230 | return NULL; | ||
231 | break; | ||
232 | } | ||
233 | return h.raw; | ||
234 | } | ||
235 | |||
236 | static void __packet_set_status(struct packet_sock *po, void *frame, int status) | ||
237 | { | ||
238 | union { | ||
239 | struct tpacket_hdr *h1; | ||
240 | struct tpacket2_hdr *h2; | ||
241 | void *raw; | ||
242 | } h; | ||
243 | |||
244 | h.raw = frame; | ||
245 | switch (po->tp_version) { | ||
246 | case TPACKET_V1: | ||
247 | h.h1->tp_status = status; | ||
248 | break; | ||
249 | case TPACKET_V2: | ||
250 | h.h2->tp_status = status; | ||
251 | break; | ||
252 | } | ||
214 | } | 253 | } |
215 | #endif | 254 | #endif |
216 | 255 | ||
@@ -553,14 +592,19 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe | |||
553 | struct sock *sk; | 592 | struct sock *sk; |
554 | struct packet_sock *po; | 593 | struct packet_sock *po; |
555 | struct sockaddr_ll *sll; | 594 | struct sockaddr_ll *sll; |
556 | struct tpacket_hdr *h; | 595 | union { |
596 | struct tpacket_hdr *h1; | ||
597 | struct tpacket2_hdr *h2; | ||
598 | void *raw; | ||
599 | } h; | ||
557 | u8 * skb_head = skb->data; | 600 | u8 * skb_head = skb->data; |
558 | int skb_len = skb->len; | 601 | int skb_len = skb->len; |
559 | unsigned int snaplen, res; | 602 | unsigned int snaplen, res; |
560 | unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER; | 603 | unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER; |
561 | unsigned short macoff, netoff; | 604 | unsigned short macoff, netoff, hdrlen; |
562 | struct sk_buff *copy_skb = NULL; | 605 | struct sk_buff *copy_skb = NULL; |
563 | struct timeval tv; | 606 | struct timeval tv; |
607 | struct timespec ts; | ||
564 | 608 | ||
565 | if (skb->pkt_type == PACKET_LOOPBACK) | 609 | if (skb->pkt_type == PACKET_LOOPBACK) |
566 | goto drop; | 610 | goto drop; |
@@ -592,10 +636,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe | |||
592 | snaplen = res; | 636 | snaplen = res; |
593 | 637 | ||
594 | if (sk->sk_type == SOCK_DGRAM) { | 638 | if (sk->sk_type == SOCK_DGRAM) { |
595 | macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16; | 639 | macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + |
640 | po->tp_reserve; | ||
596 | } else { | 641 | } else { |
597 | unsigned maclen = skb_network_offset(skb); | 642 | unsigned maclen = skb_network_offset(skb); |
598 | netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen)); | 643 | netoff = TPACKET_ALIGN(po->tp_hdrlen + |
644 | (maclen < 16 ? 16 : maclen)) + | ||
645 | po->tp_reserve; | ||
599 | macoff = netoff - maclen; | 646 | macoff = netoff - maclen; |
600 | } | 647 | } |
601 | 648 | ||
@@ -618,9 +665,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe | |||
618 | } | 665 | } |
619 | 666 | ||
620 | spin_lock(&sk->sk_receive_queue.lock); | 667 | spin_lock(&sk->sk_receive_queue.lock); |
621 | h = packet_lookup_frame(po, po->head); | 668 | h.raw = packet_lookup_frame(po, po->head, TP_STATUS_KERNEL); |
622 | 669 | if (!h.raw) | |
623 | if (h->tp_status) | ||
624 | goto ring_is_full; | 670 | goto ring_is_full; |
625 | po->head = po->head != po->frame_max ? po->head+1 : 0; | 671 | po->head = po->head != po->frame_max ? po->head+1 : 0; |
626 | po->stats.tp_packets++; | 672 | po->stats.tp_packets++; |
@@ -632,20 +678,41 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe | |||
632 | status &= ~TP_STATUS_LOSING; | 678 | status &= ~TP_STATUS_LOSING; |
633 | spin_unlock(&sk->sk_receive_queue.lock); | 679 | spin_unlock(&sk->sk_receive_queue.lock); |
634 | 680 | ||
635 | skb_copy_bits(skb, 0, (u8*)h + macoff, snaplen); | 681 | skb_copy_bits(skb, 0, h.raw + macoff, snaplen); |
636 | 682 | ||
637 | h->tp_len = skb->len; | 683 | switch (po->tp_version) { |
638 | h->tp_snaplen = snaplen; | 684 | case TPACKET_V1: |
639 | h->tp_mac = macoff; | 685 | h.h1->tp_len = skb->len; |
640 | h->tp_net = netoff; | 686 | h.h1->tp_snaplen = snaplen; |
641 | if (skb->tstamp.tv64) | 687 | h.h1->tp_mac = macoff; |
642 | tv = ktime_to_timeval(skb->tstamp); | 688 | h.h1->tp_net = netoff; |
643 | else | 689 | if (skb->tstamp.tv64) |
644 | do_gettimeofday(&tv); | 690 | tv = ktime_to_timeval(skb->tstamp); |
645 | h->tp_sec = tv.tv_sec; | 691 | else |
646 | h->tp_usec = tv.tv_usec; | 692 | do_gettimeofday(&tv); |
693 | h.h1->tp_sec = tv.tv_sec; | ||
694 | h.h1->tp_usec = tv.tv_usec; | ||
695 | hdrlen = sizeof(*h.h1); | ||
696 | break; | ||
697 | case TPACKET_V2: | ||
698 | h.h2->tp_len = skb->len; | ||
699 | h.h2->tp_snaplen = snaplen; | ||
700 | h.h2->tp_mac = macoff; | ||
701 | h.h2->tp_net = netoff; | ||
702 | if (skb->tstamp.tv64) | ||
703 | ts = ktime_to_timespec(skb->tstamp); | ||
704 | else | ||
705 | getnstimeofday(&ts); | ||
706 | h.h2->tp_sec = ts.tv_sec; | ||
707 | h.h2->tp_nsec = ts.tv_nsec; | ||
708 | h.h2->tp_vlan_tci = skb->vlan_tci; | ||
709 | hdrlen = sizeof(*h.h2); | ||
710 | break; | ||
711 | default: | ||
712 | BUG(); | ||
713 | } | ||
647 | 714 | ||
648 | sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h))); | 715 | sll = h.raw + TPACKET_ALIGN(hdrlen); |
649 | sll->sll_halen = dev_parse_header(skb, sll->sll_addr); | 716 | sll->sll_halen = dev_parse_header(skb, sll->sll_addr); |
650 | sll->sll_family = AF_PACKET; | 717 | sll->sll_family = AF_PACKET; |
651 | sll->sll_hatype = dev->type; | 718 | sll->sll_hatype = dev->type; |
@@ -656,14 +723,14 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe | |||
656 | else | 723 | else |
657 | sll->sll_ifindex = dev->ifindex; | 724 | sll->sll_ifindex = dev->ifindex; |
658 | 725 | ||
659 | h->tp_status = status; | 726 | __packet_set_status(po, h.raw, status); |
660 | smp_mb(); | 727 | smp_mb(); |
661 | 728 | ||
662 | { | 729 | { |
663 | struct page *p_start, *p_end; | 730 | struct page *p_start, *p_end; |
664 | u8 *h_end = (u8 *)h + macoff + snaplen - 1; | 731 | u8 *h_end = h.raw + macoff + snaplen - 1; |
665 | 732 | ||
666 | p_start = virt_to_page(h); | 733 | p_start = virt_to_page(h.raw); |
667 | p_end = virt_to_page(h_end); | 734 | p_end = virt_to_page(h_end); |
668 | while (p_start <= p_end) { | 735 | while (p_start <= p_end) { |
669 | flush_dcache_page(p_start); | 736 | flush_dcache_page(p_start); |
@@ -1109,6 +1176,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1109 | aux.tp_snaplen = skb->len; | 1176 | aux.tp_snaplen = skb->len; |
1110 | aux.tp_mac = 0; | 1177 | aux.tp_mac = 0; |
1111 | aux.tp_net = skb_network_offset(skb); | 1178 | aux.tp_net = skb_network_offset(skb); |
1179 | aux.tp_vlan_tci = skb->vlan_tci; | ||
1112 | 1180 | ||
1113 | put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); | 1181 | put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); |
1114 | } | 1182 | } |
@@ -1175,7 +1243,8 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr, | |||
1175 | return 0; | 1243 | return 0; |
1176 | } | 1244 | } |
1177 | 1245 | ||
1178 | static void packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what) | 1246 | static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, |
1247 | int what) | ||
1179 | { | 1248 | { |
1180 | switch (i->type) { | 1249 | switch (i->type) { |
1181 | case PACKET_MR_MULTICAST: | 1250 | case PACKET_MR_MULTICAST: |
@@ -1185,13 +1254,14 @@ static void packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int w | |||
1185 | dev_mc_delete(dev, i->addr, i->alen, 0); | 1254 | dev_mc_delete(dev, i->addr, i->alen, 0); |
1186 | break; | 1255 | break; |
1187 | case PACKET_MR_PROMISC: | 1256 | case PACKET_MR_PROMISC: |
1188 | dev_set_promiscuity(dev, what); | 1257 | return dev_set_promiscuity(dev, what); |
1189 | break; | 1258 | break; |
1190 | case PACKET_MR_ALLMULTI: | 1259 | case PACKET_MR_ALLMULTI: |
1191 | dev_set_allmulti(dev, what); | 1260 | return dev_set_allmulti(dev, what); |
1192 | break; | 1261 | break; |
1193 | default:; | 1262 | default:; |
1194 | } | 1263 | } |
1264 | return 0; | ||
1195 | } | 1265 | } |
1196 | 1266 | ||
1197 | static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) | 1267 | static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) |
@@ -1245,7 +1315,11 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) | |||
1245 | i->count = 1; | 1315 | i->count = 1; |
1246 | i->next = po->mclist; | 1316 | i->next = po->mclist; |
1247 | po->mclist = i; | 1317 | po->mclist = i; |
1248 | packet_dev_mc(dev, i, +1); | 1318 | err = packet_dev_mc(dev, i, 1); |
1319 | if (err) { | ||
1320 | po->mclist = i->next; | ||
1321 | kfree(i); | ||
1322 | } | ||
1249 | 1323 | ||
1250 | done: | 1324 | done: |
1251 | rtnl_unlock(); | 1325 | rtnl_unlock(); |
@@ -1358,6 +1432,38 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv | |||
1358 | pkt_sk(sk)->copy_thresh = val; | 1432 | pkt_sk(sk)->copy_thresh = val; |
1359 | return 0; | 1433 | return 0; |
1360 | } | 1434 | } |
1435 | case PACKET_VERSION: | ||
1436 | { | ||
1437 | int val; | ||
1438 | |||
1439 | if (optlen != sizeof(val)) | ||
1440 | return -EINVAL; | ||
1441 | if (po->pg_vec) | ||
1442 | return -EBUSY; | ||
1443 | if (copy_from_user(&val, optval, sizeof(val))) | ||
1444 | return -EFAULT; | ||
1445 | switch (val) { | ||
1446 | case TPACKET_V1: | ||
1447 | case TPACKET_V2: | ||
1448 | po->tp_version = val; | ||
1449 | return 0; | ||
1450 | default: | ||
1451 | return -EINVAL; | ||
1452 | } | ||
1453 | } | ||
1454 | case PACKET_RESERVE: | ||
1455 | { | ||
1456 | unsigned int val; | ||
1457 | |||
1458 | if (optlen != sizeof(val)) | ||
1459 | return -EINVAL; | ||
1460 | if (po->pg_vec) | ||
1461 | return -EBUSY; | ||
1462 | if (copy_from_user(&val, optval, sizeof(val))) | ||
1463 | return -EFAULT; | ||
1464 | po->tp_reserve = val; | ||
1465 | return 0; | ||
1466 | } | ||
1361 | #endif | 1467 | #endif |
1362 | case PACKET_AUXDATA: | 1468 | case PACKET_AUXDATA: |
1363 | { | 1469 | { |
@@ -1433,6 +1539,37 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, | |||
1433 | 1539 | ||
1434 | data = &val; | 1540 | data = &val; |
1435 | break; | 1541 | break; |
1542 | #ifdef CONFIG_PACKET_MMAP | ||
1543 | case PACKET_VERSION: | ||
1544 | if (len > sizeof(int)) | ||
1545 | len = sizeof(int); | ||
1546 | val = po->tp_version; | ||
1547 | data = &val; | ||
1548 | break; | ||
1549 | case PACKET_HDRLEN: | ||
1550 | if (len > sizeof(int)) | ||
1551 | len = sizeof(int); | ||
1552 | if (copy_from_user(&val, optval, len)) | ||
1553 | return -EFAULT; | ||
1554 | switch (val) { | ||
1555 | case TPACKET_V1: | ||
1556 | val = sizeof(struct tpacket_hdr); | ||
1557 | break; | ||
1558 | case TPACKET_V2: | ||
1559 | val = sizeof(struct tpacket2_hdr); | ||
1560 | break; | ||
1561 | default: | ||
1562 | return -EINVAL; | ||
1563 | } | ||
1564 | data = &val; | ||
1565 | break; | ||
1566 | case PACKET_RESERVE: | ||
1567 | if (len > sizeof(unsigned int)) | ||
1568 | len = sizeof(unsigned int); | ||
1569 | val = po->tp_reserve; | ||
1570 | data = &val; | ||
1571 | break; | ||
1572 | #endif | ||
1436 | default: | 1573 | default: |
1437 | return -ENOPROTOOPT; | 1574 | return -ENOPROTOOPT; |
1438 | } | 1575 | } |
@@ -1540,7 +1677,7 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, | |||
1540 | case SIOCGIFDSTADDR: | 1677 | case SIOCGIFDSTADDR: |
1541 | case SIOCSIFDSTADDR: | 1678 | case SIOCSIFDSTADDR: |
1542 | case SIOCSIFFLAGS: | 1679 | case SIOCSIFFLAGS: |
1543 | if (sock_net(sk) != &init_net) | 1680 | if (!net_eq(sock_net(sk), &init_net)) |
1544 | return -ENOIOCTLCMD; | 1681 | return -ENOIOCTLCMD; |
1545 | return inet_dgram_ops.ioctl(sock, cmd, arg); | 1682 | return inet_dgram_ops.ioctl(sock, cmd, arg); |
1546 | #endif | 1683 | #endif |
@@ -1566,11 +1703,8 @@ static unsigned int packet_poll(struct file * file, struct socket *sock, | |||
1566 | spin_lock_bh(&sk->sk_receive_queue.lock); | 1703 | spin_lock_bh(&sk->sk_receive_queue.lock); |
1567 | if (po->pg_vec) { | 1704 | if (po->pg_vec) { |
1568 | unsigned last = po->head ? po->head-1 : po->frame_max; | 1705 | unsigned last = po->head ? po->head-1 : po->frame_max; |
1569 | struct tpacket_hdr *h; | ||
1570 | |||
1571 | h = packet_lookup_frame(po, last); | ||
1572 | 1706 | ||
1573 | if (h->tp_status) | 1707 | if (packet_lookup_frame(po, last, TP_STATUS_USER)) |
1574 | mask |= POLLIN | POLLRDNORM; | 1708 | mask |= POLLIN | POLLRDNORM; |
1575 | } | 1709 | } |
1576 | spin_unlock_bh(&sk->sk_receive_queue.lock); | 1710 | spin_unlock_bh(&sk->sk_receive_queue.lock); |
@@ -1665,11 +1799,21 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing | |||
1665 | if (unlikely(po->pg_vec)) | 1799 | if (unlikely(po->pg_vec)) |
1666 | return -EBUSY; | 1800 | return -EBUSY; |
1667 | 1801 | ||
1802 | switch (po->tp_version) { | ||
1803 | case TPACKET_V1: | ||
1804 | po->tp_hdrlen = TPACKET_HDRLEN; | ||
1805 | break; | ||
1806 | case TPACKET_V2: | ||
1807 | po->tp_hdrlen = TPACKET2_HDRLEN; | ||
1808 | break; | ||
1809 | } | ||
1810 | |||
1668 | if (unlikely((int)req->tp_block_size <= 0)) | 1811 | if (unlikely((int)req->tp_block_size <= 0)) |
1669 | return -EINVAL; | 1812 | return -EINVAL; |
1670 | if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) | 1813 | if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) |
1671 | return -EINVAL; | 1814 | return -EINVAL; |
1672 | if (unlikely(req->tp_frame_size < TPACKET_HDRLEN)) | 1815 | if (unlikely(req->tp_frame_size < po->tp_hdrlen + |
1816 | po->tp_reserve)) | ||
1673 | return -EINVAL; | 1817 | return -EINVAL; |
1674 | if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) | 1818 | if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) |
1675 | return -EINVAL; | 1819 | return -EINVAL; |
@@ -1688,13 +1832,11 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing | |||
1688 | goto out; | 1832 | goto out; |
1689 | 1833 | ||
1690 | for (i = 0; i < req->tp_block_nr; i++) { | 1834 | for (i = 0; i < req->tp_block_nr; i++) { |
1691 | char *ptr = pg_vec[i]; | 1835 | void *ptr = pg_vec[i]; |
1692 | struct tpacket_hdr *header; | ||
1693 | int k; | 1836 | int k; |
1694 | 1837 | ||
1695 | for (k = 0; k < po->frames_per_block; k++) { | 1838 | for (k = 0; k < po->frames_per_block; k++) { |
1696 | header = (struct tpacket_hdr *) ptr; | 1839 | __packet_set_status(po, ptr, TP_STATUS_KERNEL); |
1697 | header->tp_status = TP_STATUS_KERNEL; | ||
1698 | ptr += req->tp_frame_size; | 1840 | ptr += req->tp_frame_size; |
1699 | } | 1841 | } |
1700 | } | 1842 | } |
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c index e4b051dbed61..8aa822730145 100644 --- a/net/rfkill/rfkill-input.c +++ b/net/rfkill/rfkill-input.c | |||
@@ -30,39 +30,62 @@ struct rfkill_task { | |||
30 | spinlock_t lock; /* for accessing last and desired state */ | 30 | spinlock_t lock; /* for accessing last and desired state */ |
31 | unsigned long last; /* last schedule */ | 31 | unsigned long last; /* last schedule */ |
32 | enum rfkill_state desired_state; /* on/off */ | 32 | enum rfkill_state desired_state; /* on/off */ |
33 | enum rfkill_state current_state; /* on/off */ | ||
34 | }; | 33 | }; |
35 | 34 | ||
36 | static void rfkill_task_handler(struct work_struct *work) | 35 | static void rfkill_task_handler(struct work_struct *work) |
37 | { | 36 | { |
38 | struct rfkill_task *task = container_of(work, struct rfkill_task, work); | 37 | struct rfkill_task *task = container_of(work, struct rfkill_task, work); |
39 | enum rfkill_state state; | ||
40 | 38 | ||
41 | mutex_lock(&task->mutex); | 39 | mutex_lock(&task->mutex); |
42 | 40 | ||
43 | /* | 41 | rfkill_switch_all(task->type, task->desired_state); |
44 | * Use temp variable to fetch desired state to keep it | ||
45 | * consistent even if rfkill_schedule_toggle() runs in | ||
46 | * another thread or interrupts us. | ||
47 | */ | ||
48 | state = task->desired_state; | ||
49 | 42 | ||
50 | if (state != task->current_state) { | 43 | mutex_unlock(&task->mutex); |
51 | rfkill_switch_all(task->type, state); | 44 | } |
52 | task->current_state = state; | 45 | |
46 | static void rfkill_task_epo_handler(struct work_struct *work) | ||
47 | { | ||
48 | rfkill_epo(); | ||
49 | } | ||
50 | |||
51 | static DECLARE_WORK(epo_work, rfkill_task_epo_handler); | ||
52 | |||
53 | static void rfkill_schedule_epo(void) | ||
54 | { | ||
55 | schedule_work(&epo_work); | ||
56 | } | ||
57 | |||
58 | static void rfkill_schedule_set(struct rfkill_task *task, | ||
59 | enum rfkill_state desired_state) | ||
60 | { | ||
61 | unsigned long flags; | ||
62 | |||
63 | if (unlikely(work_pending(&epo_work))) | ||
64 | return; | ||
65 | |||
66 | spin_lock_irqsave(&task->lock, flags); | ||
67 | |||
68 | if (time_after(jiffies, task->last + msecs_to_jiffies(200))) { | ||
69 | task->desired_state = desired_state; | ||
70 | task->last = jiffies; | ||
71 | schedule_work(&task->work); | ||
53 | } | 72 | } |
54 | 73 | ||
55 | mutex_unlock(&task->mutex); | 74 | spin_unlock_irqrestore(&task->lock, flags); |
56 | } | 75 | } |
57 | 76 | ||
58 | static void rfkill_schedule_toggle(struct rfkill_task *task) | 77 | static void rfkill_schedule_toggle(struct rfkill_task *task) |
59 | { | 78 | { |
60 | unsigned long flags; | 79 | unsigned long flags; |
61 | 80 | ||
81 | if (unlikely(work_pending(&epo_work))) | ||
82 | return; | ||
83 | |||
62 | spin_lock_irqsave(&task->lock, flags); | 84 | spin_lock_irqsave(&task->lock, flags); |
63 | 85 | ||
64 | if (time_after(jiffies, task->last + msecs_to_jiffies(200))) { | 86 | if (time_after(jiffies, task->last + msecs_to_jiffies(200))) { |
65 | task->desired_state = !task->desired_state; | 87 | task->desired_state = |
88 | rfkill_state_complement(task->desired_state); | ||
66 | task->last = jiffies; | 89 | task->last = jiffies; |
67 | schedule_work(&task->work); | 90 | schedule_work(&task->work); |
68 | } | 91 | } |
@@ -70,26 +93,26 @@ static void rfkill_schedule_toggle(struct rfkill_task *task) | |||
70 | spin_unlock_irqrestore(&task->lock, flags); | 93 | spin_unlock_irqrestore(&task->lock, flags); |
71 | } | 94 | } |
72 | 95 | ||
73 | #define DEFINE_RFKILL_TASK(n, t) \ | 96 | #define DEFINE_RFKILL_TASK(n, t) \ |
74 | struct rfkill_task n = { \ | 97 | struct rfkill_task n = { \ |
75 | .work = __WORK_INITIALIZER(n.work, \ | 98 | .work = __WORK_INITIALIZER(n.work, \ |
76 | rfkill_task_handler), \ | 99 | rfkill_task_handler), \ |
77 | .type = t, \ | 100 | .type = t, \ |
78 | .mutex = __MUTEX_INITIALIZER(n.mutex), \ | 101 | .mutex = __MUTEX_INITIALIZER(n.mutex), \ |
79 | .lock = __SPIN_LOCK_UNLOCKED(n.lock), \ | 102 | .lock = __SPIN_LOCK_UNLOCKED(n.lock), \ |
80 | .desired_state = RFKILL_STATE_ON, \ | 103 | .desired_state = RFKILL_STATE_UNBLOCKED, \ |
81 | .current_state = RFKILL_STATE_ON, \ | ||
82 | } | 104 | } |
83 | 105 | ||
84 | static DEFINE_RFKILL_TASK(rfkill_wlan, RFKILL_TYPE_WLAN); | 106 | static DEFINE_RFKILL_TASK(rfkill_wlan, RFKILL_TYPE_WLAN); |
85 | static DEFINE_RFKILL_TASK(rfkill_bt, RFKILL_TYPE_BLUETOOTH); | 107 | static DEFINE_RFKILL_TASK(rfkill_bt, RFKILL_TYPE_BLUETOOTH); |
86 | static DEFINE_RFKILL_TASK(rfkill_uwb, RFKILL_TYPE_UWB); | 108 | static DEFINE_RFKILL_TASK(rfkill_uwb, RFKILL_TYPE_UWB); |
87 | static DEFINE_RFKILL_TASK(rfkill_wimax, RFKILL_TYPE_WIMAX); | 109 | static DEFINE_RFKILL_TASK(rfkill_wimax, RFKILL_TYPE_WIMAX); |
110 | static DEFINE_RFKILL_TASK(rfkill_wwan, RFKILL_TYPE_WWAN); | ||
88 | 111 | ||
89 | static void rfkill_event(struct input_handle *handle, unsigned int type, | 112 | static void rfkill_event(struct input_handle *handle, unsigned int type, |
90 | unsigned int code, int down) | 113 | unsigned int code, int data) |
91 | { | 114 | { |
92 | if (type == EV_KEY && down == 1) { | 115 | if (type == EV_KEY && data == 1) { |
93 | switch (code) { | 116 | switch (code) { |
94 | case KEY_WLAN: | 117 | case KEY_WLAN: |
95 | rfkill_schedule_toggle(&rfkill_wlan); | 118 | rfkill_schedule_toggle(&rfkill_wlan); |
@@ -106,6 +129,28 @@ static void rfkill_event(struct input_handle *handle, unsigned int type, | |||
106 | default: | 129 | default: |
107 | break; | 130 | break; |
108 | } | 131 | } |
132 | } else if (type == EV_SW) { | ||
133 | switch (code) { | ||
134 | case SW_RFKILL_ALL: | ||
135 | /* EVERY radio type. data != 0 means radios ON */ | ||
136 | /* handle EPO (emergency power off) through shortcut */ | ||
137 | if (data) { | ||
138 | rfkill_schedule_set(&rfkill_wwan, | ||
139 | RFKILL_STATE_UNBLOCKED); | ||
140 | rfkill_schedule_set(&rfkill_wimax, | ||
141 | RFKILL_STATE_UNBLOCKED); | ||
142 | rfkill_schedule_set(&rfkill_uwb, | ||
143 | RFKILL_STATE_UNBLOCKED); | ||
144 | rfkill_schedule_set(&rfkill_bt, | ||
145 | RFKILL_STATE_UNBLOCKED); | ||
146 | rfkill_schedule_set(&rfkill_wlan, | ||
147 | RFKILL_STATE_UNBLOCKED); | ||
148 | } else | ||
149 | rfkill_schedule_epo(); | ||
150 | break; | ||
151 | default: | ||
152 | break; | ||
153 | } | ||
109 | } | 154 | } |
110 | } | 155 | } |
111 | 156 | ||
@@ -168,6 +213,11 @@ static const struct input_device_id rfkill_ids[] = { | |||
168 | .evbit = { BIT_MASK(EV_KEY) }, | 213 | .evbit = { BIT_MASK(EV_KEY) }, |
169 | .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, | 214 | .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, |
170 | }, | 215 | }, |
216 | { | ||
217 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, | ||
218 | .evbit = { BIT(EV_SW) }, | ||
219 | .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, | ||
220 | }, | ||
171 | { } | 221 | { } |
172 | }; | 222 | }; |
173 | 223 | ||
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill-input.h index 4dae5006fc77..f63d05045685 100644 --- a/net/rfkill/rfkill-input.h +++ b/net/rfkill/rfkill-input.h | |||
@@ -12,5 +12,6 @@ | |||
12 | #define __RFKILL_INPUT_H | 12 | #define __RFKILL_INPUT_H |
13 | 13 | ||
14 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); | 14 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); |
15 | void rfkill_epo(void); | ||
15 | 16 | ||
16 | #endif /* __RFKILL_INPUT_H */ | 17 | #endif /* __RFKILL_INPUT_H */ |
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c index 4e10a95de832..7a560b785097 100644 --- a/net/rfkill/rfkill.c +++ b/net/rfkill/rfkill.c | |||
@@ -39,8 +39,56 @@ MODULE_LICENSE("GPL"); | |||
39 | static LIST_HEAD(rfkill_list); /* list of registered rf switches */ | 39 | static LIST_HEAD(rfkill_list); /* list of registered rf switches */ |
40 | static DEFINE_MUTEX(rfkill_mutex); | 40 | static DEFINE_MUTEX(rfkill_mutex); |
41 | 41 | ||
42 | static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED; | ||
43 | module_param_named(default_state, rfkill_default_state, uint, 0444); | ||
44 | MODULE_PARM_DESC(default_state, | ||
45 | "Default initial state for all radio types, 0 = radio off"); | ||
46 | |||
42 | static enum rfkill_state rfkill_states[RFKILL_TYPE_MAX]; | 47 | static enum rfkill_state rfkill_states[RFKILL_TYPE_MAX]; |
43 | 48 | ||
49 | static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list); | ||
50 | |||
51 | |||
52 | /** | ||
53 | * register_rfkill_notifier - Add notifier to rfkill notifier chain | ||
54 | * @nb: pointer to the new entry to add to the chain | ||
55 | * | ||
56 | * See blocking_notifier_chain_register() for return value and further | ||
57 | * observations. | ||
58 | * | ||
59 | * Adds a notifier to the rfkill notifier chain. The chain will be | ||
60 | * called with a pointer to the relevant rfkill structure as a parameter, | ||
61 | * refer to include/linux/rfkill.h for the possible events. | ||
62 | * | ||
63 | * Notifiers added to this chain are to always return NOTIFY_DONE. This | ||
64 | * chain is a blocking notifier chain: notifiers can sleep. | ||
65 | * | ||
66 | * Calls to this chain may have been done through a workqueue. One must | ||
67 | * assume unordered asynchronous behaviour, there is no way to know if | ||
68 | * actions related to the event that generated the notification have been | ||
69 | * carried out already. | ||
70 | */ | ||
71 | int register_rfkill_notifier(struct notifier_block *nb) | ||
72 | { | ||
73 | return blocking_notifier_chain_register(&rfkill_notifier_list, nb); | ||
74 | } | ||
75 | EXPORT_SYMBOL_GPL(register_rfkill_notifier); | ||
76 | |||
77 | /** | ||
78 | * unregister_rfkill_notifier - remove notifier from rfkill notifier chain | ||
79 | * @nb: pointer to the entry to remove from the chain | ||
80 | * | ||
81 | * See blocking_notifier_chain_unregister() for return value and further | ||
82 | * observations. | ||
83 | * | ||
84 | * Removes a notifier from the rfkill notifier chain. | ||
85 | */ | ||
86 | int unregister_rfkill_notifier(struct notifier_block *nb) | ||
87 | { | ||
88 | return blocking_notifier_chain_unregister(&rfkill_notifier_list, nb); | ||
89 | } | ||
90 | EXPORT_SYMBOL_GPL(unregister_rfkill_notifier); | ||
91 | |||
44 | 92 | ||
45 | static void rfkill_led_trigger(struct rfkill *rfkill, | 93 | static void rfkill_led_trigger(struct rfkill *rfkill, |
46 | enum rfkill_state state) | 94 | enum rfkill_state state) |
@@ -50,24 +98,101 @@ static void rfkill_led_trigger(struct rfkill *rfkill, | |||
50 | 98 | ||
51 | if (!led->name) | 99 | if (!led->name) |
52 | return; | 100 | return; |
53 | if (state == RFKILL_STATE_OFF) | 101 | if (state != RFKILL_STATE_UNBLOCKED) |
54 | led_trigger_event(led, LED_OFF); | 102 | led_trigger_event(led, LED_OFF); |
55 | else | 103 | else |
56 | led_trigger_event(led, LED_FULL); | 104 | led_trigger_event(led, LED_FULL); |
57 | #endif /* CONFIG_RFKILL_LEDS */ | 105 | #endif /* CONFIG_RFKILL_LEDS */ |
58 | } | 106 | } |
59 | 107 | ||
108 | static void notify_rfkill_state_change(struct rfkill *rfkill) | ||
109 | { | ||
110 | blocking_notifier_call_chain(&rfkill_notifier_list, | ||
111 | RFKILL_STATE_CHANGED, | ||
112 | rfkill); | ||
113 | } | ||
114 | |||
115 | static void update_rfkill_state(struct rfkill *rfkill) | ||
116 | { | ||
117 | enum rfkill_state newstate, oldstate; | ||
118 | |||
119 | if (rfkill->get_state) { | ||
120 | mutex_lock(&rfkill->mutex); | ||
121 | if (!rfkill->get_state(rfkill->data, &newstate)) { | ||
122 | oldstate = rfkill->state; | ||
123 | rfkill->state = newstate; | ||
124 | if (oldstate != newstate) | ||
125 | notify_rfkill_state_change(rfkill); | ||
126 | } | ||
127 | mutex_unlock(&rfkill->mutex); | ||
128 | } | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * rfkill_toggle_radio - wrapper for toggle_radio hook | ||
133 | * | ||
134 | * @rfkill: the rfkill struct to use | ||
135 | * @force: calls toggle_radio even if cache says it is not needed, | ||
136 | * and also makes sure notifications of the state will be | ||
137 | * sent even if it didn't change | ||
138 | * @state: the new state to call toggle_radio() with | ||
139 | * | ||
140 | * Calls rfkill->toggle_radio, enforcing the API for toggle_radio | ||
141 | * calls and handling all the red tape such as issuing notifications | ||
142 | * if the call is successful. | ||
143 | * | ||
144 | * Note that @force cannot override a (possibly cached) state of | ||
145 | * RFKILL_STATE_HARD_BLOCKED. Any device making use of | ||
146 | * RFKILL_STATE_HARD_BLOCKED implements either get_state() or | ||
147 | * rfkill_force_state(), so the cache either is bypassed or valid. | ||
148 | * | ||
149 | * Note that we do call toggle_radio for RFKILL_STATE_SOFT_BLOCKED | ||
150 | * even if the radio is in RFKILL_STATE_HARD_BLOCKED state, so as to | ||
151 | * give the driver a hint that it should double-BLOCK the transmitter. | ||
152 | * | ||
153 | * Caller must have aquired rfkill_mutex. | ||
154 | */ | ||
60 | static int rfkill_toggle_radio(struct rfkill *rfkill, | 155 | static int rfkill_toggle_radio(struct rfkill *rfkill, |
61 | enum rfkill_state state) | 156 | enum rfkill_state state, |
157 | int force) | ||
62 | { | 158 | { |
63 | int retval = 0; | 159 | int retval = 0; |
160 | enum rfkill_state oldstate, newstate; | ||
161 | |||
162 | oldstate = rfkill->state; | ||
163 | |||
164 | if (rfkill->get_state && !force && | ||
165 | !rfkill->get_state(rfkill->data, &newstate)) | ||
166 | rfkill->state = newstate; | ||
167 | |||
168 | switch (state) { | ||
169 | case RFKILL_STATE_HARD_BLOCKED: | ||
170 | /* typically happens when refreshing hardware state, | ||
171 | * such as on resume */ | ||
172 | state = RFKILL_STATE_SOFT_BLOCKED; | ||
173 | break; | ||
174 | case RFKILL_STATE_UNBLOCKED: | ||
175 | /* force can't override this, only rfkill_force_state() can */ | ||
176 | if (rfkill->state == RFKILL_STATE_HARD_BLOCKED) | ||
177 | return -EPERM; | ||
178 | break; | ||
179 | case RFKILL_STATE_SOFT_BLOCKED: | ||
180 | /* nothing to do, we want to give drivers the hint to double | ||
181 | * BLOCK even a transmitter that is already in state | ||
182 | * RFKILL_STATE_HARD_BLOCKED */ | ||
183 | break; | ||
184 | } | ||
64 | 185 | ||
65 | if (state != rfkill->state) { | 186 | if (force || state != rfkill->state) { |
66 | retval = rfkill->toggle_radio(rfkill->data, state); | 187 | retval = rfkill->toggle_radio(rfkill->data, state); |
67 | if (!retval) { | 188 | /* never allow a HARD->SOFT downgrade! */ |
189 | if (!retval && rfkill->state != RFKILL_STATE_HARD_BLOCKED) | ||
68 | rfkill->state = state; | 190 | rfkill->state = state; |
69 | rfkill_led_trigger(rfkill, state); | 191 | } |
70 | } | 192 | |
193 | if (force || rfkill->state != oldstate) { | ||
194 | rfkill_led_trigger(rfkill, rfkill->state); | ||
195 | notify_rfkill_state_change(rfkill); | ||
71 | } | 196 | } |
72 | 197 | ||
73 | return retval; | 198 | return retval; |
@@ -82,7 +207,6 @@ static int rfkill_toggle_radio(struct rfkill *rfkill, | |||
82 | * a specific switch is claimed by userspace in which case it is | 207 | * a specific switch is claimed by userspace in which case it is |
83 | * left alone. | 208 | * left alone. |
84 | */ | 209 | */ |
85 | |||
86 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) | 210 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) |
87 | { | 211 | { |
88 | struct rfkill *rfkill; | 212 | struct rfkill *rfkill; |
@@ -93,13 +217,66 @@ void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) | |||
93 | 217 | ||
94 | list_for_each_entry(rfkill, &rfkill_list, node) { | 218 | list_for_each_entry(rfkill, &rfkill_list, node) { |
95 | if ((!rfkill->user_claim) && (rfkill->type == type)) | 219 | if ((!rfkill->user_claim) && (rfkill->type == type)) |
96 | rfkill_toggle_radio(rfkill, state); | 220 | rfkill_toggle_radio(rfkill, state, 0); |
97 | } | 221 | } |
98 | 222 | ||
99 | mutex_unlock(&rfkill_mutex); | 223 | mutex_unlock(&rfkill_mutex); |
100 | } | 224 | } |
101 | EXPORT_SYMBOL(rfkill_switch_all); | 225 | EXPORT_SYMBOL(rfkill_switch_all); |
102 | 226 | ||
227 | /** | ||
228 | * rfkill_epo - emergency power off all transmitters | ||
229 | * | ||
230 | * This kicks all rfkill devices to RFKILL_STATE_SOFT_BLOCKED, ignoring | ||
231 | * everything in its path but rfkill_mutex. | ||
232 | */ | ||
233 | void rfkill_epo(void) | ||
234 | { | ||
235 | struct rfkill *rfkill; | ||
236 | |||
237 | mutex_lock(&rfkill_mutex); | ||
238 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
239 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); | ||
240 | } | ||
241 | mutex_unlock(&rfkill_mutex); | ||
242 | } | ||
243 | EXPORT_SYMBOL_GPL(rfkill_epo); | ||
244 | |||
245 | /** | ||
246 | * rfkill_force_state - Force the internal rfkill radio state | ||
247 | * @rfkill: pointer to the rfkill class to modify. | ||
248 | * @state: the current radio state the class should be forced to. | ||
249 | * | ||
250 | * This function updates the internal state of the radio cached | ||
251 | * by the rfkill class. It should be used when the driver gets | ||
252 | * a notification by the firmware/hardware of the current *real* | ||
253 | * state of the radio rfkill switch. | ||
254 | * | ||
255 | * It may not be called from an atomic context. | ||
256 | */ | ||
257 | int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state) | ||
258 | { | ||
259 | enum rfkill_state oldstate; | ||
260 | |||
261 | if (state != RFKILL_STATE_SOFT_BLOCKED && | ||
262 | state != RFKILL_STATE_UNBLOCKED && | ||
263 | state != RFKILL_STATE_HARD_BLOCKED) | ||
264 | return -EINVAL; | ||
265 | |||
266 | mutex_lock(&rfkill->mutex); | ||
267 | |||
268 | oldstate = rfkill->state; | ||
269 | rfkill->state = state; | ||
270 | |||
271 | if (state != oldstate) | ||
272 | notify_rfkill_state_change(rfkill); | ||
273 | |||
274 | mutex_unlock(&rfkill->mutex); | ||
275 | |||
276 | return 0; | ||
277 | } | ||
278 | EXPORT_SYMBOL(rfkill_force_state); | ||
279 | |||
103 | static ssize_t rfkill_name_show(struct device *dev, | 280 | static ssize_t rfkill_name_show(struct device *dev, |
104 | struct device_attribute *attr, | 281 | struct device_attribute *attr, |
105 | char *buf) | 282 | char *buf) |
@@ -109,31 +286,31 @@ static ssize_t rfkill_name_show(struct device *dev, | |||
109 | return sprintf(buf, "%s\n", rfkill->name); | 286 | return sprintf(buf, "%s\n", rfkill->name); |
110 | } | 287 | } |
111 | 288 | ||
112 | static ssize_t rfkill_type_show(struct device *dev, | 289 | static const char *rfkill_get_type_str(enum rfkill_type type) |
113 | struct device_attribute *attr, | ||
114 | char *buf) | ||
115 | { | 290 | { |
116 | struct rfkill *rfkill = to_rfkill(dev); | 291 | switch (type) { |
117 | const char *type; | ||
118 | |||
119 | switch (rfkill->type) { | ||
120 | case RFKILL_TYPE_WLAN: | 292 | case RFKILL_TYPE_WLAN: |
121 | type = "wlan"; | 293 | return "wlan"; |
122 | break; | ||
123 | case RFKILL_TYPE_BLUETOOTH: | 294 | case RFKILL_TYPE_BLUETOOTH: |
124 | type = "bluetooth"; | 295 | return "bluetooth"; |
125 | break; | ||
126 | case RFKILL_TYPE_UWB: | 296 | case RFKILL_TYPE_UWB: |
127 | type = "ultrawideband"; | 297 | return "ultrawideband"; |
128 | break; | ||
129 | case RFKILL_TYPE_WIMAX: | 298 | case RFKILL_TYPE_WIMAX: |
130 | type = "wimax"; | 299 | return "wimax"; |
131 | break; | 300 | case RFKILL_TYPE_WWAN: |
301 | return "wwan"; | ||
132 | default: | 302 | default: |
133 | BUG(); | 303 | BUG(); |
134 | } | 304 | } |
305 | } | ||
306 | |||
307 | static ssize_t rfkill_type_show(struct device *dev, | ||
308 | struct device_attribute *attr, | ||
309 | char *buf) | ||
310 | { | ||
311 | struct rfkill *rfkill = to_rfkill(dev); | ||
135 | 312 | ||
136 | return sprintf(buf, "%s\n", type); | 313 | return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type)); |
137 | } | 314 | } |
138 | 315 | ||
139 | static ssize_t rfkill_state_show(struct device *dev, | 316 | static ssize_t rfkill_state_show(struct device *dev, |
@@ -142,6 +319,7 @@ static ssize_t rfkill_state_show(struct device *dev, | |||
142 | { | 319 | { |
143 | struct rfkill *rfkill = to_rfkill(dev); | 320 | struct rfkill *rfkill = to_rfkill(dev); |
144 | 321 | ||
322 | update_rfkill_state(rfkill); | ||
145 | return sprintf(buf, "%d\n", rfkill->state); | 323 | return sprintf(buf, "%d\n", rfkill->state); |
146 | } | 324 | } |
147 | 325 | ||
@@ -156,10 +334,14 @@ static ssize_t rfkill_state_store(struct device *dev, | |||
156 | if (!capable(CAP_NET_ADMIN)) | 334 | if (!capable(CAP_NET_ADMIN)) |
157 | return -EPERM; | 335 | return -EPERM; |
158 | 336 | ||
337 | /* RFKILL_STATE_HARD_BLOCKED is illegal here... */ | ||
338 | if (state != RFKILL_STATE_UNBLOCKED && | ||
339 | state != RFKILL_STATE_SOFT_BLOCKED) | ||
340 | return -EINVAL; | ||
341 | |||
159 | if (mutex_lock_interruptible(&rfkill->mutex)) | 342 | if (mutex_lock_interruptible(&rfkill->mutex)) |
160 | return -ERESTARTSYS; | 343 | return -ERESTARTSYS; |
161 | error = rfkill_toggle_radio(rfkill, | 344 | error = rfkill_toggle_radio(rfkill, state, 0); |
162 | state ? RFKILL_STATE_ON : RFKILL_STATE_OFF); | ||
163 | mutex_unlock(&rfkill->mutex); | 345 | mutex_unlock(&rfkill->mutex); |
164 | 346 | ||
165 | return error ? error : count; | 347 | return error ? error : count; |
@@ -200,7 +382,8 @@ static ssize_t rfkill_claim_store(struct device *dev, | |||
200 | if (rfkill->user_claim != claim) { | 382 | if (rfkill->user_claim != claim) { |
201 | if (!claim) | 383 | if (!claim) |
202 | rfkill_toggle_radio(rfkill, | 384 | rfkill_toggle_radio(rfkill, |
203 | rfkill_states[rfkill->type]); | 385 | rfkill_states[rfkill->type], |
386 | 0); | ||
204 | rfkill->user_claim = claim; | 387 | rfkill->user_claim = claim; |
205 | } | 388 | } |
206 | 389 | ||
@@ -233,12 +416,12 @@ static int rfkill_suspend(struct device *dev, pm_message_t state) | |||
233 | 416 | ||
234 | if (dev->power.power_state.event != state.event) { | 417 | if (dev->power.power_state.event != state.event) { |
235 | if (state.event & PM_EVENT_SLEEP) { | 418 | if (state.event & PM_EVENT_SLEEP) { |
236 | mutex_lock(&rfkill->mutex); | 419 | /* Stop transmitter, keep state, no notifies */ |
237 | 420 | update_rfkill_state(rfkill); | |
238 | if (rfkill->state == RFKILL_STATE_ON) | ||
239 | rfkill->toggle_radio(rfkill->data, | ||
240 | RFKILL_STATE_OFF); | ||
241 | 421 | ||
422 | mutex_lock(&rfkill->mutex); | ||
423 | rfkill->toggle_radio(rfkill->data, | ||
424 | RFKILL_STATE_SOFT_BLOCKED); | ||
242 | mutex_unlock(&rfkill->mutex); | 425 | mutex_unlock(&rfkill->mutex); |
243 | } | 426 | } |
244 | 427 | ||
@@ -255,8 +438,8 @@ static int rfkill_resume(struct device *dev) | |||
255 | if (dev->power.power_state.event != PM_EVENT_ON) { | 438 | if (dev->power.power_state.event != PM_EVENT_ON) { |
256 | mutex_lock(&rfkill->mutex); | 439 | mutex_lock(&rfkill->mutex); |
257 | 440 | ||
258 | if (rfkill->state == RFKILL_STATE_ON) | 441 | /* restore radio state AND notify everybody */ |
259 | rfkill->toggle_radio(rfkill->data, RFKILL_STATE_ON); | 442 | rfkill_toggle_radio(rfkill, rfkill->state, 1); |
260 | 443 | ||
261 | mutex_unlock(&rfkill->mutex); | 444 | mutex_unlock(&rfkill->mutex); |
262 | } | 445 | } |
@@ -269,34 +452,71 @@ static int rfkill_resume(struct device *dev) | |||
269 | #define rfkill_resume NULL | 452 | #define rfkill_resume NULL |
270 | #endif | 453 | #endif |
271 | 454 | ||
455 | static int rfkill_blocking_uevent_notifier(struct notifier_block *nb, | ||
456 | unsigned long eventid, | ||
457 | void *data) | ||
458 | { | ||
459 | struct rfkill *rfkill = (struct rfkill *)data; | ||
460 | |||
461 | switch (eventid) { | ||
462 | case RFKILL_STATE_CHANGED: | ||
463 | kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE); | ||
464 | break; | ||
465 | default: | ||
466 | break; | ||
467 | } | ||
468 | |||
469 | return NOTIFY_DONE; | ||
470 | } | ||
471 | |||
472 | static struct notifier_block rfkill_blocking_uevent_nb = { | ||
473 | .notifier_call = rfkill_blocking_uevent_notifier, | ||
474 | .priority = 0, | ||
475 | }; | ||
476 | |||
477 | static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
478 | { | ||
479 | struct rfkill *rfkill = to_rfkill(dev); | ||
480 | int error; | ||
481 | |||
482 | error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name); | ||
483 | if (error) | ||
484 | return error; | ||
485 | error = add_uevent_var(env, "RFKILL_TYPE=%s", | ||
486 | rfkill_get_type_str(rfkill->type)); | ||
487 | if (error) | ||
488 | return error; | ||
489 | error = add_uevent_var(env, "RFKILL_STATE=%d", rfkill->state); | ||
490 | return error; | ||
491 | } | ||
492 | |||
272 | static struct class rfkill_class = { | 493 | static struct class rfkill_class = { |
273 | .name = "rfkill", | 494 | .name = "rfkill", |
274 | .dev_release = rfkill_release, | 495 | .dev_release = rfkill_release, |
275 | .dev_attrs = rfkill_dev_attrs, | 496 | .dev_attrs = rfkill_dev_attrs, |
276 | .suspend = rfkill_suspend, | 497 | .suspend = rfkill_suspend, |
277 | .resume = rfkill_resume, | 498 | .resume = rfkill_resume, |
499 | .dev_uevent = rfkill_dev_uevent, | ||
278 | }; | 500 | }; |
279 | 501 | ||
280 | static int rfkill_add_switch(struct rfkill *rfkill) | 502 | static int rfkill_add_switch(struct rfkill *rfkill) |
281 | { | 503 | { |
282 | int error; | ||
283 | |||
284 | mutex_lock(&rfkill_mutex); | 504 | mutex_lock(&rfkill_mutex); |
285 | 505 | ||
286 | error = rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type]); | 506 | rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type], 0); |
287 | if (!error) | 507 | |
288 | list_add_tail(&rfkill->node, &rfkill_list); | 508 | list_add_tail(&rfkill->node, &rfkill_list); |
289 | 509 | ||
290 | mutex_unlock(&rfkill_mutex); | 510 | mutex_unlock(&rfkill_mutex); |
291 | 511 | ||
292 | return error; | 512 | return 0; |
293 | } | 513 | } |
294 | 514 | ||
295 | static void rfkill_remove_switch(struct rfkill *rfkill) | 515 | static void rfkill_remove_switch(struct rfkill *rfkill) |
296 | { | 516 | { |
297 | mutex_lock(&rfkill_mutex); | 517 | mutex_lock(&rfkill_mutex); |
298 | list_del_init(&rfkill->node); | 518 | list_del_init(&rfkill->node); |
299 | rfkill_toggle_radio(rfkill, RFKILL_STATE_OFF); | 519 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); |
300 | mutex_unlock(&rfkill_mutex); | 520 | mutex_unlock(&rfkill_mutex); |
301 | } | 521 | } |
302 | 522 | ||
@@ -412,7 +632,7 @@ int rfkill_register(struct rfkill *rfkill) | |||
412 | EXPORT_SYMBOL(rfkill_register); | 632 | EXPORT_SYMBOL(rfkill_register); |
413 | 633 | ||
414 | /** | 634 | /** |
415 | * rfkill_unregister - Uegister a rfkill structure. | 635 | * rfkill_unregister - Unregister a rfkill structure. |
416 | * @rfkill: rfkill structure to be unregistered | 636 | * @rfkill: rfkill structure to be unregistered |
417 | * | 637 | * |
418 | * This function should be called by the network driver during device | 638 | * This function should be called by the network driver during device |
@@ -436,8 +656,13 @@ static int __init rfkill_init(void) | |||
436 | int error; | 656 | int error; |
437 | int i; | 657 | int i; |
438 | 658 | ||
659 | /* RFKILL_STATE_HARD_BLOCKED is illegal here... */ | ||
660 | if (rfkill_default_state != RFKILL_STATE_SOFT_BLOCKED && | ||
661 | rfkill_default_state != RFKILL_STATE_UNBLOCKED) | ||
662 | return -EINVAL; | ||
663 | |||
439 | for (i = 0; i < ARRAY_SIZE(rfkill_states); i++) | 664 | for (i = 0; i < ARRAY_SIZE(rfkill_states); i++) |
440 | rfkill_states[i] = RFKILL_STATE_ON; | 665 | rfkill_states[i] = rfkill_default_state; |
441 | 666 | ||
442 | error = class_register(&rfkill_class); | 667 | error = class_register(&rfkill_class); |
443 | if (error) { | 668 | if (error) { |
@@ -445,11 +670,14 @@ static int __init rfkill_init(void) | |||
445 | return error; | 670 | return error; |
446 | } | 671 | } |
447 | 672 | ||
673 | register_rfkill_notifier(&rfkill_blocking_uevent_nb); | ||
674 | |||
448 | return 0; | 675 | return 0; |
449 | } | 676 | } |
450 | 677 | ||
451 | static void __exit rfkill_exit(void) | 678 | static void __exit rfkill_exit(void) |
452 | { | 679 | { |
680 | unregister_rfkill_notifier(&rfkill_blocking_uevent_nb); | ||
453 | class_unregister(&rfkill_class); | 681 | class_unregister(&rfkill_class); |
454 | } | 682 | } |
455 | 683 | ||
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 1ebf65294405..dbc963b4f5fb 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -75,6 +75,18 @@ ax25_address rose_callsign; | |||
75 | */ | 75 | */ |
76 | static struct lock_class_key rose_netdev_xmit_lock_key; | 76 | static struct lock_class_key rose_netdev_xmit_lock_key; |
77 | 77 | ||
78 | static void rose_set_lockdep_one(struct net_device *dev, | ||
79 | struct netdev_queue *txq, | ||
80 | void *_unused) | ||
81 | { | ||
82 | lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key); | ||
83 | } | ||
84 | |||
85 | static void rose_set_lockdep_key(struct net_device *dev) | ||
86 | { | ||
87 | netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL); | ||
88 | } | ||
89 | |||
78 | /* | 90 | /* |
79 | * Convert a ROSE address into text. | 91 | * Convert a ROSE address into text. |
80 | */ | 92 | */ |
@@ -197,7 +209,7 @@ static int rose_device_event(struct notifier_block *this, unsigned long event, | |||
197 | { | 209 | { |
198 | struct net_device *dev = (struct net_device *)ptr; | 210 | struct net_device *dev = (struct net_device *)ptr; |
199 | 211 | ||
200 | if (dev_net(dev) != &init_net) | 212 | if (!net_eq(dev_net(dev), &init_net)) |
201 | return NOTIFY_DONE; | 213 | return NOTIFY_DONE; |
202 | 214 | ||
203 | if (event != NETDEV_DOWN) | 215 | if (event != NETDEV_DOWN) |
@@ -566,13 +578,11 @@ static struct sock *rose_make_new(struct sock *osk) | |||
566 | #endif | 578 | #endif |
567 | 579 | ||
568 | sk->sk_type = osk->sk_type; | 580 | sk->sk_type = osk->sk_type; |
569 | sk->sk_socket = osk->sk_socket; | ||
570 | sk->sk_priority = osk->sk_priority; | 581 | sk->sk_priority = osk->sk_priority; |
571 | sk->sk_protocol = osk->sk_protocol; | 582 | sk->sk_protocol = osk->sk_protocol; |
572 | sk->sk_rcvbuf = osk->sk_rcvbuf; | 583 | sk->sk_rcvbuf = osk->sk_rcvbuf; |
573 | sk->sk_sndbuf = osk->sk_sndbuf; | 584 | sk->sk_sndbuf = osk->sk_sndbuf; |
574 | sk->sk_state = TCP_ESTABLISHED; | 585 | sk->sk_state = TCP_ESTABLISHED; |
575 | sk->sk_sleep = osk->sk_sleep; | ||
576 | sock_copy_flags(sk, osk); | 586 | sock_copy_flags(sk, osk); |
577 | 587 | ||
578 | init_timer(&rose->timer); | 588 | init_timer(&rose->timer); |
@@ -759,7 +769,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le | |||
759 | sock->state = SS_UNCONNECTED; | 769 | sock->state = SS_UNCONNECTED; |
760 | 770 | ||
761 | rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, | 771 | rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, |
762 | &diagnostic); | 772 | &diagnostic, 0); |
763 | if (!rose->neighbour) { | 773 | if (!rose->neighbour) { |
764 | err = -ENETUNREACH; | 774 | err = -ENETUNREACH; |
765 | goto out_release; | 775 | goto out_release; |
@@ -855,7 +865,7 @@ rose_try_next_neigh: | |||
855 | 865 | ||
856 | if (sk->sk_state != TCP_ESTABLISHED) { | 866 | if (sk->sk_state != TCP_ESTABLISHED) { |
857 | /* Try next neighbour */ | 867 | /* Try next neighbour */ |
858 | rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic); | 868 | rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0); |
859 | if (rose->neighbour) | 869 | if (rose->neighbour) |
860 | goto rose_try_next_neigh; | 870 | goto rose_try_next_neigh; |
861 | 871 | ||
@@ -924,14 +934,12 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags) | |||
924 | goto out_release; | 934 | goto out_release; |
925 | 935 | ||
926 | newsk = skb->sk; | 936 | newsk = skb->sk; |
927 | newsk->sk_socket = newsock; | 937 | sock_graft(newsk, newsock); |
928 | newsk->sk_sleep = &newsock->wait; | ||
929 | 938 | ||
930 | /* Now attach up the new socket */ | 939 | /* Now attach up the new socket */ |
931 | skb->sk = NULL; | 940 | skb->sk = NULL; |
932 | kfree_skb(skb); | 941 | kfree_skb(skb); |
933 | sk->sk_ack_backlog--; | 942 | sk->sk_ack_backlog--; |
934 | newsock->sk = newsk; | ||
935 | 943 | ||
936 | out_release: | 944 | out_release: |
937 | release_sock(sk); | 945 | release_sock(sk); |
@@ -1580,7 +1588,7 @@ static int __init rose_proto_init(void) | |||
1580 | free_netdev(dev); | 1588 | free_netdev(dev); |
1581 | goto fail; | 1589 | goto fail; |
1582 | } | 1590 | } |
1583 | lockdep_set_class(&dev->_xmit_lock, &rose_netdev_xmit_lock_key); | 1591 | rose_set_lockdep_key(dev); |
1584 | dev_rose[i] = dev; | 1592 | dev_rose[i] = dev; |
1585 | } | 1593 | } |
1586 | 1594 | ||
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index bd593871c81e..a81066a1010a 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c | |||
@@ -662,27 +662,34 @@ struct rose_route *rose_route_free_lci(unsigned int lci, struct rose_neigh *neig | |||
662 | } | 662 | } |
663 | 663 | ||
664 | /* | 664 | /* |
665 | * Find a neighbour given a ROSE address. | 665 | * Find a neighbour or a route given a ROSE address. |
666 | */ | 666 | */ |
667 | struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause, | 667 | struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause, |
668 | unsigned char *diagnostic) | 668 | unsigned char *diagnostic, int new) |
669 | { | 669 | { |
670 | struct rose_neigh *res = NULL; | 670 | struct rose_neigh *res = NULL; |
671 | struct rose_node *node; | 671 | struct rose_node *node; |
672 | int failed = 0; | 672 | int failed = 0; |
673 | int i; | 673 | int i; |
674 | 674 | ||
675 | spin_lock_bh(&rose_node_list_lock); | 675 | if (!new) spin_lock_bh(&rose_node_list_lock); |
676 | for (node = rose_node_list; node != NULL; node = node->next) { | 676 | for (node = rose_node_list; node != NULL; node = node->next) { |
677 | if (rosecmpm(addr, &node->address, node->mask) == 0) { | 677 | if (rosecmpm(addr, &node->address, node->mask) == 0) { |
678 | for (i = 0; i < node->count; i++) { | 678 | for (i = 0; i < node->count; i++) { |
679 | if (!rose_ftimer_running(node->neighbour[i])) { | 679 | if (new) { |
680 | res = node->neighbour[i]; | 680 | if (node->neighbour[i]->restarted) { |
681 | goto out; | 681 | res = node->neighbour[i]; |
682 | } else | 682 | goto out; |
683 | failed = 1; | 683 | } |
684 | } | ||
685 | else { | ||
686 | if (!rose_ftimer_running(node->neighbour[i])) { | ||
687 | res = node->neighbour[i]; | ||
688 | goto out; | ||
689 | } else | ||
690 | failed = 1; | ||
691 | } | ||
684 | } | 692 | } |
685 | break; | ||
686 | } | 693 | } |
687 | } | 694 | } |
688 | 695 | ||
@@ -695,7 +702,7 @@ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause, | |||
695 | } | 702 | } |
696 | 703 | ||
697 | out: | 704 | out: |
698 | spin_unlock_bh(&rose_node_list_lock); | 705 | if (!new) spin_unlock_bh(&rose_node_list_lock); |
699 | 706 | ||
700 | return res; | 707 | return res; |
701 | } | 708 | } |
@@ -1018,7 +1025,7 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) | |||
1018 | rose_route = rose_route->next; | 1025 | rose_route = rose_route->next; |
1019 | } | 1026 | } |
1020 | 1027 | ||
1021 | if ((new_neigh = rose_get_neigh(dest_addr, &cause, &diagnostic)) == NULL) { | 1028 | if ((new_neigh = rose_get_neigh(dest_addr, &cause, &diagnostic, 1)) == NULL) { |
1022 | rose_transmit_clear_request(rose_neigh, lci, cause, diagnostic); | 1029 | rose_transmit_clear_request(rose_neigh, lci, cause, diagnostic); |
1023 | goto out; | 1030 | goto out; |
1024 | } | 1031 | } |
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index f8a699e92962..f98c8027e5c1 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <net/af_rxrpc.h> | 21 | #include <net/af_rxrpc.h> |
22 | #include <net/ip.h> | 22 | #include <net/ip.h> |
23 | #include <net/udp.h> | 23 | #include <net/udp.h> |
24 | #include <net/net_namespace.h> | ||
24 | #include "ar-internal.h" | 25 | #include "ar-internal.h" |
25 | 26 | ||
26 | unsigned long rxrpc_ack_timeout = 1; | 27 | unsigned long rxrpc_ack_timeout = 1; |
@@ -708,12 +709,12 @@ void rxrpc_data_ready(struct sock *sk, int count) | |||
708 | if (skb_checksum_complete(skb)) { | 709 | if (skb_checksum_complete(skb)) { |
709 | rxrpc_free_skb(skb); | 710 | rxrpc_free_skb(skb); |
710 | rxrpc_put_local(local); | 711 | rxrpc_put_local(local); |
711 | UDP_INC_STATS_BH(UDP_MIB_INERRORS, 0); | 712 | UDP_INC_STATS_BH(&init_net, UDP_MIB_INERRORS, 0); |
712 | _leave(" [CSUM failed]"); | 713 | _leave(" [CSUM failed]"); |
713 | return; | 714 | return; |
714 | } | 715 | } |
715 | 716 | ||
716 | UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, 0); | 717 | UDP_INC_STATS_BH(&init_net, UDP_MIB_INDATAGRAMS, 0); |
717 | 718 | ||
718 | /* the socket buffer we have is owned by UDP, with UDP's data all over | 719 | /* the socket buffer we have is owned by UDP, with UDP's data all over |
719 | * it, but we really want our own */ | 720 | * it, but we really want our own */ |
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 422872c4f14b..ac04289da5d7 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c | |||
@@ -139,7 +139,7 @@ static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result | |||
139 | #else | 139 | #else |
140 | action = gact->tcf_action; | 140 | action = gact->tcf_action; |
141 | #endif | 141 | #endif |
142 | gact->tcf_bstats.bytes += skb->len; | 142 | gact->tcf_bstats.bytes += qdisc_pkt_len(skb); |
143 | gact->tcf_bstats.packets++; | 143 | gact->tcf_bstats.packets++; |
144 | if (action == TC_ACT_SHOT) | 144 | if (action == TC_ACT_SHOT) |
145 | gact->tcf_qstats.drops++; | 145 | gact->tcf_qstats.drops++; |
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index da696fd3e341..d1263b3c96c3 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
@@ -205,7 +205,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a, | |||
205 | spin_lock(&ipt->tcf_lock); | 205 | spin_lock(&ipt->tcf_lock); |
206 | 206 | ||
207 | ipt->tcf_tm.lastuse = jiffies; | 207 | ipt->tcf_tm.lastuse = jiffies; |
208 | ipt->tcf_bstats.bytes += skb->len; | 208 | ipt->tcf_bstats.bytes += qdisc_pkt_len(skb); |
209 | ipt->tcf_bstats.packets++; | 209 | ipt->tcf_bstats.packets++; |
210 | 210 | ||
211 | /* yes, we have to worry about both in and out dev | 211 | /* yes, we have to worry about both in and out dev |
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 1aff005d95cd..70341c020b6d 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -164,7 +164,7 @@ bad_mirred: | |||
164 | if (skb2 != NULL) | 164 | if (skb2 != NULL) |
165 | kfree_skb(skb2); | 165 | kfree_skb(skb2); |
166 | m->tcf_qstats.overlimits++; | 166 | m->tcf_qstats.overlimits++; |
167 | m->tcf_bstats.bytes += skb->len; | 167 | m->tcf_bstats.bytes += qdisc_pkt_len(skb); |
168 | m->tcf_bstats.packets++; | 168 | m->tcf_bstats.packets++; |
169 | spin_unlock(&m->tcf_lock); | 169 | spin_unlock(&m->tcf_lock); |
170 | /* should we be asking for packet to be dropped? | 170 | /* should we be asking for packet to be dropped? |
@@ -184,7 +184,7 @@ bad_mirred: | |||
184 | goto bad_mirred; | 184 | goto bad_mirred; |
185 | } | 185 | } |
186 | 186 | ||
187 | m->tcf_bstats.bytes += skb2->len; | 187 | m->tcf_bstats.bytes += qdisc_pkt_len(skb2); |
188 | m->tcf_bstats.packets++; | 188 | m->tcf_bstats.packets++; |
189 | if (!(at & AT_EGRESS)) | 189 | if (!(at & AT_EGRESS)) |
190 | if (m->tcfm_ok_push) | 190 | if (m->tcfm_ok_push) |
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 0a3c8339767a..7b39ed485bca 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c | |||
@@ -124,7 +124,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, | |||
124 | egress = p->flags & TCA_NAT_FLAG_EGRESS; | 124 | egress = p->flags & TCA_NAT_FLAG_EGRESS; |
125 | action = p->tcf_action; | 125 | action = p->tcf_action; |
126 | 126 | ||
127 | p->tcf_bstats.bytes += skb->len; | 127 | p->tcf_bstats.bytes += qdisc_pkt_len(skb); |
128 | p->tcf_bstats.packets++; | 128 | p->tcf_bstats.packets++; |
129 | 129 | ||
130 | spin_unlock(&p->tcf_lock); | 130 | spin_unlock(&p->tcf_lock); |
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 3cc4cb9e500e..d5f4e3404864 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -182,7 +182,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
182 | bad: | 182 | bad: |
183 | p->tcf_qstats.overlimits++; | 183 | p->tcf_qstats.overlimits++; |
184 | done: | 184 | done: |
185 | p->tcf_bstats.bytes += skb->len; | 185 | p->tcf_bstats.bytes += qdisc_pkt_len(skb); |
186 | p->tcf_bstats.packets++; | 186 | p->tcf_bstats.packets++; |
187 | spin_unlock(&p->tcf_lock); | 187 | spin_unlock(&p->tcf_lock); |
188 | return p->tcf_action; | 188 | return p->tcf_action; |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 0898120bbcc0..32c3f9d9fb7a 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -272,7 +272,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, | |||
272 | 272 | ||
273 | spin_lock(&police->tcf_lock); | 273 | spin_lock(&police->tcf_lock); |
274 | 274 | ||
275 | police->tcf_bstats.bytes += skb->len; | 275 | police->tcf_bstats.bytes += qdisc_pkt_len(skb); |
276 | police->tcf_bstats.packets++; | 276 | police->tcf_bstats.packets++; |
277 | 277 | ||
278 | if (police->tcfp_ewma_rate && | 278 | if (police->tcfp_ewma_rate && |
@@ -282,7 +282,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, | |||
282 | return police->tcf_action; | 282 | return police->tcf_action; |
283 | } | 283 | } |
284 | 284 | ||
285 | if (skb->len <= police->tcfp_mtu) { | 285 | if (qdisc_pkt_len(skb) <= police->tcfp_mtu) { |
286 | if (police->tcfp_R_tab == NULL) { | 286 | if (police->tcfp_R_tab == NULL) { |
287 | spin_unlock(&police->tcf_lock); | 287 | spin_unlock(&police->tcf_lock); |
288 | return police->tcfp_result; | 288 | return police->tcfp_result; |
@@ -295,12 +295,12 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, | |||
295 | ptoks = toks + police->tcfp_ptoks; | 295 | ptoks = toks + police->tcfp_ptoks; |
296 | if (ptoks > (long)L2T_P(police, police->tcfp_mtu)) | 296 | if (ptoks > (long)L2T_P(police, police->tcfp_mtu)) |
297 | ptoks = (long)L2T_P(police, police->tcfp_mtu); | 297 | ptoks = (long)L2T_P(police, police->tcfp_mtu); |
298 | ptoks -= L2T_P(police, skb->len); | 298 | ptoks -= L2T_P(police, qdisc_pkt_len(skb)); |
299 | } | 299 | } |
300 | toks += police->tcfp_toks; | 300 | toks += police->tcfp_toks; |
301 | if (toks > (long)police->tcfp_burst) | 301 | if (toks > (long)police->tcfp_burst) |
302 | toks = police->tcfp_burst; | 302 | toks = police->tcfp_burst; |
303 | toks -= L2T(police, skb->len); | 303 | toks -= L2T(police, qdisc_pkt_len(skb)); |
304 | if ((toks|ptoks) >= 0) { | 304 | if ((toks|ptoks) >= 0) { |
305 | police->tcfp_t_c = now; | 305 | police->tcfp_t_c = now; |
306 | police->tcfp_toks = toks; | 306 | police->tcfp_toks = toks; |
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 1d421d059caf..e7851ce92cfe 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c | |||
@@ -41,7 +41,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result | |||
41 | 41 | ||
42 | spin_lock(&d->tcf_lock); | 42 | spin_lock(&d->tcf_lock); |
43 | d->tcf_tm.lastuse = jiffies; | 43 | d->tcf_tm.lastuse = jiffies; |
44 | d->tcf_bstats.bytes += skb->len; | 44 | d->tcf_bstats.bytes += qdisc_pkt_len(skb); |
45 | d->tcf_bstats.packets++; | 45 | d->tcf_bstats.packets++; |
46 | 46 | ||
47 | /* print policy string followed by _ then packet count | 47 | /* print policy string followed by _ then packet count |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 9360fc81e8c7..d2b6f54a6261 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -120,6 +120,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
120 | { | 120 | { |
121 | struct net *net = sock_net(skb->sk); | 121 | struct net *net = sock_net(skb->sk); |
122 | struct nlattr *tca[TCA_MAX + 1]; | 122 | struct nlattr *tca[TCA_MAX + 1]; |
123 | spinlock_t *root_lock; | ||
123 | struct tcmsg *t; | 124 | struct tcmsg *t; |
124 | u32 protocol; | 125 | u32 protocol; |
125 | u32 prio; | 126 | u32 prio; |
@@ -166,7 +167,8 @@ replay: | |||
166 | 167 | ||
167 | /* Find qdisc */ | 168 | /* Find qdisc */ |
168 | if (!parent) { | 169 | if (!parent) { |
169 | q = dev->qdisc_sleeping; | 170 | struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); |
171 | q = dev_queue->qdisc_sleeping; | ||
170 | parent = q->handle; | 172 | parent = q->handle; |
171 | } else { | 173 | } else { |
172 | q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent)); | 174 | q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent)); |
@@ -203,6 +205,8 @@ replay: | |||
203 | } | 205 | } |
204 | } | 206 | } |
205 | 207 | ||
208 | root_lock = qdisc_root_lock(q); | ||
209 | |||
206 | if (tp == NULL) { | 210 | if (tp == NULL) { |
207 | /* Proto-tcf does not exist, create new one */ | 211 | /* Proto-tcf does not exist, create new one */ |
208 | 212 | ||
@@ -262,10 +266,10 @@ replay: | |||
262 | goto errout; | 266 | goto errout; |
263 | } | 267 | } |
264 | 268 | ||
265 | qdisc_lock_tree(dev); | 269 | spin_lock_bh(root_lock); |
266 | tp->next = *back; | 270 | tp->next = *back; |
267 | *back = tp; | 271 | *back = tp; |
268 | qdisc_unlock_tree(dev); | 272 | spin_unlock_bh(root_lock); |
269 | 273 | ||
270 | } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) | 274 | } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) |
271 | goto errout; | 275 | goto errout; |
@@ -274,9 +278,9 @@ replay: | |||
274 | 278 | ||
275 | if (fh == 0) { | 279 | if (fh == 0) { |
276 | if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { | 280 | if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { |
277 | qdisc_lock_tree(dev); | 281 | spin_lock_bh(root_lock); |
278 | *back = tp->next; | 282 | *back = tp->next; |
279 | qdisc_unlock_tree(dev); | 283 | spin_lock_bh(root_lock); |
280 | 284 | ||
281 | tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); | 285 | tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); |
282 | tcf_destroy(tp); | 286 | tcf_destroy(tp); |
@@ -334,7 +338,7 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, | |||
334 | tcm->tcm_family = AF_UNSPEC; | 338 | tcm->tcm_family = AF_UNSPEC; |
335 | tcm->tcm__pad1 = 0; | 339 | tcm->tcm__pad1 = 0; |
336 | tcm->tcm__pad1 = 0; | 340 | tcm->tcm__pad1 = 0; |
337 | tcm->tcm_ifindex = tp->q->dev->ifindex; | 341 | tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex; |
338 | tcm->tcm_parent = tp->classid; | 342 | tcm->tcm_parent = tp->classid; |
339 | tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); | 343 | tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); |
340 | NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind); | 344 | NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind); |
@@ -390,6 +394,7 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n, | |||
390 | static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | 394 | static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) |
391 | { | 395 | { |
392 | struct net *net = sock_net(skb->sk); | 396 | struct net *net = sock_net(skb->sk); |
397 | struct netdev_queue *dev_queue; | ||
393 | int t; | 398 | int t; |
394 | int s_t; | 399 | int s_t; |
395 | struct net_device *dev; | 400 | struct net_device *dev; |
@@ -408,8 +413,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | |||
408 | if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 413 | if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) |
409 | return skb->len; | 414 | return skb->len; |
410 | 415 | ||
416 | dev_queue = netdev_get_tx_queue(dev, 0); | ||
411 | if (!tcm->tcm_parent) | 417 | if (!tcm->tcm_parent) |
412 | q = dev->qdisc_sleeping; | 418 | q = dev_queue->qdisc_sleeping; |
413 | else | 419 | else |
414 | q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); | 420 | q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); |
415 | if (!q) | 421 | if (!q) |
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 971b867e0484..8f63a1a94014 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c | |||
@@ -36,6 +36,8 @@ struct flow_filter { | |||
36 | struct list_head list; | 36 | struct list_head list; |
37 | struct tcf_exts exts; | 37 | struct tcf_exts exts; |
38 | struct tcf_ematch_tree ematches; | 38 | struct tcf_ematch_tree ematches; |
39 | struct timer_list perturb_timer; | ||
40 | u32 perturb_period; | ||
39 | u32 handle; | 41 | u32 handle; |
40 | 42 | ||
41 | u32 nkeys; | 43 | u32 nkeys; |
@@ -47,11 +49,9 @@ struct flow_filter { | |||
47 | u32 addend; | 49 | u32 addend; |
48 | u32 divisor; | 50 | u32 divisor; |
49 | u32 baseclass; | 51 | u32 baseclass; |
52 | u32 hashrnd; | ||
50 | }; | 53 | }; |
51 | 54 | ||
52 | static u32 flow_hashrnd __read_mostly; | ||
53 | static int flow_hashrnd_initted __read_mostly; | ||
54 | |||
55 | static const struct tcf_ext_map flow_ext_map = { | 55 | static const struct tcf_ext_map flow_ext_map = { |
56 | .action = TCA_FLOW_ACT, | 56 | .action = TCA_FLOW_ACT, |
57 | .police = TCA_FLOW_POLICE, | 57 | .police = TCA_FLOW_POLICE, |
@@ -348,7 +348,7 @@ static int flow_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
348 | } | 348 | } |
349 | 349 | ||
350 | if (f->mode == FLOW_MODE_HASH) | 350 | if (f->mode == FLOW_MODE_HASH) |
351 | classid = jhash2(keys, f->nkeys, flow_hashrnd); | 351 | classid = jhash2(keys, f->nkeys, f->hashrnd); |
352 | else { | 352 | else { |
353 | classid = keys[0]; | 353 | classid = keys[0]; |
354 | classid = (classid & f->mask) ^ f->xor; | 354 | classid = (classid & f->mask) ^ f->xor; |
@@ -369,6 +369,15 @@ static int flow_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
369 | return -1; | 369 | return -1; |
370 | } | 370 | } |
371 | 371 | ||
372 | static void flow_perturbation(unsigned long arg) | ||
373 | { | ||
374 | struct flow_filter *f = (struct flow_filter *)arg; | ||
375 | |||
376 | get_random_bytes(&f->hashrnd, 4); | ||
377 | if (f->perturb_period) | ||
378 | mod_timer(&f->perturb_timer, jiffies + f->perturb_period); | ||
379 | } | ||
380 | |||
372 | static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = { | 381 | static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = { |
373 | [TCA_FLOW_KEYS] = { .type = NLA_U32 }, | 382 | [TCA_FLOW_KEYS] = { .type = NLA_U32 }, |
374 | [TCA_FLOW_MODE] = { .type = NLA_U32 }, | 383 | [TCA_FLOW_MODE] = { .type = NLA_U32 }, |
@@ -381,6 +390,7 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = { | |||
381 | [TCA_FLOW_ACT] = { .type = NLA_NESTED }, | 390 | [TCA_FLOW_ACT] = { .type = NLA_NESTED }, |
382 | [TCA_FLOW_POLICE] = { .type = NLA_NESTED }, | 391 | [TCA_FLOW_POLICE] = { .type = NLA_NESTED }, |
383 | [TCA_FLOW_EMATCHES] = { .type = NLA_NESTED }, | 392 | [TCA_FLOW_EMATCHES] = { .type = NLA_NESTED }, |
393 | [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, | ||
384 | }; | 394 | }; |
385 | 395 | ||
386 | static int flow_change(struct tcf_proto *tp, unsigned long base, | 396 | static int flow_change(struct tcf_proto *tp, unsigned long base, |
@@ -394,6 +404,7 @@ static int flow_change(struct tcf_proto *tp, unsigned long base, | |||
394 | struct tcf_exts e; | 404 | struct tcf_exts e; |
395 | struct tcf_ematch_tree t; | 405 | struct tcf_ematch_tree t; |
396 | unsigned int nkeys = 0; | 406 | unsigned int nkeys = 0; |
407 | unsigned int perturb_period = 0; | ||
397 | u32 baseclass = 0; | 408 | u32 baseclass = 0; |
398 | u32 keymask = 0; | 409 | u32 keymask = 0; |
399 | u32 mode; | 410 | u32 mode; |
@@ -442,6 +453,14 @@ static int flow_change(struct tcf_proto *tp, unsigned long base, | |||
442 | mode = nla_get_u32(tb[TCA_FLOW_MODE]); | 453 | mode = nla_get_u32(tb[TCA_FLOW_MODE]); |
443 | if (mode != FLOW_MODE_HASH && nkeys > 1) | 454 | if (mode != FLOW_MODE_HASH && nkeys > 1) |
444 | goto err2; | 455 | goto err2; |
456 | |||
457 | if (mode == FLOW_MODE_HASH) | ||
458 | perturb_period = f->perturb_period; | ||
459 | if (tb[TCA_FLOW_PERTURB]) { | ||
460 | if (mode != FLOW_MODE_HASH) | ||
461 | goto err2; | ||
462 | perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ; | ||
463 | } | ||
445 | } else { | 464 | } else { |
446 | err = -EINVAL; | 465 | err = -EINVAL; |
447 | if (!handle) | 466 | if (!handle) |
@@ -455,6 +474,12 @@ static int flow_change(struct tcf_proto *tp, unsigned long base, | |||
455 | if (mode != FLOW_MODE_HASH && nkeys > 1) | 474 | if (mode != FLOW_MODE_HASH && nkeys > 1) |
456 | goto err2; | 475 | goto err2; |
457 | 476 | ||
477 | if (tb[TCA_FLOW_PERTURB]) { | ||
478 | if (mode != FLOW_MODE_HASH) | ||
479 | goto err2; | ||
480 | perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ; | ||
481 | } | ||
482 | |||
458 | if (TC_H_MAJ(baseclass) == 0) | 483 | if (TC_H_MAJ(baseclass) == 0) |
459 | baseclass = TC_H_MAKE(tp->q->handle, baseclass); | 484 | baseclass = TC_H_MAKE(tp->q->handle, baseclass); |
460 | if (TC_H_MIN(baseclass) == 0) | 485 | if (TC_H_MIN(baseclass) == 0) |
@@ -467,6 +492,11 @@ static int flow_change(struct tcf_proto *tp, unsigned long base, | |||
467 | 492 | ||
468 | f->handle = handle; | 493 | f->handle = handle; |
469 | f->mask = ~0U; | 494 | f->mask = ~0U; |
495 | |||
496 | get_random_bytes(&f->hashrnd, 4); | ||
497 | f->perturb_timer.function = flow_perturbation; | ||
498 | f->perturb_timer.data = (unsigned long)f; | ||
499 | init_timer_deferrable(&f->perturb_timer); | ||
470 | } | 500 | } |
471 | 501 | ||
472 | tcf_exts_change(tp, &f->exts, &e); | 502 | tcf_exts_change(tp, &f->exts, &e); |
@@ -495,6 +525,11 @@ static int flow_change(struct tcf_proto *tp, unsigned long base, | |||
495 | if (baseclass) | 525 | if (baseclass) |
496 | f->baseclass = baseclass; | 526 | f->baseclass = baseclass; |
497 | 527 | ||
528 | f->perturb_period = perturb_period; | ||
529 | del_timer(&f->perturb_timer); | ||
530 | if (perturb_period) | ||
531 | mod_timer(&f->perturb_timer, jiffies + perturb_period); | ||
532 | |||
498 | if (*arg == 0) | 533 | if (*arg == 0) |
499 | list_add_tail(&f->list, &head->filters); | 534 | list_add_tail(&f->list, &head->filters); |
500 | 535 | ||
@@ -512,6 +547,7 @@ err1: | |||
512 | 547 | ||
513 | static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f) | 548 | static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f) |
514 | { | 549 | { |
550 | del_timer_sync(&f->perturb_timer); | ||
515 | tcf_exts_destroy(tp, &f->exts); | 551 | tcf_exts_destroy(tp, &f->exts); |
516 | tcf_em_tree_destroy(tp, &f->ematches); | 552 | tcf_em_tree_destroy(tp, &f->ematches); |
517 | kfree(f); | 553 | kfree(f); |
@@ -532,11 +568,6 @@ static int flow_init(struct tcf_proto *tp) | |||
532 | { | 568 | { |
533 | struct flow_head *head; | 569 | struct flow_head *head; |
534 | 570 | ||
535 | if (!flow_hashrnd_initted) { | ||
536 | get_random_bytes(&flow_hashrnd, 4); | ||
537 | flow_hashrnd_initted = 1; | ||
538 | } | ||
539 | |||
540 | head = kzalloc(sizeof(*head), GFP_KERNEL); | 571 | head = kzalloc(sizeof(*head), GFP_KERNEL); |
541 | if (head == NULL) | 572 | if (head == NULL) |
542 | return -ENOBUFS; | 573 | return -ENOBUFS; |
@@ -605,6 +636,9 @@ static int flow_dump(struct tcf_proto *tp, unsigned long fh, | |||
605 | if (f->baseclass) | 636 | if (f->baseclass) |
606 | NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass); | 637 | NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass); |
607 | 638 | ||
639 | if (f->perturb_period) | ||
640 | NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ); | ||
641 | |||
608 | if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0) | 642 | if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0) |
609 | goto nla_put_failure; | 643 | goto nla_put_failure; |
610 | #ifdef CONFIG_NET_EMATCH | 644 | #ifdef CONFIG_NET_EMATCH |
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 784dcb870b98..481260a4f10f 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c | |||
@@ -73,11 +73,13 @@ static __inline__ int route4_fastmap_hash(u32 id, int iif) | |||
73 | } | 73 | } |
74 | 74 | ||
75 | static inline | 75 | static inline |
76 | void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id) | 76 | void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id) |
77 | { | 77 | { |
78 | qdisc_lock_tree(dev); | 78 | spinlock_t *root_lock = qdisc_root_lock(q); |
79 | |||
80 | spin_lock_bh(root_lock); | ||
79 | memset(head->fastmap, 0, sizeof(head->fastmap)); | 81 | memset(head->fastmap, 0, sizeof(head->fastmap)); |
80 | qdisc_unlock_tree(dev); | 82 | spin_unlock_bh(root_lock); |
81 | } | 83 | } |
82 | 84 | ||
83 | static inline void | 85 | static inline void |
@@ -302,7 +304,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg) | |||
302 | *fp = f->next; | 304 | *fp = f->next; |
303 | tcf_tree_unlock(tp); | 305 | tcf_tree_unlock(tp); |
304 | 306 | ||
305 | route4_reset_fastmap(tp->q->dev, head, f->id); | 307 | route4_reset_fastmap(tp->q, head, f->id); |
306 | route4_delete_filter(tp, f); | 308 | route4_delete_filter(tp, f); |
307 | 309 | ||
308 | /* Strip tree */ | 310 | /* Strip tree */ |
@@ -500,7 +502,7 @@ reinsert: | |||
500 | } | 502 | } |
501 | tcf_tree_unlock(tp); | 503 | tcf_tree_unlock(tp); |
502 | 504 | ||
503 | route4_reset_fastmap(tp->q->dev, head, f->id); | 505 | route4_reset_fastmap(tp->q, head, f->id); |
504 | *arg = (unsigned long)f; | 506 | *arg = (unsigned long)f; |
505 | return 0; | 507 | return 0; |
506 | 508 | ||
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 4d755444c449..527db2559dd2 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -75,7 +75,6 @@ struct tc_u_hnode | |||
75 | 75 | ||
76 | struct tc_u_common | 76 | struct tc_u_common |
77 | { | 77 | { |
78 | struct tc_u_common *next; | ||
79 | struct tc_u_hnode *hlist; | 78 | struct tc_u_hnode *hlist; |
80 | struct Qdisc *q; | 79 | struct Qdisc *q; |
81 | int refcnt; | 80 | int refcnt; |
@@ -87,8 +86,6 @@ static const struct tcf_ext_map u32_ext_map = { | |||
87 | .police = TCA_U32_POLICE | 86 | .police = TCA_U32_POLICE |
88 | }; | 87 | }; |
89 | 88 | ||
90 | static struct tc_u_common *u32_list; | ||
91 | |||
92 | static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift) | 89 | static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift) |
93 | { | 90 | { |
94 | unsigned h = ntohl(key & sel->hmask)>>fshift; | 91 | unsigned h = ntohl(key & sel->hmask)>>fshift; |
@@ -287,9 +284,7 @@ static int u32_init(struct tcf_proto *tp) | |||
287 | struct tc_u_hnode *root_ht; | 284 | struct tc_u_hnode *root_ht; |
288 | struct tc_u_common *tp_c; | 285 | struct tc_u_common *tp_c; |
289 | 286 | ||
290 | for (tp_c = u32_list; tp_c; tp_c = tp_c->next) | 287 | tp_c = tp->q->u32_node; |
291 | if (tp_c->q == tp->q) | ||
292 | break; | ||
293 | 288 | ||
294 | root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL); | 289 | root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL); |
295 | if (root_ht == NULL) | 290 | if (root_ht == NULL) |
@@ -307,8 +302,7 @@ static int u32_init(struct tcf_proto *tp) | |||
307 | return -ENOBUFS; | 302 | return -ENOBUFS; |
308 | } | 303 | } |
309 | tp_c->q = tp->q; | 304 | tp_c->q = tp->q; |
310 | tp_c->next = u32_list; | 305 | tp->q->u32_node = tp_c; |
311 | u32_list = tp_c; | ||
312 | } | 306 | } |
313 | 307 | ||
314 | tp_c->refcnt++; | 308 | tp_c->refcnt++; |
@@ -402,14 +396,8 @@ static void u32_destroy(struct tcf_proto *tp) | |||
402 | 396 | ||
403 | if (--tp_c->refcnt == 0) { | 397 | if (--tp_c->refcnt == 0) { |
404 | struct tc_u_hnode *ht; | 398 | struct tc_u_hnode *ht; |
405 | struct tc_u_common **tp_cp; | ||
406 | 399 | ||
407 | for (tp_cp = &u32_list; *tp_cp; tp_cp = &(*tp_cp)->next) { | 400 | tp->q->u32_node = NULL; |
408 | if (*tp_cp == tp_c) { | ||
409 | *tp_cp = tp_c->next; | ||
410 | break; | ||
411 | } | ||
412 | } | ||
413 | 401 | ||
414 | for (ht = tp_c->hlist; ht; ht = ht->next) { | 402 | for (ht = tp_c->hlist; ht; ht = ht->next) { |
415 | ht->refcnt--; | 403 | ht->refcnt--; |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 10f01ad04380..5219d5f9d754 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -99,7 +99,7 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, | |||
99 | ---requeue | 99 | ---requeue |
100 | 100 | ||
101 | requeues once dequeued packet. It is used for non-standard or | 101 | requeues once dequeued packet. It is used for non-standard or |
102 | just buggy devices, which can defer output even if dev->tbusy=0. | 102 | just buggy devices, which can defer output even if netif_queue_stopped()=0. |
103 | 103 | ||
104 | ---reset | 104 | ---reset |
105 | 105 | ||
@@ -185,11 +185,20 @@ EXPORT_SYMBOL(unregister_qdisc); | |||
185 | 185 | ||
186 | struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) | 186 | struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) |
187 | { | 187 | { |
188 | struct Qdisc *q; | 188 | unsigned int i; |
189 | |||
190 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
191 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | ||
192 | struct Qdisc *q, *txq_root = txq->qdisc; | ||
193 | |||
194 | if (!(txq_root->flags & TCQ_F_BUILTIN) && | ||
195 | txq_root->handle == handle) | ||
196 | return txq_root; | ||
189 | 197 | ||
190 | list_for_each_entry(q, &dev->qdisc_list, list) { | 198 | list_for_each_entry(q, &txq_root->list, list) { |
191 | if (q->handle == handle) | 199 | if (q->handle == handle) |
192 | return q; | 200 | return q; |
201 | } | ||
193 | } | 202 | } |
194 | return NULL; | 203 | return NULL; |
195 | } | 204 | } |
@@ -277,15 +286,137 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab) | |||
277 | } | 286 | } |
278 | EXPORT_SYMBOL(qdisc_put_rtab); | 287 | EXPORT_SYMBOL(qdisc_put_rtab); |
279 | 288 | ||
289 | static LIST_HEAD(qdisc_stab_list); | ||
290 | static DEFINE_SPINLOCK(qdisc_stab_lock); | ||
291 | |||
292 | static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = { | ||
293 | [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) }, | ||
294 | [TCA_STAB_DATA] = { .type = NLA_BINARY }, | ||
295 | }; | ||
296 | |||
297 | static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt) | ||
298 | { | ||
299 | struct nlattr *tb[TCA_STAB_MAX + 1]; | ||
300 | struct qdisc_size_table *stab; | ||
301 | struct tc_sizespec *s; | ||
302 | unsigned int tsize = 0; | ||
303 | u16 *tab = NULL; | ||
304 | int err; | ||
305 | |||
306 | err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy); | ||
307 | if (err < 0) | ||
308 | return ERR_PTR(err); | ||
309 | if (!tb[TCA_STAB_BASE]) | ||
310 | return ERR_PTR(-EINVAL); | ||
311 | |||
312 | s = nla_data(tb[TCA_STAB_BASE]); | ||
313 | |||
314 | if (s->tsize > 0) { | ||
315 | if (!tb[TCA_STAB_DATA]) | ||
316 | return ERR_PTR(-EINVAL); | ||
317 | tab = nla_data(tb[TCA_STAB_DATA]); | ||
318 | tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16); | ||
319 | } | ||
320 | |||
321 | if (!s || tsize != s->tsize || (!tab && tsize > 0)) | ||
322 | return ERR_PTR(-EINVAL); | ||
323 | |||
324 | spin_lock(&qdisc_stab_lock); | ||
325 | |||
326 | list_for_each_entry(stab, &qdisc_stab_list, list) { | ||
327 | if (memcmp(&stab->szopts, s, sizeof(*s))) | ||
328 | continue; | ||
329 | if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16))) | ||
330 | continue; | ||
331 | stab->refcnt++; | ||
332 | spin_unlock(&qdisc_stab_lock); | ||
333 | return stab; | ||
334 | } | ||
335 | |||
336 | spin_unlock(&qdisc_stab_lock); | ||
337 | |||
338 | stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL); | ||
339 | if (!stab) | ||
340 | return ERR_PTR(-ENOMEM); | ||
341 | |||
342 | stab->refcnt = 1; | ||
343 | stab->szopts = *s; | ||
344 | if (tsize > 0) | ||
345 | memcpy(stab->data, tab, tsize * sizeof(u16)); | ||
346 | |||
347 | spin_lock(&qdisc_stab_lock); | ||
348 | list_add_tail(&stab->list, &qdisc_stab_list); | ||
349 | spin_unlock(&qdisc_stab_lock); | ||
350 | |||
351 | return stab; | ||
352 | } | ||
353 | |||
354 | void qdisc_put_stab(struct qdisc_size_table *tab) | ||
355 | { | ||
356 | if (!tab) | ||
357 | return; | ||
358 | |||
359 | spin_lock(&qdisc_stab_lock); | ||
360 | |||
361 | if (--tab->refcnt == 0) { | ||
362 | list_del(&tab->list); | ||
363 | kfree(tab); | ||
364 | } | ||
365 | |||
366 | spin_unlock(&qdisc_stab_lock); | ||
367 | } | ||
368 | EXPORT_SYMBOL(qdisc_put_stab); | ||
369 | |||
370 | static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab) | ||
371 | { | ||
372 | struct nlattr *nest; | ||
373 | |||
374 | nest = nla_nest_start(skb, TCA_STAB); | ||
375 | NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts); | ||
376 | nla_nest_end(skb, nest); | ||
377 | |||
378 | return skb->len; | ||
379 | |||
380 | nla_put_failure: | ||
381 | return -1; | ||
382 | } | ||
383 | |||
384 | void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab) | ||
385 | { | ||
386 | int pkt_len, slot; | ||
387 | |||
388 | pkt_len = skb->len + stab->szopts.overhead; | ||
389 | if (unlikely(!stab->szopts.tsize)) | ||
390 | goto out; | ||
391 | |||
392 | slot = pkt_len + stab->szopts.cell_align; | ||
393 | if (unlikely(slot < 0)) | ||
394 | slot = 0; | ||
395 | |||
396 | slot >>= stab->szopts.cell_log; | ||
397 | if (likely(slot < stab->szopts.tsize)) | ||
398 | pkt_len = stab->data[slot]; | ||
399 | else | ||
400 | pkt_len = stab->data[stab->szopts.tsize - 1] * | ||
401 | (slot / stab->szopts.tsize) + | ||
402 | stab->data[slot % stab->szopts.tsize]; | ||
403 | |||
404 | pkt_len <<= stab->szopts.size_log; | ||
405 | out: | ||
406 | if (unlikely(pkt_len < 1)) | ||
407 | pkt_len = 1; | ||
408 | qdisc_skb_cb(skb)->pkt_len = pkt_len; | ||
409 | } | ||
410 | EXPORT_SYMBOL(qdisc_calculate_pkt_len); | ||
411 | |||
280 | static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) | 412 | static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) |
281 | { | 413 | { |
282 | struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, | 414 | struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, |
283 | timer); | 415 | timer); |
284 | struct net_device *dev = wd->qdisc->dev; | ||
285 | 416 | ||
286 | wd->qdisc->flags &= ~TCQ_F_THROTTLED; | 417 | wd->qdisc->flags &= ~TCQ_F_THROTTLED; |
287 | smp_wmb(); | 418 | smp_wmb(); |
288 | netif_schedule(dev); | 419 | __netif_schedule(wd->qdisc); |
289 | 420 | ||
290 | return HRTIMER_NORESTART; | 421 | return HRTIMER_NORESTART; |
291 | } | 422 | } |
@@ -316,6 +447,110 @@ void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) | |||
316 | } | 447 | } |
317 | EXPORT_SYMBOL(qdisc_watchdog_cancel); | 448 | EXPORT_SYMBOL(qdisc_watchdog_cancel); |
318 | 449 | ||
450 | struct hlist_head *qdisc_class_hash_alloc(unsigned int n) | ||
451 | { | ||
452 | unsigned int size = n * sizeof(struct hlist_head), i; | ||
453 | struct hlist_head *h; | ||
454 | |||
455 | if (size <= PAGE_SIZE) | ||
456 | h = kmalloc(size, GFP_KERNEL); | ||
457 | else | ||
458 | h = (struct hlist_head *) | ||
459 | __get_free_pages(GFP_KERNEL, get_order(size)); | ||
460 | |||
461 | if (h != NULL) { | ||
462 | for (i = 0; i < n; i++) | ||
463 | INIT_HLIST_HEAD(&h[i]); | ||
464 | } | ||
465 | return h; | ||
466 | } | ||
467 | |||
468 | static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n) | ||
469 | { | ||
470 | unsigned int size = n * sizeof(struct hlist_head); | ||
471 | |||
472 | if (size <= PAGE_SIZE) | ||
473 | kfree(h); | ||
474 | else | ||
475 | free_pages((unsigned long)h, get_order(size)); | ||
476 | } | ||
477 | |||
478 | void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash) | ||
479 | { | ||
480 | struct Qdisc_class_common *cl; | ||
481 | struct hlist_node *n, *next; | ||
482 | struct hlist_head *nhash, *ohash; | ||
483 | unsigned int nsize, nmask, osize; | ||
484 | unsigned int i, h; | ||
485 | |||
486 | /* Rehash when load factor exceeds 0.75 */ | ||
487 | if (clhash->hashelems * 4 <= clhash->hashsize * 3) | ||
488 | return; | ||
489 | nsize = clhash->hashsize * 2; | ||
490 | nmask = nsize - 1; | ||
491 | nhash = qdisc_class_hash_alloc(nsize); | ||
492 | if (nhash == NULL) | ||
493 | return; | ||
494 | |||
495 | ohash = clhash->hash; | ||
496 | osize = clhash->hashsize; | ||
497 | |||
498 | sch_tree_lock(sch); | ||
499 | for (i = 0; i < osize; i++) { | ||
500 | hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) { | ||
501 | h = qdisc_class_hash(cl->classid, nmask); | ||
502 | hlist_add_head(&cl->hnode, &nhash[h]); | ||
503 | } | ||
504 | } | ||
505 | clhash->hash = nhash; | ||
506 | clhash->hashsize = nsize; | ||
507 | clhash->hashmask = nmask; | ||
508 | sch_tree_unlock(sch); | ||
509 | |||
510 | qdisc_class_hash_free(ohash, osize); | ||
511 | } | ||
512 | EXPORT_SYMBOL(qdisc_class_hash_grow); | ||
513 | |||
514 | int qdisc_class_hash_init(struct Qdisc_class_hash *clhash) | ||
515 | { | ||
516 | unsigned int size = 4; | ||
517 | |||
518 | clhash->hash = qdisc_class_hash_alloc(size); | ||
519 | if (clhash->hash == NULL) | ||
520 | return -ENOMEM; | ||
521 | clhash->hashsize = size; | ||
522 | clhash->hashmask = size - 1; | ||
523 | clhash->hashelems = 0; | ||
524 | return 0; | ||
525 | } | ||
526 | EXPORT_SYMBOL(qdisc_class_hash_init); | ||
527 | |||
528 | void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash) | ||
529 | { | ||
530 | qdisc_class_hash_free(clhash->hash, clhash->hashsize); | ||
531 | } | ||
532 | EXPORT_SYMBOL(qdisc_class_hash_destroy); | ||
533 | |||
534 | void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash, | ||
535 | struct Qdisc_class_common *cl) | ||
536 | { | ||
537 | unsigned int h; | ||
538 | |||
539 | INIT_HLIST_NODE(&cl->hnode); | ||
540 | h = qdisc_class_hash(cl->classid, clhash->hashmask); | ||
541 | hlist_add_head(&cl->hnode, &clhash->hash[h]); | ||
542 | clhash->hashelems++; | ||
543 | } | ||
544 | EXPORT_SYMBOL(qdisc_class_hash_insert); | ||
545 | |||
546 | void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash, | ||
547 | struct Qdisc_class_common *cl) | ||
548 | { | ||
549 | hlist_del(&cl->hnode); | ||
550 | clhash->hashelems--; | ||
551 | } | ||
552 | EXPORT_SYMBOL(qdisc_class_hash_remove); | ||
553 | |||
319 | /* Allocate an unique handle from space managed by kernel */ | 554 | /* Allocate an unique handle from space managed by kernel */ |
320 | 555 | ||
321 | static u32 qdisc_alloc_handle(struct net_device *dev) | 556 | static u32 qdisc_alloc_handle(struct net_device *dev) |
@@ -332,32 +567,39 @@ static u32 qdisc_alloc_handle(struct net_device *dev) | |||
332 | return i>0 ? autohandle : 0; | 567 | return i>0 ? autohandle : 0; |
333 | } | 568 | } |
334 | 569 | ||
335 | /* Attach toplevel qdisc to device dev */ | 570 | /* Attach toplevel qdisc to device queue. */ |
336 | 571 | ||
337 | static struct Qdisc * | 572 | static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, |
338 | dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc) | 573 | struct Qdisc *qdisc) |
339 | { | 574 | { |
575 | spinlock_t *root_lock; | ||
340 | struct Qdisc *oqdisc; | 576 | struct Qdisc *oqdisc; |
577 | int ingress; | ||
578 | |||
579 | ingress = 0; | ||
580 | if (qdisc && qdisc->flags&TCQ_F_INGRESS) | ||
581 | ingress = 1; | ||
582 | |||
583 | if (ingress) { | ||
584 | oqdisc = dev_queue->qdisc; | ||
585 | } else { | ||
586 | oqdisc = dev_queue->qdisc_sleeping; | ||
587 | } | ||
341 | 588 | ||
342 | if (dev->flags & IFF_UP) | 589 | root_lock = qdisc_root_lock(oqdisc); |
343 | dev_deactivate(dev); | 590 | spin_lock_bh(root_lock); |
344 | 591 | ||
345 | qdisc_lock_tree(dev); | 592 | if (ingress) { |
346 | if (qdisc && qdisc->flags&TCQ_F_INGRESS) { | ||
347 | oqdisc = dev->qdisc_ingress; | ||
348 | /* Prune old scheduler */ | 593 | /* Prune old scheduler */ |
349 | if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) { | 594 | if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) { |
350 | /* delete */ | 595 | /* delete */ |
351 | qdisc_reset(oqdisc); | 596 | qdisc_reset(oqdisc); |
352 | dev->qdisc_ingress = NULL; | 597 | dev_queue->qdisc = NULL; |
353 | } else { /* new */ | 598 | } else { /* new */ |
354 | dev->qdisc_ingress = qdisc; | 599 | dev_queue->qdisc = qdisc; |
355 | } | 600 | } |
356 | 601 | ||
357 | } else { | 602 | } else { |
358 | |||
359 | oqdisc = dev->qdisc_sleeping; | ||
360 | |||
361 | /* Prune old scheduler */ | 603 | /* Prune old scheduler */ |
362 | if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) | 604 | if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) |
363 | qdisc_reset(oqdisc); | 605 | qdisc_reset(oqdisc); |
@@ -365,14 +607,11 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc) | |||
365 | /* ... and graft new one */ | 607 | /* ... and graft new one */ |
366 | if (qdisc == NULL) | 608 | if (qdisc == NULL) |
367 | qdisc = &noop_qdisc; | 609 | qdisc = &noop_qdisc; |
368 | dev->qdisc_sleeping = qdisc; | 610 | dev_queue->qdisc_sleeping = qdisc; |
369 | dev->qdisc = &noop_qdisc; | 611 | dev_queue->qdisc = &noop_qdisc; |
370 | } | 612 | } |
371 | 613 | ||
372 | qdisc_unlock_tree(dev); | 614 | spin_unlock_bh(root_lock); |
373 | |||
374 | if (dev->flags & IFF_UP) | ||
375 | dev_activate(dev); | ||
376 | 615 | ||
377 | return oqdisc; | 616 | return oqdisc; |
378 | } | 617 | } |
@@ -389,7 +628,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) | |||
389 | if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS)) | 628 | if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS)) |
390 | return; | 629 | return; |
391 | 630 | ||
392 | sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid)); | 631 | sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid)); |
393 | if (sch == NULL) { | 632 | if (sch == NULL) { |
394 | WARN_ON(parentid != TC_H_ROOT); | 633 | WARN_ON(parentid != TC_H_ROOT); |
395 | return; | 634 | return; |
@@ -405,26 +644,66 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) | |||
405 | } | 644 | } |
406 | EXPORT_SYMBOL(qdisc_tree_decrease_qlen); | 645 | EXPORT_SYMBOL(qdisc_tree_decrease_qlen); |
407 | 646 | ||
408 | /* Graft qdisc "new" to class "classid" of qdisc "parent" or | 647 | static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid, |
409 | to device "dev". | 648 | struct Qdisc *old, struct Qdisc *new) |
649 | { | ||
650 | if (new || old) | ||
651 | qdisc_notify(skb, n, clid, old, new); | ||
652 | |||
653 | if (old) { | ||
654 | spin_lock_bh(&old->q.lock); | ||
655 | qdisc_destroy(old); | ||
656 | spin_unlock_bh(&old->q.lock); | ||
657 | } | ||
658 | } | ||
410 | 659 | ||
411 | Old qdisc is not destroyed but returned in *old. | 660 | /* Graft qdisc "new" to class "classid" of qdisc "parent" or |
661 | * to device "dev". | ||
662 | * | ||
663 | * When appropriate send a netlink notification using 'skb' | ||
664 | * and "n". | ||
665 | * | ||
666 | * On success, destroy old qdisc. | ||
412 | */ | 667 | */ |
413 | 668 | ||
414 | static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | 669 | static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, |
415 | u32 classid, | 670 | struct sk_buff *skb, struct nlmsghdr *n, u32 classid, |
416 | struct Qdisc *new, struct Qdisc **old) | 671 | struct Qdisc *new, struct Qdisc *old) |
417 | { | 672 | { |
673 | struct Qdisc *q = old; | ||
418 | int err = 0; | 674 | int err = 0; |
419 | struct Qdisc *q = *old; | ||
420 | |||
421 | 675 | ||
422 | if (parent == NULL) { | 676 | if (parent == NULL) { |
423 | if (q && q->flags&TCQ_F_INGRESS) { | 677 | unsigned int i, num_q, ingress; |
424 | *old = dev_graft_qdisc(dev, q); | 678 | |
425 | } else { | 679 | ingress = 0; |
426 | *old = dev_graft_qdisc(dev, new); | 680 | num_q = dev->num_tx_queues; |
681 | if (q && q->flags & TCQ_F_INGRESS) { | ||
682 | num_q = 1; | ||
683 | ingress = 1; | ||
684 | } | ||
685 | |||
686 | if (dev->flags & IFF_UP) | ||
687 | dev_deactivate(dev); | ||
688 | |||
689 | for (i = 0; i < num_q; i++) { | ||
690 | struct netdev_queue *dev_queue = &dev->rx_queue; | ||
691 | |||
692 | if (!ingress) | ||
693 | dev_queue = netdev_get_tx_queue(dev, i); | ||
694 | |||
695 | if (ingress) { | ||
696 | old = dev_graft_qdisc(dev_queue, q); | ||
697 | } else { | ||
698 | old = dev_graft_qdisc(dev_queue, new); | ||
699 | if (new && i > 0) | ||
700 | atomic_inc(&new->refcnt); | ||
701 | } | ||
702 | notify_and_destroy(skb, n, classid, old, new); | ||
427 | } | 703 | } |
704 | |||
705 | if (dev->flags & IFF_UP) | ||
706 | dev_activate(dev); | ||
428 | } else { | 707 | } else { |
429 | const struct Qdisc_class_ops *cops = parent->ops->cl_ops; | 708 | const struct Qdisc_class_ops *cops = parent->ops->cl_ops; |
430 | 709 | ||
@@ -433,10 +712,12 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
433 | if (cops) { | 712 | if (cops) { |
434 | unsigned long cl = cops->get(parent, classid); | 713 | unsigned long cl = cops->get(parent, classid); |
435 | if (cl) { | 714 | if (cl) { |
436 | err = cops->graft(parent, cl, new, old); | 715 | err = cops->graft(parent, cl, new, &old); |
437 | cops->put(parent, cl); | 716 | cops->put(parent, cl); |
438 | } | 717 | } |
439 | } | 718 | } |
719 | if (!err) | ||
720 | notify_and_destroy(skb, n, classid, old, new); | ||
440 | } | 721 | } |
441 | return err; | 722 | return err; |
442 | } | 723 | } |
@@ -448,13 +729,14 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
448 | */ | 729 | */ |
449 | 730 | ||
450 | static struct Qdisc * | 731 | static struct Qdisc * |
451 | qdisc_create(struct net_device *dev, u32 parent, u32 handle, | 732 | qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, |
452 | struct nlattr **tca, int *errp) | 733 | u32 parent, u32 handle, struct nlattr **tca, int *errp) |
453 | { | 734 | { |
454 | int err; | 735 | int err; |
455 | struct nlattr *kind = tca[TCA_KIND]; | 736 | struct nlattr *kind = tca[TCA_KIND]; |
456 | struct Qdisc *sch; | 737 | struct Qdisc *sch; |
457 | struct Qdisc_ops *ops; | 738 | struct Qdisc_ops *ops; |
739 | struct qdisc_size_table *stab; | ||
458 | 740 | ||
459 | ops = qdisc_lookup_ops(kind); | 741 | ops = qdisc_lookup_ops(kind); |
460 | #ifdef CONFIG_KMOD | 742 | #ifdef CONFIG_KMOD |
@@ -489,7 +771,7 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle, | |||
489 | if (ops == NULL) | 771 | if (ops == NULL) |
490 | goto err_out; | 772 | goto err_out; |
491 | 773 | ||
492 | sch = qdisc_alloc(dev, ops); | 774 | sch = qdisc_alloc(dev_queue, ops); |
493 | if (IS_ERR(sch)) { | 775 | if (IS_ERR(sch)) { |
494 | err = PTR_ERR(sch); | 776 | err = PTR_ERR(sch); |
495 | goto err_out2; | 777 | goto err_out2; |
@@ -499,10 +781,8 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle, | |||
499 | 781 | ||
500 | if (handle == TC_H_INGRESS) { | 782 | if (handle == TC_H_INGRESS) { |
501 | sch->flags |= TCQ_F_INGRESS; | 783 | sch->flags |= TCQ_F_INGRESS; |
502 | sch->stats_lock = &dev->ingress_lock; | ||
503 | handle = TC_H_MAKE(TC_H_INGRESS, 0); | 784 | handle = TC_H_MAKE(TC_H_INGRESS, 0); |
504 | } else { | 785 | } else { |
505 | sch->stats_lock = &dev->queue_lock; | ||
506 | if (handle == 0) { | 786 | if (handle == 0) { |
507 | handle = qdisc_alloc_handle(dev); | 787 | handle = qdisc_alloc_handle(dev); |
508 | err = -ENOMEM; | 788 | err = -ENOMEM; |
@@ -514,9 +794,17 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle, | |||
514 | sch->handle = handle; | 794 | sch->handle = handle; |
515 | 795 | ||
516 | if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { | 796 | if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { |
797 | if (tca[TCA_STAB]) { | ||
798 | stab = qdisc_get_stab(tca[TCA_STAB]); | ||
799 | if (IS_ERR(stab)) { | ||
800 | err = PTR_ERR(stab); | ||
801 | goto err_out3; | ||
802 | } | ||
803 | sch->stab = stab; | ||
804 | } | ||
517 | if (tca[TCA_RATE]) { | 805 | if (tca[TCA_RATE]) { |
518 | err = gen_new_estimator(&sch->bstats, &sch->rate_est, | 806 | err = gen_new_estimator(&sch->bstats, &sch->rate_est, |
519 | sch->stats_lock, | 807 | qdisc_root_lock(sch), |
520 | tca[TCA_RATE]); | 808 | tca[TCA_RATE]); |
521 | if (err) { | 809 | if (err) { |
522 | /* | 810 | /* |
@@ -529,13 +817,13 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle, | |||
529 | goto err_out3; | 817 | goto err_out3; |
530 | } | 818 | } |
531 | } | 819 | } |
532 | qdisc_lock_tree(dev); | 820 | if (parent) |
533 | list_add_tail(&sch->list, &dev->qdisc_list); | 821 | list_add_tail(&sch->list, &dev_queue->qdisc->list); |
534 | qdisc_unlock_tree(dev); | ||
535 | 822 | ||
536 | return sch; | 823 | return sch; |
537 | } | 824 | } |
538 | err_out3: | 825 | err_out3: |
826 | qdisc_put_stab(sch->stab); | ||
539 | dev_put(dev); | 827 | dev_put(dev); |
540 | kfree((char *) sch - sch->padded); | 828 | kfree((char *) sch - sch->padded); |
541 | err_out2: | 829 | err_out2: |
@@ -547,18 +835,29 @@ err_out: | |||
547 | 835 | ||
548 | static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) | 836 | static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) |
549 | { | 837 | { |
550 | if (tca[TCA_OPTIONS]) { | 838 | struct qdisc_size_table *stab = NULL; |
551 | int err; | 839 | int err = 0; |
552 | 840 | ||
841 | if (tca[TCA_OPTIONS]) { | ||
553 | if (sch->ops->change == NULL) | 842 | if (sch->ops->change == NULL) |
554 | return -EINVAL; | 843 | return -EINVAL; |
555 | err = sch->ops->change(sch, tca[TCA_OPTIONS]); | 844 | err = sch->ops->change(sch, tca[TCA_OPTIONS]); |
556 | if (err) | 845 | if (err) |
557 | return err; | 846 | return err; |
558 | } | 847 | } |
848 | |||
849 | if (tca[TCA_STAB]) { | ||
850 | stab = qdisc_get_stab(tca[TCA_STAB]); | ||
851 | if (IS_ERR(stab)) | ||
852 | return PTR_ERR(stab); | ||
853 | } | ||
854 | |||
855 | qdisc_put_stab(sch->stab); | ||
856 | sch->stab = stab; | ||
857 | |||
559 | if (tca[TCA_RATE]) | 858 | if (tca[TCA_RATE]) |
560 | gen_replace_estimator(&sch->bstats, &sch->rate_est, | 859 | gen_replace_estimator(&sch->bstats, &sch->rate_est, |
561 | sch->stats_lock, tca[TCA_RATE]); | 860 | qdisc_root_lock(sch), tca[TCA_RATE]); |
562 | return 0; | 861 | return 0; |
563 | } | 862 | } |
564 | 863 | ||
@@ -634,10 +933,12 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
634 | return -ENOENT; | 933 | return -ENOENT; |
635 | q = qdisc_leaf(p, clid); | 934 | q = qdisc_leaf(p, clid); |
636 | } else { /* ingress */ | 935 | } else { /* ingress */ |
637 | q = dev->qdisc_ingress; | 936 | q = dev->rx_queue.qdisc; |
638 | } | 937 | } |
639 | } else { | 938 | } else { |
640 | q = dev->qdisc_sleeping; | 939 | struct netdev_queue *dev_queue; |
940 | dev_queue = netdev_get_tx_queue(dev, 0); | ||
941 | q = dev_queue->qdisc_sleeping; | ||
641 | } | 942 | } |
642 | if (!q) | 943 | if (!q) |
643 | return -ENOENT; | 944 | return -ENOENT; |
@@ -657,14 +958,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
657 | return -EINVAL; | 958 | return -EINVAL; |
658 | if (q->handle == 0) | 959 | if (q->handle == 0) |
659 | return -ENOENT; | 960 | return -ENOENT; |
660 | if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0) | 961 | if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0) |
661 | return err; | 962 | return err; |
662 | if (q) { | ||
663 | qdisc_notify(skb, n, clid, q, NULL); | ||
664 | qdisc_lock_tree(dev); | ||
665 | qdisc_destroy(q); | ||
666 | qdisc_unlock_tree(dev); | ||
667 | } | ||
668 | } else { | 963 | } else { |
669 | qdisc_notify(skb, n, clid, NULL, q); | 964 | qdisc_notify(skb, n, clid, NULL, q); |
670 | } | 965 | } |
@@ -708,10 +1003,12 @@ replay: | |||
708 | return -ENOENT; | 1003 | return -ENOENT; |
709 | q = qdisc_leaf(p, clid); | 1004 | q = qdisc_leaf(p, clid); |
710 | } else { /*ingress */ | 1005 | } else { /*ingress */ |
711 | q = dev->qdisc_ingress; | 1006 | q = dev->rx_queue.qdisc; |
712 | } | 1007 | } |
713 | } else { | 1008 | } else { |
714 | q = dev->qdisc_sleeping; | 1009 | struct netdev_queue *dev_queue; |
1010 | dev_queue = netdev_get_tx_queue(dev, 0); | ||
1011 | q = dev_queue->qdisc_sleeping; | ||
715 | } | 1012 | } |
716 | 1013 | ||
717 | /* It may be default qdisc, ignore it */ | 1014 | /* It may be default qdisc, ignore it */ |
@@ -788,10 +1085,12 @@ create_n_graft: | |||
788 | if (!(n->nlmsg_flags&NLM_F_CREATE)) | 1085 | if (!(n->nlmsg_flags&NLM_F_CREATE)) |
789 | return -ENOENT; | 1086 | return -ENOENT; |
790 | if (clid == TC_H_INGRESS) | 1087 | if (clid == TC_H_INGRESS) |
791 | q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_parent, | 1088 | q = qdisc_create(dev, &dev->rx_queue, |
1089 | tcm->tcm_parent, tcm->tcm_parent, | ||
792 | tca, &err); | 1090 | tca, &err); |
793 | else | 1091 | else |
794 | q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_handle, | 1092 | q = qdisc_create(dev, netdev_get_tx_queue(dev, 0), |
1093 | tcm->tcm_parent, tcm->tcm_handle, | ||
795 | tca, &err); | 1094 | tca, &err); |
796 | if (q == NULL) { | 1095 | if (q == NULL) { |
797 | if (err == -EAGAIN) | 1096 | if (err == -EAGAIN) |
@@ -801,22 +1100,18 @@ create_n_graft: | |||
801 | 1100 | ||
802 | graft: | 1101 | graft: |
803 | if (1) { | 1102 | if (1) { |
804 | struct Qdisc *old_q = NULL; | 1103 | spinlock_t *root_lock; |
805 | err = qdisc_graft(dev, p, clid, q, &old_q); | 1104 | |
1105 | err = qdisc_graft(dev, p, skb, n, clid, q, NULL); | ||
806 | if (err) { | 1106 | if (err) { |
807 | if (q) { | 1107 | if (q) { |
808 | qdisc_lock_tree(dev); | 1108 | root_lock = qdisc_root_lock(q); |
1109 | spin_lock_bh(root_lock); | ||
809 | qdisc_destroy(q); | 1110 | qdisc_destroy(q); |
810 | qdisc_unlock_tree(dev); | 1111 | spin_unlock_bh(root_lock); |
811 | } | 1112 | } |
812 | return err; | 1113 | return err; |
813 | } | 1114 | } |
814 | qdisc_notify(skb, n, clid, old_q, q); | ||
815 | if (old_q) { | ||
816 | qdisc_lock_tree(dev); | ||
817 | qdisc_destroy(old_q); | ||
818 | qdisc_unlock_tree(dev); | ||
819 | } | ||
820 | } | 1115 | } |
821 | return 0; | 1116 | return 0; |
822 | } | 1117 | } |
@@ -834,7 +1129,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, | |||
834 | tcm->tcm_family = AF_UNSPEC; | 1129 | tcm->tcm_family = AF_UNSPEC; |
835 | tcm->tcm__pad1 = 0; | 1130 | tcm->tcm__pad1 = 0; |
836 | tcm->tcm__pad2 = 0; | 1131 | tcm->tcm__pad2 = 0; |
837 | tcm->tcm_ifindex = q->dev->ifindex; | 1132 | tcm->tcm_ifindex = qdisc_dev(q)->ifindex; |
838 | tcm->tcm_parent = clid; | 1133 | tcm->tcm_parent = clid; |
839 | tcm->tcm_handle = q->handle; | 1134 | tcm->tcm_handle = q->handle; |
840 | tcm->tcm_info = atomic_read(&q->refcnt); | 1135 | tcm->tcm_info = atomic_read(&q->refcnt); |
@@ -843,8 +1138,11 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, | |||
843 | goto nla_put_failure; | 1138 | goto nla_put_failure; |
844 | q->qstats.qlen = q->q.qlen; | 1139 | q->qstats.qlen = q->q.qlen; |
845 | 1140 | ||
1141 | if (q->stab && qdisc_dump_stab(skb, q->stab) < 0) | ||
1142 | goto nla_put_failure; | ||
1143 | |||
846 | if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, | 1144 | if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, |
847 | TCA_XSTATS, q->stats_lock, &d) < 0) | 1145 | TCA_XSTATS, qdisc_root_lock(q), &d) < 0) |
848 | goto nla_put_failure; | 1146 | goto nla_put_failure; |
849 | 1147 | ||
850 | if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) | 1148 | if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) |
@@ -894,13 +1192,57 @@ err_out: | |||
894 | return -EINVAL; | 1192 | return -EINVAL; |
895 | } | 1193 | } |
896 | 1194 | ||
1195 | static bool tc_qdisc_dump_ignore(struct Qdisc *q) | ||
1196 | { | ||
1197 | return (q->flags & TCQ_F_BUILTIN) ? true : false; | ||
1198 | } | ||
1199 | |||
1200 | static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, | ||
1201 | struct netlink_callback *cb, | ||
1202 | int *q_idx_p, int s_q_idx) | ||
1203 | { | ||
1204 | int ret = 0, q_idx = *q_idx_p; | ||
1205 | struct Qdisc *q; | ||
1206 | |||
1207 | if (!root) | ||
1208 | return 0; | ||
1209 | |||
1210 | q = root; | ||
1211 | if (q_idx < s_q_idx) { | ||
1212 | q_idx++; | ||
1213 | } else { | ||
1214 | if (!tc_qdisc_dump_ignore(q) && | ||
1215 | tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, | ||
1216 | cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) | ||
1217 | goto done; | ||
1218 | q_idx++; | ||
1219 | } | ||
1220 | list_for_each_entry(q, &root->list, list) { | ||
1221 | if (q_idx < s_q_idx) { | ||
1222 | q_idx++; | ||
1223 | continue; | ||
1224 | } | ||
1225 | if (!tc_qdisc_dump_ignore(q) && | ||
1226 | tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, | ||
1227 | cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) | ||
1228 | goto done; | ||
1229 | q_idx++; | ||
1230 | } | ||
1231 | |||
1232 | out: | ||
1233 | *q_idx_p = q_idx; | ||
1234 | return ret; | ||
1235 | done: | ||
1236 | ret = -1; | ||
1237 | goto out; | ||
1238 | } | ||
1239 | |||
897 | static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) | 1240 | static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) |
898 | { | 1241 | { |
899 | struct net *net = sock_net(skb->sk); | 1242 | struct net *net = sock_net(skb->sk); |
900 | int idx, q_idx; | 1243 | int idx, q_idx; |
901 | int s_idx, s_q_idx; | 1244 | int s_idx, s_q_idx; |
902 | struct net_device *dev; | 1245 | struct net_device *dev; |
903 | struct Qdisc *q; | ||
904 | 1246 | ||
905 | if (net != &init_net) | 1247 | if (net != &init_net) |
906 | return 0; | 1248 | return 0; |
@@ -910,21 +1252,22 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) | |||
910 | read_lock(&dev_base_lock); | 1252 | read_lock(&dev_base_lock); |
911 | idx = 0; | 1253 | idx = 0; |
912 | for_each_netdev(&init_net, dev) { | 1254 | for_each_netdev(&init_net, dev) { |
1255 | struct netdev_queue *dev_queue; | ||
1256 | |||
913 | if (idx < s_idx) | 1257 | if (idx < s_idx) |
914 | goto cont; | 1258 | goto cont; |
915 | if (idx > s_idx) | 1259 | if (idx > s_idx) |
916 | s_q_idx = 0; | 1260 | s_q_idx = 0; |
917 | q_idx = 0; | 1261 | q_idx = 0; |
918 | list_for_each_entry(q, &dev->qdisc_list, list) { | 1262 | |
919 | if (q_idx < s_q_idx) { | 1263 | dev_queue = netdev_get_tx_queue(dev, 0); |
920 | q_idx++; | 1264 | if (tc_dump_qdisc_root(dev_queue->qdisc, skb, cb, &q_idx, s_q_idx) < 0) |
921 | continue; | 1265 | goto done; |
922 | } | 1266 | |
923 | if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, | 1267 | dev_queue = &dev->rx_queue; |
924 | cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) | 1268 | if (tc_dump_qdisc_root(dev_queue->qdisc, skb, cb, &q_idx, s_q_idx) < 0) |
925 | goto done; | 1269 | goto done; |
926 | q_idx++; | 1270 | |
927 | } | ||
928 | cont: | 1271 | cont: |
929 | idx++; | 1272 | idx++; |
930 | } | 1273 | } |
@@ -949,6 +1292,7 @@ done: | |||
949 | static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | 1292 | static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) |
950 | { | 1293 | { |
951 | struct net *net = sock_net(skb->sk); | 1294 | struct net *net = sock_net(skb->sk); |
1295 | struct netdev_queue *dev_queue; | ||
952 | struct tcmsg *tcm = NLMSG_DATA(n); | 1296 | struct tcmsg *tcm = NLMSG_DATA(n); |
953 | struct nlattr *tca[TCA_MAX + 1]; | 1297 | struct nlattr *tca[TCA_MAX + 1]; |
954 | struct net_device *dev; | 1298 | struct net_device *dev; |
@@ -986,6 +1330,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
986 | 1330 | ||
987 | /* Step 1. Determine qdisc handle X:0 */ | 1331 | /* Step 1. Determine qdisc handle X:0 */ |
988 | 1332 | ||
1333 | dev_queue = netdev_get_tx_queue(dev, 0); | ||
989 | if (pid != TC_H_ROOT) { | 1334 | if (pid != TC_H_ROOT) { |
990 | u32 qid1 = TC_H_MAJ(pid); | 1335 | u32 qid1 = TC_H_MAJ(pid); |
991 | 1336 | ||
@@ -996,7 +1341,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
996 | } else if (qid1) { | 1341 | } else if (qid1) { |
997 | qid = qid1; | 1342 | qid = qid1; |
998 | } else if (qid == 0) | 1343 | } else if (qid == 0) |
999 | qid = dev->qdisc_sleeping->handle; | 1344 | qid = dev_queue->qdisc_sleeping->handle; |
1000 | 1345 | ||
1001 | /* Now qid is genuine qdisc handle consistent | 1346 | /* Now qid is genuine qdisc handle consistent |
1002 | both with parent and child. | 1347 | both with parent and child. |
@@ -1007,7 +1352,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1007 | pid = TC_H_MAKE(qid, pid); | 1352 | pid = TC_H_MAKE(qid, pid); |
1008 | } else { | 1353 | } else { |
1009 | if (qid == 0) | 1354 | if (qid == 0) |
1010 | qid = dev->qdisc_sleeping->handle; | 1355 | qid = dev_queue->qdisc_sleeping->handle; |
1011 | } | 1356 | } |
1012 | 1357 | ||
1013 | /* OK. Locate qdisc */ | 1358 | /* OK. Locate qdisc */ |
@@ -1080,7 +1425,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, | |||
1080 | nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); | 1425 | nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); |
1081 | tcm = NLMSG_DATA(nlh); | 1426 | tcm = NLMSG_DATA(nlh); |
1082 | tcm->tcm_family = AF_UNSPEC; | 1427 | tcm->tcm_family = AF_UNSPEC; |
1083 | tcm->tcm_ifindex = q->dev->ifindex; | 1428 | tcm->tcm_ifindex = qdisc_dev(q)->ifindex; |
1084 | tcm->tcm_parent = q->handle; | 1429 | tcm->tcm_parent = q->handle; |
1085 | tcm->tcm_handle = q->handle; | 1430 | tcm->tcm_handle = q->handle; |
1086 | tcm->tcm_info = 0; | 1431 | tcm->tcm_info = 0; |
@@ -1089,7 +1434,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, | |||
1089 | goto nla_put_failure; | 1434 | goto nla_put_failure; |
1090 | 1435 | ||
1091 | if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, | 1436 | if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, |
1092 | TCA_XSTATS, q->stats_lock, &d) < 0) | 1437 | TCA_XSTATS, qdisc_root_lock(q), &d) < 0) |
1093 | goto nla_put_failure; | 1438 | goto nla_put_failure; |
1094 | 1439 | ||
1095 | if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0) | 1440 | if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0) |
@@ -1140,15 +1485,62 @@ static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walk | |||
1140 | a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS); | 1485 | a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS); |
1141 | } | 1486 | } |
1142 | 1487 | ||
1488 | static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb, | ||
1489 | struct tcmsg *tcm, struct netlink_callback *cb, | ||
1490 | int *t_p, int s_t) | ||
1491 | { | ||
1492 | struct qdisc_dump_args arg; | ||
1493 | |||
1494 | if (tc_qdisc_dump_ignore(q) || | ||
1495 | *t_p < s_t || !q->ops->cl_ops || | ||
1496 | (tcm->tcm_parent && | ||
1497 | TC_H_MAJ(tcm->tcm_parent) != q->handle)) { | ||
1498 | (*t_p)++; | ||
1499 | return 0; | ||
1500 | } | ||
1501 | if (*t_p > s_t) | ||
1502 | memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0])); | ||
1503 | arg.w.fn = qdisc_class_dump; | ||
1504 | arg.skb = skb; | ||
1505 | arg.cb = cb; | ||
1506 | arg.w.stop = 0; | ||
1507 | arg.w.skip = cb->args[1]; | ||
1508 | arg.w.count = 0; | ||
1509 | q->ops->cl_ops->walk(q, &arg.w); | ||
1510 | cb->args[1] = arg.w.count; | ||
1511 | if (arg.w.stop) | ||
1512 | return -1; | ||
1513 | (*t_p)++; | ||
1514 | return 0; | ||
1515 | } | ||
1516 | |||
1517 | static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, | ||
1518 | struct tcmsg *tcm, struct netlink_callback *cb, | ||
1519 | int *t_p, int s_t) | ||
1520 | { | ||
1521 | struct Qdisc *q; | ||
1522 | |||
1523 | if (!root) | ||
1524 | return 0; | ||
1525 | |||
1526 | if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0) | ||
1527 | return -1; | ||
1528 | |||
1529 | list_for_each_entry(q, &root->list, list) { | ||
1530 | if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) | ||
1531 | return -1; | ||
1532 | } | ||
1533 | |||
1534 | return 0; | ||
1535 | } | ||
1536 | |||
1143 | static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) | 1537 | static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) |
1144 | { | 1538 | { |
1539 | struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh); | ||
1145 | struct net *net = sock_net(skb->sk); | 1540 | struct net *net = sock_net(skb->sk); |
1146 | int t; | 1541 | struct netdev_queue *dev_queue; |
1147 | int s_t; | ||
1148 | struct net_device *dev; | 1542 | struct net_device *dev; |
1149 | struct Qdisc *q; | 1543 | int t, s_t; |
1150 | struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh); | ||
1151 | struct qdisc_dump_args arg; | ||
1152 | 1544 | ||
1153 | if (net != &init_net) | 1545 | if (net != &init_net) |
1154 | return 0; | 1546 | return 0; |
@@ -1161,28 +1553,15 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) | |||
1161 | s_t = cb->args[0]; | 1553 | s_t = cb->args[0]; |
1162 | t = 0; | 1554 | t = 0; |
1163 | 1555 | ||
1164 | list_for_each_entry(q, &dev->qdisc_list, list) { | 1556 | dev_queue = netdev_get_tx_queue(dev, 0); |
1165 | if (t < s_t || !q->ops->cl_ops || | 1557 | if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0) |
1166 | (tcm->tcm_parent && | 1558 | goto done; |
1167 | TC_H_MAJ(tcm->tcm_parent) != q->handle)) { | ||
1168 | t++; | ||
1169 | continue; | ||
1170 | } | ||
1171 | if (t > s_t) | ||
1172 | memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0])); | ||
1173 | arg.w.fn = qdisc_class_dump; | ||
1174 | arg.skb = skb; | ||
1175 | arg.cb = cb; | ||
1176 | arg.w.stop = 0; | ||
1177 | arg.w.skip = cb->args[1]; | ||
1178 | arg.w.count = 0; | ||
1179 | q->ops->cl_ops->walk(q, &arg.w); | ||
1180 | cb->args[1] = arg.w.count; | ||
1181 | if (arg.w.stop) | ||
1182 | break; | ||
1183 | t++; | ||
1184 | } | ||
1185 | 1559 | ||
1560 | dev_queue = &dev->rx_queue; | ||
1561 | if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0) | ||
1562 | goto done; | ||
1563 | |||
1564 | done: | ||
1186 | cb->args[0] = t; | 1565 | cb->args[0] = t; |
1187 | 1566 | ||
1188 | dev_put(dev); | 1567 | dev_put(dev); |
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index db0e23ae85f8..04faa835be17 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c | |||
@@ -296,7 +296,8 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, | |||
296 | goto err_out; | 296 | goto err_out; |
297 | } | 297 | } |
298 | flow->filter_list = NULL; | 298 | flow->filter_list = NULL; |
299 | flow->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid); | 299 | flow->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
300 | &pfifo_qdisc_ops, classid); | ||
300 | if (!flow->q) | 301 | if (!flow->q) |
301 | flow->q = &noop_qdisc; | 302 | flow->q = &noop_qdisc; |
302 | pr_debug("atm_tc_change: qdisc %p\n", flow->q); | 303 | pr_debug("atm_tc_change: qdisc %p\n", flow->q); |
@@ -428,7 +429,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
428 | #endif | 429 | #endif |
429 | } | 430 | } |
430 | 431 | ||
431 | ret = flow->q->enqueue(skb, flow->q); | 432 | ret = qdisc_enqueue(skb, flow->q); |
432 | if (ret != 0) { | 433 | if (ret != 0) { |
433 | drop: __maybe_unused | 434 | drop: __maybe_unused |
434 | sch->qstats.drops++; | 435 | sch->qstats.drops++; |
@@ -436,9 +437,9 @@ drop: __maybe_unused | |||
436 | flow->qstats.drops++; | 437 | flow->qstats.drops++; |
437 | return ret; | 438 | return ret; |
438 | } | 439 | } |
439 | sch->bstats.bytes += skb->len; | 440 | sch->bstats.bytes += qdisc_pkt_len(skb); |
440 | sch->bstats.packets++; | 441 | sch->bstats.packets++; |
441 | flow->bstats.bytes += skb->len; | 442 | flow->bstats.bytes += qdisc_pkt_len(skb); |
442 | flow->bstats.packets++; | 443 | flow->bstats.packets++; |
443 | /* | 444 | /* |
444 | * Okay, this may seem weird. We pretend we've dropped the packet if | 445 | * Okay, this may seem weird. We pretend we've dropped the packet if |
@@ -555,7 +556,8 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt) | |||
555 | 556 | ||
556 | pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt); | 557 | pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt); |
557 | p->flows = &p->link; | 558 | p->flows = &p->link; |
558 | p->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, sch->handle); | 559 | p->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
560 | &pfifo_qdisc_ops, sch->handle); | ||
559 | if (!p->link.q) | 561 | if (!p->link.q) |
560 | p->link.q = &noop_qdisc; | 562 | p->link.q = &noop_qdisc; |
561 | pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q); | 563 | pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q); |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 2a3c97f7dc63..f1d2f8ec8b4c 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -73,11 +73,10 @@ struct cbq_sched_data; | |||
73 | 73 | ||
74 | struct cbq_class | 74 | struct cbq_class |
75 | { | 75 | { |
76 | struct cbq_class *next; /* hash table link */ | 76 | struct Qdisc_class_common common; |
77 | struct cbq_class *next_alive; /* next class with backlog in this priority band */ | 77 | struct cbq_class *next_alive; /* next class with backlog in this priority band */ |
78 | 78 | ||
79 | /* Parameters */ | 79 | /* Parameters */ |
80 | u32 classid; | ||
81 | unsigned char priority; /* class priority */ | 80 | unsigned char priority; /* class priority */ |
82 | unsigned char priority2; /* priority to be used after overlimit */ | 81 | unsigned char priority2; /* priority to be used after overlimit */ |
83 | unsigned char ewma_log; /* time constant for idle time calculation */ | 82 | unsigned char ewma_log; /* time constant for idle time calculation */ |
@@ -144,7 +143,7 @@ struct cbq_class | |||
144 | 143 | ||
145 | struct cbq_sched_data | 144 | struct cbq_sched_data |
146 | { | 145 | { |
147 | struct cbq_class *classes[16]; /* Hash table of all classes */ | 146 | struct Qdisc_class_hash clhash; /* Hash table of all classes */ |
148 | int nclasses[TC_CBQ_MAXPRIO+1]; | 147 | int nclasses[TC_CBQ_MAXPRIO+1]; |
149 | unsigned quanta[TC_CBQ_MAXPRIO+1]; | 148 | unsigned quanta[TC_CBQ_MAXPRIO+1]; |
150 | 149 | ||
@@ -177,23 +176,15 @@ struct cbq_sched_data | |||
177 | 176 | ||
178 | #define L2T(cl,len) qdisc_l2t((cl)->R_tab,len) | 177 | #define L2T(cl,len) qdisc_l2t((cl)->R_tab,len) |
179 | 178 | ||
180 | |||
181 | static __inline__ unsigned cbq_hash(u32 h) | ||
182 | { | ||
183 | h ^= h>>8; | ||
184 | h ^= h>>4; | ||
185 | return h&0xF; | ||
186 | } | ||
187 | |||
188 | static __inline__ struct cbq_class * | 179 | static __inline__ struct cbq_class * |
189 | cbq_class_lookup(struct cbq_sched_data *q, u32 classid) | 180 | cbq_class_lookup(struct cbq_sched_data *q, u32 classid) |
190 | { | 181 | { |
191 | struct cbq_class *cl; | 182 | struct Qdisc_class_common *clc; |
192 | 183 | ||
193 | for (cl = q->classes[cbq_hash(classid)]; cl; cl = cl->next) | 184 | clc = qdisc_class_find(&q->clhash, classid); |
194 | if (cl->classid == classid) | 185 | if (clc == NULL) |
195 | return cl; | 186 | return NULL; |
196 | return NULL; | 187 | return container_of(clc, struct cbq_class, common); |
197 | } | 188 | } |
198 | 189 | ||
199 | #ifdef CONFIG_NET_CLS_ACT | 190 | #ifdef CONFIG_NET_CLS_ACT |
@@ -379,7 +370,6 @@ static int | |||
379 | cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | 370 | cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) |
380 | { | 371 | { |
381 | struct cbq_sched_data *q = qdisc_priv(sch); | 372 | struct cbq_sched_data *q = qdisc_priv(sch); |
382 | int len = skb->len; | ||
383 | int uninitialized_var(ret); | 373 | int uninitialized_var(ret); |
384 | struct cbq_class *cl = cbq_classify(skb, sch, &ret); | 374 | struct cbq_class *cl = cbq_classify(skb, sch, &ret); |
385 | 375 | ||
@@ -396,10 +386,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
396 | #ifdef CONFIG_NET_CLS_ACT | 386 | #ifdef CONFIG_NET_CLS_ACT |
397 | cl->q->__parent = sch; | 387 | cl->q->__parent = sch; |
398 | #endif | 388 | #endif |
399 | if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) { | 389 | ret = qdisc_enqueue(skb, cl->q); |
390 | if (ret == NET_XMIT_SUCCESS) { | ||
400 | sch->q.qlen++; | 391 | sch->q.qlen++; |
401 | sch->bstats.packets++; | 392 | sch->bstats.packets++; |
402 | sch->bstats.bytes+=len; | 393 | sch->bstats.bytes += qdisc_pkt_len(skb); |
403 | cbq_mark_toplevel(q, cl); | 394 | cbq_mark_toplevel(q, cl); |
404 | if (!cl->next_alive) | 395 | if (!cl->next_alive) |
405 | cbq_activate_class(cl); | 396 | cbq_activate_class(cl); |
@@ -659,14 +650,13 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer) | |||
659 | } | 650 | } |
660 | 651 | ||
661 | sch->flags &= ~TCQ_F_THROTTLED; | 652 | sch->flags &= ~TCQ_F_THROTTLED; |
662 | netif_schedule(sch->dev); | 653 | __netif_schedule(sch); |
663 | return HRTIMER_NORESTART; | 654 | return HRTIMER_NORESTART; |
664 | } | 655 | } |
665 | 656 | ||
666 | #ifdef CONFIG_NET_CLS_ACT | 657 | #ifdef CONFIG_NET_CLS_ACT |
667 | static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) | 658 | static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) |
668 | { | 659 | { |
669 | int len = skb->len; | ||
670 | struct Qdisc *sch = child->__parent; | 660 | struct Qdisc *sch = child->__parent; |
671 | struct cbq_sched_data *q = qdisc_priv(sch); | 661 | struct cbq_sched_data *q = qdisc_priv(sch); |
672 | struct cbq_class *cl = q->rx_class; | 662 | struct cbq_class *cl = q->rx_class; |
@@ -680,10 +670,10 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) | |||
680 | q->rx_class = cl; | 670 | q->rx_class = cl; |
681 | cl->q->__parent = sch; | 671 | cl->q->__parent = sch; |
682 | 672 | ||
683 | if (cl->q->enqueue(skb, cl->q) == 0) { | 673 | if (qdisc_enqueue(skb, cl->q) == 0) { |
684 | sch->q.qlen++; | 674 | sch->q.qlen++; |
685 | sch->bstats.packets++; | 675 | sch->bstats.packets++; |
686 | sch->bstats.bytes+=len; | 676 | sch->bstats.bytes += qdisc_pkt_len(skb); |
687 | if (!cl->next_alive) | 677 | if (!cl->next_alive) |
688 | cbq_activate_class(cl); | 678 | cbq_activate_class(cl); |
689 | return 0; | 679 | return 0; |
@@ -889,7 +879,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio) | |||
889 | if (skb == NULL) | 879 | if (skb == NULL) |
890 | goto skip_class; | 880 | goto skip_class; |
891 | 881 | ||
892 | cl->deficit -= skb->len; | 882 | cl->deficit -= qdisc_pkt_len(skb); |
893 | q->tx_class = cl; | 883 | q->tx_class = cl; |
894 | q->tx_borrowed = borrow; | 884 | q->tx_borrowed = borrow; |
895 | if (borrow != cl) { | 885 | if (borrow != cl) { |
@@ -897,11 +887,11 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio) | |||
897 | borrow->xstats.borrows++; | 887 | borrow->xstats.borrows++; |
898 | cl->xstats.borrows++; | 888 | cl->xstats.borrows++; |
899 | #else | 889 | #else |
900 | borrow->xstats.borrows += skb->len; | 890 | borrow->xstats.borrows += qdisc_pkt_len(skb); |
901 | cl->xstats.borrows += skb->len; | 891 | cl->xstats.borrows += qdisc_pkt_len(skb); |
902 | #endif | 892 | #endif |
903 | } | 893 | } |
904 | q->tx_len = skb->len; | 894 | q->tx_len = qdisc_pkt_len(skb); |
905 | 895 | ||
906 | if (cl->deficit <= 0) { | 896 | if (cl->deficit <= 0) { |
907 | q->active[prio] = cl; | 897 | q->active[prio] = cl; |
@@ -1071,13 +1061,14 @@ static void cbq_adjust_levels(struct cbq_class *this) | |||
1071 | static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) | 1061 | static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) |
1072 | { | 1062 | { |
1073 | struct cbq_class *cl; | 1063 | struct cbq_class *cl; |
1074 | unsigned h; | 1064 | struct hlist_node *n; |
1065 | unsigned int h; | ||
1075 | 1066 | ||
1076 | if (q->quanta[prio] == 0) | 1067 | if (q->quanta[prio] == 0) |
1077 | return; | 1068 | return; |
1078 | 1069 | ||
1079 | for (h=0; h<16; h++) { | 1070 | for (h = 0; h < q->clhash.hashsize; h++) { |
1080 | for (cl = q->classes[h]; cl; cl = cl->next) { | 1071 | hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { |
1081 | /* BUGGGG... Beware! This expression suffer of | 1072 | /* BUGGGG... Beware! This expression suffer of |
1082 | arithmetic overflows! | 1073 | arithmetic overflows! |
1083 | */ | 1074 | */ |
@@ -1085,9 +1076,9 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) | |||
1085 | cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ | 1076 | cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ |
1086 | q->quanta[prio]; | 1077 | q->quanta[prio]; |
1087 | } | 1078 | } |
1088 | if (cl->quantum <= 0 || cl->quantum>32*cl->qdisc->dev->mtu) { | 1079 | if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) { |
1089 | printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->classid, cl->quantum); | 1080 | printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum); |
1090 | cl->quantum = cl->qdisc->dev->mtu/2 + 1; | 1081 | cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1; |
1091 | } | 1082 | } |
1092 | } | 1083 | } |
1093 | } | 1084 | } |
@@ -1114,10 +1105,12 @@ static void cbq_sync_defmap(struct cbq_class *cl) | |||
1114 | if (split->defaults[i]) | 1105 | if (split->defaults[i]) |
1115 | continue; | 1106 | continue; |
1116 | 1107 | ||
1117 | for (h=0; h<16; h++) { | 1108 | for (h = 0; h < q->clhash.hashsize; h++) { |
1109 | struct hlist_node *n; | ||
1118 | struct cbq_class *c; | 1110 | struct cbq_class *c; |
1119 | 1111 | ||
1120 | for (c = q->classes[h]; c; c = c->next) { | 1112 | hlist_for_each_entry(c, n, &q->clhash.hash[h], |
1113 | common.hnode) { | ||
1121 | if (c->split == split && c->level < level && | 1114 | if (c->split == split && c->level < level && |
1122 | c->defmap&(1<<i)) { | 1115 | c->defmap&(1<<i)) { |
1123 | split->defaults[i] = c; | 1116 | split->defaults[i] = c; |
@@ -1135,12 +1128,12 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma | |||
1135 | if (splitid == 0) { | 1128 | if (splitid == 0) { |
1136 | if ((split = cl->split) == NULL) | 1129 | if ((split = cl->split) == NULL) |
1137 | return; | 1130 | return; |
1138 | splitid = split->classid; | 1131 | splitid = split->common.classid; |
1139 | } | 1132 | } |
1140 | 1133 | ||
1141 | if (split == NULL || split->classid != splitid) { | 1134 | if (split == NULL || split->common.classid != splitid) { |
1142 | for (split = cl->tparent; split; split = split->tparent) | 1135 | for (split = cl->tparent; split; split = split->tparent) |
1143 | if (split->classid == splitid) | 1136 | if (split->common.classid == splitid) |
1144 | break; | 1137 | break; |
1145 | } | 1138 | } |
1146 | 1139 | ||
@@ -1163,13 +1156,7 @@ static void cbq_unlink_class(struct cbq_class *this) | |||
1163 | struct cbq_class *cl, **clp; | 1156 | struct cbq_class *cl, **clp; |
1164 | struct cbq_sched_data *q = qdisc_priv(this->qdisc); | 1157 | struct cbq_sched_data *q = qdisc_priv(this->qdisc); |
1165 | 1158 | ||
1166 | for (clp = &q->classes[cbq_hash(this->classid)]; (cl = *clp) != NULL; clp = &cl->next) { | 1159 | qdisc_class_hash_remove(&q->clhash, &this->common); |
1167 | if (cl == this) { | ||
1168 | *clp = cl->next; | ||
1169 | cl->next = NULL; | ||
1170 | break; | ||
1171 | } | ||
1172 | } | ||
1173 | 1160 | ||
1174 | if (this->tparent) { | 1161 | if (this->tparent) { |
1175 | clp=&this->sibling; | 1162 | clp=&this->sibling; |
@@ -1195,12 +1182,10 @@ static void cbq_unlink_class(struct cbq_class *this) | |||
1195 | static void cbq_link_class(struct cbq_class *this) | 1182 | static void cbq_link_class(struct cbq_class *this) |
1196 | { | 1183 | { |
1197 | struct cbq_sched_data *q = qdisc_priv(this->qdisc); | 1184 | struct cbq_sched_data *q = qdisc_priv(this->qdisc); |
1198 | unsigned h = cbq_hash(this->classid); | ||
1199 | struct cbq_class *parent = this->tparent; | 1185 | struct cbq_class *parent = this->tparent; |
1200 | 1186 | ||
1201 | this->sibling = this; | 1187 | this->sibling = this; |
1202 | this->next = q->classes[h]; | 1188 | qdisc_class_hash_insert(&q->clhash, &this->common); |
1203 | q->classes[h] = this; | ||
1204 | 1189 | ||
1205 | if (parent == NULL) | 1190 | if (parent == NULL) |
1206 | return; | 1191 | return; |
@@ -1242,6 +1227,7 @@ cbq_reset(struct Qdisc* sch) | |||
1242 | { | 1227 | { |
1243 | struct cbq_sched_data *q = qdisc_priv(sch); | 1228 | struct cbq_sched_data *q = qdisc_priv(sch); |
1244 | struct cbq_class *cl; | 1229 | struct cbq_class *cl; |
1230 | struct hlist_node *n; | ||
1245 | int prio; | 1231 | int prio; |
1246 | unsigned h; | 1232 | unsigned h; |
1247 | 1233 | ||
@@ -1258,8 +1244,8 @@ cbq_reset(struct Qdisc* sch) | |||
1258 | for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++) | 1244 | for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++) |
1259 | q->active[prio] = NULL; | 1245 | q->active[prio] = NULL; |
1260 | 1246 | ||
1261 | for (h = 0; h < 16; h++) { | 1247 | for (h = 0; h < q->clhash.hashsize; h++) { |
1262 | for (cl = q->classes[h]; cl; cl = cl->next) { | 1248 | hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { |
1263 | qdisc_reset(cl->q); | 1249 | qdisc_reset(cl->q); |
1264 | 1250 | ||
1265 | cl->next_alive = NULL; | 1251 | cl->next_alive = NULL; |
@@ -1406,11 +1392,16 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) | |||
1406 | if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL) | 1392 | if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL) |
1407 | return -EINVAL; | 1393 | return -EINVAL; |
1408 | 1394 | ||
1395 | err = qdisc_class_hash_init(&q->clhash); | ||
1396 | if (err < 0) | ||
1397 | goto put_rtab; | ||
1398 | |||
1409 | q->link.refcnt = 1; | 1399 | q->link.refcnt = 1; |
1410 | q->link.sibling = &q->link; | 1400 | q->link.sibling = &q->link; |
1411 | q->link.classid = sch->handle; | 1401 | q->link.common.classid = sch->handle; |
1412 | q->link.qdisc = sch; | 1402 | q->link.qdisc = sch; |
1413 | if (!(q->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, | 1403 | if (!(q->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
1404 | &pfifo_qdisc_ops, | ||
1414 | sch->handle))) | 1405 | sch->handle))) |
1415 | q->link.q = &noop_qdisc; | 1406 | q->link.q = &noop_qdisc; |
1416 | 1407 | ||
@@ -1419,7 +1410,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) | |||
1419 | q->link.cpriority = TC_CBQ_MAXPRIO-1; | 1410 | q->link.cpriority = TC_CBQ_MAXPRIO-1; |
1420 | q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC; | 1411 | q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC; |
1421 | q->link.overlimit = cbq_ovl_classic; | 1412 | q->link.overlimit = cbq_ovl_classic; |
1422 | q->link.allot = psched_mtu(sch->dev); | 1413 | q->link.allot = psched_mtu(qdisc_dev(sch)); |
1423 | q->link.quantum = q->link.allot; | 1414 | q->link.quantum = q->link.allot; |
1424 | q->link.weight = q->link.R_tab->rate.rate; | 1415 | q->link.weight = q->link.R_tab->rate.rate; |
1425 | 1416 | ||
@@ -1441,6 +1432,10 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) | |||
1441 | 1432 | ||
1442 | cbq_addprio(q, &q->link); | 1433 | cbq_addprio(q, &q->link); |
1443 | return 0; | 1434 | return 0; |
1435 | |||
1436 | put_rtab: | ||
1437 | qdisc_put_rtab(q->link.R_tab); | ||
1438 | return err; | ||
1444 | } | 1439 | } |
1445 | 1440 | ||
1446 | static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) | 1441 | static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) |
@@ -1521,7 +1516,7 @@ static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) | |||
1521 | struct tc_cbq_fopt opt; | 1516 | struct tc_cbq_fopt opt; |
1522 | 1517 | ||
1523 | if (cl->split || cl->defmap) { | 1518 | if (cl->split || cl->defmap) { |
1524 | opt.split = cl->split ? cl->split->classid : 0; | 1519 | opt.split = cl->split ? cl->split->common.classid : 0; |
1525 | opt.defmap = cl->defmap; | 1520 | opt.defmap = cl->defmap; |
1526 | opt.defchange = ~0; | 1521 | opt.defchange = ~0; |
1527 | NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt); | 1522 | NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt); |
@@ -1602,10 +1597,10 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg, | |||
1602 | struct nlattr *nest; | 1597 | struct nlattr *nest; |
1603 | 1598 | ||
1604 | if (cl->tparent) | 1599 | if (cl->tparent) |
1605 | tcm->tcm_parent = cl->tparent->classid; | 1600 | tcm->tcm_parent = cl->tparent->common.classid; |
1606 | else | 1601 | else |
1607 | tcm->tcm_parent = TC_H_ROOT; | 1602 | tcm->tcm_parent = TC_H_ROOT; |
1608 | tcm->tcm_handle = cl->classid; | 1603 | tcm->tcm_handle = cl->common.classid; |
1609 | tcm->tcm_info = cl->q->handle; | 1604 | tcm->tcm_info = cl->q->handle; |
1610 | 1605 | ||
1611 | nest = nla_nest_start(skb, TCA_OPTIONS); | 1606 | nest = nla_nest_start(skb, TCA_OPTIONS); |
@@ -1650,8 +1645,10 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
1650 | 1645 | ||
1651 | if (cl) { | 1646 | if (cl) { |
1652 | if (new == NULL) { | 1647 | if (new == NULL) { |
1653 | if ((new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, | 1648 | new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
1654 | cl->classid)) == NULL) | 1649 | &pfifo_qdisc_ops, |
1650 | cl->common.classid); | ||
1651 | if (new == NULL) | ||
1655 | return -ENOBUFS; | 1652 | return -ENOBUFS; |
1656 | } else { | 1653 | } else { |
1657 | #ifdef CONFIG_NET_CLS_ACT | 1654 | #ifdef CONFIG_NET_CLS_ACT |
@@ -1716,6 +1713,7 @@ static void | |||
1716 | cbq_destroy(struct Qdisc* sch) | 1713 | cbq_destroy(struct Qdisc* sch) |
1717 | { | 1714 | { |
1718 | struct cbq_sched_data *q = qdisc_priv(sch); | 1715 | struct cbq_sched_data *q = qdisc_priv(sch); |
1716 | struct hlist_node *n, *next; | ||
1719 | struct cbq_class *cl; | 1717 | struct cbq_class *cl; |
1720 | unsigned h; | 1718 | unsigned h; |
1721 | 1719 | ||
@@ -1727,18 +1725,16 @@ cbq_destroy(struct Qdisc* sch) | |||
1727 | * classes from root to leafs which means that filters can still | 1725 | * classes from root to leafs which means that filters can still |
1728 | * be bound to classes which have been destroyed already. --TGR '04 | 1726 | * be bound to classes which have been destroyed already. --TGR '04 |
1729 | */ | 1727 | */ |
1730 | for (h = 0; h < 16; h++) { | 1728 | for (h = 0; h < q->clhash.hashsize; h++) { |
1731 | for (cl = q->classes[h]; cl; cl = cl->next) | 1729 | hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) |
1732 | tcf_destroy_chain(&cl->filter_list); | 1730 | tcf_destroy_chain(&cl->filter_list); |
1733 | } | 1731 | } |
1734 | for (h = 0; h < 16; h++) { | 1732 | for (h = 0; h < q->clhash.hashsize; h++) { |
1735 | struct cbq_class *next; | 1733 | hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h], |
1736 | 1734 | common.hnode) | |
1737 | for (cl = q->classes[h]; cl; cl = next) { | ||
1738 | next = cl->next; | ||
1739 | cbq_destroy_class(sch, cl); | 1735 | cbq_destroy_class(sch, cl); |
1740 | } | ||
1741 | } | 1736 | } |
1737 | qdisc_class_hash_destroy(&q->clhash); | ||
1742 | } | 1738 | } |
1743 | 1739 | ||
1744 | static void cbq_put(struct Qdisc *sch, unsigned long arg) | 1740 | static void cbq_put(struct Qdisc *sch, unsigned long arg) |
@@ -1747,12 +1743,13 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg) | |||
1747 | 1743 | ||
1748 | if (--cl->refcnt == 0) { | 1744 | if (--cl->refcnt == 0) { |
1749 | #ifdef CONFIG_NET_CLS_ACT | 1745 | #ifdef CONFIG_NET_CLS_ACT |
1746 | spinlock_t *root_lock = qdisc_root_lock(sch); | ||
1750 | struct cbq_sched_data *q = qdisc_priv(sch); | 1747 | struct cbq_sched_data *q = qdisc_priv(sch); |
1751 | 1748 | ||
1752 | spin_lock_bh(&sch->dev->queue_lock); | 1749 | spin_lock_bh(root_lock); |
1753 | if (q->rx_class == cl) | 1750 | if (q->rx_class == cl) |
1754 | q->rx_class = NULL; | 1751 | q->rx_class = NULL; |
1755 | spin_unlock_bh(&sch->dev->queue_lock); | 1752 | spin_unlock_bh(root_lock); |
1756 | #endif | 1753 | #endif |
1757 | 1754 | ||
1758 | cbq_destroy_class(sch, cl); | 1755 | cbq_destroy_class(sch, cl); |
@@ -1781,7 +1778,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t | |||
1781 | if (cl) { | 1778 | if (cl) { |
1782 | /* Check parent */ | 1779 | /* Check parent */ |
1783 | if (parentid) { | 1780 | if (parentid) { |
1784 | if (cl->tparent && cl->tparent->classid != parentid) | 1781 | if (cl->tparent && |
1782 | cl->tparent->common.classid != parentid) | ||
1785 | return -EINVAL; | 1783 | return -EINVAL; |
1786 | if (!cl->tparent && parentid != TC_H_ROOT) | 1784 | if (!cl->tparent && parentid != TC_H_ROOT) |
1787 | return -EINVAL; | 1785 | return -EINVAL; |
@@ -1830,7 +1828,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t | |||
1830 | 1828 | ||
1831 | if (tca[TCA_RATE]) | 1829 | if (tca[TCA_RATE]) |
1832 | gen_replace_estimator(&cl->bstats, &cl->rate_est, | 1830 | gen_replace_estimator(&cl->bstats, &cl->rate_est, |
1833 | &sch->dev->queue_lock, | 1831 | qdisc_root_lock(sch), |
1834 | tca[TCA_RATE]); | 1832 | tca[TCA_RATE]); |
1835 | return 0; | 1833 | return 0; |
1836 | } | 1834 | } |
@@ -1881,9 +1879,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t | |||
1881 | cl->R_tab = rtab; | 1879 | cl->R_tab = rtab; |
1882 | rtab = NULL; | 1880 | rtab = NULL; |
1883 | cl->refcnt = 1; | 1881 | cl->refcnt = 1; |
1884 | if (!(cl->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid))) | 1882 | if (!(cl->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
1883 | &pfifo_qdisc_ops, classid))) | ||
1885 | cl->q = &noop_qdisc; | 1884 | cl->q = &noop_qdisc; |
1886 | cl->classid = classid; | 1885 | cl->common.classid = classid; |
1887 | cl->tparent = parent; | 1886 | cl->tparent = parent; |
1888 | cl->qdisc = sch; | 1887 | cl->qdisc = sch; |
1889 | cl->allot = parent->allot; | 1888 | cl->allot = parent->allot; |
@@ -1916,9 +1915,11 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t | |||
1916 | cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); | 1915 | cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); |
1917 | sch_tree_unlock(sch); | 1916 | sch_tree_unlock(sch); |
1918 | 1917 | ||
1918 | qdisc_class_hash_grow(sch, &q->clhash); | ||
1919 | |||
1919 | if (tca[TCA_RATE]) | 1920 | if (tca[TCA_RATE]) |
1920 | gen_new_estimator(&cl->bstats, &cl->rate_est, | 1921 | gen_new_estimator(&cl->bstats, &cl->rate_est, |
1921 | &sch->dev->queue_lock, tca[TCA_RATE]); | 1922 | qdisc_root_lock(sch), tca[TCA_RATE]); |
1922 | 1923 | ||
1923 | *arg = (unsigned long)cl; | 1924 | *arg = (unsigned long)cl; |
1924 | return 0; | 1925 | return 0; |
@@ -2008,15 +2009,15 @@ static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg) | |||
2008 | static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | 2009 | static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg) |
2009 | { | 2010 | { |
2010 | struct cbq_sched_data *q = qdisc_priv(sch); | 2011 | struct cbq_sched_data *q = qdisc_priv(sch); |
2012 | struct cbq_class *cl; | ||
2013 | struct hlist_node *n; | ||
2011 | unsigned h; | 2014 | unsigned h; |
2012 | 2015 | ||
2013 | if (arg->stop) | 2016 | if (arg->stop) |
2014 | return; | 2017 | return; |
2015 | 2018 | ||
2016 | for (h = 0; h < 16; h++) { | 2019 | for (h = 0; h < q->clhash.hashsize; h++) { |
2017 | struct cbq_class *cl; | 2020 | hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { |
2018 | |||
2019 | for (cl = q->classes[h]; cl; cl = cl->next) { | ||
2020 | if (arg->count < arg->skip) { | 2021 | if (arg->count < arg->skip) { |
2021 | arg->count++; | 2022 | arg->count++; |
2022 | continue; | 2023 | continue; |
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index c4c1317cd47d..a935676987e2 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c | |||
@@ -60,7 +60,8 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg, | |||
60 | sch, p, new, old); | 60 | sch, p, new, old); |
61 | 61 | ||
62 | if (new == NULL) { | 62 | if (new == NULL) { |
63 | new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, | 63 | new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
64 | &pfifo_qdisc_ops, | ||
64 | sch->handle); | 65 | sch->handle); |
65 | if (new == NULL) | 66 | if (new == NULL) |
66 | new = &noop_qdisc; | 67 | new = &noop_qdisc; |
@@ -251,13 +252,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
251 | } | 252 | } |
252 | } | 253 | } |
253 | 254 | ||
254 | err = p->q->enqueue(skb, p->q); | 255 | err = qdisc_enqueue(skb, p->q); |
255 | if (err != NET_XMIT_SUCCESS) { | 256 | if (err != NET_XMIT_SUCCESS) { |
256 | sch->qstats.drops++; | 257 | sch->qstats.drops++; |
257 | return err; | 258 | return err; |
258 | } | 259 | } |
259 | 260 | ||
260 | sch->bstats.bytes += skb->len; | 261 | sch->bstats.bytes += qdisc_pkt_len(skb); |
261 | sch->bstats.packets++; | 262 | sch->bstats.packets++; |
262 | sch->q.qlen++; | 263 | sch->q.qlen++; |
263 | 264 | ||
@@ -390,7 +391,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt) | |||
390 | p->default_index = default_index; | 391 | p->default_index = default_index; |
391 | p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]); | 392 | p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]); |
392 | 393 | ||
393 | p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, sch->handle); | 394 | p->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
395 | &pfifo_qdisc_ops, sch->handle); | ||
394 | if (p->q == NULL) | 396 | if (p->q == NULL) |
395 | p->q = &noop_qdisc; | 397 | p->q = &noop_qdisc; |
396 | 398 | ||
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index 95ed48221652..23d258bfe8ac 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c | |||
@@ -27,7 +27,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
27 | { | 27 | { |
28 | struct fifo_sched_data *q = qdisc_priv(sch); | 28 | struct fifo_sched_data *q = qdisc_priv(sch); |
29 | 29 | ||
30 | if (likely(sch->qstats.backlog + skb->len <= q->limit)) | 30 | if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= q->limit)) |
31 | return qdisc_enqueue_tail(skb, sch); | 31 | return qdisc_enqueue_tail(skb, sch); |
32 | 32 | ||
33 | return qdisc_reshape_fail(skb, sch); | 33 | return qdisc_reshape_fail(skb, sch); |
@@ -48,10 +48,10 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt) | |||
48 | struct fifo_sched_data *q = qdisc_priv(sch); | 48 | struct fifo_sched_data *q = qdisc_priv(sch); |
49 | 49 | ||
50 | if (opt == NULL) { | 50 | if (opt == NULL) { |
51 | u32 limit = sch->dev->tx_queue_len ? : 1; | 51 | u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1; |
52 | 52 | ||
53 | if (sch->ops == &bfifo_qdisc_ops) | 53 | if (sch->ops == &bfifo_qdisc_ops) |
54 | limit *= sch->dev->mtu; | 54 | limit *= qdisc_dev(sch)->mtu; |
55 | 55 | ||
56 | q->limit = limit; | 56 | q->limit = limit; |
57 | } else { | 57 | } else { |
@@ -107,3 +107,46 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = { | |||
107 | .owner = THIS_MODULE, | 107 | .owner = THIS_MODULE, |
108 | }; | 108 | }; |
109 | EXPORT_SYMBOL(bfifo_qdisc_ops); | 109 | EXPORT_SYMBOL(bfifo_qdisc_ops); |
110 | |||
111 | /* Pass size change message down to embedded FIFO */ | ||
112 | int fifo_set_limit(struct Qdisc *q, unsigned int limit) | ||
113 | { | ||
114 | struct nlattr *nla; | ||
115 | int ret = -ENOMEM; | ||
116 | |||
117 | /* Hack to avoid sending change message to non-FIFO */ | ||
118 | if (strncmp(q->ops->id + 1, "fifo", 4) != 0) | ||
119 | return 0; | ||
120 | |||
121 | nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); | ||
122 | if (nla) { | ||
123 | nla->nla_type = RTM_NEWQDISC; | ||
124 | nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt)); | ||
125 | ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit; | ||
126 | |||
127 | ret = q->ops->change(q, nla); | ||
128 | kfree(nla); | ||
129 | } | ||
130 | return ret; | ||
131 | } | ||
132 | EXPORT_SYMBOL(fifo_set_limit); | ||
133 | |||
134 | struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops, | ||
135 | unsigned int limit) | ||
136 | { | ||
137 | struct Qdisc *q; | ||
138 | int err = -ENOMEM; | ||
139 | |||
140 | q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, | ||
141 | ops, TC_H_MAKE(sch->handle, 1)); | ||
142 | if (q) { | ||
143 | err = fifo_set_limit(q, limit); | ||
144 | if (err < 0) { | ||
145 | qdisc_destroy(q); | ||
146 | q = NULL; | ||
147 | } | ||
148 | } | ||
149 | |||
150 | return q ? : ERR_PTR(err); | ||
151 | } | ||
152 | EXPORT_SYMBOL(fifo_create_dflt); | ||
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 13afa7214392..27a51f04db49 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -29,58 +29,36 @@ | |||
29 | /* Main transmission queue. */ | 29 | /* Main transmission queue. */ |
30 | 30 | ||
31 | /* Modifications to data participating in scheduling must be protected with | 31 | /* Modifications to data participating in scheduling must be protected with |
32 | * dev->queue_lock spinlock. | 32 | * qdisc_root_lock(qdisc) spinlock. |
33 | * | 33 | * |
34 | * The idea is the following: | 34 | * The idea is the following: |
35 | * - enqueue, dequeue are serialized via top level device | 35 | * - enqueue, dequeue are serialized via qdisc root lock |
36 | * spinlock dev->queue_lock. | 36 | * - ingress filtering is also serialized via qdisc root lock |
37 | * - ingress filtering is serialized via top level device | ||
38 | * spinlock dev->ingress_lock. | ||
39 | * - updates to tree and tree walking are only done under the rtnl mutex. | 37 | * - updates to tree and tree walking are only done under the rtnl mutex. |
40 | */ | 38 | */ |
41 | 39 | ||
42 | void qdisc_lock_tree(struct net_device *dev) | ||
43 | __acquires(dev->queue_lock) | ||
44 | __acquires(dev->ingress_lock) | ||
45 | { | ||
46 | spin_lock_bh(&dev->queue_lock); | ||
47 | spin_lock(&dev->ingress_lock); | ||
48 | } | ||
49 | EXPORT_SYMBOL(qdisc_lock_tree); | ||
50 | |||
51 | void qdisc_unlock_tree(struct net_device *dev) | ||
52 | __releases(dev->ingress_lock) | ||
53 | __releases(dev->queue_lock) | ||
54 | { | ||
55 | spin_unlock(&dev->ingress_lock); | ||
56 | spin_unlock_bh(&dev->queue_lock); | ||
57 | } | ||
58 | EXPORT_SYMBOL(qdisc_unlock_tree); | ||
59 | |||
60 | static inline int qdisc_qlen(struct Qdisc *q) | 40 | static inline int qdisc_qlen(struct Qdisc *q) |
61 | { | 41 | { |
62 | return q->q.qlen; | 42 | return q->q.qlen; |
63 | } | 43 | } |
64 | 44 | ||
65 | static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev, | 45 | static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) |
66 | struct Qdisc *q) | ||
67 | { | 46 | { |
68 | if (unlikely(skb->next)) | 47 | if (unlikely(skb->next)) |
69 | dev->gso_skb = skb; | 48 | q->gso_skb = skb; |
70 | else | 49 | else |
71 | q->ops->requeue(skb, q); | 50 | q->ops->requeue(skb, q); |
72 | 51 | ||
73 | netif_schedule(dev); | 52 | __netif_schedule(q); |
74 | return 0; | 53 | return 0; |
75 | } | 54 | } |
76 | 55 | ||
77 | static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev, | 56 | static inline struct sk_buff *dequeue_skb(struct Qdisc *q) |
78 | struct Qdisc *q) | ||
79 | { | 57 | { |
80 | struct sk_buff *skb; | 58 | struct sk_buff *skb; |
81 | 59 | ||
82 | if ((skb = dev->gso_skb)) | 60 | if ((skb = q->gso_skb)) |
83 | dev->gso_skb = NULL; | 61 | q->gso_skb = NULL; |
84 | else | 62 | else |
85 | skb = q->dequeue(q); | 63 | skb = q->dequeue(q); |
86 | 64 | ||
@@ -88,12 +66,12 @@ static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev, | |||
88 | } | 66 | } |
89 | 67 | ||
90 | static inline int handle_dev_cpu_collision(struct sk_buff *skb, | 68 | static inline int handle_dev_cpu_collision(struct sk_buff *skb, |
91 | struct net_device *dev, | 69 | struct netdev_queue *dev_queue, |
92 | struct Qdisc *q) | 70 | struct Qdisc *q) |
93 | { | 71 | { |
94 | int ret; | 72 | int ret; |
95 | 73 | ||
96 | if (unlikely(dev->xmit_lock_owner == smp_processor_id())) { | 74 | if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) { |
97 | /* | 75 | /* |
98 | * Same CPU holding the lock. It may be a transient | 76 | * Same CPU holding the lock. It may be a transient |
99 | * configuration error, when hard_start_xmit() recurses. We | 77 | * configuration error, when hard_start_xmit() recurses. We |
@@ -103,7 +81,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, | |||
103 | kfree_skb(skb); | 81 | kfree_skb(skb); |
104 | if (net_ratelimit()) | 82 | if (net_ratelimit()) |
105 | printk(KERN_WARNING "Dead loop on netdevice %s, " | 83 | printk(KERN_WARNING "Dead loop on netdevice %s, " |
106 | "fix it urgently!\n", dev->name); | 84 | "fix it urgently!\n", dev_queue->dev->name); |
107 | ret = qdisc_qlen(q); | 85 | ret = qdisc_qlen(q); |
108 | } else { | 86 | } else { |
109 | /* | 87 | /* |
@@ -111,22 +89,22 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, | |||
111 | * some time. | 89 | * some time. |
112 | */ | 90 | */ |
113 | __get_cpu_var(netdev_rx_stat).cpu_collision++; | 91 | __get_cpu_var(netdev_rx_stat).cpu_collision++; |
114 | ret = dev_requeue_skb(skb, dev, q); | 92 | ret = dev_requeue_skb(skb, q); |
115 | } | 93 | } |
116 | 94 | ||
117 | return ret; | 95 | return ret; |
118 | } | 96 | } |
119 | 97 | ||
120 | /* | 98 | /* |
121 | * NOTE: Called under dev->queue_lock with locally disabled BH. | 99 | * NOTE: Called under qdisc_lock(q) with locally disabled BH. |
122 | * | 100 | * |
123 | * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this | 101 | * __QDISC_STATE_RUNNING guarantees only one CPU can process |
124 | * device at a time. dev->queue_lock serializes queue accesses for | 102 | * this qdisc at a time. qdisc_lock(q) serializes queue accesses for |
125 | * this device AND dev->qdisc pointer itself. | 103 | * this queue. |
126 | * | 104 | * |
127 | * netif_tx_lock serializes accesses to device driver. | 105 | * netif_tx_lock serializes accesses to device driver. |
128 | * | 106 | * |
129 | * dev->queue_lock and netif_tx_lock are mutually exclusive, | 107 | * qdisc_lock(q) and netif_tx_lock are mutually exclusive, |
130 | * if one is grabbed, another must be free. | 108 | * if one is grabbed, another must be free. |
131 | * | 109 | * |
132 | * Note, that this procedure can be called by a watchdog timer | 110 | * Note, that this procedure can be called by a watchdog timer |
@@ -136,27 +114,32 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, | |||
136 | * >0 - queue is not empty. | 114 | * >0 - queue is not empty. |
137 | * | 115 | * |
138 | */ | 116 | */ |
139 | static inline int qdisc_restart(struct net_device *dev) | 117 | static inline int qdisc_restart(struct Qdisc *q) |
140 | { | 118 | { |
141 | struct Qdisc *q = dev->qdisc; | 119 | struct netdev_queue *txq; |
142 | struct sk_buff *skb; | ||
143 | int ret = NETDEV_TX_BUSY; | 120 | int ret = NETDEV_TX_BUSY; |
121 | struct net_device *dev; | ||
122 | spinlock_t *root_lock; | ||
123 | struct sk_buff *skb; | ||
144 | 124 | ||
145 | /* Dequeue packet */ | 125 | /* Dequeue packet */ |
146 | if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL)) | 126 | if (unlikely((skb = dequeue_skb(q)) == NULL)) |
147 | return 0; | 127 | return 0; |
148 | 128 | ||
129 | root_lock = qdisc_root_lock(q); | ||
149 | 130 | ||
150 | /* And release queue */ | 131 | /* And release qdisc */ |
151 | spin_unlock(&dev->queue_lock); | 132 | spin_unlock(root_lock); |
152 | 133 | ||
153 | HARD_TX_LOCK(dev, smp_processor_id()); | 134 | dev = qdisc_dev(q); |
135 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | ||
136 | |||
137 | HARD_TX_LOCK(dev, txq, smp_processor_id()); | ||
154 | if (!netif_subqueue_stopped(dev, skb)) | 138 | if (!netif_subqueue_stopped(dev, skb)) |
155 | ret = dev_hard_start_xmit(skb, dev); | 139 | ret = dev_hard_start_xmit(skb, dev, txq); |
156 | HARD_TX_UNLOCK(dev); | 140 | HARD_TX_UNLOCK(dev, txq); |
157 | 141 | ||
158 | spin_lock(&dev->queue_lock); | 142 | spin_lock(root_lock); |
159 | q = dev->qdisc; | ||
160 | 143 | ||
161 | switch (ret) { | 144 | switch (ret) { |
162 | case NETDEV_TX_OK: | 145 | case NETDEV_TX_OK: |
@@ -166,7 +149,7 @@ static inline int qdisc_restart(struct net_device *dev) | |||
166 | 149 | ||
167 | case NETDEV_TX_LOCKED: | 150 | case NETDEV_TX_LOCKED: |
168 | /* Driver try lock failed */ | 151 | /* Driver try lock failed */ |
169 | ret = handle_dev_cpu_collision(skb, dev, q); | 152 | ret = handle_dev_cpu_collision(skb, txq, q); |
170 | break; | 153 | break; |
171 | 154 | ||
172 | default: | 155 | default: |
@@ -175,33 +158,33 @@ static inline int qdisc_restart(struct net_device *dev) | |||
175 | printk(KERN_WARNING "BUG %s code %d qlen %d\n", | 158 | printk(KERN_WARNING "BUG %s code %d qlen %d\n", |
176 | dev->name, ret, q->q.qlen); | 159 | dev->name, ret, q->q.qlen); |
177 | 160 | ||
178 | ret = dev_requeue_skb(skb, dev, q); | 161 | ret = dev_requeue_skb(skb, q); |
179 | break; | 162 | break; |
180 | } | 163 | } |
181 | 164 | ||
165 | if (ret && netif_tx_queue_stopped(txq)) | ||
166 | ret = 0; | ||
167 | |||
182 | return ret; | 168 | return ret; |
183 | } | 169 | } |
184 | 170 | ||
185 | void __qdisc_run(struct net_device *dev) | 171 | void __qdisc_run(struct Qdisc *q) |
186 | { | 172 | { |
187 | unsigned long start_time = jiffies; | 173 | unsigned long start_time = jiffies; |
188 | 174 | ||
189 | while (qdisc_restart(dev)) { | 175 | while (qdisc_restart(q)) { |
190 | if (netif_queue_stopped(dev)) | ||
191 | break; | ||
192 | |||
193 | /* | 176 | /* |
194 | * Postpone processing if | 177 | * Postpone processing if |
195 | * 1. another process needs the CPU; | 178 | * 1. another process needs the CPU; |
196 | * 2. we've been doing it for too long. | 179 | * 2. we've been doing it for too long. |
197 | */ | 180 | */ |
198 | if (need_resched() || jiffies != start_time) { | 181 | if (need_resched() || jiffies != start_time) { |
199 | netif_schedule(dev); | 182 | __netif_schedule(q); |
200 | break; | 183 | break; |
201 | } | 184 | } |
202 | } | 185 | } |
203 | 186 | ||
204 | clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state); | 187 | clear_bit(__QDISC_STATE_RUNNING, &q->state); |
205 | } | 188 | } |
206 | 189 | ||
207 | static void dev_watchdog(unsigned long arg) | 190 | static void dev_watchdog(unsigned long arg) |
@@ -209,19 +192,35 @@ static void dev_watchdog(unsigned long arg) | |||
209 | struct net_device *dev = (struct net_device *)arg; | 192 | struct net_device *dev = (struct net_device *)arg; |
210 | 193 | ||
211 | netif_tx_lock(dev); | 194 | netif_tx_lock(dev); |
212 | if (dev->qdisc != &noop_qdisc) { | 195 | if (!qdisc_tx_is_noop(dev)) { |
213 | if (netif_device_present(dev) && | 196 | if (netif_device_present(dev) && |
214 | netif_running(dev) && | 197 | netif_running(dev) && |
215 | netif_carrier_ok(dev)) { | 198 | netif_carrier_ok(dev)) { |
216 | if (netif_queue_stopped(dev) && | 199 | int some_queue_stopped = 0; |
217 | time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) { | 200 | unsigned int i; |
201 | |||
202 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
203 | struct netdev_queue *txq; | ||
204 | |||
205 | txq = netdev_get_tx_queue(dev, i); | ||
206 | if (netif_tx_queue_stopped(txq)) { | ||
207 | some_queue_stopped = 1; | ||
208 | break; | ||
209 | } | ||
210 | } | ||
218 | 211 | ||
219 | printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", | 212 | if (some_queue_stopped && |
213 | time_after(jiffies, (dev->trans_start + | ||
214 | dev->watchdog_timeo))) { | ||
215 | printk(KERN_INFO "NETDEV WATCHDOG: %s: " | ||
216 | "transmit timed out\n", | ||
220 | dev->name); | 217 | dev->name); |
221 | dev->tx_timeout(dev); | 218 | dev->tx_timeout(dev); |
222 | WARN_ON_ONCE(1); | 219 | WARN_ON_ONCE(1); |
223 | } | 220 | } |
224 | if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo))) | 221 | if (!mod_timer(&dev->watchdog_timer, |
222 | round_jiffies(jiffies + | ||
223 | dev->watchdog_timeo))) | ||
225 | dev_hold(dev); | 224 | dev_hold(dev); |
226 | } | 225 | } |
227 | } | 226 | } |
@@ -317,12 +316,18 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = { | |||
317 | .owner = THIS_MODULE, | 316 | .owner = THIS_MODULE, |
318 | }; | 317 | }; |
319 | 318 | ||
319 | static struct netdev_queue noop_netdev_queue = { | ||
320 | .qdisc = &noop_qdisc, | ||
321 | }; | ||
322 | |||
320 | struct Qdisc noop_qdisc = { | 323 | struct Qdisc noop_qdisc = { |
321 | .enqueue = noop_enqueue, | 324 | .enqueue = noop_enqueue, |
322 | .dequeue = noop_dequeue, | 325 | .dequeue = noop_dequeue, |
323 | .flags = TCQ_F_BUILTIN, | 326 | .flags = TCQ_F_BUILTIN, |
324 | .ops = &noop_qdisc_ops, | 327 | .ops = &noop_qdisc_ops, |
325 | .list = LIST_HEAD_INIT(noop_qdisc.list), | 328 | .list = LIST_HEAD_INIT(noop_qdisc.list), |
329 | .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), | ||
330 | .dev_queue = &noop_netdev_queue, | ||
326 | }; | 331 | }; |
327 | EXPORT_SYMBOL(noop_qdisc); | 332 | EXPORT_SYMBOL(noop_qdisc); |
328 | 333 | ||
@@ -335,112 +340,65 @@ static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = { | |||
335 | .owner = THIS_MODULE, | 340 | .owner = THIS_MODULE, |
336 | }; | 341 | }; |
337 | 342 | ||
343 | static struct Qdisc noqueue_qdisc; | ||
344 | static struct netdev_queue noqueue_netdev_queue = { | ||
345 | .qdisc = &noqueue_qdisc, | ||
346 | }; | ||
347 | |||
338 | static struct Qdisc noqueue_qdisc = { | 348 | static struct Qdisc noqueue_qdisc = { |
339 | .enqueue = NULL, | 349 | .enqueue = NULL, |
340 | .dequeue = noop_dequeue, | 350 | .dequeue = noop_dequeue, |
341 | .flags = TCQ_F_BUILTIN, | 351 | .flags = TCQ_F_BUILTIN, |
342 | .ops = &noqueue_qdisc_ops, | 352 | .ops = &noqueue_qdisc_ops, |
343 | .list = LIST_HEAD_INIT(noqueue_qdisc.list), | 353 | .list = LIST_HEAD_INIT(noqueue_qdisc.list), |
354 | .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock), | ||
355 | .dev_queue = &noqueue_netdev_queue, | ||
344 | }; | 356 | }; |
345 | 357 | ||
346 | 358 | ||
347 | static const u8 prio2band[TC_PRIO_MAX+1] = | 359 | static int fifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) |
348 | { 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 }; | ||
349 | |||
350 | /* 3-band FIFO queue: old style, but should be a bit faster than | ||
351 | generic prio+fifo combination. | ||
352 | */ | ||
353 | |||
354 | #define PFIFO_FAST_BANDS 3 | ||
355 | |||
356 | static inline struct sk_buff_head *prio2list(struct sk_buff *skb, | ||
357 | struct Qdisc *qdisc) | ||
358 | { | ||
359 | struct sk_buff_head *list = qdisc_priv(qdisc); | ||
360 | return list + prio2band[skb->priority & TC_PRIO_MAX]; | ||
361 | } | ||
362 | |||
363 | static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) | ||
364 | { | 360 | { |
365 | struct sk_buff_head *list = prio2list(skb, qdisc); | 361 | struct sk_buff_head *list = &qdisc->q; |
366 | 362 | ||
367 | if (skb_queue_len(list) < qdisc->dev->tx_queue_len) { | 363 | if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) |
368 | qdisc->q.qlen++; | ||
369 | return __qdisc_enqueue_tail(skb, qdisc, list); | 364 | return __qdisc_enqueue_tail(skb, qdisc, list); |
370 | } | ||
371 | 365 | ||
372 | return qdisc_drop(skb, qdisc); | 366 | return qdisc_drop(skb, qdisc); |
373 | } | 367 | } |
374 | 368 | ||
375 | static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) | 369 | static struct sk_buff *fifo_fast_dequeue(struct Qdisc* qdisc) |
376 | { | 370 | { |
377 | int prio; | 371 | struct sk_buff_head *list = &qdisc->q; |
378 | struct sk_buff_head *list = qdisc_priv(qdisc); | ||
379 | 372 | ||
380 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { | 373 | if (!skb_queue_empty(list)) |
381 | if (!skb_queue_empty(list + prio)) { | 374 | return __qdisc_dequeue_head(qdisc, list); |
382 | qdisc->q.qlen--; | ||
383 | return __qdisc_dequeue_head(qdisc, list + prio); | ||
384 | } | ||
385 | } | ||
386 | 375 | ||
387 | return NULL; | 376 | return NULL; |
388 | } | 377 | } |
389 | 378 | ||
390 | static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc) | 379 | static int fifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc) |
391 | { | 380 | { |
392 | qdisc->q.qlen++; | 381 | return __qdisc_requeue(skb, qdisc, &qdisc->q); |
393 | return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc)); | ||
394 | } | 382 | } |
395 | 383 | ||
396 | static void pfifo_fast_reset(struct Qdisc* qdisc) | 384 | static void fifo_fast_reset(struct Qdisc* qdisc) |
397 | { | 385 | { |
398 | int prio; | 386 | __qdisc_reset_queue(qdisc, &qdisc->q); |
399 | struct sk_buff_head *list = qdisc_priv(qdisc); | ||
400 | |||
401 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) | ||
402 | __qdisc_reset_queue(qdisc, list + prio); | ||
403 | |||
404 | qdisc->qstats.backlog = 0; | 387 | qdisc->qstats.backlog = 0; |
405 | qdisc->q.qlen = 0; | ||
406 | } | ||
407 | |||
408 | static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) | ||
409 | { | ||
410 | struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; | ||
411 | |||
412 | memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1); | ||
413 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); | ||
414 | return skb->len; | ||
415 | |||
416 | nla_put_failure: | ||
417 | return -1; | ||
418 | } | 388 | } |
419 | 389 | ||
420 | static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt) | 390 | static struct Qdisc_ops fifo_fast_ops __read_mostly = { |
421 | { | 391 | .id = "fifo_fast", |
422 | int prio; | 392 | .priv_size = 0, |
423 | struct sk_buff_head *list = qdisc_priv(qdisc); | 393 | .enqueue = fifo_fast_enqueue, |
424 | 394 | .dequeue = fifo_fast_dequeue, | |
425 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) | 395 | .requeue = fifo_fast_requeue, |
426 | skb_queue_head_init(list + prio); | 396 | .reset = fifo_fast_reset, |
427 | |||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | static struct Qdisc_ops pfifo_fast_ops __read_mostly = { | ||
432 | .id = "pfifo_fast", | ||
433 | .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head), | ||
434 | .enqueue = pfifo_fast_enqueue, | ||
435 | .dequeue = pfifo_fast_dequeue, | ||
436 | .requeue = pfifo_fast_requeue, | ||
437 | .init = pfifo_fast_init, | ||
438 | .reset = pfifo_fast_reset, | ||
439 | .dump = pfifo_fast_dump, | ||
440 | .owner = THIS_MODULE, | 397 | .owner = THIS_MODULE, |
441 | }; | 398 | }; |
442 | 399 | ||
443 | struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops) | 400 | struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, |
401 | struct Qdisc_ops *ops) | ||
444 | { | 402 | { |
445 | void *p; | 403 | void *p; |
446 | struct Qdisc *sch; | 404 | struct Qdisc *sch; |
@@ -462,8 +420,8 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops) | |||
462 | sch->ops = ops; | 420 | sch->ops = ops; |
463 | sch->enqueue = ops->enqueue; | 421 | sch->enqueue = ops->enqueue; |
464 | sch->dequeue = ops->dequeue; | 422 | sch->dequeue = ops->dequeue; |
465 | sch->dev = dev; | 423 | sch->dev_queue = dev_queue; |
466 | dev_hold(dev); | 424 | dev_hold(qdisc_dev(sch)); |
467 | atomic_set(&sch->refcnt, 1); | 425 | atomic_set(&sch->refcnt, 1); |
468 | 426 | ||
469 | return sch; | 427 | return sch; |
@@ -471,15 +429,16 @@ errout: | |||
471 | return ERR_PTR(err); | 429 | return ERR_PTR(err); |
472 | } | 430 | } |
473 | 431 | ||
474 | struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops, | 432 | struct Qdisc * qdisc_create_dflt(struct net_device *dev, |
433 | struct netdev_queue *dev_queue, | ||
434 | struct Qdisc_ops *ops, | ||
475 | unsigned int parentid) | 435 | unsigned int parentid) |
476 | { | 436 | { |
477 | struct Qdisc *sch; | 437 | struct Qdisc *sch; |
478 | 438 | ||
479 | sch = qdisc_alloc(dev, ops); | 439 | sch = qdisc_alloc(dev_queue, ops); |
480 | if (IS_ERR(sch)) | 440 | if (IS_ERR(sch)) |
481 | goto errout; | 441 | goto errout; |
482 | sch->stats_lock = &dev->queue_lock; | ||
483 | sch->parent = parentid; | 442 | sch->parent = parentid; |
484 | 443 | ||
485 | if (!ops->init || ops->init(sch, NULL) == 0) | 444 | if (!ops->init || ops->init(sch, NULL) == 0) |
@@ -491,7 +450,7 @@ errout: | |||
491 | } | 450 | } |
492 | EXPORT_SYMBOL(qdisc_create_dflt); | 451 | EXPORT_SYMBOL(qdisc_create_dflt); |
493 | 452 | ||
494 | /* Under dev->queue_lock and BH! */ | 453 | /* Under qdisc_root_lock(qdisc) and BH! */ |
495 | 454 | ||
496 | void qdisc_reset(struct Qdisc *qdisc) | 455 | void qdisc_reset(struct Qdisc *qdisc) |
497 | { | 456 | { |
@@ -508,86 +467,162 @@ EXPORT_SYMBOL(qdisc_reset); | |||
508 | static void __qdisc_destroy(struct rcu_head *head) | 467 | static void __qdisc_destroy(struct rcu_head *head) |
509 | { | 468 | { |
510 | struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu); | 469 | struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu); |
470 | const struct Qdisc_ops *ops = qdisc->ops; | ||
471 | |||
472 | qdisc_put_stab(qdisc->stab); | ||
473 | gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est); | ||
474 | if (ops->reset) | ||
475 | ops->reset(qdisc); | ||
476 | if (ops->destroy) | ||
477 | ops->destroy(qdisc); | ||
478 | |||
479 | module_put(ops->owner); | ||
480 | dev_put(qdisc_dev(qdisc)); | ||
481 | |||
482 | kfree_skb(qdisc->gso_skb); | ||
483 | |||
511 | kfree((char *) qdisc - qdisc->padded); | 484 | kfree((char *) qdisc - qdisc->padded); |
512 | } | 485 | } |
513 | 486 | ||
514 | /* Under dev->queue_lock and BH! */ | 487 | /* Under qdisc_root_lock(qdisc) and BH! */ |
515 | 488 | ||
516 | void qdisc_destroy(struct Qdisc *qdisc) | 489 | void qdisc_destroy(struct Qdisc *qdisc) |
517 | { | 490 | { |
518 | const struct Qdisc_ops *ops = qdisc->ops; | ||
519 | |||
520 | if (qdisc->flags & TCQ_F_BUILTIN || | 491 | if (qdisc->flags & TCQ_F_BUILTIN || |
521 | !atomic_dec_and_test(&qdisc->refcnt)) | 492 | !atomic_dec_and_test(&qdisc->refcnt)) |
522 | return; | 493 | return; |
523 | 494 | ||
524 | list_del(&qdisc->list); | 495 | if (qdisc->parent) |
525 | gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est); | 496 | list_del(&qdisc->list); |
526 | if (ops->reset) | ||
527 | ops->reset(qdisc); | ||
528 | if (ops->destroy) | ||
529 | ops->destroy(qdisc); | ||
530 | 497 | ||
531 | module_put(ops->owner); | ||
532 | dev_put(qdisc->dev); | ||
533 | call_rcu(&qdisc->q_rcu, __qdisc_destroy); | 498 | call_rcu(&qdisc->q_rcu, __qdisc_destroy); |
534 | } | 499 | } |
535 | EXPORT_SYMBOL(qdisc_destroy); | 500 | EXPORT_SYMBOL(qdisc_destroy); |
536 | 501 | ||
502 | static bool dev_all_qdisc_sleeping_noop(struct net_device *dev) | ||
503 | { | ||
504 | unsigned int i; | ||
505 | |||
506 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
507 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | ||
508 | |||
509 | if (txq->qdisc_sleeping != &noop_qdisc) | ||
510 | return false; | ||
511 | } | ||
512 | return true; | ||
513 | } | ||
514 | |||
515 | static void attach_one_default_qdisc(struct net_device *dev, | ||
516 | struct netdev_queue *dev_queue, | ||
517 | void *_unused) | ||
518 | { | ||
519 | struct Qdisc *qdisc; | ||
520 | |||
521 | if (dev->tx_queue_len) { | ||
522 | qdisc = qdisc_create_dflt(dev, dev_queue, | ||
523 | &fifo_fast_ops, TC_H_ROOT); | ||
524 | if (!qdisc) { | ||
525 | printk(KERN_INFO "%s: activation failed\n", dev->name); | ||
526 | return; | ||
527 | } | ||
528 | } else { | ||
529 | qdisc = &noqueue_qdisc; | ||
530 | } | ||
531 | dev_queue->qdisc_sleeping = qdisc; | ||
532 | } | ||
533 | |||
534 | static void transition_one_qdisc(struct net_device *dev, | ||
535 | struct netdev_queue *dev_queue, | ||
536 | void *_need_watchdog) | ||
537 | { | ||
538 | struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping; | ||
539 | int *need_watchdog_p = _need_watchdog; | ||
540 | |||
541 | rcu_assign_pointer(dev_queue->qdisc, new_qdisc); | ||
542 | if (new_qdisc != &noqueue_qdisc) | ||
543 | *need_watchdog_p = 1; | ||
544 | } | ||
545 | |||
537 | void dev_activate(struct net_device *dev) | 546 | void dev_activate(struct net_device *dev) |
538 | { | 547 | { |
548 | int need_watchdog; | ||
549 | |||
539 | /* No queueing discipline is attached to device; | 550 | /* No queueing discipline is attached to device; |
540 | create default one i.e. pfifo_fast for devices, | 551 | * create default one i.e. fifo_fast for devices, |
541 | which need queueing and noqueue_qdisc for | 552 | * which need queueing and noqueue_qdisc for |
542 | virtual interfaces | 553 | * virtual interfaces. |
543 | */ | 554 | */ |
544 | 555 | ||
545 | if (dev->qdisc_sleeping == &noop_qdisc) { | 556 | if (dev_all_qdisc_sleeping_noop(dev)) |
546 | struct Qdisc *qdisc; | 557 | netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); |
547 | if (dev->tx_queue_len) { | ||
548 | qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops, | ||
549 | TC_H_ROOT); | ||
550 | if (qdisc == NULL) { | ||
551 | printk(KERN_INFO "%s: activation failed\n", dev->name); | ||
552 | return; | ||
553 | } | ||
554 | list_add_tail(&qdisc->list, &dev->qdisc_list); | ||
555 | } else { | ||
556 | qdisc = &noqueue_qdisc; | ||
557 | } | ||
558 | dev->qdisc_sleeping = qdisc; | ||
559 | } | ||
560 | 558 | ||
561 | if (!netif_carrier_ok(dev)) | 559 | if (!netif_carrier_ok(dev)) |
562 | /* Delay activation until next carrier-on event */ | 560 | /* Delay activation until next carrier-on event */ |
563 | return; | 561 | return; |
564 | 562 | ||
565 | spin_lock_bh(&dev->queue_lock); | 563 | need_watchdog = 0; |
566 | rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping); | 564 | netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog); |
567 | if (dev->qdisc != &noqueue_qdisc) { | 565 | |
566 | if (need_watchdog) { | ||
568 | dev->trans_start = jiffies; | 567 | dev->trans_start = jiffies; |
569 | dev_watchdog_up(dev); | 568 | dev_watchdog_up(dev); |
570 | } | 569 | } |
571 | spin_unlock_bh(&dev->queue_lock); | ||
572 | } | 570 | } |
573 | 571 | ||
574 | void dev_deactivate(struct net_device *dev) | 572 | static void dev_deactivate_queue(struct net_device *dev, |
573 | struct netdev_queue *dev_queue, | ||
574 | void *_qdisc_default) | ||
575 | { | 575 | { |
576 | struct Qdisc *qdisc_default = _qdisc_default; | ||
577 | struct sk_buff *skb = NULL; | ||
576 | struct Qdisc *qdisc; | 578 | struct Qdisc *qdisc; |
577 | struct sk_buff *skb; | ||
578 | int running; | ||
579 | 579 | ||
580 | spin_lock_bh(&dev->queue_lock); | 580 | qdisc = dev_queue->qdisc; |
581 | qdisc = dev->qdisc; | 581 | if (qdisc) { |
582 | dev->qdisc = &noop_qdisc; | 582 | spin_lock_bh(qdisc_lock(qdisc)); |
583 | 583 | ||
584 | qdisc_reset(qdisc); | 584 | dev_queue->qdisc = qdisc_default; |
585 | qdisc_reset(qdisc); | ||
585 | 586 | ||
586 | skb = dev->gso_skb; | 587 | spin_unlock_bh(qdisc_lock(qdisc)); |
587 | dev->gso_skb = NULL; | 588 | } |
588 | spin_unlock_bh(&dev->queue_lock); | ||
589 | 589 | ||
590 | kfree_skb(skb); | 590 | kfree_skb(skb); |
591 | } | ||
592 | |||
593 | static bool some_qdisc_is_running(struct net_device *dev, int lock) | ||
594 | { | ||
595 | unsigned int i; | ||
596 | |||
597 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
598 | struct netdev_queue *dev_queue; | ||
599 | spinlock_t *root_lock; | ||
600 | struct Qdisc *q; | ||
601 | int val; | ||
602 | |||
603 | dev_queue = netdev_get_tx_queue(dev, i); | ||
604 | q = dev_queue->qdisc; | ||
605 | root_lock = qdisc_root_lock(q); | ||
606 | |||
607 | if (lock) | ||
608 | spin_lock_bh(root_lock); | ||
609 | |||
610 | val = test_bit(__QDISC_STATE_RUNNING, &q->state); | ||
611 | |||
612 | if (lock) | ||
613 | spin_unlock_bh(root_lock); | ||
614 | |||
615 | if (val) | ||
616 | return true; | ||
617 | } | ||
618 | return false; | ||
619 | } | ||
620 | |||
621 | void dev_deactivate(struct net_device *dev) | ||
622 | { | ||
623 | bool running; | ||
624 | |||
625 | netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc); | ||
591 | 626 | ||
592 | dev_watchdog_down(dev); | 627 | dev_watchdog_down(dev); |
593 | 628 | ||
@@ -596,16 +631,14 @@ void dev_deactivate(struct net_device *dev) | |||
596 | 631 | ||
597 | /* Wait for outstanding qdisc_run calls. */ | 632 | /* Wait for outstanding qdisc_run calls. */ |
598 | do { | 633 | do { |
599 | while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state)) | 634 | while (some_qdisc_is_running(dev, 0)) |
600 | yield(); | 635 | yield(); |
601 | 636 | ||
602 | /* | 637 | /* |
603 | * Double-check inside queue lock to ensure that all effects | 638 | * Double-check inside queue lock to ensure that all effects |
604 | * of the queue run are visible when we return. | 639 | * of the queue run are visible when we return. |
605 | */ | 640 | */ |
606 | spin_lock_bh(&dev->queue_lock); | 641 | running = some_qdisc_is_running(dev, 1); |
607 | running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state); | ||
608 | spin_unlock_bh(&dev->queue_lock); | ||
609 | 642 | ||
610 | /* | 643 | /* |
611 | * The running flag should never be set at this point because | 644 | * The running flag should never be set at this point because |
@@ -618,32 +651,46 @@ void dev_deactivate(struct net_device *dev) | |||
618 | } while (WARN_ON_ONCE(running)); | 651 | } while (WARN_ON_ONCE(running)); |
619 | } | 652 | } |
620 | 653 | ||
654 | static void dev_init_scheduler_queue(struct net_device *dev, | ||
655 | struct netdev_queue *dev_queue, | ||
656 | void *_qdisc) | ||
657 | { | ||
658 | struct Qdisc *qdisc = _qdisc; | ||
659 | |||
660 | dev_queue->qdisc = qdisc; | ||
661 | dev_queue->qdisc_sleeping = qdisc; | ||
662 | } | ||
663 | |||
621 | void dev_init_scheduler(struct net_device *dev) | 664 | void dev_init_scheduler(struct net_device *dev) |
622 | { | 665 | { |
623 | qdisc_lock_tree(dev); | 666 | netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc); |
624 | dev->qdisc = &noop_qdisc; | 667 | dev_init_scheduler_queue(dev, &dev->rx_queue, NULL); |
625 | dev->qdisc_sleeping = &noop_qdisc; | ||
626 | INIT_LIST_HEAD(&dev->qdisc_list); | ||
627 | qdisc_unlock_tree(dev); | ||
628 | 668 | ||
629 | setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev); | 669 | setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev); |
630 | } | 670 | } |
631 | 671 | ||
632 | void dev_shutdown(struct net_device *dev) | 672 | static void shutdown_scheduler_queue(struct net_device *dev, |
673 | struct netdev_queue *dev_queue, | ||
674 | void *_qdisc_default) | ||
633 | { | 675 | { |
634 | struct Qdisc *qdisc; | 676 | struct Qdisc *qdisc = dev_queue->qdisc_sleeping; |
677 | struct Qdisc *qdisc_default = _qdisc_default; | ||
678 | |||
679 | if (qdisc) { | ||
680 | spinlock_t *root_lock = qdisc_root_lock(qdisc); | ||
635 | 681 | ||
636 | qdisc_lock_tree(dev); | 682 | dev_queue->qdisc = qdisc_default; |
637 | qdisc = dev->qdisc_sleeping; | 683 | dev_queue->qdisc_sleeping = qdisc_default; |
638 | dev->qdisc = &noop_qdisc; | 684 | |
639 | dev->qdisc_sleeping = &noop_qdisc; | 685 | spin_lock(root_lock); |
640 | qdisc_destroy(qdisc); | ||
641 | #if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE) | ||
642 | if ((qdisc = dev->qdisc_ingress) != NULL) { | ||
643 | dev->qdisc_ingress = NULL; | ||
644 | qdisc_destroy(qdisc); | 686 | qdisc_destroy(qdisc); |
687 | spin_unlock(root_lock); | ||
645 | } | 688 | } |
646 | #endif | 689 | } |
690 | |||
691 | void dev_shutdown(struct net_device *dev) | ||
692 | { | ||
693 | netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); | ||
694 | shutdown_scheduler_queue(dev, &dev->rx_queue, NULL); | ||
647 | BUG_TRAP(!timer_pending(&dev->watchdog_timer)); | 695 | BUG_TRAP(!timer_pending(&dev->watchdog_timer)); |
648 | qdisc_unlock_tree(dev); | ||
649 | } | 696 | } |
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index c89fba56db56..c1ad6b8de105 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c | |||
@@ -164,7 +164,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
164 | * if no default DP has been configured. This | 164 | * if no default DP has been configured. This |
165 | * allows for DP flows to be left untouched. | 165 | * allows for DP flows to be left untouched. |
166 | */ | 166 | */ |
167 | if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len) | 167 | if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len) |
168 | return qdisc_enqueue_tail(skb, sch); | 168 | return qdisc_enqueue_tail(skb, sch); |
169 | else | 169 | else |
170 | goto drop; | 170 | goto drop; |
@@ -188,7 +188,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
188 | } | 188 | } |
189 | 189 | ||
190 | q->packetsin++; | 190 | q->packetsin++; |
191 | q->bytesin += skb->len; | 191 | q->bytesin += qdisc_pkt_len(skb); |
192 | 192 | ||
193 | if (gred_wred_mode(t)) | 193 | if (gred_wred_mode(t)) |
194 | gred_load_wred_set(t, q); | 194 | gred_load_wred_set(t, q); |
@@ -226,8 +226,8 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
226 | break; | 226 | break; |
227 | } | 227 | } |
228 | 228 | ||
229 | if (q->backlog + skb->len <= q->limit) { | 229 | if (q->backlog + qdisc_pkt_len(skb) <= q->limit) { |
230 | q->backlog += skb->len; | 230 | q->backlog += qdisc_pkt_len(skb); |
231 | return qdisc_enqueue_tail(skb, sch); | 231 | return qdisc_enqueue_tail(skb, sch); |
232 | } | 232 | } |
233 | 233 | ||
@@ -254,7 +254,7 @@ static int gred_requeue(struct sk_buff *skb, struct Qdisc* sch) | |||
254 | } else { | 254 | } else { |
255 | if (red_is_idling(&q->parms)) | 255 | if (red_is_idling(&q->parms)) |
256 | red_end_of_idle_period(&q->parms); | 256 | red_end_of_idle_period(&q->parms); |
257 | q->backlog += skb->len; | 257 | q->backlog += qdisc_pkt_len(skb); |
258 | } | 258 | } |
259 | 259 | ||
260 | return qdisc_requeue(skb, sch); | 260 | return qdisc_requeue(skb, sch); |
@@ -277,7 +277,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch) | |||
277 | "VQ 0x%x after dequeue, screwing up " | 277 | "VQ 0x%x after dequeue, screwing up " |
278 | "backlog.\n", tc_index_to_dp(skb)); | 278 | "backlog.\n", tc_index_to_dp(skb)); |
279 | } else { | 279 | } else { |
280 | q->backlog -= skb->len; | 280 | q->backlog -= qdisc_pkt_len(skb); |
281 | 281 | ||
282 | if (!q->backlog && !gred_wred_mode(t)) | 282 | if (!q->backlog && !gred_wred_mode(t)) |
283 | red_start_of_idle_period(&q->parms); | 283 | red_start_of_idle_period(&q->parms); |
@@ -299,7 +299,7 @@ static unsigned int gred_drop(struct Qdisc* sch) | |||
299 | 299 | ||
300 | skb = qdisc_dequeue_tail(sch); | 300 | skb = qdisc_dequeue_tail(sch); |
301 | if (skb) { | 301 | if (skb) { |
302 | unsigned int len = skb->len; | 302 | unsigned int len = qdisc_pkt_len(skb); |
303 | struct gred_sched_data *q; | 303 | struct gred_sched_data *q; |
304 | u16 dp = tc_index_to_dp(skb); | 304 | u16 dp = tc_index_to_dp(skb); |
305 | 305 | ||
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index e817aa00441d..0ae7d19dcba8 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -113,7 +113,7 @@ enum hfsc_class_flags | |||
113 | 113 | ||
114 | struct hfsc_class | 114 | struct hfsc_class |
115 | { | 115 | { |
116 | u32 classid; /* class id */ | 116 | struct Qdisc_class_common cl_common; |
117 | unsigned int refcnt; /* usage count */ | 117 | unsigned int refcnt; /* usage count */ |
118 | 118 | ||
119 | struct gnet_stats_basic bstats; | 119 | struct gnet_stats_basic bstats; |
@@ -134,7 +134,6 @@ struct hfsc_class | |||
134 | struct rb_node vt_node; /* parent's vt_tree member */ | 134 | struct rb_node vt_node; /* parent's vt_tree member */ |
135 | struct rb_root cf_tree; /* active children sorted by cl_f */ | 135 | struct rb_root cf_tree; /* active children sorted by cl_f */ |
136 | struct rb_node cf_node; /* parent's cf_heap member */ | 136 | struct rb_node cf_node; /* parent's cf_heap member */ |
137 | struct list_head hlist; /* hash list member */ | ||
138 | struct list_head dlist; /* drop list member */ | 137 | struct list_head dlist; /* drop list member */ |
139 | 138 | ||
140 | u64 cl_total; /* total work in bytes */ | 139 | u64 cl_total; /* total work in bytes */ |
@@ -177,13 +176,11 @@ struct hfsc_class | |||
177 | unsigned long cl_nactive; /* number of active children */ | 176 | unsigned long cl_nactive; /* number of active children */ |
178 | }; | 177 | }; |
179 | 178 | ||
180 | #define HFSC_HSIZE 16 | ||
181 | |||
182 | struct hfsc_sched | 179 | struct hfsc_sched |
183 | { | 180 | { |
184 | u16 defcls; /* default class id */ | 181 | u16 defcls; /* default class id */ |
185 | struct hfsc_class root; /* root class */ | 182 | struct hfsc_class root; /* root class */ |
186 | struct list_head clhash[HFSC_HSIZE]; /* class hash */ | 183 | struct Qdisc_class_hash clhash; /* class hash */ |
187 | struct rb_root eligible; /* eligible tree */ | 184 | struct rb_root eligible; /* eligible tree */ |
188 | struct list_head droplist; /* active leaf class list (for | 185 | struct list_head droplist; /* active leaf class list (for |
189 | dropping) */ | 186 | dropping) */ |
@@ -898,7 +895,7 @@ qdisc_peek_len(struct Qdisc *sch) | |||
898 | printk("qdisc_peek_len: non work-conserving qdisc ?\n"); | 895 | printk("qdisc_peek_len: non work-conserving qdisc ?\n"); |
899 | return 0; | 896 | return 0; |
900 | } | 897 | } |
901 | len = skb->len; | 898 | len = qdisc_pkt_len(skb); |
902 | if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) { | 899 | if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) { |
903 | if (net_ratelimit()) | 900 | if (net_ratelimit()) |
904 | printk("qdisc_peek_len: failed to requeue\n"); | 901 | printk("qdisc_peek_len: failed to requeue\n"); |
@@ -933,26 +930,16 @@ hfsc_adjust_levels(struct hfsc_class *cl) | |||
933 | } while ((cl = cl->cl_parent) != NULL); | 930 | } while ((cl = cl->cl_parent) != NULL); |
934 | } | 931 | } |
935 | 932 | ||
936 | static inline unsigned int | ||
937 | hfsc_hash(u32 h) | ||
938 | { | ||
939 | h ^= h >> 8; | ||
940 | h ^= h >> 4; | ||
941 | |||
942 | return h & (HFSC_HSIZE - 1); | ||
943 | } | ||
944 | |||
945 | static inline struct hfsc_class * | 933 | static inline struct hfsc_class * |
946 | hfsc_find_class(u32 classid, struct Qdisc *sch) | 934 | hfsc_find_class(u32 classid, struct Qdisc *sch) |
947 | { | 935 | { |
948 | struct hfsc_sched *q = qdisc_priv(sch); | 936 | struct hfsc_sched *q = qdisc_priv(sch); |
949 | struct hfsc_class *cl; | 937 | struct Qdisc_class_common *clc; |
950 | 938 | ||
951 | list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) { | 939 | clc = qdisc_class_find(&q->clhash, classid); |
952 | if (cl->classid == classid) | 940 | if (clc == NULL) |
953 | return cl; | 941 | return NULL; |
954 | } | 942 | return container_of(clc, struct hfsc_class, cl_common); |
955 | return NULL; | ||
956 | } | 943 | } |
957 | 944 | ||
958 | static void | 945 | static void |
@@ -1032,7 +1019,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
1032 | 1019 | ||
1033 | if (cl != NULL) { | 1020 | if (cl != NULL) { |
1034 | if (parentid) { | 1021 | if (parentid) { |
1035 | if (cl->cl_parent && cl->cl_parent->classid != parentid) | 1022 | if (cl->cl_parent && |
1023 | cl->cl_parent->cl_common.classid != parentid) | ||
1036 | return -EINVAL; | 1024 | return -EINVAL; |
1037 | if (cl->cl_parent == NULL && parentid != TC_H_ROOT) | 1025 | if (cl->cl_parent == NULL && parentid != TC_H_ROOT) |
1038 | return -EINVAL; | 1026 | return -EINVAL; |
@@ -1057,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
1057 | 1045 | ||
1058 | if (tca[TCA_RATE]) | 1046 | if (tca[TCA_RATE]) |
1059 | gen_replace_estimator(&cl->bstats, &cl->rate_est, | 1047 | gen_replace_estimator(&cl->bstats, &cl->rate_est, |
1060 | &sch->dev->queue_lock, | 1048 | qdisc_root_lock(sch), |
1061 | tca[TCA_RATE]); | 1049 | tca[TCA_RATE]); |
1062 | return 0; | 1050 | return 0; |
1063 | } | 1051 | } |
@@ -1091,11 +1079,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
1091 | if (usc != NULL) | 1079 | if (usc != NULL) |
1092 | hfsc_change_usc(cl, usc, 0); | 1080 | hfsc_change_usc(cl, usc, 0); |
1093 | 1081 | ||
1082 | cl->cl_common.classid = classid; | ||
1094 | cl->refcnt = 1; | 1083 | cl->refcnt = 1; |
1095 | cl->classid = classid; | ||
1096 | cl->sched = q; | 1084 | cl->sched = q; |
1097 | cl->cl_parent = parent; | 1085 | cl->cl_parent = parent; |
1098 | cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid); | 1086 | cl->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
1087 | &pfifo_qdisc_ops, classid); | ||
1099 | if (cl->qdisc == NULL) | 1088 | if (cl->qdisc == NULL) |
1100 | cl->qdisc = &noop_qdisc; | 1089 | cl->qdisc = &noop_qdisc; |
1101 | INIT_LIST_HEAD(&cl->children); | 1090 | INIT_LIST_HEAD(&cl->children); |
@@ -1103,7 +1092,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
1103 | cl->cf_tree = RB_ROOT; | 1092 | cl->cf_tree = RB_ROOT; |
1104 | 1093 | ||
1105 | sch_tree_lock(sch); | 1094 | sch_tree_lock(sch); |
1106 | list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]); | 1095 | qdisc_class_hash_insert(&q->clhash, &cl->cl_common); |
1107 | list_add_tail(&cl->siblings, &parent->children); | 1096 | list_add_tail(&cl->siblings, &parent->children); |
1108 | if (parent->level == 0) | 1097 | if (parent->level == 0) |
1109 | hfsc_purge_queue(sch, parent); | 1098 | hfsc_purge_queue(sch, parent); |
@@ -1111,9 +1100,11 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
1111 | cl->cl_pcvtoff = parent->cl_cvtoff; | 1100 | cl->cl_pcvtoff = parent->cl_cvtoff; |
1112 | sch_tree_unlock(sch); | 1101 | sch_tree_unlock(sch); |
1113 | 1102 | ||
1103 | qdisc_class_hash_grow(sch, &q->clhash); | ||
1104 | |||
1114 | if (tca[TCA_RATE]) | 1105 | if (tca[TCA_RATE]) |
1115 | gen_new_estimator(&cl->bstats, &cl->rate_est, | 1106 | gen_new_estimator(&cl->bstats, &cl->rate_est, |
1116 | &sch->dev->queue_lock, tca[TCA_RATE]); | 1107 | qdisc_root_lock(sch), tca[TCA_RATE]); |
1117 | *arg = (unsigned long)cl; | 1108 | *arg = (unsigned long)cl; |
1118 | return 0; | 1109 | return 0; |
1119 | } | 1110 | } |
@@ -1145,7 +1136,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg) | |||
1145 | hfsc_adjust_levels(cl->cl_parent); | 1136 | hfsc_adjust_levels(cl->cl_parent); |
1146 | 1137 | ||
1147 | hfsc_purge_queue(sch, cl); | 1138 | hfsc_purge_queue(sch, cl); |
1148 | list_del(&cl->hlist); | 1139 | qdisc_class_hash_remove(&q->clhash, &cl->cl_common); |
1149 | 1140 | ||
1150 | if (--cl->refcnt == 0) | 1141 | if (--cl->refcnt == 0) |
1151 | hfsc_destroy_class(sch, cl); | 1142 | hfsc_destroy_class(sch, cl); |
@@ -1211,8 +1202,9 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
1211 | if (cl->level > 0) | 1202 | if (cl->level > 0) |
1212 | return -EINVAL; | 1203 | return -EINVAL; |
1213 | if (new == NULL) { | 1204 | if (new == NULL) { |
1214 | new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, | 1205 | new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
1215 | cl->classid); | 1206 | &pfifo_qdisc_ops, |
1207 | cl->cl_common.classid); | ||
1216 | if (new == NULL) | 1208 | if (new == NULL) |
1217 | new = &noop_qdisc; | 1209 | new = &noop_qdisc; |
1218 | } | 1210 | } |
@@ -1345,8 +1337,9 @@ hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb, | |||
1345 | struct hfsc_class *cl = (struct hfsc_class *)arg; | 1337 | struct hfsc_class *cl = (struct hfsc_class *)arg; |
1346 | struct nlattr *nest; | 1338 | struct nlattr *nest; |
1347 | 1339 | ||
1348 | tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT; | 1340 | tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid : |
1349 | tcm->tcm_handle = cl->classid; | 1341 | TC_H_ROOT; |
1342 | tcm->tcm_handle = cl->cl_common.classid; | ||
1350 | if (cl->level == 0) | 1343 | if (cl->level == 0) |
1351 | tcm->tcm_info = cl->qdisc->handle; | 1344 | tcm->tcm_info = cl->qdisc->handle; |
1352 | 1345 | ||
@@ -1390,14 +1383,16 @@ static void | |||
1390 | hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg) | 1383 | hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg) |
1391 | { | 1384 | { |
1392 | struct hfsc_sched *q = qdisc_priv(sch); | 1385 | struct hfsc_sched *q = qdisc_priv(sch); |
1386 | struct hlist_node *n; | ||
1393 | struct hfsc_class *cl; | 1387 | struct hfsc_class *cl; |
1394 | unsigned int i; | 1388 | unsigned int i; |
1395 | 1389 | ||
1396 | if (arg->stop) | 1390 | if (arg->stop) |
1397 | return; | 1391 | return; |
1398 | 1392 | ||
1399 | for (i = 0; i < HFSC_HSIZE; i++) { | 1393 | for (i = 0; i < q->clhash.hashsize; i++) { |
1400 | list_for_each_entry(cl, &q->clhash[i], hlist) { | 1394 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], |
1395 | cl_common.hnode) { | ||
1401 | if (arg->count < arg->skip) { | 1396 | if (arg->count < arg->skip) { |
1402 | arg->count++; | 1397 | arg->count++; |
1403 | continue; | 1398 | continue; |
@@ -1433,23 +1428,25 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt) | |||
1433 | { | 1428 | { |
1434 | struct hfsc_sched *q = qdisc_priv(sch); | 1429 | struct hfsc_sched *q = qdisc_priv(sch); |
1435 | struct tc_hfsc_qopt *qopt; | 1430 | struct tc_hfsc_qopt *qopt; |
1436 | unsigned int i; | 1431 | int err; |
1437 | 1432 | ||
1438 | if (opt == NULL || nla_len(opt) < sizeof(*qopt)) | 1433 | if (opt == NULL || nla_len(opt) < sizeof(*qopt)) |
1439 | return -EINVAL; | 1434 | return -EINVAL; |
1440 | qopt = nla_data(opt); | 1435 | qopt = nla_data(opt); |
1441 | 1436 | ||
1442 | q->defcls = qopt->defcls; | 1437 | q->defcls = qopt->defcls; |
1443 | for (i = 0; i < HFSC_HSIZE; i++) | 1438 | err = qdisc_class_hash_init(&q->clhash); |
1444 | INIT_LIST_HEAD(&q->clhash[i]); | 1439 | if (err < 0) |
1440 | return err; | ||
1445 | q->eligible = RB_ROOT; | 1441 | q->eligible = RB_ROOT; |
1446 | INIT_LIST_HEAD(&q->droplist); | 1442 | INIT_LIST_HEAD(&q->droplist); |
1447 | skb_queue_head_init(&q->requeue); | 1443 | skb_queue_head_init(&q->requeue); |
1448 | 1444 | ||
1445 | q->root.cl_common.classid = sch->handle; | ||
1449 | q->root.refcnt = 1; | 1446 | q->root.refcnt = 1; |
1450 | q->root.classid = sch->handle; | ||
1451 | q->root.sched = q; | 1447 | q->root.sched = q; |
1452 | q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, | 1448 | q->root.qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
1449 | &pfifo_qdisc_ops, | ||
1453 | sch->handle); | 1450 | sch->handle); |
1454 | if (q->root.qdisc == NULL) | 1451 | if (q->root.qdisc == NULL) |
1455 | q->root.qdisc = &noop_qdisc; | 1452 | q->root.qdisc = &noop_qdisc; |
@@ -1457,7 +1454,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt) | |||
1457 | q->root.vt_tree = RB_ROOT; | 1454 | q->root.vt_tree = RB_ROOT; |
1458 | q->root.cf_tree = RB_ROOT; | 1455 | q->root.cf_tree = RB_ROOT; |
1459 | 1456 | ||
1460 | list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]); | 1457 | qdisc_class_hash_insert(&q->clhash, &q->root.cl_common); |
1458 | qdisc_class_hash_grow(sch, &q->clhash); | ||
1461 | 1459 | ||
1462 | qdisc_watchdog_init(&q->watchdog, sch); | 1460 | qdisc_watchdog_init(&q->watchdog, sch); |
1463 | 1461 | ||
@@ -1520,10 +1518,11 @@ hfsc_reset_qdisc(struct Qdisc *sch) | |||
1520 | { | 1518 | { |
1521 | struct hfsc_sched *q = qdisc_priv(sch); | 1519 | struct hfsc_sched *q = qdisc_priv(sch); |
1522 | struct hfsc_class *cl; | 1520 | struct hfsc_class *cl; |
1521 | struct hlist_node *n; | ||
1523 | unsigned int i; | 1522 | unsigned int i; |
1524 | 1523 | ||
1525 | for (i = 0; i < HFSC_HSIZE; i++) { | 1524 | for (i = 0; i < q->clhash.hashsize; i++) { |
1526 | list_for_each_entry(cl, &q->clhash[i], hlist) | 1525 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode) |
1527 | hfsc_reset_class(cl); | 1526 | hfsc_reset_class(cl); |
1528 | } | 1527 | } |
1529 | __skb_queue_purge(&q->requeue); | 1528 | __skb_queue_purge(&q->requeue); |
@@ -1537,17 +1536,20 @@ static void | |||
1537 | hfsc_destroy_qdisc(struct Qdisc *sch) | 1536 | hfsc_destroy_qdisc(struct Qdisc *sch) |
1538 | { | 1537 | { |
1539 | struct hfsc_sched *q = qdisc_priv(sch); | 1538 | struct hfsc_sched *q = qdisc_priv(sch); |
1540 | struct hfsc_class *cl, *next; | 1539 | struct hlist_node *n, *next; |
1540 | struct hfsc_class *cl; | ||
1541 | unsigned int i; | 1541 | unsigned int i; |
1542 | 1542 | ||
1543 | for (i = 0; i < HFSC_HSIZE; i++) { | 1543 | for (i = 0; i < q->clhash.hashsize; i++) { |
1544 | list_for_each_entry(cl, &q->clhash[i], hlist) | 1544 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode) |
1545 | tcf_destroy_chain(&cl->filter_list); | 1545 | tcf_destroy_chain(&cl->filter_list); |
1546 | } | 1546 | } |
1547 | for (i = 0; i < HFSC_HSIZE; i++) { | 1547 | for (i = 0; i < q->clhash.hashsize; i++) { |
1548 | list_for_each_entry_safe(cl, next, &q->clhash[i], hlist) | 1548 | hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], |
1549 | cl_common.hnode) | ||
1549 | hfsc_destroy_class(sch, cl); | 1550 | hfsc_destroy_class(sch, cl); |
1550 | } | 1551 | } |
1552 | qdisc_class_hash_destroy(&q->clhash); | ||
1551 | __skb_queue_purge(&q->requeue); | 1553 | __skb_queue_purge(&q->requeue); |
1552 | qdisc_watchdog_cancel(&q->watchdog); | 1554 | qdisc_watchdog_cancel(&q->watchdog); |
1553 | } | 1555 | } |
@@ -1572,7 +1574,6 @@ static int | |||
1572 | hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) | 1574 | hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) |
1573 | { | 1575 | { |
1574 | struct hfsc_class *cl; | 1576 | struct hfsc_class *cl; |
1575 | unsigned int len; | ||
1576 | int err; | 1577 | int err; |
1577 | 1578 | ||
1578 | cl = hfsc_classify(skb, sch, &err); | 1579 | cl = hfsc_classify(skb, sch, &err); |
@@ -1583,8 +1584,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
1583 | return err; | 1584 | return err; |
1584 | } | 1585 | } |
1585 | 1586 | ||
1586 | len = skb->len; | 1587 | err = qdisc_enqueue(skb, cl->qdisc); |
1587 | err = cl->qdisc->enqueue(skb, cl->qdisc); | ||
1588 | if (unlikely(err != NET_XMIT_SUCCESS)) { | 1588 | if (unlikely(err != NET_XMIT_SUCCESS)) { |
1589 | cl->qstats.drops++; | 1589 | cl->qstats.drops++; |
1590 | sch->qstats.drops++; | 1590 | sch->qstats.drops++; |
@@ -1592,12 +1592,12 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
1592 | } | 1592 | } |
1593 | 1593 | ||
1594 | if (cl->qdisc->q.qlen == 1) | 1594 | if (cl->qdisc->q.qlen == 1) |
1595 | set_active(cl, len); | 1595 | set_active(cl, qdisc_pkt_len(skb)); |
1596 | 1596 | ||
1597 | cl->bstats.packets++; | 1597 | cl->bstats.packets++; |
1598 | cl->bstats.bytes += len; | 1598 | cl->bstats.bytes += qdisc_pkt_len(skb); |
1599 | sch->bstats.packets++; | 1599 | sch->bstats.packets++; |
1600 | sch->bstats.bytes += len; | 1600 | sch->bstats.bytes += qdisc_pkt_len(skb); |
1601 | sch->q.qlen++; | 1601 | sch->q.qlen++; |
1602 | 1602 | ||
1603 | return NET_XMIT_SUCCESS; | 1603 | return NET_XMIT_SUCCESS; |
@@ -1647,9 +1647,9 @@ hfsc_dequeue(struct Qdisc *sch) | |||
1647 | return NULL; | 1647 | return NULL; |
1648 | } | 1648 | } |
1649 | 1649 | ||
1650 | update_vf(cl, skb->len, cur_time); | 1650 | update_vf(cl, qdisc_pkt_len(skb), cur_time); |
1651 | if (realtime) | 1651 | if (realtime) |
1652 | cl->cl_cumul += skb->len; | 1652 | cl->cl_cumul += qdisc_pkt_len(skb); |
1653 | 1653 | ||
1654 | if (cl->qdisc->q.qlen != 0) { | 1654 | if (cl->qdisc->q.qlen != 0) { |
1655 | if (cl->cl_flags & HFSC_RSC) { | 1655 | if (cl->cl_flags & HFSC_RSC) { |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 3fb58f428f72..30c999c61b01 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -24,8 +24,6 @@ | |||
24 | * Jiri Fojtasek | 24 | * Jiri Fojtasek |
25 | * fixed requeue routine | 25 | * fixed requeue routine |
26 | * and many others. thanks. | 26 | * and many others. thanks. |
27 | * | ||
28 | * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $ | ||
29 | */ | 27 | */ |
30 | #include <linux/module.h> | 28 | #include <linux/module.h> |
31 | #include <linux/moduleparam.h> | 29 | #include <linux/moduleparam.h> |
@@ -53,7 +51,6 @@ | |||
53 | one less than their parent. | 51 | one less than their parent. |
54 | */ | 52 | */ |
55 | 53 | ||
56 | #define HTB_HSIZE 16 /* classid hash size */ | ||
57 | static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */ | 54 | static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */ |
58 | #define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */ | 55 | #define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */ |
59 | 56 | ||
@@ -74,8 +71,8 @@ enum htb_cmode { | |||
74 | 71 | ||
75 | /* interior & leaf nodes; props specific to leaves are marked L: */ | 72 | /* interior & leaf nodes; props specific to leaves are marked L: */ |
76 | struct htb_class { | 73 | struct htb_class { |
74 | struct Qdisc_class_common common; | ||
77 | /* general class parameters */ | 75 | /* general class parameters */ |
78 | u32 classid; | ||
79 | struct gnet_stats_basic bstats; | 76 | struct gnet_stats_basic bstats; |
80 | struct gnet_stats_queue qstats; | 77 | struct gnet_stats_queue qstats; |
81 | struct gnet_stats_rate_est rate_est; | 78 | struct gnet_stats_rate_est rate_est; |
@@ -84,10 +81,8 @@ struct htb_class { | |||
84 | 81 | ||
85 | /* topology */ | 82 | /* topology */ |
86 | int level; /* our level (see above) */ | 83 | int level; /* our level (see above) */ |
84 | unsigned int children; | ||
87 | struct htb_class *parent; /* parent class */ | 85 | struct htb_class *parent; /* parent class */ |
88 | struct hlist_node hlist; /* classid hash list item */ | ||
89 | struct list_head sibling; /* sibling list item */ | ||
90 | struct list_head children; /* children list */ | ||
91 | 86 | ||
92 | union { | 87 | union { |
93 | struct htb_class_leaf { | 88 | struct htb_class_leaf { |
@@ -142,8 +137,7 @@ static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate, | |||
142 | } | 137 | } |
143 | 138 | ||
144 | struct htb_sched { | 139 | struct htb_sched { |
145 | struct list_head root; /* root classes list */ | 140 | struct Qdisc_class_hash clhash; |
146 | struct hlist_head hash[HTB_HSIZE]; /* hashed by classid */ | ||
147 | struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */ | 141 | struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */ |
148 | 142 | ||
149 | /* self list - roots of self generating tree */ | 143 | /* self list - roots of self generating tree */ |
@@ -165,7 +159,6 @@ struct htb_sched { | |||
165 | 159 | ||
166 | /* filters for qdisc itself */ | 160 | /* filters for qdisc itself */ |
167 | struct tcf_proto *filter_list; | 161 | struct tcf_proto *filter_list; |
168 | int filter_cnt; | ||
169 | 162 | ||
170 | int rate2quantum; /* quant = rate / rate2quantum */ | 163 | int rate2quantum; /* quant = rate / rate2quantum */ |
171 | psched_time_t now; /* cached dequeue time */ | 164 | psched_time_t now; /* cached dequeue time */ |
@@ -178,32 +171,16 @@ struct htb_sched { | |||
178 | long direct_pkts; | 171 | long direct_pkts; |
179 | }; | 172 | }; |
180 | 173 | ||
181 | /* compute hash of size HTB_HSIZE for given handle */ | ||
182 | static inline int htb_hash(u32 h) | ||
183 | { | ||
184 | #if HTB_HSIZE != 16 | ||
185 | #error "Declare new hash for your HTB_HSIZE" | ||
186 | #endif | ||
187 | h ^= h >> 8; /* stolen from cbq_hash */ | ||
188 | h ^= h >> 4; | ||
189 | return h & 0xf; | ||
190 | } | ||
191 | |||
192 | /* find class in global hash table using given handle */ | 174 | /* find class in global hash table using given handle */ |
193 | static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch) | 175 | static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch) |
194 | { | 176 | { |
195 | struct htb_sched *q = qdisc_priv(sch); | 177 | struct htb_sched *q = qdisc_priv(sch); |
196 | struct hlist_node *p; | 178 | struct Qdisc_class_common *clc; |
197 | struct htb_class *cl; | ||
198 | 179 | ||
199 | if (TC_H_MAJ(handle) != sch->handle) | 180 | clc = qdisc_class_find(&q->clhash, handle); |
181 | if (clc == NULL) | ||
200 | return NULL; | 182 | return NULL; |
201 | 183 | return container_of(clc, struct htb_class, common); | |
202 | hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) { | ||
203 | if (cl->classid == handle) | ||
204 | return cl; | ||
205 | } | ||
206 | return NULL; | ||
207 | } | 184 | } |
208 | 185 | ||
209 | /** | 186 | /** |
@@ -284,7 +261,7 @@ static void htb_add_to_id_tree(struct rb_root *root, | |||
284 | parent = *p; | 261 | parent = *p; |
285 | c = rb_entry(parent, struct htb_class, node[prio]); | 262 | c = rb_entry(parent, struct htb_class, node[prio]); |
286 | 263 | ||
287 | if (cl->classid > c->classid) | 264 | if (cl->common.classid > c->common.classid) |
288 | p = &parent->rb_right; | 265 | p = &parent->rb_right; |
289 | else | 266 | else |
290 | p = &parent->rb_left; | 267 | p = &parent->rb_left; |
@@ -448,7 +425,7 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl) | |||
448 | /* we are removing child which is pointed to from | 425 | /* we are removing child which is pointed to from |
449 | parent feed - forget the pointer but remember | 426 | parent feed - forget the pointer but remember |
450 | classid */ | 427 | classid */ |
451 | p->un.inner.last_ptr_id[prio] = cl->classid; | 428 | p->un.inner.last_ptr_id[prio] = cl->common.classid; |
452 | p->un.inner.ptr[prio] = NULL; | 429 | p->un.inner.ptr[prio] = NULL; |
453 | } | 430 | } |
454 | 431 | ||
@@ -595,21 +572,20 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
595 | kfree_skb(skb); | 572 | kfree_skb(skb); |
596 | return ret; | 573 | return ret; |
597 | #endif | 574 | #endif |
598 | } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != | 575 | } else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) { |
599 | NET_XMIT_SUCCESS) { | ||
600 | sch->qstats.drops++; | 576 | sch->qstats.drops++; |
601 | cl->qstats.drops++; | 577 | cl->qstats.drops++; |
602 | return NET_XMIT_DROP; | 578 | return NET_XMIT_DROP; |
603 | } else { | 579 | } else { |
604 | cl->bstats.packets += | 580 | cl->bstats.packets += |
605 | skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; | 581 | skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; |
606 | cl->bstats.bytes += skb->len; | 582 | cl->bstats.bytes += qdisc_pkt_len(skb); |
607 | htb_activate(q, cl); | 583 | htb_activate(q, cl); |
608 | } | 584 | } |
609 | 585 | ||
610 | sch->q.qlen++; | 586 | sch->q.qlen++; |
611 | sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; | 587 | sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; |
612 | sch->bstats.bytes += skb->len; | 588 | sch->bstats.bytes += qdisc_pkt_len(skb); |
613 | return NET_XMIT_SUCCESS; | 589 | return NET_XMIT_SUCCESS; |
614 | } | 590 | } |
615 | 591 | ||
@@ -666,7 +642,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch) | |||
666 | static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, | 642 | static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, |
667 | int level, struct sk_buff *skb) | 643 | int level, struct sk_buff *skb) |
668 | { | 644 | { |
669 | int bytes = skb->len; | 645 | int bytes = qdisc_pkt_len(skb); |
670 | long toks, diff; | 646 | long toks, diff; |
671 | enum htb_cmode old_mode; | 647 | enum htb_cmode old_mode; |
672 | 648 | ||
@@ -753,10 +729,10 @@ static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n, | |||
753 | while (n) { | 729 | while (n) { |
754 | struct htb_class *cl = | 730 | struct htb_class *cl = |
755 | rb_entry(n, struct htb_class, node[prio]); | 731 | rb_entry(n, struct htb_class, node[prio]); |
756 | if (id == cl->classid) | 732 | if (id == cl->common.classid) |
757 | return n; | 733 | return n; |
758 | 734 | ||
759 | if (id > cl->classid) { | 735 | if (id > cl->common.classid) { |
760 | n = n->rb_right; | 736 | n = n->rb_right; |
761 | } else { | 737 | } else { |
762 | r = n; | 738 | r = n; |
@@ -866,7 +842,7 @@ next: | |||
866 | if (!cl->warned) { | 842 | if (!cl->warned) { |
867 | printk(KERN_WARNING | 843 | printk(KERN_WARNING |
868 | "htb: class %X isn't work conserving ?!\n", | 844 | "htb: class %X isn't work conserving ?!\n", |
869 | cl->classid); | 845 | cl->common.classid); |
870 | cl->warned = 1; | 846 | cl->warned = 1; |
871 | } | 847 | } |
872 | q->nwc_hit++; | 848 | q->nwc_hit++; |
@@ -879,7 +855,8 @@ next: | |||
879 | } while (cl != start); | 855 | } while (cl != start); |
880 | 856 | ||
881 | if (likely(skb != NULL)) { | 857 | if (likely(skb != NULL)) { |
882 | if ((cl->un.leaf.deficit[level] -= skb->len) < 0) { | 858 | cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb); |
859 | if (cl->un.leaf.deficit[level] < 0) { | ||
883 | cl->un.leaf.deficit[level] += cl->un.leaf.quantum; | 860 | cl->un.leaf.deficit[level] += cl->un.leaf.quantum; |
884 | htb_next_rb_node((level ? cl->parent->un.inner.ptr : q-> | 861 | htb_next_rb_node((level ? cl->parent->un.inner.ptr : q-> |
885 | ptr[0]) + prio); | 862 | ptr[0]) + prio); |
@@ -977,13 +954,12 @@ static unsigned int htb_drop(struct Qdisc *sch) | |||
977 | static void htb_reset(struct Qdisc *sch) | 954 | static void htb_reset(struct Qdisc *sch) |
978 | { | 955 | { |
979 | struct htb_sched *q = qdisc_priv(sch); | 956 | struct htb_sched *q = qdisc_priv(sch); |
980 | int i; | 957 | struct htb_class *cl; |
981 | 958 | struct hlist_node *n; | |
982 | for (i = 0; i < HTB_HSIZE; i++) { | 959 | unsigned int i; |
983 | struct hlist_node *p; | ||
984 | struct htb_class *cl; | ||
985 | 960 | ||
986 | hlist_for_each_entry(cl, p, q->hash + i, hlist) { | 961 | for (i = 0; i < q->clhash.hashsize; i++) { |
962 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { | ||
987 | if (cl->level) | 963 | if (cl->level) |
988 | memset(&cl->un.inner, 0, sizeof(cl->un.inner)); | 964 | memset(&cl->un.inner, 0, sizeof(cl->un.inner)); |
989 | else { | 965 | else { |
@@ -1041,16 +1017,16 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) | |||
1041 | return -EINVAL; | 1017 | return -EINVAL; |
1042 | } | 1018 | } |
1043 | 1019 | ||
1044 | INIT_LIST_HEAD(&q->root); | 1020 | err = qdisc_class_hash_init(&q->clhash); |
1045 | for (i = 0; i < HTB_HSIZE; i++) | 1021 | if (err < 0) |
1046 | INIT_HLIST_HEAD(q->hash + i); | 1022 | return err; |
1047 | for (i = 0; i < TC_HTB_NUMPRIO; i++) | 1023 | for (i = 0; i < TC_HTB_NUMPRIO; i++) |
1048 | INIT_LIST_HEAD(q->drops + i); | 1024 | INIT_LIST_HEAD(q->drops + i); |
1049 | 1025 | ||
1050 | qdisc_watchdog_init(&q->watchdog, sch); | 1026 | qdisc_watchdog_init(&q->watchdog, sch); |
1051 | skb_queue_head_init(&q->direct_queue); | 1027 | skb_queue_head_init(&q->direct_queue); |
1052 | 1028 | ||
1053 | q->direct_qlen = sch->dev->tx_queue_len; | 1029 | q->direct_qlen = qdisc_dev(sch)->tx_queue_len; |
1054 | if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */ | 1030 | if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */ |
1055 | q->direct_qlen = 2; | 1031 | q->direct_qlen = 2; |
1056 | 1032 | ||
@@ -1063,11 +1039,12 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) | |||
1063 | 1039 | ||
1064 | static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) | 1040 | static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) |
1065 | { | 1041 | { |
1042 | spinlock_t *root_lock = qdisc_root_lock(sch); | ||
1066 | struct htb_sched *q = qdisc_priv(sch); | 1043 | struct htb_sched *q = qdisc_priv(sch); |
1067 | struct nlattr *nest; | 1044 | struct nlattr *nest; |
1068 | struct tc_htb_glob gopt; | 1045 | struct tc_htb_glob gopt; |
1069 | 1046 | ||
1070 | spin_lock_bh(&sch->dev->queue_lock); | 1047 | spin_lock_bh(root_lock); |
1071 | 1048 | ||
1072 | gopt.direct_pkts = q->direct_pkts; | 1049 | gopt.direct_pkts = q->direct_pkts; |
1073 | gopt.version = HTB_VER; | 1050 | gopt.version = HTB_VER; |
@@ -1081,11 +1058,11 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
1081 | NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); | 1058 | NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); |
1082 | nla_nest_end(skb, nest); | 1059 | nla_nest_end(skb, nest); |
1083 | 1060 | ||
1084 | spin_unlock_bh(&sch->dev->queue_lock); | 1061 | spin_unlock_bh(root_lock); |
1085 | return skb->len; | 1062 | return skb->len; |
1086 | 1063 | ||
1087 | nla_put_failure: | 1064 | nla_put_failure: |
1088 | spin_unlock_bh(&sch->dev->queue_lock); | 1065 | spin_unlock_bh(root_lock); |
1089 | nla_nest_cancel(skb, nest); | 1066 | nla_nest_cancel(skb, nest); |
1090 | return -1; | 1067 | return -1; |
1091 | } | 1068 | } |
@@ -1094,12 +1071,13 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, | |||
1094 | struct sk_buff *skb, struct tcmsg *tcm) | 1071 | struct sk_buff *skb, struct tcmsg *tcm) |
1095 | { | 1072 | { |
1096 | struct htb_class *cl = (struct htb_class *)arg; | 1073 | struct htb_class *cl = (struct htb_class *)arg; |
1074 | spinlock_t *root_lock = qdisc_root_lock(sch); | ||
1097 | struct nlattr *nest; | 1075 | struct nlattr *nest; |
1098 | struct tc_htb_opt opt; | 1076 | struct tc_htb_opt opt; |
1099 | 1077 | ||
1100 | spin_lock_bh(&sch->dev->queue_lock); | 1078 | spin_lock_bh(root_lock); |
1101 | tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT; | 1079 | tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT; |
1102 | tcm->tcm_handle = cl->classid; | 1080 | tcm->tcm_handle = cl->common.classid; |
1103 | if (!cl->level && cl->un.leaf.q) | 1081 | if (!cl->level && cl->un.leaf.q) |
1104 | tcm->tcm_info = cl->un.leaf.q->handle; | 1082 | tcm->tcm_info = cl->un.leaf.q->handle; |
1105 | 1083 | ||
@@ -1119,11 +1097,11 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, | |||
1119 | NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); | 1097 | NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); |
1120 | 1098 | ||
1121 | nla_nest_end(skb, nest); | 1099 | nla_nest_end(skb, nest); |
1122 | spin_unlock_bh(&sch->dev->queue_lock); | 1100 | spin_unlock_bh(root_lock); |
1123 | return skb->len; | 1101 | return skb->len; |
1124 | 1102 | ||
1125 | nla_put_failure: | 1103 | nla_put_failure: |
1126 | spin_unlock_bh(&sch->dev->queue_lock); | 1104 | spin_unlock_bh(root_lock); |
1127 | nla_nest_cancel(skb, nest); | 1105 | nla_nest_cancel(skb, nest); |
1128 | return -1; | 1106 | return -1; |
1129 | } | 1107 | } |
@@ -1153,8 +1131,9 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
1153 | 1131 | ||
1154 | if (cl && !cl->level) { | 1132 | if (cl && !cl->level) { |
1155 | if (new == NULL && | 1133 | if (new == NULL && |
1156 | (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, | 1134 | (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
1157 | cl->classid)) | 1135 | &pfifo_qdisc_ops, |
1136 | cl->common.classid)) | ||
1158 | == NULL) | 1137 | == NULL) |
1159 | return -ENOBUFS; | 1138 | return -ENOBUFS; |
1160 | sch_tree_lock(sch); | 1139 | sch_tree_lock(sch); |
@@ -1195,12 +1174,9 @@ static inline int htb_parent_last_child(struct htb_class *cl) | |||
1195 | if (!cl->parent) | 1174 | if (!cl->parent) |
1196 | /* the root class */ | 1175 | /* the root class */ |
1197 | return 0; | 1176 | return 0; |
1198 | 1177 | if (cl->parent->children > 1) | |
1199 | if (!(cl->parent->children.next == &cl->sibling && | ||
1200 | cl->parent->children.prev == &cl->sibling)) | ||
1201 | /* not the last child */ | 1178 | /* not the last child */ |
1202 | return 0; | 1179 | return 0; |
1203 | |||
1204 | return 1; | 1180 | return 1; |
1205 | } | 1181 | } |
1206 | 1182 | ||
@@ -1228,8 +1204,6 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl, | |||
1228 | 1204 | ||
1229 | static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) | 1205 | static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) |
1230 | { | 1206 | { |
1231 | struct htb_sched *q = qdisc_priv(sch); | ||
1232 | |||
1233 | if (!cl->level) { | 1207 | if (!cl->level) { |
1234 | BUG_TRAP(cl->un.leaf.q); | 1208 | BUG_TRAP(cl->un.leaf.q); |
1235 | qdisc_destroy(cl->un.leaf.q); | 1209 | qdisc_destroy(cl->un.leaf.q); |
@@ -1239,21 +1213,6 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) | |||
1239 | qdisc_put_rtab(cl->ceil); | 1213 | qdisc_put_rtab(cl->ceil); |
1240 | 1214 | ||
1241 | tcf_destroy_chain(&cl->filter_list); | 1215 | tcf_destroy_chain(&cl->filter_list); |
1242 | |||
1243 | while (!list_empty(&cl->children)) | ||
1244 | htb_destroy_class(sch, list_entry(cl->children.next, | ||
1245 | struct htb_class, sibling)); | ||
1246 | |||
1247 | /* note: this delete may happen twice (see htb_delete) */ | ||
1248 | hlist_del_init(&cl->hlist); | ||
1249 | list_del(&cl->sibling); | ||
1250 | |||
1251 | if (cl->prio_activity) | ||
1252 | htb_deactivate(q, cl); | ||
1253 | |||
1254 | if (cl->cmode != HTB_CAN_SEND) | ||
1255 | htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level); | ||
1256 | |||
1257 | kfree(cl); | 1216 | kfree(cl); |
1258 | } | 1217 | } |
1259 | 1218 | ||
@@ -1261,6 +1220,9 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) | |||
1261 | static void htb_destroy(struct Qdisc *sch) | 1220 | static void htb_destroy(struct Qdisc *sch) |
1262 | { | 1221 | { |
1263 | struct htb_sched *q = qdisc_priv(sch); | 1222 | struct htb_sched *q = qdisc_priv(sch); |
1223 | struct hlist_node *n, *next; | ||
1224 | struct htb_class *cl; | ||
1225 | unsigned int i; | ||
1264 | 1226 | ||
1265 | qdisc_watchdog_cancel(&q->watchdog); | 1227 | qdisc_watchdog_cancel(&q->watchdog); |
1266 | /* This line used to be after htb_destroy_class call below | 1228 | /* This line used to be after htb_destroy_class call below |
@@ -1269,10 +1231,16 @@ static void htb_destroy(struct Qdisc *sch) | |||
1269 | unbind_filter on it (without Oops). */ | 1231 | unbind_filter on it (without Oops). */ |
1270 | tcf_destroy_chain(&q->filter_list); | 1232 | tcf_destroy_chain(&q->filter_list); |
1271 | 1233 | ||
1272 | while (!list_empty(&q->root)) | 1234 | for (i = 0; i < q->clhash.hashsize; i++) { |
1273 | htb_destroy_class(sch, list_entry(q->root.next, | 1235 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) |
1274 | struct htb_class, sibling)); | 1236 | tcf_destroy_chain(&cl->filter_list); |
1275 | 1237 | } | |
1238 | for (i = 0; i < q->clhash.hashsize; i++) { | ||
1239 | hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], | ||
1240 | common.hnode) | ||
1241 | htb_destroy_class(sch, cl); | ||
1242 | } | ||
1243 | qdisc_class_hash_destroy(&q->clhash); | ||
1276 | __skb_queue_purge(&q->direct_queue); | 1244 | __skb_queue_purge(&q->direct_queue); |
1277 | } | 1245 | } |
1278 | 1246 | ||
@@ -1287,12 +1255,13 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg) | |||
1287 | // TODO: why don't allow to delete subtree ? references ? does | 1255 | // TODO: why don't allow to delete subtree ? references ? does |
1288 | // tc subsys quarantee us that in htb_destroy it holds no class | 1256 | // tc subsys quarantee us that in htb_destroy it holds no class |
1289 | // refs so that we can remove children safely there ? | 1257 | // refs so that we can remove children safely there ? |
1290 | if (!list_empty(&cl->children) || cl->filter_cnt) | 1258 | if (cl->children || cl->filter_cnt) |
1291 | return -EBUSY; | 1259 | return -EBUSY; |
1292 | 1260 | ||
1293 | if (!cl->level && htb_parent_last_child(cl)) { | 1261 | if (!cl->level && htb_parent_last_child(cl)) { |
1294 | new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, | 1262 | new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
1295 | cl->parent->classid); | 1263 | &pfifo_qdisc_ops, |
1264 | cl->parent->common.classid); | ||
1296 | last_child = 1; | 1265 | last_child = 1; |
1297 | } | 1266 | } |
1298 | 1267 | ||
@@ -1305,11 +1274,15 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg) | |||
1305 | } | 1274 | } |
1306 | 1275 | ||
1307 | /* delete from hash and active; remainder in destroy_class */ | 1276 | /* delete from hash and active; remainder in destroy_class */ |
1308 | hlist_del_init(&cl->hlist); | 1277 | qdisc_class_hash_remove(&q->clhash, &cl->common); |
1278 | cl->parent->children--; | ||
1309 | 1279 | ||
1310 | if (cl->prio_activity) | 1280 | if (cl->prio_activity) |
1311 | htb_deactivate(q, cl); | 1281 | htb_deactivate(q, cl); |
1312 | 1282 | ||
1283 | if (cl->cmode != HTB_CAN_SEND) | ||
1284 | htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level); | ||
1285 | |||
1313 | if (last_child) | 1286 | if (last_child) |
1314 | htb_parent_to_leaf(q, cl, new_q); | 1287 | htb_parent_to_leaf(q, cl, new_q); |
1315 | 1288 | ||
@@ -1394,12 +1367,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1394 | goto failure; | 1367 | goto failure; |
1395 | 1368 | ||
1396 | gen_new_estimator(&cl->bstats, &cl->rate_est, | 1369 | gen_new_estimator(&cl->bstats, &cl->rate_est, |
1397 | &sch->dev->queue_lock, | 1370 | qdisc_root_lock(sch), |
1398 | tca[TCA_RATE] ? : &est.nla); | 1371 | tca[TCA_RATE] ? : &est.nla); |
1399 | cl->refcnt = 1; | 1372 | cl->refcnt = 1; |
1400 | INIT_LIST_HEAD(&cl->sibling); | 1373 | cl->children = 0; |
1401 | INIT_HLIST_NODE(&cl->hlist); | ||
1402 | INIT_LIST_HEAD(&cl->children); | ||
1403 | INIT_LIST_HEAD(&cl->un.leaf.drop_list); | 1374 | INIT_LIST_HEAD(&cl->un.leaf.drop_list); |
1404 | RB_CLEAR_NODE(&cl->pq_node); | 1375 | RB_CLEAR_NODE(&cl->pq_node); |
1405 | 1376 | ||
@@ -1409,7 +1380,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1409 | /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) | 1380 | /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) |
1410 | so that can't be used inside of sch_tree_lock | 1381 | so that can't be used inside of sch_tree_lock |
1411 | -- thanks to Karlis Peisenieks */ | 1382 | -- thanks to Karlis Peisenieks */ |
1412 | new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid); | 1383 | new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
1384 | &pfifo_qdisc_ops, classid); | ||
1413 | sch_tree_lock(sch); | 1385 | sch_tree_lock(sch); |
1414 | if (parent && !parent->level) { | 1386 | if (parent && !parent->level) { |
1415 | unsigned int qlen = parent->un.leaf.q->q.qlen; | 1387 | unsigned int qlen = parent->un.leaf.q->q.qlen; |
@@ -1433,7 +1405,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1433 | /* leaf (we) needs elementary qdisc */ | 1405 | /* leaf (we) needs elementary qdisc */ |
1434 | cl->un.leaf.q = new_q ? new_q : &noop_qdisc; | 1406 | cl->un.leaf.q = new_q ? new_q : &noop_qdisc; |
1435 | 1407 | ||
1436 | cl->classid = classid; | 1408 | cl->common.classid = classid; |
1437 | cl->parent = parent; | 1409 | cl->parent = parent; |
1438 | 1410 | ||
1439 | /* set class to be in HTB_CAN_SEND state */ | 1411 | /* set class to be in HTB_CAN_SEND state */ |
@@ -1444,13 +1416,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1444 | cl->cmode = HTB_CAN_SEND; | 1416 | cl->cmode = HTB_CAN_SEND; |
1445 | 1417 | ||
1446 | /* attach to the hash list and parent's family */ | 1418 | /* attach to the hash list and parent's family */ |
1447 | hlist_add_head(&cl->hlist, q->hash + htb_hash(classid)); | 1419 | qdisc_class_hash_insert(&q->clhash, &cl->common); |
1448 | list_add_tail(&cl->sibling, | 1420 | if (parent) |
1449 | parent ? &parent->children : &q->root); | 1421 | parent->children++; |
1450 | } else { | 1422 | } else { |
1451 | if (tca[TCA_RATE]) | 1423 | if (tca[TCA_RATE]) |
1452 | gen_replace_estimator(&cl->bstats, &cl->rate_est, | 1424 | gen_replace_estimator(&cl->bstats, &cl->rate_est, |
1453 | &sch->dev->queue_lock, | 1425 | qdisc_root_lock(sch), |
1454 | tca[TCA_RATE]); | 1426 | tca[TCA_RATE]); |
1455 | sch_tree_lock(sch); | 1427 | sch_tree_lock(sch); |
1456 | } | 1428 | } |
@@ -1462,13 +1434,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1462 | if (!hopt->quantum && cl->un.leaf.quantum < 1000) { | 1434 | if (!hopt->quantum && cl->un.leaf.quantum < 1000) { |
1463 | printk(KERN_WARNING | 1435 | printk(KERN_WARNING |
1464 | "HTB: quantum of class %X is small. Consider r2q change.\n", | 1436 | "HTB: quantum of class %X is small. Consider r2q change.\n", |
1465 | cl->classid); | 1437 | cl->common.classid); |
1466 | cl->un.leaf.quantum = 1000; | 1438 | cl->un.leaf.quantum = 1000; |
1467 | } | 1439 | } |
1468 | if (!hopt->quantum && cl->un.leaf.quantum > 200000) { | 1440 | if (!hopt->quantum && cl->un.leaf.quantum > 200000) { |
1469 | printk(KERN_WARNING | 1441 | printk(KERN_WARNING |
1470 | "HTB: quantum of class %X is big. Consider r2q change.\n", | 1442 | "HTB: quantum of class %X is big. Consider r2q change.\n", |
1471 | cl->classid); | 1443 | cl->common.classid); |
1472 | cl->un.leaf.quantum = 200000; | 1444 | cl->un.leaf.quantum = 200000; |
1473 | } | 1445 | } |
1474 | if (hopt->quantum) | 1446 | if (hopt->quantum) |
@@ -1491,6 +1463,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1491 | cl->ceil = ctab; | 1463 | cl->ceil = ctab; |
1492 | sch_tree_unlock(sch); | 1464 | sch_tree_unlock(sch); |
1493 | 1465 | ||
1466 | qdisc_class_hash_grow(sch, &q->clhash); | ||
1467 | |||
1494 | *arg = (unsigned long)cl; | 1468 | *arg = (unsigned long)cl; |
1495 | return 0; | 1469 | return 0; |
1496 | 1470 | ||
@@ -1514,7 +1488,6 @@ static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg) | |||
1514 | static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent, | 1488 | static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent, |
1515 | u32 classid) | 1489 | u32 classid) |
1516 | { | 1490 | { |
1517 | struct htb_sched *q = qdisc_priv(sch); | ||
1518 | struct htb_class *cl = htb_find(classid, sch); | 1491 | struct htb_class *cl = htb_find(classid, sch); |
1519 | 1492 | ||
1520 | /*if (cl && !cl->level) return 0; | 1493 | /*if (cl && !cl->level) return 0; |
@@ -1528,35 +1501,29 @@ static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent, | |||
1528 | */ | 1501 | */ |
1529 | if (cl) | 1502 | if (cl) |
1530 | cl->filter_cnt++; | 1503 | cl->filter_cnt++; |
1531 | else | ||
1532 | q->filter_cnt++; | ||
1533 | return (unsigned long)cl; | 1504 | return (unsigned long)cl; |
1534 | } | 1505 | } |
1535 | 1506 | ||
1536 | static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg) | 1507 | static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg) |
1537 | { | 1508 | { |
1538 | struct htb_sched *q = qdisc_priv(sch); | ||
1539 | struct htb_class *cl = (struct htb_class *)arg; | 1509 | struct htb_class *cl = (struct htb_class *)arg; |
1540 | 1510 | ||
1541 | if (cl) | 1511 | if (cl) |
1542 | cl->filter_cnt--; | 1512 | cl->filter_cnt--; |
1543 | else | ||
1544 | q->filter_cnt--; | ||
1545 | } | 1513 | } |
1546 | 1514 | ||
1547 | static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg) | 1515 | static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg) |
1548 | { | 1516 | { |
1549 | struct htb_sched *q = qdisc_priv(sch); | 1517 | struct htb_sched *q = qdisc_priv(sch); |
1550 | int i; | 1518 | struct htb_class *cl; |
1519 | struct hlist_node *n; | ||
1520 | unsigned int i; | ||
1551 | 1521 | ||
1552 | if (arg->stop) | 1522 | if (arg->stop) |
1553 | return; | 1523 | return; |
1554 | 1524 | ||
1555 | for (i = 0; i < HTB_HSIZE; i++) { | 1525 | for (i = 0; i < q->clhash.hashsize; i++) { |
1556 | struct hlist_node *p; | 1526 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { |
1557 | struct htb_class *cl; | ||
1558 | |||
1559 | hlist_for_each_entry(cl, p, q->hash + i, hlist) { | ||
1560 | if (arg->count < arg->skip) { | 1527 | if (arg->count < arg->skip) { |
1561 | arg->count++; | 1528 | arg->count++; |
1562 | continue; | 1529 | continue; |
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 956c80ad5965..4a2b77374358 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c | |||
@@ -77,7 +77,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
77 | result = tc_classify(skb, p->filter_list, &res); | 77 | result = tc_classify(skb, p->filter_list, &res); |
78 | 78 | ||
79 | sch->bstats.packets++; | 79 | sch->bstats.packets++; |
80 | sch->bstats.bytes += skb->len; | 80 | sch->bstats.bytes += qdisc_pkt_len(skb); |
81 | switch (result) { | 81 | switch (result) { |
82 | case TC_ACT_SHOT: | 82 | case TC_ACT_SHOT: |
83 | result = TC_ACT_SHOT; | 83 | result = TC_ACT_SHOT; |
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index c9c649b26eaa..a59085700678 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -82,6 +82,13 @@ struct netem_skb_cb { | |||
82 | psched_time_t time_to_send; | 82 | psched_time_t time_to_send; |
83 | }; | 83 | }; |
84 | 84 | ||
85 | static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb) | ||
86 | { | ||
87 | BUILD_BUG_ON(sizeof(skb->cb) < | ||
88 | sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb)); | ||
89 | return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data; | ||
90 | } | ||
91 | |||
85 | /* init_crandom - initialize correlated random number generator | 92 | /* init_crandom - initialize correlated random number generator |
86 | * Use entropy source for initial seed. | 93 | * Use entropy source for initial seed. |
87 | */ | 94 | */ |
@@ -180,11 +187,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
180 | * skb will be queued. | 187 | * skb will be queued. |
181 | */ | 188 | */ |
182 | if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { | 189 | if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { |
183 | struct Qdisc *rootq = sch->dev->qdisc; | 190 | struct Qdisc *rootq = qdisc_root(sch); |
184 | u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ | 191 | u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ |
185 | q->duplicate = 0; | 192 | q->duplicate = 0; |
186 | 193 | ||
187 | rootq->enqueue(skb2, rootq); | 194 | qdisc_enqueue_root(skb2, rootq); |
188 | q->duplicate = dupsave; | 195 | q->duplicate = dupsave; |
189 | } | 196 | } |
190 | 197 | ||
@@ -205,7 +212,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
205 | skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); | 212 | skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); |
206 | } | 213 | } |
207 | 214 | ||
208 | cb = (struct netem_skb_cb *)skb->cb; | 215 | cb = netem_skb_cb(skb); |
209 | if (q->gap == 0 /* not doing reordering */ | 216 | if (q->gap == 0 /* not doing reordering */ |
210 | || q->counter < q->gap /* inside last reordering gap */ | 217 | || q->counter < q->gap /* inside last reordering gap */ |
211 | || q->reorder < get_crandom(&q->reorder_cor)) { | 218 | || q->reorder < get_crandom(&q->reorder_cor)) { |
@@ -218,7 +225,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
218 | now = psched_get_time(); | 225 | now = psched_get_time(); |
219 | cb->time_to_send = now + delay; | 226 | cb->time_to_send = now + delay; |
220 | ++q->counter; | 227 | ++q->counter; |
221 | ret = q->qdisc->enqueue(skb, q->qdisc); | 228 | ret = qdisc_enqueue(skb, q->qdisc); |
222 | } else { | 229 | } else { |
223 | /* | 230 | /* |
224 | * Do re-ordering by putting one out of N packets at the front | 231 | * Do re-ordering by putting one out of N packets at the front |
@@ -231,7 +238,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
231 | 238 | ||
232 | if (likely(ret == NET_XMIT_SUCCESS)) { | 239 | if (likely(ret == NET_XMIT_SUCCESS)) { |
233 | sch->q.qlen++; | 240 | sch->q.qlen++; |
234 | sch->bstats.bytes += skb->len; | 241 | sch->bstats.bytes += qdisc_pkt_len(skb); |
235 | sch->bstats.packets++; | 242 | sch->bstats.packets++; |
236 | } else | 243 | } else |
237 | sch->qstats.drops++; | 244 | sch->qstats.drops++; |
@@ -277,8 +284,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) | |||
277 | 284 | ||
278 | skb = q->qdisc->dequeue(q->qdisc); | 285 | skb = q->qdisc->dequeue(q->qdisc); |
279 | if (skb) { | 286 | if (skb) { |
280 | const struct netem_skb_cb *cb | 287 | const struct netem_skb_cb *cb = netem_skb_cb(skb); |
281 | = (const struct netem_skb_cb *)skb->cb; | ||
282 | psched_time_t now = psched_get_time(); | 288 | psched_time_t now = psched_get_time(); |
283 | 289 | ||
284 | /* if more time remaining? */ | 290 | /* if more time remaining? */ |
@@ -310,28 +316,6 @@ static void netem_reset(struct Qdisc *sch) | |||
310 | qdisc_watchdog_cancel(&q->watchdog); | 316 | qdisc_watchdog_cancel(&q->watchdog); |
311 | } | 317 | } |
312 | 318 | ||
313 | /* Pass size change message down to embedded FIFO */ | ||
314 | static int set_fifo_limit(struct Qdisc *q, int limit) | ||
315 | { | ||
316 | struct nlattr *nla; | ||
317 | int ret = -ENOMEM; | ||
318 | |||
319 | /* Hack to avoid sending change message to non-FIFO */ | ||
320 | if (strncmp(q->ops->id + 1, "fifo", 4) != 0) | ||
321 | return 0; | ||
322 | |||
323 | nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); | ||
324 | if (nla) { | ||
325 | nla->nla_type = RTM_NEWQDISC; | ||
326 | nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt)); | ||
327 | ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit; | ||
328 | |||
329 | ret = q->ops->change(q, nla); | ||
330 | kfree(nla); | ||
331 | } | ||
332 | return ret; | ||
333 | } | ||
334 | |||
335 | /* | 319 | /* |
336 | * Distribution data is a variable size payload containing | 320 | * Distribution data is a variable size payload containing |
337 | * signed 16 bit values. | 321 | * signed 16 bit values. |
@@ -341,6 +325,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) | |||
341 | struct netem_sched_data *q = qdisc_priv(sch); | 325 | struct netem_sched_data *q = qdisc_priv(sch); |
342 | unsigned long n = nla_len(attr)/sizeof(__s16); | 326 | unsigned long n = nla_len(attr)/sizeof(__s16); |
343 | const __s16 *data = nla_data(attr); | 327 | const __s16 *data = nla_data(attr); |
328 | spinlock_t *root_lock; | ||
344 | struct disttable *d; | 329 | struct disttable *d; |
345 | int i; | 330 | int i; |
346 | 331 | ||
@@ -355,9 +340,11 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) | |||
355 | for (i = 0; i < n; i++) | 340 | for (i = 0; i < n; i++) |
356 | d->table[i] = data[i]; | 341 | d->table[i] = data[i]; |
357 | 342 | ||
358 | spin_lock_bh(&sch->dev->queue_lock); | 343 | root_lock = qdisc_root_lock(sch); |
344 | |||
345 | spin_lock_bh(root_lock); | ||
359 | d = xchg(&q->delay_dist, d); | 346 | d = xchg(&q->delay_dist, d); |
360 | spin_unlock_bh(&sch->dev->queue_lock); | 347 | spin_unlock_bh(root_lock); |
361 | 348 | ||
362 | kfree(d); | 349 | kfree(d); |
363 | return 0; | 350 | return 0; |
@@ -416,7 +403,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt) | |||
416 | if (ret < 0) | 403 | if (ret < 0) |
417 | return ret; | 404 | return ret; |
418 | 405 | ||
419 | ret = set_fifo_limit(q->qdisc, qopt->limit); | 406 | ret = fifo_set_limit(q->qdisc, qopt->limit); |
420 | if (ret) { | 407 | if (ret) { |
421 | pr_debug("netem: can't set fifo limit\n"); | 408 | pr_debug("netem: can't set fifo limit\n"); |
422 | return ret; | 409 | return ret; |
@@ -476,7 +463,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) | |||
476 | { | 463 | { |
477 | struct fifo_sched_data *q = qdisc_priv(sch); | 464 | struct fifo_sched_data *q = qdisc_priv(sch); |
478 | struct sk_buff_head *list = &sch->q; | 465 | struct sk_buff_head *list = &sch->q; |
479 | psched_time_t tnext = ((struct netem_skb_cb *)nskb->cb)->time_to_send; | 466 | psched_time_t tnext = netem_skb_cb(nskb)->time_to_send; |
480 | struct sk_buff *skb; | 467 | struct sk_buff *skb; |
481 | 468 | ||
482 | if (likely(skb_queue_len(list) < q->limit)) { | 469 | if (likely(skb_queue_len(list) < q->limit)) { |
@@ -487,8 +474,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) | |||
487 | } | 474 | } |
488 | 475 | ||
489 | skb_queue_reverse_walk(list, skb) { | 476 | skb_queue_reverse_walk(list, skb) { |
490 | const struct netem_skb_cb *cb | 477 | const struct netem_skb_cb *cb = netem_skb_cb(skb); |
491 | = (const struct netem_skb_cb *)skb->cb; | ||
492 | 478 | ||
493 | if (tnext >= cb->time_to_send) | 479 | if (tnext >= cb->time_to_send) |
494 | break; | 480 | break; |
@@ -496,8 +482,8 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) | |||
496 | 482 | ||
497 | __skb_queue_after(list, skb, nskb); | 483 | __skb_queue_after(list, skb, nskb); |
498 | 484 | ||
499 | sch->qstats.backlog += nskb->len; | 485 | sch->qstats.backlog += qdisc_pkt_len(nskb); |
500 | sch->bstats.bytes += nskb->len; | 486 | sch->bstats.bytes += qdisc_pkt_len(nskb); |
501 | sch->bstats.packets++; | 487 | sch->bstats.packets++; |
502 | 488 | ||
503 | return NET_XMIT_SUCCESS; | 489 | return NET_XMIT_SUCCESS; |
@@ -517,7 +503,7 @@ static int tfifo_init(struct Qdisc *sch, struct nlattr *opt) | |||
517 | 503 | ||
518 | q->limit = ctl->limit; | 504 | q->limit = ctl->limit; |
519 | } else | 505 | } else |
520 | q->limit = max_t(u32, sch->dev->tx_queue_len, 1); | 506 | q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1); |
521 | 507 | ||
522 | q->oldest = PSCHED_PASTPERFECT; | 508 | q->oldest = PSCHED_PASTPERFECT; |
523 | return 0; | 509 | return 0; |
@@ -558,7 +544,8 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt) | |||
558 | 544 | ||
559 | qdisc_watchdog_init(&q->watchdog, sch); | 545 | qdisc_watchdog_init(&q->watchdog, sch); |
560 | 546 | ||
561 | q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops, | 547 | q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
548 | &tfifo_qdisc_ops, | ||
562 | TC_H_MAKE(sch->handle, 1)); | 549 | TC_H_MAKE(sch->handle, 1)); |
563 | if (!q->qdisc) { | 550 | if (!q->qdisc) { |
564 | pr_debug("netem: qdisc create failed\n"); | 551 | pr_debug("netem: qdisc create failed\n"); |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 5532f1031ab5..f849243eb095 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -24,11 +24,9 @@ | |||
24 | struct prio_sched_data | 24 | struct prio_sched_data |
25 | { | 25 | { |
26 | int bands; | 26 | int bands; |
27 | int curband; /* for round-robin */ | ||
28 | struct tcf_proto *filter_list; | 27 | struct tcf_proto *filter_list; |
29 | u8 prio2band[TC_PRIO_MAX+1]; | 28 | u8 prio2band[TC_PRIO_MAX+1]; |
30 | struct Qdisc *queues[TCQ_PRIO_BANDS]; | 29 | struct Qdisc *queues[TCQ_PRIO_BANDS]; |
31 | int mq; | ||
32 | }; | 30 | }; |
33 | 31 | ||
34 | 32 | ||
@@ -55,17 +53,14 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
55 | if (!q->filter_list || err < 0) { | 53 | if (!q->filter_list || err < 0) { |
56 | if (TC_H_MAJ(band)) | 54 | if (TC_H_MAJ(band)) |
57 | band = 0; | 55 | band = 0; |
58 | band = q->prio2band[band&TC_PRIO_MAX]; | 56 | return q->queues[q->prio2band[band&TC_PRIO_MAX]]; |
59 | goto out; | ||
60 | } | 57 | } |
61 | band = res.classid; | 58 | band = res.classid; |
62 | } | 59 | } |
63 | band = TC_H_MIN(band) - 1; | 60 | band = TC_H_MIN(band) - 1; |
64 | if (band >= q->bands) | 61 | if (band >= q->bands) |
65 | band = q->prio2band[0]; | 62 | return q->queues[q->prio2band[0]]; |
66 | out: | 63 | |
67 | if (q->mq) | ||
68 | skb_set_queue_mapping(skb, band); | ||
69 | return q->queues[band]; | 64 | return q->queues[band]; |
70 | } | 65 | } |
71 | 66 | ||
@@ -86,8 +81,9 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
86 | } | 81 | } |
87 | #endif | 82 | #endif |
88 | 83 | ||
89 | if ((ret = qdisc->enqueue(skb, qdisc)) == NET_XMIT_SUCCESS) { | 84 | ret = qdisc_enqueue(skb, qdisc); |
90 | sch->bstats.bytes += skb->len; | 85 | if (ret == NET_XMIT_SUCCESS) { |
86 | sch->bstats.bytes += qdisc_pkt_len(skb); | ||
91 | sch->bstats.packets++; | 87 | sch->bstats.packets++; |
92 | sch->q.qlen++; | 88 | sch->q.qlen++; |
93 | return NET_XMIT_SUCCESS; | 89 | return NET_XMIT_SUCCESS; |
@@ -123,67 +119,23 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch) | |||
123 | } | 119 | } |
124 | 120 | ||
125 | 121 | ||
126 | static struct sk_buff * | 122 | static struct sk_buff *prio_dequeue(struct Qdisc* sch) |
127 | prio_dequeue(struct Qdisc* sch) | ||
128 | { | 123 | { |
129 | struct sk_buff *skb; | ||
130 | struct prio_sched_data *q = qdisc_priv(sch); | 124 | struct prio_sched_data *q = qdisc_priv(sch); |
131 | int prio; | 125 | int prio; |
132 | struct Qdisc *qdisc; | ||
133 | 126 | ||
134 | for (prio = 0; prio < q->bands; prio++) { | 127 | for (prio = 0; prio < q->bands; prio++) { |
135 | /* Check if the target subqueue is available before | 128 | struct Qdisc *qdisc = q->queues[prio]; |
136 | * pulling an skb. This way we avoid excessive requeues | 129 | struct sk_buff *skb = qdisc->dequeue(qdisc); |
137 | * for slower queues. | 130 | if (skb) { |
138 | */ | 131 | sch->q.qlen--; |
139 | if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) { | 132 | return skb; |
140 | qdisc = q->queues[prio]; | ||
141 | skb = qdisc->dequeue(qdisc); | ||
142 | if (skb) { | ||
143 | sch->q.qlen--; | ||
144 | return skb; | ||
145 | } | ||
146 | } | 133 | } |
147 | } | 134 | } |
148 | return NULL; | 135 | return NULL; |
149 | 136 | ||
150 | } | 137 | } |
151 | 138 | ||
152 | static struct sk_buff *rr_dequeue(struct Qdisc* sch) | ||
153 | { | ||
154 | struct sk_buff *skb; | ||
155 | struct prio_sched_data *q = qdisc_priv(sch); | ||
156 | struct Qdisc *qdisc; | ||
157 | int bandcount; | ||
158 | |||
159 | /* Only take one pass through the queues. If nothing is available, | ||
160 | * return nothing. | ||
161 | */ | ||
162 | for (bandcount = 0; bandcount < q->bands; bandcount++) { | ||
163 | /* Check if the target subqueue is available before | ||
164 | * pulling an skb. This way we avoid excessive requeues | ||
165 | * for slower queues. If the queue is stopped, try the | ||
166 | * next queue. | ||
167 | */ | ||
168 | if (!__netif_subqueue_stopped(sch->dev, | ||
169 | (q->mq ? q->curband : 0))) { | ||
170 | qdisc = q->queues[q->curband]; | ||
171 | skb = qdisc->dequeue(qdisc); | ||
172 | if (skb) { | ||
173 | sch->q.qlen--; | ||
174 | q->curband++; | ||
175 | if (q->curband >= q->bands) | ||
176 | q->curband = 0; | ||
177 | return skb; | ||
178 | } | ||
179 | } | ||
180 | q->curband++; | ||
181 | if (q->curband >= q->bands) | ||
182 | q->curband = 0; | ||
183 | } | ||
184 | return NULL; | ||
185 | } | ||
186 | |||
187 | static unsigned int prio_drop(struct Qdisc* sch) | 139 | static unsigned int prio_drop(struct Qdisc* sch) |
188 | { | 140 | { |
189 | struct prio_sched_data *q = qdisc_priv(sch); | 141 | struct prio_sched_data *q = qdisc_priv(sch); |
@@ -228,45 +180,22 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) | |||
228 | { | 180 | { |
229 | struct prio_sched_data *q = qdisc_priv(sch); | 181 | struct prio_sched_data *q = qdisc_priv(sch); |
230 | struct tc_prio_qopt *qopt; | 182 | struct tc_prio_qopt *qopt; |
231 | struct nlattr *tb[TCA_PRIO_MAX + 1]; | ||
232 | int err; | ||
233 | int i; | 183 | int i; |
234 | 184 | ||
235 | err = nla_parse_nested_compat(tb, TCA_PRIO_MAX, opt, NULL, qopt, | 185 | if (nla_len(opt) < sizeof(*qopt)) |
236 | sizeof(*qopt)); | 186 | return -EINVAL; |
237 | if (err < 0) | 187 | qopt = nla_data(opt); |
238 | return err; | ||
239 | |||
240 | q->bands = qopt->bands; | ||
241 | /* If we're multiqueue, make sure the number of incoming bands | ||
242 | * matches the number of queues on the device we're associating with. | ||
243 | * If the number of bands requested is zero, then set q->bands to | ||
244 | * dev->egress_subqueue_count. Also, the root qdisc must be the | ||
245 | * only one that is enabled for multiqueue, since it's the only one | ||
246 | * that interacts with the underlying device. | ||
247 | */ | ||
248 | q->mq = nla_get_flag(tb[TCA_PRIO_MQ]); | ||
249 | if (q->mq) { | ||
250 | if (sch->parent != TC_H_ROOT) | ||
251 | return -EINVAL; | ||
252 | if (netif_is_multiqueue(sch->dev)) { | ||
253 | if (q->bands == 0) | ||
254 | q->bands = sch->dev->egress_subqueue_count; | ||
255 | else if (q->bands != sch->dev->egress_subqueue_count) | ||
256 | return -EINVAL; | ||
257 | } else | ||
258 | return -EOPNOTSUPP; | ||
259 | } | ||
260 | 188 | ||
261 | if (q->bands > TCQ_PRIO_BANDS || q->bands < 2) | 189 | if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2) |
262 | return -EINVAL; | 190 | return -EINVAL; |
263 | 191 | ||
264 | for (i=0; i<=TC_PRIO_MAX; i++) { | 192 | for (i=0; i<=TC_PRIO_MAX; i++) { |
265 | if (qopt->priomap[i] >= q->bands) | 193 | if (qopt->priomap[i] >= qopt->bands) |
266 | return -EINVAL; | 194 | return -EINVAL; |
267 | } | 195 | } |
268 | 196 | ||
269 | sch_tree_lock(sch); | 197 | sch_tree_lock(sch); |
198 | q->bands = qopt->bands; | ||
270 | memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); | 199 | memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); |
271 | 200 | ||
272 | for (i=q->bands; i<TCQ_PRIO_BANDS; i++) { | 201 | for (i=q->bands; i<TCQ_PRIO_BANDS; i++) { |
@@ -281,7 +210,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) | |||
281 | for (i=0; i<q->bands; i++) { | 210 | for (i=0; i<q->bands; i++) { |
282 | if (q->queues[i] == &noop_qdisc) { | 211 | if (q->queues[i] == &noop_qdisc) { |
283 | struct Qdisc *child; | 212 | struct Qdisc *child; |
284 | child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, | 213 | child = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
214 | &pfifo_qdisc_ops, | ||
285 | TC_H_MAKE(sch->handle, i + 1)); | 215 | TC_H_MAKE(sch->handle, i + 1)); |
286 | if (child) { | 216 | if (child) { |
287 | sch_tree_lock(sch); | 217 | sch_tree_lock(sch); |
@@ -331,10 +261,6 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
331 | nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt); | 261 | nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt); |
332 | if (nest == NULL) | 262 | if (nest == NULL) |
333 | goto nla_put_failure; | 263 | goto nla_put_failure; |
334 | if (q->mq) { | ||
335 | if (nla_put_flag(skb, TCA_PRIO_MQ) < 0) | ||
336 | goto nla_put_failure; | ||
337 | } | ||
338 | nla_nest_compat_end(skb, nest); | 264 | nla_nest_compat_end(skb, nest); |
339 | 265 | ||
340 | return skb->len; | 266 | return skb->len; |
@@ -507,44 +433,17 @@ static struct Qdisc_ops prio_qdisc_ops __read_mostly = { | |||
507 | .owner = THIS_MODULE, | 433 | .owner = THIS_MODULE, |
508 | }; | 434 | }; |
509 | 435 | ||
510 | static struct Qdisc_ops rr_qdisc_ops __read_mostly = { | ||
511 | .next = NULL, | ||
512 | .cl_ops = &prio_class_ops, | ||
513 | .id = "rr", | ||
514 | .priv_size = sizeof(struct prio_sched_data), | ||
515 | .enqueue = prio_enqueue, | ||
516 | .dequeue = rr_dequeue, | ||
517 | .requeue = prio_requeue, | ||
518 | .drop = prio_drop, | ||
519 | .init = prio_init, | ||
520 | .reset = prio_reset, | ||
521 | .destroy = prio_destroy, | ||
522 | .change = prio_tune, | ||
523 | .dump = prio_dump, | ||
524 | .owner = THIS_MODULE, | ||
525 | }; | ||
526 | |||
527 | static int __init prio_module_init(void) | 436 | static int __init prio_module_init(void) |
528 | { | 437 | { |
529 | int err; | 438 | return register_qdisc(&prio_qdisc_ops); |
530 | |||
531 | err = register_qdisc(&prio_qdisc_ops); | ||
532 | if (err < 0) | ||
533 | return err; | ||
534 | err = register_qdisc(&rr_qdisc_ops); | ||
535 | if (err < 0) | ||
536 | unregister_qdisc(&prio_qdisc_ops); | ||
537 | return err; | ||
538 | } | 439 | } |
539 | 440 | ||
540 | static void __exit prio_module_exit(void) | 441 | static void __exit prio_module_exit(void) |
541 | { | 442 | { |
542 | unregister_qdisc(&prio_qdisc_ops); | 443 | unregister_qdisc(&prio_qdisc_ops); |
543 | unregister_qdisc(&rr_qdisc_ops); | ||
544 | } | 444 | } |
545 | 445 | ||
546 | module_init(prio_module_init) | 446 | module_init(prio_module_init) |
547 | module_exit(prio_module_exit) | 447 | module_exit(prio_module_exit) |
548 | 448 | ||
549 | MODULE_LICENSE("GPL"); | 449 | MODULE_LICENSE("GPL"); |
550 | MODULE_ALIAS("sch_rr"); | ||
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 5c569853b9c0..3f2d1d7f3bbd 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
@@ -92,9 +92,9 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
92 | break; | 92 | break; |
93 | } | 93 | } |
94 | 94 | ||
95 | ret = child->enqueue(skb, child); | 95 | ret = qdisc_enqueue(skb, child); |
96 | if (likely(ret == NET_XMIT_SUCCESS)) { | 96 | if (likely(ret == NET_XMIT_SUCCESS)) { |
97 | sch->bstats.bytes += skb->len; | 97 | sch->bstats.bytes += qdisc_pkt_len(skb); |
98 | sch->bstats.packets++; | 98 | sch->bstats.packets++; |
99 | sch->q.qlen++; | 99 | sch->q.qlen++; |
100 | } else { | 100 | } else { |
@@ -174,33 +174,6 @@ static void red_destroy(struct Qdisc *sch) | |||
174 | qdisc_destroy(q->qdisc); | 174 | qdisc_destroy(q->qdisc); |
175 | } | 175 | } |
176 | 176 | ||
177 | static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit) | ||
178 | { | ||
179 | struct Qdisc *q; | ||
180 | struct nlattr *nla; | ||
181 | int ret; | ||
182 | |||
183 | q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops, | ||
184 | TC_H_MAKE(sch->handle, 1)); | ||
185 | if (q) { | ||
186 | nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), | ||
187 | GFP_KERNEL); | ||
188 | if (nla) { | ||
189 | nla->nla_type = RTM_NEWQDISC; | ||
190 | nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt)); | ||
191 | ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit; | ||
192 | |||
193 | ret = q->ops->change(q, nla); | ||
194 | kfree(nla); | ||
195 | |||
196 | if (ret == 0) | ||
197 | return q; | ||
198 | } | ||
199 | qdisc_destroy(q); | ||
200 | } | ||
201 | return NULL; | ||
202 | } | ||
203 | |||
204 | static const struct nla_policy red_policy[TCA_RED_MAX + 1] = { | 177 | static const struct nla_policy red_policy[TCA_RED_MAX + 1] = { |
205 | [TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) }, | 178 | [TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) }, |
206 | [TCA_RED_STAB] = { .len = RED_STAB_SIZE }, | 179 | [TCA_RED_STAB] = { .len = RED_STAB_SIZE }, |
@@ -228,9 +201,9 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) | |||
228 | ctl = nla_data(tb[TCA_RED_PARMS]); | 201 | ctl = nla_data(tb[TCA_RED_PARMS]); |
229 | 202 | ||
230 | if (ctl->limit > 0) { | 203 | if (ctl->limit > 0) { |
231 | child = red_create_dflt(sch, ctl->limit); | 204 | child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit); |
232 | if (child == NULL) | 205 | if (IS_ERR(child)) |
233 | return -ENOMEM; | 206 | return PTR_ERR(child); |
234 | } | 207 | } |
235 | 208 | ||
236 | sch_tree_lock(sch); | 209 | sch_tree_lock(sch); |
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 6a97afbfb952..8589da666568 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -245,7 +245,7 @@ static unsigned int sfq_drop(struct Qdisc *sch) | |||
245 | if (d > 1) { | 245 | if (d > 1) { |
246 | sfq_index x = q->dep[d + SFQ_DEPTH].next; | 246 | sfq_index x = q->dep[d + SFQ_DEPTH].next; |
247 | skb = q->qs[x].prev; | 247 | skb = q->qs[x].prev; |
248 | len = skb->len; | 248 | len = qdisc_pkt_len(skb); |
249 | __skb_unlink(skb, &q->qs[x]); | 249 | __skb_unlink(skb, &q->qs[x]); |
250 | kfree_skb(skb); | 250 | kfree_skb(skb); |
251 | sfq_dec(q, x); | 251 | sfq_dec(q, x); |
@@ -261,7 +261,7 @@ static unsigned int sfq_drop(struct Qdisc *sch) | |||
261 | q->next[q->tail] = q->next[d]; | 261 | q->next[q->tail] = q->next[d]; |
262 | q->allot[q->next[d]] += q->quantum; | 262 | q->allot[q->next[d]] += q->quantum; |
263 | skb = q->qs[d].prev; | 263 | skb = q->qs[d].prev; |
264 | len = skb->len; | 264 | len = qdisc_pkt_len(skb); |
265 | __skb_unlink(skb, &q->qs[d]); | 265 | __skb_unlink(skb, &q->qs[d]); |
266 | kfree_skb(skb); | 266 | kfree_skb(skb); |
267 | sfq_dec(q, d); | 267 | sfq_dec(q, d); |
@@ -305,7 +305,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
305 | if (q->qs[x].qlen >= q->limit) | 305 | if (q->qs[x].qlen >= q->limit) |
306 | return qdisc_drop(skb, sch); | 306 | return qdisc_drop(skb, sch); |
307 | 307 | ||
308 | sch->qstats.backlog += skb->len; | 308 | sch->qstats.backlog += qdisc_pkt_len(skb); |
309 | __skb_queue_tail(&q->qs[x], skb); | 309 | __skb_queue_tail(&q->qs[x], skb); |
310 | sfq_inc(q, x); | 310 | sfq_inc(q, x); |
311 | if (q->qs[x].qlen == 1) { /* The flow is new */ | 311 | if (q->qs[x].qlen == 1) { /* The flow is new */ |
@@ -320,7 +320,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
320 | } | 320 | } |
321 | } | 321 | } |
322 | if (++sch->q.qlen <= q->limit) { | 322 | if (++sch->q.qlen <= q->limit) { |
323 | sch->bstats.bytes += skb->len; | 323 | sch->bstats.bytes += qdisc_pkt_len(skb); |
324 | sch->bstats.packets++; | 324 | sch->bstats.packets++; |
325 | return 0; | 325 | return 0; |
326 | } | 326 | } |
@@ -352,7 +352,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch) | |||
352 | q->hash[x] = hash; | 352 | q->hash[x] = hash; |
353 | } | 353 | } |
354 | 354 | ||
355 | sch->qstats.backlog += skb->len; | 355 | sch->qstats.backlog += qdisc_pkt_len(skb); |
356 | __skb_queue_head(&q->qs[x], skb); | 356 | __skb_queue_head(&q->qs[x], skb); |
357 | /* If selected queue has length q->limit+1, this means that | 357 | /* If selected queue has length q->limit+1, this means that |
358 | * all another queues are empty and we do simple tail drop. | 358 | * all another queues are empty and we do simple tail drop. |
@@ -363,7 +363,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch) | |||
363 | skb = q->qs[x].prev; | 363 | skb = q->qs[x].prev; |
364 | __skb_unlink(skb, &q->qs[x]); | 364 | __skb_unlink(skb, &q->qs[x]); |
365 | sch->qstats.drops++; | 365 | sch->qstats.drops++; |
366 | sch->qstats.backlog -= skb->len; | 366 | sch->qstats.backlog -= qdisc_pkt_len(skb); |
367 | kfree_skb(skb); | 367 | kfree_skb(skb); |
368 | return NET_XMIT_CN; | 368 | return NET_XMIT_CN; |
369 | } | 369 | } |
@@ -411,7 +411,7 @@ sfq_dequeue(struct Qdisc *sch) | |||
411 | skb = __skb_dequeue(&q->qs[a]); | 411 | skb = __skb_dequeue(&q->qs[a]); |
412 | sfq_dec(q, a); | 412 | sfq_dec(q, a); |
413 | sch->q.qlen--; | 413 | sch->q.qlen--; |
414 | sch->qstats.backlog -= skb->len; | 414 | sch->qstats.backlog -= qdisc_pkt_len(skb); |
415 | 415 | ||
416 | /* Is the slot empty? */ | 416 | /* Is the slot empty? */ |
417 | if (q->qs[a].qlen == 0) { | 417 | if (q->qs[a].qlen == 0) { |
@@ -423,7 +423,7 @@ sfq_dequeue(struct Qdisc *sch) | |||
423 | } | 423 | } |
424 | q->next[q->tail] = a; | 424 | q->next[q->tail] = a; |
425 | q->allot[a] += q->quantum; | 425 | q->allot[a] += q->quantum; |
426 | } else if ((q->allot[a] -= skb->len) <= 0) { | 426 | } else if ((q->allot[a] -= qdisc_pkt_len(skb)) <= 0) { |
427 | q->tail = a; | 427 | q->tail = a; |
428 | a = q->next[a]; | 428 | a = q->next[a]; |
429 | q->allot[a] += q->quantum; | 429 | q->allot[a] += q->quantum; |
@@ -461,7 +461,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) | |||
461 | return -EINVAL; | 461 | return -EINVAL; |
462 | 462 | ||
463 | sch_tree_lock(sch); | 463 | sch_tree_lock(sch); |
464 | q->quantum = ctl->quantum ? : psched_mtu(sch->dev); | 464 | q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch)); |
465 | q->perturb_period = ctl->perturb_period * HZ; | 465 | q->perturb_period = ctl->perturb_period * HZ; |
466 | if (ctl->limit) | 466 | if (ctl->limit) |
467 | q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1); | 467 | q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1); |
@@ -502,7 +502,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) | |||
502 | q->max_depth = 0; | 502 | q->max_depth = 0; |
503 | q->tail = SFQ_DEPTH; | 503 | q->tail = SFQ_DEPTH; |
504 | if (opt == NULL) { | 504 | if (opt == NULL) { |
505 | q->quantum = psched_mtu(sch->dev); | 505 | q->quantum = psched_mtu(qdisc_dev(sch)); |
506 | q->perturb_period = 0; | 506 | q->perturb_period = 0; |
507 | q->perturbation = net_random(); | 507 | q->perturbation = net_random(); |
508 | } else { | 508 | } else { |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 0b7d78f59d8c..b296672f7632 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -123,7 +123,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
123 | struct tbf_sched_data *q = qdisc_priv(sch); | 123 | struct tbf_sched_data *q = qdisc_priv(sch); |
124 | int ret; | 124 | int ret; |
125 | 125 | ||
126 | if (skb->len > q->max_size) { | 126 | if (qdisc_pkt_len(skb) > q->max_size) { |
127 | sch->qstats.drops++; | 127 | sch->qstats.drops++; |
128 | #ifdef CONFIG_NET_CLS_ACT | 128 | #ifdef CONFIG_NET_CLS_ACT |
129 | if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) | 129 | if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) |
@@ -133,13 +133,14 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
133 | return NET_XMIT_DROP; | 133 | return NET_XMIT_DROP; |
134 | } | 134 | } |
135 | 135 | ||
136 | if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) { | 136 | ret = qdisc_enqueue(skb, q->qdisc); |
137 | if (ret != 0) { | ||
137 | sch->qstats.drops++; | 138 | sch->qstats.drops++; |
138 | return ret; | 139 | return ret; |
139 | } | 140 | } |
140 | 141 | ||
141 | sch->q.qlen++; | 142 | sch->q.qlen++; |
142 | sch->bstats.bytes += skb->len; | 143 | sch->bstats.bytes += qdisc_pkt_len(skb); |
143 | sch->bstats.packets++; | 144 | sch->bstats.packets++; |
144 | return 0; | 145 | return 0; |
145 | } | 146 | } |
@@ -180,7 +181,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch) | |||
180 | psched_time_t now; | 181 | psched_time_t now; |
181 | long toks; | 182 | long toks; |
182 | long ptoks = 0; | 183 | long ptoks = 0; |
183 | unsigned int len = skb->len; | 184 | unsigned int len = qdisc_pkt_len(skb); |
184 | 185 | ||
185 | now = psched_get_time(); | 186 | now = psched_get_time(); |
186 | toks = psched_tdiff_bounded(now, q->t_c, q->buffer); | 187 | toks = psched_tdiff_bounded(now, q->t_c, q->buffer); |
@@ -242,34 +243,6 @@ static void tbf_reset(struct Qdisc* sch) | |||
242 | qdisc_watchdog_cancel(&q->watchdog); | 243 | qdisc_watchdog_cancel(&q->watchdog); |
243 | } | 244 | } |
244 | 245 | ||
245 | static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit) | ||
246 | { | ||
247 | struct Qdisc *q; | ||
248 | struct nlattr *nla; | ||
249 | int ret; | ||
250 | |||
251 | q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops, | ||
252 | TC_H_MAKE(sch->handle, 1)); | ||
253 | if (q) { | ||
254 | nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), | ||
255 | GFP_KERNEL); | ||
256 | if (nla) { | ||
257 | nla->nla_type = RTM_NEWQDISC; | ||
258 | nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt)); | ||
259 | ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit; | ||
260 | |||
261 | ret = q->ops->change(q, nla); | ||
262 | kfree(nla); | ||
263 | |||
264 | if (ret == 0) | ||
265 | return q; | ||
266 | } | ||
267 | qdisc_destroy(q); | ||
268 | } | ||
269 | |||
270 | return NULL; | ||
271 | } | ||
272 | |||
273 | static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = { | 246 | static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = { |
274 | [TCA_TBF_PARMS] = { .len = sizeof(struct tc_tbf_qopt) }, | 247 | [TCA_TBF_PARMS] = { .len = sizeof(struct tc_tbf_qopt) }, |
275 | [TCA_TBF_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, | 248 | [TCA_TBF_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, |
@@ -322,8 +295,11 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt) | |||
322 | goto done; | 295 | goto done; |
323 | 296 | ||
324 | if (qopt->limit > 0) { | 297 | if (qopt->limit > 0) { |
325 | if ((child = tbf_create_dflt_qdisc(sch, qopt->limit)) == NULL) | 298 | child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit); |
299 | if (IS_ERR(child)) { | ||
300 | err = PTR_ERR(child); | ||
326 | goto done; | 301 | goto done; |
302 | } | ||
327 | } | 303 | } |
328 | 304 | ||
329 | sch_tree_lock(sch); | 305 | sch_tree_lock(sch); |
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 0444fd0f0d22..537223642b6e 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -78,12 +78,12 @@ struct teql_sched_data | |||
78 | static int | 78 | static int |
79 | teql_enqueue(struct sk_buff *skb, struct Qdisc* sch) | 79 | teql_enqueue(struct sk_buff *skb, struct Qdisc* sch) |
80 | { | 80 | { |
81 | struct net_device *dev = sch->dev; | 81 | struct net_device *dev = qdisc_dev(sch); |
82 | struct teql_sched_data *q = qdisc_priv(sch); | 82 | struct teql_sched_data *q = qdisc_priv(sch); |
83 | 83 | ||
84 | if (q->q.qlen < dev->tx_queue_len) { | 84 | if (q->q.qlen < dev->tx_queue_len) { |
85 | __skb_queue_tail(&q->q, skb); | 85 | __skb_queue_tail(&q->q, skb); |
86 | sch->bstats.bytes += skb->len; | 86 | sch->bstats.bytes += qdisc_pkt_len(skb); |
87 | sch->bstats.packets++; | 87 | sch->bstats.packets++; |
88 | return 0; | 88 | return 0; |
89 | } | 89 | } |
@@ -107,17 +107,19 @@ static struct sk_buff * | |||
107 | teql_dequeue(struct Qdisc* sch) | 107 | teql_dequeue(struct Qdisc* sch) |
108 | { | 108 | { |
109 | struct teql_sched_data *dat = qdisc_priv(sch); | 109 | struct teql_sched_data *dat = qdisc_priv(sch); |
110 | struct netdev_queue *dat_queue; | ||
110 | struct sk_buff *skb; | 111 | struct sk_buff *skb; |
111 | 112 | ||
112 | skb = __skb_dequeue(&dat->q); | 113 | skb = __skb_dequeue(&dat->q); |
114 | dat_queue = netdev_get_tx_queue(dat->m->dev, 0); | ||
113 | if (skb == NULL) { | 115 | if (skb == NULL) { |
114 | struct net_device *m = dat->m->dev->qdisc->dev; | 116 | struct net_device *m = qdisc_dev(dat_queue->qdisc); |
115 | if (m) { | 117 | if (m) { |
116 | dat->m->slaves = sch; | 118 | dat->m->slaves = sch; |
117 | netif_wake_queue(m); | 119 | netif_wake_queue(m); |
118 | } | 120 | } |
119 | } | 121 | } |
120 | sch->q.qlen = dat->q.qlen + dat->m->dev->qdisc->q.qlen; | 122 | sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen; |
121 | return skb; | 123 | return skb; |
122 | } | 124 | } |
123 | 125 | ||
@@ -153,10 +155,16 @@ teql_destroy(struct Qdisc* sch) | |||
153 | if (q == master->slaves) { | 155 | if (q == master->slaves) { |
154 | master->slaves = NEXT_SLAVE(q); | 156 | master->slaves = NEXT_SLAVE(q); |
155 | if (q == master->slaves) { | 157 | if (q == master->slaves) { |
158 | struct netdev_queue *txq; | ||
159 | spinlock_t *root_lock; | ||
160 | |||
161 | txq = netdev_get_tx_queue(master->dev, 0); | ||
156 | master->slaves = NULL; | 162 | master->slaves = NULL; |
157 | spin_lock_bh(&master->dev->queue_lock); | 163 | |
158 | qdisc_reset(master->dev->qdisc); | 164 | root_lock = qdisc_root_lock(txq->qdisc); |
159 | spin_unlock_bh(&master->dev->queue_lock); | 165 | spin_lock_bh(root_lock); |
166 | qdisc_reset(txq->qdisc); | ||
167 | spin_unlock_bh(root_lock); | ||
160 | } | 168 | } |
161 | } | 169 | } |
162 | skb_queue_purge(&dat->q); | 170 | skb_queue_purge(&dat->q); |
@@ -170,7 +178,7 @@ teql_destroy(struct Qdisc* sch) | |||
170 | 178 | ||
171 | static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) | 179 | static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) |
172 | { | 180 | { |
173 | struct net_device *dev = sch->dev; | 181 | struct net_device *dev = qdisc_dev(sch); |
174 | struct teql_master *m = (struct teql_master*)sch->ops; | 182 | struct teql_master *m = (struct teql_master*)sch->ops; |
175 | struct teql_sched_data *q = qdisc_priv(sch); | 183 | struct teql_sched_data *q = qdisc_priv(sch); |
176 | 184 | ||
@@ -216,7 +224,8 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) | |||
216 | static int | 224 | static int |
217 | __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) | 225 | __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) |
218 | { | 226 | { |
219 | struct teql_sched_data *q = qdisc_priv(dev->qdisc); | 227 | struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); |
228 | struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc); | ||
220 | struct neighbour *mn = skb->dst->neighbour; | 229 | struct neighbour *mn = skb->dst->neighbour; |
221 | struct neighbour *n = q->ncache; | 230 | struct neighbour *n = q->ncache; |
222 | 231 | ||
@@ -252,7 +261,8 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device * | |||
252 | static inline int teql_resolve(struct sk_buff *skb, | 261 | static inline int teql_resolve(struct sk_buff *skb, |
253 | struct sk_buff *skb_res, struct net_device *dev) | 262 | struct sk_buff *skb_res, struct net_device *dev) |
254 | { | 263 | { |
255 | if (dev->qdisc == &noop_qdisc) | 264 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); |
265 | if (txq->qdisc == &noop_qdisc) | ||
256 | return -ENODEV; | 266 | return -ENODEV; |
257 | 267 | ||
258 | if (dev->header_ops == NULL || | 268 | if (dev->header_ops == NULL || |
@@ -268,7 +278,6 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) | |||
268 | struct Qdisc *start, *q; | 278 | struct Qdisc *start, *q; |
269 | int busy; | 279 | int busy; |
270 | int nores; | 280 | int nores; |
271 | int len = skb->len; | ||
272 | int subq = skb_get_queue_mapping(skb); | 281 | int subq = skb_get_queue_mapping(skb); |
273 | struct sk_buff *skb_res = NULL; | 282 | struct sk_buff *skb_res = NULL; |
274 | 283 | ||
@@ -282,12 +291,13 @@ restart: | |||
282 | goto drop; | 291 | goto drop; |
283 | 292 | ||
284 | do { | 293 | do { |
285 | struct net_device *slave = q->dev; | 294 | struct net_device *slave = qdisc_dev(q); |
295 | struct netdev_queue *slave_txq; | ||
286 | 296 | ||
287 | if (slave->qdisc_sleeping != q) | 297 | slave_txq = netdev_get_tx_queue(slave, 0); |
298 | if (slave_txq->qdisc_sleeping != q) | ||
288 | continue; | 299 | continue; |
289 | if (netif_queue_stopped(slave) || | 300 | if (__netif_subqueue_stopped(slave, subq) || |
290 | __netif_subqueue_stopped(slave, subq) || | ||
291 | !netif_running(slave)) { | 301 | !netif_running(slave)) { |
292 | busy = 1; | 302 | busy = 1; |
293 | continue; | 303 | continue; |
@@ -296,14 +306,14 @@ restart: | |||
296 | switch (teql_resolve(skb, skb_res, slave)) { | 306 | switch (teql_resolve(skb, skb_res, slave)) { |
297 | case 0: | 307 | case 0: |
298 | if (netif_tx_trylock(slave)) { | 308 | if (netif_tx_trylock(slave)) { |
299 | if (!netif_queue_stopped(slave) && | 309 | if (!__netif_subqueue_stopped(slave, subq) && |
300 | !__netif_subqueue_stopped(slave, subq) && | ||
301 | slave->hard_start_xmit(skb, slave) == 0) { | 310 | slave->hard_start_xmit(skb, slave) == 0) { |
302 | netif_tx_unlock(slave); | 311 | netif_tx_unlock(slave); |
303 | master->slaves = NEXT_SLAVE(q); | 312 | master->slaves = NEXT_SLAVE(q); |
304 | netif_wake_queue(dev); | 313 | netif_wake_queue(dev); |
305 | master->stats.tx_packets++; | 314 | master->stats.tx_packets++; |
306 | master->stats.tx_bytes += len; | 315 | master->stats.tx_bytes += |
316 | qdisc_pkt_len(skb); | ||
307 | return 0; | 317 | return 0; |
308 | } | 318 | } |
309 | netif_tx_unlock(slave); | 319 | netif_tx_unlock(slave); |
@@ -352,7 +362,7 @@ static int teql_master_open(struct net_device *dev) | |||
352 | 362 | ||
353 | q = m->slaves; | 363 | q = m->slaves; |
354 | do { | 364 | do { |
355 | struct net_device *slave = q->dev; | 365 | struct net_device *slave = qdisc_dev(q); |
356 | 366 | ||
357 | if (slave == NULL) | 367 | if (slave == NULL) |
358 | return -EUNATCH; | 368 | return -EUNATCH; |
@@ -403,7 +413,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu) | |||
403 | q = m->slaves; | 413 | q = m->slaves; |
404 | if (q) { | 414 | if (q) { |
405 | do { | 415 | do { |
406 | if (new_mtu > q->dev->mtu) | 416 | if (new_mtu > qdisc_dev(q)->mtu) |
407 | return -EINVAL; | 417 | return -EINVAL; |
408 | } while ((q=NEXT_SLAVE(q)) != m->slaves); | 418 | } while ((q=NEXT_SLAVE(q)) != m->slaves); |
409 | } | 419 | } |
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig index 0b79f869c4ea..58b3e882a187 100644 --- a/net/sctp/Kconfig +++ b/net/sctp/Kconfig | |||
@@ -47,11 +47,11 @@ config SCTP_DBG_MSG | |||
47 | 47 | ||
48 | config SCTP_DBG_OBJCNT | 48 | config SCTP_DBG_OBJCNT |
49 | bool "SCTP: Debug object counts" | 49 | bool "SCTP: Debug object counts" |
50 | depends on PROC_FS | ||
50 | help | 51 | help |
51 | If you say Y, this will enable debugging support for counting the | 52 | If you say Y, this will enable debugging support for counting the |
52 | type of objects that are currently allocated. This is useful for | 53 | type of objects that are currently allocated. This is useful for |
53 | identifying memory leaks. If the /proc filesystem is enabled this | 54 | identifying memory leaks. This debug information can be viewed by |
54 | debug information can be viewed by | ||
55 | 'cat /proc/net/sctp/sctp_dbg_objcnt' | 55 | 'cat /proc/net/sctp/sctp_dbg_objcnt' |
56 | 56 | ||
57 | If unsure, say N | 57 | If unsure, say N |
diff --git a/net/sctp/Makefile b/net/sctp/Makefile index f5356b9d5ee3..6b794734380a 100644 --- a/net/sctp/Makefile +++ b/net/sctp/Makefile | |||
@@ -9,10 +9,10 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \ | |||
9 | transport.o chunk.o sm_make_chunk.o ulpevent.o \ | 9 | transport.o chunk.o sm_make_chunk.o ulpevent.o \ |
10 | inqueue.o outqueue.o ulpqueue.o command.o \ | 10 | inqueue.o outqueue.o ulpqueue.o command.o \ |
11 | tsnmap.o bind_addr.o socket.o primitive.o \ | 11 | tsnmap.o bind_addr.o socket.o primitive.o \ |
12 | output.o input.o debug.o ssnmap.o proc.o \ | 12 | output.o input.o debug.o ssnmap.o auth.o |
13 | auth.o | ||
14 | 13 | ||
15 | sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o | 14 | sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o |
15 | sctp-$(CONFIG_PROC_FS) += proc.o | ||
16 | sctp-$(CONFIG_SYSCTL) += sysctl.o | 16 | sctp-$(CONFIG_SYSCTL) += sysctl.o |
17 | 17 | ||
18 | sctp-$(subst m,y,$(CONFIG_IPV6)) += ipv6.o | 18 | sctp-$(subst m,y,$(CONFIG_IPV6)) += ipv6.o |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 024c3ebd9661..ec2a0a33fd78 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -136,6 +136,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a | |||
136 | 136 | ||
137 | /* Set association default SACK delay */ | 137 | /* Set association default SACK delay */ |
138 | asoc->sackdelay = msecs_to_jiffies(sp->sackdelay); | 138 | asoc->sackdelay = msecs_to_jiffies(sp->sackdelay); |
139 | asoc->sackfreq = sp->sackfreq; | ||
139 | 140 | ||
140 | /* Set the association default flags controlling | 141 | /* Set the association default flags controlling |
141 | * Heartbeat, SACK delay, and Path MTU Discovery. | 142 | * Heartbeat, SACK delay, and Path MTU Discovery. |
@@ -261,6 +262,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a | |||
261 | * already received one packet.] | 262 | * already received one packet.] |
262 | */ | 263 | */ |
263 | asoc->peer.sack_needed = 1; | 264 | asoc->peer.sack_needed = 1; |
265 | asoc->peer.sack_cnt = 0; | ||
264 | 266 | ||
265 | /* Assume that the peer will tell us if he recognizes ASCONF | 267 | /* Assume that the peer will tell us if he recognizes ASCONF |
266 | * as part of INIT exchange. | 268 | * as part of INIT exchange. |
@@ -624,6 +626,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, | |||
624 | * association configured value. | 626 | * association configured value. |
625 | */ | 627 | */ |
626 | peer->sackdelay = asoc->sackdelay; | 628 | peer->sackdelay = asoc->sackdelay; |
629 | peer->sackfreq = asoc->sackfreq; | ||
627 | 630 | ||
628 | /* Enable/disable heartbeat, SACK delay, and path MTU discovery | 631 | /* Enable/disable heartbeat, SACK delay, and path MTU discovery |
629 | * based on association setting. | 632 | * based on association setting. |
@@ -650,6 +653,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, | |||
650 | 653 | ||
651 | SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to " | 654 | SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to " |
652 | "%d\n", asoc, asoc->pathmtu); | 655 | "%d\n", asoc, asoc->pathmtu); |
656 | peer->pmtu_pending = 0; | ||
653 | 657 | ||
654 | asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu); | 658 | asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu); |
655 | 659 | ||
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index 80e6df06967a..f62bc2468935 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c | |||
@@ -348,6 +348,43 @@ int sctp_bind_addr_match(struct sctp_bind_addr *bp, | |||
348 | return match; | 348 | return match; |
349 | } | 349 | } |
350 | 350 | ||
351 | /* Does the address 'addr' conflict with any addresses in | ||
352 | * the bp. | ||
353 | */ | ||
354 | int sctp_bind_addr_conflict(struct sctp_bind_addr *bp, | ||
355 | const union sctp_addr *addr, | ||
356 | struct sctp_sock *bp_sp, | ||
357 | struct sctp_sock *addr_sp) | ||
358 | { | ||
359 | struct sctp_sockaddr_entry *laddr; | ||
360 | int conflict = 0; | ||
361 | struct sctp_sock *sp; | ||
362 | |||
363 | /* Pick the IPv6 socket as the basis of comparison | ||
364 | * since it's usually a superset of the IPv4. | ||
365 | * If there is no IPv6 socket, then default to bind_addr. | ||
366 | */ | ||
367 | if (sctp_opt2sk(bp_sp)->sk_family == AF_INET6) | ||
368 | sp = bp_sp; | ||
369 | else if (sctp_opt2sk(addr_sp)->sk_family == AF_INET6) | ||
370 | sp = addr_sp; | ||
371 | else | ||
372 | sp = bp_sp; | ||
373 | |||
374 | rcu_read_lock(); | ||
375 | list_for_each_entry_rcu(laddr, &bp->address_list, list) { | ||
376 | if (!laddr->valid) | ||
377 | continue; | ||
378 | |||
379 | conflict = sp->pf->cmp_addr(&laddr->a, addr, sp); | ||
380 | if (conflict) | ||
381 | break; | ||
382 | } | ||
383 | rcu_read_unlock(); | ||
384 | |||
385 | return conflict; | ||
386 | } | ||
387 | |||
351 | /* Get the state of the entry in the bind_addr_list */ | 388 | /* Get the state of the entry in the bind_addr_list */ |
352 | int sctp_bind_addr_state(const struct sctp_bind_addr *bp, | 389 | int sctp_bind_addr_state(const struct sctp_bind_addr *bp, |
353 | const union sctp_addr *addr) | 390 | const union sctp_addr *addr) |
diff --git a/net/sctp/input.c b/net/sctp/input.c index ca6b022b1df2..a49fa80b57b9 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -61,6 +61,7 @@ | |||
61 | #include <net/sctp/sctp.h> | 61 | #include <net/sctp/sctp.h> |
62 | #include <net/sctp/sm.h> | 62 | #include <net/sctp/sm.h> |
63 | #include <net/sctp/checksum.h> | 63 | #include <net/sctp/checksum.h> |
64 | #include <net/net_namespace.h> | ||
64 | 65 | ||
65 | /* Forward declarations for internal helpers. */ | 66 | /* Forward declarations for internal helpers. */ |
66 | static int sctp_rcv_ootb(struct sk_buff *); | 67 | static int sctp_rcv_ootb(struct sk_buff *); |
@@ -82,8 +83,8 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb) | |||
82 | { | 83 | { |
83 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 84 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
84 | struct sctphdr *sh = sctp_hdr(skb); | 85 | struct sctphdr *sh = sctp_hdr(skb); |
85 | __u32 cmp = ntohl(sh->checksum); | 86 | __be32 cmp = sh->checksum; |
86 | __u32 val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb)); | 87 | __be32 val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb)); |
87 | 88 | ||
88 | for (; list; list = list->next) | 89 | for (; list; list = list->next) |
89 | val = sctp_update_cksum((__u8 *)list->data, skb_headlen(list), | 90 | val = sctp_update_cksum((__u8 *)list->data, skb_headlen(list), |
@@ -430,6 +431,9 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb, | |||
430 | struct sock *sk = NULL; | 431 | struct sock *sk = NULL; |
431 | struct sctp_association *asoc; | 432 | struct sctp_association *asoc; |
432 | struct sctp_transport *transport = NULL; | 433 | struct sctp_transport *transport = NULL; |
434 | struct sctp_init_chunk *chunkhdr; | ||
435 | __u32 vtag = ntohl(sctphdr->vtag); | ||
436 | int len = skb->len - ((void *)sctphdr - (void *)skb->data); | ||
433 | 437 | ||
434 | *app = NULL; *tpp = NULL; | 438 | *app = NULL; *tpp = NULL; |
435 | 439 | ||
@@ -451,8 +455,28 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb, | |||
451 | 455 | ||
452 | sk = asoc->base.sk; | 456 | sk = asoc->base.sk; |
453 | 457 | ||
454 | if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) { | 458 | /* RFC 4960, Appendix C. ICMP Handling |
455 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | 459 | * |
460 | * ICMP6) An implementation MUST validate that the Verification Tag | ||
461 | * contained in the ICMP message matches the Verification Tag of | ||
462 | * the peer. If the Verification Tag is not 0 and does NOT | ||
463 | * match, discard the ICMP message. If it is 0 and the ICMP | ||
464 | * message contains enough bytes to verify that the chunk type is | ||
465 | * an INIT chunk and that the Initiate Tag matches the tag of the | ||
466 | * peer, continue with ICMP7. If the ICMP message is too short | ||
467 | * or the chunk type or the Initiate Tag does not match, silently | ||
468 | * discard the packet. | ||
469 | */ | ||
470 | if (vtag == 0) { | ||
471 | chunkhdr = (struct sctp_init_chunk *)((void *)sctphdr | ||
472 | + sizeof(struct sctphdr)); | ||
473 | if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t) | ||
474 | + sizeof(__be32) || | ||
475 | chunkhdr->chunk_hdr.type != SCTP_CID_INIT || | ||
476 | ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) { | ||
477 | goto out; | ||
478 | } | ||
479 | } else if (vtag != asoc->c.peer_vtag) { | ||
456 | goto out; | 480 | goto out; |
457 | } | 481 | } |
458 | 482 | ||
@@ -462,7 +486,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb, | |||
462 | * servers this needs to be solved differently. | 486 | * servers this needs to be solved differently. |
463 | */ | 487 | */ |
464 | if (sock_owned_by_user(sk)) | 488 | if (sock_owned_by_user(sk)) |
465 | NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); | 489 | NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS); |
466 | 490 | ||
467 | *app = asoc; | 491 | *app = asoc; |
468 | *tpp = transport; | 492 | *tpp = transport; |
@@ -511,7 +535,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) | |||
511 | int err; | 535 | int err; |
512 | 536 | ||
513 | if (skb->len < ihlen + 8) { | 537 | if (skb->len < ihlen + 8) { |
514 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | 538 | ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS); |
515 | return; | 539 | return; |
516 | } | 540 | } |
517 | 541 | ||
@@ -525,7 +549,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) | |||
525 | skb->network_header = saveip; | 549 | skb->network_header = saveip; |
526 | skb->transport_header = savesctp; | 550 | skb->transport_header = savesctp; |
527 | if (!sk) { | 551 | if (!sk) { |
528 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | 552 | ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS); |
529 | return; | 553 | return; |
530 | } | 554 | } |
531 | /* Warning: The sock lock is held. Remember to call | 555 | /* Warning: The sock lock is held. Remember to call |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index a2f4d4d51593..a238d6834b33 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -818,7 +818,7 @@ static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp) | |||
818 | return 1; | 818 | return 1; |
819 | /* v4-mapped-v6 addresses */ | 819 | /* v4-mapped-v6 addresses */ |
820 | case AF_INET: | 820 | case AF_INET: |
821 | if (!__ipv6_only_sock(sctp_opt2sk(sp)) && sp->v4mapped) | 821 | if (!__ipv6_only_sock(sctp_opt2sk(sp))) |
822 | return 1; | 822 | return 1; |
823 | default: | 823 | default: |
824 | return 0; | 824 | return 0; |
@@ -840,6 +840,11 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1, | |||
840 | 840 | ||
841 | if (!af1 || !af2) | 841 | if (!af1 || !af2) |
842 | return 0; | 842 | return 0; |
843 | |||
844 | /* If the socket is IPv6 only, v4 addrs will not match */ | ||
845 | if (__ipv6_only_sock(sctp_opt2sk(opt)) && af1 != af2) | ||
846 | return 0; | ||
847 | |||
843 | /* Today, wildcard AF_INET/AF_INET6. */ | 848 | /* Today, wildcard AF_INET/AF_INET6. */ |
844 | if (sctp_is_any(addr1) || sctp_is_any(addr2)) | 849 | if (sctp_is_any(addr1) || sctp_is_any(addr2)) |
845 | return 1; | 850 | return 1; |
@@ -876,7 +881,11 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr) | |||
876 | return 0; | 881 | return 0; |
877 | } | 882 | } |
878 | dev_put(dev); | 883 | dev_put(dev); |
884 | } else if (type == IPV6_ADDR_MAPPED) { | ||
885 | if (!opt->v4mapped) | ||
886 | return 0; | ||
879 | } | 887 | } |
888 | |||
880 | af = opt->pf->af; | 889 | af = opt->pf->af; |
881 | } | 890 | } |
882 | return af->available(addr, opt); | 891 | return af->available(addr, opt); |
@@ -919,9 +928,12 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr) | |||
919 | static int sctp_inet6_supported_addrs(const struct sctp_sock *opt, | 928 | static int sctp_inet6_supported_addrs(const struct sctp_sock *opt, |
920 | __be16 *types) | 929 | __be16 *types) |
921 | { | 930 | { |
922 | types[0] = SCTP_PARAM_IPV4_ADDRESS; | 931 | types[0] = SCTP_PARAM_IPV6_ADDRESS; |
923 | types[1] = SCTP_PARAM_IPV6_ADDRESS; | 932 | if (!opt || !ipv6_only_sock(sctp_opt2sk(opt))) { |
924 | return 2; | 933 | types[1] = SCTP_PARAM_IPV4_ADDRESS; |
934 | return 2; | ||
935 | } | ||
936 | return 1; | ||
925 | } | 937 | } |
926 | 938 | ||
927 | static const struct proto_ops inet6_seqpacket_ops = { | 939 | static const struct proto_ops inet6_seqpacket_ops = { |
diff --git a/net/sctp/output.c b/net/sctp/output.c index 6d45bae93b46..45684646b1db 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/init.h> | 50 | #include <linux/init.h> |
51 | #include <net/inet_ecn.h> | 51 | #include <net/inet_ecn.h> |
52 | #include <net/icmp.h> | 52 | #include <net/icmp.h> |
53 | #include <net/net_namespace.h> | ||
53 | 54 | ||
54 | #ifndef TEST_FRAME | 55 | #ifndef TEST_FRAME |
55 | #include <net/tcp.h> | 56 | #include <net/tcp.h> |
@@ -157,7 +158,8 @@ void sctp_packet_free(struct sctp_packet *packet) | |||
157 | * packet can be sent only after receiving the COOKIE_ACK. | 158 | * packet can be sent only after receiving the COOKIE_ACK. |
158 | */ | 159 | */ |
159 | sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, | 160 | sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, |
160 | struct sctp_chunk *chunk) | 161 | struct sctp_chunk *chunk, |
162 | int one_packet) | ||
161 | { | 163 | { |
162 | sctp_xmit_t retval; | 164 | sctp_xmit_t retval; |
163 | int error = 0; | 165 | int error = 0; |
@@ -175,7 +177,9 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, | |||
175 | /* If we have an empty packet, then we can NOT ever | 177 | /* If we have an empty packet, then we can NOT ever |
176 | * return PMTU_FULL. | 178 | * return PMTU_FULL. |
177 | */ | 179 | */ |
178 | retval = sctp_packet_append_chunk(packet, chunk); | 180 | if (!one_packet) |
181 | retval = sctp_packet_append_chunk(packet, | ||
182 | chunk); | ||
179 | } | 183 | } |
180 | break; | 184 | break; |
181 | 185 | ||
@@ -361,7 +365,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
361 | struct sctp_transport *tp = packet->transport; | 365 | struct sctp_transport *tp = packet->transport; |
362 | struct sctp_association *asoc = tp->asoc; | 366 | struct sctp_association *asoc = tp->asoc; |
363 | struct sctphdr *sh; | 367 | struct sctphdr *sh; |
364 | __u32 crc32 = 0; | 368 | __be32 crc32 = __constant_cpu_to_be32(0); |
365 | struct sk_buff *nskb; | 369 | struct sk_buff *nskb; |
366 | struct sctp_chunk *chunk, *tmp; | 370 | struct sctp_chunk *chunk, *tmp; |
367 | struct sock *sk; | 371 | struct sock *sk; |
@@ -534,7 +538,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
534 | /* 3) Put the resultant value into the checksum field in the | 538 | /* 3) Put the resultant value into the checksum field in the |
535 | * common header, and leave the rest of the bits unchanged. | 539 | * common header, and leave the rest of the bits unchanged. |
536 | */ | 540 | */ |
537 | sh->checksum = htonl(crc32); | 541 | sh->checksum = crc32; |
538 | 542 | ||
539 | /* IP layer ECN support | 543 | /* IP layer ECN support |
540 | * From RFC 2481 | 544 | * From RFC 2481 |
@@ -592,7 +596,7 @@ out: | |||
592 | return err; | 596 | return err; |
593 | no_route: | 597 | no_route: |
594 | kfree_skb(nskb); | 598 | kfree_skb(nskb); |
595 | IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); | 599 | IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES); |
596 | 600 | ||
597 | /* FIXME: Returning the 'err' will effect all the associations | 601 | /* FIXME: Returning the 'err' will effect all the associations |
598 | * associated with a socket, although only one of the paths of the | 602 | * associated with a socket, although only one of the paths of the |
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index ace6770e9048..70ead8dc3485 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -702,6 +702,7 @@ int sctp_outq_uncork(struct sctp_outq *q) | |||
702 | return error; | 702 | return error; |
703 | } | 703 | } |
704 | 704 | ||
705 | |||
705 | /* | 706 | /* |
706 | * Try to flush an outqueue. | 707 | * Try to flush an outqueue. |
707 | * | 708 | * |
@@ -725,6 +726,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | |||
725 | sctp_xmit_t status; | 726 | sctp_xmit_t status; |
726 | int error = 0; | 727 | int error = 0; |
727 | int start_timer = 0; | 728 | int start_timer = 0; |
729 | int one_packet = 0; | ||
728 | 730 | ||
729 | /* These transports have chunks to send. */ | 731 | /* These transports have chunks to send. */ |
730 | struct list_head transport_list; | 732 | struct list_head transport_list; |
@@ -830,20 +832,33 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | |||
830 | if (sctp_test_T_bit(chunk)) { | 832 | if (sctp_test_T_bit(chunk)) { |
831 | packet->vtag = asoc->c.my_vtag; | 833 | packet->vtag = asoc->c.my_vtag; |
832 | } | 834 | } |
833 | case SCTP_CID_SACK: | 835 | /* The following chunks are "response" chunks, i.e. |
834 | case SCTP_CID_HEARTBEAT: | 836 | * they are generated in response to something we |
837 | * received. If we are sending these, then we can | ||
838 | * send only 1 packet containing these chunks. | ||
839 | */ | ||
835 | case SCTP_CID_HEARTBEAT_ACK: | 840 | case SCTP_CID_HEARTBEAT_ACK: |
836 | case SCTP_CID_SHUTDOWN: | ||
837 | case SCTP_CID_SHUTDOWN_ACK: | 841 | case SCTP_CID_SHUTDOWN_ACK: |
838 | case SCTP_CID_ERROR: | ||
839 | case SCTP_CID_COOKIE_ECHO: | ||
840 | case SCTP_CID_COOKIE_ACK: | 842 | case SCTP_CID_COOKIE_ACK: |
841 | case SCTP_CID_ECN_ECNE: | 843 | case SCTP_CID_COOKIE_ECHO: |
844 | case SCTP_CID_ERROR: | ||
842 | case SCTP_CID_ECN_CWR: | 845 | case SCTP_CID_ECN_CWR: |
843 | case SCTP_CID_ASCONF: | ||
844 | case SCTP_CID_ASCONF_ACK: | 846 | case SCTP_CID_ASCONF_ACK: |
847 | one_packet = 1; | ||
848 | /* Fall throught */ | ||
849 | |||
850 | case SCTP_CID_SACK: | ||
851 | case SCTP_CID_HEARTBEAT: | ||
852 | case SCTP_CID_SHUTDOWN: | ||
853 | case SCTP_CID_ECN_ECNE: | ||
854 | case SCTP_CID_ASCONF: | ||
845 | case SCTP_CID_FWD_TSN: | 855 | case SCTP_CID_FWD_TSN: |
846 | sctp_packet_transmit_chunk(packet, chunk); | 856 | status = sctp_packet_transmit_chunk(packet, chunk, |
857 | one_packet); | ||
858 | if (status != SCTP_XMIT_OK) { | ||
859 | /* put the chunk back */ | ||
860 | list_add(&chunk->list, &q->control_chunk_list); | ||
861 | } | ||
847 | break; | 862 | break; |
848 | 863 | ||
849 | default: | 864 | default: |
@@ -974,7 +989,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | |||
974 | atomic_read(&chunk->skb->users) : -1); | 989 | atomic_read(&chunk->skb->users) : -1); |
975 | 990 | ||
976 | /* Add the chunk to the packet. */ | 991 | /* Add the chunk to the packet. */ |
977 | status = sctp_packet_transmit_chunk(packet, chunk); | 992 | status = sctp_packet_transmit_chunk(packet, chunk, 0); |
978 | 993 | ||
979 | switch (status) { | 994 | switch (status) { |
980 | case SCTP_XMIT_PMTU_FULL: | 995 | case SCTP_XMIT_PMTU_FULL: |
@@ -1239,7 +1254,6 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) | |||
1239 | * Make sure the empty queue handler will get run later. | 1254 | * Make sure the empty queue handler will get run later. |
1240 | */ | 1255 | */ |
1241 | q->empty = (list_empty(&q->out_chunk_list) && | 1256 | q->empty = (list_empty(&q->out_chunk_list) && |
1242 | list_empty(&q->control_chunk_list) && | ||
1243 | list_empty(&q->retransmit)); | 1257 | list_empty(&q->retransmit)); |
1244 | if (!q->empty) | 1258 | if (!q->empty) |
1245 | goto finish; | 1259 | goto finish; |
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 0aba759cb9b7..5dd89831eceb 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -383,3 +383,144 @@ void sctp_assocs_proc_exit(void) | |||
383 | { | 383 | { |
384 | remove_proc_entry("assocs", proc_net_sctp); | 384 | remove_proc_entry("assocs", proc_net_sctp); |
385 | } | 385 | } |
386 | |||
387 | static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos) | ||
388 | { | ||
389 | if (*pos >= sctp_assoc_hashsize) | ||
390 | return NULL; | ||
391 | |||
392 | if (*pos < 0) | ||
393 | *pos = 0; | ||
394 | |||
395 | if (*pos == 0) | ||
396 | seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX " | ||
397 | "REM_ADDR_RTX START\n"); | ||
398 | |||
399 | return (void *)pos; | ||
400 | } | ||
401 | |||
402 | static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
403 | { | ||
404 | if (++*pos >= sctp_assoc_hashsize) | ||
405 | return NULL; | ||
406 | |||
407 | return pos; | ||
408 | } | ||
409 | |||
410 | static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v) | ||
411 | { | ||
412 | return; | ||
413 | } | ||
414 | |||
415 | static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) | ||
416 | { | ||
417 | struct sctp_hashbucket *head; | ||
418 | struct sctp_ep_common *epb; | ||
419 | struct sctp_association *assoc; | ||
420 | struct hlist_node *node; | ||
421 | struct sctp_transport *tsp; | ||
422 | int hash = *(loff_t *)v; | ||
423 | |||
424 | if (hash >= sctp_assoc_hashsize) | ||
425 | return -ENOMEM; | ||
426 | |||
427 | head = &sctp_assoc_hashtable[hash]; | ||
428 | sctp_local_bh_disable(); | ||
429 | read_lock(&head->lock); | ||
430 | sctp_for_each_hentry(epb, node, &head->chain) { | ||
431 | assoc = sctp_assoc(epb); | ||
432 | list_for_each_entry(tsp, &assoc->peer.transport_addr_list, | ||
433 | transports) { | ||
434 | /* | ||
435 | * The remote address (ADDR) | ||
436 | */ | ||
437 | tsp->af_specific->seq_dump_addr(seq, &tsp->ipaddr); | ||
438 | seq_printf(seq, " "); | ||
439 | |||
440 | /* | ||
441 | * The association ID (ASSOC_ID) | ||
442 | */ | ||
443 | seq_printf(seq, "%d ", tsp->asoc->assoc_id); | ||
444 | |||
445 | /* | ||
446 | * If the Heartbeat is active (HB_ACT) | ||
447 | * Note: 1 = Active, 0 = Inactive | ||
448 | */ | ||
449 | seq_printf(seq, "%d ", timer_pending(&tsp->hb_timer)); | ||
450 | |||
451 | /* | ||
452 | * Retransmit time out (RTO) | ||
453 | */ | ||
454 | seq_printf(seq, "%lu ", tsp->rto); | ||
455 | |||
456 | /* | ||
457 | * Maximum path retransmit count (PATH_MAX_RTX) | ||
458 | */ | ||
459 | seq_printf(seq, "%d ", tsp->pathmaxrxt); | ||
460 | |||
461 | /* | ||
462 | * remote address retransmit count (REM_ADDR_RTX) | ||
463 | * Note: We don't have a way to tally this at the moment | ||
464 | * so lets just leave it as zero for the moment | ||
465 | */ | ||
466 | seq_printf(seq, "0 "); | ||
467 | |||
468 | /* | ||
469 | * remote address start time (START). This is also not | ||
470 | * currently implemented, but we can record it with a | ||
471 | * jiffies marker in a subsequent patch | ||
472 | */ | ||
473 | seq_printf(seq, "0"); | ||
474 | |||
475 | seq_printf(seq, "\n"); | ||
476 | } | ||
477 | } | ||
478 | |||
479 | read_unlock(&head->lock); | ||
480 | sctp_local_bh_enable(); | ||
481 | |||
482 | return 0; | ||
483 | |||
484 | } | ||
485 | |||
486 | static const struct seq_operations sctp_remaddr_ops = { | ||
487 | .start = sctp_remaddr_seq_start, | ||
488 | .next = sctp_remaddr_seq_next, | ||
489 | .stop = sctp_remaddr_seq_stop, | ||
490 | .show = sctp_remaddr_seq_show, | ||
491 | }; | ||
492 | |||
493 | /* Cleanup the proc fs entry for 'remaddr' object. */ | ||
494 | void sctp_remaddr_proc_exit(void) | ||
495 | { | ||
496 | remove_proc_entry("remaddr", proc_net_sctp); | ||
497 | } | ||
498 | |||
499 | static int sctp_remaddr_seq_open(struct inode *inode, struct file *file) | ||
500 | { | ||
501 | return seq_open(file, &sctp_remaddr_ops); | ||
502 | } | ||
503 | |||
504 | static const struct file_operations sctp_remaddr_seq_fops = { | ||
505 | .open = sctp_remaddr_seq_open, | ||
506 | .read = seq_read, | ||
507 | .llseek = seq_lseek, | ||
508 | .release = seq_release, | ||
509 | }; | ||
510 | |||
511 | int __init sctp_remaddr_proc_init(void) | ||
512 | { | ||
513 | struct proc_dir_entry *p; | ||
514 | |||
515 | p = create_proc_entry("remaddr", S_IRUGO, proc_net_sctp); | ||
516 | if (!p) | ||
517 | return -ENOMEM; | ||
518 | p->proc_fops = &sctp_remaddr_seq_fops; | ||
519 | |||
520 | return 0; | ||
521 | } | ||
522 | |||
523 | void sctp_assoc_proc_exit(void) | ||
524 | { | ||
525 | remove_proc_entry("remaddr", proc_net_sctp); | ||
526 | } | ||
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 9258dfe784ae..a6e0818bcff5 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -52,6 +52,8 @@ | |||
52 | #include <linux/inetdevice.h> | 52 | #include <linux/inetdevice.h> |
53 | #include <linux/seq_file.h> | 53 | #include <linux/seq_file.h> |
54 | #include <linux/bootmem.h> | 54 | #include <linux/bootmem.h> |
55 | #include <linux/highmem.h> | ||
56 | #include <linux/swap.h> | ||
55 | #include <net/net_namespace.h> | 57 | #include <net/net_namespace.h> |
56 | #include <net/protocol.h> | 58 | #include <net/protocol.h> |
57 | #include <net/ip.h> | 59 | #include <net/ip.h> |
@@ -64,9 +66,12 @@ | |||
64 | 66 | ||
65 | /* Global data structures. */ | 67 | /* Global data structures. */ |
66 | struct sctp_globals sctp_globals __read_mostly; | 68 | struct sctp_globals sctp_globals __read_mostly; |
67 | struct proc_dir_entry *proc_net_sctp; | ||
68 | DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics) __read_mostly; | 69 | DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics) __read_mostly; |
69 | 70 | ||
71 | #ifdef CONFIG_PROC_FS | ||
72 | struct proc_dir_entry *proc_net_sctp; | ||
73 | #endif | ||
74 | |||
70 | struct idr sctp_assocs_id; | 75 | struct idr sctp_assocs_id; |
71 | DEFINE_SPINLOCK(sctp_assocs_id_lock); | 76 | DEFINE_SPINLOCK(sctp_assocs_id_lock); |
72 | 77 | ||
@@ -97,6 +102,7 @@ struct sock *sctp_get_ctl_sock(void) | |||
97 | /* Set up the proc fs entry for the SCTP protocol. */ | 102 | /* Set up the proc fs entry for the SCTP protocol. */ |
98 | static __init int sctp_proc_init(void) | 103 | static __init int sctp_proc_init(void) |
99 | { | 104 | { |
105 | #ifdef CONFIG_PROC_FS | ||
100 | if (!proc_net_sctp) { | 106 | if (!proc_net_sctp) { |
101 | struct proc_dir_entry *ent; | 107 | struct proc_dir_entry *ent; |
102 | ent = proc_mkdir("sctp", init_net.proc_net); | 108 | ent = proc_mkdir("sctp", init_net.proc_net); |
@@ -113,9 +119,13 @@ static __init int sctp_proc_init(void) | |||
113 | goto out_eps_proc_init; | 119 | goto out_eps_proc_init; |
114 | if (sctp_assocs_proc_init()) | 120 | if (sctp_assocs_proc_init()) |
115 | goto out_assocs_proc_init; | 121 | goto out_assocs_proc_init; |
122 | if (sctp_remaddr_proc_init()) | ||
123 | goto out_remaddr_proc_init; | ||
116 | 124 | ||
117 | return 0; | 125 | return 0; |
118 | 126 | ||
127 | out_remaddr_proc_init: | ||
128 | sctp_assocs_proc_exit(); | ||
119 | out_assocs_proc_init: | 129 | out_assocs_proc_init: |
120 | sctp_eps_proc_exit(); | 130 | sctp_eps_proc_exit(); |
121 | out_eps_proc_init: | 131 | out_eps_proc_init: |
@@ -127,6 +137,9 @@ out_snmp_proc_init: | |||
127 | } | 137 | } |
128 | out_nomem: | 138 | out_nomem: |
129 | return -ENOMEM; | 139 | return -ENOMEM; |
140 | #else | ||
141 | return 0; | ||
142 | #endif /* CONFIG_PROC_FS */ | ||
130 | } | 143 | } |
131 | 144 | ||
132 | /* Clean up the proc fs entry for the SCTP protocol. | 145 | /* Clean up the proc fs entry for the SCTP protocol. |
@@ -135,14 +148,17 @@ out_nomem: | |||
135 | */ | 148 | */ |
136 | static void sctp_proc_exit(void) | 149 | static void sctp_proc_exit(void) |
137 | { | 150 | { |
151 | #ifdef CONFIG_PROC_FS | ||
138 | sctp_snmp_proc_exit(); | 152 | sctp_snmp_proc_exit(); |
139 | sctp_eps_proc_exit(); | 153 | sctp_eps_proc_exit(); |
140 | sctp_assocs_proc_exit(); | 154 | sctp_assocs_proc_exit(); |
155 | sctp_remaddr_proc_exit(); | ||
141 | 156 | ||
142 | if (proc_net_sctp) { | 157 | if (proc_net_sctp) { |
143 | proc_net_sctp = NULL; | 158 | proc_net_sctp = NULL; |
144 | remove_proc_entry("sctp", init_net.proc_net); | 159 | remove_proc_entry("sctp", init_net.proc_net); |
145 | } | 160 | } |
161 | #endif | ||
146 | } | 162 | } |
147 | 163 | ||
148 | /* Private helper to extract ipv4 address and stash them in | 164 | /* Private helper to extract ipv4 address and stash them in |
@@ -367,6 +383,10 @@ static int sctp_v4_addr_valid(union sctp_addr *addr, | |||
367 | struct sctp_sock *sp, | 383 | struct sctp_sock *sp, |
368 | const struct sk_buff *skb) | 384 | const struct sk_buff *skb) |
369 | { | 385 | { |
386 | /* IPv4 addresses not allowed */ | ||
387 | if (sp && ipv6_only_sock(sctp_opt2sk(sp))) | ||
388 | return 0; | ||
389 | |||
370 | /* Is this a non-unicast address or a unusable SCTP address? */ | 390 | /* Is this a non-unicast address or a unusable SCTP address? */ |
371 | if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) | 391 | if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) |
372 | return 0; | 392 | return 0; |
@@ -390,6 +410,9 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp) | |||
390 | !sysctl_ip_nonlocal_bind) | 410 | !sysctl_ip_nonlocal_bind) |
391 | return 0; | 411 | return 0; |
392 | 412 | ||
413 | if (ipv6_only_sock(sctp_opt2sk(sp))) | ||
414 | return 0; | ||
415 | |||
393 | return 1; | 416 | return 1; |
394 | } | 417 | } |
395 | 418 | ||
@@ -645,7 +668,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, | |||
645 | struct sctp_sockaddr_entry *temp; | 668 | struct sctp_sockaddr_entry *temp; |
646 | int found = 0; | 669 | int found = 0; |
647 | 670 | ||
648 | if (dev_net(ifa->ifa_dev->dev) != &init_net) | 671 | if (!net_eq(dev_net(ifa->ifa_dev->dev), &init_net)) |
649 | return NOTIFY_DONE; | 672 | return NOTIFY_DONE; |
650 | 673 | ||
651 | switch (ev) { | 674 | switch (ev) { |
@@ -1059,6 +1082,7 @@ SCTP_STATIC __init int sctp_init(void) | |||
1059 | int status = -EINVAL; | 1082 | int status = -EINVAL; |
1060 | unsigned long goal; | 1083 | unsigned long goal; |
1061 | unsigned long limit; | 1084 | unsigned long limit; |
1085 | unsigned long nr_pages; | ||
1062 | int max_share; | 1086 | int max_share; |
1063 | int order; | 1087 | int order; |
1064 | 1088 | ||
@@ -1154,8 +1178,9 @@ SCTP_STATIC __init int sctp_init(void) | |||
1154 | * Note this initalizes the data in sctpv6_prot too | 1178 | * Note this initalizes the data in sctpv6_prot too |
1155 | * Unabashedly stolen from tcp_init | 1179 | * Unabashedly stolen from tcp_init |
1156 | */ | 1180 | */ |
1157 | limit = min(num_physpages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); | 1181 | nr_pages = totalram_pages - totalhigh_pages; |
1158 | limit = (limit * (num_physpages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); | 1182 | limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); |
1183 | limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); | ||
1159 | limit = max(limit, 128UL); | 1184 | limit = max(limit, 128UL); |
1160 | sysctl_sctp_mem[0] = limit / 4 * 3; | 1185 | sysctl_sctp_mem[0] = limit / 4 * 3; |
1161 | sysctl_sctp_mem[1] = limit; | 1186 | sysctl_sctp_mem[1] = limit; |
@@ -1165,7 +1190,7 @@ SCTP_STATIC __init int sctp_init(void) | |||
1165 | limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7); | 1190 | limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7); |
1166 | max_share = min(4UL*1024*1024, limit); | 1191 | max_share = min(4UL*1024*1024, limit); |
1167 | 1192 | ||
1168 | sysctl_sctp_rmem[0] = PAGE_SIZE; /* give each asoc 1 page min */ | 1193 | sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */ |
1169 | sysctl_sctp_rmem[1] = (1500 *(sizeof(struct sk_buff) + 1)); | 1194 | sysctl_sctp_rmem[1] = (1500 *(sizeof(struct sk_buff) + 1)); |
1170 | sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share); | 1195 | sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share); |
1171 | 1196 | ||
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index bbc7107c86cf..e8ca4e54981f 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -2364,8 +2364,13 @@ static int sctp_process_param(struct sctp_association *asoc, | |||
2364 | case SCTP_PARAM_IPV6_ADDRESS: | 2364 | case SCTP_PARAM_IPV6_ADDRESS: |
2365 | if (PF_INET6 != asoc->base.sk->sk_family) | 2365 | if (PF_INET6 != asoc->base.sk->sk_family) |
2366 | break; | 2366 | break; |
2367 | /* Fall through. */ | 2367 | goto do_addr_param; |
2368 | |||
2368 | case SCTP_PARAM_IPV4_ADDRESS: | 2369 | case SCTP_PARAM_IPV4_ADDRESS: |
2370 | /* v4 addresses are not allowed on v6-only socket */ | ||
2371 | if (ipv6_only_sock(asoc->base.sk)) | ||
2372 | break; | ||
2373 | do_addr_param: | ||
2369 | af = sctp_get_af_specific(param_type2af(param.p->type)); | 2374 | af = sctp_get_af_specific(param_type2af(param.p->type)); |
2370 | af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0); | 2375 | af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0); |
2371 | scope = sctp_scope(peer_addr); | 2376 | scope = sctp_scope(peer_addr); |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 23a9f1a95b7d..9732c797e8ed 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -190,20 +190,28 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force, | |||
190 | * unacknowledged DATA chunk. ... | 190 | * unacknowledged DATA chunk. ... |
191 | */ | 191 | */ |
192 | if (!asoc->peer.sack_needed) { | 192 | if (!asoc->peer.sack_needed) { |
193 | /* We will need a SACK for the next packet. */ | 193 | asoc->peer.sack_cnt++; |
194 | asoc->peer.sack_needed = 1; | ||
195 | 194 | ||
196 | /* Set the SACK delay timeout based on the | 195 | /* Set the SACK delay timeout based on the |
197 | * SACK delay for the last transport | 196 | * SACK delay for the last transport |
198 | * data was received from, or the default | 197 | * data was received from, or the default |
199 | * for the association. | 198 | * for the association. |
200 | */ | 199 | */ |
201 | if (trans) | 200 | if (trans) { |
201 | /* We will need a SACK for the next packet. */ | ||
202 | if (asoc->peer.sack_cnt >= trans->sackfreq - 1) | ||
203 | asoc->peer.sack_needed = 1; | ||
204 | |||
202 | asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = | 205 | asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = |
203 | trans->sackdelay; | 206 | trans->sackdelay; |
204 | else | 207 | } else { |
208 | /* We will need a SACK for the next packet. */ | ||
209 | if (asoc->peer.sack_cnt >= asoc->sackfreq - 1) | ||
210 | asoc->peer.sack_needed = 1; | ||
211 | |||
205 | asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = | 212 | asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = |
206 | asoc->sackdelay; | 213 | asoc->sackdelay; |
214 | } | ||
207 | 215 | ||
208 | /* Restart the SACK timer. */ | 216 | /* Restart the SACK timer. */ |
209 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | 217 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, |
@@ -216,6 +224,7 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force, | |||
216 | goto nomem; | 224 | goto nomem; |
217 | 225 | ||
218 | asoc->peer.sack_needed = 0; | 226 | asoc->peer.sack_needed = 0; |
227 | asoc->peer.sack_cnt = 0; | ||
219 | 228 | ||
220 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack)); | 229 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack)); |
221 | 230 | ||
@@ -655,7 +664,7 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds, | |||
655 | struct sctp_association *asoc, | 664 | struct sctp_association *asoc, |
656 | struct sctp_sackhdr *sackh) | 665 | struct sctp_sackhdr *sackh) |
657 | { | 666 | { |
658 | int err; | 667 | int err = 0; |
659 | 668 | ||
660 | if (sctp_outq_sack(&asoc->outqueue, sackh)) { | 669 | if (sctp_outq_sack(&asoc->outqueue, sackh)) { |
661 | /* There are no more TSNs awaiting SACK. */ | 670 | /* There are no more TSNs awaiting SACK. */ |
@@ -663,11 +672,6 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds, | |||
663 | SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN), | 672 | SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN), |
664 | asoc->state, asoc->ep, asoc, NULL, | 673 | asoc->state, asoc->ep, asoc, NULL, |
665 | GFP_ATOMIC); | 674 | GFP_ATOMIC); |
666 | } else { | ||
667 | /* Windows may have opened, so we need | ||
668 | * to check if we have DATA to transmit | ||
669 | */ | ||
670 | err = sctp_outq_flush(&asoc->outqueue, 0); | ||
671 | } | 675 | } |
672 | 676 | ||
673 | return err; | 677 | return err; |
@@ -1472,8 +1476,15 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1472 | break; | 1476 | break; |
1473 | 1477 | ||
1474 | case SCTP_CMD_DISCARD_PACKET: | 1478 | case SCTP_CMD_DISCARD_PACKET: |
1475 | /* We need to discard the whole packet. */ | 1479 | /* We need to discard the whole packet. |
1480 | * Uncork the queue since there might be | ||
1481 | * responses pending | ||
1482 | */ | ||
1476 | chunk->pdiscard = 1; | 1483 | chunk->pdiscard = 1; |
1484 | if (asoc) { | ||
1485 | sctp_outq_uncork(&asoc->outqueue); | ||
1486 | local_cork = 0; | ||
1487 | } | ||
1477 | break; | 1488 | break; |
1478 | 1489 | ||
1479 | case SCTP_CMD_RTO_PENDING: | 1490 | case SCTP_CMD_RTO_PENDING: |
@@ -1544,8 +1555,15 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1544 | } | 1555 | } |
1545 | 1556 | ||
1546 | out: | 1557 | out: |
1547 | if (local_cork) | 1558 | /* If this is in response to a received chunk, wait until |
1548 | sctp_outq_uncork(&asoc->outqueue); | 1559 | * we are done with the packet to open the queue so that we don't |
1560 | * send multiple packets in response to a single request. | ||
1561 | */ | ||
1562 | if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) { | ||
1563 | if (chunk->end_of_packet || chunk->singleton) | ||
1564 | sctp_outq_uncork(&asoc->outqueue); | ||
1565 | } else if (local_cork) | ||
1566 | sctp_outq_uncork(&asoc->outqueue); | ||
1549 | return error; | 1567 | return error; |
1550 | nomem: | 1568 | nomem: |
1551 | error = -ENOMEM; | 1569 | error = -ENOMEM; |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index fcdb45d1071b..8848d329aa2c 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -795,8 +795,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, | |||
795 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, | 795 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, |
796 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); | 796 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); |
797 | 797 | ||
798 | sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL()); | ||
799 | |||
800 | /* This will send the COOKIE ACK */ | 798 | /* This will send the COOKIE ACK */ |
801 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | 799 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); |
802 | 800 | ||
@@ -883,7 +881,6 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep, | |||
883 | if (asoc->autoclose) | 881 | if (asoc->autoclose) |
884 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, | 882 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, |
885 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); | 883 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); |
886 | sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL()); | ||
887 | 884 | ||
888 | /* It may also notify its ULP about the successful | 885 | /* It may also notify its ULP about the successful |
889 | * establishment of the association with a Communication Up | 886 | * establishment of the association with a Communication Up |
@@ -1781,7 +1778,6 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep, | |||
1781 | goto nomem; | 1778 | goto nomem; |
1782 | 1779 | ||
1783 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | 1780 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); |
1784 | sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL()); | ||
1785 | 1781 | ||
1786 | /* RFC 2960 5.1 Normal Establishment of an Association | 1782 | /* RFC 2960 5.1 Normal Establishment of an Association |
1787 | * | 1783 | * |
@@ -1898,12 +1894,13 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep, | |||
1898 | 1894 | ||
1899 | } | 1895 | } |
1900 | } | 1896 | } |
1901 | sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL()); | ||
1902 | 1897 | ||
1903 | repl = sctp_make_cookie_ack(new_asoc, chunk); | 1898 | repl = sctp_make_cookie_ack(new_asoc, chunk); |
1904 | if (!repl) | 1899 | if (!repl) |
1905 | goto nomem; | 1900 | goto nomem; |
1906 | 1901 | ||
1902 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | ||
1903 | |||
1907 | if (ev) | 1904 | if (ev) |
1908 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | 1905 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, |
1909 | SCTP_ULPEVENT(ev)); | 1906 | SCTP_ULPEVENT(ev)); |
@@ -1911,9 +1908,6 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep, | |||
1911 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | 1908 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, |
1912 | SCTP_ULPEVENT(ai_ev)); | 1909 | SCTP_ULPEVENT(ai_ev)); |
1913 | 1910 | ||
1914 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | ||
1915 | sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL()); | ||
1916 | |||
1917 | return SCTP_DISPOSITION_CONSUME; | 1911 | return SCTP_DISPOSITION_CONSUME; |
1918 | 1912 | ||
1919 | nomem: | 1913 | nomem: |
@@ -3970,9 +3964,6 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep, | |||
3970 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 3964 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
3971 | break; | 3965 | break; |
3972 | case SCTP_CID_ACTION_DISCARD_ERR: | 3966 | case SCTP_CID_ACTION_DISCARD_ERR: |
3973 | /* Discard the packet. */ | ||
3974 | sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3975 | |||
3976 | /* Generate an ERROR chunk as response. */ | 3967 | /* Generate an ERROR chunk as response. */ |
3977 | hdr = unk_chunk->chunk_hdr; | 3968 | hdr = unk_chunk->chunk_hdr; |
3978 | err_chunk = sctp_make_op_error(asoc, unk_chunk, | 3969 | err_chunk = sctp_make_op_error(asoc, unk_chunk, |
@@ -3982,6 +3973,9 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep, | |||
3982 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | 3973 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, |
3983 | SCTP_CHUNK(err_chunk)); | 3974 | SCTP_CHUNK(err_chunk)); |
3984 | } | 3975 | } |
3976 | |||
3977 | /* Discard the packet. */ | ||
3978 | sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3985 | return SCTP_DISPOSITION_CONSUME; | 3979 | return SCTP_DISPOSITION_CONSUME; |
3986 | break; | 3980 | break; |
3987 | case SCTP_CID_ACTION_SKIP: | 3981 | case SCTP_CID_ACTION_SKIP: |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 0dbcde6758ea..79bece16aede 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -116,7 +116,7 @@ static int sctp_memory_pressure; | |||
116 | static atomic_t sctp_memory_allocated; | 116 | static atomic_t sctp_memory_allocated; |
117 | static atomic_t sctp_sockets_allocated; | 117 | static atomic_t sctp_sockets_allocated; |
118 | 118 | ||
119 | static void sctp_enter_memory_pressure(void) | 119 | static void sctp_enter_memory_pressure(struct sock *sk) |
120 | { | 120 | { |
121 | sctp_memory_pressure = 1; | 121 | sctp_memory_pressure = 1; |
122 | } | 122 | } |
@@ -308,9 +308,16 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, | |||
308 | if (len < sizeof (struct sockaddr)) | 308 | if (len < sizeof (struct sockaddr)) |
309 | return NULL; | 309 | return NULL; |
310 | 310 | ||
311 | /* Does this PF support this AF? */ | 311 | /* V4 mapped address are really of AF_INET family */ |
312 | if (!opt->pf->af_supported(addr->sa.sa_family, opt)) | 312 | if (addr->sa.sa_family == AF_INET6 && |
313 | return NULL; | 313 | ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { |
314 | if (!opt->pf->af_supported(AF_INET, opt)) | ||
315 | return NULL; | ||
316 | } else { | ||
317 | /* Does this PF support this AF? */ | ||
318 | if (!opt->pf->af_supported(addr->sa.sa_family, opt)) | ||
319 | return NULL; | ||
320 | } | ||
314 | 321 | ||
315 | /* If we get this far, af is valid. */ | 322 | /* If we get this far, af is valid. */ |
316 | af = sctp_get_af_specific(addr->sa.sa_family); | 323 | af = sctp_get_af_specific(addr->sa.sa_family); |
@@ -370,18 +377,19 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) | |||
370 | if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) | 377 | if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) |
371 | return -EACCES; | 378 | return -EACCES; |
372 | 379 | ||
380 | /* See if the address matches any of the addresses we may have | ||
381 | * already bound before checking against other endpoints. | ||
382 | */ | ||
383 | if (sctp_bind_addr_match(bp, addr, sp)) | ||
384 | return -EINVAL; | ||
385 | |||
373 | /* Make sure we are allowed to bind here. | 386 | /* Make sure we are allowed to bind here. |
374 | * The function sctp_get_port_local() does duplicate address | 387 | * The function sctp_get_port_local() does duplicate address |
375 | * detection. | 388 | * detection. |
376 | */ | 389 | */ |
377 | addr->v4.sin_port = htons(snum); | 390 | addr->v4.sin_port = htons(snum); |
378 | if ((ret = sctp_get_port_local(sk, addr))) { | 391 | if ((ret = sctp_get_port_local(sk, addr))) { |
379 | if (ret == (long) sk) { | 392 | return -EADDRINUSE; |
380 | /* This endpoint has a conflicting address. */ | ||
381 | return -EINVAL; | ||
382 | } else { | ||
383 | return -EADDRINUSE; | ||
384 | } | ||
385 | } | 393 | } |
386 | 394 | ||
387 | /* Refresh ephemeral port. */ | 395 | /* Refresh ephemeral port. */ |
@@ -956,7 +964,8 @@ out: | |||
956 | */ | 964 | */ |
957 | static int __sctp_connect(struct sock* sk, | 965 | static int __sctp_connect(struct sock* sk, |
958 | struct sockaddr *kaddrs, | 966 | struct sockaddr *kaddrs, |
959 | int addrs_size) | 967 | int addrs_size, |
968 | sctp_assoc_t *assoc_id) | ||
960 | { | 969 | { |
961 | struct sctp_sock *sp; | 970 | struct sctp_sock *sp; |
962 | struct sctp_endpoint *ep; | 971 | struct sctp_endpoint *ep; |
@@ -1111,6 +1120,8 @@ static int __sctp_connect(struct sock* sk, | |||
1111 | timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); | 1120 | timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); |
1112 | 1121 | ||
1113 | err = sctp_wait_for_connect(asoc, &timeo); | 1122 | err = sctp_wait_for_connect(asoc, &timeo); |
1123 | if (!err && assoc_id) | ||
1124 | *assoc_id = asoc->assoc_id; | ||
1114 | 1125 | ||
1115 | /* Don't free association on exit. */ | 1126 | /* Don't free association on exit. */ |
1116 | asoc = NULL; | 1127 | asoc = NULL; |
@@ -1128,7 +1139,8 @@ out_free: | |||
1128 | /* Helper for tunneling sctp_connectx() requests through sctp_setsockopt() | 1139 | /* Helper for tunneling sctp_connectx() requests through sctp_setsockopt() |
1129 | * | 1140 | * |
1130 | * API 8.9 | 1141 | * API 8.9 |
1131 | * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt); | 1142 | * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt, |
1143 | * sctp_assoc_t *asoc); | ||
1132 | * | 1144 | * |
1133 | * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. | 1145 | * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. |
1134 | * If the sd is an IPv6 socket, the addresses passed can either be IPv4 | 1146 | * If the sd is an IPv6 socket, the addresses passed can either be IPv4 |
@@ -1144,8 +1156,10 @@ out_free: | |||
1144 | * representation is termed a "packed array" of addresses). The caller | 1156 | * representation is termed a "packed array" of addresses). The caller |
1145 | * specifies the number of addresses in the array with addrcnt. | 1157 | * specifies the number of addresses in the array with addrcnt. |
1146 | * | 1158 | * |
1147 | * On success, sctp_connectx() returns 0. On failure, sctp_connectx() returns | 1159 | * On success, sctp_connectx() returns 0. It also sets the assoc_id to |
1148 | * -1, and sets errno to the appropriate error code. | 1160 | * the association id of the new association. On failure, sctp_connectx() |
1161 | * returns -1, and sets errno to the appropriate error code. The assoc_id | ||
1162 | * is not touched by the kernel. | ||
1149 | * | 1163 | * |
1150 | * For SCTP, the port given in each socket address must be the same, or | 1164 | * For SCTP, the port given in each socket address must be the same, or |
1151 | * sctp_connectx() will fail, setting errno to EINVAL. | 1165 | * sctp_connectx() will fail, setting errno to EINVAL. |
@@ -1182,11 +1196,12 @@ out_free: | |||
1182 | * addrs The pointer to the addresses in user land | 1196 | * addrs The pointer to the addresses in user land |
1183 | * addrssize Size of the addrs buffer | 1197 | * addrssize Size of the addrs buffer |
1184 | * | 1198 | * |
1185 | * Returns 0 if ok, <0 errno code on error. | 1199 | * Returns >=0 if ok, <0 errno code on error. |
1186 | */ | 1200 | */ |
1187 | SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk, | 1201 | SCTP_STATIC int __sctp_setsockopt_connectx(struct sock* sk, |
1188 | struct sockaddr __user *addrs, | 1202 | struct sockaddr __user *addrs, |
1189 | int addrs_size) | 1203 | int addrs_size, |
1204 | sctp_assoc_t *assoc_id) | ||
1190 | { | 1205 | { |
1191 | int err = 0; | 1206 | int err = 0; |
1192 | struct sockaddr *kaddrs; | 1207 | struct sockaddr *kaddrs; |
@@ -1209,13 +1224,46 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk, | |||
1209 | if (__copy_from_user(kaddrs, addrs, addrs_size)) { | 1224 | if (__copy_from_user(kaddrs, addrs, addrs_size)) { |
1210 | err = -EFAULT; | 1225 | err = -EFAULT; |
1211 | } else { | 1226 | } else { |
1212 | err = __sctp_connect(sk, kaddrs, addrs_size); | 1227 | err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id); |
1213 | } | 1228 | } |
1214 | 1229 | ||
1215 | kfree(kaddrs); | 1230 | kfree(kaddrs); |
1231 | |||
1216 | return err; | 1232 | return err; |
1217 | } | 1233 | } |
1218 | 1234 | ||
1235 | /* | ||
1236 | * This is an older interface. It's kept for backward compatibility | ||
1237 | * to the option that doesn't provide association id. | ||
1238 | */ | ||
1239 | SCTP_STATIC int sctp_setsockopt_connectx_old(struct sock* sk, | ||
1240 | struct sockaddr __user *addrs, | ||
1241 | int addrs_size) | ||
1242 | { | ||
1243 | return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL); | ||
1244 | } | ||
1245 | |||
1246 | /* | ||
1247 | * New interface for the API. The since the API is done with a socket | ||
1248 | * option, to make it simple we feed back the association id is as a return | ||
1249 | * indication to the call. Error is always negative and association id is | ||
1250 | * always positive. | ||
1251 | */ | ||
1252 | SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk, | ||
1253 | struct sockaddr __user *addrs, | ||
1254 | int addrs_size) | ||
1255 | { | ||
1256 | sctp_assoc_t assoc_id = 0; | ||
1257 | int err = 0; | ||
1258 | |||
1259 | err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id); | ||
1260 | |||
1261 | if (err) | ||
1262 | return err; | ||
1263 | else | ||
1264 | return assoc_id; | ||
1265 | } | ||
1266 | |||
1219 | /* API 3.1.4 close() - UDP Style Syntax | 1267 | /* API 3.1.4 close() - UDP Style Syntax |
1220 | * Applications use close() to perform graceful shutdown (as described in | 1268 | * Applications use close() to perform graceful shutdown (as described in |
1221 | * Section 10.1 of [SCTP]) on ALL the associations currently represented | 1269 | * Section 10.1 of [SCTP]) on ALL the associations currently represented |
@@ -2305,74 +2353,98 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk, | |||
2305 | return 0; | 2353 | return 0; |
2306 | } | 2354 | } |
2307 | 2355 | ||
2308 | /* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) | 2356 | /* |
2309 | * | 2357 | * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) |
2310 | * This options will get or set the delayed ack timer. The time is set | 2358 | * |
2311 | * in milliseconds. If the assoc_id is 0, then this sets or gets the | 2359 | * This option will effect the way delayed acks are performed. This |
2312 | * endpoints default delayed ack timer value. If the assoc_id field is | 2360 | * option allows you to get or set the delayed ack time, in |
2313 | * non-zero, then the set or get effects the specified association. | 2361 | * milliseconds. It also allows changing the delayed ack frequency. |
2314 | * | 2362 | * Changing the frequency to 1 disables the delayed sack algorithm. If |
2315 | * struct sctp_assoc_value { | 2363 | * the assoc_id is 0, then this sets or gets the endpoints default |
2316 | * sctp_assoc_t assoc_id; | 2364 | * values. If the assoc_id field is non-zero, then the set or get |
2317 | * uint32_t assoc_value; | 2365 | * effects the specified association for the one to many model (the |
2318 | * }; | 2366 | * assoc_id field is ignored by the one to one model). Note that if |
2367 | * sack_delay or sack_freq are 0 when setting this option, then the | ||
2368 | * current values will remain unchanged. | ||
2369 | * | ||
2370 | * struct sctp_sack_info { | ||
2371 | * sctp_assoc_t sack_assoc_id; | ||
2372 | * uint32_t sack_delay; | ||
2373 | * uint32_t sack_freq; | ||
2374 | * }; | ||
2319 | * | 2375 | * |
2320 | * assoc_id - This parameter, indicates which association the | 2376 | * sack_assoc_id - This parameter, indicates which association the user |
2321 | * user is preforming an action upon. Note that if | 2377 | * is performing an action upon. Note that if this field's value is |
2322 | * this field's value is zero then the endpoints | 2378 | * zero then the endpoints default value is changed (effecting future |
2323 | * default value is changed (effecting future | 2379 | * associations only). |
2324 | * associations only). | ||
2325 | * | 2380 | * |
2326 | * assoc_value - This parameter contains the number of milliseconds | 2381 | * sack_delay - This parameter contains the number of milliseconds that |
2327 | * that the user is requesting the delayed ACK timer | 2382 | * the user is requesting the delayed ACK timer be set to. Note that |
2328 | * be set to. Note that this value is defined in | 2383 | * this value is defined in the standard to be between 200 and 500 |
2329 | * the standard to be between 200 and 500 milliseconds. | 2384 | * milliseconds. |
2330 | * | 2385 | * |
2331 | * Note: a value of zero will leave the value alone, | 2386 | * sack_freq - This parameter contains the number of packets that must |
2332 | * but disable SACK delay. A non-zero value will also | 2387 | * be received before a sack is sent without waiting for the delay |
2333 | * enable SACK delay. | 2388 | * timer to expire. The default value for this is 2, setting this |
2389 | * value to 1 will disable the delayed sack algorithm. | ||
2334 | */ | 2390 | */ |
2335 | 2391 | ||
2336 | static int sctp_setsockopt_delayed_ack_time(struct sock *sk, | 2392 | static int sctp_setsockopt_delayed_ack(struct sock *sk, |
2337 | char __user *optval, int optlen) | 2393 | char __user *optval, int optlen) |
2338 | { | 2394 | { |
2339 | struct sctp_assoc_value params; | 2395 | struct sctp_sack_info params; |
2340 | struct sctp_transport *trans = NULL; | 2396 | struct sctp_transport *trans = NULL; |
2341 | struct sctp_association *asoc = NULL; | 2397 | struct sctp_association *asoc = NULL; |
2342 | struct sctp_sock *sp = sctp_sk(sk); | 2398 | struct sctp_sock *sp = sctp_sk(sk); |
2343 | 2399 | ||
2344 | if (optlen != sizeof(struct sctp_assoc_value)) | 2400 | if (optlen == sizeof(struct sctp_sack_info)) { |
2345 | return - EINVAL; | 2401 | if (copy_from_user(¶ms, optval, optlen)) |
2402 | return -EFAULT; | ||
2346 | 2403 | ||
2347 | if (copy_from_user(¶ms, optval, optlen)) | 2404 | if (params.sack_delay == 0 && params.sack_freq == 0) |
2348 | return -EFAULT; | 2405 | return 0; |
2406 | } else if (optlen == sizeof(struct sctp_assoc_value)) { | ||
2407 | printk(KERN_WARNING "SCTP: Use of struct sctp_sack_info " | ||
2408 | "in delayed_ack socket option deprecated\n"); | ||
2409 | printk(KERN_WARNING "SCTP: struct sctp_sack_info instead\n"); | ||
2410 | if (copy_from_user(¶ms, optval, optlen)) | ||
2411 | return -EFAULT; | ||
2412 | |||
2413 | if (params.sack_delay == 0) | ||
2414 | params.sack_freq = 1; | ||
2415 | else | ||
2416 | params.sack_freq = 0; | ||
2417 | } else | ||
2418 | return - EINVAL; | ||
2349 | 2419 | ||
2350 | /* Validate value parameter. */ | 2420 | /* Validate value parameter. */ |
2351 | if (params.assoc_value > 500) | 2421 | if (params.sack_delay > 500) |
2352 | return -EINVAL; | 2422 | return -EINVAL; |
2353 | 2423 | ||
2354 | /* Get association, if assoc_id != 0 and the socket is a one | 2424 | /* Get association, if sack_assoc_id != 0 and the socket is a one |
2355 | * to many style socket, and an association was not found, then | 2425 | * to many style socket, and an association was not found, then |
2356 | * the id was invalid. | 2426 | * the id was invalid. |
2357 | */ | 2427 | */ |
2358 | asoc = sctp_id2assoc(sk, params.assoc_id); | 2428 | asoc = sctp_id2assoc(sk, params.sack_assoc_id); |
2359 | if (!asoc && params.assoc_id && sctp_style(sk, UDP)) | 2429 | if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) |
2360 | return -EINVAL; | 2430 | return -EINVAL; |
2361 | 2431 | ||
2362 | if (params.assoc_value) { | 2432 | if (params.sack_delay) { |
2363 | if (asoc) { | 2433 | if (asoc) { |
2364 | asoc->sackdelay = | 2434 | asoc->sackdelay = |
2365 | msecs_to_jiffies(params.assoc_value); | 2435 | msecs_to_jiffies(params.sack_delay); |
2366 | asoc->param_flags = | 2436 | asoc->param_flags = |
2367 | (asoc->param_flags & ~SPP_SACKDELAY) | | 2437 | (asoc->param_flags & ~SPP_SACKDELAY) | |
2368 | SPP_SACKDELAY_ENABLE; | 2438 | SPP_SACKDELAY_ENABLE; |
2369 | } else { | 2439 | } else { |
2370 | sp->sackdelay = params.assoc_value; | 2440 | sp->sackdelay = params.sack_delay; |
2371 | sp->param_flags = | 2441 | sp->param_flags = |
2372 | (sp->param_flags & ~SPP_SACKDELAY) | | 2442 | (sp->param_flags & ~SPP_SACKDELAY) | |
2373 | SPP_SACKDELAY_ENABLE; | 2443 | SPP_SACKDELAY_ENABLE; |
2374 | } | 2444 | } |
2375 | } else { | 2445 | } |
2446 | |||
2447 | if (params.sack_freq == 1) { | ||
2376 | if (asoc) { | 2448 | if (asoc) { |
2377 | asoc->param_flags = | 2449 | asoc->param_flags = |
2378 | (asoc->param_flags & ~SPP_SACKDELAY) | | 2450 | (asoc->param_flags & ~SPP_SACKDELAY) | |
@@ -2382,22 +2454,40 @@ static int sctp_setsockopt_delayed_ack_time(struct sock *sk, | |||
2382 | (sp->param_flags & ~SPP_SACKDELAY) | | 2454 | (sp->param_flags & ~SPP_SACKDELAY) | |
2383 | SPP_SACKDELAY_DISABLE; | 2455 | SPP_SACKDELAY_DISABLE; |
2384 | } | 2456 | } |
2457 | } else if (params.sack_freq > 1) { | ||
2458 | if (asoc) { | ||
2459 | asoc->sackfreq = params.sack_freq; | ||
2460 | asoc->param_flags = | ||
2461 | (asoc->param_flags & ~SPP_SACKDELAY) | | ||
2462 | SPP_SACKDELAY_ENABLE; | ||
2463 | } else { | ||
2464 | sp->sackfreq = params.sack_freq; | ||
2465 | sp->param_flags = | ||
2466 | (sp->param_flags & ~SPP_SACKDELAY) | | ||
2467 | SPP_SACKDELAY_ENABLE; | ||
2468 | } | ||
2385 | } | 2469 | } |
2386 | 2470 | ||
2387 | /* If change is for association, also apply to each transport. */ | 2471 | /* If change is for association, also apply to each transport. */ |
2388 | if (asoc) { | 2472 | if (asoc) { |
2389 | list_for_each_entry(trans, &asoc->peer.transport_addr_list, | 2473 | list_for_each_entry(trans, &asoc->peer.transport_addr_list, |
2390 | transports) { | 2474 | transports) { |
2391 | if (params.assoc_value) { | 2475 | if (params.sack_delay) { |
2392 | trans->sackdelay = | 2476 | trans->sackdelay = |
2393 | msecs_to_jiffies(params.assoc_value); | 2477 | msecs_to_jiffies(params.sack_delay); |
2394 | trans->param_flags = | 2478 | trans->param_flags = |
2395 | (trans->param_flags & ~SPP_SACKDELAY) | | 2479 | (trans->param_flags & ~SPP_SACKDELAY) | |
2396 | SPP_SACKDELAY_ENABLE; | 2480 | SPP_SACKDELAY_ENABLE; |
2397 | } else { | 2481 | } |
2482 | if (params.sack_freq == 1) { | ||
2398 | trans->param_flags = | 2483 | trans->param_flags = |
2399 | (trans->param_flags & ~SPP_SACKDELAY) | | 2484 | (trans->param_flags & ~SPP_SACKDELAY) | |
2400 | SPP_SACKDELAY_DISABLE; | 2485 | SPP_SACKDELAY_DISABLE; |
2486 | } else if (params.sack_freq > 1) { | ||
2487 | trans->sackfreq = params.sack_freq; | ||
2488 | trans->param_flags = | ||
2489 | (trans->param_flags & ~SPP_SACKDELAY) | | ||
2490 | SPP_SACKDELAY_ENABLE; | ||
2401 | } | 2491 | } |
2402 | } | 2492 | } |
2403 | } | 2493 | } |
@@ -3164,10 +3254,18 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, | |||
3164 | optlen, SCTP_BINDX_REM_ADDR); | 3254 | optlen, SCTP_BINDX_REM_ADDR); |
3165 | break; | 3255 | break; |
3166 | 3256 | ||
3257 | case SCTP_SOCKOPT_CONNECTX_OLD: | ||
3258 | /* 'optlen' is the size of the addresses buffer. */ | ||
3259 | retval = sctp_setsockopt_connectx_old(sk, | ||
3260 | (struct sockaddr __user *)optval, | ||
3261 | optlen); | ||
3262 | break; | ||
3263 | |||
3167 | case SCTP_SOCKOPT_CONNECTX: | 3264 | case SCTP_SOCKOPT_CONNECTX: |
3168 | /* 'optlen' is the size of the addresses buffer. */ | 3265 | /* 'optlen' is the size of the addresses buffer. */ |
3169 | retval = sctp_setsockopt_connectx(sk, (struct sockaddr __user *)optval, | 3266 | retval = sctp_setsockopt_connectx(sk, |
3170 | optlen); | 3267 | (struct sockaddr __user *)optval, |
3268 | optlen); | ||
3171 | break; | 3269 | break; |
3172 | 3270 | ||
3173 | case SCTP_DISABLE_FRAGMENTS: | 3271 | case SCTP_DISABLE_FRAGMENTS: |
@@ -3186,8 +3284,8 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, | |||
3186 | retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); | 3284 | retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); |
3187 | break; | 3285 | break; |
3188 | 3286 | ||
3189 | case SCTP_DELAYED_ACK_TIME: | 3287 | case SCTP_DELAYED_ACK: |
3190 | retval = sctp_setsockopt_delayed_ack_time(sk, optval, optlen); | 3288 | retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); |
3191 | break; | 3289 | break; |
3192 | case SCTP_PARTIAL_DELIVERY_POINT: | 3290 | case SCTP_PARTIAL_DELIVERY_POINT: |
3193 | retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); | 3291 | retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); |
@@ -3294,7 +3392,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *addr, | |||
3294 | /* Pass correct addr len to common routine (so it knows there | 3392 | /* Pass correct addr len to common routine (so it knows there |
3295 | * is only one address being passed. | 3393 | * is only one address being passed. |
3296 | */ | 3394 | */ |
3297 | err = __sctp_connect(sk, addr, af->sockaddr_len); | 3395 | err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); |
3298 | } | 3396 | } |
3299 | 3397 | ||
3300 | sctp_release_sock(sk); | 3398 | sctp_release_sock(sk); |
@@ -3446,6 +3544,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) | |||
3446 | sp->pathmaxrxt = sctp_max_retrans_path; | 3544 | sp->pathmaxrxt = sctp_max_retrans_path; |
3447 | sp->pathmtu = 0; // allow default discovery | 3545 | sp->pathmtu = 0; // allow default discovery |
3448 | sp->sackdelay = sctp_sack_timeout; | 3546 | sp->sackdelay = sctp_sack_timeout; |
3547 | sp->sackfreq = 2; | ||
3449 | sp->param_flags = SPP_HB_ENABLE | | 3548 | sp->param_flags = SPP_HB_ENABLE | |
3450 | SPP_PMTUD_ENABLE | | 3549 | SPP_PMTUD_ENABLE | |
3451 | SPP_SACKDELAY_ENABLE; | 3550 | SPP_SACKDELAY_ENABLE; |
@@ -3497,7 +3596,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) | |||
3497 | } | 3596 | } |
3498 | 3597 | ||
3499 | /* Cleanup any SCTP per socket resources. */ | 3598 | /* Cleanup any SCTP per socket resources. */ |
3500 | SCTP_STATIC int sctp_destroy_sock(struct sock *sk) | 3599 | SCTP_STATIC void sctp_destroy_sock(struct sock *sk) |
3501 | { | 3600 | { |
3502 | struct sctp_endpoint *ep; | 3601 | struct sctp_endpoint *ep; |
3503 | 3602 | ||
@@ -3507,7 +3606,6 @@ SCTP_STATIC int sctp_destroy_sock(struct sock *sk) | |||
3507 | ep = sctp_sk(sk)->ep; | 3606 | ep = sctp_sk(sk)->ep; |
3508 | sctp_endpoint_free(ep); | 3607 | sctp_endpoint_free(ep); |
3509 | atomic_dec(&sctp_sockets_allocated); | 3608 | atomic_dec(&sctp_sockets_allocated); |
3510 | return 0; | ||
3511 | } | 3609 | } |
3512 | 3610 | ||
3513 | /* API 4.1.7 shutdown() - TCP Style Syntax | 3611 | /* API 4.1.7 shutdown() - TCP Style Syntax |
@@ -3999,70 +4097,91 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, | |||
3999 | return 0; | 4097 | return 0; |
4000 | } | 4098 | } |
4001 | 4099 | ||
4002 | /* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) | 4100 | /* |
4003 | * | 4101 | * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) |
4004 | * This options will get or set the delayed ack timer. The time is set | 4102 | * |
4005 | * in milliseconds. If the assoc_id is 0, then this sets or gets the | 4103 | * This option will effect the way delayed acks are performed. This |
4006 | * endpoints default delayed ack timer value. If the assoc_id field is | 4104 | * option allows you to get or set the delayed ack time, in |
4007 | * non-zero, then the set or get effects the specified association. | 4105 | * milliseconds. It also allows changing the delayed ack frequency. |
4008 | * | 4106 | * Changing the frequency to 1 disables the delayed sack algorithm. If |
4009 | * struct sctp_assoc_value { | 4107 | * the assoc_id is 0, then this sets or gets the endpoints default |
4010 | * sctp_assoc_t assoc_id; | 4108 | * values. If the assoc_id field is non-zero, then the set or get |
4011 | * uint32_t assoc_value; | 4109 | * effects the specified association for the one to many model (the |
4012 | * }; | 4110 | * assoc_id field is ignored by the one to one model). Note that if |
4111 | * sack_delay or sack_freq are 0 when setting this option, then the | ||
4112 | * current values will remain unchanged. | ||
4113 | * | ||
4114 | * struct sctp_sack_info { | ||
4115 | * sctp_assoc_t sack_assoc_id; | ||
4116 | * uint32_t sack_delay; | ||
4117 | * uint32_t sack_freq; | ||
4118 | * }; | ||
4013 | * | 4119 | * |
4014 | * assoc_id - This parameter, indicates which association the | 4120 | * sack_assoc_id - This parameter, indicates which association the user |
4015 | * user is preforming an action upon. Note that if | 4121 | * is performing an action upon. Note that if this field's value is |
4016 | * this field's value is zero then the endpoints | 4122 | * zero then the endpoints default value is changed (effecting future |
4017 | * default value is changed (effecting future | 4123 | * associations only). |
4018 | * associations only). | ||
4019 | * | 4124 | * |
4020 | * assoc_value - This parameter contains the number of milliseconds | 4125 | * sack_delay - This parameter contains the number of milliseconds that |
4021 | * that the user is requesting the delayed ACK timer | 4126 | * the user is requesting the delayed ACK timer be set to. Note that |
4022 | * be set to. Note that this value is defined in | 4127 | * this value is defined in the standard to be between 200 and 500 |
4023 | * the standard to be between 200 and 500 milliseconds. | 4128 | * milliseconds. |
4024 | * | 4129 | * |
4025 | * Note: a value of zero will leave the value alone, | 4130 | * sack_freq - This parameter contains the number of packets that must |
4026 | * but disable SACK delay. A non-zero value will also | 4131 | * be received before a sack is sent without waiting for the delay |
4027 | * enable SACK delay. | 4132 | * timer to expire. The default value for this is 2, setting this |
4133 | * value to 1 will disable the delayed sack algorithm. | ||
4028 | */ | 4134 | */ |
4029 | static int sctp_getsockopt_delayed_ack_time(struct sock *sk, int len, | 4135 | static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, |
4030 | char __user *optval, | 4136 | char __user *optval, |
4031 | int __user *optlen) | 4137 | int __user *optlen) |
4032 | { | 4138 | { |
4033 | struct sctp_assoc_value params; | 4139 | struct sctp_sack_info params; |
4034 | struct sctp_association *asoc = NULL; | 4140 | struct sctp_association *asoc = NULL; |
4035 | struct sctp_sock *sp = sctp_sk(sk); | 4141 | struct sctp_sock *sp = sctp_sk(sk); |
4036 | 4142 | ||
4037 | if (len < sizeof(struct sctp_assoc_value)) | 4143 | if (len >= sizeof(struct sctp_sack_info)) { |
4038 | return - EINVAL; | 4144 | len = sizeof(struct sctp_sack_info); |
4039 | 4145 | ||
4040 | len = sizeof(struct sctp_assoc_value); | 4146 | if (copy_from_user(¶ms, optval, len)) |
4041 | 4147 | return -EFAULT; | |
4042 | if (copy_from_user(¶ms, optval, len)) | 4148 | } else if (len == sizeof(struct sctp_assoc_value)) { |
4043 | return -EFAULT; | 4149 | printk(KERN_WARNING "SCTP: Use of struct sctp_sack_info " |
4150 | "in delayed_ack socket option deprecated\n"); | ||
4151 | printk(KERN_WARNING "SCTP: struct sctp_sack_info instead\n"); | ||
4152 | if (copy_from_user(¶ms, optval, len)) | ||
4153 | return -EFAULT; | ||
4154 | } else | ||
4155 | return - EINVAL; | ||
4044 | 4156 | ||
4045 | /* Get association, if assoc_id != 0 and the socket is a one | 4157 | /* Get association, if sack_assoc_id != 0 and the socket is a one |
4046 | * to many style socket, and an association was not found, then | 4158 | * to many style socket, and an association was not found, then |
4047 | * the id was invalid. | 4159 | * the id was invalid. |
4048 | */ | 4160 | */ |
4049 | asoc = sctp_id2assoc(sk, params.assoc_id); | 4161 | asoc = sctp_id2assoc(sk, params.sack_assoc_id); |
4050 | if (!asoc && params.assoc_id && sctp_style(sk, UDP)) | 4162 | if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) |
4051 | return -EINVAL; | 4163 | return -EINVAL; |
4052 | 4164 | ||
4053 | if (asoc) { | 4165 | if (asoc) { |
4054 | /* Fetch association values. */ | 4166 | /* Fetch association values. */ |
4055 | if (asoc->param_flags & SPP_SACKDELAY_ENABLE) | 4167 | if (asoc->param_flags & SPP_SACKDELAY_ENABLE) { |
4056 | params.assoc_value = jiffies_to_msecs( | 4168 | params.sack_delay = jiffies_to_msecs( |
4057 | asoc->sackdelay); | 4169 | asoc->sackdelay); |
4058 | else | 4170 | params.sack_freq = asoc->sackfreq; |
4059 | params.assoc_value = 0; | 4171 | |
4172 | } else { | ||
4173 | params.sack_delay = 0; | ||
4174 | params.sack_freq = 1; | ||
4175 | } | ||
4060 | } else { | 4176 | } else { |
4061 | /* Fetch socket values. */ | 4177 | /* Fetch socket values. */ |
4062 | if (sp->param_flags & SPP_SACKDELAY_ENABLE) | 4178 | if (sp->param_flags & SPP_SACKDELAY_ENABLE) { |
4063 | params.assoc_value = sp->sackdelay; | 4179 | params.sack_delay = sp->sackdelay; |
4064 | else | 4180 | params.sack_freq = sp->sackfreq; |
4065 | params.assoc_value = 0; | 4181 | } else { |
4182 | params.sack_delay = 0; | ||
4183 | params.sack_freq = 1; | ||
4184 | } | ||
4066 | } | 4185 | } |
4067 | 4186 | ||
4068 | if (copy_to_user(optval, ¶ms, len)) | 4187 | if (copy_to_user(optval, ¶ms, len)) |
@@ -4112,6 +4231,8 @@ static int sctp_getsockopt_peer_addrs_num_old(struct sock *sk, int len, | |||
4112 | if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) | 4231 | if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) |
4113 | return -EFAULT; | 4232 | return -EFAULT; |
4114 | 4233 | ||
4234 | printk(KERN_WARNING "SCTP: Use of SCTP_GET_PEER_ADDRS_NUM_OLD " | ||
4235 | "socket option deprecated\n"); | ||
4115 | /* For UDP-style sockets, id specifies the association to query. */ | 4236 | /* For UDP-style sockets, id specifies the association to query. */ |
4116 | asoc = sctp_id2assoc(sk, id); | 4237 | asoc = sctp_id2assoc(sk, id); |
4117 | if (!asoc) | 4238 | if (!asoc) |
@@ -4151,6 +4272,9 @@ static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len, | |||
4151 | 4272 | ||
4152 | if (getaddrs.addr_num <= 0) return -EINVAL; | 4273 | if (getaddrs.addr_num <= 0) return -EINVAL; |
4153 | 4274 | ||
4275 | printk(KERN_WARNING "SCTP: Use of SCTP_GET_PEER_ADDRS_OLD " | ||
4276 | "socket option deprecated\n"); | ||
4277 | |||
4154 | /* For UDP-style sockets, id specifies the association to query. */ | 4278 | /* For UDP-style sockets, id specifies the association to query. */ |
4155 | asoc = sctp_id2assoc(sk, getaddrs.assoc_id); | 4279 | asoc = sctp_id2assoc(sk, getaddrs.assoc_id); |
4156 | if (!asoc) | 4280 | if (!asoc) |
@@ -4244,6 +4368,9 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len, | |||
4244 | if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) | 4368 | if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) |
4245 | return -EFAULT; | 4369 | return -EFAULT; |
4246 | 4370 | ||
4371 | printk(KERN_WARNING "SCTP: Use of SCTP_GET_LOCAL_ADDRS_NUM_OLD " | ||
4372 | "socket option deprecated\n"); | ||
4373 | |||
4247 | /* | 4374 | /* |
4248 | * For UDP-style sockets, id specifies the association to query. | 4375 | * For UDP-style sockets, id specifies the association to query. |
4249 | * If the id field is set to the value '0' then the locally bound | 4376 | * If the id field is set to the value '0' then the locally bound |
@@ -4276,6 +4403,11 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len, | |||
4276 | (AF_INET6 == addr->a.sa.sa_family)) | 4403 | (AF_INET6 == addr->a.sa.sa_family)) |
4277 | continue; | 4404 | continue; |
4278 | 4405 | ||
4406 | if ((PF_INET6 == sk->sk_family) && | ||
4407 | inet_v6_ipv6only(sk) && | ||
4408 | (AF_INET == addr->a.sa.sa_family)) | ||
4409 | continue; | ||
4410 | |||
4279 | cnt++; | 4411 | cnt++; |
4280 | } | 4412 | } |
4281 | rcu_read_unlock(); | 4413 | rcu_read_unlock(); |
@@ -4316,6 +4448,10 @@ static int sctp_copy_laddrs_old(struct sock *sk, __u16 port, | |||
4316 | if ((PF_INET == sk->sk_family) && | 4448 | if ((PF_INET == sk->sk_family) && |
4317 | (AF_INET6 == addr->a.sa.sa_family)) | 4449 | (AF_INET6 == addr->a.sa.sa_family)) |
4318 | continue; | 4450 | continue; |
4451 | if ((PF_INET6 == sk->sk_family) && | ||
4452 | inet_v6_ipv6only(sk) && | ||
4453 | (AF_INET == addr->a.sa.sa_family)) | ||
4454 | continue; | ||
4319 | memcpy(&temp, &addr->a, sizeof(temp)); | 4455 | memcpy(&temp, &addr->a, sizeof(temp)); |
4320 | if (!temp.v4.sin_port) | 4456 | if (!temp.v4.sin_port) |
4321 | temp.v4.sin_port = htons(port); | 4457 | temp.v4.sin_port = htons(port); |
@@ -4351,6 +4487,10 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, | |||
4351 | if ((PF_INET == sk->sk_family) && | 4487 | if ((PF_INET == sk->sk_family) && |
4352 | (AF_INET6 == addr->a.sa.sa_family)) | 4488 | (AF_INET6 == addr->a.sa.sa_family)) |
4353 | continue; | 4489 | continue; |
4490 | if ((PF_INET6 == sk->sk_family) && | ||
4491 | inet_v6_ipv6only(sk) && | ||
4492 | (AF_INET == addr->a.sa.sa_family)) | ||
4493 | continue; | ||
4354 | memcpy(&temp, &addr->a, sizeof(temp)); | 4494 | memcpy(&temp, &addr->a, sizeof(temp)); |
4355 | if (!temp.v4.sin_port) | 4495 | if (!temp.v4.sin_port) |
4356 | temp.v4.sin_port = htons(port); | 4496 | temp.v4.sin_port = htons(port); |
@@ -4404,6 +4544,10 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, | |||
4404 | if (getaddrs.addr_num <= 0 || | 4544 | if (getaddrs.addr_num <= 0 || |
4405 | getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr))) | 4545 | getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr))) |
4406 | return -EINVAL; | 4546 | return -EINVAL; |
4547 | |||
4548 | printk(KERN_WARNING "SCTP: Use of SCTP_GET_LOCAL_ADDRS_OLD " | ||
4549 | "socket option deprecated\n"); | ||
4550 | |||
4407 | /* | 4551 | /* |
4408 | * For UDP-style sockets, id specifies the association to query. | 4552 | * For UDP-style sockets, id specifies the association to query. |
4409 | * If the id field is set to the value '0' then the locally bound | 4553 | * If the id field is set to the value '0' then the locally bound |
@@ -5220,8 +5364,8 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, | |||
5220 | retval = sctp_getsockopt_peer_addr_params(sk, len, optval, | 5364 | retval = sctp_getsockopt_peer_addr_params(sk, len, optval, |
5221 | optlen); | 5365 | optlen); |
5222 | break; | 5366 | break; |
5223 | case SCTP_DELAYED_ACK_TIME: | 5367 | case SCTP_DELAYED_ACK: |
5224 | retval = sctp_getsockopt_delayed_ack_time(sk, len, optval, | 5368 | retval = sctp_getsockopt_delayed_ack(sk, len, optval, |
5225 | optlen); | 5369 | optlen); |
5226 | break; | 5370 | break; |
5227 | case SCTP_INITMSG: | 5371 | case SCTP_INITMSG: |
@@ -5441,12 +5585,13 @@ pp_found: | |||
5441 | struct sctp_endpoint *ep2; | 5585 | struct sctp_endpoint *ep2; |
5442 | ep2 = sctp_sk(sk2)->ep; | 5586 | ep2 = sctp_sk(sk2)->ep; |
5443 | 5587 | ||
5444 | if (reuse && sk2->sk_reuse && | 5588 | if (sk == sk2 || |
5445 | sk2->sk_state != SCTP_SS_LISTENING) | 5589 | (reuse && sk2->sk_reuse && |
5590 | sk2->sk_state != SCTP_SS_LISTENING)) | ||
5446 | continue; | 5591 | continue; |
5447 | 5592 | ||
5448 | if (sctp_bind_addr_match(&ep2->base.bind_addr, addr, | 5593 | if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, |
5449 | sctp_sk(sk))) { | 5594 | sctp_sk(sk2), sctp_sk(sk))) { |
5450 | ret = (long)sk2; | 5595 | ret = (long)sk2; |
5451 | goto fail_unlock; | 5596 | goto fail_unlock; |
5452 | } | 5597 | } |
@@ -5559,8 +5704,13 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog) | |||
5559 | if (!ep->base.bind_addr.port) { | 5704 | if (!ep->base.bind_addr.port) { |
5560 | if (sctp_autobind(sk)) | 5705 | if (sctp_autobind(sk)) |
5561 | return -EAGAIN; | 5706 | return -EAGAIN; |
5562 | } else | 5707 | } else { |
5708 | if (sctp_get_port(sk, inet_sk(sk)->num)) { | ||
5709 | sk->sk_state = SCTP_SS_CLOSED; | ||
5710 | return -EADDRINUSE; | ||
5711 | } | ||
5563 | sctp_sk(sk)->bind_hash->fastreuse = 0; | 5712 | sctp_sk(sk)->bind_hash->fastreuse = 0; |
5713 | } | ||
5564 | 5714 | ||
5565 | sctp_hash_endpoint(ep); | 5715 | sctp_hash_endpoint(ep); |
5566 | return 0; | 5716 | return 0; |
@@ -5630,7 +5780,7 @@ int sctp_inet_listen(struct socket *sock, int backlog) | |||
5630 | goto out; | 5780 | goto out; |
5631 | 5781 | ||
5632 | /* Allocate HMAC for generating cookie. */ | 5782 | /* Allocate HMAC for generating cookie. */ |
5633 | if (sctp_hmac_alg) { | 5783 | if (!sctp_sk(sk)->hmac && sctp_hmac_alg) { |
5634 | tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC); | 5784 | tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC); |
5635 | if (IS_ERR(tfm)) { | 5785 | if (IS_ERR(tfm)) { |
5636 | if (net_ratelimit()) { | 5786 | if (net_ratelimit()) { |
@@ -5658,7 +5808,8 @@ int sctp_inet_listen(struct socket *sock, int backlog) | |||
5658 | goto cleanup; | 5808 | goto cleanup; |
5659 | 5809 | ||
5660 | /* Store away the transform reference. */ | 5810 | /* Store away the transform reference. */ |
5661 | sctp_sk(sk)->hmac = tfm; | 5811 | if (!sctp_sk(sk)->hmac) |
5812 | sctp_sk(sk)->hmac = tfm; | ||
5662 | out: | 5813 | out: |
5663 | sctp_release_sock(sk); | 5814 | sctp_release_sock(sk); |
5664 | return err; | 5815 | return err; |
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 3f34f61221ec..e745c118f239 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
@@ -100,6 +100,9 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, | |||
100 | INIT_LIST_HEAD(&peer->send_ready); | 100 | INIT_LIST_HEAD(&peer->send_ready); |
101 | INIT_LIST_HEAD(&peer->transports); | 101 | INIT_LIST_HEAD(&peer->transports); |
102 | 102 | ||
103 | peer->T3_rtx_timer.expires = 0; | ||
104 | peer->hb_timer.expires = 0; | ||
105 | |||
103 | setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, | 106 | setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, |
104 | (unsigned long)peer); | 107 | (unsigned long)peer); |
105 | setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, | 108 | setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, |
diff --git a/net/socket.c b/net/socket.c index 66c4a8cf6db9..1ba57d888981 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -90,6 +90,7 @@ | |||
90 | #include <asm/unistd.h> | 90 | #include <asm/unistd.h> |
91 | 91 | ||
92 | #include <net/compat.h> | 92 | #include <net/compat.h> |
93 | #include <net/wext.h> | ||
93 | 94 | ||
94 | #include <net/sock.h> | 95 | #include <net/sock.h> |
95 | #include <linux/netfilter.h> | 96 | #include <linux/netfilter.h> |
@@ -179,9 +180,9 @@ static DEFINE_PER_CPU(int, sockets_in_use) = 0; | |||
179 | * invalid addresses -EFAULT is returned. On a success 0 is returned. | 180 | * invalid addresses -EFAULT is returned. On a success 0 is returned. |
180 | */ | 181 | */ |
181 | 182 | ||
182 | int move_addr_to_kernel(void __user *uaddr, int ulen, void *kaddr) | 183 | int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr) |
183 | { | 184 | { |
184 | if (ulen < 0 || ulen > MAX_SOCK_ADDR) | 185 | if (ulen < 0 || ulen > sizeof(struct sockaddr_storage)) |
185 | return -EINVAL; | 186 | return -EINVAL; |
186 | if (ulen == 0) | 187 | if (ulen == 0) |
187 | return 0; | 188 | return 0; |
@@ -207,7 +208,7 @@ int move_addr_to_kernel(void __user *uaddr, int ulen, void *kaddr) | |||
207 | * specified. Zero is returned for a success. | 208 | * specified. Zero is returned for a success. |
208 | */ | 209 | */ |
209 | 210 | ||
210 | int move_addr_to_user(void *kaddr, int klen, void __user *uaddr, | 211 | int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr, |
211 | int __user *ulen) | 212 | int __user *ulen) |
212 | { | 213 | { |
213 | int err; | 214 | int err; |
@@ -218,7 +219,7 @@ int move_addr_to_user(void *kaddr, int klen, void __user *uaddr, | |||
218 | return err; | 219 | return err; |
219 | if (len > klen) | 220 | if (len > klen) |
220 | len = klen; | 221 | len = klen; |
221 | if (len < 0 || len > MAX_SOCK_ADDR) | 222 | if (len < 0 || len > sizeof(struct sockaddr_storage)) |
222 | return -EINVAL; | 223 | return -EINVAL; |
223 | if (len) { | 224 | if (len) { |
224 | if (audit_sockaddr(klen, kaddr)) | 225 | if (audit_sockaddr(klen, kaddr)) |
@@ -1341,20 +1342,20 @@ out_fd: | |||
1341 | asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen) | 1342 | asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen) |
1342 | { | 1343 | { |
1343 | struct socket *sock; | 1344 | struct socket *sock; |
1344 | char address[MAX_SOCK_ADDR]; | 1345 | struct sockaddr_storage address; |
1345 | int err, fput_needed; | 1346 | int err, fput_needed; |
1346 | 1347 | ||
1347 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | 1348 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
1348 | if (sock) { | 1349 | if (sock) { |
1349 | err = move_addr_to_kernel(umyaddr, addrlen, address); | 1350 | err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address); |
1350 | if (err >= 0) { | 1351 | if (err >= 0) { |
1351 | err = security_socket_bind(sock, | 1352 | err = security_socket_bind(sock, |
1352 | (struct sockaddr *)address, | 1353 | (struct sockaddr *)&address, |
1353 | addrlen); | 1354 | addrlen); |
1354 | if (!err) | 1355 | if (!err) |
1355 | err = sock->ops->bind(sock, | 1356 | err = sock->ops->bind(sock, |
1356 | (struct sockaddr *) | 1357 | (struct sockaddr *) |
1357 | address, addrlen); | 1358 | &address, addrlen); |
1358 | } | 1359 | } |
1359 | fput_light(sock->file, fput_needed); | 1360 | fput_light(sock->file, fput_needed); |
1360 | } | 1361 | } |
@@ -1406,7 +1407,7 @@ asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, | |||
1406 | struct socket *sock, *newsock; | 1407 | struct socket *sock, *newsock; |
1407 | struct file *newfile; | 1408 | struct file *newfile; |
1408 | int err, len, newfd, fput_needed; | 1409 | int err, len, newfd, fput_needed; |
1409 | char address[MAX_SOCK_ADDR]; | 1410 | struct sockaddr_storage address; |
1410 | 1411 | ||
1411 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | 1412 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
1412 | if (!sock) | 1413 | if (!sock) |
@@ -1445,13 +1446,13 @@ asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, | |||
1445 | goto out_fd; | 1446 | goto out_fd; |
1446 | 1447 | ||
1447 | if (upeer_sockaddr) { | 1448 | if (upeer_sockaddr) { |
1448 | if (newsock->ops->getname(newsock, (struct sockaddr *)address, | 1449 | if (newsock->ops->getname(newsock, (struct sockaddr *)&address, |
1449 | &len, 2) < 0) { | 1450 | &len, 2) < 0) { |
1450 | err = -ECONNABORTED; | 1451 | err = -ECONNABORTED; |
1451 | goto out_fd; | 1452 | goto out_fd; |
1452 | } | 1453 | } |
1453 | err = move_addr_to_user(address, len, upeer_sockaddr, | 1454 | err = move_addr_to_user((struct sockaddr *)&address, |
1454 | upeer_addrlen); | 1455 | len, upeer_sockaddr, upeer_addrlen); |
1455 | if (err < 0) | 1456 | if (err < 0) |
1456 | goto out_fd; | 1457 | goto out_fd; |
1457 | } | 1458 | } |
@@ -1494,22 +1495,22 @@ asmlinkage long sys_connect(int fd, struct sockaddr __user *uservaddr, | |||
1494 | int addrlen) | 1495 | int addrlen) |
1495 | { | 1496 | { |
1496 | struct socket *sock; | 1497 | struct socket *sock; |
1497 | char address[MAX_SOCK_ADDR]; | 1498 | struct sockaddr_storage address; |
1498 | int err, fput_needed; | 1499 | int err, fput_needed; |
1499 | 1500 | ||
1500 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | 1501 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
1501 | if (!sock) | 1502 | if (!sock) |
1502 | goto out; | 1503 | goto out; |
1503 | err = move_addr_to_kernel(uservaddr, addrlen, address); | 1504 | err = move_addr_to_kernel(uservaddr, addrlen, (struct sockaddr *)&address); |
1504 | if (err < 0) | 1505 | if (err < 0) |
1505 | goto out_put; | 1506 | goto out_put; |
1506 | 1507 | ||
1507 | err = | 1508 | err = |
1508 | security_socket_connect(sock, (struct sockaddr *)address, addrlen); | 1509 | security_socket_connect(sock, (struct sockaddr *)&address, addrlen); |
1509 | if (err) | 1510 | if (err) |
1510 | goto out_put; | 1511 | goto out_put; |
1511 | 1512 | ||
1512 | err = sock->ops->connect(sock, (struct sockaddr *)address, addrlen, | 1513 | err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen, |
1513 | sock->file->f_flags); | 1514 | sock->file->f_flags); |
1514 | out_put: | 1515 | out_put: |
1515 | fput_light(sock->file, fput_needed); | 1516 | fput_light(sock->file, fput_needed); |
@@ -1526,7 +1527,7 @@ asmlinkage long sys_getsockname(int fd, struct sockaddr __user *usockaddr, | |||
1526 | int __user *usockaddr_len) | 1527 | int __user *usockaddr_len) |
1527 | { | 1528 | { |
1528 | struct socket *sock; | 1529 | struct socket *sock; |
1529 | char address[MAX_SOCK_ADDR]; | 1530 | struct sockaddr_storage address; |
1530 | int len, err, fput_needed; | 1531 | int len, err, fput_needed; |
1531 | 1532 | ||
1532 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | 1533 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
@@ -1537,10 +1538,10 @@ asmlinkage long sys_getsockname(int fd, struct sockaddr __user *usockaddr, | |||
1537 | if (err) | 1538 | if (err) |
1538 | goto out_put; | 1539 | goto out_put; |
1539 | 1540 | ||
1540 | err = sock->ops->getname(sock, (struct sockaddr *)address, &len, 0); | 1541 | err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0); |
1541 | if (err) | 1542 | if (err) |
1542 | goto out_put; | 1543 | goto out_put; |
1543 | err = move_addr_to_user(address, len, usockaddr, usockaddr_len); | 1544 | err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr, usockaddr_len); |
1544 | 1545 | ||
1545 | out_put: | 1546 | out_put: |
1546 | fput_light(sock->file, fput_needed); | 1547 | fput_light(sock->file, fput_needed); |
@@ -1557,7 +1558,7 @@ asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr, | |||
1557 | int __user *usockaddr_len) | 1558 | int __user *usockaddr_len) |
1558 | { | 1559 | { |
1559 | struct socket *sock; | 1560 | struct socket *sock; |
1560 | char address[MAX_SOCK_ADDR]; | 1561 | struct sockaddr_storage address; |
1561 | int len, err, fput_needed; | 1562 | int len, err, fput_needed; |
1562 | 1563 | ||
1563 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | 1564 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
@@ -1569,10 +1570,10 @@ asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr, | |||
1569 | } | 1570 | } |
1570 | 1571 | ||
1571 | err = | 1572 | err = |
1572 | sock->ops->getname(sock, (struct sockaddr *)address, &len, | 1573 | sock->ops->getname(sock, (struct sockaddr *)&address, &len, |
1573 | 1); | 1574 | 1); |
1574 | if (!err) | 1575 | if (!err) |
1575 | err = move_addr_to_user(address, len, usockaddr, | 1576 | err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr, |
1576 | usockaddr_len); | 1577 | usockaddr_len); |
1577 | fput_light(sock->file, fput_needed); | 1578 | fput_light(sock->file, fput_needed); |
1578 | } | 1579 | } |
@@ -1590,7 +1591,7 @@ asmlinkage long sys_sendto(int fd, void __user *buff, size_t len, | |||
1590 | int addr_len) | 1591 | int addr_len) |
1591 | { | 1592 | { |
1592 | struct socket *sock; | 1593 | struct socket *sock; |
1593 | char address[MAX_SOCK_ADDR]; | 1594 | struct sockaddr_storage address; |
1594 | int err; | 1595 | int err; |
1595 | struct msghdr msg; | 1596 | struct msghdr msg; |
1596 | struct iovec iov; | 1597 | struct iovec iov; |
@@ -1609,10 +1610,10 @@ asmlinkage long sys_sendto(int fd, void __user *buff, size_t len, | |||
1609 | msg.msg_controllen = 0; | 1610 | msg.msg_controllen = 0; |
1610 | msg.msg_namelen = 0; | 1611 | msg.msg_namelen = 0; |
1611 | if (addr) { | 1612 | if (addr) { |
1612 | err = move_addr_to_kernel(addr, addr_len, address); | 1613 | err = move_addr_to_kernel(addr, addr_len, (struct sockaddr *)&address); |
1613 | if (err < 0) | 1614 | if (err < 0) |
1614 | goto out_put; | 1615 | goto out_put; |
1615 | msg.msg_name = address; | 1616 | msg.msg_name = (struct sockaddr *)&address; |
1616 | msg.msg_namelen = addr_len; | 1617 | msg.msg_namelen = addr_len; |
1617 | } | 1618 | } |
1618 | if (sock->file->f_flags & O_NONBLOCK) | 1619 | if (sock->file->f_flags & O_NONBLOCK) |
@@ -1648,7 +1649,7 @@ asmlinkage long sys_recvfrom(int fd, void __user *ubuf, size_t size, | |||
1648 | struct socket *sock; | 1649 | struct socket *sock; |
1649 | struct iovec iov; | 1650 | struct iovec iov; |
1650 | struct msghdr msg; | 1651 | struct msghdr msg; |
1651 | char address[MAX_SOCK_ADDR]; | 1652 | struct sockaddr_storage address; |
1652 | int err, err2; | 1653 | int err, err2; |
1653 | int fput_needed; | 1654 | int fput_needed; |
1654 | 1655 | ||
@@ -1662,14 +1663,15 @@ asmlinkage long sys_recvfrom(int fd, void __user *ubuf, size_t size, | |||
1662 | msg.msg_iov = &iov; | 1663 | msg.msg_iov = &iov; |
1663 | iov.iov_len = size; | 1664 | iov.iov_len = size; |
1664 | iov.iov_base = ubuf; | 1665 | iov.iov_base = ubuf; |
1665 | msg.msg_name = address; | 1666 | msg.msg_name = (struct sockaddr *)&address; |
1666 | msg.msg_namelen = MAX_SOCK_ADDR; | 1667 | msg.msg_namelen = sizeof(address); |
1667 | if (sock->file->f_flags & O_NONBLOCK) | 1668 | if (sock->file->f_flags & O_NONBLOCK) |
1668 | flags |= MSG_DONTWAIT; | 1669 | flags |= MSG_DONTWAIT; |
1669 | err = sock_recvmsg(sock, &msg, size, flags); | 1670 | err = sock_recvmsg(sock, &msg, size, flags); |
1670 | 1671 | ||
1671 | if (err >= 0 && addr != NULL) { | 1672 | if (err >= 0 && addr != NULL) { |
1672 | err2 = move_addr_to_user(address, msg.msg_namelen, addr, addr_len); | 1673 | err2 = move_addr_to_user((struct sockaddr *)&address, |
1674 | msg.msg_namelen, addr, addr_len); | ||
1673 | if (err2 < 0) | 1675 | if (err2 < 0) |
1674 | err = err2; | 1676 | err = err2; |
1675 | } | 1677 | } |
@@ -1789,7 +1791,7 @@ asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags) | |||
1789 | struct compat_msghdr __user *msg_compat = | 1791 | struct compat_msghdr __user *msg_compat = |
1790 | (struct compat_msghdr __user *)msg; | 1792 | (struct compat_msghdr __user *)msg; |
1791 | struct socket *sock; | 1793 | struct socket *sock; |
1792 | char address[MAX_SOCK_ADDR]; | 1794 | struct sockaddr_storage address; |
1793 | struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; | 1795 | struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; |
1794 | unsigned char ctl[sizeof(struct cmsghdr) + 20] | 1796 | unsigned char ctl[sizeof(struct cmsghdr) + 20] |
1795 | __attribute__ ((aligned(sizeof(__kernel_size_t)))); | 1797 | __attribute__ ((aligned(sizeof(__kernel_size_t)))); |
@@ -1827,9 +1829,13 @@ asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags) | |||
1827 | 1829 | ||
1828 | /* This will also move the address data into kernel space */ | 1830 | /* This will also move the address data into kernel space */ |
1829 | if (MSG_CMSG_COMPAT & flags) { | 1831 | if (MSG_CMSG_COMPAT & flags) { |
1830 | err = verify_compat_iovec(&msg_sys, iov, address, VERIFY_READ); | 1832 | err = verify_compat_iovec(&msg_sys, iov, |
1833 | (struct sockaddr *)&address, | ||
1834 | VERIFY_READ); | ||
1831 | } else | 1835 | } else |
1832 | err = verify_iovec(&msg_sys, iov, address, VERIFY_READ); | 1836 | err = verify_iovec(&msg_sys, iov, |
1837 | (struct sockaddr *)&address, | ||
1838 | VERIFY_READ); | ||
1833 | if (err < 0) | 1839 | if (err < 0) |
1834 | goto out_freeiov; | 1840 | goto out_freeiov; |
1835 | total_len = err; | 1841 | total_len = err; |
@@ -1900,7 +1906,7 @@ asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, | |||
1900 | int fput_needed; | 1906 | int fput_needed; |
1901 | 1907 | ||
1902 | /* kernel mode address */ | 1908 | /* kernel mode address */ |
1903 | char addr[MAX_SOCK_ADDR]; | 1909 | struct sockaddr_storage addr; |
1904 | 1910 | ||
1905 | /* user mode address pointers */ | 1911 | /* user mode address pointers */ |
1906 | struct sockaddr __user *uaddr; | 1912 | struct sockaddr __user *uaddr; |
@@ -1938,9 +1944,13 @@ asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, | |||
1938 | uaddr = (__force void __user *)msg_sys.msg_name; | 1944 | uaddr = (__force void __user *)msg_sys.msg_name; |
1939 | uaddr_len = COMPAT_NAMELEN(msg); | 1945 | uaddr_len = COMPAT_NAMELEN(msg); |
1940 | if (MSG_CMSG_COMPAT & flags) { | 1946 | if (MSG_CMSG_COMPAT & flags) { |
1941 | err = verify_compat_iovec(&msg_sys, iov, addr, VERIFY_WRITE); | 1947 | err = verify_compat_iovec(&msg_sys, iov, |
1948 | (struct sockaddr *)&addr, | ||
1949 | VERIFY_WRITE); | ||
1942 | } else | 1950 | } else |
1943 | err = verify_iovec(&msg_sys, iov, addr, VERIFY_WRITE); | 1951 | err = verify_iovec(&msg_sys, iov, |
1952 | (struct sockaddr *)&addr, | ||
1953 | VERIFY_WRITE); | ||
1944 | if (err < 0) | 1954 | if (err < 0) |
1945 | goto out_freeiov; | 1955 | goto out_freeiov; |
1946 | total_len = err; | 1956 | total_len = err; |
@@ -1956,7 +1966,8 @@ asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, | |||
1956 | len = err; | 1966 | len = err; |
1957 | 1967 | ||
1958 | if (uaddr != NULL) { | 1968 | if (uaddr != NULL) { |
1959 | err = move_addr_to_user(addr, msg_sys.msg_namelen, uaddr, | 1969 | err = move_addr_to_user((struct sockaddr *)&addr, |
1970 | msg_sys.msg_namelen, uaddr, | ||
1960 | uaddr_len); | 1971 | uaddr_len); |
1961 | if (err < 0) | 1972 | if (err < 0) |
1962 | goto out_freeiov; | 1973 | goto out_freeiov; |
@@ -2210,10 +2221,19 @@ static long compat_sock_ioctl(struct file *file, unsigned cmd, | |||
2210 | { | 2221 | { |
2211 | struct socket *sock = file->private_data; | 2222 | struct socket *sock = file->private_data; |
2212 | int ret = -ENOIOCTLCMD; | 2223 | int ret = -ENOIOCTLCMD; |
2224 | struct sock *sk; | ||
2225 | struct net *net; | ||
2226 | |||
2227 | sk = sock->sk; | ||
2228 | net = sock_net(sk); | ||
2213 | 2229 | ||
2214 | if (sock->ops->compat_ioctl) | 2230 | if (sock->ops->compat_ioctl) |
2215 | ret = sock->ops->compat_ioctl(sock, cmd, arg); | 2231 | ret = sock->ops->compat_ioctl(sock, cmd, arg); |
2216 | 2232 | ||
2233 | if (ret == -ENOIOCTLCMD && | ||
2234 | (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) | ||
2235 | ret = compat_wext_handle_ioctl(net, cmd, arg); | ||
2236 | |||
2217 | return ret; | 2237 | return ret; |
2218 | } | 2238 | } |
2219 | #endif | 2239 | #endif |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 834a83199bdf..853a4142cea1 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -33,8 +33,6 @@ | |||
33 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING | 33 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
34 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | 34 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
35 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 35 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
36 | * | ||
37 | * $Id$ | ||
38 | */ | 36 | */ |
39 | 37 | ||
40 | 38 | ||
diff --git a/net/sysctl_net.c b/net/sysctl_net.c index b4f0525f91af..007c1a6708ee 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c | |||
@@ -4,7 +4,6 @@ | |||
4 | * Begun April 1, 1996, Mike Shaver. | 4 | * Begun April 1, 1996, Mike Shaver. |
5 | * Added /proc/sys/net directories for each protocol family. [MS] | 5 | * Added /proc/sys/net directories for each protocol family. [MS] |
6 | * | 6 | * |
7 | * $Log: sysctl_net.c,v $ | ||
8 | * Revision 1.2 1996/05/08 20:24:40 shaver | 7 | * Revision 1.2 1996/05/08 20:24:40 shaver |
9 | * Added bits for NET_BRIDGE and the NET_IPV4_ARP stuff and | 8 | * Added bits for NET_BRIDGE and the NET_IPV4_ARP stuff and |
10 | * NET_IPV4_IP_FORWARD. | 9 | * NET_IPV4_IP_FORWARD. |
@@ -40,6 +39,27 @@ static struct ctl_table_root net_sysctl_root = { | |||
40 | .lookup = net_ctl_header_lookup, | 39 | .lookup = net_ctl_header_lookup, |
41 | }; | 40 | }; |
42 | 41 | ||
42 | static LIST_HEAD(net_sysctl_ro_tables); | ||
43 | static struct list_head *net_ctl_ro_header_lookup(struct ctl_table_root *root, | ||
44 | struct nsproxy *namespaces) | ||
45 | { | ||
46 | return &net_sysctl_ro_tables; | ||
47 | } | ||
48 | |||
49 | static int net_ctl_ro_header_perms(struct ctl_table_root *root, | ||
50 | struct nsproxy *namespaces, struct ctl_table *table) | ||
51 | { | ||
52 | if (namespaces->net_ns == &init_net) | ||
53 | return table->mode; | ||
54 | else | ||
55 | return table->mode & ~0222; | ||
56 | } | ||
57 | |||
58 | static struct ctl_table_root net_sysctl_ro_root = { | ||
59 | .lookup = net_ctl_ro_header_lookup, | ||
60 | .permissions = net_ctl_ro_header_perms, | ||
61 | }; | ||
62 | |||
43 | static int sysctl_net_init(struct net *net) | 63 | static int sysctl_net_init(struct net *net) |
44 | { | 64 | { |
45 | INIT_LIST_HEAD(&net->sysctl_table_headers); | 65 | INIT_LIST_HEAD(&net->sysctl_table_headers); |
@@ -64,6 +84,7 @@ static __init int sysctl_init(void) | |||
64 | if (ret) | 84 | if (ret) |
65 | goto out; | 85 | goto out; |
66 | register_sysctl_root(&net_sysctl_root); | 86 | register_sysctl_root(&net_sysctl_root); |
87 | register_sysctl_root(&net_sysctl_ro_root); | ||
67 | out: | 88 | out: |
68 | return ret; | 89 | return ret; |
69 | } | 90 | } |
@@ -80,6 +101,14 @@ struct ctl_table_header *register_net_sysctl_table(struct net *net, | |||
80 | } | 101 | } |
81 | EXPORT_SYMBOL_GPL(register_net_sysctl_table); | 102 | EXPORT_SYMBOL_GPL(register_net_sysctl_table); |
82 | 103 | ||
104 | struct ctl_table_header *register_net_sysctl_rotable(const | ||
105 | struct ctl_path *path, struct ctl_table *table) | ||
106 | { | ||
107 | return __register_sysctl_paths(&net_sysctl_ro_root, | ||
108 | &init_nsproxy, path, table); | ||
109 | } | ||
110 | EXPORT_SYMBOL_GPL(register_net_sysctl_rotable); | ||
111 | |||
83 | void unregister_net_sysctl_table(struct ctl_table_header *header) | 112 | void unregister_net_sysctl_table(struct ctl_table_header *header) |
84 | { | 113 | { |
85 | unregister_sysctl_table(header); | 114 | unregister_sysctl_table(header); |
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index e7880172ef19..b1ff16aa4bdb 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -276,7 +276,7 @@ static void bclink_send_nack(struct node *n_ptr) | |||
276 | if (buf) { | 276 | if (buf) { |
277 | msg = buf_msg(buf); | 277 | msg = buf_msg(buf); |
278 | msg_init(msg, BCAST_PROTOCOL, STATE_MSG, | 278 | msg_init(msg, BCAST_PROTOCOL, STATE_MSG, |
279 | TIPC_OK, INT_H_SIZE, n_ptr->addr); | 279 | INT_H_SIZE, n_ptr->addr); |
280 | msg_set_mc_netid(msg, tipc_net_id); | 280 | msg_set_mc_netid(msg, tipc_net_id); |
281 | msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in)); | 281 | msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in)); |
282 | msg_set_bcgap_after(msg, n_ptr->bclink.gap_after); | 282 | msg_set_bcgap_after(msg, n_ptr->bclink.gap_after); |
@@ -571,7 +571,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf, | |||
571 | assert(tipc_cltr_bcast_nodes.count != 0); | 571 | assert(tipc_cltr_bcast_nodes.count != 0); |
572 | bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count); | 572 | bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count); |
573 | msg = buf_msg(buf); | 573 | msg = buf_msg(buf); |
574 | msg_set_non_seq(msg); | 574 | msg_set_non_seq(msg, 1); |
575 | msg_set_mc_netid(msg, tipc_net_id); | 575 | msg_set_mc_netid(msg, tipc_net_id); |
576 | } | 576 | } |
577 | 577 | ||
@@ -611,7 +611,7 @@ swap: | |||
611 | bcbearer->bpairs[bp_index].secondary = p; | 611 | bcbearer->bpairs[bp_index].secondary = p; |
612 | update: | 612 | update: |
613 | if (bcbearer->remains_new.count == 0) | 613 | if (bcbearer->remains_new.count == 0) |
614 | return TIPC_OK; | 614 | return 0; |
615 | 615 | ||
616 | bcbearer->remains = bcbearer->remains_new; | 616 | bcbearer->remains = bcbearer->remains_new; |
617 | } | 617 | } |
@@ -620,7 +620,7 @@ update: | |||
620 | 620 | ||
621 | bcbearer->bearer.publ.blocked = 1; | 621 | bcbearer->bearer.publ.blocked = 1; |
622 | bcl->stats.bearer_congs++; | 622 | bcl->stats.bearer_congs++; |
623 | return ~TIPC_OK; | 623 | return 1; |
624 | } | 624 | } |
625 | 625 | ||
626 | /** | 626 | /** |
@@ -756,7 +756,7 @@ int tipc_bclink_reset_stats(void) | |||
756 | spin_lock_bh(&bc_lock); | 756 | spin_lock_bh(&bc_lock); |
757 | memset(&bcl->stats, 0, sizeof(bcl->stats)); | 757 | memset(&bcl->stats, 0, sizeof(bcl->stats)); |
758 | spin_unlock_bh(&bc_lock); | 758 | spin_unlock_bh(&bc_lock); |
759 | return TIPC_OK; | 759 | return 0; |
760 | } | 760 | } |
761 | 761 | ||
762 | int tipc_bclink_set_queue_limits(u32 limit) | 762 | int tipc_bclink_set_queue_limits(u32 limit) |
@@ -769,7 +769,7 @@ int tipc_bclink_set_queue_limits(u32 limit) | |||
769 | spin_lock_bh(&bc_lock); | 769 | spin_lock_bh(&bc_lock); |
770 | tipc_link_set_queue_limits(bcl, limit); | 770 | tipc_link_set_queue_limits(bcl, limit); |
771 | spin_unlock_bh(&bc_lock); | 771 | spin_unlock_bh(&bc_lock); |
772 | return TIPC_OK; | 772 | return 0; |
773 | } | 773 | } |
774 | 774 | ||
775 | int tipc_bclink_init(void) | 775 | int tipc_bclink_init(void) |
@@ -810,7 +810,7 @@ int tipc_bclink_init(void) | |||
810 | tipc_printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE); | 810 | tipc_printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE); |
811 | } | 811 | } |
812 | 812 | ||
813 | return TIPC_OK; | 813 | return 0; |
814 | } | 814 | } |
815 | 815 | ||
816 | void tipc_bclink_stop(void) | 816 | void tipc_bclink_stop(void) |
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 271a375b49b7..6a9aba3edd08 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -370,7 +370,7 @@ void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest) | |||
370 | */ | 370 | */ |
371 | static int bearer_push(struct bearer *b_ptr) | 371 | static int bearer_push(struct bearer *b_ptr) |
372 | { | 372 | { |
373 | u32 res = TIPC_OK; | 373 | u32 res = 0; |
374 | struct link *ln, *tln; | 374 | struct link *ln, *tln; |
375 | 375 | ||
376 | if (b_ptr->publ.blocked) | 376 | if (b_ptr->publ.blocked) |
@@ -607,7 +607,7 @@ int tipc_block_bearer(const char *name) | |||
607 | } | 607 | } |
608 | spin_unlock_bh(&b_ptr->publ.lock); | 608 | spin_unlock_bh(&b_ptr->publ.lock); |
609 | read_unlock_bh(&tipc_net_lock); | 609 | read_unlock_bh(&tipc_net_lock); |
610 | return TIPC_OK; | 610 | return 0; |
611 | } | 611 | } |
612 | 612 | ||
613 | /** | 613 | /** |
@@ -645,7 +645,7 @@ static int bearer_disable(const char *name) | |||
645 | } | 645 | } |
646 | spin_unlock_bh(&b_ptr->publ.lock); | 646 | spin_unlock_bh(&b_ptr->publ.lock); |
647 | memset(b_ptr, 0, sizeof(struct bearer)); | 647 | memset(b_ptr, 0, sizeof(struct bearer)); |
648 | return TIPC_OK; | 648 | return 0; |
649 | } | 649 | } |
650 | 650 | ||
651 | int tipc_disable_bearer(const char *name) | 651 | int tipc_disable_bearer(const char *name) |
@@ -668,7 +668,7 @@ int tipc_bearer_init(void) | |||
668 | tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC); | 668 | tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC); |
669 | media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC); | 669 | media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC); |
670 | if (tipc_bearers && media_list) { | 670 | if (tipc_bearers && media_list) { |
671 | res = TIPC_OK; | 671 | res = 0; |
672 | } else { | 672 | } else { |
673 | kfree(tipc_bearers); | 673 | kfree(tipc_bearers); |
674 | kfree(media_list); | 674 | kfree(media_list); |
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c index 4bb3404f610b..46ee6c58532d 100644 --- a/net/tipc/cluster.c +++ b/net/tipc/cluster.c | |||
@@ -238,7 +238,7 @@ static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest) | |||
238 | if (buf) { | 238 | if (buf) { |
239 | msg = buf_msg(buf); | 239 | msg = buf_msg(buf); |
240 | memset((char *)msg, 0, size); | 240 | memset((char *)msg, 0, size); |
241 | msg_init(msg, ROUTE_DISTRIBUTOR, 0, TIPC_OK, INT_H_SIZE, dest); | 241 | msg_init(msg, ROUTE_DISTRIBUTOR, 0, INT_H_SIZE, dest); |
242 | } | 242 | } |
243 | return buf; | 243 | return buf; |
244 | } | 244 | } |
@@ -571,6 +571,6 @@ exit: | |||
571 | int tipc_cltr_init(void) | 571 | int tipc_cltr_init(void) |
572 | { | 572 | { |
573 | tipc_highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves; | 573 | tipc_highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves; |
574 | return tipc_cltr_create(tipc_own_addr) ? TIPC_OK : -ENOMEM; | 574 | return tipc_cltr_create(tipc_own_addr) ? 0 : -ENOMEM; |
575 | } | 575 | } |
576 | 576 | ||
diff --git a/net/tipc/config.c b/net/tipc/config.c index c71337a22d33..ca3544d030c7 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * net/tipc/config.c: TIPC configuration management code | 2 | * net/tipc/config.c: TIPC configuration management code |
3 | * | 3 | * |
4 | * Copyright (c) 2002-2006, Ericsson AB | 4 | * Copyright (c) 2002-2006, Ericsson AB |
5 | * Copyright (c) 2004-2006, Wind River Systems | 5 | * Copyright (c) 2004-2007, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | 8 | * Redistribution and use in source and binary forms, with or without |
@@ -293,7 +293,6 @@ static struct sk_buff *cfg_set_own_addr(void) | |||
293 | if (tipc_mode == TIPC_NET_MODE) | 293 | if (tipc_mode == TIPC_NET_MODE) |
294 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | 294 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED |
295 | " (cannot change node address once assigned)"); | 295 | " (cannot change node address once assigned)"); |
296 | tipc_own_addr = addr; | ||
297 | 296 | ||
298 | /* | 297 | /* |
299 | * Must release all spinlocks before calling start_net() because | 298 | * Must release all spinlocks before calling start_net() because |
@@ -306,7 +305,7 @@ static struct sk_buff *cfg_set_own_addr(void) | |||
306 | */ | 305 | */ |
307 | 306 | ||
308 | spin_unlock_bh(&config_lock); | 307 | spin_unlock_bh(&config_lock); |
309 | tipc_core_start_net(); | 308 | tipc_core_start_net(addr); |
310 | spin_lock_bh(&config_lock); | 309 | spin_lock_bh(&config_lock); |
311 | return tipc_cfg_reply_none(); | 310 | return tipc_cfg_reply_none(); |
312 | } | 311 | } |
@@ -529,7 +528,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area | |||
529 | break; | 528 | break; |
530 | #endif | 529 | #endif |
531 | case TIPC_CMD_SET_LOG_SIZE: | 530 | case TIPC_CMD_SET_LOG_SIZE: |
532 | rep_tlv_buf = tipc_log_resize(req_tlv_area, req_tlv_space); | 531 | rep_tlv_buf = tipc_log_resize_cmd(req_tlv_area, req_tlv_space); |
533 | break; | 532 | break; |
534 | case TIPC_CMD_DUMP_LOG: | 533 | case TIPC_CMD_DUMP_LOG: |
535 | rep_tlv_buf = tipc_log_dump(); | 534 | rep_tlv_buf = tipc_log_dump(); |
@@ -602,6 +601,10 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area | |||
602 | case TIPC_CMD_GET_NETID: | 601 | case TIPC_CMD_GET_NETID: |
603 | rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id); | 602 | rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id); |
604 | break; | 603 | break; |
604 | case TIPC_CMD_NOT_NET_ADMIN: | ||
605 | rep_tlv_buf = | ||
606 | tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN); | ||
607 | break; | ||
605 | default: | 608 | default: |
606 | rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | 609 | rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED |
607 | " (unknown command)"); | 610 | " (unknown command)"); |
diff --git a/net/tipc/core.c b/net/tipc/core.c index 740aac5cdfb6..3256bd7d398f 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c | |||
@@ -49,7 +49,7 @@ | |||
49 | #include "config.h" | 49 | #include "config.h" |
50 | 50 | ||
51 | 51 | ||
52 | #define TIPC_MOD_VER "1.6.3" | 52 | #define TIPC_MOD_VER "1.6.4" |
53 | 53 | ||
54 | #ifndef CONFIG_TIPC_ZONES | 54 | #ifndef CONFIG_TIPC_ZONES |
55 | #define CONFIG_TIPC_ZONES 3 | 55 | #define CONFIG_TIPC_ZONES 3 |
@@ -117,11 +117,11 @@ void tipc_core_stop_net(void) | |||
117 | * start_net - start TIPC networking sub-systems | 117 | * start_net - start TIPC networking sub-systems |
118 | */ | 118 | */ |
119 | 119 | ||
120 | int tipc_core_start_net(void) | 120 | int tipc_core_start_net(unsigned long addr) |
121 | { | 121 | { |
122 | int res; | 122 | int res; |
123 | 123 | ||
124 | if ((res = tipc_net_start()) || | 124 | if ((res = tipc_net_start(addr)) || |
125 | (res = tipc_eth_media_start())) { | 125 | (res = tipc_eth_media_start())) { |
126 | tipc_core_stop_net(); | 126 | tipc_core_stop_net(); |
127 | } | 127 | } |
@@ -164,8 +164,7 @@ int tipc_core_start(void) | |||
164 | tipc_mode = TIPC_NODE_MODE; | 164 | tipc_mode = TIPC_NODE_MODE; |
165 | 165 | ||
166 | if ((res = tipc_handler_start()) || | 166 | if ((res = tipc_handler_start()) || |
167 | (res = tipc_ref_table_init(tipc_max_ports + tipc_max_subscriptions, | 167 | (res = tipc_ref_table_init(tipc_max_ports, tipc_random)) || |
168 | tipc_random)) || | ||
169 | (res = tipc_reg_start()) || | 168 | (res = tipc_reg_start()) || |
170 | (res = tipc_nametbl_init()) || | 169 | (res = tipc_nametbl_init()) || |
171 | (res = tipc_k_signal((Handler)tipc_subscr_start, 0)) || | 170 | (res = tipc_k_signal((Handler)tipc_subscr_start, 0)) || |
@@ -182,7 +181,7 @@ static int __init tipc_init(void) | |||
182 | { | 181 | { |
183 | int res; | 182 | int res; |
184 | 183 | ||
185 | tipc_log_reinit(CONFIG_TIPC_LOG); | 184 | tipc_log_resize(CONFIG_TIPC_LOG); |
186 | info("Activated (version " TIPC_MOD_VER | 185 | info("Activated (version " TIPC_MOD_VER |
187 | " compiled " __DATE__ " " __TIME__ ")\n"); | 186 | " compiled " __DATE__ " " __TIME__ ")\n"); |
188 | 187 | ||
@@ -209,7 +208,7 @@ static void __exit tipc_exit(void) | |||
209 | tipc_core_stop_net(); | 208 | tipc_core_stop_net(); |
210 | tipc_core_stop(); | 209 | tipc_core_stop(); |
211 | info("Deactivated\n"); | 210 | info("Deactivated\n"); |
212 | tipc_log_stop(); | 211 | tipc_log_resize(0); |
213 | } | 212 | } |
214 | 213 | ||
215 | module_init(tipc_init); | 214 | module_init(tipc_init); |
diff --git a/net/tipc/core.h b/net/tipc/core.h index 5a0e4878d3b7..a881f92a8537 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * net/tipc/core.h: Include file for TIPC global declarations | 2 | * net/tipc/core.h: Include file for TIPC global declarations |
3 | * | 3 | * |
4 | * Copyright (c) 2005-2006, Ericsson AB | 4 | * Copyright (c) 2005-2006, Ericsson AB |
5 | * Copyright (c) 2005-2006, Wind River Systems | 5 | * Copyright (c) 2005-2007, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | 8 | * Redistribution and use in source and binary forms, with or without |
@@ -59,84 +59,108 @@ | |||
59 | #include <linux/vmalloc.h> | 59 | #include <linux/vmalloc.h> |
60 | 60 | ||
61 | /* | 61 | /* |
62 | * TIPC debugging code | 62 | * TIPC sanity test macros |
63 | */ | 63 | */ |
64 | 64 | ||
65 | #define assert(i) BUG_ON(!(i)) | 65 | #define assert(i) BUG_ON(!(i)) |
66 | 66 | ||
67 | struct tipc_msg; | ||
68 | extern struct print_buf *TIPC_NULL, *TIPC_CONS, *TIPC_LOG; | ||
69 | extern struct print_buf *TIPC_TEE(struct print_buf *, struct print_buf *); | ||
70 | void tipc_msg_print(struct print_buf*,struct tipc_msg *,const char*); | ||
71 | void tipc_printf(struct print_buf *, const char *fmt, ...); | ||
72 | void tipc_dump(struct print_buf*,const char *fmt, ...); | ||
73 | |||
74 | #ifdef CONFIG_TIPC_DEBUG | ||
75 | |||
76 | /* | 67 | /* |
77 | * TIPC debug support included: | 68 | * TIPC system monitoring code |
78 | * - system messages are printed to TIPC_OUTPUT print buffer | ||
79 | * - debug messages are printed to DBG_OUTPUT print buffer | ||
80 | */ | 69 | */ |
81 | 70 | ||
82 | #define err(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_ERR "TIPC: " fmt, ## arg) | 71 | /* |
83 | #define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_WARNING "TIPC: " fmt, ## arg) | 72 | * TIPC's print buffer subsystem supports the following print buffers: |
84 | #define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg) | 73 | * |
74 | * TIPC_NULL : null buffer (i.e. print nowhere) | ||
75 | * TIPC_CONS : system console | ||
76 | * TIPC_LOG : TIPC log buffer | ||
77 | * &buf : user-defined buffer (struct print_buf *) | ||
78 | * | ||
79 | * Note: TIPC_LOG is configured to echo its output to the system console; | ||
80 | * user-defined buffers can be configured to do the same thing. | ||
81 | */ | ||
85 | 82 | ||
86 | #define dbg(fmt, arg...) do {if (DBG_OUTPUT != TIPC_NULL) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0) | 83 | extern struct print_buf *const TIPC_NULL; |
87 | #define msg_dbg(msg, txt) do {if (DBG_OUTPUT != TIPC_NULL) tipc_msg_print(DBG_OUTPUT, msg, txt);} while(0) | 84 | extern struct print_buf *const TIPC_CONS; |
88 | #define dump(fmt, arg...) do {if (DBG_OUTPUT != TIPC_NULL) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0) | 85 | extern struct print_buf *const TIPC_LOG; |
89 | 86 | ||
87 | void tipc_printf(struct print_buf *, const char *fmt, ...); | ||
90 | 88 | ||
91 | /* | 89 | /* |
92 | * By default, TIPC_OUTPUT is defined to be system console and TIPC log buffer, | 90 | * TIPC_OUTPUT is the destination print buffer for system messages. |
93 | * while DBG_OUTPUT is the null print buffer. These defaults can be changed | ||
94 | * here, or on a per .c file basis, by redefining these symbols. The following | ||
95 | * print buffer options are available: | ||
96 | * | ||
97 | * TIPC_NULL : null buffer (i.e. print nowhere) | ||
98 | * TIPC_CONS : system console | ||
99 | * TIPC_LOG : TIPC log buffer | ||
100 | * &buf : user-defined buffer (struct print_buf *) | ||
101 | * TIPC_TEE(&buf_a,&buf_b) : list of buffers (eg. TIPC_TEE(TIPC_CONS,TIPC_LOG)) | ||
102 | */ | 91 | */ |
103 | 92 | ||
104 | #ifndef TIPC_OUTPUT | 93 | #ifndef TIPC_OUTPUT |
105 | #define TIPC_OUTPUT TIPC_TEE(TIPC_CONS,TIPC_LOG) | 94 | #define TIPC_OUTPUT TIPC_LOG |
106 | #endif | ||
107 | |||
108 | #ifndef DBG_OUTPUT | ||
109 | #define DBG_OUTPUT TIPC_NULL | ||
110 | #endif | 95 | #endif |
111 | 96 | ||
112 | #else | ||
113 | |||
114 | /* | 97 | /* |
115 | * TIPC debug support not included: | 98 | * TIPC can be configured to send system messages to TIPC_OUTPUT |
116 | * - system messages are printed to system console | 99 | * or to the system console only. |
117 | * - debug messages are not printed | ||
118 | */ | 100 | */ |
119 | 101 | ||
102 | #ifdef CONFIG_TIPC_DEBUG | ||
103 | |||
104 | #define err(fmt, arg...) tipc_printf(TIPC_OUTPUT, \ | ||
105 | KERN_ERR "TIPC: " fmt, ## arg) | ||
106 | #define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, \ | ||
107 | KERN_WARNING "TIPC: " fmt, ## arg) | ||
108 | #define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, \ | ||
109 | KERN_NOTICE "TIPC: " fmt, ## arg) | ||
110 | |||
111 | #else | ||
112 | |||
120 | #define err(fmt, arg...) printk(KERN_ERR "TIPC: " fmt , ## arg) | 113 | #define err(fmt, arg...) printk(KERN_ERR "TIPC: " fmt , ## arg) |
121 | #define info(fmt, arg...) printk(KERN_INFO "TIPC: " fmt , ## arg) | 114 | #define info(fmt, arg...) printk(KERN_INFO "TIPC: " fmt , ## arg) |
122 | #define warn(fmt, arg...) printk(KERN_WARNING "TIPC: " fmt , ## arg) | 115 | #define warn(fmt, arg...) printk(KERN_WARNING "TIPC: " fmt , ## arg) |
123 | 116 | ||
124 | #define dbg(fmt, arg...) do {} while (0) | 117 | #endif |
125 | #define msg_dbg(msg,txt) do {} while (0) | ||
126 | #define dump(fmt,arg...) do {} while (0) | ||
127 | 118 | ||
119 | /* | ||
120 | * DBG_OUTPUT is the destination print buffer for debug messages. | ||
121 | * It defaults to the the null print buffer, but can be redefined | ||
122 | * (typically in the individual .c files being debugged) to allow | ||
123 | * selected debug messages to be generated where needed. | ||
124 | */ | ||
125 | |||
126 | #ifndef DBG_OUTPUT | ||
127 | #define DBG_OUTPUT TIPC_NULL | ||
128 | #endif | ||
128 | 129 | ||
129 | /* | 130 | /* |
130 | * TIPC_OUTPUT is defined to be the system console, while DBG_OUTPUT is | 131 | * TIPC can be configured to send debug messages to the specified print buffer |
131 | * the null print buffer. Thes ensures that any system or debug messages | 132 | * (typically DBG_OUTPUT) or to suppress them entirely. |
132 | * that are generated without using the above macros are handled correctly. | ||
133 | */ | 133 | */ |
134 | 134 | ||
135 | #undef TIPC_OUTPUT | 135 | #ifdef CONFIG_TIPC_DEBUG |
136 | #define TIPC_OUTPUT TIPC_CONS | ||
137 | 136 | ||
138 | #undef DBG_OUTPUT | 137 | #define dbg(fmt, arg...) \ |
139 | #define DBG_OUTPUT TIPC_NULL | 138 | do { \ |
139 | if (DBG_OUTPUT != TIPC_NULL) \ | ||
140 | tipc_printf(DBG_OUTPUT, fmt, ## arg); \ | ||
141 | } while (0) | ||
142 | #define msg_dbg(msg, txt) \ | ||
143 | do { \ | ||
144 | if (DBG_OUTPUT != TIPC_NULL) \ | ||
145 | tipc_msg_dbg(DBG_OUTPUT, msg, txt); \ | ||
146 | } while (0) | ||
147 | #define dump(fmt, arg...) \ | ||
148 | do { \ | ||
149 | if (DBG_OUTPUT != TIPC_NULL) \ | ||
150 | tipc_dump_dbg(DBG_OUTPUT, fmt, ##arg); \ | ||
151 | } while (0) | ||
152 | |||
153 | void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *); | ||
154 | void tipc_dump_dbg(struct print_buf *, const char *fmt, ...); | ||
155 | |||
156 | #else | ||
157 | |||
158 | #define dbg(fmt, arg...) do {} while (0) | ||
159 | #define msg_dbg(msg, txt) do {} while (0) | ||
160 | #define dump(fmt, arg...) do {} while (0) | ||
161 | |||
162 | #define tipc_msg_dbg(...) do {} while (0) | ||
163 | #define tipc_dump_dbg(...) do {} while (0) | ||
140 | 164 | ||
141 | #endif | 165 | #endif |
142 | 166 | ||
@@ -178,7 +202,7 @@ extern atomic_t tipc_user_count; | |||
178 | 202 | ||
179 | extern int tipc_core_start(void); | 203 | extern int tipc_core_start(void); |
180 | extern void tipc_core_stop(void); | 204 | extern void tipc_core_stop(void); |
181 | extern int tipc_core_start_net(void); | 205 | extern int tipc_core_start_net(unsigned long addr); |
182 | extern void tipc_core_stop_net(void); | 206 | extern void tipc_core_stop_net(void); |
183 | extern int tipc_handler_start(void); | 207 | extern int tipc_handler_start(void); |
184 | extern void tipc_handler_stop(void); | 208 | extern void tipc_handler_stop(void); |
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c index e809d2a2ce06..29ecae851668 100644 --- a/net/tipc/dbg.c +++ b/net/tipc/dbg.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * net/tipc/dbg.c: TIPC print buffer routines for debugging | 2 | * net/tipc/dbg.c: TIPC print buffer routines for debugging |
3 | * | 3 | * |
4 | * Copyright (c) 1996-2006, Ericsson AB | 4 | * Copyright (c) 1996-2006, Ericsson AB |
5 | * Copyright (c) 2005-2006, Wind River Systems | 5 | * Copyright (c) 2005-2007, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | 8 | * Redistribution and use in source and binary forms, with or without |
@@ -38,17 +38,43 @@ | |||
38 | #include "config.h" | 38 | #include "config.h" |
39 | #include "dbg.h" | 39 | #include "dbg.h" |
40 | 40 | ||
41 | static char print_string[TIPC_PB_MAX_STR]; | 41 | /* |
42 | static DEFINE_SPINLOCK(print_lock); | 42 | * TIPC pre-defines the following print buffers: |
43 | * | ||
44 | * TIPC_NULL : null buffer (i.e. print nowhere) | ||
45 | * TIPC_CONS : system console | ||
46 | * TIPC_LOG : TIPC log buffer | ||
47 | * | ||
48 | * Additional user-defined print buffers are also permitted. | ||
49 | */ | ||
43 | 50 | ||
44 | static struct print_buf null_buf = { NULL, 0, NULL, NULL }; | 51 | static struct print_buf null_buf = { NULL, 0, NULL, 0 }; |
45 | struct print_buf *TIPC_NULL = &null_buf; | 52 | struct print_buf *const TIPC_NULL = &null_buf; |
46 | 53 | ||
47 | static struct print_buf cons_buf = { NULL, 0, NULL, NULL }; | 54 | static struct print_buf cons_buf = { NULL, 0, NULL, 1 }; |
48 | struct print_buf *TIPC_CONS = &cons_buf; | 55 | struct print_buf *const TIPC_CONS = &cons_buf; |
49 | 56 | ||
50 | static struct print_buf log_buf = { NULL, 0, NULL, NULL }; | 57 | static struct print_buf log_buf = { NULL, 0, NULL, 1 }; |
51 | struct print_buf *TIPC_LOG = &log_buf; | 58 | struct print_buf *const TIPC_LOG = &log_buf; |
59 | |||
60 | /* | ||
61 | * Locking policy when using print buffers. | ||
62 | * | ||
63 | * 1) tipc_printf() uses 'print_lock' to protect against concurrent access to | ||
64 | * 'print_string' when writing to a print buffer. This also protects against | ||
65 | * concurrent writes to the print buffer being written to. | ||
66 | * | ||
67 | * 2) tipc_dump() and tipc_log_XXX() leverage the aforementioned | ||
68 | * use of 'print_lock' to protect against all types of concurrent operations | ||
69 | * on their associated print buffer (not just write operations). | ||
70 | * | ||
71 | * Note: All routines of the form tipc_printbuf_XXX() are lock-free, and rely | ||
72 | * on the caller to prevent simultaneous use of the print buffer(s) being | ||
73 | * manipulated. | ||
74 | */ | ||
75 | |||
76 | static char print_string[TIPC_PB_MAX_STR]; | ||
77 | static DEFINE_SPINLOCK(print_lock); | ||
52 | 78 | ||
53 | 79 | ||
54 | #define FORMAT(PTR,LEN,FMT) \ | 80 | #define FORMAT(PTR,LEN,FMT) \ |
@@ -60,27 +86,14 @@ struct print_buf *TIPC_LOG = &log_buf; | |||
60 | *(PTR + LEN) = '\0';\ | 86 | *(PTR + LEN) = '\0';\ |
61 | } | 87 | } |
62 | 88 | ||
63 | /* | ||
64 | * Locking policy when using print buffers. | ||
65 | * | ||
66 | * The following routines use 'print_lock' for protection: | ||
67 | * 1) tipc_printf() - to protect its print buffer(s) and 'print_string' | ||
68 | * 2) TIPC_TEE() - to protect its print buffer(s) | ||
69 | * 3) tipc_dump() - to protect its print buffer(s) and 'print_string' | ||
70 | * 4) tipc_log_XXX() - to protect TIPC_LOG | ||
71 | * | ||
72 | * All routines of the form tipc_printbuf_XXX() rely on the caller to prevent | ||
73 | * simultaneous use of the print buffer(s) being manipulated. | ||
74 | */ | ||
75 | |||
76 | /** | 89 | /** |
77 | * tipc_printbuf_init - initialize print buffer to empty | 90 | * tipc_printbuf_init - initialize print buffer to empty |
78 | * @pb: pointer to print buffer structure | 91 | * @pb: pointer to print buffer structure |
79 | * @raw: pointer to character array used by print buffer | 92 | * @raw: pointer to character array used by print buffer |
80 | * @size: size of character array | 93 | * @size: size of character array |
81 | * | 94 | * |
82 | * Makes the print buffer a null device that discards anything written to it | 95 | * Note: If the character array is too small (or absent), the print buffer |
83 | * if the character array is too small (or absent). | 96 | * becomes a null device that discards anything written to it. |
84 | */ | 97 | */ |
85 | 98 | ||
86 | void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size) | 99 | void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size) |
@@ -88,13 +101,13 @@ void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size) | |||
88 | pb->buf = raw; | 101 | pb->buf = raw; |
89 | pb->crs = raw; | 102 | pb->crs = raw; |
90 | pb->size = size; | 103 | pb->size = size; |
91 | pb->next = NULL; | 104 | pb->echo = 0; |
92 | 105 | ||
93 | if (size < TIPC_PB_MIN_SIZE) { | 106 | if (size < TIPC_PB_MIN_SIZE) { |
94 | pb->buf = NULL; | 107 | pb->buf = NULL; |
95 | } else if (raw) { | 108 | } else if (raw) { |
96 | pb->buf[0] = 0; | 109 | pb->buf[0] = 0; |
97 | pb->buf[size-1] = ~0; | 110 | pb->buf[size - 1] = ~0; |
98 | } | 111 | } |
99 | } | 112 | } |
100 | 113 | ||
@@ -105,7 +118,11 @@ void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size) | |||
105 | 118 | ||
106 | void tipc_printbuf_reset(struct print_buf *pb) | 119 | void tipc_printbuf_reset(struct print_buf *pb) |
107 | { | 120 | { |
108 | tipc_printbuf_init(pb, pb->buf, pb->size); | 121 | if (pb->buf) { |
122 | pb->crs = pb->buf; | ||
123 | pb->buf[0] = 0; | ||
124 | pb->buf[pb->size - 1] = ~0; | ||
125 | } | ||
109 | } | 126 | } |
110 | 127 | ||
111 | /** | 128 | /** |
@@ -141,7 +158,7 @@ int tipc_printbuf_validate(struct print_buf *pb) | |||
141 | 158 | ||
142 | if (pb->buf[pb->size - 1] == 0) { | 159 | if (pb->buf[pb->size - 1] == 0) { |
143 | cp_buf = kmalloc(pb->size, GFP_ATOMIC); | 160 | cp_buf = kmalloc(pb->size, GFP_ATOMIC); |
144 | if (cp_buf != NULL){ | 161 | if (cp_buf) { |
145 | tipc_printbuf_init(&cb, cp_buf, pb->size); | 162 | tipc_printbuf_init(&cb, cp_buf, pb->size); |
146 | tipc_printbuf_move(&cb, pb); | 163 | tipc_printbuf_move(&cb, pb); |
147 | tipc_printbuf_move(pb, &cb); | 164 | tipc_printbuf_move(pb, &cb); |
@@ -179,15 +196,16 @@ void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from) | |||
179 | } | 196 | } |
180 | 197 | ||
181 | if (pb_to->size < pb_from->size) { | 198 | if (pb_to->size < pb_from->size) { |
182 | tipc_printbuf_reset(pb_to); | 199 | strcpy(pb_to->buf, "*** PRINT BUFFER MOVE ERROR ***"); |
183 | tipc_printf(pb_to, "*** PRINT BUFFER MOVE ERROR ***"); | 200 | pb_to->buf[pb_to->size - 1] = ~0; |
201 | pb_to->crs = strchr(pb_to->buf, 0); | ||
184 | return; | 202 | return; |
185 | } | 203 | } |
186 | 204 | ||
187 | /* Copy data from char after cursor to end (if used) */ | 205 | /* Copy data from char after cursor to end (if used) */ |
188 | 206 | ||
189 | len = pb_from->buf + pb_from->size - pb_from->crs - 2; | 207 | len = pb_from->buf + pb_from->size - pb_from->crs - 2; |
190 | if ((pb_from->buf[pb_from->size-1] == 0) && (len > 0)) { | 208 | if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) { |
191 | strcpy(pb_to->buf, pb_from->crs + 1); | 209 | strcpy(pb_to->buf, pb_from->crs + 1); |
192 | pb_to->crs = pb_to->buf + len; | 210 | pb_to->crs = pb_to->buf + len; |
193 | } else | 211 | } else |
@@ -203,8 +221,8 @@ void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from) | |||
203 | } | 221 | } |
204 | 222 | ||
205 | /** | 223 | /** |
206 | * tipc_printf - append formatted output to print buffer chain | 224 | * tipc_printf - append formatted output to print buffer |
207 | * @pb: pointer to chain of print buffers (may be NULL) | 225 | * @pb: pointer to print buffer |
208 | * @fmt: formatted info to be printed | 226 | * @fmt: formatted info to be printed |
209 | */ | 227 | */ |
210 | 228 | ||
@@ -213,68 +231,40 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...) | |||
213 | int chars_to_add; | 231 | int chars_to_add; |
214 | int chars_left; | 232 | int chars_left; |
215 | char save_char; | 233 | char save_char; |
216 | struct print_buf *pb_next; | ||
217 | 234 | ||
218 | spin_lock_bh(&print_lock); | 235 | spin_lock_bh(&print_lock); |
236 | |||
219 | FORMAT(print_string, chars_to_add, fmt); | 237 | FORMAT(print_string, chars_to_add, fmt); |
220 | if (chars_to_add >= TIPC_PB_MAX_STR) | 238 | if (chars_to_add >= TIPC_PB_MAX_STR) |
221 | strcpy(print_string, "*** PRINT BUFFER STRING TOO LONG ***"); | 239 | strcpy(print_string, "*** PRINT BUFFER STRING TOO LONG ***"); |
222 | 240 | ||
223 | while (pb) { | 241 | if (pb->buf) { |
224 | if (pb == TIPC_CONS) | 242 | chars_left = pb->buf + pb->size - pb->crs - 1; |
225 | printk(print_string); | 243 | if (chars_to_add <= chars_left) { |
226 | else if (pb->buf) { | 244 | strcpy(pb->crs, print_string); |
227 | chars_left = pb->buf + pb->size - pb->crs - 1; | 245 | pb->crs += chars_to_add; |
228 | if (chars_to_add <= chars_left) { | 246 | } else if (chars_to_add >= (pb->size - 1)) { |
229 | strcpy(pb->crs, print_string); | 247 | strcpy(pb->buf, print_string + chars_to_add + 1 |
230 | pb->crs += chars_to_add; | 248 | - pb->size); |
231 | } else if (chars_to_add >= (pb->size - 1)) { | 249 | pb->crs = pb->buf + pb->size - 1; |
232 | strcpy(pb->buf, print_string + chars_to_add + 1 | 250 | } else { |
233 | - pb->size); | 251 | strcpy(pb->buf, print_string + chars_left); |
234 | pb->crs = pb->buf + pb->size - 1; | 252 | save_char = print_string[chars_left]; |
235 | } else { | 253 | print_string[chars_left] = 0; |
236 | strcpy(pb->buf, print_string + chars_left); | 254 | strcpy(pb->crs, print_string); |
237 | save_char = print_string[chars_left]; | 255 | print_string[chars_left] = save_char; |
238 | print_string[chars_left] = 0; | 256 | pb->crs = pb->buf + chars_to_add - chars_left; |
239 | strcpy(pb->crs, print_string); | ||
240 | print_string[chars_left] = save_char; | ||
241 | pb->crs = pb->buf + chars_to_add - chars_left; | ||
242 | } | ||
243 | } | 257 | } |
244 | pb_next = pb->next; | ||
245 | pb->next = NULL; | ||
246 | pb = pb_next; | ||
247 | } | 258 | } |
248 | spin_unlock_bh(&print_lock); | ||
249 | } | ||
250 | 259 | ||
251 | /** | 260 | if (pb->echo) |
252 | * TIPC_TEE - perform next output operation on both print buffers | 261 | printk(print_string); |
253 | * @b0: pointer to chain of print buffers (may be NULL) | ||
254 | * @b1: pointer to print buffer to add to chain | ||
255 | * | ||
256 | * Returns pointer to print buffer chain. | ||
257 | */ | ||
258 | 262 | ||
259 | struct print_buf *TIPC_TEE(struct print_buf *b0, struct print_buf *b1) | ||
260 | { | ||
261 | struct print_buf *pb = b0; | ||
262 | |||
263 | if (!b0 || (b0 == b1)) | ||
264 | return b1; | ||
265 | |||
266 | spin_lock_bh(&print_lock); | ||
267 | while (pb->next) { | ||
268 | if ((pb->next == b1) || (pb->next == b0)) | ||
269 | pb->next = pb->next->next; | ||
270 | else | ||
271 | pb = pb->next; | ||
272 | } | ||
273 | pb->next = b1; | ||
274 | spin_unlock_bh(&print_lock); | 263 | spin_unlock_bh(&print_lock); |
275 | return b0; | ||
276 | } | 264 | } |
277 | 265 | ||
266 | #ifdef CONFIG_TIPC_DEBUG | ||
267 | |||
278 | /** | 268 | /** |
279 | * print_to_console - write string of bytes to console in multiple chunks | 269 | * print_to_console - write string of bytes to console in multiple chunks |
280 | */ | 270 | */ |
@@ -321,72 +311,66 @@ static void printbuf_dump(struct print_buf *pb) | |||
321 | } | 311 | } |
322 | 312 | ||
323 | /** | 313 | /** |
324 | * tipc_dump - dump non-console print buffer(s) to console | 314 | * tipc_dump_dbg - dump (non-console) print buffer to console |
325 | * @pb: pointer to chain of print buffers | 315 | * @pb: pointer to print buffer |
326 | */ | 316 | */ |
327 | 317 | ||
328 | void tipc_dump(struct print_buf *pb, const char *fmt, ...) | 318 | void tipc_dump_dbg(struct print_buf *pb, const char *fmt, ...) |
329 | { | 319 | { |
330 | struct print_buf *pb_next; | ||
331 | int len; | 320 | int len; |
332 | 321 | ||
322 | if (pb == TIPC_CONS) | ||
323 | return; | ||
324 | |||
333 | spin_lock_bh(&print_lock); | 325 | spin_lock_bh(&print_lock); |
326 | |||
334 | FORMAT(print_string, len, fmt); | 327 | FORMAT(print_string, len, fmt); |
335 | printk(print_string); | 328 | printk(print_string); |
336 | 329 | ||
337 | for (; pb; pb = pb->next) { | 330 | printk("\n---- Start of %s log dump ----\n\n", |
338 | if (pb != TIPC_CONS) { | 331 | (pb == TIPC_LOG) ? "global" : "local"); |
339 | printk("\n---- Start of %s log dump ----\n\n", | 332 | printbuf_dump(pb); |
340 | (pb == TIPC_LOG) ? "global" : "local"); | 333 | tipc_printbuf_reset(pb); |
341 | printbuf_dump(pb); | 334 | printk("\n---- End of dump ----\n"); |
342 | tipc_printbuf_reset(pb); | 335 | |
343 | printk("\n---- End of dump ----\n"); | ||
344 | } | ||
345 | pb_next = pb->next; | ||
346 | pb->next = NULL; | ||
347 | pb = pb_next; | ||
348 | } | ||
349 | spin_unlock_bh(&print_lock); | 336 | spin_unlock_bh(&print_lock); |
350 | } | 337 | } |
351 | 338 | ||
339 | #endif | ||
340 | |||
352 | /** | 341 | /** |
353 | * tipc_log_stop - free up TIPC log print buffer | 342 | * tipc_log_resize - change the size of the TIPC log buffer |
343 | * @log_size: print buffer size to use | ||
354 | */ | 344 | */ |
355 | 345 | ||
356 | void tipc_log_stop(void) | 346 | int tipc_log_resize(int log_size) |
357 | { | 347 | { |
348 | int res = 0; | ||
349 | |||
358 | spin_lock_bh(&print_lock); | 350 | spin_lock_bh(&print_lock); |
359 | if (TIPC_LOG->buf) { | 351 | if (TIPC_LOG->buf) { |
360 | kfree(TIPC_LOG->buf); | 352 | kfree(TIPC_LOG->buf); |
361 | TIPC_LOG->buf = NULL; | 353 | TIPC_LOG->buf = NULL; |
362 | } | 354 | } |
363 | spin_unlock_bh(&print_lock); | ||
364 | } | ||
365 | |||
366 | /** | ||
367 | * tipc_log_reinit - (re)initialize TIPC log print buffer | ||
368 | * @log_size: print buffer size to use | ||
369 | */ | ||
370 | |||
371 | void tipc_log_reinit(int log_size) | ||
372 | { | ||
373 | tipc_log_stop(); | ||
374 | |||
375 | if (log_size) { | 355 | if (log_size) { |
376 | if (log_size < TIPC_PB_MIN_SIZE) | 356 | if (log_size < TIPC_PB_MIN_SIZE) |
377 | log_size = TIPC_PB_MIN_SIZE; | 357 | log_size = TIPC_PB_MIN_SIZE; |
378 | spin_lock_bh(&print_lock); | 358 | res = TIPC_LOG->echo; |
379 | tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC), | 359 | tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC), |
380 | log_size); | 360 | log_size); |
381 | spin_unlock_bh(&print_lock); | 361 | TIPC_LOG->echo = res; |
362 | res = !TIPC_LOG->buf; | ||
382 | } | 363 | } |
364 | spin_unlock_bh(&print_lock); | ||
365 | |||
366 | return res; | ||
383 | } | 367 | } |
384 | 368 | ||
385 | /** | 369 | /** |
386 | * tipc_log_resize - reconfigure size of TIPC log buffer | 370 | * tipc_log_resize_cmd - reconfigure size of TIPC log buffer |
387 | */ | 371 | */ |
388 | 372 | ||
389 | struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space) | 373 | struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space) |
390 | { | 374 | { |
391 | u32 value; | 375 | u32 value; |
392 | 376 | ||
@@ -397,7 +381,9 @@ struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space) | |||
397 | if (value != delimit(value, 0, 32768)) | 381 | if (value != delimit(value, 0, 32768)) |
398 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE | 382 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE |
399 | " (log size must be 0-32768)"); | 383 | " (log size must be 0-32768)"); |
400 | tipc_log_reinit(value); | 384 | if (tipc_log_resize(value)) |
385 | return tipc_cfg_reply_error_string( | ||
386 | "unable to create specified log (log size is now 0)"); | ||
401 | return tipc_cfg_reply_none(); | 387 | return tipc_cfg_reply_none(); |
402 | } | 388 | } |
403 | 389 | ||
@@ -410,27 +396,32 @@ struct sk_buff *tipc_log_dump(void) | |||
410 | struct sk_buff *reply; | 396 | struct sk_buff *reply; |
411 | 397 | ||
412 | spin_lock_bh(&print_lock); | 398 | spin_lock_bh(&print_lock); |
413 | if (!TIPC_LOG->buf) | 399 | if (!TIPC_LOG->buf) { |
400 | spin_unlock_bh(&print_lock); | ||
414 | reply = tipc_cfg_reply_ultra_string("log not activated\n"); | 401 | reply = tipc_cfg_reply_ultra_string("log not activated\n"); |
415 | else if (tipc_printbuf_empty(TIPC_LOG)) | 402 | } else if (tipc_printbuf_empty(TIPC_LOG)) { |
403 | spin_unlock_bh(&print_lock); | ||
416 | reply = tipc_cfg_reply_ultra_string("log is empty\n"); | 404 | reply = tipc_cfg_reply_ultra_string("log is empty\n"); |
405 | } | ||
417 | else { | 406 | else { |
418 | struct tlv_desc *rep_tlv; | 407 | struct tlv_desc *rep_tlv; |
419 | struct print_buf pb; | 408 | struct print_buf pb; |
420 | int str_len; | 409 | int str_len; |
421 | 410 | ||
422 | str_len = min(TIPC_LOG->size, 32768u); | 411 | str_len = min(TIPC_LOG->size, 32768u); |
412 | spin_unlock_bh(&print_lock); | ||
423 | reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len)); | 413 | reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len)); |
424 | if (reply) { | 414 | if (reply) { |
425 | rep_tlv = (struct tlv_desc *)reply->data; | 415 | rep_tlv = (struct tlv_desc *)reply->data; |
426 | tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), str_len); | 416 | tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), str_len); |
417 | spin_lock_bh(&print_lock); | ||
427 | tipc_printbuf_move(&pb, TIPC_LOG); | 418 | tipc_printbuf_move(&pb, TIPC_LOG); |
419 | spin_unlock_bh(&print_lock); | ||
428 | str_len = strlen(TLV_DATA(rep_tlv)) + 1; | 420 | str_len = strlen(TLV_DATA(rep_tlv)) + 1; |
429 | skb_put(reply, TLV_SPACE(str_len)); | 421 | skb_put(reply, TLV_SPACE(str_len)); |
430 | TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); | 422 | TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); |
431 | } | 423 | } |
432 | } | 424 | } |
433 | spin_unlock_bh(&print_lock); | ||
434 | return reply; | 425 | return reply; |
435 | } | 426 | } |
436 | 427 | ||
diff --git a/net/tipc/dbg.h b/net/tipc/dbg.h index c01b085000e0..5ef1bc8f64ef 100644 --- a/net/tipc/dbg.h +++ b/net/tipc/dbg.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * net/tipc/dbg.h: Include file for TIPC print buffer routines | 2 | * net/tipc/dbg.h: Include file for TIPC print buffer routines |
3 | * | 3 | * |
4 | * Copyright (c) 1997-2006, Ericsson AB | 4 | * Copyright (c) 1997-2006, Ericsson AB |
5 | * Copyright (c) 2005-2006, Wind River Systems | 5 | * Copyright (c) 2005-2007, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | 8 | * Redistribution and use in source and binary forms, with or without |
@@ -42,14 +42,14 @@ | |||
42 | * @buf: pointer to character array containing print buffer contents | 42 | * @buf: pointer to character array containing print buffer contents |
43 | * @size: size of character array | 43 | * @size: size of character array |
44 | * @crs: pointer to first unused space in character array (i.e. final NUL) | 44 | * @crs: pointer to first unused space in character array (i.e. final NUL) |
45 | * @next: used to link print buffers when printing to more than one at a time | 45 | * @echo: echo output to system console if non-zero |
46 | */ | 46 | */ |
47 | 47 | ||
48 | struct print_buf { | 48 | struct print_buf { |
49 | char *buf; | 49 | char *buf; |
50 | u32 size; | 50 | u32 size; |
51 | char *crs; | 51 | char *crs; |
52 | struct print_buf *next; | 52 | int echo; |
53 | }; | 53 | }; |
54 | 54 | ||
55 | #define TIPC_PB_MIN_SIZE 64 /* minimum size for a print buffer's array */ | 55 | #define TIPC_PB_MIN_SIZE 64 /* minimum size for a print buffer's array */ |
@@ -61,10 +61,10 @@ int tipc_printbuf_empty(struct print_buf *pb); | |||
61 | int tipc_printbuf_validate(struct print_buf *pb); | 61 | int tipc_printbuf_validate(struct print_buf *pb); |
62 | void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from); | 62 | void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from); |
63 | 63 | ||
64 | void tipc_log_reinit(int log_size); | 64 | int tipc_log_resize(int log_size); |
65 | void tipc_log_stop(void); | ||
66 | 65 | ||
67 | struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space); | 66 | struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, |
67 | int req_tlv_space); | ||
68 | struct sk_buff *tipc_log_dump(void); | 68 | struct sk_buff *tipc_log_dump(void); |
69 | 69 | ||
70 | #endif | 70 | #endif |
diff --git a/net/tipc/discover.c b/net/tipc/discover.c index 5d643e5721eb..1657f0e795ff 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c | |||
@@ -120,9 +120,8 @@ static struct sk_buff *tipc_disc_init_msg(u32 type, | |||
120 | 120 | ||
121 | if (buf) { | 121 | if (buf) { |
122 | msg = buf_msg(buf); | 122 | msg = buf_msg(buf); |
123 | msg_init(msg, LINK_CONFIG, type, TIPC_OK, DSC_H_SIZE, | 123 | msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain); |
124 | dest_domain); | 124 | msg_set_non_seq(msg, 1); |
125 | msg_set_non_seq(msg); | ||
126 | msg_set_req_links(msg, req_links); | 125 | msg_set_req_links(msg, req_links); |
127 | msg_set_dest_domain(msg, dest_domain); | 126 | msg_set_dest_domain(msg, dest_domain); |
128 | msg_set_bc_netid(msg, tipc_net_id); | 127 | msg_set_bc_netid(msg, tipc_net_id); |
@@ -156,11 +155,11 @@ static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr, | |||
156 | /** | 155 | /** |
157 | * tipc_disc_recv_msg - handle incoming link setup message (request or response) | 156 | * tipc_disc_recv_msg - handle incoming link setup message (request or response) |
158 | * @buf: buffer containing message | 157 | * @buf: buffer containing message |
158 | * @b_ptr: bearer that message arrived on | ||
159 | */ | 159 | */ |
160 | 160 | ||
161 | void tipc_disc_recv_msg(struct sk_buff *buf) | 161 | void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr) |
162 | { | 162 | { |
163 | struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle; | ||
164 | struct link *link; | 163 | struct link *link; |
165 | struct tipc_media_addr media_addr; | 164 | struct tipc_media_addr media_addr; |
166 | struct tipc_msg *msg = buf_msg(buf); | 165 | struct tipc_msg *msg = buf_msg(buf); |
@@ -200,9 +199,8 @@ void tipc_disc_recv_msg(struct sk_buff *buf) | |||
200 | dbg(" in own cluster\n"); | 199 | dbg(" in own cluster\n"); |
201 | if (n_ptr == NULL) { | 200 | if (n_ptr == NULL) { |
202 | n_ptr = tipc_node_create(orig); | 201 | n_ptr = tipc_node_create(orig); |
203 | } | 202 | if (!n_ptr) |
204 | if (n_ptr == NULL) { | 203 | return; |
205 | return; | ||
206 | } | 204 | } |
207 | spin_lock_bh(&n_ptr->lock); | 205 | spin_lock_bh(&n_ptr->lock); |
208 | link = n_ptr->links[b_ptr->identity]; | 206 | link = n_ptr->links[b_ptr->identity]; |
diff --git a/net/tipc/discover.h b/net/tipc/discover.h index 9fd7587b143a..c36eaeb7d5d0 100644 --- a/net/tipc/discover.h +++ b/net/tipc/discover.h | |||
@@ -48,7 +48,7 @@ struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr, | |||
48 | void tipc_disc_update_link_req(struct link_req *req); | 48 | void tipc_disc_update_link_req(struct link_req *req); |
49 | void tipc_disc_stop_link_req(struct link_req *req); | 49 | void tipc_disc_stop_link_req(struct link_req *req); |
50 | 50 | ||
51 | void tipc_disc_recv_msg(struct sk_buff *buf); | 51 | void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr); |
52 | 52 | ||
53 | void tipc_disc_link_event(u32 addr, char *name, int up); | 53 | void tipc_disc_link_event(u32 addr, char *name, int up); |
54 | #if 0 | 54 | #if 0 |
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c index 9cd35eec3e7f..fe43ef7dd7e3 100644 --- a/net/tipc/eth_media.c +++ b/net/tipc/eth_media.c | |||
@@ -82,7 +82,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr, | |||
82 | dev->dev_addr, clone->len); | 82 | dev->dev_addr, clone->len); |
83 | dev_queue_xmit(clone); | 83 | dev_queue_xmit(clone); |
84 | } | 84 | } |
85 | return TIPC_OK; | 85 | return 0; |
86 | } | 86 | } |
87 | 87 | ||
88 | /** | 88 | /** |
@@ -101,7 +101,7 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev, | |||
101 | struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv; | 101 | struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv; |
102 | u32 size; | 102 | u32 size; |
103 | 103 | ||
104 | if (dev_net(dev) != &init_net) { | 104 | if (!net_eq(dev_net(dev), &init_net)) { |
105 | kfree_skb(buf); | 105 | kfree_skb(buf); |
106 | return 0; | 106 | return 0; |
107 | } | 107 | } |
@@ -113,12 +113,12 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev, | |||
113 | if (likely(buf->len == size)) { | 113 | if (likely(buf->len == size)) { |
114 | buf->next = NULL; | 114 | buf->next = NULL; |
115 | tipc_recv_msg(buf, eb_ptr->bearer); | 115 | tipc_recv_msg(buf, eb_ptr->bearer); |
116 | return TIPC_OK; | 116 | return 0; |
117 | } | 117 | } |
118 | } | 118 | } |
119 | } | 119 | } |
120 | kfree_skb(buf); | 120 | kfree_skb(buf); |
121 | return TIPC_OK; | 121 | return 0; |
122 | } | 122 | } |
123 | 123 | ||
124 | /** | 124 | /** |
@@ -198,7 +198,7 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt, | |||
198 | struct eth_bearer *eb_ptr = ð_bearers[0]; | 198 | struct eth_bearer *eb_ptr = ð_bearers[0]; |
199 | struct eth_bearer *stop = ð_bearers[MAX_ETH_BEARERS]; | 199 | struct eth_bearer *stop = ð_bearers[MAX_ETH_BEARERS]; |
200 | 200 | ||
201 | if (dev_net(dev) != &init_net) | 201 | if (!net_eq(dev_net(dev), &init_net)) |
202 | return NOTIFY_DONE; | 202 | return NOTIFY_DONE; |
203 | 203 | ||
204 | while ((eb_ptr->dev != dev)) { | 204 | while ((eb_ptr->dev != dev)) { |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 2a26a16e269f..d60113ba4b1b 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -51,6 +51,12 @@ | |||
51 | 51 | ||
52 | 52 | ||
53 | /* | 53 | /* |
54 | * Out-of-range value for link session numbers | ||
55 | */ | ||
56 | |||
57 | #define INVALID_SESSION 0x10000 | ||
58 | |||
59 | /* | ||
54 | * Limit for deferred reception queue: | 60 | * Limit for deferred reception queue: |
55 | */ | 61 | */ |
56 | 62 | ||
@@ -147,9 +153,21 @@ static void link_print(struct link *l_ptr, struct print_buf *buf, | |||
147 | 153 | ||
148 | #define LINK_LOG_BUF_SIZE 0 | 154 | #define LINK_LOG_BUF_SIZE 0 |
149 | 155 | ||
150 | #define dbg_link(fmt, arg...) do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0) | 156 | #define dbg_link(fmt, arg...) \ |
151 | #define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) tipc_msg_print(&l_ptr->print_buf, msg, txt); } while(0) | 157 | do { \ |
152 | #define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0) | 158 | if (LINK_LOG_BUF_SIZE) \ |
159 | tipc_printf(&l_ptr->print_buf, fmt, ## arg); \ | ||
160 | } while (0) | ||
161 | #define dbg_link_msg(msg, txt) \ | ||
162 | do { \ | ||
163 | if (LINK_LOG_BUF_SIZE) \ | ||
164 | tipc_msg_dbg(&l_ptr->print_buf, msg, txt); \ | ||
165 | } while (0) | ||
166 | #define dbg_link_state(txt) \ | ||
167 | do { \ | ||
168 | if (LINK_LOG_BUF_SIZE) \ | ||
169 | link_print(l_ptr, &l_ptr->print_buf, txt); \ | ||
170 | } while (0) | ||
153 | #define dbg_link_dump() do { \ | 171 | #define dbg_link_dump() do { \ |
154 | if (LINK_LOG_BUF_SIZE) { \ | 172 | if (LINK_LOG_BUF_SIZE) { \ |
155 | tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \ | 173 | tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \ |
@@ -450,9 +468,9 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer, | |||
450 | 468 | ||
451 | l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg; | 469 | l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg; |
452 | msg = l_ptr->pmsg; | 470 | msg = l_ptr->pmsg; |
453 | msg_init(msg, LINK_PROTOCOL, RESET_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr); | 471 | msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr); |
454 | msg_set_size(msg, sizeof(l_ptr->proto_msg)); | 472 | msg_set_size(msg, sizeof(l_ptr->proto_msg)); |
455 | msg_set_session(msg, tipc_random); | 473 | msg_set_session(msg, (tipc_random & 0xffff)); |
456 | msg_set_bearer_id(msg, b_ptr->identity); | 474 | msg_set_bearer_id(msg, b_ptr->identity); |
457 | strcpy((char *)msg_data(msg), if_name); | 475 | strcpy((char *)msg_data(msg), if_name); |
458 | 476 | ||
@@ -693,10 +711,10 @@ void tipc_link_reset(struct link *l_ptr) | |||
693 | u32 checkpoint = l_ptr->next_in_no; | 711 | u32 checkpoint = l_ptr->next_in_no; |
694 | int was_active_link = tipc_link_is_active(l_ptr); | 712 | int was_active_link = tipc_link_is_active(l_ptr); |
695 | 713 | ||
696 | msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1); | 714 | msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff)); |
697 | 715 | ||
698 | /* Link is down, accept any session: */ | 716 | /* Link is down, accept any session */ |
699 | l_ptr->peer_session = 0; | 717 | l_ptr->peer_session = INVALID_SESSION; |
700 | 718 | ||
701 | /* Prepare for max packet size negotiation */ | 719 | /* Prepare for max packet size negotiation */ |
702 | link_init_max_pkt(l_ptr); | 720 | link_init_max_pkt(l_ptr); |
@@ -1110,7 +1128,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf) | |||
1110 | 1128 | ||
1111 | if (bundler) { | 1129 | if (bundler) { |
1112 | msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG, | 1130 | msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG, |
1113 | TIPC_OK, INT_H_SIZE, l_ptr->addr); | 1131 | INT_H_SIZE, l_ptr->addr); |
1114 | skb_copy_to_linear_data(bundler, &bundler_hdr, | 1132 | skb_copy_to_linear_data(bundler, &bundler_hdr, |
1115 | INT_H_SIZE); | 1133 | INT_H_SIZE); |
1116 | skb_trim(bundler, INT_H_SIZE); | 1134 | skb_trim(bundler, INT_H_SIZE); |
@@ -1374,7 +1392,7 @@ again: | |||
1374 | 1392 | ||
1375 | msg_dbg(hdr, ">FRAGMENTING>"); | 1393 | msg_dbg(hdr, ">FRAGMENTING>"); |
1376 | msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, | 1394 | msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, |
1377 | TIPC_OK, INT_H_SIZE, msg_destnode(hdr)); | 1395 | INT_H_SIZE, msg_destnode(hdr)); |
1378 | msg_set_link_selector(&fragm_hdr, sender->publ.ref); | 1396 | msg_set_link_selector(&fragm_hdr, sender->publ.ref); |
1379 | msg_set_size(&fragm_hdr, max_pkt); | 1397 | msg_set_size(&fragm_hdr, max_pkt); |
1380 | msg_set_fragm_no(&fragm_hdr, 1); | 1398 | msg_set_fragm_no(&fragm_hdr, 1); |
@@ -1543,7 +1561,7 @@ u32 tipc_link_push_packet(struct link *l_ptr) | |||
1543 | l_ptr->retransm_queue_head = mod(++r_q_head); | 1561 | l_ptr->retransm_queue_head = mod(++r_q_head); |
1544 | l_ptr->retransm_queue_size = --r_q_size; | 1562 | l_ptr->retransm_queue_size = --r_q_size; |
1545 | l_ptr->stats.retransmitted++; | 1563 | l_ptr->stats.retransmitted++; |
1546 | return TIPC_OK; | 1564 | return 0; |
1547 | } else { | 1565 | } else { |
1548 | l_ptr->stats.bearer_congs++; | 1566 | l_ptr->stats.bearer_congs++; |
1549 | msg_dbg(buf_msg(buf), "|>DEF-RETR>"); | 1567 | msg_dbg(buf_msg(buf), "|>DEF-RETR>"); |
@@ -1562,7 +1580,7 @@ u32 tipc_link_push_packet(struct link *l_ptr) | |||
1562 | l_ptr->unacked_window = 0; | 1580 | l_ptr->unacked_window = 0; |
1563 | buf_discard(buf); | 1581 | buf_discard(buf); |
1564 | l_ptr->proto_msg_queue = NULL; | 1582 | l_ptr->proto_msg_queue = NULL; |
1565 | return TIPC_OK; | 1583 | return 0; |
1566 | } else { | 1584 | } else { |
1567 | msg_dbg(buf_msg(buf), "|>DEF-PROT>"); | 1585 | msg_dbg(buf_msg(buf), "|>DEF-PROT>"); |
1568 | l_ptr->stats.bearer_congs++; | 1586 | l_ptr->stats.bearer_congs++; |
@@ -1586,7 +1604,7 @@ u32 tipc_link_push_packet(struct link *l_ptr) | |||
1586 | msg_set_type(msg, CLOSED_MSG); | 1604 | msg_set_type(msg, CLOSED_MSG); |
1587 | msg_dbg(msg, ">PUSH-DATA>"); | 1605 | msg_dbg(msg, ">PUSH-DATA>"); |
1588 | l_ptr->next_out = buf->next; | 1606 | l_ptr->next_out = buf->next; |
1589 | return TIPC_OK; | 1607 | return 0; |
1590 | } else { | 1608 | } else { |
1591 | msg_dbg(msg, "|PUSH-DATA|"); | 1609 | msg_dbg(msg, "|PUSH-DATA|"); |
1592 | l_ptr->stats.bearer_congs++; | 1610 | l_ptr->stats.bearer_congs++; |
@@ -1610,8 +1628,8 @@ void tipc_link_push_queue(struct link *l_ptr) | |||
1610 | 1628 | ||
1611 | do { | 1629 | do { |
1612 | res = tipc_link_push_packet(l_ptr); | 1630 | res = tipc_link_push_packet(l_ptr); |
1613 | } | 1631 | } while (!res); |
1614 | while (res == TIPC_OK); | 1632 | |
1615 | if (res == PUSH_FAILED) | 1633 | if (res == PUSH_FAILED) |
1616 | tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); | 1634 | tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); |
1617 | } | 1635 | } |
@@ -1651,7 +1669,7 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf) | |||
1651 | struct tipc_msg *msg = buf_msg(buf); | 1669 | struct tipc_msg *msg = buf_msg(buf); |
1652 | 1670 | ||
1653 | warn("Retransmission failure on link <%s>\n", l_ptr->name); | 1671 | warn("Retransmission failure on link <%s>\n", l_ptr->name); |
1654 | tipc_msg_print(TIPC_OUTPUT, msg, ">RETR-FAIL>"); | 1672 | tipc_msg_dbg(TIPC_OUTPUT, msg, ">RETR-FAIL>"); |
1655 | 1673 | ||
1656 | if (l_ptr->addr) { | 1674 | if (l_ptr->addr) { |
1657 | 1675 | ||
@@ -1748,21 +1766,6 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf, | |||
1748 | l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0; | 1766 | l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0; |
1749 | } | 1767 | } |
1750 | 1768 | ||
1751 | /* | ||
1752 | * link_recv_non_seq: Receive packets which are outside | ||
1753 | * the link sequence flow | ||
1754 | */ | ||
1755 | |||
1756 | static void link_recv_non_seq(struct sk_buff *buf) | ||
1757 | { | ||
1758 | struct tipc_msg *msg = buf_msg(buf); | ||
1759 | |||
1760 | if (msg_user(msg) == LINK_CONFIG) | ||
1761 | tipc_disc_recv_msg(buf); | ||
1762 | else | ||
1763 | tipc_bclink_recv_pkt(buf); | ||
1764 | } | ||
1765 | |||
1766 | /** | 1769 | /** |
1767 | * link_insert_deferred_queue - insert deferred messages back into receive chain | 1770 | * link_insert_deferred_queue - insert deferred messages back into receive chain |
1768 | */ | 1771 | */ |
@@ -1839,7 +1842,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr) | |||
1839 | { | 1842 | { |
1840 | read_lock_bh(&tipc_net_lock); | 1843 | read_lock_bh(&tipc_net_lock); |
1841 | while (head) { | 1844 | while (head) { |
1842 | struct bearer *b_ptr; | 1845 | struct bearer *b_ptr = (struct bearer *)tb_ptr; |
1843 | struct node *n_ptr; | 1846 | struct node *n_ptr; |
1844 | struct link *l_ptr; | 1847 | struct link *l_ptr; |
1845 | struct sk_buff *crs; | 1848 | struct sk_buff *crs; |
@@ -1850,9 +1853,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr) | |||
1850 | u32 released = 0; | 1853 | u32 released = 0; |
1851 | int type; | 1854 | int type; |
1852 | 1855 | ||
1853 | b_ptr = (struct bearer *)tb_ptr; | ||
1854 | TIPC_SKB_CB(buf)->handle = b_ptr; | ||
1855 | |||
1856 | head = head->next; | 1856 | head = head->next; |
1857 | 1857 | ||
1858 | /* Ensure message is well-formed */ | 1858 | /* Ensure message is well-formed */ |
@@ -1871,7 +1871,10 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr) | |||
1871 | msg = buf_msg(buf); | 1871 | msg = buf_msg(buf); |
1872 | 1872 | ||
1873 | if (unlikely(msg_non_seq(msg))) { | 1873 | if (unlikely(msg_non_seq(msg))) { |
1874 | link_recv_non_seq(buf); | 1874 | if (msg_user(msg) == LINK_CONFIG) |
1875 | tipc_disc_recv_msg(buf, b_ptr); | ||
1876 | else | ||
1877 | tipc_bclink_recv_pkt(buf); | ||
1875 | continue; | 1878 | continue; |
1876 | } | 1879 | } |
1877 | 1880 | ||
@@ -1978,8 +1981,6 @@ deliver: | |||
1978 | if (link_recv_changeover_msg(&l_ptr, &buf)) { | 1981 | if (link_recv_changeover_msg(&l_ptr, &buf)) { |
1979 | msg = buf_msg(buf); | 1982 | msg = buf_msg(buf); |
1980 | seq_no = msg_seqno(msg); | 1983 | seq_no = msg_seqno(msg); |
1981 | TIPC_SKB_CB(buf)->handle | ||
1982 | = b_ptr; | ||
1983 | if (type == ORIGINAL_MSG) | 1984 | if (type == ORIGINAL_MSG) |
1984 | goto deliver; | 1985 | goto deliver; |
1985 | goto protocol_check; | 1986 | goto protocol_check; |
@@ -2263,7 +2264,8 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf) | |||
2263 | switch (msg_type(msg)) { | 2264 | switch (msg_type(msg)) { |
2264 | 2265 | ||
2265 | case RESET_MSG: | 2266 | case RESET_MSG: |
2266 | if (!link_working_unknown(l_ptr) && l_ptr->peer_session) { | 2267 | if (!link_working_unknown(l_ptr) && |
2268 | (l_ptr->peer_session != INVALID_SESSION)) { | ||
2267 | if (msg_session(msg) == l_ptr->peer_session) { | 2269 | if (msg_session(msg) == l_ptr->peer_session) { |
2268 | dbg("Duplicate RESET: %u<->%u\n", | 2270 | dbg("Duplicate RESET: %u<->%u\n", |
2269 | msg_session(msg), l_ptr->peer_session); | 2271 | msg_session(msg), l_ptr->peer_session); |
@@ -2424,7 +2426,7 @@ void tipc_link_changeover(struct link *l_ptr) | |||
2424 | } | 2426 | } |
2425 | 2427 | ||
2426 | msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, | 2428 | msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, |
2427 | ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr); | 2429 | ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); |
2428 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); | 2430 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); |
2429 | msg_set_msgcnt(&tunnel_hdr, msgcount); | 2431 | msg_set_msgcnt(&tunnel_hdr, msgcount); |
2430 | dbg("Link changeover requires %u tunnel messages\n", msgcount); | 2432 | dbg("Link changeover requires %u tunnel messages\n", msgcount); |
@@ -2479,7 +2481,7 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel) | |||
2479 | struct tipc_msg tunnel_hdr; | 2481 | struct tipc_msg tunnel_hdr; |
2480 | 2482 | ||
2481 | msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, | 2483 | msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, |
2482 | DUPLICATE_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr); | 2484 | DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); |
2483 | msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size); | 2485 | msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size); |
2484 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); | 2486 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); |
2485 | iter = l_ptr->first_out; | 2487 | iter = l_ptr->first_out; |
@@ -2672,10 +2674,12 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf) | |||
2672 | u32 pack_sz = link_max_pkt(l_ptr); | 2674 | u32 pack_sz = link_max_pkt(l_ptr); |
2673 | u32 fragm_sz = pack_sz - INT_H_SIZE; | 2675 | u32 fragm_sz = pack_sz - INT_H_SIZE; |
2674 | u32 fragm_no = 1; | 2676 | u32 fragm_no = 1; |
2675 | u32 destaddr = msg_destnode(inmsg); | 2677 | u32 destaddr; |
2676 | 2678 | ||
2677 | if (msg_short(inmsg)) | 2679 | if (msg_short(inmsg)) |
2678 | destaddr = l_ptr->addr; | 2680 | destaddr = l_ptr->addr; |
2681 | else | ||
2682 | destaddr = msg_destnode(inmsg); | ||
2679 | 2683 | ||
2680 | if (msg_routed(inmsg)) | 2684 | if (msg_routed(inmsg)) |
2681 | msg_set_prevnode(inmsg, tipc_own_addr); | 2685 | msg_set_prevnode(inmsg, tipc_own_addr); |
@@ -2683,7 +2687,7 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf) | |||
2683 | /* Prepare reusable fragment header: */ | 2687 | /* Prepare reusable fragment header: */ |
2684 | 2688 | ||
2685 | msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, | 2689 | msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, |
2686 | TIPC_OK, INT_H_SIZE, destaddr); | 2690 | INT_H_SIZE, destaddr); |
2687 | msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg)); | 2691 | msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg)); |
2688 | msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++)); | 2692 | msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++)); |
2689 | msg_set_fragm_no(&fragm_hdr, fragm_no); | 2693 | msg_set_fragm_no(&fragm_hdr, fragm_no); |
@@ -2994,7 +2998,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space | |||
2994 | link_set_supervision_props(l_ptr, new_value); | 2998 | link_set_supervision_props(l_ptr, new_value); |
2995 | tipc_link_send_proto_msg(l_ptr, STATE_MSG, | 2999 | tipc_link_send_proto_msg(l_ptr, STATE_MSG, |
2996 | 0, 0, new_value, 0, 0); | 3000 | 0, 0, new_value, 0, 0); |
2997 | res = TIPC_OK; | 3001 | res = 0; |
2998 | } | 3002 | } |
2999 | break; | 3003 | break; |
3000 | case TIPC_CMD_SET_LINK_PRI: | 3004 | case TIPC_CMD_SET_LINK_PRI: |
@@ -3003,14 +3007,14 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space | |||
3003 | l_ptr->priority = new_value; | 3007 | l_ptr->priority = new_value; |
3004 | tipc_link_send_proto_msg(l_ptr, STATE_MSG, | 3008 | tipc_link_send_proto_msg(l_ptr, STATE_MSG, |
3005 | 0, 0, 0, new_value, 0); | 3009 | 0, 0, 0, new_value, 0); |
3006 | res = TIPC_OK; | 3010 | res = 0; |
3007 | } | 3011 | } |
3008 | break; | 3012 | break; |
3009 | case TIPC_CMD_SET_LINK_WINDOW: | 3013 | case TIPC_CMD_SET_LINK_WINDOW: |
3010 | if ((new_value >= TIPC_MIN_LINK_WIN) && | 3014 | if ((new_value >= TIPC_MIN_LINK_WIN) && |
3011 | (new_value <= TIPC_MAX_LINK_WIN)) { | 3015 | (new_value <= TIPC_MAX_LINK_WIN)) { |
3012 | tipc_link_set_queue_limits(l_ptr, new_value); | 3016 | tipc_link_set_queue_limits(l_ptr, new_value); |
3013 | res = TIPC_OK; | 3017 | res = 0; |
3014 | } | 3018 | } |
3015 | break; | 3019 | break; |
3016 | } | 3020 | } |
@@ -3226,7 +3230,7 @@ int link_control(const char *name, u32 op, u32 val) | |||
3226 | if (op == TIPC_CMD_UNBLOCK_LINK) { | 3230 | if (op == TIPC_CMD_UNBLOCK_LINK) { |
3227 | l_ptr->blocked = 0; | 3231 | l_ptr->blocked = 0; |
3228 | } | 3232 | } |
3229 | res = TIPC_OK; | 3233 | res = 0; |
3230 | } | 3234 | } |
3231 | tipc_node_unlock(node); | 3235 | tipc_node_unlock(node); |
3232 | } | 3236 | } |
diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 696a8633df75..73dcd00d674e 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c | |||
@@ -41,7 +41,9 @@ | |||
41 | #include "bearer.h" | 41 | #include "bearer.h" |
42 | 42 | ||
43 | 43 | ||
44 | void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str) | 44 | #ifdef CONFIG_TIPC_DEBUG |
45 | |||
46 | void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str) | ||
45 | { | 47 | { |
46 | u32 usr = msg_user(msg); | 48 | u32 usr = msg_user(msg); |
47 | tipc_printf(buf, str); | 49 | tipc_printf(buf, str); |
@@ -228,13 +230,10 @@ void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str | |||
228 | 230 | ||
229 | switch (usr) { | 231 | switch (usr) { |
230 | case CONN_MANAGER: | 232 | case CONN_MANAGER: |
231 | case NAME_DISTRIBUTOR: | ||
232 | case TIPC_LOW_IMPORTANCE: | 233 | case TIPC_LOW_IMPORTANCE: |
233 | case TIPC_MEDIUM_IMPORTANCE: | 234 | case TIPC_MEDIUM_IMPORTANCE: |
234 | case TIPC_HIGH_IMPORTANCE: | 235 | case TIPC_HIGH_IMPORTANCE: |
235 | case TIPC_CRITICAL_IMPORTANCE: | 236 | case TIPC_CRITICAL_IMPORTANCE: |
236 | if (msg_short(msg)) | ||
237 | break; /* No error */ | ||
238 | switch (msg_errcode(msg)) { | 237 | switch (msg_errcode(msg)) { |
239 | case TIPC_OK: | 238 | case TIPC_OK: |
240 | break; | 239 | break; |
@@ -315,9 +314,11 @@ void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str | |||
315 | } | 314 | } |
316 | tipc_printf(buf, "\n"); | 315 | tipc_printf(buf, "\n"); |
317 | if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) { | 316 | if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) { |
318 | tipc_msg_print(buf,msg_get_wrapped(msg)," /"); | 317 | tipc_msg_dbg(buf, msg_get_wrapped(msg), " /"); |
319 | } | 318 | } |
320 | if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) { | 319 | if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) { |
321 | tipc_msg_print(buf,msg_get_wrapped(msg)," /"); | 320 | tipc_msg_dbg(buf, msg_get_wrapped(msg), " /"); |
322 | } | 321 | } |
323 | } | 322 | } |
323 | |||
324 | #endif | ||
diff --git a/net/tipc/msg.h b/net/tipc/msg.h index ad487e8abcc2..7ee6ae238147 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * net/tipc/msg.h: Include file for TIPC message header routines | 2 | * net/tipc/msg.h: Include file for TIPC message header routines |
3 | * | 3 | * |
4 | * Copyright (c) 2000-2007, Ericsson AB | 4 | * Copyright (c) 2000-2007, Ericsson AB |
5 | * Copyright (c) 2005-2007, Wind River Systems | 5 | * Copyright (c) 2005-2008, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | 8 | * Redistribution and use in source and binary forms, with or without |
@@ -75,6 +75,14 @@ static inline void msg_set_bits(struct tipc_msg *m, u32 w, | |||
75 | m->hdr[w] |= htonl(val); | 75 | m->hdr[w] |= htonl(val); |
76 | } | 76 | } |
77 | 77 | ||
78 | static inline void msg_swap_words(struct tipc_msg *msg, u32 a, u32 b) | ||
79 | { | ||
80 | u32 temp = msg->hdr[a]; | ||
81 | |||
82 | msg->hdr[a] = msg->hdr[b]; | ||
83 | msg->hdr[b] = temp; | ||
84 | } | ||
85 | |||
78 | /* | 86 | /* |
79 | * Word 0 | 87 | * Word 0 |
80 | */ | 88 | */ |
@@ -119,9 +127,9 @@ static inline int msg_non_seq(struct tipc_msg *m) | |||
119 | return msg_bits(m, 0, 20, 1); | 127 | return msg_bits(m, 0, 20, 1); |
120 | } | 128 | } |
121 | 129 | ||
122 | static inline void msg_set_non_seq(struct tipc_msg *m) | 130 | static inline void msg_set_non_seq(struct tipc_msg *m, u32 n) |
123 | { | 131 | { |
124 | msg_set_bits(m, 0, 20, 1, 1); | 132 | msg_set_bits(m, 0, 20, 1, n); |
125 | } | 133 | } |
126 | 134 | ||
127 | static inline int msg_dest_droppable(struct tipc_msg *m) | 135 | static inline int msg_dest_droppable(struct tipc_msg *m) |
@@ -224,6 +232,25 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n) | |||
224 | msg_set_bits(m, 2, 0, 0xffff, n); | 232 | msg_set_bits(m, 2, 0, 0xffff, n); |
225 | } | 233 | } |
226 | 234 | ||
235 | /* | ||
236 | * TIPC may utilize the "link ack #" and "link seq #" fields of a short | ||
237 | * message header to hold the destination node for the message, since the | ||
238 | * normal "dest node" field isn't present. This cache is only referenced | ||
239 | * when required, so populating the cache of a longer message header is | ||
240 | * harmless (as long as the header has the two link sequence fields present). | ||
241 | * | ||
242 | * Note: Host byte order is OK here, since the info never goes off-card. | ||
243 | */ | ||
244 | |||
245 | static inline u32 msg_destnode_cache(struct tipc_msg *m) | ||
246 | { | ||
247 | return m->hdr[2]; | ||
248 | } | ||
249 | |||
250 | static inline void msg_set_destnode_cache(struct tipc_msg *m, u32 dnode) | ||
251 | { | ||
252 | m->hdr[2] = dnode; | ||
253 | } | ||
227 | 254 | ||
228 | /* | 255 | /* |
229 | * Words 3-10 | 256 | * Words 3-10 |
@@ -325,7 +352,7 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) | |||
325 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 352 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
326 | w0:|vers |msg usr|hdr sz |n|resrv| packet size | | 353 | w0:|vers |msg usr|hdr sz |n|resrv| packet size | |
327 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 354 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
328 | w1:|m typ|rsv=0| sequence gap | broadcast ack no | | 355 | w1:|m typ| sequence gap | broadcast ack no | |
329 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 356 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
330 | w2:| link level ack no/bc_gap_from | seq no / bcast_gap_to | | 357 | w2:| link level ack no/bc_gap_from | seq no / bcast_gap_to | |
331 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 358 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
@@ -388,12 +415,12 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) | |||
388 | 415 | ||
389 | static inline u32 msg_seq_gap(struct tipc_msg *m) | 416 | static inline u32 msg_seq_gap(struct tipc_msg *m) |
390 | { | 417 | { |
391 | return msg_bits(m, 1, 16, 0xff); | 418 | return msg_bits(m, 1, 16, 0x1fff); |
392 | } | 419 | } |
393 | 420 | ||
394 | static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n) | 421 | static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n) |
395 | { | 422 | { |
396 | msg_set_bits(m, 1, 16, 0xff, n); | 423 | msg_set_bits(m, 1, 16, 0x1fff, n); |
397 | } | 424 | } |
398 | 425 | ||
399 | static inline u32 msg_req_links(struct tipc_msg *m) | 426 | static inline u32 msg_req_links(struct tipc_msg *m) |
@@ -696,7 +723,7 @@ static inline u32 msg_tot_importance(struct tipc_msg *m) | |||
696 | 723 | ||
697 | 724 | ||
698 | static inline void msg_init(struct tipc_msg *m, u32 user, u32 type, | 725 | static inline void msg_init(struct tipc_msg *m, u32 user, u32 type, |
699 | u32 err, u32 hsize, u32 destnode) | 726 | u32 hsize, u32 destnode) |
700 | { | 727 | { |
701 | memset(m, 0, hsize); | 728 | memset(m, 0, hsize); |
702 | msg_set_version(m); | 729 | msg_set_version(m); |
@@ -705,7 +732,6 @@ static inline void msg_init(struct tipc_msg *m, u32 user, u32 type, | |||
705 | msg_set_size(m, hsize); | 732 | msg_set_size(m, hsize); |
706 | msg_set_prevnode(m, tipc_own_addr); | 733 | msg_set_prevnode(m, tipc_own_addr); |
707 | msg_set_type(m, type); | 734 | msg_set_type(m, type); |
708 | msg_set_errcode(m, err); | ||
709 | if (!msg_short(m)) { | 735 | if (!msg_short(m)) { |
710 | msg_set_orignode(m, tipc_own_addr); | 736 | msg_set_orignode(m, tipc_own_addr); |
711 | msg_set_destnode(m, destnode); | 737 | msg_set_destnode(m, destnode); |
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 39fd1619febf..10a69894e2fd 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c | |||
@@ -41,9 +41,6 @@ | |||
41 | #include "msg.h" | 41 | #include "msg.h" |
42 | #include "name_distr.h" | 42 | #include "name_distr.h" |
43 | 43 | ||
44 | #undef DBG_OUTPUT | ||
45 | #define DBG_OUTPUT NULL | ||
46 | |||
47 | #define ITEM_SIZE sizeof(struct distr_item) | 44 | #define ITEM_SIZE sizeof(struct distr_item) |
48 | 45 | ||
49 | /** | 46 | /** |
@@ -106,8 +103,7 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest) | |||
106 | 103 | ||
107 | if (buf != NULL) { | 104 | if (buf != NULL) { |
108 | msg = buf_msg(buf); | 105 | msg = buf_msg(buf); |
109 | msg_init(msg, NAME_DISTRIBUTOR, type, TIPC_OK, | 106 | msg_init(msg, NAME_DISTRIBUTOR, type, LONG_H_SIZE, dest); |
110 | LONG_H_SIZE, dest); | ||
111 | msg_set_size(msg, LONG_H_SIZE + size); | 107 | msg_set_size(msg, LONG_H_SIZE + size); |
112 | } | 108 | } |
113 | return buf; | 109 | return buf; |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index ac7dfdda7973..cd72e22b132b 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * net/tipc/name_table.c: TIPC name table code | 2 | * net/tipc/name_table.c: TIPC name table code |
3 | * | 3 | * |
4 | * Copyright (c) 2000-2006, Ericsson AB | 4 | * Copyright (c) 2000-2006, Ericsson AB |
5 | * Copyright (c) 2004-2005, Wind River Systems | 5 | * Copyright (c) 2004-2008, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | 8 | * Redistribution and use in source and binary forms, with or without |
@@ -52,9 +52,16 @@ static int tipc_nametbl_size = 1024; /* must be a power of 2 */ | |||
52 | * struct sub_seq - container for all published instances of a name sequence | 52 | * struct sub_seq - container for all published instances of a name sequence |
53 | * @lower: name sequence lower bound | 53 | * @lower: name sequence lower bound |
54 | * @upper: name sequence upper bound | 54 | * @upper: name sequence upper bound |
55 | * @node_list: circular list of matching publications with >= node scope | 55 | * @node_list: circular list of publications made by own node |
56 | * @cluster_list: circular list of matching publications with >= cluster scope | 56 | * @cluster_list: circular list of publications made by own cluster |
57 | * @zone_list: circular list of matching publications with >= zone scope | 57 | * @zone_list: circular list of publications made by own zone |
58 | * @node_list_size: number of entries in "node_list" | ||
59 | * @cluster_list_size: number of entries in "cluster_list" | ||
60 | * @zone_list_size: number of entries in "zone_list" | ||
61 | * | ||
62 | * Note: The zone list always contains at least one entry, since all | ||
63 | * publications of the associated name sequence belong to it. | ||
64 | * (The cluster and node lists may be empty.) | ||
58 | */ | 65 | */ |
59 | 66 | ||
60 | struct sub_seq { | 67 | struct sub_seq { |
@@ -63,6 +70,9 @@ struct sub_seq { | |||
63 | struct publication *node_list; | 70 | struct publication *node_list; |
64 | struct publication *cluster_list; | 71 | struct publication *cluster_list; |
65 | struct publication *zone_list; | 72 | struct publication *zone_list; |
73 | u32 node_list_size; | ||
74 | u32 cluster_list_size; | ||
75 | u32 zone_list_size; | ||
66 | }; | 76 | }; |
67 | 77 | ||
68 | /** | 78 | /** |
@@ -74,7 +84,7 @@ struct sub_seq { | |||
74 | * @first_free: array index of first unused sub-sequence entry | 84 | * @first_free: array index of first unused sub-sequence entry |
75 | * @ns_list: links to adjacent name sequences in hash chain | 85 | * @ns_list: links to adjacent name sequences in hash chain |
76 | * @subscriptions: list of subscriptions for this 'type' | 86 | * @subscriptions: list of subscriptions for this 'type' |
77 | * @lock: spinlock controlling access to name sequence structure | 87 | * @lock: spinlock controlling access to publication lists of all sub-sequences |
78 | */ | 88 | */ |
79 | 89 | ||
80 | struct name_seq { | 90 | struct name_seq { |
@@ -317,6 +327,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, | |||
317 | dbg("inserting publ %p, node=0x%x publ->node=0x%x, subscr->node=%p\n", | 327 | dbg("inserting publ %p, node=0x%x publ->node=0x%x, subscr->node=%p\n", |
318 | publ, node, publ->node, publ->subscr.node); | 328 | publ, node, publ->node, publ->subscr.node); |
319 | 329 | ||
330 | sseq->zone_list_size++; | ||
320 | if (!sseq->zone_list) | 331 | if (!sseq->zone_list) |
321 | sseq->zone_list = publ->zone_list_next = publ; | 332 | sseq->zone_list = publ->zone_list_next = publ; |
322 | else { | 333 | else { |
@@ -325,6 +336,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, | |||
325 | } | 336 | } |
326 | 337 | ||
327 | if (in_own_cluster(node)) { | 338 | if (in_own_cluster(node)) { |
339 | sseq->cluster_list_size++; | ||
328 | if (!sseq->cluster_list) | 340 | if (!sseq->cluster_list) |
329 | sseq->cluster_list = publ->cluster_list_next = publ; | 341 | sseq->cluster_list = publ->cluster_list_next = publ; |
330 | else { | 342 | else { |
@@ -335,6 +347,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, | |||
335 | } | 347 | } |
336 | 348 | ||
337 | if (node == tipc_own_addr) { | 349 | if (node == tipc_own_addr) { |
350 | sseq->node_list_size++; | ||
338 | if (!sseq->node_list) | 351 | if (!sseq->node_list) |
339 | sseq->node_list = publ->node_list_next = publ; | 352 | sseq->node_list = publ->node_list_next = publ; |
340 | else { | 353 | else { |
@@ -411,6 +424,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i | |||
411 | } else { | 424 | } else { |
412 | sseq->zone_list = NULL; | 425 | sseq->zone_list = NULL; |
413 | } | 426 | } |
427 | sseq->zone_list_size--; | ||
414 | 428 | ||
415 | /* Remove publication from cluster scope list, if present */ | 429 | /* Remove publication from cluster scope list, if present */ |
416 | 430 | ||
@@ -439,6 +453,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i | |||
439 | } else { | 453 | } else { |
440 | sseq->cluster_list = NULL; | 454 | sseq->cluster_list = NULL; |
441 | } | 455 | } |
456 | sseq->cluster_list_size--; | ||
442 | } | 457 | } |
443 | end_cluster: | 458 | end_cluster: |
444 | 459 | ||
@@ -469,6 +484,7 @@ end_cluster: | |||
469 | } else { | 484 | } else { |
470 | sseq->node_list = NULL; | 485 | sseq->node_list = NULL; |
471 | } | 486 | } |
487 | sseq->node_list_size--; | ||
472 | } | 488 | } |
473 | end_node: | 489 | end_node: |
474 | 490 | ||
@@ -709,15 +725,18 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, | |||
709 | 725 | ||
710 | if (sseq->lower > upper) | 726 | if (sseq->lower > upper) |
711 | break; | 727 | break; |
712 | publ = sseq->cluster_list; | 728 | |
713 | if (publ && (publ->scope <= limit)) | 729 | publ = sseq->node_list; |
730 | if (publ) { | ||
714 | do { | 731 | do { |
715 | if (publ->node == tipc_own_addr) | 732 | if (publ->scope <= limit) |
716 | tipc_port_list_add(dports, publ->ref); | 733 | tipc_port_list_add(dports, publ->ref); |
717 | else | 734 | publ = publ->node_list_next; |
718 | res = 1; | 735 | } while (publ != sseq->node_list); |
719 | publ = publ->cluster_list_next; | 736 | } |
720 | } while (publ != sseq->cluster_list); | 737 | |
738 | if (sseq->cluster_list_size != sseq->node_list_size) | ||
739 | res = 1; | ||
721 | } | 740 | } |
722 | 741 | ||
723 | spin_unlock_bh(&seq->lock); | 742 | spin_unlock_bh(&seq->lock); |
@@ -905,6 +924,9 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth, | |||
905 | struct sub_seq *sseq; | 924 | struct sub_seq *sseq; |
906 | char typearea[11]; | 925 | char typearea[11]; |
907 | 926 | ||
927 | if (seq->first_free == 0) | ||
928 | return; | ||
929 | |||
908 | sprintf(typearea, "%-10u", seq->type); | 930 | sprintf(typearea, "%-10u", seq->type); |
909 | 931 | ||
910 | if (depth == 1) { | 932 | if (depth == 1) { |
@@ -915,7 +937,9 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth, | |||
915 | for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) { | 937 | for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) { |
916 | if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) { | 938 | if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) { |
917 | tipc_printf(buf, "%s ", typearea); | 939 | tipc_printf(buf, "%s ", typearea); |
940 | spin_lock_bh(&seq->lock); | ||
918 | subseq_list(sseq, buf, depth, index); | 941 | subseq_list(sseq, buf, depth, index); |
942 | spin_unlock_bh(&seq->lock); | ||
919 | sprintf(typearea, "%10s", " "); | 943 | sprintf(typearea, "%10s", " "); |
920 | } | 944 | } |
921 | } | 945 | } |
@@ -1050,15 +1074,12 @@ void tipc_nametbl_dump(void) | |||
1050 | 1074 | ||
1051 | int tipc_nametbl_init(void) | 1075 | int tipc_nametbl_init(void) |
1052 | { | 1076 | { |
1053 | int array_size = sizeof(struct hlist_head) * tipc_nametbl_size; | 1077 | table.types = kcalloc(tipc_nametbl_size, sizeof(struct hlist_head), |
1054 | 1078 | GFP_ATOMIC); | |
1055 | table.types = kzalloc(array_size, GFP_ATOMIC); | ||
1056 | if (!table.types) | 1079 | if (!table.types) |
1057 | return -ENOMEM; | 1080 | return -ENOMEM; |
1058 | 1081 | ||
1059 | write_lock_bh(&tipc_nametbl_lock); | ||
1060 | table.local_publ_count = 0; | 1082 | table.local_publ_count = 0; |
1061 | write_unlock_bh(&tipc_nametbl_lock); | ||
1062 | return 0; | 1083 | return 0; |
1063 | } | 1084 | } |
1064 | 1085 | ||
diff --git a/net/tipc/net.c b/net/tipc/net.c index c39c76201e8e..ec7b04fbdc43 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
@@ -165,7 +165,7 @@ static int net_init(void) | |||
165 | if (!tipc_net.zones) { | 165 | if (!tipc_net.zones) { |
166 | return -ENOMEM; | 166 | return -ENOMEM; |
167 | } | 167 | } |
168 | return TIPC_OK; | 168 | return 0; |
169 | } | 169 | } |
170 | 170 | ||
171 | static void net_stop(void) | 171 | static void net_stop(void) |
@@ -266,7 +266,7 @@ void tipc_net_route_msg(struct sk_buff *buf) | |||
266 | tipc_link_send(buf, dnode, msg_link_selector(msg)); | 266 | tipc_link_send(buf, dnode, msg_link_selector(msg)); |
267 | } | 267 | } |
268 | 268 | ||
269 | int tipc_net_start(void) | 269 | int tipc_net_start(u32 addr) |
270 | { | 270 | { |
271 | char addr_string[16]; | 271 | char addr_string[16]; |
272 | int res; | 272 | int res; |
@@ -274,6 +274,10 @@ int tipc_net_start(void) | |||
274 | if (tipc_mode != TIPC_NODE_MODE) | 274 | if (tipc_mode != TIPC_NODE_MODE) |
275 | return -ENOPROTOOPT; | 275 | return -ENOPROTOOPT; |
276 | 276 | ||
277 | tipc_subscr_stop(); | ||
278 | tipc_cfg_stop(); | ||
279 | |||
280 | tipc_own_addr = addr; | ||
277 | tipc_mode = TIPC_NET_MODE; | 281 | tipc_mode = TIPC_NET_MODE; |
278 | tipc_named_reinit(); | 282 | tipc_named_reinit(); |
279 | tipc_port_reinit(); | 283 | tipc_port_reinit(); |
@@ -284,14 +288,14 @@ int tipc_net_start(void) | |||
284 | (res = tipc_bclink_init())) { | 288 | (res = tipc_bclink_init())) { |
285 | return res; | 289 | return res; |
286 | } | 290 | } |
287 | tipc_subscr_stop(); | 291 | |
288 | tipc_cfg_stop(); | ||
289 | tipc_k_signal((Handler)tipc_subscr_start, 0); | 292 | tipc_k_signal((Handler)tipc_subscr_start, 0); |
290 | tipc_k_signal((Handler)tipc_cfg_init, 0); | 293 | tipc_k_signal((Handler)tipc_cfg_init, 0); |
294 | |||
291 | info("Started in network mode\n"); | 295 | info("Started in network mode\n"); |
292 | info("Own node address %s, network identity %u\n", | 296 | info("Own node address %s, network identity %u\n", |
293 | addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); | 297 | addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); |
294 | return TIPC_OK; | 298 | return 0; |
295 | } | 299 | } |
296 | 300 | ||
297 | void tipc_net_stop(void) | 301 | void tipc_net_stop(void) |
diff --git a/net/tipc/net.h b/net/tipc/net.h index a6a0e9976ac9..d154ac2bda9a 100644 --- a/net/tipc/net.h +++ b/net/tipc/net.h | |||
@@ -58,7 +58,7 @@ void tipc_net_route_msg(struct sk_buff *buf); | |||
58 | struct node *tipc_net_select_remote_node(u32 addr, u32 ref); | 58 | struct node *tipc_net_select_remote_node(u32 addr, u32 ref); |
59 | u32 tipc_net_select_router(u32 addr, u32 ref); | 59 | u32 tipc_net_select_router(u32 addr, u32 ref); |
60 | 60 | ||
61 | int tipc_net_start(void); | 61 | int tipc_net_start(u32 addr); |
62 | void tipc_net_stop(void); | 62 | void tipc_net_stop(void); |
63 | 63 | ||
64 | #endif | 64 | #endif |
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index 6a7f7b4c2595..c387217bb230 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * net/tipc/netlink.c: TIPC configuration handling | 2 | * net/tipc/netlink.c: TIPC configuration handling |
3 | * | 3 | * |
4 | * Copyright (c) 2005-2006, Ericsson AB | 4 | * Copyright (c) 2005-2006, Ericsson AB |
5 | * Copyright (c) 2005, Wind River Systems | 5 | * Copyright (c) 2005-2007, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | 8 | * Redistribution and use in source and binary forms, with or without |
@@ -45,15 +45,17 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info) | |||
45 | struct nlmsghdr *req_nlh = info->nlhdr; | 45 | struct nlmsghdr *req_nlh = info->nlhdr; |
46 | struct tipc_genlmsghdr *req_userhdr = info->userhdr; | 46 | struct tipc_genlmsghdr *req_userhdr = info->userhdr; |
47 | int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN); | 47 | int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN); |
48 | u16 cmd; | ||
48 | 49 | ||
49 | if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN))) | 50 | if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN))) |
50 | rep_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN); | 51 | cmd = TIPC_CMD_NOT_NET_ADMIN; |
51 | else | 52 | else |
52 | rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, | 53 | cmd = req_userhdr->cmd; |
53 | req_userhdr->cmd, | 54 | |
54 | NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN, | 55 | rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, cmd, |
55 | NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN), | 56 | NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN, |
56 | hdr_space); | 57 | NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN), |
58 | hdr_space); | ||
57 | 59 | ||
58 | if (rep_buf) { | 60 | if (rep_buf) { |
59 | skb_push(rep_buf, hdr_space); | 61 | skb_push(rep_buf, hdr_space); |
diff --git a/net/tipc/node.c b/net/tipc/node.c index 598f4d3a0098..ee952ad60218 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -52,16 +52,40 @@ static void node_established_contact(struct node *n_ptr); | |||
52 | 52 | ||
53 | struct node *tipc_nodes = NULL; /* sorted list of nodes within cluster */ | 53 | struct node *tipc_nodes = NULL; /* sorted list of nodes within cluster */ |
54 | 54 | ||
55 | static DEFINE_SPINLOCK(node_create_lock); | ||
56 | |||
55 | u32 tipc_own_tag = 0; | 57 | u32 tipc_own_tag = 0; |
56 | 58 | ||
59 | /** | ||
60 | * tipc_node_create - create neighboring node | ||
61 | * | ||
62 | * Currently, this routine is called by neighbor discovery code, which holds | ||
63 | * net_lock for reading only. We must take node_create_lock to ensure a node | ||
64 | * isn't created twice if two different bearers discover the node at the same | ||
65 | * time. (It would be preferable to switch to holding net_lock in write mode, | ||
66 | * but this is a non-trivial change.) | ||
67 | */ | ||
68 | |||
57 | struct node *tipc_node_create(u32 addr) | 69 | struct node *tipc_node_create(u32 addr) |
58 | { | 70 | { |
59 | struct cluster *c_ptr; | 71 | struct cluster *c_ptr; |
60 | struct node *n_ptr; | 72 | struct node *n_ptr; |
61 | struct node **curr_node; | 73 | struct node **curr_node; |
62 | 74 | ||
75 | spin_lock_bh(&node_create_lock); | ||
76 | |||
77 | for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { | ||
78 | if (addr < n_ptr->addr) | ||
79 | break; | ||
80 | if (addr == n_ptr->addr) { | ||
81 | spin_unlock_bh(&node_create_lock); | ||
82 | return n_ptr; | ||
83 | } | ||
84 | } | ||
85 | |||
63 | n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC); | 86 | n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC); |
64 | if (!n_ptr) { | 87 | if (!n_ptr) { |
88 | spin_unlock_bh(&node_create_lock); | ||
65 | warn("Node creation failed, no memory\n"); | 89 | warn("Node creation failed, no memory\n"); |
66 | return NULL; | 90 | return NULL; |
67 | } | 91 | } |
@@ -71,6 +95,7 @@ struct node *tipc_node_create(u32 addr) | |||
71 | c_ptr = tipc_cltr_create(addr); | 95 | c_ptr = tipc_cltr_create(addr); |
72 | } | 96 | } |
73 | if (!c_ptr) { | 97 | if (!c_ptr) { |
98 | spin_unlock_bh(&node_create_lock); | ||
74 | kfree(n_ptr); | 99 | kfree(n_ptr); |
75 | return NULL; | 100 | return NULL; |
76 | } | 101 | } |
@@ -91,6 +116,7 @@ struct node *tipc_node_create(u32 addr) | |||
91 | } | 116 | } |
92 | } | 117 | } |
93 | (*curr_node) = n_ptr; | 118 | (*curr_node) = n_ptr; |
119 | spin_unlock_bh(&node_create_lock); | ||
94 | return n_ptr; | 120 | return n_ptr; |
95 | } | 121 | } |
96 | 122 | ||
@@ -574,12 +600,14 @@ u32 tipc_available_nodes(const u32 domain) | |||
574 | struct node *n_ptr; | 600 | struct node *n_ptr; |
575 | u32 cnt = 0; | 601 | u32 cnt = 0; |
576 | 602 | ||
603 | read_lock_bh(&tipc_net_lock); | ||
577 | for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { | 604 | for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { |
578 | if (!in_scope(domain, n_ptr->addr)) | 605 | if (!in_scope(domain, n_ptr->addr)) |
579 | continue; | 606 | continue; |
580 | if (tipc_node_is_up(n_ptr)) | 607 | if (tipc_node_is_up(n_ptr)) |
581 | cnt++; | 608 | cnt++; |
582 | } | 609 | } |
610 | read_unlock_bh(&tipc_net_lock); | ||
583 | return cnt; | 611 | return cnt; |
584 | } | 612 | } |
585 | 613 | ||
@@ -599,19 +627,26 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) | |||
599 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE | 627 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE |
600 | " (network address)"); | 628 | " (network address)"); |
601 | 629 | ||
602 | if (!tipc_nodes) | 630 | read_lock_bh(&tipc_net_lock); |
631 | if (!tipc_nodes) { | ||
632 | read_unlock_bh(&tipc_net_lock); | ||
603 | return tipc_cfg_reply_none(); | 633 | return tipc_cfg_reply_none(); |
634 | } | ||
604 | 635 | ||
605 | /* For now, get space for all other nodes | 636 | /* For now, get space for all other nodes |
606 | (will need to modify this when slave nodes are supported */ | 637 | (will need to modify this when slave nodes are supported */ |
607 | 638 | ||
608 | payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1); | 639 | payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1); |
609 | if (payload_size > 32768u) | 640 | if (payload_size > 32768u) { |
641 | read_unlock_bh(&tipc_net_lock); | ||
610 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | 642 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED |
611 | " (too many nodes)"); | 643 | " (too many nodes)"); |
644 | } | ||
612 | buf = tipc_cfg_reply_alloc(payload_size); | 645 | buf = tipc_cfg_reply_alloc(payload_size); |
613 | if (!buf) | 646 | if (!buf) { |
647 | read_unlock_bh(&tipc_net_lock); | ||
614 | return NULL; | 648 | return NULL; |
649 | } | ||
615 | 650 | ||
616 | /* Add TLVs for all nodes in scope */ | 651 | /* Add TLVs for all nodes in scope */ |
617 | 652 | ||
@@ -624,6 +659,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) | |||
624 | &node_info, sizeof(node_info)); | 659 | &node_info, sizeof(node_info)); |
625 | } | 660 | } |
626 | 661 | ||
662 | read_unlock_bh(&tipc_net_lock); | ||
627 | return buf; | 663 | return buf; |
628 | } | 664 | } |
629 | 665 | ||
@@ -646,16 +682,22 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) | |||
646 | if (tipc_mode != TIPC_NET_MODE) | 682 | if (tipc_mode != TIPC_NET_MODE) |
647 | return tipc_cfg_reply_none(); | 683 | return tipc_cfg_reply_none(); |
648 | 684 | ||
685 | read_lock_bh(&tipc_net_lock); | ||
686 | |||
649 | /* Get space for all unicast links + multicast link */ | 687 | /* Get space for all unicast links + multicast link */ |
650 | 688 | ||
651 | payload_size = TLV_SPACE(sizeof(link_info)) * | 689 | payload_size = TLV_SPACE(sizeof(link_info)) * |
652 | (tipc_net.zones[tipc_zone(tipc_own_addr)]->links + 1); | 690 | (tipc_net.zones[tipc_zone(tipc_own_addr)]->links + 1); |
653 | if (payload_size > 32768u) | 691 | if (payload_size > 32768u) { |
692 | read_unlock_bh(&tipc_net_lock); | ||
654 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | 693 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED |
655 | " (too many links)"); | 694 | " (too many links)"); |
695 | } | ||
656 | buf = tipc_cfg_reply_alloc(payload_size); | 696 | buf = tipc_cfg_reply_alloc(payload_size); |
657 | if (!buf) | 697 | if (!buf) { |
698 | read_unlock_bh(&tipc_net_lock); | ||
658 | return NULL; | 699 | return NULL; |
700 | } | ||
659 | 701 | ||
660 | /* Add TLV for broadcast link */ | 702 | /* Add TLV for broadcast link */ |
661 | 703 | ||
@@ -671,6 +713,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) | |||
671 | 713 | ||
672 | if (!in_scope(domain, n_ptr->addr)) | 714 | if (!in_scope(domain, n_ptr->addr)) |
673 | continue; | 715 | continue; |
716 | tipc_node_lock(n_ptr); | ||
674 | for (i = 0; i < MAX_BEARERS; i++) { | 717 | for (i = 0; i < MAX_BEARERS; i++) { |
675 | if (!n_ptr->links[i]) | 718 | if (!n_ptr->links[i]) |
676 | continue; | 719 | continue; |
@@ -680,7 +723,9 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) | |||
680 | tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, | 723 | tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, |
681 | &link_info, sizeof(link_info)); | 724 | &link_info, sizeof(link_info)); |
682 | } | 725 | } |
726 | tipc_node_unlock(n_ptr); | ||
683 | } | 727 | } |
684 | 728 | ||
729 | read_unlock_bh(&tipc_net_lock); | ||
685 | return buf; | 730 | return buf; |
686 | } | 731 | } |
diff --git a/net/tipc/port.c b/net/tipc/port.c index 2f5806410c64..e70d27ea6578 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * net/tipc/port.c: TIPC port code | 2 | * net/tipc/port.c: TIPC port code |
3 | * | 3 | * |
4 | * Copyright (c) 1992-2007, Ericsson AB | 4 | * Copyright (c) 1992-2007, Ericsson AB |
5 | * Copyright (c) 2004-2007, Wind River Systems | 5 | * Copyright (c) 2004-2008, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | 8 | * Redistribution and use in source and binary forms, with or without |
@@ -211,12 +211,12 @@ exit: | |||
211 | } | 211 | } |
212 | 212 | ||
213 | /** | 213 | /** |
214 | * tipc_createport_raw - create a native TIPC port | 214 | * tipc_createport_raw - create a generic TIPC port |
215 | * | 215 | * |
216 | * Returns local port reference | 216 | * Returns pointer to (locked) TIPC port, or NULL if unable to create it |
217 | */ | 217 | */ |
218 | 218 | ||
219 | u32 tipc_createport_raw(void *usr_handle, | 219 | struct tipc_port *tipc_createport_raw(void *usr_handle, |
220 | u32 (*dispatcher)(struct tipc_port *, struct sk_buff *), | 220 | u32 (*dispatcher)(struct tipc_port *, struct sk_buff *), |
221 | void (*wakeup)(struct tipc_port *), | 221 | void (*wakeup)(struct tipc_port *), |
222 | const u32 importance) | 222 | const u32 importance) |
@@ -228,26 +228,21 @@ u32 tipc_createport_raw(void *usr_handle, | |||
228 | p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC); | 228 | p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC); |
229 | if (!p_ptr) { | 229 | if (!p_ptr) { |
230 | warn("Port creation failed, no memory\n"); | 230 | warn("Port creation failed, no memory\n"); |
231 | return 0; | 231 | return NULL; |
232 | } | 232 | } |
233 | ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock); | 233 | ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock); |
234 | if (!ref) { | 234 | if (!ref) { |
235 | warn("Port creation failed, reference table exhausted\n"); | 235 | warn("Port creation failed, reference table exhausted\n"); |
236 | kfree(p_ptr); | 236 | kfree(p_ptr); |
237 | return 0; | 237 | return NULL; |
238 | } | 238 | } |
239 | 239 | ||
240 | tipc_port_lock(ref); | ||
241 | p_ptr->publ.usr_handle = usr_handle; | 240 | p_ptr->publ.usr_handle = usr_handle; |
242 | p_ptr->publ.max_pkt = MAX_PKT_DEFAULT; | 241 | p_ptr->publ.max_pkt = MAX_PKT_DEFAULT; |
243 | p_ptr->publ.ref = ref; | 242 | p_ptr->publ.ref = ref; |
244 | msg = &p_ptr->publ.phdr; | 243 | msg = &p_ptr->publ.phdr; |
245 | msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, | 244 | msg_init(msg, importance, TIPC_NAMED_MSG, LONG_H_SIZE, 0); |
246 | 0); | ||
247 | msg_set_orignode(msg, tipc_own_addr); | ||
248 | msg_set_prevnode(msg, tipc_own_addr); | ||
249 | msg_set_origport(msg, ref); | 245 | msg_set_origport(msg, ref); |
250 | msg_set_importance(msg,importance); | ||
251 | p_ptr->last_in_seqno = 41; | 246 | p_ptr->last_in_seqno = 41; |
252 | p_ptr->sent = 1; | 247 | p_ptr->sent = 1; |
253 | INIT_LIST_HEAD(&p_ptr->wait_list); | 248 | INIT_LIST_HEAD(&p_ptr->wait_list); |
@@ -262,8 +257,7 @@ u32 tipc_createport_raw(void *usr_handle, | |||
262 | INIT_LIST_HEAD(&p_ptr->port_list); | 257 | INIT_LIST_HEAD(&p_ptr->port_list); |
263 | list_add_tail(&p_ptr->port_list, &ports); | 258 | list_add_tail(&p_ptr->port_list, &ports); |
264 | spin_unlock_bh(&tipc_port_list_lock); | 259 | spin_unlock_bh(&tipc_port_list_lock); |
265 | tipc_port_unlock(p_ptr); | 260 | return &(p_ptr->publ); |
266 | return ref; | ||
267 | } | 261 | } |
268 | 262 | ||
269 | int tipc_deleteport(u32 ref) | 263 | int tipc_deleteport(u32 ref) |
@@ -297,7 +291,7 @@ int tipc_deleteport(u32 ref) | |||
297 | kfree(p_ptr); | 291 | kfree(p_ptr); |
298 | dbg("Deleted port %u\n", ref); | 292 | dbg("Deleted port %u\n", ref); |
299 | tipc_net_route_msg(buf); | 293 | tipc_net_route_msg(buf); |
300 | return TIPC_OK; | 294 | return 0; |
301 | } | 295 | } |
302 | 296 | ||
303 | /** | 297 | /** |
@@ -342,7 +336,7 @@ int tipc_portunreliable(u32 ref, unsigned int *isunreliable) | |||
342 | return -EINVAL; | 336 | return -EINVAL; |
343 | *isunreliable = port_unreliable(p_ptr); | 337 | *isunreliable = port_unreliable(p_ptr); |
344 | tipc_port_unlock(p_ptr); | 338 | tipc_port_unlock(p_ptr); |
345 | return TIPC_OK; | 339 | return 0; |
346 | } | 340 | } |
347 | 341 | ||
348 | int tipc_set_portunreliable(u32 ref, unsigned int isunreliable) | 342 | int tipc_set_portunreliable(u32 ref, unsigned int isunreliable) |
@@ -354,7 +348,7 @@ int tipc_set_portunreliable(u32 ref, unsigned int isunreliable) | |||
354 | return -EINVAL; | 348 | return -EINVAL; |
355 | msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0)); | 349 | msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0)); |
356 | tipc_port_unlock(p_ptr); | 350 | tipc_port_unlock(p_ptr); |
357 | return TIPC_OK; | 351 | return 0; |
358 | } | 352 | } |
359 | 353 | ||
360 | static int port_unreturnable(struct port *p_ptr) | 354 | static int port_unreturnable(struct port *p_ptr) |
@@ -371,7 +365,7 @@ int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable) | |||
371 | return -EINVAL; | 365 | return -EINVAL; |
372 | *isunrejectable = port_unreturnable(p_ptr); | 366 | *isunrejectable = port_unreturnable(p_ptr); |
373 | tipc_port_unlock(p_ptr); | 367 | tipc_port_unlock(p_ptr); |
374 | return TIPC_OK; | 368 | return 0; |
375 | } | 369 | } |
376 | 370 | ||
377 | int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable) | 371 | int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable) |
@@ -383,7 +377,7 @@ int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable) | |||
383 | return -EINVAL; | 377 | return -EINVAL; |
384 | msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0)); | 378 | msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0)); |
385 | tipc_port_unlock(p_ptr); | 379 | tipc_port_unlock(p_ptr); |
386 | return TIPC_OK; | 380 | return 0; |
387 | } | 381 | } |
388 | 382 | ||
389 | /* | 383 | /* |
@@ -402,10 +396,10 @@ static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode, | |||
402 | buf = buf_acquire(LONG_H_SIZE); | 396 | buf = buf_acquire(LONG_H_SIZE); |
403 | if (buf) { | 397 | if (buf) { |
404 | msg = buf_msg(buf); | 398 | msg = buf_msg(buf); |
405 | msg_init(msg, usr, type, err, LONG_H_SIZE, destnode); | 399 | msg_init(msg, usr, type, LONG_H_SIZE, destnode); |
400 | msg_set_errcode(msg, err); | ||
406 | msg_set_destport(msg, destport); | 401 | msg_set_destport(msg, destport); |
407 | msg_set_origport(msg, origport); | 402 | msg_set_origport(msg, origport); |
408 | msg_set_destnode(msg, destnode); | ||
409 | msg_set_orignode(msg, orignode); | 403 | msg_set_orignode(msg, orignode); |
410 | msg_set_transp_seqno(msg, seqno); | 404 | msg_set_transp_seqno(msg, seqno); |
411 | msg_set_msgcnt(msg, ack); | 405 | msg_set_msgcnt(msg, ack); |
@@ -446,17 +440,19 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err) | |||
446 | return data_sz; | 440 | return data_sz; |
447 | } | 441 | } |
448 | rmsg = buf_msg(rbuf); | 442 | rmsg = buf_msg(rbuf); |
449 | msg_init(rmsg, imp, msg_type(msg), err, hdr_sz, msg_orignode(msg)); | 443 | msg_init(rmsg, imp, msg_type(msg), hdr_sz, msg_orignode(msg)); |
444 | msg_set_errcode(rmsg, err); | ||
450 | msg_set_destport(rmsg, msg_origport(msg)); | 445 | msg_set_destport(rmsg, msg_origport(msg)); |
451 | msg_set_prevnode(rmsg, tipc_own_addr); | ||
452 | msg_set_origport(rmsg, msg_destport(msg)); | 446 | msg_set_origport(rmsg, msg_destport(msg)); |
453 | if (msg_short(msg)) | 447 | if (msg_short(msg)) { |
454 | msg_set_orignode(rmsg, tipc_own_addr); | 448 | msg_set_orignode(rmsg, tipc_own_addr); |
455 | else | 449 | /* leave name type & instance as zeroes */ |
450 | } else { | ||
456 | msg_set_orignode(rmsg, msg_destnode(msg)); | 451 | msg_set_orignode(rmsg, msg_destnode(msg)); |
452 | msg_set_nametype(rmsg, msg_nametype(msg)); | ||
453 | msg_set_nameinst(rmsg, msg_nameinst(msg)); | ||
454 | } | ||
457 | msg_set_size(rmsg, data_sz + hdr_sz); | 455 | msg_set_size(rmsg, data_sz + hdr_sz); |
458 | msg_set_nametype(rmsg, msg_nametype(msg)); | ||
459 | msg_set_nameinst(rmsg, msg_nameinst(msg)); | ||
460 | skb_copy_to_linear_data_offset(rbuf, hdr_sz, msg_data(msg), data_sz); | 456 | skb_copy_to_linear_data_offset(rbuf, hdr_sz, msg_data(msg), data_sz); |
461 | 457 | ||
462 | /* send self-abort message when rejecting on a connected port */ | 458 | /* send self-abort message when rejecting on a connected port */ |
@@ -778,6 +774,7 @@ void tipc_port_reinit(void) | |||
778 | msg = &p_ptr->publ.phdr; | 774 | msg = &p_ptr->publ.phdr; |
779 | if (msg_orignode(msg) == tipc_own_addr) | 775 | if (msg_orignode(msg) == tipc_own_addr) |
780 | break; | 776 | break; |
777 | msg_set_prevnode(msg, tipc_own_addr); | ||
781 | msg_set_orignode(msg, tipc_own_addr); | 778 | msg_set_orignode(msg, tipc_own_addr); |
782 | } | 779 | } |
783 | spin_unlock_bh(&tipc_port_list_lock); | 780 | spin_unlock_bh(&tipc_port_list_lock); |
@@ -838,16 +835,13 @@ static void port_dispatcher_sigh(void *dummy) | |||
838 | u32 peer_node = port_peernode(p_ptr); | 835 | u32 peer_node = port_peernode(p_ptr); |
839 | 836 | ||
840 | tipc_port_unlock(p_ptr); | 837 | tipc_port_unlock(p_ptr); |
838 | if (unlikely(!cb)) | ||
839 | goto reject; | ||
841 | if (unlikely(!connected)) { | 840 | if (unlikely(!connected)) { |
842 | if (unlikely(published)) | 841 | if (tipc_connect2port(dref, &orig)) |
843 | goto reject; | 842 | goto reject; |
844 | tipc_connect2port(dref,&orig); | 843 | } else if ((msg_origport(msg) != peer_port) || |
845 | } | 844 | (msg_orignode(msg) != peer_node)) |
846 | if (unlikely(msg_origport(msg) != peer_port)) | ||
847 | goto reject; | ||
848 | if (unlikely(msg_orignode(msg) != peer_node)) | ||
849 | goto reject; | ||
850 | if (unlikely(!cb)) | ||
851 | goto reject; | 845 | goto reject; |
852 | if (unlikely(++p_ptr->publ.conn_unacked >= | 846 | if (unlikely(++p_ptr->publ.conn_unacked >= |
853 | TIPC_FLOW_CONTROL_WIN)) | 847 | TIPC_FLOW_CONTROL_WIN)) |
@@ -862,9 +856,7 @@ static void port_dispatcher_sigh(void *dummy) | |||
862 | tipc_msg_event cb = up_ptr->msg_cb; | 856 | tipc_msg_event cb = up_ptr->msg_cb; |
863 | 857 | ||
864 | tipc_port_unlock(p_ptr); | 858 | tipc_port_unlock(p_ptr); |
865 | if (unlikely(connected)) | 859 | if (unlikely(!cb || connected)) |
866 | goto reject; | ||
867 | if (unlikely(!cb)) | ||
868 | goto reject; | 860 | goto reject; |
869 | skb_pull(buf, msg_hdr_sz(msg)); | 861 | skb_pull(buf, msg_hdr_sz(msg)); |
870 | cb(usr_handle, dref, &buf, msg_data(msg), | 862 | cb(usr_handle, dref, &buf, msg_data(msg), |
@@ -877,11 +869,7 @@ static void port_dispatcher_sigh(void *dummy) | |||
877 | tipc_named_msg_event cb = up_ptr->named_msg_cb; | 869 | tipc_named_msg_event cb = up_ptr->named_msg_cb; |
878 | 870 | ||
879 | tipc_port_unlock(p_ptr); | 871 | tipc_port_unlock(p_ptr); |
880 | if (unlikely(connected)) | 872 | if (unlikely(!cb || connected || !published)) |
881 | goto reject; | ||
882 | if (unlikely(!cb)) | ||
883 | goto reject; | ||
884 | if (unlikely(!published)) | ||
885 | goto reject; | 873 | goto reject; |
886 | dseq.type = msg_nametype(msg); | 874 | dseq.type = msg_nametype(msg); |
887 | dseq.lower = msg_nameinst(msg); | 875 | dseq.lower = msg_nameinst(msg); |
@@ -908,11 +896,10 @@ err: | |||
908 | u32 peer_node = port_peernode(p_ptr); | 896 | u32 peer_node = port_peernode(p_ptr); |
909 | 897 | ||
910 | tipc_port_unlock(p_ptr); | 898 | tipc_port_unlock(p_ptr); |
911 | if (!connected || !cb) | 899 | if (!cb || !connected) |
912 | break; | ||
913 | if (msg_origport(msg) != peer_port) | ||
914 | break; | 900 | break; |
915 | if (msg_orignode(msg) != peer_node) | 901 | if ((msg_origport(msg) != peer_port) || |
902 | (msg_orignode(msg) != peer_node)) | ||
916 | break; | 903 | break; |
917 | tipc_disconnect(dref); | 904 | tipc_disconnect(dref); |
918 | skb_pull(buf, msg_hdr_sz(msg)); | 905 | skb_pull(buf, msg_hdr_sz(msg)); |
@@ -924,7 +911,7 @@ err: | |||
924 | tipc_msg_err_event cb = up_ptr->err_cb; | 911 | tipc_msg_err_event cb = up_ptr->err_cb; |
925 | 912 | ||
926 | tipc_port_unlock(p_ptr); | 913 | tipc_port_unlock(p_ptr); |
927 | if (connected || !cb) | 914 | if (!cb || connected) |
928 | break; | 915 | break; |
929 | skb_pull(buf, msg_hdr_sz(msg)); | 916 | skb_pull(buf, msg_hdr_sz(msg)); |
930 | cb(usr_handle, dref, &buf, msg_data(msg), | 917 | cb(usr_handle, dref, &buf, msg_data(msg), |
@@ -937,7 +924,7 @@ err: | |||
937 | up_ptr->named_err_cb; | 924 | up_ptr->named_err_cb; |
938 | 925 | ||
939 | tipc_port_unlock(p_ptr); | 926 | tipc_port_unlock(p_ptr); |
940 | if (connected || !cb) | 927 | if (!cb || connected) |
941 | break; | 928 | break; |
942 | dseq.type = msg_nametype(msg); | 929 | dseq.type = msg_nametype(msg); |
943 | dseq.lower = msg_nameinst(msg); | 930 | dseq.lower = msg_nameinst(msg); |
@@ -976,7 +963,7 @@ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf) | |||
976 | tipc_k_signal((Handler)port_dispatcher_sigh, 0); | 963 | tipc_k_signal((Handler)port_dispatcher_sigh, 0); |
977 | } | 964 | } |
978 | spin_unlock_bh(&queue_lock); | 965 | spin_unlock_bh(&queue_lock); |
979 | return TIPC_OK; | 966 | return 0; |
980 | } | 967 | } |
981 | 968 | ||
982 | /* | 969 | /* |
@@ -1053,15 +1040,14 @@ int tipc_createport(u32 user_ref, | |||
1053 | { | 1040 | { |
1054 | struct user_port *up_ptr; | 1041 | struct user_port *up_ptr; |
1055 | struct port *p_ptr; | 1042 | struct port *p_ptr; |
1056 | u32 ref; | ||
1057 | 1043 | ||
1058 | up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC); | 1044 | up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC); |
1059 | if (!up_ptr) { | 1045 | if (!up_ptr) { |
1060 | warn("Port creation failed, no memory\n"); | 1046 | warn("Port creation failed, no memory\n"); |
1061 | return -ENOMEM; | 1047 | return -ENOMEM; |
1062 | } | 1048 | } |
1063 | ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance); | 1049 | p_ptr = (struct port *)tipc_createport_raw(NULL, port_dispatcher, |
1064 | p_ptr = tipc_port_lock(ref); | 1050 | port_wakeup, importance); |
1065 | if (!p_ptr) { | 1051 | if (!p_ptr) { |
1066 | kfree(up_ptr); | 1052 | kfree(up_ptr); |
1067 | return -ENOMEM; | 1053 | return -ENOMEM; |
@@ -1081,16 +1067,15 @@ int tipc_createport(u32 user_ref, | |||
1081 | INIT_LIST_HEAD(&up_ptr->uport_list); | 1067 | INIT_LIST_HEAD(&up_ptr->uport_list); |
1082 | tipc_reg_add_port(up_ptr); | 1068 | tipc_reg_add_port(up_ptr); |
1083 | *portref = p_ptr->publ.ref; | 1069 | *portref = p_ptr->publ.ref; |
1084 | dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref); | ||
1085 | tipc_port_unlock(p_ptr); | 1070 | tipc_port_unlock(p_ptr); |
1086 | return TIPC_OK; | 1071 | return 0; |
1087 | } | 1072 | } |
1088 | 1073 | ||
1089 | int tipc_ownidentity(u32 ref, struct tipc_portid *id) | 1074 | int tipc_ownidentity(u32 ref, struct tipc_portid *id) |
1090 | { | 1075 | { |
1091 | id->ref = ref; | 1076 | id->ref = ref; |
1092 | id->node = tipc_own_addr; | 1077 | id->node = tipc_own_addr; |
1093 | return TIPC_OK; | 1078 | return 0; |
1094 | } | 1079 | } |
1095 | 1080 | ||
1096 | int tipc_portimportance(u32 ref, unsigned int *importance) | 1081 | int tipc_portimportance(u32 ref, unsigned int *importance) |
@@ -1102,7 +1087,7 @@ int tipc_portimportance(u32 ref, unsigned int *importance) | |||
1102 | return -EINVAL; | 1087 | return -EINVAL; |
1103 | *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr); | 1088 | *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr); |
1104 | tipc_port_unlock(p_ptr); | 1089 | tipc_port_unlock(p_ptr); |
1105 | return TIPC_OK; | 1090 | return 0; |
1106 | } | 1091 | } |
1107 | 1092 | ||
1108 | int tipc_set_portimportance(u32 ref, unsigned int imp) | 1093 | int tipc_set_portimportance(u32 ref, unsigned int imp) |
@@ -1117,7 +1102,7 @@ int tipc_set_portimportance(u32 ref, unsigned int imp) | |||
1117 | return -EINVAL; | 1102 | return -EINVAL; |
1118 | msg_set_importance(&p_ptr->publ.phdr, (u32)imp); | 1103 | msg_set_importance(&p_ptr->publ.phdr, (u32)imp); |
1119 | tipc_port_unlock(p_ptr); | 1104 | tipc_port_unlock(p_ptr); |
1120 | return TIPC_OK; | 1105 | return 0; |
1121 | } | 1106 | } |
1122 | 1107 | ||
1123 | 1108 | ||
@@ -1152,7 +1137,7 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) | |||
1152 | list_add(&publ->pport_list, &p_ptr->publications); | 1137 | list_add(&publ->pport_list, &p_ptr->publications); |
1153 | p_ptr->pub_count++; | 1138 | p_ptr->pub_count++; |
1154 | p_ptr->publ.published = 1; | 1139 | p_ptr->publ.published = 1; |
1155 | res = TIPC_OK; | 1140 | res = 0; |
1156 | } | 1141 | } |
1157 | exit: | 1142 | exit: |
1158 | tipc_port_unlock(p_ptr); | 1143 | tipc_port_unlock(p_ptr); |
@@ -1175,7 +1160,7 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) | |||
1175 | tipc_nametbl_withdraw(publ->type, publ->lower, | 1160 | tipc_nametbl_withdraw(publ->type, publ->lower, |
1176 | publ->ref, publ->key); | 1161 | publ->ref, publ->key); |
1177 | } | 1162 | } |
1178 | res = TIPC_OK; | 1163 | res = 0; |
1179 | } else { | 1164 | } else { |
1180 | list_for_each_entry_safe(publ, tpubl, | 1165 | list_for_each_entry_safe(publ, tpubl, |
1181 | &p_ptr->publications, pport_list) { | 1166 | &p_ptr->publications, pport_list) { |
@@ -1189,7 +1174,7 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) | |||
1189 | break; | 1174 | break; |
1190 | tipc_nametbl_withdraw(publ->type, publ->lower, | 1175 | tipc_nametbl_withdraw(publ->type, publ->lower, |
1191 | publ->ref, publ->key); | 1176 | publ->ref, publ->key); |
1192 | res = TIPC_OK; | 1177 | res = 0; |
1193 | break; | 1178 | break; |
1194 | } | 1179 | } |
1195 | } | 1180 | } |
@@ -1233,7 +1218,7 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer) | |||
1233 | tipc_nodesub_subscribe(&p_ptr->subscription,peer->node, | 1218 | tipc_nodesub_subscribe(&p_ptr->subscription,peer->node, |
1234 | (void *)(unsigned long)ref, | 1219 | (void *)(unsigned long)ref, |
1235 | (net_ev_handler)port_handle_node_down); | 1220 | (net_ev_handler)port_handle_node_down); |
1236 | res = TIPC_OK; | 1221 | res = 0; |
1237 | exit: | 1222 | exit: |
1238 | tipc_port_unlock(p_ptr); | 1223 | tipc_port_unlock(p_ptr); |
1239 | p_ptr->publ.max_pkt = tipc_link_get_max_pkt(peer->node, ref); | 1224 | p_ptr->publ.max_pkt = tipc_link_get_max_pkt(peer->node, ref); |
@@ -1255,7 +1240,7 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr) | |||
1255 | /* let timer expire on it's own to avoid deadlock! */ | 1240 | /* let timer expire on it's own to avoid deadlock! */ |
1256 | tipc_nodesub_unsubscribe( | 1241 | tipc_nodesub_unsubscribe( |
1257 | &((struct port *)tp_ptr)->subscription); | 1242 | &((struct port *)tp_ptr)->subscription); |
1258 | res = TIPC_OK; | 1243 | res = 0; |
1259 | } else { | 1244 | } else { |
1260 | res = -ENOTCONN; | 1245 | res = -ENOTCONN; |
1261 | } | 1246 | } |
@@ -1320,7 +1305,7 @@ int tipc_isconnected(u32 ref, int *isconnected) | |||
1320 | return -EINVAL; | 1305 | return -EINVAL; |
1321 | *isconnected = p_ptr->publ.connected; | 1306 | *isconnected = p_ptr->publ.connected; |
1322 | tipc_port_unlock(p_ptr); | 1307 | tipc_port_unlock(p_ptr); |
1323 | return TIPC_OK; | 1308 | return 0; |
1324 | } | 1309 | } |
1325 | 1310 | ||
1326 | int tipc_peer(u32 ref, struct tipc_portid *peer) | 1311 | int tipc_peer(u32 ref, struct tipc_portid *peer) |
@@ -1334,7 +1319,7 @@ int tipc_peer(u32 ref, struct tipc_portid *peer) | |||
1334 | if (p_ptr->publ.connected) { | 1319 | if (p_ptr->publ.connected) { |
1335 | peer->ref = port_peerport(p_ptr); | 1320 | peer->ref = port_peerport(p_ptr); |
1336 | peer->node = port_peernode(p_ptr); | 1321 | peer->node = port_peernode(p_ptr); |
1337 | res = TIPC_OK; | 1322 | res = 0; |
1338 | } else | 1323 | } else |
1339 | res = -ENOTCONN; | 1324 | res = -ENOTCONN; |
1340 | tipc_port_unlock(p_ptr); | 1325 | tipc_port_unlock(p_ptr); |
diff --git a/net/tipc/ref.c b/net/tipc/ref.c index 89cbab24d08f..414fc34b8bea 100644 --- a/net/tipc/ref.c +++ b/net/tipc/ref.c | |||
@@ -123,7 +123,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start) | |||
123 | tipc_ref_table.index_mask = actual_size - 1; | 123 | tipc_ref_table.index_mask = actual_size - 1; |
124 | tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask; | 124 | tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask; |
125 | 125 | ||
126 | return TIPC_OK; | 126 | return 0; |
127 | } | 127 | } |
128 | 128 | ||
129 | /** | 129 | /** |
@@ -142,9 +142,13 @@ void tipc_ref_table_stop(void) | |||
142 | /** | 142 | /** |
143 | * tipc_ref_acquire - create reference to an object | 143 | * tipc_ref_acquire - create reference to an object |
144 | * | 144 | * |
145 | * Return a unique reference value which can be translated back to the pointer | 145 | * Register an object pointer in reference table and lock the object. |
146 | * 'object' at a later time. Also, pass back a pointer to the lock protecting | 146 | * Returns a unique reference value that is used from then on to retrieve the |
147 | * the object, but without locking it. | 147 | * object pointer, or to determine that the object has been deregistered. |
148 | * | ||
149 | * Note: The object is returned in the locked state so that the caller can | ||
150 | * register a partially initialized object, without running the risk that | ||
151 | * the object will be accessed before initialization is complete. | ||
148 | */ | 152 | */ |
149 | 153 | ||
150 | u32 tipc_ref_acquire(void *object, spinlock_t **lock) | 154 | u32 tipc_ref_acquire(void *object, spinlock_t **lock) |
@@ -178,13 +182,13 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock) | |||
178 | ref = (next_plus_upper & ~index_mask) + index; | 182 | ref = (next_plus_upper & ~index_mask) + index; |
179 | entry->ref = ref; | 183 | entry->ref = ref; |
180 | entry->object = object; | 184 | entry->object = object; |
181 | spin_unlock_bh(&entry->lock); | ||
182 | *lock = &entry->lock; | 185 | *lock = &entry->lock; |
183 | } | 186 | } |
184 | else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { | 187 | else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { |
185 | index = tipc_ref_table.init_point++; | 188 | index = tipc_ref_table.init_point++; |
186 | entry = &(tipc_ref_table.entries[index]); | 189 | entry = &(tipc_ref_table.entries[index]); |
187 | spin_lock_init(&entry->lock); | 190 | spin_lock_init(&entry->lock); |
191 | spin_lock_bh(&entry->lock); | ||
188 | ref = tipc_ref_table.start_mask + index; | 192 | ref = tipc_ref_table.start_mask + index; |
189 | entry->ref = ref; | 193 | entry->ref = ref; |
190 | entry->object = object; | 194 | entry->object = object; |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 230f9ca2ad6b..1848693ebb82 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * net/tipc/socket.c: TIPC socket API | 2 | * net/tipc/socket.c: TIPC socket API |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2007, Ericsson AB | 4 | * Copyright (c) 2001-2007, Ericsson AB |
5 | * Copyright (c) 2004-2007, Wind River Systems | 5 | * Copyright (c) 2004-2008, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | 8 | * Redistribution and use in source and binary forms, with or without |
@@ -63,6 +63,7 @@ | |||
63 | struct tipc_sock { | 63 | struct tipc_sock { |
64 | struct sock sk; | 64 | struct sock sk; |
65 | struct tipc_port *p; | 65 | struct tipc_port *p; |
66 | struct tipc_portid peer_name; | ||
66 | }; | 67 | }; |
67 | 68 | ||
68 | #define tipc_sk(sk) ((struct tipc_sock *)(sk)) | 69 | #define tipc_sk(sk) ((struct tipc_sock *)(sk)) |
@@ -188,7 +189,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol) | |||
188 | const struct proto_ops *ops; | 189 | const struct proto_ops *ops; |
189 | socket_state state; | 190 | socket_state state; |
190 | struct sock *sk; | 191 | struct sock *sk; |
191 | u32 portref; | 192 | struct tipc_port *tp_ptr; |
192 | 193 | ||
193 | /* Validate arguments */ | 194 | /* Validate arguments */ |
194 | 195 | ||
@@ -224,9 +225,9 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol) | |||
224 | 225 | ||
225 | /* Allocate TIPC port for socket to use */ | 226 | /* Allocate TIPC port for socket to use */ |
226 | 227 | ||
227 | portref = tipc_createport_raw(sk, &dispatch, &wakeupdispatch, | 228 | tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch, |
228 | TIPC_LOW_IMPORTANCE); | 229 | TIPC_LOW_IMPORTANCE); |
229 | if (unlikely(portref == 0)) { | 230 | if (unlikely(!tp_ptr)) { |
230 | sk_free(sk); | 231 | sk_free(sk); |
231 | return -ENOMEM; | 232 | return -ENOMEM; |
232 | } | 233 | } |
@@ -239,12 +240,14 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol) | |||
239 | sock_init_data(sock, sk); | 240 | sock_init_data(sock, sk); |
240 | sk->sk_rcvtimeo = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT); | 241 | sk->sk_rcvtimeo = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT); |
241 | sk->sk_backlog_rcv = backlog_rcv; | 242 | sk->sk_backlog_rcv = backlog_rcv; |
242 | tipc_sk(sk)->p = tipc_get_port(portref); | 243 | tipc_sk(sk)->p = tp_ptr; |
244 | |||
245 | spin_unlock_bh(tp_ptr->lock); | ||
243 | 246 | ||
244 | if (sock->state == SS_READY) { | 247 | if (sock->state == SS_READY) { |
245 | tipc_set_portunreturnable(portref, 1); | 248 | tipc_set_portunreturnable(tp_ptr->ref, 1); |
246 | if (sock->type == SOCK_DGRAM) | 249 | if (sock->type == SOCK_DGRAM) |
247 | tipc_set_portunreliable(portref, 1); | 250 | tipc_set_portunreliable(tp_ptr->ref, 1); |
248 | } | 251 | } |
249 | 252 | ||
250 | atomic_inc(&tipc_user_count); | 253 | atomic_inc(&tipc_user_count); |
@@ -375,27 +378,29 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) | |||
375 | * @sock: socket structure | 378 | * @sock: socket structure |
376 | * @uaddr: area for returned socket address | 379 | * @uaddr: area for returned socket address |
377 | * @uaddr_len: area for returned length of socket address | 380 | * @uaddr_len: area for returned length of socket address |
378 | * @peer: 0 to obtain socket name, 1 to obtain peer socket name | 381 | * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID |
379 | * | 382 | * |
380 | * Returns 0 on success, errno otherwise | 383 | * Returns 0 on success, errno otherwise |
381 | * | 384 | * |
382 | * NOTE: This routine doesn't need to take the socket lock since it doesn't | 385 | * NOTE: This routine doesn't need to take the socket lock since it only |
383 | * access any non-constant socket information. | 386 | * accesses socket information that is unchanging (or which changes in |
387 | * a completely predictable manner). | ||
384 | */ | 388 | */ |
385 | 389 | ||
386 | static int get_name(struct socket *sock, struct sockaddr *uaddr, | 390 | static int get_name(struct socket *sock, struct sockaddr *uaddr, |
387 | int *uaddr_len, int peer) | 391 | int *uaddr_len, int peer) |
388 | { | 392 | { |
389 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; | 393 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; |
390 | u32 portref = tipc_sk_port(sock->sk)->ref; | 394 | struct tipc_sock *tsock = tipc_sk(sock->sk); |
391 | u32 res; | ||
392 | 395 | ||
393 | if (peer) { | 396 | if (peer) { |
394 | res = tipc_peer(portref, &addr->addr.id); | 397 | if ((sock->state != SS_CONNECTED) && |
395 | if (res) | 398 | ((peer != 2) || (sock->state != SS_DISCONNECTING))) |
396 | return res; | 399 | return -ENOTCONN; |
400 | addr->addr.id.ref = tsock->peer_name.ref; | ||
401 | addr->addr.id.node = tsock->peer_name.node; | ||
397 | } else { | 402 | } else { |
398 | tipc_ownidentity(portref, &addr->addr.id); | 403 | tipc_ownidentity(tsock->p->ref, &addr->addr.id); |
399 | } | 404 | } |
400 | 405 | ||
401 | *uaddr_len = sizeof(*addr); | 406 | *uaddr_len = sizeof(*addr); |
@@ -764,18 +769,17 @@ exit: | |||
764 | 769 | ||
765 | static int auto_connect(struct socket *sock, struct tipc_msg *msg) | 770 | static int auto_connect(struct socket *sock, struct tipc_msg *msg) |
766 | { | 771 | { |
767 | struct tipc_port *tport = tipc_sk_port(sock->sk); | 772 | struct tipc_sock *tsock = tipc_sk(sock->sk); |
768 | struct tipc_portid peer; | ||
769 | 773 | ||
770 | if (msg_errcode(msg)) { | 774 | if (msg_errcode(msg)) { |
771 | sock->state = SS_DISCONNECTING; | 775 | sock->state = SS_DISCONNECTING; |
772 | return -ECONNREFUSED; | 776 | return -ECONNREFUSED; |
773 | } | 777 | } |
774 | 778 | ||
775 | peer.ref = msg_origport(msg); | 779 | tsock->peer_name.ref = msg_origport(msg); |
776 | peer.node = msg_orignode(msg); | 780 | tsock->peer_name.node = msg_orignode(msg); |
777 | tipc_connect2port(tport->ref, &peer); | 781 | tipc_connect2port(tsock->p->ref, &tsock->peer_name); |
778 | tipc_set_portimportance(tport->ref, msg_importance(msg)); | 782 | tipc_set_portimportance(tsock->p->ref, msg_importance(msg)); |
779 | sock->state = SS_CONNECTED; | 783 | sock->state = SS_CONNECTED; |
780 | return 0; | 784 | return 0; |
781 | } | 785 | } |
@@ -1131,7 +1135,7 @@ restart: | |||
1131 | /* Loop around if more data is required */ | 1135 | /* Loop around if more data is required */ |
1132 | 1136 | ||
1133 | if ((sz_copied < buf_len) /* didn't get all requested data */ | 1137 | if ((sz_copied < buf_len) /* didn't get all requested data */ |
1134 | && (!skb_queue_empty(&sock->sk->sk_receive_queue) || | 1138 | && (!skb_queue_empty(&sk->sk_receive_queue) || |
1135 | (flags & MSG_WAITALL)) | 1139 | (flags & MSG_WAITALL)) |
1136 | /* ... and more is ready or required */ | 1140 | /* ... and more is ready or required */ |
1137 | && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */ | 1141 | && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */ |
@@ -1527,9 +1531,9 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags) | |||
1527 | res = tipc_create(sock_net(sock->sk), new_sock, 0); | 1531 | res = tipc_create(sock_net(sock->sk), new_sock, 0); |
1528 | if (!res) { | 1532 | if (!res) { |
1529 | struct sock *new_sk = new_sock->sk; | 1533 | struct sock *new_sk = new_sock->sk; |
1530 | struct tipc_port *new_tport = tipc_sk_port(new_sk); | 1534 | struct tipc_sock *new_tsock = tipc_sk(new_sk); |
1535 | struct tipc_port *new_tport = new_tsock->p; | ||
1531 | u32 new_ref = new_tport->ref; | 1536 | u32 new_ref = new_tport->ref; |
1532 | struct tipc_portid id; | ||
1533 | struct tipc_msg *msg = buf_msg(buf); | 1537 | struct tipc_msg *msg = buf_msg(buf); |
1534 | 1538 | ||
1535 | lock_sock(new_sk); | 1539 | lock_sock(new_sk); |
@@ -1543,9 +1547,9 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags) | |||
1543 | 1547 | ||
1544 | /* Connect new socket to it's peer */ | 1548 | /* Connect new socket to it's peer */ |
1545 | 1549 | ||
1546 | id.ref = msg_origport(msg); | 1550 | new_tsock->peer_name.ref = msg_origport(msg); |
1547 | id.node = msg_orignode(msg); | 1551 | new_tsock->peer_name.node = msg_orignode(msg); |
1548 | tipc_connect2port(new_ref, &id); | 1552 | tipc_connect2port(new_ref, &new_tsock->peer_name); |
1549 | new_sock->state = SS_CONNECTED; | 1553 | new_sock->state = SS_CONNECTED; |
1550 | 1554 | ||
1551 | tipc_set_portimportance(new_ref, msg_importance(msg)); | 1555 | tipc_set_portimportance(new_ref, msg_importance(msg)); |
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 8c01ccd3626c..0326d3060bc7 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * net/tipc/subscr.c: TIPC subscription service | 2 | * net/tipc/subscr.c: TIPC network topology service |
3 | * | 3 | * |
4 | * Copyright (c) 2000-2006, Ericsson AB | 4 | * Copyright (c) 2000-2006, Ericsson AB |
5 | * Copyright (c) 2005, Wind River Systems | 5 | * Copyright (c) 2005-2007, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | 8 | * Redistribution and use in source and binary forms, with or without |
@@ -36,27 +36,24 @@ | |||
36 | 36 | ||
37 | #include "core.h" | 37 | #include "core.h" |
38 | #include "dbg.h" | 38 | #include "dbg.h" |
39 | #include "subscr.h" | ||
40 | #include "name_table.h" | 39 | #include "name_table.h" |
40 | #include "port.h" | ||
41 | #include "ref.h" | 41 | #include "ref.h" |
42 | #include "subscr.h" | ||
42 | 43 | ||
43 | /** | 44 | /** |
44 | * struct subscriber - TIPC network topology subscriber | 45 | * struct subscriber - TIPC network topology subscriber |
45 | * @ref: object reference to subscriber object itself | 46 | * @port_ref: object reference to server port connecting to subscriber |
46 | * @lock: pointer to spinlock controlling access to subscriber object | 47 | * @lock: pointer to spinlock controlling access to subscriber's server port |
47 | * @subscriber_list: adjacent subscribers in top. server's list of subscribers | 48 | * @subscriber_list: adjacent subscribers in top. server's list of subscribers |
48 | * @subscription_list: list of subscription objects for this subscriber | 49 | * @subscription_list: list of subscription objects for this subscriber |
49 | * @port_ref: object reference to port used to communicate with subscriber | ||
50 | * @swap: indicates if subscriber uses opposite endianness in its messages | ||
51 | */ | 50 | */ |
52 | 51 | ||
53 | struct subscriber { | 52 | struct subscriber { |
54 | u32 ref; | 53 | u32 port_ref; |
55 | spinlock_t *lock; | 54 | spinlock_t *lock; |
56 | struct list_head subscriber_list; | 55 | struct list_head subscriber_list; |
57 | struct list_head subscription_list; | 56 | struct list_head subscription_list; |
58 | u32 port_ref; | ||
59 | int swap; | ||
60 | }; | 57 | }; |
61 | 58 | ||
62 | /** | 59 | /** |
@@ -88,13 +85,14 @@ static struct top_srv topsrv = { 0 }; | |||
88 | 85 | ||
89 | static u32 htohl(u32 in, int swap) | 86 | static u32 htohl(u32 in, int swap) |
90 | { | 87 | { |
91 | char *c = (char *)∈ | 88 | return swap ? (u32)___constant_swab32(in) : in; |
92 | |||
93 | return swap ? ((c[3] << 3) + (c[2] << 2) + (c[1] << 1) + c[0]) : in; | ||
94 | } | 89 | } |
95 | 90 | ||
96 | /** | 91 | /** |
97 | * subscr_send_event - send a message containing a tipc_event to the subscriber | 92 | * subscr_send_event - send a message containing a tipc_event to the subscriber |
93 | * | ||
94 | * Note: Must not hold subscriber's server port lock, since tipc_send() will | ||
95 | * try to take the lock if the message is rejected and returned! | ||
98 | */ | 96 | */ |
99 | 97 | ||
100 | static void subscr_send_event(struct subscription *sub, | 98 | static void subscr_send_event(struct subscription *sub, |
@@ -109,12 +107,12 @@ static void subscr_send_event(struct subscription *sub, | |||
109 | msg_sect.iov_base = (void *)&sub->evt; | 107 | msg_sect.iov_base = (void *)&sub->evt; |
110 | msg_sect.iov_len = sizeof(struct tipc_event); | 108 | msg_sect.iov_len = sizeof(struct tipc_event); |
111 | 109 | ||
112 | sub->evt.event = htohl(event, sub->owner->swap); | 110 | sub->evt.event = htohl(event, sub->swap); |
113 | sub->evt.found_lower = htohl(found_lower, sub->owner->swap); | 111 | sub->evt.found_lower = htohl(found_lower, sub->swap); |
114 | sub->evt.found_upper = htohl(found_upper, sub->owner->swap); | 112 | sub->evt.found_upper = htohl(found_upper, sub->swap); |
115 | sub->evt.port.ref = htohl(port_ref, sub->owner->swap); | 113 | sub->evt.port.ref = htohl(port_ref, sub->swap); |
116 | sub->evt.port.node = htohl(node, sub->owner->swap); | 114 | sub->evt.port.node = htohl(node, sub->swap); |
117 | tipc_send(sub->owner->port_ref, 1, &msg_sect); | 115 | tipc_send(sub->server_ref, 1, &msg_sect); |
118 | } | 116 | } |
119 | 117 | ||
120 | /** | 118 | /** |
@@ -151,13 +149,12 @@ void tipc_subscr_report_overlap(struct subscription *sub, | |||
151 | u32 node, | 149 | u32 node, |
152 | int must) | 150 | int must) |
153 | { | 151 | { |
154 | dbg("Rep overlap %u:%u,%u<->%u,%u\n", sub->seq.type, sub->seq.lower, | ||
155 | sub->seq.upper, found_lower, found_upper); | ||
156 | if (!tipc_subscr_overlap(sub, found_lower, found_upper)) | 152 | if (!tipc_subscr_overlap(sub, found_lower, found_upper)) |
157 | return; | 153 | return; |
158 | if (!must && !(sub->filter & TIPC_SUB_PORTS)) | 154 | if (!must && !(sub->filter & TIPC_SUB_PORTS)) |
159 | return; | 155 | return; |
160 | subscr_send_event(sub, found_lower, found_upper, event, port_ref, node); | 156 | |
157 | sub->event_cb(sub, found_lower, found_upper, event, port_ref, node); | ||
161 | } | 158 | } |
162 | 159 | ||
163 | /** | 160 | /** |
@@ -166,20 +163,18 @@ void tipc_subscr_report_overlap(struct subscription *sub, | |||
166 | 163 | ||
167 | static void subscr_timeout(struct subscription *sub) | 164 | static void subscr_timeout(struct subscription *sub) |
168 | { | 165 | { |
169 | struct subscriber *subscriber; | 166 | struct port *server_port; |
170 | u32 subscriber_ref; | ||
171 | 167 | ||
172 | /* Validate subscriber reference (in case subscriber is terminating) */ | 168 | /* Validate server port reference (in case subscriber is terminating) */ |
173 | 169 | ||
174 | subscriber_ref = sub->owner->ref; | 170 | server_port = tipc_port_lock(sub->server_ref); |
175 | subscriber = (struct subscriber *)tipc_ref_lock(subscriber_ref); | 171 | if (server_port == NULL) |
176 | if (subscriber == NULL) | ||
177 | return; | 172 | return; |
178 | 173 | ||
179 | /* Validate timeout (in case subscription is being cancelled) */ | 174 | /* Validate timeout (in case subscription is being cancelled) */ |
180 | 175 | ||
181 | if (sub->timeout == TIPC_WAIT_FOREVER) { | 176 | if (sub->timeout == TIPC_WAIT_FOREVER) { |
182 | tipc_ref_unlock(subscriber_ref); | 177 | tipc_port_unlock(server_port); |
183 | return; | 178 | return; |
184 | } | 179 | } |
185 | 180 | ||
@@ -187,19 +182,21 @@ static void subscr_timeout(struct subscription *sub) | |||
187 | 182 | ||
188 | tipc_nametbl_unsubscribe(sub); | 183 | tipc_nametbl_unsubscribe(sub); |
189 | 184 | ||
190 | /* Notify subscriber of timeout, then unlink subscription */ | 185 | /* Unlink subscription from subscriber */ |
191 | 186 | ||
192 | subscr_send_event(sub, | ||
193 | sub->evt.s.seq.lower, | ||
194 | sub->evt.s.seq.upper, | ||
195 | TIPC_SUBSCR_TIMEOUT, | ||
196 | 0, | ||
197 | 0); | ||
198 | list_del(&sub->subscription_list); | 187 | list_del(&sub->subscription_list); |
199 | 188 | ||
189 | /* Release subscriber's server port */ | ||
190 | |||
191 | tipc_port_unlock(server_port); | ||
192 | |||
193 | /* Notify subscriber of timeout */ | ||
194 | |||
195 | subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, | ||
196 | TIPC_SUBSCR_TIMEOUT, 0, 0); | ||
197 | |||
200 | /* Now destroy subscription */ | 198 | /* Now destroy subscription */ |
201 | 199 | ||
202 | tipc_ref_unlock(subscriber_ref); | ||
203 | k_term_timer(&sub->timer); | 200 | k_term_timer(&sub->timer); |
204 | kfree(sub); | 201 | kfree(sub); |
205 | atomic_dec(&topsrv.subscription_count); | 202 | atomic_dec(&topsrv.subscription_count); |
@@ -208,7 +205,7 @@ static void subscr_timeout(struct subscription *sub) | |||
208 | /** | 205 | /** |
209 | * subscr_del - delete a subscription within a subscription list | 206 | * subscr_del - delete a subscription within a subscription list |
210 | * | 207 | * |
211 | * Called with subscriber locked. | 208 | * Called with subscriber port locked. |
212 | */ | 209 | */ |
213 | 210 | ||
214 | static void subscr_del(struct subscription *sub) | 211 | static void subscr_del(struct subscription *sub) |
@@ -222,7 +219,7 @@ static void subscr_del(struct subscription *sub) | |||
222 | /** | 219 | /** |
223 | * subscr_terminate - terminate communication with a subscriber | 220 | * subscr_terminate - terminate communication with a subscriber |
224 | * | 221 | * |
225 | * Called with subscriber locked. Routine must temporarily release this lock | 222 | * Called with subscriber port locked. Routine must temporarily release lock |
226 | * to enable subscription timeout routine(s) to finish without deadlocking; | 223 | * to enable subscription timeout routine(s) to finish without deadlocking; |
227 | * the lock is then reclaimed to allow caller to release it upon return. | 224 | * the lock is then reclaimed to allow caller to release it upon return. |
228 | * (This should work even in the unlikely event some other thread creates | 225 | * (This should work even in the unlikely event some other thread creates |
@@ -232,14 +229,21 @@ static void subscr_del(struct subscription *sub) | |||
232 | 229 | ||
233 | static void subscr_terminate(struct subscriber *subscriber) | 230 | static void subscr_terminate(struct subscriber *subscriber) |
234 | { | 231 | { |
232 | u32 port_ref; | ||
235 | struct subscription *sub; | 233 | struct subscription *sub; |
236 | struct subscription *sub_temp; | 234 | struct subscription *sub_temp; |
237 | 235 | ||
238 | /* Invalidate subscriber reference */ | 236 | /* Invalidate subscriber reference */ |
239 | 237 | ||
240 | tipc_ref_discard(subscriber->ref); | 238 | port_ref = subscriber->port_ref; |
239 | subscriber->port_ref = 0; | ||
241 | spin_unlock_bh(subscriber->lock); | 240 | spin_unlock_bh(subscriber->lock); |
242 | 241 | ||
242 | /* Sever connection to subscriber */ | ||
243 | |||
244 | tipc_shutdown(port_ref); | ||
245 | tipc_deleteport(port_ref); | ||
246 | |||
243 | /* Destroy any existing subscriptions for subscriber */ | 247 | /* Destroy any existing subscriptions for subscriber */ |
244 | 248 | ||
245 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, | 249 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, |
@@ -253,27 +257,25 @@ static void subscr_terminate(struct subscriber *subscriber) | |||
253 | subscr_del(sub); | 257 | subscr_del(sub); |
254 | } | 258 | } |
255 | 259 | ||
256 | /* Sever connection to subscriber */ | ||
257 | |||
258 | tipc_shutdown(subscriber->port_ref); | ||
259 | tipc_deleteport(subscriber->port_ref); | ||
260 | |||
261 | /* Remove subscriber from topology server's subscriber list */ | 260 | /* Remove subscriber from topology server's subscriber list */ |
262 | 261 | ||
263 | spin_lock_bh(&topsrv.lock); | 262 | spin_lock_bh(&topsrv.lock); |
264 | list_del(&subscriber->subscriber_list); | 263 | list_del(&subscriber->subscriber_list); |
265 | spin_unlock_bh(&topsrv.lock); | 264 | spin_unlock_bh(&topsrv.lock); |
266 | 265 | ||
267 | /* Now destroy subscriber */ | 266 | /* Reclaim subscriber lock */ |
268 | 267 | ||
269 | spin_lock_bh(subscriber->lock); | 268 | spin_lock_bh(subscriber->lock); |
269 | |||
270 | /* Now destroy subscriber */ | ||
271 | |||
270 | kfree(subscriber); | 272 | kfree(subscriber); |
271 | } | 273 | } |
272 | 274 | ||
273 | /** | 275 | /** |
274 | * subscr_cancel - handle subscription cancellation request | 276 | * subscr_cancel - handle subscription cancellation request |
275 | * | 277 | * |
276 | * Called with subscriber locked. Routine must temporarily release this lock | 278 | * Called with subscriber port locked. Routine must temporarily release lock |
277 | * to enable the subscription timeout routine to finish without deadlocking; | 279 | * to enable the subscription timeout routine to finish without deadlocking; |
278 | * the lock is then reclaimed to allow caller to release it upon return. | 280 | * the lock is then reclaimed to allow caller to release it upon return. |
279 | * | 281 | * |
@@ -316,27 +318,25 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
316 | /** | 318 | /** |
317 | * subscr_subscribe - create subscription for subscriber | 319 | * subscr_subscribe - create subscription for subscriber |
318 | * | 320 | * |
319 | * Called with subscriber locked | 321 | * Called with subscriber port locked. |
320 | */ | 322 | */ |
321 | 323 | ||
322 | static void subscr_subscribe(struct tipc_subscr *s, | 324 | static struct subscription *subscr_subscribe(struct tipc_subscr *s, |
323 | struct subscriber *subscriber) | 325 | struct subscriber *subscriber) |
324 | { | 326 | { |
325 | struct subscription *sub; | 327 | struct subscription *sub; |
328 | int swap; | ||
326 | 329 | ||
327 | /* Determine/update subscriber's endianness */ | 330 | /* Determine subscriber's endianness */ |
328 | 331 | ||
329 | if (s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE)) | 332 | swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE)); |
330 | subscriber->swap = 0; | ||
331 | else | ||
332 | subscriber->swap = 1; | ||
333 | 333 | ||
334 | /* Detect & process a subscription cancellation request */ | 334 | /* Detect & process a subscription cancellation request */ |
335 | 335 | ||
336 | if (s->filter & htohl(TIPC_SUB_CANCEL, subscriber->swap)) { | 336 | if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { |
337 | s->filter &= ~htohl(TIPC_SUB_CANCEL, subscriber->swap); | 337 | s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); |
338 | subscr_cancel(s, subscriber); | 338 | subscr_cancel(s, subscriber); |
339 | return; | 339 | return NULL; |
340 | } | 340 | } |
341 | 341 | ||
342 | /* Refuse subscription if global limit exceeded */ | 342 | /* Refuse subscription if global limit exceeded */ |
@@ -345,63 +345,66 @@ static void subscr_subscribe(struct tipc_subscr *s, | |||
345 | warn("Subscription rejected, subscription limit reached (%u)\n", | 345 | warn("Subscription rejected, subscription limit reached (%u)\n", |
346 | tipc_max_subscriptions); | 346 | tipc_max_subscriptions); |
347 | subscr_terminate(subscriber); | 347 | subscr_terminate(subscriber); |
348 | return; | 348 | return NULL; |
349 | } | 349 | } |
350 | 350 | ||
351 | /* Allocate subscription object */ | 351 | /* Allocate subscription object */ |
352 | 352 | ||
353 | sub = kzalloc(sizeof(*sub), GFP_ATOMIC); | 353 | sub = kmalloc(sizeof(*sub), GFP_ATOMIC); |
354 | if (!sub) { | 354 | if (!sub) { |
355 | warn("Subscription rejected, no memory\n"); | 355 | warn("Subscription rejected, no memory\n"); |
356 | subscr_terminate(subscriber); | 356 | subscr_terminate(subscriber); |
357 | return; | 357 | return NULL; |
358 | } | 358 | } |
359 | 359 | ||
360 | /* Initialize subscription object */ | 360 | /* Initialize subscription object */ |
361 | 361 | ||
362 | sub->seq.type = htohl(s->seq.type, subscriber->swap); | 362 | sub->seq.type = htohl(s->seq.type, swap); |
363 | sub->seq.lower = htohl(s->seq.lower, subscriber->swap); | 363 | sub->seq.lower = htohl(s->seq.lower, swap); |
364 | sub->seq.upper = htohl(s->seq.upper, subscriber->swap); | 364 | sub->seq.upper = htohl(s->seq.upper, swap); |
365 | sub->timeout = htohl(s->timeout, subscriber->swap); | 365 | sub->timeout = htohl(s->timeout, swap); |
366 | sub->filter = htohl(s->filter, subscriber->swap); | 366 | sub->filter = htohl(s->filter, swap); |
367 | if ((!(sub->filter & TIPC_SUB_PORTS) | 367 | if ((!(sub->filter & TIPC_SUB_PORTS) |
368 | == !(sub->filter & TIPC_SUB_SERVICE)) | 368 | == !(sub->filter & TIPC_SUB_SERVICE)) |
369 | || (sub->seq.lower > sub->seq.upper)) { | 369 | || (sub->seq.lower > sub->seq.upper)) { |
370 | warn("Subscription rejected, illegal request\n"); | 370 | warn("Subscription rejected, illegal request\n"); |
371 | kfree(sub); | 371 | kfree(sub); |
372 | subscr_terminate(subscriber); | 372 | subscr_terminate(subscriber); |
373 | return; | 373 | return NULL; |
374 | } | 374 | } |
375 | memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); | 375 | sub->event_cb = subscr_send_event; |
376 | INIT_LIST_HEAD(&sub->subscription_list); | ||
377 | INIT_LIST_HEAD(&sub->nameseq_list); | 376 | INIT_LIST_HEAD(&sub->nameseq_list); |
378 | list_add(&sub->subscription_list, &subscriber->subscription_list); | 377 | list_add(&sub->subscription_list, &subscriber->subscription_list); |
378 | sub->server_ref = subscriber->port_ref; | ||
379 | sub->swap = swap; | ||
380 | memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); | ||
379 | atomic_inc(&topsrv.subscription_count); | 381 | atomic_inc(&topsrv.subscription_count); |
380 | if (sub->timeout != TIPC_WAIT_FOREVER) { | 382 | if (sub->timeout != TIPC_WAIT_FOREVER) { |
381 | k_init_timer(&sub->timer, | 383 | k_init_timer(&sub->timer, |
382 | (Handler)subscr_timeout, (unsigned long)sub); | 384 | (Handler)subscr_timeout, (unsigned long)sub); |
383 | k_start_timer(&sub->timer, sub->timeout); | 385 | k_start_timer(&sub->timer, sub->timeout); |
384 | } | 386 | } |
385 | sub->owner = subscriber; | 387 | |
386 | tipc_nametbl_subscribe(sub); | 388 | return sub; |
387 | } | 389 | } |
388 | 390 | ||
389 | /** | 391 | /** |
390 | * subscr_conn_shutdown_event - handle termination request from subscriber | 392 | * subscr_conn_shutdown_event - handle termination request from subscriber |
393 | * | ||
394 | * Called with subscriber's server port unlocked. | ||
391 | */ | 395 | */ |
392 | 396 | ||
393 | static void subscr_conn_shutdown_event(void *usr_handle, | 397 | static void subscr_conn_shutdown_event(void *usr_handle, |
394 | u32 portref, | 398 | u32 port_ref, |
395 | struct sk_buff **buf, | 399 | struct sk_buff **buf, |
396 | unsigned char const *data, | 400 | unsigned char const *data, |
397 | unsigned int size, | 401 | unsigned int size, |
398 | int reason) | 402 | int reason) |
399 | { | 403 | { |
400 | struct subscriber *subscriber; | 404 | struct subscriber *subscriber = usr_handle; |
401 | spinlock_t *subscriber_lock; | 405 | spinlock_t *subscriber_lock; |
402 | 406 | ||
403 | subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle); | 407 | if (tipc_port_lock(port_ref) == NULL) |
404 | if (subscriber == NULL) | ||
405 | return; | 408 | return; |
406 | 409 | ||
407 | subscriber_lock = subscriber->lock; | 410 | subscriber_lock = subscriber->lock; |
@@ -411,6 +414,8 @@ static void subscr_conn_shutdown_event(void *usr_handle, | |||
411 | 414 | ||
412 | /** | 415 | /** |
413 | * subscr_conn_msg_event - handle new subscription request from subscriber | 416 | * subscr_conn_msg_event - handle new subscription request from subscriber |
417 | * | ||
418 | * Called with subscriber's server port unlocked. | ||
414 | */ | 419 | */ |
415 | 420 | ||
416 | static void subscr_conn_msg_event(void *usr_handle, | 421 | static void subscr_conn_msg_event(void *usr_handle, |
@@ -419,20 +424,46 @@ static void subscr_conn_msg_event(void *usr_handle, | |||
419 | const unchar *data, | 424 | const unchar *data, |
420 | u32 size) | 425 | u32 size) |
421 | { | 426 | { |
422 | struct subscriber *subscriber; | 427 | struct subscriber *subscriber = usr_handle; |
423 | spinlock_t *subscriber_lock; | 428 | spinlock_t *subscriber_lock; |
429 | struct subscription *sub; | ||
430 | |||
431 | /* | ||
432 | * Lock subscriber's server port (& make a local copy of lock pointer, | ||
433 | * in case subscriber is deleted while processing subscription request) | ||
434 | */ | ||
424 | 435 | ||
425 | subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle); | 436 | if (tipc_port_lock(port_ref) == NULL) |
426 | if (subscriber == NULL) | ||
427 | return; | 437 | return; |
428 | 438 | ||
429 | subscriber_lock = subscriber->lock; | 439 | subscriber_lock = subscriber->lock; |
430 | if (size != sizeof(struct tipc_subscr)) | ||
431 | subscr_terminate(subscriber); | ||
432 | else | ||
433 | subscr_subscribe((struct tipc_subscr *)data, subscriber); | ||
434 | 440 | ||
435 | spin_unlock_bh(subscriber_lock); | 441 | if (size != sizeof(struct tipc_subscr)) { |
442 | subscr_terminate(subscriber); | ||
443 | spin_unlock_bh(subscriber_lock); | ||
444 | } else { | ||
445 | sub = subscr_subscribe((struct tipc_subscr *)data, subscriber); | ||
446 | spin_unlock_bh(subscriber_lock); | ||
447 | if (sub != NULL) { | ||
448 | |||
449 | /* | ||
450 | * We must release the server port lock before adding a | ||
451 | * subscription to the name table since TIPC needs to be | ||
452 | * able to (re)acquire the port lock if an event message | ||
453 | * issued by the subscription process is rejected and | ||
454 | * returned. The subscription cannot be deleted while | ||
455 | * it is being added to the name table because: | ||
456 | * a) the single-threading of the native API port code | ||
457 | * ensures the subscription cannot be cancelled and | ||
458 | * the subscriber connection cannot be broken, and | ||
459 | * b) the name table lock ensures the subscription | ||
460 | * timeout code cannot delete the subscription, | ||
461 | * so the subscription object is still protected. | ||
462 | */ | ||
463 | |||
464 | tipc_nametbl_subscribe(sub); | ||
465 | } | ||
466 | } | ||
436 | } | 467 | } |
437 | 468 | ||
438 | /** | 469 | /** |
@@ -448,16 +479,10 @@ static void subscr_named_msg_event(void *usr_handle, | |||
448 | struct tipc_portid const *orig, | 479 | struct tipc_portid const *orig, |
449 | struct tipc_name_seq const *dest) | 480 | struct tipc_name_seq const *dest) |
450 | { | 481 | { |
451 | struct subscriber *subscriber; | 482 | static struct iovec msg_sect = {NULL, 0}; |
452 | struct iovec msg_sect = {NULL, 0}; | ||
453 | spinlock_t *subscriber_lock; | ||
454 | 483 | ||
455 | dbg("subscr_named_msg_event: orig = %x own = %x,\n", | 484 | struct subscriber *subscriber; |
456 | orig->node, tipc_own_addr); | 485 | u32 server_port_ref; |
457 | if (size && (size != sizeof(struct tipc_subscr))) { | ||
458 | warn("Subscriber rejected, invalid subscription size\n"); | ||
459 | return; | ||
460 | } | ||
461 | 486 | ||
462 | /* Create subscriber object */ | 487 | /* Create subscriber object */ |
463 | 488 | ||
@@ -468,17 +493,11 @@ static void subscr_named_msg_event(void *usr_handle, | |||
468 | } | 493 | } |
469 | INIT_LIST_HEAD(&subscriber->subscription_list); | 494 | INIT_LIST_HEAD(&subscriber->subscription_list); |
470 | INIT_LIST_HEAD(&subscriber->subscriber_list); | 495 | INIT_LIST_HEAD(&subscriber->subscriber_list); |
471 | subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock); | ||
472 | if (subscriber->ref == 0) { | ||
473 | warn("Subscriber rejected, reference table exhausted\n"); | ||
474 | kfree(subscriber); | ||
475 | return; | ||
476 | } | ||
477 | 496 | ||
478 | /* Establish a connection to subscriber */ | 497 | /* Create server port & establish connection to subscriber */ |
479 | 498 | ||
480 | tipc_createport(topsrv.user_ref, | 499 | tipc_createport(topsrv.user_ref, |
481 | (void *)(unsigned long)subscriber->ref, | 500 | subscriber, |
482 | importance, | 501 | importance, |
483 | NULL, | 502 | NULL, |
484 | NULL, | 503 | NULL, |
@@ -490,32 +509,36 @@ static void subscr_named_msg_event(void *usr_handle, | |||
490 | &subscriber->port_ref); | 509 | &subscriber->port_ref); |
491 | if (subscriber->port_ref == 0) { | 510 | if (subscriber->port_ref == 0) { |
492 | warn("Subscriber rejected, unable to create port\n"); | 511 | warn("Subscriber rejected, unable to create port\n"); |
493 | tipc_ref_discard(subscriber->ref); | ||
494 | kfree(subscriber); | 512 | kfree(subscriber); |
495 | return; | 513 | return; |
496 | } | 514 | } |
497 | tipc_connect2port(subscriber->port_ref, orig); | 515 | tipc_connect2port(subscriber->port_ref, orig); |
498 | 516 | ||
517 | /* Lock server port (& save lock address for future use) */ | ||
518 | |||
519 | subscriber->lock = tipc_port_lock(subscriber->port_ref)->publ.lock; | ||
499 | 520 | ||
500 | /* Add subscriber to topology server's subscriber list */ | 521 | /* Add subscriber to topology server's subscriber list */ |
501 | 522 | ||
502 | tipc_ref_lock(subscriber->ref); | ||
503 | spin_lock_bh(&topsrv.lock); | 523 | spin_lock_bh(&topsrv.lock); |
504 | list_add(&subscriber->subscriber_list, &topsrv.subscriber_list); | 524 | list_add(&subscriber->subscriber_list, &topsrv.subscriber_list); |
505 | spin_unlock_bh(&topsrv.lock); | 525 | spin_unlock_bh(&topsrv.lock); |
506 | 526 | ||
507 | /* | 527 | /* Unlock server port */ |
508 | * Subscribe now if message contains a subscription, | ||
509 | * otherwise send an empty response to complete connection handshaking | ||
510 | */ | ||
511 | 528 | ||
512 | subscriber_lock = subscriber->lock; | 529 | server_port_ref = subscriber->port_ref; |
513 | if (size) | 530 | spin_unlock_bh(subscriber->lock); |
514 | subscr_subscribe((struct tipc_subscr *)data, subscriber); | ||
515 | else | ||
516 | tipc_send(subscriber->port_ref, 1, &msg_sect); | ||
517 | 531 | ||
518 | spin_unlock_bh(subscriber_lock); | 532 | /* Send an ACK- to complete connection handshaking */ |
533 | |||
534 | tipc_send(server_port_ref, 1, &msg_sect); | ||
535 | |||
536 | /* Handle optional subscription request */ | ||
537 | |||
538 | if (size != 0) { | ||
539 | subscr_conn_msg_event(subscriber, server_port_ref, | ||
540 | buf, data, size); | ||
541 | } | ||
519 | } | 542 | } |
520 | 543 | ||
521 | int tipc_subscr_start(void) | 544 | int tipc_subscr_start(void) |
@@ -574,8 +597,8 @@ void tipc_subscr_stop(void) | |||
574 | list_for_each_entry_safe(subscriber, subscriber_temp, | 597 | list_for_each_entry_safe(subscriber, subscriber_temp, |
575 | &topsrv.subscriber_list, | 598 | &topsrv.subscriber_list, |
576 | subscriber_list) { | 599 | subscriber_list) { |
577 | tipc_ref_lock(subscriber->ref); | ||
578 | subscriber_lock = subscriber->lock; | 600 | subscriber_lock = subscriber->lock; |
601 | spin_lock_bh(subscriber_lock); | ||
579 | subscr_terminate(subscriber); | 602 | subscr_terminate(subscriber); |
580 | spin_unlock_bh(subscriber_lock); | 603 | spin_unlock_bh(subscriber_lock); |
581 | } | 604 | } |
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h index 93a8e674fac1..45d89bf4d202 100644 --- a/net/tipc/subscr.h +++ b/net/tipc/subscr.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * net/tipc/subscr.h: Include file for TIPC subscription service | 2 | * net/tipc/subscr.h: Include file for TIPC network topology service |
3 | * | 3 | * |
4 | * Copyright (c) 2003-2006, Ericsson AB | 4 | * Copyright (c) 2003-2006, Ericsson AB |
5 | * Copyright (c) 2005, Wind River Systems | 5 | * Copyright (c) 2005-2007, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | 8 | * Redistribution and use in source and binary forms, with or without |
@@ -37,34 +37,44 @@ | |||
37 | #ifndef _TIPC_SUBSCR_H | 37 | #ifndef _TIPC_SUBSCR_H |
38 | #define _TIPC_SUBSCR_H | 38 | #define _TIPC_SUBSCR_H |
39 | 39 | ||
40 | struct subscription; | ||
41 | |||
42 | typedef void (*tipc_subscr_event) (struct subscription *sub, | ||
43 | u32 found_lower, u32 found_upper, | ||
44 | u32 event, u32 port_ref, u32 node); | ||
45 | |||
40 | /** | 46 | /** |
41 | * struct subscription - TIPC network topology subscription object | 47 | * struct subscription - TIPC network topology subscription object |
42 | * @seq: name sequence associated with subscription | 48 | * @seq: name sequence associated with subscription |
43 | * @timeout: duration of subscription (in ms) | 49 | * @timeout: duration of subscription (in ms) |
44 | * @filter: event filtering to be done for subscription | 50 | * @filter: event filtering to be done for subscription |
45 | * @evt: template for events generated by subscription | 51 | * @event_cb: routine invoked when a subscription event is detected |
46 | * @subscription_list: adjacent subscriptions in subscriber's subscription list | 52 | * @timer: timer governing subscription duration (optional) |
47 | * @nameseq_list: adjacent subscriptions in name sequence's subscription list | 53 | * @nameseq_list: adjacent subscriptions in name sequence's subscription list |
48 | * @timer_ref: reference to timer governing subscription duration (may be NULL) | 54 | * @subscription_list: adjacent subscriptions in subscriber's subscription list |
49 | * @owner: pointer to subscriber object associated with this subscription | 55 | * @server_ref: object reference of server port associated with subscription |
56 | * @swap: indicates if subscriber uses opposite endianness in its messages | ||
57 | * @evt: template for events generated by subscription | ||
50 | */ | 58 | */ |
51 | 59 | ||
52 | struct subscription { | 60 | struct subscription { |
53 | struct tipc_name_seq seq; | 61 | struct tipc_name_seq seq; |
54 | u32 timeout; | 62 | u32 timeout; |
55 | u32 filter; | 63 | u32 filter; |
56 | struct tipc_event evt; | 64 | tipc_subscr_event event_cb; |
57 | struct list_head subscription_list; | ||
58 | struct list_head nameseq_list; | ||
59 | struct timer_list timer; | 65 | struct timer_list timer; |
60 | struct subscriber *owner; | 66 | struct list_head nameseq_list; |
67 | struct list_head subscription_list; | ||
68 | u32 server_ref; | ||
69 | int swap; | ||
70 | struct tipc_event evt; | ||
61 | }; | 71 | }; |
62 | 72 | ||
63 | int tipc_subscr_overlap(struct subscription * sub, | 73 | int tipc_subscr_overlap(struct subscription *sub, |
64 | u32 found_lower, | 74 | u32 found_lower, |
65 | u32 found_upper); | 75 | u32 found_upper); |
66 | 76 | ||
67 | void tipc_subscr_report_overlap(struct subscription * sub, | 77 | void tipc_subscr_report_overlap(struct subscription *sub, |
68 | u32 found_lower, | 78 | u32 found_lower, |
69 | u32 found_upper, | 79 | u32 found_upper, |
70 | u32 event, | 80 | u32 event, |
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c index 4146c40cd20b..506928803162 100644 --- a/net/tipc/user_reg.c +++ b/net/tipc/user_reg.c | |||
@@ -91,7 +91,7 @@ static int reg_init(void) | |||
91 | } | 91 | } |
92 | } | 92 | } |
93 | spin_unlock_bh(®_lock); | 93 | spin_unlock_bh(®_lock); |
94 | return users ? TIPC_OK : -ENOMEM; | 94 | return users ? 0 : -ENOMEM; |
95 | } | 95 | } |
96 | 96 | ||
97 | /** | 97 | /** |
@@ -129,7 +129,7 @@ int tipc_reg_start(void) | |||
129 | tipc_k_signal((Handler)reg_callback, | 129 | tipc_k_signal((Handler)reg_callback, |
130 | (unsigned long)&users[u]); | 130 | (unsigned long)&users[u]); |
131 | } | 131 | } |
132 | return TIPC_OK; | 132 | return 0; |
133 | } | 133 | } |
134 | 134 | ||
135 | /** | 135 | /** |
@@ -184,7 +184,7 @@ int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle) | |||
184 | 184 | ||
185 | if (cb && (tipc_mode != TIPC_NOT_RUNNING)) | 185 | if (cb && (tipc_mode != TIPC_NOT_RUNNING)) |
186 | tipc_k_signal((Handler)reg_callback, (unsigned long)user_ptr); | 186 | tipc_k_signal((Handler)reg_callback, (unsigned long)user_ptr); |
187 | return TIPC_OK; | 187 | return 0; |
188 | } | 188 | } |
189 | 189 | ||
190 | /** | 190 | /** |
@@ -230,7 +230,7 @@ int tipc_reg_add_port(struct user_port *up_ptr) | |||
230 | struct tipc_user *user_ptr; | 230 | struct tipc_user *user_ptr; |
231 | 231 | ||
232 | if (up_ptr->user_ref == 0) | 232 | if (up_ptr->user_ref == 0) |
233 | return TIPC_OK; | 233 | return 0; |
234 | if (up_ptr->user_ref > MAX_USERID) | 234 | if (up_ptr->user_ref > MAX_USERID) |
235 | return -EINVAL; | 235 | return -EINVAL; |
236 | if ((tipc_mode == TIPC_NOT_RUNNING) || !users ) | 236 | if ((tipc_mode == TIPC_NOT_RUNNING) || !users ) |
@@ -240,7 +240,7 @@ int tipc_reg_add_port(struct user_port *up_ptr) | |||
240 | user_ptr = &users[up_ptr->user_ref]; | 240 | user_ptr = &users[up_ptr->user_ref]; |
241 | list_add(&up_ptr->uport_list, &user_ptr->ports); | 241 | list_add(&up_ptr->uport_list, &user_ptr->ports); |
242 | spin_unlock_bh(®_lock); | 242 | spin_unlock_bh(®_lock); |
243 | return TIPC_OK; | 243 | return 0; |
244 | } | 244 | } |
245 | 245 | ||
246 | /** | 246 | /** |
@@ -250,7 +250,7 @@ int tipc_reg_add_port(struct user_port *up_ptr) | |||
250 | int tipc_reg_remove_port(struct user_port *up_ptr) | 250 | int tipc_reg_remove_port(struct user_port *up_ptr) |
251 | { | 251 | { |
252 | if (up_ptr->user_ref == 0) | 252 | if (up_ptr->user_ref == 0) |
253 | return TIPC_OK; | 253 | return 0; |
254 | if (up_ptr->user_ref > MAX_USERID) | 254 | if (up_ptr->user_ref > MAX_USERID) |
255 | return -EINVAL; | 255 | return -EINVAL; |
256 | if (!users ) | 256 | if (!users ) |
@@ -259,6 +259,6 @@ int tipc_reg_remove_port(struct user_port *up_ptr) | |||
259 | spin_lock_bh(®_lock); | 259 | spin_lock_bh(®_lock); |
260 | list_del_init(&up_ptr->uport_list); | 260 | list_del_init(&up_ptr->uport_list); |
261 | spin_unlock_bh(®_lock); | 261 | spin_unlock_bh(®_lock); |
262 | return TIPC_OK; | 262 | return 0; |
263 | } | 263 | } |
264 | 264 | ||
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 783317dacd30..70ceb1604ad8 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -8,8 +8,6 @@ | |||
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | * | 10 | * |
11 | * Version: $Id: af_unix.c,v 1.133 2002/02/08 03:57:19 davem Exp $ | ||
12 | * | ||
13 | * Fixes: | 11 | * Fixes: |
14 | * Linus Torvalds : Assorted bug cures. | 12 | * Linus Torvalds : Assorted bug cures. |
15 | * Niibe Yutaka : async I/O support. | 13 | * Niibe Yutaka : async I/O support. |
diff --git a/net/wanrouter/Kconfig b/net/wanrouter/Kconfig index 1debe1cb054e..61ceae0b9566 100644 --- a/net/wanrouter/Kconfig +++ b/net/wanrouter/Kconfig | |||
@@ -20,8 +20,6 @@ config WAN_ROUTER | |||
20 | wish to use your Linux box as a WAN router, say Y here and also to | 20 | wish to use your Linux box as a WAN router, say Y here and also to |
21 | the WAN driver for your card, below. You will then need the | 21 | the WAN driver for your card, below. You will then need the |
22 | wan-tools package which is available from <ftp://ftp.sangoma.com/>. | 22 | wan-tools package which is available from <ftp://ftp.sangoma.com/>. |
23 | Read <file:Documentation/networking/wan-router.txt> for more | ||
24 | information. | ||
25 | 23 | ||
26 | To compile WAN routing support as a module, choose M here: the | 24 | To compile WAN routing support as a module, choose M here: the |
27 | module will be called wanrouter. | 25 | module will be called wanrouter. |
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c index 9ab31a3ce3ad..b210a88d0960 100644 --- a/net/wanrouter/wanmain.c +++ b/net/wanrouter/wanmain.c | |||
@@ -350,9 +350,9 @@ __be16 wanrouter_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
350 | * o execute requested action or pass command to the device driver | 350 | * o execute requested action or pass command to the device driver |
351 | */ | 351 | */ |
352 | 352 | ||
353 | int wanrouter_ioctl(struct inode *inode, struct file *file, | 353 | long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
354 | unsigned int cmd, unsigned long arg) | ||
355 | { | 354 | { |
355 | struct inode *inode = file->f_path.dentry->d_inode; | ||
356 | int err = 0; | 356 | int err = 0; |
357 | struct proc_dir_entry *dent; | 357 | struct proc_dir_entry *dent; |
358 | struct wan_device *wandev; | 358 | struct wan_device *wandev; |
@@ -372,6 +372,7 @@ int wanrouter_ioctl(struct inode *inode, struct file *file, | |||
372 | if (wandev->magic != ROUTER_MAGIC) | 372 | if (wandev->magic != ROUTER_MAGIC) |
373 | return -EINVAL; | 373 | return -EINVAL; |
374 | 374 | ||
375 | lock_kernel(); | ||
375 | switch (cmd) { | 376 | switch (cmd) { |
376 | case ROUTER_SETUP: | 377 | case ROUTER_SETUP: |
377 | err = wanrouter_device_setup(wandev, data); | 378 | err = wanrouter_device_setup(wandev, data); |
@@ -403,6 +404,7 @@ int wanrouter_ioctl(struct inode *inode, struct file *file, | |||
403 | err = wandev->ioctl(wandev, cmd, arg); | 404 | err = wandev->ioctl(wandev, cmd, arg); |
404 | else err = -EINVAL; | 405 | else err = -EINVAL; |
405 | } | 406 | } |
407 | unlock_kernel(); | ||
406 | return err; | 408 | return err; |
407 | } | 409 | } |
408 | 410 | ||
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c index 5bebe40bf4e6..267f7ff49827 100644 --- a/net/wanrouter/wanproc.c +++ b/net/wanrouter/wanproc.c | |||
@@ -278,7 +278,7 @@ static const struct file_operations wandev_fops = { | |||
278 | .read = seq_read, | 278 | .read = seq_read, |
279 | .llseek = seq_lseek, | 279 | .llseek = seq_lseek, |
280 | .release = single_release, | 280 | .release = single_release, |
281 | .ioctl = wanrouter_ioctl, | 281 | .unlocked_ioctl = wanrouter_ioctl, |
282 | }; | 282 | }; |
283 | 283 | ||
284 | /* | 284 | /* |
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 79270903bda6..ab015c62d561 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig | |||
@@ -29,3 +29,14 @@ config WIRELESS_EXT | |||
29 | 29 | ||
30 | Say N (if you can) unless you know you need wireless | 30 | Say N (if you can) unless you know you need wireless |
31 | extensions for external modules. | 31 | extensions for external modules. |
32 | |||
33 | config WIRELESS_EXT_SYSFS | ||
34 | bool "Wireless extensions sysfs files" | ||
35 | default y | ||
36 | depends on WIRELESS_EXT && SYSFS | ||
37 | help | ||
38 | This option enables the deprecated wireless statistics | ||
39 | files in /sys/class/net/*/wireless/. The same information | ||
40 | is available via the ioctls as well. | ||
41 | |||
42 | Say Y if you have programs using it (we don't know of any). | ||
diff --git a/net/wireless/core.c b/net/wireless/core.c index 80afacdae46c..f1da0b93bc56 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -143,8 +143,11 @@ void cfg80211_put_dev(struct cfg80211_registered_device *drv) | |||
143 | int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, | 143 | int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, |
144 | char *newname) | 144 | char *newname) |
145 | { | 145 | { |
146 | struct cfg80211_registered_device *drv; | ||
146 | int idx, taken = -1, result, digits; | 147 | int idx, taken = -1, result, digits; |
147 | 148 | ||
149 | mutex_lock(&cfg80211_drv_mutex); | ||
150 | |||
148 | /* prohibit calling the thing phy%d when %d is not its number */ | 151 | /* prohibit calling the thing phy%d when %d is not its number */ |
149 | sscanf(newname, PHY_NAME "%d%n", &idx, &taken); | 152 | sscanf(newname, PHY_NAME "%d%n", &idx, &taken); |
150 | if (taken == strlen(newname) && idx != rdev->idx) { | 153 | if (taken == strlen(newname) && idx != rdev->idx) { |
@@ -156,14 +159,30 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, | |||
156 | * deny the name if it is phy<idx> where <idx> is printed | 159 | * deny the name if it is phy<idx> where <idx> is printed |
157 | * without leading zeroes. taken == strlen(newname) here | 160 | * without leading zeroes. taken == strlen(newname) here |
158 | */ | 161 | */ |
162 | result = -EINVAL; | ||
159 | if (taken == strlen(PHY_NAME) + digits) | 163 | if (taken == strlen(PHY_NAME) + digits) |
160 | return -EINVAL; | 164 | goto out_unlock; |
165 | } | ||
166 | |||
167 | |||
168 | /* Ignore nop renames */ | ||
169 | result = 0; | ||
170 | if (strcmp(newname, dev_name(&rdev->wiphy.dev)) == 0) | ||
171 | goto out_unlock; | ||
172 | |||
173 | /* Ensure another device does not already have this name. */ | ||
174 | list_for_each_entry(drv, &cfg80211_drv_list, list) { | ||
175 | result = -EINVAL; | ||
176 | if (strcmp(newname, dev_name(&drv->wiphy.dev)) == 0) | ||
177 | goto out_unlock; | ||
161 | } | 178 | } |
162 | 179 | ||
163 | /* this will check for collisions */ | 180 | /* this will only check for collisions in sysfs |
181 | * which is not even always compiled in. | ||
182 | */ | ||
164 | result = device_rename(&rdev->wiphy.dev, newname); | 183 | result = device_rename(&rdev->wiphy.dev, newname); |
165 | if (result) | 184 | if (result) |
166 | return result; | 185 | goto out_unlock; |
167 | 186 | ||
168 | if (!debugfs_rename(rdev->wiphy.debugfsdir->d_parent, | 187 | if (!debugfs_rename(rdev->wiphy.debugfsdir->d_parent, |
169 | rdev->wiphy.debugfsdir, | 188 | rdev->wiphy.debugfsdir, |
@@ -172,9 +191,13 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, | |||
172 | printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n", | 191 | printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n", |
173 | newname); | 192 | newname); |
174 | 193 | ||
175 | nl80211_notify_dev_rename(rdev); | 194 | result = 0; |
195 | out_unlock: | ||
196 | mutex_unlock(&cfg80211_drv_mutex); | ||
197 | if (result == 0) | ||
198 | nl80211_notify_dev_rename(rdev); | ||
176 | 199 | ||
177 | return 0; | 200 | return result; |
178 | } | 201 | } |
179 | 202 | ||
180 | /* exported functions */ | 203 | /* exported functions */ |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index fb75f265b39c..b7fefffd2d0d 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -199,12 +199,14 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) | |||
199 | 199 | ||
200 | mutex_lock(&cfg80211_drv_mutex); | 200 | mutex_lock(&cfg80211_drv_mutex); |
201 | list_for_each_entry(dev, &cfg80211_drv_list, list) { | 201 | list_for_each_entry(dev, &cfg80211_drv_list, list) { |
202 | if (++idx < start) | 202 | if (++idx <= start) |
203 | continue; | 203 | continue; |
204 | if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).pid, | 204 | if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).pid, |
205 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 205 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
206 | dev) < 0) | 206 | dev) < 0) { |
207 | idx--; | ||
207 | break; | 208 | break; |
209 | } | ||
208 | } | 210 | } |
209 | mutex_unlock(&cfg80211_drv_mutex); | 211 | mutex_unlock(&cfg80211_drv_mutex); |
210 | 212 | ||
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c index 28fbd0b0b568..f591871a7b4f 100644 --- a/net/wireless/radiotap.c +++ b/net/wireless/radiotap.c | |||
@@ -59,23 +59,21 @@ int ieee80211_radiotap_iterator_init( | |||
59 | return -EINVAL; | 59 | return -EINVAL; |
60 | 60 | ||
61 | /* sanity check for allowed length and radiotap length field */ | 61 | /* sanity check for allowed length and radiotap length field */ |
62 | if (max_length < le16_to_cpu(get_unaligned(&radiotap_header->it_len))) | 62 | if (max_length < get_unaligned_le16(&radiotap_header->it_len)) |
63 | return -EINVAL; | 63 | return -EINVAL; |
64 | 64 | ||
65 | iterator->rtheader = radiotap_header; | 65 | iterator->rtheader = radiotap_header; |
66 | iterator->max_length = le16_to_cpu(get_unaligned( | 66 | iterator->max_length = get_unaligned_le16(&radiotap_header->it_len); |
67 | &radiotap_header->it_len)); | ||
68 | iterator->arg_index = 0; | 67 | iterator->arg_index = 0; |
69 | iterator->bitmap_shifter = le32_to_cpu(get_unaligned( | 68 | iterator->bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present); |
70 | &radiotap_header->it_present)); | ||
71 | iterator->arg = (u8 *)radiotap_header + sizeof(*radiotap_header); | 69 | iterator->arg = (u8 *)radiotap_header + sizeof(*radiotap_header); |
72 | iterator->this_arg = NULL; | 70 | iterator->this_arg = NULL; |
73 | 71 | ||
74 | /* find payload start allowing for extended bitmap(s) */ | 72 | /* find payload start allowing for extended bitmap(s) */ |
75 | 73 | ||
76 | if (unlikely(iterator->bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT))) { | 74 | if (unlikely(iterator->bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT))) { |
77 | while (le32_to_cpu(get_unaligned((__le32 *)iterator->arg)) & | 75 | while (get_unaligned_le32(iterator->arg) & |
78 | (1<<IEEE80211_RADIOTAP_EXT)) { | 76 | (1 << IEEE80211_RADIOTAP_EXT)) { |
79 | iterator->arg += sizeof(u32); | 77 | iterator->arg += sizeof(u32); |
80 | 78 | ||
81 | /* | 79 | /* |
@@ -241,8 +239,8 @@ int ieee80211_radiotap_iterator_next( | |||
241 | if (iterator->bitmap_shifter & 1) { | 239 | if (iterator->bitmap_shifter & 1) { |
242 | /* b31 was set, there is more */ | 240 | /* b31 was set, there is more */ |
243 | /* move to next u32 bitmap */ | 241 | /* move to next u32 bitmap */ |
244 | iterator->bitmap_shifter = le32_to_cpu( | 242 | iterator->bitmap_shifter = |
245 | get_unaligned(iterator->next_bitmap)); | 243 | get_unaligned_le32(iterator->next_bitmap); |
246 | iterator->next_bitmap++; | 244 | iterator->next_bitmap++; |
247 | } else | 245 | } else |
248 | /* no more bitmaps: end */ | 246 | /* no more bitmaps: end */ |
diff --git a/net/wireless/wext.c b/net/wireless/wext.c index 947188a5b937..df5b3886c36b 100644 --- a/net/wireless/wext.c +++ b/net/wireless/wext.c | |||
@@ -500,7 +500,7 @@ static int call_commit_handler(struct net_device *dev) | |||
500 | /* | 500 | /* |
501 | * Calculate size of private arguments | 501 | * Calculate size of private arguments |
502 | */ | 502 | */ |
503 | static inline int get_priv_size(__u16 args) | 503 | static int get_priv_size(__u16 args) |
504 | { | 504 | { |
505 | int num = args & IW_PRIV_SIZE_MASK; | 505 | int num = args & IW_PRIV_SIZE_MASK; |
506 | int type = (args & IW_PRIV_TYPE_MASK) >> 12; | 506 | int type = (args & IW_PRIV_TYPE_MASK) >> 12; |
@@ -512,10 +512,9 @@ static inline int get_priv_size(__u16 args) | |||
512 | /* | 512 | /* |
513 | * Re-calculate the size of private arguments | 513 | * Re-calculate the size of private arguments |
514 | */ | 514 | */ |
515 | static inline int adjust_priv_size(__u16 args, | 515 | static int adjust_priv_size(__u16 args, struct iw_point *iwp) |
516 | union iwreq_data * wrqu) | ||
517 | { | 516 | { |
518 | int num = wrqu->data.length; | 517 | int num = iwp->length; |
519 | int max = args & IW_PRIV_SIZE_MASK; | 518 | int max = args & IW_PRIV_SIZE_MASK; |
520 | int type = (args & IW_PRIV_TYPE_MASK) >> 12; | 519 | int type = (args & IW_PRIV_TYPE_MASK) >> 12; |
521 | 520 | ||
@@ -695,19 +694,150 @@ void wext_proc_exit(struct net *net) | |||
695 | */ | 694 | */ |
696 | 695 | ||
697 | /* ---------------------------------------------------------------- */ | 696 | /* ---------------------------------------------------------------- */ |
697 | static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, | ||
698 | const struct iw_ioctl_description *descr, | ||
699 | iw_handler handler, struct net_device *dev, | ||
700 | struct iw_request_info *info) | ||
701 | { | ||
702 | int err, extra_size, user_length = 0, essid_compat = 0; | ||
703 | char *extra; | ||
704 | |||
705 | /* Calculate space needed by arguments. Always allocate | ||
706 | * for max space. | ||
707 | */ | ||
708 | extra_size = descr->max_tokens * descr->token_size; | ||
709 | |||
710 | /* Check need for ESSID compatibility for WE < 21 */ | ||
711 | switch (cmd) { | ||
712 | case SIOCSIWESSID: | ||
713 | case SIOCGIWESSID: | ||
714 | case SIOCSIWNICKN: | ||
715 | case SIOCGIWNICKN: | ||
716 | if (iwp->length == descr->max_tokens + 1) | ||
717 | essid_compat = 1; | ||
718 | else if (IW_IS_SET(cmd) && (iwp->length != 0)) { | ||
719 | char essid[IW_ESSID_MAX_SIZE + 1]; | ||
720 | |||
721 | err = copy_from_user(essid, iwp->pointer, | ||
722 | iwp->length * | ||
723 | descr->token_size); | ||
724 | if (err) | ||
725 | return -EFAULT; | ||
726 | |||
727 | if (essid[iwp->length - 1] == '\0') | ||
728 | essid_compat = 1; | ||
729 | } | ||
730 | break; | ||
731 | default: | ||
732 | break; | ||
733 | } | ||
734 | |||
735 | iwp->length -= essid_compat; | ||
736 | |||
737 | /* Check what user space is giving us */ | ||
738 | if (IW_IS_SET(cmd)) { | ||
739 | /* Check NULL pointer */ | ||
740 | if (!iwp->pointer && iwp->length != 0) | ||
741 | return -EFAULT; | ||
742 | /* Check if number of token fits within bounds */ | ||
743 | if (iwp->length > descr->max_tokens) | ||
744 | return -E2BIG; | ||
745 | if (iwp->length < descr->min_tokens) | ||
746 | return -EINVAL; | ||
747 | } else { | ||
748 | /* Check NULL pointer */ | ||
749 | if (!iwp->pointer) | ||
750 | return -EFAULT; | ||
751 | /* Save user space buffer size for checking */ | ||
752 | user_length = iwp->length; | ||
753 | |||
754 | /* Don't check if user_length > max to allow forward | ||
755 | * compatibility. The test user_length < min is | ||
756 | * implied by the test at the end. | ||
757 | */ | ||
758 | |||
759 | /* Support for very large requests */ | ||
760 | if ((descr->flags & IW_DESCR_FLAG_NOMAX) && | ||
761 | (user_length > descr->max_tokens)) { | ||
762 | /* Allow userspace to GET more than max so | ||
763 | * we can support any size GET requests. | ||
764 | * There is still a limit : -ENOMEM. | ||
765 | */ | ||
766 | extra_size = user_length * descr->token_size; | ||
767 | |||
768 | /* Note : user_length is originally a __u16, | ||
769 | * and token_size is controlled by us, | ||
770 | * so extra_size won't get negative and | ||
771 | * won't overflow... | ||
772 | */ | ||
773 | } | ||
774 | } | ||
775 | |||
776 | /* kzalloc() ensures NULL-termination for essid_compat. */ | ||
777 | extra = kzalloc(extra_size, GFP_KERNEL); | ||
778 | if (!extra) | ||
779 | return -ENOMEM; | ||
780 | |||
781 | /* If it is a SET, get all the extra data in here */ | ||
782 | if (IW_IS_SET(cmd) && (iwp->length != 0)) { | ||
783 | if (copy_from_user(extra, iwp->pointer, | ||
784 | iwp->length * | ||
785 | descr->token_size)) { | ||
786 | err = -EFAULT; | ||
787 | goto out; | ||
788 | } | ||
789 | } | ||
790 | |||
791 | err = handler(dev, info, (union iwreq_data *) iwp, extra); | ||
792 | |||
793 | iwp->length += essid_compat; | ||
794 | |||
795 | /* If we have something to return to the user */ | ||
796 | if (!err && IW_IS_GET(cmd)) { | ||
797 | /* Check if there is enough buffer up there */ | ||
798 | if (user_length < iwp->length) { | ||
799 | err = -E2BIG; | ||
800 | goto out; | ||
801 | } | ||
802 | |||
803 | if (copy_to_user(iwp->pointer, extra, | ||
804 | iwp->length * | ||
805 | descr->token_size)) { | ||
806 | err = -EFAULT; | ||
807 | goto out; | ||
808 | } | ||
809 | } | ||
810 | |||
811 | /* Generate an event to notify listeners of the change */ | ||
812 | if ((descr->flags & IW_DESCR_FLAG_EVENT) && err == -EIWCOMMIT) { | ||
813 | union iwreq_data *data = (union iwreq_data *) iwp; | ||
814 | |||
815 | if (descr->flags & IW_DESCR_FLAG_RESTRICT) | ||
816 | /* If the event is restricted, don't | ||
817 | * export the payload. | ||
818 | */ | ||
819 | wireless_send_event(dev, cmd, data, NULL); | ||
820 | else | ||
821 | wireless_send_event(dev, cmd, data, extra); | ||
822 | } | ||
823 | |||
824 | out: | ||
825 | kfree(extra); | ||
826 | return err; | ||
827 | } | ||
828 | |||
698 | /* | 829 | /* |
699 | * Wrapper to call a standard Wireless Extension handler. | 830 | * Wrapper to call a standard Wireless Extension handler. |
700 | * We do various checks and also take care of moving data between | 831 | * We do various checks and also take care of moving data between |
701 | * user space and kernel space. | 832 | * user space and kernel space. |
702 | */ | 833 | */ |
703 | static int ioctl_standard_call(struct net_device * dev, | 834 | static int ioctl_standard_call(struct net_device * dev, |
704 | struct ifreq * ifr, | 835 | struct iwreq *iwr, |
705 | unsigned int cmd, | 836 | unsigned int cmd, |
837 | struct iw_request_info *info, | ||
706 | iw_handler handler) | 838 | iw_handler handler) |
707 | { | 839 | { |
708 | struct iwreq * iwr = (struct iwreq *) ifr; | ||
709 | const struct iw_ioctl_description * descr; | 840 | const struct iw_ioctl_description * descr; |
710 | struct iw_request_info info; | ||
711 | int ret = -EINVAL; | 841 | int ret = -EINVAL; |
712 | 842 | ||
713 | /* Get the description of the IOCTL */ | 843 | /* Get the description of the IOCTL */ |
@@ -715,145 +845,19 @@ static int ioctl_standard_call(struct net_device * dev, | |||
715 | return -EOPNOTSUPP; | 845 | return -EOPNOTSUPP; |
716 | descr = &(standard_ioctl[cmd - SIOCIWFIRST]); | 846 | descr = &(standard_ioctl[cmd - SIOCIWFIRST]); |
717 | 847 | ||
718 | /* Prepare the call */ | ||
719 | info.cmd = cmd; | ||
720 | info.flags = 0; | ||
721 | |||
722 | /* Check if we have a pointer to user space data or not */ | 848 | /* Check if we have a pointer to user space data or not */ |
723 | if (descr->header_type != IW_HEADER_TYPE_POINT) { | 849 | if (descr->header_type != IW_HEADER_TYPE_POINT) { |
724 | 850 | ||
725 | /* No extra arguments. Trivial to handle */ | 851 | /* No extra arguments. Trivial to handle */ |
726 | ret = handler(dev, &info, &(iwr->u), NULL); | 852 | ret = handler(dev, info, &(iwr->u), NULL); |
727 | 853 | ||
728 | /* Generate an event to notify listeners of the change */ | 854 | /* Generate an event to notify listeners of the change */ |
729 | if ((descr->flags & IW_DESCR_FLAG_EVENT) && | 855 | if ((descr->flags & IW_DESCR_FLAG_EVENT) && |
730 | ((ret == 0) || (ret == -EIWCOMMIT))) | 856 | ((ret == 0) || (ret == -EIWCOMMIT))) |
731 | wireless_send_event(dev, cmd, &(iwr->u), NULL); | 857 | wireless_send_event(dev, cmd, &(iwr->u), NULL); |
732 | } else { | 858 | } else { |
733 | char * extra; | 859 | ret = ioctl_standard_iw_point(&iwr->u.data, cmd, descr, |
734 | int extra_size; | 860 | handler, dev, info); |
735 | int user_length = 0; | ||
736 | int err; | ||
737 | int essid_compat = 0; | ||
738 | |||
739 | /* Calculate space needed by arguments. Always allocate | ||
740 | * for max space. Easier, and won't last long... */ | ||
741 | extra_size = descr->max_tokens * descr->token_size; | ||
742 | |||
743 | /* Check need for ESSID compatibility for WE < 21 */ | ||
744 | switch (cmd) { | ||
745 | case SIOCSIWESSID: | ||
746 | case SIOCGIWESSID: | ||
747 | case SIOCSIWNICKN: | ||
748 | case SIOCGIWNICKN: | ||
749 | if (iwr->u.data.length == descr->max_tokens + 1) | ||
750 | essid_compat = 1; | ||
751 | else if (IW_IS_SET(cmd) && (iwr->u.data.length != 0)) { | ||
752 | char essid[IW_ESSID_MAX_SIZE + 1]; | ||
753 | |||
754 | err = copy_from_user(essid, iwr->u.data.pointer, | ||
755 | iwr->u.data.length * | ||
756 | descr->token_size); | ||
757 | if (err) | ||
758 | return -EFAULT; | ||
759 | |||
760 | if (essid[iwr->u.data.length - 1] == '\0') | ||
761 | essid_compat = 1; | ||
762 | } | ||
763 | break; | ||
764 | default: | ||
765 | break; | ||
766 | } | ||
767 | |||
768 | iwr->u.data.length -= essid_compat; | ||
769 | |||
770 | /* Check what user space is giving us */ | ||
771 | if (IW_IS_SET(cmd)) { | ||
772 | /* Check NULL pointer */ | ||
773 | if ((iwr->u.data.pointer == NULL) && | ||
774 | (iwr->u.data.length != 0)) | ||
775 | return -EFAULT; | ||
776 | /* Check if number of token fits within bounds */ | ||
777 | if (iwr->u.data.length > descr->max_tokens) | ||
778 | return -E2BIG; | ||
779 | if (iwr->u.data.length < descr->min_tokens) | ||
780 | return -EINVAL; | ||
781 | } else { | ||
782 | /* Check NULL pointer */ | ||
783 | if (iwr->u.data.pointer == NULL) | ||
784 | return -EFAULT; | ||
785 | /* Save user space buffer size for checking */ | ||
786 | user_length = iwr->u.data.length; | ||
787 | |||
788 | /* Don't check if user_length > max to allow forward | ||
789 | * compatibility. The test user_length < min is | ||
790 | * implied by the test at the end. */ | ||
791 | |||
792 | /* Support for very large requests */ | ||
793 | if ((descr->flags & IW_DESCR_FLAG_NOMAX) && | ||
794 | (user_length > descr->max_tokens)) { | ||
795 | /* Allow userspace to GET more than max so | ||
796 | * we can support any size GET requests. | ||
797 | * There is still a limit : -ENOMEM. */ | ||
798 | extra_size = user_length * descr->token_size; | ||
799 | /* Note : user_length is originally a __u16, | ||
800 | * and token_size is controlled by us, | ||
801 | * so extra_size won't get negative and | ||
802 | * won't overflow... */ | ||
803 | } | ||
804 | } | ||
805 | |||
806 | /* Create the kernel buffer */ | ||
807 | /* kzalloc ensures NULL-termination for essid_compat */ | ||
808 | extra = kzalloc(extra_size, GFP_KERNEL); | ||
809 | if (extra == NULL) | ||
810 | return -ENOMEM; | ||
811 | |||
812 | /* If it is a SET, get all the extra data in here */ | ||
813 | if (IW_IS_SET(cmd) && (iwr->u.data.length != 0)) { | ||
814 | err = copy_from_user(extra, iwr->u.data.pointer, | ||
815 | iwr->u.data.length * | ||
816 | descr->token_size); | ||
817 | if (err) { | ||
818 | kfree(extra); | ||
819 | return -EFAULT; | ||
820 | } | ||
821 | } | ||
822 | |||
823 | /* Call the handler */ | ||
824 | ret = handler(dev, &info, &(iwr->u), extra); | ||
825 | |||
826 | iwr->u.data.length += essid_compat; | ||
827 | |||
828 | /* If we have something to return to the user */ | ||
829 | if (!ret && IW_IS_GET(cmd)) { | ||
830 | /* Check if there is enough buffer up there */ | ||
831 | if (user_length < iwr->u.data.length) { | ||
832 | kfree(extra); | ||
833 | return -E2BIG; | ||
834 | } | ||
835 | |||
836 | err = copy_to_user(iwr->u.data.pointer, extra, | ||
837 | iwr->u.data.length * | ||
838 | descr->token_size); | ||
839 | if (err) | ||
840 | ret = -EFAULT; | ||
841 | } | ||
842 | |||
843 | /* Generate an event to notify listeners of the change */ | ||
844 | if ((descr->flags & IW_DESCR_FLAG_EVENT) && | ||
845 | ((ret == 0) || (ret == -EIWCOMMIT))) { | ||
846 | if (descr->flags & IW_DESCR_FLAG_RESTRICT) | ||
847 | /* If the event is restricted, don't | ||
848 | * export the payload */ | ||
849 | wireless_send_event(dev, cmd, &(iwr->u), NULL); | ||
850 | else | ||
851 | wireless_send_event(dev, cmd, &(iwr->u), | ||
852 | extra); | ||
853 | } | ||
854 | |||
855 | /* Cleanup - I told you it wasn't that long ;-) */ | ||
856 | kfree(extra); | ||
857 | } | 861 | } |
858 | 862 | ||
859 | /* Call commit handler if needed and defined */ | 863 | /* Call commit handler if needed and defined */ |
@@ -881,25 +885,22 @@ static int ioctl_standard_call(struct net_device * dev, | |||
881 | * a iw_handler but process it in your ioctl handler (i.e. use the | 885 | * a iw_handler but process it in your ioctl handler (i.e. use the |
882 | * old driver API). | 886 | * old driver API). |
883 | */ | 887 | */ |
884 | static int ioctl_private_call(struct net_device *dev, struct ifreq *ifr, | 888 | static int get_priv_descr_and_size(struct net_device *dev, unsigned int cmd, |
885 | unsigned int cmd, iw_handler handler) | 889 | const struct iw_priv_args **descrp) |
886 | { | 890 | { |
887 | struct iwreq * iwr = (struct iwreq *) ifr; | 891 | const struct iw_priv_args *descr; |
888 | const struct iw_priv_args * descr = NULL; | 892 | int i, extra_size; |
889 | struct iw_request_info info; | ||
890 | int extra_size = 0; | ||
891 | int i; | ||
892 | int ret = -EINVAL; | ||
893 | 893 | ||
894 | /* Get the description of the IOCTL */ | 894 | descr = NULL; |
895 | for (i = 0; i < dev->wireless_handlers->num_private_args; i++) | 895 | for (i = 0; i < dev->wireless_handlers->num_private_args; i++) { |
896 | if (cmd == dev->wireless_handlers->private_args[i].cmd) { | 896 | if (cmd == dev->wireless_handlers->private_args[i].cmd) { |
897 | descr = &(dev->wireless_handlers->private_args[i]); | 897 | descr = &dev->wireless_handlers->private_args[i]; |
898 | break; | 898 | break; |
899 | } | 899 | } |
900 | } | ||
900 | 901 | ||
901 | /* Compute the size of the set/get arguments */ | 902 | extra_size = 0; |
902 | if (descr != NULL) { | 903 | if (descr) { |
903 | if (IW_IS_SET(cmd)) { | 904 | if (IW_IS_SET(cmd)) { |
904 | int offset = 0; /* For sub-ioctls */ | 905 | int offset = 0; /* For sub-ioctls */ |
905 | /* Check for sub-ioctl handler */ | 906 | /* Check for sub-ioctl handler */ |
@@ -924,72 +925,77 @@ static int ioctl_private_call(struct net_device *dev, struct ifreq *ifr, | |||
924 | extra_size = 0; | 925 | extra_size = 0; |
925 | } | 926 | } |
926 | } | 927 | } |
928 | *descrp = descr; | ||
929 | return extra_size; | ||
930 | } | ||
927 | 931 | ||
928 | /* Prepare the call */ | 932 | static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd, |
929 | info.cmd = cmd; | 933 | const struct iw_priv_args *descr, |
930 | info.flags = 0; | 934 | iw_handler handler, struct net_device *dev, |
935 | struct iw_request_info *info, int extra_size) | ||
936 | { | ||
937 | char *extra; | ||
938 | int err; | ||
931 | 939 | ||
932 | /* Check if we have a pointer to user space data or not. */ | 940 | /* Check what user space is giving us */ |
933 | if (extra_size == 0) { | 941 | if (IW_IS_SET(cmd)) { |
934 | /* No extra arguments. Trivial to handle */ | 942 | if (!iwp->pointer && iwp->length != 0) |
935 | ret = handler(dev, &info, &(iwr->u), (char *) &(iwr->u)); | 943 | return -EFAULT; |
936 | } else { | ||
937 | char * extra; | ||
938 | int err; | ||
939 | 944 | ||
940 | /* Check what user space is giving us */ | 945 | if (iwp->length > (descr->set_args & IW_PRIV_SIZE_MASK)) |
941 | if (IW_IS_SET(cmd)) { | 946 | return -E2BIG; |
942 | /* Check NULL pointer */ | 947 | } else if (!iwp->pointer) |
943 | if ((iwr->u.data.pointer == NULL) && | 948 | return -EFAULT; |
944 | (iwr->u.data.length != 0)) | ||
945 | return -EFAULT; | ||
946 | 949 | ||
947 | /* Does it fits within bounds ? */ | 950 | extra = kmalloc(extra_size, GFP_KERNEL); |
948 | if (iwr->u.data.length > (descr->set_args & | 951 | if (!extra) |
949 | IW_PRIV_SIZE_MASK)) | 952 | return -ENOMEM; |
950 | return -E2BIG; | ||
951 | } else if (iwr->u.data.pointer == NULL) | ||
952 | return -EFAULT; | ||
953 | 953 | ||
954 | /* Always allocate for max space. Easier, and won't last | 954 | /* If it is a SET, get all the extra data in here */ |
955 | * long... */ | 955 | if (IW_IS_SET(cmd) && (iwp->length != 0)) { |
956 | extra = kmalloc(extra_size, GFP_KERNEL); | 956 | if (copy_from_user(extra, iwp->pointer, extra_size)) { |
957 | if (extra == NULL) | 957 | err = -EFAULT; |
958 | return -ENOMEM; | 958 | goto out; |
959 | |||
960 | /* If it is a SET, get all the extra data in here */ | ||
961 | if (IW_IS_SET(cmd) && (iwr->u.data.length != 0)) { | ||
962 | err = copy_from_user(extra, iwr->u.data.pointer, | ||
963 | extra_size); | ||
964 | if (err) { | ||
965 | kfree(extra); | ||
966 | return -EFAULT; | ||
967 | } | ||
968 | } | 959 | } |
960 | } | ||
969 | 961 | ||
970 | /* Call the handler */ | 962 | /* Call the handler */ |
971 | ret = handler(dev, &info, &(iwr->u), extra); | 963 | err = handler(dev, info, (union iwreq_data *) iwp, extra); |
972 | 964 | ||
973 | /* If we have something to return to the user */ | 965 | /* If we have something to return to the user */ |
974 | if (!ret && IW_IS_GET(cmd)) { | 966 | if (!err && IW_IS_GET(cmd)) { |
967 | /* Adjust for the actual length if it's variable, | ||
968 | * avoid leaking kernel bits outside. | ||
969 | */ | ||
970 | if (!(descr->get_args & IW_PRIV_SIZE_FIXED)) | ||
971 | extra_size = adjust_priv_size(descr->get_args, iwp); | ||
975 | 972 | ||
976 | /* Adjust for the actual length if it's variable, | 973 | if (copy_to_user(iwp->pointer, extra, extra_size)) |
977 | * avoid leaking kernel bits outside. */ | 974 | err = -EFAULT; |
978 | if (!(descr->get_args & IW_PRIV_SIZE_FIXED)) { | 975 | } |
979 | extra_size = adjust_priv_size(descr->get_args, | ||
980 | &(iwr->u)); | ||
981 | } | ||
982 | 976 | ||
983 | err = copy_to_user(iwr->u.data.pointer, extra, | 977 | out: |
984 | extra_size); | 978 | kfree(extra); |
985 | if (err) | 979 | return err; |
986 | ret = -EFAULT; | 980 | } |
987 | } | ||
988 | 981 | ||
989 | /* Cleanup - I told you it wasn't that long ;-) */ | 982 | static int ioctl_private_call(struct net_device *dev, struct iwreq *iwr, |
990 | kfree(extra); | 983 | unsigned int cmd, struct iw_request_info *info, |
991 | } | 984 | iw_handler handler) |
985 | { | ||
986 | int extra_size = 0, ret = -EINVAL; | ||
987 | const struct iw_priv_args *descr; | ||
992 | 988 | ||
989 | extra_size = get_priv_descr_and_size(dev, cmd, &descr); | ||
990 | |||
991 | /* Check if we have a pointer to user space data or not. */ | ||
992 | if (extra_size == 0) { | ||
993 | /* No extra arguments. Trivial to handle */ | ||
994 | ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); | ||
995 | } else { | ||
996 | ret = ioctl_private_iw_point(&iwr->u.data, cmd, descr, | ||
997 | handler, dev, info, extra_size); | ||
998 | } | ||
993 | 999 | ||
994 | /* Call commit handler if needed and defined */ | 1000 | /* Call commit handler if needed and defined */ |
995 | if (ret == -EIWCOMMIT) | 1001 | if (ret == -EIWCOMMIT) |
@@ -999,12 +1005,21 @@ static int ioctl_private_call(struct net_device *dev, struct ifreq *ifr, | |||
999 | } | 1005 | } |
1000 | 1006 | ||
1001 | /* ---------------------------------------------------------------- */ | 1007 | /* ---------------------------------------------------------------- */ |
1008 | typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *, | ||
1009 | unsigned int, struct iw_request_info *, | ||
1010 | iw_handler); | ||
1011 | |||
1002 | /* | 1012 | /* |
1003 | * Main IOCTl dispatcher. | 1013 | * Main IOCTl dispatcher. |
1004 | * Check the type of IOCTL and call the appropriate wrapper... | 1014 | * Check the type of IOCTL and call the appropriate wrapper... |
1005 | */ | 1015 | */ |
1006 | static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd) | 1016 | static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, |
1017 | unsigned int cmd, | ||
1018 | struct iw_request_info *info, | ||
1019 | wext_ioctl_func standard, | ||
1020 | wext_ioctl_func private) | ||
1007 | { | 1021 | { |
1022 | struct iwreq *iwr = (struct iwreq *) ifr; | ||
1008 | struct net_device *dev; | 1023 | struct net_device *dev; |
1009 | iw_handler handler; | 1024 | iw_handler handler; |
1010 | 1025 | ||
@@ -1019,12 +1034,12 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, unsigned i | |||
1019 | * Note that 'cmd' is already filtered in dev_ioctl() with | 1034 | * Note that 'cmd' is already filtered in dev_ioctl() with |
1020 | * (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) */ | 1035 | * (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) */ |
1021 | if (cmd == SIOCGIWSTATS) | 1036 | if (cmd == SIOCGIWSTATS) |
1022 | return ioctl_standard_call(dev, ifr, cmd, | 1037 | return standard(dev, iwr, cmd, info, |
1023 | &iw_handler_get_iwstats); | 1038 | &iw_handler_get_iwstats); |
1024 | 1039 | ||
1025 | if (cmd == SIOCGIWPRIV && dev->wireless_handlers) | 1040 | if (cmd == SIOCGIWPRIV && dev->wireless_handlers) |
1026 | return ioctl_standard_call(dev, ifr, cmd, | 1041 | return standard(dev, iwr, cmd, info, |
1027 | &iw_handler_get_private); | 1042 | &iw_handler_get_private); |
1028 | 1043 | ||
1029 | /* Basic check */ | 1044 | /* Basic check */ |
1030 | if (!netif_device_present(dev)) | 1045 | if (!netif_device_present(dev)) |
@@ -1035,9 +1050,9 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, unsigned i | |||
1035 | if (handler) { | 1050 | if (handler) { |
1036 | /* Standard and private are not the same */ | 1051 | /* Standard and private are not the same */ |
1037 | if (cmd < SIOCIWFIRSTPRIV) | 1052 | if (cmd < SIOCIWFIRSTPRIV) |
1038 | return ioctl_standard_call(dev, ifr, cmd, handler); | 1053 | return standard(dev, iwr, cmd, info, handler); |
1039 | else | 1054 | else |
1040 | return ioctl_private_call(dev, ifr, cmd, handler); | 1055 | return private(dev, iwr, cmd, info, handler); |
1041 | } | 1056 | } |
1042 | /* Old driver API : call driver ioctl handler */ | 1057 | /* Old driver API : call driver ioctl handler */ |
1043 | if (dev->do_ioctl) | 1058 | if (dev->do_ioctl) |
@@ -1045,27 +1060,154 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, unsigned i | |||
1045 | return -EOPNOTSUPP; | 1060 | return -EOPNOTSUPP; |
1046 | } | 1061 | } |
1047 | 1062 | ||
1048 | /* entry point from dev ioctl */ | 1063 | /* If command is `set a parameter', or `get the encoding parameters', |
1049 | int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, | 1064 | * check if the user has the right to do it. |
1050 | void __user *arg) | 1065 | */ |
1066 | static int wext_permission_check(unsigned int cmd) | ||
1051 | { | 1067 | { |
1052 | int ret; | ||
1053 | |||
1054 | /* If command is `set a parameter', or | ||
1055 | * `get the encoding parameters', check if | ||
1056 | * the user has the right to do it */ | ||
1057 | if ((IW_IS_SET(cmd) || cmd == SIOCGIWENCODE || cmd == SIOCGIWENCODEEXT) | 1068 | if ((IW_IS_SET(cmd) || cmd == SIOCGIWENCODE || cmd == SIOCGIWENCODEEXT) |
1058 | && !capable(CAP_NET_ADMIN)) | 1069 | && !capable(CAP_NET_ADMIN)) |
1059 | return -EPERM; | 1070 | return -EPERM; |
1060 | 1071 | ||
1072 | return 0; | ||
1073 | } | ||
1074 | |||
1075 | /* entry point from dev ioctl */ | ||
1076 | static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr, | ||
1077 | unsigned int cmd, struct iw_request_info *info, | ||
1078 | wext_ioctl_func standard, | ||
1079 | wext_ioctl_func private) | ||
1080 | { | ||
1081 | int ret = wext_permission_check(cmd); | ||
1082 | |||
1083 | if (ret) | ||
1084 | return ret; | ||
1085 | |||
1061 | dev_load(net, ifr->ifr_name); | 1086 | dev_load(net, ifr->ifr_name); |
1062 | rtnl_lock(); | 1087 | rtnl_lock(); |
1063 | ret = wireless_process_ioctl(net, ifr, cmd); | 1088 | ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private); |
1064 | rtnl_unlock(); | 1089 | rtnl_unlock(); |
1065 | if (IW_IS_GET(cmd) && copy_to_user(arg, ifr, sizeof(struct iwreq))) | 1090 | |
1091 | return ret; | ||
1092 | } | ||
1093 | |||
1094 | int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, | ||
1095 | void __user *arg) | ||
1096 | { | ||
1097 | struct iw_request_info info = { .cmd = cmd, .flags = 0 }; | ||
1098 | int ret; | ||
1099 | |||
1100 | ret = wext_ioctl_dispatch(net, ifr, cmd, &info, | ||
1101 | ioctl_standard_call, | ||
1102 | ioctl_private_call); | ||
1103 | if (ret >= 0 && | ||
1104 | IW_IS_GET(cmd) && | ||
1105 | copy_to_user(arg, ifr, sizeof(struct iwreq))) | ||
1106 | return -EFAULT; | ||
1107 | |||
1108 | return ret; | ||
1109 | } | ||
1110 | |||
1111 | #ifdef CONFIG_COMPAT | ||
1112 | static int compat_standard_call(struct net_device *dev, | ||
1113 | struct iwreq *iwr, | ||
1114 | unsigned int cmd, | ||
1115 | struct iw_request_info *info, | ||
1116 | iw_handler handler) | ||
1117 | { | ||
1118 | const struct iw_ioctl_description *descr; | ||
1119 | struct compat_iw_point *iwp_compat; | ||
1120 | struct iw_point iwp; | ||
1121 | int err; | ||
1122 | |||
1123 | descr = standard_ioctl + (cmd - SIOCIWFIRST); | ||
1124 | |||
1125 | if (descr->header_type != IW_HEADER_TYPE_POINT) | ||
1126 | return ioctl_standard_call(dev, iwr, cmd, info, handler); | ||
1127 | |||
1128 | iwp_compat = (struct compat_iw_point *) &iwr->u.data; | ||
1129 | iwp.pointer = compat_ptr(iwp_compat->pointer); | ||
1130 | iwp.length = iwp_compat->length; | ||
1131 | iwp.flags = iwp_compat->flags; | ||
1132 | |||
1133 | err = ioctl_standard_iw_point(&iwp, cmd, descr, handler, dev, info); | ||
1134 | |||
1135 | iwp_compat->pointer = ptr_to_compat(iwp.pointer); | ||
1136 | iwp_compat->length = iwp.length; | ||
1137 | iwp_compat->flags = iwp.flags; | ||
1138 | |||
1139 | return err; | ||
1140 | } | ||
1141 | |||
1142 | static int compat_private_call(struct net_device *dev, struct iwreq *iwr, | ||
1143 | unsigned int cmd, struct iw_request_info *info, | ||
1144 | iw_handler handler) | ||
1145 | { | ||
1146 | const struct iw_priv_args *descr; | ||
1147 | int ret, extra_size; | ||
1148 | |||
1149 | extra_size = get_priv_descr_and_size(dev, cmd, &descr); | ||
1150 | |||
1151 | /* Check if we have a pointer to user space data or not. */ | ||
1152 | if (extra_size == 0) { | ||
1153 | /* No extra arguments. Trivial to handle */ | ||
1154 | ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); | ||
1155 | } else { | ||
1156 | struct compat_iw_point *iwp_compat; | ||
1157 | struct iw_point iwp; | ||
1158 | |||
1159 | iwp_compat = (struct compat_iw_point *) &iwr->u.data; | ||
1160 | iwp.pointer = compat_ptr(iwp_compat->pointer); | ||
1161 | iwp.length = iwp_compat->length; | ||
1162 | iwp.flags = iwp_compat->flags; | ||
1163 | |||
1164 | ret = ioctl_private_iw_point(&iwp, cmd, descr, | ||
1165 | handler, dev, info, extra_size); | ||
1166 | |||
1167 | iwp_compat->pointer = ptr_to_compat(iwp.pointer); | ||
1168 | iwp_compat->length = iwp.length; | ||
1169 | iwp_compat->flags = iwp.flags; | ||
1170 | } | ||
1171 | |||
1172 | /* Call commit handler if needed and defined */ | ||
1173 | if (ret == -EIWCOMMIT) | ||
1174 | ret = call_commit_handler(dev); | ||
1175 | |||
1176 | return ret; | ||
1177 | } | ||
1178 | |||
1179 | int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, | ||
1180 | unsigned long arg) | ||
1181 | { | ||
1182 | void __user *argp = (void __user *)arg; | ||
1183 | struct iw_request_info info; | ||
1184 | struct iwreq iwr; | ||
1185 | char *colon; | ||
1186 | int ret; | ||
1187 | |||
1188 | if (copy_from_user(&iwr, argp, sizeof(struct iwreq))) | ||
1189 | return -EFAULT; | ||
1190 | |||
1191 | iwr.ifr_name[IFNAMSIZ-1] = 0; | ||
1192 | colon = strchr(iwr.ifr_name, ':'); | ||
1193 | if (colon) | ||
1194 | *colon = 0; | ||
1195 | |||
1196 | info.cmd = cmd; | ||
1197 | info.flags = IW_REQUEST_FLAG_COMPAT; | ||
1198 | |||
1199 | ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info, | ||
1200 | compat_standard_call, | ||
1201 | compat_private_call); | ||
1202 | |||
1203 | if (ret >= 0 && | ||
1204 | IW_IS_GET(cmd) && | ||
1205 | copy_to_user(argp, &iwr, sizeof(struct iwreq))) | ||
1066 | return -EFAULT; | 1206 | return -EFAULT; |
1207 | |||
1067 | return ret; | 1208 | return ret; |
1068 | } | 1209 | } |
1210 | #endif | ||
1069 | 1211 | ||
1070 | /************************* EVENT PROCESSING *************************/ | 1212 | /************************* EVENT PROCESSING *************************/ |
1071 | /* | 1213 | /* |
@@ -1157,7 +1299,7 @@ static void rtmsg_iwinfo(struct net_device *dev, char *event, int event_len) | |||
1157 | struct sk_buff *skb; | 1299 | struct sk_buff *skb; |
1158 | int err; | 1300 | int err; |
1159 | 1301 | ||
1160 | if (dev_net(dev) != &init_net) | 1302 | if (!net_eq(dev_net(dev), &init_net)) |
1161 | return; | 1303 | return; |
1162 | 1304 | ||
1163 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | 1305 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); |
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 6ba67c523c16..9fc5b023d111 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
@@ -191,7 +191,7 @@ static int x25_device_event(struct notifier_block *this, unsigned long event, | |||
191 | struct net_device *dev = ptr; | 191 | struct net_device *dev = ptr; |
192 | struct x25_neigh *nb; | 192 | struct x25_neigh *nb; |
193 | 193 | ||
194 | if (dev_net(dev) != &init_net) | 194 | if (!net_eq(dev_net(dev), &init_net)) |
195 | return NOTIFY_DONE; | 195 | return NOTIFY_DONE; |
196 | 196 | ||
197 | if (dev->type == ARPHRD_X25 | 197 | if (dev->type == ARPHRD_X25 |
@@ -555,13 +555,11 @@ static struct sock *x25_make_new(struct sock *osk) | |||
555 | x25 = x25_sk(sk); | 555 | x25 = x25_sk(sk); |
556 | 556 | ||
557 | sk->sk_type = osk->sk_type; | 557 | sk->sk_type = osk->sk_type; |
558 | sk->sk_socket = osk->sk_socket; | ||
559 | sk->sk_priority = osk->sk_priority; | 558 | sk->sk_priority = osk->sk_priority; |
560 | sk->sk_protocol = osk->sk_protocol; | 559 | sk->sk_protocol = osk->sk_protocol; |
561 | sk->sk_rcvbuf = osk->sk_rcvbuf; | 560 | sk->sk_rcvbuf = osk->sk_rcvbuf; |
562 | sk->sk_sndbuf = osk->sk_sndbuf; | 561 | sk->sk_sndbuf = osk->sk_sndbuf; |
563 | sk->sk_state = TCP_ESTABLISHED; | 562 | sk->sk_state = TCP_ESTABLISHED; |
564 | sk->sk_sleep = osk->sk_sleep; | ||
565 | sk->sk_backlog_rcv = osk->sk_backlog_rcv; | 563 | sk->sk_backlog_rcv = osk->sk_backlog_rcv; |
566 | sock_copy_flags(sk, osk); | 564 | sock_copy_flags(sk, osk); |
567 | 565 | ||
@@ -614,8 +612,7 @@ static int x25_release(struct socket *sock) | |||
614 | break; | 612 | break; |
615 | } | 613 | } |
616 | 614 | ||
617 | sock->sk = NULL; | 615 | sock_orphan(sk); |
618 | sk->sk_socket = NULL; /* Not used, but we should do this */ | ||
619 | out: | 616 | out: |
620 | return 0; | 617 | return 0; |
621 | } | 618 | } |
@@ -808,14 +805,12 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags) | |||
808 | if (!skb->sk) | 805 | if (!skb->sk) |
809 | goto out2; | 806 | goto out2; |
810 | newsk = skb->sk; | 807 | newsk = skb->sk; |
811 | newsk->sk_socket = newsock; | 808 | sock_graft(newsk, newsock); |
812 | newsk->sk_sleep = &newsock->wait; | ||
813 | 809 | ||
814 | /* Now attach up the new socket */ | 810 | /* Now attach up the new socket */ |
815 | skb->sk = NULL; | 811 | skb->sk = NULL; |
816 | kfree_skb(skb); | 812 | kfree_skb(skb); |
817 | sk->sk_ack_backlog--; | 813 | sk->sk_ack_backlog--; |
818 | newsock->sk = newsk; | ||
819 | newsock->state = SS_CONNECTED; | 814 | newsock->state = SS_CONNECTED; |
820 | rc = 0; | 815 | rc = 0; |
821 | out2: | 816 | out2: |
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index 3ff206c0ae94..3e1efe534645 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c | |||
@@ -95,7 +95,7 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, | |||
95 | struct sk_buff *nskb; | 95 | struct sk_buff *nskb; |
96 | struct x25_neigh *nb; | 96 | struct x25_neigh *nb; |
97 | 97 | ||
98 | if (dev_net(dev) != &init_net) | 98 | if (!net_eq(dev_net(dev), &init_net)) |
99 | goto drop; | 99 | goto drop; |
100 | 100 | ||
101 | nskb = skb_copy(skb, GFP_ATOMIC); | 101 | nskb = skb_copy(skb, GFP_ATOMIC); |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index cae9fd815543..841b32a2e680 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -2360,7 +2360,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void | |||
2360 | { | 2360 | { |
2361 | struct net_device *dev = ptr; | 2361 | struct net_device *dev = ptr; |
2362 | 2362 | ||
2363 | if (dev_net(dev) != &init_net) | 2363 | if (!net_eq(dev_net(dev), &init_net)) |
2364 | return NOTIFY_DONE; | 2364 | return NOTIFY_DONE; |
2365 | 2365 | ||
2366 | switch (event) { | 2366 | switch (event) { |