Diffstat (limited to 'net')
70 files changed, 864 insertions, 491 deletions
diff --git a/net/9p/client.c b/net/9p/client.c
index a5e4d2dcb03e..9186550d77a6 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -204,7 +204,7 @@ free_and_return:
 	return ret;
 }
 
-struct p9_fcall *p9_fcall_alloc(int alloc_msize)
+static struct p9_fcall *p9_fcall_alloc(int alloc_msize)
 {
 	struct p9_fcall *fc;
 	fc = kmalloc(sizeof(struct p9_fcall) + alloc_msize, GFP_NOFS);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index cd1e1ede73a4..ac2666c1d011 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -340,7 +340,10 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
 		int count = nr_pages;
 		while (nr_pages) {
 			s = rest_of_page(data);
-			pages[index++] = kmap_to_page(data);
+			if (is_vmalloc_addr(data))
+				pages[index++] = vmalloc_to_page(data);
+			else
+				pages[index++] = kmap_to_page(data);
 			data += s;
 			nr_pages--;
 		}
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index e4401a531afb..63f0455c0bc3 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -187,8 +187,7 @@ static int br_set_mac_address(struct net_device *dev, void *p)
 
 	spin_lock_bh(&br->lock);
 	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
-		memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
-		br_fdb_change_mac_address(br, addr->sa_data);
+		/* Mac address will be changed in br_stp_change_bridge_id(). */
 		br_stp_change_bridge_id(br, addr->sa_data);
 	}
 	spin_unlock_bh(&br->lock);
@@ -226,6 +225,33 @@ static void br_netpoll_cleanup(struct net_device *dev)
 		br_netpoll_disable(p);
 }
 
+static int __br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+{
+	struct netpoll *np;
+	int err;
+
+	np = kzalloc(sizeof(*p->np), gfp);
+	if (!np)
+		return -ENOMEM;
+
+	err = __netpoll_setup(np, p->dev, gfp);
+	if (err) {
+		kfree(np);
+		return err;
+	}
+
+	p->np = np;
+	return err;
+}
+
+int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+{
+	if (!p->br->dev->npinfo)
+		return 0;
+
+	return __br_netpoll_enable(p, gfp);
+}
+
 static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
 			    gfp_t gfp)
 {
@@ -236,7 +262,7 @@ static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
 	list_for_each_entry(p, &br->port_list, list) {
 		if (!p->dev)
 			continue;
-		err = br_netpoll_enable(p, gfp);
+		err = __br_netpoll_enable(p, gfp);
 		if (err)
 			goto fail;
 	}
@@ -249,28 +275,6 @@ fail:
 	goto out;
 }
 
-int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
-{
-	struct netpoll *np;
-	int err;
-
-	if (!p->br->dev->npinfo)
-		return 0;
-
-	np = kzalloc(sizeof(*p->np), gfp);
-	if (!np)
-		return -ENOMEM;
-
-	err = __netpoll_setup(np, p->dev, gfp);
-	if (err) {
-		kfree(np);
-		return err;
-	}
-
-	p->np = np;
-	return err;
-}
-
 void br_netpoll_disable(struct net_bridge_port *p)
 {
 	struct netpoll *np = p->np;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c5f5a4a933f4..9203d5a1943f 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -27,6 +27,9 @@
 #include "br_private.h"
 
 static struct kmem_cache *br_fdb_cache __read_mostly;
+static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
+					     const unsigned char *addr,
+					     __u16 vid);
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		      const unsigned char *addr, u16 vid);
 static void fdb_notify(struct net_bridge *br,
@@ -89,11 +92,57 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
 	call_rcu(&f->rcu, fdb_rcu_free);
 }
 
+/* Delete a local entry if no other port had the same address. */
+static void fdb_delete_local(struct net_bridge *br,
+			     const struct net_bridge_port *p,
+			     struct net_bridge_fdb_entry *f)
+{
+	const unsigned char *addr = f->addr.addr;
+	u16 vid = f->vlan_id;
+	struct net_bridge_port *op;
+
+	/* Maybe another port has same hw addr? */
+	list_for_each_entry(op, &br->port_list, list) {
+		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
+		    (!vid || nbp_vlan_find(op, vid))) {
+			f->dst = op;
+			f->added_by_user = 0;
+			return;
+		}
+	}
+
+	/* Maybe bridge device has same hw addr? */
+	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
+	    (!vid || br_vlan_find(br, vid))) {
+		f->dst = NULL;
+		f->added_by_user = 0;
+		return;
+	}
+
+	fdb_delete(br, f);
+}
+
+void br_fdb_find_delete_local(struct net_bridge *br,
+			      const struct net_bridge_port *p,
+			      const unsigned char *addr, u16 vid)
+{
+	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
+	struct net_bridge_fdb_entry *f;
+
+	spin_lock_bh(&br->hash_lock);
+	f = fdb_find(head, addr, vid);
+	if (f && f->is_local && !f->added_by_user && f->dst == p)
+		fdb_delete_local(br, p, f);
+	spin_unlock_bh(&br->hash_lock);
+}
+
 void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
 {
 	struct net_bridge *br = p->br;
-	bool no_vlan = (nbp_get_vlan_info(p) == NULL) ? true : false;
+	struct net_port_vlans *pv = nbp_get_vlan_info(p);
+	bool no_vlan = !pv;
 	int i;
+	u16 vid;
 
 	spin_lock_bh(&br->hash_lock);
 
@@ -104,38 +153,34 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
 			struct net_bridge_fdb_entry *f;
 
 			f = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
-			if (f->dst == p && f->is_local) {
-				/* maybe another port has same hw addr? */
-				struct net_bridge_port *op;
-				u16 vid = f->vlan_id;
-				list_for_each_entry(op, &br->port_list, list) {
-					if (op != p &&
-					    ether_addr_equal(op->dev->dev_addr,
-							     f->addr.addr) &&
-					    nbp_vlan_find(op, vid)) {
-						f->dst = op;
-						goto insert;
-					}
-				}
-
+			if (f->dst == p && f->is_local && !f->added_by_user) {
 				/* delete old one */
-				fdb_delete(br, f);
-insert:
-				/* insert new address, may fail if invalid
-				 * address or dup.
-				 */
-				fdb_insert(br, p, newaddr, vid);
+				fdb_delete_local(br, p, f);
 
 				/* if this port has no vlan information
 				 * configured, we can safely be done at
 				 * this point.
 				 */
 				if (no_vlan)
-					goto done;
+					goto insert;
 			}
 		}
 	}
 
+insert:
+	/* insert new address, may fail if invalid address or dup. */
+	fdb_insert(br, p, newaddr, 0);
+
+	if (no_vlan)
+		goto done;
+
+	/* Now add entries for every VLAN configured on the port.
+	 * This function runs under RTNL so the bitmap will not change
+	 * from under us.
+	 */
+	for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
+		fdb_insert(br, p, newaddr, vid);
+
 done:
 	spin_unlock_bh(&br->hash_lock);
 }
@@ -146,10 +191,12 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
 	struct net_port_vlans *pv;
 	u16 vid = 0;
 
+	spin_lock_bh(&br->hash_lock);
+
 	/* If old entry was unassociated with any port, then delete it. */
 	f = __br_fdb_get(br, br->dev->dev_addr, 0);
 	if (f && f->is_local && !f->dst)
-		fdb_delete(br, f);
+		fdb_delete_local(br, NULL, f);
 
 	fdb_insert(br, NULL, newaddr, 0);
 
@@ -159,14 +206,16 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
 	 */
 	pv = br_get_vlan_info(br);
 	if (!pv)
-		return;
+		goto out;
 
 	for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
 		f = __br_fdb_get(br, br->dev->dev_addr, vid);
 		if (f && f->is_local && !f->dst)
-			fdb_delete(br, f);
+			fdb_delete_local(br, NULL, f);
 		fdb_insert(br, NULL, newaddr, vid);
 	}
+out:
+	spin_unlock_bh(&br->hash_lock);
 }
 
 void br_fdb_cleanup(unsigned long _data)
@@ -235,25 +284,11 @@ void br_fdb_delete_by_port(struct net_bridge *br,
 
 			if (f->is_static && !do_all)
 				continue;
-			/*
-			 * if multiple ports all have the same device address
-			 * then when one port is deleted, assign
-			 * the local entry to other port
-			 */
-			if (f->is_local) {
-				struct net_bridge_port *op;
-				list_for_each_entry(op, &br->port_list, list) {
-					if (op != p &&
-					    ether_addr_equal(op->dev->dev_addr,
-							     f->addr.addr)) {
-						f->dst = op;
-						goto skip_delete;
-					}
-				}
-			}
 
-			fdb_delete(br, f);
-		skip_delete: ;
+			if (f->is_local)
+				fdb_delete_local(br, p, f);
+			else
+				fdb_delete(br, f);
 		}
 	}
 	spin_unlock_bh(&br->hash_lock);
@@ -397,6 +432,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
 		fdb->vlan_id = vid;
 		fdb->is_local = 0;
 		fdb->is_static = 0;
+		fdb->added_by_user = 0;
 		fdb->updated = fdb->used = jiffies;
 		hlist_add_head_rcu(&fdb->hlist, head);
 	}
@@ -447,7 +483,7 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 }
 
 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
-		   const unsigned char *addr, u16 vid)
+		   const unsigned char *addr, u16 vid, bool added_by_user)
 {
 	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
 	struct net_bridge_fdb_entry *fdb;
@@ -473,13 +509,18 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 			/* fastpath: update of existing entry */
 			fdb->dst = source;
 			fdb->updated = jiffies;
+			if (unlikely(added_by_user))
+				fdb->added_by_user = 1;
 		}
 	} else {
 		spin_lock(&br->hash_lock);
 		if (likely(!fdb_find(head, addr, vid))) {
 			fdb = fdb_create(head, source, addr, vid);
-			if (fdb)
+			if (fdb) {
+				if (unlikely(added_by_user))
+					fdb->added_by_user = 1;
 				fdb_notify(br, fdb, RTM_NEWNEIGH);
+			}
 		}
 		/* else  we lose race and someone else inserts
 		 * it first, don't bother updating
@@ -647,6 +688,7 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
 
 		modified = true;
 	}
+	fdb->added_by_user = 1;
 
 	fdb->used = jiffies;
 	if (modified) {
@@ -664,7 +706,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
 
 	if (ndm->ndm_flags & NTF_USE) {
 		rcu_read_lock();
-		br_fdb_update(p->br, p, addr, vid);
+		br_fdb_update(p->br, p, addr, vid, true);
 		rcu_read_unlock();
 	} else {
 		spin_lock_bh(&p->br->hash_lock);
@@ -749,8 +791,7 @@ out:
 	return err;
 }
 
-int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr,
-		       u16 vlan)
+static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vlan)
 {
 	struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)];
 	struct net_bridge_fdb_entry *fdb;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index cffe1d666ba1..54d207d3a31c 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -389,6 +389,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 	if (br->dev->needed_headroom < dev->needed_headroom)
 		br->dev->needed_headroom = dev->needed_headroom;
 
+	if (br_fdb_insert(br, p, dev->dev_addr, 0))
+		netdev_err(dev, "failed insert local address bridge forwarding table\n");
+
 	spin_lock_bh(&br->lock);
 	changed_addr = br_stp_recalculate_bridge_id(br);
 
@@ -404,9 +407,6 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
 	dev_set_mtu(br->dev, br_min_mtu(br));
 
-	if (br_fdb_insert(br, p, dev->dev_addr, 0))
-		netdev_err(dev, "failed insert local address bridge forwarding table\n");
-
 	kobject_uevent(&p->kobj, KOBJ_ADD);
 
 	return 0;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index bf8dc7d308d6..28d544627422 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -77,7 +77,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	/* insert into forwarding database after filtering to avoid spoofing */
 	br = p->br;
 	if (p->flags & BR_LEARNING)
-		br_fdb_update(br, p, eth_hdr(skb)->h_source, vid);
+		br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
 
 	if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
 	    br_multicast_rcv(br, p, skb, vid))
@@ -148,7 +148,7 @@ static int br_handle_local_finish(struct sk_buff *skb)
 
 	br_vlan_get_tag(skb, &vid);
 	if (p->flags & BR_LEARNING)
-		br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid);
+		br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
 	return 0;	 /* process further */
 }
 
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index fcd12333c59b..3ba11bc99b65 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -104,6 +104,7 @@ struct net_bridge_fdb_entry
 	mac_addr			addr;
 	unsigned char			is_local;
 	unsigned char			is_static;
+	unsigned char			added_by_user;
 	__u16				vlan_id;
 };
 
@@ -370,6 +371,9 @@ static inline void br_netpoll_disable(struct net_bridge_port *p)
 int br_fdb_init(void);
 void br_fdb_fini(void);
 void br_fdb_flush(struct net_bridge *br);
+void br_fdb_find_delete_local(struct net_bridge *br,
+			      const struct net_bridge_port *p,
+			      const unsigned char *addr, u16 vid);
 void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr);
 void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
 void br_fdb_cleanup(unsigned long arg);
@@ -383,8 +387,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count,
 int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		  const unsigned char *addr, u16 vid);
 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
-		   const unsigned char *addr, u16 vid);
-int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
+		   const unsigned char *addr, u16 vid, bool added_by_user);
 
 int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 		  struct net_device *dev, const unsigned char *addr);
@@ -584,6 +587,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
 int br_vlan_delete(struct net_bridge *br, u16 vid);
 void br_vlan_flush(struct net_bridge *br);
+bool br_vlan_find(struct net_bridge *br, u16 vid);
 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
@@ -665,6 +669,11 @@ static inline void br_vlan_flush(struct net_bridge *br)
 {
 }
 
+static inline bool br_vlan_find(struct net_bridge *br, u16 vid)
+{
+	return false;
+}
+
 static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
 {
 	return -EOPNOTSUPP;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 656a6f3e40de..189ba1e7d851 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -194,6 +194,8 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
 
 	wasroot = br_is_root_bridge(br);
 
+	br_fdb_change_mac_address(br, addr);
+
 	memcpy(oldaddr, br->bridge_id.addr, ETH_ALEN);
 	memcpy(br->bridge_id.addr, addr, ETH_ALEN);
 	memcpy(br->dev->dev_addr, addr, ETH_ALEN);
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 4ca4d0a0151c..8249ca764c79 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -275,9 +275,7 @@ int br_vlan_delete(struct net_bridge *br, u16 vid)
 	if (!pv)
 		return -EINVAL;
 
-	spin_lock_bh(&br->hash_lock);
-	fdb_delete_by_addr(br, br->dev->dev_addr, vid);
-	spin_unlock_bh(&br->hash_lock);
+	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
 
 	__vlan_del(pv, vid);
 	return 0;
@@ -295,6 +293,25 @@ void br_vlan_flush(struct net_bridge *br)
 	__vlan_flush(pv);
 }
 
+bool br_vlan_find(struct net_bridge *br, u16 vid)
+{
+	struct net_port_vlans *pv;
+	bool found = false;
+
+	rcu_read_lock();
+	pv = rcu_dereference(br->vlan_info);
+
+	if (!pv)
+		goto out;
+
+	if (test_bit(vid, pv->vlan_bitmap))
+		found = true;
+
+out:
+	rcu_read_unlock();
+	return found;
+}
+
 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 {
 	if (!rtnl_trylock())
@@ -359,9 +376,7 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
 	if (!pv)
 		return -EINVAL;
 
-	spin_lock_bh(&port->br->hash_lock);
-	fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
-	spin_unlock_bh(&port->br->hash_lock);
+	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
 
 	return __vlan_del(pv, vid);
 }
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 4dca159435cf..edbca468fa73 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -22,6 +22,7 @@
 #include <net/pkt_sched.h>
 #include <net/caif/caif_device.h>
 #include <net/caif/caif_layer.h>
+#include <net/caif/caif_dev.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfcnfg.h>
 #include <net/caif/cfserl.h>
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 353f793d1b3b..a6e115463052 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -15,6 +15,7 @@
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
+#include <net/caif/caif_dev.h>
 
 #define SRVL_CTRL_PKT_SIZE 1
 #define SRVL_FLOW_OFF 0x81
diff --git a/net/can/af_can.c b/net/can/af_can.c
index d249874a366d..a27f8aad9e99 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -57,6 +57,7 @@
 #include <linux/skbuff.h>
 #include <linux/can.h>
 #include <linux/can/core.h>
+#include <linux/can/skb.h>
 #include <linux/ratelimit.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -290,7 +291,7 @@ int can_send(struct sk_buff *skb, int loop)
 			return -ENOMEM;
 		}
 
-		newskb->sk = skb->sk;
+		can_skb_set_owner(newskb, skb->sk);
 		newskb->ip_summed = CHECKSUM_UNNECESSARY;
 		newskb->pkt_type = PACKET_BROADCAST;
 	}
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 3fc737b214c7..dcb75c0e66c1 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -268,7 +268,7 @@ static void bcm_can_tx(struct bcm_op *op)
 
 	/* send with loopback */
 	skb->dev = dev;
-	skb->sk = op->sk;
+	can_skb_set_owner(skb, op->sk);
 	can_send(skb, 1);
 
 	/* update statistics */
@@ -1223,7 +1223,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
 
 	can_skb_prv(skb)->ifindex = dev->ifindex;
 	skb->dev = dev;
-	skb->sk = sk;
+	can_skb_set_owner(skb, sk);
 	err = can_send(skb, 1); /* send with loopback */
 	dev_put(dev);
 
diff --git a/net/can/raw.c b/net/can/raw.c
index 07d72d852324..8be757cca2ec 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -715,6 +715,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 	skb->dev = dev;
 	skb->sk = sk;
+	skb->priority = sk->sk_priority;
 
 	err = can_send(skb, ro->loopback);
 
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 0e478a0f4204..30efc5c18622 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -840,9 +840,13 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
 
 	if (!cursor->bvec_iter.bi_size) {
 		bio = bio->bi_next;
-		cursor->bvec_iter = bio->bi_iter;
+		cursor->bio = bio;
+		if (bio)
+			cursor->bvec_iter = bio->bi_iter;
+		else
+			memset(&cursor->bvec_iter, 0,
+			       sizeof(cursor->bvec_iter));
 	}
-	cursor->bio = bio;
 
 	if (!cursor->last_piece) {
 		BUG_ON(!cursor->resid);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 010ff3bd58ad..0676f2b199d6 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1427,6 +1427,40 @@ static void __send_queued(struct ceph_osd_client *osdc)
 }
 
 /*
+ * Caller should hold map_sem for read and request_mutex.
+ */
+static int __ceph_osdc_start_request(struct ceph_osd_client *osdc,
+				     struct ceph_osd_request *req,
+				     bool nofail)
+{
+	int rc;
+
+	__register_request(osdc, req);
+	req->r_sent = 0;
+	req->r_got_reply = 0;
+	rc = __map_request(osdc, req, 0);
+	if (rc < 0) {
+		if (nofail) {
+			dout("osdc_start_request failed map, "
+				" will retry %lld\n", req->r_tid);
+			rc = 0;
+		} else {
+			__unregister_request(osdc, req);
+		}
+		return rc;
+	}
+
+	if (req->r_osd == NULL) {
+		dout("send_request %p no up osds in pg\n", req);
+		ceph_monc_request_next_osdmap(&osdc->client->monc);
+	} else {
+		__send_queued(osdc);
+	}
+
+	return 0;
+}
+
+/*
  * Timeout callback, called every N seconds when 1 or more osd
  * requests has been active for more than N seconds.  When this
  * happens, we ping all OSDs with requests who have timed out to
@@ -1653,6 +1687,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
 	osdmap_epoch = ceph_decode_32(&p);
 
 	/* lookup */
+	down_read(&osdc->map_sem);
 	mutex_lock(&osdc->request_mutex);
 	req = __lookup_request(osdc, tid);
 	if (req == NULL) {
@@ -1709,7 +1744,6 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
 		dout("redirect pool %lld\n", redir.oloc.pool);
 
 		__unregister_request(osdc, req);
-		mutex_unlock(&osdc->request_mutex);
 
 		req->r_target_oloc = redir.oloc; /* struct */
 
@@ -1721,10 +1755,10 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
 		 * successfully.  In the future we might want to follow
 		 * original request's nofail setting here.
 		 */
-		err = ceph_osdc_start_request(osdc, req, true);
+		err = __ceph_osdc_start_request(osdc, req, true);
 		BUG_ON(err);
 
-		goto done;
+		goto out_unlock;
 	}
 
 	already_completed = req->r_got_reply;
@@ -1742,8 +1776,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
 		req->r_got_reply = 1;
 	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
 		dout("handle_reply tid %llu dup ack\n", tid);
-		mutex_unlock(&osdc->request_mutex);
-		goto done;
+		goto out_unlock;
 	}
 
 	dout("handle_reply tid %llu flags %d\n", tid, flags);
@@ -1758,6 +1791,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
 		__unregister_request(osdc, req);
 
 	mutex_unlock(&osdc->request_mutex);
+	up_read(&osdc->map_sem);
 
 	if (!already_completed) {
 		if (req->r_unsafe_callback &&
@@ -1775,10 +1809,14 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
 		complete_request(req);
 	}
 
-done:
+out:
 	dout("req=%p req->r_linger=%d\n", req, req->r_linger);
 	ceph_osdc_put_request(req);
 	return;
+out_unlock:
+	mutex_unlock(&osdc->request_mutex);
+	up_read(&osdc->map_sem);
+	goto out;
 
 bad_put:
 	req->r_result = -EIO;
@@ -1791,6 +1829,7 @@ bad_put:
 	ceph_osdc_put_request(req);
 bad_mutex:
 	mutex_unlock(&osdc->request_mutex);
+	up_read(&osdc->map_sem);
 bad:
 	pr_err("corrupt osd_op_reply got %d %d\n",
 	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
@@ -2351,34 +2390,16 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
 			    struct ceph_osd_request *req,
 			    bool nofail)
 {
-	int rc = 0;
+	int rc;
 
 	down_read(&osdc->map_sem);
 	mutex_lock(&osdc->request_mutex);
-	__register_request(osdc, req);
-	req->r_sent = 0;
-	req->r_got_reply = 0;
-	rc = __map_request(osdc, req, 0);
-	if (rc < 0) {
-		if (nofail) {
-			dout("osdc_start_request failed map, "
-				" will retry %lld\n", req->r_tid);
-			rc = 0;
-		} else {
-			__unregister_request(osdc, req);
-		}
-		goto out_unlock;
-	}
-	if (req->r_osd == NULL) {
-		dout("send_request %p no up osds in pg\n", req);
-		ceph_monc_request_next_osdmap(&osdc->client->monc);
-	} else {
-		__send_queued(osdc);
-	}
-	rc = 0;
-out_unlock:
+
+	rc = __ceph_osdc_start_request(osdc, req, nofail);
+
 	mutex_unlock(&osdc->request_mutex);
 	up_read(&osdc->map_sem);
+
 	return rc;
 }
 EXPORT_SYMBOL(ceph_osdc_start_request);
@@ -2504,9 +2525,12 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
 	err = -ENOMEM;
 	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
 	if (!osdc->notify_wq)
-		goto out_msgpool;
+		goto out_msgpool_reply;
+
 	return 0;
 
+out_msgpool_reply:
+	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
 out_msgpool:
 	ceph_msgpool_destroy(&osdc->msgpool_op);
 out_mempool:
diff --git a/net/core/dev.c b/net/core/dev.c
index 3721db716350..4ad1b78c9c77 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2803,7 +2803,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
  *      the BH enable code must have IRQs enabled so that it will not deadlock.
  *          --BLG
  */
-int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 {
 	struct net_device *dev = skb->dev;
 	struct netdev_queue *txq;
@@ -4637,7 +4637,7 @@ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
 
-int netdev_adjacent_sysfs_add(struct net_device *dev,
+static int netdev_adjacent_sysfs_add(struct net_device *dev,
 			      struct net_device *adj_dev,
 			      struct list_head *dev_list)
 {
@@ -4647,7 +4647,7 @@ int netdev_adjacent_sysfs_add(struct net_device *dev,
 	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
 				 linkname);
 }
-void netdev_adjacent_sysfs_del(struct net_device *dev,
+static void netdev_adjacent_sysfs_del(struct net_device *dev,
 			       char *name,
 			       struct list_head *dev_list)
 {
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index f409e0bd35c0..185c341fafbd 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -745,6 +745,13 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
 			attach_rules(&ops->rules_list, dev);
 		break;
 
+	case NETDEV_CHANGENAME:
+		list_for_each_entry(ops, &net->rules_ops, list) {
+			detach_rules(&ops->rules_list, dev);
+			attach_rules(&ops->rules_list, dev);
+		}
+		break;
+
 	case NETDEV_UNREGISTER:
 		list_for_each_entry(ops, &net->rules_ops, list)
 			detach_rules(&ops->rules_list, dev);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c03f3dec4763..a664f7829a6d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -948,6 +948,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 {
 	char *cur=opt, *delim;
 	int ipv6;
+	bool ipversion_set = false;
 
 	if (*cur != '@') {
 		if ((delim = strchr(cur, '@')) == NULL)
@@ -960,6 +961,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 	cur++;
 
 	if (*cur != '/') {
+		ipversion_set = true;
 		if ((delim = strchr(cur, '/')) == NULL)
 			goto parse_failed;
 		*delim = 0;
@@ -1002,7 +1004,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
 	if (ipv6 < 0)
 		goto parse_failed;
-	else if (np->ipv6 != (bool)ipv6)
+	else if (ipversion_set && np->ipv6 != (bool)ipv6)
 		goto parse_failed;
 	else
 		np->ipv6 = (bool)ipv6;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 393b1bc9a618..048dc8d183aa 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -374,7 +374,7 @@ static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
 	if (!master_dev)
 		return 0;
 	ops = master_dev->rtnl_link_ops;
-	if (!ops->get_slave_size)
+	if (!ops || !ops->get_slave_size)
 		return 0;
 	/* IFLA_INFO_SLAVE_DATA + nested data */
 	return nla_total_size(sizeof(struct nlattr)) +
diff --git a/net/core/sock.c b/net/core/sock.c
index 0c127dcdf6a8..5b6a9431b017 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1775,7 +1775,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 			while (order) {
 				if (npages >= 1 << order) {
 					page = alloc_pages(sk->sk_allocation |
-							   __GFP_COMP | __GFP_NOWARN,
+							   __GFP_COMP |
+							   __GFP_NOWARN |
+							   __GFP_NORETRY,
 							   order);
 					if (page)
 						goto fill_page;
@@ -1845,7 +1847,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
 		gfp_t gfp = prio;
 
 		if (order)
-			gfp |= __GFP_COMP | __GFP_NOWARN;
+			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
 		pfrag->page = alloc_pages(gfp, order);
 		if (likely(pfrag->page)) {
 			pfrag->offset = 0;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 2954dcbca832..4c04848953bd 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2104,8 +2104,6 @@ static struct notifier_block dn_dev_notifier = {
 	.notifier_call = dn_device_event,
 };
 
-extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
-
 static struct packet_type dn_dix_packet_type __read_mostly = {
 	.type =		cpu_to_be16(ETH_P_DNA_RT),
 	.func =		dn_route_rcv,
@@ -2353,9 +2351,6 @@ static const struct proto_ops dn_proto_ops = {
 	.sendpage =	sock_no_sendpage,
 };
 
-void dn_register_sysctl(void);
-void dn_unregister_sysctl(void);
-
 MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
 MODULE_AUTHOR("Linux DECnet Project Team");
 MODULE_LICENSE("GPL");
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 48b25c0af4d0..8edfea5da572 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -106,7 +106,6 @@ static int lowpan_header_create(struct sk_buff *skb,
 			   unsigned short type, const void *_daddr,
 			   const void *_saddr, unsigned int len)
 {
-	struct ipv6hdr *hdr;
 	const u8 *saddr = _saddr;
 	const u8 *daddr = _daddr;
 	struct ieee802154_addr sa, da;
@@ -117,8 +116,6 @@ static int lowpan_header_create(struct sk_buff *skb,
 	if (type != ETH_P_IPV6)
 		return 0;
 
-	hdr = ipv6_hdr(skb);
-
 	if (!saddr)
 		saddr = dev->dev_addr;
 
@@ -533,7 +530,27 @@ static struct header_ops lowpan_header_ops = {
 	.create	= lowpan_header_create,
 };
 
+static struct lock_class_key lowpan_tx_busylock;
+static struct lock_class_key lowpan_netdev_xmit_lock_key;
+
+static void lowpan_set_lockdep_class_one(struct net_device *dev,
+					 struct netdev_queue *txq,
+					 void *_unused)
+{
+	lockdep_set_class(&txq->_xmit_lock,
+			  &lowpan_netdev_xmit_lock_key);
+}
+
+
+static int lowpan_dev_init(struct net_device *dev)
+{
+	netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
+	dev->qdisc_tx_busylock = &lowpan_tx_busylock;
+	return 0;
+}
+
 static const struct net_device_ops lowpan_netdev_ops = {
+	.ndo_init		= lowpan_dev_init,
 	.ndo_start_xmit		= lowpan_xmit,
 	.ndo_set_mac_address	= lowpan_set_address,
 };
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index ac2dff3c2c1c..bdbf68bb2e2d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1443,7 +1443,8 @@ static size_t inet_nlmsg_size(void)
 	       + nla_total_size(4) /* IFA_LOCAL */
 	       + nla_total_size(4) /* IFA_BROADCAST */
 	       + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
-	       + nla_total_size(4);  /* IFA_FLAGS */
+	       + nla_total_size(4)  /* IFA_FLAGS */
+	       + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
 }
 
 static inline u32 cstamp_delta(unsigned long cstamp)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index bd28f386bd02..50228be5c17b 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -101,28 +101,22 @@ static void tunnel_dst_reset_all(struct ip_tunnel *t)
 		__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
 }
 
-static struct dst_entry *tunnel_dst_get(struct ip_tunnel *t)
+static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
 {
 	struct dst_entry *dst;
 
 	rcu_read_lock();
 	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
-	if (dst)
+	if (dst) {
+		if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+			rcu_read_unlock();
+			tunnel_dst_reset(t);
+			return NULL;
+		}
 		dst_hold(dst);
-	rcu_read_unlock();
-	return dst;
-}
-
-static struct dst_entry *tunnel_dst_check(struct ip_tunnel *t, u32 cookie)
-{
-	struct dst_entry *dst = tunnel_dst_get(t);
-
-	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
-		tunnel_dst_reset(t);
-		return NULL;
 	}
-
-	return dst;
+	rcu_read_unlock();
+	return (struct rtable *)dst;
 }
 
 /* Often modified stats are per cpu, other are shared (netdev->stats) */
@@ -584,7 +578,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	struct flowi4 fl4;
 	u8     tos, ttl;
 	__be16 df;
-	struct rtable *rt = NULL;	/* Route to the other host */
+	struct rtable *rt;		/* Route to the other host */
 	unsigned int max_headroom;	/* The extra header space needed */
 	__be32 dst;
 	int err;
@@ -657,8 +651,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
 			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
 
-	if (connected)
-		rt = (struct rtable *)tunnel_dst_check(tunnel, 0);
+	rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;
 
 	if (!rt) {
 		rt = ip_route_output_key(tunnel->net, &fl4);
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 81c6910cfa92..a26ce035e3fa 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -61,6 +61,11 @@ config NFT_CHAIN_NAT_IPV4
 	  packet transformations such as the source, destination address and
 	  source and destination ports.
 
+config NFT_REJECT_IPV4
+	depends on NF_TABLES_IPV4
+	default NFT_REJECT
+	tristate
+
 config NF_TABLES_ARP
 	depends on NF_TABLES
 	tristate "ARP nf_tables support"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index c16be9d58420..90b82405331e 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
 obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o
 obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
 obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
+obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
 obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
 
 # generic IP tables
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 9eea059dd621..574f7ebba0b6 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -229,7 +229,10 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
 		ret = nf_ct_expect_related(rtcp_exp);
 		if (ret == 0)
 			break;
-		else if (ret != -EBUSY) {
+		else if (ret == -EBUSY) {
+			nf_ct_unexpect_related(rtp_exp);
+			continue;
+		} else if (ret < 0) {
 			nf_ct_unexpect_related(rtp_exp);
 			nated_port = 0;
 			break;
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
new file mode 100644
index 000000000000..e79718a382f2
--- /dev/null
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2013 Eric Leblond <eric@regit.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/icmp.h>
+#include <net/netfilter/ipv4/nf_reject.h>
+#include <net/netfilter/nft_reject.h>
+
+void nft_reject_ipv4_eval(const struct nft_expr *expr,
+			  struct nft_data data[NFT_REG_MAX + 1],
+			  const struct nft_pktinfo *pkt)
+{
+	struct nft_reject *priv = nft_expr_priv(expr);
+
+	switch (priv->type) {
+	case NFT_REJECT_ICMP_UNREACH:
+		nf_send_unreach(pkt->skb, priv->icmp_code);
+		break;
+	case NFT_REJECT_TCP_RST:
+		nf_send_reset(pkt->skb, pkt->ops->hooknum);
+		break;
+	}
+
+	data[NFT_REG_VERDICT].verdict = NF_DROP;
+}
+EXPORT_SYMBOL_GPL(nft_reject_ipv4_eval);
+
+static struct nft_expr_type nft_reject_ipv4_type;
+static const struct nft_expr_ops nft_reject_ipv4_ops = {
+	.type		= &nft_reject_ipv4_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+	.eval		= nft_reject_ipv4_eval,
+	.init		= nft_reject_init,
+	.dump		= nft_reject_dump,
+};
+
+static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {
+	.family		= NFPROTO_IPV4,
+	.name		= "reject",
+	.ops		= &nft_reject_ipv4_ops,
+	.policy		= nft_reject_policy,
+	.maxattr	= NFTA_REJECT_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_reject_ipv4_module_init(void)
+{
+	return nft_register_expr(&nft_reject_ipv4_type);
+}
+
+static void __exit nft_reject_ipv4_module_exit(void)
+{
+	nft_unregister_expr(&nft_reject_ipv4_type);
+}
+
+module_init(nft_reject_ipv4_module_init);
+module_exit(nft_reject_ipv4_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "reject");
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4475b3bb494d..9f3a2db9109e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2229,7 +2229,7 @@ adjudge_to_death: | |||
2229 | /* This is a (useful) BSD violating of the RFC. There is a | 2229 | /* This is a (useful) BSD violating of the RFC. There is a |
2230 | * problem with TCP as specified in that the other end could | 2230 | * problem with TCP as specified in that the other end could |
2231 | * keep a socket open forever with no application left this end. | 2231 | * keep a socket open forever with no application left this end. |
2232 | * We use a 3 minute timeout (about the same as BSD) then kill | 2232 | * We use a 1 minute timeout (about the same as BSD) then kill |
2233 | * our end. If they send after that then tough - BUT: long enough | 2233 | * our end. If they send after that then tough - BUT: long enough |
2234 | * that we won't make the old 4*rto = almost no time - whoops | 2234 | * that we won't make the old 4*rto = almost no time - whoops |
2235 | * reset mistake. | 2235 | * reset mistake. |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 65cf90e063d5..227cba79fa6b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -671,6 +671,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt) | |||
671 | { | 671 | { |
672 | struct tcp_sock *tp = tcp_sk(sk); | 672 | struct tcp_sock *tp = tcp_sk(sk); |
673 | long m = mrtt; /* RTT */ | 673 | long m = mrtt; /* RTT */ |
674 | u32 srtt = tp->srtt; | ||
674 | 675 | ||
675 | /* The following amusing code comes from Jacobson's | 676 | /* The following amusing code comes from Jacobson's |
676 | * article in SIGCOMM '88. Note that rtt and mdev | 677 | * article in SIGCOMM '88. Note that rtt and mdev |
@@ -688,11 +689,9 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt) | |||
688 | * does not matter how to _calculate_ it. Seems, it was trap | 689 | * does not matter how to _calculate_ it. Seems, it was trap |
689 | * that VJ failed to avoid. 8) | 690 | * that VJ failed to avoid. 8) |
690 | */ | 691 | */ |
691 | if (m == 0) | 692 | if (srtt != 0) { |
692 | m = 1; | 693 | m -= (srtt >> 3); /* m is now error in rtt est */ |
693 | if (tp->srtt != 0) { | 694 | srtt += m; /* rtt = 7/8 rtt + 1/8 new */ |
694 | m -= (tp->srtt >> 3); /* m is now error in rtt est */ | ||
695 | tp->srtt += m; /* rtt = 7/8 rtt + 1/8 new */ | ||
696 | if (m < 0) { | 695 | if (m < 0) { |
697 | m = -m; /* m is now abs(error) */ | 696 | m = -m; /* m is now abs(error) */ |
698 | m -= (tp->mdev >> 2); /* similar update on mdev */ | 697 | m -= (tp->mdev >> 2); /* similar update on mdev */ |
@@ -723,11 +722,12 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt) | |||
723 | } | 722 | } |
724 | } else { | 723 | } else { |
725 | /* no previous measure. */ | 724 | /* no previous measure. */ |
726 | tp->srtt = m << 3; /* take the measured time to be rtt */ | 725 | srtt = m << 3; /* take the measured time to be rtt */ |
727 | tp->mdev = m << 1; /* make sure rto = 3*rtt */ | 726 | tp->mdev = m << 1; /* make sure rto = 3*rtt */ |
728 | tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk)); | 727 | tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk)); |
729 | tp->rtt_seq = tp->snd_nxt; | 728 | tp->rtt_seq = tp->snd_nxt; |
730 | } | 729 | } |
730 | tp->srtt = max(1U, srtt); | ||
731 | } | 731 | } |
732 | 732 | ||
733 | /* Set the sk_pacing_rate to allow proper sizing of TSO packets. | 733 | /* Set the sk_pacing_rate to allow proper sizing of TSO packets. |
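The estimator hunks above keep srtt in a local variable and replace the old "if (m == 0) m = 1" special case with a final clamp, tp->srtt = max(1U, srtt). A minimal user-space sketch of the same Jacobson-style smoothing, assuming jiffy-granularity samples and leaving out the mdev/rttvar half of the real function:

    #include <stdio.h>

    /* srtt is stored scaled by 8; the update is srtt = 7/8 srtt + 1/8 sample,
     * and the stored value is floored at 1 so later code never sees 0.
     * Sketch only, not the kernel function.
     */
    static unsigned int srtt;           /* srtt << 3, 0 means "no sample yet" */

    static void rtt_sample(long m)      /* m: measured RTT in jiffies */
    {
            unsigned int s = srtt;

            if (s != 0) {
                    m -= (s >> 3);      /* error against current estimate */
                    s += m;             /* s = 7/8 s + 1/8 m, in scaled units */
            } else {
                    s = m << 3;         /* first sample seeds the estimate */
            }
            srtt = s > 1 ? s : 1;       /* max(1U, srtt) */
    }

    int main(void)
    {
            long samples[] = { 100, 120, 80, 110 };

            for (int i = 0; i < 4; i++) {
                    rtt_sample(samples[i]);
                    printf("srtt = %u jiffies\n", srtt >> 3);
            }
            return 0;
    }

Moving the floor to the very end lets a zero sample pass through the EWMA unchanged while the stored tp->srtt still never becomes 0, which the pacing-rate comment below and the tcp_schedule_loss_probe() check on !tp->srtt both depend on.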
@@ -746,8 +746,10 @@ static void tcp_update_pacing_rate(struct sock *sk) | |||
746 | 746 | ||
747 | rate *= max(tp->snd_cwnd, tp->packets_out); | 747 | rate *= max(tp->snd_cwnd, tp->packets_out); |
748 | 748 | ||
749 | /* Correction for small srtt : minimum srtt being 8 (1 jiffy << 3), | 749 | /* Correction for small srtt and scheduling constraints. |
750 | * be conservative and assume srtt = 1 (125 us instead of 1.25 ms) | 750 | * For small rtt, consider noise is too high, and use |
751 | * the minimal value (srtt = 1 -> 125 us for HZ=1000) | ||
752 | * | ||
751 | * We probably need usec resolution in the future. | 753 | * We probably need usec resolution in the future. |
752 | * Note: This also takes care of possible srtt=0 case, | 754 | * Note: This also takes care of possible srtt=0 case, |
753 | * when tcp_rtt_estimator() was not yet called. | 755 | * when tcp_rtt_estimator() was not yet called. |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 03d26b85eab8..3be16727f058 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -698,7 +698,8 @@ static void tcp_tsq_handler(struct sock *sk) | |||
698 | if ((1 << sk->sk_state) & | 698 | if ((1 << sk->sk_state) & |
699 | (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING | | 699 | (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING | |
700 | TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) | 700 | TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) |
701 | tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC); | 701 | tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle, |
702 | 0, GFP_ATOMIC); | ||
702 | } | 703 | } |
703 | /* | 704 | /* |
704 | * One tasklet per cpu tries to send more skbs. | 705 | * One tasklet per cpu tries to send more skbs. |
@@ -1904,7 +1905,15 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
1904 | 1905 | ||
1905 | if (atomic_read(&sk->sk_wmem_alloc) > limit) { | 1906 | if (atomic_read(&sk->sk_wmem_alloc) > limit) { |
1906 | set_bit(TSQ_THROTTLED, &tp->tsq_flags); | 1907 | set_bit(TSQ_THROTTLED, &tp->tsq_flags); |
1907 | break; | 1908 | /* It is possible TX completion already happened |
1909 | * before we set TSQ_THROTTLED, so we must | ||
1910 | * test again the condition. | ||
1911 | * We abuse smp_mb__after_clear_bit() because | ||
1912 | * there is no smp_mb__after_set_bit() yet | ||
1913 | */ | ||
1914 | smp_mb__after_clear_bit(); | ||
1915 | if (atomic_read(&sk->sk_wmem_alloc) > limit) | ||
1916 | break; | ||
1908 | } | 1917 | } |
1909 | 1918 | ||
1910 | limit = mss_now; | 1919 | limit = mss_now; |
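The TSQ hunk above closes a lost-wakeup race: a TX completion can run between the first sk_wmem_alloc test and the TSQ_THROTTLED store, and would then never reschedule the queue. The fix is the classic set-flag, full-barrier, re-check sequence; the barrier is spelled smp_mb__after_clear_bit() only because, as the added comment notes, no smp_mb__after_set_bit() existed yet. A compile-only C11 sketch of the same idiom, with hypothetical wmem and throttled variables standing in for sk_wmem_alloc and tsq_flags:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_long wmem;        /* bytes in flight, decremented by completions */
    static atomic_bool throttled;   /* cleared by the completion side, which reschedules */

    /* Sketch only: decide whether the sender may park itself. */
    static bool should_stop(long limit)
    {
            if (atomic_load(&wmem) <= limit)
                    return false;

            atomic_store(&throttled, true);
            /* The seq_cst store above orders the flag before the re-read,
             * playing the role of the barrier in the patch. If a completion
             * already drained the queue, keep sending; the completion path
             * will clear the flag later.
             */
            if (atomic_load(&wmem) <= limit)
                    return false;

            return true;    /* safe to stop: any later completion sees the flag */
    }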
@@ -1977,7 +1986,7 @@ bool tcp_schedule_loss_probe(struct sock *sk) | |||
1977 | /* Schedule a loss probe in 2*RTT for SACK capable connections | 1986 | /* Schedule a loss probe in 2*RTT for SACK capable connections |
1978 | * in Open state, that are either limited by cwnd or application. | 1987 | * in Open state, that are either limited by cwnd or application. |
1979 | */ | 1988 | */ |
1980 | if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out || | 1989 | if (sysctl_tcp_early_retrans < 3 || !tp->srtt || !tp->packets_out || |
1981 | !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) | 1990 | !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) |
1982 | return false; | 1991 | return false; |
1983 | 1992 | ||
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 25f5cee3a08a..88b4023ecfcf 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c | |||
@@ -17,6 +17,8 @@ | |||
17 | static DEFINE_SPINLOCK(udp_offload_lock); | 17 | static DEFINE_SPINLOCK(udp_offload_lock); |
18 | static struct udp_offload_priv __rcu *udp_offload_base __read_mostly; | 18 | static struct udp_offload_priv __rcu *udp_offload_base __read_mostly; |
19 | 19 | ||
20 | #define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock)) | ||
21 | |||
20 | struct udp_offload_priv { | 22 | struct udp_offload_priv { |
21 | struct udp_offload *offload; | 23 | struct udp_offload *offload; |
22 | struct rcu_head rcu; | 24 | struct rcu_head rcu; |
@@ -100,8 +102,7 @@ out: | |||
100 | 102 | ||
101 | int udp_add_offload(struct udp_offload *uo) | 103 | int udp_add_offload(struct udp_offload *uo) |
102 | { | 104 | { |
103 | struct udp_offload_priv __rcu **head = &udp_offload_base; | 105 | struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC); |
104 | struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_KERNEL); | ||
105 | 106 | ||
106 | if (!new_offload) | 107 | if (!new_offload) |
107 | return -ENOMEM; | 108 | return -ENOMEM; |
@@ -109,8 +110,8 @@ int udp_add_offload(struct udp_offload *uo) | |||
109 | new_offload->offload = uo; | 110 | new_offload->offload = uo; |
110 | 111 | ||
111 | spin_lock(&udp_offload_lock); | 112 | spin_lock(&udp_offload_lock); |
112 | rcu_assign_pointer(new_offload->next, rcu_dereference(*head)); | 113 | new_offload->next = udp_offload_base; |
113 | rcu_assign_pointer(*head, new_offload); | 114 | rcu_assign_pointer(udp_offload_base, new_offload); |
114 | spin_unlock(&udp_offload_lock); | 115 | spin_unlock(&udp_offload_lock); |
115 | 116 | ||
116 | return 0; | 117 | return 0; |
@@ -130,12 +131,12 @@ void udp_del_offload(struct udp_offload *uo) | |||
130 | 131 | ||
131 | spin_lock(&udp_offload_lock); | 132 | spin_lock(&udp_offload_lock); |
132 | 133 | ||
133 | uo_priv = rcu_dereference(*head); | 134 | uo_priv = udp_deref_protected(*head); |
134 | for (; uo_priv != NULL; | 135 | for (; uo_priv != NULL; |
135 | uo_priv = rcu_dereference(*head)) { | 136 | uo_priv = udp_deref_protected(*head)) { |
136 | |||
137 | if (uo_priv->offload == uo) { | 137 | if (uo_priv->offload == uo) { |
138 | rcu_assign_pointer(*head, rcu_dereference(uo_priv->next)); | 138 | rcu_assign_pointer(*head, |
139 | udp_deref_protected(uo_priv->next)); | ||
139 | goto unlock; | 140 | goto unlock; |
140 | } | 141 | } |
141 | head = &uo_priv->next; | 142 | head = &uo_priv->next; |
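The udp_offload.c hunks above make the writer side of the offload list explicit: every dereference done under udp_offload_lock goes through the new udp_deref_protected() (rcu_dereference_protected keyed on the lock), rcu_assign_pointer() is kept only for the publish step, and the allocation becomes GFP_ATOMIC for callers that cannot sleep. A minimal sketch of that writer-side pattern on a hypothetical singly-linked list, not the real udp_offload structures:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct ex_node {
            struct ex_node __rcu    *next;
            struct rcu_head         rcu;
            int                     id;
    };

    static struct ex_node __rcu *ex_head;
    static DEFINE_SPINLOCK(ex_lock);

    /* Writer-side dereference, valid only with ex_lock held. */
    #define ex_deref(p) rcu_dereference_protected(p, lockdep_is_held(&ex_lock))

    static void ex_del(int id)
    {
            struct ex_node __rcu **pp = &ex_head;
            struct ex_node *n;

            spin_lock(&ex_lock);
            while ((n = ex_deref(*pp)) != NULL) {
                    if (n->id == id) {
                            rcu_assign_pointer(*pp, ex_deref(n->next));
                            kfree_rcu(n, rcu);      /* readers may still walk n */
                            break;
                    }
                    pp = &n->next;
            }
            spin_unlock(&ex_lock);
    }

Readers keep using rcu_dereference() under rcu_read_lock(); the _protected form is reserved for the writer, and lockdep will complain if it is ever used without the lock held.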
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index f81f59686f21..f2610e157660 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -414,7 +414,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) | |||
414 | addr_type = ipv6_addr_type(&hdr->daddr); | 414 | addr_type = ipv6_addr_type(&hdr->daddr); |
415 | 415 | ||
416 | if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) || | 416 | if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) || |
417 | ipv6_anycast_destination(skb)) | 417 | ipv6_chk_acast_addr_src(net, skb->dev, &hdr->daddr)) |
418 | saddr = &hdr->daddr; | 418 | saddr = &hdr->daddr; |
419 | 419 | ||
420 | /* | 420 | /* |
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 35750df744dc..4bff1f297e39 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig | |||
@@ -50,6 +50,11 @@ config NFT_CHAIN_NAT_IPV6 | |||
50 | packet transformations such as the source, destination address and | 50 | packet transformations such as the source, destination address and |
51 | source and destination ports. | 51 | source and destination ports. |
52 | 52 | ||
53 | config NFT_REJECT_IPV6 | ||
54 | depends on NF_TABLES_IPV6 | ||
55 | default NFT_REJECT | ||
56 | tristate | ||
57 | |||
53 | config IP6_NF_IPTABLES | 58 | config IP6_NF_IPTABLES |
54 | tristate "IP6 tables support (required for filtering)" | 59 | tristate "IP6 tables support (required for filtering)" |
55 | depends on INET && IPV6 | 60 | depends on INET && IPV6 |
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index d1b4928f34f7..70d3dd66f2cd 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile | |||
@@ -27,6 +27,7 @@ obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o | |||
27 | obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o | 27 | obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o |
28 | obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o | 28 | obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o |
29 | obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o | 29 | obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o |
30 | obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o | ||
30 | 31 | ||
31 | # matches | 32 | # matches |
32 | obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o | 33 | obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o |
diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c new file mode 100644 index 000000000000..0bc19fa87821 --- /dev/null +++ b/net/ipv6/netfilter/nft_reject_ipv6.c | |||
@@ -0,0 +1,76 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net> | ||
3 | * Copyright (c) 2013 Eric Leblond <eric@regit.org> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * Development of this code funded by Astaro AG (http://www.astaro.com/) | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/netlink.h> | ||
16 | #include <linux/netfilter.h> | ||
17 | #include <linux/netfilter/nf_tables.h> | ||
18 | #include <net/netfilter/nf_tables.h> | ||
19 | #include <net/netfilter/nft_reject.h> | ||
20 | #include <net/netfilter/ipv6/nf_reject.h> | ||
21 | |||
22 | void nft_reject_ipv6_eval(const struct nft_expr *expr, | ||
23 | struct nft_data data[NFT_REG_MAX + 1], | ||
24 | const struct nft_pktinfo *pkt) | ||
25 | { | ||
26 | struct nft_reject *priv = nft_expr_priv(expr); | ||
27 | struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out); | ||
28 | |||
29 | switch (priv->type) { | ||
30 | case NFT_REJECT_ICMP_UNREACH: | ||
31 | nf_send_unreach6(net, pkt->skb, priv->icmp_code, | ||
32 | pkt->ops->hooknum); | ||
33 | break; | ||
34 | case NFT_REJECT_TCP_RST: | ||
35 | nf_send_reset6(net, pkt->skb, pkt->ops->hooknum); | ||
36 | break; | ||
37 | } | ||
38 | |||
39 | data[NFT_REG_VERDICT].verdict = NF_DROP; | ||
40 | } | ||
41 | EXPORT_SYMBOL_GPL(nft_reject_ipv6_eval); | ||
42 | |||
43 | static struct nft_expr_type nft_reject_ipv6_type; | ||
44 | static const struct nft_expr_ops nft_reject_ipv6_ops = { | ||
45 | .type = &nft_reject_ipv6_type, | ||
46 | .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)), | ||
47 | .eval = nft_reject_ipv6_eval, | ||
48 | .init = nft_reject_init, | ||
49 | .dump = nft_reject_dump, | ||
50 | }; | ||
51 | |||
52 | static struct nft_expr_type nft_reject_ipv6_type __read_mostly = { | ||
53 | .family = NFPROTO_IPV6, | ||
54 | .name = "reject", | ||
55 | .ops = &nft_reject_ipv6_ops, | ||
56 | .policy = nft_reject_policy, | ||
57 | .maxattr = NFTA_REJECT_MAX, | ||
58 | .owner = THIS_MODULE, | ||
59 | }; | ||
60 | |||
61 | static int __init nft_reject_ipv6_module_init(void) | ||
62 | { | ||
63 | return nft_register_expr(&nft_reject_ipv6_type); | ||
64 | } | ||
65 | |||
66 | static void __exit nft_reject_ipv6_module_exit(void) | ||
67 | { | ||
68 | nft_unregister_expr(&nft_reject_ipv6_type); | ||
69 | } | ||
70 | |||
71 | module_init(nft_reject_ipv6_module_init); | ||
72 | module_exit(nft_reject_ipv6_module_exit); | ||
73 | |||
74 | MODULE_LICENSE("GPL"); | ||
75 | MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); | ||
76 | MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "reject"); | ||
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 994e28bfb32e..00b2a6d1c009 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c | |||
@@ -52,18 +52,12 @@ | |||
52 | #include <net/p8022.h> | 52 | #include <net/p8022.h> |
53 | #include <net/psnap.h> | 53 | #include <net/psnap.h> |
54 | #include <net/sock.h> | 54 | #include <net/sock.h> |
55 | #include <net/datalink.h> | ||
55 | #include <net/tcp_states.h> | 56 | #include <net/tcp_states.h> |
57 | #include <net/net_namespace.h> | ||
56 | 58 | ||
57 | #include <asm/uaccess.h> | 59 | #include <asm/uaccess.h> |
58 | 60 | ||
59 | #ifdef CONFIG_SYSCTL | ||
60 | extern void ipx_register_sysctl(void); | ||
61 | extern void ipx_unregister_sysctl(void); | ||
62 | #else | ||
63 | #define ipx_register_sysctl() | ||
64 | #define ipx_unregister_sysctl() | ||
65 | #endif | ||
66 | |||
67 | /* Configuration Variables */ | 61 | /* Configuration Variables */ |
68 | static unsigned char ipxcfg_max_hops = 16; | 62 | static unsigned char ipxcfg_max_hops = 16; |
69 | static char ipxcfg_auto_select_primary; | 63 | static char ipxcfg_auto_select_primary; |
@@ -84,15 +78,6 @@ DEFINE_SPINLOCK(ipx_interfaces_lock); | |||
84 | struct ipx_interface *ipx_primary_net; | 78 | struct ipx_interface *ipx_primary_net; |
85 | struct ipx_interface *ipx_internal_net; | 79 | struct ipx_interface *ipx_internal_net; |
86 | 80 | ||
87 | extern int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc, | ||
88 | unsigned char *node); | ||
89 | extern void ipxrtr_del_routes(struct ipx_interface *intrfc); | ||
90 | extern int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, | ||
91 | struct iovec *iov, size_t len, int noblock); | ||
92 | extern int ipxrtr_route_skb(struct sk_buff *skb); | ||
93 | extern struct ipx_route *ipxrtr_lookup(__be32 net); | ||
94 | extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg); | ||
95 | |||
96 | struct ipx_interface *ipx_interfaces_head(void) | 81 | struct ipx_interface *ipx_interfaces_head(void) |
97 | { | 82 | { |
98 | struct ipx_interface *rc = NULL; | 83 | struct ipx_interface *rc = NULL; |
@@ -1986,9 +1971,6 @@ static struct notifier_block ipx_dev_notifier = { | |||
1986 | .notifier_call = ipxitf_device_event, | 1971 | .notifier_call = ipxitf_device_event, |
1987 | }; | 1972 | }; |
1988 | 1973 | ||
1989 | extern struct datalink_proto *make_EII_client(void); | ||
1990 | extern void destroy_EII_client(struct datalink_proto *); | ||
1991 | |||
1992 | static const unsigned char ipx_8022_type = 0xE0; | 1974 | static const unsigned char ipx_8022_type = 0xE0; |
1993 | static const unsigned char ipx_snap_id[5] = { 0x0, 0x0, 0x0, 0x81, 0x37 }; | 1975 | static const unsigned char ipx_snap_id[5] = { 0x0, 0x0, 0x0, 0x81, 0x37 }; |
1994 | static const char ipx_EII_err_msg[] __initconst = | 1976 | static const char ipx_EII_err_msg[] __initconst = |
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c index 30f4519b092f..c1f03185c5e1 100644 --- a/net/ipx/ipx_route.c +++ b/net/ipx/ipx_route.c | |||
@@ -20,15 +20,11 @@ DEFINE_RWLOCK(ipx_routes_lock); | |||
20 | 20 | ||
21 | extern struct ipx_interface *ipx_internal_net; | 21 | extern struct ipx_interface *ipx_internal_net; |
22 | 22 | ||
23 | extern __be16 ipx_cksum(struct ipxhdr *packet, int length); | ||
24 | extern struct ipx_interface *ipxitf_find_using_net(__be32 net); | 23 | extern struct ipx_interface *ipxitf_find_using_net(__be32 net); |
25 | extern int ipxitf_demux_socket(struct ipx_interface *intrfc, | 24 | extern int ipxitf_demux_socket(struct ipx_interface *intrfc, |
26 | struct sk_buff *skb, int copy); | 25 | struct sk_buff *skb, int copy); |
27 | extern int ipxitf_demux_socket(struct ipx_interface *intrfc, | 26 | extern int ipxitf_demux_socket(struct ipx_interface *intrfc, |
28 | struct sk_buff *skb, int copy); | 27 | struct sk_buff *skb, int copy); |
29 | extern int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, | ||
30 | char *node); | ||
31 | extern struct ipx_interface *ipxitf_find_using_net(__be32 net); | ||
32 | 28 | ||
33 | struct ipx_route *ipxrtr_lookup(__be32 net) | 29 | struct ipx_route *ipxrtr_lookup(__be32 net) |
34 | { | 30 | { |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index f9ae9b85d4c1..453e974287d1 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -1021,8 +1021,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, | |||
1021 | IEEE80211_P2P_OPPPS_ENABLE_BIT; | 1021 | IEEE80211_P2P_OPPPS_ENABLE_BIT; |
1022 | 1022 | ||
1023 | err = ieee80211_assign_beacon(sdata, ¶ms->beacon); | 1023 | err = ieee80211_assign_beacon(sdata, ¶ms->beacon); |
1024 | if (err < 0) | 1024 | if (err < 0) { |
1025 | ieee80211_vif_release_channel(sdata); | ||
1025 | return err; | 1026 | return err; |
1027 | } | ||
1026 | changed |= err; | 1028 | changed |= err; |
1027 | 1029 | ||
1028 | err = drv_start_ap(sdata->local, sdata); | 1030 | err = drv_start_ap(sdata->local, sdata); |
@@ -1032,6 +1034,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, | |||
1032 | if (old) | 1034 | if (old) |
1033 | kfree_rcu(old, rcu_head); | 1035 | kfree_rcu(old, rcu_head); |
1034 | RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); | 1036 | RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); |
1037 | ieee80211_vif_release_channel(sdata); | ||
1035 | return err; | 1038 | return err; |
1036 | } | 1039 | } |
1037 | 1040 | ||
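Both ieee80211_start_ap() fixes above are the same class of bug: an error exit taken after ieee80211_vif_use_channel() succeeded has to release the channel context again. A generic sketch of the goto-unwind shape that keeps such exits balanced, using hypothetical ex_* helpers rather than the mac80211 API:

    struct ex_dev;

    int ex_take_channel(struct ex_dev *dev);
    void ex_release_channel(struct ex_dev *dev);
    int ex_assign_beacon(struct ex_dev *dev);
    void ex_free_beacon(struct ex_dev *dev);
    int ex_driver_start(struct ex_dev *dev);

    /* Sketch: every failure after a successful step undoes that step. */
    static int ex_start(struct ex_dev *dev)
    {
            int err;

            err = ex_take_channel(dev);
            if (err)
                    return err;

            err = ex_assign_beacon(dev);
            if (err)
                    goto release_chan;

            err = ex_driver_start(dev);
            if (err)
                    goto free_beacon;

            return 0;

    free_beacon:
            ex_free_beacon(dev);
    release_chan:
            ex_release_channel(dev);        /* the step the old code skipped */
            return err;
    }

Each label undoes exactly the steps that had already succeeded, so adding a new setup step later only means adding one matching label.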
@@ -1090,8 +1093,6 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) | |||
1090 | kfree(sdata->u.ap.next_beacon); | 1093 | kfree(sdata->u.ap.next_beacon); |
1091 | sdata->u.ap.next_beacon = NULL; | 1094 | sdata->u.ap.next_beacon = NULL; |
1092 | 1095 | ||
1093 | cancel_work_sync(&sdata->u.ap.request_smps_work); | ||
1094 | |||
1095 | /* turn off carrier for this interface and dependent VLANs */ | 1096 | /* turn off carrier for this interface and dependent VLANs */ |
1096 | list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) | 1097 | list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) |
1097 | netif_carrier_off(vlan->dev); | 1098 | netif_carrier_off(vlan->dev); |
@@ -1103,6 +1104,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) | |||
1103 | kfree_rcu(old_beacon, rcu_head); | 1104 | kfree_rcu(old_beacon, rcu_head); |
1104 | if (old_probe_resp) | 1105 | if (old_probe_resp) |
1105 | kfree_rcu(old_probe_resp, rcu_head); | 1106 | kfree_rcu(old_probe_resp, rcu_head); |
1107 | sdata->u.ap.driver_smps_mode = IEEE80211_SMPS_OFF; | ||
1106 | 1108 | ||
1107 | __sta_info_flush(sdata, true); | 1109 | __sta_info_flush(sdata, true); |
1108 | ieee80211_free_keys(sdata, true); | 1110 | ieee80211_free_keys(sdata, true); |
@@ -2638,6 +2640,24 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local, | |||
2638 | INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work); | 2640 | INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work); |
2639 | INIT_LIST_HEAD(&roc->dependents); | 2641 | INIT_LIST_HEAD(&roc->dependents); |
2640 | 2642 | ||
2643 | /* | ||
2644 | * cookie is either the roc cookie (for normal roc) | ||
2645 | * or the SKB (for mgmt TX) | ||
2646 | */ | ||
2647 | if (!txskb) { | ||
2648 | /* local->mtx protects this */ | ||
2649 | local->roc_cookie_counter++; | ||
2650 | roc->cookie = local->roc_cookie_counter; | ||
2651 | /* wow, you wrapped 64 bits ... more likely a bug */ | ||
2652 | if (WARN_ON(roc->cookie == 0)) { | ||
2653 | roc->cookie = 1; | ||
2654 | local->roc_cookie_counter++; | ||
2655 | } | ||
2656 | *cookie = roc->cookie; | ||
2657 | } else { | ||
2658 | *cookie = (unsigned long)txskb; | ||
2659 | } | ||
2660 | |||
2641 | /* if there's one pending or we're scanning, queue this one */ | 2661 | /* if there's one pending or we're scanning, queue this one */ |
2642 | if (!list_empty(&local->roc_list) || | 2662 | if (!list_empty(&local->roc_list) || |
2643 | local->scanning || local->radar_detect_enabled) | 2663 | local->scanning || local->radar_detect_enabled) |
@@ -2772,24 +2792,6 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local, | |||
2772 | if (!queued) | 2792 | if (!queued) |
2773 | list_add_tail(&roc->list, &local->roc_list); | 2793 | list_add_tail(&roc->list, &local->roc_list); |
2774 | 2794 | ||
2775 | /* | ||
2776 | * cookie is either the roc cookie (for normal roc) | ||
2777 | * or the SKB (for mgmt TX) | ||
2778 | */ | ||
2779 | if (!txskb) { | ||
2780 | /* local->mtx protects this */ | ||
2781 | local->roc_cookie_counter++; | ||
2782 | roc->cookie = local->roc_cookie_counter; | ||
2783 | /* wow, you wrapped 64 bits ... more likely a bug */ | ||
2784 | if (WARN_ON(roc->cookie == 0)) { | ||
2785 | roc->cookie = 1; | ||
2786 | local->roc_cookie_counter++; | ||
2787 | } | ||
2788 | *cookie = roc->cookie; | ||
2789 | } else { | ||
2790 | *cookie = (unsigned long)txskb; | ||
2791 | } | ||
2792 | |||
2793 | return 0; | 2795 | return 0; |
2794 | } | 2796 | } |
2795 | 2797 | ||
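The roc-cookie move above is an ordering fix: the caller has to receive its cookie before the request becomes reachable by the code that can start and complete it, otherwise a fast completion is reported with a cookie the caller has never seen. A small sketch of that publish-last rule with a hypothetical request type (the counter is assumed to be serialized by the caller's lock, as local->mtx does in the hunk):

    #include <linux/list.h>
    #include <linux/types.h>

    struct ex_req {
            struct list_head        list;
            u64                     cookie;
    };

    static u64 ex_cookie_counter;           /* assumed protected by the caller's lock */

    static u64 ex_submit(struct list_head *queue, struct ex_req *req)
    {
            if (++ex_cookie_counter == 0)   /* 0 stays reserved as "invalid" */
                    ex_cookie_counter = 1;
            req->cookie = ex_cookie_counter;

            /* Publish last: once on the queue the request may run and
             * complete before this function even returns.
             */
            list_add_tail(&req->list, queue);
            return req->cookie;
    }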
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index fab7b91923e0..70dd013de836 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c | |||
@@ -466,7 +466,9 @@ void ieee80211_request_smps_ap_work(struct work_struct *work) | |||
466 | u.ap.request_smps_work); | 466 | u.ap.request_smps_work); |
467 | 467 | ||
468 | sdata_lock(sdata); | 468 | sdata_lock(sdata); |
469 | __ieee80211_request_smps_ap(sdata, sdata->u.ap.driver_smps_mode); | 469 | if (sdata_dereference(sdata->u.ap.beacon, sdata)) |
470 | __ieee80211_request_smps_ap(sdata, | ||
471 | sdata->u.ap.driver_smps_mode); | ||
470 | sdata_unlock(sdata); | 472 | sdata_unlock(sdata); |
471 | } | 473 | } |
472 | 474 | ||
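The ht.c hunk above makes the deferred SMPS work re-validate, under sdata_lock(), that the AP's beacon still exists before reconfiguring anything; the iface.c hunk further down additionally cancel_work_sync()s this work when an AP interface is stopped. Together they close the window where the work item runs against an already torn-down AP. A minimal sketch of the revalidate-under-the-teardown-lock pattern, with a hypothetical ex_ap type:

    #include <linux/kernel.h>
    #include <linux/lockdep.h>
    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <linux/workqueue.h>

    struct ex_beacon;

    struct ex_ap {
            struct mutex            lock;           /* also taken by the stop path */
            struct ex_beacon __rcu  *beacon;        /* NULL once the AP is stopped */
            struct work_struct      smps_work;
    };

    void ex_apply_smps(struct ex_ap *ap);

    static void ex_smps_work(struct work_struct *work)
    {
            struct ex_ap *ap = container_of(work, struct ex_ap, smps_work);

            mutex_lock(&ap->lock);
            if (rcu_dereference_protected(ap->beacon, lockdep_is_held(&ap->lock)))
                    ex_apply_smps(ap);      /* AP still running */
            mutex_unlock(&ap->lock);        /* otherwise the stale work does nothing */
    }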
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 771080ec7212..2796a198728f 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -695,12 +695,9 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata) | |||
695 | struct cfg80211_bss *cbss; | 695 | struct cfg80211_bss *cbss; |
696 | struct beacon_data *presp; | 696 | struct beacon_data *presp; |
697 | struct sta_info *sta; | 697 | struct sta_info *sta; |
698 | int active_ibss; | ||
699 | u16 capability; | 698 | u16 capability; |
700 | 699 | ||
701 | active_ibss = ieee80211_sta_active_ibss(sdata); | 700 | if (!is_zero_ether_addr(ifibss->bssid)) { |
702 | |||
703 | if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { | ||
704 | capability = WLAN_CAPABILITY_IBSS; | 701 | capability = WLAN_CAPABILITY_IBSS; |
705 | 702 | ||
706 | if (ifibss->privacy) | 703 | if (ifibss->privacy) |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 3dfd20a453ab..d6d1f1df9119 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -418,20 +418,24 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local) | |||
418 | return ret; | 418 | return ret; |
419 | } | 419 | } |
420 | 420 | ||
421 | mutex_lock(&local->iflist_mtx); | ||
422 | rcu_assign_pointer(local->monitor_sdata, sdata); | ||
423 | mutex_unlock(&local->iflist_mtx); | ||
424 | |||
421 | mutex_lock(&local->mtx); | 425 | mutex_lock(&local->mtx); |
422 | ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, | 426 | ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, |
423 | IEEE80211_CHANCTX_EXCLUSIVE); | 427 | IEEE80211_CHANCTX_EXCLUSIVE); |
424 | mutex_unlock(&local->mtx); | 428 | mutex_unlock(&local->mtx); |
425 | if (ret) { | 429 | if (ret) { |
430 | mutex_lock(&local->iflist_mtx); | ||
431 | rcu_assign_pointer(local->monitor_sdata, NULL); | ||
432 | mutex_unlock(&local->iflist_mtx); | ||
433 | synchronize_net(); | ||
426 | drv_remove_interface(local, sdata); | 434 | drv_remove_interface(local, sdata); |
427 | kfree(sdata); | 435 | kfree(sdata); |
428 | return ret; | 436 | return ret; |
429 | } | 437 | } |
430 | 438 | ||
431 | mutex_lock(&local->iflist_mtx); | ||
432 | rcu_assign_pointer(local->monitor_sdata, sdata); | ||
433 | mutex_unlock(&local->iflist_mtx); | ||
434 | |||
435 | return 0; | 439 | return 0; |
436 | } | 440 | } |
437 | 441 | ||
@@ -770,12 +774,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
770 | 774 | ||
771 | ieee80211_roc_purge(local, sdata); | 775 | ieee80211_roc_purge(local, sdata); |
772 | 776 | ||
773 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | 777 | switch (sdata->vif.type) { |
778 | case NL80211_IFTYPE_STATION: | ||
774 | ieee80211_mgd_stop(sdata); | 779 | ieee80211_mgd_stop(sdata); |
775 | 780 | break; | |
776 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) | 781 | case NL80211_IFTYPE_ADHOC: |
777 | ieee80211_ibss_stop(sdata); | 782 | ieee80211_ibss_stop(sdata); |
778 | 783 | break; | |
784 | case NL80211_IFTYPE_AP: | ||
785 | cancel_work_sync(&sdata->u.ap.request_smps_work); | ||
786 | break; | ||
787 | default: | ||
788 | break; | ||
789 | } | ||
779 | 790 | ||
780 | /* | 791 | /* |
781 | * Remove all stations associated with this interface. | 792 | * Remove all stations associated with this interface. |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 27c990bf2320..97a02d3f7d87 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -878,7 +878,7 @@ static int ieee80211_fragment(struct ieee80211_tx_data *tx, | |||
878 | } | 878 | } |
879 | 879 | ||
880 | /* adjust first fragment's length */ | 880 | /* adjust first fragment's length */ |
881 | skb->len = hdrlen + per_fragm; | 881 | skb_trim(skb, hdrlen + per_fragm); |
882 | return 0; | 882 | return 0; |
883 | } | 883 | } |
884 | 884 | ||
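The one-line tx.c change above matters because skb->len and the tail pointer must move together: skb_trim() updates both for a linear skb, while the old direct assignment changed only the length and left the tail pointing past the intended end of the first fragment. A tiny sketch of the safe way to shorten a linear skb:

    #include <linux/skbuff.h>

    /* Sketch: shrink a linear skb; skb_trim() keeps len and the tail
     * pointer consistent, unlike a bare "skb->len = new_len".
     */
    static void ex_shorten(struct sk_buff *skb, unsigned int new_len)
    {
            if (new_len < skb->len)
                    skb_trim(skb, new_len);
    }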
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index c37467562fd0..e9410d17619d 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -513,7 +513,6 @@ config NFT_QUEUE | |||
513 | 513 | ||
514 | config NFT_REJECT | 514 | config NFT_REJECT |
515 | depends on NF_TABLES | 515 | depends on NF_TABLES |
516 | depends on NF_TABLES_IPV6 || !NF_TABLES_IPV6 | ||
517 | default m if NETFILTER_ADVANCED=n | 516 | default m if NETFILTER_ADVANCED=n |
518 | tristate "Netfilter nf_tables reject support" | 517 | tristate "Netfilter nf_tables reject support" |
519 | help | 518 | help |
@@ -521,6 +520,11 @@ config NFT_REJECT | |||
521 | explicitly deny and notify via TCP reset/ICMP informational errors | 520 | explicitly deny and notify via TCP reset/ICMP informational errors |
522 | unallowed traffic. | 521 | unallowed traffic. |
523 | 522 | ||
523 | config NFT_REJECT_INET | ||
524 | depends on NF_TABLES_INET | ||
525 | default NFT_REJECT | ||
526 | tristate | ||
527 | |||
524 | config NFT_COMPAT | 528 | config NFT_COMPAT |
525 | depends on NF_TABLES | 529 | depends on NF_TABLES |
526 | depends on NETFILTER_XTABLES | 530 | depends on NETFILTER_XTABLES |
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index ee9c4de5f8ed..bffdad774da7 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile | |||
@@ -79,6 +79,7 @@ obj-$(CONFIG_NFT_LIMIT) += nft_limit.o | |||
79 | obj-$(CONFIG_NFT_NAT) += nft_nat.o | 79 | obj-$(CONFIG_NFT_NAT) += nft_nat.o |
80 | obj-$(CONFIG_NFT_QUEUE) += nft_queue.o | 80 | obj-$(CONFIG_NFT_QUEUE) += nft_queue.o |
81 | obj-$(CONFIG_NFT_REJECT) += nft_reject.o | 81 | obj-$(CONFIG_NFT_REJECT) += nft_reject.o |
82 | obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o | ||
82 | obj-$(CONFIG_NFT_RBTREE) += nft_rbtree.o | 83 | obj-$(CONFIG_NFT_RBTREE) += nft_rbtree.o |
83 | obj-$(CONFIG_NFT_HASH) += nft_hash.o | 84 | obj-$(CONFIG_NFT_HASH) += nft_hash.o |
84 | obj-$(CONFIG_NFT_COUNTER) += nft_counter.o | 85 | obj-$(CONFIG_NFT_COUNTER) += nft_counter.o |
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 59a1a85bcb3e..a8eb0a89326a 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c | |||
@@ -871,11 +871,11 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, | |||
871 | cp->protocol = p->protocol; | 871 | cp->protocol = p->protocol; |
872 | ip_vs_addr_set(p->af, &cp->caddr, p->caddr); | 872 | ip_vs_addr_set(p->af, &cp->caddr, p->caddr); |
873 | cp->cport = p->cport; | 873 | cp->cport = p->cport; |
874 | ip_vs_addr_set(p->af, &cp->vaddr, p->vaddr); | 874 | /* proto should only be IPPROTO_IP if p->vaddr is a fwmark */ |
875 | cp->vport = p->vport; | ||
876 | /* proto should only be IPPROTO_IP if d_addr is a fwmark */ | ||
877 | ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af, | 875 | ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af, |
878 | &cp->daddr, daddr); | 876 | &cp->vaddr, p->vaddr); |
877 | cp->vport = p->vport; | ||
878 | ip_vs_addr_set(p->af, &cp->daddr, daddr); | ||
879 | cp->dport = dport; | 879 | cp->dport = dport; |
880 | cp->flags = flags; | 880 | cp->flags = flags; |
881 | cp->fwmark = fwmark; | 881 | cp->fwmark = fwmark; |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 8824ed0ccc9c..356bef519fe5 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -312,6 +312,21 @@ static void death_by_timeout(unsigned long ul_conntrack) | |||
312 | nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0); | 312 | nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0); |
313 | } | 313 | } |
314 | 314 | ||
315 | static inline bool | ||
316 | nf_ct_key_equal(struct nf_conntrack_tuple_hash *h, | ||
317 | const struct nf_conntrack_tuple *tuple, | ||
318 | u16 zone) | ||
319 | { | ||
320 | struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); | ||
321 | |||
322 | /* A conntrack can be recreated with the equal tuple, | ||
323 | * so we need to check that the conntrack is confirmed | ||
324 | */ | ||
325 | return nf_ct_tuple_equal(tuple, &h->tuple) && | ||
326 | nf_ct_zone(ct) == zone && | ||
327 | nf_ct_is_confirmed(ct); | ||
328 | } | ||
329 | |||
315 | /* | 330 | /* |
316 | * Warning : | 331 | * Warning : |
317 | * - Caller must take a reference on returned object | 332 | * - Caller must take a reference on returned object |
@@ -333,8 +348,7 @@ ____nf_conntrack_find(struct net *net, u16 zone, | |||
333 | local_bh_disable(); | 348 | local_bh_disable(); |
334 | begin: | 349 | begin: |
335 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { | 350 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { |
336 | if (nf_ct_tuple_equal(tuple, &h->tuple) && | 351 | if (nf_ct_key_equal(h, tuple, zone)) { |
337 | nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) { | ||
338 | NF_CT_STAT_INC(net, found); | 352 | NF_CT_STAT_INC(net, found); |
339 | local_bh_enable(); | 353 | local_bh_enable(); |
340 | return h; | 354 | return h; |
@@ -372,8 +386,7 @@ begin: | |||
372 | !atomic_inc_not_zero(&ct->ct_general.use))) | 386 | !atomic_inc_not_zero(&ct->ct_general.use))) |
373 | h = NULL; | 387 | h = NULL; |
374 | else { | 388 | else { |
375 | if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) || | 389 | if (unlikely(!nf_ct_key_equal(h, tuple, zone))) { |
376 | nf_ct_zone(ct) != zone)) { | ||
377 | nf_ct_put(ct); | 390 | nf_ct_put(ct); |
378 | goto begin; | 391 | goto begin; |
379 | } | 392 | } |
@@ -435,7 +448,9 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct) | |||
435 | goto out; | 448 | goto out; |
436 | 449 | ||
437 | add_timer(&ct->timeout); | 450 | add_timer(&ct->timeout); |
438 | nf_conntrack_get(&ct->ct_general); | 451 | smp_wmb(); |
452 | /* The caller holds a reference to this object */ | ||
453 | atomic_set(&ct->ct_general.use, 2); | ||
439 | __nf_conntrack_hash_insert(ct, hash, repl_hash); | 454 | __nf_conntrack_hash_insert(ct, hash, repl_hash); |
440 | NF_CT_STAT_INC(net, insert); | 455 | NF_CT_STAT_INC(net, insert); |
441 | spin_unlock_bh(&nf_conntrack_lock); | 456 | spin_unlock_bh(&nf_conntrack_lock); |
@@ -449,6 +464,21 @@ out: | |||
449 | } | 464 | } |
450 | EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); | 465 | EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); |
451 | 466 | ||
467 | /* deletion from this larval template list happens via nf_ct_put() */ | ||
468 | void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl) | ||
469 | { | ||
470 | __set_bit(IPS_TEMPLATE_BIT, &tmpl->status); | ||
471 | __set_bit(IPS_CONFIRMED_BIT, &tmpl->status); | ||
472 | nf_conntrack_get(&tmpl->ct_general); | ||
473 | |||
474 | spin_lock_bh(&nf_conntrack_lock); | ||
475 | /* Overload tuple linked list to put us in template list. */ | ||
476 | hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, | ||
477 | &net->ct.tmpl); | ||
478 | spin_unlock_bh(&nf_conntrack_lock); | ||
479 | } | ||
480 | EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert); | ||
481 | |||
452 | /* Confirm a connection given skb; places it in hash table */ | 482 | /* Confirm a connection given skb; places it in hash table */ |
453 | int | 483 | int |
454 | __nf_conntrack_confirm(struct sk_buff *skb) | 484 | __nf_conntrack_confirm(struct sk_buff *skb) |
@@ -720,11 +750,10 @@ __nf_conntrack_alloc(struct net *net, u16 zone, | |||
720 | nf_ct_zone->id = zone; | 750 | nf_ct_zone->id = zone; |
721 | } | 751 | } |
722 | #endif | 752 | #endif |
723 | /* | 753 | /* Because we use RCU lookups, we set ct_general.use to zero before |
724 | * changes to lookup keys must be done before setting refcnt to 1 | 754 | * this is inserted in any list. |
725 | */ | 755 | */ |
726 | smp_wmb(); | 756 | atomic_set(&ct->ct_general.use, 0); |
727 | atomic_set(&ct->ct_general.use, 1); | ||
728 | return ct; | 757 | return ct; |
729 | 758 | ||
730 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 759 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
@@ -748,6 +777,11 @@ void nf_conntrack_free(struct nf_conn *ct) | |||
748 | { | 777 | { |
749 | struct net *net = nf_ct_net(ct); | 778 | struct net *net = nf_ct_net(ct); |
750 | 779 | ||
780 | /* A freed object has refcnt == 0, that's | ||
781 | * the golden rule for SLAB_DESTROY_BY_RCU | ||
782 | */ | ||
783 | NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0); | ||
784 | |||
751 | nf_ct_ext_destroy(ct); | 785 | nf_ct_ext_destroy(ct); |
752 | nf_ct_ext_free(ct); | 786 | nf_ct_ext_free(ct); |
753 | kmem_cache_free(net->ct.nf_conntrack_cachep, ct); | 787 | kmem_cache_free(net->ct.nf_conntrack_cachep, ct); |
@@ -843,6 +877,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, | |||
843 | NF_CT_STAT_INC(net, new); | 877 | NF_CT_STAT_INC(net, new); |
844 | } | 878 | } |
845 | 879 | ||
880 | /* Now it is inserted into the unconfirmed list, bump refcount */ | ||
881 | nf_conntrack_get(&ct->ct_general); | ||
882 | |||
846 | /* Overload tuple linked list to put us in unconfirmed list. */ | 883 | /* Overload tuple linked list to put us in unconfirmed list. */ |
847 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, | 884 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, |
848 | &net->ct.unconfirmed); | 885 | &net->ct.unconfirmed); |
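The conntrack hunks above tighten the SLAB_DESTROY_BY_RCU rules: a freshly allocated conntrack now starts with a refcount of 0 and only gains references when it actually becomes reachable (unconfirmed list, hash insert, the new template list helper), and every RCU lookup goes through nf_ct_key_equal(), which also requires the confirmed bit and is applied again after the refcount has been taken. A generic sketch of that lookup discipline on a hypothetical object type:

    #include <linux/atomic.h>
    #include <linux/rculist.h>
    #include <linux/types.h>

    struct ex_obj {
            struct hlist_node       node;
            atomic_t                refcnt;         /* 0 means "being freed" */
            u32                     key;
            bool                    confirmed;      /* set once fully inserted */
    };

    void ex_put(struct ex_obj *obj);

    static struct ex_obj *ex_lookup(struct hlist_head *bucket, u32 key)
    {
            struct ex_obj *obj;

            rcu_read_lock();
    begin:
            hlist_for_each_entry_rcu(obj, bucket, node) {
                    if (obj->key != key || !obj->confirmed)
                            continue;
                    if (!atomic_inc_not_zero(&obj->refcnt))
                            continue;               /* object on its way out */
                    /* Type-stable memory: the slot may have been freed and
                     * reused between the match and the refcount grab, so
                     * check the key again and restart on a mismatch.
                     */
                    if (obj->key != key || !obj->confirmed) {
                            ex_put(obj);
                            goto begin;
                    }
                    break;
            }
            rcu_read_unlock();
            return obj;     /* NULL if not found, referenced object otherwise */
    }

The re-check after atomic_inc_not_zero() is what makes recycled slab memory safe to look up under RCU; matching the key once is not enough on its own.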
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index 9858e3e51a3a..52e20c9a46a5 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c | |||
@@ -363,9 +363,8 @@ static int __net_init synproxy_net_init(struct net *net) | |||
363 | goto err2; | 363 | goto err2; |
364 | if (!nfct_synproxy_ext_add(ct)) | 364 | if (!nfct_synproxy_ext_add(ct)) |
365 | goto err2; | 365 | goto err2; |
366 | __set_bit(IPS_TEMPLATE_BIT, &ct->status); | ||
367 | __set_bit(IPS_CONFIRMED_BIT, &ct->status); | ||
368 | 366 | ||
367 | nf_conntrack_tmpl_insert(net, ct); | ||
369 | snet->tmpl = ct; | 368 | snet->tmpl = ct; |
370 | 369 | ||
371 | snet->stats = alloc_percpu(struct synproxy_stats); | 370 | snet->stats = alloc_percpu(struct synproxy_stats); |
@@ -390,7 +389,7 @@ static void __net_exit synproxy_net_exit(struct net *net) | |||
390 | { | 389 | { |
391 | struct synproxy_net *snet = synproxy_pernet(net); | 390 | struct synproxy_net *snet = synproxy_pernet(net); |
392 | 391 | ||
393 | nf_conntrack_free(snet->tmpl); | 392 | nf_ct_put(snet->tmpl); |
394 | synproxy_proc_exit(net); | 393 | synproxy_proc_exit(net); |
395 | free_percpu(snet->stats); | 394 | free_percpu(snet->stats); |
396 | } | 395 | } |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 117bbaaddde6..adce01e8bb57 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -1008,10 +1008,8 @@ notify: | |||
1008 | return 0; | 1008 | return 0; |
1009 | } | 1009 | } |
1010 | 1010 | ||
1011 | static void nf_tables_rcu_chain_destroy(struct rcu_head *head) | 1011 | static void nf_tables_chain_destroy(struct nft_chain *chain) |
1012 | { | 1012 | { |
1013 | struct nft_chain *chain = container_of(head, struct nft_chain, rcu_head); | ||
1014 | |||
1015 | BUG_ON(chain->use > 0); | 1013 | BUG_ON(chain->use > 0); |
1016 | 1014 | ||
1017 | if (chain->flags & NFT_BASE_CHAIN) { | 1015 | if (chain->flags & NFT_BASE_CHAIN) { |
@@ -1045,7 +1043,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb, | |||
1045 | if (IS_ERR(chain)) | 1043 | if (IS_ERR(chain)) |
1046 | return PTR_ERR(chain); | 1044 | return PTR_ERR(chain); |
1047 | 1045 | ||
1048 | if (!list_empty(&chain->rules)) | 1046 | if (!list_empty(&chain->rules) || chain->use > 0) |
1049 | return -EBUSY; | 1047 | return -EBUSY; |
1050 | 1048 | ||
1051 | list_del(&chain->list); | 1049 | list_del(&chain->list); |
@@ -1059,7 +1057,9 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb, | |||
1059 | family); | 1057 | family); |
1060 | 1058 | ||
1061 | /* Make sure all rule references are gone before this is released */ | 1059 | /* Make sure all rule references are gone before this is released */ |
1062 | call_rcu(&chain->rcu_head, nf_tables_rcu_chain_destroy); | 1060 | synchronize_rcu(); |
1061 | |||
1062 | nf_tables_chain_destroy(chain); | ||
1063 | return 0; | 1063 | return 0; |
1064 | } | 1064 | } |
1065 | 1065 | ||
@@ -1114,35 +1114,45 @@ void nft_unregister_expr(struct nft_expr_type *type) | |||
1114 | } | 1114 | } |
1115 | EXPORT_SYMBOL_GPL(nft_unregister_expr); | 1115 | EXPORT_SYMBOL_GPL(nft_unregister_expr); |
1116 | 1116 | ||
1117 | static const struct nft_expr_type *__nft_expr_type_get(struct nlattr *nla) | 1117 | static const struct nft_expr_type *__nft_expr_type_get(u8 family, |
1118 | struct nlattr *nla) | ||
1118 | { | 1119 | { |
1119 | const struct nft_expr_type *type; | 1120 | const struct nft_expr_type *type; |
1120 | 1121 | ||
1121 | list_for_each_entry(type, &nf_tables_expressions, list) { | 1122 | list_for_each_entry(type, &nf_tables_expressions, list) { |
1122 | if (!nla_strcmp(nla, type->name)) | 1123 | if (!nla_strcmp(nla, type->name) && |
1124 | (!type->family || type->family == family)) | ||
1123 | return type; | 1125 | return type; |
1124 | } | 1126 | } |
1125 | return NULL; | 1127 | return NULL; |
1126 | } | 1128 | } |
1127 | 1129 | ||
1128 | static const struct nft_expr_type *nft_expr_type_get(struct nlattr *nla) | 1130 | static const struct nft_expr_type *nft_expr_type_get(u8 family, |
1131 | struct nlattr *nla) | ||
1129 | { | 1132 | { |
1130 | const struct nft_expr_type *type; | 1133 | const struct nft_expr_type *type; |
1131 | 1134 | ||
1132 | if (nla == NULL) | 1135 | if (nla == NULL) |
1133 | return ERR_PTR(-EINVAL); | 1136 | return ERR_PTR(-EINVAL); |
1134 | 1137 | ||
1135 | type = __nft_expr_type_get(nla); | 1138 | type = __nft_expr_type_get(family, nla); |
1136 | if (type != NULL && try_module_get(type->owner)) | 1139 | if (type != NULL && try_module_get(type->owner)) |
1137 | return type; | 1140 | return type; |
1138 | 1141 | ||
1139 | #ifdef CONFIG_MODULES | 1142 | #ifdef CONFIG_MODULES |
1140 | if (type == NULL) { | 1143 | if (type == NULL) { |
1141 | nfnl_unlock(NFNL_SUBSYS_NFTABLES); | 1144 | nfnl_unlock(NFNL_SUBSYS_NFTABLES); |
1145 | request_module("nft-expr-%u-%.*s", family, | ||
1146 | nla_len(nla), (char *)nla_data(nla)); | ||
1147 | nfnl_lock(NFNL_SUBSYS_NFTABLES); | ||
1148 | if (__nft_expr_type_get(family, nla)) | ||
1149 | return ERR_PTR(-EAGAIN); | ||
1150 | |||
1151 | nfnl_unlock(NFNL_SUBSYS_NFTABLES); | ||
1142 | request_module("nft-expr-%.*s", | 1152 | request_module("nft-expr-%.*s", |
1143 | nla_len(nla), (char *)nla_data(nla)); | 1153 | nla_len(nla), (char *)nla_data(nla)); |
1144 | nfnl_lock(NFNL_SUBSYS_NFTABLES); | 1154 | nfnl_lock(NFNL_SUBSYS_NFTABLES); |
1145 | if (__nft_expr_type_get(nla)) | 1155 | if (__nft_expr_type_get(family, nla)) |
1146 | return ERR_PTR(-EAGAIN); | 1156 | return ERR_PTR(-EAGAIN); |
1147 | } | 1157 | } |
1148 | #endif | 1158 | #endif |
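The lookup change above makes expression resolution family-aware: a name now matches only a type registered for this family or for no family at all (family 0), and the autoload path first requests the family-qualified module alias ("nft-expr-%u-%.*s", matched by MODULE_ALIAS_NFT_AF_EXPR() in the new nft_reject_ipv4/ipv6 modules above) before falling back to the generic "nft-expr-%.*s" alias. A minimal sketch of the in-kernel lookup half over a hypothetical registry:

    #include <linux/list.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct ex_type {
            struct list_head        list;
            const char              *name;
            u8                      family;         /* 0 == usable by any family */
    };

    static LIST_HEAD(ex_types);

    /* Sketch: accept a family-specific or a family-agnostic registration. */
    static const struct ex_type *ex_find(u8 family, const char *name)
    {
            const struct ex_type *t;

            list_for_each_entry(t, &ex_types, list) {
                    if (strcmp(t->name, name))
                            continue;
                    if (!t->family || t->family == family)
                            return t;
            }
            return NULL;    /* caller may now try the request_module() fallbacks */
    }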
@@ -1193,7 +1203,7 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx, | |||
1193 | if (err < 0) | 1203 | if (err < 0) |
1194 | return err; | 1204 | return err; |
1195 | 1205 | ||
1196 | type = nft_expr_type_get(tb[NFTA_EXPR_NAME]); | 1206 | type = nft_expr_type_get(ctx->afi->family, tb[NFTA_EXPR_NAME]); |
1197 | if (IS_ERR(type)) | 1207 | if (IS_ERR(type)) |
1198 | return PTR_ERR(type); | 1208 | return PTR_ERR(type); |
1199 | 1209 | ||
@@ -1521,9 +1531,8 @@ err: | |||
1521 | return err; | 1531 | return err; |
1522 | } | 1532 | } |
1523 | 1533 | ||
1524 | static void nf_tables_rcu_rule_destroy(struct rcu_head *head) | 1534 | static void nf_tables_rule_destroy(struct nft_rule *rule) |
1525 | { | 1535 | { |
1526 | struct nft_rule *rule = container_of(head, struct nft_rule, rcu_head); | ||
1527 | struct nft_expr *expr; | 1536 | struct nft_expr *expr; |
1528 | 1537 | ||
1529 | /* | 1538 | /* |
@@ -1538,11 +1547,6 @@ static void nf_tables_rcu_rule_destroy(struct rcu_head *head) | |||
1538 | kfree(rule); | 1547 | kfree(rule); |
1539 | } | 1548 | } |
1540 | 1549 | ||
1541 | static void nf_tables_rule_destroy(struct nft_rule *rule) | ||
1542 | { | ||
1543 | call_rcu(&rule->rcu_head, nf_tables_rcu_rule_destroy); | ||
1544 | } | ||
1545 | |||
1546 | #define NFT_RULE_MAXEXPRS 128 | 1550 | #define NFT_RULE_MAXEXPRS 128 |
1547 | 1551 | ||
1548 | static struct nft_expr_info *info; | 1552 | static struct nft_expr_info *info; |
@@ -1809,9 +1813,6 @@ static int nf_tables_commit(struct sk_buff *skb) | |||
1809 | synchronize_rcu(); | 1813 | synchronize_rcu(); |
1810 | 1814 | ||
1811 | list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) { | 1815 | list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) { |
1812 | /* Delete this rule from the dirty list */ | ||
1813 | list_del(&rupd->list); | ||
1814 | |||
1815 | /* This rule was inactive in the past and just became active. | 1816 | /* This rule was inactive in the past and just became active. |
1816 | * Clear the next bit of the genmask since its meaning has | 1817 | * Clear the next bit of the genmask since its meaning has |
1817 | * changed, now it is the future. | 1818 | * changed, now it is the future. |
@@ -1822,6 +1823,7 @@ static int nf_tables_commit(struct sk_buff *skb) | |||
1822 | rupd->chain, rupd->rule, | 1823 | rupd->chain, rupd->rule, |
1823 | NFT_MSG_NEWRULE, 0, | 1824 | NFT_MSG_NEWRULE, 0, |
1824 | rupd->family); | 1825 | rupd->family); |
1826 | list_del(&rupd->list); | ||
1825 | kfree(rupd); | 1827 | kfree(rupd); |
1826 | continue; | 1828 | continue; |
1827 | } | 1829 | } |
@@ -1831,7 +1833,15 @@ static int nf_tables_commit(struct sk_buff *skb) | |||
1831 | nf_tables_rule_notify(skb, rupd->nlh, rupd->table, rupd->chain, | 1833 | nf_tables_rule_notify(skb, rupd->nlh, rupd->table, rupd->chain, |
1832 | rupd->rule, NFT_MSG_DELRULE, 0, | 1834 | rupd->rule, NFT_MSG_DELRULE, 0, |
1833 | rupd->family); | 1835 | rupd->family); |
1836 | } | ||
1837 | |||
1838 | /* Make sure we don't see any packet traversing old rules */ | ||
1839 | synchronize_rcu(); | ||
1840 | |||
1841 | /* Now we can safely release unused old rules */ | ||
1842 | list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) { | ||
1834 | nf_tables_rule_destroy(rupd->rule); | 1843 | nf_tables_rule_destroy(rupd->rule); |
1844 | list_del(&rupd->list); | ||
1835 | kfree(rupd); | 1845 | kfree(rupd); |
1836 | } | 1846 | } |
1837 | 1847 | ||
@@ -1844,20 +1854,26 @@ static int nf_tables_abort(struct sk_buff *skb) | |||
1844 | struct nft_rule_trans *rupd, *tmp; | 1854 | struct nft_rule_trans *rupd, *tmp; |
1845 | 1855 | ||
1846 | list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) { | 1856 | list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) { |
1847 | /* Delete all rules from the dirty list */ | ||
1848 | list_del(&rupd->list); | ||
1849 | |||
1850 | if (!nft_rule_is_active_next(net, rupd->rule)) { | 1857 | if (!nft_rule_is_active_next(net, rupd->rule)) { |
1851 | nft_rule_clear(net, rupd->rule); | 1858 | nft_rule_clear(net, rupd->rule); |
1859 | list_del(&rupd->list); | ||
1852 | kfree(rupd); | 1860 | kfree(rupd); |
1853 | continue; | 1861 | continue; |
1854 | } | 1862 | } |
1855 | 1863 | ||
1856 | /* This rule is inactive, get rid of it */ | 1864 | /* This rule is inactive, get rid of it */ |
1857 | list_del_rcu(&rupd->rule->list); | 1865 | list_del_rcu(&rupd->rule->list); |
1866 | } | ||
1867 | |||
1868 | /* Make sure we don't see any packet accessing aborted rules */ | ||
1869 | synchronize_rcu(); | ||
1870 | |||
1871 | list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) { | ||
1858 | nf_tables_rule_destroy(rupd->rule); | 1872 | nf_tables_rule_destroy(rupd->rule); |
1873 | list_del(&rupd->list); | ||
1859 | kfree(rupd); | 1874 | kfree(rupd); |
1860 | } | 1875 | } |
1876 | |||
1861 | return 0; | 1877 | return 0; |
1862 | } | 1878 | } |
1863 | 1879 | ||
@@ -1943,6 +1959,9 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, | |||
1943 | } | 1959 | } |
1944 | 1960 | ||
1945 | if (nla[NFTA_SET_TABLE] != NULL) { | 1961 | if (nla[NFTA_SET_TABLE] != NULL) { |
1962 | if (afi == NULL) | ||
1963 | return -EAFNOSUPPORT; | ||
1964 | |||
1946 | table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]); | 1965 | table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]); |
1947 | if (IS_ERR(table)) | 1966 | if (IS_ERR(table)) |
1948 | return PTR_ERR(table); | 1967 | return PTR_ERR(table); |
@@ -1989,13 +2008,13 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set, | |||
1989 | 2008 | ||
1990 | if (!sscanf(i->name, name, &tmp)) | 2009 | if (!sscanf(i->name, name, &tmp)) |
1991 | continue; | 2010 | continue; |
1992 | if (tmp < 0 || tmp > BITS_PER_LONG * PAGE_SIZE) | 2011 | if (tmp < 0 || tmp >= BITS_PER_BYTE * PAGE_SIZE) |
1993 | continue; | 2012 | continue; |
1994 | 2013 | ||
1995 | set_bit(tmp, inuse); | 2014 | set_bit(tmp, inuse); |
1996 | } | 2015 | } |
1997 | 2016 | ||
1998 | n = find_first_zero_bit(inuse, BITS_PER_LONG * PAGE_SIZE); | 2017 | n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE); |
1999 | free_page((unsigned long)inuse); | 2018 | free_page((unsigned long)inuse); |
2000 | } | 2019 | } |
2001 | 2020 | ||
@@ -2428,6 +2447,8 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, | |||
2428 | struct nft_ctx ctx; | 2447 | struct nft_ctx ctx; |
2429 | int err; | 2448 | int err; |
2430 | 2449 | ||
2450 | if (nfmsg->nfgen_family == NFPROTO_UNSPEC) | ||
2451 | return -EAFNOSUPPORT; | ||
2431 | if (nla[NFTA_SET_TABLE] == NULL) | 2452 | if (nla[NFTA_SET_TABLE] == NULL) |
2432 | return -EINVAL; | 2453 | return -EINVAL; |
2433 | 2454 | ||
@@ -2435,9 +2456,6 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, | |||
2435 | if (err < 0) | 2456 | if (err < 0) |
2436 | return err; | 2457 | return err; |
2437 | 2458 | ||
2438 | if (nfmsg->nfgen_family == NFPROTO_UNSPEC) | ||
2439 | return -EAFNOSUPPORT; | ||
2440 | |||
2441 | set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); | 2459 | set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); |
2442 | if (IS_ERR(set)) | 2460 | if (IS_ERR(set)) |
2443 | return PTR_ERR(set); | 2461 | return PTR_ERR(set); |
@@ -2723,6 +2741,9 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set, | |||
2723 | if (nla[NFTA_SET_ELEM_DATA] == NULL && | 2741 | if (nla[NFTA_SET_ELEM_DATA] == NULL && |
2724 | !(elem.flags & NFT_SET_ELEM_INTERVAL_END)) | 2742 | !(elem.flags & NFT_SET_ELEM_INTERVAL_END)) |
2725 | return -EINVAL; | 2743 | return -EINVAL; |
2744 | if (nla[NFTA_SET_ELEM_DATA] != NULL && | ||
2745 | elem.flags & NFT_SET_ELEM_INTERVAL_END) | ||
2746 | return -EINVAL; | ||
2726 | } else { | 2747 | } else { |
2727 | if (nla[NFTA_SET_ELEM_DATA] != NULL) | 2748 | if (nla[NFTA_SET_ELEM_DATA] != NULL) |
2728 | return -EINVAL; | 2749 | return -EINVAL; |
@@ -2977,6 +2998,9 @@ static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx, | |||
2977 | const struct nft_set_iter *iter, | 2998 | const struct nft_set_iter *iter, |
2978 | const struct nft_set_elem *elem) | 2999 | const struct nft_set_elem *elem) |
2979 | { | 3000 | { |
3001 | if (elem->flags & NFT_SET_ELEM_INTERVAL_END) | ||
3002 | return 0; | ||
3003 | |||
2980 | switch (elem->data.verdict) { | 3004 | switch (elem->data.verdict) { |
2981 | case NFT_JUMP: | 3005 | case NFT_JUMP: |
2982 | case NFT_GOTO: | 3006 | case NFT_GOTO: |
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 0d879fcb8763..90998a6ff8b9 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c | |||
@@ -103,9 +103,9 @@ static struct nf_loginfo trace_loginfo = { | |||
103 | }, | 103 | }, |
104 | }; | 104 | }; |
105 | 105 | ||
106 | static inline void nft_trace_packet(const struct nft_pktinfo *pkt, | 106 | static void nft_trace_packet(const struct nft_pktinfo *pkt, |
107 | const struct nft_chain *chain, | 107 | const struct nft_chain *chain, |
108 | int rulenum, enum nft_trace type) | 108 | int rulenum, enum nft_trace type) |
109 | { | 109 | { |
110 | struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); | 110 | struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); |
111 | 111 | ||
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 917052e20602..46e275403838 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c | |||
@@ -226,6 +226,7 @@ static int nft_ct_init_validate_get(const struct nft_expr *expr, | |||
226 | if (tb[NFTA_CT_DIRECTION] != NULL) | 226 | if (tb[NFTA_CT_DIRECTION] != NULL) |
227 | return -EINVAL; | 227 | return -EINVAL; |
228 | break; | 228 | break; |
229 | case NFT_CT_L3PROTOCOL: | ||
229 | case NFT_CT_PROTOCOL: | 230 | case NFT_CT_PROTOCOL: |
230 | case NFT_CT_SRC: | 231 | case NFT_CT_SRC: |
231 | case NFT_CT_DST: | 232 | case NFT_CT_DST: |
@@ -311,8 +312,19 @@ static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
311 | goto nla_put_failure; | 312 | goto nla_put_failure; |
312 | if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key))) | 313 | if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key))) |
313 | goto nla_put_failure; | 314 | goto nla_put_failure; |
314 | if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir)) | 315 | |
315 | goto nla_put_failure; | 316 | switch (priv->key) { |
317 | case NFT_CT_PROTOCOL: | ||
318 | case NFT_CT_SRC: | ||
319 | case NFT_CT_DST: | ||
320 | case NFT_CT_PROTO_SRC: | ||
321 | case NFT_CT_PROTO_DST: | ||
322 | if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir)) | ||
323 | goto nla_put_failure; | ||
324 | default: | ||
325 | break; | ||
326 | } | ||
327 | |||
316 | return 0; | 328 | return 0; |
317 | 329 | ||
318 | nla_put_failure: | 330 | nla_put_failure: |
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c index 5af790123ad8..26c5154e05f3 100644 --- a/net/netfilter/nft_log.c +++ b/net/netfilter/nft_log.c | |||
@@ -23,7 +23,6 @@ static const char *nft_log_null_prefix = ""; | |||
23 | struct nft_log { | 23 | struct nft_log { |
24 | struct nf_loginfo loginfo; | 24 | struct nf_loginfo loginfo; |
25 | char *prefix; | 25 | char *prefix; |
26 | int family; | ||
27 | }; | 26 | }; |
28 | 27 | ||
29 | static void nft_log_eval(const struct nft_expr *expr, | 28 | static void nft_log_eval(const struct nft_expr *expr, |
@@ -33,7 +32,7 @@ static void nft_log_eval(const struct nft_expr *expr, | |||
33 | const struct nft_log *priv = nft_expr_priv(expr); | 32 | const struct nft_log *priv = nft_expr_priv(expr); |
34 | struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); | 33 | struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); |
35 | 34 | ||
36 | nf_log_packet(net, priv->family, pkt->ops->hooknum, pkt->skb, pkt->in, | 35 | nf_log_packet(net, pkt->ops->pf, pkt->ops->hooknum, pkt->skb, pkt->in, |
37 | pkt->out, &priv->loginfo, "%s", priv->prefix); | 36 | pkt->out, &priv->loginfo, "%s", priv->prefix); |
38 | } | 37 | } |
39 | 38 | ||
@@ -52,8 +51,6 @@ static int nft_log_init(const struct nft_ctx *ctx, | |||
52 | struct nf_loginfo *li = &priv->loginfo; | 51 | struct nf_loginfo *li = &priv->loginfo; |
53 | const struct nlattr *nla; | 52 | const struct nlattr *nla; |
54 | 53 | ||
55 | priv->family = ctx->afi->family; | ||
56 | |||
57 | nla = tb[NFTA_LOG_PREFIX]; | 54 | nla = tb[NFTA_LOG_PREFIX]; |
58 | if (nla != NULL) { | 55 | if (nla != NULL) { |
59 | priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL); | 56 | priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL); |
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index 8a6116b75b5a..bb4ef4cccb6e 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/netfilter.h> | 16 | #include <linux/netfilter.h> |
17 | #include <linux/netfilter/nf_tables.h> | 17 | #include <linux/netfilter/nf_tables.h> |
18 | #include <net/netfilter/nf_tables.h> | 18 | #include <net/netfilter/nf_tables.h> |
19 | #include <net/netfilter/nf_tables_core.h> | ||
19 | 20 | ||
20 | struct nft_lookup { | 21 | struct nft_lookup { |
21 | struct nft_set *set; | 22 | struct nft_set *set; |
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c index cbea473d69e9..e8ae2f6bf232 100644 --- a/net/netfilter/nft_queue.c +++ b/net/netfilter/nft_queue.c | |||
@@ -25,7 +25,6 @@ struct nft_queue { | |||
25 | u16 queuenum; | 25 | u16 queuenum; |
26 | u16 queues_total; | 26 | u16 queues_total; |
27 | u16 flags; | 27 | u16 flags; |
28 | u8 family; | ||
29 | }; | 28 | }; |
30 | 29 | ||
31 | static void nft_queue_eval(const struct nft_expr *expr, | 30 | static void nft_queue_eval(const struct nft_expr *expr, |
@@ -43,7 +42,7 @@ static void nft_queue_eval(const struct nft_expr *expr, | |||
43 | queue = priv->queuenum + cpu % priv->queues_total; | 42 | queue = priv->queuenum + cpu % priv->queues_total; |
44 | } else { | 43 | } else { |
45 | queue = nfqueue_hash(pkt->skb, queue, | 44 | queue = nfqueue_hash(pkt->skb, queue, |
46 | priv->queues_total, priv->family, | 45 | priv->queues_total, pkt->ops->pf, |
47 | jhash_initval); | 46 | jhash_initval); |
48 | } | 47 | } |
49 | } | 48 | } |
@@ -71,7 +70,6 @@ static int nft_queue_init(const struct nft_ctx *ctx, | |||
71 | return -EINVAL; | 70 | return -EINVAL; |
72 | 71 | ||
73 | init_hashrandom(&jhash_initval); | 72 | init_hashrandom(&jhash_initval); |
74 | priv->family = ctx->afi->family; | ||
75 | priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM])); | 73 | priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM])); |
76 | 74 | ||
77 | if (tb[NFTA_QUEUE_TOTAL] != NULL) | 75 | if (tb[NFTA_QUEUE_TOTAL] != NULL) |
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c index ca0c1b231bfe..e21d69d13506 100644 --- a/net/netfilter/nft_rbtree.c +++ b/net/netfilter/nft_rbtree.c | |||
@@ -69,8 +69,10 @@ static void nft_rbtree_elem_destroy(const struct nft_set *set, | |||
69 | struct nft_rbtree_elem *rbe) | 69 | struct nft_rbtree_elem *rbe) |
70 | { | 70 | { |
71 | nft_data_uninit(&rbe->key, NFT_DATA_VALUE); | 71 | nft_data_uninit(&rbe->key, NFT_DATA_VALUE); |
72 | if (set->flags & NFT_SET_MAP) | 72 | if (set->flags & NFT_SET_MAP && |
73 | !(rbe->flags & NFT_SET_ELEM_INTERVAL_END)) | ||
73 | nft_data_uninit(rbe->data, set->dtype); | 74 | nft_data_uninit(rbe->data, set->dtype); |
75 | |||
74 | kfree(rbe); | 76 | kfree(rbe); |
75 | } | 77 | } |
76 | 78 | ||
@@ -108,7 +110,8 @@ static int nft_rbtree_insert(const struct nft_set *set, | |||
108 | int err; | 110 | int err; |
109 | 111 | ||
110 | size = sizeof(*rbe); | 112 | size = sizeof(*rbe); |
111 | if (set->flags & NFT_SET_MAP) | 113 | if (set->flags & NFT_SET_MAP && |
114 | !(elem->flags & NFT_SET_ELEM_INTERVAL_END)) | ||
112 | size += sizeof(rbe->data[0]); | 115 | size += sizeof(rbe->data[0]); |
113 | 116 | ||
114 | rbe = kzalloc(size, GFP_KERNEL); | 117 | rbe = kzalloc(size, GFP_KERNEL); |
@@ -117,7 +120,8 @@ static int nft_rbtree_insert(const struct nft_set *set, | |||
117 | 120 | ||
118 | rbe->flags = elem->flags; | 121 | rbe->flags = elem->flags; |
119 | nft_data_copy(&rbe->key, &elem->key); | 122 | nft_data_copy(&rbe->key, &elem->key); |
120 | if (set->flags & NFT_SET_MAP) | 123 | if (set->flags & NFT_SET_MAP && |
124 | !(rbe->flags & NFT_SET_ELEM_INTERVAL_END)) | ||
121 | nft_data_copy(rbe->data, &elem->data); | 125 | nft_data_copy(rbe->data, &elem->data); |
122 | 126 | ||
123 | err = __nft_rbtree_insert(set, rbe); | 127 | err = __nft_rbtree_insert(set, rbe); |
@@ -153,7 +157,8 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem) | |||
153 | parent = parent->rb_right; | 157 | parent = parent->rb_right; |
154 | else { | 158 | else { |
155 | elem->cookie = rbe; | 159 | elem->cookie = rbe; |
156 | if (set->flags & NFT_SET_MAP) | 160 | if (set->flags & NFT_SET_MAP && |
161 | !(rbe->flags & NFT_SET_ELEM_INTERVAL_END)) | ||
157 | nft_data_copy(&elem->data, rbe->data); | 162 | nft_data_copy(&elem->data, rbe->data); |
158 | elem->flags = rbe->flags; | 163 | elem->flags = rbe->flags; |
159 | return 0; | 164 | return 0; |
@@ -177,7 +182,8 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx, | |||
177 | 182 | ||
178 | rbe = rb_entry(node, struct nft_rbtree_elem, node); | 183 | rbe = rb_entry(node, struct nft_rbtree_elem, node); |
179 | nft_data_copy(&elem.key, &rbe->key); | 184 | nft_data_copy(&elem.key, &rbe->key); |
180 | if (set->flags & NFT_SET_MAP) | 185 | if (set->flags & NFT_SET_MAP && |
186 | !(rbe->flags & NFT_SET_ELEM_INTERVAL_END)) | ||
181 | nft_data_copy(&elem.data, rbe->data); | 187 | nft_data_copy(&elem.data, rbe->data); |
182 | elem.flags = rbe->flags; | 188 | elem.flags = rbe->flags; |
183 | 189 | ||
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c index 5e204711d704..f3448c296446 100644 --- a/net/netfilter/nft_reject.c +++ b/net/netfilter/nft_reject.c | |||
@@ -16,65 +16,23 @@ | |||
16 | #include <linux/netfilter.h> | 16 | #include <linux/netfilter.h> |
17 | #include <linux/netfilter/nf_tables.h> | 17 | #include <linux/netfilter/nf_tables.h> |
18 | #include <net/netfilter/nf_tables.h> | 18 | #include <net/netfilter/nf_tables.h> |
19 | #include <net/icmp.h> | 19 | #include <net/netfilter/nft_reject.h> |
20 | #include <net/netfilter/ipv4/nf_reject.h> | ||
21 | 20 | ||
22 | #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) | 21 | const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = { |
23 | #include <net/netfilter/ipv6/nf_reject.h> | ||
24 | #endif | ||
25 | |||
26 | struct nft_reject { | ||
27 | enum nft_reject_types type:8; | ||
28 | u8 icmp_code; | ||
29 | u8 family; | ||
30 | }; | ||
31 | |||
32 | static void nft_reject_eval(const struct nft_expr *expr, | ||
33 | struct nft_data data[NFT_REG_MAX + 1], | ||
34 | const struct nft_pktinfo *pkt) | ||
35 | { | ||
36 | struct nft_reject *priv = nft_expr_priv(expr); | ||
37 | #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) | ||
38 | struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out); | ||
39 | #endif | ||
40 | switch (priv->type) { | ||
41 | case NFT_REJECT_ICMP_UNREACH: | ||
42 | if (priv->family == NFPROTO_IPV4) | ||
43 | nf_send_unreach(pkt->skb, priv->icmp_code); | ||
44 | #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) | ||
45 | else if (priv->family == NFPROTO_IPV6) | ||
46 | nf_send_unreach6(net, pkt->skb, priv->icmp_code, | ||
47 | pkt->ops->hooknum); | ||
48 | #endif | ||
49 | break; | ||
50 | case NFT_REJECT_TCP_RST: | ||
51 | if (priv->family == NFPROTO_IPV4) | ||
52 | nf_send_reset(pkt->skb, pkt->ops->hooknum); | ||
53 | #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) | ||
54 | else if (priv->family == NFPROTO_IPV6) | ||
55 | nf_send_reset6(net, pkt->skb, pkt->ops->hooknum); | ||
56 | #endif | ||
57 | break; | ||
58 | } | ||
59 | |||
60 | data[NFT_REG_VERDICT].verdict = NF_DROP; | ||
61 | } | ||
62 | |||
63 | static const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = { | ||
64 | [NFTA_REJECT_TYPE] = { .type = NLA_U32 }, | 22 | [NFTA_REJECT_TYPE] = { .type = NLA_U32 }, |
65 | [NFTA_REJECT_ICMP_CODE] = { .type = NLA_U8 }, | 23 | [NFTA_REJECT_ICMP_CODE] = { .type = NLA_U8 }, |
66 | }; | 24 | }; |
25 | EXPORT_SYMBOL_GPL(nft_reject_policy); | ||
67 | 26 | ||
68 | static int nft_reject_init(const struct nft_ctx *ctx, | 27 | int nft_reject_init(const struct nft_ctx *ctx, |
69 | const struct nft_expr *expr, | 28 | const struct nft_expr *expr, |
70 | const struct nlattr * const tb[]) | 29 | const struct nlattr * const tb[]) |
71 | { | 30 | { |
72 | struct nft_reject *priv = nft_expr_priv(expr); | 31 | struct nft_reject *priv = nft_expr_priv(expr); |
73 | 32 | ||
74 | if (tb[NFTA_REJECT_TYPE] == NULL) | 33 | if (tb[NFTA_REJECT_TYPE] == NULL) |
75 | return -EINVAL; | 34 | return -EINVAL; |
76 | 35 | ||
77 | priv->family = ctx->afi->family; | ||
78 | priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE])); | 36 | priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE])); |
79 | switch (priv->type) { | 37 | switch (priv->type) { |
80 | case NFT_REJECT_ICMP_UNREACH: | 38 | case NFT_REJECT_ICMP_UNREACH: |
@@ -89,8 +47,9 @@ static int nft_reject_init(const struct nft_ctx *ctx, | |||
89 | 47 | ||
90 | return 0; | 48 | return 0; |
91 | } | 49 | } |
50 | EXPORT_SYMBOL_GPL(nft_reject_init); | ||
92 | 51 | ||
93 | static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr) | 52 | int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr) |
94 | { | 53 | { |
95 | const struct nft_reject *priv = nft_expr_priv(expr); | 54 | const struct nft_reject *priv = nft_expr_priv(expr); |
96 | 55 | ||
@@ -109,37 +68,7 @@ static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
109 | nla_put_failure: | 68 | nla_put_failure: |
110 | return -1; | 69 | return -1; |
111 | } | 70 | } |
112 | 71 | EXPORT_SYMBOL_GPL(nft_reject_dump); | |
113 | static struct nft_expr_type nft_reject_type; | ||
114 | static const struct nft_expr_ops nft_reject_ops = { | ||
115 | .type = &nft_reject_type, | ||
116 | .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)), | ||
117 | .eval = nft_reject_eval, | ||
118 | .init = nft_reject_init, | ||
119 | .dump = nft_reject_dump, | ||
120 | }; | ||
121 | |||
122 | static struct nft_expr_type nft_reject_type __read_mostly = { | ||
123 | .name = "reject", | ||
124 | .ops = &nft_reject_ops, | ||
125 | .policy = nft_reject_policy, | ||
126 | .maxattr = NFTA_REJECT_MAX, | ||
127 | .owner = THIS_MODULE, | ||
128 | }; | ||
129 | |||
130 | static int __init nft_reject_module_init(void) | ||
131 | { | ||
132 | return nft_register_expr(&nft_reject_type); | ||
133 | } | ||
134 | |||
135 | static void __exit nft_reject_module_exit(void) | ||
136 | { | ||
137 | nft_unregister_expr(&nft_reject_type); | ||
138 | } | ||
139 | |||
140 | module_init(nft_reject_module_init); | ||
141 | module_exit(nft_reject_module_exit); | ||
142 | 72 | ||
143 | MODULE_LICENSE("GPL"); | 73 | MODULE_LICENSE("GPL"); |
144 | MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); | 74 | MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); |
145 | MODULE_ALIAS_NFT_EXPR("reject"); | ||
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c new file mode 100644 index 000000000000..8a310f239c93 --- /dev/null +++ b/net/netfilter/nft_reject_inet.c | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014 Patrick McHardy <kaber@trash.net> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/netlink.h> | ||
13 | #include <linux/netfilter.h> | ||
14 | #include <linux/netfilter/nf_tables.h> | ||
15 | #include <net/netfilter/nf_tables.h> | ||
16 | #include <net/netfilter/nft_reject.h> | ||
17 | |||
18 | static void nft_reject_inet_eval(const struct nft_expr *expr, | ||
19 | struct nft_data data[NFT_REG_MAX + 1], | ||
20 | const struct nft_pktinfo *pkt) | ||
21 | { | ||
22 | switch (pkt->ops->pf) { | ||
23 | case NFPROTO_IPV4: | ||
24 | return nft_reject_ipv4_eval(expr, data, pkt); | ||
25 | case NFPROTO_IPV6: | ||
26 | return nft_reject_ipv6_eval(expr, data, pkt); | ||
27 | } | ||
28 | } | ||
29 | |||
30 | static struct nft_expr_type nft_reject_inet_type; | ||
31 | static const struct nft_expr_ops nft_reject_inet_ops = { | ||
32 | .type = &nft_reject_inet_type, | ||
33 | .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)), | ||
34 | .eval = nft_reject_inet_eval, | ||
35 | .init = nft_reject_init, | ||
36 | .dump = nft_reject_dump, | ||
37 | }; | ||
38 | |||
39 | static struct nft_expr_type nft_reject_inet_type __read_mostly = { | ||
40 | .family = NFPROTO_INET, | ||
41 | .name = "reject", | ||
42 | .ops = &nft_reject_inet_ops, | ||
43 | .policy = nft_reject_policy, | ||
44 | .maxattr = NFTA_REJECT_MAX, | ||
45 | .owner = THIS_MODULE, | ||
46 | }; | ||
47 | |||
48 | static int __init nft_reject_inet_module_init(void) | ||
49 | { | ||
50 | return nft_register_expr(&nft_reject_inet_type); | ||
51 | } | ||
52 | |||
53 | static void __exit nft_reject_inet_module_exit(void) | ||
54 | { | ||
55 | nft_unregister_expr(&nft_reject_inet_type); | ||
56 | } | ||
57 | |||
58 | module_init(nft_reject_inet_module_init); | ||
59 | module_exit(nft_reject_inet_module_exit); | ||
60 | |||
61 | MODULE_LICENSE("GPL"); | ||
62 | MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); | ||
63 | MODULE_ALIAS_NFT_AF_EXPR(1, "reject"); | ||
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 5929be622c5c..75747aecdebe 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c | |||
@@ -228,12 +228,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par, | |||
228 | goto err3; | 228 | goto err3; |
229 | } | 229 | } |
230 | 230 | ||
231 | __set_bit(IPS_TEMPLATE_BIT, &ct->status); | 231 | nf_conntrack_tmpl_insert(par->net, ct); |
232 | __set_bit(IPS_CONFIRMED_BIT, &ct->status); | ||
233 | |||
234 | /* Overload tuple linked list to put us in template list. */ | ||
235 | hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, | ||
236 | &par->net->ct.tmpl); | ||
237 | out: | 232 | out: |
238 | info->ct = ct; | 233 | info->ct = ct; |
239 | return 0; | 234 | return 0; |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index df4692826ead..e9a48baf8551 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -55,6 +55,7 @@ | |||
55 | 55 | ||
56 | #include "datapath.h" | 56 | #include "datapath.h" |
57 | #include "flow.h" | 57 | #include "flow.h" |
58 | #include "flow_table.h" | ||
58 | #include "flow_netlink.h" | 59 | #include "flow_netlink.h" |
59 | #include "vport-internal_dev.h" | 60 | #include "vport-internal_dev.h" |
60 | #include "vport-netdev.h" | 61 | #include "vport-netdev.h" |
@@ -160,7 +161,6 @@ static void destroy_dp_rcu(struct rcu_head *rcu) | |||
160 | { | 161 | { |
161 | struct datapath *dp = container_of(rcu, struct datapath, rcu); | 162 | struct datapath *dp = container_of(rcu, struct datapath, rcu); |
162 | 163 | ||
163 | ovs_flow_tbl_destroy(&dp->table); | ||
164 | free_percpu(dp->stats_percpu); | 164 | free_percpu(dp->stats_percpu); |
165 | release_net(ovs_dp_get_net(dp)); | 165 | release_net(ovs_dp_get_net(dp)); |
166 | kfree(dp->ports); | 166 | kfree(dp->ports); |
@@ -466,6 +466,14 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, | |||
466 | 466 | ||
467 | skb_zerocopy(user_skb, skb, skb->len, hlen); | 467 | skb_zerocopy(user_skb, skb, skb->len, hlen); |
468 | 468 | ||
469 | /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */ | ||
470 | if (!(dp->user_features & OVS_DP_F_UNALIGNED)) { | ||
471 | size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len; | ||
472 | |||
473 | if (plen > 0) | ||
474 | memset(skb_put(user_skb, plen), 0, plen); | ||
475 | } | ||
476 | |||
469 | ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len; | 477 | ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len; |
470 | 478 | ||
471 | err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid); | 479 | err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid); |
@@ -852,11 +860,8 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) | |||
852 | goto err_unlock_ovs; | 860 | goto err_unlock_ovs; |
853 | 861 | ||
854 | /* The unmasked key has to be the same for flow updates. */ | 862 | /* The unmasked key has to be the same for flow updates. */ |
855 | error = -EINVAL; | 863 | if (!ovs_flow_cmp_unmasked_key(flow, &match)) |
856 | if (!ovs_flow_cmp_unmasked_key(flow, &match)) { | ||
857 | OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n"); | ||
858 | goto err_unlock_ovs; | 864 | goto err_unlock_ovs; |
859 | } | ||
860 | 865 | ||
861 | /* Update actions. */ | 866 | /* Update actions. */ |
862 | old_acts = ovsl_dereference(flow->sf_acts); | 867 | old_acts = ovsl_dereference(flow->sf_acts); |
@@ -1079,6 +1084,7 @@ static size_t ovs_dp_cmd_msg_size(void) | |||
1079 | msgsize += nla_total_size(IFNAMSIZ); | 1084 | msgsize += nla_total_size(IFNAMSIZ); |
1080 | msgsize += nla_total_size(sizeof(struct ovs_dp_stats)); | 1085 | msgsize += nla_total_size(sizeof(struct ovs_dp_stats)); |
1081 | msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats)); | 1086 | msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats)); |
1087 | msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */ | ||
1082 | 1088 | ||
1083 | return msgsize; | 1089 | return msgsize; |
1084 | } | 1090 | } |
@@ -1279,7 +1285,7 @@ err_destroy_ports_array: | |||
1279 | err_destroy_percpu: | 1285 | err_destroy_percpu: |
1280 | free_percpu(dp->stats_percpu); | 1286 | free_percpu(dp->stats_percpu); |
1281 | err_destroy_table: | 1287 | err_destroy_table: |
1282 | ovs_flow_tbl_destroy(&dp->table); | 1288 | ovs_flow_tbl_destroy(&dp->table, false); |
1283 | err_free_dp: | 1289 | err_free_dp: |
1284 | release_net(ovs_dp_get_net(dp)); | 1290 | release_net(ovs_dp_get_net(dp)); |
1285 | kfree(dp); | 1291 | kfree(dp); |
@@ -1306,10 +1312,13 @@ static void __dp_destroy(struct datapath *dp) | |||
1306 | list_del_rcu(&dp->list_node); | 1312 | list_del_rcu(&dp->list_node); |
1307 | 1313 | ||
1308 | /* OVSP_LOCAL is datapath internal port. We need to make sure that | 1314 | /* OVSP_LOCAL is datapath internal port. We need to make sure that |
1309 | * all port in datapath are destroyed first before freeing datapath. | 1315 | * all ports in datapath are destroyed first before freeing datapath. |
1310 | */ | 1316 | */ |
1311 | ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL)); | 1317 | ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL)); |
1312 | 1318 | ||
1319 | /* RCU destroy the flow table */ | ||
1320 | ovs_flow_tbl_destroy(&dp->table, true); | ||
1321 | |||
1313 | call_rcu(&dp->rcu, destroy_dp_rcu); | 1322 | call_rcu(&dp->rcu, destroy_dp_rcu); |
1314 | } | 1323 | } |
1315 | 1324 | ||
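The OVS_PACKET_ATTR_PACKET padding in the queue_userspace_packet() hunk above follows the standard netlink 4-byte alignment rule. A minimal sketch of the same computation, assuming only NLA_ALIGN from the netlink uapi header (the helper name here is illustrative, not part of the patch):

#include <linux/netlink.h>	/* NLA_ALIGN(): round a length up to a 4-byte boundary */

static size_t ovs_packet_attr_pad(size_t payload_len)
{
	/* 0..3 trailing bytes; the upcall zero-fills exactly this many
	 * when the datapath was not opened with OVS_DP_F_UNALIGNED. */
	return NLA_ALIGN(payload_len) - payload_len;
}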
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index c58a0fe3c889..3c268b3d71c3 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c | |||
@@ -153,29 +153,29 @@ static void rcu_free_flow_callback(struct rcu_head *rcu) | |||
153 | flow_free(flow); | 153 | flow_free(flow); |
154 | } | 154 | } |
155 | 155 | ||
156 | static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred) | ||
157 | { | ||
158 | if (!mask) | ||
159 | return; | ||
160 | |||
161 | BUG_ON(!mask->ref_count); | ||
162 | mask->ref_count--; | ||
163 | |||
164 | if (!mask->ref_count) { | ||
165 | list_del_rcu(&mask->list); | ||
166 | if (deferred) | ||
167 | kfree_rcu(mask, rcu); | ||
168 | else | ||
169 | kfree(mask); | ||
170 | } | ||
171 | } | ||
172 | |||
173 | void ovs_flow_free(struct sw_flow *flow, bool deferred) | 156 | void ovs_flow_free(struct sw_flow *flow, bool deferred) |
174 | { | 157 | { |
175 | if (!flow) | 158 | if (!flow) |
176 | return; | 159 | return; |
177 | 160 | ||
178 | flow_mask_del_ref(flow->mask, deferred); | 161 | if (flow->mask) { |
162 | struct sw_flow_mask *mask = flow->mask; | ||
163 | |||
164 | /* ovs-lock is required to protect mask-refcount and | ||
165 | * mask list. | ||
166 | */ | ||
167 | ASSERT_OVSL(); | ||
168 | BUG_ON(!mask->ref_count); | ||
169 | mask->ref_count--; | ||
170 | |||
171 | if (!mask->ref_count) { | ||
172 | list_del_rcu(&mask->list); | ||
173 | if (deferred) | ||
174 | kfree_rcu(mask, rcu); | ||
175 | else | ||
176 | kfree(mask); | ||
177 | } | ||
178 | } | ||
179 | 179 | ||
180 | if (deferred) | 180 | if (deferred) |
181 | call_rcu(&flow->rcu, rcu_free_flow_callback); | 181 | call_rcu(&flow->rcu, rcu_free_flow_callback); |
@@ -188,26 +188,9 @@ static void free_buckets(struct flex_array *buckets) | |||
188 | flex_array_free(buckets); | 188 | flex_array_free(buckets); |
189 | } | 189 | } |
190 | 190 | ||
191 | |||
191 | static void __table_instance_destroy(struct table_instance *ti) | 192 | static void __table_instance_destroy(struct table_instance *ti) |
192 | { | 193 | { |
193 | int i; | ||
194 | |||
195 | if (ti->keep_flows) | ||
196 | goto skip_flows; | ||
197 | |||
198 | for (i = 0; i < ti->n_buckets; i++) { | ||
199 | struct sw_flow *flow; | ||
200 | struct hlist_head *head = flex_array_get(ti->buckets, i); | ||
201 | struct hlist_node *n; | ||
202 | int ver = ti->node_ver; | ||
203 | |||
204 | hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { | ||
205 | hlist_del(&flow->hash_node[ver]); | ||
206 | ovs_flow_free(flow, false); | ||
207 | } | ||
208 | } | ||
209 | |||
210 | skip_flows: | ||
211 | free_buckets(ti->buckets); | 194 | free_buckets(ti->buckets); |
212 | kfree(ti); | 195 | kfree(ti); |
213 | } | 196 | } |
@@ -258,20 +241,38 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) | |||
258 | 241 | ||
259 | static void table_instance_destroy(struct table_instance *ti, bool deferred) | 242 | static void table_instance_destroy(struct table_instance *ti, bool deferred) |
260 | { | 243 | { |
244 | int i; | ||
245 | |||
261 | if (!ti) | 246 | if (!ti) |
262 | return; | 247 | return; |
263 | 248 | ||
249 | if (ti->keep_flows) | ||
250 | goto skip_flows; | ||
251 | |||
252 | for (i = 0; i < ti->n_buckets; i++) { | ||
253 | struct sw_flow *flow; | ||
254 | struct hlist_head *head = flex_array_get(ti->buckets, i); | ||
255 | struct hlist_node *n; | ||
256 | int ver = ti->node_ver; | ||
257 | |||
258 | hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { | ||
259 | hlist_del_rcu(&flow->hash_node[ver]); | ||
260 | ovs_flow_free(flow, deferred); | ||
261 | } | ||
262 | } | ||
263 | |||
264 | skip_flows: | ||
264 | if (deferred) | 265 | if (deferred) |
265 | call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); | 266 | call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); |
266 | else | 267 | else |
267 | __table_instance_destroy(ti); | 268 | __table_instance_destroy(ti); |
268 | } | 269 | } |
269 | 270 | ||
270 | void ovs_flow_tbl_destroy(struct flow_table *table) | 271 | void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred) |
271 | { | 272 | { |
272 | struct table_instance *ti = ovsl_dereference(table->ti); | 273 | struct table_instance *ti = ovsl_dereference(table->ti); |
273 | 274 | ||
274 | table_instance_destroy(ti, false); | 275 | table_instance_destroy(ti, deferred); |
275 | } | 276 | } |
276 | 277 | ||
277 | struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, | 278 | struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, |
@@ -504,16 +505,11 @@ static struct sw_flow_mask *mask_alloc(void) | |||
504 | 505 | ||
505 | mask = kmalloc(sizeof(*mask), GFP_KERNEL); | 506 | mask = kmalloc(sizeof(*mask), GFP_KERNEL); |
506 | if (mask) | 507 | if (mask) |
507 | mask->ref_count = 0; | 508 | mask->ref_count = 1; |
508 | 509 | ||
509 | return mask; | 510 | return mask; |
510 | } | 511 | } |
511 | 512 | ||
512 | static void mask_add_ref(struct sw_flow_mask *mask) | ||
513 | { | ||
514 | mask->ref_count++; | ||
515 | } | ||
516 | |||
517 | static bool mask_equal(const struct sw_flow_mask *a, | 513 | static bool mask_equal(const struct sw_flow_mask *a, |
518 | const struct sw_flow_mask *b) | 514 | const struct sw_flow_mask *b) |
519 | { | 515 | { |
@@ -554,9 +550,11 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow, | |||
554 | mask->key = new->key; | 550 | mask->key = new->key; |
555 | mask->range = new->range; | 551 | mask->range = new->range; |
556 | list_add_rcu(&mask->list, &tbl->mask_list); | 552 | list_add_rcu(&mask->list, &tbl->mask_list); |
553 | } else { | ||
554 | BUG_ON(!mask->ref_count); | ||
555 | mask->ref_count++; | ||
557 | } | 556 | } |
558 | 557 | ||
559 | mask_add_ref(mask); | ||
560 | flow->mask = mask; | 558 | flow->mask = mask; |
561 | return 0; | 559 | return 0; |
562 | } | 560 | } |
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h index 1996e34c0fd8..baaeb101924d 100644 --- a/net/openvswitch/flow_table.h +++ b/net/openvswitch/flow_table.h | |||
@@ -60,7 +60,7 @@ void ovs_flow_free(struct sw_flow *, bool deferred); | |||
60 | 60 | ||
61 | int ovs_flow_tbl_init(struct flow_table *); | 61 | int ovs_flow_tbl_init(struct flow_table *); |
62 | int ovs_flow_tbl_count(struct flow_table *table); | 62 | int ovs_flow_tbl_count(struct flow_table *table); |
63 | void ovs_flow_tbl_destroy(struct flow_table *table); | 63 | void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred); |
64 | int ovs_flow_tbl_flush(struct flow_table *flow_table); | 64 | int ovs_flow_tbl_flush(struct flow_table *flow_table); |
65 | 65 | ||
66 | int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, | 66 | int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, |
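A minimal sketch of how callers are expected to pick the new "deferred" flag, mirroring the datapath.c hunks above (comments are illustrative, not from the patch):

/* Teardown while RCU readers may still be walking the table:
 * defer freeing of the table instance to an RCU callback. */
ovs_flow_tbl_destroy(&dp->table, true);

/* Error unwind before the datapath has been published to readers:
 * an immediate, synchronous free is safe. */
ovs_flow_tbl_destroy(&dp->table, false);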
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 0f6259a6a932..2b1738ef9394 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -662,6 +662,8 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk, | |||
662 | */ | 662 | */ |
663 | sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk); | 663 | sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk); |
664 | 664 | ||
665 | newsk->sk_v6_rcv_saddr = sk->sk_v6_rcv_saddr; | ||
666 | |||
665 | sk_refcnt_debug_inc(newsk); | 667 | sk_refcnt_debug_inc(newsk); |
666 | 668 | ||
667 | if (newsk->sk_prot->init(newsk)) { | 669 | if (newsk->sk_prot->init(newsk)) { |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 80a6640f329b..06c6ff0cb911 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -571,7 +571,7 @@ static void svc_check_conn_limits(struct svc_serv *serv) | |||
571 | } | 571 | } |
572 | } | 572 | } |
573 | 573 | ||
574 | int svc_alloc_arg(struct svc_rqst *rqstp) | 574 | static int svc_alloc_arg(struct svc_rqst *rqstp) |
575 | { | 575 | { |
576 | struct svc_serv *serv = rqstp->rq_server; | 576 | struct svc_serv *serv = rqstp->rq_server; |
577 | struct xdr_buf *arg; | 577 | struct xdr_buf *arg; |
@@ -612,7 +612,7 @@ int svc_alloc_arg(struct svc_rqst *rqstp) | |||
612 | return 0; | 612 | return 0; |
613 | } | 613 | } |
614 | 614 | ||
615 | struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) | 615 | static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) |
616 | { | 616 | { |
617 | struct svc_xprt *xprt; | 617 | struct svc_xprt *xprt; |
618 | struct svc_pool *pool = rqstp->rq_pool; | 618 | struct svc_pool *pool = rqstp->rq_pool; |
@@ -691,7 +691,7 @@ struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) | |||
691 | return xprt; | 691 | return xprt; |
692 | } | 692 | } |
693 | 693 | ||
694 | void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt) | 694 | static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt) |
695 | { | 695 | { |
696 | spin_lock_bh(&serv->sv_lock); | 696 | spin_lock_bh(&serv->sv_lock); |
697 | set_bit(XPT_TEMP, &newxpt->xpt_flags); | 697 | set_bit(XPT_TEMP, &newxpt->xpt_flags); |
diff --git a/net/wireless/core.c b/net/wireless/core.c index d89dee2259b5..010892b81a06 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -203,8 +203,11 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, | |||
203 | 203 | ||
204 | rdev->opencount--; | 204 | rdev->opencount--; |
205 | 205 | ||
206 | WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev && | 206 | if (rdev->scan_req && rdev->scan_req->wdev == wdev) { |
207 | !rdev->scan_req->notified); | 207 | if (WARN_ON(!rdev->scan_req->notified)) |
208 | rdev->scan_req->aborted = true; | ||
209 | ___cfg80211_scan_done(rdev, false); | ||
210 | } | ||
208 | } | 211 | } |
209 | 212 | ||
210 | static int cfg80211_rfkill_set_block(void *data, bool blocked) | 213 | static int cfg80211_rfkill_set_block(void *data, bool blocked) |
@@ -440,9 +443,6 @@ int wiphy_register(struct wiphy *wiphy) | |||
440 | int i; | 443 | int i; |
441 | u16 ifmodes = wiphy->interface_modes; | 444 | u16 ifmodes = wiphy->interface_modes; |
442 | 445 | ||
443 | /* support for 5/10 MHz is broken due to nl80211 API mess - disable */ | ||
444 | wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_5_10_MHZ; | ||
445 | |||
446 | /* | 446 | /* |
447 | * There are major locking problems in nl80211/mac80211 for CSA, | 447 | * There are major locking problems in nl80211/mac80211 for CSA, |
448 | * disable for all drivers until this has been reworked. | 448 | * disable for all drivers until this has been reworked. |
@@ -859,8 +859,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | |||
859 | break; | 859 | break; |
860 | case NETDEV_DOWN: | 860 | case NETDEV_DOWN: |
861 | cfg80211_update_iface_num(rdev, wdev->iftype, -1); | 861 | cfg80211_update_iface_num(rdev, wdev->iftype, -1); |
862 | WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev && | 862 | if (rdev->scan_req && rdev->scan_req->wdev == wdev) { |
863 | !rdev->scan_req->notified); | 863 | if (WARN_ON(!rdev->scan_req->notified)) |
864 | rdev->scan_req->aborted = true; | ||
865 | ___cfg80211_scan_done(rdev, false); | ||
866 | } | ||
864 | 867 | ||
865 | if (WARN_ON(rdev->sched_scan_req && | 868 | if (WARN_ON(rdev->sched_scan_req && |
866 | rdev->sched_scan_req->dev == wdev->netdev)) { | 869 | rdev->sched_scan_req->dev == wdev->netdev)) { |
diff --git a/net/wireless/core.h b/net/wireless/core.h index 37ec16d7bb1a..f1d193b557b6 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -62,6 +62,7 @@ struct cfg80211_registered_device { | |||
62 | struct rb_root bss_tree; | 62 | struct rb_root bss_tree; |
63 | u32 bss_generation; | 63 | u32 bss_generation; |
64 | struct cfg80211_scan_request *scan_req; /* protected by RTNL */ | 64 | struct cfg80211_scan_request *scan_req; /* protected by RTNL */ |
65 | struct sk_buff *scan_msg; | ||
65 | struct cfg80211_sched_scan_request *sched_scan_req; | 66 | struct cfg80211_sched_scan_request *sched_scan_req; |
66 | unsigned long suspend_at; | 67 | unsigned long suspend_at; |
67 | struct work_struct scan_done_wk; | 68 | struct work_struct scan_done_wk; |
@@ -361,7 +362,8 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, | |||
361 | struct key_params *params, int key_idx, | 362 | struct key_params *params, int key_idx, |
362 | bool pairwise, const u8 *mac_addr); | 363 | bool pairwise, const u8 *mac_addr); |
363 | void __cfg80211_scan_done(struct work_struct *wk); | 364 | void __cfg80211_scan_done(struct work_struct *wk); |
364 | void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev); | 365 | void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, |
366 | bool send_message); | ||
365 | void __cfg80211_sched_scan_results(struct work_struct *wk); | 367 | void __cfg80211_sched_scan_results(struct work_struct *wk); |
366 | int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, | 368 | int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, |
367 | bool driver_initiated); | 369 | bool driver_initiated); |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 7a742594916e..4fe2e6e2bc76 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -1719,9 +1719,10 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) | |||
1719 | * We can then retry with the larger buffer. | 1719 | * We can then retry with the larger buffer. |
1720 | */ | 1720 | */ |
1721 | if ((ret == -ENOBUFS || ret == -EMSGSIZE) && | 1721 | if ((ret == -ENOBUFS || ret == -EMSGSIZE) && |
1722 | !skb->len && | 1722 | !skb->len && !state->split && |
1723 | cb->min_dump_alloc < 4096) { | 1723 | cb->min_dump_alloc < 4096) { |
1724 | cb->min_dump_alloc = 4096; | 1724 | cb->min_dump_alloc = 4096; |
1725 | state->split_start = 0; | ||
1725 | rtnl_unlock(); | 1726 | rtnl_unlock(); |
1726 | return 1; | 1727 | return 1; |
1727 | } | 1728 | } |
@@ -5244,7 +5245,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
5244 | if (!rdev->ops->scan) | 5245 | if (!rdev->ops->scan) |
5245 | return -EOPNOTSUPP; | 5246 | return -EOPNOTSUPP; |
5246 | 5247 | ||
5247 | if (rdev->scan_req) { | 5248 | if (rdev->scan_req || rdev->scan_msg) { |
5248 | err = -EBUSY; | 5249 | err = -EBUSY; |
5249 | goto unlock; | 5250 | goto unlock; |
5250 | } | 5251 | } |
@@ -10011,40 +10012,31 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, | |||
10011 | NL80211_MCGRP_SCAN, GFP_KERNEL); | 10012 | NL80211_MCGRP_SCAN, GFP_KERNEL); |
10012 | } | 10013 | } |
10013 | 10014 | ||
10014 | void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, | 10015 | struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev, |
10015 | struct wireless_dev *wdev) | 10016 | struct wireless_dev *wdev, bool aborted) |
10016 | { | 10017 | { |
10017 | struct sk_buff *msg; | 10018 | struct sk_buff *msg; |
10018 | 10019 | ||
10019 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 10020 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
10020 | if (!msg) | 10021 | if (!msg) |
10021 | return; | 10022 | return NULL; |
10022 | 10023 | ||
10023 | if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0, | 10024 | if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0, |
10024 | NL80211_CMD_NEW_SCAN_RESULTS) < 0) { | 10025 | aborted ? NL80211_CMD_SCAN_ABORTED : |
10026 | NL80211_CMD_NEW_SCAN_RESULTS) < 0) { | ||
10025 | nlmsg_free(msg); | 10027 | nlmsg_free(msg); |
10026 | return; | 10028 | return NULL; |
10027 | } | 10029 | } |
10028 | 10030 | ||
10029 | genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, | 10031 | return msg; |
10030 | NL80211_MCGRP_SCAN, GFP_KERNEL); | ||
10031 | } | 10032 | } |
10032 | 10033 | ||
10033 | void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, | 10034 | void nl80211_send_scan_result(struct cfg80211_registered_device *rdev, |
10034 | struct wireless_dev *wdev) | 10035 | struct sk_buff *msg) |
10035 | { | 10036 | { |
10036 | struct sk_buff *msg; | ||
10037 | |||
10038 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | ||
10039 | if (!msg) | 10037 | if (!msg) |
10040 | return; | 10038 | return; |
10041 | 10039 | ||
10042 | if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0, | ||
10043 | NL80211_CMD_SCAN_ABORTED) < 0) { | ||
10044 | nlmsg_free(msg); | ||
10045 | return; | ||
10046 | } | ||
10047 | |||
10048 | genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, | 10040 | genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, |
10049 | NL80211_MCGRP_SCAN, GFP_KERNEL); | 10041 | NL80211_MCGRP_SCAN, GFP_KERNEL); |
10050 | } | 10042 | } |
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index b1b231324e10..75799746d845 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h | |||
@@ -8,10 +8,10 @@ void nl80211_exit(void); | |||
8 | void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev); | 8 | void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev); |
9 | void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, | 9 | void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, |
10 | struct wireless_dev *wdev); | 10 | struct wireless_dev *wdev); |
11 | void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, | 11 | struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev, |
12 | struct wireless_dev *wdev); | 12 | struct wireless_dev *wdev, bool aborted); |
13 | void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, | 13 | void nl80211_send_scan_result(struct cfg80211_registered_device *rdev, |
14 | struct wireless_dev *wdev); | 14 | struct sk_buff *msg); |
15 | void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev, | 15 | void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev, |
16 | struct net_device *netdev, u32 cmd); | 16 | struct net_device *netdev, u32 cmd); |
17 | void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev, | 17 | void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev, |
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index b528e31da2cf..d1ed4aebbbb7 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -161,18 +161,25 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev, | |||
161 | dev->bss_generation++; | 161 | dev->bss_generation++; |
162 | } | 162 | } |
163 | 163 | ||
164 | void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev) | 164 | void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, |
165 | bool send_message) | ||
165 | { | 166 | { |
166 | struct cfg80211_scan_request *request; | 167 | struct cfg80211_scan_request *request; |
167 | struct wireless_dev *wdev; | 168 | struct wireless_dev *wdev; |
169 | struct sk_buff *msg; | ||
168 | #ifdef CONFIG_CFG80211_WEXT | 170 | #ifdef CONFIG_CFG80211_WEXT |
169 | union iwreq_data wrqu; | 171 | union iwreq_data wrqu; |
170 | #endif | 172 | #endif |
171 | 173 | ||
172 | ASSERT_RTNL(); | 174 | ASSERT_RTNL(); |
173 | 175 | ||
174 | request = rdev->scan_req; | 176 | if (rdev->scan_msg) { |
177 | nl80211_send_scan_result(rdev, rdev->scan_msg); | ||
178 | rdev->scan_msg = NULL; | ||
179 | return; | ||
180 | } | ||
175 | 181 | ||
182 | request = rdev->scan_req; | ||
176 | if (!request) | 183 | if (!request) |
177 | return; | 184 | return; |
178 | 185 | ||
@@ -186,18 +193,16 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev) | |||
186 | if (wdev->netdev) | 193 | if (wdev->netdev) |
187 | cfg80211_sme_scan_done(wdev->netdev); | 194 | cfg80211_sme_scan_done(wdev->netdev); |
188 | 195 | ||
189 | if (request->aborted) { | 196 | if (!request->aborted && |
190 | nl80211_send_scan_aborted(rdev, wdev); | 197 | request->flags & NL80211_SCAN_FLAG_FLUSH) { |
191 | } else { | 198 | /* flush entries from previous scans */ |
192 | if (request->flags & NL80211_SCAN_FLAG_FLUSH) { | 199 | spin_lock_bh(&rdev->bss_lock); |
193 | /* flush entries from previous scans */ | 200 | __cfg80211_bss_expire(rdev, request->scan_start); |
194 | spin_lock_bh(&rdev->bss_lock); | 201 | spin_unlock_bh(&rdev->bss_lock); |
195 | __cfg80211_bss_expire(rdev, request->scan_start); | ||
196 | spin_unlock_bh(&rdev->bss_lock); | ||
197 | } | ||
198 | nl80211_send_scan_done(rdev, wdev); | ||
199 | } | 202 | } |
200 | 203 | ||
204 | msg = nl80211_build_scan_msg(rdev, wdev, request->aborted); | ||
205 | |||
201 | #ifdef CONFIG_CFG80211_WEXT | 206 | #ifdef CONFIG_CFG80211_WEXT |
202 | if (wdev->netdev && !request->aborted) { | 207 | if (wdev->netdev && !request->aborted) { |
203 | memset(&wrqu, 0, sizeof(wrqu)); | 208 | memset(&wrqu, 0, sizeof(wrqu)); |
@@ -211,6 +216,11 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev) | |||
211 | 216 | ||
212 | rdev->scan_req = NULL; | 217 | rdev->scan_req = NULL; |
213 | kfree(request); | 218 | kfree(request); |
219 | |||
220 | if (!send_message) | ||
221 | rdev->scan_msg = msg; | ||
222 | else | ||
223 | nl80211_send_scan_result(rdev, msg); | ||
214 | } | 224 | } |
215 | 225 | ||
216 | void __cfg80211_scan_done(struct work_struct *wk) | 226 | void __cfg80211_scan_done(struct work_struct *wk) |
@@ -221,7 +231,7 @@ void __cfg80211_scan_done(struct work_struct *wk) | |||
221 | scan_done_wk); | 231 | scan_done_wk); |
222 | 232 | ||
223 | rtnl_lock(); | 233 | rtnl_lock(); |
224 | ___cfg80211_scan_done(rdev); | 234 | ___cfg80211_scan_done(rdev, true); |
225 | rtnl_unlock(); | 235 | rtnl_unlock(); |
226 | } | 236 | } |
227 | 237 | ||
@@ -1079,7 +1089,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
1079 | if (IS_ERR(rdev)) | 1089 | if (IS_ERR(rdev)) |
1080 | return PTR_ERR(rdev); | 1090 | return PTR_ERR(rdev); |
1081 | 1091 | ||
1082 | if (rdev->scan_req) { | 1092 | if (rdev->scan_req || rdev->scan_msg) { |
1083 | err = -EBUSY; | 1093 | err = -EBUSY; |
1084 | goto out; | 1094 | goto out; |
1085 | } | 1095 | } |
@@ -1481,7 +1491,7 @@ int cfg80211_wext_giwscan(struct net_device *dev, | |||
1481 | if (IS_ERR(rdev)) | 1491 | if (IS_ERR(rdev)) |
1482 | return PTR_ERR(rdev); | 1492 | return PTR_ERR(rdev); |
1483 | 1493 | ||
1484 | if (rdev->scan_req) | 1494 | if (rdev->scan_req || rdev->scan_msg) |
1485 | return -EAGAIN; | 1495 | return -EAGAIN; |
1486 | 1496 | ||
1487 | res = ieee80211_scan_results(rdev, info, extra, data->length); | 1497 | res = ieee80211_scan_results(rdev, info, extra, data->length); |
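Taken together, the scan.c and nl80211.c hunks above defer delivery of the scan-done netlink notification. A condensed, illustrative sketch of the resulting flow (simplified from the code above, not a literal excerpt):

/* ___cfg80211_scan_done(rdev, send_message) */
msg = nl80211_build_scan_msg(rdev, wdev, request->aborted);
rdev->scan_req = NULL;
kfree(request);
if (!send_message)
	rdev->scan_msg = msg;		/* sent later by the scan_done work item */
else
	nl80211_send_scan_result(rdev, msg);

/* new scan requests keep getting -EBUSY until the pending message is sent */
if (rdev->scan_req || rdev->scan_msg)
	return -EBUSY;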
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index a63509118508..f04d4c32e96e 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -67,7 +67,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) | |||
67 | ASSERT_RDEV_LOCK(rdev); | 67 | ASSERT_RDEV_LOCK(rdev); |
68 | ASSERT_WDEV_LOCK(wdev); | 68 | ASSERT_WDEV_LOCK(wdev); |
69 | 69 | ||
70 | if (rdev->scan_req) | 70 | if (rdev->scan_req || rdev->scan_msg) |
71 | return -EBUSY; | 71 | return -EBUSY; |
72 | 72 | ||
73 | if (wdev->conn->params.channel) | 73 | if (wdev->conn->params.channel) |