author    Ingo Molnar <mingo@elte.hu>  2008-10-10 13:30:08 -0400
committer Ingo Molnar <mingo@elte.hu>  2008-10-10 13:30:08 -0400
commit    3dd392a407d15250a501fa109cc1f93fee95ef85 (patch)
tree      c1faca3fa8bd0f7c8790b3e0887229b4a5a90e8b /net
parent    b27a43c1e90582facad44de67d02bc9e9f900289 (diff)
parent    d403a6484f0341bf0624d17ece46f24f741b6a92 (diff)

Merge branch 'linus' into x86/pat2

Conflicts:
	arch/x86/mm/init_64.c
Diffstat (limited to 'net')
-rw-r--r--  net/9p/client.c                 10
-rw-r--r--  net/9p/conv.c                    6
-rw-r--r--  net/9p/mod.c                    92
-rw-r--r--  net/9p/trans_fd.c              104
-rw-r--r--  net/9p/trans_virtio.c            2
-rw-r--r--  net/ax25/af_ax25.c               3
-rw-r--r--  net/ax25/ax25_std_timer.c        8
-rw-r--r--  net/bluetooth/hci_core.c         3
-rw-r--r--  net/core/dev.c                  49
-rw-r--r--  net/core/rtnetlink.c             2
-rw-r--r--  net/ipv4/tcp_hybla.c             6
-rw-r--r--  net/ipv4/tcp_input.c             3
-rw-r--r--  net/ipv4/tcp_ipv4.c              2
-rw-r--r--  net/ipv4/udp.c                  62
-rw-r--r--  net/ipv6/netfilter/ip6t_hbh.c    8
-rw-r--r--  net/ipv6/route.c                 2
-rw-r--r--  net/ipv6/tcp_ipv6.c              2
-rw-r--r--  net/iucv/iucv.c                  3
-rw-r--r--  net/key/af_key.c                30
-rw-r--r--  net/netrom/af_netrom.c           2
-rw-r--r--  net/sched/sch_generic.c          3
-rw-r--r--  net/sctp/associola.c             9
-rw-r--r--  net/sctp/output.c                3
-rw-r--r--  net/sctp/sm_make_chunk.c        52
-rw-r--r--  net/sctp/sm_statefuns.c         48
-rw-r--r--  net/socket.c                     2
-rw-r--r--  net/xfrm/xfrm_output.c          12
27 files changed, 310 insertions(+), 218 deletions(-)
diff --git a/net/9p/client.c b/net/9p/client.c
index 2ffe40cf2f01..10e320307ec0 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -75,7 +75,6 @@ static int parse_opts(char *opts, struct p9_client *clnt)
75 int option; 75 int option;
76 int ret = 0; 76 int ret = 0;
77 77
78 clnt->trans_mod = v9fs_default_trans();
79 clnt->dotu = 1; 78 clnt->dotu = 1;
80 clnt->msize = 8192; 79 clnt->msize = 8192;
81 80
@@ -108,7 +107,7 @@ static int parse_opts(char *opts, struct p9_client *clnt)
108 clnt->msize = option; 107 clnt->msize = option;
109 break; 108 break;
110 case Opt_trans: 109 case Opt_trans:
111 clnt->trans_mod = v9fs_match_trans(&args[0]); 110 clnt->trans_mod = v9fs_get_trans_by_name(&args[0]);
112 break; 111 break;
113 case Opt_legacy: 112 case Opt_legacy:
114 clnt->dotu = 0; 113 clnt->dotu = 0;
@@ -117,6 +116,10 @@ static int parse_opts(char *opts, struct p9_client *clnt)
117 continue; 116 continue;
118 } 117 }
119 } 118 }
119
120 if (!clnt->trans_mod)
121 clnt->trans_mod = v9fs_get_default_trans();
122
120 kfree(options); 123 kfree(options);
121 return ret; 124 return ret;
122} 125}
@@ -150,6 +153,7 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
150 if (!clnt) 153 if (!clnt)
151 return ERR_PTR(-ENOMEM); 154 return ERR_PTR(-ENOMEM);
152 155
156 clnt->trans_mod = NULL;
153 clnt->trans = NULL; 157 clnt->trans = NULL;
154 spin_lock_init(&clnt->lock); 158 spin_lock_init(&clnt->lock);
155 INIT_LIST_HEAD(&clnt->fidlist); 159 INIT_LIST_HEAD(&clnt->fidlist);
@@ -235,6 +239,8 @@ void p9_client_destroy(struct p9_client *clnt)
235 clnt->trans = NULL; 239 clnt->trans = NULL;
236 } 240 }
237 241
242 v9fs_put_trans(clnt->trans_mod);
243
238 list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist) 244 list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist)
239 p9_fid_destroy(fid); 245 p9_fid_destroy(fid);
240 246
diff --git a/net/9p/conv.c b/net/9p/conv.c
index 44547201f5bc..5ad3a3bd73b2 100644
--- a/net/9p/conv.c
+++ b/net/9p/conv.c
@@ -451,8 +451,10 @@ p9_put_data(struct cbuf *bufp, const char *data, int count,
451 unsigned char **pdata) 451 unsigned char **pdata)
452{ 452{
453 *pdata = buf_alloc(bufp, count); 453 *pdata = buf_alloc(bufp, count);
454 if (*pdata == NULL)
455 return -ENOMEM;
454 memmove(*pdata, data, count); 456 memmove(*pdata, data, count);
455 return count; 457 return 0;
456} 458}
457 459
458static int 460static int
@@ -460,6 +462,8 @@ p9_put_user_data(struct cbuf *bufp, const char __user *data, int count,
460 unsigned char **pdata) 462 unsigned char **pdata)
461{ 463{
462 *pdata = buf_alloc(bufp, count); 464 *pdata = buf_alloc(bufp, count);
465 if (*pdata == NULL)
466 return -ENOMEM;
463 return copy_from_user(*pdata, data, count); 467 return copy_from_user(*pdata, data, count);
464} 468}
465 469
diff --git a/net/9p/mod.c b/net/9p/mod.c
index bdee1fb7cc62..1084feb24cb0 100644
--- a/net/9p/mod.c
+++ b/net/9p/mod.c
@@ -31,6 +31,7 @@
31#include <linux/parser.h> 31#include <linux/parser.h>
32#include <net/9p/transport.h> 32#include <net/9p/transport.h>
33#include <linux/list.h> 33#include <linux/list.h>
34#include <linux/spinlock.h>
34 35
35#ifdef CONFIG_NET_9P_DEBUG 36#ifdef CONFIG_NET_9P_DEBUG
36unsigned int p9_debug_level = 0; /* feature-rific global debug level */ 37unsigned int p9_debug_level = 0; /* feature-rific global debug level */
@@ -44,8 +45,8 @@ MODULE_PARM_DESC(debug, "9P debugging level");
44 * 45 *
45 */ 46 */
46 47
48static DEFINE_SPINLOCK(v9fs_trans_lock);
47static LIST_HEAD(v9fs_trans_list); 49static LIST_HEAD(v9fs_trans_list);
48static struct p9_trans_module *v9fs_default_transport;
49 50
50/** 51/**
51 * v9fs_register_trans - register a new transport with 9p 52 * v9fs_register_trans - register a new transport with 9p
@@ -54,48 +55,87 @@ static struct p9_trans_module *v9fs_default_transport;
54 */ 55 */
55void v9fs_register_trans(struct p9_trans_module *m) 56void v9fs_register_trans(struct p9_trans_module *m)
56{ 57{
58 spin_lock(&v9fs_trans_lock);
57 list_add_tail(&m->list, &v9fs_trans_list); 59 list_add_tail(&m->list, &v9fs_trans_list);
58 if (m->def) 60 spin_unlock(&v9fs_trans_lock);
59 v9fs_default_transport = m;
60} 61}
61EXPORT_SYMBOL(v9fs_register_trans); 62EXPORT_SYMBOL(v9fs_register_trans);
62 63
63/** 64/**
64 * v9fs_match_trans - match transport versus registered transports 65 * v9fs_unregister_trans - unregister a 9p transport
66 * @m: the transport to remove
67 *
68 */
69void v9fs_unregister_trans(struct p9_trans_module *m)
70{
71 spin_lock(&v9fs_trans_lock);
72 list_del_init(&m->list);
73 spin_unlock(&v9fs_trans_lock);
74}
75EXPORT_SYMBOL(v9fs_unregister_trans);
76
77/**
78 * v9fs_get_trans_by_name - get transport with the matching name
65 * @name: string identifying transport 79 * @name: string identifying transport
66 * 80 *
67 */ 81 */
68struct p9_trans_module *v9fs_match_trans(const substring_t *name) 82struct p9_trans_module *v9fs_get_trans_by_name(const substring_t *name)
69{ 83{
70 struct list_head *p; 84 struct p9_trans_module *t, *found = NULL;
71 struct p9_trans_module *t = NULL; 85
72 86 spin_lock(&v9fs_trans_lock);
73 list_for_each(p, &v9fs_trans_list) { 87
74 t = list_entry(p, struct p9_trans_module, list); 88 list_for_each_entry(t, &v9fs_trans_list, list)
75 if (strncmp(t->name, name->from, name->to-name->from) == 0) 89 if (strncmp(t->name, name->from, name->to-name->from) == 0 &&
76 return t; 90 try_module_get(t->owner)) {
77 } 91 found = t;
78 return NULL; 92 break;
93 }
94
95 spin_unlock(&v9fs_trans_lock);
96 return found;
79} 97}
80EXPORT_SYMBOL(v9fs_match_trans); 98EXPORT_SYMBOL(v9fs_get_trans_by_name);
81 99
82/** 100/**
83 * v9fs_default_trans - returns pointer to default transport 101 * v9fs_get_default_trans - get the default transport
84 * 102 *
85 */ 103 */
86 104
87struct p9_trans_module *v9fs_default_trans(void) 105struct p9_trans_module *v9fs_get_default_trans(void)
88{ 106{
89 if (v9fs_default_transport) 107 struct p9_trans_module *t, *found = NULL;
90 return v9fs_default_transport; 108
91 else if (!list_empty(&v9fs_trans_list)) 109 spin_lock(&v9fs_trans_lock);
92 return list_first_entry(&v9fs_trans_list, 110
93 struct p9_trans_module, list); 111 list_for_each_entry(t, &v9fs_trans_list, list)
94 else 112 if (t->def && try_module_get(t->owner)) {
95 return NULL; 113 found = t;
114 break;
115 }
116
117 if (!found)
118 list_for_each_entry(t, &v9fs_trans_list, list)
119 if (try_module_get(t->owner)) {
120 found = t;
121 break;
122 }
123
124 spin_unlock(&v9fs_trans_lock);
125 return found;
96} 126}
97EXPORT_SYMBOL(v9fs_default_trans); 127EXPORT_SYMBOL(v9fs_get_default_trans);
98 128
129/**
130 * v9fs_put_trans - put trans
131 * @m: transport to put
132 *
133 */
134void v9fs_put_trans(struct p9_trans_module *m)
135{
136 if (m)
137 module_put(m->owner);
138}
99 139
100/** 140/**
101 * v9fs_init - Initialize module 141 * v9fs_init - Initialize module
@@ -120,6 +160,8 @@ static int __init init_p9(void)
120static void __exit exit_p9(void) 160static void __exit exit_p9(void)
121{ 161{
122 printk(KERN_INFO "Unloading 9P2000 support\n"); 162 printk(KERN_INFO "Unloading 9P2000 support\n");
163
164 p9_trans_fd_exit();
123} 165}
124 166
125module_init(init_p9) 167module_init(init_p9)
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index cdf137af7adc..d652baf5ff91 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -151,7 +151,6 @@ struct p9_mux_poll_task {
151 * @trans: reference to transport instance for this connection 151 * @trans: reference to transport instance for this connection
152 * @tagpool: id accounting for transactions 152 * @tagpool: id accounting for transactions
153 * @err: error state 153 * @err: error state
154 * @equeue: event wait_q (?)
155 * @req_list: accounting for requests which have been sent 154 * @req_list: accounting for requests which have been sent
156 * @unsent_req_list: accounting for requests that haven't been sent 155 * @unsent_req_list: accounting for requests that haven't been sent
157 * @rcall: current response &p9_fcall structure 156 * @rcall: current response &p9_fcall structure
@@ -178,7 +177,6 @@ struct p9_conn {
178 struct p9_trans *trans; 177 struct p9_trans *trans;
179 struct p9_idpool *tagpool; 178 struct p9_idpool *tagpool;
180 int err; 179 int err;
181 wait_queue_head_t equeue;
182 struct list_head req_list; 180 struct list_head req_list;
183 struct list_head unsent_req_list; 181 struct list_head unsent_req_list;
184 struct p9_fcall *rcall; 182 struct p9_fcall *rcall;
@@ -240,22 +238,6 @@ static int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc,
240 238
241static void p9_conn_cancel(struct p9_conn *m, int err); 239static void p9_conn_cancel(struct p9_conn *m, int err);
242 240
243static int p9_mux_global_init(void)
244{
245 int i;
246
247 for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++)
248 p9_mux_poll_tasks[i].task = NULL;
249
250 p9_mux_wq = create_workqueue("v9fs");
251 if (!p9_mux_wq) {
252 printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
253 return -ENOMEM;
254 }
255
256 return 0;
257}
258
259static u16 p9_mux_get_tag(struct p9_conn *m) 241static u16 p9_mux_get_tag(struct p9_conn *m)
260{ 242{
261 int tag; 243 int tag;
@@ -409,11 +391,11 @@ static void p9_mux_poll_stop(struct p9_conn *m)
409static struct p9_conn *p9_conn_create(struct p9_trans *trans) 391static struct p9_conn *p9_conn_create(struct p9_trans *trans)
410{ 392{
411 int i, n; 393 int i, n;
412 struct p9_conn *m, *mtmp; 394 struct p9_conn *m;
413 395
414 P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans, 396 P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans,
415 trans->msize); 397 trans->msize);
416 m = kmalloc(sizeof(struct p9_conn), GFP_KERNEL); 398 m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
417 if (!m) 399 if (!m)
418 return ERR_PTR(-ENOMEM); 400 return ERR_PTR(-ENOMEM);
419 401
@@ -424,25 +406,14 @@ static struct p9_conn *p9_conn_create(struct p9_trans *trans)
424 m->trans = trans; 406 m->trans = trans;
425 m->tagpool = p9_idpool_create(); 407 m->tagpool = p9_idpool_create();
426 if (IS_ERR(m->tagpool)) { 408 if (IS_ERR(m->tagpool)) {
427 mtmp = ERR_PTR(-ENOMEM);
428 kfree(m); 409 kfree(m);
429 return mtmp; 410 return ERR_PTR(-ENOMEM);
430 } 411 }
431 412
432 m->err = 0;
433 init_waitqueue_head(&m->equeue);
434 INIT_LIST_HEAD(&m->req_list); 413 INIT_LIST_HEAD(&m->req_list);
435 INIT_LIST_HEAD(&m->unsent_req_list); 414 INIT_LIST_HEAD(&m->unsent_req_list);
436 m->rcall = NULL;
437 m->rpos = 0;
438 m->rbuf = NULL;
439 m->wpos = m->wsize = 0;
440 m->wbuf = NULL;
441 INIT_WORK(&m->rq, p9_read_work); 415 INIT_WORK(&m->rq, p9_read_work);
442 INIT_WORK(&m->wq, p9_write_work); 416 INIT_WORK(&m->wq, p9_write_work);
443 m->wsched = 0;
444 memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
445 m->poll_task = NULL;
446 n = p9_mux_poll_start(m); 417 n = p9_mux_poll_start(m);
447 if (n) { 418 if (n) {
448 kfree(m); 419 kfree(m);
@@ -463,10 +434,8 @@ static struct p9_conn *p9_conn_create(struct p9_trans *trans)
463 for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { 434 for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
464 if (IS_ERR(m->poll_waddr[i])) { 435 if (IS_ERR(m->poll_waddr[i])) {
465 p9_mux_poll_stop(m); 436 p9_mux_poll_stop(m);
466 mtmp = (void *)m->poll_waddr; /* the error code */
467 kfree(m); 437 kfree(m);
468 m = mtmp; 438 return (void *)m->poll_waddr; /* the error code */
469 break;
470 } 439 }
471 } 440 }
472 441
@@ -483,18 +452,13 @@ static void p9_conn_destroy(struct p9_conn *m)
483{ 452{
484 P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m, 453 P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
485 m->mux_list.prev, m->mux_list.next); 454 m->mux_list.prev, m->mux_list.next);
486 p9_conn_cancel(m, -ECONNRESET);
487
488 if (!list_empty(&m->req_list)) {
489 /* wait until all processes waiting on this session exit */
490 P9_DPRINTK(P9_DEBUG_MUX,
491 "mux %p waiting for empty request queue\n", m);
492 wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
493 P9_DPRINTK(P9_DEBUG_MUX, "mux %p request queue empty: %d\n", m,
494 list_empty(&m->req_list));
495 }
496 455
497 p9_mux_poll_stop(m); 456 p9_mux_poll_stop(m);
457 cancel_work_sync(&m->rq);
458 cancel_work_sync(&m->wq);
459
460 p9_conn_cancel(m, -ECONNRESET);
461
498 m->trans = NULL; 462 m->trans = NULL;
499 p9_idpool_destroy(m->tagpool); 463 p9_idpool_destroy(m->tagpool);
500 kfree(m); 464 kfree(m);
@@ -840,8 +804,6 @@ static void p9_read_work(struct work_struct *work)
840 (*req->cb) (req, req->cba); 804 (*req->cb) (req, req->cba);
841 else 805 else
842 kfree(req->rcall); 806 kfree(req->rcall);
843
844 wake_up(&m->equeue);
845 } 807 }
846 } else { 808 } else {
847 if (err >= 0 && rcall->id != P9_RFLUSH) 809 if (err >= 0 && rcall->id != P9_RFLUSH)
@@ -908,8 +870,10 @@ static struct p9_req *p9_send_request(struct p9_conn *m,
908 else 870 else
909 n = p9_mux_get_tag(m); 871 n = p9_mux_get_tag(m);
910 872
911 if (n < 0) 873 if (n < 0) {
874 kfree(req);
912 return ERR_PTR(-ENOMEM); 875 return ERR_PTR(-ENOMEM);
876 }
913 877
914 p9_set_tag(tc, n); 878 p9_set_tag(tc, n);
915 879
@@ -984,8 +948,6 @@ static void p9_mux_flush_cb(struct p9_req *freq, void *a)
984 (*req->cb) (req, req->cba); 948 (*req->cb) (req, req->cba);
985 else 949 else
986 kfree(req->rcall); 950 kfree(req->rcall);
987
988 wake_up(&m->equeue);
989 } 951 }
990 952
991 kfree(freq->tcall); 953 kfree(freq->tcall);
@@ -1191,8 +1153,6 @@ void p9_conn_cancel(struct p9_conn *m, int err)
1191 else 1153 else
1192 kfree(req->rcall); 1154 kfree(req->rcall);
1193 } 1155 }
1194
1195 wake_up(&m->equeue);
1196} 1156}
1197 1157
1198/** 1158/**
@@ -1370,7 +1330,6 @@ p9_fd_poll(struct p9_trans *trans, struct poll_table_struct *pt)
1370{ 1330{
1371 int ret, n; 1331 int ret, n;
1372 struct p9_trans_fd *ts = NULL; 1332 struct p9_trans_fd *ts = NULL;
1373 mm_segment_t oldfs;
1374 1333
1375 if (trans && trans->status == Connected) 1334 if (trans && trans->status == Connected)
1376 ts = trans->priv; 1335 ts = trans->priv;
@@ -1384,24 +1343,17 @@ p9_fd_poll(struct p9_trans *trans, struct poll_table_struct *pt)
1384 if (!ts->wr->f_op || !ts->wr->f_op->poll) 1343 if (!ts->wr->f_op || !ts->wr->f_op->poll)
1385 return -EIO; 1344 return -EIO;
1386 1345
1387 oldfs = get_fs();
1388 set_fs(get_ds());
1389
1390 ret = ts->rd->f_op->poll(ts->rd, pt); 1346 ret = ts->rd->f_op->poll(ts->rd, pt);
1391 if (ret < 0) 1347 if (ret < 0)
1392 goto end; 1348 return ret;
1393 1349
1394 if (ts->rd != ts->wr) { 1350 if (ts->rd != ts->wr) {
1395 n = ts->wr->f_op->poll(ts->wr, pt); 1351 n = ts->wr->f_op->poll(ts->wr, pt);
1396 if (n < 0) { 1352 if (n < 0)
1397 ret = n; 1353 return n;
1398 goto end;
1399 }
1400 ret = (ret & ~POLLOUT) | (n & ~POLLIN); 1354 ret = (ret & ~POLLOUT) | (n & ~POLLIN);
1401 } 1355 }
1402 1356
1403end:
1404 set_fs(oldfs);
1405 return ret; 1357 return ret;
1406} 1358}
1407 1359
@@ -1629,6 +1581,7 @@ static struct p9_trans_module p9_tcp_trans = {
1629 .maxsize = MAX_SOCK_BUF, 1581 .maxsize = MAX_SOCK_BUF,
1630 .def = 1, 1582 .def = 1,
1631 .create = p9_trans_create_tcp, 1583 .create = p9_trans_create_tcp,
1584 .owner = THIS_MODULE,
1632}; 1585};
1633 1586
1634static struct p9_trans_module p9_unix_trans = { 1587static struct p9_trans_module p9_unix_trans = {
@@ -1636,6 +1589,7 @@ static struct p9_trans_module p9_unix_trans = {
1636 .maxsize = MAX_SOCK_BUF, 1589 .maxsize = MAX_SOCK_BUF,
1637 .def = 0, 1590 .def = 0,
1638 .create = p9_trans_create_unix, 1591 .create = p9_trans_create_unix,
1592 .owner = THIS_MODULE,
1639}; 1593};
1640 1594
1641static struct p9_trans_module p9_fd_trans = { 1595static struct p9_trans_module p9_fd_trans = {
@@ -1643,14 +1597,20 @@ static struct p9_trans_module p9_fd_trans = {
1643 .maxsize = MAX_SOCK_BUF, 1597 .maxsize = MAX_SOCK_BUF,
1644 .def = 0, 1598 .def = 0,
1645 .create = p9_trans_create_fd, 1599 .create = p9_trans_create_fd,
1600 .owner = THIS_MODULE,
1646}; 1601};
1647 1602
1648int p9_trans_fd_init(void) 1603int p9_trans_fd_init(void)
1649{ 1604{
1650 int ret = p9_mux_global_init(); 1605 int i;
1651 if (ret) { 1606
1652 printk(KERN_WARNING "9p: starting mux failed\n"); 1607 for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++)
1653 return ret; 1608 p9_mux_poll_tasks[i].task = NULL;
1609
1610 p9_mux_wq = create_workqueue("v9fs");
1611 if (!p9_mux_wq) {
1612 printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
1613 return -ENOMEM;
1654 } 1614 }
1655 1615
1656 v9fs_register_trans(&p9_tcp_trans); 1616 v9fs_register_trans(&p9_tcp_trans);
@@ -1659,4 +1619,12 @@ int p9_trans_fd_init(void)
1659 1619
1660 return 0; 1620 return 0;
1661} 1621}
1662EXPORT_SYMBOL(p9_trans_fd_init); 1622
1623void p9_trans_fd_exit(void)
1624{
1625 v9fs_unregister_trans(&p9_tcp_trans);
1626 v9fs_unregister_trans(&p9_unix_trans);
1627 v9fs_unregister_trans(&p9_fd_trans);
1628
1629 destroy_workqueue(p9_mux_wq);
1630}
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 42adc052b149..94912e077a55 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -528,6 +528,7 @@ static struct p9_trans_module p9_virtio_trans = {
528 .create = p9_virtio_create, 528 .create = p9_virtio_create,
529 .maxsize = PAGE_SIZE*16, 529 .maxsize = PAGE_SIZE*16,
530 .def = 0, 530 .def = 0,
531 .owner = THIS_MODULE,
531}; 532};
532 533
533/* The standard init function */ 534/* The standard init function */
@@ -545,6 +546,7 @@ static int __init p9_virtio_init(void)
545static void __exit p9_virtio_cleanup(void) 546static void __exit p9_virtio_cleanup(void)
546{ 547{
547 unregister_virtio_driver(&p9_virtio_drv); 548 unregister_virtio_driver(&p9_virtio_drv);
549 v9fs_unregister_trans(&p9_virtio_trans);
548} 550}
549 551
550module_init(p9_virtio_init); 552module_init(p9_virtio_init);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 01c83e2a4c19..28c71574a781 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -317,6 +317,9 @@ void ax25_destroy_socket(ax25_cb *ax25)
317 /* Queue the unaccepted socket for death */ 317 /* Queue the unaccepted socket for death */
318 sock_orphan(skb->sk); 318 sock_orphan(skb->sk);
319 319
320 /* 9A4GL: hack to release unaccepted sockets */
321 skb->sk->sk_state = TCP_LISTEN;
322
320 ax25_start_heartbeat(sax25); 323 ax25_start_heartbeat(sax25);
321 sax25->state = AX25_STATE_0; 324 sax25->state = AX25_STATE_0;
322 } 325 }
diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c
index cdc7e751ef36..96e4b9273250 100644
--- a/net/ax25/ax25_std_timer.c
+++ b/net/ax25/ax25_std_timer.c
@@ -39,9 +39,11 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
39 39
40 switch (ax25->state) { 40 switch (ax25->state) {
41 case AX25_STATE_0: 41 case AX25_STATE_0:
42 if (!sk || 42 /* Magic here: If we listen() and a new link dies before it
43 sock_flag(sk, SOCK_DESTROY) || 43 is accepted() it isn't 'dead' so doesn't get removed. */
44 sock_flag(sk, SOCK_DEAD)) { 44 if (!sk || sock_flag(sk, SOCK_DESTROY) ||
45 (sk->sk_state == TCP_LISTEN &&
46 sock_flag(sk, SOCK_DEAD))) {
45 if (sk) { 47 if (sk) {
46 sock_hold(sk); 48 sock_hold(sk);
47 ax25_destroy_socket(ax25); 49 ax25_destroy_socket(ax25);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index f5b21cb93699..278a3ace14f6 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -164,6 +164,9 @@ static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *
164{ 164{
165 int ret; 165 int ret;
166 166
167 if (!test_bit(HCI_UP, &hdev->flags))
168 return -ENETDOWN;
169
167 /* Serialize all requests */ 170 /* Serialize all requests */
168 hci_req_lock(hdev); 171 hci_req_lock(hdev);
169 ret = __hci_request(hdev, req, opt, timeout); 172 ret = __hci_request(hdev, req, opt, timeout);
diff --git a/net/core/dev.c b/net/core/dev.c
index e719ed29310f..0ae08d3f57e7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -122,6 +122,7 @@
122#include <linux/if_arp.h> 122#include <linux/if_arp.h>
123#include <linux/if_vlan.h> 123#include <linux/if_vlan.h>
124#include <linux/ip.h> 124#include <linux/ip.h>
125#include <net/ip.h>
125#include <linux/ipv6.h> 126#include <linux/ipv6.h>
126#include <linux/in.h> 127#include <linux/in.h>
127#include <linux/jhash.h> 128#include <linux/jhash.h>
@@ -1667,7 +1668,7 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
1667{ 1668{
1668 u32 addr1, addr2, ports; 1669 u32 addr1, addr2, ports;
1669 u32 hash, ihl; 1670 u32 hash, ihl;
1670 u8 ip_proto; 1671 u8 ip_proto = 0;
1671 1672
1672 if (unlikely(!simple_tx_hashrnd_initialized)) { 1673 if (unlikely(!simple_tx_hashrnd_initialized)) {
1673 get_random_bytes(&simple_tx_hashrnd, 4); 1674 get_random_bytes(&simple_tx_hashrnd, 4);
@@ -1676,7 +1677,8 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
1676 1677
1677 switch (skb->protocol) { 1678 switch (skb->protocol) {
1678 case __constant_htons(ETH_P_IP): 1679 case __constant_htons(ETH_P_IP):
1679 ip_proto = ip_hdr(skb)->protocol; 1680 if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
1681 ip_proto = ip_hdr(skb)->protocol;
1680 addr1 = ip_hdr(skb)->saddr; 1682 addr1 = ip_hdr(skb)->saddr;
1681 addr2 = ip_hdr(skb)->daddr; 1683 addr2 = ip_hdr(skb)->daddr;
1682 ihl = ip_hdr(skb)->ihl; 1684 ihl = ip_hdr(skb)->ihl;
@@ -2916,6 +2918,12 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
2916 return 0; 2918 return 0;
2917} 2919}
2918 2920
2921static void dev_change_rx_flags(struct net_device *dev, int flags)
2922{
2923 if (dev->flags & IFF_UP && dev->change_rx_flags)
2924 dev->change_rx_flags(dev, flags);
2925}
2926
2919static int __dev_set_promiscuity(struct net_device *dev, int inc) 2927static int __dev_set_promiscuity(struct net_device *dev, int inc)
2920{ 2928{
2921 unsigned short old_flags = dev->flags; 2929 unsigned short old_flags = dev->flags;
@@ -2953,8 +2961,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
2953 current->uid, current->gid, 2961 current->uid, current->gid,
2954 audit_get_sessionid(current)); 2962 audit_get_sessionid(current));
2955 2963
2956 if (dev->change_rx_flags) 2964 dev_change_rx_flags(dev, IFF_PROMISC);
2957 dev->change_rx_flags(dev, IFF_PROMISC);
2958 } 2965 }
2959 return 0; 2966 return 0;
2960} 2967}
@@ -3020,8 +3027,7 @@ int dev_set_allmulti(struct net_device *dev, int inc)
3020 } 3027 }
3021 } 3028 }
3022 if (dev->flags ^ old_flags) { 3029 if (dev->flags ^ old_flags) {
3023 if (dev->change_rx_flags) 3030 dev_change_rx_flags(dev, IFF_ALLMULTI);
3024 dev->change_rx_flags(dev, IFF_ALLMULTI);
3025 dev_set_rx_mode(dev); 3031 dev_set_rx_mode(dev);
3026 } 3032 }
3027 return 0; 3033 return 0;
@@ -3345,8 +3351,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
3345 * Load in the correct multicast list now the flags have changed. 3351 * Load in the correct multicast list now the flags have changed.
3346 */ 3352 */
3347 3353
3348 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST) 3354 if ((old_flags ^ flags) & IFF_MULTICAST)
3349 dev->change_rx_flags(dev, IFF_MULTICAST); 3355 dev_change_rx_flags(dev, IFF_MULTICAST);
3350 3356
3351 dev_set_rx_mode(dev); 3357 dev_set_rx_mode(dev);
3352 3358
@@ -3806,14 +3812,11 @@ static int dev_new_index(struct net *net)
3806} 3812}
3807 3813
3808/* Delayed registration/unregisteration */ 3814/* Delayed registration/unregisteration */
3809static DEFINE_SPINLOCK(net_todo_list_lock);
3810static LIST_HEAD(net_todo_list); 3815static LIST_HEAD(net_todo_list);
3811 3816
3812static void net_set_todo(struct net_device *dev) 3817static void net_set_todo(struct net_device *dev)
3813{ 3818{
3814 spin_lock(&net_todo_list_lock);
3815 list_add_tail(&dev->todo_list, &net_todo_list); 3819 list_add_tail(&dev->todo_list, &net_todo_list);
3816 spin_unlock(&net_todo_list_lock);
3817} 3820}
3818 3821
3819static void rollback_registered(struct net_device *dev) 3822static void rollback_registered(struct net_device *dev)
@@ -4140,33 +4143,24 @@ static void netdev_wait_allrefs(struct net_device *dev)
4140 * free_netdev(y1); 4143 * free_netdev(y1);
4141 * free_netdev(y2); 4144 * free_netdev(y2);
4142 * 4145 *
4143 * We are invoked by rtnl_unlock() after it drops the semaphore. 4146 * We are invoked by rtnl_unlock().
4144 * This allows us to deal with problems: 4147 * This allows us to deal with problems:
4145 * 1) We can delete sysfs objects which invoke hotplug 4148 * 1) We can delete sysfs objects which invoke hotplug
4146 * without deadlocking with linkwatch via keventd. 4149 * without deadlocking with linkwatch via keventd.
4147 * 2) Since we run with the RTNL semaphore not held, we can sleep 4150 * 2) Since we run with the RTNL semaphore not held, we can sleep
4148 * safely in order to wait for the netdev refcnt to drop to zero. 4151 * safely in order to wait for the netdev refcnt to drop to zero.
4152 *
4153 * We must not return until all unregister events added during
4154 * the interval the lock was held have been completed.
4149 */ 4155 */
4150static DEFINE_MUTEX(net_todo_run_mutex);
4151void netdev_run_todo(void) 4156void netdev_run_todo(void)
4152{ 4157{
4153 struct list_head list; 4158 struct list_head list;
4154 4159
4155 /* Need to guard against multiple cpu's getting out of order. */
4156 mutex_lock(&net_todo_run_mutex);
4157
4158 /* Not safe to do outside the semaphore. We must not return
4159 * until all unregister events invoked by the local processor
4160 * have been completed (either by this todo run, or one on
4161 * another cpu).
4162 */
4163 if (list_empty(&net_todo_list))
4164 goto out;
4165
4166 /* Snapshot list, allow later requests */ 4160 /* Snapshot list, allow later requests */
4167 spin_lock(&net_todo_list_lock);
4168 list_replace_init(&net_todo_list, &list); 4161 list_replace_init(&net_todo_list, &list);
4169 spin_unlock(&net_todo_list_lock); 4162
4163 __rtnl_unlock();
4170 4164
4171 while (!list_empty(&list)) { 4165 while (!list_empty(&list)) {
4172 struct net_device *dev 4166 struct net_device *dev
@@ -4198,9 +4192,6 @@ void netdev_run_todo(void)
4198 /* Free network device */ 4192 /* Free network device */
4199 kobject_put(&dev->dev.kobj); 4193 kobject_put(&dev->dev.kobj);
4200 } 4194 }
4201
4202out:
4203 mutex_unlock(&net_todo_run_mutex);
4204} 4195}
4205 4196
4206static struct net_device_stats *internal_stats(struct net_device *dev) 4197static struct net_device_stats *internal_stats(struct net_device *dev)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 71edb8b36341..d6381c2a4693 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -73,7 +73,7 @@ void __rtnl_unlock(void)
73 73
74void rtnl_unlock(void) 74void rtnl_unlock(void)
75{ 75{
76 mutex_unlock(&rtnl_mutex); 76 /* This fellow will unlock it for us. */
77 netdev_run_todo(); 77 netdev_run_todo();
78} 78}
79 79
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index bfcbd148a89d..c209e054a634 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -150,7 +150,11 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
150 ca->snd_cwnd_cents -= 128; 150 ca->snd_cwnd_cents -= 128;
151 tp->snd_cwnd_cnt = 0; 151 tp->snd_cwnd_cnt = 0;
152 } 152 }
153 153 /* check when cwnd has not been incremented for a while */
154 if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tp->snd_cwnd) {
155 tp->snd_cwnd++;
156 tp->snd_cwnd_cnt = 0;
157 }
154 /* clamp down slowstart cwnd to ssthresh value. */ 158 /* clamp down slowstart cwnd to ssthresh value. */
155 if (is_slowstart) 159 if (is_slowstart)
156 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 160 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 67ccce2a96bd..7abc6b80d47d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4879,7 +4879,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
4879 goto no_ack; 4879 goto no_ack;
4880 } 4880 }
4881 4881
4882 __tcp_ack_snd_check(sk, 0); 4882 if (!copied_early || tp->rcv_nxt != tp->rcv_wup)
4883 __tcp_ack_snd_check(sk, 0);
4883no_ack: 4884no_ack:
4884#ifdef CONFIG_NET_DMA 4885#ifdef CONFIG_NET_DMA
4885 if (copied_early) 4886 if (copied_early)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1b4fee20fc93..011478e46c40 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -618,7 +618,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
618 ]; 618 ];
619 } rep; 619 } rep;
620 struct ip_reply_arg arg; 620 struct ip_reply_arg arg;
621 struct net *net = dev_net(skb->dev); 621 struct net *net = dev_net(skb->dst->dev);
622 622
623 memset(&rep.th, 0, sizeof(struct tcphdr)); 623 memset(&rep.th, 0, sizeof(struct tcphdr));
624 memset(&arg, 0, sizeof(arg)); 624 memset(&arg, 0, sizeof(arg));
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8e42fbbd5761..57e26fa66185 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -951,6 +951,27 @@ int udp_disconnect(struct sock *sk, int flags)
951 return 0; 951 return 0;
952} 952}
953 953
954static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
955{
956 int is_udplite = IS_UDPLITE(sk);
957 int rc;
958
959 if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
960 /* Note that an ENOMEM error is charged twice */
961 if (rc == -ENOMEM)
962 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
963 is_udplite);
964 goto drop;
965 }
966
967 return 0;
968
969drop:
970 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
971 kfree_skb(skb);
972 return -1;
973}
974
954/* returns: 975/* returns:
955 * -1: error 976 * -1: error
956 * 0: success 977 * 0: success
@@ -989,9 +1010,7 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
989 up->encap_rcv != NULL) { 1010 up->encap_rcv != NULL) {
990 int ret; 1011 int ret;
991 1012
992 bh_unlock_sock(sk);
993 ret = (*up->encap_rcv)(sk, skb); 1013 ret = (*up->encap_rcv)(sk, skb);
994 bh_lock_sock(sk);
995 if (ret <= 0) { 1014 if (ret <= 0) {
996 UDP_INC_STATS_BH(sock_net(sk), 1015 UDP_INC_STATS_BH(sock_net(sk),
997 UDP_MIB_INDATAGRAMS, 1016 UDP_MIB_INDATAGRAMS,
@@ -1044,17 +1063,16 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
1044 goto drop; 1063 goto drop;
1045 } 1064 }
1046 1065
1047 if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { 1066 rc = 0;
1048 /* Note that an ENOMEM error is charged twice */
1049 if (rc == -ENOMEM) {
1050 UDP_INC_STATS_BH(sock_net(sk),
1051 UDP_MIB_RCVBUFERRORS, is_udplite);
1052 atomic_inc(&sk->sk_drops);
1053 }
1054 goto drop;
1055 }
1056 1067
1057 return 0; 1068 bh_lock_sock(sk);
1069 if (!sock_owned_by_user(sk))
1070 rc = __udp_queue_rcv_skb(sk, skb);
1071 else
1072 sk_add_backlog(sk, skb);
1073 bh_unlock_sock(sk);
1074
1075 return rc;
1058 1076
1059drop: 1077drop:
1060 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 1078 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
@@ -1092,15 +1110,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
1092 skb1 = skb_clone(skb, GFP_ATOMIC); 1110 skb1 = skb_clone(skb, GFP_ATOMIC);
1093 1111
1094 if (skb1) { 1112 if (skb1) {
1095 int ret = 0; 1113 int ret = udp_queue_rcv_skb(sk, skb1);
1096
1097 bh_lock_sock(sk);
1098 if (!sock_owned_by_user(sk))
1099 ret = udp_queue_rcv_skb(sk, skb1);
1100 else
1101 sk_add_backlog(sk, skb1);
1102 bh_unlock_sock(sk);
1103
1104 if (ret > 0) 1114 if (ret > 0)
1105 /* we should probably re-process instead 1115 /* we should probably re-process instead
1106 * of dropping packets here. */ 1116 * of dropping packets here. */
@@ -1195,13 +1205,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
1195 uh->dest, inet_iif(skb), udptable); 1205 uh->dest, inet_iif(skb), udptable);
1196 1206
1197 if (sk != NULL) { 1207 if (sk != NULL) {
1198 int ret = 0; 1208 int ret = udp_queue_rcv_skb(sk, skb);
1199 bh_lock_sock(sk);
1200 if (!sock_owned_by_user(sk))
1201 ret = udp_queue_rcv_skb(sk, skb);
1202 else
1203 sk_add_backlog(sk, skb);
1204 bh_unlock_sock(sk);
1205 sock_put(sk); 1209 sock_put(sk);
1206 1210
1207 /* a return value > 0 means to resubmit the input, but 1211 /* a return value > 0 means to resubmit the input, but
@@ -1494,7 +1498,7 @@ struct proto udp_prot = {
1494 .sendmsg = udp_sendmsg, 1498 .sendmsg = udp_sendmsg,
1495 .recvmsg = udp_recvmsg, 1499 .recvmsg = udp_recvmsg,
1496 .sendpage = udp_sendpage, 1500 .sendpage = udp_sendpage,
1497 .backlog_rcv = udp_queue_rcv_skb, 1501 .backlog_rcv = __udp_queue_rcv_skb,
1498 .hash = udp_lib_hash, 1502 .hash = udp_lib_hash,
1499 .unhash = udp_lib_unhash, 1503 .unhash = udp_lib_unhash,
1500 .get_port = udp_v4_get_port, 1504 .get_port = udp_v4_get_port,
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index 62e39ace0588..26654b26d7fa 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -97,8 +97,6 @@ hbh_mt6(const struct sk_buff *skb, const struct net_device *in,
97 hdrlen -= 2; 97 hdrlen -= 2;
98 if (!(optinfo->flags & IP6T_OPTS_OPTS)) { 98 if (!(optinfo->flags & IP6T_OPTS_OPTS)) {
99 return ret; 99 return ret;
100 } else if (optinfo->flags & IP6T_OPTS_NSTRICT) {
101 pr_debug("Not strict - not implemented");
102 } else { 100 } else {
103 pr_debug("Strict "); 101 pr_debug("Strict ");
104 pr_debug("#%d ", optinfo->optsnr); 102 pr_debug("#%d ", optinfo->optsnr);
@@ -177,6 +175,12 @@ hbh_mt6_check(const char *tablename, const void *entry,
177 pr_debug("ip6t_opts: unknown flags %X\n", optsinfo->invflags); 175 pr_debug("ip6t_opts: unknown flags %X\n", optsinfo->invflags);
178 return false; 176 return false;
179 } 177 }
178
179 if (optsinfo->flags & IP6T_OPTS_NSTRICT) {
180 pr_debug("ip6t_opts: Not strict - not implemented");
181 return false;
182 }
183
180 return true; 184 return true;
181} 185}
182 186
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9af6115f0f50..63442a1e741c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2688,6 +2688,8 @@ int __init ip6_route_init(void)
2688 if (ret) 2688 if (ret)
2689 goto out_kmem_cache; 2689 goto out_kmem_cache;
2690 2690
2691 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
2692
2691 /* Registering of the loopback is done before this portion of code, 2693 /* Registering of the loopback is done before this portion of code,
2692 * the loopback reference in rt6_info will not be taken, do it 2694 * the loopback reference in rt6_info will not be taken, do it
2693 * manually for init_net */ 2695 * manually for init_net */
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b585c850a89a..10e22fd48222 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1050,7 +1050,7 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
1050 struct tcphdr *th = tcp_hdr(skb), *t1; 1050 struct tcphdr *th = tcp_hdr(skb), *t1;
1051 struct sk_buff *buff; 1051 struct sk_buff *buff;
1052 struct flowi fl; 1052 struct flowi fl;
1053 struct net *net = dev_net(skb->dev); 1053 struct net *net = dev_net(skb->dst->dev);
1054 struct sock *ctl_sk = net->ipv6.tcp_sk; 1054 struct sock *ctl_sk = net->ipv6.tcp_sk;
1055 unsigned int tot_len = sizeof(struct tcphdr); 1055 unsigned int tot_len = sizeof(struct tcphdr);
1056 __be32 *topt; 1056 __be32 *topt;
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 705959b31e24..d7b54b5bfa69 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -524,7 +524,6 @@ static int iucv_enable(void)
524 get_online_cpus(); 524 get_online_cpus();
525 for_each_online_cpu(cpu) 525 for_each_online_cpu(cpu)
526 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); 526 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
527 preempt_enable();
528 if (cpus_empty(iucv_buffer_cpumask)) 527 if (cpus_empty(iucv_buffer_cpumask))
529 /* No cpu could declare an iucv buffer. */ 528 /* No cpu could declare an iucv buffer. */
530 goto out_path; 529 goto out_path;
@@ -547,7 +546,9 @@ out:
547 */ 546 */
548static void iucv_disable(void) 547static void iucv_disable(void)
549{ 548{
549 get_online_cpus();
550 on_each_cpu(iucv_retrieve_cpu, NULL, 1); 550 on_each_cpu(iucv_retrieve_cpu, NULL, 1);
551 put_online_cpus();
551 kfree(iucv_path_table); 552 kfree(iucv_path_table);
552} 553}
553 554
diff --git a/net/key/af_key.c b/net/key/af_key.c
index d628df97e02e..b7f5a1c353ee 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -73,22 +73,18 @@ static int pfkey_can_dump(struct sock *sk)
73 return 0; 73 return 0;
74} 74}
75 75
76static int pfkey_do_dump(struct pfkey_sock *pfk) 76static void pfkey_terminate_dump(struct pfkey_sock *pfk)
77{ 77{
78 int rc; 78 if (pfk->dump.dump) {
79 79 pfk->dump.done(pfk);
80 rc = pfk->dump.dump(pfk); 80 pfk->dump.dump = NULL;
81 if (rc == -ENOBUFS) 81 pfk->dump.done = NULL;
82 return 0; 82 }
83
84 pfk->dump.done(pfk);
85 pfk->dump.dump = NULL;
86 pfk->dump.done = NULL;
87 return rc;
88} 83}
89 84
90static void pfkey_sock_destruct(struct sock *sk) 85static void pfkey_sock_destruct(struct sock *sk)
91{ 86{
87 pfkey_terminate_dump(pfkey_sk(sk));
92 skb_queue_purge(&sk->sk_receive_queue); 88 skb_queue_purge(&sk->sk_receive_queue);
93 89
94 if (!sock_flag(sk, SOCK_DEAD)) { 90 if (!sock_flag(sk, SOCK_DEAD)) {
@@ -310,6 +306,18 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
310 return err; 306 return err;
311} 307}
312 308
309static int pfkey_do_dump(struct pfkey_sock *pfk)
310{
311 int rc;
312
313 rc = pfk->dump.dump(pfk);
314 if (rc == -ENOBUFS)
315 return 0;
316
317 pfkey_terminate_dump(pfk);
318 return rc;
319}
320
313static inline void pfkey_hdr_dup(struct sadb_msg *new, struct sadb_msg *orig) 321static inline void pfkey_hdr_dup(struct sadb_msg *new, struct sadb_msg *orig)
314{ 322{
315 *new = *orig; 323 *new = *orig;
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 532e4faa29f7..9f1ea4a27b35 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -525,6 +525,7 @@ static int nr_release(struct socket *sock)
525 if (sk == NULL) return 0; 525 if (sk == NULL) return 0;
526 526
527 sock_hold(sk); 527 sock_hold(sk);
528 sock_orphan(sk);
528 lock_sock(sk); 529 lock_sock(sk);
529 nr = nr_sk(sk); 530 nr = nr_sk(sk);
530 531
@@ -548,7 +549,6 @@ static int nr_release(struct socket *sock)
548 sk->sk_state = TCP_CLOSE; 549 sk->sk_state = TCP_CLOSE;
549 sk->sk_shutdown |= SEND_SHUTDOWN; 550 sk->sk_shutdown |= SEND_SHUTDOWN;
550 sk->sk_state_change(sk); 551 sk->sk_state_change(sk);
551 sock_orphan(sk);
552 sock_set_flag(sk, SOCK_DESTROY); 552 sock_set_flag(sk, SOCK_DESTROY);
553 break; 553 break;
554 554
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 9634091ee2f0..ec0a0839ce51 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -215,10 +215,9 @@ static void dev_watchdog(unsigned long arg)
215 time_after(jiffies, (dev->trans_start + 215 time_after(jiffies, (dev->trans_start +
216 dev->watchdog_timeo))) { 216 dev->watchdog_timeo))) {
217 char drivername[64]; 217 char drivername[64];
218 printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n", 218 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
219 dev->name, netdev_drivername(dev, drivername, 64)); 219 dev->name, netdev_drivername(dev, drivername, 64));
220 dev->tx_timeout(dev); 220 dev->tx_timeout(dev);
221 WARN_ON_ONCE(1);
222 } 221 }
223 if (!mod_timer(&dev->watchdog_timer, 222 if (!mod_timer(&dev->watchdog_timer,
224 round_jiffies(jiffies + 223 round_jiffies(jiffies +
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 8472b8b349c4..abd51cef2413 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -599,11 +599,12 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
599 /* Check to see if this is a duplicate. */ 599 /* Check to see if this is a duplicate. */
600 peer = sctp_assoc_lookup_paddr(asoc, addr); 600 peer = sctp_assoc_lookup_paddr(asoc, addr);
601 if (peer) { 601 if (peer) {
602 /* An UNKNOWN state is only set on transports added by
603 * user in sctp_connectx() call. Such transports should be
604 * considered CONFIRMED per RFC 4960, Section 5.4.
605 */
602 if (peer->state == SCTP_UNKNOWN) { 606 if (peer->state == SCTP_UNKNOWN) {
603 if (peer_state == SCTP_ACTIVE) 607 peer->state = SCTP_ACTIVE;
604 peer->state = SCTP_ACTIVE;
605 if (peer_state == SCTP_UNCONFIRMED)
606 peer->state = SCTP_UNCONFIRMED;
607 } 608 }
608 return peer; 609 return peer;
609 } 610 }
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 0dc4a7dfb234..225c7123c41f 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -533,7 +533,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
533 if (!(dst->dev->features & NETIF_F_NO_CSUM)) { 533 if (!(dst->dev->features & NETIF_F_NO_CSUM)) {
534 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); 534 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
535 crc32 = sctp_end_cksum(crc32); 535 crc32 = sctp_end_cksum(crc32);
536 } 536 } else
537 nskb->ip_summed = CHECKSUM_UNNECESSARY;
537 538
538 /* 3) Put the resultant value into the checksum field in the 539 /* 3) Put the resultant value into the checksum field in the
539 * common header, and leave the rest of the bits unchanged. 540 * common header, and leave the rest of the bits unchanged.
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index e8ca4e54981f..d68869f966c3 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1012,6 +1012,29 @@ end:
1012 return retval; 1012 return retval;
1013} 1013}
1014 1014
1015struct sctp_chunk *sctp_make_violation_paramlen(
1016 const struct sctp_association *asoc,
1017 const struct sctp_chunk *chunk,
1018 struct sctp_paramhdr *param)
1019{
1020 struct sctp_chunk *retval;
1021 static const char error[] = "The following parameter had invalid length:";
1022 size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t) +
1023 sizeof(sctp_paramhdr_t);
1024
1025 retval = sctp_make_abort(asoc, chunk, payload_len);
1026 if (!retval)
1027 goto nodata;
1028
1029 sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION,
1030 sizeof(error) + sizeof(sctp_paramhdr_t));
1031 sctp_addto_chunk(retval, sizeof(error), error);
1032 sctp_addto_param(retval, sizeof(sctp_paramhdr_t), param);
1033
1034nodata:
1035 return retval;
1036}
1037
1015/* Make a HEARTBEAT chunk. */ 1038/* Make a HEARTBEAT chunk. */
1016struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, 1039struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
1017 const struct sctp_transport *transport, 1040 const struct sctp_transport *transport,
@@ -1782,11 +1805,6 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
1782 const struct sctp_chunk *chunk, 1805 const struct sctp_chunk *chunk,
1783 struct sctp_chunk **errp) 1806 struct sctp_chunk **errp)
1784{ 1807{
1785 static const char error[] = "The following parameter had invalid length:";
1786 size_t payload_len = WORD_ROUND(sizeof(error)) +
1787 sizeof(sctp_paramhdr_t);
1788
1789
1790 /* This is a fatal error. Any accumulated non-fatal errors are 1808 /* This is a fatal error. Any accumulated non-fatal errors are
1791 * not reported. 1809 * not reported.
1792 */ 1810 */
@@ -1794,14 +1812,7 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
1794 sctp_chunk_free(*errp); 1812 sctp_chunk_free(*errp);
1795 1813
1796 /* Create an error chunk and fill it in with our payload. */ 1814 /* Create an error chunk and fill it in with our payload. */
1797 *errp = sctp_make_op_error_space(asoc, chunk, payload_len); 1815 *errp = sctp_make_violation_paramlen(asoc, chunk, param);
1798
1799 if (*errp) {
1800 sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
1801 sizeof(error) + sizeof(sctp_paramhdr_t));
1802 sctp_addto_chunk(*errp, sizeof(error), error);
1803 sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
1804 }
1805 1816
1806 return 0; 1817 return 0;
1807} 1818}
@@ -1886,11 +1897,13 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
1886 /* if the peer reports AUTH, assume that he 1897 /* if the peer reports AUTH, assume that he
1887 * supports AUTH. 1898 * supports AUTH.
1888 */ 1899 */
1889 asoc->peer.auth_capable = 1; 1900 if (sctp_auth_enable)
1901 asoc->peer.auth_capable = 1;
1890 break; 1902 break;
1891 case SCTP_CID_ASCONF: 1903 case SCTP_CID_ASCONF:
1892 case SCTP_CID_ASCONF_ACK: 1904 case SCTP_CID_ASCONF_ACK:
1893 asoc->peer.asconf_capable = 1; 1905 if (sctp_addip_enable)
1906 asoc->peer.asconf_capable = 1;
1894 break; 1907 break;
1895 default: 1908 default:
1896 break; 1909 break;
@@ -2319,12 +2332,10 @@ clean_up:
2319 /* Release the transport structures. */ 2332 /* Release the transport structures. */
2320 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { 2333 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
2321 transport = list_entry(pos, struct sctp_transport, transports); 2334 transport = list_entry(pos, struct sctp_transport, transports);
2322 list_del_init(pos); 2335 if (transport->state != SCTP_ACTIVE)
2323 sctp_transport_free(transport); 2336 sctp_assoc_rm_peer(asoc, transport);
2324 } 2337 }
2325 2338
2326 asoc->peer.transport_count = 0;
2327
2328nomem: 2339nomem:
2329 return 0; 2340 return 0;
2330} 2341}
@@ -2460,6 +2471,9 @@ do_addr_param:
2460 break; 2471 break;
2461 2472
2462 case SCTP_PARAM_SET_PRIMARY: 2473 case SCTP_PARAM_SET_PRIMARY:
2474 if (!sctp_addip_enable)
2475 goto fall_through;
2476
2463 addr_param = param.v + sizeof(sctp_addip_param_t); 2477 addr_param = param.v + sizeof(sctp_addip_param_t);
2464 2478
2465 af = sctp_get_af_specific(param_type2af(param.p->type)); 2479 af = sctp_get_af_specific(param_type2af(param.p->type));
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 8848d329aa2c..7c622af2ce55 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -119,7 +119,7 @@ static sctp_disposition_t sctp_sf_violation_paramlen(
119 const struct sctp_endpoint *ep, 119 const struct sctp_endpoint *ep,
120 const struct sctp_association *asoc, 120 const struct sctp_association *asoc,
121 const sctp_subtype_t type, 121 const sctp_subtype_t type,
122 void *arg, 122 void *arg, void *ext,
123 sctp_cmd_seq_t *commands); 123 sctp_cmd_seq_t *commands);
124 124
125static sctp_disposition_t sctp_sf_violation_ctsn( 125static sctp_disposition_t sctp_sf_violation_ctsn(
@@ -3425,7 +3425,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3425 addr_param = (union sctp_addr_param *)hdr->params; 3425 addr_param = (union sctp_addr_param *)hdr->params;
3426 length = ntohs(addr_param->p.length); 3426 length = ntohs(addr_param->p.length);
3427 if (length < sizeof(sctp_paramhdr_t)) 3427 if (length < sizeof(sctp_paramhdr_t))
3428 return sctp_sf_violation_paramlen(ep, asoc, type, 3428 return sctp_sf_violation_paramlen(ep, asoc, type, arg,
3429 (void *)addr_param, commands); 3429 (void *)addr_param, commands);
3430 3430
3431 /* Verify the ASCONF chunk before processing it. */ 3431 /* Verify the ASCONF chunk before processing it. */
@@ -3433,8 +3433,8 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3433 (sctp_paramhdr_t *)((void *)addr_param + length), 3433 (sctp_paramhdr_t *)((void *)addr_param + length),
3434 (void *)chunk->chunk_end, 3434 (void *)chunk->chunk_end,
3435 &err_param)) 3435 &err_param))
3436 return sctp_sf_violation_paramlen(ep, asoc, type, 3436 return sctp_sf_violation_paramlen(ep, asoc, type, arg,
3437 (void *)&err_param, commands); 3437 (void *)err_param, commands);
3438 3438
3439 /* ADDIP 5.2 E1) Compare the value of the serial number to the value 3439 /* ADDIP 5.2 E1) Compare the value of the serial number to the value
3440 * the endpoint stored in a new association variable 3440 * the endpoint stored in a new association variable
@@ -3542,8 +3542,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3542 (sctp_paramhdr_t *)addip_hdr->params, 3542 (sctp_paramhdr_t *)addip_hdr->params,
3543 (void *)asconf_ack->chunk_end, 3543 (void *)asconf_ack->chunk_end,
3544 &err_param)) 3544 &err_param))
3545 return sctp_sf_violation_paramlen(ep, asoc, type, 3545 return sctp_sf_violation_paramlen(ep, asoc, type, arg,
3546 (void *)&err_param, commands); 3546 (void *)err_param, commands);
3547 3547
3548 if (last_asconf) { 3548 if (last_asconf) {
3549 addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; 3549 addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr;
@@ -4240,12 +4240,38 @@ static sctp_disposition_t sctp_sf_violation_paramlen(
4240 const struct sctp_endpoint *ep, 4240 const struct sctp_endpoint *ep,
4241 const struct sctp_association *asoc, 4241 const struct sctp_association *asoc,
4242 const sctp_subtype_t type, 4242 const sctp_subtype_t type,
4243 void *arg, 4243 void *arg, void *ext,
4244 sctp_cmd_seq_t *commands) { 4244 sctp_cmd_seq_t *commands)
4245 static const char err_str[] = "The following parameter had invalid length:"; 4245{
4246 struct sctp_chunk *chunk = arg;
4247 struct sctp_paramhdr *param = ext;
4248 struct sctp_chunk *abort = NULL;
4246 4249
4247 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, 4250 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
4248 sizeof(err_str)); 4251 goto discard;
4252
4253 /* Make the abort chunk. */
4254 abort = sctp_make_violation_paramlen(asoc, chunk, param);
4255 if (!abort)
4256 goto nomem;
4257
4258 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4259 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
4260
4261 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
4262 SCTP_ERROR(ECONNABORTED));
4263 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
4264 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
4265 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
4266
4267discard:
4268 sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
4269
4270 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
4271
4272 return SCTP_DISPOSITION_ABORT;
4273nomem:
4274 return SCTP_DISPOSITION_NOMEM;
4249} 4275}
4250 4276
4251/* Handle a protocol violation when the peer trying to advance the 4277/* Handle a protocol violation when the peer trying to advance the
diff --git a/net/socket.c b/net/socket.c
index 8ef8ba81b9e2..3e8d4e35c08f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1511,6 +1511,7 @@ out_fd:
1511 goto out_put; 1511 goto out_put;
1512} 1512}
1513 1513
1514#if 0
1514#ifdef HAVE_SET_RESTORE_SIGMASK 1515#ifdef HAVE_SET_RESTORE_SIGMASK
1515asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr, 1516asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr,
1516 int __user *upeer_addrlen, 1517 int __user *upeer_addrlen,
@@ -1564,6 +1565,7 @@ asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr,
1564 return do_accept(fd, upeer_sockaddr, upeer_addrlen, flags); 1565 return do_accept(fd, upeer_sockaddr, upeer_addrlen, flags);
1565} 1566}
1566#endif 1567#endif
1568#endif
1567 1569
1568asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, 1570asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr,
1569 int __user *upeer_addrlen) 1571 int __user *upeer_addrlen)
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index ac25b4c0e982..dc50f1e71f76 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -27,10 +27,14 @@ static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
27 - skb_headroom(skb); 27 - skb_headroom(skb);
28 int ntail = dst->dev->needed_tailroom - skb_tailroom(skb); 28 int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);
29 29
30 if (nhead > 0 || ntail > 0) 30 if (nhead <= 0) {
31 return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC); 31 if (ntail <= 0)
32 32 return 0;
33 return 0; 33 nhead = 0;
34 } else if (ntail < 0)
35 ntail = 0;
36
37 return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
34} 38}
35 39
36static int xfrm_output_one(struct sk_buff *skb, int err) 40static int xfrm_output_one(struct sk_buff *skb, int err)