author    Chris Wilson <chris@chris-wilson.co.uk>    2010-11-15 01:33:11 -0500
committer Chris Wilson <chris@chris-wilson.co.uk>    2010-11-15 01:33:11 -0500
commit    1bb95834bbcdc969e477a9284cf96c17a4c2616f (patch)
tree      9cf66b22a611bb6bc78778c05dac72263bb45a23 /net
parent    85345517fe6d4de27b0d6ca19fef9d28ac947c4a (diff)
parent    a41c73e04673b47730df682446f0d52f95e32a5b (diff)
Merge remote branch 'airlied/drm-fixes' into drm-intel-fixes
Diffstat (limited to 'net')
113 files changed, 1275 insertions(+), 765 deletions(-)
diff --git a/net/802/garp.c b/net/802/garp.c
index 941f2a324d3a..c1df2dad8c6b 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -346,8 +346,8 @@ int garp_request_join(const struct net_device *dev,
                        const struct garp_application *appl,
                        const void *data, u8 len, u8 type)
 {
-        struct garp_port *port = dev->garp_port;
-        struct garp_applicant *app = port->applicants[appl->type];
+        struct garp_port *port = rtnl_dereference(dev->garp_port);
+        struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
         struct garp_attr *attr;
 
         spin_lock_bh(&app->lock);
@@ -366,8 +366,8 @@ void garp_request_leave(const struct net_device *dev,
                         const struct garp_application *appl,
                         const void *data, u8 len, u8 type)
 {
-        struct garp_port *port = dev->garp_port;
-        struct garp_applicant *app = port->applicants[appl->type];
+        struct garp_port *port = rtnl_dereference(dev->garp_port);
+        struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
         struct garp_attr *attr;
 
         spin_lock_bh(&app->lock);
@@ -546,11 +546,11 @@ static int garp_init_port(struct net_device *dev)
 
 static void garp_release_port(struct net_device *dev)
 {
-        struct garp_port *port = dev->garp_port;
+        struct garp_port *port = rtnl_dereference(dev->garp_port);
         unsigned int i;
 
         for (i = 0; i <= GARP_APPLICATION_MAX; i++) {
-                if (port->applicants[i])
+                if (rtnl_dereference(port->applicants[i]))
                         return;
         }
         rcu_assign_pointer(dev->garp_port, NULL);
@@ -565,7 +565,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
 
         ASSERT_RTNL();
 
-        if (!dev->garp_port) {
+        if (!rtnl_dereference(dev->garp_port)) {
                 err = garp_init_port(dev);
                 if (err < 0)
                         goto err1;
@@ -601,8 +601,8 @@ EXPORT_SYMBOL_GPL(garp_init_applicant);
 
 void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
 {
-        struct garp_port *port = dev->garp_port;
-        struct garp_applicant *app = port->applicants[appl->type];
+        struct garp_port *port = rtnl_dereference(dev->garp_port);
+        struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
 
         ASSERT_RTNL();
 
diff --git a/net/802/stp.c b/net/802/stp.c
index 53c8f77f0ccd..978c30b1b36b 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -21,8 +21,8 @@
 #define GARP_ADDR_MAX   0x2F
 #define GARP_ADDR_RANGE (GARP_ADDR_MAX - GARP_ADDR_MIN)
 
-static const struct stp_proto *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
-static const struct stp_proto *stp_proto __read_mostly;
+static const struct stp_proto __rcu *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
+static const struct stp_proto __rcu *stp_proto __read_mostly;
 
 static struct llc_sap *sap __read_mostly;
 static unsigned int sap_registered;
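
[Editor's note] The garp.c and stp.c hunks above are sparse-annotation work: pointers that RCU readers follow gain the `__rcu` marker, and plain loads become checked accessors. A minimal illustrative fragment (not from this commit) of the pattern these annotations enforce:

```c
/* An __rcu pointer may only be loaded through an accessor that
 * documents what protects it.
 */
struct foo __rcu *global_foo;

/* Reader side: must run inside rcu_read_lock()/rcu_read_unlock(). */
struct foo *reader(void)
{
	return rcu_dereference(global_foo);
}

/* Update side: runs under RTNL, so rtnl_dereference() is the right
 * accessor -- it checks lockdep_rtnl_is_held() instead of requiring
 * an RCU read-side critical section.
 */
void updater(struct foo *nfoo)
{
	struct foo *old;

	ASSERT_RTNL();
	old = rtnl_dereference(global_foo);
	rcu_assign_pointer(global_foo, nfoo);
	/* 'old' may be freed only after a grace period (synchronize_rcu()). */
}
```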
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 05b867e43757..52077ca22072 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -112,7 +112,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 
         ASSERT_RTNL();
 
-        grp = real_dev->vlgrp;
+        grp = rtnl_dereference(real_dev->vlgrp);
         BUG_ON(!grp);
 
         /* Take it out of our own structures, but be sure to interlock with
@@ -177,7 +177,7 @@ int register_vlan_dev(struct net_device *dev)
         struct vlan_group *grp, *ngrp = NULL;
         int err;
 
-        grp = real_dev->vlgrp;
+        grp = rtnl_dereference(real_dev->vlgrp);
         if (!grp) {
                 ngrp = grp = vlan_group_alloc(real_dev);
                 if (!grp)
@@ -385,7 +385,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                 dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
         }
 
-        grp = dev->vlgrp;
+        grp = rtnl_dereference(dev->vlgrp);
         if (!grp)
                 goto out;
 
diff --git a/net/9p/client.c b/net/9p/client.c
index 83bf0541d66f..a848bca9fbff 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -450,32 +450,43 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
                 return err;
         }
 
-        if (type == P9_RERROR) {
+        if (type == P9_RERROR || type == P9_RLERROR) {
                 int ecode;
-                char *ename;
 
-                err = p9pdu_readf(req->rc, c->proto_version, "s?d",
-                                  &ename, &ecode);
-                if (err) {
-                        P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n",
-                                   err);
-                        return err;
-                }
+                if (!p9_is_proto_dotl(c)) {
+                        char *ename;
 
-                if (p9_is_proto_dotu(c) ||
-                    p9_is_proto_dotl(c))
-                        err = -ecode;
+                        err = p9pdu_readf(req->rc, c->proto_version, "s?d",
+                                          &ename, &ecode);
+                        if (err)
+                                goto out_err;
+
+                        if (p9_is_proto_dotu(c))
+                                err = -ecode;
+
+                        if (!err || !IS_ERR_VALUE(err)) {
+                                err = p9_errstr2errno(ename, strlen(ename));
+
+                                P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode, ename);
 
-                if (!err || !IS_ERR_VALUE(err))
-                        err = p9_errstr2errno(ename, strlen(ename));
+                                kfree(ename);
+                        }
+                } else {
+                        err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
+                        err = -ecode;
 
-                P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode, ename);
+                        P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
+                }
 
-                kfree(ename);
         } else
                 err = 0;
 
         return err;
+
+out_err:
+        P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
+
+        return err;
 }
 
 /**
@@ -568,11 +579,14 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
         va_start(ap, fmt);
         err = p9pdu_vwritef(req->tc, c->proto_version, fmt, ap);
         va_end(ap);
+        if (err)
+                goto reterr;
         p9pdu_finalize(req->tc);
 
         err = c->trans_mod->request(c, req);
         if (err < 0) {
-                c->status = Disconnected;
+                if (err != -ERESTARTSYS)
+                        c->status = Disconnected;
                 goto reterr;
         }
 
@@ -1151,12 +1165,44 @@ int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, char *newname)
 }
 EXPORT_SYMBOL(p9_client_link);
 
+int p9_client_fsync(struct p9_fid *fid, int datasync)
+{
+        int err;
+        struct p9_client *clnt;
+        struct p9_req_t *req;
+
+        P9_DPRINTK(P9_DEBUG_9P, ">>> TFSYNC fid %d datasync:%d\n",
+                        fid->fid, datasync);
+        err = 0;
+        clnt = fid->clnt;
+
+        req = p9_client_rpc(clnt, P9_TFSYNC, "dd", fid->fid, datasync);
+        if (IS_ERR(req)) {
+                err = PTR_ERR(req);
+                goto error;
+        }
+
+        P9_DPRINTK(P9_DEBUG_9P, "<<< RFSYNC fid %d\n", fid->fid);
+
+        p9_free_req(clnt, req);
+
+error:
+        return err;
+}
+EXPORT_SYMBOL(p9_client_fsync);
+
 int p9_client_clunk(struct p9_fid *fid)
 {
         int err;
         struct p9_client *clnt;
         struct p9_req_t *req;
 
+        if (!fid) {
+                P9_EPRINTK(KERN_WARNING, "Trying to clunk with NULL fid\n");
+                dump_stack();
+                return 0;
+        }
+
         P9_DPRINTK(P9_DEBUG_9P, ">>> TCLUNK fid %d\n", fid->fid);
         err = 0;
         clnt = fid->clnt;
@@ -1240,16 +1286,13 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 
         if (data) {
                 memmove(data, dataptr, count);
-        }
-
-        if (udata) {
+        } else {
                 err = copy_to_user(udata, dataptr, count);
                 if (err) {
                         err = -EFAULT;
                         goto free_and_error;
                 }
         }
-
         p9_free_req(clnt, req);
         return count;
 
@@ -1761,3 +1804,96 @@ error:
 
 }
 EXPORT_SYMBOL(p9_client_mkdir_dotl);
+
+int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status)
+{
+        int err;
+        struct p9_client *clnt;
+        struct p9_req_t *req;
+
+        err = 0;
+        clnt = fid->clnt;
+        P9_DPRINTK(P9_DEBUG_9P, ">>> TLOCK fid %d type %i flags %d "
+                        "start %lld length %lld proc_id %d client_id %s\n",
+                        fid->fid, flock->type, flock->flags, flock->start,
+                        flock->length, flock->proc_id, flock->client_id);
+
+        req = p9_client_rpc(clnt, P9_TLOCK, "dbdqqds", fid->fid, flock->type,
+                                flock->flags, flock->start, flock->length,
+                                flock->proc_id, flock->client_id);
+
+        if (IS_ERR(req))
+                return PTR_ERR(req);
+
+        err = p9pdu_readf(req->rc, clnt->proto_version, "b", status);
+        if (err) {
+                p9pdu_dump(1, req->rc);
+                goto error;
+        }
+        P9_DPRINTK(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status);
+error:
+        p9_free_req(clnt, req);
+        return err;
+
+}
+EXPORT_SYMBOL(p9_client_lock_dotl);
+
+int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)
+{
+        int err;
+        struct p9_client *clnt;
+        struct p9_req_t *req;
+
+        err = 0;
+        clnt = fid->clnt;
+        P9_DPRINTK(P9_DEBUG_9P, ">>> TGETLOCK fid %d, type %i start %lld "
+                "length %lld proc_id %d client_id %s\n", fid->fid, glock->type,
+                glock->start, glock->length, glock->proc_id, glock->client_id);
+
+        req = p9_client_rpc(clnt, P9_TGETLOCK, "dbqqds", fid->fid, glock->type,
+                glock->start, glock->length, glock->proc_id, glock->client_id);
+
+        if (IS_ERR(req))
+                return PTR_ERR(req);
+
+        err = p9pdu_readf(req->rc, clnt->proto_version, "bqqds", &glock->type,
+                        &glock->start, &glock->length, &glock->proc_id,
+                        &glock->client_id);
+        if (err) {
+                p9pdu_dump(1, req->rc);
+                goto error;
+        }
+        P9_DPRINTK(P9_DEBUG_9P, "<<< RGETLOCK type %i start %lld length %lld "
+                "proc_id %d client_id %s\n", glock->type, glock->start,
+                glock->length, glock->proc_id, glock->client_id);
+error:
+        p9_free_req(clnt, req);
+        return err;
+}
+EXPORT_SYMBOL(p9_client_getlock_dotl);
+
+int p9_client_readlink(struct p9_fid *fid, char **target)
+{
+        int err;
+        struct p9_client *clnt;
+        struct p9_req_t *req;
+
+        err = 0;
+        clnt = fid->clnt;
+        P9_DPRINTK(P9_DEBUG_9P, ">>> TREADLINK fid %d\n", fid->fid);
+
+        req = p9_client_rpc(clnt, P9_TREADLINK, "d", fid->fid);
+        if (IS_ERR(req))
+                return PTR_ERR(req);
+
+        err = p9pdu_readf(req->rc, clnt->proto_version, "s", target);
+        if (err) {
+                p9pdu_dump(1, req->rc);
+                goto error;
+        }
+        P9_DPRINTK(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target);
+error:
+        p9_free_req(clnt, req);
+        return err;
+}
+EXPORT_SYMBOL(p9_client_readlink);
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 3acd3afb20c8..45c15f491401 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -122,9 +122,8 @@ static size_t
 pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
 {
         size_t len = MIN(pdu->capacity - pdu->size, size);
-        int err = copy_from_user(&pdu->sdata[pdu->size], udata, len);
-        if (err)
-                printk(KERN_WARNING "pdu_write_u returning: %d\n", err);
+        if (copy_from_user(&pdu->sdata[pdu->size], udata, len))
+                len = 0;
 
         pdu->size += len;
         return size - len;
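
[Editor's note] The rewrite above relies on copy_from_user()'s return convention: it returns the number of bytes that could *not* be copied (0 on full success), a count rather than an errno, so any nonzero result means a partial copy. A hedged sketch of that convention:

```c
/* Sketch, not from this commit: treating any nonzero copy_from_user()
 * result as failure, since the return value is the uncopied byte count.
 */
static ssize_t copy_in(void *dst, const void __user *src, size_t n)
{
	if (copy_from_user(dst, src, n))
		return -EFAULT;	/* partial copy: treat as failure */
	return n;
}
```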
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index b88515936e4b..c8f3f72ab20e 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -75,6 +75,8 @@ struct virtio_chan {
         struct p9_client *client;
         struct virtio_device *vdev;
         struct virtqueue *vq;
+        int ring_bufs_avail;
+        wait_queue_head_t *vc_wq;
 
         /* Scatterlist: can be too big for stack. */
         struct scatterlist sg[VIRTQUEUE_NUM];
@@ -134,16 +136,30 @@ static void req_done(struct virtqueue *vq)
         struct p9_fcall *rc;
         unsigned int len;
         struct p9_req_t *req;
+        unsigned long flags;
 
         P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
 
-        while ((rc = virtqueue_get_buf(chan->vq, &len)) != NULL) {
-                P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
-                P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
-                req = p9_tag_lookup(chan->client, rc->tag);
-                req->status = REQ_STATUS_RCVD;
-                p9_client_cb(chan->client, req);
-        }
+        do {
+                spin_lock_irqsave(&chan->lock, flags);
+                rc = virtqueue_get_buf(chan->vq, &len);
+
+                if (rc != NULL) {
+                        if (!chan->ring_bufs_avail) {
+                                chan->ring_bufs_avail = 1;
+                                wake_up(chan->vc_wq);
+                        }
+                        spin_unlock_irqrestore(&chan->lock, flags);
+                        P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
+                        P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n",
+                                        rc->tag);
+                        req = p9_tag_lookup(chan->client, rc->tag);
+                        req->status = REQ_STATUS_RCVD;
+                        p9_client_cb(chan->client, req);
+                } else {
+                        spin_unlock_irqrestore(&chan->lock, flags);
+                }
+        } while (rc != NULL);
 }
 
 /**
@@ -199,23 +215,43 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
         int in, out;
         struct virtio_chan *chan = client->trans;
         char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
+        unsigned long flags;
+        int err;
 
         P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
 
+req_retry:
+        req->status = REQ_STATUS_SENT;
+
+        spin_lock_irqsave(&chan->lock, flags);
         out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
                            req->tc->size);
         in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata,
                           client->msize);
 
-        req->status = REQ_STATUS_SENT;
-
-        if (virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
-                P9_DPRINTK(P9_DEBUG_TRANS,
-                        "9p debug: virtio rpc add_buf returned failure");
-                return -EIO;
+        err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
+        if (err < 0) {
+                if (err == -ENOSPC) {
+                        chan->ring_bufs_avail = 0;
+                        spin_unlock_irqrestore(&chan->lock, flags);
+                        err = wait_event_interruptible(*chan->vc_wq,
+                                                        chan->ring_bufs_avail);
+                        if (err == -ERESTARTSYS)
+                                return err;
+
+                        P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
+                        goto req_retry;
+                } else {
+                        spin_unlock_irqrestore(&chan->lock, flags);
+                        P9_DPRINTK(P9_DEBUG_TRANS,
+                                        "9p debug: "
+                                        "virtio rpc add_buf returned failure");
+                        return -EIO;
+                }
         }
 
         virtqueue_kick(chan->vq);
+        spin_unlock_irqrestore(&chan->lock, flags);
 
         P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
         return 0;
@@ -290,14 +326,23 @@ static int p9_virtio_probe(struct virtio_device *vdev)
         chan->tag_len = tag_len;
         err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
         if (err) {
-                kfree(tag);
-                goto out_free_vq;
+                goto out_free_tag;
         }
+        chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
+        if (!chan->vc_wq) {
+                err = -ENOMEM;
+                goto out_free_tag;
+        }
+        init_waitqueue_head(chan->vc_wq);
+        chan->ring_bufs_avail = 1;
+
         mutex_lock(&virtio_9p_lock);
         list_add_tail(&chan->chan_list, &virtio_chan_list);
         mutex_unlock(&virtio_9p_lock);
         return 0;
 
+out_free_tag:
+        kfree(tag);
 out_free_vq:
         vdev->config->del_vqs(vdev);
         kfree(chan);
@@ -371,6 +416,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
         mutex_unlock(&virtio_9p_lock);
         sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
         kfree(chan->tag);
+        kfree(chan->vc_wq);
         kfree(chan);
 
 }
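
[Editor's note] Taken together, the trans_virtio.c hunks add flow control: the request path parks on a waitqueue when virtqueue_add_buf() reports -ENOSPC, and the completion handler wakes it once a ring buffer is reclaimed. A condensed sketch of the idiom (helper name assumed, not from the driver):

```c
/* Producer side of the handshake: sleep while the ring is full, retry
 * after the interrupt-side consumer signals ring_bufs_avail.
 */
static int send_one(struct virtio_chan *chan, struct p9_req_t *req)
{
	unsigned long flags;
	int err;

again:
	spin_lock_irqsave(&chan->lock, flags);
	err = add_to_ring(chan, req);		/* assumed helper */
	if (err == -ENOSPC) {
		chan->ring_bufs_avail = 0;
		spin_unlock_irqrestore(&chan->lock, flags);
		err = wait_event_interruptible(*chan->vc_wq,
					       chan->ring_bufs_avail);
		if (err == -ERESTARTSYS)
			return err;	/* let the caller restart the syscall */
		goto again;
	}
	spin_unlock_irqrestore(&chan->lock, flags);
	return err;
}
```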
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 26eaebf4aaa9..bb86d2932394 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1392,6 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
         ax25_cb *ax25;
         int err = 0;
 
+        memset(fsa, 0, sizeof(fsa));
         lock_sock(sk);
         ax25 = ax25_sk(sk);
 
@@ -1403,7 +1404,6 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
 
                 fsa->fsa_ax25.sax25_family = AF_AX25;
                 fsa->fsa_ax25.sax25_call = ax25->dest_addr;
-                fsa->fsa_ax25.sax25_ndigis = 0;
 
                 if (ax25->digipeat != NULL) {
                         ndigi = ax25->digipeat->ndigi;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index bfef5bae0b3a..84093b0000b9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1175,6 +1175,12 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
                         hci_send_cmd(hdev,
                                 HCI_OP_READ_REMOTE_EXT_FEATURES,
                                         sizeof(cp), &cp);
+                } else if (!ev->status && conn->out &&
+                                conn->sec_level == BT_SECURITY_HIGH) {
+                        struct hci_cp_auth_requested cp;
+                        cp.handle = ev->handle;
+                        hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
+                                        sizeof(cp), &cp);
                 } else {
                         conn->state = BT_CONNECTED;
                         hci_proto_connect_cfm(conn, ev->status);
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index 98fdfa1fbddd..86a91543172a 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,6 +1,6 @@
 config BT_HIDP
         tristate "HIDP protocol support"
-        depends on BT && BT_L2CAP && INPUT
+        depends on BT && BT_L2CAP && INPUT && HID_SUPPORT
         select HID
         help
           HIDP (Human Interface Device Protocol) is a transport layer
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index daa7a988d9a6..cd8f6ea03841 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -2421,11 +2421,11 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned
                 break;
 
         case 2:
-                *val = __le16_to_cpu(*((__le16 *) opt->val));
+                *val = get_unaligned_le16(opt->val);
                 break;
 
         case 4:
-                *val = __le32_to_cpu(*((__le32 *) opt->val));
+                *val = get_unaligned_le32(opt->val);
                 break;
 
         default:
@@ -2452,11 +2452,11 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
                 break;
 
         case 2:
-                *((__le16 *) opt->val) = cpu_to_le16(val);
+                put_unaligned_le16(val, opt->val);
                 break;
 
         case 4:
-                *((__le32 *) opt->val) = cpu_to_le32(val);
+                put_unaligned_le32(val, opt->val);
                 break;
 
         default:
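
[Editor's note] The l2cap.c hunks matter because option values sit inside a variable-length TLV stream and carry no alignment guarantee; dereferencing a cast `__le16 *`/`__le32 *` pointer can fault on strict-alignment CPUs. A byte-wise sketch (not kernel code) of what the unaligned helpers guarantee:

```c
/* 'p' points into a byte stream and may be odd-aligned, so a
 * cast-and-load is undefined on strict-alignment architectures;
 * byte-wise assembly is always safe and endian-explicit.
 */
static inline unsigned int sketch_get_le16(const unsigned char *p)
{
	return p[0] | (p[1] << 8);	/* what get_unaligned_le16() provides */
}
```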
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 39a5d87e33b4..fa642aa652bd 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -79,7 +79,10 @@ static void rfcomm_make_uih(struct sk_buff *skb, u8 addr);
 
 static void rfcomm_process_connect(struct rfcomm_session *s);
 
-static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err);
+static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
+                                                        bdaddr_t *dst,
+                                                        u8 sec_level,
+                                                        int *err);
 static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst);
 static void rfcomm_session_del(struct rfcomm_session *s);
 
@@ -401,7 +404,7 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
 
         s = rfcomm_session_get(src, dst);
         if (!s) {
-                s = rfcomm_session_create(src, dst, &err);
+                s = rfcomm_session_create(src, dst, d->sec_level, &err);
                 if (!s)
                         return err;
         }
@@ -679,7 +682,10 @@ static void rfcomm_session_close(struct rfcomm_session *s, int err)
         rfcomm_session_put(s);
 }
 
-static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err)
+static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
+                                                        bdaddr_t *dst,
+                                                        u8 sec_level,
+                                                        int *err)
 {
         struct rfcomm_session *s = NULL;
         struct sockaddr_l2 addr;
@@ -704,6 +710,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst
         sk = sock->sk;
         lock_sock(sk);
         l2cap_pi(sk)->imtu = l2cap_mtu;
+        l2cap_pi(sk)->sec_level = sec_level;
         if (l2cap_ertm)
                 l2cap_pi(sk)->mode = L2CAP_MODE_ERTM;
         release_sock(sk);
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
index 76ae68303d3a..d522d8c1703e 100644
--- a/net/caif/caif_config_util.c
+++ b/net/caif/caif_config_util.c
@@ -16,11 +16,18 @@ int connect_req_to_link_param(struct cfcnfg *cnfg,
 {
         struct dev_info *dev_info;
         enum cfcnfg_phy_preference pref;
+        int res;
+
         memset(l, 0, sizeof(*l));
-        l->priority = s->priority;
+        /* In caif protocol low value is high priority */
+        l->priority = CAIF_PRIO_MAX - s->priority + 1;
 
-        if (s->link_name[0] != '\0')
-                l->phyid = cfcnfg_get_named(cnfg, s->link_name);
+        if (s->ifindex != 0){
+                res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
+                if (res < 0)
+                        return res;
+                l->phyid = res;
+        }
         else {
                 switch (s->link_selector) {
                 case CAIF_LINK_HIGH_BANDW:
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index b99369a055d1..a42a408306e4 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -307,6 +307,8 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 
         case NETDEV_UNREGISTER:
                 caifd = caif_get(dev);
+                if (caifd == NULL)
+                        break;
                 netdev_info(dev, "unregister\n");
                 atomic_set(&caifd->state, what);
                 caif_device_destroy(dev);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 2eca2dd0000f..1bf0cf503796 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -716,8 +716,7 @@ static int setsockopt(struct socket *sock,
 {
         struct sock *sk = sock->sk;
         struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-        int prio, linksel;
-        struct ifreq ifreq;
+        int linksel;
 
         if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
                 return -ENOPROTOOPT;
@@ -735,33 +734,6 @@ static int setsockopt(struct socket *sock,
                 release_sock(&cf_sk->sk);
                 return 0;
 
-        case SO_PRIORITY:
-                if (lvl != SOL_SOCKET)
-                        goto bad_sol;
-                if (ol < sizeof(int))
-                        return -EINVAL;
-                if (copy_from_user(&prio, ov, sizeof(int)))
-                        return -EINVAL;
-                lock_sock(&(cf_sk->sk));
-                cf_sk->conn_req.priority = prio;
-                release_sock(&cf_sk->sk);
-                return 0;
-
-        case SO_BINDTODEVICE:
-                if (lvl != SOL_SOCKET)
-                        goto bad_sol;
-                if (ol < sizeof(struct ifreq))
-                        return -EINVAL;
-                if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
-                        return -EFAULT;
-                lock_sock(&(cf_sk->sk));
-                strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
-                        sizeof(cf_sk->conn_req.link_name));
-                cf_sk->conn_req.link_name
-                        [sizeof(cf_sk->conn_req.link_name)-1] = 0;
-                release_sock(&cf_sk->sk);
-                return 0;
-
         case CAIFSO_REQ_PARAM:
                 if (lvl != SOL_CAIF)
                         goto bad_sol;
@@ -880,6 +852,18 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
         sock->state = SS_CONNECTING;
         sk->sk_state = CAIF_CONNECTING;
 
+        /* Check priority value comming from socket */
+        /* if priority value is out of range it will be ajusted */
+        if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
+                cf_sk->conn_req.priority = CAIF_PRIO_MAX;
+        else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
+                cf_sk->conn_req.priority = CAIF_PRIO_MIN;
+        else
+                cf_sk->conn_req.priority = cf_sk->sk.sk_priority;
+
+        /*ifindex = id of the interface.*/
+        cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
+
         dbfs_atomic_inc(&cnt.num_connect_req);
         cf_sk->layer.receive = caif_sktrecv_cb;
         err = caif_connect_client(&cf_sk->conn_req,
@@ -905,6 +889,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
         cf_sk->maxframe = mtu - (headroom + tailroom);
         if (cf_sk->maxframe < 1) {
                 pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
+                err = -ENODEV;
                 goto out;
         }
 
@@ -1142,7 +1127,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
         set_rx_flow_on(cf_sk);
 
         /* Set default options on configuration */
-        cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
+        cf_sk->sk.sk_priority= CAIF_PRIO_NORMAL;
         cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
         cf_sk->conn_req.protocol = protocol;
         /* Increase the number of sockets created. */
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 41adafd18914..21ede141018a 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -173,18 +173,15 @@ static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
         return NULL;
 }
 
-int cfcnfg_get_named(struct cfcnfg *cnfg, char *name)
+
+int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
 {
         int i;
-
-        /* Try to match with specified name */
-        for (i = 0; i < MAX_PHY_LAYERS; i++) {
-                if (cnfg->phy_layers[i].frm_layer != NULL
-                    && strcmp(cnfg->phy_layers[i].phy_layer->name,
-                              name) == 0)
-                        return cnfg->phy_layers[i].frm_layer->id;
-        }
-        return 0;
+        for (i = 0; i < MAX_PHY_LAYERS; i++)
+                if (cnfg->phy_layers[i].frm_layer != NULL &&
+                                cnfg->phy_layers[i].ifindex == ifi)
+                        return i;
+        return -ENODEV;
 }
 
 int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 08f267a109aa..3cd8f978e309 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -361,11 +361,10 @@ void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
         struct cfctrl_request_info *p, *tmp;
         struct cfctrl *ctrl = container_obj(layr);
         spin_lock(&ctrl->info_list_lock);
-        pr_warn("enter\n");
 
         list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
                 if (p->client_layer == adap_layer) {
-                        pr_warn("cancel req :%d\n", p->sequence_no);
+                        pr_debug("cancel req :%d\n", p->sequence_no);
                         list_del(&p->list);
                         kfree(p);
                 }
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 496fda9ac66f..11a2af4c162a 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -12,6 +12,8 @@
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
 
+#define container_obj(layr) ((struct cfsrvl *) layr)
+
 static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
 
@@ -38,5 +40,17 @@ static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt)
 
 static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
+        struct cfsrvl *service = container_obj(layr);
+        struct caif_payload_info *info;
+        int ret;
+
+        if (!cfsrvl_ready(service, &ret))
+                return ret;
+
+        /* Add info for MUX-layer to route the packet out */
+        info = cfpkt_info(pkt);
+        info->channel_id = service->layer.id;
+        info->dev_info = &service->dev_info;
+
         return layr->dn->transmit(layr->dn, pkt);
 }
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index bde8481e8d25..e2fb5fa75795 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -193,7 +193,7 @@ out:
 
 static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
 {
-        caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size);
+        caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
 
         /* Add info for MUX-layer to route the packet out. */
         cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 08ffe9e4be20..6faa8256e10c 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -125,7 +125,7 @@ struct bcm_sock {
         struct list_head tx_ops;
         unsigned long dropped_usr_msgs;
         struct proc_dir_entry *bcm_proc_read;
-        char procname [9]; /* pointer printed in ASCII with \0 */
+        char procname [20]; /* pointer printed in ASCII with \0 */
 };
 
 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
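
[Editor's note] The bcm.c resize reflects pointer width: the proc entry name is a kernel pointer printed with "%p", and 9 bytes only fits a 32-bit value (8 hex digits plus NUL), while a 64-bit pointer needs up to 17. A userspace sketch showing how to size such a buffer:

```c
/* Userspace illustration: snprintf() reports the length "%p" actually
 * needs (userspace printf typically adds an "0x" prefix; the kernel's
 * %p prints up to 2 * sizeof(void *) hex digits).
 */
#include <stdio.h>

int main(void)
{
	char buf[20];
	int n = snprintf(buf, sizeof(buf), "%p", (void *)buf);

	printf("\"%s\" needs %d bytes + NUL\n", buf, n);
	return 0;
}
```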
diff --git a/net/compat.c b/net/compat.c
index 63d260e81472..3649d5895361 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -41,10 +41,12 @@ static inline int iov_from_user_compat_to_kern(struct iovec *kiov,
                 compat_size_t len;
 
                 if (get_user(len, &uiov32->iov_len) ||
-                    get_user(buf, &uiov32->iov_base)) {
-                        tot_len = -EFAULT;
-                        break;
-                }
+                    get_user(buf, &uiov32->iov_base))
+                        return -EFAULT;
+
+                if (len > INT_MAX - tot_len)
+                        len = INT_MAX - tot_len;
+
                 tot_len += len;
                 kiov->iov_base = compat_ptr(buf);
                 kiov->iov_len = (__kernel_size_t) len;
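
[Editor's note] The compat.c fix does two things: it fails fast on a bad iovec instead of smuggling -EFAULT through the length total, and it clamps each segment so the running int-sized total cannot overflow. A self-contained sketch of the clamping guard:

```c
/* Before adding 'len' to a running int-sized total, cap it so
 * 'tot_len + len' can never wrap past INT_MAX.
 */
#include <limits.h>
#include <stddef.h>

static int add_segment(int tot_len, size_t len)
{
	if (len > (size_t)(INT_MAX - tot_len))
		len = INT_MAX - tot_len;	/* cap instead of wrapping */
	return tot_len + (int)len;
}
```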
diff --git a/net/core/dev.c b/net/core/dev.c
index 78b5a89b0f40..0dd54a69dace 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1685,10 +1685,10 @@ EXPORT_SYMBOL(netif_device_attach);
 
 static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 {
-        return ((features & NETIF_F_GEN_CSUM) ||
-                ((features & NETIF_F_IP_CSUM) &&
+        return ((features & NETIF_F_NO_CSUM) ||
+                ((features & NETIF_F_V4_CSUM) &&
                  protocol == htons(ETH_P_IP)) ||
-                ((features & NETIF_F_IPV6_CSUM) &&
+                ((features & NETIF_F_V6_CSUM) &&
                  protocol == htons(ETH_P_IPV6)) ||
                 ((features & NETIF_F_FCOE_CRC) &&
                  protocol == htons(ETH_P_FCOE)));
@@ -1696,22 +1696,18 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 
 static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
 {
+        __be16 protocol = skb->protocol;
         int features = dev->features;
 
-        if (vlan_tx_tag_present(skb))
+        if (vlan_tx_tag_present(skb)) {
                 features &= dev->vlan_features;
-
-        if (can_checksum_protocol(features, skb->protocol))
-                return true;
-
-        if (skb->protocol == htons(ETH_P_8021Q)) {
+        } else if (protocol == htons(ETH_P_8021Q)) {
                 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-                if (can_checksum_protocol(dev->features & dev->vlan_features,
-                                          veh->h_vlan_encapsulated_proto))
-                        return true;
+                protocol = veh->h_vlan_encapsulated_proto;
+                features &= dev->vlan_features;
         }
 
-        return false;
+        return can_checksum_protocol(features, protocol);
 }
 
 /**
@@ -2135,7 +2131,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
         } else {
                 struct sock *sk = skb->sk;
                 queue_index = sk_tx_queue_get(sk);
-                if (queue_index < 0) {
+                if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
 
                         queue_index = 0;
                         if (dev->real_num_tx_queues > 1)
@@ -2213,7 +2209,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 }
 
 static DEFINE_PER_CPU(int, xmit_recursion);
-#define RECURSION_LIMIT 3
+#define RECURSION_LIMIT 10
 
 /**
  *      dev_queue_xmit - transmit a buffer
@@ -2413,7 +2409,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 #ifdef CONFIG_RPS
 
 /* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
 /*
@@ -2425,7 +2421,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                           struct rps_dev_flow **rflowp)
 {
         struct netdev_rx_queue *rxqueue;
-        struct rps_map *map = NULL;
+        struct rps_map *map;
         struct rps_dev_flow_table *flow_table;
         struct rps_sock_flow_table *sock_flow_table;
         int cpu = -1;
@@ -2444,15 +2440,15 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
         } else
                 rxqueue = dev->_rx;
 
-        if (rxqueue->rps_map) {
-                map = rcu_dereference(rxqueue->rps_map);
-                if (map && map->len == 1) {
+        map = rcu_dereference(rxqueue->rps_map);
+        if (map) {
+                if (map->len == 1) {
                         tcpu = map->cpus[0];
                         if (cpu_online(tcpu))
                                 cpu = tcpu;
                         goto done;
                 }
-        } else if (!rxqueue->rps_flow_table) {
+        } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
                 goto done;
         }
 
@@ -5416,7 +5412,7 @@ void netdev_run_todo(void)
                 /* paranoia */
                 BUG_ON(netdev_refcnt_read(dev));
                 WARN_ON(rcu_dereference_raw(dev->ip_ptr));
-                WARN_ON(dev->ip6_ptr);
+                WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
                 WARN_ON(dev->dn_ptr);
 
                 if (dev->destructor)
diff --git a/net/core/dst.c b/net/core/dst.c
index 8abe628b79f1..b99c7c7ffce2 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -370,6 +370,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
 
 static struct notifier_block dst_dev_notifier = {
         .notifier_call = dst_dev_event,
+        .priority = -10, /* must be called after other network notifiers */
 };
 
 void __init dst_init(void)
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 1bc3f253ba6c..82a4369ae150 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -351,12 +351,12 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 
                 list_for_each_entry(r, &ops->rules_list, list) {
                         if (r->pref == rule->target) {
-                                rule->ctarget = r;
+                                RCU_INIT_POINTER(rule->ctarget, r);
                                 break;
                         }
                 }
 
-                if (rule->ctarget == NULL)
+                if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
                         unresolved = 1;
         } else if (rule->action == FR_ACT_GOTO)
                 goto errout_free;
@@ -373,6 +373,11 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 
         fib_rule_get(rule);
 
+        if (last)
+                list_add_rcu(&rule->list, &last->list);
+        else
+                list_add_rcu(&rule->list, &ops->rules_list);
+
         if (ops->unresolved_rules) {
                 /*
                  * There are unresolved goto rules in the list, check if
@@ -381,7 +386,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
                 list_for_each_entry(r, &ops->rules_list, list) {
                         if (r->action == FR_ACT_GOTO &&
                             r->target == rule->pref) {
-                                BUG_ON(r->ctarget != NULL);
+                                BUG_ON(rtnl_dereference(r->ctarget) != NULL);
                                 rcu_assign_pointer(r->ctarget, rule);
                                 if (--ops->unresolved_rules == 0)
                                         break;
@@ -395,11 +400,6 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
         if (unresolved)
                 ops->unresolved_rules++;
 
-        if (last)
-                list_add_rcu(&rule->list, &last->list);
-        else
-                list_add_rcu(&rule->list, &ops->rules_list);
-
         notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
         flush_route_cache(ops);
         rules_ops_put(ops);
@@ -487,7 +487,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
                  */
                 if (ops->nr_goto_rules > 0) {
                         list_for_each_entry(tmp, &ops->rules_list, list) {
-                                if (tmp->ctarget == rule) {
+                                if (rtnl_dereference(tmp->ctarget) == rule) {
                                         rcu_assign_pointer(tmp->ctarget, NULL);
                                         ops->unresolved_rules++;
                                 }
@@ -545,7 +545,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
         frh->action = rule->action;
         frh->flags = rule->flags;
 
-        if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
+        if (rule->action == FR_ACT_GOTO &&
+            rcu_dereference_raw(rule->ctarget) == NULL)
                 frh->flags |= FIB_RULE_UNRESOLVED;
 
         if (rule->iifname[0]) {
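
[Editor's note] The fib_rules.c reordering publishes a new rule in the RCU-protected list before any other rule's ctarget is pointed at it, so a concurrent reader never follows ctarget to an entry it cannot find in the list. A sketch of that publish-then-reference ordering (names illustrative, not the kernel's):

```c
struct fib_rule_sketch {
	struct list_head list;
	struct fib_rule_sketch __rcu *ctarget;
};

static void publish_then_link(struct fib_rule_sketch *rule,
			      struct list_head *rules_list,
			      struct fib_rule_sketch *referrer)
{
	list_add_rcu(&rule->list, rules_list);		/* 1: make it reachable */
	rcu_assign_pointer(referrer->ctarget, rule);	/* 2: then reference it */
}
```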
diff --git a/net/core/filter.c b/net/core/filter.c index 7adf50352918..23e9b2a6b4c8 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -89,8 +89,8 @@ int sk_filter(struct sock *sk, struct sk_buff *skb) | |||
89 | rcu_read_lock_bh(); | 89 | rcu_read_lock_bh(); |
90 | filter = rcu_dereference_bh(sk->sk_filter); | 90 | filter = rcu_dereference_bh(sk->sk_filter); |
91 | if (filter) { | 91 | if (filter) { |
92 | unsigned int pkt_len = sk_run_filter(skb, filter->insns, | 92 | unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len); |
93 | filter->len); | 93 | |
94 | err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; | 94 | err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; |
95 | } | 95 | } |
96 | rcu_read_unlock_bh(); | 96 | rcu_read_unlock_bh(); |
@@ -112,39 +112,41 @@ EXPORT_SYMBOL(sk_filter); | |||
112 | */ | 112 | */ |
113 | unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) | 113 | unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) |
114 | { | 114 | { |
115 | struct sock_filter *fentry; /* We walk down these */ | ||
116 | void *ptr; | 115 | void *ptr; |
117 | u32 A = 0; /* Accumulator */ | 116 | u32 A = 0; /* Accumulator */ |
118 | u32 X = 0; /* Index Register */ | 117 | u32 X = 0; /* Index Register */ |
119 | u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ | 118 | u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ |
119 | unsigned long memvalid = 0; | ||
120 | u32 tmp; | 120 | u32 tmp; |
121 | int k; | 121 | int k; |
122 | int pc; | 122 | int pc; |
123 | 123 | ||
124 | BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG); | ||
124 | /* | 125 | /* |
125 | * Process array of filter instructions. | 126 | * Process array of filter instructions. |
126 | */ | 127 | */ |
127 | for (pc = 0; pc < flen; pc++) { | 128 | for (pc = 0; pc < flen; pc++) { |
128 | fentry = &filter[pc]; | 129 | const struct sock_filter *fentry = &filter[pc]; |
130 | u32 f_k = fentry->k; | ||
129 | 131 | ||
130 | switch (fentry->code) { | 132 | switch (fentry->code) { |
131 | case BPF_S_ALU_ADD_X: | 133 | case BPF_S_ALU_ADD_X: |
132 | A += X; | 134 | A += X; |
133 | continue; | 135 | continue; |
134 | case BPF_S_ALU_ADD_K: | 136 | case BPF_S_ALU_ADD_K: |
135 | A += fentry->k; | 137 | A += f_k; |
136 | continue; | 138 | continue; |
137 | case BPF_S_ALU_SUB_X: | 139 | case BPF_S_ALU_SUB_X: |
138 | A -= X; | 140 | A -= X; |
139 | continue; | 141 | continue; |
140 | case BPF_S_ALU_SUB_K: | 142 | case BPF_S_ALU_SUB_K: |
141 | A -= fentry->k; | 143 | A -= f_k; |
142 | continue; | 144 | continue; |
143 | case BPF_S_ALU_MUL_X: | 145 | case BPF_S_ALU_MUL_X: |
144 | A *= X; | 146 | A *= X; |
145 | continue; | 147 | continue; |
146 | case BPF_S_ALU_MUL_K: | 148 | case BPF_S_ALU_MUL_K: |
147 | A *= fentry->k; | 149 | A *= f_k; |
148 | continue; | 150 | continue; |
149 | case BPF_S_ALU_DIV_X: | 151 | case BPF_S_ALU_DIV_X: |
150 | if (X == 0) | 152 | if (X == 0) |
@@ -152,49 +154,49 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int | |||
152 | A /= X; | 154 | A /= X; |
153 | continue; | 155 | continue; |
154 | case BPF_S_ALU_DIV_K: | 156 | case BPF_S_ALU_DIV_K: |
155 | A /= fentry->k; | 157 | A /= f_k; |
156 | continue; | 158 | continue; |
157 | case BPF_S_ALU_AND_X: | 159 | case BPF_S_ALU_AND_X: |
158 | A &= X; | 160 | A &= X; |
159 | continue; | 161 | continue; |
160 | case BPF_S_ALU_AND_K: | 162 | case BPF_S_ALU_AND_K: |
161 | A &= fentry->k; | 163 | A &= f_k; |
162 | continue; | 164 | continue; |
163 | case BPF_S_ALU_OR_X: | 165 | case BPF_S_ALU_OR_X: |
164 | A |= X; | 166 | A |= X; |
165 | continue; | 167 | continue; |
166 | case BPF_S_ALU_OR_K: | 168 | case BPF_S_ALU_OR_K: |
167 | A |= fentry->k; | 169 | A |= f_k; |
168 | continue; | 170 | continue; |
169 | case BPF_S_ALU_LSH_X: | 171 | case BPF_S_ALU_LSH_X: |
170 | A <<= X; | 172 | A <<= X; |
171 | continue; | 173 | continue; |
172 | case BPF_S_ALU_LSH_K: | 174 | case BPF_S_ALU_LSH_K: |
173 | A <<= fentry->k; | 175 | A <<= f_k; |
174 | continue; | 176 | continue; |
175 | case BPF_S_ALU_RSH_X: | 177 | case BPF_S_ALU_RSH_X: |
176 | A >>= X; | 178 | A >>= X; |
177 | continue; | 179 | continue; |
178 | case BPF_S_ALU_RSH_K: | 180 | case BPF_S_ALU_RSH_K: |
179 | A >>= fentry->k; | 181 | A >>= f_k; |
180 | continue; | 182 | continue; |
181 | case BPF_S_ALU_NEG: | 183 | case BPF_S_ALU_NEG: |
182 | A = -A; | 184 | A = -A; |
183 | continue; | 185 | continue; |
184 | case BPF_S_JMP_JA: | 186 | case BPF_S_JMP_JA: |
185 | pc += fentry->k; | 187 | pc += f_k; |
186 | continue; | 188 | continue; |
187 | case BPF_S_JMP_JGT_K: | 189 | case BPF_S_JMP_JGT_K: |
188 | pc += (A > fentry->k) ? fentry->jt : fentry->jf; | 190 | pc += (A > f_k) ? fentry->jt : fentry->jf; |
189 | continue; | 191 | continue; |
190 | case BPF_S_JMP_JGE_K: | 192 | case BPF_S_JMP_JGE_K: |
191 | pc += (A >= fentry->k) ? fentry->jt : fentry->jf; | 193 | pc += (A >= f_k) ? fentry->jt : fentry->jf; |
192 | continue; | 194 | continue; |
193 | case BPF_S_JMP_JEQ_K: | 195 | case BPF_S_JMP_JEQ_K: |
194 | pc += (A == fentry->k) ? fentry->jt : fentry->jf; | 196 | pc += (A == f_k) ? fentry->jt : fentry->jf; |
195 | continue; | 197 | continue; |
196 | case BPF_S_JMP_JSET_K: | 198 | case BPF_S_JMP_JSET_K: |
197 | pc += (A & fentry->k) ? fentry->jt : fentry->jf; | 199 | pc += (A & f_k) ? fentry->jt : fentry->jf; |
198 | continue; | 200 | continue; |
199 | case BPF_S_JMP_JGT_X: | 201 | case BPF_S_JMP_JGT_X: |
200 | pc += (A > X) ? fentry->jt : fentry->jf; | 202 | pc += (A > X) ? fentry->jt : fentry->jf; |
@@ -209,7 +211,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int | |||
209 | pc += (A & X) ? fentry->jt : fentry->jf; | 211 | pc += (A & X) ? fentry->jt : fentry->jf; |
210 | continue; | 212 | continue; |
211 | case BPF_S_LD_W_ABS: | 213 | case BPF_S_LD_W_ABS: |
212 | k = fentry->k; | 214 | k = f_k; |
213 | load_w: | 215 | load_w: |
214 | ptr = load_pointer(skb, k, 4, &tmp); | 216 | ptr = load_pointer(skb, k, 4, &tmp); |
215 | if (ptr != NULL) { | 217 | if (ptr != NULL) { |
@@ -218,7 +220,7 @@ load_w: | |||
218 | } | 220 | } |
219 | break; | 221 | break; |
220 | case BPF_S_LD_H_ABS: | 222 | case BPF_S_LD_H_ABS: |
221 | k = fentry->k; | 223 | k = f_k; |
222 | load_h: | 224 | load_h: |
223 | ptr = load_pointer(skb, k, 2, &tmp); | 225 | ptr = load_pointer(skb, k, 2, &tmp); |
224 | if (ptr != NULL) { | 226 | if (ptr != NULL) { |
@@ -227,7 +229,7 @@ load_h: | |||
227 | } | 229 | } |
228 | break; | 230 | break; |
229 | case BPF_S_LD_B_ABS: | 231 | case BPF_S_LD_B_ABS: |
230 | k = fentry->k; | 232 | k = f_k; |
231 | load_b: | 233 | load_b: |
232 | ptr = load_pointer(skb, k, 1, &tmp); | 234 | ptr = load_pointer(skb, k, 1, &tmp); |
233 | if (ptr != NULL) { | 235 | if (ptr != NULL) { |
@@ -242,32 +244,34 @@ load_b: | |||
242 | X = skb->len; | 244 | X = skb->len; |
243 | continue; | 245 | continue; |
244 | case BPF_S_LD_W_IND: | 246 | case BPF_S_LD_W_IND: |
245 | k = X + fentry->k; | 247 | k = X + f_k; |
246 | goto load_w; | 248 | goto load_w; |
247 | case BPF_S_LD_H_IND: | 249 | case BPF_S_LD_H_IND: |
248 | k = X + fentry->k; | 250 | k = X + f_k; |
249 | goto load_h; | 251 | goto load_h; |
250 | case BPF_S_LD_B_IND: | 252 | case BPF_S_LD_B_IND: |
251 | k = X + fentry->k; | 253 | k = X + f_k; |
252 | goto load_b; | 254 | goto load_b; |
253 | case BPF_S_LDX_B_MSH: | 255 | case BPF_S_LDX_B_MSH: |
254 | ptr = load_pointer(skb, fentry->k, 1, &tmp); | 256 | ptr = load_pointer(skb, f_k, 1, &tmp); |
255 | if (ptr != NULL) { | 257 | if (ptr != NULL) { |
256 | X = (*(u8 *)ptr & 0xf) << 2; | 258 | X = (*(u8 *)ptr & 0xf) << 2; |
257 | continue; | 259 | continue; |
258 | } | 260 | } |
259 | return 0; | 261 | return 0; |
260 | case BPF_S_LD_IMM: | 262 | case BPF_S_LD_IMM: |
261 | A = fentry->k; | 263 | A = f_k; |
262 | continue; | 264 | continue; |
263 | case BPF_S_LDX_IMM: | 265 | case BPF_S_LDX_IMM: |
264 | X = fentry->k; | 266 | X = f_k; |
265 | continue; | 267 | continue; |
266 | case BPF_S_LD_MEM: | 268 | case BPF_S_LD_MEM: |
267 | A = mem[fentry->k]; | 269 | A = (memvalid & (1UL << f_k)) ? |
270 | mem[f_k] : 0; | ||
268 | continue; | 271 | continue; |
269 | case BPF_S_LDX_MEM: | 272 | case BPF_S_LDX_MEM: |
270 | X = mem[fentry->k]; | 273 | X = (memvalid & (1UL << f_k)) ? |
274 | mem[f_k] : 0; | ||
271 | continue; | 275 | continue; |
272 | case BPF_S_MISC_TAX: | 276 | case BPF_S_MISC_TAX: |
273 | X = A; | 277 | X = A; |
@@ -276,14 +280,16 @@ load_b: | |||
276 | A = X; | 280 | A = X; |
277 | continue; | 281 | continue; |
278 | case BPF_S_RET_K: | 282 | case BPF_S_RET_K: |
279 | return fentry->k; | 283 | return f_k; |
280 | case BPF_S_RET_A: | 284 | case BPF_S_RET_A: |
281 | return A; | 285 | return A; |
282 | case BPF_S_ST: | 286 | case BPF_S_ST: |
283 | mem[fentry->k] = A; | 287 | memvalid |= 1UL << f_k; |
288 | mem[f_k] = A; | ||
284 | continue; | 289 | continue; |
285 | case BPF_S_STX: | 290 | case BPF_S_STX: |
286 | mem[fentry->k] = X; | 291 | memvalid |= 1UL << f_k; |
292 | mem[f_k] = X; | ||
287 | continue; | 293 | continue; |
288 | default: | 294 | default: |
289 | WARN_ON(1); | 295 | WARN_ON(1); |
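Editor's note on the filter.c hunks above: besides caching `fentry->k` in a local `f_k` once per instruction, the patch guards the BPF scratch memory `mem[]` with a `memvalid` bitmask, so a filter that loads a scratch slot before ever storing to it reads 0 instead of leaking uninitialized kernel stack contents. A minimal userspace sketch of the bitmask idea (not the kernel code; names are illustrative):

```c
#include <stdio.h>

#define BPF_MEMWORDS 16   /* matches the kernel's scratch-store size */

struct scratch {
	unsigned long valid;          /* bit n set => mem[n] has been written */
	unsigned int  mem[BPF_MEMWORDS];
};

static void st(struct scratch *s, unsigned int k, unsigned int v)
{
	s->valid |= 1UL << k;
	s->mem[k] = v;
}

static unsigned int ld(const struct scratch *s, unsigned int k)
{
	/* never touch mem[k] unless the slot was stored to first */
	return (s->valid & (1UL << k)) ? s->mem[k] : 0;
}

int main(void)
{
	struct scratch s;

	s.valid = 0;   /* mem[] deliberately left uninitialized */
	st(&s, 3, 42);
	printf("mem[3]=%u mem[7]=%u\n", ld(&s, 3), ld(&s, 7));  /* 42, 0 */
	return 0;
}
```

Since BPF_MEMWORDS is 16, a single unsigned long comfortably holds one validity bit per slot.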
diff --git a/net/core/iovec.c b/net/core/iovec.c index 72aceb1fe4fa..c40f27e7d208 100644 --- a/net/core/iovec.c +++ b/net/core/iovec.c | |||
@@ -35,10 +35,9 @@ | |||
35 | * in any case. | 35 | * in any case. |
36 | */ | 36 | */ |
37 | 37 | ||
38 | long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) | 38 | int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) |
39 | { | 39 | { |
40 | int size, ct; | 40 | int size, ct, err; |
41 | long err; | ||
42 | 41 | ||
43 | if (m->msg_namelen) { | 42 | if (m->msg_namelen) { |
44 | if (mode == VERIFY_READ) { | 43 | if (mode == VERIFY_READ) { |
@@ -62,14 +61,13 @@ long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, | |||
62 | err = 0; | 61 | err = 0; |
63 | 62 | ||
64 | for (ct = 0; ct < m->msg_iovlen; ct++) { | 63 | for (ct = 0; ct < m->msg_iovlen; ct++) { |
65 | err += iov[ct].iov_len; | 64 | size_t len = iov[ct].iov_len; |
66 | /* | 65 | |
67 | * Goal is not to verify user data, but to prevent returning | 66 | if (len > INT_MAX - err) { |
68 | * negative value, which is interpreted as errno. | 67 | len = INT_MAX - err; |
69 | * Overflow is still possible, but it is harmless. | 68 | iov[ct].iov_len = len; |
70 | */ | 69 | } |
71 | if (err < 0) | 70 | err += len; |
72 | return -EMSGSIZE; | ||
73 | } | 71 | } |
74 | 72 | ||
75 | return err; | 73 | return err; |
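The iovec.c change replaces after-the-fact overflow detection (summing user-supplied lengths into a signed accumulator and checking for a negative result, which is undefined behavior on signed overflow) with up-front clamping: each `iov_len` is truncated so the running total can never exceed INT_MAX. A standalone sketch of the clamping logic, under the same convention:

```c
#include <limits.h>
#include <stdio.h>
#include <sys/uio.h>

static int total_iov_len(struct iovec *iov, int iovlen)
{
	int total = 0;
	int i;

	for (i = 0; i < iovlen; i++) {
		size_t len = iov[i].iov_len;

		if (len > (size_t)(INT_MAX - total)) {
			len = INT_MAX - total;
			iov[i].iov_len = len;   /* truncate, as the patch does */
		}
		total += (int)len;              /* can no longer overflow */
	}
	return total;
}

int main(void)
{
	char a[4], b[4];
	struct iovec iov[2] = { { a, sizeof(a) }, { b, (size_t)INT_MAX } };

	printf("total = %d\n", total_iov_len(iov, 2));  /* clamped to INT_MAX */
	return 0;
}
```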
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index b143173e3eb2..a5ff5a89f376 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -598,7 +598,8 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue, | |||
598 | } | 598 | } |
599 | 599 | ||
600 | spin_lock(&rps_map_lock); | 600 | spin_lock(&rps_map_lock); |
601 | old_map = queue->rps_map; | 601 | old_map = rcu_dereference_protected(queue->rps_map, |
602 | lockdep_is_held(&rps_map_lock)); | ||
602 | rcu_assign_pointer(queue->rps_map, map); | 603 | rcu_assign_pointer(queue->rps_map, map); |
603 | spin_unlock(&rps_map_lock); | 604 | spin_unlock(&rps_map_lock); |
604 | 605 | ||
@@ -677,7 +678,8 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, | |||
677 | table = NULL; | 678 | table = NULL; |
678 | 679 | ||
679 | spin_lock(&rps_dev_flow_lock); | 680 | spin_lock(&rps_dev_flow_lock); |
680 | old_table = queue->rps_flow_table; | 681 | old_table = rcu_dereference_protected(queue->rps_flow_table, |
682 | lockdep_is_held(&rps_dev_flow_lock)); | ||
681 | rcu_assign_pointer(queue->rps_flow_table, table); | 683 | rcu_assign_pointer(queue->rps_flow_table, table); |
682 | spin_unlock(&rps_dev_flow_lock); | 684 | spin_unlock(&rps_dev_flow_lock); |
683 | 685 | ||
@@ -705,13 +707,17 @@ static void rx_queue_release(struct kobject *kobj) | |||
705 | { | 707 | { |
706 | struct netdev_rx_queue *queue = to_rx_queue(kobj); | 708 | struct netdev_rx_queue *queue = to_rx_queue(kobj); |
707 | struct netdev_rx_queue *first = queue->first; | 709 | struct netdev_rx_queue *first = queue->first; |
710 | struct rps_map *map; | ||
711 | struct rps_dev_flow_table *flow_table; | ||
708 | 712 | ||
709 | if (queue->rps_map) | ||
710 | call_rcu(&queue->rps_map->rcu, rps_map_release); | ||
711 | 713 | ||
712 | if (queue->rps_flow_table) | 714 | map = rcu_dereference_raw(queue->rps_map); |
713 | call_rcu(&queue->rps_flow_table->rcu, | 715 | if (map) |
714 | rps_dev_flow_table_release); | 716 | call_rcu(&map->rcu, rps_map_release); |
717 | |||
718 | flow_table = rcu_dereference_raw(queue->rps_flow_table); | ||
719 | if (flow_table) | ||
720 | call_rcu(&flow_table->rcu, rps_dev_flow_table_release); | ||
715 | 721 | ||
716 | if (atomic_dec_and_test(&first->count)) | 722 | if (atomic_dec_and_test(&first->count)) |
717 | kfree(first); | 723 | kfree(first); |
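The net-sysfs.c hunks are update-side RCU hygiene: a writer already holding the relevant spinlock reads the protected pointer through `rcu_dereference_protected(ptr, lockdep_is_held(&lock))`, which both documents the protection and lets lockdep verify it, instead of a bare read that sparse/lockdep would flag. A standalone C11 model of the publish-and-retire shape (not kernel code; the kernel defers the free via `call_rcu()`, this model has no concurrent readers so it frees immediately):

```c
#include <stdatomic.h>
#include <stdlib.h>

struct map { int len; };

static _Atomic(struct map *) live_map;

/* Writer side: publish the new table with release semantics (what
 * rcu_assign_pointer() provides) and retrieve the old one for disposal. */
static void publish(struct map *new_map)
{
	struct map *old = atomic_exchange_explicit(&live_map, new_map,
						   memory_order_acq_rel);
	free(old);   /* kernel: call_rcu(&old->rcu, ...) after a grace period */
}

int main(void)
{
	publish(calloc(1, sizeof(struct map)));
	publish(calloc(1, sizeof(struct map)));
	free(atomic_load(&live_map));
	return 0;
}
```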
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index c988e685433a..3f860261c5ee 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -42,7 +42,9 @@ static int net_assign_generic(struct net *net, int id, void *data) | |||
42 | BUG_ON(!mutex_is_locked(&net_mutex)); | 42 | BUG_ON(!mutex_is_locked(&net_mutex)); |
43 | BUG_ON(id == 0); | 43 | BUG_ON(id == 0); |
44 | 44 | ||
45 | ng = old_ng = net->gen; | 45 | old_ng = rcu_dereference_protected(net->gen, |
46 | lockdep_is_held(&net_mutex)); | ||
47 | ng = old_ng; | ||
46 | if (old_ng->len >= id) | 48 | if (old_ng->len >= id) |
47 | goto assign; | 49 | goto assign; |
48 | 50 | ||
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 2c0df0f95b3d..33bc3823ac6f 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -771,10 +771,10 @@ done: | |||
771 | static unsigned long num_arg(const char __user * user_buffer, | 771 | static unsigned long num_arg(const char __user * user_buffer, |
772 | unsigned long maxlen, unsigned long *num) | 772 | unsigned long maxlen, unsigned long *num) |
773 | { | 773 | { |
774 | int i = 0; | 774 | int i; |
775 | *num = 0; | 775 | *num = 0; |
776 | 776 | ||
777 | for (; i < maxlen; i++) { | 777 | for (i = 0; i < maxlen; i++) { |
778 | char c; | 778 | char c; |
779 | if (get_user(c, &user_buffer[i])) | 779 | if (get_user(c, &user_buffer[i])) |
780 | return -EFAULT; | 780 | return -EFAULT; |
@@ -789,9 +789,9 @@ static unsigned long num_arg(const char __user * user_buffer, | |||
789 | 789 | ||
790 | static int strn_len(const char __user * user_buffer, unsigned int maxlen) | 790 | static int strn_len(const char __user * user_buffer, unsigned int maxlen) |
791 | { | 791 | { |
792 | int i = 0; | 792 | int i; |
793 | 793 | ||
794 | for (; i < maxlen; i++) { | 794 | for (i = 0; i < maxlen; i++) { |
795 | char c; | 795 | char c; |
796 | if (get_user(c, &user_buffer[i])) | 796 | if (get_user(c, &user_buffer[i])) |
797 | return -EFAULT; | 797 | return -EFAULT; |
@@ -846,7 +846,7 @@ static ssize_t pktgen_if_write(struct file *file, | |||
846 | { | 846 | { |
847 | struct seq_file *seq = file->private_data; | 847 | struct seq_file *seq = file->private_data; |
848 | struct pktgen_dev *pkt_dev = seq->private; | 848 | struct pktgen_dev *pkt_dev = seq->private; |
849 | int i = 0, max, len; | 849 | int i, max, len; |
850 | char name[16], valstr[32]; | 850 | char name[16], valstr[32]; |
851 | unsigned long value = 0; | 851 | unsigned long value = 0; |
852 | char *pg_result = NULL; | 852 | char *pg_result = NULL; |
@@ -860,13 +860,13 @@ static ssize_t pktgen_if_write(struct file *file, | |||
860 | return -EINVAL; | 860 | return -EINVAL; |
861 | } | 861 | } |
862 | 862 | ||
863 | max = count - i; | 863 | max = count; |
864 | tmp = count_trail_chars(&user_buffer[i], max); | 864 | tmp = count_trail_chars(user_buffer, max); |
865 | if (tmp < 0) { | 865 | if (tmp < 0) { |
866 | pr_warning("illegal format\n"); | 866 | pr_warning("illegal format\n"); |
867 | return tmp; | 867 | return tmp; |
868 | } | 868 | } |
869 | i += tmp; | 869 | i = tmp; |
870 | 870 | ||
871 | /* Read variable name */ | 871 | /* Read variable name */ |
872 | 872 | ||
@@ -887,10 +887,11 @@ static ssize_t pktgen_if_write(struct file *file, | |||
887 | i += len; | 887 | i += len; |
888 | 888 | ||
889 | if (debug) { | 889 | if (debug) { |
890 | char tb[count + 1]; | 890 | size_t copy = min_t(size_t, count, 1023); |
891 | if (copy_from_user(tb, user_buffer, count)) | 891 | char tb[copy + 1]; |
892 | if (copy_from_user(tb, user_buffer, copy)) | ||
892 | return -EFAULT; | 893 | return -EFAULT; |
893 | tb[count] = 0; | 894 | tb[copy] = 0; |
894 | printk(KERN_DEBUG "pktgen: %s,%lu buffer -:%s:-\n", name, | 895 | printk(KERN_DEBUG "pktgen: %s,%lu buffer -:%s:-\n", name, |
895 | (unsigned long)count, tb); | 896 | (unsigned long)count, tb); |
896 | } | 897 | } |
@@ -1764,7 +1765,7 @@ static ssize_t pktgen_thread_write(struct file *file, | |||
1764 | { | 1765 | { |
1765 | struct seq_file *seq = file->private_data; | 1766 | struct seq_file *seq = file->private_data; |
1766 | struct pktgen_thread *t = seq->private; | 1767 | struct pktgen_thread *t = seq->private; |
1767 | int i = 0, max, len, ret; | 1768 | int i, max, len, ret; |
1768 | char name[40]; | 1769 | char name[40]; |
1769 | char *pg_result; | 1770 | char *pg_result; |
1770 | 1771 | ||
@@ -1773,12 +1774,12 @@ static ssize_t pktgen_thread_write(struct file *file, | |||
1773 | return -EINVAL; | 1774 | return -EINVAL; |
1774 | } | 1775 | } |
1775 | 1776 | ||
1776 | max = count - i; | 1777 | max = count; |
1777 | len = count_trail_chars(&user_buffer[i], max); | 1778 | len = count_trail_chars(user_buffer, max); |
1778 | if (len < 0) | 1779 | if (len < 0) |
1779 | return len; | 1780 | return len; |
1780 | 1781 | ||
1781 | i += len; | 1782 | i = len; |
1782 | 1783 | ||
1783 | /* Read variable name */ | 1784 | /* Read variable name */ |
1784 | 1785 | ||
@@ -1975,7 +1976,7 @@ static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev, | |||
1975 | const char *ifname) | 1976 | const char *ifname) |
1976 | { | 1977 | { |
1977 | char b[IFNAMSIZ+5]; | 1978 | char b[IFNAMSIZ+5]; |
1978 | int i = 0; | 1979 | int i; |
1979 | 1980 | ||
1980 | for (i = 0; ifname[i] != '@'; i++) { | 1981 | for (i = 0; ifname[i] != '@'; i++) { |
1981 | if (i == IFNAMSIZ) | 1982 | if (i == IFNAMSIZ) |
@@ -2519,8 +2520,8 @@ static void free_SAs(struct pktgen_dev *pkt_dev) | |||
2519 | { | 2520 | { |
2520 | if (pkt_dev->cflows) { | 2521 | if (pkt_dev->cflows) { |
2521 | /* let go of the SAs if we have them */ | 2522 | /* let go of the SAs if we have them */ |
2522 | int i = 0; | 2523 | int i; |
2523 | for (; i < pkt_dev->cflows; i++) { | 2524 | for (i = 0; i < pkt_dev->cflows; i++) { |
2524 | struct xfrm_state *x = pkt_dev->flows[i].x; | 2525 | struct xfrm_state *x = pkt_dev->flows[i].x; |
2525 | if (x) { | 2526 | if (x) { |
2526 | xfrm_state_put(x); | 2527 | xfrm_state_put(x); |
@@ -2611,8 +2612,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | |||
2611 | /* Update any of the values, used when we're incrementing various | 2612 | /* Update any of the values, used when we're incrementing various |
2612 | * fields. | 2613 | * fields. |
2613 | */ | 2614 | */ |
2614 | queue_map = pkt_dev->cur_queue_map; | ||
2615 | mod_cur_headers(pkt_dev); | 2615 | mod_cur_headers(pkt_dev); |
2616 | queue_map = pkt_dev->cur_queue_map; | ||
2616 | 2617 | ||
2617 | datalen = (odev->hard_header_len + 16) & ~0xf; | 2618 | datalen = (odev->hard_header_len + 16) & ~0xf; |
2618 | 2619 | ||
@@ -2975,8 +2976,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, | |||
2975 | /* Update any of the values, used when we're incrementing various | 2976 | /* Update any of the values, used when we're incrementing various |
2976 | * fields. | 2977 | * fields. |
2977 | */ | 2978 | */ |
2978 | queue_map = pkt_dev->cur_queue_map; | ||
2979 | mod_cur_headers(pkt_dev); | 2979 | mod_cur_headers(pkt_dev); |
2980 | queue_map = pkt_dev->cur_queue_map; | ||
2980 | 2981 | ||
2981 | skb = __netdev_alloc_skb(odev, | 2982 | skb = __netdev_alloc_skb(odev, |
2982 | pkt_dev->cur_pkt_size + 64 | 2983 | pkt_dev->cur_pkt_size + 64 |
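Three distinct fixes land in pktgen.c above: loop indices are initialized in the `for` statement rather than at declaration, `cur_queue_map` is now sampled *after* `mod_cur_headers()` updates it (so the packet carries the current queue, not the previous one), and the debug path no longer sizes a stack VLA `tb[count + 1]` directly from a user-controlled byte count, clamping the copy to 1023 bytes first. A userspace sketch of the bounded copy (illustrative; the kernel uses `copy_from_user()` and `min_t()`):

```c
#include <stdio.h>
#include <string.h>

static void debug_dump(const char *user_buffer, size_t count)
{
	size_t copy = count < 1023 ? count : 1023;
	char tb[copy + 1];               /* bounded VLA: at most 1 KiB of stack */

	memcpy(tb, user_buffer, copy);   /* copy_from_user() in the kernel */
	tb[copy] = '\0';
	printf("buffer -:%s:-\n", tb);
}

int main(void)
{
	debug_dump("pkt_size 1400", 13);
	return 0;
}
```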
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 8121268ddbdd..841c287ef40a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -347,16 +347,17 @@ static size_t rtnl_link_get_size(const struct net_device *dev) | |||
347 | if (!ops) | 347 | if (!ops) |
348 | return 0; | 348 | return 0; |
349 | 349 | ||
350 | size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */ | 350 | size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */ |
351 | nlmsg_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */ | 351 | nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */ |
352 | 352 | ||
353 | if (ops->get_size) | 353 | if (ops->get_size) |
354 | /* IFLA_INFO_DATA + nested data */ | 354 | /* IFLA_INFO_DATA + nested data */ |
355 | size += nlmsg_total_size(sizeof(struct nlattr)) + | 355 | size += nla_total_size(sizeof(struct nlattr)) + |
356 | ops->get_size(dev); | 356 | ops->get_size(dev); |
357 | 357 | ||
358 | if (ops->get_xstats_size) | 358 | if (ops->get_xstats_size) |
359 | size += ops->get_xstats_size(dev); /* IFLA_INFO_XSTATS */ | 359 | /* IFLA_INFO_XSTATS */ |
360 | size += nla_total_size(ops->get_xstats_size(dev)); | ||
360 | 361 | ||
361 | return size; | 362 | return size; |
362 | } | 363 | } |
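The rtnetlink.c fix swaps `nlmsg_total_size()` for `nla_total_size()` in per-attribute accounting: the former pads for the 16-byte `struct nlmsghdr`, the latter for the 4-byte `struct nlattr`, so message-level sizing per attribute over-reserves, and `get_xstats_size()` previously wasn't wrapped at all. A small demonstration of the difference, assuming the Linux uapi header (the underscored helpers mirror the kernel ones locally):

```c
#include <stdio.h>
#include <linux/netlink.h>

static int nla_total_size_(int payload)
{
	return NLMSG_ALIGN(NLA_HDRLEN + payload);    /* 4-byte attr header */
}

static int nlmsg_total_size_(int payload)
{
	return NLMSG_ALIGN(NLMSG_HDRLEN + payload);  /* 16-byte msg header */
}

int main(void)
{
	/* one 4-byte attribute: 8 bytes as an attribute, 20 if mis-counted
	 * as a whole netlink message */
	printf("nla=%d nlmsg=%d\n", nla_total_size_(4), nlmsg_total_size_(4));
	return 0;
}
```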
diff --git a/net/core/sock.c b/net/core/sock.c index 11db43632df8..fb6080111461 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -1225,7 +1225,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) | |||
1225 | sock_reset_flag(newsk, SOCK_DONE); | 1225 | sock_reset_flag(newsk, SOCK_DONE); |
1226 | skb_queue_head_init(&newsk->sk_error_queue); | 1226 | skb_queue_head_init(&newsk->sk_error_queue); |
1227 | 1227 | ||
1228 | filter = newsk->sk_filter; | 1228 | filter = rcu_dereference_protected(newsk->sk_filter, 1); |
1229 | if (filter != NULL) | 1229 | if (filter != NULL) |
1230 | sk_filter_charge(newsk, filter); | 1230 | sk_filter_charge(newsk, filter); |
1231 | 1231 | ||
@@ -1653,10 +1653,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind) | |||
1653 | { | 1653 | { |
1654 | struct proto *prot = sk->sk_prot; | 1654 | struct proto *prot = sk->sk_prot; |
1655 | int amt = sk_mem_pages(size); | 1655 | int amt = sk_mem_pages(size); |
1656 | int allocated; | 1656 | long allocated; |
1657 | 1657 | ||
1658 | sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; | 1658 | sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; |
1659 | allocated = atomic_add_return(amt, prot->memory_allocated); | 1659 | allocated = atomic_long_add_return(amt, prot->memory_allocated); |
1660 | 1660 | ||
1661 | /* Under limit. */ | 1661 | /* Under limit. */ |
1662 | if (allocated <= prot->sysctl_mem[0]) { | 1662 | if (allocated <= prot->sysctl_mem[0]) { |
@@ -1714,7 +1714,7 @@ suppress_allocation: | |||
1714 | 1714 | ||
1715 | /* Alas. Undo changes. */ | 1715 | /* Alas. Undo changes. */ |
1716 | sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; | 1716 | sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; |
1717 | atomic_sub(amt, prot->memory_allocated); | 1717 | atomic_long_sub(amt, prot->memory_allocated); |
1718 | return 0; | 1718 | return 0; |
1719 | } | 1719 | } |
1720 | EXPORT_SYMBOL(__sk_mem_schedule); | 1720 | EXPORT_SYMBOL(__sk_mem_schedule); |
@@ -1727,12 +1727,12 @@ void __sk_mem_reclaim(struct sock *sk) | |||
1727 | { | 1727 | { |
1728 | struct proto *prot = sk->sk_prot; | 1728 | struct proto *prot = sk->sk_prot; |
1729 | 1729 | ||
1730 | atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, | 1730 | atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, |
1731 | prot->memory_allocated); | 1731 | prot->memory_allocated); |
1732 | sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; | 1732 | sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; |
1733 | 1733 | ||
1734 | if (prot->memory_pressure && *prot->memory_pressure && | 1734 | if (prot->memory_pressure && *prot->memory_pressure && |
1735 | (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0])) | 1735 | (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0])) |
1736 | *prot->memory_pressure = 0; | 1736 | *prot->memory_pressure = 0; |
1737 | } | 1737 | } |
1738 | EXPORT_SYMBOL(__sk_mem_reclaim); | 1738 | EXPORT_SYMBOL(__sk_mem_reclaim); |
@@ -2452,12 +2452,12 @@ static char proto_method_implemented(const void *method) | |||
2452 | 2452 | ||
2453 | static void proto_seq_printf(struct seq_file *seq, struct proto *proto) | 2453 | static void proto_seq_printf(struct seq_file *seq, struct proto *proto) |
2454 | { | 2454 | { |
2455 | seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s " | 2455 | seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s " |
2456 | "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", | 2456 | "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", |
2457 | proto->name, | 2457 | proto->name, |
2458 | proto->obj_size, | 2458 | proto->obj_size, |
2459 | sock_prot_inuse_get(seq_file_net(seq), proto), | 2459 | sock_prot_inuse_get(seq_file_net(seq), proto), |
2460 | proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1, | 2460 | proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L, |
2461 | proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI", | 2461 | proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI", |
2462 | proto->max_header, | 2462 | proto->max_header, |
2463 | proto->slab == NULL ? "no" : "yes", | 2463 | proto->slab == NULL ? "no" : "yes", |
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 01eee5d984be..385b6095fdc4 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -34,7 +34,8 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write, | |||
34 | 34 | ||
35 | mutex_lock(&sock_flow_mutex); | 35 | mutex_lock(&sock_flow_mutex); |
36 | 36 | ||
37 | orig_sock_table = rps_sock_flow_table; | 37 | orig_sock_table = rcu_dereference_protected(rps_sock_flow_table, |
38 | lockdep_is_held(&sock_flow_mutex)); | ||
38 | size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0; | 39 | size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0; |
39 | 40 | ||
40 | ret = proc_dointvec(&tmp, write, buffer, lenp, ppos); | 41 | ret = proc_dointvec(&tmp, write, buffer, lenp, ppos); |
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 117fb093dcaf..75c3582a7678 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h | |||
@@ -134,13 +134,41 @@ static inline int ccid_get_current_tx_ccid(struct dccp_sock *dp) | |||
134 | extern void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk); | 134 | extern void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk); |
135 | extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk); | 135 | extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk); |
136 | 136 | ||
137 | /* | ||
138 | * Congestion control of queued data packets via CCID decision. | ||
139 | * | ||
140 | * The TX CCID performs its congestion-control by indicating whether and when a | ||
141 | * queued packet may be sent, using the return code of ccid_hc_tx_send_packet(). | ||
142 | * The following modes are supported via the symbolic constants below: | ||
143 | * - timer-based pacing (CCID returns a delay value in milliseconds); | ||
144 | * - autonomous dequeueing (CCID internally schedules dccps_xmitlet). | ||
145 | */ | ||
146 | |||
147 | enum ccid_dequeueing_decision { | ||
148 | CCID_PACKET_SEND_AT_ONCE = 0x00000, /* "green light": no delay */ | ||
149 | CCID_PACKET_DELAY_MAX = 0x0FFFF, /* maximum delay in msecs */ | ||
150 | CCID_PACKET_DELAY = 0x10000, /* CCID msec-delay mode */ | ||
151 | CCID_PACKET_WILL_DEQUEUE_LATER = 0x20000, /* CCID autonomous mode */ | ||
152 | CCID_PACKET_ERR = 0xF0000, /* error condition */ | ||
153 | }; | ||
154 | |||
155 | static inline int ccid_packet_dequeue_eval(const int return_code) | ||
156 | { | ||
157 | if (return_code < 0) | ||
158 | return CCID_PACKET_ERR; | ||
159 | if (return_code == 0) | ||
160 | return CCID_PACKET_SEND_AT_ONCE; | ||
161 | if (return_code <= CCID_PACKET_DELAY_MAX) | ||
162 | return CCID_PACKET_DELAY; | ||
163 | return return_code; | ||
164 | } | ||
165 | |||
137 | static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk, | 166 | static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk, |
138 | struct sk_buff *skb) | 167 | struct sk_buff *skb) |
139 | { | 168 | { |
140 | int rc = 0; | ||
141 | if (ccid->ccid_ops->ccid_hc_tx_send_packet != NULL) | 169 | if (ccid->ccid_ops->ccid_hc_tx_send_packet != NULL) |
142 | rc = ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb); | 170 | return ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb); |
143 | return rc; | 171 | return CCID_PACKET_SEND_AT_ONCE; |
144 | } | 172 | } |
145 | 173 | ||
146 | static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk, | 174 | static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk, |
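The new ccid.h interface packs three outcomes into one return code: 0 means "send now", 1..0xFFFF is a pacing delay in milliseconds, and flag values above that encode "the CCID will dequeue on its own later" or an error. A standalone restatement of the decoder, with the enum values copied from the patch, makes the ranges easy to experiment with:

```c
#include <stdio.h>

enum ccid_dequeueing_decision {
	CCID_PACKET_SEND_AT_ONCE       = 0x00000,
	CCID_PACKET_DELAY_MAX          = 0x0FFFF,
	CCID_PACKET_DELAY              = 0x10000,
	CCID_PACKET_WILL_DEQUEUE_LATER = 0x20000,
	CCID_PACKET_ERR                = 0xF0000,
};

static int ccid_packet_dequeue_eval(int rc)
{
	if (rc < 0)
		return CCID_PACKET_ERR;
	if (rc == 0)
		return CCID_PACKET_SEND_AT_ONCE;
	if (rc <= CCID_PACKET_DELAY_MAX)
		return CCID_PACKET_DELAY;
	return rc;
}

int main(void)
{
	printf("%x %x %x\n",
	       ccid_packet_dequeue_eval(0),     /* send at once */
	       ccid_packet_dequeue_eval(250),   /* 250 ms pacing delay */
	       ccid_packet_dequeue_eval(CCID_PACKET_WILL_DEQUEUE_LATER));
	return 0;
}
```

The encoding lets window-based CCIDs (ccid2) signal autonomous dequeueing while delay-based ones (ccid3) keep returning millisecond values, without changing the callback signature.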
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index d850e291f87c..6576eae9e779 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c | |||
@@ -78,12 +78,9 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc) | |||
78 | 78 | ||
79 | static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | 79 | static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) |
80 | { | 80 | { |
81 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); | 81 | if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk))) |
82 | 82 | return CCID_PACKET_WILL_DEQUEUE_LATER; | |
83 | if (hc->tx_pipe < hc->tx_cwnd) | 83 | return CCID_PACKET_SEND_AT_ONCE; |
84 | return 0; | ||
85 | |||
86 | return 1; /* XXX CCID should dequeue when ready instead of polling */ | ||
87 | } | 84 | } |
88 | 85 | ||
89 | static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) | 86 | static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) |
@@ -115,6 +112,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data) | |||
115 | { | 112 | { |
116 | struct sock *sk = (struct sock *)data; | 113 | struct sock *sk = (struct sock *)data; |
117 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); | 114 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
115 | const bool sender_was_blocked = ccid2_cwnd_network_limited(hc); | ||
118 | 116 | ||
119 | bh_lock_sock(sk); | 117 | bh_lock_sock(sk); |
120 | if (sock_owned_by_user(sk)) { | 118 | if (sock_owned_by_user(sk)) { |
@@ -129,8 +127,6 @@ static void ccid2_hc_tx_rto_expire(unsigned long data) | |||
129 | if (hc->tx_rto > DCCP_RTO_MAX) | 127 | if (hc->tx_rto > DCCP_RTO_MAX) |
130 | hc->tx_rto = DCCP_RTO_MAX; | 128 | hc->tx_rto = DCCP_RTO_MAX; |
131 | 129 | ||
132 | sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); | ||
133 | |||
134 | /* adjust pipe, cwnd etc */ | 130 | /* adjust pipe, cwnd etc */ |
135 | hc->tx_ssthresh = hc->tx_cwnd / 2; | 131 | hc->tx_ssthresh = hc->tx_cwnd / 2; |
136 | if (hc->tx_ssthresh < 2) | 132 | if (hc->tx_ssthresh < 2) |
@@ -146,6 +142,12 @@ static void ccid2_hc_tx_rto_expire(unsigned long data) | |||
146 | hc->tx_rpseq = 0; | 142 | hc->tx_rpseq = 0; |
147 | hc->tx_rpdupack = -1; | 143 | hc->tx_rpdupack = -1; |
148 | ccid2_change_l_ack_ratio(sk, 1); | 144 | ccid2_change_l_ack_ratio(sk, 1); |
145 | |||
146 | /* if we were blocked before, we may now send cwnd=1 packet */ | ||
147 | if (sender_was_blocked) | ||
148 | tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); | ||
149 | /* restart backed-off timer */ | ||
150 | sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); | ||
149 | out: | 151 | out: |
150 | bh_unlock_sock(sk); | 152 | bh_unlock_sock(sk); |
151 | sock_put(sk); | 153 | sock_put(sk); |
@@ -434,6 +436,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
434 | { | 436 | { |
435 | struct dccp_sock *dp = dccp_sk(sk); | 437 | struct dccp_sock *dp = dccp_sk(sk); |
436 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); | 438 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
439 | const bool sender_was_blocked = ccid2_cwnd_network_limited(hc); | ||
437 | u64 ackno, seqno; | 440 | u64 ackno, seqno; |
438 | struct ccid2_seq *seqp; | 441 | struct ccid2_seq *seqp; |
439 | unsigned char *vector; | 442 | unsigned char *vector; |
@@ -631,6 +634,10 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
631 | sk_stop_timer(sk, &hc->tx_rtotimer); | 634 | sk_stop_timer(sk, &hc->tx_rtotimer); |
632 | else | 635 | else |
633 | sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); | 636 | sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); |
637 | |||
638 | /* check if incoming Acks allow pending packets to be sent */ | ||
639 | if (sender_was_blocked && !ccid2_cwnd_network_limited(hc)) | ||
640 | tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); | ||
634 | } | 641 | } |
635 | 642 | ||
636 | static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) | 643 | static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) |
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h index 9731c2dc1487..25cb6b216eda 100644 --- a/net/dccp/ccids/ccid2.h +++ b/net/dccp/ccids/ccid2.h | |||
@@ -81,6 +81,11 @@ struct ccid2_hc_tx_sock { | |||
81 | u64 tx_high_ack; | 81 | u64 tx_high_ack; |
82 | }; | 82 | }; |
83 | 83 | ||
84 | static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hc) | ||
85 | { | ||
86 | return hc->tx_pipe >= hc->tx_cwnd; | ||
87 | } | ||
88 | |||
84 | struct ccid2_hc_rx_sock { | 89 | struct ccid2_hc_rx_sock { |
85 | int rx_data; | 90 | int rx_data; |
86 | }; | 91 | }; |
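With the `ccid2_cwnd_network_limited()` predicate above, ccid2 moves from polling to event-driven dequeueing: both the RTO handler and ack processing snapshot whether the sender was window-limited *before* updating state, and schedule the transmit tasklet only on the blocked-to-unblocked transition. A toy model of that rule (plain C, no kernel API; the real feedback path is of course richer):

```c
#include <stdbool.h>
#include <stdio.h>

struct tx { unsigned pipe, cwnd; };   /* in-flight count vs. window */

static bool network_limited(const struct tx *t)
{
	return t->pipe >= t->cwnd;
}

static void on_ack(struct tx *t, unsigned newly_acked)
{
	bool was_blocked = network_limited(t);

	t->pipe -= newly_acked;   /* feedback shrinks the in-flight count */
	if (was_blocked && !network_limited(t))
		puts("schedule xmit tasklet");   /* tasklet_schedule() in the kernel */
}

int main(void)
{
	struct tx t = { .pipe = 4, .cwnd = 4 };

	on_ack(&t, 1);   /* transition: blocked -> room for one packet */
	return 0;
}
```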
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 3060a60ed5ab..3d604e1349c0 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c | |||
@@ -268,11 +268,11 @@ out: | |||
268 | sock_put(sk); | 268 | sock_put(sk); |
269 | } | 269 | } |
270 | 270 | ||
271 | /* | 271 | /** |
272 | * returns | 272 | * ccid3_hc_tx_send_packet - Delay-based dequeueing of TX packets |
273 | * > 0: delay (in msecs) that should pass before actually sending | 273 | * @skb: next packet candidate to send on @sk |
274 | * = 0: can send immediately | 274 | * This function uses the convention of ccid_packet_dequeue_eval() and |
275 | * < 0: error condition; do not send packet | 275 | * returns a millisecond-delay value between 0 and t_mbi = 64000 msec. |
276 | */ | 276 | */ |
277 | static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | 277 | static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) |
278 | { | 278 | { |
@@ -348,7 +348,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | |||
348 | 348 | ||
349 | /* set the nominal send time for the next following packet */ | 349 | /* set the nominal send time for the next following packet */ |
350 | hc->tx_t_nom = ktime_add_us(hc->tx_t_nom, hc->tx_t_ipi); | 350 | hc->tx_t_nom = ktime_add_us(hc->tx_t_nom, hc->tx_t_ipi); |
351 | return 0; | 351 | return CCID_PACKET_SEND_AT_ONCE; |
352 | } | 352 | } |
353 | 353 | ||
354 | static void ccid3_hc_tx_packet_sent(struct sock *sk, unsigned int len) | 354 | static void ccid3_hc_tx_packet_sent(struct sock *sk, unsigned int len) |
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 3eb264b60823..a8ed459508b2 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h | |||
@@ -243,8 +243,9 @@ extern void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, | |||
243 | extern void dccp_send_sync(struct sock *sk, const u64 seq, | 243 | extern void dccp_send_sync(struct sock *sk, const u64 seq, |
244 | const enum dccp_pkt_type pkt_type); | 244 | const enum dccp_pkt_type pkt_type); |
245 | 245 | ||
246 | extern void dccp_write_xmit(struct sock *sk, int block); | 246 | extern void dccp_write_xmit(struct sock *sk); |
247 | extern void dccp_write_space(struct sock *sk); | 247 | extern void dccp_write_space(struct sock *sk); |
248 | extern void dccp_flush_write_queue(struct sock *sk, long *time_budget); | ||
248 | 249 | ||
249 | extern void dccp_init_xmit_timers(struct sock *sk); | 250 | extern void dccp_init_xmit_timers(struct sock *sk); |
250 | static inline void dccp_clear_xmit_timers(struct sock *sk) | 251 | static inline void dccp_clear_xmit_timers(struct sock *sk) |
diff --git a/net/dccp/output.c b/net/dccp/output.c index a988fe9ffcba..45b91853f5ae 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c | |||
@@ -209,108 +209,150 @@ void dccp_write_space(struct sock *sk) | |||
209 | } | 209 | } |
210 | 210 | ||
211 | /** | 211 | /** |
212 | * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet | 212 | * dccp_wait_for_ccid - Await CCID send permission |
213 | * @sk: socket to wait for | 213 | * @sk: socket to wait for |
214 | * @skb: current skb to pass on for waiting | 214 | * @delay: timeout in jiffies |
215 | * @delay: sleep timeout in milliseconds (> 0) | 215 | * This is used by CCIDs which need to delay the send time in process context. |
216 | * This function is called by default when the socket is closed, and | ||
217 | * when a non-zero linger time is set on the socket. For consistency | ||
218 | */ | 216 | */ |
219 | static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay) | 217 | static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay) |
220 | { | 218 | { |
221 | struct dccp_sock *dp = dccp_sk(sk); | ||
222 | DEFINE_WAIT(wait); | 219 | DEFINE_WAIT(wait); |
223 | unsigned long jiffdelay; | 220 | long remaining; |
224 | int rc; | 221 | |
222 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | ||
223 | sk->sk_write_pending++; | ||
224 | release_sock(sk); | ||
225 | |||
226 | remaining = schedule_timeout(delay); | ||
227 | |||
228 | lock_sock(sk); | ||
229 | sk->sk_write_pending--; | ||
230 | finish_wait(sk_sleep(sk), &wait); | ||
231 | |||
232 | if (signal_pending(current) || sk->sk_err) | ||
233 | return -1; | ||
234 | return remaining; | ||
235 | } | ||
236 | |||
237 | /** | ||
238 | * dccp_xmit_packet - Send data packet under control of CCID | ||
239 | * Transmits next-queued payload and informs CCID to account for the packet. | ||
240 | */ | ||
241 | static void dccp_xmit_packet(struct sock *sk) | ||
242 | { | ||
243 | int err, len; | ||
244 | struct dccp_sock *dp = dccp_sk(sk); | ||
245 | struct sk_buff *skb = skb_dequeue(&sk->sk_write_queue); | ||
225 | 246 | ||
226 | do { | 247 | if (unlikely(skb == NULL)) |
227 | dccp_pr_debug("delayed send by %d msec\n", delay); | 248 | return; |
228 | jiffdelay = msecs_to_jiffies(delay); | 249 | len = skb->len; |
229 | 250 | ||
230 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 251 | if (sk->sk_state == DCCP_PARTOPEN) { |
252 | const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD; | ||
253 | /* | ||
254 | * See 8.1.5 - Handshake Completion. | ||
255 | * | ||
256 | * For robustness we resend Confirm options until the client has | ||
257 | * entered OPEN. During the initial feature negotiation, the MPS | ||
258 | * is smaller than usual, reduced by the Change/Confirm options. | ||
259 | */ | ||
260 | if (!list_empty(&dp->dccps_featneg) && len > cur_mps) { | ||
261 | DCCP_WARN("Payload too large (%d) for featneg.\n", len); | ||
262 | dccp_send_ack(sk); | ||
263 | dccp_feat_list_purge(&dp->dccps_featneg); | ||
264 | } | ||
231 | 265 | ||
232 | sk->sk_write_pending++; | 266 | inet_csk_schedule_ack(sk); |
233 | release_sock(sk); | 267 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, |
234 | schedule_timeout(jiffdelay); | 268 | inet_csk(sk)->icsk_rto, |
235 | lock_sock(sk); | 269 | DCCP_RTO_MAX); |
236 | sk->sk_write_pending--; | 270 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK; |
271 | } else if (dccp_ack_pending(sk)) { | ||
272 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK; | ||
273 | } else { | ||
274 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA; | ||
275 | } | ||
276 | |||
277 | err = dccp_transmit_skb(sk, skb); | ||
278 | if (err) | ||
279 | dccp_pr_debug("transmit_skb() returned err=%d\n", err); | ||
280 | /* | ||
281 | * Register this one as sent even if an error occurred. To the remote | ||
282 | * end a local packet drop is indistinguishable from network loss, i.e. | ||
283 | * any local drop will eventually be reported via receiver feedback. | ||
284 | */ | ||
285 | ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len); | ||
286 | } | ||
237 | 287 | ||
238 | if (sk->sk_err) | 288 | /** |
239 | goto do_error; | 289 | * dccp_flush_write_queue - Drain queue at end of connection |
240 | if (signal_pending(current)) | 290 | * Since dccp_sendmsg queues packets without waiting for them to be sent, it may |
241 | goto do_interrupted; | 291 | * happen that the TX queue is not empty at the end of a connection. We give the |
292 | * HC-sender CCID a grace period of up to @time_budget jiffies. If this function | ||
293 | * returns with a non-empty write queue, it will be purged later. | ||
294 | */ | ||
295 | void dccp_flush_write_queue(struct sock *sk, long *time_budget) | ||
296 | { | ||
297 | struct dccp_sock *dp = dccp_sk(sk); | ||
298 | struct sk_buff *skb; | ||
299 | long delay, rc; | ||
242 | 300 | ||
301 | while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) { | ||
243 | rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); | 302 | rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); |
244 | } while ((delay = rc) > 0); | 303 | |
245 | out: | 304 | switch (ccid_packet_dequeue_eval(rc)) { |
246 | finish_wait(sk_sleep(sk), &wait); | 305 | case CCID_PACKET_WILL_DEQUEUE_LATER: |
247 | return rc; | 306 | /* |
248 | 307 | * If the CCID determines when to send, the next sending | |
249 | do_error: | 308 | * time is unknown or the CCID may not even send again |
250 | rc = -EPIPE; | 309 | * (e.g. remote host crashes or lost Ack packets). |
251 | goto out; | 310 | */ |
252 | do_interrupted: | 311 | DCCP_WARN("CCID did not manage to send all packets\n"); |
253 | rc = -EINTR; | 312 | return; |
254 | goto out; | 313 | case CCID_PACKET_DELAY: |
314 | delay = msecs_to_jiffies(rc); | ||
315 | if (delay > *time_budget) | ||
316 | return; | ||
317 | rc = dccp_wait_for_ccid(sk, delay); | ||
318 | if (rc < 0) | ||
319 | return; | ||
320 | *time_budget -= (delay - rc); | ||
321 | /* check again if we can send now */ | ||
322 | break; | ||
323 | case CCID_PACKET_SEND_AT_ONCE: | ||
324 | dccp_xmit_packet(sk); | ||
325 | break; | ||
326 | case CCID_PACKET_ERR: | ||
327 | skb_dequeue(&sk->sk_write_queue); | ||
328 | kfree_skb(skb); | ||
329 | dccp_pr_debug("packet discarded due to err=%ld\n", rc); | ||
330 | } | ||
331 | } | ||
255 | } | 332 | } |
256 | 333 | ||
257 | void dccp_write_xmit(struct sock *sk, int block) | 334 | void dccp_write_xmit(struct sock *sk) |
258 | { | 335 | { |
259 | struct dccp_sock *dp = dccp_sk(sk); | 336 | struct dccp_sock *dp = dccp_sk(sk); |
260 | struct sk_buff *skb; | 337 | struct sk_buff *skb; |
261 | 338 | ||
262 | while ((skb = skb_peek(&sk->sk_write_queue))) { | 339 | while ((skb = skb_peek(&sk->sk_write_queue))) { |
263 | int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); | 340 | int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); |
264 | |||
265 | if (err > 0) { | ||
266 | if (!block) { | ||
267 | sk_reset_timer(sk, &dp->dccps_xmit_timer, | ||
268 | msecs_to_jiffies(err)+jiffies); | ||
269 | break; | ||
270 | } else | ||
271 | err = dccp_wait_for_ccid(sk, skb, err); | ||
272 | if (err && err != -EINTR) | ||
273 | DCCP_BUG("err=%d after dccp_wait_for_ccid", err); | ||
274 | } | ||
275 | 341 | ||
276 | skb_dequeue(&sk->sk_write_queue); | 342 | switch (ccid_packet_dequeue_eval(rc)) { |
277 | if (err == 0) { | 343 | case CCID_PACKET_WILL_DEQUEUE_LATER: |
278 | struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); | 344 | return; |
279 | const int len = skb->len; | 345 | case CCID_PACKET_DELAY: |
280 | 346 | sk_reset_timer(sk, &dp->dccps_xmit_timer, | |
281 | if (sk->sk_state == DCCP_PARTOPEN) { | 347 | jiffies + msecs_to_jiffies(rc)); |
282 | const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD; | 348 | return; |
283 | /* | 349 | case CCID_PACKET_SEND_AT_ONCE: |
284 | * See 8.1.5 - Handshake Completion. | 350 | dccp_xmit_packet(sk); |
285 | * | 351 | break; |
286 | * For robustness we resend Confirm options until the client has | 352 | case CCID_PACKET_ERR: |
287 | * entered OPEN. During the initial feature negotiation, the MPS | 353 | skb_dequeue(&sk->sk_write_queue); |
288 | * is smaller than usual, reduced by the Change/Confirm options. | ||
289 | */ | ||
290 | if (!list_empty(&dp->dccps_featneg) && len > cur_mps) { | ||
291 | DCCP_WARN("Payload too large (%d) for featneg.\n", len); | ||
292 | dccp_send_ack(sk); | ||
293 | dccp_feat_list_purge(&dp->dccps_featneg); | ||
294 | } | ||
295 | |||
296 | inet_csk_schedule_ack(sk); | ||
297 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, | ||
298 | inet_csk(sk)->icsk_rto, | ||
299 | DCCP_RTO_MAX); | ||
300 | dcb->dccpd_type = DCCP_PKT_DATAACK; | ||
301 | } else if (dccp_ack_pending(sk)) | ||
302 | dcb->dccpd_type = DCCP_PKT_DATAACK; | ||
303 | else | ||
304 | dcb->dccpd_type = DCCP_PKT_DATA; | ||
305 | |||
306 | err = dccp_transmit_skb(sk, skb); | ||
307 | ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len); | ||
308 | if (err) | ||
309 | DCCP_BUG("err=%d after ccid_hc_tx_packet_sent", | ||
310 | err); | ||
311 | } else { | ||
312 | dccp_pr_debug("packet discarded due to err=%d\n", err); | ||
313 | kfree_skb(skb); | 354 | kfree_skb(skb); |
355 | dccp_pr_debug("packet discarded due to err=%d\n", rc); | ||
314 | } | 356 | } |
315 | } | 357 | } |
316 | } | 358 | } |
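The reworked output.c splits responsibilities: `dccp_xmit_packet()` does the actual transmit and CCID accounting, `dccp_write_xmit()` dispatches on `ccid_packet_dequeue_eval()`, and `dccp_flush_write_queue()` drains leftovers at connection end under a jiffies budget, giving up (queue purged later by `dccp_close()`) once the budget cannot cover the CCID's next delay. A simplified userspace model of the budgeted drain, with the queue and CCID verdicts stubbed as plain delays (the real loop re-polls the same packet after waiting):

```c
#include <stdio.h>

/* per-packet CCID verdict: 0 = send now, >0 = pacing delay in ms */
static void flush_write_queue(const int *delay_ms, int n, long *budget)
{
	int i;

	for (i = 0; i < n; i++) {
		long delay = delay_ms[i];

		if (delay == 0) {
			printf("xmit packet %d\n", i);            /* dccp_xmit_packet() */
		} else if (delay <= *budget) {
			*budget -= delay;                          /* dccp_wait_for_ccid() */
			printf("waited %ld ms, xmit packet %d\n", delay, i);
		} else {
			printf("budget exhausted at packet %d; rest purged\n", i);
			return;
		}
	}
}

int main(void)
{
	int delays[] = { 0, 40, 500 };
	long budget = 100;

	flush_write_queue(delays, 3, &budget);
	return 0;
}
```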
@@ -622,7 +664,6 @@ void dccp_send_close(struct sock *sk, const int active) | |||
622 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE; | 664 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE; |
623 | 665 | ||
624 | if (active) { | 666 | if (active) { |
625 | dccp_write_xmit(sk, 1); | ||
626 | dccp_skb_entail(sk, skb); | 667 | dccp_skb_entail(sk, skb); |
627 | dccp_transmit_skb(sk, skb_clone(skb, prio)); | 668 | dccp_transmit_skb(sk, skb_clone(skb, prio)); |
628 | /* | 669 | /* |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 7e5fc04eb6d1..ef343d53fcea 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -726,7 +726,13 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
726 | goto out_discard; | 726 | goto out_discard; |
727 | 727 | ||
728 | skb_queue_tail(&sk->sk_write_queue, skb); | 728 | skb_queue_tail(&sk->sk_write_queue, skb); |
729 | dccp_write_xmit(sk,0); | 729 | /* |
730 | * The xmit_timer is set if the TX CCID is rate-based and will expire | ||
731 | * when congestion control permits to release further packets into the | ||
732 | * network. Window-based CCIDs do not use this timer. | ||
733 | */ | ||
734 | if (!timer_pending(&dp->dccps_xmit_timer)) | ||
735 | dccp_write_xmit(sk); | ||
730 | out_release: | 736 | out_release: |
731 | release_sock(sk); | 737 | release_sock(sk); |
732 | return rc ? : len; | 738 | return rc ? : len; |
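The `timer_pending()` guard added to `dccp_sendmsg()` avoids a redundant transmit attempt: if the pacing timer is already armed, the freshly queued packet will be picked up when it fires. A trivial sketch of that entry rule (names are stand-ins for the kernel's):

```c
#include <stdbool.h>
#include <stdio.h>

static bool xmit_timer_pending;   /* models timer_pending(&dp->dccps_xmit_timer) */

static void write_xmit_model(void)
{
	puts("attempt transmit of queue head");
}

static void sendmsg_tail(void)
{
	if (!xmit_timer_pending)   /* otherwise the timer/tasklet will do it */
		write_xmit_model();
}

int main(void)
{
	sendmsg_tail();             /* timer idle: try immediately */
	xmit_timer_pending = true;
	sendmsg_tail();             /* timer armed: defer to the timer */
	return 0;
}
```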
@@ -951,9 +957,22 @@ void dccp_close(struct sock *sk, long timeout) | |||
951 | /* Check zero linger _after_ checking for unread data. */ | 957 | /* Check zero linger _after_ checking for unread data. */ |
952 | sk->sk_prot->disconnect(sk, 0); | 958 | sk->sk_prot->disconnect(sk, 0); |
953 | } else if (sk->sk_state != DCCP_CLOSED) { | 959 | } else if (sk->sk_state != DCCP_CLOSED) { |
960 | /* | ||
961 | * Normal connection termination. May need to wait if there are | ||
962 | * still packets in the TX queue that are delayed by the CCID. | ||
963 | */ | ||
964 | dccp_flush_write_queue(sk, &timeout); | ||
954 | dccp_terminate_connection(sk); | 965 | dccp_terminate_connection(sk); |
955 | } | 966 | } |
956 | 967 | ||
968 | /* | ||
969 | * Flush write queue. This may be necessary in several cases: | ||
970 | * - we have been closed by the peer but still have application data; | ||
971 | * - abortive termination (unread data or zero linger time), | ||
972 | * - normal termination but queue could not be flushed within time limit | ||
973 | */ | ||
974 | __skb_queue_purge(&sk->sk_write_queue); | ||
975 | |||
957 | sk_stream_wait_close(sk, timeout); | 976 | sk_stream_wait_close(sk, timeout); |
958 | 977 | ||
959 | adjudge_to_death: | 978 | adjudge_to_death: |
diff --git a/net/dccp/timer.c b/net/dccp/timer.c index 1a9aa05d4dc4..7587870b7040 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c | |||
@@ -237,32 +237,35 @@ out: | |||
237 | sock_put(sk); | 237 | sock_put(sk); |
238 | } | 238 | } |
239 | 239 | ||
240 | /* Transmit-delay timer: used by the CCIDs to delay actual send time */ | 240 | /** |
241 | static void dccp_write_xmit_timer(unsigned long data) | 241 | * dccp_write_xmitlet - Workhorse for CCID packet dequeueing interface |
242 | * See the comments above %ccid_dequeueing_decision for supported modes. | ||
243 | */ | ||
244 | static void dccp_write_xmitlet(unsigned long data) | ||
242 | { | 245 | { |
243 | struct sock *sk = (struct sock *)data; | 246 | struct sock *sk = (struct sock *)data; |
244 | struct dccp_sock *dp = dccp_sk(sk); | ||
245 | 247 | ||
246 | bh_lock_sock(sk); | 248 | bh_lock_sock(sk); |
247 | if (sock_owned_by_user(sk)) | 249 | if (sock_owned_by_user(sk)) |
248 | sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1); | 250 | sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1); |
249 | else | 251 | else |
250 | dccp_write_xmit(sk, 0); | 252 | dccp_write_xmit(sk); |
251 | bh_unlock_sock(sk); | 253 | bh_unlock_sock(sk); |
252 | sock_put(sk); | ||
253 | } | 254 | } |
254 | 255 | ||
255 | static void dccp_init_write_xmit_timer(struct sock *sk) | 256 | static void dccp_write_xmit_timer(unsigned long data) |
256 | { | 257 | { |
257 | struct dccp_sock *dp = dccp_sk(sk); | 258 | dccp_write_xmitlet(data); |
258 | 259 | sock_put((struct sock *)data); | |
259 | setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer, | ||
260 | (unsigned long)sk); | ||
261 | } | 260 | } |
262 | 261 | ||
263 | void dccp_init_xmit_timers(struct sock *sk) | 262 | void dccp_init_xmit_timers(struct sock *sk) |
264 | { | 263 | { |
265 | dccp_init_write_xmit_timer(sk); | 264 | struct dccp_sock *dp = dccp_sk(sk); |
265 | |||
266 | tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk); | ||
267 | setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer, | ||
268 | (unsigned long)sk); | ||
266 | inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer, | 269 | inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer, |
267 | &dccp_keepalive_timer); | 270 | &dccp_keepalive_timer); |
268 | } | 271 | } |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index d6b93d19790f..a76b78de679f 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -155,7 +155,7 @@ static const struct proto_ops dn_proto_ops; | |||
155 | static DEFINE_RWLOCK(dn_hash_lock); | 155 | static DEFINE_RWLOCK(dn_hash_lock); |
156 | static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE]; | 156 | static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE]; |
157 | static struct hlist_head dn_wild_sk; | 157 | static struct hlist_head dn_wild_sk; |
158 | static atomic_t decnet_memory_allocated; | 158 | static atomic_long_t decnet_memory_allocated; |
159 | 159 | ||
160 | static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags); | 160 | static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags); |
161 | static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags); | 161 | static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags); |
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c index be3eb8e23288..28f8b5e5f73b 100644 --- a/net/decnet/sysctl_net_decnet.c +++ b/net/decnet/sysctl_net_decnet.c | |||
@@ -38,7 +38,7 @@ int decnet_log_martians = 1; | |||
38 | int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW; | 38 | int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW; |
39 | 39 | ||
40 | /* Reasonable defaults, I hope, based on tcp's defaults */ | 40 | /* Reasonable defaults, I hope, based on tcp's defaults */ |
41 | int sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 }; | 41 | long sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 }; |
42 | int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 }; | 42 | int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 }; |
43 | int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 }; | 43 | int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 }; |
44 | 44 | ||
@@ -324,7 +324,7 @@ static ctl_table dn_table[] = { | |||
324 | .data = &sysctl_decnet_mem, | 324 | .data = &sysctl_decnet_mem, |
325 | .maxlen = sizeof(sysctl_decnet_mem), | 325 | .maxlen = sizeof(sysctl_decnet_mem), |
326 | .mode = 0644, | 326 | .mode = 0644, |
327 | .proc_handler = proc_dointvec, | 327 | .proc_handler = proc_doulongvec_minmax |
328 | }, | 328 | }, |
329 | { | 329 | { |
330 | .procname = "decnet_rmem", | 330 | .procname = "decnet_rmem", |
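Widening `sysctl_decnet_mem` to `long[]` forces the matching handler change to `proc_doulongvec_minmax`: an int-based handler reading long-sized storage uses the wrong element stride, so on LP64 its second "value" lands in the upper half of the first slot. A standalone demonstration (little-endian LP64 assumed for the comment's specifics):

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	long mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
	int view[3];

	/* what an int-based handler would read from the long[] storage */
	memcpy(view, mem, sizeof(view));
	printf("longs:    %ld %ld %ld\n", mem[0], mem[1], mem[2]);
	printf("int view: %d %d %d\n", view[0], view[1], view[2]);
	return 0;
}
```

On a little-endian 64-bit host this prints `6144 8192 12288` against `6144 0 8192`: the handler's width must match the storage width.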
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 36e27c2107de..eb6f69a8f27a 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -1052,7 +1052,7 @@ static void ip_fib_net_exit(struct net *net) | |||
1052 | hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) { | 1052 | hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) { |
1053 | hlist_del(node); | 1053 | hlist_del(node); |
1054 | fib_table_flush(tb); | 1054 | fib_table_flush(tb); |
1055 | kfree(tb); | 1055 | fib_free_table(tb); |
1056 | } | 1056 | } |
1057 | } | 1057 | } |
1058 | kfree(net->ipv4.fib_table_hash); | 1058 | kfree(net->ipv4.fib_table_hash); |
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c index 43e1c594ce8f..b3acb0417b21 100644 --- a/net/ipv4/fib_hash.c +++ b/net/ipv4/fib_hash.c | |||
@@ -120,11 +120,12 @@ static inline void fn_rebuild_zone(struct fn_zone *fz, | |||
120 | struct fib_node *f; | 120 | struct fib_node *f; |
121 | 121 | ||
122 | hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) { | 122 | hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) { |
123 | struct hlist_head __rcu *new_head; | 123 | struct hlist_head *new_head; |
124 | 124 | ||
125 | hlist_del_rcu(&f->fn_hash); | 125 | hlist_del_rcu(&f->fn_hash); |
126 | 126 | ||
127 | new_head = &fz->fz_hash[fn_hash(f->fn_key, fz)]; | 127 | new_head = rcu_dereference_protected(fz->fz_hash, 1) + |
128 | fn_hash(f->fn_key, fz); | ||
128 | hlist_add_head_rcu(&f->fn_hash, new_head); | 129 | hlist_add_head_rcu(&f->fn_hash, new_head); |
129 | } | 130 | } |
130 | } | 131 | } |
@@ -179,8 +180,8 @@ static void fn_rehash_zone(struct fn_zone *fz) | |||
179 | memcpy(&nfz, fz, sizeof(nfz)); | 180 | memcpy(&nfz, fz, sizeof(nfz)); |
180 | 181 | ||
181 | write_seqlock_bh(&fz->fz_lock); | 182 | write_seqlock_bh(&fz->fz_lock); |
182 | old_ht = fz->fz_hash; | 183 | old_ht = rcu_dereference_protected(fz->fz_hash, 1); |
183 | nfz.fz_hash = ht; | 184 | RCU_INIT_POINTER(nfz.fz_hash, ht); |
184 | nfz.fz_hashmask = new_hashmask; | 185 | nfz.fz_hashmask = new_hashmask; |
185 | nfz.fz_divisor = new_divisor; | 186 | nfz.fz_divisor = new_divisor; |
186 | fn_rebuild_zone(&nfz, old_ht, old_divisor); | 187 | fn_rebuild_zone(&nfz, old_ht, old_divisor); |
@@ -236,7 +237,7 @@ fn_new_zone(struct fn_hash *table, int z) | |||
236 | seqlock_init(&fz->fz_lock); | 237 | seqlock_init(&fz->fz_lock); |
237 | fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1; | 238 | fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1; |
238 | fz->fz_hashmask = fz->fz_divisor - 1; | 239 | fz->fz_hashmask = fz->fz_divisor - 1; |
239 | fz->fz_hash = fz->fz_embedded_hash; | 240 | RCU_INIT_POINTER(fz->fz_hash, fz->fz_embedded_hash); |
240 | fz->fz_order = z; | 241 | fz->fz_order = z; |
241 | fz->fz_revorder = 32 - z; | 242 | fz->fz_revorder = 32 - z; |
242 | fz->fz_mask = inet_make_mask(z); | 243 | fz->fz_mask = inet_make_mask(z); |
@@ -272,7 +273,7 @@ int fib_table_lookup(struct fib_table *tb, | |||
272 | for (fz = rcu_dereference(t->fn_zone_list); | 273 | for (fz = rcu_dereference(t->fn_zone_list); |
273 | fz != NULL; | 274 | fz != NULL; |
274 | fz = rcu_dereference(fz->fz_next)) { | 275 | fz = rcu_dereference(fz->fz_next)) { |
275 | struct hlist_head __rcu *head; | 276 | struct hlist_head *head; |
276 | struct hlist_node *node; | 277 | struct hlist_node *node; |
277 | struct fib_node *f; | 278 | struct fib_node *f; |
278 | __be32 k; | 279 | __be32 k; |
@@ -282,7 +283,7 @@ int fib_table_lookup(struct fib_table *tb, | |||
282 | seq = read_seqbegin(&fz->fz_lock); | 283 | seq = read_seqbegin(&fz->fz_lock); |
283 | k = fz_key(flp->fl4_dst, fz); | 284 | k = fz_key(flp->fl4_dst, fz); |
284 | 285 | ||
285 | head = &fz->fz_hash[fn_hash(k, fz)]; | 286 | head = rcu_dereference(fz->fz_hash) + fn_hash(k, fz); |
286 | hlist_for_each_entry_rcu(f, node, head, fn_hash) { | 287 | hlist_for_each_entry_rcu(f, node, head, fn_hash) { |
287 | if (f->fn_key != k) | 288 | if (f->fn_key != k) |
288 | continue; | 289 | continue; |
@@ -311,6 +312,7 @@ void fib_table_select_default(struct fib_table *tb, | |||
311 | struct fib_info *last_resort; | 312 | struct fib_info *last_resort; |
312 | struct fn_hash *t = (struct fn_hash *)tb->tb_data; | 313 | struct fn_hash *t = (struct fn_hash *)tb->tb_data; |
313 | struct fn_zone *fz = t->fn_zones[0]; | 314 | struct fn_zone *fz = t->fn_zones[0]; |
315 | struct hlist_head *head; | ||
314 | 316 | ||
315 | if (fz == NULL) | 317 | if (fz == NULL) |
316 | return; | 318 | return; |
@@ -320,7 +322,8 @@ void fib_table_select_default(struct fib_table *tb, | |||
320 | order = -1; | 322 | order = -1; |
321 | 323 | ||
322 | rcu_read_lock(); | 324 | rcu_read_lock(); |
323 | hlist_for_each_entry_rcu(f, node, &fz->fz_hash[0], fn_hash) { | 325 | head = rcu_dereference(fz->fz_hash); |
326 | hlist_for_each_entry_rcu(f, node, head, fn_hash) { | ||
324 | struct fib_alias *fa; | 327 | struct fib_alias *fa; |
325 | 328 | ||
326 | list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) { | 329 | list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) { |
@@ -374,7 +377,7 @@ out: | |||
374 | /* Insert node F to FZ. */ | 377 | /* Insert node F to FZ. */ |
375 | static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f) | 378 | static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f) |
376 | { | 379 | { |
377 | struct hlist_head *head = &fz->fz_hash[fn_hash(f->fn_key, fz)]; | 380 | struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(f->fn_key, fz); |
378 | 381 | ||
379 | hlist_add_head_rcu(&f->fn_hash, head); | 382 | hlist_add_head_rcu(&f->fn_hash, head); |
380 | } | 383 | } |
@@ -382,7 +385,7 @@ static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f) | |||
382 | /* Return the node in FZ matching KEY. */ | 385 | /* Return the node in FZ matching KEY. */ |
383 | static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key) | 386 | static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key) |
384 | { | 387 | { |
385 | struct hlist_head *head = &fz->fz_hash[fn_hash(key, fz)]; | 388 | struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(key, fz); |
386 | struct hlist_node *node; | 389 | struct hlist_node *node; |
387 | struct fib_node *f; | 390 | struct fib_node *f; |
388 | 391 | ||
@@ -662,7 +665,7 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg) | |||
662 | 665 | ||
663 | static int fn_flush_list(struct fn_zone *fz, int idx) | 666 | static int fn_flush_list(struct fn_zone *fz, int idx) |
664 | { | 667 | { |
665 | struct hlist_head *head = &fz->fz_hash[idx]; | 668 | struct hlist_head *head = rtnl_dereference(fz->fz_hash) + idx; |
666 | struct hlist_node *node, *n; | 669 | struct hlist_node *node, *n; |
667 | struct fib_node *f; | 670 | struct fib_node *f; |
668 | int found = 0; | 671 | int found = 0; |
@@ -713,6 +716,24 @@ int fib_table_flush(struct fib_table *tb) | |||
713 | return found; | 716 | return found; |
714 | } | 717 | } |
715 | 718 | ||
719 | void fib_free_table(struct fib_table *tb) | ||
720 | { | ||
721 | struct fn_hash *table = (struct fn_hash *) tb->tb_data; | ||
722 | struct fn_zone *fz, *next; | ||
723 | |||
724 | next = table->fn_zone_list; | ||
725 | while (next != NULL) { | ||
726 | fz = next; | ||
727 | next = fz->fz_next; | ||
728 | |||
729 | if (fz->fz_hash != fz->fz_embedded_hash) | ||
730 | fz_hash_free(fz->fz_hash, fz->fz_divisor); | ||
731 | |||
732 | kfree(fz); | ||
733 | } | ||
734 | |||
735 | kfree(tb); | ||
736 | } | ||
716 | 737 | ||
717 | static inline int | 738 | static inline int |
718 | fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb, | 739 | fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb, |
@@ -761,14 +782,15 @@ fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb, | |||
761 | struct fn_zone *fz) | 782 | struct fn_zone *fz) |
762 | { | 783 | { |
763 | int h, s_h; | 784 | int h, s_h; |
785 | struct hlist_head *head = rcu_dereference(fz->fz_hash); | ||
764 | 786 | ||
765 | if (fz->fz_hash == NULL) | 787 | if (head == NULL) |
766 | return skb->len; | 788 | return skb->len; |
767 | s_h = cb->args[3]; | 789 | s_h = cb->args[3]; |
768 | for (h = s_h; h < fz->fz_divisor; h++) { | 790 | for (h = s_h; h < fz->fz_divisor; h++) { |
769 | if (hlist_empty(&fz->fz_hash[h])) | 791 | if (hlist_empty(head + h)) |
770 | continue; | 792 | continue; |
771 | if (fn_hash_dump_bucket(skb, cb, tb, fz, &fz->fz_hash[h]) < 0) { | 793 | if (fn_hash_dump_bucket(skb, cb, tb, fz, head + h) < 0) { |
772 | cb->args[3] = h; | 794 | cb->args[3] = h; |
773 | return -1; | 795 | return -1; |
774 | } | 796 | } |
@@ -872,7 +894,7 @@ static struct fib_alias *fib_get_first(struct seq_file *seq) | |||
872 | if (!iter->zone->fz_nent) | 894 | if (!iter->zone->fz_nent) |
873 | continue; | 895 | continue; |
874 | 896 | ||
875 | iter->hash_head = iter->zone->fz_hash; | 897 | iter->hash_head = rcu_dereference(iter->zone->fz_hash); |
876 | maxslot = iter->zone->fz_divisor; | 898 | maxslot = iter->zone->fz_divisor; |
877 | 899 | ||
878 | for (iter->bucket = 0; iter->bucket < maxslot; | 900 | for (iter->bucket = 0; iter->bucket < maxslot; |
@@ -957,7 +979,7 @@ static struct fib_alias *fib_get_next(struct seq_file *seq) | |||
957 | goto out; | 979 | goto out; |
958 | 980 | ||
959 | iter->bucket = 0; | 981 | iter->bucket = 0; |
960 | iter->hash_head = iter->zone->fz_hash; | 982 | iter->hash_head = rcu_dereference(iter->zone->fz_hash); |
961 | 983 | ||
962 | hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) { | 984 | hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) { |
963 | list_for_each_entry(fa, &fn->fn_alias, fa_list) { | 985 | list_for_each_entry(fa, &fn->fn_alias, fa_list) { |
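
A note on the fz_hash conversion above: fz->fz_hash is now an __rcu pointer to the bucket array, so every access goes through an accessor — rcu_dereference() on read-side paths such as the dump and seq_file walkers, rtnl_dereference() where RTNL is the update-side lock — and bucket addressing changes from &fz->fz_hash[i] to base + i so the annotated pointer is fetched exactly once. A minimal sketch of that split; zone_example and both helpers are stand-in names, not kernel code:

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/rtnetlink.h>
    #include <linux/types.h>

    struct zone_example {
            struct hlist_head __rcu *hash;  /* replaceable bucket array */
            int divisor;                    /* bucket count, power of two */
    };

    /* Reader side: caller is inside rcu_read_lock(). */
    static struct hlist_head *bucket_rcu_example(struct zone_example *z, u32 h)
    {
            struct hlist_head *base = rcu_dereference(z->hash);

            return base ? base + (h & (z->divisor - 1)) : NULL;
    }

    /* Writer side: RTNL held; rtnl_dereference() skips the read barrier
     * and lets lockdep verify the locking claim. */
    static struct hlist_head *bucket_rtnl_example(struct zone_example *z, u32 h)
    {
            return rtnl_dereference(z->hash) + (h & (z->divisor - 1));
    }

Fetching the base once also means a concurrent table replacement can at worst hand a reader the old array, never a mix of old and new buckets.
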
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h index a29edf2219c8..c079cc0ec651 100644 --- a/net/ipv4/fib_lookup.h +++ b/net/ipv4/fib_lookup.h | |||
@@ -47,11 +47,8 @@ extern int fib_detect_death(struct fib_info *fi, int order, | |||
47 | static inline void fib_result_assign(struct fib_result *res, | 47 | static inline void fib_result_assign(struct fib_result *res, |
48 | struct fib_info *fi) | 48 | struct fib_info *fi) |
49 | { | 49 | { |
50 | if (res->fi != NULL) | 50 | /* we used to play games with refcounts, but we now use RCU */ |
51 | fib_info_put(res->fi); | ||
52 | res->fi = fi; | 51 | res->fi = fi; |
53 | if (fi != NULL) | ||
54 | atomic_inc(&fi->fib_clntref); | ||
55 | } | 52 | } |
56 | 53 | ||
57 | #endif /* _FIB_LOOKUP_H */ | 54 | #endif /* _FIB_LOOKUP_H */ |
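
The fib_lookup.h hunk above deletes the refcount take/put pair because lookup results are now handed out under RCU: res->fi only has to outlive the read-side critical section that produced it. A sketch of the calling convention this implies — use_result_example is a hypothetical consumer, not a kernel function:

    #include <linux/rcupdate.h>
    #include <net/ip_fib.h>
    #include <net/route.h>

    static void use_result_example(struct fib_result *res) { (void)res; }

    static int route_once_example(struct net *net, struct flowi *flp,
                                  struct fib_result *res)
    {
            int err;

            rcu_read_lock();
            err = fib_lookup(net, flp, res);  /* res->fi: RCU-protected */
            if (!err)
                    use_result_example(res);  /* finish before unlocking */
            rcu_read_unlock();                /* res->fi unusable after this */
            return err;
    }

A caller that needs the fib_info beyond the unlock now has to take its own reference explicitly while still inside the section.
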
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index b14450895102..200eb538fbb3 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -1797,6 +1797,11 @@ int fib_table_flush(struct fib_table *tb) | |||
1797 | return found; | 1797 | return found; |
1798 | } | 1798 | } |
1799 | 1799 | ||
1800 | void fib_free_table(struct fib_table *tb) | ||
1801 | { | ||
1802 | kfree(tb); | ||
1803 | } | ||
1804 | |||
1800 | void fib_table_select_default(struct fib_table *tb, | 1805 | void fib_table_select_default(struct fib_table *tb, |
1801 | const struct flowi *flp, | 1806 | const struct flowi *flp, |
1802 | struct fib_result *res) | 1807 | struct fib_result *res) |
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c index caea6885fdbd..c6933f2ea310 100644 --- a/net/ipv4/gre.c +++ b/net/ipv4/gre.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <net/gre.h> | 22 | #include <net/gre.h> |
23 | 23 | ||
24 | 24 | ||
25 | static const struct gre_protocol *gre_proto[GREPROTO_MAX] __read_mostly; | 25 | static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly; |
26 | static DEFINE_SPINLOCK(gre_proto_lock); | 26 | static DEFINE_SPINLOCK(gre_proto_lock); |
27 | 27 | ||
28 | int gre_add_protocol(const struct gre_protocol *proto, u8 version) | 28 | int gre_add_protocol(const struct gre_protocol *proto, u8 version) |
@@ -51,7 +51,8 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version) | |||
51 | goto err_out; | 51 | goto err_out; |
52 | 52 | ||
53 | spin_lock(&gre_proto_lock); | 53 | spin_lock(&gre_proto_lock); |
54 | if (gre_proto[version] != proto) | 54 | if (rcu_dereference_protected(gre_proto[version], |
55 | lockdep_is_held(&gre_proto_lock)) != proto) | ||
55 | goto err_out_unlock; | 56 | goto err_out_unlock; |
56 | rcu_assign_pointer(gre_proto[version], NULL); | 57 | rcu_assign_pointer(gre_proto[version], NULL); |
57 | spin_unlock(&gre_proto_lock); | 58 | spin_unlock(&gre_proto_lock); |
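
The gre.c hunk shows the standard writer-side idiom for an __rcu slot: under the update lock, a plain read would trip sparse and a full rcu_dereference() would buy an unneeded barrier, so rcu_dereference_protected() with a lockdep_is_held() expression is used instead. The same shape, with handler_example and the slot/lock names as stand-ins:

    #include <linux/errno.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct handler_example;                 /* opaque for the sketch */

    static DEFINE_SPINLOCK(slot_lock_example);
    static struct handler_example __rcu *slot_example;

    static int del_handler_example(struct handler_example *h)
    {
            int err = -ENOENT;

            spin_lock(&slot_lock_example);
            if (rcu_dereference_protected(slot_example,
                            lockdep_is_held(&slot_lock_example)) == h) {
                    rcu_assign_pointer(slot_example, NULL);
                    err = 0;
            }
            spin_unlock(&slot_lock_example);

            if (!err)
                    synchronize_rcu();      /* drain readers before h is freed */
            return err;
    }
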
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index c8877c6c7216..3c53c2d89e3b 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -2306,10 +2306,8 @@ void ip_mc_drop_socket(struct sock *sk) | |||
2306 | 2306 | ||
2307 | in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); | 2307 | in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); |
2308 | (void) ip_mc_leave_src(sk, iml, in_dev); | 2308 | (void) ip_mc_leave_src(sk, iml, in_dev); |
2309 | if (in_dev != NULL) { | 2309 | if (in_dev != NULL) |
2310 | ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); | 2310 | ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); |
2311 | in_dev_put(in_dev); | ||
2312 | } | ||
2313 | /* decrease mem now to avoid the memleak warning */ | 2311 | /* decrease mem now to avoid the memleak warning */ |
2314 | atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); | 2312 | atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); |
2315 | call_rcu(&iml->rcu, ip_mc_socklist_reclaim); | 2313 | call_rcu(&iml->rcu, ip_mc_socklist_reclaim); |
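
The igmp.c hunk drops the in_dev_put() that used to balance inetdev_by_index(). The surrounding series apparently converts that lookup to return an unreferenced pointer valid under the caller's locking, so keeping the put would now underflow the device refcount. The shape, with lookup_in_dev_example standing in for the converted lookup:

    #include <linux/igmp.h>
    #include <linux/inetdevice.h>

    /* hypothetical: returns a pointer the caller may use but must not put */
    static struct in_device *lookup_in_dev_example(struct net *net, int ifindex);

    static void leave_group_example(struct net *net, int ifindex, __be32 group)
    {
            struct in_device *in_dev = lookup_in_dev_example(net, ifindex);

            if (in_dev)
                    ip_mc_dec_group(in_dev, group);
            /* no in_dev_put(): ownership was never transferred */
    }
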
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index ba8042665849..2ada17129fce 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -490,9 +490,11 @@ static int inet_csk_diag_dump(struct sock *sk, | |||
490 | { | 490 | { |
491 | struct inet_diag_req *r = NLMSG_DATA(cb->nlh); | 491 | struct inet_diag_req *r = NLMSG_DATA(cb->nlh); |
492 | 492 | ||
493 | if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { | 493 | if (nlmsg_attrlen(cb->nlh, sizeof(*r))) { |
494 | struct inet_diag_entry entry; | 494 | struct inet_diag_entry entry; |
495 | struct rtattr *bc = (struct rtattr *)(r + 1); | 495 | const struct nlattr *bc = nlmsg_find_attr(cb->nlh, |
496 | sizeof(*r), | ||
497 | INET_DIAG_REQ_BYTECODE); | ||
496 | struct inet_sock *inet = inet_sk(sk); | 498 | struct inet_sock *inet = inet_sk(sk); |
497 | 499 | ||
498 | entry.family = sk->sk_family; | 500 | entry.family = sk->sk_family; |
@@ -512,7 +514,7 @@ static int inet_csk_diag_dump(struct sock *sk, | |||
512 | entry.dport = ntohs(inet->inet_dport); | 514 | entry.dport = ntohs(inet->inet_dport); |
513 | entry.userlocks = sk->sk_userlocks; | 515 | entry.userlocks = sk->sk_userlocks; |
514 | 516 | ||
515 | if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) | 517 | if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry)) |
516 | return 0; | 518 | return 0; |
517 | } | 519 | } |
518 | 520 | ||
@@ -527,9 +529,11 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, | |||
527 | { | 529 | { |
528 | struct inet_diag_req *r = NLMSG_DATA(cb->nlh); | 530 | struct inet_diag_req *r = NLMSG_DATA(cb->nlh); |
529 | 531 | ||
530 | if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { | 532 | if (nlmsg_attrlen(cb->nlh, sizeof(*r))) { |
531 | struct inet_diag_entry entry; | 533 | struct inet_diag_entry entry; |
532 | struct rtattr *bc = (struct rtattr *)(r + 1); | 534 | const struct nlattr *bc = nlmsg_find_attr(cb->nlh, |
535 | sizeof(*r), | ||
536 | INET_DIAG_REQ_BYTECODE); | ||
533 | 537 | ||
534 | entry.family = tw->tw_family; | 538 | entry.family = tw->tw_family; |
535 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) | 539 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) |
@@ -548,7 +552,7 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, | |||
548 | entry.dport = ntohs(tw->tw_dport); | 552 | entry.dport = ntohs(tw->tw_dport); |
549 | entry.userlocks = 0; | 553 | entry.userlocks = 0; |
550 | 554 | ||
551 | if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) | 555 | if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry)) |
552 | return 0; | 556 | return 0; |
553 | } | 557 | } |
554 | 558 | ||
@@ -618,7 +622,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, | |||
618 | struct inet_diag_req *r = NLMSG_DATA(cb->nlh); | 622 | struct inet_diag_req *r = NLMSG_DATA(cb->nlh); |
619 | struct inet_connection_sock *icsk = inet_csk(sk); | 623 | struct inet_connection_sock *icsk = inet_csk(sk); |
620 | struct listen_sock *lopt; | 624 | struct listen_sock *lopt; |
621 | struct rtattr *bc = NULL; | 625 | const struct nlattr *bc = NULL; |
622 | struct inet_sock *inet = inet_sk(sk); | 626 | struct inet_sock *inet = inet_sk(sk); |
623 | int j, s_j; | 627 | int j, s_j; |
624 | int reqnum, s_reqnum; | 628 | int reqnum, s_reqnum; |
@@ -638,8 +642,9 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, | |||
638 | if (!lopt || !lopt->qlen) | 642 | if (!lopt || !lopt->qlen) |
639 | goto out; | 643 | goto out; |
640 | 644 | ||
641 | if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { | 645 | if (nlmsg_attrlen(cb->nlh, sizeof(*r))) { |
642 | bc = (struct rtattr *)(r + 1); | 646 | bc = nlmsg_find_attr(cb->nlh, sizeof(*r), |
647 | INET_DIAG_REQ_BYTECODE); | ||
643 | entry.sport = inet->inet_num; | 648 | entry.sport = inet->inet_num; |
644 | entry.userlocks = sk->sk_userlocks; | 649 | entry.userlocks = sk->sk_userlocks; |
645 | } | 650 | } |
@@ -672,8 +677,8 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, | |||
672 | &ireq->rmt_addr; | 677 | &ireq->rmt_addr; |
673 | entry.dport = ntohs(ireq->rmt_port); | 678 | entry.dport = ntohs(ireq->rmt_port); |
674 | 679 | ||
675 | if (!inet_diag_bc_run(RTA_DATA(bc), | 680 | if (!inet_diag_bc_run(nla_data(bc), |
676 | RTA_PAYLOAD(bc), &entry)) | 681 | nla_len(bc), &entry)) |
677 | continue; | 682 | continue; |
678 | } | 683 | } |
679 | 684 | ||
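
All three inet_diag.c hunks replace the same fragile pattern — `4 + NLMSG_SPACE(sizeof(*r))` plus a cast of `r + 1` — with the netlink attribute API, which finds the bytecode attribute by type instead of by hand-computed offset. A sketch of the lookup shape; req_example and ATTR_BYTECODE_EXAMPLE are stand-ins for the real request struct and INET_DIAG_REQ_BYTECODE:

    #include <net/netlink.h>

    struct req_example {                    /* fixed request header */
            __u32 flags;
    };

    #define ATTR_BYTECODE_EXAMPLE   1       /* stand-in attribute type */

    static const struct nlattr *find_bytecode_example(struct nlmsghdr *nlh)
    {
            /* is there any attribute space behind the fixed header? */
            if (!nlmsg_attrlen(nlh, sizeof(struct req_example)))
                    return NULL;

            /* locate by type, not by raw pointer arithmetic */
            return nlmsg_find_attr(nlh, sizeof(struct req_example),
                                   ATTR_BYTECODE_EXAMPLE);
    }

Besides being shorter, this stops assuming the bytecode is the first and only attribute after the header.
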
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index 9ffa24b9a804..9e94d7cf4f8a 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
@@ -72,18 +72,19 @@ static struct kmem_cache *peer_cachep __read_mostly; | |||
72 | #define node_height(x) x->avl_height | 72 | #define node_height(x) x->avl_height |
73 | 73 | ||
74 | #define peer_avl_empty ((struct inet_peer *)&peer_fake_node) | 74 | #define peer_avl_empty ((struct inet_peer *)&peer_fake_node) |
75 | #define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node) | ||
75 | static const struct inet_peer peer_fake_node = { | 76 | static const struct inet_peer peer_fake_node = { |
76 | .avl_left = peer_avl_empty, | 77 | .avl_left = peer_avl_empty_rcu, |
77 | .avl_right = peer_avl_empty, | 78 | .avl_right = peer_avl_empty_rcu, |
78 | .avl_height = 0 | 79 | .avl_height = 0 |
79 | }; | 80 | }; |
80 | 81 | ||
81 | static struct { | 82 | static struct { |
82 | struct inet_peer *root; | 83 | struct inet_peer __rcu *root; |
83 | spinlock_t lock; | 84 | spinlock_t lock; |
84 | int total; | 85 | int total; |
85 | } peers = { | 86 | } peers = { |
86 | .root = peer_avl_empty, | 87 | .root = peer_avl_empty_rcu, |
87 | .lock = __SPIN_LOCK_UNLOCKED(peers.lock), | 88 | .lock = __SPIN_LOCK_UNLOCKED(peers.lock), |
88 | .total = 0, | 89 | .total = 0, |
89 | }; | 90 | }; |
@@ -156,11 +157,14 @@ static void unlink_from_unused(struct inet_peer *p) | |||
156 | */ | 157 | */ |
157 | #define lookup(_daddr, _stack) \ | 158 | #define lookup(_daddr, _stack) \ |
158 | ({ \ | 159 | ({ \ |
159 | struct inet_peer *u, **v; \ | 160 | struct inet_peer *u; \ |
161 | struct inet_peer __rcu **v; \ | ||
160 | \ | 162 | \ |
161 | stackptr = _stack; \ | 163 | stackptr = _stack; \ |
162 | *stackptr++ = &peers.root; \ | 164 | *stackptr++ = &peers.root; \ |
163 | for (u = peers.root; u != peer_avl_empty; ) { \ | 165 | for (u = rcu_dereference_protected(peers.root, \ |
166 | lockdep_is_held(&peers.lock)); \ | ||
167 | u != peer_avl_empty; ) { \ | ||
164 | if (_daddr == u->v4daddr) \ | 168 | if (_daddr == u->v4daddr) \ |
165 | break; \ | 169 | break; \ |
166 | if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \ | 170 | if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \ |
@@ -168,7 +172,8 @@ static void unlink_from_unused(struct inet_peer *p) | |||
168 | else \ | 172 | else \ |
169 | v = &u->avl_right; \ | 173 | v = &u->avl_right; \ |
170 | *stackptr++ = v; \ | 174 | *stackptr++ = v; \ |
171 | u = *v; \ | 175 | u = rcu_dereference_protected(*v, \ |
176 | lockdep_is_held(&peers.lock)); \ | ||
172 | } \ | 177 | } \ |
173 | u; \ | 178 | u; \ |
174 | }) | 179 | }) |
@@ -209,13 +214,17 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr) | |||
209 | /* Called with local BH disabled and the pool lock held. */ | 214 | /* Called with local BH disabled and the pool lock held. */ |
210 | #define lookup_rightempty(start) \ | 215 | #define lookup_rightempty(start) \ |
211 | ({ \ | 216 | ({ \ |
212 | struct inet_peer *u, **v; \ | 217 | struct inet_peer *u; \ |
218 | struct inet_peer __rcu **v; \ | ||
213 | *stackptr++ = &start->avl_left; \ | 219 | *stackptr++ = &start->avl_left; \ |
214 | v = &start->avl_left; \ | 220 | v = &start->avl_left; \ |
215 | for (u = *v; u->avl_right != peer_avl_empty; ) { \ | 221 | for (u = rcu_dereference_protected(*v, \ |
222 | lockdep_is_held(&peers.lock)); \ | ||
223 | u->avl_right != peer_avl_empty_rcu; ) { \ | ||
216 | v = &u->avl_right; \ | 224 | v = &u->avl_right; \ |
217 | *stackptr++ = v; \ | 225 | *stackptr++ = v; \ |
218 | u = *v; \ | 226 | u = rcu_dereference_protected(*v, \ |
227 | lockdep_is_held(&peers.lock)); \ | ||
219 | } \ | 228 | } \ |
220 | u; \ | 229 | u; \ |
221 | }) | 230 | }) |
@@ -224,74 +233,86 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr) | |||
224 | * Variable names are the proof of operation correctness. | 233 | * Variable names are the proof of operation correctness. |
225 | * Look into mm/map_avl.c for more detail description of the ideas. | 234 | * Look into mm/map_avl.c for more detail description of the ideas. |
226 | */ | 235 | */ |
227 | static void peer_avl_rebalance(struct inet_peer **stack[], | 236 | static void peer_avl_rebalance(struct inet_peer __rcu **stack[], |
228 | struct inet_peer ***stackend) | 237 | struct inet_peer __rcu ***stackend) |
229 | { | 238 | { |
230 | struct inet_peer **nodep, *node, *l, *r; | 239 | struct inet_peer __rcu **nodep; |
240 | struct inet_peer *node, *l, *r; | ||
231 | int lh, rh; | 241 | int lh, rh; |
232 | 242 | ||
233 | while (stackend > stack) { | 243 | while (stackend > stack) { |
234 | nodep = *--stackend; | 244 | nodep = *--stackend; |
235 | node = *nodep; | 245 | node = rcu_dereference_protected(*nodep, |
236 | l = node->avl_left; | 246 | lockdep_is_held(&peers.lock)); |
237 | r = node->avl_right; | 247 | l = rcu_dereference_protected(node->avl_left, |
248 | lockdep_is_held(&peers.lock)); | ||
249 | r = rcu_dereference_protected(node->avl_right, | ||
250 | lockdep_is_held(&peers.lock)); | ||
238 | lh = node_height(l); | 251 | lh = node_height(l); |
239 | rh = node_height(r); | 252 | rh = node_height(r); |
240 | if (lh > rh + 1) { /* l: RH+2 */ | 253 | if (lh > rh + 1) { /* l: RH+2 */ |
241 | struct inet_peer *ll, *lr, *lrl, *lrr; | 254 | struct inet_peer *ll, *lr, *lrl, *lrr; |
242 | int lrh; | 255 | int lrh; |
243 | ll = l->avl_left; | 256 | ll = rcu_dereference_protected(l->avl_left, |
244 | lr = l->avl_right; | 257 | lockdep_is_held(&peers.lock)); |
258 | lr = rcu_dereference_protected(l->avl_right, | ||
259 | lockdep_is_held(&peers.lock)); | ||
245 | lrh = node_height(lr); | 260 | lrh = node_height(lr); |
246 | if (lrh <= node_height(ll)) { /* ll: RH+1 */ | 261 | if (lrh <= node_height(ll)) { /* ll: RH+1 */ |
247 | node->avl_left = lr; /* lr: RH or RH+1 */ | 262 | RCU_INIT_POINTER(node->avl_left, lr); /* lr: RH or RH+1 */ |
248 | node->avl_right = r; /* r: RH */ | 263 | RCU_INIT_POINTER(node->avl_right, r); /* r: RH */ |
249 | node->avl_height = lrh + 1; /* RH+1 or RH+2 */ | 264 | node->avl_height = lrh + 1; /* RH+1 or RH+2 */ |
250 | l->avl_left = ll; /* ll: RH+1 */ | 265 | RCU_INIT_POINTER(l->avl_left, ll); /* ll: RH+1 */ |
251 | l->avl_right = node; /* node: RH+1 or RH+2 */ | 266 | RCU_INIT_POINTER(l->avl_right, node); /* node: RH+1 or RH+2 */ |
252 | l->avl_height = node->avl_height + 1; | 267 | l->avl_height = node->avl_height + 1; |
253 | *nodep = l; | 268 | RCU_INIT_POINTER(*nodep, l); |
254 | } else { /* ll: RH, lr: RH+1 */ | 269 | } else { /* ll: RH, lr: RH+1 */ |
255 | lrl = lr->avl_left; /* lrl: RH or RH-1 */ | 270 | lrl = rcu_dereference_protected(lr->avl_left, |
256 | lrr = lr->avl_right; /* lrr: RH or RH-1 */ | 271 | lockdep_is_held(&peers.lock)); /* lrl: RH or RH-1 */ |
257 | node->avl_left = lrr; /* lrr: RH or RH-1 */ | 272 | lrr = rcu_dereference_protected(lr->avl_right, |
258 | node->avl_right = r; /* r: RH */ | 273 | lockdep_is_held(&peers.lock)); /* lrr: RH or RH-1 */ |
274 | RCU_INIT_POINTER(node->avl_left, lrr); /* lrr: RH or RH-1 */ | ||
275 | RCU_INIT_POINTER(node->avl_right, r); /* r: RH */ | ||
259 | node->avl_height = rh + 1; /* node: RH+1 */ | 276 | node->avl_height = rh + 1; /* node: RH+1 */ |
260 | l->avl_left = ll; /* ll: RH */ | 277 | RCU_INIT_POINTER(l->avl_left, ll); /* ll: RH */ |
261 | l->avl_right = lrl; /* lrl: RH or RH-1 */ | 278 | RCU_INIT_POINTER(l->avl_right, lrl); /* lrl: RH or RH-1 */ |
262 | l->avl_height = rh + 1; /* l: RH+1 */ | 279 | l->avl_height = rh + 1; /* l: RH+1 */ |
263 | lr->avl_left = l; /* l: RH+1 */ | 280 | RCU_INIT_POINTER(lr->avl_left, l); /* l: RH+1 */ |
264 | lr->avl_right = node; /* node: RH+1 */ | 281 | RCU_INIT_POINTER(lr->avl_right, node); /* node: RH+1 */ |
265 | lr->avl_height = rh + 2; | 282 | lr->avl_height = rh + 2; |
266 | *nodep = lr; | 283 | RCU_INIT_POINTER(*nodep, lr); |
267 | } | 284 | } |
268 | } else if (rh > lh + 1) { /* r: LH+2 */ | 285 | } else if (rh > lh + 1) { /* r: LH+2 */ |
269 | struct inet_peer *rr, *rl, *rlr, *rll; | 286 | struct inet_peer *rr, *rl, *rlr, *rll; |
270 | int rlh; | 287 | int rlh; |
271 | rr = r->avl_right; | 288 | rr = rcu_dereference_protected(r->avl_right, |
272 | rl = r->avl_left; | 289 | lockdep_is_held(&peers.lock)); |
290 | rl = rcu_dereference_protected(r->avl_left, | ||
291 | lockdep_is_held(&peers.lock)); | ||
273 | rlh = node_height(rl); | 292 | rlh = node_height(rl); |
274 | if (rlh <= node_height(rr)) { /* rr: LH+1 */ | 293 | if (rlh <= node_height(rr)) { /* rr: LH+1 */ |
275 | node->avl_right = rl; /* rl: LH or LH+1 */ | 294 | RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */ |
276 | node->avl_left = l; /* l: LH */ | 295 | RCU_INIT_POINTER(node->avl_left, l); /* l: LH */ |
277 | node->avl_height = rlh + 1; /* LH+1 or LH+2 */ | 296 | node->avl_height = rlh + 1; /* LH+1 or LH+2 */ |
278 | r->avl_right = rr; /* rr: LH+1 */ | 297 | RCU_INIT_POINTER(r->avl_right, rr); /* rr: LH+1 */ |
279 | r->avl_left = node; /* node: LH+1 or LH+2 */ | 298 | RCU_INIT_POINTER(r->avl_left, node); /* node: LH+1 or LH+2 */ |
280 | r->avl_height = node->avl_height + 1; | 299 | r->avl_height = node->avl_height + 1; |
281 | *nodep = r; | 300 | RCU_INIT_POINTER(*nodep, r); |
282 | } else { /* rr: RH, rl: RH+1 */ | 301 | } else { /* rr: RH, rl: RH+1 */ |
283 | rlr = rl->avl_right; /* rlr: LH or LH-1 */ | 302 | rlr = rcu_dereference_protected(rl->avl_right, |
284 | rll = rl->avl_left; /* rll: LH or LH-1 */ | 303 | lockdep_is_held(&peers.lock)); /* rlr: LH or LH-1 */ |
285 | node->avl_right = rll; /* rll: LH or LH-1 */ | 304 | rll = rcu_dereference_protected(rl->avl_left, |
286 | node->avl_left = l; /* l: LH */ | 305 | lockdep_is_held(&peers.lock)); /* rll: LH or LH-1 */ |
306 | RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */ | ||
307 | RCU_INIT_POINTER(node->avl_left, l); /* l: LH */ | ||
287 | node->avl_height = lh + 1; /* node: LH+1 */ | 308 | node->avl_height = lh + 1; /* node: LH+1 */ |
288 | r->avl_right = rr; /* rr: LH */ | 309 | RCU_INIT_POINTER(r->avl_right, rr); /* rr: LH */ |
289 | r->avl_left = rlr; /* rlr: LH or LH-1 */ | 310 | RCU_INIT_POINTER(r->avl_left, rlr); /* rlr: LH or LH-1 */ |
290 | r->avl_height = lh + 1; /* r: LH+1 */ | 311 | r->avl_height = lh + 1; /* r: LH+1 */ |
291 | rl->avl_right = r; /* r: LH+1 */ | 312 | RCU_INIT_POINTER(rl->avl_right, r); /* r: LH+1 */ |
292 | rl->avl_left = node; /* node: LH+1 */ | 313 | RCU_INIT_POINTER(rl->avl_left, node); /* node: LH+1 */ |
293 | rl->avl_height = lh + 2; | 314 | rl->avl_height = lh + 2; |
294 | *nodep = rl; | 315 | RCU_INIT_POINTER(*nodep, rl); |
295 | } | 316 | } |
296 | } else { | 317 | } else { |
297 | node->avl_height = (lh > rh ? lh : rh) + 1; | 318 | node->avl_height = (lh > rh ? lh : rh) + 1; |
@@ -303,10 +324,10 @@ static void peer_avl_rebalance(struct inet_peer **stack[], | |||
303 | #define link_to_pool(n) \ | 324 | #define link_to_pool(n) \ |
304 | do { \ | 325 | do { \ |
305 | n->avl_height = 1; \ | 326 | n->avl_height = 1; \ |
306 | n->avl_left = peer_avl_empty; \ | 327 | n->avl_left = peer_avl_empty_rcu; \ |
307 | n->avl_right = peer_avl_empty; \ | 328 | n->avl_right = peer_avl_empty_rcu; \ |
308 | smp_wmb(); /* lockless readers can catch us now */ \ | 329 | /* lockless readers can catch us now */ \ |
309 | **--stackptr = n; \ | 330 | rcu_assign_pointer(**--stackptr, n); \ |
310 | peer_avl_rebalance(stack, stackptr); \ | 331 | peer_avl_rebalance(stack, stackptr); \ |
311 | } while (0) | 332 | } while (0) |
312 | 333 | ||
@@ -330,24 +351,25 @@ static void unlink_from_pool(struct inet_peer *p) | |||
330 | * We use refcnt=-1 to alert lockless readers this entry is deleted. | 351 | * We use refcnt=-1 to alert lockless readers this entry is deleted. |
331 | */ | 352 | */ |
332 | if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) { | 353 | if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) { |
333 | struct inet_peer **stack[PEER_MAXDEPTH]; | 354 | struct inet_peer __rcu **stack[PEER_MAXDEPTH]; |
334 | struct inet_peer ***stackptr, ***delp; | 355 | struct inet_peer __rcu ***stackptr, ***delp; |
335 | if (lookup(p->v4daddr, stack) != p) | 356 | if (lookup(p->v4daddr, stack) != p) |
336 | BUG(); | 357 | BUG(); |
337 | delp = stackptr - 1; /* *delp[0] == p */ | 358 | delp = stackptr - 1; /* *delp[0] == p */ |
338 | if (p->avl_left == peer_avl_empty) { | 359 | if (p->avl_left == peer_avl_empty_rcu) { |
339 | *delp[0] = p->avl_right; | 360 | *delp[0] = p->avl_right; |
340 | --stackptr; | 361 | --stackptr; |
341 | } else { | 362 | } else { |
342 | /* look for a node to insert instead of p */ | 363 | /* look for a node to insert instead of p */ |
343 | struct inet_peer *t; | 364 | struct inet_peer *t; |
344 | t = lookup_rightempty(p); | 365 | t = lookup_rightempty(p); |
345 | BUG_ON(*stackptr[-1] != t); | 366 | BUG_ON(rcu_dereference_protected(*stackptr[-1], |
367 | lockdep_is_held(&peers.lock)) != t); | ||
346 | **--stackptr = t->avl_left; | 368 | **--stackptr = t->avl_left; |
347 | /* t is removed, t->v4daddr > x->v4daddr for any | 369 | /* t is removed, t->v4daddr > x->v4daddr for any |
348 | * x in p->avl_left subtree. | 370 | * x in p->avl_left subtree. |
349 | * Put t in the old place of p. */ | 371 | * Put t in the old place of p. */ |
350 | *delp[0] = t; | 372 | RCU_INIT_POINTER(*delp[0], t); |
351 | t->avl_left = p->avl_left; | 373 | t->avl_left = p->avl_left; |
352 | t->avl_right = p->avl_right; | 374 | t->avl_right = p->avl_right; |
353 | t->avl_height = p->avl_height; | 375 | t->avl_height = p->avl_height; |
@@ -414,7 +436,7 @@ static int cleanup_once(unsigned long ttl) | |||
414 | struct inet_peer *inet_getpeer(__be32 daddr, int create) | 436 | struct inet_peer *inet_getpeer(__be32 daddr, int create) |
415 | { | 437 | { |
416 | struct inet_peer *p; | 438 | struct inet_peer *p; |
417 | struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr; | 439 | struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr; |
418 | 440 | ||
419 | /* Look up for the address quickly, lockless. | 441 | /* Look up for the address quickly, lockless. |
420 | * Because of a concurrent writer, we might not find an existing entry. | 442 | * Because of a concurrent writer, we might not find an existing entry. |
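
The inetpeer.c conversion juggles three accessors, and the split is worth spelling out: rcu_dereference_protected(..., lockdep_is_held(&peers.lock)) for every writer-side read; RCU_INIT_POINTER() inside the AVL rotations, since every node being relinked there is already visible to readers and needs no publish barrier; and rcu_assign_pointer() in link_to_pool(), the one spot where a brand-new node becomes reachable — which is exactly where the hand-rolled smp_wmb() used to sit. A reduced sketch (node_example is a stand-in):

    #include <linux/rcupdate.h>
    #include <linux/types.h>

    struct node_example {
            struct node_example __rcu *left, *right;
            __be32 key;
    };

    /* Writer path, update lock held; readers walk via rcu_dereference(). */
    static void publish_leaf_example(struct node_example __rcu **slot,
                                     struct node_example *n)
    {
            RCU_INIT_POINTER(n->left, NULL);   /* n still private: no barrier */
            RCU_INIT_POINTER(n->right, NULL);
            rcu_assign_pointer(*slot, n);      /* barrier + store: readers now
                                                * see a fully initialised node */
    }

Readers may observe a mid-rotation tree, but every pointer they can load leads to a valid node, which is all an RCU search needs.
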
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d0ffcbe369b7..70ff77f02eee 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -1072,6 +1072,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1072 | break; | 1072 | break; |
1073 | } | 1073 | } |
1074 | ipgre_tunnel_unlink(ign, t); | 1074 | ipgre_tunnel_unlink(ign, t); |
1075 | synchronize_net(); | ||
1075 | t->parms.iph.saddr = p.iph.saddr; | 1076 | t->parms.iph.saddr = p.iph.saddr; |
1076 | t->parms.iph.daddr = p.iph.daddr; | 1077 | t->parms.iph.daddr = p.iph.daddr; |
1077 | t->parms.i_key = p.i_key; | 1078 | t->parms.i_key = p.i_key; |
@@ -1324,7 +1325,6 @@ static void ipgre_fb_tunnel_init(struct net_device *dev) | |||
1324 | { | 1325 | { |
1325 | struct ip_tunnel *tunnel = netdev_priv(dev); | 1326 | struct ip_tunnel *tunnel = netdev_priv(dev); |
1326 | struct iphdr *iph = &tunnel->parms.iph; | 1327 | struct iphdr *iph = &tunnel->parms.iph; |
1327 | struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id); | ||
1328 | 1328 | ||
1329 | tunnel->dev = dev; | 1329 | tunnel->dev = dev; |
1330 | strcpy(tunnel->parms.name, dev->name); | 1330 | strcpy(tunnel->parms.name, dev->name); |
@@ -1335,7 +1335,6 @@ static void ipgre_fb_tunnel_init(struct net_device *dev) | |||
1335 | tunnel->hlen = sizeof(struct iphdr) + 4; | 1335 | tunnel->hlen = sizeof(struct iphdr) + 4; |
1336 | 1336 | ||
1337 | dev_hold(dev); | 1337 | dev_hold(dev); |
1338 | rcu_assign_pointer(ign->tunnels_wc[0], tunnel); | ||
1339 | } | 1338 | } |
1340 | 1339 | ||
1341 | 1340 | ||
@@ -1382,10 +1381,12 @@ static int __net_init ipgre_init_net(struct net *net) | |||
1382 | if ((err = register_netdev(ign->fb_tunnel_dev))) | 1381 | if ((err = register_netdev(ign->fb_tunnel_dev))) |
1383 | goto err_reg_dev; | 1382 | goto err_reg_dev; |
1384 | 1383 | ||
1384 | rcu_assign_pointer(ign->tunnels_wc[0], | ||
1385 | netdev_priv(ign->fb_tunnel_dev)); | ||
1385 | return 0; | 1386 | return 0; |
1386 | 1387 | ||
1387 | err_reg_dev: | 1388 | err_reg_dev: |
1388 | free_netdev(ign->fb_tunnel_dev); | 1389 | ipgre_dev_free(ign->fb_tunnel_dev); |
1389 | err_alloc_dev: | 1390 | err_alloc_dev: |
1390 | return err; | 1391 | return err; |
1391 | } | 1392 | } |
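
Two distinct fixes meet in the ip_gre.c section. First, the ioctl path gains a synchronize_net() between unlinking the tunnel and rewriting its addresses and keys: readers hash on those fields under RCU, so they may only change once readers that could still see the old entry have drained (the ipip.c hunk further down applies the identical fix). Second, the fallback device is published to tunnels_wc[0] only after register_netdev() succeeds, and the error path switches to ipgre_dev_free(), so a half-initialised device is never reachable. The quiesce pattern, with hypothetical helpers standing in for ipgre_tunnel_{unlink,link}():

    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>
    #include <linux/types.h>

    struct tunnel_example {
            struct tunnel_example __rcu *next;
            __be32 saddr, daddr;            /* the hash keys */
    };

    static void unlink_example(struct tunnel_example *t);   /* stand-in */
    static void relink_example(struct tunnel_example *t);   /* stand-in */

    static void change_keys_example(struct tunnel_example *t,
                                    __be32 saddr, __be32 daddr)
    {
            unlink_example(t);      /* new RCU readers can't find t */
            synchronize_net();      /* readers already inside drain */
            t->saddr = saddr;       /* safe: nobody can be hashing  */
            t->daddr = daddr;       /* on the old values anymore    */
            relink_example(t);      /* republish in the new bucket  */
    }
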
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 64b70ad162e3..3948c86e59ca 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -238,7 +238,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc) | |||
238 | but receiver should be enough clever f.e. to forward mtrace requests, | 238 | but receiver should be enough clever f.e. to forward mtrace requests, |
239 | sent to multicast group to reach destination designated router. | 239 | sent to multicast group to reach destination designated router. |
240 | */ | 240 | */ |
241 | struct ip_ra_chain *ip_ra_chain; | 241 | struct ip_ra_chain __rcu *ip_ra_chain; |
242 | static DEFINE_SPINLOCK(ip_ra_lock); | 242 | static DEFINE_SPINLOCK(ip_ra_lock); |
243 | 243 | ||
244 | 244 | ||
@@ -253,7 +253,8 @@ static void ip_ra_destroy_rcu(struct rcu_head *head) | |||
253 | int ip_ra_control(struct sock *sk, unsigned char on, | 253 | int ip_ra_control(struct sock *sk, unsigned char on, |
254 | void (*destructor)(struct sock *)) | 254 | void (*destructor)(struct sock *)) |
255 | { | 255 | { |
256 | struct ip_ra_chain *ra, *new_ra, **rap; | 256 | struct ip_ra_chain *ra, *new_ra; |
257 | struct ip_ra_chain __rcu **rap; | ||
257 | 258 | ||
258 | if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW) | 259 | if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW) |
259 | return -EINVAL; | 260 | return -EINVAL; |
@@ -261,7 +262,10 @@ int ip_ra_control(struct sock *sk, unsigned char on, | |||
261 | new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; | 262 | new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; |
262 | 263 | ||
263 | spin_lock_bh(&ip_ra_lock); | 264 | spin_lock_bh(&ip_ra_lock); |
264 | for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) { | 265 | for (rap = &ip_ra_chain; |
266 | (ra = rcu_dereference_protected(*rap, | ||
267 | lockdep_is_held(&ip_ra_lock))) != NULL; | ||
268 | rap = &ra->next) { | ||
265 | if (ra->sk == sk) { | 269 | if (ra->sk == sk) { |
266 | if (on) { | 270 | if (on) { |
267 | spin_unlock_bh(&ip_ra_lock); | 271 | spin_unlock_bh(&ip_ra_lock); |
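
The ip_ra_control() rewrite above is the list-walk counterpart of the slot idiom: the cursor is a pointer to an __rcu pointer, and every hop is lockdep-checked against ip_ra_lock (tunnel4.c later in this diff does the same walk under a mutex). Generic form, with the _example names as stand-ins:

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct item_example {
            struct item_example __rcu *next;
            int id;
    };

    static struct item_example __rcu *list_head_example;
    static DEFINE_SPINLOCK(list_lock_example);

    static void unlink_item_example(int id)
    {
            struct item_example __rcu **pp = &list_head_example;
            struct item_example *it, *nxt;

            spin_lock_bh(&list_lock_example);
            while ((it = rcu_dereference_protected(*pp,
                            lockdep_is_held(&list_lock_example))) != NULL) {
                    if (it->id == id) {
                            nxt = rcu_dereference_protected(it->next,
                                    lockdep_is_held(&list_lock_example));
                            rcu_assign_pointer(*pp, nxt);
                            break;  /* free 'it' via call_rcu() afterwards */
                    }
                    pp = &it->next;
            }
            spin_unlock_bh(&list_lock_example);
    }
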
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index e9b816e6cd73..cd300aaee78f 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -676,6 +676,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
676 | } | 676 | } |
677 | t = netdev_priv(dev); | 677 | t = netdev_priv(dev); |
678 | ipip_tunnel_unlink(ipn, t); | 678 | ipip_tunnel_unlink(ipn, t); |
679 | synchronize_net(); | ||
679 | t->parms.iph.saddr = p.iph.saddr; | 680 | t->parms.iph.saddr = p.iph.saddr; |
680 | t->parms.iph.daddr = p.iph.daddr; | 681 | t->parms.iph.daddr = p.iph.daddr; |
681 | memcpy(dev->dev_addr, &p.iph.saddr, 4); | 682 | memcpy(dev->dev_addr, &p.iph.saddr, 4); |
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 3cad2591ace0..3fac340a28d5 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -927,6 +927,7 @@ static int get_info(struct net *net, void __user *user, | |||
927 | private = &tmp; | 927 | private = &tmp; |
928 | } | 928 | } |
929 | #endif | 929 | #endif |
930 | memset(&info, 0, sizeof(info)); | ||
930 | info.valid_hooks = t->valid_hooks; | 931 | info.valid_hooks = t->valid_hooks; |
931 | memcpy(info.hook_entry, private->hook_entry, | 932 | memcpy(info.hook_entry, private->hook_entry, |
932 | sizeof(info.hook_entry)); | 933 | sizeof(info.hook_entry)); |
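
The one-line memset in get_info() is a security fix, not an RCU change: the info struct lives on the kernel stack and is later copied to userspace wholesale, so any padding or field the code forgets to set would leak stack contents (the ip_tables.c hunk below is the identical fix). Minimal shape, with getinfo_example as a stand-in struct:

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    struct getinfo_example {
            char name[32];
            unsigned int num_entries;
            /* compiler-inserted padding can hide here too */
    };

    static int get_info_example(void __user *user)
    {
            struct getinfo_example info;

            memset(&info, 0, sizeof(info)); /* no uninitialised stack bytes */
            strlcpy(info.name, "filter", sizeof(info.name));
            info.num_entries = 0;

            return copy_to_user(user, &info, sizeof(info)) ? -EFAULT : 0;
    }
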
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index d31b007a6d80..a846d633b3b6 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -1124,6 +1124,7 @@ static int get_info(struct net *net, void __user *user, | |||
1124 | private = &tmp; | 1124 | private = &tmp; |
1125 | } | 1125 | } |
1126 | #endif | 1126 | #endif |
1127 | memset(&info, 0, sizeof(info)); | ||
1127 | info.valid_hooks = t->valid_hooks; | 1128 | info.valid_hooks = t->valid_hooks; |
1128 | memcpy(info.hook_entry, private->hook_entry, | 1129 | memcpy(info.hook_entry, private->hook_entry, |
1129 | sizeof(info.hook_entry)); | 1130 | sizeof(info.hook_entry)); |
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index 295c97431e43..c04787ce1a71 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c | |||
@@ -47,26 +47,6 @@ __nf_nat_proto_find(u_int8_t protonum) | |||
47 | return rcu_dereference(nf_nat_protos[protonum]); | 47 | return rcu_dereference(nf_nat_protos[protonum]); |
48 | } | 48 | } |
49 | 49 | ||
50 | static const struct nf_nat_protocol * | ||
51 | nf_nat_proto_find_get(u_int8_t protonum) | ||
52 | { | ||
53 | const struct nf_nat_protocol *p; | ||
54 | |||
55 | rcu_read_lock(); | ||
56 | p = __nf_nat_proto_find(protonum); | ||
57 | if (!try_module_get(p->me)) | ||
58 | p = &nf_nat_unknown_protocol; | ||
59 | rcu_read_unlock(); | ||
60 | |||
61 | return p; | ||
62 | } | ||
63 | |||
64 | static void | ||
65 | nf_nat_proto_put(const struct nf_nat_protocol *p) | ||
66 | { | ||
67 | module_put(p->me); | ||
68 | } | ||
69 | |||
70 | /* We keep an extra hash for each conntrack, for fast searching. */ | 50 | /* We keep an extra hash for each conntrack, for fast searching. */ |
71 | static inline unsigned int | 51 | static inline unsigned int |
72 | hash_by_src(const struct net *net, u16 zone, | 52 | hash_by_src(const struct net *net, u16 zone, |
@@ -588,6 +568,26 @@ static struct nf_ct_ext_type nat_extend __read_mostly = { | |||
588 | #include <linux/netfilter/nfnetlink.h> | 568 | #include <linux/netfilter/nfnetlink.h> |
589 | #include <linux/netfilter/nfnetlink_conntrack.h> | 569 | #include <linux/netfilter/nfnetlink_conntrack.h> |
590 | 570 | ||
571 | static const struct nf_nat_protocol * | ||
572 | nf_nat_proto_find_get(u_int8_t protonum) | ||
573 | { | ||
574 | const struct nf_nat_protocol *p; | ||
575 | |||
576 | rcu_read_lock(); | ||
577 | p = __nf_nat_proto_find(protonum); | ||
578 | if (!try_module_get(p->me)) | ||
579 | p = &nf_nat_unknown_protocol; | ||
580 | rcu_read_unlock(); | ||
581 | |||
582 | return p; | ||
583 | } | ||
584 | |||
585 | static void | ||
586 | nf_nat_proto_put(const struct nf_nat_protocol *p) | ||
587 | { | ||
588 | module_put(p->me); | ||
589 | } | ||
590 | |||
591 | static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = { | 591 | static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = { |
592 | [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 }, | 592 | [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 }, |
593 | [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 }, | 593 | [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 }, |
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 4ae1f203f7cb..1b48eb1ed453 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -59,13 +59,13 @@ static int sockstat_seq_show(struct seq_file *seq, void *v) | |||
59 | local_bh_enable(); | 59 | local_bh_enable(); |
60 | 60 | ||
61 | socket_seq_show(seq); | 61 | socket_seq_show(seq); |
62 | seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n", | 62 | seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n", |
63 | sock_prot_inuse_get(net, &tcp_prot), orphans, | 63 | sock_prot_inuse_get(net, &tcp_prot), orphans, |
64 | tcp_death_row.tw_count, sockets, | 64 | tcp_death_row.tw_count, sockets, |
65 | atomic_read(&tcp_memory_allocated)); | 65 | atomic_long_read(&tcp_memory_allocated)); |
66 | seq_printf(seq, "UDP: inuse %d mem %d\n", | 66 | seq_printf(seq, "UDP: inuse %d mem %ld\n", |
67 | sock_prot_inuse_get(net, &udp_prot), | 67 | sock_prot_inuse_get(net, &udp_prot), |
68 | atomic_read(&udp_memory_allocated)); | 68 | atomic_long_read(&udp_memory_allocated)); |
69 | seq_printf(seq, "UDPLITE: inuse %d\n", | 69 | seq_printf(seq, "UDPLITE: inuse %d\n", |
70 | sock_prot_inuse_get(net, &udplite_prot)); | 70 | sock_prot_inuse_get(net, &udplite_prot)); |
71 | seq_printf(seq, "RAW: inuse %d\n", | 71 | seq_printf(seq, "RAW: inuse %d\n", |
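
The proc.c hunk is the visible end of the int-to-long accounting switch: atomic_long_read() returns long, so the seq_printf() formats must widen from %d to %ld in the same patch — on 64-bit, %d would truncate counts above 2^31. As a rule of thumb, sketched:

    #include <linux/seq_file.h>
    #include <asm/atomic.h>

    static void show_mem_example(struct seq_file *seq, atomic_long_t *mem)
    {
            /* type, read accessor and format specifier move together */
            seq_printf(seq, "mem %ld\n", atomic_long_read(mem));
    }
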
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c index 65699c24411c..9ae5c01cd0b2 100644 --- a/net/ipv4/protocol.c +++ b/net/ipv4/protocol.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
29 | #include <net/protocol.h> | 29 | #include <net/protocol.h> |
30 | 30 | ||
31 | const struct net_protocol *inet_protos[MAX_INET_PROTOS] __read_mostly; | 31 | const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly; |
32 | 32 | ||
33 | /* | 33 | /* |
34 | * Add a protocol handler to the hash tables | 34 | * Add a protocol handler to the hash tables |
@@ -38,7 +38,8 @@ int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) | |||
38 | { | 38 | { |
39 | int hash = protocol & (MAX_INET_PROTOS - 1); | 39 | int hash = protocol & (MAX_INET_PROTOS - 1); |
40 | 40 | ||
41 | return !cmpxchg(&inet_protos[hash], NULL, prot) ? 0 : -1; | 41 | return !cmpxchg((const struct net_protocol **)&inet_protos[hash], |
42 | NULL, prot) ? 0 : -1; | ||
42 | } | 43 | } |
43 | EXPORT_SYMBOL(inet_add_protocol); | 44 | EXPORT_SYMBOL(inet_add_protocol); |
44 | 45 | ||
@@ -50,7 +51,8 @@ int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol) | |||
50 | { | 51 | { |
51 | int ret, hash = protocol & (MAX_INET_PROTOS - 1); | 52 | int ret, hash = protocol & (MAX_INET_PROTOS - 1); |
52 | 53 | ||
53 | ret = (cmpxchg(&inet_protos[hash], prot, NULL) == prot) ? 0 : -1; | 54 | ret = (cmpxchg((const struct net_protocol **)&inet_protos[hash], |
55 | prot, NULL) == prot) ? 0 : -1; | ||
54 | 56 | ||
55 | synchronize_net(); | 57 | synchronize_net(); |
56 | 58 | ||
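
inet_protos[] keeps its lock-free cmpxchg() registration; the only change is the cast that strips the new __rcu address-space annotation, since cmpxchg() operates on plain pointers (sparse would otherwise complain) while its full barrier already gives the publish ordering rcu_assign_pointer() would. Reduced to a sketch with stand-in names:

    #include <linux/rcupdate.h>

    struct proto_example;                   /* opaque payload */

    static const struct proto_example __rcu *slots_example[16];

    static int add_proto_example(const struct proto_example *p, unsigned int hash)
    {
            /* succeeds only if the slot was empty; full barrier included */
            return !cmpxchg((const struct proto_example **)&slots_example[hash],
                            NULL, p) ? 0 : -1;
    }
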
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index d6cb2bfcd8e1..987bf9adb318 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -198,7 +198,7 @@ const __u8 ip_tos2prio[16] = { | |||
198 | */ | 198 | */ |
199 | 199 | ||
200 | struct rt_hash_bucket { | 200 | struct rt_hash_bucket { |
201 | struct rtable *chain; | 201 | struct rtable __rcu *chain; |
202 | }; | 202 | }; |
203 | 203 | ||
204 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \ | 204 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \ |
@@ -280,7 +280,7 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq) | |||
280 | struct rtable *r = NULL; | 280 | struct rtable *r = NULL; |
281 | 281 | ||
282 | for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { | 282 | for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { |
283 | if (!rt_hash_table[st->bucket].chain) | 283 | if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain)) |
284 | continue; | 284 | continue; |
285 | rcu_read_lock_bh(); | 285 | rcu_read_lock_bh(); |
286 | r = rcu_dereference_bh(rt_hash_table[st->bucket].chain); | 286 | r = rcu_dereference_bh(rt_hash_table[st->bucket].chain); |
@@ -300,17 +300,17 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq, | |||
300 | { | 300 | { |
301 | struct rt_cache_iter_state *st = seq->private; | 301 | struct rt_cache_iter_state *st = seq->private; |
302 | 302 | ||
303 | r = r->dst.rt_next; | 303 | r = rcu_dereference_bh(r->dst.rt_next); |
304 | while (!r) { | 304 | while (!r) { |
305 | rcu_read_unlock_bh(); | 305 | rcu_read_unlock_bh(); |
306 | do { | 306 | do { |
307 | if (--st->bucket < 0) | 307 | if (--st->bucket < 0) |
308 | return NULL; | 308 | return NULL; |
309 | } while (!rt_hash_table[st->bucket].chain); | 309 | } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain)); |
310 | rcu_read_lock_bh(); | 310 | rcu_read_lock_bh(); |
311 | r = rt_hash_table[st->bucket].chain; | 311 | r = rcu_dereference_bh(rt_hash_table[st->bucket].chain); |
312 | } | 312 | } |
313 | return rcu_dereference_bh(r); | 313 | return r; |
314 | } | 314 | } |
315 | 315 | ||
316 | static struct rtable *rt_cache_get_next(struct seq_file *seq, | 316 | static struct rtable *rt_cache_get_next(struct seq_file *seq, |
@@ -721,19 +721,23 @@ static void rt_do_flush(int process_context) | |||
721 | for (i = 0; i <= rt_hash_mask; i++) { | 721 | for (i = 0; i <= rt_hash_mask; i++) { |
722 | if (process_context && need_resched()) | 722 | if (process_context && need_resched()) |
723 | cond_resched(); | 723 | cond_resched(); |
724 | rth = rt_hash_table[i].chain; | 724 | rth = rcu_dereference_raw(rt_hash_table[i].chain); |
725 | if (!rth) | 725 | if (!rth) |
726 | continue; | 726 | continue; |
727 | 727 | ||
728 | spin_lock_bh(rt_hash_lock_addr(i)); | 728 | spin_lock_bh(rt_hash_lock_addr(i)); |
729 | #ifdef CONFIG_NET_NS | 729 | #ifdef CONFIG_NET_NS |
730 | { | 730 | { |
731 | struct rtable ** prev, * p; | 731 | struct rtable __rcu **prev; |
732 | struct rtable *p; | ||
732 | 733 | ||
733 | rth = rt_hash_table[i].chain; | 734 | rth = rcu_dereference_protected(rt_hash_table[i].chain, |
735 | lockdep_is_held(rt_hash_lock_addr(i))); | ||
734 | 736 | ||
735 | /* defer releasing the head of the list after spin_unlock */ | 737 | /* defer releasing the head of the list after spin_unlock */ |
736 | for (tail = rth; tail; tail = tail->dst.rt_next) | 738 | for (tail = rth; tail; |
739 | tail = rcu_dereference_protected(tail->dst.rt_next, | ||
740 | lockdep_is_held(rt_hash_lock_addr(i)))) | ||
737 | if (!rt_is_expired(tail)) | 741 | if (!rt_is_expired(tail)) |
738 | break; | 742 | break; |
739 | if (rth != tail) | 743 | if (rth != tail) |
@@ -741,8 +745,12 @@ static void rt_do_flush(int process_context) | |||
741 | 745 | ||
742 | /* call rt_free on entries after the tail requiring flush */ | 746 | /* call rt_free on entries after the tail requiring flush */ |
743 | prev = &rt_hash_table[i].chain; | 747 | prev = &rt_hash_table[i].chain; |
744 | for (p = *prev; p; p = next) { | 748 | for (p = rcu_dereference_protected(*prev, |
745 | next = p->dst.rt_next; | 749 | lockdep_is_held(rt_hash_lock_addr(i))); |
750 | p != NULL; | ||
751 | p = next) { | ||
752 | next = rcu_dereference_protected(p->dst.rt_next, | ||
753 | lockdep_is_held(rt_hash_lock_addr(i))); | ||
746 | if (!rt_is_expired(p)) { | 754 | if (!rt_is_expired(p)) { |
747 | prev = &p->dst.rt_next; | 755 | prev = &p->dst.rt_next; |
748 | } else { | 756 | } else { |
@@ -752,14 +760,15 @@ static void rt_do_flush(int process_context) | |||
752 | } | 760 | } |
753 | } | 761 | } |
754 | #else | 762 | #else |
755 | rth = rt_hash_table[i].chain; | 763 | rth = rcu_dereference_protected(rt_hash_table[i].chain, |
756 | rt_hash_table[i].chain = NULL; | 764 | lockdep_is_held(rt_hash_lock_addr(i))); |
765 | rcu_assign_pointer(rt_hash_table[i].chain, NULL); | ||
757 | tail = NULL; | 766 | tail = NULL; |
758 | #endif | 767 | #endif |
759 | spin_unlock_bh(rt_hash_lock_addr(i)); | 768 | spin_unlock_bh(rt_hash_lock_addr(i)); |
760 | 769 | ||
761 | for (; rth != tail; rth = next) { | 770 | for (; rth != tail; rth = next) { |
762 | next = rth->dst.rt_next; | 771 | next = rcu_dereference_protected(rth->dst.rt_next, 1); |
763 | rt_free(rth); | 772 | rt_free(rth); |
764 | } | 773 | } |
765 | } | 774 | } |
@@ -790,7 +799,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth) | |||
790 | while (aux != rth) { | 799 | while (aux != rth) { |
791 | if (compare_hash_inputs(&aux->fl, &rth->fl)) | 800 | if (compare_hash_inputs(&aux->fl, &rth->fl)) |
792 | return 0; | 801 | return 0; |
793 | aux = aux->dst.rt_next; | 802 | aux = rcu_dereference_protected(aux->dst.rt_next, 1); |
794 | } | 803 | } |
795 | return ONE; | 804 | return ONE; |
796 | } | 805 | } |
@@ -799,7 +808,8 @@ static void rt_check_expire(void) | |||
799 | { | 808 | { |
800 | static unsigned int rover; | 809 | static unsigned int rover; |
801 | unsigned int i = rover, goal; | 810 | unsigned int i = rover, goal; |
802 | struct rtable *rth, **rthp; | 811 | struct rtable *rth; |
812 | struct rtable __rcu **rthp; | ||
803 | unsigned long samples = 0; | 813 | unsigned long samples = 0; |
804 | unsigned long sum = 0, sum2 = 0; | 814 | unsigned long sum = 0, sum2 = 0; |
805 | unsigned long delta; | 815 | unsigned long delta; |
@@ -825,11 +835,12 @@ static void rt_check_expire(void) | |||
825 | 835 | ||
826 | samples++; | 836 | samples++; |
827 | 837 | ||
828 | if (*rthp == NULL) | 838 | if (rcu_dereference_raw(*rthp) == NULL) |
829 | continue; | 839 | continue; |
830 | length = 0; | 840 | length = 0; |
831 | spin_lock_bh(rt_hash_lock_addr(i)); | 841 | spin_lock_bh(rt_hash_lock_addr(i)); |
832 | while ((rth = *rthp) != NULL) { | 842 | while ((rth = rcu_dereference_protected(*rthp, |
843 | lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) { | ||
833 | prefetch(rth->dst.rt_next); | 844 | prefetch(rth->dst.rt_next); |
834 | if (rt_is_expired(rth)) { | 845 | if (rt_is_expired(rth)) { |
835 | *rthp = rth->dst.rt_next; | 846 | *rthp = rth->dst.rt_next; |
@@ -941,7 +952,8 @@ static int rt_garbage_collect(struct dst_ops *ops) | |||
941 | static unsigned long last_gc; | 952 | static unsigned long last_gc; |
942 | static int rover; | 953 | static int rover; |
943 | static int equilibrium; | 954 | static int equilibrium; |
944 | struct rtable *rth, **rthp; | 955 | struct rtable *rth; |
956 | struct rtable __rcu **rthp; | ||
945 | unsigned long now = jiffies; | 957 | unsigned long now = jiffies; |
946 | int goal; | 958 | int goal; |
947 | int entries = dst_entries_get_fast(&ipv4_dst_ops); | 959 | int entries = dst_entries_get_fast(&ipv4_dst_ops); |
@@ -995,7 +1007,8 @@ static int rt_garbage_collect(struct dst_ops *ops) | |||
995 | k = (k + 1) & rt_hash_mask; | 1007 | k = (k + 1) & rt_hash_mask; |
996 | rthp = &rt_hash_table[k].chain; | 1008 | rthp = &rt_hash_table[k].chain; |
997 | spin_lock_bh(rt_hash_lock_addr(k)); | 1009 | spin_lock_bh(rt_hash_lock_addr(k)); |
998 | while ((rth = *rthp) != NULL) { | 1010 | while ((rth = rcu_dereference_protected(*rthp, |
1011 | lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) { | ||
999 | if (!rt_is_expired(rth) && | 1012 | if (!rt_is_expired(rth) && |
1000 | !rt_may_expire(rth, tmo, expire)) { | 1013 | !rt_may_expire(rth, tmo, expire)) { |
1001 | tmo >>= 1; | 1014 | tmo >>= 1; |
@@ -1071,7 +1084,7 @@ static int slow_chain_length(const struct rtable *head) | |||
1071 | 1084 | ||
1072 | while (rth) { | 1085 | while (rth) { |
1073 | length += has_noalias(head, rth); | 1086 | length += has_noalias(head, rth); |
1074 | rth = rth->dst.rt_next; | 1087 | rth = rcu_dereference_protected(rth->dst.rt_next, 1); |
1075 | } | 1088 | } |
1076 | return length >> FRACT_BITS; | 1089 | return length >> FRACT_BITS; |
1077 | } | 1090 | } |
@@ -1079,9 +1092,9 @@ static int slow_chain_length(const struct rtable *head) | |||
1079 | static int rt_intern_hash(unsigned hash, struct rtable *rt, | 1092 | static int rt_intern_hash(unsigned hash, struct rtable *rt, |
1080 | struct rtable **rp, struct sk_buff *skb, int ifindex) | 1093 | struct rtable **rp, struct sk_buff *skb, int ifindex) |
1081 | { | 1094 | { |
1082 | struct rtable *rth, **rthp; | 1095 | struct rtable *rth, *cand; |
1096 | struct rtable __rcu **rthp, **candp; | ||
1083 | unsigned long now; | 1097 | unsigned long now; |
1084 | struct rtable *cand, **candp; | ||
1085 | u32 min_score; | 1098 | u32 min_score; |
1086 | int chain_length; | 1099 | int chain_length; |
1087 | int attempts = !in_softirq(); | 1100 | int attempts = !in_softirq(); |
@@ -1128,7 +1141,8 @@ restart: | |||
1128 | rthp = &rt_hash_table[hash].chain; | 1141 | rthp = &rt_hash_table[hash].chain; |
1129 | 1142 | ||
1130 | spin_lock_bh(rt_hash_lock_addr(hash)); | 1143 | spin_lock_bh(rt_hash_lock_addr(hash)); |
1131 | while ((rth = *rthp) != NULL) { | 1144 | while ((rth = rcu_dereference_protected(*rthp, |
1145 | lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) { | ||
1132 | if (rt_is_expired(rth)) { | 1146 | if (rt_is_expired(rth)) { |
1133 | *rthp = rth->dst.rt_next; | 1147 | *rthp = rth->dst.rt_next; |
1134 | rt_free(rth); | 1148 | rt_free(rth); |
@@ -1324,12 +1338,14 @@ EXPORT_SYMBOL(__ip_select_ident); | |||
1324 | 1338 | ||
1325 | static void rt_del(unsigned hash, struct rtable *rt) | 1339 | static void rt_del(unsigned hash, struct rtable *rt) |
1326 | { | 1340 | { |
1327 | struct rtable **rthp, *aux; | 1341 | struct rtable __rcu **rthp; |
1342 | struct rtable *aux; | ||
1328 | 1343 | ||
1329 | rthp = &rt_hash_table[hash].chain; | 1344 | rthp = &rt_hash_table[hash].chain; |
1330 | spin_lock_bh(rt_hash_lock_addr(hash)); | 1345 | spin_lock_bh(rt_hash_lock_addr(hash)); |
1331 | ip_rt_put(rt); | 1346 | ip_rt_put(rt); |
1332 | while ((aux = *rthp) != NULL) { | 1347 | while ((aux = rcu_dereference_protected(*rthp, |
1348 | lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) { | ||
1333 | if (aux == rt || rt_is_expired(aux)) { | 1349 | if (aux == rt || rt_is_expired(aux)) { |
1334 | *rthp = aux->dst.rt_next; | 1350 | *rthp = aux->dst.rt_next; |
1335 | rt_free(aux); | 1351 | rt_free(aux); |
@@ -1346,7 +1362,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1346 | { | 1362 | { |
1347 | int i, k; | 1363 | int i, k; |
1348 | struct in_device *in_dev = __in_dev_get_rcu(dev); | 1364 | struct in_device *in_dev = __in_dev_get_rcu(dev); |
1349 | struct rtable *rth, **rthp; | 1365 | struct rtable *rth; |
1366 | struct rtable __rcu **rthp; | ||
1350 | __be32 skeys[2] = { saddr, 0 }; | 1367 | __be32 skeys[2] = { saddr, 0 }; |
1351 | int ikeys[2] = { dev->ifindex, 0 }; | 1368 | int ikeys[2] = { dev->ifindex, 0 }; |
1352 | struct netevent_redirect netevent; | 1369 | struct netevent_redirect netevent; |
@@ -1379,7 +1396,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1379 | unsigned hash = rt_hash(daddr, skeys[i], ikeys[k], | 1396 | unsigned hash = rt_hash(daddr, skeys[i], ikeys[k], |
1380 | rt_genid(net)); | 1397 | rt_genid(net)); |
1381 | 1398 | ||
1382 | rthp=&rt_hash_table[hash].chain; | 1399 | rthp = &rt_hash_table[hash].chain; |
1383 | 1400 | ||
1384 | while ((rth = rcu_dereference(*rthp)) != NULL) { | 1401 | while ((rth = rcu_dereference(*rthp)) != NULL) { |
1385 | struct rtable *rt; | 1402 | struct rtable *rt; |
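
The route.c section uses all three dereference flavours, and the choice is systematic: rcu_dereference_raw() for lockless emptiness probes whose result is only NULL-tested, never followed; rcu_dereference_protected(..., lockdep_is_held(rt_hash_lock_addr(i))) inside writer loops that hold the per-bucket lock; and rcu_dereference_protected(..., 1) where exclusion is known by construction — for example on a chain already detached from the table — and there is no lock expression to check. The writer-loop shape, with _example names as stand-ins:

    #include <linux/jiffies.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct rtable_example {
            struct rtable_example __rcu *next;
            unsigned long expires;
    };

    struct bucket_example {
            struct rtable_example __rcu *chain;
    };

    static void free_rcu_example(struct rtable_example *r); /* via call_rcu() */

    static void flush_expired_example(struct bucket_example *b, spinlock_t *lock)
    {
            struct rtable_example __rcu **pp = &b->chain;
            struct rtable_example *r;

            spin_lock_bh(lock);
            while ((r = rcu_dereference_protected(*pp,
                                    lockdep_is_held(lock))) != NULL) {
                    if (time_after(jiffies, r->expires)) {
                            rcu_assign_pointer(*pp,
                                    rcu_dereference_protected(r->next,
                                            lockdep_is_held(lock)));
                            free_rcu_example(r);
                    } else {
                            pp = &r->next;
                    }
            }
            spin_unlock_bh(lock);
    }
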
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index d96c1da4b17c..e91911d7aae2 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -398,7 +398,7 @@ static struct ctl_table ipv4_table[] = { | |||
398 | .data = &sysctl_tcp_mem, | 398 | .data = &sysctl_tcp_mem, |
399 | .maxlen = sizeof(sysctl_tcp_mem), | 399 | .maxlen = sizeof(sysctl_tcp_mem), |
400 | .mode = 0644, | 400 | .mode = 0644, |
401 | .proc_handler = proc_dointvec | 401 | .proc_handler = proc_doulongvec_minmax |
402 | }, | 402 | }, |
403 | { | 403 | { |
404 | .procname = "tcp_wmem", | 404 | .procname = "tcp_wmem", |
@@ -602,8 +602,7 @@ static struct ctl_table ipv4_table[] = { | |||
602 | .data = &sysctl_udp_mem, | 602 | .data = &sysctl_udp_mem, |
603 | .maxlen = sizeof(sysctl_udp_mem), | 603 | .maxlen = sizeof(sysctl_udp_mem), |
604 | .mode = 0644, | 604 | .mode = 0644, |
605 | .proc_handler = proc_dointvec_minmax, | 605 | .proc_handler = proc_doulongvec_minmax, |
606 | .extra1 = &zero | ||
607 | }, | 606 | }, |
608 | { | 607 | { |
609 | .procname = "udp_rmem_min", | 608 | .procname = "udp_rmem_min", |
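
These two sysctl hunks restore the invariant that .data, .maxlen and .proc_handler agree on the element type: with sysctl_tcp_mem and sysctl_udp_mem now long[3], the int handlers would read and write the wrong widths. The udp entry also drops .extra1 = &zero, presumably because that bound points at an int while the ulongvec handler expects unsigned long bounds. The matching shape:

    #include <linux/sysctl.h>

    static long sysctl_mem_example[3];

    static struct ctl_table table_example[] = {
            {
                    .procname       = "mem_example",
                    .data           = &sysctl_mem_example,
                    .maxlen         = sizeof(sysctl_mem_example),
                    .mode           = 0644,
                    .proc_handler   = proc_doulongvec_minmax, /* long-aware */
            },
            { }
    };
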
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1664a0590bb8..081419969485 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -282,7 +282,7 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; | |||
282 | struct percpu_counter tcp_orphan_count; | 282 | struct percpu_counter tcp_orphan_count; |
283 | EXPORT_SYMBOL_GPL(tcp_orphan_count); | 283 | EXPORT_SYMBOL_GPL(tcp_orphan_count); |
284 | 284 | ||
285 | int sysctl_tcp_mem[3] __read_mostly; | 285 | long sysctl_tcp_mem[3] __read_mostly; |
286 | int sysctl_tcp_wmem[3] __read_mostly; | 286 | int sysctl_tcp_wmem[3] __read_mostly; |
287 | int sysctl_tcp_rmem[3] __read_mostly; | 287 | int sysctl_tcp_rmem[3] __read_mostly; |
288 | 288 | ||
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(sysctl_tcp_mem); | |||
290 | EXPORT_SYMBOL(sysctl_tcp_rmem); | 290 | EXPORT_SYMBOL(sysctl_tcp_rmem); |
291 | EXPORT_SYMBOL(sysctl_tcp_wmem); | 291 | EXPORT_SYMBOL(sysctl_tcp_wmem); |
292 | 292 | ||
293 | atomic_t tcp_memory_allocated; /* Current allocated memory. */ | 293 | atomic_long_t tcp_memory_allocated; /* Current allocated memory. */ |
294 | EXPORT_SYMBOL(tcp_memory_allocated); | 294 | EXPORT_SYMBOL(tcp_memory_allocated); |
295 | 295 | ||
296 | /* | 296 | /* |
@@ -2246,7 +2246,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, | |||
2246 | /* Values greater than interface MTU won't take effect. However | 2246 | /* Values greater than interface MTU won't take effect. However |
2247 | * at the point when this call is done we typically don't yet | 2247 | * at the point when this call is done we typically don't yet |
2248 | * know which interface is going to be used */ | 2248 | * know which interface is going to be used */ |
2249 | if (val < 8 || val > MAX_TCP_WINDOW) { | 2249 | if (val < 64 || val > MAX_TCP_WINDOW) { |
2250 | err = -EINVAL; | 2250 | err = -EINVAL; |
2251 | break; | 2251 | break; |
2252 | } | 2252 | } |
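
Two unrelated fixes meet in tcp.c. The type changes are overflow-proofing: these counters and limits are in pages, and an int caps out at 2^31-1 — a 16 TiB machine already has 16 TiB / 4 KiB = 2^32 pages, so the accounting must be long (the tcp_input.c comparisons below switch to atomic_long_read() to match). Separately, the TCP_MAXSEG floor rises from 8 to 64, apparently to reject degenerate MSS values that no real peer uses and that stress corner cases in the segmentation math. The widened pressure check, as a sketch against the declarations in net/tcp.h:

    #include <net/tcp.h>

    static int over_soft_limit_example(void)
    {
            /* both sides are long now: no wrap on multi-TiB machines */
            return atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0];
    }
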
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3357f69e353d..6d8ab1c4efc3 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -259,8 +259,11 @@ static void tcp_fixup_sndbuf(struct sock *sk) | |||
259 | int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 + | 259 | int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 + |
260 | sizeof(struct sk_buff); | 260 | sizeof(struct sk_buff); |
261 | 261 | ||
262 | if (sk->sk_sndbuf < 3 * sndmem) | 262 | if (sk->sk_sndbuf < 3 * sndmem) { |
263 | sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]); | 263 | sk->sk_sndbuf = 3 * sndmem; |
264 | if (sk->sk_sndbuf > sysctl_tcp_wmem[2]) | ||
265 | sk->sk_sndbuf = sysctl_tcp_wmem[2]; | ||
266 | } | ||
264 | } | 267 | } |
265 | 268 | ||
266 | /* 2. Tuning advertised window (window_clamp, rcv_ssthresh) | 269 | /* 2. Tuning advertised window (window_clamp, rcv_ssthresh) |
@@ -396,7 +399,7 @@ static void tcp_clamp_window(struct sock *sk) | |||
396 | if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && | 399 | if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && |
397 | !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && | 400 | !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && |
398 | !tcp_memory_pressure && | 401 | !tcp_memory_pressure && |
399 | atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { | 402 | atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { |
400 | sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), | 403 | sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), |
401 | sysctl_tcp_rmem[2]); | 404 | sysctl_tcp_rmem[2]); |
402 | } | 405 | } |
@@ -4861,7 +4864,7 @@ static int tcp_should_expand_sndbuf(struct sock *sk) | |||
4861 | return 0; | 4864 | return 0; |
4862 | 4865 | ||
4863 | /* If we are under soft global TCP memory pressure, do not expand. */ | 4866 | /* If we are under soft global TCP memory pressure, do not expand. */ |
4864 | if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) | 4867 | if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) |
4865 | return 0; | 4868 | return 0; |
4866 | 4869 | ||
4867 | /* If we filled the congestion window, do not expand. */ | 4870 | /* If we filled the congestion window, do not expand. */ |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 8f8527d41682..69ccbc1dde9c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -415,6 +415,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
415 | !icsk->icsk_backoff) | 415 | !icsk->icsk_backoff) |
416 | break; | 416 | break; |
417 | 417 | ||
418 | if (sock_owned_by_user(sk)) | ||
419 | break; | ||
420 | |||
418 | icsk->icsk_backoff--; | 421 | icsk->icsk_backoff--; |
419 | inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) << | 422 | inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) << |
420 | icsk->icsk_backoff; | 423 | icsk->icsk_backoff; |
@@ -429,11 +432,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
429 | if (remaining) { | 432 | if (remaining) { |
430 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, | 433 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, |
431 | remaining, TCP_RTO_MAX); | 434 | remaining, TCP_RTO_MAX); |
432 | } else if (sock_owned_by_user(sk)) { | ||
433 | /* RTO revert clocked out retransmission, | ||
434 | * but socket is locked. Will defer. */ | ||
435 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, | ||
436 | HZ/20, TCP_RTO_MAX); | ||
437 | } else { | 435 | } else { |
438 | /* RTO revert clocked out retransmission. | 436 | /* RTO revert clocked out retransmission. |
439 | * Will retransmit now */ | 437 | * Will retransmit now */ |
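
The tcp_v4_err() change moves the sock_owned_by_user() test ahead of the backoff bookkeeping: previously the handler decremented icsk_backoff and recomputed the RTO first, and only then noticed the socket was locked by user context, parking a 50 ms retry timer. Bailing out early means softirq context never mutates state on a socket it does not own; the hedged reading is that the revert simply happens later via the normal paths once the lock is released. In miniature:

    #include <net/inet_connection_sock.h>
    #include <net/sock.h>

    static void icmp_err_example(struct sock *sk)
    {
            struct inet_connection_sock *icsk = inet_csk(sk);

            if (sock_owned_by_user(sk))
                    return;         /* user context owns the lock:
                                     * don't touch icsk from softirq */

            icsk->icsk_backoff--;   /* safe: we own the socket here */
    }
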
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c index 9a17bd2a0a37..ac3b3ee4b07c 100644 --- a/net/ipv4/tunnel4.c +++ b/net/ipv4/tunnel4.c | |||
@@ -14,27 +14,32 @@ | |||
14 | #include <net/protocol.h> | 14 | #include <net/protocol.h> |
15 | #include <net/xfrm.h> | 15 | #include <net/xfrm.h> |
16 | 16 | ||
17 | static struct xfrm_tunnel *tunnel4_handlers __read_mostly; | 17 | static struct xfrm_tunnel __rcu *tunnel4_handlers __read_mostly; |
18 | static struct xfrm_tunnel *tunnel64_handlers __read_mostly; | 18 | static struct xfrm_tunnel __rcu *tunnel64_handlers __read_mostly; |
19 | static DEFINE_MUTEX(tunnel4_mutex); | 19 | static DEFINE_MUTEX(tunnel4_mutex); |
20 | 20 | ||
21 | static inline struct xfrm_tunnel **fam_handlers(unsigned short family) | 21 | static inline struct xfrm_tunnel __rcu **fam_handlers(unsigned short family) |
22 | { | 22 | { |
23 | return (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers; | 23 | return (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers; |
24 | } | 24 | } |
25 | 25 | ||
26 | int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family) | 26 | int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family) |
27 | { | 27 | { |
28 | struct xfrm_tunnel **pprev; | 28 | struct xfrm_tunnel __rcu **pprev; |
29 | struct xfrm_tunnel *t; | ||
30 | |||
29 | int ret = -EEXIST; | 31 | int ret = -EEXIST; |
30 | int priority = handler->priority; | 32 | int priority = handler->priority; |
31 | 33 | ||
32 | mutex_lock(&tunnel4_mutex); | 34 | mutex_lock(&tunnel4_mutex); |
33 | 35 | ||
34 | for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) { | 36 | for (pprev = fam_handlers(family); |
35 | if ((*pprev)->priority > priority) | 37 | (t = rcu_dereference_protected(*pprev, |
38 | lockdep_is_held(&tunnel4_mutex))) != NULL; | ||
39 | pprev = &t->next) { | ||
40 | if (t->priority > priority) | ||
36 | break; | 41 | break; |
37 | if ((*pprev)->priority == priority) | 42 | if (t->priority == priority) |
38 | goto err; | 43 | goto err; |
39 | } | 44 | } |
40 | 45 | ||
@@ -52,13 +57,17 @@ EXPORT_SYMBOL(xfrm4_tunnel_register); | |||
52 | 57 | ||
53 | int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family) | 58 | int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family) |
54 | { | 59 | { |
55 | struct xfrm_tunnel **pprev; | 60 | struct xfrm_tunnel __rcu **pprev; |
61 | struct xfrm_tunnel *t; | ||
56 | int ret = -ENOENT; | 62 | int ret = -ENOENT; |
57 | 63 | ||
58 | mutex_lock(&tunnel4_mutex); | 64 | mutex_lock(&tunnel4_mutex); |
59 | 65 | ||
60 | for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) { | 66 | for (pprev = fam_handlers(family); |
61 | if (*pprev == handler) { | 67 | (t = rcu_dereference_protected(*pprev, |
68 | lockdep_is_held(&tunnel4_mutex))) != NULL; | ||
69 | pprev = &t->next) { | ||
70 | if (t == handler) { | ||
62 | *pprev = handler->next; | 71 | *pprev = handler->next; |
63 | ret = 0; | 72 | ret = 0; |
64 | break; | 73 | break; |
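
Pattern worth noting in this hunk: once the handler chain is annotated __rcu, even the writer, who holds tunnel4_mutex, must load each pointer through rcu_dereference_protected(), whose lockdep_is_held() condition documents (and, under lockdep, verifies) the locking contract at no runtime cost in production builds. A minimal self-contained sketch of the same walk, with made-up names:

    #include <linux/mutex.h>
    #include <linux/rcupdate.h>

    struct demo_handler {
            struct demo_handler __rcu *next;
            int priority;
    };

    static struct demo_handler __rcu *demo_handlers;
    static DEFINE_MUTEX(demo_mutex);

    static int demo_register(struct demo_handler *h)
    {
            struct demo_handler __rcu **pprev;
            struct demo_handler *t;
            int ret = -EEXIST;

            mutex_lock(&demo_mutex);
            for (pprev = &demo_handlers;
                 (t = rcu_dereference_protected(*pprev,
                                lockdep_is_held(&demo_mutex))) != NULL;
                 pprev = &t->next) {
                    if (t->priority > h->priority)
                            break;
                    if (t->priority == h->priority)
                            goto err;
            }
            h->next = *pprev;               /* both __rcu: not yet visible */
            rcu_assign_pointer(*pprev, h);  /* publish atomically */
            ret = 0;
    err:
            mutex_unlock(&demo_mutex);
            return ret;
    }
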
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index b3f7e8cf18ac..5e0a3a582a59 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -110,7 +110,7 @@ | |||
110 | struct udp_table udp_table __read_mostly; | 110 | struct udp_table udp_table __read_mostly; |
111 | EXPORT_SYMBOL(udp_table); | 111 | EXPORT_SYMBOL(udp_table); |
112 | 112 | ||
113 | int sysctl_udp_mem[3] __read_mostly; | 113 | long sysctl_udp_mem[3] __read_mostly; |
114 | EXPORT_SYMBOL(sysctl_udp_mem); | 114 | EXPORT_SYMBOL(sysctl_udp_mem); |
115 | 115 | ||
116 | int sysctl_udp_rmem_min __read_mostly; | 116 | int sysctl_udp_rmem_min __read_mostly; |
@@ -119,7 +119,7 @@ EXPORT_SYMBOL(sysctl_udp_rmem_min); | |||
119 | int sysctl_udp_wmem_min __read_mostly; | 119 | int sysctl_udp_wmem_min __read_mostly; |
120 | EXPORT_SYMBOL(sysctl_udp_wmem_min); | 120 | EXPORT_SYMBOL(sysctl_udp_wmem_min); |
121 | 121 | ||
122 | atomic_t udp_memory_allocated; | 122 | atomic_long_t udp_memory_allocated; |
123 | EXPORT_SYMBOL(udp_memory_allocated); | 123 | EXPORT_SYMBOL(udp_memory_allocated); |
124 | 124 | ||
125 | #define MAX_UDP_PORTS 65536 | 125 | #define MAX_UDP_PORTS 65536 |
@@ -1413,7 +1413,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
1413 | } | 1413 | } |
1414 | } | 1414 | } |
1415 | 1415 | ||
1416 | if (sk->sk_filter) { | 1416 | if (rcu_dereference_raw(sk->sk_filter)) { |
1417 | if (udp_lib_checksum_complete(skb)) | 1417 | if (udp_lib_checksum_complete(skb)) |
1418 | goto drop; | 1418 | goto drop; |
1419 | } | 1419 | } |
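
Two independent fixes here. sysctl_udp_mem and udp_memory_allocated count pages of socket-buffer memory and their defaults scale with installed RAM, so on large 64-bit machines the int/atomic_t versions could wrap; and the sk_filter test is only an existence check, so rcu_dereference_raw() merely silences the sparse __rcu warning rather than taking an RCU read lock the caller does not need. A sketch of the long-sized charge/uncharge arithmetic (demo names, not the kernel's helpers):

    #include <linux/atomic.h>

    static atomic_long_t demo_allocated;    /* pages, like udp_memory_allocated */

    static bool demo_charge(long pages, long limit)
    {
            if (atomic_long_add_return(pages, &demo_allocated) > limit) {
                    atomic_long_sub(pages, &demo_allocated); /* back out */
                    return false;
            }
            return true;
    }
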
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index ec7a91d9e865..b41ce0f0d514 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -836,7 +836,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i | |||
836 | { | 836 | { |
837 | struct inet6_dev *idev = ifp->idev; | 837 | struct inet6_dev *idev = ifp->idev; |
838 | struct in6_addr addr, *tmpaddr; | 838 | struct in6_addr addr, *tmpaddr; |
839 | unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp; | 839 | unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age; |
840 | unsigned long regen_advance; | 840 | unsigned long regen_advance; |
841 | int tmp_plen; | 841 | int tmp_plen; |
842 | int ret = 0; | 842 | int ret = 0; |
@@ -886,12 +886,13 @@ retry: | |||
886 | goto out; | 886 | goto out; |
887 | } | 887 | } |
888 | memcpy(&addr.s6_addr[8], idev->rndid, 8); | 888 | memcpy(&addr.s6_addr[8], idev->rndid, 8); |
889 | age = (jiffies - ifp->tstamp) / HZ; | ||
889 | tmp_valid_lft = min_t(__u32, | 890 | tmp_valid_lft = min_t(__u32, |
890 | ifp->valid_lft, | 891 | ifp->valid_lft, |
891 | idev->cnf.temp_valid_lft); | 892 | idev->cnf.temp_valid_lft + age); |
892 | tmp_prefered_lft = min_t(__u32, | 893 | tmp_prefered_lft = min_t(__u32, |
893 | ifp->prefered_lft, | 894 | ifp->prefered_lft, |
894 | idev->cnf.temp_prefered_lft - | 895 | idev->cnf.temp_prefered_lft + age - |
895 | idev->cnf.max_desync_factor); | 896 | idev->cnf.max_desync_factor); |
896 | tmp_plen = ifp->prefix_len; | 897 | tmp_plen = ifp->prefix_len; |
897 | max_addresses = idev->cnf.max_addresses; | 898 | max_addresses = idev->cnf.max_addresses; |
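
The age term matters because ifp->valid_lft and ifp->prefered_lft are measured from the public address's creation, while the cnf.temp_*_lft budgets describe how long a temporary address may live from now; adding the public address's age puts both min_t() operands on the same clock. Illustrative numbers (assumed, not from the patch): temp_prefered_lft = 86400 s, max_desync_factor = 600 s, and a public address 43200 s old with prefered_lft = 172800 s:

    age              = 43200                /* (jiffies - ifp->tstamp) / HZ */
    tmp_prefered_lft = min(172800, 86400 + 43200 - 600)
                     = 129000 counted from the public address's birth,
                       i.e. 129000 - 43200 = 85800 s still to run

Without the "+ age" term the same inputs give min(172800, 85800) = 85800 counted from the public address's birth, only 42600 s of real preferred time: the temporary address silently loses the public address's age from its own budget.
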
@@ -1426,8 +1427,10 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp) | |||
1426 | { | 1427 | { |
1427 | struct inet6_dev *idev = ifp->idev; | 1428 | struct inet6_dev *idev = ifp->idev; |
1428 | 1429 | ||
1429 | if (addrconf_dad_end(ifp)) | 1430 | if (addrconf_dad_end(ifp)) { |
1431 | in6_ifa_put(ifp); | ||
1430 | return; | 1432 | return; |
1433 | } | ||
1431 | 1434 | ||
1432 | if (net_ratelimit()) | 1435 | if (net_ratelimit()) |
1433 | printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n", | 1436 | printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n", |
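
The added braces are not cosmetic: addrconf_dad_end() reporting "DAD already ended" is an early exit, and that path was returning without dropping the reference held on ifp for the DAD work, slowly leaking inet6_ifaddr objects. Shape of the fix, with the balancing put made explicit:

    if (addrconf_dad_end(ifp)) {
            in6_ifa_put(ifp);       /* balance the hold taken for DAD */
            return;
    }
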
@@ -2021,10 +2024,11 @@ ok: | |||
2021 | ipv6_ifa_notify(0, ift); | 2024 | ipv6_ifa_notify(0, ift); |
2022 | } | 2025 | } |
2023 | 2026 | ||
2024 | if (create && in6_dev->cnf.use_tempaddr > 0) { | 2027 | if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) { |
2025 | /* | 2028 | /* |
2026 | * When a new public address is created as described in [ADDRCONF], | 2029 | * When a new public address is created as described in [ADDRCONF], |
2027 | * also create a new temporary address. | 2030 | * also create a new temporary address. Also create a temporary |
2031 | * address if it's enabled but no temporary address currently exists. | ||
2028 | */ | 2032 | */ |
2029 | read_unlock_bh(&in6_dev->lock); | 2033 | read_unlock_bh(&in6_dev->lock); |
2030 | ipv6_create_tempaddr(ifp, NULL); | 2034 | ipv6_create_tempaddr(ifp, NULL); |
@@ -2736,10 +2740,6 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2736 | /* Flag it for later restoration when link comes up */ | 2740 | /* Flag it for later restoration when link comes up */ |
2737 | ifa->flags |= IFA_F_TENTATIVE; | 2741 | ifa->flags |= IFA_F_TENTATIVE; |
2738 | ifa->state = INET6_IFADDR_STATE_DAD; | 2742 | ifa->state = INET6_IFADDR_STATE_DAD; |
2739 | |||
2740 | write_unlock_bh(&idev->lock); | ||
2741 | |||
2742 | in6_ifa_hold(ifa); | ||
2743 | } else { | 2743 | } else { |
2744 | list_del(&ifa->if_list); | 2744 | list_del(&ifa->if_list); |
2745 | 2745 | ||
@@ -2754,19 +2754,15 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2754 | ifa->state = INET6_IFADDR_STATE_DEAD; | 2754 | ifa->state = INET6_IFADDR_STATE_DEAD; |
2755 | spin_unlock_bh(&ifa->state_lock); | 2755 | spin_unlock_bh(&ifa->state_lock); |
2756 | 2756 | ||
2757 | if (state == INET6_IFADDR_STATE_DEAD) | 2757 | if (state == INET6_IFADDR_STATE_DEAD) { |
2758 | goto put_ifa; | 2758 | in6_ifa_put(ifa); |
2759 | } else { | ||
2760 | __ipv6_ifa_notify(RTM_DELADDR, ifa); | ||
2761 | atomic_notifier_call_chain(&inet6addr_chain, | ||
2762 | NETDEV_DOWN, ifa); | ||
2763 | } | ||
2764 | write_lock_bh(&idev->lock); | ||
2759 | } | 2765 | } |
2760 | |||
2761 | __ipv6_ifa_notify(RTM_DELADDR, ifa); | ||
2762 | if (ifa->state == INET6_IFADDR_STATE_DEAD) | ||
2763 | atomic_notifier_call_chain(&inet6addr_chain, | ||
2764 | NETDEV_DOWN, ifa); | ||
2765 | |||
2766 | put_ifa: | ||
2767 | in6_ifa_put(ifa); | ||
2768 | |||
2769 | write_lock_bh(&idev->lock); | ||
2770 | } | 2766 | } |
2771 | 2767 | ||
2772 | list_splice(&keep_list, &idev->addr_list); | 2768 | list_splice(&keep_list, &idev->addr_list); |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index c2c0f89397b1..2a59610c2a58 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1284,6 +1284,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1284 | t = netdev_priv(dev); | 1284 | t = netdev_priv(dev); |
1285 | 1285 | ||
1286 | ip6_tnl_unlink(ip6n, t); | 1286 | ip6_tnl_unlink(ip6n, t); |
1287 | synchronize_net(); | ||
1287 | err = ip6_tnl_change(t, &p); | 1288 | err = ip6_tnl_change(t, &p); |
1288 | ip6_tnl_link(ip6n, t); | 1289 | ip6_tnl_link(ip6n, t); |
1289 | netdev_state_change(dev); | 1290 | netdev_state_change(dev); |
@@ -1371,6 +1372,7 @@ static void ip6_tnl_dev_setup(struct net_device *dev) | |||
1371 | dev->flags |= IFF_NOARP; | 1372 | dev->flags |= IFF_NOARP; |
1372 | dev->addr_len = sizeof(struct in6_addr); | 1373 | dev->addr_len = sizeof(struct in6_addr); |
1373 | dev->features |= NETIF_F_NETNS_LOCAL; | 1374 | dev->features |= NETIF_F_NETNS_LOCAL; |
1375 | dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; | ||
1374 | } | 1376 | } |
1375 | 1377 | ||
1376 | 1378 | ||
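
Two fixes in this file. The synchronize_net() between unlink and change closes a window where a reader that looked the tunnel up by its old addresses could still be running while the parameters are rewritten under it:

    ip6_tnl_unlink(ip6n, t);        /* unpublish from the RCU hash */
    synchronize_net();              /* wait out in-flight lookups */
    err = ip6_tnl_change(t, &p);    /* old identity now unreachable */
    ip6_tnl_link(ip6n, t);          /* republish under the new keys */

Clearing IFF_XMIT_DST_RELEASE, meanwhile, stops the core from dropping skb->dst before handing packets to the tunnel, whose transmit path needs the cached dst to route the encapsulated packet.
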
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 0553867a317f..d1770e061c08 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -343,6 +343,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
343 | break; | 343 | break; |
344 | 344 | ||
345 | case IPV6_TRANSPARENT: | 345 | case IPV6_TRANSPARENT: |
346 | if (!capable(CAP_NET_ADMIN)) { | ||
347 | retv = -EPERM; | ||
348 | break; | ||
349 | } | ||
346 | if (optlen < sizeof(int)) | 350 | if (optlen < sizeof(int)) |
347 | goto e_inval; | 351 | goto e_inval; |
348 | /* we don't have a separate transparent bit for IPV6 we use the one in the IPv4 socket */ | 352 | /* we don't have a separate transparent bit for IPV6 we use the one in the IPv4 socket */ |
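
IPV6_TRANSPARENT toggles the same transparent-proxy bit as IPv4's IP_TRANSPARENT, which has always been privileged; without this check any user could mark sockets for tproxy. From userspace the new behaviour looks like the following minimal sketch (the IPV6_TRANSPARENT value, 75 in linux/in6.h on kernels of this vintage, is defined locally in case the libc headers lack it):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef IPV6_TRANSPARENT
    #define IPV6_TRANSPARENT 75     /* from linux/in6.h */
    #endif

    int main(void)
    {
            int fd = socket(AF_INET6, SOCK_STREAM, 0);
            int on = 1;

            if (fd < 0)
                    return 1;
            if (setsockopt(fd, IPPROTO_IPV6, IPV6_TRANSPARENT,
                           &on, sizeof(on)) < 0)
                    perror("IPV6_TRANSPARENT"); /* EPERM w/o CAP_NET_ADMIN */
            return 0;
    }
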
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 44d2eeac089b..448464844a25 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig | |||
@@ -5,10 +5,15 @@ | |||
5 | menu "IPv6: Netfilter Configuration" | 5 | menu "IPv6: Netfilter Configuration" |
6 | depends on INET && IPV6 && NETFILTER | 6 | depends on INET && IPV6 && NETFILTER |
7 | 7 | ||
8 | config NF_DEFRAG_IPV6 | ||
9 | tristate | ||
10 | default n | ||
11 | |||
8 | config NF_CONNTRACK_IPV6 | 12 | config NF_CONNTRACK_IPV6 |
9 | tristate "IPv6 connection tracking support" | 13 | tristate "IPv6 connection tracking support" |
10 | depends on INET && IPV6 && NF_CONNTRACK | 14 | depends on INET && IPV6 && NF_CONNTRACK |
11 | default m if NETFILTER_ADVANCED=n | 15 | default m if NETFILTER_ADVANCED=n |
16 | select NF_DEFRAG_IPV6 | ||
12 | ---help--- | 17 | ---help--- |
13 | Connection tracking keeps a record of what packets have passed | 18 | Connection tracking keeps a record of what packets have passed |
14 | through your machine, in order to figure out how they are related | 19 | through your machine, in order to figure out how they are related |
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index 3f8e4a3d83ce..0a432c9b0795 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile | |||
@@ -12,11 +12,14 @@ obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o | |||
12 | 12 | ||
13 | # objects for l3 independent conntrack | 13 | # objects for l3 independent conntrack |
14 | nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o | 14 | nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o |
15 | nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o | ||
16 | 15 | ||
17 | # l3 independent conntrack | 16 | # l3 independent conntrack |
18 | obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o | 17 | obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o |
19 | 18 | ||
19 | # defrag | ||
20 | nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o | ||
21 | obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o | ||
22 | |||
20 | # matches | 23 | # matches |
21 | obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o | 24 | obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o |
22 | obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o | 25 | obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 51df035897e7..455582384ece 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -1137,6 +1137,7 @@ static int get_info(struct net *net, void __user *user, | |||
1137 | private = &tmp; | 1137 | private = &tmp; |
1138 | } | 1138 | } |
1139 | #endif | 1139 | #endif |
1140 | memset(&info, 0, sizeof(info)); | ||
1140 | info.valid_hooks = t->valid_hooks; | 1141 | info.valid_hooks = t->valid_hooks; |
1141 | memcpy(info.hook_entry, private->hook_entry, | 1142 | memcpy(info.hook_entry, private->hook_entry, |
1142 | sizeof(info.hook_entry)); | 1143 | sizeof(info.hook_entry)); |
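
A classic infoleak fix: get_info() fills some members of a stack struct that is later copied wholesale to userspace, so compiler padding and the unwritten tail of the name field would otherwise carry stale kernel stack bytes out. Zeroing first makes the copy safe no matter which fields get filled:

    struct ip6t_getinfo info;

    memset(&info, 0, sizeof(info)); /* no stack bytes leak via padding */
    info.valid_hooks = t->valid_hooks;
    /* ... remaining fields, then copy_to_user(user, &info, *len) */
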
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 489d71b844ac..79d43aa8fa8d 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -286,7 +286,7 @@ found: | |||
286 | 286 | ||
287 | /* Check for overlap with preceding fragment. */ | 287 | /* Check for overlap with preceding fragment. */ |
288 | if (prev && | 288 | if (prev && |
289 | (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0) | 289 | (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset) |
290 | goto discard_fq; | 290 | goto discard_fq; |
291 | 291 | ||
292 | /* Look for overlap with succeeding segment. */ | 292 | /* Look for overlap with succeeding segment. */ |
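
The old overlap test subtracted the (signed) new-fragment offset from an unsigned sum, so the whole expression was evaluated in unsigned arithmetic: when the new fragment starts beyond the end of the previous one, the subtraction wraps to a huge positive value, "> 0" holds, and a perfectly valid fragment is discarded. The rewritten comparison never subtracts. A stand-alone demonstration of the wrap:

    #include <stdio.h>

    int main(void)
    {
            unsigned int prev_end = 100;    /* prev->offset + prev->len */
            int offset = 200;               /* next fragment starts later */

            /* unsigned wrap: 100u - 200u is huge, so this prints 1 */
            printf("old test: %d\n", (prev_end - offset) > 0);
            /* direct comparison: prints 0, no false overlap */
            printf("new test: %d\n", prev_end > (unsigned int)offset);
            return 0;
    }
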
@@ -625,21 +625,24 @@ int nf_ct_frag6_init(void) | |||
625 | inet_frags_init_net(&nf_init_frags); | 625 | inet_frags_init_net(&nf_init_frags); |
626 | inet_frags_init(&nf_frags); | 626 | inet_frags_init(&nf_frags); |
627 | 627 | ||
628 | #ifdef CONFIG_SYSCTL | ||
628 | nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path, | 629 | nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path, |
629 | nf_ct_frag6_sysctl_table); | 630 | nf_ct_frag6_sysctl_table); |
630 | if (!nf_ct_frag6_sysctl_header) { | 631 | if (!nf_ct_frag6_sysctl_header) { |
631 | inet_frags_fini(&nf_frags); | 632 | inet_frags_fini(&nf_frags); |
632 | return -ENOMEM; | 633 | return -ENOMEM; |
633 | } | 634 | } |
635 | #endif | ||
634 | 636 | ||
635 | return 0; | 637 | return 0; |
636 | } | 638 | } |
637 | 639 | ||
638 | void nf_ct_frag6_cleanup(void) | 640 | void nf_ct_frag6_cleanup(void) |
639 | { | 641 | { |
642 | #ifdef CONFIG_SYSCTL | ||
640 | unregister_sysctl_table(nf_ct_frag6_sysctl_header); | 643 | unregister_sysctl_table(nf_ct_frag6_sysctl_header); |
641 | nf_ct_frag6_sysctl_header = NULL; | 644 | nf_ct_frag6_sysctl_header = NULL; |
642 |||
645 | #endif | ||
643 | inet_frags_fini(&nf_frags); | 646 | inet_frags_fini(&nf_frags); |
644 | 647 | ||
645 | nf_init_frags.low_thresh = 0; | 648 | nf_init_frags.low_thresh = 0; |
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index d082eaeefa25..24b3558b8e67 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
@@ -126,6 +126,8 @@ static const struct snmp_mib snmp6_udp6_list[] = { | |||
126 | SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS), | 126 | SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS), |
127 | SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS), | 127 | SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS), |
128 | SNMP_MIB_ITEM("Udp6OutDatagrams", UDP_MIB_OUTDATAGRAMS), | 128 | SNMP_MIB_ITEM("Udp6OutDatagrams", UDP_MIB_OUTDATAGRAMS), |
129 | SNMP_MIB_ITEM("Udp6RcvbufErrors", UDP_MIB_RCVBUFERRORS), | ||
130 | SNMP_MIB_ITEM("Udp6SndbufErrors", UDP_MIB_SNDBUFERRORS), | ||
129 | SNMP_MIB_SENTINEL | 131 | SNMP_MIB_SENTINEL |
130 | }; | 132 | }; |
131 | 133 | ||
@@ -134,6 +136,8 @@ static const struct snmp_mib snmp6_udplite6_list[] = { | |||
134 | SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS), | 136 | SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS), |
135 | SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS), | 137 | SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS), |
136 | SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS), | 138 | SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS), |
139 | SNMP_MIB_ITEM("UdpLite6RcvbufErrors", UDP_MIB_RCVBUFERRORS), | ||
140 | SNMP_MIB_ITEM("UdpLite6SndbufErrors", UDP_MIB_SNDBUFERRORS), | ||
137 | SNMP_MIB_SENTINEL | 141 | SNMP_MIB_SENTINEL |
138 | }; | 142 | }; |
139 | 143 | ||
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c index 9bb936ae2452..9a7978fdc02a 100644 --- a/net/ipv6/protocol.c +++ b/net/ipv6/protocol.c | |||
@@ -25,13 +25,14 @@ | |||
25 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
26 | #include <net/protocol.h> | 26 | #include <net/protocol.h> |
27 | 27 | ||
28 | const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS] __read_mostly; | 28 | const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly; |
29 | 29 | ||
30 | int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol) | 30 | int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol) |
31 | { | 31 | { |
32 | int hash = protocol & (MAX_INET_PROTOS - 1); | 32 | int hash = protocol & (MAX_INET_PROTOS - 1); |
33 | 33 | ||
34 | return !cmpxchg(&inet6_protos[hash], NULL, prot) ? 0 : -1; | 34 | return !cmpxchg((const struct inet6_protocol **)&inet6_protos[hash], |
35 | NULL, prot) ? 0 : -1; | ||
35 | } | 36 | } |
36 | EXPORT_SYMBOL(inet6_add_protocol); | 37 | EXPORT_SYMBOL(inet6_add_protocol); |
37 | 38 | ||
@@ -43,7 +44,8 @@ int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol | |||
43 | { | 44 | { |
44 | int ret, hash = protocol & (MAX_INET_PROTOS - 1); | 45 | int ret, hash = protocol & (MAX_INET_PROTOS - 1); |
45 | 46 | ||
46 | ret = (cmpxchg(&inet6_protos[hash], prot, NULL) == prot) ? 0 : -1; | 47 | ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[hash], |
48 | prot, NULL) == prot) ? 0 : -1; | ||
47 | 49 | ||
48 | synchronize_net(); | 50 | synchronize_net(); |
49 | 51 | ||
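
The cmpxchg() keeps registration lock-free: a slot flips NULL to handler (or handler back to NULL on removal) in one atomic step, and a concurrent second registrant simply loses the race and gets -1. The new cast exists only to strip the __rcu annotation, since cmpxchg() operates on plain pointers. Generic shape, demo names:

    static const struct demo_proto __rcu *demo_slots[16];

    static int demo_add(const struct demo_proto *p, unsigned int hash)
    {
            /* 0 if we installed p, -1 if the slot was already claimed */
            return !cmpxchg((const struct demo_proto **)&demo_slots[hash],
                            NULL, p) ? 0 : -1;
    }
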
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 45e6efb7f171..86c39526ba5e 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -373,7 +373,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr, | |||
373 | 373 | ||
374 | static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb) | 374 | static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb) |
375 | { | 375 | { |
376 | if ((raw6_sk(sk)->checksum || sk->sk_filter) && | 376 | if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) && |
377 | skb_checksum_complete(skb)) { | 377 | skb_checksum_complete(skb)) { |
378 | atomic_inc(&sk->sk_drops); | 378 | atomic_inc(&sk->sk_drops); |
379 | kfree_skb(skb); | 379 | kfree_skb(skb); |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index c7ba3149633f..0f2766453759 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -349,7 +349,7 @@ found: | |||
349 | 349 | ||
350 | /* Check for overlap with preceding fragment. */ | 350 | /* Check for overlap with preceding fragment. */ |
351 | if (prev && | 351 | if (prev && |
352 | (FRAG6_CB(prev)->offset + prev->len) - offset > 0) | 352 | (FRAG6_CB(prev)->offset + prev->len) > offset) |
353 | goto discard_fq; | 353 | goto discard_fq; |
354 | 354 | ||
355 | /* Look for overlap with succeeding segment. */ | 355 | /* Look for overlap with succeeding segment. */ |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 25661f968f3f..96455ffb76fb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1945,8 +1945,12 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
1945 | struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); | 1945 | struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); |
1946 | struct neighbour *neigh; | 1946 | struct neighbour *neigh; |
1947 | 1947 | ||
1948 | if (rt == NULL) | 1948 | if (rt == NULL) { |
1949 | if (net_ratelimit()) | ||
1950 | pr_warning("IPv6: Maximum number of routes reached," | ||
1951 | " consider increasing route/max_size.\n"); | ||
1949 | return ERR_PTR(-ENOMEM); | 1952 | return ERR_PTR(-ENOMEM); |
1953 | } | ||
1950 | 1954 | ||
1951 | dev_hold(net->loopback_dev); | 1955 | dev_hold(net->loopback_dev); |
1952 | in6_dev_hold(idev); | 1956 | in6_dev_hold(idev); |
@@ -2741,6 +2745,7 @@ static void __net_exit ip6_route_net_exit(struct net *net) | |||
2741 | kfree(net->ipv6.ip6_prohibit_entry); | 2745 | kfree(net->ipv6.ip6_prohibit_entry); |
2742 | kfree(net->ipv6.ip6_blk_hole_entry); | 2746 | kfree(net->ipv6.ip6_blk_hole_entry); |
2743 | #endif | 2747 | #endif |
2748 | dst_entries_destroy(&net->ipv6.ip6_dst_ops); | ||
2744 | } | 2749 | } |
2745 | 2750 | ||
2746 | static struct pernet_operations ip6_route_net_ops = { | 2751 | static struct pernet_operations ip6_route_net_ops = { |
@@ -2832,5 +2837,6 @@ void ip6_route_cleanup(void) | |||
2832 | xfrm6_fini(); | 2837 | xfrm6_fini(); |
2833 | fib6_gc_cleanup(); | 2838 | fib6_gc_cleanup(); |
2834 | unregister_pernet_subsys(&ip6_route_net_ops); | 2839 | unregister_pernet_subsys(&ip6_route_net_ops); |
2840 | dst_entries_destroy(&ip6_dst_blackhole_ops); | ||
2835 | kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep); | 2841 | kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep); |
2836 | } | 2842 | } |
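
Both teardown hunks plug the same leak: struct dst_ops now embeds a percpu entries counter, so every exit path that undoes dst initialisation must call dst_entries_destroy() or the percpu allocation outlives the netns or module. The pairing to keep in mind, assuming the dst_entries_init()/dst_entries_destroy() helpers introduced alongside this series:

    err = dst_entries_init(&ops);   /* percpu_counter_init() inside */
    if (err)
            goto fail;
    /* ... use ops ... */
    dst_entries_destroy(&ops);      /* percpu_counter_destroy() inside */

The ratelimited pr_warning in addrconf_dst_alloc() is independent: hitting route/max_size used to fail with a silent -ENOMEM, which was painful to diagnose.
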
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 367a6cc584cc..d6bfaec3bbbf 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -963,6 +963,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
963 | } | 963 | } |
964 | t = netdev_priv(dev); | 964 | t = netdev_priv(dev); |
965 | ipip6_tunnel_unlink(sitn, t); | 965 | ipip6_tunnel_unlink(sitn, t); |
966 | synchronize_net(); | ||
966 | t->parms.iph.saddr = p.iph.saddr; | 967 | t->parms.iph.saddr = p.iph.saddr; |
967 | t->parms.iph.daddr = p.iph.daddr; | 968 | t->parms.iph.daddr = p.iph.daddr; |
968 | memcpy(dev->dev_addr, &p.iph.saddr, 4); | 969 | memcpy(dev->dev_addr, &p.iph.saddr, 4); |
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c index d9864725d0c6..4f3cec12aa85 100644 --- a/net/ipv6/tunnel6.c +++ b/net/ipv6/tunnel6.c | |||
@@ -30,23 +30,26 @@ | |||
30 | #include <net/protocol.h> | 30 | #include <net/protocol.h> |
31 | #include <net/xfrm.h> | 31 | #include <net/xfrm.h> |
32 | 32 | ||
33 | static struct xfrm6_tunnel *tunnel6_handlers __read_mostly; | 33 | static struct xfrm6_tunnel __rcu *tunnel6_handlers __read_mostly; |
34 | static struct xfrm6_tunnel *tunnel46_handlers __read_mostly; | 34 | static struct xfrm6_tunnel __rcu *tunnel46_handlers __read_mostly; |
35 | static DEFINE_MUTEX(tunnel6_mutex); | 35 | static DEFINE_MUTEX(tunnel6_mutex); |
36 | 36 | ||
37 | int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family) | 37 | int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family) |
38 | { | 38 | { |
39 | struct xfrm6_tunnel **pprev; | 39 | struct xfrm6_tunnel __rcu **pprev; |
40 | struct xfrm6_tunnel *t; | ||
40 | int ret = -EEXIST; | 41 | int ret = -EEXIST; |
41 | int priority = handler->priority; | 42 | int priority = handler->priority; |
42 | 43 | ||
43 | mutex_lock(&tunnel6_mutex); | 44 | mutex_lock(&tunnel6_mutex); |
44 | 45 | ||
45 | for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers; | 46 | for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers; |
46 | *pprev; pprev = &(*pprev)->next) { | 47 | (t = rcu_dereference_protected(*pprev, |
47 | if ((*pprev)->priority > priority) | 48 | lockdep_is_held(&tunnel6_mutex))) != NULL; |
49 | pprev = &t->next) { | ||
50 | if (t->priority > priority) | ||
48 | break; | 51 | break; |
49 | if ((*pprev)->priority == priority) | 52 | if (t->priority == priority) |
50 | goto err; | 53 | goto err; |
51 | } | 54 | } |
52 | 55 | ||
@@ -65,14 +68,17 @@ EXPORT_SYMBOL(xfrm6_tunnel_register); | |||
65 | 68 | ||
66 | int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family) | 69 | int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family) |
67 | { | 70 | { |
68 | struct xfrm6_tunnel **pprev; | 71 | struct xfrm6_tunnel __rcu **pprev; |
72 | struct xfrm6_tunnel *t; | ||
69 | int ret = -ENOENT; | 73 | int ret = -ENOENT; |
70 | 74 | ||
71 | mutex_lock(&tunnel6_mutex); | 75 | mutex_lock(&tunnel6_mutex); |
72 | 76 | ||
73 | for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers; | 77 | for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers; |
74 | *pprev; pprev = &(*pprev)->next) { | 78 | (t = rcu_dereference_protected(*pprev, |
75 | if (*pprev == handler) { | 79 | lockdep_is_held(&tunnel6_mutex))) != NULL; |
80 | pprev = &t->next) { | ||
81 | if (t == handler) { | ||
76 | *pprev = handler->next; | 82 | *pprev = handler->next; |
77 | ret = 0; | 83 | ret = 0; |
78 | break; | 84 | break; |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index c84dad432114..91def93bec85 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -527,7 +527,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | |||
527 | } | 527 | } |
528 | } | 528 | } |
529 | 529 | ||
530 | if (sk->sk_filter) { | 530 | if (rcu_dereference_raw(sk->sk_filter)) { |
531 | if (udp_lib_checksum_complete(skb)) | 531 | if (udp_lib_checksum_complete(skb)) |
532 | goto drop; | 532 | goto drop; |
533 | } | 533 | } |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 1712af1c7b3f..c64ce0a0bb03 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -111,6 +111,10 @@ struct l2tp_net { | |||
111 | spinlock_t l2tp_session_hlist_lock; | 111 | spinlock_t l2tp_session_hlist_lock; |
112 | }; | 112 | }; |
113 | 113 | ||
114 | static void l2tp_session_set_header_len(struct l2tp_session *session, int version); | ||
115 | static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); | ||
116 | static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); | ||
117 | |||
114 | static inline struct l2tp_net *l2tp_pernet(struct net *net) | 118 | static inline struct l2tp_net *l2tp_pernet(struct net *net) |
115 | { | 119 | { |
116 | BUG_ON(!net); | 120 | BUG_ON(!net); |
@@ -118,6 +122,34 @@ static inline struct l2tp_net *l2tp_pernet(struct net *net) | |||
118 | return net_generic(net, l2tp_net_id); | 122 | return net_generic(net, l2tp_net_id); |
119 | } | 123 | } |
120 | 124 | ||
125 | |||
126 | /* Tunnel reference counts. Incremented per session that is added to | ||
127 | * the tunnel. | ||
128 | */ | ||
129 | static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel) | ||
130 | { | ||
131 | atomic_inc(&tunnel->ref_count); | ||
132 | } | ||
133 | |||
134 | static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel) | ||
135 | { | ||
136 | if (atomic_dec_and_test(&tunnel->ref_count)) | ||
137 | l2tp_tunnel_free(tunnel); | ||
138 | } | ||
139 | #ifdef L2TP_REFCNT_DEBUG | ||
140 | #define l2tp_tunnel_inc_refcount(_t) do { \ | ||
141 | printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \ | ||
142 | l2tp_tunnel_inc_refcount_1(_t); \ | ||
143 | } while (0) | ||
144 | #define l2tp_tunnel_dec_refcount(_t) do { \ | ||
145 | printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \ | ||
146 | l2tp_tunnel_dec_refcount_1(_t); \ | ||
147 | } while (0) | ||
148 | #else | ||
149 | #define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t) | ||
150 | #define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t) | ||
151 | #endif | ||
152 | |||
121 | /* Session hash global list for L2TPv3. | 153 | /* Session hash global list for L2TPv3. |
122 | * The session_id SHOULD be random according to RFC3931, but several | 154 | * The session_id SHOULD be random according to RFC3931, but several |
123 | * L2TP implementations use incrementing session_ids. So we do a real | 155 | * L2TP implementations use incrementing session_ids. So we do a real |
@@ -699,8 +731,8 @@ EXPORT_SYMBOL(l2tp_recv_common); | |||
699 | * Returns 1 if the packet was not a good data packet and could not be | 731 | * Returns 1 if the packet was not a good data packet and could not be |
700 | * forwarded. All such packets are passed up to userspace to deal with. | 732 | * forwarded. All such packets are passed up to userspace to deal with. |
701 | */ | 733 | */ |
702 | int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, | 734 | static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, |
703 | int (*payload_hook)(struct sk_buff *skb)) | 735 | int (*payload_hook)(struct sk_buff *skb)) |
704 | { | 736 | { |
705 | struct l2tp_session *session = NULL; | 737 | struct l2tp_session *session = NULL; |
706 | unsigned char *ptr, *optr; | 738 | unsigned char *ptr, *optr; |
@@ -812,7 +844,6 @@ error: | |||
812 | 844 | ||
813 | return 1; | 845 | return 1; |
814 | } | 846 | } |
815 | EXPORT_SYMBOL_GPL(l2tp_udp_recv_core); | ||
816 | 847 | ||
817 | /* UDP encapsulation receive handler. See net/ipv4/udp.c. | 848 | /* UDP encapsulation receive handler. See net/ipv4/udp.c. |
818 | * Return codes: | 849 | * Return codes: |
@@ -922,7 +953,8 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf) | |||
922 | return bufp - optr; | 953 | return bufp - optr; |
923 | } | 954 | } |
924 | 955 | ||
925 | int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len) | 956 | static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, |
957 | size_t data_len) | ||
926 | { | 958 | { |
927 | struct l2tp_tunnel *tunnel = session->tunnel; | 959 | struct l2tp_tunnel *tunnel = session->tunnel; |
928 | unsigned int len = skb->len; | 960 | unsigned int len = skb->len; |
@@ -970,7 +1002,6 @@ int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t dat | |||
970 | 1002 | ||
971 | return 0; | 1003 | return 0; |
972 | } | 1004 | } |
973 | EXPORT_SYMBOL_GPL(l2tp_xmit_core); | ||
974 | 1005 | ||
975 | /* Automatically called when the skb is freed. | 1006 | /* Automatically called when the skb is freed. |
976 | */ | 1007 | */ |
@@ -1089,7 +1120,7 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb); | |||
1089 | * The tunnel context is deleted only when all session sockets have been | 1120 | * The tunnel context is deleted only when all session sockets have been |
1090 | * closed. | 1121 | * closed. |
1091 | */ | 1122 | */ |
1092 | void l2tp_tunnel_destruct(struct sock *sk) | 1123 | static void l2tp_tunnel_destruct(struct sock *sk) |
1093 | { | 1124 | { |
1094 | struct l2tp_tunnel *tunnel; | 1125 | struct l2tp_tunnel *tunnel; |
1095 | 1126 | ||
@@ -1128,11 +1159,10 @@ void l2tp_tunnel_destruct(struct sock *sk) | |||
1128 | end: | 1159 | end: |
1129 | return; | 1160 | return; |
1130 | } | 1161 | } |
1131 | EXPORT_SYMBOL(l2tp_tunnel_destruct); | ||
1132 | 1162 | ||
1133 | /* When the tunnel is closed, all the attached sessions need to go too. | 1163 | /* When the tunnel is closed, all the attached sessions need to go too. |
1134 | */ | 1164 | */ |
1135 | void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) | 1165 | static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) |
1136 | { | 1166 | { |
1137 | int hash; | 1167 | int hash; |
1138 | struct hlist_node *walk; | 1168 | struct hlist_node *walk; |
@@ -1193,12 +1223,11 @@ again: | |||
1193 | } | 1223 | } |
1194 | write_unlock_bh(&tunnel->hlist_lock); | 1224 | write_unlock_bh(&tunnel->hlist_lock); |
1195 | } | 1225 | } |
1196 | EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall); | ||
1197 | 1226 | ||
1198 | /* Really kill the tunnel. | 1227 | /* Really kill the tunnel. |
1199 | * Come here only when all sessions have been cleared from the tunnel. | 1228 | * Come here only when all sessions have been cleared from the tunnel. |
1200 | */ | 1229 | */ |
1201 | void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) | 1230 | static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) |
1202 | { | 1231 | { |
1203 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); | 1232 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); |
1204 | 1233 | ||
@@ -1217,7 +1246,6 @@ void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) | |||
1217 | atomic_dec(&l2tp_tunnel_count); | 1246 | atomic_dec(&l2tp_tunnel_count); |
1218 | kfree(tunnel); | 1247 | kfree(tunnel); |
1219 | } | 1248 | } |
1220 | EXPORT_SYMBOL_GPL(l2tp_tunnel_free); | ||
1221 | 1249 | ||
1222 | /* Create a socket for the tunnel, if one isn't set up by | 1250 | /* Create a socket for the tunnel, if one isn't set up by |
1223 | * userspace. This is used for static tunnels where there is no | 1251 | * userspace. This is used for static tunnels where there is no |
@@ -1512,7 +1540,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_delete); | |||
1512 | /* We come here whenever a session's send_seq, cookie_len or | 1540 | /* We come here whenever a session's send_seq, cookie_len or |
1513 | * l2specific_len parameters are set. | 1541 | * l2specific_len parameters are set. |
1514 | */ | 1542 | */ |
1515 | void l2tp_session_set_header_len(struct l2tp_session *session, int version) | 1543 | static void l2tp_session_set_header_len(struct l2tp_session *session, int version) |
1516 | { | 1544 | { |
1517 | if (version == L2TP_HDR_VER_2) { | 1545 | if (version == L2TP_HDR_VER_2) { |
1518 | session->hdr_len = 6; | 1546 | session->hdr_len = 6; |
@@ -1525,7 +1553,6 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version) | |||
1525 | } | 1553 | } |
1526 | 1554 | ||
1527 | } | 1555 | } |
1528 | EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); | ||
1529 | 1556 | ||
1530 | struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) | 1557 | struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) |
1531 | { | 1558 | { |
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index f0f318edd3f1..a16a48e79fab 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
@@ -231,48 +231,15 @@ extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_i | |||
231 | extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); | 231 | extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); |
232 | extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); | 232 | extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); |
233 | extern int l2tp_session_delete(struct l2tp_session *session); | 233 | extern int l2tp_session_delete(struct l2tp_session *session); |
234 | extern void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); | ||
235 | extern void l2tp_session_free(struct l2tp_session *session); | 234 | extern void l2tp_session_free(struct l2tp_session *session); |
236 | extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); | 235 | extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); |
237 | extern int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, int (*payload_hook)(struct sk_buff *skb)); | ||
238 | extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); | 236 | extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); |
239 | 237 | ||
240 | extern int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len); | ||
241 | extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); | 238 | extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); |
242 | extern void l2tp_tunnel_destruct(struct sock *sk); | ||
243 | extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); | ||
244 | extern void l2tp_session_set_header_len(struct l2tp_session *session, int version); | ||
245 | 239 | ||
246 | extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops); | 240 | extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops); |
247 | extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); | 241 | extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); |
248 | 242 | ||
249 | /* Tunnel reference counts. Incremented per session that is added to | ||
250 | * the tunnel. | ||
251 | */ | ||
252 | static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel) | ||
253 | { | ||
254 | atomic_inc(&tunnel->ref_count); | ||
255 | } | ||
256 | |||
257 | static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel) | ||
258 | { | ||
259 | if (atomic_dec_and_test(&tunnel->ref_count)) | ||
260 | l2tp_tunnel_free(tunnel); | ||
261 | } | ||
262 | #ifdef L2TP_REFCNT_DEBUG | ||
263 | #define l2tp_tunnel_inc_refcount(_t) do { \ | ||
264 | printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \ | ||
265 | l2tp_tunnel_inc_refcount_1(_t); \ | ||
266 | } while (0) | ||
267 | #define l2tp_tunnel_dec_refcount(_t) do { \ | ||
268 | printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \ | ||
269 | l2tp_tunnel_dec_refcount_1(_t); \ | ||
270 | } while (0) | ||
271 | #else | ||
272 | #define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t) | ||
273 | #define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t) | ||
274 | #endif | ||
275 | |||
276 | /* Session reference counts. Incremented when code obtains a reference | 243 | /* Session reference counts. Incremented when code obtains a reference |
277 | * to a session. | 244 | * to a session. |
278 | */ | 245 | */ |
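
Why the tunnel refcount helpers had to move from the header into l2tp_core.c: they are static inlines that call l2tp_tunnel_free(), and once that function became static to l2tp_core.c no inline living in a shared header could reference it. The core of the pattern:

    /* forward declaration near the top of l2tp_core.c */
    static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);

    static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
    {
            if (atomic_dec_and_test(&tunnel->ref_count))
                    l2tp_tunnel_free(tunnel);       /* now file-local */
    }

Everything else in these two hunks is the same cleanup: functions used by no other module become static and lose their EXPORT_SYMBOL[_GPL] lines and extern prototypes.
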
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 104ec3b283d4..b8dbae82fab8 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c | |||
@@ -249,7 +249,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file) | |||
249 | struct seq_file *seq; | 249 | struct seq_file *seq; |
250 | int rc = -ENOMEM; | 250 | int rc = -ENOMEM; |
251 | 251 | ||
252 | pd = kzalloc(GFP_KERNEL, sizeof(*pd)); | 252 | pd = kzalloc(sizeof(*pd), GFP_KERNEL); |
253 | if (pd == NULL) | 253 | if (pd == NULL) |
254 | goto out; | 254 | goto out; |
255 | 255 | ||
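
A one-line bug class worth remembering: kzalloc(size, flags) had its arguments swapped, so the allocation size was the GFP_KERNEL bit mask (0xd0, i.e. 208 bytes, on kernels of this era) and the GFP flags were sizeof(*pd). It presumably escaped notice only because the bogus size happened to be large enough. The corrected call:

    pd = kzalloc(sizeof(*pd), GFP_KERNEL);  /* size first, flags second */
    if (pd == NULL)
            goto out;
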
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 1c770c0644d1..0bf6a59545ab 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
@@ -576,7 +576,7 @@ out: | |||
576 | return copied; | 576 | return copied; |
577 | } | 577 | } |
578 | 578 | ||
579 | struct proto l2tp_ip_prot = { | 579 | static struct proto l2tp_ip_prot = { |
580 | .name = "L2TP/IP", | 580 | .name = "L2TP/IP", |
581 | .owner = THIS_MODULE, | 581 | .owner = THIS_MODULE, |
582 | .init = l2tp_ip_open, | 582 | .init = l2tp_ip_open, |
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index 4aa47d074a79..1243d1db5c59 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c | |||
@@ -203,9 +203,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf, | |||
203 | size_t count, loff_t *ppos) | 203 | size_t count, loff_t *ppos) |
204 | { | 204 | { |
205 | struct ieee80211_key *key = file->private_data; | 205 | struct ieee80211_key *key = file->private_data; |
206 | int i, res, bufsize = 2 * key->conf.keylen + 2; | 206 | int i, bufsize = 2 * key->conf.keylen + 2; |
207 | char *buf = kmalloc(bufsize, GFP_KERNEL); | 207 | char *buf = kmalloc(bufsize, GFP_KERNEL); |
208 | char *p = buf; | 208 | char *p = buf; |
209 | ssize_t res; | ||
210 | |||
211 | if (!buf) | ||
212 | return -ENOMEM; | ||
209 | 213 | ||
210 | for (i = 0; i < key->conf.keylen; i++) | 214 | for (i = 0; i < key->conf.keylen; i++) |
211 | p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]); | 215 | p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]); |
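
Two fixes in one hunk: kmalloc() can fail, and scnprintf()-ing hex digits through a NULL buf would oops, so the result is now checked before use; and res widens to ssize_t, matching what a debugfs .read handler must return. Pattern:

    char *buf = kmalloc(bufsize, GFP_KERNEL);
    ssize_t res;

    if (!buf)
            return -ENOMEM;         /* never format into a NULL buffer */
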
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index ff60c022f51d..239c4836a946 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -456,6 +456,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, | |||
456 | if (!sta) | 456 | if (!sta) |
457 | return NULL; | 457 | return NULL; |
458 | 458 | ||
459 | sta->last_rx = jiffies; | ||
459 | set_sta_flags(sta, WLAN_STA_AUTHORIZED); | 460 | set_sta_flags(sta, WLAN_STA_AUTHORIZED); |
460 | 461 | ||
461 | /* make sure mandatory rates are always added */ | 462 | /* make sure mandatory rates are always added */ |
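
Stamping last_rx at creation keeps a new IBSS peer alive through the first expiry sweep: periodic IBSS maintenance expires stations whose last_rx is too old, and a zero-initialised timestamp looks infinitely stale. A sketch of the consumer side (the limit macro and cleanup call are assumed names mirroring mac80211's expiry logic, not quoted from it):

    if (time_after(jiffies, sta->last_rx + IEEE80211_IBSS_INACTIVITY_LIMIT))
            sta_info_destroy(sta);  /* assumed cleanup call */
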
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index f9163b12c7f1..7aa85591dbe7 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -391,6 +391,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
391 | u32 hw_reconf_flags = 0; | 391 | u32 hw_reconf_flags = 0; |
392 | int i; | 392 | int i; |
393 | 393 | ||
394 | if (local->scan_sdata == sdata) | ||
395 | ieee80211_scan_cancel(local); | ||
396 | |||
394 | clear_bit(SDATA_STATE_RUNNING, &sdata->state); | 397 | clear_bit(SDATA_STATE_RUNNING, &sdata->state); |
395 | 398 | ||
396 | /* | 399 | /* |
@@ -523,9 +526,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
523 | synchronize_rcu(); | 526 | synchronize_rcu(); |
524 | skb_queue_purge(&sdata->skb_queue); | 527 | skb_queue_purge(&sdata->skb_queue); |
525 | 528 | ||
526 | if (local->scan_sdata == sdata) | ||
527 | ieee80211_scan_cancel(local); | ||
528 | |||
529 | /* | 529 | /* |
530 | * Disable beaconing here for mesh only, AP and IBSS | 530 | * Disable beaconing here for mesh only, AP and IBSS |
531 | * are already taken care of. | 531 | * are already taken care of. |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 22bc42b18991..107a0cbe52ac 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -677,10 +677,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
677 | /* | 677 | /* |
678 | * Calculate scan IE length -- we need this to alloc | 678 | * Calculate scan IE length -- we need this to alloc |
679 | * memory and to subtract from the driver limit. It | 679 | * memory and to subtract from the driver limit. It |
680 | * includes the (extended) supported rates and HT | 680 | * includes the DS Params, (extended) supported rates, and HT |
681 | * information -- SSID is the driver's responsibility. | 681 | * information -- SSID is the driver's responsibility. |
682 | */ | 682 | */ |
683 | local->scan_ies_len = 4 + max_bitrates; /* (ext) supp rates */ | 683 | local->scan_ies_len = 4 + max_bitrates /* (ext) supp rates */ + |
684 | 3 /* DS Params */; | ||
684 | if (supp_ht) | 685 | if (supp_ht) |
685 | local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap); | 686 | local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap); |
686 | 687 | ||
@@ -748,7 +749,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
748 | hw->queues = IEEE80211_MAX_QUEUES; | 749 | hw->queues = IEEE80211_MAX_QUEUES; |
749 | 750 | ||
750 | local->workqueue = | 751 | local->workqueue = |
751 | create_singlethread_workqueue(wiphy_name(local->hw.wiphy)); | 752 | alloc_ordered_workqueue(wiphy_name(local->hw.wiphy), 0); |
752 | if (!local->workqueue) { | 753 | if (!local->workqueue) { |
753 | result = -ENOMEM; | 754 | result = -ENOMEM; |
754 | goto fail_workqueue; | 755 | goto fail_workqueue; |
@@ -962,12 +963,6 @@ static void __exit ieee80211_exit(void) | |||
962 | rc80211_minstrel_ht_exit(); | 963 | rc80211_minstrel_ht_exit(); |
963 | rc80211_minstrel_exit(); | 964 | rc80211_minstrel_exit(); |
964 | 965 | ||
965 | /* | ||
966 | * For key todo, it'll be empty by now but the work | ||
967 | * might still be scheduled. | ||
968 | */ | ||
969 | flush_scheduled_work(); | ||
970 | |||
971 | if (mesh_allocated) | 966 | if (mesh_allocated) |
972 | ieee80211s_stop(); | 967 | ieee80211s_stop(); |
973 | 968 | ||
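
create_singlethread_workqueue() was being phased out when this landed; alloc_ordered_workqueue(name, flags) provides the same one-item-at-a-time, in-order execution guarantee on top of the concurrency-managed workqueue infrastructure:

    local->workqueue =
            alloc_ordered_workqueue(wiphy_name(local->hw.wiphy), 0);
    if (!local->workqueue)
            return -ENOMEM;

The deleted flush_scheduled_work() in module exit follows the same trend: per the removed comment the key todo list is already empty by then, and flushing the global workqueue from module exit paths was both unnecessary and a known deadlock source.
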
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 809cf230d251..33f76993da08 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c | |||
@@ -329,6 +329,9 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, | |||
329 | * if needed. | 329 | * if needed. |
330 | */ | 330 | */ |
331 | for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { | 331 | for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { |
332 | /* Skip invalid rates */ | ||
333 | if (info->control.rates[i].idx < 0) | ||
334 | break; | ||
332 | /* Rate masking supports only legacy rates for now */ | 335 | /* Rate masking supports only legacy rates for now */ |
333 | if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS) | 336 | if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS) |
334 | continue; | 337 | continue; |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 43288259f4a1..1534f2b44caf 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -525,6 +525,7 @@ config NETFILTER_XT_TARGET_TPROXY | |||
525 | depends on NETFILTER_XTABLES | 525 | depends on NETFILTER_XTABLES |
526 | depends on NETFILTER_ADVANCED | 526 | depends on NETFILTER_ADVANCED |
527 | select NF_DEFRAG_IPV4 | 527 | select NF_DEFRAG_IPV4 |
528 | select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES | ||
528 | help | 529 | help |
529 | This option adds a `TPROXY' target, which is somewhat similar to | 530 | This option adds a `TPROXY' target, which is somewhat similar to |
530 | REDIRECT. It can only be used in the mangle table and is useful | 531 | REDIRECT. It can only be used in the mangle table and is useful |
@@ -927,6 +928,7 @@ config NETFILTER_XT_MATCH_SOCKET | |||
927 | depends on NETFILTER_ADVANCED | 928 | depends on NETFILTER_ADVANCED |
928 | depends on !NF_CONNTRACK || NF_CONNTRACK | 929 | depends on !NF_CONNTRACK || NF_CONNTRACK |
929 | select NF_DEFRAG_IPV4 | 930 | select NF_DEFRAG_IPV4 |
931 | select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES | ||
930 | help | 932 | help |
931 | This option adds a `socket' match, which can be used to match | 933 | This option adds a `socket' match, which can be used to match |
932 | packets for which a TCP or UDP socket lookup finds a valid socket. | 934 | packets for which a TCP or UDP socket lookup finds a valid socket. |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 1eacf8d9966a..27a5ea6b6a0f 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -1312,7 +1312,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls) | |||
1312 | if (!hash) { | 1312 | if (!hash) { |
1313 | *vmalloced = 1; | 1313 | *vmalloced = 1; |
1314 | printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); | 1314 | printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); |
1315 | hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); | 1315 | hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, |
1316 | PAGE_KERNEL); | ||
1316 | } | 1317 | } |
1317 | 1318 | ||
1318 | if (hash && nulls) | 1319 | if (hash && nulls) |
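
The conntrack hash fallback allocates with __vmalloc(), and vmalloc'd pages are always reached through the vmalloc mapping, so there is no reason to restrict them to lowmem. Adding __GFP_HIGHMEM (which plain vmalloc() uses internally) lets large tables succeed on 32-bit machines where lowmem is scarce:

    hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                     PAGE_KERNEL);
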
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index ed6d92958023..dc7bb74110df 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c | |||
@@ -292,6 +292,12 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto) | |||
292 | 292 | ||
293 | for (i = 0; i < MAX_NF_CT_PROTO; i++) | 293 | for (i = 0; i < MAX_NF_CT_PROTO; i++) |
294 | proto_array[i] = &nf_conntrack_l4proto_generic; | 294 | proto_array[i] = &nf_conntrack_l4proto_generic; |
295 | |||
296 | /* Before making proto_array visible to lockless readers, | ||
297 | * we must make sure its content is committed to memory. | ||
298 | */ | ||
299 | smp_wmb(); | ||
300 | |||
295 | nf_ct_protos[l4proto->l3proto] = proto_array; | 301 | nf_ct_protos[l4proto->l3proto] = proto_array; |
296 | } else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != | 302 | } else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != |
297 | &nf_conntrack_l4proto_generic) { | 303 | &nf_conntrack_l4proto_generic) { |
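
Publication ordering in miniature: proto_array is filled in, smp_wmb() commits those stores, and only then is the pointer that lockless readers follow made visible, so no reader can see the array before its contents. Readers pair this with a data-dependency barrier, which rcu_dereference() supplies on the architectures that need one. Generic sketch:

    for (i = 0; i < MAX; i++)
            array[i] = &generic_entry;      /* initialise contents */

    smp_wmb();                              /* contents before pointer */
    table[key] = array;                     /* readers may follow it now */
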
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 19c482caf30b..640678f47a2a 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c | |||
@@ -21,7 +21,9 @@ | |||
21 | #include <linux/netfilter_ipv4/ip_tables.h> | 21 | #include <linux/netfilter_ipv4/ip_tables.h> |
22 | 22 | ||
23 | #include <net/netfilter/ipv4/nf_defrag_ipv4.h> | 23 | #include <net/netfilter/ipv4/nf_defrag_ipv4.h> |
24 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 24 | |
25 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) | ||
26 | #define XT_TPROXY_HAVE_IPV6 1 | ||
25 | #include <net/if_inet6.h> | 27 | #include <net/if_inet6.h> |
26 | #include <net/addrconf.h> | 28 | #include <net/addrconf.h> |
27 | #include <linux/netfilter_ipv6/ip6_tables.h> | 29 | #include <linux/netfilter_ipv6/ip6_tables.h> |
@@ -172,7 +174,7 @@ tproxy_tg4_v1(struct sk_buff *skb, const struct xt_action_param *par) | |||
172 | return tproxy_tg4(skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value); | 174 | return tproxy_tg4(skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value); |
173 | } | 175 | } |
174 | 176 | ||
175 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 177 | #ifdef XT_TPROXY_HAVE_IPV6 |
176 | 178 | ||
177 | static inline const struct in6_addr * | 179 | static inline const struct in6_addr * |
178 | tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr, | 180 | tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr, |
@@ -372,7 +374,7 @@ static struct xt_target tproxy_tg_reg[] __read_mostly = { | |||
372 | .hooks = 1 << NF_INET_PRE_ROUTING, | 374 | .hooks = 1 << NF_INET_PRE_ROUTING, |
373 | .me = THIS_MODULE, | 375 | .me = THIS_MODULE, |
374 | }, | 376 | }, |
375 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 377 | #ifdef XT_TPROXY_HAVE_IPV6 |
376 | { | 378 | { |
377 | .name = "TPROXY", | 379 | .name = "TPROXY", |
378 | .family = NFPROTO_IPV6, | 380 | .family = NFPROTO_IPV6, |
@@ -391,7 +393,7 @@ static struct xt_target tproxy_tg_reg[] __read_mostly = { | |||
391 | static int __init tproxy_tg_init(void) | 393 | static int __init tproxy_tg_init(void) |
392 | { | 394 | { |
393 | nf_defrag_ipv4_enable(); | 395 | nf_defrag_ipv4_enable(); |
394 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 396 | #ifdef XT_TPROXY_HAVE_IPV6 |
395 | nf_defrag_ipv6_enable(); | 397 | nf_defrag_ipv6_enable(); |
396 | #endif | 398 | #endif |
397 | 399 | ||
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 2dbd4c857735..00d6ae838303 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/netfilter/x_tables.h> | 15 | #include <linux/netfilter/x_tables.h> |
16 | #include <linux/netfilter_ipv4/ip_tables.h> | 16 | #include <linux/netfilter_ipv4/ip_tables.h> |
17 | #include <linux/netfilter_ipv6/ip6_tables.h> | ||
18 | #include <net/tcp.h> | 17 | #include <net/tcp.h> |
19 | #include <net/udp.h> | 18 | #include <net/udp.h> |
20 | #include <net/icmp.h> | 19 | #include <net/icmp.h> |
@@ -22,7 +21,12 @@ | |||
22 | #include <net/inet_sock.h> | 21 | #include <net/inet_sock.h> |
23 | #include <net/netfilter/nf_tproxy_core.h> | 22 | #include <net/netfilter/nf_tproxy_core.h> |
24 | #include <net/netfilter/ipv4/nf_defrag_ipv4.h> | 23 | #include <net/netfilter/ipv4/nf_defrag_ipv4.h> |
24 | |||
25 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) | ||
26 | #define XT_SOCKET_HAVE_IPV6 1 | ||
27 | #include <linux/netfilter_ipv6/ip6_tables.h> | ||
25 | #include <net/netfilter/ipv6/nf_defrag_ipv6.h> | 28 | #include <net/netfilter/ipv6/nf_defrag_ipv6.h> |
29 | #endif | ||
26 | 30 | ||
27 | #include <linux/netfilter/xt_socket.h> | 31 | #include <linux/netfilter/xt_socket.h> |
28 | 32 | ||
@@ -186,12 +190,12 @@ socket_mt4_v1(const struct sk_buff *skb, struct xt_action_param *par) | |||
186 | return socket_match(skb, par, par->matchinfo); | 190 | return socket_match(skb, par, par->matchinfo); |
187 | } | 191 | } |
188 | 192 | ||
189 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 193 | #ifdef XT_SOCKET_HAVE_IPV6 |
190 | 194 | ||
191 | static int | 195 | static int |
192 | extract_icmp6_fields(const struct sk_buff *skb, | 196 | extract_icmp6_fields(const struct sk_buff *skb, |
193 | unsigned int outside_hdrlen, | 197 | unsigned int outside_hdrlen, |
194 | u8 *protocol, | 198 | int *protocol, |
195 | struct in6_addr **raddr, | 199 | struct in6_addr **raddr, |
196 | struct in6_addr **laddr, | 200 | struct in6_addr **laddr, |
197 | __be16 *rport, | 201 | __be16 *rport, |
@@ -248,8 +252,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) | |||
248 | struct sock *sk; | 252 | struct sock *sk; |
249 | struct in6_addr *daddr, *saddr; | 253 | struct in6_addr *daddr, *saddr; |
250 | __be16 dport, sport; | 254 | __be16 dport, sport; |
251 | int thoff; | 255 | int thoff, tproto; |
252 | u8 tproto; | ||
253 | const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; | 256 | const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; |
254 | 257 | ||
255 | tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); | 258 | tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); |
@@ -301,7 +304,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) | |||
301 | sk = NULL; | 304 | sk = NULL; |
302 | } | 305 | } |
303 | 306 | ||
304 | pr_debug("proto %hhu %pI6:%hu -> %pI6:%hu " | 307 | pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu " |
305 | "(orig %pI6:%hu) sock %p\n", | 308 | "(orig %pI6:%hu) sock %p\n", |
306 | tproto, saddr, ntohs(sport), | 309 | tproto, saddr, ntohs(sport), |
307 | daddr, ntohs(dport), | 310 | daddr, ntohs(dport), |
@@ -331,7 +334,7 @@ static struct xt_match socket_mt_reg[] __read_mostly = { | |||
331 | (1 << NF_INET_LOCAL_IN), | 334 | (1 << NF_INET_LOCAL_IN), |
332 | .me = THIS_MODULE, | 335 | .me = THIS_MODULE, |
333 | }, | 336 | }, |
334 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 337 | #ifdef XT_SOCKET_HAVE_IPV6 |
335 | { | 338 | { |
336 | .name = "socket", | 339 | .name = "socket", |
337 | .revision = 1, | 340 | .revision = 1, |
@@ -348,7 +351,7 @@ static struct xt_match socket_mt_reg[] __read_mostly = { | |||
348 | static int __init socket_mt_init(void) | 351 | static int __init socket_mt_init(void) |
349 | { | 352 | { |
350 | nf_defrag_ipv4_enable(); | 353 | nf_defrag_ipv4_enable(); |
351 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 354 | #ifdef XT_SOCKET_HAVE_IPV6 |
352 | nf_defrag_ipv6_enable(); | 355 | nf_defrag_ipv6_enable(); |
353 | #endif | 356 | #endif |
354 | 357 | ||
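
The same move in both TPROXY and the socket match: their IPv6 halves need nf_defrag_ipv6, which only exists when ip6_tables is configured, so the compile-time gate switches from CONFIG_IPV6 to CONFIG_IP6_NF_IPTABLES (with the Kconfig hunks above selecting NF_DEFRAG_IPV6 so it actually gets built). Concentrating the test in one HAVE macro keeps the remaining #ifdefs short and greppable:

    #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
    #define XT_SOCKET_HAVE_IPV6 1
    #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
    #endif

    /* later, in module init */
    #ifdef XT_SOCKET_HAVE_IPV6
            nf_defrag_ipv6_enable();        /* only when the helper exists */
    #endif
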
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index cd96ed3ccee4..478181d53c55 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -83,9 +83,9 @@ struct netlink_sock { | |||
83 | struct module *module; | 83 | struct module *module; |
84 | }; | 84 | }; |
85 | 85 | ||
86 | struct listeners_rcu_head { | 86 | struct listeners { |
87 | struct rcu_head rcu_head; | 87 | struct rcu_head rcu; |
88 | void *ptr; | 88 | unsigned long masks[0]; |
89 | }; | 89 | }; |
90 | 90 | ||
91 | #define NETLINK_KERNEL_SOCKET 0x1 | 91 | #define NETLINK_KERNEL_SOCKET 0x1 |
@@ -119,7 +119,7 @@ struct nl_pid_hash { | |||
119 | struct netlink_table { | 119 | struct netlink_table { |
120 | struct nl_pid_hash hash; | 120 | struct nl_pid_hash hash; |
121 | struct hlist_head mc_list; | 121 | struct hlist_head mc_list; |
122 | unsigned long *listeners; | 122 | struct listeners __rcu *listeners; |
123 | unsigned int nl_nonroot; | 123 | unsigned int nl_nonroot; |
124 | unsigned int groups; | 124 | unsigned int groups; |
125 | struct mutex *cb_mutex; | 125 | struct mutex *cb_mutex; |
@@ -338,7 +338,7 @@ netlink_update_listeners(struct sock *sk) | |||
338 | if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) | 338 | if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) |
339 | mask |= nlk_sk(sk)->groups[i]; | 339 | mask |= nlk_sk(sk)->groups[i]; |
340 | } | 340 | } |
341 | tbl->listeners[i] = mask; | 341 | tbl->listeners->masks[i] = mask; |
342 | } | 342 | } |
343 | /* this function is only called with the netlink table "grabbed", which | 343 | /* this function is only called with the netlink table "grabbed", which |
344 | * makes sure updates are visible before bind or setsockopt return. */ | 344 | * makes sure updates are visible before bind or setsockopt return. */ |
@@ -936,7 +936,7 @@ EXPORT_SYMBOL(netlink_unicast); | |||
936 | int netlink_has_listeners(struct sock *sk, unsigned int group) | 936 | int netlink_has_listeners(struct sock *sk, unsigned int group) |
937 | { | 937 | { |
938 | int res = 0; | 938 | int res = 0; |
939 | unsigned long *listeners; | 939 | struct listeners *listeners; |
940 | 940 | ||
941 | BUG_ON(!netlink_is_kernel(sk)); | 941 | BUG_ON(!netlink_is_kernel(sk)); |
942 | 942 | ||
@@ -944,7 +944,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group) | |||
944 | listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); | 944 | listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); |
945 | 945 | ||
946 | if (group - 1 < nl_table[sk->sk_protocol].groups) | 946 | if (group - 1 < nl_table[sk->sk_protocol].groups) |
947 | res = test_bit(group - 1, listeners); | 947 | res = test_bit(group - 1, listeners->masks); |
948 | 948 | ||
949 | rcu_read_unlock(); | 949 | rcu_read_unlock(); |
950 | 950 | ||
@@ -1498,7 +1498,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups, | |||
1498 | struct socket *sock; | 1498 | struct socket *sock; |
1499 | struct sock *sk; | 1499 | struct sock *sk; |
1500 | struct netlink_sock *nlk; | 1500 | struct netlink_sock *nlk; |
1501 | unsigned long *listeners = NULL; | 1501 | struct listeners *listeners = NULL; |
1502 | 1502 | ||
1503 | BUG_ON(!nl_table); | 1503 | BUG_ON(!nl_table); |
1504 | 1504 | ||
@@ -1523,8 +1523,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups, | |||
1523 | if (groups < 32) | 1523 | if (groups < 32) |
1524 | groups = 32; | 1524 | groups = 32; |
1525 | 1525 | ||
1526 | listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head), | 1526 | listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL); |
1527 | GFP_KERNEL); | ||
1528 | if (!listeners) | 1527 | if (!listeners) |
1529 | goto out_sock_release; | 1528 | goto out_sock_release; |
1530 | 1529 | ||
@@ -1541,7 +1540,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups, | |||
1541 | netlink_table_grab(); | 1540 | netlink_table_grab(); |
1542 | if (!nl_table[unit].registered) { | 1541 | if (!nl_table[unit].registered) { |
1543 | nl_table[unit].groups = groups; | 1542 | nl_table[unit].groups = groups; |
1544 | nl_table[unit].listeners = listeners; | 1543 | rcu_assign_pointer(nl_table[unit].listeners, listeners); |
1545 | nl_table[unit].cb_mutex = cb_mutex; | 1544 | nl_table[unit].cb_mutex = cb_mutex; |
1546 | nl_table[unit].module = module; | 1545 | nl_table[unit].module = module; |
1547 | nl_table[unit].registered = 1; | 1546 | nl_table[unit].registered = 1; |
@@ -1572,43 +1571,28 @@ netlink_kernel_release(struct sock *sk) | |||
1572 | EXPORT_SYMBOL(netlink_kernel_release); | 1571 | EXPORT_SYMBOL(netlink_kernel_release); |
1573 | 1572 | ||
1574 | 1573 | ||
1575 | static void netlink_free_old_listeners(struct rcu_head *rcu_head) | 1574 | static void listeners_free_rcu(struct rcu_head *head) |
1576 | { | 1575 | { |
1577 | struct listeners_rcu_head *lrh; | 1576 | kfree(container_of(head, struct listeners, rcu)); |
1578 | |||
1579 | lrh = container_of(rcu_head, struct listeners_rcu_head, rcu_head); | ||
1580 | kfree(lrh->ptr); | ||
1581 | } | 1577 | } |
1582 | 1578 | ||
1583 | int __netlink_change_ngroups(struct sock *sk, unsigned int groups) | 1579 | int __netlink_change_ngroups(struct sock *sk, unsigned int groups) |
1584 | { | 1580 | { |
1585 | unsigned long *listeners, *old = NULL; | 1581 | struct listeners *new, *old; |
1586 | struct listeners_rcu_head *old_rcu_head; | ||
1587 | struct netlink_table *tbl = &nl_table[sk->sk_protocol]; | 1582 | struct netlink_table *tbl = &nl_table[sk->sk_protocol]; |
1588 | 1583 | ||
1589 | if (groups < 32) | 1584 | if (groups < 32) |
1590 | groups = 32; | 1585 | groups = 32; |
1591 | 1586 | ||
1592 | if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) { | 1587 | if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) { |
1593 | listeners = kzalloc(NLGRPSZ(groups) + | 1588 | new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC); |
1594 | sizeof(struct listeners_rcu_head), | 1589 | if (!new) |
1595 | GFP_ATOMIC); | ||
1596 | if (!listeners) | ||
1597 | return -ENOMEM; | 1590 | return -ENOMEM; |
1598 | old = tbl->listeners; | 1591 | old = rcu_dereference_raw(tbl->listeners); |
1599 | memcpy(listeners, old, NLGRPSZ(tbl->groups)); | 1592 | memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); |
1600 | rcu_assign_pointer(tbl->listeners, listeners); | 1593 | rcu_assign_pointer(tbl->listeners, new); |
1601 | /* | 1594 | |
1602 | * Free the old memory after an RCU grace period so we | 1595 | call_rcu(&old->rcu, listeners_free_rcu); |
1603 | * don't leak it. We use call_rcu() here in order to be | ||
1604 | * able to call this function from atomic contexts. The | ||
1605 | * allocation of this memory will have reserved enough | ||
1606 | * space for struct listeners_rcu_head at the end. | ||
1607 | */ | ||
1608 | old_rcu_head = (void *)(tbl->listeners + | ||
1609 | NLGRPLONGS(tbl->groups)); | ||
1610 | old_rcu_head->ptr = old; | ||
1611 | call_rcu(&old_rcu_head->rcu_head, netlink_free_old_listeners); | ||
1612 | } | 1596 | } |
1613 | tbl->groups = groups; | 1597 | tbl->groups = groups; |
1614 | 1598 | ||
@@ -2104,18 +2088,17 @@ static void __net_exit netlink_net_exit(struct net *net) | |||
2104 | 2088 | ||
2105 | static void __init netlink_add_usersock_entry(void) | 2089 | static void __init netlink_add_usersock_entry(void) |
2106 | { | 2090 | { |
2107 | unsigned long *listeners; | 2091 | struct listeners *listeners; |
2108 | int groups = 32; | 2092 | int groups = 32; |
2109 | 2093 | ||
2110 | listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head), | 2094 | listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL); |
2111 | GFP_KERNEL); | ||
2112 | if (!listeners) | 2095 | if (!listeners) |
2113 | panic("netlink_add_usersock_entry: Cannot allocate listneres\n"); | 2096 | panic("netlink_add_usersock_entry: Cannot allocate listeners\n"); |
2114 | 2097 | ||
2115 | netlink_table_grab(); | 2098 | netlink_table_grab(); |
2116 | 2099 | ||
2117 | nl_table[NETLINK_USERSOCK].groups = groups; | 2100 | nl_table[NETLINK_USERSOCK].groups = groups; |
2118 | nl_table[NETLINK_USERSOCK].listeners = listeners; | 2101 | rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners); |
2119 | nl_table[NETLINK_USERSOCK].module = THIS_MODULE; | 2102 | nl_table[NETLINK_USERSOCK].module = THIS_MODULE; |
2120 | nl_table[NETLINK_USERSOCK].registered = 1; | 2103 | nl_table[NETLINK_USERSOCK].registered = 1; |
2121 | 2104 | ||
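
The af_netlink.c changes above replace a hand-rolled deferred-free scheme. The old code allocated the listeners bitmap with a struct listeners_rcu_head tacked on after it, and that trailing struct carried a back pointer to the allocation so the RCU callback could free it. The new layout puts the rcu_head at the front of the same allocation, so the callback recovers the object directly with container_of and the back pointer disappears. Reduced to its essentials (listeners_grow() and slot are illustrative names; the update side is assumed to be serialized, as netlink_table_grab() guarantees in the hunks above):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct listeners {
        struct rcu_head rcu;          /* freed via call_rcu() */
        unsigned long   masks[0];     /* NLGRPSZ(groups) bytes of bitmap */
    };

    static void listeners_free_rcu(struct rcu_head *head)
    {
        kfree(container_of(head, struct listeners, rcu));
    }

    /* Illustrative: grow the bitmap while readers may be walking it. */
    static int listeners_grow(struct listeners __rcu **slot,
                              size_t old_size, size_t new_size)
    {
        struct listeners *new, *old;

        new = kzalloc(sizeof(*new) + new_size, GFP_ATOMIC);
        if (!new)
            return -ENOMEM;

        old = rcu_dereference_raw(*slot);    /* update side is serialized */
        memcpy(new->masks, old->masks, old_size);

        rcu_assign_pointer(*slot, new);      /* publish the larger copy */
        call_rcu(&old->rcu, listeners_free_rcu);  /* safe in atomic context */
        return 0;
    }
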
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 3616f27b9d46..8298e676f5a0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -1610,9 +1610,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1610 | 1610 | ||
1611 | err = -EINVAL; | 1611 | err = -EINVAL; |
1612 | vnet_hdr_len = sizeof(vnet_hdr); | 1612 | vnet_hdr_len = sizeof(vnet_hdr); |
1613 | if ((len -= vnet_hdr_len) < 0) | 1613 | if (len < vnet_hdr_len) |
1614 | goto out_free; | 1614 | goto out_free; |
1615 | 1615 | ||
1616 | len -= vnet_hdr_len; | ||
1617 | |||
1616 | if (skb_is_gso(skb)) { | 1618 | if (skb_is_gso(skb)) { |
1617 | struct skb_shared_info *sinfo = skb_shinfo(skb); | 1619 | struct skb_shared_info *sinfo = skb_shinfo(skb); |
1618 | 1620 | ||
@@ -1719,7 +1721,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, | |||
1719 | rcu_read_lock(); | 1721 | rcu_read_lock(); |
1720 | dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); | 1722 | dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); |
1721 | if (dev) | 1723 | if (dev) |
1722 | strlcpy(uaddr->sa_data, dev->name, 15); | 1724 | strncpy(uaddr->sa_data, dev->name, 14); |
1723 | else | 1725 | else |
1724 | memset(uaddr->sa_data, 0, 14); | 1726 | memset(uaddr->sa_data, 0, 14); |
1725 | rcu_read_unlock(); | 1727 | rcu_read_unlock(); |
@@ -1742,6 +1744,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr, | |||
1742 | sll->sll_family = AF_PACKET; | 1744 | sll->sll_family = AF_PACKET; |
1743 | sll->sll_ifindex = po->ifindex; | 1745 | sll->sll_ifindex = po->ifindex; |
1744 | sll->sll_protocol = po->num; | 1746 | sll->sll_protocol = po->num; |
1747 | sll->sll_pkttype = 0; | ||
1745 | rcu_read_lock(); | 1748 | rcu_read_lock(); |
1746 | dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); | 1749 | dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); |
1747 | if (dev) { | 1750 | if (dev) { |
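
Two separate problems are fixed in af_packet.c. In packet_recvmsg(), len is a size_t, so the old `if ((len -= vnet_hdr_len) < 0)` test could never fire: an unsigned value is never negative, and the subtraction had already wrapped by the time of the comparison. The fix compares before subtracting. In packet_getname_spkt(), strlcpy(..., 15) could write one byte past the 14-byte sa_data field, so it becomes strncpy(..., 14); and packet_getname() now zeroes sll_pkttype so no uninitialized kernel stack byte reaches userspace. A small standalone userspace demonstration of the unsigned-underflow trap (most compilers warn about the always-false comparison, which is how such bugs are often found):

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t len = 4, hdr_len = 10;

        /* The broken ordering: subtract first, test second.  A size_t is
         * never negative, so the branch is dead code and len has already
         * wrapped around to a huge value. */
        if ((len -= hdr_len) < 0)
            puts("short buffer");            /* never reached */
        printf("after wrap: len = %zu\n", len);

        /* The fixed ordering: test first, subtract second. */
        len = 4;
        if (len < hdr_len) {
            puts("short buffer, caught");
            return 1;
        }
        len -= hdr_len;
        return 0;
    }
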
diff --git a/net/rds/loop.c b/net/rds/loop.c index c390156b426f..aeec1d483b17 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c | |||
@@ -134,8 +134,12 @@ static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp) | |||
134 | static void rds_loop_conn_free(void *arg) | 134 | static void rds_loop_conn_free(void *arg) |
135 | { | 135 | { |
136 | struct rds_loop_connection *lc = arg; | 136 | struct rds_loop_connection *lc = arg; |
137 | unsigned long flags; | ||
138 | |||
137 | rdsdebug("lc %p\n", lc); | 139 | rdsdebug("lc %p\n", lc); |
140 | spin_lock_irqsave(&loop_conns_lock, flags); | ||
138 | list_del(&lc->loop_node); | 141 | list_del(&lc->loop_node); |
142 | spin_unlock_irqrestore(&loop_conns_lock, flags); | ||
139 | kfree(lc); | 143 | kfree(lc); |
140 | } | 144 | } |
141 | 145 | ||
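
rds_loop_conn_free() removes the connection from the global loop_conns list, but previously did so without taking the lock that every walker of that list holds; a free racing with a traversal could corrupt the list or let the walker touch freed memory. The matching rds/tcp.c hunk further down closes the same race for the TCP transport. The shape of the fix, under illustrative names:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_conn {
        struct list_head node;   /* linked into conn_list */
        /* ... */
    };

    static LIST_HEAD(conn_list);
    static DEFINE_SPINLOCK(conn_lock);

    static void my_conn_free(struct my_conn *c)
    {
        unsigned long flags;

        /* Unlink under the same lock every walker of conn_list takes.
         * irqsave, because the free path may run with IRQs disabled. */
        spin_lock_irqsave(&conn_lock, flags);
        list_del(&c->node);
        spin_unlock_irqrestore(&conn_lock, flags);

        kfree(c);   /* safe: no walker can reach c any more */
    }
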
diff --git a/net/rds/message.c b/net/rds/message.c index a84545dae370..1fd3d29023d7 100644 --- a/net/rds/message.c +++ b/net/rds/message.c | |||
@@ -224,6 +224,9 @@ struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents) | |||
224 | WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs); | 224 | WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs); |
225 | WARN_ON(!nents); | 225 | WARN_ON(!nents); |
226 | 226 | ||
227 | if (rm->m_used_sgs + nents > rm->m_total_sgs) | ||
228 | return NULL; | ||
229 | |||
227 | sg_ret = &sg_first[rm->m_used_sgs]; | 230 | sg_ret = &sg_first[rm->m_used_sgs]; |
228 | sg_init_table(sg_ret, nents); | 231 | sg_init_table(sg_ret, nents); |
229 | rm->m_used_sgs += nents; | 232 | rm->m_used_sgs += nents; |
@@ -246,6 +249,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in | |||
246 | rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); | 249 | rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); |
247 | rm->data.op_nents = ceil(total_len, PAGE_SIZE); | 250 | rm->data.op_nents = ceil(total_len, PAGE_SIZE); |
248 | rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); | 251 | rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); |
252 | if (!rm->data.op_sg) { | ||
253 | rds_message_put(rm); | ||
254 | return ERR_PTR(-ENOMEM); | ||
255 | } | ||
249 | 256 | ||
250 | for (i = 0; i < rm->data.op_nents; ++i) { | 257 | for (i = 0; i < rm->data.op_nents; ++i) { |
251 | sg_set_page(&rm->data.op_sg[i], | 258 | sg_set_page(&rm->data.op_sg[i], |
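
rds_message_alloc_sgs() hands out slices of a preallocated scatterlist array. It used to only WARN when a request ran past the end and then return an out-of-bounds pointer anyway; it now returns NULL, and every caller (here in rds_message_map_pages(), and in rds/rdma.c and rds/send.c below) turns that NULL into -ENOMEM and unwinds. A condensed sketch of the contract (field names such as m_sg are simplified stand-ins for the real message layout):

    struct scatterlist *alloc_sgs(struct rds_message *rm, int nents)
    {
        struct scatterlist *sg;

        WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
        if (rm->m_used_sgs + nents > rm->m_total_sgs)
            return NULL;                    /* caller must check */

        sg = &rm->m_sg[rm->m_used_sgs];     /* next free slice */
        sg_init_table(sg, nents);
        rm->m_used_sgs += nents;
        return sg;
    }
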
diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 1a41debca1ce..8920f2a83327 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c | |||
@@ -479,13 +479,38 @@ void rds_atomic_free_op(struct rm_atomic_op *ao) | |||
479 | 479 | ||
480 | 480 | ||
481 | /* | 481 | /* |
482 | * Count the number of pages needed to describe an incoming iovec. | 482 | * Count the number of pages needed to describe an incoming iovec array. |
483 | */ | 483 | */ |
484 | static int rds_rdma_pages(struct rds_rdma_args *args) | 484 | static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs) |
485 | { | ||
486 | int tot_pages = 0; | ||
487 | unsigned int nr_pages; | ||
488 | unsigned int i; | ||
489 | |||
490 | /* figure out the number of pages in the vector */ | ||
491 | for (i = 0; i < nr_iovecs; i++) { | ||
492 | nr_pages = rds_pages_in_vec(&iov[i]); | ||
493 | if (nr_pages == 0) | ||
494 | return -EINVAL; | ||
495 | |||
496 | tot_pages += nr_pages; | ||
497 | |||
498 | /* | ||
499 | * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, | ||
500 | * so tot_pages cannot overflow without first going negative. | ||
501 | */ | ||
502 | if (tot_pages < 0) | ||
503 | return -EINVAL; | ||
504 | } | ||
505 | |||
506 | return tot_pages; | ||
507 | } | ||
508 | |||
509 | int rds_rdma_extra_size(struct rds_rdma_args *args) | ||
485 | { | 510 | { |
486 | struct rds_iovec vec; | 511 | struct rds_iovec vec; |
487 | struct rds_iovec __user *local_vec; | 512 | struct rds_iovec __user *local_vec; |
488 | unsigned int tot_pages = 0; | 513 | int tot_pages = 0; |
489 | unsigned int nr_pages; | 514 | unsigned int nr_pages; |
490 | unsigned int i; | 515 | unsigned int i; |
491 | 516 | ||
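
The counting helper is also hardened against integer overflow. rds_rdma_pages() used to accumulate page counts in an unsigned int, which a crafted iovec array could wrap silently, leading to an undersized allocation later. The new code keeps the accumulator signed and relies on a bound: one entry contributes at most (UINT_MAX >> PAGE_SHIFT) + 1 pages, so the running total must pass through negative values before it can wrap all the way around, and a `tot_pages < 0` test catches it there. (Signed wraparound is undefined in standard C; the kernel builds with -fno-strict-overflow, which makes the idiom dependable in this context.) A sketch, with pages_in_vec() standing in for rds_pages_in_vec():

    int count_pages(const struct rds_iovec iov[], int nr_iovecs)
    {
        int tot_pages = 0;          /* signed on purpose */
        unsigned int nr_pages;
        int i;

        for (i = 0; i < nr_iovecs; i++) {
            nr_pages = pages_in_vec(&iov[i]);   /* pages spanned by entry */
            if (nr_pages == 0)
                return -EINVAL;

            tot_pages += nr_pages;

            /* One entry adds at most (UINT_MAX >> PAGE_SHIFT) + 1, so the
             * total goes negative before it can wrap past zero. */
            if (tot_pages < 0)
                return -EINVAL;
        }
        return tot_pages;
    }
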
@@ -502,14 +527,16 @@ static int rds_rdma_pages(struct rds_rdma_args *args) | |||
502 | return -EINVAL; | 527 | return -EINVAL; |
503 | 528 | ||
504 | tot_pages += nr_pages; | 529 | tot_pages += nr_pages; |
505 | } | ||
506 | 530 | ||
507 | return tot_pages; | 531 | /* |
508 | } | 532 | * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, |
533 | * so tot_pages cannot overflow without first going negative. | ||
534 | */ | ||
535 | if (tot_pages < 0) | ||
536 | return -EINVAL; | ||
537 | } | ||
509 | 538 | ||
510 | int rds_rdma_extra_size(struct rds_rdma_args *args) | 539 | return tot_pages * sizeof(struct scatterlist); |
511 | { | ||
512 | return rds_rdma_pages(args) * sizeof(struct scatterlist); | ||
513 | } | 540 | } |
514 | 541 | ||
515 | /* | 542 | /* |
@@ -520,13 +547,12 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, | |||
520 | struct cmsghdr *cmsg) | 547 | struct cmsghdr *cmsg) |
521 | { | 548 | { |
522 | struct rds_rdma_args *args; | 549 | struct rds_rdma_args *args; |
523 | struct rds_iovec vec; | ||
524 | struct rm_rdma_op *op = &rm->rdma; | 550 | struct rm_rdma_op *op = &rm->rdma; |
525 | int nr_pages; | 551 | int nr_pages; |
526 | unsigned int nr_bytes; | 552 | unsigned int nr_bytes; |
527 | struct page **pages = NULL; | 553 | struct page **pages = NULL; |
528 | struct rds_iovec __user *local_vec; | 554 | struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack; |
529 | unsigned int nr; | 555 | int iov_size; |
530 | unsigned int i, j; | 556 | unsigned int i, j; |
531 | int ret = 0; | 557 | int ret = 0; |
532 | 558 | ||
@@ -546,9 +572,26 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, | |||
546 | goto out; | 572 | goto out; |
547 | } | 573 | } |
548 | 574 | ||
549 | nr_pages = rds_rdma_pages(args); | 575 | /* Check whether to allocate the iovec area */ |
550 | if (nr_pages < 0) | 576 | iov_size = args->nr_local * sizeof(struct rds_iovec); |
577 | if (args->nr_local > UIO_FASTIOV) { | ||
578 | iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL); | ||
579 | if (!iovs) { | ||
580 | ret = -ENOMEM; | ||
581 | goto out; | ||
582 | } | ||
583 | } | ||
584 | |||
585 | if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) { | ||
586 | ret = -EFAULT; | ||
587 | goto out; | ||
588 | } | ||
589 | |||
590 | nr_pages = rds_rdma_pages(iovs, args->nr_local); | ||
591 | if (nr_pages < 0) { | ||
592 | ret = -EINVAL; | ||
551 | goto out; | 593 | goto out; |
594 | } | ||
552 | 595 | ||
553 | pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); | 596 | pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); |
554 | if (!pages) { | 597 | if (!pages) { |
@@ -564,6 +607,10 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, | |||
564 | op->op_recverr = rs->rs_recverr; | 607 | op->op_recverr = rs->rs_recverr; |
565 | WARN_ON(!nr_pages); | 608 | WARN_ON(!nr_pages); |
566 | op->op_sg = rds_message_alloc_sgs(rm, nr_pages); | 609 | op->op_sg = rds_message_alloc_sgs(rm, nr_pages); |
610 | if (!op->op_sg) { | ||
611 | ret = -ENOMEM; | ||
612 | goto out; | ||
613 | } | ||
567 | 614 | ||
568 | if (op->op_notify || op->op_recverr) { | 615 | if (op->op_notify || op->op_recverr) { |
569 | /* We allocate an uninitialized notifier here, because | 616 | /* We allocate an uninitialized notifier here, because |
@@ -597,50 +644,40 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, | |||
597 | (unsigned long long)args->remote_vec.addr, | 644 | (unsigned long long)args->remote_vec.addr, |
598 | op->op_rkey); | 645 | op->op_rkey); |
599 | 646 | ||
600 | local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr; | ||
601 | |||
602 | for (i = 0; i < args->nr_local; i++) { | 647 | for (i = 0; i < args->nr_local; i++) { |
603 | if (copy_from_user(&vec, &local_vec[i], | 648 | struct rds_iovec *iov = &iovs[i]; |
604 | sizeof(struct rds_iovec))) { | 649 | /* don't need to check, rds_rdma_pages() verified nr will be +nonzero */ |
605 | ret = -EFAULT; | 650 | unsigned int nr = rds_pages_in_vec(iov); |
606 | goto out; | ||
607 | } | ||
608 | |||
609 | nr = rds_pages_in_vec(&vec); | ||
610 | if (nr == 0) { | ||
611 | ret = -EINVAL; | ||
612 | goto out; | ||
613 | } | ||
614 | 651 | ||
615 | rs->rs_user_addr = vec.addr; | 652 | rs->rs_user_addr = iov->addr; |
616 | rs->rs_user_bytes = vec.bytes; | 653 | rs->rs_user_bytes = iov->bytes; |
617 | 654 | ||
618 | /* If it's a WRITE operation, we want to pin the pages for reading. | 655 | /* If it's a WRITE operation, we want to pin the pages for reading. |
619 | * If it's a READ operation, we need to pin the pages for writing. | 656 | * If it's a READ operation, we need to pin the pages for writing. |
620 | */ | 657 | */ |
621 | ret = rds_pin_pages(vec.addr, nr, pages, !op->op_write); | 658 | ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write); |
622 | if (ret < 0) | 659 | if (ret < 0) |
623 | goto out; | 660 | goto out; |
624 | 661 | ||
625 | rdsdebug("RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx\n", | 662 | rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n", |
626 | nr_bytes, nr, vec.bytes, vec.addr); | 663 | nr_bytes, nr, iov->bytes, iov->addr); |
627 | 664 | ||
628 | nr_bytes += vec.bytes; | 665 | nr_bytes += iov->bytes; |
629 | 666 | ||
630 | for (j = 0; j < nr; j++) { | 667 | for (j = 0; j < nr; j++) { |
631 | unsigned int offset = vec.addr & ~PAGE_MASK; | 668 | unsigned int offset = iov->addr & ~PAGE_MASK; |
632 | struct scatterlist *sg; | 669 | struct scatterlist *sg; |
633 | 670 | ||
634 | sg = &op->op_sg[op->op_nents + j]; | 671 | sg = &op->op_sg[op->op_nents + j]; |
635 | sg_set_page(sg, pages[j], | 672 | sg_set_page(sg, pages[j], |
636 | min_t(unsigned int, vec.bytes, PAGE_SIZE - offset), | 673 | min_t(unsigned int, iov->bytes, PAGE_SIZE - offset), |
637 | offset); | 674 | offset); |
638 | 675 | ||
639 | rdsdebug("RDS: sg->offset %x sg->len %x vec.addr %llx vec.bytes %llu\n", | 676 | rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n", |
640 | sg->offset, sg->length, vec.addr, vec.bytes); | 677 | sg->offset, sg->length, iov->addr, iov->bytes); |
641 | 678 | ||
642 | vec.addr += sg->length; | 679 | iov->addr += sg->length; |
643 | vec.bytes -= sg->length; | 680 | iov->bytes -= sg->length; |
644 | } | 681 | } |
645 | 682 | ||
646 | op->op_nents += nr; | 683 | op->op_nents += nr; |
@@ -655,13 +692,14 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, | |||
655 | } | 692 | } |
656 | op->op_bytes = nr_bytes; | 693 | op->op_bytes = nr_bytes; |
657 | 694 | ||
658 | ret = 0; | ||
659 | out: | 695 | out: |
696 | if (iovs != iovstack) | ||
697 | sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size); | ||
660 | kfree(pages); | 698 | kfree(pages); |
661 | if (ret) | 699 | if (ret) |
662 | rds_rdma_free_op(op); | 700 | rds_rdma_free_op(op); |
663 | 701 | else | |
664 | rds_stats_inc(s_send_rdma); | 702 | rds_stats_inc(s_send_rdma); |
665 | 703 | ||
666 | return ret; | 704 | return ret; |
667 | } | 705 | } |
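
The larger change in rds_cmsg_rdma_args() is that the user's iovec array is now copied into kernel memory exactly once, up front. The old code read each rds_iovec from userspace twice, once while counting pages and again while pinning them, so userspace could change an entry between the two reads and make the pinned size disagree with the allocation. The copy uses the common small-on-stack, large-on-heap pattern keyed off UIO_FASTIOV. A condensed sketch of that pattern (error handling shortened; the counting and pinning passes are elided):

    int handle_rdma_args(struct rds_sock *rs, struct rds_rdma_args *args)
    {
        struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
        int iov_size = args->nr_local * sizeof(struct rds_iovec);
        int ret = 0;

        /* Small vectors live on the stack; larger ones are charged to
         * the socket via sock_kmalloc(). */
        if (args->nr_local > UIO_FASTIOV) {
            iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
            if (!iovs)
                return -ENOMEM;
        }

        /* One snapshot: counting and pinning both work on iovs[], so
         * userspace cannot change the vector between the two passes. */
        if (copy_from_user(iovs,
                           (struct rds_iovec __user *)(unsigned long)args->local_vec_addr,
                           iov_size)) {
            ret = -EFAULT;
            goto out;
        }

        /* ... count pages from iovs[], pin pages, build scatterlist ... */

    out:
        if (iovs != iovstack)
            sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
        return ret;
    }
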
@@ -773,6 +811,10 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm, | |||
773 | rm->atomic.op_active = 1; | 811 | rm->atomic.op_active = 1; |
774 | rm->atomic.op_recverr = rs->rs_recverr; | 812 | rm->atomic.op_recverr = rs->rs_recverr; |
775 | rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1); | 813 | rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1); |
814 | if (!rm->atomic.op_sg) { | ||
815 | ret = -ENOMEM; | ||
816 | goto err; | ||
817 | } | ||
776 | 818 | ||
777 | /* verify 8 byte-aligned */ | 819 | /* verify 8 byte-aligned */ |
778 | if (args->local_addr & 0x7) { | 820 | if (args->local_addr & 0x7) { |
diff --git a/net/rds/send.c b/net/rds/send.c index 0bc9db17a87d..35b9c2e9caf1 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
@@ -973,6 +973,10 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
973 | /* Attach data to the rm */ | 973 | /* Attach data to the rm */ |
974 | if (payload_len) { | 974 | if (payload_len) { |
975 | rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE)); | 975 | rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE)); |
976 | if (!rm->data.op_sg) { | ||
977 | ret = -ENOMEM; | ||
978 | goto out; | ||
979 | } | ||
976 | ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len); | 980 | ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len); |
977 | if (ret) | 981 | if (ret) |
978 | goto out; | 982 | goto out; |
diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 08a8c6cf2d10..8e0a32001c90 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c | |||
@@ -221,7 +221,13 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp) | |||
221 | static void rds_tcp_conn_free(void *arg) | 221 | static void rds_tcp_conn_free(void *arg) |
222 | { | 222 | { |
223 | struct rds_tcp_connection *tc = arg; | 223 | struct rds_tcp_connection *tc = arg; |
224 | unsigned long flags; | ||
224 | rdsdebug("freeing tc %p\n", tc); | 225 | rdsdebug("freeing tc %p\n", tc); |
226 | |||
227 | spin_lock_irqsave(&rds_tcp_conn_lock, flags); | ||
228 | list_del(&tc->t_tcp_node); | ||
229 | spin_unlock_irqrestore(&rds_tcp_conn_lock, flags); | ||
230 | |||
225 | kmem_cache_free(rds_tcp_conn_slab, tc); | 231 | kmem_cache_free(rds_tcp_conn_slab, tc); |
226 | } | 232 | } |
227 | 233 | ||
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index efd4f95fd050..f23d9155b1ef 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c | |||
@@ -268,6 +268,10 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh, | |||
268 | goto nla_put_failure; | 268 | goto nla_put_failure; |
269 | 269 | ||
270 | nla_nest_end(skb, nest); | 270 | nla_nest_end(skb, nest); |
271 | |||
272 | if (tcf_exts_dump_stats(skb, &f->exts, &basic_ext_map) < 0) | ||
273 | goto nla_put_failure; | ||
274 | |||
271 | return skb->len; | 275 | return skb->len; |
272 | 276 | ||
273 | nla_put_failure: | 277 | nla_put_failure: |
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 37dff78e9cb1..d49c40fb7e09 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c | |||
@@ -34,8 +34,6 @@ struct cgroup_subsys net_cls_subsys = { | |||
34 | .populate = cgrp_populate, | 34 | .populate = cgrp_populate, |
35 | #ifdef CONFIG_NET_CLS_CGROUP | 35 | #ifdef CONFIG_NET_CLS_CGROUP |
36 | .subsys_id = net_cls_subsys_id, | 36 | .subsys_id = net_cls_subsys_id, |
37 | #else | ||
38 | #define net_cls_subsys_id net_cls_subsys.subsys_id | ||
39 | #endif | 37 | #endif |
40 | .module = THIS_MODULE, | 38 | .module = THIS_MODULE, |
41 | }; | 39 | }; |
diff --git a/net/sched/em_text.c b/net/sched/em_text.c index 763253257411..ea8f566e720c 100644 --- a/net/sched/em_text.c +++ b/net/sched/em_text.c | |||
@@ -103,7 +103,8 @@ retry: | |||
103 | 103 | ||
104 | static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m) | 104 | static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m) |
105 | { | 105 | { |
106 | textsearch_destroy(EM_TEXT_PRIV(m)->config); | 106 | if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config) |
107 | textsearch_destroy(EM_TEXT_PRIV(m)->config); | ||
107 | } | 108 | } |
108 | 109 | ||
109 | static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m) | 110 | static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m) |
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 1ef29c74d85e..e58f9476f29c 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -92,7 +92,7 @@ static struct sctp_af *sctp_af_v6_specific; | |||
92 | struct kmem_cache *sctp_chunk_cachep __read_mostly; | 92 | struct kmem_cache *sctp_chunk_cachep __read_mostly; |
93 | struct kmem_cache *sctp_bucket_cachep __read_mostly; | 93 | struct kmem_cache *sctp_bucket_cachep __read_mostly; |
94 | 94 | ||
95 | int sysctl_sctp_mem[3]; | 95 | long sysctl_sctp_mem[3]; |
96 | int sysctl_sctp_rmem[3]; | 96 | int sysctl_sctp_rmem[3]; |
97 | int sysctl_sctp_wmem[3]; | 97 | int sysctl_sctp_wmem[3]; |
98 | 98 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e34ca9cc1167..6bd554323a34 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -111,12 +111,12 @@ static void sctp_sock_migrate(struct sock *, struct sock *, | |||
111 | static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; | 111 | static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; |
112 | 112 | ||
113 | extern struct kmem_cache *sctp_bucket_cachep; | 113 | extern struct kmem_cache *sctp_bucket_cachep; |
114 | extern int sysctl_sctp_mem[3]; | 114 | extern long sysctl_sctp_mem[3]; |
115 | extern int sysctl_sctp_rmem[3]; | 115 | extern int sysctl_sctp_rmem[3]; |
116 | extern int sysctl_sctp_wmem[3]; | 116 | extern int sysctl_sctp_wmem[3]; |
117 | 117 | ||
118 | static int sctp_memory_pressure; | 118 | static int sctp_memory_pressure; |
119 | static atomic_t sctp_memory_allocated; | 119 | static atomic_long_t sctp_memory_allocated; |
120 | struct percpu_counter sctp_sockets_allocated; | 120 | struct percpu_counter sctp_sockets_allocated; |
121 | 121 | ||
122 | static void sctp_enter_memory_pressure(struct sock *sk) | 122 | static void sctp_enter_memory_pressure(struct sock *sk) |
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 832590bbe0c0..50cb57f0919e 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c | |||
@@ -54,7 +54,7 @@ static int sack_timer_max = 500; | |||
54 | static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ | 54 | static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ |
55 | static int rwnd_scale_max = 16; | 55 | static int rwnd_scale_max = 16; |
56 | 56 | ||
57 | extern int sysctl_sctp_mem[3]; | 57 | extern long sysctl_sctp_mem[3]; |
58 | extern int sysctl_sctp_rmem[3]; | 58 | extern int sysctl_sctp_rmem[3]; |
59 | extern int sysctl_sctp_wmem[3]; | 59 | extern int sysctl_sctp_wmem[3]; |
60 | 60 | ||
@@ -203,7 +203,7 @@ static ctl_table sctp_table[] = { | |||
203 | .data = &sysctl_sctp_mem, | 203 | .data = &sysctl_sctp_mem, |
204 | .maxlen = sizeof(sysctl_sctp_mem), | 204 | .maxlen = sizeof(sysctl_sctp_mem), |
205 | .mode = 0644, | 205 | .mode = 0644, |
206 | .proc_handler = proc_dointvec, | 206 | .proc_handler = proc_doulongvec_minmax |
207 | }, | 207 | }, |
208 | { | 208 | { |
209 | .procname = "sctp_rmem", | 209 | .procname = "sctp_rmem", |
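
The sctp_mem conversion from int[3] to long[3] follows the socket core, whose memory accounting counters became long-based (note the matching atomic_long_t sctp_memory_allocated change and the updated extern declarations in socket.c above). The part that is easy to get wrong is the sysctl glue: the proc handler must match the element width of .data, or /proc reads and writes will move the wrong number of bytes per element on 64-bit. Hence proc_dointvec is replaced with proc_doulongvec_minmax. An illustrative table entry:

    static long my_mem[3];      /* element width must match the handler */

    static struct ctl_table my_table[] = {
        {
            .procname     = "my_mem",
            .data         = &my_mem,
            .maxlen       = sizeof(my_mem),        /* 3 * sizeof(long) */
            .mode         = 0644,
            /* proc_dointvec would read/write 4 bytes per element even
             * though the array now holds longs; use the long variant. */
            .proc_handler = proc_doulongvec_minmax,
        },
        { }                     /* terminator */
    };
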
diff --git a/net/socket.c b/net/socket.c index ee3cd280c76e..3ca2fd9e3720 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -305,19 +305,17 @@ static const struct super_operations sockfs_ops = { | |||
305 | .statfs = simple_statfs, | 305 | .statfs = simple_statfs, |
306 | }; | 306 | }; |
307 | 307 | ||
308 | static int sockfs_get_sb(struct file_system_type *fs_type, | 308 | static struct dentry *sockfs_mount(struct file_system_type *fs_type, |
309 | int flags, const char *dev_name, void *data, | 309 | int flags, const char *dev_name, void *data) |
310 | struct vfsmount *mnt) | ||
311 | { | 310 | { |
312 | return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC, | 311 | return mount_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC); |
313 | mnt); | ||
314 | } | 312 | } |
315 | 313 | ||
316 | static struct vfsmount *sock_mnt __read_mostly; | 314 | static struct vfsmount *sock_mnt __read_mostly; |
317 | 315 | ||
318 | static struct file_system_type sock_fs_type = { | 316 | static struct file_system_type sock_fs_type = { |
319 | .name = "sockfs", | 317 | .name = "sockfs", |
320 | .get_sb = sockfs_get_sb, | 318 | .mount = sockfs_mount, |
321 | .kill_sb = kill_anon_super, | 319 | .kill_sb = kill_anon_super, |
322 | }; | 320 | }; |
323 | 321 | ||
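
sockfs is converted from the old .get_sb superblock interface to the new .mount one, part of a tree-wide VFS change in this window. The old callback returned an int and filled in a caller-provided vfsmount; the new one returns the root dentry, or an ERR_PTR, directly. Side by side for a pseudo filesystem (the myfs names are illustrative):

    /* Old interface: int return, result delivered through *mnt. */
    static int myfs_get_sb(struct file_system_type *fs_type, int flags,
                           const char *dev_name, void *data,
                           struct vfsmount *mnt)
    {
        return get_sb_pseudo(fs_type, "myfs:", &myfs_ops, MYFS_MAGIC, mnt);
    }

    /* New interface: return the root dentry (or ERR_PTR) directly. */
    static struct dentry *myfs_mount(struct file_system_type *fs_type,
                                     int flags, const char *dev_name,
                                     void *data)
    {
        return mount_pseudo(fs_type, "myfs:", &myfs_ops, MYFS_MAGIC);
    }

    static struct file_system_type myfs_type = {
        .name    = "myfs",
        .mount   = myfs_mount,      /* was .get_sb = myfs_get_sb */
        .kill_sb = kill_anon_super,
    };

rpc_pipefs below makes the same move with mount_single(), renaming its vfsmount from rpc_mount to rpc_mnt along the way.
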
@@ -1654,6 +1652,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, | |||
1654 | struct iovec iov; | 1652 | struct iovec iov; |
1655 | int fput_needed; | 1653 | int fput_needed; |
1656 | 1654 | ||
1655 | if (len > INT_MAX) | ||
1656 | len = INT_MAX; | ||
1657 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | 1657 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
1658 | if (!sock) | 1658 | if (!sock) |
1659 | goto out; | 1659 | goto out; |
@@ -1711,6 +1711,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, | |||
1711 | int err, err2; | 1711 | int err, err2; |
1712 | int fput_needed; | 1712 | int fput_needed; |
1713 | 1713 | ||
1714 | if (size > INT_MAX) | ||
1715 | size = INT_MAX; | ||
1714 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | 1716 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
1715 | if (!sock) | 1717 | if (!sock) |
1716 | goto out; | 1718 | goto out; |
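
The sendto()/recvfrom() entry points now clamp the user-supplied length to INT_MAX. Transfer counts travel through signed int returns inside the socket layer, so a request above INT_MAX could be misinterpreted on the way back out; and because short reads and writes are always legal on sockets, truncating the request is a safe response where rejecting it is unnecessary. The guard itself is one comparison:

    #include <limits.h>
    #include <stddef.h>

    /* Socket paths report the transferred byte count as a signed int,
     * so cap a size_t request before it enters them.  A short transfer
     * is always a legal result for send()/recv(). */
    static inline size_t clamp_sock_len(size_t len)
    {
        return len > INT_MAX ? (size_t)INT_MAX : len;
    }
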
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 7df92d237cb8..10a17a37ec4e 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <linux/sunrpc/rpc_pipe_fs.h> | 28 | #include <linux/sunrpc/rpc_pipe_fs.h> |
29 | #include <linux/sunrpc/cache.h> | 29 | #include <linux/sunrpc/cache.h> |
30 | 30 | ||
31 | static struct vfsmount *rpc_mount __read_mostly; | 31 | static struct vfsmount *rpc_mnt __read_mostly; |
32 | static int rpc_mount_count; | 32 | static int rpc_mount_count; |
33 | 33 | ||
34 | static struct file_system_type rpc_pipe_fs_type; | 34 | static struct file_system_type rpc_pipe_fs_type; |
@@ -417,16 +417,16 @@ struct vfsmount *rpc_get_mount(void) | |||
417 | { | 417 | { |
418 | int err; | 418 | int err; |
419 | 419 | ||
420 | err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count); | 420 | err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mnt, &rpc_mount_count); |
421 | if (err != 0) | 421 | if (err != 0) |
422 | return ERR_PTR(err); | 422 | return ERR_PTR(err); |
423 | return rpc_mount; | 423 | return rpc_mnt; |
424 | } | 424 | } |
425 | EXPORT_SYMBOL_GPL(rpc_get_mount); | 425 | EXPORT_SYMBOL_GPL(rpc_get_mount); |
426 | 426 | ||
427 | void rpc_put_mount(void) | 427 | void rpc_put_mount(void) |
428 | { | 428 | { |
429 | simple_release_fs(&rpc_mount, &rpc_mount_count); | 429 | simple_release_fs(&rpc_mnt, &rpc_mount_count); |
430 | } | 430 | } |
431 | EXPORT_SYMBOL_GPL(rpc_put_mount); | 431 | EXPORT_SYMBOL_GPL(rpc_put_mount); |
432 | 432 | ||
@@ -1018,17 +1018,17 @@ rpc_fill_super(struct super_block *sb, void *data, int silent) | |||
1018 | return 0; | 1018 | return 0; |
1019 | } | 1019 | } |
1020 | 1020 | ||
1021 | static int | 1021 | static struct dentry * |
1022 | rpc_get_sb(struct file_system_type *fs_type, | 1022 | rpc_mount(struct file_system_type *fs_type, |
1023 | int flags, const char *dev_name, void *data, struct vfsmount *mnt) | 1023 | int flags, const char *dev_name, void *data) |
1024 | { | 1024 | { |
1025 | return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt); | 1025 | return mount_single(fs_type, flags, data, rpc_fill_super); |
1026 | } | 1026 | } |
1027 | 1027 | ||
1028 | static struct file_system_type rpc_pipe_fs_type = { | 1028 | static struct file_system_type rpc_pipe_fs_type = { |
1029 | .owner = THIS_MODULE, | 1029 | .owner = THIS_MODULE, |
1030 | .name = "rpc_pipefs", | 1030 | .name = "rpc_pipefs", |
1031 | .get_sb = rpc_get_sb, | 1031 | .mount = rpc_mount, |
1032 | .kill_sb = kill_litter_super, | 1032 | .kill_sb = kill_litter_super, |
1033 | }; | 1033 | }; |
1034 | 1034 | ||
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 33217fc3d697..e9f0d5004483 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -396,6 +396,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr, | |||
396 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; | 396 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; |
397 | struct tipc_sock *tsock = tipc_sk(sock->sk); | 397 | struct tipc_sock *tsock = tipc_sk(sock->sk); |
398 | 398 | ||
399 | memset(addr, 0, sizeof(*addr)); | ||
399 | if (peer) { | 400 | if (peer) { |
400 | if ((sock->state != SS_CONNECTED) && | 401 | if ((sock->state != SS_CONNECTED) && |
401 | ((peer != 2) || (sock->state != SS_DISCONNECTING))) | 402 | ((peer != 2) || (sock->state != SS_DISCONNECTING))) |
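
tipc's get_name() copies a struct sockaddr_tipc out to userspace, but not every path through the function writes every field, and the structure can contain padding. Zeroing the whole thing first ensures no uninitialized kernel stack bytes leak out. The pattern generalizes to any getsockname/getpeername-style handler (condensed; the real function also handles the peer cases visible in the hunk):

    static int my_get_name(struct socket *sock, struct sockaddr *uaddr,
                           int *uaddr_len, int peer)
    {
        struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;

        /* Zero first: any field or padding byte not written below would
         * otherwise reach userspace as uninitialized stack memory. */
        memset(addr, 0, sizeof(*addr));

        addr->family = AF_TIPC;
        /* ... fill in addrtype, scope, and the address itself ... */

        *uaddr_len = sizeof(*addr);
        return 0;
    }
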
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c506241f8637..4e78e3f26798 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -224,8 +224,8 @@ static int nl80211_prepare_netdev_dump(struct sk_buff *skb, | |||
224 | } | 224 | } |
225 | 225 | ||
226 | *rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); | 226 | *rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); |
227 | if (IS_ERR(dev)) { | 227 | if (IS_ERR(*rdev)) { |
228 | err = PTR_ERR(dev); | 228 | err = PTR_ERR(*rdev); |
229 | goto out_rtnl; | 229 | goto out_rtnl; |
230 | } | 230 | } |
231 | 231 | ||
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index d14bbf960c18..4b9f8912526c 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -1167,7 +1167,7 @@ static int ignore_request(struct wiphy *wiphy, | |||
1167 | return 0; | 1167 | return 0; |
1168 | return -EALREADY; | 1168 | return -EALREADY; |
1169 | } | 1169 | } |
1170 | return REG_INTERSECT; | 1170 | return 0; |
1171 | case NL80211_REGDOM_SET_BY_DRIVER: | 1171 | case NL80211_REGDOM_SET_BY_DRIVER: |
1172 | if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) { | 1172 | if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) { |
1173 | if (regdom_changes(pending_request->alpha2)) | 1173 | if (regdom_changes(pending_request->alpha2)) |
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index 771bab00754b..55187c8f6420 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c | |||
@@ -61,6 +61,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
61 | while (len > 0) { | 61 | while (len > 0) { |
62 | switch (*p & X25_FAC_CLASS_MASK) { | 62 | switch (*p & X25_FAC_CLASS_MASK) { |
63 | case X25_FAC_CLASS_A: | 63 | case X25_FAC_CLASS_A: |
64 | if (len < 2) | ||
65 | return 0; | ||
64 | switch (*p) { | 66 | switch (*p) { |
65 | case X25_FAC_REVERSE: | 67 | case X25_FAC_REVERSE: |
66 | if((p[1] & 0x81) == 0x81) { | 68 | if((p[1] & 0x81) == 0x81) { |
@@ -104,6 +106,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
104 | len -= 2; | 106 | len -= 2; |
105 | break; | 107 | break; |
106 | case X25_FAC_CLASS_B: | 108 | case X25_FAC_CLASS_B: |
109 | if (len < 3) | ||
110 | return 0; | ||
107 | switch (*p) { | 111 | switch (*p) { |
108 | case X25_FAC_PACKET_SIZE: | 112 | case X25_FAC_PACKET_SIZE: |
109 | facilities->pacsize_in = p[1]; | 113 | facilities->pacsize_in = p[1]; |
@@ -125,6 +129,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
125 | len -= 3; | 129 | len -= 3; |
126 | break; | 130 | break; |
127 | case X25_FAC_CLASS_C: | 131 | case X25_FAC_CLASS_C: |
132 | if (len < 4) | ||
133 | return 0; | ||
128 | printk(KERN_DEBUG "X.25: unknown facility %02X, " | 134 | printk(KERN_DEBUG "X.25: unknown facility %02X, " |
129 | "values %02X, %02X, %02X\n", | 135 | "values %02X, %02X, %02X\n", |
130 | p[0], p[1], p[2], p[3]); | 136 | p[0], p[1], p[2], p[3]); |
@@ -132,26 +138,26 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
132 | len -= 4; | 138 | len -= 4; |
133 | break; | 139 | break; |
134 | case X25_FAC_CLASS_D: | 140 | case X25_FAC_CLASS_D: |
141 | if (len < p[1] + 2) | ||
142 | return 0; | ||
135 | switch (*p) { | 143 | switch (*p) { |
136 | case X25_FAC_CALLING_AE: | 144 | case X25_FAC_CALLING_AE: |
137 | if (p[1] > X25_MAX_DTE_FACIL_LEN) | 145 | if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) |
138 | break; | 146 | return 0; |
139 | dte_facs->calling_len = p[2]; | 147 | dte_facs->calling_len = p[2]; |
140 | memcpy(dte_facs->calling_ae, &p[3], p[1] - 1); | 148 | memcpy(dte_facs->calling_ae, &p[3], p[1] - 1); |
141 | *vc_fac_mask |= X25_MASK_CALLING_AE; | 149 | *vc_fac_mask |= X25_MASK_CALLING_AE; |
142 | break; | 150 | break; |
143 | case X25_FAC_CALLED_AE: | 151 | case X25_FAC_CALLED_AE: |
144 | if (p[1] > X25_MAX_DTE_FACIL_LEN) | 152 | if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) |
145 | break; | 153 | return 0; |
146 | dte_facs->called_len = p[2]; | 154 | dte_facs->called_len = p[2]; |
147 | memcpy(dte_facs->called_ae, &p[3], p[1] - 1); | 155 | memcpy(dte_facs->called_ae, &p[3], p[1] - 1); |
148 | *vc_fac_mask |= X25_MASK_CALLED_AE; | 156 | *vc_fac_mask |= X25_MASK_CALLED_AE; |
149 | break; | 157 | break; |
150 | default: | 158 | default: |
151 | printk(KERN_DEBUG "X.25: unknown facility %02X," | 159 | printk(KERN_DEBUG "X.25: unknown facility %02X," |
152 | "length %d, values %02X, %02X, " | 160 | "length %d\n", p[0], p[1]); |
153 | "%02X, %02X\n", | ||
154 | p[0], p[1], p[2], p[3], p[4], p[5]); | ||
155 | break; | 161 | break; |
156 | } | 162 | } |
157 | len -= p[1] + 2; | 163 | len -= p[1] + 2; |
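
x25_parse_facilities() walks a classed TLV-style buffer in which the facility class encodes the record length: class A records are two bytes, B three, C four, and D carries an explicit length in p[1]. The old loop read operand bytes before checking that they were actually inside the buffer, and in class D a declared length of 0 or 1 made the `p[1] - 1` passed to memcpy wrap to a huge size. Every class now verifies the remaining length first and treats malformed input as a hard failure (return 0), which the x25_in.c hunk below converts into aborting the call setup rather than ignoring the error. A skeleton of the checked walk (handlers elided; the class-D branch also rejects oversize lengths in the real code):

    /* Returns bytes consumed, or 0 on malformed input. */
    static int parse_facilities(unsigned char *p, int len)
    {
        int parsed = 0;

        while (len > 0) {
            switch (*p & X25_FAC_CLASS_MASK) {
            case X25_FAC_CLASS_A:               /* code + 1 operand byte */
                if (len < 2)
                    return 0;
                p += 2; len -= 2; parsed += 2;
                break;
            case X25_FAC_CLASS_B:               /* code + 2 operand bytes */
                if (len < 3)
                    return 0;
                p += 3; len -= 3; parsed += 3;
                break;
            case X25_FAC_CLASS_C:               /* code + 3 operand bytes */
                if (len < 4)
                    return 0;
                p += 4; len -= 4; parsed += 4;
                break;
            case X25_FAC_CLASS_D: {             /* code + length + p[1] bytes */
                int n;

                if (len < p[1] + 2)
                    return 0;
                if (p[1] <= 1)                  /* p[1] - 1 would wrap in memcpy */
                    return 0;
                /* ... copy p[1] - 1 address bytes from &p[3], in bounds ... */
                n = p[1] + 2;                   /* save before advancing p */
                p += n; len -= n; parsed += n;
                break;
            }
            }
        }
        return parsed;
    }
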
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index 63178961efac..f729f022be69 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c | |||
@@ -119,6 +119,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
119 | &x25->vc_facil_mask); | 119 | &x25->vc_facil_mask); |
120 | if (len > 0) | 120 | if (len > 0) |
121 | skb_pull(skb, len); | 121 | skb_pull(skb, len); |
122 | else | ||
123 | return -1; | ||
122 | /* | 124 | /* |
123 | * Copy any Call User Data. | 125 | * Copy any Call User Data. |
124 | */ | 126 | */ |