Diffstat (limited to 'net')
43 files changed, 349 insertions(+), 159 deletions(-)
diff --git a/net/Makefile b/net/Makefile
index a3330ebe2c53..a51d9465e628 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -19,9 +19,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/
 obj-$(CONFIG_INET) += ipv4/
 obj-$(CONFIG_XFRM) += xfrm/
 obj-$(CONFIG_UNIX) += unix/
-ifneq ($(CONFIG_IPV6),)
-obj-y += ipv6/
-endif
+obj-$(CONFIG_NET) += ipv6/
 obj-$(CONFIG_PACKET) += packet/
 obj-$(CONFIG_NET_KEY) += key/
 obj-$(CONFIG_BRIDGE) += bridge/
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 2575c2db6404..d7b9af4703d0 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -727,7 +727,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
 break;
 }

+tty_unlock();
 schedule();
+tty_lock();
 }
 set_current_state(TASK_RUNNING);
 remove_wait_queue(&dev->wait, &wait);
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 9190ae462cb4..6dee7bf648a9 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -6,6 +6,7 @@ config BRIDGE
 tristate "802.1d Ethernet Bridging"
 select LLC
 select STP
+depends on IPV6 || IPV6=n
 ---help---
 If you say Y here, then your Linux box will be able to act as an
 Ethernet bridge, which means that the different Ethernet segments it
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 09d5c0987925..030a002ff8ee 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -37,10 +37,9 @@
 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))

 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
+static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
 {
-if (ipv6_addr_is_multicast(addr) &&
-IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
+if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
 return 1;
 return 0;
 }
@@ -435,7 +434,6 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
 eth = eth_hdr(skb);

 memcpy(eth->h_source, br->dev->dev_addr, 6);
-ipv6_eth_mc_map(group, eth->h_dest);
 eth->h_proto = htons(ETH_P_IPV6);
 skb_put(skb, sizeof(*eth));

@@ -447,8 +445,10 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
 ip6h->payload_len = htons(8 + sizeof(*mldq));
 ip6h->nexthdr = IPPROTO_HOPOPTS;
 ip6h->hop_limit = 1;
-ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
+ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+&ip6h->saddr);
 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
+ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

 hopopt = (u8 *)(ip6h + 1);
 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
@@ -780,11 +780,11 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
 {
 struct br_ip br_group;

-if (ipv6_is_local_multicast(group))
+if (!ipv6_is_transient_multicast(group))
 return 0;

 ipv6_addr_copy(&br_group.u.ip6, group);
-br_group.proto = htons(ETH_P_IP);
+br_group.proto = htons(ETH_P_IPV6);

 return br_multicast_add_group(br, port, &br_group);
 }
@@ -1013,18 +1013,19 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,

 nsrcs = skb_header_pointer(skb,
 len + offsetof(struct mld2_grec,
-grec_mca),
+grec_nsrcs),
 sizeof(_nsrcs), &_nsrcs);
 if (!nsrcs)
 return -EINVAL;

 if (!pskb_may_pull(skb,
 len + sizeof(*grec) +
-sizeof(struct in6_addr) * (*nsrcs)))
+sizeof(struct in6_addr) * ntohs(*nsrcs)))
 return -EINVAL;

 grec = (struct mld2_grec *)(skb->data + len);
-len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs);
+len += sizeof(*grec) +
+sizeof(struct in6_addr) * ntohs(*nsrcs);

 /* We treat these as MLDv1 reports for now. */
 switch (grec->grec_type) {
@@ -1340,7 +1341,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 {
 struct br_ip br_group;

-if (ipv6_is_local_multicast(group))
+if (!ipv6_is_transient_multicast(group))
 return;

 ipv6_addr_copy(&br_group.u.ip6, group);
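[Editor's note] In the MLDv2 report hunk above, the per-record source count has to be read from the 16-bit grec_nsrcs field (and converted with ntohs()) rather than from the offset of grec_mca; otherwise both the pskb_may_pull() bound and the record-length advance are computed from the wrong bytes. A rough sketch of the record layout that parsing relies on; field names follow the MLDv2 multicast address record, and the exact kernel declaration may differ slightly:

/* Sketch of an MLDv2 multicast address record, not quoted from the
 * kernel headers. */
#include <linux/types.h>
#include <linux/in6.h>

struct mld2_grec {
	__u8		grec_type;	/* record type */
	__u8		grec_auxwords;	/* auxiliary data length, 32-bit words */
	__be16		grec_nsrcs;	/* number of sources, big-endian on the wire */
	struct in6_addr	grec_mca;	/* the multicast address itself */
	struct in6_addr	grec_src[0];	/* grec_nsrcs source addresses follow */
};

/* Record length as computed after the fix:
 *	len += sizeof(struct mld2_grec) + sizeof(struct in6_addr) * ntohs(*nsrcs);
 */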
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index dff633d62e5b..05f357828a2f 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -252,8 +252,12 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
 {
 struct kvec iov = {buf, len};
 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+int r;

-return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
+r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
+if (r == -EAGAIN)
+r = 0;
+return r;
 }

 /*
@@ -264,13 +268,17 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
 size_t kvlen, size_t len, int more)
 {
 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+int r;

 if (more)
 msg.msg_flags |= MSG_MORE;
 else
 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */

-return kernel_sendmsg(sock, &msg, iov, kvlen, len);
+r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
+if (r == -EAGAIN)
+r = 0;
+return r;
 }


@@ -328,7 +336,6 @@ static void reset_connection(struct ceph_connection *con)
 ceph_msg_put(con->out_msg);
 con->out_msg = NULL;
 }
-con->out_keepalive_pending = false;
 con->in_seq = 0;
 con->in_seq_acked = 0;
 }
@@ -847,6 +854,8 @@ static int write_partial_msg_pages(struct ceph_connection *con)
 (msg->pages || msg->pagelist || msg->bio || in_trail))
 kunmap(page);

+if (ret == -EAGAIN)
+ret = 0;
 if (ret <= 0)
 goto out;

@@ -1238,8 +1247,6 @@ static int process_connect(struct ceph_connection *con)
 con->auth_retry);
 if (con->auth_retry == 2) {
 con->error_msg = "connect authorization failure";
-reset_connection(con);
-set_bit(CLOSED, &con->state);
 return -1;
 }
 con->auth_retry = 1;
@@ -1705,14 +1712,6 @@ more:

 /* open the socket first? */
 if (con->sock == NULL) {
-/*
-* if we were STANDBY and are reconnecting _this_
-* connection, bump connect_seq now. Always bump
-* global_seq.
-*/
-if (test_and_clear_bit(STANDBY, &con->state))
-con->connect_seq++;
-
 prepare_write_banner(msgr, con);
 prepare_write_connect(msgr, con, 1);
 prepare_read_banner(con);
@@ -1737,16 +1736,12 @@ more_kvec:
 if (con->out_skip) {
 ret = write_partial_skip(con);
 if (ret <= 0)
-goto done;
-if (ret < 0) {
-dout("try_write write_partial_skip err %d\n", ret);
-goto done;
-}
+goto out;
 }
 if (con->out_kvec_left) {
 ret = write_partial_kvec(con);
 if (ret <= 0)
-goto done;
+goto out;
 }

 /* msg pages? */
@@ -1761,11 +1756,11 @@ more_kvec:
 if (ret == 1)
 goto more_kvec; /* we need to send the footer, too! */
 if (ret == 0)
-goto done;
+goto out;
 if (ret < 0) {
 dout("try_write write_partial_msg_pages err %d\n",
 ret);
-goto done;
+goto out;
 }
 }

@@ -1789,10 +1784,9 @@ do_next:
 /* Nothing to do! */
 clear_bit(WRITE_PENDING, &con->state);
 dout("try_write nothing else to write.\n");
-done:
 ret = 0;
 out:
-dout("try_write done on %p\n", con);
+dout("try_write done on %p ret %d\n", con, ret);
 return ret;
 }

@@ -1821,19 +1815,17 @@ more:
 dout("try_read connecting\n");
 ret = read_partial_banner(con);
 if (ret <= 0)
-goto done;
-if (process_banner(con) < 0) {
-ret = -1;
 goto out;
-}
+ret = process_banner(con);
+if (ret < 0)
+goto out;
 }
 ret = read_partial_connect(con);
 if (ret <= 0)
-goto done;
-if (process_connect(con) < 0) {
-ret = -1;
 goto out;
-}
+ret = process_connect(con);
+if (ret < 0)
+goto out;
 goto more;
 }

@@ -1848,7 +1840,7 @@ more:
 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
 if (ret <= 0)
-goto done;
+goto out;
 con->in_base_pos += ret;
 if (con->in_base_pos)
 goto more;
@@ -1859,7 +1851,7 @@ more:
 */
 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
 if (ret <= 0)
-goto done;
+goto out;
 dout("try_read got tag %d\n", (int)con->in_tag);
 switch (con->in_tag) {
 case CEPH_MSGR_TAG_MSG:
@@ -1870,7 +1862,7 @@ more:
 break;
 case CEPH_MSGR_TAG_CLOSE:
 set_bit(CLOSED, &con->state); /* fixme */
-goto done;
+goto out;
 default:
 goto bad_tag;
 }
@@ -1882,13 +1874,12 @@ more:
 case -EBADMSG:
 con->error_msg = "bad crc";
 ret = -EIO;
-goto out;
+break;
 case -EIO:
 con->error_msg = "io error";
-goto out;
-default:
-goto done;
+break;
 }
+goto out;
 }
 if (con->in_tag == CEPH_MSGR_TAG_READY)
 goto more;
@@ -1898,15 +1889,13 @@ more:
 if (con->in_tag == CEPH_MSGR_TAG_ACK) {
 ret = read_partial_ack(con);
 if (ret <= 0)
-goto done;
+goto out;
 process_ack(con);
 goto more;
 }

-done:
-ret = 0;
 out:
-dout("try_read done on %p\n", con);
+dout("try_read done on %p ret %d\n", con, ret);
 return ret;

 bad_tag:
@@ -1951,7 +1940,24 @@ static void con_work(struct work_struct *work)
 work.work);

 mutex_lock(&con->mutex);
+if (test_and_clear_bit(BACKOFF, &con->state)) {
+dout("con_work %p backing off\n", con);
+if (queue_delayed_work(ceph_msgr_wq, &con->work,
+round_jiffies_relative(con->delay))) {
+dout("con_work %p backoff %lu\n", con, con->delay);
+mutex_unlock(&con->mutex);
+return;
+} else {
+con->ops->put(con);
+dout("con_work %p FAILED to back off %lu\n", con,
+con->delay);
+}
+}

+if (test_bit(STANDBY, &con->state)) {
+dout("con_work %p STANDBY\n", con);
+goto done;
+}
 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
 dout("con_work CLOSED\n");
 con_close_socket(con);
@@ -2008,10 +2014,12 @@ static void ceph_fault(struct ceph_connection *con)
 /* Requeue anything that hasn't been acked */
 list_splice_init(&con->out_sent, &con->out_queue);

-/* If there are no messages in the queue, place the connection
-* in a STANDBY state (i.e., don't try to reconnect just yet). */
-if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
-dout("fault setting STANDBY\n");
+/* If there are no messages queued or keepalive pending, place
+* the connection in a STANDBY state */
+if (list_empty(&con->out_queue) &&
+!test_bit(KEEPALIVE_PENDING, &con->state)) {
+dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
+clear_bit(WRITE_PENDING, &con->state);
 set_bit(STANDBY, &con->state);
 } else {
 /* retry after a delay. */
@@ -2019,11 +2027,24 @@ static void ceph_fault(struct ceph_connection *con)
 con->delay = BASE_DELAY_INTERVAL;
 else if (con->delay < MAX_DELAY_INTERVAL)
 con->delay *= 2;
-dout("fault queueing %p delay %lu\n", con, con->delay);
 con->ops->get(con);
 if (queue_delayed_work(ceph_msgr_wq, &con->work,
-round_jiffies_relative(con->delay)) == 0)
+round_jiffies_relative(con->delay))) {
+dout("fault queued %p delay %lu\n", con, con->delay);
+} else {
 con->ops->put(con);
+dout("fault failed to queue %p delay %lu, backoff\n",
+con, con->delay);
+/*
+* In many cases we see a socket state change
+* while con_work is running and end up
+* queuing (non-delayed) work, such that we
+* can't backoff with a delay. Set a flag so
+* that when con_work restarts we schedule the
+* delay then.
+*/
+set_bit(BACKOFF, &con->state);
+}
 }

 out_unlock:
@@ -2094,6 +2115,19 @@ void ceph_messenger_destroy(struct ceph_messenger *msgr)
 }
 EXPORT_SYMBOL(ceph_messenger_destroy);

+static void clear_standby(struct ceph_connection *con)
+{
+/* come back from STANDBY? */
+if (test_and_clear_bit(STANDBY, &con->state)) {
+mutex_lock(&con->mutex);
+dout("clear_standby %p and ++connect_seq\n", con);
+con->connect_seq++;
+WARN_ON(test_bit(WRITE_PENDING, &con->state));
+WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state));
+mutex_unlock(&con->mutex);
+}
+}
+
 /*
 * Queue up an outgoing message on the given connection.
 */
@@ -2126,6 +2160,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)

 /* if there wasn't anything waiting to send before, queue
 * new work */
+clear_standby(con);
 if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
 queue_con(con);
 }
@@ -2191,6 +2226,8 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
 */
 void ceph_con_keepalive(struct ceph_connection *con)
 {
+dout("con_keepalive %p\n", con);
+clear_standby(con);
 if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
 test_and_set_bit(WRITE_PENDING, &con->state) == 0)
 queue_con(con);
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 1a040e64c69f..cd9c21df87d1 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -16,22 +16,30 @@ struct page **ceph_get_direct_page_vector(const char __user *data,
 int num_pages, bool write_page)
 {
 struct page **pages;
-int rc;
+int got = 0;
+int rc = 0;

 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
 if (!pages)
 return ERR_PTR(-ENOMEM);

 down_read(&current->mm->mmap_sem);
-rc = get_user_pages(current, current->mm, (unsigned long)data,
-num_pages, write_page, 0, pages, NULL);
+while (got < num_pages) {
+rc = get_user_pages(current, current->mm,
+(unsigned long)data + ((unsigned long)got * PAGE_SIZE),
+num_pages - got, write_page, 0, pages + got, NULL);
+if (rc < 0)
+break;
+BUG_ON(rc == 0);
+got += rc;
+}
 up_read(&current->mm->mmap_sem);
-if (rc < num_pages)
+if (rc < 0)
 goto fail;
 return pages;

 fail:
-ceph_put_page_vector(pages, rc > 0 ? rc : 0, false);
+ceph_put_page_vector(pages, got, false);
 return ERR_PTR(rc);
 }
 EXPORT_SYMBOL(ceph_get_direct_page_vector);
diff --git a/net/core/dev.c b/net/core/dev.c
index 8ae6631abcc2..6561021d22d1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1114,13 +1114,21 @@ EXPORT_SYMBOL(netdev_bonding_change);
 void dev_load(struct net *net, const char *name)
 {
 struct net_device *dev;
+int no_module;

 rcu_read_lock();
 dev = dev_get_by_name_rcu(net, name);
 rcu_read_unlock();

-if (!dev && capable(CAP_NET_ADMIN))
-request_module("%s", name);
+no_module = !dev;
+if (no_module && capable(CAP_NET_ADMIN))
+no_module = request_module("netdev-%s", name);
+if (no_module && capable(CAP_SYS_MODULE)) {
+if (!request_module("%s", name))
+pr_err("Loading kernel module for a network device "
+"with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
+"instead\n", name);
+}
 }
 EXPORT_SYMBOL(dev_load);

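[Editor's note] With the dev_load() change above, a missing interface first triggers request_module("netdev-%s", name) under CAP_NET_ADMIN; the bare device name is only tried as a module name under CAP_SYS_MODULE, with a deprecation message. That is why the tunnel drivers later in this diff (ipip, sit, GRE, ip6_tnl) replace MODULE_ALIAS("tunl0") and friends with MODULE_ALIAS_NETDEV(). A minimal sketch of how a driver advertises the new alias; the expansion described in the comment is an assumption, not quoted from the patch:

/* Hypothetical driver fragment, for illustration only. */
#include <linux/module.h>
#include <linux/netdevice.h>

/* MODULE_ALIAS_NETDEV("tunl0") presumably results in a module alias of the
 * form "netdev-tunl0", which is exactly what the new
 * request_module("netdev-%s", name) call in dev_load() asks for. */
MODULE_ALIAS_NETDEV("tunl0");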
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 508f9c18992f..133fd22ea287 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -144,7 +144,7 @@ void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,

 list_for_each_entry(ha, &from_list->list, list) {
 type = addr_type ? addr_type : ha->type;
-__hw_addr_del(to_list, ha->addr, addr_len, addr_type);
+__hw_addr_del(to_list, ha->addr, addr_len, type);
 }
 }
 EXPORT_SYMBOL(__hw_addr_del_multiple);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a9e7fc4c461f..b5bada92f637 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3321,7 +3321,7 @@ static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
 pkt_dev->started_at);
 ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);

-p += sprintf(p, "OK: %llu(c%llu+d%llu) nsec, %llu (%dbyte,%dfrags)\n",
+p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
 (unsigned long long)ktime_to_us(elapsed),
 (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
 (unsigned long long)ktime_to_us(idle),
diff --git a/net/core/scm.c b/net/core/scm.c
index bbe454450801..4c1ef026d695 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -95,7 +95,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
 int fd = fdp[i];
 struct file *file;

-if (fd < 0 || !(file = fget(fd)))
+if (fd < 0 || !(file = fget_raw(fd)))
 return -EBADF;
 *fpp++ = file;
 fpl->count++;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d5074a567289..c44348adba3b 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1193,7 +1193,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
 goto err;
 }

-if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) {
+if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
 err = ops->ieee_setpfc(netdev, pfc);
 if (err)
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 8cde009e8b85..4222e7a654b0 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -614,6 +614,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 /* Caller (dccp_v4_do_rcv) will send Reset */
 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
 return 1;
+} else if (sk->sk_state == DCCP_CLOSED) {
+dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
+return 1;
 }

 if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
@@ -668,10 +671,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 }

 switch (sk->sk_state) {
-case DCCP_CLOSED:
-dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
-return 1;
-
 case DCCP_REQUESTING:
 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
 if (queued >= 0)
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 739435a6af39..cfa7a5e1c5c9 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -67,8 +67,9 @@ dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
 size_t result_len = 0;
 const char *data = _data, *end, *opt;

-kenter("%%%d,%s,'%s',%zu",
-key->serial, key->description, data, datalen);
+kenter("%%%d,%s,'%*.*s',%zu",
+key->serial, key->description,
+(int)datalen, (int)datalen, data, datalen);

 if (datalen <= 1 || !data || data[datalen - 1] != '\0')
 return -EINVAL;
@@ -217,6 +218,19 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
 seq_printf(m, ": %u", key->datalen);
 }

+/*
+* read the DNS data
+* - the key's semaphore is read-locked
+*/
+static long dns_resolver_read(const struct key *key,
+char __user *buffer, size_t buflen)
+{
+if (key->type_data.x[0])
+return key->type_data.x[0];
+
+return user_read(key, buffer, buflen);
+}
+
 struct key_type key_type_dns_resolver = {
 .name = "dns_resolver",
 .instantiate = dns_resolver_instantiate,
@@ -224,7 +238,7 @@ struct key_type key_type_dns_resolver = {
 .revoke = user_revoke,
 .destroy = user_destroy,
 .describe = dns_resolver_describe,
-.read = user_read,
+.read = dns_resolver_read,
 };

 static int __init init_dns_resolver(void)
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index df4616fce929..036652c8166d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -670,7 +670,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 ifap = &ifa->ifa_next) {
 if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
 sin_orig.sin_addr.s_addr ==
-ifa->ifa_address) {
+ifa->ifa_local) {
 break; /* found */
 }
 }
@@ -1040,8 +1040,8 @@ static void inetdev_send_gratuitous_arp(struct net_device *dev,
 return;

 arp_send(ARPOP_REQUEST, ETH_P_ARP,
-ifa->ifa_address, dev,
-ifa->ifa_address, NULL,
+ifa->ifa_local, dev,
+ifa->ifa_local, NULL,
 dev->dev_addr, NULL);
 }

diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index c5af909cf701..3c8dfa16614d 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -505,7 +505,9 @@ restart:
 }

 rcu_read_unlock();
+local_bh_disable();
 inet_twsk_deschedule(tw, twdr);
+local_bh_enable();
 inet_twsk_put(tw);
 goto restart_rcu;
 }
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 6613edfac28c..d1d0e2c256fc 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1765,4 +1765,4 @@ module_exit(ipgre_fini);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_RTNL_LINK("gre");
 MODULE_ALIAS_RTNL_LINK("gretap");
-MODULE_ALIAS("gre0");
+MODULE_ALIAS_NETDEV("gre0");
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 988f52fba54a..a5f58e7cbb26 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -913,4 +913,4 @@ static void __exit ipip_fini(void)
 module_init(ipip_init);
 module_exit(ipip_fini);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("tunl0");
+MODULE_ALIAS_NETDEV("tunl0");
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eb7f82ebf4a3..65f6c0406245 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1222,7 +1222,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
 }

 /* D-SACK for already forgotten data... Do dumb counting. */
-if (dup_sack &&
+if (dup_sack && tp->undo_marker && tp->undo_retrans &&
 !after(end_seq_0, prior_snd_una) &&
 after(end_seq_0, tp->undo_marker))
 tp->undo_retrans--;
@@ -1299,7 +1299,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,

 /* Account D-SACK for retransmitted packet. */
 if (dup_sack && (sacked & TCPCB_RETRANS)) {
-if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+if (tp->undo_marker && tp->undo_retrans &&
+after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
 tp->undo_retrans--;
 if (sacked & TCPCB_SACKED_ACKED)
 state->reord = min(fack_count, state->reord);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 406f320336e6..dfa5beb0c1c8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2162,7 +2162,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 if (!tp->retrans_stamp)
 tp->retrans_stamp = TCP_SKB_CB(skb)->when;

-tp->undo_retrans++;
+tp->undo_retrans += tcp_skb_pcount(skb);

 /* snd_nxt is stored to detect loss of retransmitted segment,
 * see tcp_input.c tcp_sacktag_write_queue().
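[Editor's note] The tcp_output.c and tcp_input.c hunks above belong together: with TSO/GSO a single retransmitted skb can carry several MSS-sized segments, so the retransmit path now credits undo_retrans by tcp_skb_pcount(skb), while the D-SACK paths only decrement when an undo is actually in progress (tp->undo_marker set) and there is still something left to account for. A toy, userspace-only illustration of why the counts now balance (assumed values, not kernel code):

/* Illustrative only: one TSO retransmission covering four segments. */
#include <stdio.h>

int main(void)
{
	unsigned int pcount = 4;        /* tcp_skb_pcount() of the retransmitted skb */
	unsigned int undo_retrans = 0;

	undo_retrans += pcount;         /* tcp_retransmit_skb(): count all 4 segments */

	/* The receiver may D-SACK each duplicate segment separately; with the
	 * added guard the counter is decremented at most 'pcount' times and
	 * can never wrap below zero. */
	for (int i = 0; i < 5; i++)     /* pretend five D-SACK notifications arrive */
		if (undo_retrans)
			undo_retrans--;

	printf("undo_retrans = %u\n", undo_retrans);    /* prints 0 */
	return 0;
}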
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 4f4483e697bd..e528a42a52be 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -57,6 +57,7 @@
 MODULE_AUTHOR("Ville Nuorvala");
 MODULE_DESCRIPTION("IPv6 tunneling device");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETDEV("ip6tnl0");

 #ifdef IP6_TNL_DEBUG
 #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 09c88891a753..de338037a736 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -410,7 +410,7 @@ fallback:
 if (p != NULL) {
 sb_add(m, "%02x", *p++);
 for (i = 1; i < len; i++)
-sb_add(m, ":%02x", p[i]);
+sb_add(m, ":%02x", *p++);
 }
 sb_add(m, " ");

diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a998db6e7895..e7db7014e89f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -739,8 +739,10 @@ restart:

 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
-else
+else if (!(rt->dst.flags & DST_HOST))
 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
+else
+goto out2;

 dst_release(&rt->dst);
 rt = nrt ? : net->ipv6.ip6_null_entry;
@@ -2557,14 +2559,16 @@ static
 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-struct net *net = current->nsproxy->net_ns;
-int delay = net->ipv6.sysctl.flush_delay;
-if (write) {
-proc_dointvec(ctl, write, buffer, lenp, ppos);
-fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
-return 0;
-} else
+struct net *net;
+int delay;
+if (!write)
 return -EINVAL;
+
+net = (struct net *)ctl->extra1;
+delay = net->ipv6.sysctl.flush_delay;
+proc_dointvec(ctl, write, buffer, lenp, ppos);
+fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
+return 0;
 }

 ctl_table ipv6_route_table_template[] = {
@@ -2651,6 +2655,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)

 if (table) {
 table[0].data = &net->ipv6.sysctl.flush_delay;
+table[0].extra1 = net;
 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8ce38f10a547..d2c16e10f650 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1290,4 +1290,4 @@ static int __init sit_init(void)
 module_init(sit_init);
 module_exit(sit_cleanup);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("sit0");
+MODULE_ALIAS_NETDEV("sit0");
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8acba456744e..7a10a8d1b2d0 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1229,6 +1229,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
 }
 mutex_unlock(&local->iflist_mtx);
 unregister_netdevice_many(&unreg_list);
+list_del(&unreg_list);
 }

 static u32 ieee80211_idle_off(struct ieee80211_local *local,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 45fbb9e33746..c9ceb4d57ab0 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1033,6 +1033,12 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
 if (is_multicast_ether_addr(hdr->addr1))
 return;

+/*
+* In case we receive frames after disassociation.
+*/
+if (!sdata->u.mgd.associated)
+return;
+
 ieee80211_sta_reset_conn_monitor(sdata);
 }

diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 22f7ad5101ab..ba98e1308f3c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -808,9 +808,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
 dest->u_threshold = udest->u_threshold;
 dest->l_threshold = udest->l_threshold;

-spin_lock(&dest->dst_lock);
+spin_lock_bh(&dest->dst_lock);
 ip_vs_dst_reset(dest);
-spin_unlock(&dest->dst_lock);
+spin_unlock_bh(&dest->dst_lock);

 if (add)
 ip_vs_new_estimator(&dest->stats);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index b07393eab88e..91816998ed86 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -85,6 +85,8 @@ EXPORT_SYMBOL(nf_log_unregister);

 int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
 {
+if (pf >= ARRAY_SIZE(nf_loggers))
+return -EINVAL;
 mutex_lock(&nf_log_mutex);
 if (__find_logger(pf, logger->name) == NULL) {
 mutex_unlock(&nf_log_mutex);
@@ -98,6 +100,8 @@ EXPORT_SYMBOL(nf_log_bind_pf);

 void nf_log_unbind_pf(u_int8_t pf)
 {
+if (pf >= ARRAY_SIZE(nf_loggers))
+return;
 mutex_lock(&nf_log_mutex);
 rcu_assign_pointer(nf_loggers[pf], NULL);
 mutex_unlock(&nf_log_mutex);
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
index 4d87befb04c0..474d621cbc2e 100644
--- a/net/netfilter/nf_tproxy_core.c
+++ b/net/netfilter/nf_tproxy_core.c
@@ -28,26 +28,23 @@ nf_tproxy_destructor(struct sk_buff *skb)
 skb->destructor = NULL;

 if (sk)
-nf_tproxy_put_sock(sk);
+sock_put(sk);
 }

 /* consumes sk */
-int
+void
 nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
 {
-bool transparent = (sk->sk_state == TCP_TIME_WAIT) ?
-inet_twsk(sk)->tw_transparent :
-inet_sk(sk)->transparent;
-
-if (transparent) {
-skb_orphan(skb);
-skb->sk = sk;
-skb->destructor = nf_tproxy_destructor;
-return 1;
-} else
-nf_tproxy_put_sock(sk);
-
-return 0;
+/* assigning tw sockets complicates things; most
+* skb->sk->X checks would have to test sk->sk_state first */
+if (sk->sk_state == TCP_TIME_WAIT) {
+inet_twsk_put(inet_twsk(sk));
+return;
+}
+
+skb_orphan(skb);
+skb->sk = sk;
+skb->destructor = nf_tproxy_destructor;
 }
 EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);

diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 640678f47a2a..dcfd57eb9d02 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -33,6 +33,20 @@
 #include <net/netfilter/nf_tproxy_core.h>
 #include <linux/netfilter/xt_TPROXY.h>

+static bool tproxy_sk_is_transparent(struct sock *sk)
+{
+if (sk->sk_state != TCP_TIME_WAIT) {
+if (inet_sk(sk)->transparent)
+return true;
+sock_put(sk);
+} else {
+if (inet_twsk(sk)->tw_transparent)
+return true;
+inet_twsk_put(inet_twsk(sk));
+}
+return false;
+}
+
 static inline __be32
 tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
 {
@@ -141,7 +155,7 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
 skb->dev, NFT_LOOKUP_LISTENER);

 /* NOTE: assign_sock consumes our sk reference */
-if (sk && nf_tproxy_assign_sock(skb, sk)) {
+if (sk && tproxy_sk_is_transparent(sk)) {
 /* This should be in a separate target, but we don't do multiple
 targets on the same rule yet */
 skb->mark = (skb->mark & ~mark_mask) ^ mark_value;
@@ -149,6 +163,8 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
 pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n",
 iph->protocol, &iph->daddr, ntohs(hp->dest),
 &laddr, ntohs(lport), skb->mark);
+
+nf_tproxy_assign_sock(skb, sk);
 return NF_ACCEPT;
 }

@@ -306,7 +322,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
 par->in, NFT_LOOKUP_LISTENER);

 /* NOTE: assign_sock consumes our sk reference */
-if (sk && nf_tproxy_assign_sock(skb, sk)) {
+if (sk && tproxy_sk_is_transparent(sk)) {
 /* This should be in a separate target, but we don't do multiple
 targets on the same rule yet */
 skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value;
@@ -314,6 +330,8 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
 pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n",
 tproto, &iph->saddr, ntohs(hp->source),
 laddr, ntohs(lport), skb->mark);
+
+nf_tproxy_assign_sock(skb, sk);
 return NF_ACCEPT;
 }

diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 00d6ae838303..9cc46356b577 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -35,6 +35,15 @@
 #include <net/netfilter/nf_conntrack.h>
 #endif

+static void
+xt_socket_put_sk(struct sock *sk)
+{
+if (sk->sk_state == TCP_TIME_WAIT)
+inet_twsk_put(inet_twsk(sk));
+else
+sock_put(sk);
+}
+
 static int
 extract_icmp4_fields(const struct sk_buff *skb,
 u8 *protocol,
@@ -164,7 +173,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 (sk->sk_state == TCP_TIME_WAIT &&
 inet_twsk(sk)->tw_transparent));

-nf_tproxy_put_sock(sk);
+xt_socket_put_sk(sk);

 if (wildcard || !transparent)
 sk = NULL;
@@ -298,7 +307,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
 (sk->sk_state == TCP_TIME_WAIT &&
 inet_twsk(sk)->tw_transparent));

-nf_tproxy_put_sock(sk);
+xt_socket_put_sk(sk);

 if (wildcard || !transparent)
 sk = NULL;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 478181d53c55..1f924595bdef 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1407,7 +1407,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 int noblock = flags&MSG_DONTWAIT;
 size_t copied;
 struct sk_buff *skb, *data_skb;
-int err;
+int err, ret;

 if (flags&MSG_OOB)
 return -EOPNOTSUPP;
@@ -1470,8 +1470,13 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,

 skb_free_datagram(sk, skb);

-if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
-netlink_dump(sk);
+if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+ret = netlink_dump(sk);
+if (ret) {
+sk->sk_err = ret;
+sk->sk_error_report(sk);
+}
+}

 scm_recv(sock, msg, siocb->scm, flags);
 out:
@@ -1736,6 +1741,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 struct netlink_callback *cb;
 struct sock *sk;
 struct netlink_sock *nlk;
+int ret;

 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
 if (cb == NULL)
@@ -1764,9 +1770,13 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 nlk->cb = cb;
 mutex_unlock(nlk->cb_mutex);

-netlink_dump(sk);
+ret = netlink_dump(sk);
+
 sock_put(sk);

+if (ret)
+return ret;
+
 /* We successfully started a dump, by returning -EINTR we
 * signal not to send ACK even if it was requested.
 */
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 71f373c421bc..c47a511f203d 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 if (conn->c_loopback
 && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
-return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+scat = &rm->data.op_sg[sg];
+ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
+return ret;
 }

 /* FIXME we may overallocate here */
diff --git a/net/rds/loop.c b/net/rds/loop.c
index aeec1d483b17..bca6761a3ca2 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -61,10 +61,15 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
 unsigned int hdr_off, unsigned int sg,
 unsigned int off)
 {
+struct scatterlist *sgp = &rm->data.op_sg[sg];
+int ret = sizeof(struct rds_header) +
+be32_to_cpu(rm->m_inc.i_hdr.h_len);
+
 /* Do not send cong updates to loopback */
 if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
-return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off);
+goto out;
 }

 BUG_ON(hdr_off || sg || off);
@@ -80,8 +85,8 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
 NULL);

 rds_inc_put(&rm->m_inc);
-
-return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len);
+out:
+return ret;
 }

 /*
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 89315009bab1..1a2b0633fece 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -423,6 +423,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
 goto protocol_error;
 }

+case RXRPC_PACKET_TYPE_ACKALL:
 case RXRPC_PACKET_TYPE_ACK:
 /* ACK processing is done in process context */
 read_lock_bh(&call->state_lock);
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index 5ee16f0353fe..d763793d39de 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c | |||
@@ -89,11 +89,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr, | |||
89 | return ret; | 89 | return ret; |
90 | 90 | ||
91 | plen -= sizeof(*token); | 91 | plen -= sizeof(*token); |
92 | token = kmalloc(sizeof(*token), GFP_KERNEL); | 92 | token = kzalloc(sizeof(*token), GFP_KERNEL); |
93 | if (!token) | 93 | if (!token) |
94 | return -ENOMEM; | 94 | return -ENOMEM; |
95 | 95 | ||
96 | token->kad = kmalloc(plen, GFP_KERNEL); | 96 | token->kad = kzalloc(plen, GFP_KERNEL); |
97 | if (!token->kad) { | 97 | if (!token->kad) { |
98 | kfree(token); | 98 | kfree(token); |
99 | return -ENOMEM; | 99 | return -ENOMEM; |
@@ -731,10 +731,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen) | |||
731 | goto error; | 731 | goto error; |
732 | 732 | ||
733 | ret = -ENOMEM; | 733 | ret = -ENOMEM; |
734 | token = kmalloc(sizeof(*token), GFP_KERNEL); | 734 | token = kzalloc(sizeof(*token), GFP_KERNEL); |
735 | if (!token) | 735 | if (!token) |
736 | goto error; | 736 | goto error; |
737 | token->kad = kmalloc(plen, GFP_KERNEL); | 737 | token->kad = kzalloc(plen, GFP_KERNEL); |
738 | if (!token->kad) | 738 | if (!token->kad) |
739 | goto error_free; | 739 | goto error_free; |
740 | 740 | ||
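The four rxrpc key changes swap kmalloc() for kzalloc() so the token structures start out fully zeroed: any field the XDR parser does not explicitly fill can then never hold stale heap contents when later paths free or walk it. A userspace analogue using calloc(); the struct below is illustrative and much smaller than the real rxrpc token.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative token; the real rxrpc key token has many more fields. */
struct token {
	unsigned int   security_index;
	unsigned int   session_key_len;
	unsigned char  session_key[8];
	/* fields a parser might leave untouched for some inputs: */
	unsigned int   ticket_len;
	unsigned char *ticket;
};

int main(void)
{
	/*
	 * calloc() plays the role of kzalloc(): the whole object, including
	 * the fields never written below, is zero.  With plain malloc(),
	 * ticket_len/ticket would contain whatever the allocator left
	 * behind, and teardown code that trusts them could free garbage.
	 */
	struct token *tok = calloc(1, sizeof(*tok));
	if (!tok)
		return 1;

	tok->security_index = 2;
	memcpy(tok->session_key, "01234567", 8);
	tok->session_key_len = 8;

	printf("ticket_len=%u ticket=%p (safely zero)\n",
	       tok->ticket_len, (void *)tok->ticket);
	free(tok);
	return 0;
}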
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 34dc598440a2..1bc698039ae2 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -839,6 +839,7 @@ void dev_deactivate(struct net_device *dev) | |||
839 | 839 | ||
840 | list_add(&dev->unreg_list, &single); | 840 | list_add(&dev->unreg_list, &single); |
841 | dev_deactivate_many(&single); | 841 | dev_deactivate_many(&single); |
842 | list_del(&single); | ||
842 | } | 843 | } |
843 | 844 | ||
844 | static void dev_init_scheduler_queue(struct net_device *dev, | 845 | static void dev_init_scheduler_queue(struct net_device *dev, |
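dev_deactivate() links dev->unreg_list onto a list head that lives on the stack; without the added list_del(), the device's list node would keep pointing at that stack frame after the function returns, and the next operation on unreg_list would chase dangling pointers. The sketch below shows the same shape with a minimal doubly-linked list; list_head, list_add and list_del here are simplified re-implementations for illustration, not the kernel's.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = entry;   /* re-initialise for reuse */
}

struct net_device { struct list_head unreg_list; };   /* illustrative */

static void deactivate_many(struct list_head *head) { (void)head; /* ... */ }

static void deactivate_one(struct net_device *dev)
{
	struct list_head single = LIST_HEAD_INIT(single);   /* on the stack */

	list_add(&dev->unreg_list, &single);
	deactivate_many(&single);
	/*
	 * Unlink the stack-local head again.  Without this,
	 * dev->unreg_list.next/prev keep pointing at 'single', which is
	 * gone as soon as this function returns.
	 */
	list_del(&single);
}

int main(void)
{
	struct net_device dev = { .unreg_list = LIST_HEAD_INIT(dev.unreg_list) };

	deactivate_one(&dev);
	printf("unreg_list self-linked again: %d\n",
	       dev.unreg_list.next == &dev.unreg_list);
	return 0;
}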
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 2cc46f0962ca..b23428f3c0dd 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -2029,11 +2029,11 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc, | |||
2029 | *errp = sctp_make_op_error_fixed(asoc, chunk); | 2029 | *errp = sctp_make_op_error_fixed(asoc, chunk); |
2030 | 2030 | ||
2031 | if (*errp) { | 2031 | if (*errp) { |
2032 | sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, | 2032 | if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, |
2033 | WORD_ROUND(ntohs(param.p->length))); | 2033 | WORD_ROUND(ntohs(param.p->length)))) |
2034 | sctp_addto_chunk_fixed(*errp, | 2034 | sctp_addto_chunk_fixed(*errp, |
2035 | WORD_ROUND(ntohs(param.p->length)), | 2035 | WORD_ROUND(ntohs(param.p->length)), |
2036 | param.v); | 2036 | param.v); |
2037 | } else { | 2037 | } else { |
2038 | /* If there is no memory for generating the ERROR | 2038 | /* If there is no memory for generating the ERROR |
2039 | * report as specified, an ABORT will be triggered | 2039 | * report as specified, an ABORT will be triggered |
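The SCTP hunk makes the unknown-parameter path append the offending parameter only when sctp_init_cause_fixed() reports that the error cause actually fit into the pre-allocated, fixed-size ERROR chunk; previously the parameter was appended regardless. Below is a short check-before-append sketch on a bounded buffer; the sizes, struct and helper names are illustrative, not the SCTP ones.

#include <stdio.h>
#include <string.h>

#define CHUNK_MAX 64                     /* pretend fixed chunk size */

struct chunk {
	size_t        len;
	unsigned char data[CHUNK_MAX];
};

/* Returns 0 on success, -1 if the cause header would not fit. */
static int init_cause_fixed(struct chunk *c, size_t cause_len)
{
	if (c->len + cause_len > CHUNK_MAX)
		return -1;
	memset(c->data + c->len, 0xEE, cause_len);   /* stand-in cause header */
	c->len += cause_len;
	return 0;
}

static void addto_chunk_fixed(struct chunk *c, const void *p, size_t len)
{
	if (c->len + len > CHUNK_MAX)    /* the *_fixed helpers never overrun */
		return;
	memcpy(c->data + c->len, p, len);
	c->len += len;
}

int main(void)
{
	struct chunk  err = { .len = 40 };
	unsigned char param[16] = { 0 };

	/* New behaviour: append the parameter only if the cause was added. */
	if (!init_cause_fixed(&err, 8))
		addto_chunk_fixed(&err, param, sizeof(param));

	printf("chunk length now %zu\n", err.len);
	return 0;
}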
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 2841cc6bcfda..3fc8624fcd17 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -252,23 +252,37 @@ static void rpc_set_active(struct rpc_task *task) | |||
252 | 252 | ||
253 | /* | 253 | /* |
254 | * Mark an RPC call as having completed by clearing the 'active' bit | 254 | * Mark an RPC call as having completed by clearing the 'active' bit |
255 | * and then waking up all tasks that were sleeping. | ||
255 | */ | 256 | */ |
256 | static void rpc_mark_complete_task(struct rpc_task *task) | 257 | static int rpc_complete_task(struct rpc_task *task) |
257 | { | 258 | { |
258 | smp_mb__before_clear_bit(); | 259 | void *m = &task->tk_runstate; |
260 | wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE); | ||
261 | struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE); | ||
262 | unsigned long flags; | ||
263 | int ret; | ||
264 | |||
265 | spin_lock_irqsave(&wq->lock, flags); | ||
259 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); | 266 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); |
260 | smp_mb__after_clear_bit(); | 267 | ret = atomic_dec_and_test(&task->tk_count); |
261 | wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE); | 268 | if (waitqueue_active(wq)) |
269 | __wake_up_locked_key(wq, TASK_NORMAL, &k); | ||
270 | spin_unlock_irqrestore(&wq->lock, flags); | ||
271 | return ret; | ||
262 | } | 272 | } |
263 | 273 | ||
264 | /* | 274 | /* |
265 | * Allow callers to wait for completion of an RPC call | 275 | * Allow callers to wait for completion of an RPC call |
276 | * | ||
277 | * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit() | ||
278 | * to enforce taking of the wq->lock and hence avoid races with | ||
279 | * rpc_complete_task(). | ||
266 | */ | 280 | */ |
267 | int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) | 281 | int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) |
268 | { | 282 | { |
269 | if (action == NULL) | 283 | if (action == NULL) |
270 | action = rpc_wait_bit_killable; | 284 | action = rpc_wait_bit_killable; |
271 | return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, | 285 | return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, |
272 | action, TASK_KILLABLE); | 286 | action, TASK_KILLABLE); |
273 | } | 287 | } |
274 | EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); | 288 | EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); |
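The new rpc_complete_task() clears RPC_TASK_ACTIVE, drops a reference and wakes waiters all while holding the bit-waitqueue's spinlock, and the added comment explains why waiters must use out_of_line_wait_on_bit(): both sides take the same lock, so a waiter cannot test the bit, miss the wakeup, and sleep forever. The sketch below shows that lost-wakeup discipline in userspace, with a pthread mutex and condition variable standing in for the waitqueue lock; the names are illustrative.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the RPC_TASK_ACTIVE bit and its waitqueue. */
static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq_cond = PTHREAD_COND_INITIALIZER;
static int task_active = 1;

/* rpc_complete_task() analogue: flip the flag and wake under the lock. */
static void *complete_task(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&wq_lock);
	task_active = 0;
	pthread_cond_broadcast(&wq_cond);
	pthread_mutex_unlock(&wq_lock);
	return NULL;
}

/*
 * Waiter analogue: test the flag under the same lock, so the state change
 * cannot slip in between the test and the sleep -- the race the kernel
 * comment about out_of_line_wait_on_bit() is guarding against.
 */
static void wait_for_completion(void)
{
	pthread_mutex_lock(&wq_lock);
	while (task_active)
		pthread_cond_wait(&wq_cond, &wq_lock);
	pthread_mutex_unlock(&wq_lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, complete_task, NULL);
	wait_for_completion();
	pthread_join(t, NULL);
	printf("task completed, waiter woken\n");
	return 0;
}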
@@ -857,34 +871,67 @@ static void rpc_async_release(struct work_struct *work) | |||
857 | rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); | 871 | rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); |
858 | } | 872 | } |
859 | 873 | ||
860 | void rpc_put_task(struct rpc_task *task) | 874 | static void rpc_release_resources_task(struct rpc_task *task) |
861 | { | 875 | { |
862 | if (!atomic_dec_and_test(&task->tk_count)) | ||
863 | return; | ||
864 | /* Release resources */ | ||
865 | if (task->tk_rqstp) | 876 | if (task->tk_rqstp) |
866 | xprt_release(task); | 877 | xprt_release(task); |
867 | if (task->tk_msg.rpc_cred) | 878 | if (task->tk_msg.rpc_cred) |
868 | put_rpccred(task->tk_msg.rpc_cred); | 879 | put_rpccred(task->tk_msg.rpc_cred); |
869 | rpc_task_release_client(task); | 880 | rpc_task_release_client(task); |
870 | if (task->tk_workqueue != NULL) { | 881 | } |
882 | |||
883 | static void rpc_final_put_task(struct rpc_task *task, | ||
884 | struct workqueue_struct *q) | ||
885 | { | ||
886 | if (q != NULL) { | ||
871 | INIT_WORK(&task->u.tk_work, rpc_async_release); | 887 | INIT_WORK(&task->u.tk_work, rpc_async_release); |
872 | queue_work(task->tk_workqueue, &task->u.tk_work); | 888 | queue_work(q, &task->u.tk_work); |
873 | } else | 889 | } else |
874 | rpc_free_task(task); | 890 | rpc_free_task(task); |
875 | } | 891 | } |
892 | |||
893 | static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q) | ||
894 | { | ||
895 | if (atomic_dec_and_test(&task->tk_count)) { | ||
896 | rpc_release_resources_task(task); | ||
897 | rpc_final_put_task(task, q); | ||
898 | } | ||
899 | } | ||
900 | |||
901 | void rpc_put_task(struct rpc_task *task) | ||
902 | { | ||
903 | rpc_do_put_task(task, NULL); | ||
904 | } | ||
876 | EXPORT_SYMBOL_GPL(rpc_put_task); | 905 | EXPORT_SYMBOL_GPL(rpc_put_task); |
877 | 906 | ||
907 | void rpc_put_task_async(struct rpc_task *task) | ||
908 | { | ||
909 | rpc_do_put_task(task, task->tk_workqueue); | ||
910 | } | ||
911 | EXPORT_SYMBOL_GPL(rpc_put_task_async); | ||
912 | |||
878 | static void rpc_release_task(struct rpc_task *task) | 913 | static void rpc_release_task(struct rpc_task *task) |
879 | { | 914 | { |
880 | dprintk("RPC: %5u release task\n", task->tk_pid); | 915 | dprintk("RPC: %5u release task\n", task->tk_pid); |
881 | 916 | ||
882 | BUG_ON (RPC_IS_QUEUED(task)); | 917 | BUG_ON (RPC_IS_QUEUED(task)); |
883 | 918 | ||
884 | /* Wake up anyone who is waiting for task completion */ | 919 | rpc_release_resources_task(task); |
885 | rpc_mark_complete_task(task); | ||
886 | 920 | ||
887 | rpc_put_task(task); | 921 | /* |
922 | * Note: at this point we have been removed from rpc_clnt->cl_tasks, | ||
923 | * so it should be safe to use task->tk_count as a test for whether | ||
924 | * or not any other processes still hold references to our rpc_task. | ||
925 | */ | ||
926 | if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) { | ||
927 | /* Wake up anyone who may be waiting for task completion */ | ||
928 | if (!rpc_complete_task(task)) | ||
929 | return; | ||
930 | } else { | ||
931 | if (!atomic_dec_and_test(&task->tk_count)) | ||
932 | return; | ||
933 | } | ||
934 | rpc_final_put_task(task, task->tk_workqueue); | ||
888 | } | 935 | } |
889 | 936 | ||
890 | int rpciod_up(void) | 937 | int rpciod_up(void) |
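The reworked rpc_release_task() uses the task's reference count to decide whether anyone may still be waiting: if other holders remain it goes through rpc_complete_task(), and only whichever path drops the final reference calls rpc_final_put_task(). Below is a minimal "last reference frees the object" sketch using C11 atomics; the struct and helper names are illustrative, not the sunrpc ones.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct task {
	atomic_int refcount;
	/* ... payload ... */
};

static struct task *task_new(void)
{
	struct task *t = calloc(1, sizeof(*t));
	if (t)
		atomic_init(&t->refcount, 1);
	return t;
}

static void task_get(struct task *t)
{
	atomic_fetch_add(&t->refcount, 1);
}

/*
 * Only the caller that takes the count to zero frees the task -- the
 * property the rpc_do_put_task()/rpc_release_task() split relies on.
 */
static void task_put(struct task *t)
{
	if (atomic_fetch_sub(&t->refcount, 1) == 1) {
		printf("last reference dropped, freeing task\n");
		free(t);
	}
}

int main(void)
{
	struct task *t = task_new();
	if (!t)
		return 1;

	task_get(t);   /* a second holder, e.g. a waiter            */
	task_put(t);   /* releasing path: not the last reference    */
	task_put(t);   /* waiter's put: count hits zero, task freed */
	return 0;
}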
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 9df1eadc912a..1a10dcd999ea 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -1335,6 +1335,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, | |||
1335 | p, 0, length, DMA_FROM_DEVICE); | 1335 | p, 0, length, DMA_FROM_DEVICE); |
1336 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { | 1336 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { |
1337 | put_page(p); | 1337 | put_page(p); |
1338 | svc_rdma_put_context(ctxt, 1); | ||
1338 | return; | 1339 | return; |
1339 | } | 1340 | } |
1340 | atomic_inc(&xprt->sc_dma_used); | 1341 | atomic_inc(&xprt->sc_dma_used); |
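The svc_rdma change makes the DMA-mapping failure path drop the send context as well as the page it had just taken, instead of leaking the context. A tiny sketch of the "undo everything acquired so far before returning" shape of such error paths; the types and helpers are illustrative, not the RDMA transport's.

#include <stdio.h>
#include <stdlib.h>

struct page { int dummy; };
struct ctxt { int dummy; };

static struct ctxt *get_context(void)   { return calloc(1, sizeof(struct ctxt)); }
static struct page *get_page(void)      { return calloc(1, sizeof(struct page)); }
static void put_context(struct ctxt *c) { free(c); }
static void put_page(struct page *p)    { free(p); }
static int  dma_map(struct page *p)     { (void)p; return -1; /* simulate failure */ }

static void send_error(void)
{
	struct ctxt *ctxt = get_context();
	struct page *p    = ctxt ? get_page() : NULL;

	if (!ctxt || !p)
		goto out;
	if (dma_map(p) < 0)
		goto out;            /* must undo *both* acquisitions */

	/* ... post the send ... */
	return;
out:
	if (p)
		put_page(p);
	if (ctxt)
		put_context(ctxt);
	printf("error path: released everything acquired so far\n");
}

int main(void)
{
	send_error();
	return 0;
}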
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c431f5a57960..be96d429b475 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -1631,7 +1631,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt, | |||
1631 | } | 1631 | } |
1632 | xs_reclassify_socket(family, sock); | 1632 | xs_reclassify_socket(family, sock); |
1633 | 1633 | ||
1634 | if (xs_bind(transport, sock)) { | 1634 | err = xs_bind(transport, sock); |
1635 | if (err) { | ||
1635 | sock_release(sock); | 1636 | sock_release(sock); |
1636 | goto out; | 1637 | goto out; |
1637 | } | 1638 | } |
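The xprtsock change stores the result of xs_bind() in err before taking the error branch, so the caller gets the real bind failure code rather than whatever err already held. It is a small pattern, sketched below only because discarding the return value is easy to reintroduce; the helper names and error code are illustrative.

#include <errno.h>
#include <stdio.h>

static int do_bind(int want_fail) { return want_fail ? -EADDRINUSE : 0; }

/* Propagate the real failure code instead of losing it. */
static int create_sock(int want_fail)
{
	int err;

	err = do_bind(want_fail);      /* was: if (do_bind(...)) { ... } */
	if (err) {
		/* release the socket here */
		return err;            /* caller now sees -EADDRINUSE */
	}
	return 0;
}

int main(void)
{
	printf("create_sock() -> %d\n", create_sock(1));
	return 0;
}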
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index dd419d286204..ba5b8c208498 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -850,7 +850,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
850 | * Get the parent directory, calculate the hash for last | 850 | * Get the parent directory, calculate the hash for last |
851 | * component. | 851 | * component. |
852 | */ | 852 | */ |
853 | err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd); | 853 | err = kern_path_parent(sunaddr->sun_path, &nd); |
854 | if (err) | 854 | if (err) |
855 | goto out_mknod_parent; | 855 | goto out_mknod_parent; |
856 | 856 | ||
@@ -1724,7 +1724,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1724 | 1724 | ||
1725 | msg->msg_namelen = 0; | 1725 | msg->msg_namelen = 0; |
1726 | 1726 | ||
1727 | mutex_lock(&u->readlock); | 1727 | err = mutex_lock_interruptible(&u->readlock); |
1728 | if (err) { | ||
1729 | err = sock_intr_errno(sock_rcvtimeo(sk, noblock)); | ||
1730 | goto out; | ||
1731 | } | ||
1728 | 1732 | ||
1729 | skb = skb_recv_datagram(sk, flags, noblock, &err); | 1733 | skb = skb_recv_datagram(sk, flags, noblock, &err); |
1730 | if (!skb) { | 1734 | if (!skb) { |
@@ -1864,7 +1868,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1864 | memset(&tmp_scm, 0, sizeof(tmp_scm)); | 1868 | memset(&tmp_scm, 0, sizeof(tmp_scm)); |
1865 | } | 1869 | } |
1866 | 1870 | ||
1867 | mutex_lock(&u->readlock); | 1871 | err = mutex_lock_interruptible(&u->readlock); |
1872 | if (err) { | ||
1873 | err = sock_intr_errno(timeo); | ||
1874 | goto out; | ||
1875 | } | ||
1868 | 1876 | ||
1869 | do { | 1877 | do { |
1870 | int chunk; | 1878 | int chunk; |
@@ -1895,11 +1903,12 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1895 | 1903 | ||
1896 | timeo = unix_stream_data_wait(sk, timeo); | 1904 | timeo = unix_stream_data_wait(sk, timeo); |
1897 | 1905 | ||
1898 | if (signal_pending(current)) { | 1906 | if (signal_pending(current) |
1907 | || mutex_lock_interruptible(&u->readlock)) { | ||
1899 | err = sock_intr_errno(timeo); | 1908 | err = sock_intr_errno(timeo); |
1900 | goto out; | 1909 | goto out; |
1901 | } | 1910 | } |
1902 | mutex_lock(&u->readlock); | 1911 | |
1903 | continue; | 1912 | continue; |
1904 | unlock: | 1913 | unlock: |
1905 | unix_state_unlock(sk); | 1914 | unix_state_unlock(sk); |
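Both AF_UNIX recvmsg paths now take u->readlock with mutex_lock_interruptible() and turn an interruption into the errno chosen by sock_intr_errno(), and the stream loop folds the post-wait re-acquisition into the existing signal_pending() check, so a receiver stuck behind another reader can still be interrupted by a signal rather than sleeping uninterruptibly. A rough userspace analogue is sketched below: sem_wait() plays the interruptible lock, and EINTR is reported to the caller instead of retrying. The set-up and names are illustrative only.

#define _POSIX_C_SOURCE 200809L
#include <errno.h>
#include <semaphore.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static sem_t readlock;                       /* plays the role of u->readlock */

static void on_alarm(int sig) { (void)sig; } /* only here to interrupt sem_wait() */

/*
 * recvmsg()-like helper: if a signal interrupts the lock acquisition,
 * give the error back to the caller (the kernel picks the value with
 * sock_intr_errno()) instead of blocking until the lock is free.
 */
static int recv_like(void)
{
	if (sem_wait(&readlock) == -1 && errno == EINTR)
		return -EINTR;

	/* ... pull data off the receive queue ... */
	sem_post(&readlock);
	return 0;
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_handler = on_alarm;            /* deliberately no SA_RESTART */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);

	sem_init(&readlock, 0, 0);           /* 0: pretend another reader holds it */
	alarm(1);

	printf("recv_like() -> %d\n", recv_like());
	return 0;
}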
diff --git a/net/unix/garbage.c b/net/unix/garbage.c index f89f83bf828e..b6f4b994eb35 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c | |||
@@ -104,7 +104,7 @@ struct sock *unix_get_socket(struct file *filp) | |||
104 | /* | 104 | /* |
105 | * Socket ? | 105 | * Socket ? |
106 | */ | 106 | */ |
107 | if (S_ISSOCK(inode->i_mode)) { | 107 | if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { |
108 | struct socket *sock = SOCKET_I(inode); | 108 | struct socket *sock = SOCKET_I(inode); |
109 | struct sock *s = sock->sk; | 109 | struct sock *s = sock->sk; |
110 | 110 | ||
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 3e5dbd4e4cd5..d112f038edf0 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c | |||
@@ -802,11 +802,11 @@ int cfg80211_wext_siwfreq(struct net_device *dev, | |||
802 | return freq; | 802 | return freq; |
803 | if (freq == 0) | 803 | if (freq == 0) |
804 | return -EINVAL; | 804 | return -EINVAL; |
805 | wdev_lock(wdev); | ||
806 | mutex_lock(&rdev->devlist_mtx); | 805 | mutex_lock(&rdev->devlist_mtx); |
806 | wdev_lock(wdev); | ||
807 | err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); | 807 | err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); |
808 | mutex_unlock(&rdev->devlist_mtx); | ||
809 | wdev_unlock(wdev); | 808 | wdev_unlock(wdev); |
809 | mutex_unlock(&rdev->devlist_mtx); | ||
810 | return err; | 810 | return err; |
811 | default: | 811 | default: |
812 | return -EOPNOTSUPP; | 812 | return -EOPNOTSUPP; |
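The wext-compat hunk reorders the two locks so rdev->devlist_mtx is taken before wdev_lock() and released after it, giving every path one consistent nesting order, which is the standard way to rule out an ABBA deadlock between code paths that need both locks. A small pthread sketch of that rule follows; the mutex names are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t devlist_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wdev_mtx    = PTHREAD_MUTEX_INITIALIZER;

/*
 * Every path that needs both locks takes devlist_mtx first, then the
 * per-device lock, and releases in reverse order.  If one path did
 * wdev_mtx -> devlist_mtx while another did devlist_mtx -> wdev_mtx,
 * each could end up holding one lock and waiting on the other forever.
 */
static int set_freq(int freq)
{
	int err = 0;

	pthread_mutex_lock(&devlist_mtx);
	pthread_mutex_lock(&wdev_mtx);

	if (freq == 0)
		err = -1;
	/* ... program the channel ... */

	pthread_mutex_unlock(&wdev_mtx);
	pthread_mutex_unlock(&devlist_mtx);
	return err;
}

int main(void)
{
	printf("set_freq(2412) -> %d\n", set_freq(2412));
	return 0;
}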