path: root/net/core/sock.c
author     Linus Torvalds <torvalds@linux-foundation.org>   2014-10-08 21:40:54 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-10-08 21:40:54 -0400
commit     35a9ad8af0bb0fa3525e6d0d20e32551d226f38e (patch)
tree       15b4b33206818886d9cff371fd2163e073b70568 /net/core/sock.c
parent     d5935b07da53f74726e2a65dd4281d0f2c70e5d4 (diff)
parent     64b1f00a0830e1c53874067273a096b228d83d36 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Most notable changes in here:

   1) By far the biggest accomplishment, thanks to a large range of
      contributors, is the addition of multi-send for transmit.  This is
      the result of discussions back in Chicago, and the hard work of
      several individuals.

      Now, when the ->ndo_start_xmit() method of a driver sees
      skb->xmit_more as true, it can choose to defer the doorbell
      telling the driver to start processing the new TX queue entries.

      skb->xmit_more means that the generic networking is guaranteed to
      call the driver immediately with another SKB to send.

      There is logic added to the qdisc layer to dequeue multiple
      packets at a time, and the handling of mis-predicted offloads in
      software is now done with no locks held.

      Finally, pktgen is extended to have a "burst" parameter that can
      be used to test a multi-send implementation.

      Several drivers have xmit_more support: i40e, igb, ixgbe, mlx4,
      virtio_net.  Adding support is almost trivial, so expect more
      drivers to support this optimization soon (a minimal driver-side
      sketch follows the shortlog below).

      I want to thank, in no particular or implied order, Jesper
      Dangaard Brouer, Eric Dumazet, Alexander Duyck, Tom Herbert,
      Jamal Hadi Salim, John Fastabend, Florian Westphal, Daniel
      Borkmann, David Tat, Hannes Frederic Sowa, and Rusty Russell.

   2) PTP and timestamping support in bnx2x, from Michal Kalderon.

   3) Allow adjusting the rx_copybreak threshold for a driver via
      ethtool, and add rx_copybreak support to the enic driver.  From
      Govindarajulu Varadarajan.

   4) Significant enhancements to the generic PHY layer and the bcm7xxx
      driver in particular (EEE support, auto power down, etc.), from
      Florian Fainelli.

   5) Allow raw buffers to be used for flow dissection, allowing
      drivers to determine the optimal "linear pull" size for devices
      that DMA into pools of pages.  The objective is to get exactly
      the necessary amount of headers into the linear SKB area
      pre-pulled, but no more.  The new interface drivers use is
      eth_get_headlen().  From WANG Cong, with driver conversions
      (several had their own by-hand duplicated implementations) by
      Alexander Duyck and Eric Dumazet.

   6) Support checksumming more smoothly and efficiently for
      encapsulations, and add the "foo over UDP" facility.  From Tom
      Herbert.

   7) Add Broadcom SF2 switch driver to the DSA layer, from Florian
      Fainelli.

   8) eBPF can now load programs via a system call and has an extensive
      testsuite.  From Alexei Starovoitov and Daniel Borkmann.

   9) Major overhaul of the packet scheduler to use RCU in several
      major areas such as the classifiers and rate estimators.  From
      John Fastabend.

  10) Add driver for the Intel FM10000 Ethernet Switch, from Alexander
      Duyck.

  11) Rearrange TCP_SKB_CB() to reduce cache line misses, from Eric
      Dumazet.

  12) Add Datacenter TCP congestion control algorithm support, from
      Florian Westphal.

  13) Reorganize sk_buff so that __copy_skb_header() is significantly
      faster.  From Eric Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1558 commits)
  netlabel: directly return netlbl_unlabel_genl_init()
  net: add netdev_txq_bql_{enqueue, complete}_prefetchw() helpers
  net: description of dma_cookie cause make xmldocs warning
  cxgb4: clean up a type issue
  cxgb4: potential shift wrapping bug
  i40e: skb->xmit_more support
  net: fs_enet: Add NAPI TX
  net: fs_enet: Remove non NAPI RX
  r8169:add support for RTL8168EP
  net_sched: copy exts->type in tcf_exts_change()
  wimax: convert printk to pr_foo()
  af_unix: remove 0 assignment on static
  ipv6: Do not warn for informational ICMP messages, regardless of type.
  Update Intel Ethernet Driver maintainers list
  bridge: Save frag_max_size between PRE_ROUTING and POST_ROUTING
  tipc: fix bug in multicast congestion handling
  net: better IFF_XMIT_DST_RELEASE support
  net/mlx4_en: remove NETDEV_TX_BUSY
  3c59x: fix bad split of cpu_to_le32(pci_map_single())
  net: bcmgenet: fix Tx ring priority programming
  ...
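For item 1) above, a minimal driver-side sketch of how an ->ndo_start_xmit() routine can honor skb->xmit_more to batch doorbell writes. This is an illustration only, not code from this merge: struct my_ring, my_queue_skb() and my_ring_kick() are hypothetical stand-ins for driver-specific pieces.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-device TX ring and helpers, for illustration only. */
struct my_ring;
void my_queue_skb(struct my_ring *ring, struct sk_buff *skb);
void my_ring_kick(struct my_ring *ring);

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_ring *ring = netdev_priv(dev);

	/* Post TX descriptor(s) for this skb on the hardware ring. */
	my_queue_skb(ring, skb);

	/* When skb->xmit_more is set, the stack guarantees another skb is
	 * coming right away, so the doorbell write can be deferred and
	 * issued once for the whole burst.
	 */
	if (!skb->xmit_more)
		my_ring_kick(ring);

	return NETDEV_TX_OK;
}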
Diffstat (limited to 'net/core/sock.c')
-rw-r--r--  net/core/sock.c  110
1 file changed, 25 insertions, 85 deletions
diff --git a/net/core/sock.c b/net/core/sock.c
index 611f424fb76b..b4f3ea2fce60 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -437,7 +437,6 @@ static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	int err;
-	int skb_len;
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 
@@ -459,13 +458,6 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	skb->dev = NULL;
 	skb_set_owner_r(skb, sk);
 
-	/* Cache the SKB length before we tack it onto the receive
-	 * queue. Once it is added it no longer belongs to us and
-	 * may be freed by other threads of control pulling packets
-	 * from the queue.
-	 */
-	skb_len = skb->len;
-
 	/* we escape from rcu protected region, make sure we dont leak
 	 * a norefcounted dst
 	 */
@@ -1642,18 +1634,24 @@ void sock_rfree(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_rfree);
 
+void sock_efree(struct sk_buff *skb)
+{
+	sock_put(skb->sk);
+}
+EXPORT_SYMBOL(sock_efree);
+
+#ifdef CONFIG_INET
 void sock_edemux(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 
-#ifdef CONFIG_INET
 	if (sk->sk_state == TCP_TIME_WAIT)
 		inet_twsk_put(inet_twsk(sk));
 	else
-#endif
 		sock_put(sk);
 }
 EXPORT_SYMBOL(sock_edemux);
+#endif
 
 kuid_t sock_i_uid(struct sock *sk)
 {
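The new sock_efree() added in this hunk is a minimal skb destructor: it only drops the socket reference held on behalf of the skb, with no receive or send buffer accounting. A hedged usage sketch follows; attach_sock_ref() is a hypothetical helper for illustration, not a function from this tree.

#include <net/sock.h>

static void attach_sock_ref(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);			/* reference released by sock_efree() */
	skb->sk = sk;
	skb->destructor = sock_efree;	/* just sock_put(skb->sk) on free */
}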
@@ -1761,21 +1759,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 				     unsigned long data_len, int noblock,
 				     int *errcode, int max_page_order)
 {
-	struct sk_buff *skb = NULL;
-	unsigned long chunk;
-	gfp_t gfp_mask;
+	struct sk_buff *skb;
 	long timeo;
 	int err;
-	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	struct page *page;
-	int i;
-
-	err = -EMSGSIZE;
-	if (npages > MAX_SKB_FRAGS)
-		goto failure;
 
 	timeo = sock_sndtimeo(sk, noblock);
-	while (!skb) {
+	for (;;) {
 		err = sock_error(sk);
 		if (err != 0)
 			goto failure;
@@ -1784,66 +1773,27 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 		if (sk->sk_shutdown & SEND_SHUTDOWN)
 			goto failure;
 
-		if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
-			set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
-			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-			err = -EAGAIN;
-			if (!timeo)
-				goto failure;
-			if (signal_pending(current))
-				goto interrupted;
-			timeo = sock_wait_for_wmem(sk, timeo);
-			continue;
-		}
-
-		err = -ENOBUFS;
-		gfp_mask = sk->sk_allocation;
-		if (gfp_mask & __GFP_WAIT)
-			gfp_mask |= __GFP_REPEAT;
-
-		skb = alloc_skb(header_len, gfp_mask);
-		if (!skb)
-			goto failure;
-
-		skb->truesize += data_len;
-
-		for (i = 0; npages > 0; i++) {
-			int order = max_page_order;
-
-			while (order) {
-				if (npages >= 1 << order) {
-					page = alloc_pages(sk->sk_allocation |
-							   __GFP_COMP |
-							   __GFP_NOWARN |
-							   __GFP_NORETRY,
-							   order);
-					if (page)
-						goto fill_page;
-					/* Do not retry other high order allocations */
-					order = 1;
-					max_page_order = 0;
-				}
-				order--;
-			}
-			page = alloc_page(sk->sk_allocation);
-			if (!page)
-				goto failure;
-fill_page:
-			chunk = min_t(unsigned long, data_len,
-				      PAGE_SIZE << order);
-			skb_fill_page_desc(skb, i, page, 0, chunk);
-			data_len -= chunk;
-			npages -= 1 << order;
-		}
+		if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
+			break;
+
+		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+		err = -EAGAIN;
+		if (!timeo)
+			goto failure;
+		if (signal_pending(current))
+			goto interrupted;
+		timeo = sock_wait_for_wmem(sk, timeo);
 	}
-
-	skb_set_owner_w(skb, sk);
+	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
+				   errcode, sk->sk_allocation);
+	if (skb)
+		skb_set_owner_w(skb, sk);
 	return skb;
 
 interrupted:
 	err = sock_intr_errno(timeo);
 failure:
-	kfree_skb(skb);
 	*errcode = err;
 	return NULL;
 }
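The rewrite above delegates all page-fragment allocation to the new alloc_skb_with_frags() helper. A hedged usage sketch follows; example_alloc() is hypothetical, while the helper's arguments mirror the call in the hunk: header_len bytes of linear space, data_len bytes spread over page fragments of at most max_page_order order, and the error reported through *errcode.

#include <linux/skbuff.h>

static struct sk_buff *example_alloc(unsigned long header_len,
				     unsigned long data_len, int *errcode)
{
	struct sk_buff *skb;

	skb = alloc_skb_with_frags(header_len, data_len,
				   PAGE_ALLOC_COSTLY_ORDER, errcode,
				   GFP_KERNEL);
	/* On failure skb is NULL and *errcode holds a negative errno,
	 * e.g. -EMSGSIZE (too many fragments) or -ENOBUFS (allocation).
	 */
	return skb;
}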
@@ -2492,11 +2442,11 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
 		       int level, int type)
 {
 	struct sock_exterr_skb *serr;
-	struct sk_buff *skb, *skb2;
+	struct sk_buff *skb;
 	int copied, err;
 
 	err = -EAGAIN;
-	skb = skb_dequeue(&sk->sk_error_queue);
+	skb = sock_dequeue_err_skb(sk);
 	if (skb == NULL)
 		goto out;
 
@@ -2517,16 +2467,6 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
 	msg->msg_flags |= MSG_ERRQUEUE;
 	err = copied;
 
-	/* Reset and regenerate socket error */
-	spin_lock_bh(&sk->sk_error_queue.lock);
-	sk->sk_err = 0;
-	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
-		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
-		spin_unlock_bh(&sk->sk_error_queue.lock);
-		sk->sk_error_report(sk);
-	} else
-		spin_unlock_bh(&sk->sk_error_queue.lock);
-
 out_free_skb:
 	kfree_skb(skb);
 out:
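The removed "Reset and regenerate socket error" block is not lost: sock_dequeue_err_skb(), used in the previous hunk, is expected to perform the equivalent work when it pops sk_error_queue. A hedged sketch of those semantics follows; it is an illustration, not the verbatim helper introduced by this merge.

#include <net/sock.h>
#include <linux/errqueue.h>

static struct sk_buff *dequeue_err_skb_sketch(struct sock *sk)
{
	struct sk_buff_head *q = &sk->sk_error_queue;
	struct sk_buff *skb, *next;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&q->lock, flags);
	skb = __skb_dequeue(q);		/* entry handed back to the caller */
	next = skb_peek(q);
	if (next)
		err = SKB_EXT_ERR(next)->ee.ee_errno;
	spin_unlock_irqrestore(&q->lock, flags);

	/* Re-arm sk_err from the next pending entry and notify if any. */
	sk->sk_err = err;
	if (err)
		sk->sk_error_report(sk);

	return skb;
}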