author		Linus Torvalds <torvalds@linux-foundation.org>	2012-07-31 22:25:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-31 22:25:39 -0400
commit		ac694dbdbc403c00e2c14d10bc7b8412cc378259 (patch)
tree		e37328cfbeaf43716dd5914cad9179e57e84df76 /include/net
parent		a40a1d3d0a2fd613fdec6d89d3c053268ced76ed (diff)
parent		437ea90cc3afdca5229b41c6b1d38c4842756cb9 (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge Andrew's second set of patches:
 - MM
 - a few random fixes
 - a couple of RTC leftovers

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (120 commits)
  rtc/rtc-88pm80x: remove unneed devm_kfree
  rtc/rtc-88pm80x: assign ret only when rtc_register_driver fails
  mm: hugetlbfs: close race during teardown of hugetlbfs shared page tables
  tmpfs: distribute interleave better across nodes
  mm: remove redundant initialization
  mm: warn if pg_data_t isn't initialized with zero
  mips: zero out pg_data_t when it's allocated
  memcg: gix memory accounting scalability in shrink_page_list
  mm/sparse: remove index_init_lock
  mm/sparse: more checks on mem_section number
  mm/sparse: optimize sparse_index_alloc
  memcg: add mem_cgroup_from_css() helper
  memcg: further prevent OOM with too many dirty pages
  memcg: prevent OOM with too many dirty pages
  mm: mmu_notifier: fix freed page still mapped in secondary MMU
  mm: memcg: only check anon swapin page charges for swap cache
  mm: memcg: only check swap cache pages for repeated charging
  mm: memcg: split swapin charge function into private and public part
  mm: memcg: remove needless !mm fixup to init_mm when charging
  mm: memcg: remove unneeded shmem charge type
  ...
Diffstat (limited to 'include/net')
-rw-r--r--	include/net/sock.h	40
1 file changed, 35 insertions(+), 5 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index e067f8c18f88..b3730239bf18 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -621,6 +621,7 @@ enum sock_flags {
 	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
 	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
 	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
+	SOCK_MEMALLOC, /* VM depends on this socket for swapping */
 	SOCK_TIMESTAMPING_TX_HARDWARE, /* %SOF_TIMESTAMPING_TX_HARDWARE */
 	SOCK_TIMESTAMPING_TX_SOFTWARE, /* %SOF_TIMESTAMPING_TX_SOFTWARE */
 	SOCK_TIMESTAMPING_RX_HARDWARE, /* %SOF_TIMESTAMPING_RX_HARDWARE */
@@ -658,6 +659,26 @@ static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
 	return test_bit(flag, &sk->sk_flags);
 }
 
+#ifdef CONFIG_NET
+extern struct static_key memalloc_socks;
+static inline int sk_memalloc_socks(void)
+{
+	return static_key_false(&memalloc_socks);
+}
+#else
+
+static inline int sk_memalloc_socks(void)
+{
+	return 0;
+}
+
+#endif
+
+static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask)
+{
+	return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC);
+}
+
 static inline void sk_acceptq_removed(struct sock *sk)
 {
 	sk->sk_ack_backlog--;
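
Annotation (not part of the patch): the hunk above adds the SOCK_MEMALLOC machinery. memalloc_socks is a static key that stays false until some socket is flagged, sk_memalloc_socks() tests it cheaply, and sk_gfp_atomic() folds the socket's __GFP_MEMALLOC bit into an atomic allocation mask. A minimal usage sketch follows; it is illustrative only, and example_alloc_ctl_skb() is a hypothetical caller, not code from this series.

/* Illustrative sketch only, not taken from this commit. */
static struct sk_buff *example_alloc_ctl_skb(struct sock *sk)
{
	/*
	 * For a socket previously flagged with sk_set_memalloc(),
	 * sk->sk_allocation carries __GFP_MEMALLOC, so this atomic
	 * allocation may dip into the pfmemalloc reserves instead of
	 * failing under memory pressure.
	 */
	return alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
}
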
@@ -733,8 +754,13 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
 	return 0;
 }
 
+extern int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+
 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
+	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+		return __sk_backlog_rcv(sk, skb);
+
 	return sk->sk_backlog_rcv(sk, skb);
 }
 
@@ -798,6 +824,8 @@ extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
 extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
 extern int sk_stream_error(struct sock *sk, int flags, int err);
 extern void sk_stream_kill_queues(struct sock *sk);
+extern void sk_set_memalloc(struct sock *sk);
+extern void sk_clear_memalloc(struct sock *sk);
 
 extern int sk_wait_data(struct sock *sk, long *timeo);
 
@@ -913,7 +941,7 @@ struct proto {
 #ifdef SOCK_REFCNT_DEBUG
 	atomic_t		socks;
 #endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
 	/*
 	 * cgroup specific init/deinit functions. Called once for all
 	 * protocols that implement it, from cgroups populate function.
@@ -994,7 +1022,7 @@ inline void sk_refcnt_debug_release(const struct sock *sk)
 #define sk_refcnt_debug_release(sk) do { } while (0)
 #endif /* SOCK_REFCNT_DEBUG */
 
-#if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET)
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_NET)
 extern struct static_key memcg_socket_limit_enabled;
 static inline struct cg_proto *parent_cg_proto(struct proto *proto,
 					       struct cg_proto *cg_proto)
@@ -1301,12 +1329,14 @@ static inline bool sk_wmem_schedule(struct sock *sk, int size)
 	__sk_mem_schedule(sk, size, SK_MEM_SEND);
 }
 
-static inline bool sk_rmem_schedule(struct sock *sk, int size)
+static inline bool
+sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, unsigned int size)
 {
 	if (!sk_has_account(sk))
 		return true;
-	return size <= sk->sk_forward_alloc ||
-		__sk_mem_schedule(sk, size, SK_MEM_RECV);
+	return size<= sk->sk_forward_alloc ||
+		__sk_mem_schedule(sk, size, SK_MEM_RECV) ||
+		skb_pfmemalloc(skb);
 }
 
 static inline void sk_mem_reclaim(struct sock *sk)
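
Annotation (not part of the patch): sk_set_memalloc() and sk_clear_memalloc(), declared above and implemented elsewhere in the same series, are the switch that turns this machinery on for a socket the VM depends on (e.g. swap over network storage). Once a socket is flagged, skbs built from the pfmemalloc reserves are steered through __sk_backlog_rcv(), and sk_rmem_schedule() admits them even when the socket is over its receive budget. The sketch below shows the intended call pattern under those assumptions; the example_swap_transport_* names are hypothetical.

/* Illustrative sketch only, not taken from this commit. */
static int example_swap_transport_attach(struct sock *sk)
{
	/*
	 * Mark the socket as one the VM swaps over: this sets
	 * SOCK_MEMALLOC, lets sk->sk_allocation carry __GFP_MEMALLOC
	 * and enables the memalloc_socks static key, so the pfmemalloc
	 * branches in sk_backlog_rcv() and sk_rmem_schedule() go live.
	 */
	sk_set_memalloc(sk);
	return 0;
}

static void example_swap_transport_detach(struct sock *sk)
{
	/* Drop reserve access again when the backing device goes away. */
	sk_clear_memalloc(sk);
}
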