about summary refs log tree commit diff stats
path: root/include/net/sock.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h  31
1 files changed, 17 insertions, 14 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 182ca99405ad..14f6e9d19dc7 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -140,6 +140,7 @@ typedef __u64 __bitwise __addrpair;
140 * @skc_family: network address family 140 * @skc_family: network address family
141 * @skc_state: Connection state 141 * @skc_state: Connection state
142 * @skc_reuse: %SO_REUSEADDR setting 142 * @skc_reuse: %SO_REUSEADDR setting
143 * @skc_reuseport: %SO_REUSEPORT setting
143 * @skc_bound_dev_if: bound device index if != 0 144 * @skc_bound_dev_if: bound device index if != 0
144 * @skc_bind_node: bind hash linkage for various protocol lookup tables 145 * @skc_bind_node: bind hash linkage for various protocol lookup tables
145 * @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol 146 * @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
@@ -179,7 +180,8 @@ struct sock_common {
179 180
180 unsigned short skc_family; 181 unsigned short skc_family;
181 volatile unsigned char skc_state; 182 volatile unsigned char skc_state;
182 unsigned char skc_reuse; 183 unsigned char skc_reuse:4;
184 unsigned char skc_reuseport:4;
183 int skc_bound_dev_if; 185 int skc_bound_dev_if;
184 union { 186 union {
185 struct hlist_node skc_bind_node; 187 struct hlist_node skc_bind_node;
@@ -297,6 +299,7 @@ struct sock {
297#define sk_family __sk_common.skc_family 299#define sk_family __sk_common.skc_family
298#define sk_state __sk_common.skc_state 300#define sk_state __sk_common.skc_state
299#define sk_reuse __sk_common.skc_reuse 301#define sk_reuse __sk_common.skc_reuse
302#define sk_reuseport __sk_common.skc_reuseport
300#define sk_bound_dev_if __sk_common.skc_bound_dev_if 303#define sk_bound_dev_if __sk_common.skc_bound_dev_if
301#define sk_bind_node __sk_common.skc_bind_node 304#define sk_bind_node __sk_common.skc_bind_node
302#define sk_prot __sk_common.skc_prot 305#define sk_prot __sk_common.skc_prot
@@ -337,7 +340,7 @@ struct sock {
337#endif 340#endif
338 unsigned long sk_flags; 341 unsigned long sk_flags;
339 struct dst_entry *sk_rx_dst; 342 struct dst_entry *sk_rx_dst;
340 struct dst_entry *sk_dst_cache; 343 struct dst_entry __rcu *sk_dst_cache;
341 spinlock_t sk_dst_lock; 344 spinlock_t sk_dst_lock;
342 atomic_t sk_wmem_alloc; 345 atomic_t sk_wmem_alloc;
343 atomic_t sk_omem_alloc; 346 atomic_t sk_omem_alloc;
@@ -603,24 +606,23 @@ static inline void sk_add_bind_node(struct sock *sk,
603 hlist_add_head(&sk->sk_bind_node, list); 606 hlist_add_head(&sk->sk_bind_node, list);
604} 607}
605 608
606#define sk_for_each(__sk, node, list) \ 609#define sk_for_each(__sk, list) \
607 hlist_for_each_entry(__sk, node, list, sk_node) 610 hlist_for_each_entry(__sk, list, sk_node)
608#define sk_for_each_rcu(__sk, node, list) \ 611#define sk_for_each_rcu(__sk, list) \
609 hlist_for_each_entry_rcu(__sk, node, list, sk_node) 612 hlist_for_each_entry_rcu(__sk, list, sk_node)
610#define sk_nulls_for_each(__sk, node, list) \ 613#define sk_nulls_for_each(__sk, node, list) \
611 hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node) 614 hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
612#define sk_nulls_for_each_rcu(__sk, node, list) \ 615#define sk_nulls_for_each_rcu(__sk, node, list) \
613 hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node) 616 hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
614#define sk_for_each_from(__sk, node) \ 617#define sk_for_each_from(__sk) \
615 if (__sk && ({ node = &(__sk)->sk_node; 1; })) \ 618 hlist_for_each_entry_from(__sk, sk_node)
616 hlist_for_each_entry_from(__sk, node, sk_node)
617#define sk_nulls_for_each_from(__sk, node) \ 619#define sk_nulls_for_each_from(__sk, node) \
618 if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \ 620 if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
619 hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node) 621 hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
620#define sk_for_each_safe(__sk, node, tmp, list) \ 622#define sk_for_each_safe(__sk, tmp, list) \
621 hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node) 623 hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
622#define sk_for_each_bound(__sk, node, list) \ 624#define sk_for_each_bound(__sk, list) \
623 hlist_for_each_entry(__sk, node, list, sk_bind_node) 625 hlist_for_each_entry(__sk, list, sk_bind_node)
624 626
625static inline struct user_namespace *sk_user_ns(struct sock *sk) 627static inline struct user_namespace *sk_user_ns(struct sock *sk)
626{ 628{
@@ -664,6 +666,7 @@ enum sock_flags {
664 * Will use last 4 bytes of packet sent from 666 * Will use last 4 bytes of packet sent from
665 * user-space instead. 667 * user-space instead.
666 */ 668 */
669 SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
667}; 670};
668 671
669static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) 672static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
@@ -1037,7 +1040,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk)
1037 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks)); 1040 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
1038} 1041}
1039 1042
1040inline void sk_refcnt_debug_release(const struct sock *sk) 1043static inline void sk_refcnt_debug_release(const struct sock *sk)
1041{ 1044{
1042 if (atomic_read(&sk->sk_refcnt) != 1) 1045 if (atomic_read(&sk->sk_refcnt) != 1)
1043 printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n", 1046 printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",