Diffstat (limited to 'include/net/sock.h')

-rw-r--r--  include/net/sock.h  |  83
1 file changed, 55 insertions(+), 28 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 2d8d6adf161..edd4d73ce7f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -44,6 +44,7 @@
 #include <linux/timer.h>
 #include <linux/cache.h>
 #include <linux/module.h>
+#include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>	/* struct sk_buff */
 #include <linux/security.h>
@@ -78,14 +79,17 @@ typedef struct {
 	spinlock_t		slock;
 	struct sock_iocb	*owner;
 	wait_queue_head_t	wq;
+	/*
+	 * We express the mutex-alike socket_lock semantics
+	 * to the lock validator by explicitly managing
+	 * the slock as a lock variant (in addition to
+	 * the slock itself):
+	 */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } socket_lock_t;
 
-#define sock_lock_init(__sk) \
-do {	spin_lock_init(&((__sk)->sk_lock.slock)); \
-	(__sk)->sk_lock.owner = NULL; \
-	init_waitqueue_head(&((__sk)->sk_lock.wq)); \
-} while(0)
-
 struct sock;
 struct proto;
 
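The hunk above also drops the sock_lock_init() macro from the header: once the lock carries a lockdep_map, initialization has to register a lock class as well, which is better done out of line in net/core/sock.c. A minimal sketch of what such an out-of-line initializer can look like, assuming a per-address-family key array (the af_family_keys name and the "slock-AF" label are illustrative assumptions, not taken from this diff):

/* Sketch only; requires <net/sock.h> and <linux/lockdep.h>. */
static struct lock_class_key af_family_keys[AF_MAX];

static void sock_lock_init(struct sock *sk)
{
	spin_lock_init(&sk->sk_lock.slock);
	/*
	 * Give each address family its own class so lockdep does not
	 * conflate e.g. AF_INET and AF_UNIX socket locks when it
	 * searches for deadlock cycles.
	 */
	lockdep_set_class_and_name(&sk->sk_lock.slock,
				   &af_family_keys[sk->sk_family],
				   "slock-AF");
	sk->sk_lock.owner = NULL;
	init_waitqueue_head(&sk->sk_lock.wq);
}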
@@ -140,6 +144,7 @@ struct sock_common {
  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
  *	@sk_no_check: %SO_NO_CHECK setting, wether or not checksum packets
  *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
+ *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  *	@sk_lingertime: %SO_LINGER l_linger setting
  *	@sk_backlog: always used with the per-socket spinlock held
  *	@sk_callback_lock: used with the callbacks in the end of this struct
@@ -211,6 +216,7 @@ struct sock {
 	gfp_t			sk_allocation;
 	int			sk_sndbuf;
 	int			sk_route_caps;
+	int			sk_gso_type;
 	int			sk_rcvlowat;
 	unsigned long		sk_flags;
 	unsigned long		sk_lingertime;
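The new sk_gso_type field records which SKB_GSO_* flavour this socket's packets need, complementing the device feature bits cached in sk_route_caps. A hedged usage sketch (the helper below is invented for illustration; TCP over IPv4 performs the equivalent assignment when the flow is set up, in tcp_v4_connect()):

/* Illustrative only: tag a TCPv4 flow as segmentable by GSO. */
static inline void example_mark_tcp4_gso(struct sock *sk)
{
	sk->sk_gso_type = SKB_GSO_TCPV4;
}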
@@ -383,7 +389,6 @@ enum sock_flags {
 	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
 	SOCK_DBG, /* %SO_DEBUG setting */
 	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
-	SOCK_NO_LARGESEND, /* whether to sent large segments or not */
 	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
 	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
 };
@@ -746,6 +751,9 @@ extern void FASTCALL(release_sock(struct sock *sk));
 
 /* BH context may only use the following locking interface. */
 #define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
+#define bh_lock_sock_nested(__sk) \
+				spin_lock_nested(&((__sk)->sk_lock.slock), \
+				SINGLE_DEPTH_NESTING)
 #define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
 
 extern struct sock		*sk_alloc(int family,
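bh_lock_sock_nested() exists purely for the lock validator: spin_lock_nested() acquires the same spinlock but marks it as SINGLE_DEPTH_NESTING, telling lockdep that one level of same-class nesting is intentional rather than a recursive deadlock. A hedged sketch of the kind of BH-context call site this permits (the function and the parent/child pairing are illustrative, not taken from this diff; a real caller must also guarantee a fixed lock order to stay deadlock-free):

/* Sketch: hold two socket locks of the same class in softirq
 * context, e.g. a listener and a freshly cloned child socket. */
static void example_touch_parent_and_child(struct sock *parent,
					   struct sock *child)
{
	bh_lock_sock(parent);
	bh_lock_sock_nested(child);	/* subclass 1: no lockdep splat */

	/* ... update state spanning both sockets ... */

	bh_unlock_sock(child);
	bh_unlock_sock(parent);
}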
@@ -854,30 +862,24 @@ extern void sock_init_data(struct socket *sock, struct sock *sk);
  *
  */
 
-static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
+static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 {
 	int err;
+	struct sk_filter *filter;
 
 	err = security_sock_rcv_skb(sk, skb);
 	if (err)
 		return err;
 
-	if (sk->sk_filter) {
-		struct sk_filter *filter;
-
-		if (needlock)
-			bh_lock_sock(sk);
-
-		filter = sk->sk_filter;
-		if (filter) {
-			unsigned int pkt_len = sk_run_filter(skb, filter->insns,
-							     filter->len);
-			err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
-		}
-
-		if (needlock)
-			bh_unlock_sock(sk);
-	}
+	rcu_read_lock_bh();
+	filter = sk->sk_filter;
+	if (filter) {
+		unsigned int pkt_len = sk_run_filter(skb, filter->insns,
+				filter->len);
+		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
+	}
+	rcu_read_unlock_bh();
+
 	return err;
 }
 
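The needlock parameter disappears because readers no longer take the socket spinlock at all: sk->sk_filter is read under rcu_read_lock_bh(), which merely keeps softirqs disabled while the filter program runs. This is only safe if writers publish the pointer atomically and defer freeing the old program (see sk_filter_release() below). A hedged sketch of the matching update side, assuming writers still serialize on the socket lock (example_replace_filter is an invented name):

/* Sketch: swap in a new filter so concurrent sk_filter() callers
 * see either the old or the new program, never a torn pointer. */
static void example_replace_filter(struct sock *sk, struct sk_filter *fp)
{
	struct sk_filter *old;

	lock_sock(sk);
	old = sk->sk_filter;
	rcu_assign_pointer(sk->sk_filter, fp);	/* publish new program */
	release_sock(sk);

	if (old)
		sk_filter_release(sk, old);	/* free deferred via RCU */
}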
@@ -889,6 +891,12 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
  *	Remove a filter from a socket and release its resources.
  */
 
+static inline void sk_filter_rcu_free(struct rcu_head *rcu)
+{
+	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
+	kfree(fp);
+}
+
 static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
 {
 	unsigned int size = sk_filter_len(fp);
@@ -896,7 +904,7 @@ static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
 	atomic_sub(size, &sk->sk_omem_alloc);
 
 	if (atomic_dec_and_test(&fp->refcnt))
-		kfree(fp);
+		call_rcu_bh(&fp->rcu, sk_filter_rcu_free);
 }
 
 static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
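call_rcu_bh() pairs with the rcu_read_lock_bh() readers above: kfree() runs only after every CPU has passed through a point with softirqs enabled, i.e. after all in-flight sk_filter() invocations have finished with the program. For this to work, struct sk_filter must embed an rcu_head; its shape at the time is roughly the following (approximate, quoted from memory of linux/filter.h, which remains the authoritative definition):

struct sk_filter {
	atomic_t		refcnt;
	unsigned int		len;	/* number of BPF instructions */
	struct rcu_head		rcu;	/* handle for call_rcu_bh() */
	struct sock_filter	insns[0];	/* the program itself */
};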
@@ -961,9 +969,23 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
 	sk->sk_sleep = &parent->wait;
 	parent->sk = sk;
 	sk->sk_socket = parent;
+	security_sock_graft(sk, parent);
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
+static inline void sock_copy(struct sock *nsk, const struct sock *osk)
+{
+#ifdef CONFIG_SECURITY_NETWORK
+	void *sptr = nsk->sk_security;
+#endif
+
+	memcpy(nsk, osk, osk->sk_prot->obj_size);
+#ifdef CONFIG_SECURITY_NETWORK
+	nsk->sk_security = sptr;
+	security_sk_clone(osk, nsk);
+#endif
+}
+
 extern int sock_i_uid(struct sock *sk);
 extern unsigned long sock_i_ino(struct sock *sk);
 
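sock_copy() duplicates a socket with a single memcpy() over the protocol's full object size, which would also clobber the destination's LSM security blob pointer; saving sk_security across the copy and then calling security_sk_clone() lets the new socket keep its own blob while the security module copies the relevant state into it. A hedged sketch of the intended caller pattern (example_sk_clone is an invented name; the real user in this era is sk_clone() in net/core/sock.c):

/* Sketch: duplicate an existing socket, e.g. for a newly
 * accepted connection. */
static struct sock *example_sk_clone(const struct sock *osk, gfp_t prio)
{
	/* zero_it == 0: sock_copy() overwrites the body anyway. */
	struct sock *nsk = sk_alloc(osk->sk_family, prio, osk->sk_prot, 0);

	if (nsk != NULL)
		sock_copy(nsk, osk);	/* keeps nsk->sk_security intact */

	return nsk;
}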
@@ -1026,15 +1048,20 @@ extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
 
 extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
 
+static inline int sk_can_gso(const struct sock *sk)
+{
+	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
+}
+
 static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
 	__sk_dst_set(sk, dst);
 	sk->sk_route_caps = dst->dev->features;
 	if (sk->sk_route_caps & NETIF_F_GSO)
-		sk->sk_route_caps |= NETIF_F_TSO;
-	if (sk->sk_route_caps & NETIF_F_TSO) {
-		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
-			sk->sk_route_caps &= ~NETIF_F_TSO;
+		sk->sk_route_caps |= NETIF_F_GSO_MASK;
+	if (sk_can_gso(sk)) {
+		if (dst->header_len)
+			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
 		else
 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 	}
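This hunk completes the TSO-to-GSO generalization: a GSO-capable device now gets the full NETIF_F_GSO_MASK set of segmentation features, and sk_can_gso() asks whether the device bits cover this particular socket's sk_gso_type. It also supersedes the SOCK_NO_LARGESEND flag removed earlier in this diff: whether large sends are possible is now derived from sk_gso_type and the route capabilities rather than a per-socket flag. The check works because each SKB_GSO_* value maps onto a NETIF_F_* bit by a fixed shift; net_gso_ok() in linux/netdevice.h of this era reads roughly as follows (quoted from memory, so verify against the header):

/* Approximate: a flow can use GSO iff the device advertises the
 * feature bit matching the flow's GSO type. */
static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;

	return (features & feature) == feature;
}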