Diffstat (limited to 'include/net/sock.h')
 -rw-r--r--  include/net/sock.h | 68
 1 file changed, 31 insertions(+), 37 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 5697caf8cc76..a441c9cdd625 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -295,7 +295,8 @@ struct sock {
 	unsigned short		sk_ack_backlog;
 	unsigned short		sk_max_ack_backlog;
 	__u32			sk_priority;
-	struct ucred		sk_peercred;
+	struct pid		*sk_peer_pid;
+	const struct cred	*sk_peer_cred;
 	long			sk_rcvtimeo;
 	long			sk_sndtimeo;
 	struct sk_filter	*sk_filter;
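
The fixed struct ucred snapshot is replaced by a refcounted struct pid plus a const struct cred pointer, so peer credentials can be translated into the reader's namespaces at getsockopt() time instead of being frozen at connect()/accept() time. Below is a minimal sketch of how a SO_PEERCRED-style handler could build a struct ucred from the split fields; the helper itself is hypothetical, while pid_vnr() and the cred members are standard kernel APIs of this era:

#include <linux/cred.h>
#include <linux/pid.h>
#include <linux/socket.h>
#include <net/sock.h>

static void fill_peer_ucred(const struct sock *sk, struct ucred *uc)
{
	/* Translate the recorded struct pid into the caller's pid namespace. */
	uc->pid = pid_vnr(sk->sk_peer_pid);
	if (sk->sk_peer_cred) {
		uc->uid = sk->sk_peer_cred->euid;
		uc->gid = sk->sk_peer_cred->egid;
	} else {
		uc->uid = -1;	/* no credentials recorded */
		uc->gid = -1;
	}
}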
@@ -312,7 +313,7 @@ struct sock {
 	void			*sk_security;
 #endif
 	__u32			sk_mark;
-	/* XXX 4 bytes hole on 64 bit */
+	u32			sk_classid;
 	void			(*sk_state_change)(struct sock *sk);
 	void			(*sk_data_ready)(struct sock *sk, int bytes);
 	void			(*sk_write_space)(struct sock *sk);
@@ -771,6 +772,7 @@ struct proto {
 	int			*sysctl_wmem;
 	int			*sysctl_rmem;
 	int			max_header;
+	bool			no_autobind;
 
 	struct kmem_cache	*slab;
 	unsigned int		obj_size;
@@ -1026,15 +1028,23 @@ extern void release_sock(struct sock *sk);
 				SINGLE_DEPTH_NESTING)
 #define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
 
-static inline void lock_sock_bh(struct sock *sk)
+extern bool lock_sock_fast(struct sock *sk);
+/**
+ * unlock_sock_fast - complement of lock_sock_fast
+ * @sk: socket
+ * @slow: slow mode
+ *
+ * fast unlock socket for user context.
+ * If slow mode is on, we call regular release_sock()
+ */
+static inline void unlock_sock_fast(struct sock *sk, bool slow)
 {
-	spin_lock_bh(&sk->sk_lock.slock);
+	if (slow)
+		release_sock(sk);
+	else
+		spin_unlock_bh(&sk->sk_lock.slock);
 }
 
-static inline void unlock_sock_bh(struct sock *sk)
-{
-	spin_unlock_bh(&sk->sk_lock.slock);
-}
 
 extern struct sock		*sk_alloc(struct net *net, int family,
 					  gfp_t priority,
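
lock_sock_bh()/unlock_sock_bh() silently assumed the socket lock was never owned by a process-context holder; the replacement lock_sock_fast() keeps the cheap spin_lock_bh() fast path but falls back to a full lock_sock() when the lock is owned, and reports which path it took. A hypothetical caller, sketched only to show that the returned bool has to be handed back to unlock_sock_fast():

#include <linux/skbuff.h>
#include <net/sock.h>

static void example_drain_error_queue(struct sock *sk)
{
	struct sk_buff *skb;
	bool slow;

	slow = lock_sock_fast(sk);	/* spin_lock_bh() unless a user context owns the lock */

	while ((skb = skb_dequeue(&sk->sk_error_queue)) != NULL)
		kfree_skb(skb);

	unlock_sock_fast(sk, slow);	/* release_sock() only if the slow path was taken */
}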
@@ -1074,6 +1084,14 @@ extern void *sock_kmalloc(struct sock *sk, int size,
 extern void sock_kfree_s(struct sock *sk, void *mem, int size);
 extern void sk_send_sigurg(struct sock *sk);
 
+#ifdef CONFIG_CGROUPS
+extern void sock_update_classid(struct sock *sk);
+#else
+static inline void sock_update_classid(struct sock *sk)
+{
+}
+#endif
+
 /*
  * Functions to fill in entries in struct proto_ops when a protocol
  * does not implement a particular function.
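
sock_update_classid() pairs with the sk_classid field added earlier in this diff: it samples the calling task's net_cls cgroup classid into the socket, and the !CONFIG_CGROUPS stub lets call sites stay unconditional. The call sites below are hypothetical, shown only to illustrate the intended split between writer (process context) and reader (classification path):

#include <linux/skbuff.h>
#include <net/sock.h>

/* Process-context side: refresh the classid while the owning task is current. */
static void example_refresh_classid(struct sock *sk)
{
	sock_update_classid(sk);
}

/* Classification side: consumers read the cached value from the socket. */
static u32 example_read_classid(const struct sk_buff *skb)
{
	return skb->sk ? skb->sk->sk_classid : 0;	/* 0 = unclassified */
}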
@@ -1208,12 +1226,7 @@ static inline void sk_tx_queue_clear(struct sock *sk)
 
 static inline int sk_tx_queue_get(const struct sock *sk)
 {
-	return sk->sk_tx_queue_mapping;
-}
-
-static inline bool sk_tx_queue_recorded(const struct sock *sk)
-{
-	return (sk && sk->sk_tx_queue_mapping >= 0);
+	return sk ? sk->sk_tx_queue_mapping : -1;
 }
 
 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
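
sk_tx_queue_recorded() is folded into the accessor: sk_tx_queue_get() now tolerates a NULL socket and returns -1 when no queue has been recorded, so callers can test the return value directly. A hypothetical queue-selection sketch:

#include <linux/skbuff.h>
#include <net/sock.h>

static u16 example_pick_tx_queue(const struct sk_buff *skb, unsigned int num_queues)
{
	int queue = sk_tx_queue_get(skb->sk);	/* -1 if no socket or nothing recorded */

	if (queue >= 0 && queue < (int)num_queues)
		return (u16)queue;
	return 0;				/* fall back to queue 0 */
}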
@@ -1404,7 +1417,7 @@ static inline int sk_has_allocations(const struct sock *sk)
 
 /**
  * wq_has_sleeper - check if there are any waiting processes
- * @sk: struct socket_wq
+ * @wq: struct socket_wq
  *
  * Returns true if socket_wq has waiting processes
  *
@@ -1508,20 +1521,7 @@ extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
 
 extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
-static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
-{
-	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
-	   number of warnings when compiling with -W --ANK
-	 */
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf)
-		return -ENOMEM;
-	skb_set_owner_r(skb, sk);
-	skb_queue_tail(&sk->sk_error_queue, skb);
-	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb->len);
-	return 0;
-}
+extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
 
 /*
  * Recover an error report and clear atomically
@@ -1708,19 +1708,13 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
 static inline
 struct net *sock_net(const struct sock *sk)
 {
-#ifdef CONFIG_NET_NS
-	return sk->sk_net;
-#else
-	return &init_net;
-#endif
+	return read_pnet(&sk->sk_net);
 }
 
 static inline
 void sock_net_set(struct sock *sk, struct net *net)
 {
-#ifdef CONFIG_NET_NS
-	sk->sk_net = net;
-#endif
+	write_pnet(&sk->sk_net, net);
 }
 
 /*
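
The open-coded CONFIG_NET_NS blocks move behind the pnet helpers from include/net/net_namespace.h, which collapse to the same conditional logic. A sketch from memory of what those helpers look like around this kernel version (treat the exact signatures as an assumption, not part of this diff):

static inline void write_pnet(struct net **pnet, struct net *net)
{
#ifdef CONFIG_NET_NS
	*pnet = net;			/* record the owning namespace */
#endif
}

static inline struct net *read_pnet(struct net * const *pnet)
{
#ifdef CONFIG_NET_NS
	return *pnet;			/* per-socket namespace */
#else
	return &init_net;		/* single global namespace */
#endif
}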