Diffstat (limited to 'include/net/sock.h')
-rw-r--r--	include/net/sock.h	83
1 file changed, 75 insertions(+), 8 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 9f96394f694e..1ad6435f252e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -51,6 +51,7 @@
 #include <linux/skbuff.h>	/* struct sk_buff */
 #include <linux/mm.h>
 #include <linux/security.h>
+#include <linux/slab.h>
 
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
@@ -73,7 +74,7 @@
 		printk(KERN_DEBUG msg); } while (0)
 #else
 /* Validate arguments and do nothing */
-static void inline int __attribute__ ((format (printf, 2, 3)))
+static inline void __attribute__ ((format (printf, 2, 3)))
 SOCK_DEBUG(struct sock *sk, const char *msg, ...)
 {
 }
@@ -105,14 +106,17 @@ struct net;
 /**
  *	struct sock_common - minimal network layer representation of sockets
  *	@skc_node: main hash linkage for various protocol lookup tables
- *	@skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
+ *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
  *	@skc_refcnt: reference count
+ *	@skc_tx_queue_mapping: tx queue number for this connection
  *	@skc_hash: hash value used with various protocol lookup tables
+ *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
  *	@skc_family: network address family
  *	@skc_state: Connection state
  *	@skc_reuse: %SO_REUSEADDR setting
  *	@skc_bound_dev_if: bound device index if != 0
  *	@skc_bind_node: bind hash linkage for various protocol lookup tables
+ *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
  *	@skc_prot: protocol handlers inside a network family
  *	@skc_net: reference to the network namespace of this socket
  *
@@ -128,13 +132,20 @@ struct sock_common {
 		struct hlist_nulls_node skc_nulls_node;
 	};
 	atomic_t		skc_refcnt;
+	int			skc_tx_queue_mapping;
 
-	unsigned int		skc_hash;
+	union {
+		unsigned int	skc_hash;
+		__u16		skc_u16hashes[2];
+	};
 	unsigned short		skc_family;
 	volatile unsigned char	skc_state;
 	unsigned char		skc_reuse;
 	int			skc_bound_dev_if;
-	struct hlist_node	skc_bind_node;
+	union {
+		struct hlist_node	skc_bind_node;
+		struct hlist_nulls_node skc_portaddr_node;
+	};
 	struct proto		*skc_prot;
 #ifdef CONFIG_NET_NS
 	struct net		*skc_net;
@@ -215,6 +226,7 @@ struct sock {
 #define sk_node			__sk_common.skc_node
 #define sk_nulls_node		__sk_common.skc_nulls_node
 #define sk_refcnt		__sk_common.skc_refcnt
+#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping
 
 #define sk_copy_start		__sk_common.skc_hash
 #define sk_hash			__sk_common.skc_hash
@@ -242,6 +254,8 @@ struct sock {
 	struct {
 		struct sk_buff *head;
 		struct sk_buff *tail;
+		int len;
+		int limit;
 	} sk_backlog;
 	wait_queue_head_t	*sk_sleep;
 	struct dst_entry	*sk_dst_cache;
@@ -306,6 +320,11 @@ struct sock {
 /*
  * Hashed lists helper routines
  */
+static inline struct sock *sk_entry(const struct hlist_node *node)
+{
+	return hlist_entry(node, struct sock, sk_node);
+}
+
 static inline struct sock *__sk_head(const struct hlist_head *head)
 {
 	return hlist_entry(head->first, struct sock, sk_node);
@@ -365,6 +384,7 @@ static __inline__ void __sk_del_node(struct sock *sk)
 	__hlist_del(&sk->sk_node);
 }
 
+/* NB: equivalent to hlist_del_init_rcu */
 static __inline__ int __sk_del_node_init(struct sock *sk)
 {
 	if (sk_hashed(sk)) {
@@ -405,6 +425,7 @@ static __inline__ int sk_del_node_init(struct sock *sk)
 	}
 	return rc;
 }
+#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)
 
 static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
 {
@@ -438,6 +459,12 @@ static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
 	__sk_add_node(sk, list);
 }
 
+static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+{
+	sock_hold(sk);
+	hlist_add_head_rcu(&sk->sk_node, list);
+}
+
 static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
 	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
@@ -462,6 +489,8 @@ static __inline__ void sk_add_bind_node(struct sock *sk,
 
 #define sk_for_each(__sk, node, list) \
 	hlist_for_each_entry(__sk, node, list, sk_node)
+#define sk_for_each_rcu(__sk, node, list) \
+	hlist_for_each_entry_rcu(__sk, node, list, sk_node)
 #define sk_nulls_for_each(__sk, node, list) \
 	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
 #define sk_nulls_for_each_rcu(__sk, node, list) \
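
Not part of the patch: a minimal sketch of how the new RCU helpers (sk_add_node_rcu and sk_for_each_rcu) are meant to pair up. The hash chain, the writer-side lock (my_hash_lock) and the routines my_proto_lookup()/my_proto_hash() are hypothetical; callers of the lookup must hold rcu_read_lock() across the lookup and any use of the returned socket.

static DEFINE_SPINLOCK(my_hash_lock);		/* hypothetical writer-side lock */

/* Reader side: lockless walk of an RCU-protected socket chain. */
static struct sock *my_proto_lookup(struct hlist_head *chain, unsigned int hash)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each_rcu(sk, node, chain)
		if (sk->sk_hash == hash)	/* simplistic match, for illustration */
			return sk;
	return NULL;
}

/* Writer side: serialized by my_hash_lock; sk_add_node_rcu() takes a
 * reference via sock_hold() before publishing the node. */
static void my_proto_hash(struct sock *sk, struct hlist_head *chain)
{
	spin_lock(&my_hash_lock);
	sk_add_node_rcu(sk, chain);
	spin_unlock(&my_hash_lock);
}
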
@@ -504,6 +533,8 @@ enum sock_flags {
 	SOCK_TIMESTAMPING_SOFTWARE, /* %SOF_TIMESTAMPING_SOFTWARE */
 	SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
 	SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
+	SOCK_FASYNC, /* fasync() active */
+	SOCK_RXQ_OVFL,
 };
 
 static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
@@ -561,8 +592,8 @@ static inline int sk_stream_memory_free(struct sock *sk)
 	return sk->sk_wmem_queued < sk->sk_sndbuf;
 }
 
-/* The per-socket spinlock must be held here. */
-static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+/* OOB backlog add */
+static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
 	if (!sk->sk_backlog.tail) {
 		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
@@ -573,6 +604,17 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 	skb->next = NULL;
 }
 
+/* The per-socket spinlock must be held here. */
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+	if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
+		return -ENOBUFS;
+
+	__sk_add_backlog(sk, skb);
+	sk->sk_backlog.len += skb->truesize;
+	return 0;
+}
+
 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	return sk->sk_backlog_rcv(sk, skb);
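
Not part of the patch: because sk_add_backlog() can now fail with -ENOBUFS, a protocol's receive path should check the return value and drop the packet instead of letting the backlog grow without bound. A hedged sketch of such a caller (my_proto_rcv() is hypothetical; only the socket helpers it calls are real):

static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		rc = sk_backlog_rcv(sk, skb);	/* process in softirq context */
	} else if (sk_add_backlog(sk, skb)) {
		/* Backlog is full: drop rather than queue unboundedly. */
		bh_unlock_sock(sk);
		kfree_skb(skb);
		return -ENOBUFS;
	}
	bh_unlock_sock(sk);
	return rc;
}

The new sk_backlog.len/limit fields added earlier in this patch are what the bound is computed from; __sk_add_backlog() itself stays unchecked for callers that already account for memory.
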
@@ -1031,7 +1073,7 @@ extern void sk_common_release(struct sock *sk);
 extern void sock_init_data(struct socket *sock, struct sock *sk);
 
 /**
- *	sk_filter_release: Release a socket filter
+ *	sk_filter_release - release a socket filter
  *	@fp: filter to remove
  *
  *	Remove a filter from a socket and release its resources.
@@ -1092,8 +1134,29 @@ static inline void sock_put(struct sock *sk)
 extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 			  const int nested);
 
+static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
+{
+	sk->sk_tx_queue_mapping = tx_queue;
+}
+
+static inline void sk_tx_queue_clear(struct sock *sk)
+{
+	sk->sk_tx_queue_mapping = -1;
+}
+
+static inline int sk_tx_queue_get(const struct sock *sk)
+{
+	return sk->sk_tx_queue_mapping;
+}
+
+static inline bool sk_tx_queue_recorded(const struct sock *sk)
+{
+	return (sk && sk->sk_tx_queue_mapping >= 0);
+}
+
 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
 {
+	sk_tx_queue_clear(sk);
 	sk->sk_socket = sock;
 }
 
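
Not part of the patch: these accessors let the transmit path cache which device queue a socket last used, so queue selection need not be recomputed for every packet. A hypothetical selection helper as an illustration (my_pick_tx_queue() and its fallback policy are assumptions, not the core kernel code):

static u16 my_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	u16 queue;

	/* Reuse a previously recorded queue if the socket has one... */
	if (sk_tx_queue_recorded(sk))
		return sk_tx_queue_get(sk);

	/* ...otherwise pick one and remember it so later packets from
	 * this socket keep hitting the same queue. */
	queue = skb_get_queue_mapping(skb) % dev->real_num_tx_queues;
	if (sk && sk->sk_dst_cache)
		sk_tx_queue_set(sk, queue);

	return queue;
}

The hunks below that call sk_tx_queue_clear() from __sk_dst_set() and __sk_dst_reset() keep this cached mapping consistent when the socket's route changes.
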
@@ -1150,6 +1213,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
 	struct dst_entry *old_dst;
 
+	sk_tx_queue_clear(sk);
 	old_dst = sk->sk_dst_cache;
 	sk->sk_dst_cache = dst;
 	dst_release(old_dst);
@@ -1168,6 +1232,7 @@ __sk_dst_reset(struct sock *sk)
 {
 	struct dst_entry *old_dst;
 
+	sk_tx_queue_clear(sk);
 	old_dst = sk->sk_dst_cache;
 	sk->sk_dst_cache = NULL;
 	dst_release(old_dst);
@@ -1396,7 +1461,7 @@ static inline unsigned long sock_wspace(struct sock *sk)
 
 static inline void sk_wake_async(struct sock *sk, int how, int band)
 {
-	if (sk->sk_socket && sk->sk_socket->fasync_list)
+	if (sock_flag(sk, SOCK_FASYNC))
 		sock_wake_async(sk->sk_socket, how, band);
 }
 
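
Not part of the patch: sk_wake_async() now tests the SOCK_FASYNC flag added above instead of dereferencing sk->sk_socket->fasync_list, so whatever manages the fasync list must keep the flag in sync. A hedged sketch of such a handler (my_sock_fasync() and its fasync list pointer are illustrative only):

static int my_sock_fasync(int fd, struct file *filp, int on,
			  struct fasync_struct **fa_list, struct sock *sk)
{
	int err;

	lock_sock(sk);
	err = fasync_helper(fd, filp, on, fa_list);
	if (err >= 0) {
		/* Mirror the fasync list state into the socket flag so
		 * sk_wake_async() can test it without touching sk_socket. */
		if (*fa_list)
			sock_set_flag(sk, SOCK_FASYNC);
		else
			sock_reset_flag(sk, SOCK_FASYNC);
	}
	release_sock(sk);
	return err;
}
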
@@ -1492,6 +1557,8 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
 	sk->sk_stamp = kt;
 }
 
+extern void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb);
+
 /**
  * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
  * @msg:	outgoing packet