path: root/include/net/sock.h
Diffstat (limited to 'include/net/sock.h')
-rw-r--r-- include/net/sock.h | 34
1 file changed, 31 insertions(+), 3 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 3f1a4804bb3f..092b0551e77f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -253,6 +253,8 @@ struct sock {
 	struct {
 		struct sk_buff *head;
 		struct sk_buff *tail;
+		int len;
+		int limit;
 	} sk_backlog;
 	wait_queue_head_t	*sk_sleep;
 	struct dst_entry	*sk_dst_cache;
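The two new fields add accounting to the backlog queue: len tracks the total truesize of queued skbs, while limit gives protocols a knob to cap it (consulted by the new sk_add_backlog() later in this patch, which falls back to twice sk_rcvbuf when limit is smaller). A minimal sketch of how a protocol might seed the limit at init time; the helper name and sizing policy here are illustrative, not part of this patch:

#include <net/sock.h>

/* Hypothetical init helper: start with an empty backlog and cap it
 * at roughly twice the receive buffer, matching the max() fallback
 * used by the new sk_add_backlog(). */
static void example_backlog_init(struct sock *sk)
{
	sk->sk_backlog.len = 0;
	sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
}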
@@ -317,6 +319,11 @@ struct sock {
 /*
  * Hashed lists helper routines
  */
+static inline struct sock *sk_entry(const struct hlist_node *node)
+{
+	return hlist_entry(node, struct sock, sk_node);
+}
+
 static inline struct sock *__sk_head(const struct hlist_head *head)
 {
 	return hlist_entry(head->first, struct sock, sk_node);
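sk_entry() is a thin hlist_entry() (i.e. container_of()) wrapper that recovers the struct sock from its embedded sk_node. A hedged sketch of a manual chain walk using it; the chain head and the TCP_ESTABLISHED filter are assumed context, not part of this patch:

#include <net/sock.h>
#include <net/tcp_states.h>

/* Illustrative: walk a hash chain node by node and return the first
 * established socket, mapping each node back to its struct sock. */
static struct sock *example_first_established(struct hlist_head *chain)
{
	struct hlist_node *node;

	for (node = chain->first; node; node = node->next) {
		struct sock *sk = sk_entry(node);

		if (sk->sk_state == TCP_ESTABLISHED)
			return sk;
	}
	return NULL;
}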
@@ -376,6 +383,7 @@ static __inline__ void __sk_del_node(struct sock *sk)
 	__hlist_del(&sk->sk_node);
 }
 
+/* NB: equivalent to hlist_del_init_rcu */
 static __inline__ int __sk_del_node_init(struct sock *sk)
 {
 	if (sk_hashed(sk)) {
@@ -416,6 +424,7 @@ static __inline__ int sk_del_node_init(struct sock *sk)
 	}
 	return rc;
 }
+#define sk_del_node_init_rcu(sk) sk_del_node_init(sk)
 
 static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
 {
@@ -449,6 +458,12 @@ static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
 	__sk_add_node(sk, list);
 }
 
+static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+{
+	sock_hold(sk);
+	hlist_add_head_rcu(&sk->sk_node, list);
+}
+
 static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
 	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
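sk_add_node_rcu() mirrors the plain sk_add_node(): grab a reference on behalf of the table, then publish with hlist_add_head_rcu() so lockless readers can observe the socket. Insertions still need writer-side serialization; only the readers go lockless. A hedged sketch, with the chain lock assumed context:

#include <net/sock.h>

/* Illustrative hash-insert path: take the chain lock, then publish
 * the socket to concurrent RCU readers. The sock_hold() inside
 * sk_add_node_rcu() keeps the socket pinned while it is hashed. */
static void example_hash(struct sock *sk, struct hlist_head *chain,
			 spinlock_t *lock)
{
	spin_lock(lock);
	sk_add_node_rcu(sk, chain);
	spin_unlock(lock);
}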
@@ -473,6 +488,8 @@ static __inline__ void sk_add_bind_node(struct sock *sk,
 
 #define sk_for_each(__sk, node, list) \
 	hlist_for_each_entry(__sk, node, list, sk_node)
+#define sk_for_each_rcu(__sk, node, list) \
+	hlist_for_each_entry_rcu(__sk, node, list, sk_node)
 #define sk_nulls_for_each(__sk, node, list) \
 	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
 #define sk_nulls_for_each_rcu(__sk, node, list) \
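sk_for_each_rcu completes the picture: with sk_add_node_rcu()/sk_del_node_init_rcu() on the write side, lookups can traverse the chain under rcu_read_lock() alone. A hedged lookup sketch; matching on sk_hash is a simplification of a real protocol's compare, and it assumes sockets on this chain are only freed after a grace period:

#include <net/sock.h>

/* Illustrative lockless lookup: readers hold only rcu_read_lock()
 * and must take their own reference before leaving the read-side
 * critical section. */
static struct sock *example_lookup(struct hlist_head *chain,
				   unsigned int hash)
{
	struct sock *sk;
	struct hlist_node *node;

	rcu_read_lock();
	sk_for_each_rcu(sk, node, chain) {
		if (sk->sk_hash == hash) {
			sock_hold(sk);
			rcu_read_unlock();
			return sk;
		}
	}
	rcu_read_unlock();
	return NULL;
}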
@@ -574,8 +591,8 @@ static inline int sk_stream_memory_free(struct sock *sk)
 	return sk->sk_wmem_queued < sk->sk_sndbuf;
 }
 
-/* The per-socket spinlock must be held here. */
-static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+/* OOB backlog add */
+static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
 	if (!sk->sk_backlog.tail) {
 		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
@@ -586,6 +603,17 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 	skb->next = NULL;
 }
 
+/* The per-socket spinlock must be held here. */
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+	if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
+		return -ENOBUFS;
+
+	__sk_add_backlog(sk, skb);
+	sk->sk_backlog.len += skb->truesize;
+	return 0;
+}
+
 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	return sk->sk_backlog_rcv(sk, skb);
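This is the behavioral core of the patch: sk_add_backlog() can now fail. Once sk_backlog.len reaches max(limit, sk_rcvbuf << 1), further packets get -ENOBUFS instead of growing the queue without bound, and __must_check forces every caller to deal with the drop; the old unchecked queueing survives as __sk_add_backlog() for out-of-band cases that manage accounting themselves. A hedged sketch of the caller pattern this implies; real protocols would also bump their SNMP drop counters here:

#include <net/sock.h>

/* Illustrative receive path: process directly when the socket is
 * not owned by a user context, otherwise queue to the backlog and
 * free the skb if the new limit rejects it. */
static int example_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		rc = sk_backlog_rcv(sk, skb);
	} else if (sk_add_backlog(sk, skb)) {
		/* Backlog over limit: drop the packet. */
		kfree_skb(skb);
		rc = -ENOBUFS;
	}
	bh_unlock_sock(sk);
	return rc;
}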
@@ -1044,7 +1072,7 @@ extern void sk_common_release(struct sock *sk);
 extern void sock_init_data(struct socket *sock, struct sock *sk);
 
 /**
- * sk_filter_release: Release a socket filter
+ * sk_filter_release - release a socket filter
  * @fp: filter to remove
  *
  * Remove a filter from a socket and release its resources.