Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h | 39
1 file changed, 22 insertions(+), 17 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index dfeb8b13024f..453c79d0915b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -40,6 +40,7 @@
 #ifndef _SOCK_H
 #define _SOCK_H
 
+#include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/timer.h>
 #include <linux/cache.h>
@@ -55,6 +56,7 @@
 #include <asm/atomic.h>
 #include <net/dst.h>
 #include <net/checksum.h>
+#include <net/net_namespace.h>
 
 /*
  * This structure really needs to be cleaned up.
@@ -75,10 +77,9 @@
  * between user contexts and software interrupt processing, whereas the
  * mini-semaphore synchronizes multiple users amongst themselves.
  */
-struct sock_iocb;
 typedef struct {
 	spinlock_t		slock;
-	struct sock_iocb	*owner;
+	int			owned;
 	wait_queue_head_t	wq;
 	/*
 	 * We express the mutex-alike socket_lock semantics
@@ -105,6 +106,7 @@ struct proto;
  *	@skc_refcnt: reference count
  *	@skc_hash: hash value used with various protocol lookup tables
  *	@skc_prot: protocol handlers inside a network family
+ *	@skc_net: reference to the network namespace of this socket
  *
  *	This is the minimal network layer representation of sockets, the header
  *	for struct sock and struct inet_timewait_sock.
@@ -119,6 +121,7 @@ struct sock_common {
 	atomic_t		skc_refcnt;
 	unsigned int		skc_hash;
 	struct proto		*skc_prot;
+	struct net		*skc_net;
 };
 
 /**
@@ -195,6 +198,7 @@ struct sock {
 #define sk_refcnt		__sk_common.skc_refcnt
 #define sk_hash			__sk_common.skc_hash
 #define sk_prot			__sk_common.skc_prot
+#define sk_net			__sk_common.skc_net
 	unsigned char		sk_shutdown : 2,
 				sk_no_check : 2,
 				sk_userlocks : 4;
@@ -481,17 +485,17 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 	skb->next = NULL;
 }
 
 #define sk_wait_event(__sk, __timeo, __condition)		\
-({	int rc;							\
+({	int __rc;						\
 	release_sock(__sk);					\
-	rc = __condition;					\
-	if (!rc) {						\
+	__rc = __condition;					\
+	if (!__rc) {						\
 		*(__timeo) = schedule_timeout(*(__timeo));	\
 	}							\
 	lock_sock(__sk);					\
-	rc = __condition;					\
-	rc;							\
+	__rc = __condition;					\
+	__rc;							\
 })
 
 extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
 extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
@@ -702,7 +706,7 @@ extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);
 
 static inline int sk_stream_pages(int amt)
 {
-	return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
+	return DIV_ROUND_UP(amt, SK_STREAM_MEM_QUANTUM);
 }
 
 static inline void sk_stream_mem_reclaim(struct sock *sk)
@@ -736,7 +740,7 @@ static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
  * Since ~2.3.5 it is also exclusive sleep lock serializing
  * accesses from user process context.
  */
-#define sock_owned_by_user(sk)	((sk)->sk_lock.owner)
+#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)
 
 /*
  * Macro so as to not evaluate some arguments when
@@ -747,7 +751,7 @@ static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
  */
 #define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
 do {									\
-	sk->sk_lock.owner = NULL;					\
+	sk->sk_lock.owned = 0;						\
 	init_waitqueue_head(&sk->sk_lock.wq);				\
 	spin_lock_init(&(sk)->sk_lock.slock);				\
 	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
@@ -773,7 +777,7 @@ extern void FASTCALL(release_sock(struct sock *sk));
 				SINGLE_DEPTH_NESTING)
 #define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
 
-extern struct sock		*sk_alloc(int family,
+extern struct sock		*sk_alloc(struct net *net, int family,
 					  gfp_t priority,
 					  struct proto *prot, int zero_it);
 extern void			sk_free(struct sock *sk);
@@ -1002,6 +1006,7 @@ static inline void sock_copy(struct sock *nsk, const struct sock *osk)
 #endif
 
 	memcpy(nsk, osk, osk->sk_prot->obj_size);
+	get_net(nsk->sk_net);
 #ifdef CONFIG_SECURITY_NETWORK
 	nsk->sk_security = sptr;
 	security_sk_clone(osk, nsk);
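
Two themes are visible in this header change: the socket lock's `owner` pointer is reduced to a plain `owned` flag, and `struct sock` gains an `sk_net` pointer to its network namespace, threaded through the new `sk_alloc()` prototype and reference-counted in `sock_copy()`. The fragment below is a minimal illustrative sketch, written against the declarations shown above and not part of the patch; `example_rcv()` and `example_alloc()` are hypothetical helpers.

/*
 * Illustrative sketch only -- not part of the patch.  Assumes the
 * declarations visible above (sock_owned_by_user(), sk_add_backlog(),
 * the new sk_alloc() prototype); example_rcv() and example_alloc()
 * are hypothetical helpers.
 */
#include <net/sock.h>
#include <net/net_namespace.h>

/*
 * Bottom-half receive path: with the lock reduced to an "owned" flag,
 * softirq code still asks the same question -- does a user context
 * currently own the socket? -- and queues to the backlog if so.
 */
static int example_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		ret = sk->sk_backlog_rcv(sk, skb);	/* process now */
	else
		sk_add_backlog(sk, skb);	/* drained later by release_sock() */
	bh_unlock_sock(sk);

	return ret;
}

/*
 * Allocation: callers of the new sk_alloc() must now name the network
 * namespace the socket belongs to; &init_net is the initial namespace.
 */
static struct sock *example_alloc(struct proto *prot)
{
	return sk_alloc(&init_net, PF_INET, GFP_KERNEL, prot, 1);
}

In the real receive paths this pattern is what makes the `owned` flag sufficient: release_sock() replays any queued backlog through the same sk_backlog_rcv callback once the user context drops the lock.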