author     Len Brown <len.brown@intel.com>   2005-12-06 17:31:30 -0500
committer  Len Brown <len.brown@intel.com>   2005-12-06 17:31:30 -0500
commit     3d5271f9883cba7b54762bc4fe027d4172f06db7 (patch)
tree       ab8a881a14478598a0c8bda0d26c62cdccfffd6d /include/net/sock.h
parent     378b2556f4e09fa6f87ff0cb5c4395ff28257d02 (diff)
parent     9115a6c787596e687df03010d97fccc5e0762506 (diff)

Pull release into acpica branch

Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h  49
1 file changed, 28 insertions(+), 21 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 8c48fbecb7cf..982b4ecd187b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -99,6 +99,7 @@ struct proto;
  * @skc_node: main hash linkage for various protocol lookup tables
  * @skc_bind_node: bind hash linkage for various protocol lookup tables
  * @skc_refcnt: reference count
+ * @skc_hash: hash value used with various protocol lookup tables
  * @skc_prot: protocol handlers inside a network family
  *
  * This is the minimal network layer representation of sockets, the header
@@ -112,6 +113,7 @@ struct sock_common {
 	struct hlist_node	skc_node;
 	struct hlist_node	skc_bind_node;
 	atomic_t		skc_refcnt;
+	unsigned int		skc_hash;
 	struct proto		*skc_prot;
 };
 
@@ -139,7 +141,6 @@ struct sock_common {
  * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets
  * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  * @sk_lingertime: %SO_LINGER l_linger setting
- * @sk_hashent: hash entry in several tables (e.g. inet_hashinfo.ehash)
  * @sk_backlog: always used with the per-socket spinlock held
  * @sk_callback_lock: used with the callbacks in the end of this struct
  * @sk_error_queue: rarely used
@@ -186,6 +187,7 @@ struct sock {
 #define sk_node			__sk_common.skc_node
 #define sk_bind_node		__sk_common.skc_bind_node
 #define sk_refcnt		__sk_common.skc_refcnt
+#define sk_hash			__sk_common.skc_hash
 #define sk_prot			__sk_common.skc_prot
 	unsigned char		sk_shutdown : 2,
 				sk_no_check : 2,
@@ -205,10 +207,9 @@ struct sock {
 	struct sk_buff_head	sk_write_queue;
 	int			sk_wmem_queued;
 	int			sk_forward_alloc;
-	unsigned int		sk_allocation;
+	gfp_t			sk_allocation;
 	int			sk_sndbuf;
 	int			sk_route_caps;
-	int			sk_hashent;
 	unsigned long		sk_flags;
 	unsigned long		sk_lingertime;
 	/*
@@ -460,16 +461,16 @@ static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
 }
 
 /* The per-socket spinlock must be held here. */
-#define sk_add_backlog(__sk, __skb)				\
-do {	if (!(__sk)->sk_backlog.tail) {				\
-		(__sk)->sk_backlog.head =			\
-		(__sk)->sk_backlog.tail = (__skb);		\
-	} else {						\
-		((__sk)->sk_backlog.tail)->next = (__skb);	\
-		(__sk)->sk_backlog.tail = (__skb);		\
-	}							\
-	(__skb)->next = NULL;					\
-} while(0)
+static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+	if (!sk->sk_backlog.tail) {
+		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
+	} else {
+		sk->sk_backlog.tail->next = skb;
+		sk->sk_backlog.tail = skb;
+	}
+	skb->next = NULL;
+}
 
 #define sk_wait_event(__sk, __timeo, __condition)		\
 ({	int rc;							\
@@ -738,18 +739,18 @@ extern void FASTCALL(release_sock(struct sock *sk));
 #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
 
 extern struct sock		*sk_alloc(int family,
-					  unsigned int __nocast priority,
+					  gfp_t priority,
 					  struct proto *prot, int zero_it);
 extern void			sk_free(struct sock *sk);
 extern struct sock		*sk_clone(const struct sock *sk,
-					  const unsigned int __nocast priority);
+					  const gfp_t priority);
 
 extern struct sk_buff		*sock_wmalloc(struct sock *sk,
 					      unsigned long size, int force,
-					      unsigned int __nocast priority);
+					      gfp_t priority);
 extern struct sk_buff		*sock_rmalloc(struct sock *sk,
 					      unsigned long size, int force,
-					      unsigned int __nocast priority);
+					      gfp_t priority);
 extern void			sock_wfree(struct sk_buff *skb);
 extern void			sock_rfree(struct sk_buff *skb);
 
@@ -765,7 +766,7 @@ extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
 						     int noblock,
 						     int *errcode);
 extern void *sock_kmalloc(struct sock *sk, int size,
-			  unsigned int __nocast priority);
+			  gfp_t priority);
 extern void sock_kfree_s(struct sock *sk, void *mem, int size);
 extern void sk_send_sigurg(struct sock *sk);
 
@@ -1200,7 +1201,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 
 static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
 						   int size, int mem,
-						   unsigned int __nocast gfp)
+						   gfp_t gfp)
 {
 	struct sk_buff *skb;
 	int hdr_len;
@@ -1223,7 +1224,7 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
 
 static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
 						  int size,
-						  unsigned int __nocast gfp)
+						  gfp_t gfp)
 {
 	return sk_stream_alloc_pskb(sk, size, 0, gfp);
 }
@@ -1246,6 +1247,12 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 	     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
 	     skb = skb->next)
 
+/*from STCP for fast SACK Process*/
+#define sk_stream_for_retrans_queue_from(skb, sk)		\
+	for (; (skb != (sk)->sk_send_head) &&			\
+	     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
+	     skb = skb->next)
+
 /*
  * Default write policy as shown to user space via poll/select/SIGIO
  */
@@ -1254,7 +1261,7 @@ static inline int sock_writeable(const struct sock *sk)
 	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
 }
 
-static inline unsigned int __nocast gfp_any(void)
+static inline gfp_t gfp_any(void)
 {
 	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
 }
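
The sketches below are editorial illustrations of how the interfaces changed above are typically used; none of this code is part of the commit.

The per-table sk_hashent slot is replaced by sk_hash (an alias for the new skc_hash in sock_common), so lookup and unhash paths can reuse a precomputed hash value instead of recomputing it. A minimal sketch of that pattern, assuming the 2.6-era struct inet_hashinfo layout; my_ehash_chain() is a hypothetical helper, not part of the patch:

#include <net/sock.h>
#include <net/inet_hashtables.h>

/* Hypothetical helper: map a socket to its established-hash chain
 * using the precomputed sk->sk_hash instead of rehashing. */
static inline struct hlist_head *my_ehash_chain(struct inet_hashinfo *hashinfo,
						const struct sock *sk)
{
	return &hashinfo->ehash[sk->sk_hash & (hashinfo->ehash_size - 1)].chain;
}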
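sk_add_backlog() becomes a typed static inline rather than a multi-statement macro; it still requires the per-socket spinlock. A minimal sketch of the usual call site, loosely following the pattern of existing protocol receive handlers; my_proto_rcv() and my_proto_do_rcv() are hypothetical names:

#include <net/sock.h>

static int my_proto_do_rcv(struct sock *sk, struct sk_buff *skb); /* hypothetical */

/* Hypothetical softirq receive hook for a protocol built on struct sock. */
static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret = 0;

	bh_lock_sock(sk);			/* take sk->sk_lock.slock */
	if (!sock_owned_by_user(sk))
		ret = my_proto_do_rcv(sk, skb);	/* process immediately */
	else
		sk_add_backlog(sk, skb);	/* defer to release_sock() */
	bh_unlock_sock(sk);

	return ret;
}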
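The new sk_stream_for_retrans_queue_from() iterator (brought in from the STCP fast-SACK work) walks the retransmit queue starting at a caller-supplied skb rather than the queue head. A sketch of the intended use, assuming the caller keeps a per-connection position hint; the hint argument and helper name are illustrative:

#include <net/sock.h>

/* Hypothetical SACK-processing walk that resumes from a cached position. */
static void my_walk_retrans_queue_from(struct sock *sk, struct sk_buff *hint)
{
	/* Fall back to the head of the write queue when there is no hint. */
	struct sk_buff *skb = hint ? hint : sk->sk_write_queue.next;

	sk_stream_for_retrans_queue_from(skb, sk) {
		/* per-skb scoreboard bookkeeping would go here */
	}
}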
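The __nocast annotations become the sparse-checked gfp_t type, and gfp_any() now returns gfp_t as well; it yields GFP_ATOMIC in softirq context and GFP_KERNEL otherwise, which suits helpers that may run in either. A small sketch under that assumption; my_alloc_notice() is an invented example:

#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical helper callable from both process and softirq context. */
static struct sk_buff *my_alloc_notice(unsigned int len)
{
	/* gfp_any() keeps the allocation safe for the current context. */
	return alloc_skb(len, gfp_any());
}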