 include/net/sock.h | 18 +++++++++---------
 net/core/sock.c    |  2 +-
 2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 2c7d60ca3548..a3366c3c837a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -202,6 +202,15 @@ struct sock {
 	unsigned short	sk_type;
 	int		sk_rcvbuf;
 	socket_lock_t	sk_lock;
+	/*
+	 * The backlog queue is special, it is always used with
+	 * the per-socket spinlock held and requires low latency
+	 * access. Therefore we special case it's implementation.
+	 */
+	struct {
+		struct sk_buff *head;
+		struct sk_buff *tail;
+	} sk_backlog;
 	wait_queue_head_t	*sk_sleep;
 	struct dst_entry	*sk_dst_cache;
 	struct xfrm_policy	*sk_policy[2];
@@ -221,15 +230,6 @@ struct sock {
 	int		sk_rcvlowat;
 	unsigned long	sk_flags;
 	unsigned long	sk_lingertime;
-	/*
-	 * The backlog queue is special, it is always used with
-	 * the per-socket spinlock held and requires low latency
-	 * access. Therefore we special case it's implementation.
-	 */
-	struct {
-		struct sk_buff *head;
-		struct sk_buff *tail;
-	} sk_backlog;
 	struct sk_buff_head	sk_error_queue;
 	struct proto		*sk_prot_creator;
 	rwlock_t		sk_callback_lock;
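
The comment being moved states the rationale for the relocation: sk_backlog is only ever touched with the per-socket spinlock in sk_lock held, so placing the two fields next to each other keeps them in the same cache line on the hot receive path. For context, the enqueue side looks roughly like the sk_add_backlog() macro in sock.h of this era (shown as an illustrative sketch; the exact macro body may differ between kernel versions):

	/* The per-socket spinlock must be held here. */
	#define sk_add_backlog(__sk, __skb)				\
	do {	if (!(__sk)->sk_backlog.tail) {				\
			(__sk)->sk_backlog.head =			\
			     (__sk)->sk_backlog.tail = (__skb);		\
		} else {						\
			((__sk)->sk_backlog.tail)->next = (__skb);	\
			(__sk)->sk_backlog.tail = (__skb);		\
		}							\
		(__skb)->next = NULL;					\
	} while (0)

Every access to sk_backlog.head and sk_backlog.tail here happens immediately after the per-socket spinlock is taken, which is why co-locating the queue with sk_lock pays off.
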
diff --git a/net/core/sock.c b/net/core/sock.c
index 27c4f62382bd..6d35d5775ba8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -904,6 +904,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 		sk_node_init(&newsk->sk_node);
 		sock_lock_init(newsk);
 		bh_lock_sock(newsk);
+		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
 
 		atomic_set(&newsk->sk_rmem_alloc, 0);
 		atomic_set(&newsk->sk_wmem_alloc, 0);
@@ -923,7 +924,6 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 		newsk->sk_wmem_queued = 0;
 		newsk->sk_forward_alloc = 0;
 		newsk->sk_send_head = NULL;
-		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
 		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
 
 		sock_reset_flag(newsk, SOCK_DONE);
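
On the sk_clone() side the change is purely about ordering: the clone starts life as a memcpy() of the parent socket, so newsk->sk_backlog initially still points at the parent's queue. Resetting it is now part of the initial sanity block, right after bh_lock_sock(), instead of further down among the counter resets. A condensed, illustrative sketch of the resulting sequence (simplified; error handling and unrelated fields omitted):

	struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
	{
		struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);

		if (newsk != NULL) {
			/* Start from a byte-for-byte copy of the parent. */
			memcpy(newsk, sk, sk->sk_prot->obj_size);

			/* SANITY: scrub fields the clone must not inherit.
			 * After the memcpy() the backlog pointers still
			 * reference the parent's queue, so clear them here,
			 * before anything else can look at the new socket. */
			sk_node_init(&newsk->sk_node);
			sock_lock_init(newsk);
			bh_lock_sock(newsk);
			newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;

			/* ... remaining per-socket state is reset below ... */
		}
		return newsk;
	}
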