Diffstat (limited to 'include/net/sock.h')

 include/net/sock.h | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 81 insertions(+), 5 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index 2f47107f6d0f..5a3a151bd730 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -42,6 +42,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/list_nulls.h>
 #include <linux/timer.h>
 #include <linux/cache.h>
 #include <linux/module.h>
@@ -52,6 +53,7 @@
 #include <linux/security.h>
 
 #include <linux/filter.h>
+#include <linux/rculist_nulls.h>
 
 #include <asm/atomic.h>
 #include <net/dst.h>
@@ -106,6 +108,7 @@ struct net;
  * @skc_reuse: %SO_REUSEADDR setting
  * @skc_bound_dev_if: bound device index if != 0
  * @skc_node: main hash linkage for various protocol lookup tables
+ * @skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
  * @skc_bind_node: bind hash linkage for various protocol lookup tables
  * @skc_refcnt: reference count
  * @skc_hash: hash value used with various protocol lookup tables
@@ -120,7 +123,10 @@ struct sock_common {
 	volatile unsigned char	skc_state;
 	unsigned char		skc_reuse;
 	int			skc_bound_dev_if;
-	struct hlist_node	skc_node;
+	union {
+		struct hlist_node	skc_node;
+		struct hlist_nulls_node skc_nulls_node;
+	};
 	struct hlist_node	skc_bind_node;
 	atomic_t		skc_refcnt;
 	unsigned int		skc_hash;
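For context on the new linkage type (not part of this diff): an hlist_nulls chain ends in a "nulls" marker with the low bit set instead of a NULL pointer, and the node layout matches struct hlist_node, which is what makes the union above free. Roughly, from include/linux/list_nulls.h:

```c
struct hlist_nulls_node {
	struct hlist_nulls_node *next, **pprev;	/* same layout as hlist_node */
};

/* low bit set: this "pointer" is a chain terminator, not a node */
static inline int is_a_nulls(const struct hlist_nulls_node *ptr)
{
	return ((unsigned long)ptr & 1);
}

/* the terminator carries a value, typically the hash slot it belongs to */
static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
{
	return ((unsigned long)ptr) >> 1;
}
```

An RCU reader that falls off the end of a chain can therefore tell which chain it ended on, the property the lockless lookup relies on (see the iterator macros further down).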
@@ -206,6 +212,7 @@ struct sock {
 #define sk_reuse		__sk_common.skc_reuse
 #define sk_bound_dev_if		__sk_common.skc_bound_dev_if
 #define sk_node			__sk_common.skc_node
+#define sk_nulls_node		__sk_common.skc_nulls_node
 #define sk_bind_node		__sk_common.skc_bind_node
 #define sk_refcnt		__sk_common.skc_refcnt
 #define sk_hash			__sk_common.skc_hash
@@ -229,7 +236,9 @@ struct sock {
 	} sk_backlog;
 	wait_queue_head_t	*sk_sleep;
 	struct dst_entry	*sk_dst_cache;
+#ifdef CONFIG_XFRM
 	struct xfrm_policy	*sk_policy[2];
+#endif
 	rwlock_t		sk_dst_lock;
 	atomic_t		sk_rmem_alloc;
 	atomic_t		sk_wmem_alloc;
@@ -237,7 +246,9 @@ struct sock {
 	int			sk_sndbuf;
 	struct sk_buff_head	sk_receive_queue;
 	struct sk_buff_head	sk_write_queue;
+#ifdef CONFIG_NET_DMA
 	struct sk_buff_head	sk_async_wait_queue;
+#endif
 	int			sk_wmem_queued;
 	int			sk_forward_alloc;
 	gfp_t			sk_allocation;
@@ -269,7 +280,9 @@ struct sock {
 	struct sk_buff		*sk_send_head;
 	__u32			sk_sndmsg_off;
 	int			sk_write_pending;
+#ifdef CONFIG_SECURITY
 	void			*sk_security;
+#endif
 	__u32			sk_mark;
 	/* XXX 4 bytes hole on 64 bit */
 	void			(*sk_state_change)(struct sock *sk);
@@ -294,12 +307,30 @@ static inline struct sock *sk_head(const struct hlist_head *head)
 	return hlist_empty(head) ? NULL : __sk_head(head);
 }
 
+static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
+{
+	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
+}
+
+static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
+{
+	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
+}
+
 static inline struct sock *sk_next(const struct sock *sk)
 {
 	return sk->sk_node.next ?
 		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
 }
 
+static inline struct sock *sk_nulls_next(const struct sock *sk)
+{
+	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
+		hlist_nulls_entry(sk->sk_nulls_node.next,
+				  struct sock, sk_nulls_node) :
+		NULL;
+}
+
 static inline int sk_unhashed(const struct sock *sk)
 {
 	return hlist_unhashed(&sk->sk_node);
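A minimal usage sketch for the two new accessors (my_dump_slot is hypothetical; the caller is assumed to hold the bucket lock or rcu_read_lock()):

```c
/* walk one nulls-terminated chain sequentially, e.g. for a /proc dump */
static void my_dump_slot(struct hlist_nulls_head *slot)
{
	struct sock *sk;

	for (sk = sk_nulls_head(slot); sk; sk = sk_nulls_next(sk))
		printk(KERN_DEBUG "sk %p hash %u\n", sk, sk->sk_hash);
}
```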
@@ -315,6 +346,11 @@ static __inline__ void sk_node_init(struct hlist_node *node)
 	node->pprev = NULL;
 }
 
+static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
+{
+	node->pprev = NULL;
+}
+
 static __inline__ void __sk_del_node(struct sock *sk)
 {
 	__hlist_del(&sk->sk_node);
@@ -361,6 +397,27 @@ static __inline__ int sk_del_node_init(struct sock *sk)
 	return rc;
 }
 
+static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
+{
+	if (sk_hashed(sk)) {
+		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
+		return 1;
+	}
+	return 0;
+}
+
+static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
+{
+	int rc = __sk_nulls_del_node_init_rcu(sk);
+
+	if (rc) {
+		/* paranoid for a while -acme */
+		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+		__sock_put(sk);
+	}
+	return rc;
+}
+
 static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
 {
 	hlist_add_head(&sk->sk_node, list);
@@ -372,6 +429,17 @@ static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
 	__sk_add_node(sk, list);
 }
 
+static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+{
+	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+}
+
+static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+{
+	sock_hold(sk);
+	__sk_nulls_add_node_rcu(sk, list);
+}
+
 static __inline__ void __sk_del_bind_node(struct sock *sk)
 {
 	__hlist_del(&sk->sk_bind_node);
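The add/del helpers pair with a per-bucket writer lock; readers never take it. A hedged sketch of how a protocol might hash and unhash a socket (my_hash, my_unhash and the lock placement are hypothetical):

```c
static void my_hash(struct sock *sk, struct hlist_nulls_head *bucket,
		    spinlock_t *bucket_lock)
{
	spin_lock(bucket_lock);
	sk_nulls_add_node_rcu(sk, bucket);	/* sock_hold(): the table owns a ref */
	spin_unlock(bucket_lock);
}

static void my_unhash(struct sock *sk, spinlock_t *bucket_lock)
{
	spin_lock(bucket_lock);
	sk_nulls_del_node_init_rcu(sk);		/* drops the table's ref if hashed */
	spin_unlock(bucket_lock);
}
```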
@@ -385,9 +453,16 @@ static __inline__ void sk_add_bind_node(struct sock *sk,
 
 #define sk_for_each(__sk, node, list) \
 	hlist_for_each_entry(__sk, node, list, sk_node)
+#define sk_nulls_for_each(__sk, node, list) \
+	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
+#define sk_nulls_for_each_rcu(__sk, node, list) \
+	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
 #define sk_for_each_from(__sk, node) \
 	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
 		hlist_for_each_entry_from(__sk, node, sk_node)
+#define sk_nulls_for_each_from(__sk, node) \
+	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
+		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
 #define sk_for_each_continue(__sk, node) \
 	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
 		hlist_for_each_entry_continue(__sk, node, sk_node)
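sk_nulls_for_each_rcu is what enables lockless lookups. A hedged sketch of the reader pattern these macros are built for, modeled on the UDP lookup and Documentation/RCU/rculist_nulls.txt (my_lookup, my_match and slot_nr are hypothetical):

```c
static struct sock *my_lookup(struct hlist_nulls_head *slot,
			      unsigned int slot_nr)
{
	struct sock *sk;
	struct hlist_nulls_node *node;

	rcu_read_lock();
begin:
	sk_nulls_for_each_rcu(sk, node, slot) {
		if (!my_match(sk))		/* hypothetical predicate */
			continue;
		/* With SLAB_DESTROY_BY_RCU the socket may be freed and
		 * reused at any moment: pin it, then re-check identity. */
		if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
			goto begin;
		if (unlikely(!my_match(sk))) {
			sock_put(sk);
			goto begin;
		}
		rcu_read_unlock();
		return sk;
	}
	/* The terminator encodes the slot it was initialized with
	 * (INIT_HLIST_NULLS_HEAD); ending on another chain's terminator
	 * means the socket moved mid-walk, so restart. */
	if (get_nulls_value(node) != slot_nr)
		goto begin;
	rcu_read_unlock();
	return NULL;
}
```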
@@ -574,7 +649,7 @@ struct proto {
 	/* Memory pressure */
 	void			(*enter_memory_pressure)(struct sock *sk);
 	atomic_t		*memory_allocated;	/* Current allocated memory. */
-	atomic_t		*sockets_allocated;	/* Current number of sockets. */
+	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
 	/*
 	 * Pressure flag: try to collapse.
 	 * Technical note: it is used by multiple contexts non atomically.
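Changing sockets_allocated from a shared atomic_t to a struct percpu_counter stops every socket create/destroy from dirtying one global cache line: writers update a per-cpu delta and readers fold the deltas into an approximate sum, which is fine for pressure checks. A sketch of the call sites this implies (function names hypothetical):

```c
#include <linux/percpu_counter.h>

static void my_sock_created(struct sock *sk)
{
	percpu_counter_inc(sk->sk_prot->sockets_allocated);
}

static void my_sock_destroyed(struct sock *sk)
{
	percpu_counter_dec(sk->sk_prot->sockets_allocated);
}

/* pressure checks tolerate a cheap, approximate read */
static int my_too_many_sockets(struct proto *prot, int limit)
{
	return percpu_counter_read_positive(prot->sockets_allocated) > limit;
}
```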
@@ -587,17 +662,18 @@
 	int			*sysctl_rmem;
 	int			max_header;
 
 	struct kmem_cache	*slab;
 	unsigned int		obj_size;
+	int			slab_flags;
 
-	atomic_t		*orphan_count;
+	struct percpu_counter	*orphan_count;
 
 	struct request_sock_ops	*rsk_prot;
 	struct timewait_sock_ops *twsk_prot;
 
 	union {
 		struct inet_hashinfo	*hashinfo;
-		struct hlist_head	*udp_hash;
+		struct udp_table	*udp_table;
 		struct raw_hashinfo	*raw_hash;
 	} h;
 
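Two related changes land in this last hunk. slab_flags lets a protocol request flags such as SLAB_DESTROY_BY_RCU for its socket cache, which the lockless lookup above depends on (objects can be reused without waiting for a grace period, hence the refcount re-check). And UDP's flat udp_hash array is replaced by struct udp_table, pairing each nulls-terminated chain with its own writer lock; roughly, with the shape assumed from net/udp.h of the same series:

```c
struct udp_hslot {
	struct hlist_nulls_head	head;	/* nulls-terminated socket chain */
	spinlock_t		lock;	/* per-slot writer lock */
};

struct udp_table {
	struct udp_hslot	hash[UDP_HTABLE_SIZE];
};
```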