author     Eric Dumazet <dada1@cosmosbay.com>      2008-11-16 22:39:21 -0500
committer  David S. Miller <davem@davemloft.net>   2008-11-16 22:39:21 -0500
commit     88ab1932eac721c6e7336708558fa5ed02c85c80
tree       c8788a1e3de08100bca341fa4180adfe5d02880f /include
parent     bbaffaca4810de1a25e32ecaf836eeaacc7a3d11
udp: Use hlist_nulls in UDP RCU code
This is a straightforward patch that uses the hlist_nulls infrastructure;
the RCU conversion of UDP was already done two weeks ago.

Using hlist_nulls lets us avoid some memory barriers, both at lookup time
and at delete time.

The patch is large because it adds new macros to include/net/sock.h.
These macros will be used by TCP & DCCP in the next patch.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
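
[Editor's note] For context: an hlist_nulls chain does not end in NULL but in a "nulls" marker encoding a value chosen at initialisation time (typically the hash bucket number). A lockless reader racing with a delete-and-reinsert of the socket it is walking can end up on another chain; rather than preventing this with extra memory barriers (or with the next-prefetching hlist_for_each_entry_rcu_safenext() removed in the rculist.h change below), the reader checks which nulls marker terminated the walk and restarts if it is not the marker of its starting slot. The lookup sketch below illustrates the pattern; it is a hedged simplification, not code from this patch: udp_lookup_sketch() and match_port() are hypothetical names, and the reference-count/re-check step a real lookup needs is omitted.

#include <linux/rculist_nulls.h>
#include <net/sock.h>

static int match_port(const struct sock *sk, __be16 dport);	/* hypothetical match helper */

static struct sock *udp_lookup_sketch(struct hlist_nulls_head *head,
				      unsigned int slot, __be16 dport)
{
	struct sock *sk;
	struct hlist_nulls_node *node;

	rcu_read_lock();
begin:
	sk_nulls_for_each_rcu(sk, node, head) {
		if (match_port(sk, dport))
			goto found;
	}
	/*
	 * The walk ended on a nulls marker.  If its value is not the slot
	 * we started from, a concurrent delete+reinsert moved us onto a
	 * different chain and entries may have been skipped: restart.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
	sk = NULL;
found:
	/* A real lookup would also take a reference and re-validate sk here. */
	rcu_read_unlock();
	return sk;
}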
Diffstat (limited to 'include')
 include/linux/rculist.h | 17
 include/net/sock.h      | 57
 include/net/udp.h       |  2
 3 files changed, 47 insertions(+), 29 deletions(-)
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 3ba2998b22ba..e649bd3f2c97 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -383,22 +383,5 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
 		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
 		pos = rcu_dereference(pos->next))
 
-/**
- * hlist_for_each_entry_rcu_safenext - iterate over rcu list of given type
- * @tpos:	the type * to use as a loop cursor.
- * @pos:	the &struct hlist_node to use as a loop cursor.
- * @head:	the head for your list.
- * @member:	the name of the hlist_node within the struct.
- * @next:	the &struct hlist_node to use as a next cursor
- *
- * Special version of hlist_for_each_entry_rcu that make sure
- * each next pointer is fetched before each iteration.
- */
-#define hlist_for_each_entry_rcu_safenext(tpos, pos, head, member, next) \
-	for (pos = rcu_dereference((head)->first); \
-		pos && ({ next = pos->next; smp_rmb(); prefetch(next); 1; }) && \
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-		pos = rcu_dereference(next))
-
 #endif	/* __KERNEL__ */
 #endif
diff --git a/include/net/sock.h b/include/net/sock.h
index 8b2b82131b67..0a638948868d 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -42,6 +42,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/list_nulls.h>
 #include <linux/timer.h>
 #include <linux/cache.h>
 #include <linux/module.h>
@@ -52,6 +53,7 @@
 #include <linux/security.h>
 
 #include <linux/filter.h>
+#include <linux/rculist_nulls.h>
 
 #include <asm/atomic.h>
 #include <net/dst.h>
@@ -106,6 +108,7 @@ struct net;
  *	@skc_reuse: %SO_REUSEADDR setting
  *	@skc_bound_dev_if: bound device index if != 0
  *	@skc_node: main hash linkage for various protocol lookup tables
+ *	@skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
  *	@skc_bind_node: bind hash linkage for various protocol lookup tables
  *	@skc_refcnt: reference count
  *	@skc_hash: hash value used with various protocol lookup tables
@@ -120,7 +123,10 @@ struct sock_common {
 	volatile unsigned char	skc_state;
 	unsigned char		skc_reuse;
 	int			skc_bound_dev_if;
-	struct hlist_node	skc_node;
+	union {
+		struct hlist_node	skc_node;
+		struct hlist_nulls_node skc_nulls_node;
+	};
 	struct hlist_node	skc_bind_node;
 	atomic_t		skc_refcnt;
 	unsigned int		skc_hash;
@@ -206,6 +212,7 @@ struct sock {
 #define sk_reuse		__sk_common.skc_reuse
 #define sk_bound_dev_if		__sk_common.skc_bound_dev_if
 #define sk_node			__sk_common.skc_node
+#define sk_nulls_node		__sk_common.skc_nulls_node
 #define sk_bind_node		__sk_common.skc_bind_node
 #define sk_refcnt		__sk_common.skc_refcnt
 #define sk_hash			__sk_common.skc_hash
@@ -300,12 +307,30 @@ static inline struct sock *sk_head(const struct hlist_head *head)
 	return hlist_empty(head) ? NULL : __sk_head(head);
 }
 
+static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
+{
+	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
+}
+
+static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
+{
+	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
+}
+
 static inline struct sock *sk_next(const struct sock *sk)
 {
 	return sk->sk_node.next ?
 		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
 }
 
+static inline struct sock *sk_nulls_next(const struct sock *sk)
+{
+	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
+		hlist_nulls_entry(sk->sk_nulls_node.next,
+				  struct sock, sk_nulls_node) :
+		NULL;
+}
+
 static inline int sk_unhashed(const struct sock *sk)
 {
 	return hlist_unhashed(&sk->sk_node);
@@ -321,6 +346,11 @@ static __inline__ void sk_node_init(struct hlist_node *node)
 	node->pprev = NULL;
 }
 
+static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
+{
+	node->pprev = NULL;
+}
+
 static __inline__ void __sk_del_node(struct sock *sk)
 {
 	__hlist_del(&sk->sk_node);
@@ -367,18 +397,18 @@ static __inline__ int sk_del_node_init(struct sock *sk)
 	return rc;
 }
 
-static __inline__ int __sk_del_node_init_rcu(struct sock *sk)
+static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
 {
 	if (sk_hashed(sk)) {
-		hlist_del_init_rcu(&sk->sk_node);
+		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
 		return 1;
 	}
 	return 0;
 }
 
-static __inline__ int sk_del_node_init_rcu(struct sock *sk)
+static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
 {
-	int rc = __sk_del_node_init_rcu(sk);
+	int rc = __sk_nulls_del_node_init_rcu(sk);
 
 	if (rc) {
 		/* paranoid for a while -acme */
@@ -399,15 +429,15 @@ static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
 	__sk_add_node(sk, list);
 }
 
-static __inline__ void __sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
-	hlist_add_head_rcu(&sk->sk_node, list);
+	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
-static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
 	sock_hold(sk);
-	__sk_add_node_rcu(sk, list);
+	__sk_nulls_add_node_rcu(sk, list);
 }
 
 static __inline__ void __sk_del_bind_node(struct sock *sk)
@@ -423,11 +453,16 @@ static __inline__ void sk_add_bind_node(struct sock *sk,
 
 #define sk_for_each(__sk, node, list) \
 	hlist_for_each_entry(__sk, node, list, sk_node)
-#define sk_for_each_rcu_safenext(__sk, node, list, next) \
-	hlist_for_each_entry_rcu_safenext(__sk, node, list, sk_node, next)
+#define sk_nulls_for_each(__sk, node, list) \
+	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
+#define sk_nulls_for_each_rcu(__sk, node, list) \
+	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
 #define sk_for_each_from(__sk, node) \
 	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
 		hlist_for_each_entry_from(__sk, node, sk_node)
+#define sk_nulls_for_each_from(__sk, node) \
+	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
+		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
 #define sk_for_each_continue(__sk, node) \
 	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
 		hlist_for_each_entry_continue(__sk, node, sk_node)
diff --git a/include/net/udp.h b/include/net/udp.h
index df2bfe545374..90e6ce56be65 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -51,7 +51,7 @@ struct udp_skb_cb {
 #define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))
 
 struct udp_hslot {
-	struct hlist_head	head;
+	struct hlist_nulls_head	head;
 	spinlock_t		lock;
 } __attribute__((aligned(2 * sizeof(long))));
 struct udp_table {
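
[Editor's note] With struct udp_hslot now holding a struct hlist_nulls_head, each slot's head must be initialised with a distinguishing nulls value; using the slot index is what makes the lookup-side restart check shown earlier work. A sketch of such an initialisation, assuming udp_table keeps its hash slots in an array member named hash (as in kernels of that era); udp_table_init_sketch() is a hypothetical name:

#include <linux/kernel.h>
#include <linux/list_nulls.h>
#include <linux/spinlock.h>
#include <net/udp.h>

static void udp_table_init_sketch(struct udp_table *table)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(table->hash); i++) {
		/* Encode the slot number in the chain's nulls terminator. */
		INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
		spin_lock_init(&table->hash[i].lock);
	}
}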