author     Steffen Klassert <steffen.klassert@secunet.com>  2014-02-19 04:07:34 -0500
committer  Steffen Klassert <steffen.klassert@secunet.com>  2014-02-19 04:35:43 -0500
commit     1a1ccc96abb2ed9b8fbb71018e64b97324caef53
tree       0888c68978167c36ce434344264afb19cf83a7c7 /net/xfrm
parent     d3623099d3509fa68fa28235366049dd3156c63a
xfrm: Remove caching of xfrm_policy_sk_bundles
We currently cache socket policy bundles at xfrm_policy_sk_bundles.
These cached bundles are never used. Instead we create and cache
a new one whenever xfrm_lookup() is called on a socket policy.
Most protocols cache the used routes to the socket, so let's
remove the unused caching of socket policy bundles in xfrm.
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
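
The pattern this patch removes amounts to a write-only cache: each socket-policy lookup takes an extra reference on the new bundle and pushes it onto a global, spinlock-protected list, but no code path ever searches that list; its only consumer is __xfrm_garbage_collect(), which detaches and frees everything. Below is a standalone sketch of that pattern in plain user-space C (illustrative only, with invented names; a pthread mutex stands in for the per-netns spinlock and a trivial struct stands in for the dst entries), showing that entries are never read between insertion and teardown:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct bundle {
	int id;
	struct bundle *next;
};

static struct bundle *sk_bundles;	/* models xfrm_policy_sk_bundles */
static pthread_mutex_t sk_bundle_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every "lookup" pushes its freshly built bundle onto the global list. */
static void cache_bundle(struct bundle *b)
{
	pthread_mutex_lock(&sk_bundle_lock);
	b->next = sk_bundles;
	sk_bundles = b;
	pthread_mutex_unlock(&sk_bundle_lock);
}

/* Models __xfrm_garbage_collect(): detach the whole list, then free it. */
static void garbage_collect(void)
{
	struct bundle *head, *next;

	pthread_mutex_lock(&sk_bundle_lock);
	head = sk_bundles;
	sk_bundles = NULL;
	pthread_mutex_unlock(&sk_bundle_lock);

	while (head) {
		next = head->next;
		free(head);
		head = next;
	}
}

int main(void)
{
	int i;

	/* Three lookups, each creating and "caching" a bundle... */
	for (i = 0; i < 3; i++) {
		struct bundle *b = calloc(1, sizeof(*b));

		if (!b)
			return 1;
		b->id = i;
		cache_bundle(b);
	}

	/* ...and nothing ever reads the list before the GC frees it. */
	garbage_collect();
	printf("cache drained; no lookup ever read it\n");
	return 0;
}

Since nothing ever hits the cache between cache_bundle() and garbage_collect(), removing the list, as the patch does for xfrm_policy_sk_bundles, only drops lock traffic and deferred frees; the routes a socket actually reuses are the ones the individual protocols cache on the socket itself.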
Diffstat (limited to 'net/xfrm')
 net/xfrm/xfrm_policy.c | 28
 1 file changed, 0 insertions(+), 28 deletions(-)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 2232c6f26aff..bb3669d973a7 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -39,8 +39,6 @@
 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
 #define XFRM_MAX_QUEUE_LEN 100
 
-static struct dst_entry *xfrm_policy_sk_bundles;
-
 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
 static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
 						__read_mostly;
@@ -2109,13 +2107,6 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
 				goto no_transform;
 			}
 
-			dst_hold(&xdst->u.dst);
-
-			spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-			xdst->u.dst.next = xfrm_policy_sk_bundles;
-			xfrm_policy_sk_bundles = &xdst->u.dst;
-			spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-
 			route = xdst->route;
 		}
 	}
@@ -2549,33 +2540,15 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
 	return dst;
 }
 
-static void __xfrm_garbage_collect(struct net *net)
-{
-	struct dst_entry *head, *next;
-
-	spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-	head = xfrm_policy_sk_bundles;
-	xfrm_policy_sk_bundles = NULL;
-	spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-
-	while (head) {
-		next = head->next;
-		dst_free(head);
-		head = next;
-	}
-}
-
 void xfrm_garbage_collect(struct net *net)
 {
 	flow_cache_flush(net);
-	__xfrm_garbage_collect(net);
 }
 EXPORT_SYMBOL(xfrm_garbage_collect);
 
 static void xfrm_garbage_collect_deferred(struct net *net)
 {
 	flow_cache_flush_deferred(net);
-	__xfrm_garbage_collect(net);
 }
 
 static void xfrm_init_pmtu(struct dst_entry *dst)
@@ -2944,7 +2917,6 @@ static int __net_init xfrm_net_init(struct net *net)
 	/* Initialize the per-net locks here */
 	spin_lock_init(&net->xfrm.xfrm_state_lock);
 	rwlock_init(&net->xfrm.xfrm_policy_lock);
-	spin_lock_init(&net->xfrm.xfrm_policy_sk_bundle_lock);
 	mutex_init(&net->xfrm.xfrm_cfg_mutex);
 
 	flow_cache_init(net);