Diffstat (limited to 'net/ipv4/inetpeer.c')
-rw-r--r--	net/ipv4/inetpeer.c	99
1 file changed, 52 insertions(+), 47 deletions(-)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index dfba343b2509..e1e0a4e8fd34 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -82,23 +82,39 @@ static const struct inet_peer peer_fake_node = {
 	.avl_height	= 0
 };
 
-struct inet_peer_base {
-	struct inet_peer __rcu	*root;
-	seqlock_t		lock;
-	int			total;
-};
+void inet_peer_base_init(struct inet_peer_base *bp)
+{
+	bp->root = peer_avl_empty_rcu;
+	seqlock_init(&bp->lock);
+	bp->flush_seq = ~0U;
+	bp->total = 0;
+}
+EXPORT_SYMBOL_GPL(inet_peer_base_init);
 
-static struct inet_peer_base v4_peers = {
-	.root		= peer_avl_empty_rcu,
-	.lock		= __SEQLOCK_UNLOCKED(v4_peers.lock),
-	.total		= 0,
-};
+static atomic_t v4_seq = ATOMIC_INIT(0);
+static atomic_t v6_seq = ATOMIC_INIT(0);
 
-static struct inet_peer_base v6_peers = {
-	.root		= peer_avl_empty_rcu,
-	.lock		= __SEQLOCK_UNLOCKED(v6_peers.lock),
-	.total		= 0,
-};
+static atomic_t *inetpeer_seq_ptr(int family)
+{
+	return (family == AF_INET ? &v4_seq : &v6_seq);
+}
+
+static inline void flush_check(struct inet_peer_base *base, int family)
+{
+	atomic_t *fp = inetpeer_seq_ptr(family);
+
+	if (unlikely(base->flush_seq != atomic_read(fp))) {
+		inetpeer_invalidate_tree(base);
+		base->flush_seq = atomic_read(fp);
+	}
+}
+
+void inetpeer_invalidate_family(int family)
+{
+	atomic_t *fp = inetpeer_seq_ptr(family);
+
+	atomic_inc(fp);
+}
 
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
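The hunk above replaces the statically allocated v4_peers/v6_peers trees with caller-owned bases plus a per-family generation counter: inetpeer_invalidate_family() only bumps v4_seq or v6_seq, and the next lookup against any base of that family notices the stale flush_seq snapshot in flush_check() and flushes its whole tree lazily. A minimal user-space sketch of that generation-counter pattern (every name below is an illustrative stand-in, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint family_seq;		/* bumped by invalidate_family() */

struct cache_base {
	unsigned int flush_seq;		/* snapshot of family_seq */
	int total;			/* entries in this cache */
};

static void invalidate_family(void)
{
	atomic_fetch_add(&family_seq, 1);	/* lazy: flush on next lookup */
}

static void flush_check(struct cache_base *b)
{
	unsigned int seq = atomic_load(&family_seq);

	if (b->flush_seq != seq) {	/* a flush was requested since last look */
		b->total = 0;		/* stands in for inetpeer_invalidate_tree() */
		b->flush_seq = seq;
	}
}

int main(void)
{
	struct cache_base b = { .flush_seq = ~0U, .total = 42 };

	invalidate_family();
	flush_check(&b);		/* observes the bump, empties the cache */
	printf("total after flush: %d\n", b.total);
	return 0;
}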
@@ -110,7 +126,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
 
 static void inetpeer_gc_worker(struct work_struct *work)
 {
-	struct inet_peer *p, *n;
+	struct inet_peer *p, *n, *c;
 	LIST_HEAD(list);
 
 	spin_lock_bh(&gc_lock);
@@ -122,17 +138,19 @@ static void inetpeer_gc_worker(struct work_struct *work)
 
 	list_for_each_entry_safe(p, n, &list, gc_list) {
 
-		if(need_resched())
+		if (need_resched())
 			cond_resched();
 
-		if (p->avl_left != peer_avl_empty) {
-			list_add_tail(&p->avl_left->gc_list, &list);
-			p->avl_left = peer_avl_empty;
+		c = rcu_dereference_protected(p->avl_left, 1);
+		if (c != peer_avl_empty) {
+			list_add_tail(&c->gc_list, &list);
+			p->avl_left = peer_avl_empty_rcu;
 		}
 
-		if (p->avl_right != peer_avl_empty) {
-			list_add_tail(&p->avl_right->gc_list, &list);
-			p->avl_right = peer_avl_empty;
+		c = rcu_dereference_protected(p->avl_right, 1);
+		if (c != peer_avl_empty) {
+			list_add_tail(&c->gc_list, &list);
+			p->avl_right = peer_avl_empty_rcu;
 		}
 
 		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
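The gc worker tears the dead tree down iteratively: each visited node appends its children to the work list before being freed, so no recursion or auxiliary stack is needed. The change above just makes the child loads sparse-checker clean (rcu_dereference_protected() with condition 1, since the tree is already unreachable) and resets the links to the __rcu-annotated empty marker. The same list-flattening idea as a self-contained user-space sketch (struct node and free_all() are illustrative):

#include <stdlib.h>

struct node {
	struct node *left, *right;
	struct node *gc_next;		/* simple singly linked work list */
};

static void free_all(struct node *root)
{
	struct node *head = root, *tail = root;

	if (!root)
		return;
	root->gc_next = NULL;
	while (head) {
		struct node *p = head;

		if (p->left) {		/* queue children before freeing p */
			tail->gc_next = p->left;
			tail = p->left;
			tail->gc_next = NULL;
		}
		if (p->right) {
			tail->gc_next = p->right;
			tail = p->right;
			tail->gc_next = NULL;
		}
		head = p->gc_next;
		free(p);
	}
}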
@@ -401,11 +419,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
 	call_rcu(&p->rcu, inetpeer_free_rcu);
 }
 
-static struct inet_peer_base *family_to_base(int family)
-{
-	return family == AF_INET ? &v4_peers : &v6_peers;
-}
-
 /* perform garbage collect on all items stacked during a lookup */
 static int inet_peer_gc(struct inet_peer_base *base,
 			struct inet_peer __rcu **stack[PEER_MAXDEPTH],
@@ -443,14 +456,17 @@ static int inet_peer_gc(struct inet_peer_base *base,
 	return cnt;
 }
 
-struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
+struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+			       const struct inetpeer_addr *daddr,
+			       int create)
 {
 	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
-	struct inet_peer_base *base = family_to_base(daddr->family);
 	struct inet_peer *p;
 	unsigned int sequence;
 	int invalidated, gccnt = 0;
 
+	flush_check(base, daddr->family);
+
 	/* Attempt a lockless lookup first.
 	 * Because of a concurrent writer, we might not find an existing entry.
 	 */
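With family_to_base() gone, inet_getpeer() no longer derives the tree from daddr->family; the caller passes its own inet_peer_base, which is what allows each user (eventually each network namespace) to own a private tree. A hedged sketch of how a call site adapts (lookup_v4_peer() is a made-up name for illustration; the real helpers live in include/net/inetpeer.h):

static inline struct inet_peer *lookup_v4_peer(struct inet_peer_base *base,
					       __be32 v4daddr, int create)
{
	struct inetpeer_addr daddr;

	daddr.addr.a4 = v4daddr;
	daddr.family = AF_INET;
	return inet_getpeer(base, &daddr, create);	/* base is explicit now */
}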
@@ -492,13 +508,9 @@ relookup:
 		(daddr->family == AF_INET) ?
 			secure_ip_id(daddr->addr.a4) :
 			secure_ipv6_id(daddr->addr.a6));
-		p->tcp_ts_stamp = 0;
 		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
 		p->rate_tokens = 0;
 		p->rate_last = 0;
-		p->pmtu_expires = 0;
-		p->pmtu_orig = 0;
-		memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
 		INIT_LIST_HEAD(&p->gc_list);
 
 		/* Link the node. */
@@ -571,26 +583,19 @@ static void inetpeer_inval_rcu(struct rcu_head *head)
 		schedule_delayed_work(&gc_work, gc_delay);
 }
 
-void inetpeer_invalidate_tree(int family)
+void inetpeer_invalidate_tree(struct inet_peer_base *base)
 {
-	struct inet_peer *old, *new, *prev;
-	struct inet_peer_base *base = family_to_base(family);
+	struct inet_peer *root;
 
 	write_seqlock_bh(&base->lock);
 
-	old = base->root;
-	if (old == peer_avl_empty_rcu)
-		goto out;
-
-	new = peer_avl_empty_rcu;
-
-	prev = cmpxchg(&base->root, old, new);
-	if (prev == old) {
+	root = rcu_deref_locked(base->root, base);
+	if (root != peer_avl_empty) {
+		base->root = peer_avl_empty_rcu;
 		base->total = 0;
-		call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
+		call_rcu(&root->gc_rcu, inetpeer_inval_rcu);
 	}
 
-out:
 	write_sequnlock_bh(&base->lock);
 }
 EXPORT_SYMBOL(inetpeer_invalidate_tree);
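Since every writer now serializes on base->lock, inetpeer_invalidate_tree() can detach the root with a plain store instead of the old cmpxchg() retry, and the goto out label disappears. The same simplification in miniature (a user-space sketch with illustrative names; dispose() stands in for call_rcu()):

#include <pthread.h>
#include <stddef.h>

struct base {
	pthread_mutex_t lock;		/* every writer holds this lock */
	void *root;
	int total;
};

static void invalidate(struct base *b, void (*dispose)(void *))
{
	void *root;

	pthread_mutex_lock(&b->lock);
	root = b->root;
	if (root != NULL) {
		b->root = NULL;		/* plain store: no cmpxchg needed */
		b->total = 0;
		dispose(root);		/* deferred free in the real code */
	}
	pthread_mutex_unlock(&b->lock);
}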