author	Chris Mason <chris.mason@oracle.com>	2010-04-21 16:04:43 -0400
committer	Andy Grover <andy.grover@oracle.com>	2010-09-08 21:12:26 -0400
commit	976673ee1b92d939168c8c1fbad3e16c45caa545 (patch)
tree	36c682f96789118bebe0c14e3a5a5353b184a22b	/net/rds/bind.c
parent	ce47f52f42e69d48d1b63fa618fce9cd7ffa9417 (diff)
rds: switch to rwlock on bind_lock
The bind_lock is almost entirely read-only, but it gets hammered during
normal operations and is a major bottleneck. This commit changes it to an
rwlock, which takes it from 80% of the system time on a big NUMA machine
down to much lower numbers. A better fix would involve RCU, which is done
in a later commit.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
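For context, here is a minimal sketch of the read-mostly locking idiom this
commit adopts. It is a generic illustration, not RDS code: my_table_lock,
my_lookup(), my_insert(), table_find() and table_add() are hypothetical
names. Readers take the lock shared, so concurrent lookups no longer
serialize; only insert and remove take it exclusively.

#include <linux/spinlock.h>

struct item;
struct item *table_find(int key);	/* hypothetical lookup helper */
void table_add(struct item *it);	/* hypothetical insert helper */

static DEFINE_RWLOCK(my_table_lock);

struct item *my_lookup(int key)
{
	struct item *it;
	unsigned long flags;

	/* Read side: any number of CPUs may hold the lock at once. */
	read_lock_irqsave(&my_table_lock, flags);
	it = table_find(key);
	read_unlock_irqrestore(&my_table_lock, flags);
	return it;
}

void my_insert(struct item *it)
{
	unsigned long flags;

	/* Write side: exclusive, blocks both readers and writers. */
	write_lock_irqsave(&my_table_lock, flags);
	table_add(it);
	write_unlock_irqrestore(&my_table_lock, flags);
}

The _irqsave variants mirror the original spinlock calls: as the comment in
the diff below notes, the lookup path runs for every incoming frame, so
interrupts must stay disabled while the lock is held. Even as an rwlock,
readers still bounce the lock's cacheline between CPUs, which is why the
commit message points to RCU as the better long-term fix.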
Diffstat (limited to 'net/rds/bind.c')
-rw-r--r--	net/rds/bind.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 65de5cbdb576..a65afff6f4b5 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -43,7 +43,7 @@
  * This is now called for every incoming frame so we arguably care much more
  * about it than we used to.
  */
-static DEFINE_SPINLOCK(rds_bind_lock);
+static DEFINE_RWLOCK(rds_bind_lock);
 static struct rb_root rds_bind_tree = RB_ROOT;
 
 static struct rds_sock *rds_bind_tree_walk(__be32 addr, __be16 port,
@@ -88,13 +88,13 @@ struct rds_sock *rds_find_bound(__be32 addr, __be16 port)
 	struct rds_sock *rs;
 	unsigned long flags;
 
-	spin_lock_irqsave(&rds_bind_lock, flags);
+	read_lock_irqsave(&rds_bind_lock, flags);
 	rs = rds_bind_tree_walk(addr, port, NULL);
 	if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
 		rds_sock_addref(rs);
 	else
 		rs = NULL;
-	spin_unlock_irqrestore(&rds_bind_lock, flags);
+	read_unlock_irqrestore(&rds_bind_lock, flags);
 
 	rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr,
 		ntohs(port));
@@ -116,7 +116,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
 		last = rover - 1;
 	}
 
-	spin_lock_irqsave(&rds_bind_lock, flags);
+	write_lock_irqsave(&rds_bind_lock, flags);
 
 	do {
 		if (rover == 0)
@@ -137,7 +137,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
 			rs, &addr, (int)ntohs(*port));
 	}
 
-	spin_unlock_irqrestore(&rds_bind_lock, flags);
+	write_unlock_irqrestore(&rds_bind_lock, flags);
 
 	return ret;
 }
@@ -146,7 +146,7 @@ void rds_remove_bound(struct rds_sock *rs)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&rds_bind_lock, flags);
+	write_lock_irqsave(&rds_bind_lock, flags);
 
 	if (rs->rs_bound_addr) {
 		rdsdebug("rs %p unbinding from %pI4:%d\n",
@@ -158,7 +158,7 @@ void rds_remove_bound(struct rds_sock *rs)
 		rs->rs_bound_addr = 0;
 	}
 
-	spin_unlock_irqrestore(&rds_bind_lock, flags);
+	write_unlock_irqrestore(&rds_bind_lock, flags);
 }
 
 int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)