author		Andy Grover <andy.grover@oracle.com>	2009-10-30 04:51:54 -0400
committer	David S. Miller <davem@davemloft.net>	2009-10-30 18:06:38 -0400
commit		86357b19bcabd9355937f3fb84f90ba9fe76a5d3 (patch)
tree		ec9dfe857ce9d5c807b7cce4cd0c831c8e1fcdee /net/rds
parent		244546f0d3101c5441f5b14cfe8a79d62679eaea (diff)
RDS: Fix potential race around rds_i[bw]_allocation
"At rds_ib_recv_refill_one(), it first executes atomic_read(&rds_ib_allocation)
for if-condition checking,
and then executes atomic_inc(&rds_ib_allocation) if the condition was
not satisfied.
However, if any other code which updates rds_ib_allocation executes
between these two atomic operation executions,
it seems that it may result race condition. (especially when
rds_ib_allocation + 1 == rds_ib_sysctl_max_recv_allocation)"
This patch fixes the race by using atomic_add_unless(), which makes the
limit check and the increment a single atomic step so the count can never
exceed rds_ib_sysctl_max_recv_allocation, and by decrementing the count if
the subsequent allocation fails. It makes an identical change to the iwarp
transport.
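For illustration only (not part of the patch): a minimal userspace sketch of
the check-then-increment race and the add-unless fix, modelled with C11
atomics rather than the kernel's atomic_t API. The limit value and helper
names below are invented for the example.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int allocation;
	static const int max_recv_allocation = 4;	/* stand-in for the sysctl limit */

	/* Racy pattern the patch removes: another thread can bump the counter
	 * between the read and the increment, overshooting the limit. */
	static bool reserve_racy(void)
	{
		if (atomic_load(&allocation) >= max_recv_allocation)
			return false;
		atomic_fetch_add(&allocation, 1);
		return true;
	}

	/* add-unless semantics as a compare-and-swap loop: the limit check and
	 * the increment happen in one atomic step, so the limit holds even
	 * with concurrent callers. */
	static bool reserve_add_unless(void)
	{
		int old = atomic_load(&allocation);

		do {
			if (old >= max_recv_allocation)
				return false;
		} while (!atomic_compare_exchange_weak(&allocation, &old, old + 1));
		return true;
	}

	int main(void)
	{
		/* Reserve, then undo the reservation if the "allocation" fails,
		 * mirroring the atomic_dec() the patch adds on the error path. */
		if (reserve_add_unless()) {
			void *buf = NULL;	/* pretend kmem_cache_alloc() returned NULL */
			if (buf == NULL)
				atomic_fetch_sub(&allocation, 1);
		}

		printf("racy reservation: %d, count now %d\n",
		       reserve_racy(), atomic_load(&allocation));
		return 0;
	}

The compare-and-swap loop is one common way add-unless semantics are built:
because the check and the update are indivisible, two callers racing near the
limit cannot both slip past it the way the read-then-increment pattern allows.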
Reported-by: Shin Hong <hongshin@gmail.com>
Signed-off-by: Andy Grover <andy.grover@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/rds')
 net/rds/ib_recv.c | 7 ++++---
 net/rds/iw_recv.c | 7 ++++---
 2 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index cd7a6cfcab03..2f009d391c29 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -143,15 +143,16 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
 	int ret = -ENOMEM;
 
 	if (recv->r_ibinc == NULL) {
-		if (atomic_read(&rds_ib_allocation) >= rds_ib_sysctl_max_recv_allocation) {
+		if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) {
 			rds_ib_stats_inc(s_ib_rx_alloc_limit);
 			goto out;
 		}
 		recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab,
 						 kptr_gfp);
-		if (recv->r_ibinc == NULL)
+		if (recv->r_ibinc == NULL) {
+			atomic_dec(&rds_ib_allocation);
 			goto out;
-		atomic_inc(&rds_ib_allocation);
+		}
 		INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
 		rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
 	}
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 8683f5f66c4b..9f98150af19f 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -143,15 +143,16 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
 	int ret = -ENOMEM;
 
 	if (recv->r_iwinc == NULL) {
-		if (atomic_read(&rds_iw_allocation) >= rds_iw_sysctl_max_recv_allocation) {
+		if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
 			rds_iw_stats_inc(s_iw_rx_alloc_limit);
 			goto out;
 		}
 		recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
 						 kptr_gfp);
-		if (recv->r_iwinc == NULL)
+		if (recv->r_iwinc == NULL) {
+			atomic_dec(&rds_iw_allocation);
 			goto out;
-		atomic_inc(&rds_iw_allocation);
+		}
 		INIT_LIST_HEAD(&recv->r_iwinc->ii_frags);
 		rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
 	}