about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2013-02-27 20:04:16 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-27 22:10:16 -0500
commit3b069c5d857a5f1b8cb6bb74c70d9446089f5077 (patch)
tree58e005a8e9beba035a2551edd34afe1d802dba53 /drivers
parent4ae42b0ff0f9993c79d7282218b98d8a8a4263f5 (diff)
IB/core: convert to idr_alloc()
Convert to the much saner new idr interface. v2: Mike triggered WARN_ON() in idr_preload() because send_mad(), which may be used from non-process context, was calling idr_preload() unconditionally. Preload iff @gfp_mask has __GFP_WAIT. Signed-off-by: Tejun Heo <tj@kernel.org> Reviewed-by: Sean Hefty <sean.hefty@intel.com> Reported-by: "Marciniszyn, Mike" <mike.marciniszyn@intel.com> Cc: Roland Dreier <roland@kernel.org> Cc: Sean Hefty <sean.hefty@intel.com> Cc: Hal Rosenstock <hal.rosenstock@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/infiniband/core/cm.c22
-rw-r--r--drivers/infiniband/core/cma.c24
-rw-r--r--drivers/infiniband/core/sa_query.c18
-rw-r--r--drivers/infiniband/core/ucm.c16
-rw-r--r--drivers/infiniband/core/ucma.c32
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c17
6 files changed, 48 insertions, 81 deletions
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 394fea2ba1bc..98281fe5ea4b 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -382,20 +382,21 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
382static int cm_alloc_id(struct cm_id_private *cm_id_priv) 382static int cm_alloc_id(struct cm_id_private *cm_id_priv)
383{ 383{
384 unsigned long flags; 384 unsigned long flags;
385 int ret, id; 385 int id;
386 static int next_id; 386 static int next_id;
387 387
388 do { 388 idr_preload(GFP_KERNEL);
389 spin_lock_irqsave(&cm.lock, flags); 389 spin_lock_irqsave(&cm.lock, flags);
390 ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 390
391 next_id, &id); 391 id = idr_alloc(&cm.local_id_table, cm_id_priv, next_id, 0, GFP_NOWAIT);
392 if (!ret) 392 if (id >= 0)
393 next_id = ((unsigned) id + 1) & MAX_IDR_MASK; 393 next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
394 spin_unlock_irqrestore(&cm.lock, flags); 394
395 } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); 395 spin_unlock_irqrestore(&cm.lock, flags);
396 idr_preload_end();
396 397
397 cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; 398 cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
398 return ret; 399 return id < 0 ? id : 0;
399} 400}
400 401
401static void cm_free_id(__be32 local_id) 402static void cm_free_id(__be32 local_id)
@@ -3844,7 +3845,6 @@ static int __init ib_cm_init(void)
3844 cm.remote_sidr_table = RB_ROOT; 3845 cm.remote_sidr_table = RB_ROOT;
3845 idr_init(&cm.local_id_table); 3846 idr_init(&cm.local_id_table);
3846 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand); 3847 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
3847 idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3848 INIT_LIST_HEAD(&cm.timewait_list); 3848 INIT_LIST_HEAD(&cm.timewait_list);
3849 3849
3850 ret = class_register(&cm_class); 3850 ret = class_register(&cm_class);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d789eea32168..c32eeaa3f3b1 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2143,33 +2143,23 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
2143 unsigned short snum) 2143 unsigned short snum)
2144{ 2144{
2145 struct rdma_bind_list *bind_list; 2145 struct rdma_bind_list *bind_list;
2146 int port, ret; 2146 int ret;
2147 2147
2148 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 2148 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
2149 if (!bind_list) 2149 if (!bind_list)
2150 return -ENOMEM; 2150 return -ENOMEM;
2151 2151
2152 do { 2152 ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
2153 ret = idr_get_new_above(ps, bind_list, snum, &port); 2153 if (ret < 0)
2154 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL)); 2154 goto err;
2155
2156 if (ret)
2157 goto err1;
2158
2159 if (port != snum) {
2160 ret = -EADDRNOTAVAIL;
2161 goto err2;
2162 }
2163 2155
2164 bind_list->ps = ps; 2156 bind_list->ps = ps;
2165 bind_list->port = (unsigned short) port; 2157 bind_list->port = (unsigned short)ret;
2166 cma_bind_port(bind_list, id_priv); 2158 cma_bind_port(bind_list, id_priv);
2167 return 0; 2159 return 0;
2168err2: 2160err:
2169 idr_remove(ps, port);
2170err1:
2171 kfree(bind_list); 2161 kfree(bind_list);
2172 return ret; 2162 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
2173} 2163}
2174 2164
2175static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) 2165static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index a8905abc56e4..934f45e79e5e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -611,19 +611,21 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
611 611
612static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask) 612static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
613{ 613{
614 bool preload = gfp_mask & __GFP_WAIT;
614 unsigned long flags; 615 unsigned long flags;
615 int ret, id; 616 int ret, id;
616 617
617retry: 618 if (preload)
618 if (!idr_pre_get(&query_idr, gfp_mask)) 619 idr_preload(gfp_mask);
619 return -ENOMEM;
620 spin_lock_irqsave(&idr_lock, flags); 620 spin_lock_irqsave(&idr_lock, flags);
621 ret = idr_get_new(&query_idr, query, &id); 621
622 id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
623
622 spin_unlock_irqrestore(&idr_lock, flags); 624 spin_unlock_irqrestore(&idr_lock, flags);
623 if (ret == -EAGAIN) 625 if (preload)
624 goto retry; 626 idr_preload_end();
625 if (ret) 627 if (id < 0)
626 return ret; 628 return id;
627 629
628 query->mad_buf->timeout_ms = timeout_ms; 630 query->mad_buf->timeout_ms = timeout_ms;
629 query->mad_buf->context[0] = query; 631 query->mad_buf->context[0] = query;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 49b15ac1987e..f2f63933e8a9 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -176,7 +176,6 @@ static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
176static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) 176static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
177{ 177{
178 struct ib_ucm_context *ctx; 178 struct ib_ucm_context *ctx;
179 int result;
180 179
181 ctx = kzalloc(sizeof *ctx, GFP_KERNEL); 180 ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
182 if (!ctx) 181 if (!ctx)
@@ -187,17 +186,10 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
187 ctx->file = file; 186 ctx->file = file;
188 INIT_LIST_HEAD(&ctx->events); 187 INIT_LIST_HEAD(&ctx->events);
189 188
190 do { 189 mutex_lock(&ctx_id_mutex);
191 result = idr_pre_get(&ctx_id_table, GFP_KERNEL); 190 ctx->id = idr_alloc(&ctx_id_table, ctx, 0, 0, GFP_KERNEL);
192 if (!result) 191 mutex_unlock(&ctx_id_mutex);
193 goto error; 192 if (ctx->id < 0)
194
195 mutex_lock(&ctx_id_mutex);
196 result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
197 mutex_unlock(&ctx_id_mutex);
198 } while (result == -EAGAIN);
199
200 if (result)
201 goto error; 193 goto error;
202 194
203 list_add_tail(&ctx->file_list, &file->ctxs); 195 list_add_tail(&ctx->file_list, &file->ctxs);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 2709ff581392..5ca44cd9b00c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -145,7 +145,6 @@ static void ucma_put_ctx(struct ucma_context *ctx)
145static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) 145static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
146{ 146{
147 struct ucma_context *ctx; 147 struct ucma_context *ctx;
148 int ret;
149 148
150 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 149 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
151 if (!ctx) 150 if (!ctx)
@@ -156,17 +155,10 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
156 INIT_LIST_HEAD(&ctx->mc_list); 155 INIT_LIST_HEAD(&ctx->mc_list);
157 ctx->file = file; 156 ctx->file = file;
158 157
159 do { 158 mutex_lock(&mut);
160 ret = idr_pre_get(&ctx_idr, GFP_KERNEL); 159 ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
161 if (!ret) 160 mutex_unlock(&mut);
162 goto error; 161 if (ctx->id < 0)
163
164 mutex_lock(&mut);
165 ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
166 mutex_unlock(&mut);
167 } while (ret == -EAGAIN);
168
169 if (ret)
170 goto error; 162 goto error;
171 163
172 list_add_tail(&ctx->list, &file->ctx_list); 164 list_add_tail(&ctx->list, &file->ctx_list);
@@ -180,23 +172,15 @@ error:
180static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx) 172static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
181{ 173{
182 struct ucma_multicast *mc; 174 struct ucma_multicast *mc;
183 int ret;
184 175
185 mc = kzalloc(sizeof(*mc), GFP_KERNEL); 176 mc = kzalloc(sizeof(*mc), GFP_KERNEL);
186 if (!mc) 177 if (!mc)
187 return NULL; 178 return NULL;
188 179
189 do { 180 mutex_lock(&mut);
190 ret = idr_pre_get(&multicast_idr, GFP_KERNEL); 181 mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
191 if (!ret) 182 mutex_unlock(&mut);
192 goto error; 183 if (mc->id < 0)
193
194 mutex_lock(&mut);
195 ret = idr_get_new(&multicast_idr, mc, &mc->id);
196 mutex_unlock(&mut);
197 } while (ret == -EAGAIN);
198
199 if (ret)
200 goto error; 184 goto error;
201 185
202 mc->ctx = ctx; 186 mc->ctx = ctx;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index e71d834c922a..a7d00f6b3bc1 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -125,18 +125,17 @@ static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
125{ 125{
126 int ret; 126 int ret;
127 127
128retry: 128 idr_preload(GFP_KERNEL);
129 if (!idr_pre_get(idr, GFP_KERNEL))
130 return -ENOMEM;
131
132 spin_lock(&ib_uverbs_idr_lock); 129 spin_lock(&ib_uverbs_idr_lock);
133 ret = idr_get_new(idr, uobj, &uobj->id);
134 spin_unlock(&ib_uverbs_idr_lock);
135 130
136 if (ret == -EAGAIN) 131 ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
137 goto retry; 132 if (ret >= 0)
133 uobj->id = ret;
138 134
139 return ret; 135 spin_unlock(&ib_uverbs_idr_lock);
136 idr_preload_end();
137
138 return ret < 0 ? ret : 0;
140} 139}
141 140
142void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj) 141void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)