author | Hal Rosenstock <halr@voltaire.com> | 2005-07-27 14:45:22 -0400
---|---|---
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-07-27 19:26:09 -0400
commit | b82cab6b331b51d82f90d2207f3bbfdf09361ac9 (patch) |
tree | 5aad260a806939b2ea5b2d5a029b38b77a4fcd07 /drivers/infiniband/core/mad.c |
parent | c183a4c33528d17cde0dcb093ae4248d8cb8f649 (diff) |
[PATCH] IB: Update MAD client API
Automatically allocate an MR when registering a MAD agent.
MAD clients are modified to use this updated API.
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
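
In practice the change means a MAD client no longer allocates its own MR: ib_register_mad_agent() now hands back an agent whose new `mr` field already holds a DMA MR on the QP's PD, and the client uses `agent->mr->lkey` for its DMA-mapped send buffers. The sketch below illustrates a client written against the updated API; the handler names, the choice of management class, and the method bits are illustrative only and are not part of this patch.

```c
/*
 * Minimal sketch of a MAD client on the updated API. example_* names and
 * the perf-management class are hypothetical; only the registration call,
 * the new agent->mr field, and ib_free_recv_mad() come from the MAD API.
 */
#include <linux/err.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <ib_mad.h>	/* <rdma/ib_mad.h> in trees after the header move */

static void example_send_handler(struct ib_mad_agent *agent,
				 struct ib_mad_send_wc *mad_send_wc)
{
	/* handle completion of a send posted by this agent */
}

static void example_recv_handler(struct ib_mad_agent *agent,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	/* consume the MAD, then release the receive buffer */
	ib_free_recv_mad(mad_recv_wc);
}

static struct ib_mad_agent *example_register(struct ib_device *device,
					     u8 port_num)
{
	struct ib_mad_reg_req reg_req;
	struct ib_mad_agent *agent;

	memset(&reg_req, 0, sizeof reg_req);
	reg_req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;	/* example class */
	reg_req.mgmt_class_version = 1;
	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);

	/*
	 * No ib_get_dma_mr() call here any more: the MAD layer allocates
	 * the MR during registration and returns it in agent->mr.
	 */
	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
				      &reg_req, 0 /* no RMPP */,
				      example_send_handler,
				      example_recv_handler, NULL);
	if (IS_ERR(agent))
		return agent;

	/* Use agent->mr->lkey in the gather list of DMA-mapped sends. */
	return agent;
}
```

Teardown is symmetric: ib_unregister_mad_agent() now deregisters the MR inside unregister_mad_agent(), so clients drop their own ib_dereg_mr() calls.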
Diffstat (limited to 'drivers/infiniband/core/mad.c')
-rw-r--r-- | drivers/infiniband/core/mad.c | 31
1 file changed, 19 insertions, 12 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 23628c622a5..52748b0f768 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -261,19 +261,26 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 		ret = ERR_PTR(-ENOMEM);
 		goto error1;
 	}
+	memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
+
+	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
+						 IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(mad_agent_priv->agent.mr)) {
+		ret = ERR_PTR(-ENOMEM);
+		goto error2;
+	}
 
 	if (mad_reg_req) {
 		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
 		if (!reg_req) {
 			ret = ERR_PTR(-ENOMEM);
-			goto error2;
+			goto error3;
 		}
 		/* Make a copy of the MAD registration request */
 		memcpy(reg_req, mad_reg_req, sizeof *reg_req);
 	}
 
 	/* Now, fill in the various structures */
-	memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
 	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
 	mad_agent_priv->reg_req = reg_req;
 	mad_agent_priv->rmpp_version = rmpp_version;
@@ -301,7 +308,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 				if (method) {
 					if (method_in_use(&method,
 							   mad_reg_req))
-						goto error3;
+						goto error4;
 				}
 			}
 			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
@@ -317,14 +324,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 					if (is_vendor_method_in_use(
 							vendor_class,
 							mad_reg_req))
-						goto error3;
+						goto error4;
 				}
 			}
 			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
 		}
 		if (ret2) {
 			ret = ERR_PTR(ret2);
-			goto error3;
+			goto error4;
 		}
 	}
@@ -346,11 +353,13 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 
 	return &mad_agent_priv->agent;
 
-error3:
+error4:
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 	kfree(reg_req);
-error2:
+error3:
 	kfree(mad_agent_priv);
+error2:
+	ib_dereg_mr(mad_agent_priv->agent.mr);
 error1:
 	return ret;
 }
@@ -487,18 +496,15 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 	 * MADs, preventing us from queuing additional work
 	 */
 	cancel_mads(mad_agent_priv);
-
 	port_priv = mad_agent_priv->qp_info->port_priv;
-
 	cancel_delayed_work(&mad_agent_priv->timed_work);
-	flush_workqueue(port_priv->wq);
 
 	spin_lock_irqsave(&port_priv->reg_lock, flags);
 	remove_mad_reg_req(mad_agent_priv);
 	list_del(&mad_agent_priv->agent_list);
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 
-	/* XXX: Cleanup pending RMPP receives for this agent */
+	flush_workqueue(port_priv->wq);
 
 	atomic_dec(&mad_agent_priv->refcount);
 	wait_event(mad_agent_priv->wait,
@@ -506,6 +512,7 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 
 	if (mad_agent_priv->reg_req)
 		kfree(mad_agent_priv->reg_req);
+	ib_dereg_mr(mad_agent_priv->agent.mr);
 	kfree(mad_agent_priv);
 }
 
@@ -750,7 +757,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 	queue_work(mad_agent_priv->qp_info->port_priv->wq,
-		  &mad_agent_priv->local_work);
+		   &mad_agent_priv->local_work);
 	ret = 1;
 out:
 	return ret;