author     Hal Rosenstock <halr@voltaire.com>      2005-07-27 14:45:22 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2005-07-27 19:26:09 -0400
commit     b82cab6b331b51d82f90d2207f3bbfdf09361ac9 (patch)
tree       5aad260a806939b2ea5b2d5a029b38b77a4fcd07 /drivers/infiniband
parent     c183a4c33528d17cde0dcb093ae4248d8cb8f649 (diff)
[PATCH] IB: Update MAD client API
Automatically allocate an MR when registering a MAD agent.
MAD clients are modified to use this updated API.
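For reference, the client-side effect is that the per-client ib_get_dma_mr()/ib_dereg_mr()
calls go away and the lkey for send gather entries is taken from the agent itself. A minimal
sketch of the new usage follows; my_fill_gather_entry() and its parameter names are
illustrative only and are not part of this patch:

        /* Sketch of a MAD client's send path under the updated API.
         * my_fill_gather_entry() is a hypothetical helper; only the
         * my_agent->mr->lkey access reflects the API change made here.
         */
        #include <linux/types.h>
        #include <ib_verbs.h>
        #include <ib_mad.h>

        static void my_fill_gather_entry(struct ib_mad_agent *my_agent,
                                         struct ib_sge *gather_list,
                                         u64 dma_addr, u32 length)
        {
                gather_list->addr   = dma_addr;
                gather_list->length = length;
                /* Previously the lkey came from an MR the client allocated with
                 * ib_get_dma_mr(my_agent->qp->pd, IB_ACCESS_LOCAL_WRITE) and
                 * released with ib_dereg_mr() before unregistering the agent.
                 * Now the MR is allocated by ib_register_mad_agent() and freed
                 * by ib_unregister_mad_agent().
                 */
                gather_list->lkey = my_agent->mr->lkey;
        }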
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--   drivers/infiniband/core/agent.c      | 14
-rw-r--r--   drivers/infiniband/core/agent_priv.h |  3
-rw-r--r--   drivers/infiniband/core/mad.c        | 31
-rw-r--r--   drivers/infiniband/core/sa_query.c   | 15
-rw-r--r--   drivers/infiniband/include/ib_mad.h  |  1
5 files changed, 24 insertions, 40 deletions
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 23d1957c4b29..dde25ee81b65 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -134,7 +134,7 @@ static int agent_mad_send(struct ib_mad_agent *mad_agent,
                                           sizeof(mad_priv->mad),
                                           DMA_TO_DEVICE);
         gather_list.length = sizeof(mad_priv->mad);
-        gather_list.lkey = (*port_priv->mr).lkey;
+        gather_list.lkey = mad_agent->mr->lkey;
 
         send_wr.next = NULL;
         send_wr.opcode = IB_WR_SEND;
@@ -322,22 +322,12 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
                 goto error3;
         }
 
-        port_priv->mr = ib_get_dma_mr(port_priv->smp_agent->qp->pd,
-                                      IB_ACCESS_LOCAL_WRITE);
-        if (IS_ERR(port_priv->mr)) {
-                printk(KERN_ERR SPFX "Couldn't get DMA MR\n");
-                ret = PTR_ERR(port_priv->mr);
-                goto error4;
-        }
-
         spin_lock_irqsave(&ib_agent_port_list_lock, flags);
         list_add_tail(&port_priv->port_list, &ib_agent_port_list);
         spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
         return 0;
 
-error4:
-        ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
 error3:
         ib_unregister_mad_agent(port_priv->smp_agent);
 error2:
@@ -361,8 +351,6 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
         list_del(&port_priv->port_list);
         spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
-        ib_dereg_mr(port_priv->mr);
-
         ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
         ib_unregister_mad_agent(port_priv->smp_agent);
         kfree(port_priv);
diff --git a/drivers/infiniband/core/agent_priv.h b/drivers/infiniband/core/agent_priv.h
index 17a0cce5813c..17435af1e914 100644
--- a/drivers/infiniband/core/agent_priv.h
+++ b/drivers/infiniband/core/agent_priv.h
@@ -33,7 +33,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: agent_priv.h 1389 2004-12-27 22:56:47Z roland $
+ * $Id: agent_priv.h 1640 2005-01-24 22:39:02Z halr $
  */
 
 #ifndef __IB_AGENT_PRIV_H__
@@ -57,7 +57,6 @@ struct ib_agent_port_private {
         int port_num;
         struct ib_mad_agent *smp_agent;       /* SM class */
         struct ib_mad_agent *perf_mgmt_agent; /* PerfMgmt class */
-        struct ib_mr *mr;
 };
 
 #endif /* __IB_AGENT_PRIV_H__ */
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 23628c622a50..52748b0f7685 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -261,19 +261,26 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
                 ret = ERR_PTR(-ENOMEM);
                 goto error1;
         }
+        memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
+
+        mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
+                                                 IB_ACCESS_LOCAL_WRITE);
+        if (IS_ERR(mad_agent_priv->agent.mr)) {
+                ret = ERR_PTR(-ENOMEM);
+                goto error2;
+        }
 
         if (mad_reg_req) {
                 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
                 if (!reg_req) {
                         ret = ERR_PTR(-ENOMEM);
-                        goto error2;
+                        goto error3;
                 }
                 /* Make a copy of the MAD registration request */
                 memcpy(reg_req, mad_reg_req, sizeof *reg_req);
         }
 
         /* Now, fill in the various structures */
-        memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
         mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
         mad_agent_priv->reg_req = reg_req;
         mad_agent_priv->rmpp_version = rmpp_version;
@@ -301,7 +308,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
                         if (method) {
                                 if (method_in_use(&method,
                                                   mad_reg_req))
-                                        goto error3;
+                                        goto error4;
                         }
                 }
                 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
@@ -317,14 +324,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
                                 if (is_vendor_method_in_use(
                                                 vendor_class,
                                                 mad_reg_req))
-                                        goto error3;
+                                        goto error4;
                         }
                 }
                 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
         }
         if (ret2) {
                 ret = ERR_PTR(ret2);
-                goto error3;
+                goto error4;
         }
 }
 
@@ -346,11 +353,13 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 
         return &mad_agent_priv->agent;
 
-error3:
+error4:
         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
         kfree(reg_req);
-error2:
+error3:
         kfree(mad_agent_priv);
+error2:
+        ib_dereg_mr(mad_agent_priv->agent.mr);
 error1:
         return ret;
 }
@@ -487,18 +496,15 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
          * MADs, preventing us from queuing additional work
          */
         cancel_mads(mad_agent_priv);
-
         port_priv = mad_agent_priv->qp_info->port_priv;
-
         cancel_delayed_work(&mad_agent_priv->timed_work);
-        flush_workqueue(port_priv->wq);
 
         spin_lock_irqsave(&port_priv->reg_lock, flags);
         remove_mad_reg_req(mad_agent_priv);
         list_del(&mad_agent_priv->agent_list);
         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 
-        /* XXX: Cleanup pending RMPP receives for this agent */
+        flush_workqueue(port_priv->wq);
 
         atomic_dec(&mad_agent_priv->refcount);
         wait_event(mad_agent_priv->wait,
@@ -506,6 +512,7 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 
         if (mad_agent_priv->reg_req)
                 kfree(mad_agent_priv->reg_req);
+        ib_dereg_mr(mad_agent_priv->agent.mr);
         kfree(mad_agent_priv);
 }
 
@@ -750,7 +757,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
         list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
         queue_work(mad_agent_priv->qp_info->port_priv->wq,
-                  &mad_agent_priv->local_work);
+                   &mad_agent_priv->local_work);
         ret = 1;
 out:
         return ret;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 5a08e81fa827..649824e33253 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -77,7 +77,6 @@ struct ib_sa_sm_ah {
 
 struct ib_sa_port {
         struct ib_mad_agent *agent;
-        struct ib_mr        *mr;
         struct ib_sa_sm_ah  *sm_ah;
         struct work_struct   update_task;
         spinlock_t           ah_lock;
@@ -492,7 +491,7 @@ retry:
                                            sizeof (struct ib_sa_mad),
                                            DMA_TO_DEVICE);
         gather_list.length = sizeof (struct ib_sa_mad);
-        gather_list.lkey = port->mr->lkey;
+        gather_list.lkey = port->agent->mr->lkey;
         pci_unmap_addr_set(query, mapping, gather_list.addr);
 
         ret = ib_post_send_mad(port->agent, &wr, &bad_wr);
@@ -780,7 +779,6 @@ static void ib_sa_add_one(struct ib_device *device)
         sa_dev->end_port = e;
 
         for (i = 0; i <= e - s; ++i) {
-                sa_dev->port[i].mr = NULL;
                 sa_dev->port[i].sm_ah = NULL;
                 sa_dev->port[i].port_num = i + s;
                 spin_lock_init(&sa_dev->port[i].ah_lock);
@@ -792,13 +790,6 @@ static void ib_sa_add_one(struct ib_device *device)
                 if (IS_ERR(sa_dev->port[i].agent))
                         goto err;
 
-                sa_dev->port[i].mr = ib_get_dma_mr(sa_dev->port[i].agent->qp->pd,
-                                                   IB_ACCESS_LOCAL_WRITE);
-                if (IS_ERR(sa_dev->port[i].mr)) {
-                        ib_unregister_mad_agent(sa_dev->port[i].agent);
-                        goto err;
-                }
-
                 INIT_WORK(&sa_dev->port[i].update_task,
                           update_sm_ah, &sa_dev->port[i]);
         }
@@ -822,10 +813,8 @@ static void ib_sa_add_one(struct ib_device *device)
         return;
 
 err:
-        while (--i >= 0) {
-                ib_dereg_mr(sa_dev->port[i].mr);
+        while (--i >= 0)
                 ib_unregister_mad_agent(sa_dev->port[i].agent);
-        }
 
         kfree(sa_dev);
 
diff --git a/drivers/infiniband/include/ib_mad.h b/drivers/infiniband/include/ib_mad.h
index 4a6bf6763a97..60378c1a9ccf 100644
--- a/drivers/infiniband/include/ib_mad.h
+++ b/drivers/infiniband/include/ib_mad.h
@@ -180,6 +180,7 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
 struct ib_mad_agent {
         struct ib_device        *device;
         struct ib_qp            *qp;
+        struct ib_mr            *mr;
         ib_mad_recv_handler     recv_handler;
         ib_mad_send_handler     send_handler;
         ib_mad_snoop_handler    snoop_handler;