author     Jeff Garzik <jgarzik@pobox.com>	2005-08-29 16:12:36 -0400
committer  Jeff Garzik <jgarzik@pobox.com>	2005-08-29 16:12:36 -0400
commit     2fca877b68b2b4fc5b94277858a1bedd46017cde
tree       fd02725406299ba2f26354463b3c261721e9eb6b /drivers/infiniband/core/mad.c
parent     ff40c6d3d1437ecdf295b8e39adcb06c3d6021ef
parent     02b3e4e2d71b6058ec11cc01c72ac651eb3ded2b
/spare/repo/libata-dev branch 'v2.6.13'
Diffstat (limited to 'drivers/infiniband/core/mad.c')
-rw-r--r--  drivers/infiniband/core/mad.c  600
1 file changed, 343 insertions, 257 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 23628c622a50..b97e210ce9c8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1,5 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. | 2 | * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. |
3 | * Copyright (c) 2005 Intel Corporation. All rights reserved. | ||
4 | * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. | ||
3 | * | 5 | * |
4 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 7 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -29,12 +31,12 @@ | |||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
30 | * SOFTWARE. | 32 | * SOFTWARE. |
31 | * | 33 | * |
32 | * $Id: mad.c 1389 2004-12-27 22:56:47Z roland $ | 34 | * $Id: mad.c 2817 2005-07-07 11:29:26Z halr $ |
33 | */ | 35 | */ |
34 | |||
35 | #include <linux/dma-mapping.h> | 36 | #include <linux/dma-mapping.h> |
36 | 37 | ||
37 | #include "mad_priv.h" | 38 | #include "mad_priv.h" |
39 | #include "mad_rmpp.h" | ||
38 | #include "smi.h" | 40 | #include "smi.h" |
39 | #include "agent.h" | 41 | #include "agent.h" |
40 | 42 | ||
@@ -45,6 +47,7 @@ MODULE_AUTHOR("Sean Hefty"); | |||
45 | 47 | ||
46 | 48 | ||
47 | kmem_cache_t *ib_mad_cache; | 49 | kmem_cache_t *ib_mad_cache; |
50 | |||
48 | static struct list_head ib_mad_port_list; | 51 | static struct list_head ib_mad_port_list; |
49 | static u32 ib_mad_client_id = 0; | 52 | static u32 ib_mad_client_id = 0; |
50 | 53 | ||
@@ -58,16 +61,12 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method, | |||
58 | static void remove_mad_reg_req(struct ib_mad_agent_private *priv); | 61 | static void remove_mad_reg_req(struct ib_mad_agent_private *priv); |
59 | static struct ib_mad_agent_private *find_mad_agent( | 62 | static struct ib_mad_agent_private *find_mad_agent( |
60 | struct ib_mad_port_private *port_priv, | 63 | struct ib_mad_port_private *port_priv, |
61 | struct ib_mad *mad, int solicited); | 64 | struct ib_mad *mad); |
62 | static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, | 65 | static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, |
63 | struct ib_mad_private *mad); | 66 | struct ib_mad_private *mad); |
64 | static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); | 67 | static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); |
65 | static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, | ||
66 | struct ib_mad_send_wc *mad_send_wc); | ||
67 | static void timeout_sends(void *data); | 68 | static void timeout_sends(void *data); |
68 | static void cancel_sends(void *data); | ||
69 | static void local_completions(void *data); | 69 | static void local_completions(void *data); |
70 | static int solicited_mad(struct ib_mad *mad); | ||
71 | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, | 70 | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, |
72 | struct ib_mad_agent_private *agent_priv, | 71 | struct ib_mad_agent_private *agent_priv, |
73 | u8 mgmt_class); | 72 | u8 mgmt_class); |
@@ -197,8 +196,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
197 | if (qpn == -1) | 196 | if (qpn == -1) |
198 | goto error1; | 197 | goto error1; |
199 | 198 | ||
200 | if (rmpp_version) | 199 | if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) |
201 | goto error1; /* XXX: until RMPP implemented */ | 200 | goto error1; |
202 | 201 | ||
203 | /* Validate MAD registration request if supplied */ | 202 | /* Validate MAD registration request if supplied */ |
204 | if (mad_reg_req) { | 203 | if (mad_reg_req) { |
@@ -261,22 +260,29 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
261 | ret = ERR_PTR(-ENOMEM); | 260 | ret = ERR_PTR(-ENOMEM); |
262 | goto error1; | 261 | goto error1; |
263 | } | 262 | } |
263 | memset(mad_agent_priv, 0, sizeof *mad_agent_priv); | ||
264 | |||
265 | mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd, | ||
266 | IB_ACCESS_LOCAL_WRITE); | ||
267 | if (IS_ERR(mad_agent_priv->agent.mr)) { | ||
268 | ret = ERR_PTR(-ENOMEM); | ||
269 | goto error2; | ||
270 | } | ||
264 | 271 | ||
265 | if (mad_reg_req) { | 272 | if (mad_reg_req) { |
266 | reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL); | 273 | reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL); |
267 | if (!reg_req) { | 274 | if (!reg_req) { |
268 | ret = ERR_PTR(-ENOMEM); | 275 | ret = ERR_PTR(-ENOMEM); |
269 | goto error2; | 276 | goto error3; |
270 | } | 277 | } |
271 | /* Make a copy of the MAD registration request */ | 278 | /* Make a copy of the MAD registration request */ |
272 | memcpy(reg_req, mad_reg_req, sizeof *reg_req); | 279 | memcpy(reg_req, mad_reg_req, sizeof *reg_req); |
273 | } | 280 | } |
274 | 281 | ||
275 | /* Now, fill in the various structures */ | 282 | /* Now, fill in the various structures */ |
276 | memset(mad_agent_priv, 0, sizeof *mad_agent_priv); | ||
277 | mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; | 283 | mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; |
278 | mad_agent_priv->reg_req = reg_req; | 284 | mad_agent_priv->reg_req = reg_req; |
279 | mad_agent_priv->rmpp_version = rmpp_version; | 285 | mad_agent_priv->agent.rmpp_version = rmpp_version; |
280 | mad_agent_priv->agent.device = device; | 286 | mad_agent_priv->agent.device = device; |
281 | mad_agent_priv->agent.recv_handler = recv_handler; | 287 | mad_agent_priv->agent.recv_handler = recv_handler; |
282 | mad_agent_priv->agent.send_handler = send_handler; | 288 | mad_agent_priv->agent.send_handler = send_handler; |
@@ -301,7 +307,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
301 | if (method) { | 307 | if (method) { |
302 | if (method_in_use(&method, | 308 | if (method_in_use(&method, |
303 | mad_reg_req)) | 309 | mad_reg_req)) |
304 | goto error3; | 310 | goto error4; |
305 | } | 311 | } |
306 | } | 312 | } |
307 | ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv, | 313 | ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv, |
@@ -317,14 +323,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
317 | if (is_vendor_method_in_use( | 323 | if (is_vendor_method_in_use( |
318 | vendor_class, | 324 | vendor_class, |
319 | mad_reg_req)) | 325 | mad_reg_req)) |
320 | goto error3; | 326 | goto error4; |
321 | } | 327 | } |
322 | } | 328 | } |
323 | ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv); | 329 | ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv); |
324 | } | 330 | } |
325 | if (ret2) { | 331 | if (ret2) { |
326 | ret = ERR_PTR(ret2); | 332 | ret = ERR_PTR(ret2); |
327 | goto error3; | 333 | goto error4; |
328 | } | 334 | } |
329 | } | 335 | } |
330 | 336 | ||
@@ -335,22 +341,24 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
335 | spin_lock_init(&mad_agent_priv->lock); | 341 | spin_lock_init(&mad_agent_priv->lock); |
336 | INIT_LIST_HEAD(&mad_agent_priv->send_list); | 342 | INIT_LIST_HEAD(&mad_agent_priv->send_list); |
337 | INIT_LIST_HEAD(&mad_agent_priv->wait_list); | 343 | INIT_LIST_HEAD(&mad_agent_priv->wait_list); |
344 | INIT_LIST_HEAD(&mad_agent_priv->done_list); | ||
345 | INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); | ||
338 | INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv); | 346 | INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv); |
339 | INIT_LIST_HEAD(&mad_agent_priv->local_list); | 347 | INIT_LIST_HEAD(&mad_agent_priv->local_list); |
340 | INIT_WORK(&mad_agent_priv->local_work, local_completions, | 348 | INIT_WORK(&mad_agent_priv->local_work, local_completions, |
341 | mad_agent_priv); | 349 | mad_agent_priv); |
342 | INIT_LIST_HEAD(&mad_agent_priv->canceled_list); | ||
343 | INIT_WORK(&mad_agent_priv->canceled_work, cancel_sends, mad_agent_priv); | ||
344 | atomic_set(&mad_agent_priv->refcount, 1); | 350 | atomic_set(&mad_agent_priv->refcount, 1); |
345 | init_waitqueue_head(&mad_agent_priv->wait); | 351 | init_waitqueue_head(&mad_agent_priv->wait); |
346 | 352 | ||
347 | return &mad_agent_priv->agent; | 353 | return &mad_agent_priv->agent; |
348 | 354 | ||
349 | error3: | 355 | error4: |
350 | spin_unlock_irqrestore(&port_priv->reg_lock, flags); | 356 | spin_unlock_irqrestore(&port_priv->reg_lock, flags); |
351 | kfree(reg_req); | 357 | kfree(reg_req); |
352 | error2: | 358 | error3: |
353 | kfree(mad_agent_priv); | 359 | kfree(mad_agent_priv); |
360 | error2: | ||
361 | ib_dereg_mr(mad_agent_priv->agent.mr); | ||
354 | error1: | 362 | error1: |
355 | return ret; | 363 | return ret; |
356 | } | 364 | } |
@@ -487,18 +495,16 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) | |||
487 | * MADs, preventing us from queuing additional work | 495 | * MADs, preventing us from queuing additional work |
488 | */ | 496 | */ |
489 | cancel_mads(mad_agent_priv); | 497 | cancel_mads(mad_agent_priv); |
490 | |||
491 | port_priv = mad_agent_priv->qp_info->port_priv; | 498 | port_priv = mad_agent_priv->qp_info->port_priv; |
492 | |||
493 | cancel_delayed_work(&mad_agent_priv->timed_work); | 499 | cancel_delayed_work(&mad_agent_priv->timed_work); |
494 | flush_workqueue(port_priv->wq); | ||
495 | 500 | ||
496 | spin_lock_irqsave(&port_priv->reg_lock, flags); | 501 | spin_lock_irqsave(&port_priv->reg_lock, flags); |
497 | remove_mad_reg_req(mad_agent_priv); | 502 | remove_mad_reg_req(mad_agent_priv); |
498 | list_del(&mad_agent_priv->agent_list); | 503 | list_del(&mad_agent_priv->agent_list); |
499 | spin_unlock_irqrestore(&port_priv->reg_lock, flags); | 504 | spin_unlock_irqrestore(&port_priv->reg_lock, flags); |
500 | 505 | ||
501 | /* XXX: Cleanup pending RMPP receives for this agent */ | 506 | flush_workqueue(port_priv->wq); |
507 | ib_cancel_rmpp_recvs(mad_agent_priv); | ||
502 | 508 | ||
503 | atomic_dec(&mad_agent_priv->refcount); | 509 | atomic_dec(&mad_agent_priv->refcount); |
504 | wait_event(mad_agent_priv->wait, | 510 | wait_event(mad_agent_priv->wait, |
@@ -506,6 +512,7 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) | |||
506 | 512 | ||
507 | if (mad_agent_priv->reg_req) | 513 | if (mad_agent_priv->reg_req) |
508 | kfree(mad_agent_priv->reg_req); | 514 | kfree(mad_agent_priv->reg_req); |
515 | ib_dereg_mr(mad_agent_priv->agent.mr); | ||
509 | kfree(mad_agent_priv); | 516 | kfree(mad_agent_priv); |
510 | } | 517 | } |
511 | 518 | ||
@@ -551,6 +558,13 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent) | |||
551 | } | 558 | } |
552 | EXPORT_SYMBOL(ib_unregister_mad_agent); | 559 | EXPORT_SYMBOL(ib_unregister_mad_agent); |
553 | 560 | ||
561 | static inline int response_mad(struct ib_mad *mad) | ||
562 | { | ||
563 | /* Trap represses are responses although response bit is reset */ | ||
564 | return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) || | ||
565 | (mad->mad_hdr.method & IB_MGMT_METHOD_RESP)); | ||
566 | } | ||
567 | |||
554 | static void dequeue_mad(struct ib_mad_list_head *mad_list) | 568 | static void dequeue_mad(struct ib_mad_list_head *mad_list) |
555 | { | 569 | { |
556 | struct ib_mad_queue *mad_queue; | 570 | struct ib_mad_queue *mad_queue; |
@@ -643,7 +657,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, | |||
643 | struct ib_smp *smp, | 657 | struct ib_smp *smp, |
644 | struct ib_send_wr *send_wr) | 658 | struct ib_send_wr *send_wr) |
645 | { | 659 | { |
646 | int ret, solicited; | 660 | int ret; |
647 | unsigned long flags; | 661 | unsigned long flags; |
648 | struct ib_mad_local_private *local; | 662 | struct ib_mad_local_private *local; |
649 | struct ib_mad_private *mad_priv; | 663 | struct ib_mad_private *mad_priv; |
@@ -689,11 +703,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, | |||
689 | switch (ret) | 703 | switch (ret) |
690 | { | 704 | { |
691 | case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: | 705 | case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: |
692 | /* | 706 | if (response_mad(&mad_priv->mad.mad) && |
693 | * See if response is solicited and | ||
694 | * there is a recv handler | ||
695 | */ | ||
696 | if (solicited_mad(&mad_priv->mad.mad) && | ||
697 | mad_agent_priv->agent.recv_handler) { | 707 | mad_agent_priv->agent.recv_handler) { |
698 | local->mad_priv = mad_priv; | 708 | local->mad_priv = mad_priv; |
699 | local->recv_mad_agent = mad_agent_priv; | 709 | local->recv_mad_agent = mad_agent_priv; |
@@ -710,15 +720,13 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, | |||
710 | break; | 720 | break; |
711 | case IB_MAD_RESULT_SUCCESS: | 721 | case IB_MAD_RESULT_SUCCESS: |
712 | /* Treat like an incoming receive MAD */ | 722 | /* Treat like an incoming receive MAD */ |
713 | solicited = solicited_mad(&mad_priv->mad.mad); | ||
714 | port_priv = ib_get_mad_port(mad_agent_priv->agent.device, | 723 | port_priv = ib_get_mad_port(mad_agent_priv->agent.device, |
715 | mad_agent_priv->agent.port_num); | 724 | mad_agent_priv->agent.port_num); |
716 | if (port_priv) { | 725 | if (port_priv) { |
717 | mad_priv->mad.mad.mad_hdr.tid = | 726 | mad_priv->mad.mad.mad_hdr.tid = |
718 | ((struct ib_mad *)smp)->mad_hdr.tid; | 727 | ((struct ib_mad *)smp)->mad_hdr.tid; |
719 | recv_mad_agent = find_mad_agent(port_priv, | 728 | recv_mad_agent = find_mad_agent(port_priv, |
720 | &mad_priv->mad.mad, | 729 | &mad_priv->mad.mad); |
721 | solicited); | ||
722 | } | 730 | } |
723 | if (!port_priv || !recv_mad_agent) { | 731 | if (!port_priv || !recv_mad_agent) { |
724 | kmem_cache_free(ib_mad_cache, mad_priv); | 732 | kmem_cache_free(ib_mad_cache, mad_priv); |
@@ -750,43 +758,133 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, | |||
750 | list_add_tail(&local->completion_list, &mad_agent_priv->local_list); | 758 | list_add_tail(&local->completion_list, &mad_agent_priv->local_list); |
751 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 759 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
752 | queue_work(mad_agent_priv->qp_info->port_priv->wq, | 760 | queue_work(mad_agent_priv->qp_info->port_priv->wq, |
753 | &mad_agent_priv->local_work); | 761 | &mad_agent_priv->local_work); |
754 | ret = 1; | 762 | ret = 1; |
755 | out: | 763 | out: |
756 | return ret; | 764 | return ret; |
757 | } | 765 | } |
758 | 766 | ||
759 | static int ib_send_mad(struct ib_mad_agent_private *mad_agent_priv, | 767 | static int get_buf_length(int hdr_len, int data_len) |
760 | struct ib_mad_send_wr_private *mad_send_wr) | 768 | { |
769 | int seg_size, pad; | ||
770 | |||
771 | seg_size = sizeof(struct ib_mad) - hdr_len; | ||
772 | if (data_len && seg_size) { | ||
773 | pad = seg_size - data_len % seg_size; | ||
774 | if (pad == seg_size) | ||
775 | pad = 0; | ||
776 | } else | ||
777 | pad = seg_size; | ||
778 | return hdr_len + data_len + pad; | ||
779 | } | ||
780 | |||
781 | struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, | ||
782 | u32 remote_qpn, u16 pkey_index, | ||
783 | struct ib_ah *ah, int rmpp_active, | ||
784 | int hdr_len, int data_len, | ||
785 | unsigned int __nocast gfp_mask) | ||
786 | { | ||
787 | struct ib_mad_agent_private *mad_agent_priv; | ||
788 | struct ib_mad_send_buf *send_buf; | ||
789 | int buf_size; | ||
790 | void *buf; | ||
791 | |||
792 | mad_agent_priv = container_of(mad_agent, | ||
793 | struct ib_mad_agent_private, agent); | ||
794 | buf_size = get_buf_length(hdr_len, data_len); | ||
795 | |||
796 | if ((!mad_agent->rmpp_version && | ||
797 | (rmpp_active || buf_size > sizeof(struct ib_mad))) || | ||
798 | (!rmpp_active && buf_size > sizeof(struct ib_mad))) | ||
799 | return ERR_PTR(-EINVAL); | ||
800 | |||
801 | buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask); | ||
802 | if (!buf) | ||
803 | return ERR_PTR(-ENOMEM); | ||
804 | memset(buf, 0, sizeof *send_buf + buf_size); | ||
805 | |||
806 | send_buf = buf + buf_size; | ||
807 | send_buf->mad = buf; | ||
808 | |||
809 | send_buf->sge.addr = dma_map_single(mad_agent->device->dma_device, | ||
810 | buf, buf_size, DMA_TO_DEVICE); | ||
811 | pci_unmap_addr_set(send_buf, mapping, send_buf->sge.addr); | ||
812 | send_buf->sge.length = buf_size; | ||
813 | send_buf->sge.lkey = mad_agent->mr->lkey; | ||
814 | |||
815 | send_buf->send_wr.wr_id = (unsigned long) send_buf; | ||
816 | send_buf->send_wr.sg_list = &send_buf->sge; | ||
817 | send_buf->send_wr.num_sge = 1; | ||
818 | send_buf->send_wr.opcode = IB_WR_SEND; | ||
819 | send_buf->send_wr.send_flags = IB_SEND_SIGNALED; | ||
820 | send_buf->send_wr.wr.ud.ah = ah; | ||
821 | send_buf->send_wr.wr.ud.mad_hdr = &send_buf->mad->mad_hdr; | ||
822 | send_buf->send_wr.wr.ud.remote_qpn = remote_qpn; | ||
823 | send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY; | ||
824 | send_buf->send_wr.wr.ud.pkey_index = pkey_index; | ||
825 | |||
826 | if (rmpp_active) { | ||
827 | struct ib_rmpp_mad *rmpp_mad; | ||
828 | rmpp_mad = (struct ib_rmpp_mad *)send_buf->mad; | ||
829 | rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(hdr_len - | ||
830 | offsetof(struct ib_rmpp_mad, data) + data_len); | ||
831 | rmpp_mad->rmpp_hdr.rmpp_version = mad_agent->rmpp_version; | ||
832 | rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA; | ||
833 | ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, | ||
834 | IB_MGMT_RMPP_FLAG_ACTIVE); | ||
835 | } | ||
836 | |||
837 | send_buf->mad_agent = mad_agent; | ||
838 | atomic_inc(&mad_agent_priv->refcount); | ||
839 | return send_buf; | ||
840 | } | ||
841 | EXPORT_SYMBOL(ib_create_send_mad); | ||
842 | |||
843 | void ib_free_send_mad(struct ib_mad_send_buf *send_buf) | ||
844 | { | ||
845 | struct ib_mad_agent_private *mad_agent_priv; | ||
846 | |||
847 | mad_agent_priv = container_of(send_buf->mad_agent, | ||
848 | struct ib_mad_agent_private, agent); | ||
849 | |||
850 | dma_unmap_single(send_buf->mad_agent->device->dma_device, | ||
851 | pci_unmap_addr(send_buf, mapping), | ||
852 | send_buf->sge.length, DMA_TO_DEVICE); | ||
853 | kfree(send_buf->mad); | ||
854 | |||
855 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | ||
856 | wake_up(&mad_agent_priv->wait); | ||
857 | } | ||
858 | EXPORT_SYMBOL(ib_free_send_mad); | ||
859 | |||
860 | int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) | ||
761 | { | 861 | { |
762 | struct ib_mad_qp_info *qp_info; | 862 | struct ib_mad_qp_info *qp_info; |
763 | struct ib_send_wr *bad_send_wr; | 863 | struct ib_send_wr *bad_send_wr; |
864 | struct list_head *list; | ||
764 | unsigned long flags; | 865 | unsigned long flags; |
765 | int ret; | 866 | int ret; |
766 | 867 | ||
767 | /* Replace user's WR ID with our own to find WR upon completion */ | 868 | /* Set WR ID to find mad_send_wr upon completion */ |
768 | qp_info = mad_agent_priv->qp_info; | 869 | qp_info = mad_send_wr->mad_agent_priv->qp_info; |
769 | mad_send_wr->wr_id = mad_send_wr->send_wr.wr_id; | ||
770 | mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list; | 870 | mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list; |
771 | mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; | 871 | mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; |
772 | 872 | ||
773 | spin_lock_irqsave(&qp_info->send_queue.lock, flags); | 873 | spin_lock_irqsave(&qp_info->send_queue.lock, flags); |
774 | if (qp_info->send_queue.count++ < qp_info->send_queue.max_active) { | 874 | if (qp_info->send_queue.count < qp_info->send_queue.max_active) { |
775 | list_add_tail(&mad_send_wr->mad_list.list, | 875 | ret = ib_post_send(mad_send_wr->mad_agent_priv->agent.qp, |
776 | &qp_info->send_queue.list); | ||
777 | spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); | ||
778 | ret = ib_post_send(mad_agent_priv->agent.qp, | ||
779 | &mad_send_wr->send_wr, &bad_send_wr); | 876 | &mad_send_wr->send_wr, &bad_send_wr); |
780 | if (ret) { | 877 | list = &qp_info->send_queue.list; |
781 | printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret); | ||
782 | dequeue_mad(&mad_send_wr->mad_list); | ||
783 | } | ||
784 | } else { | 878 | } else { |
785 | list_add_tail(&mad_send_wr->mad_list.list, | ||
786 | &qp_info->overflow_list); | ||
787 | spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); | ||
788 | ret = 0; | 879 | ret = 0; |
880 | list = &qp_info->overflow_list; | ||
789 | } | 881 | } |
882 | |||
883 | if (!ret) { | ||
884 | qp_info->send_queue.count++; | ||
885 | list_add_tail(&mad_send_wr->mad_list.list, list); | ||
886 | } | ||
887 | spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); | ||
790 | return ret; | 888 | return ret; |
791 | } | 889 | } |
792 | 890 | ||
@@ -860,18 +958,19 @@ int ib_post_send_mad(struct ib_mad_agent *mad_agent, | |||
860 | ret = -ENOMEM; | 958 | ret = -ENOMEM; |
861 | goto error2; | 959 | goto error2; |
862 | } | 960 | } |
961 | memset(mad_send_wr, 0, sizeof *mad_send_wr); | ||
863 | 962 | ||
864 | mad_send_wr->send_wr = *send_wr; | 963 | mad_send_wr->send_wr = *send_wr; |
865 | mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list; | 964 | mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list; |
866 | memcpy(mad_send_wr->sg_list, send_wr->sg_list, | 965 | memcpy(mad_send_wr->sg_list, send_wr->sg_list, |
867 | sizeof *send_wr->sg_list * send_wr->num_sge); | 966 | sizeof *send_wr->sg_list * send_wr->num_sge); |
868 | mad_send_wr->send_wr.next = NULL; | 967 | mad_send_wr->wr_id = send_wr->wr_id; |
869 | mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid; | 968 | mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid; |
870 | mad_send_wr->agent = mad_agent; | 969 | mad_send_wr->mad_agent_priv = mad_agent_priv; |
871 | /* Timeout will be updated after send completes */ | 970 | /* Timeout will be updated after send completes */ |
872 | mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr. | 971 | mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr. |
873 | ud.timeout_ms); | 972 | ud.timeout_ms); |
874 | mad_send_wr->retry = 0; | 973 | mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries; |
875 | /* One reference for each work request to QP + response */ | 974 | /* One reference for each work request to QP + response */ |
876 | mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); | 975 | mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); |
877 | mad_send_wr->status = IB_WC_SUCCESS; | 976 | mad_send_wr->status = IB_WC_SUCCESS; |
@@ -883,8 +982,13 @@ int ib_post_send_mad(struct ib_mad_agent *mad_agent, | |||
883 | &mad_agent_priv->send_list); | 982 | &mad_agent_priv->send_list); |
884 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 983 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
885 | 984 | ||
886 | ret = ib_send_mad(mad_agent_priv, mad_send_wr); | 985 | if (mad_agent_priv->agent.rmpp_version) { |
887 | if (ret) { | 986 | ret = ib_send_rmpp_mad(mad_send_wr); |
987 | if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED) | ||
988 | ret = ib_send_mad(mad_send_wr); | ||
989 | } else | ||
990 | ret = ib_send_mad(mad_send_wr); | ||
991 | if (ret < 0) { | ||
888 | /* Fail send request */ | 992 | /* Fail send request */ |
889 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 993 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
890 | list_del(&mad_send_wr->agent_list); | 994 | list_del(&mad_send_wr->agent_list); |
@@ -910,41 +1014,28 @@ EXPORT_SYMBOL(ib_post_send_mad); | |||
910 | */ | 1014 | */ |
911 | void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc) | 1015 | void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc) |
912 | { | 1016 | { |
913 | struct ib_mad_recv_buf *entry; | 1017 | struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf; |
914 | struct ib_mad_private_header *mad_priv_hdr; | 1018 | struct ib_mad_private_header *mad_priv_hdr; |
915 | struct ib_mad_private *priv; | 1019 | struct ib_mad_private *priv; |
1020 | struct list_head free_list; | ||
916 | 1021 | ||
917 | mad_priv_hdr = container_of(mad_recv_wc, | 1022 | INIT_LIST_HEAD(&free_list); |
918 | struct ib_mad_private_header, | 1023 | list_splice_init(&mad_recv_wc->rmpp_list, &free_list); |
919 | recv_wc); | ||
920 | priv = container_of(mad_priv_hdr, struct ib_mad_private, header); | ||
921 | 1024 | ||
922 | /* | 1025 | list_for_each_entry_safe(mad_recv_buf, temp_recv_buf, |
923 | * Walk receive buffer list associated with this WC | 1026 | &free_list, list) { |
924 | * No need to remove them from list of receive buffers | 1027 | mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc, |
925 | */ | 1028 | recv_buf); |
926 | list_for_each_entry(entry, &mad_recv_wc->recv_buf.list, list) { | ||
927 | /* Free previous receive buffer */ | ||
928 | kmem_cache_free(ib_mad_cache, priv); | ||
929 | mad_priv_hdr = container_of(mad_recv_wc, | 1029 | mad_priv_hdr = container_of(mad_recv_wc, |
930 | struct ib_mad_private_header, | 1030 | struct ib_mad_private_header, |
931 | recv_wc); | 1031 | recv_wc); |
932 | priv = container_of(mad_priv_hdr, struct ib_mad_private, | 1032 | priv = container_of(mad_priv_hdr, struct ib_mad_private, |
933 | header); | 1033 | header); |
1034 | kmem_cache_free(ib_mad_cache, priv); | ||
934 | } | 1035 | } |
935 | |||
936 | /* Free last buffer */ | ||
937 | kmem_cache_free(ib_mad_cache, priv); | ||
938 | } | 1036 | } |
939 | EXPORT_SYMBOL(ib_free_recv_mad); | 1037 | EXPORT_SYMBOL(ib_free_recv_mad); |
940 | 1038 | ||
941 | void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, | ||
942 | void *buf) | ||
943 | { | ||
944 | printk(KERN_ERR PFX "ib_coalesce_recv_mad() not implemented yet\n"); | ||
945 | } | ||
946 | EXPORT_SYMBOL(ib_coalesce_recv_mad); | ||
947 | |||
948 | struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp, | 1039 | struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp, |
949 | u8 rmpp_version, | 1040 | u8 rmpp_version, |
950 | ib_mad_send_handler send_handler, | 1041 | ib_mad_send_handler send_handler, |
@@ -1338,42 +1429,15 @@ out: | |||
1338 | return; | 1429 | return; |
1339 | } | 1430 | } |
1340 | 1431 | ||
1341 | static int response_mad(struct ib_mad *mad) | ||
1342 | { | ||
1343 | /* Trap represses are responses although response bit is reset */ | ||
1344 | return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) || | ||
1345 | (mad->mad_hdr.method & IB_MGMT_METHOD_RESP)); | ||
1346 | } | ||
1347 | |||
1348 | static int solicited_mad(struct ib_mad *mad) | ||
1349 | { | ||
1350 | /* CM MADs are never solicited */ | ||
1351 | if (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CM) { | ||
1352 | return 0; | ||
1353 | } | ||
1354 | |||
1355 | /* XXX: Determine whether MAD is using RMPP */ | ||
1356 | |||
1357 | /* Not using RMPP */ | ||
1358 | /* Is this MAD a response to a previous MAD ? */ | ||
1359 | return response_mad(mad); | ||
1360 | } | ||
1361 | |||
1362 | static struct ib_mad_agent_private * | 1432 | static struct ib_mad_agent_private * |
1363 | find_mad_agent(struct ib_mad_port_private *port_priv, | 1433 | find_mad_agent(struct ib_mad_port_private *port_priv, |
1364 | struct ib_mad *mad, | 1434 | struct ib_mad *mad) |
1365 | int solicited) | ||
1366 | { | 1435 | { |
1367 | struct ib_mad_agent_private *mad_agent = NULL; | 1436 | struct ib_mad_agent_private *mad_agent = NULL; |
1368 | unsigned long flags; | 1437 | unsigned long flags; |
1369 | 1438 | ||
1370 | spin_lock_irqsave(&port_priv->reg_lock, flags); | 1439 | spin_lock_irqsave(&port_priv->reg_lock, flags); |
1371 | 1440 | if (response_mad(mad)) { | |
1372 | /* | ||
1373 | * Whether MAD was solicited determines type of routing to | ||
1374 | * MAD client. | ||
1375 | */ | ||
1376 | if (solicited) { | ||
1377 | u32 hi_tid; | 1441 | u32 hi_tid; |
1378 | struct ib_mad_agent_private *entry; | 1442 | struct ib_mad_agent_private *entry; |
1379 | 1443 | ||
@@ -1477,21 +1541,20 @@ out: | |||
1477 | return valid; | 1541 | return valid; |
1478 | } | 1542 | } |
1479 | 1543 | ||
1480 | /* | 1544 | static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv, |
1481 | * Return start of fully reassembled MAD, or NULL, if MAD isn't assembled yet | 1545 | struct ib_mad_hdr *mad_hdr) |
1482 | */ | ||
1483 | static struct ib_mad_private * | ||
1484 | reassemble_recv(struct ib_mad_agent_private *mad_agent_priv, | ||
1485 | struct ib_mad_private *recv) | ||
1486 | { | 1546 | { |
1487 | /* Until we have RMPP, all receives are reassembled!... */ | 1547 | struct ib_rmpp_mad *rmpp_mad; |
1488 | INIT_LIST_HEAD(&recv->header.recv_wc.recv_buf.list); | 1548 | |
1489 | return recv; | 1549 | rmpp_mad = (struct ib_rmpp_mad *)mad_hdr; |
1550 | return !mad_agent_priv->agent.rmpp_version || | ||
1551 | !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & | ||
1552 | IB_MGMT_RMPP_FLAG_ACTIVE) || | ||
1553 | (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); | ||
1490 | } | 1554 | } |
1491 | 1555 | ||
1492 | static struct ib_mad_send_wr_private* | 1556 | struct ib_mad_send_wr_private* |
1493 | find_send_req(struct ib_mad_agent_private *mad_agent_priv, | 1557 | ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid) |
1494 | u64 tid) | ||
1495 | { | 1558 | { |
1496 | struct ib_mad_send_wr_private *mad_send_wr; | 1559 | struct ib_mad_send_wr_private *mad_send_wr; |
1497 | 1560 | ||
@@ -1507,7 +1570,9 @@ find_send_req(struct ib_mad_agent_private *mad_agent_priv, | |||
1507 | */ | 1570 | */ |
1508 | list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, | 1571 | list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, |
1509 | agent_list) { | 1572 | agent_list) { |
1510 | if (mad_send_wr->tid == tid && mad_send_wr->timeout) { | 1573 | if (is_data_mad(mad_agent_priv, |
1574 | mad_send_wr->send_wr.wr.ud.mad_hdr) && | ||
1575 | mad_send_wr->tid == tid && mad_send_wr->timeout) { | ||
1511 | /* Verify request has not been canceled */ | 1576 | /* Verify request has not been canceled */ |
1512 | return (mad_send_wr->status == IB_WC_SUCCESS) ? | 1577 | return (mad_send_wr->status == IB_WC_SUCCESS) ? |
1513 | mad_send_wr : NULL; | 1578 | mad_send_wr : NULL; |
@@ -1516,43 +1581,55 @@ find_send_req(struct ib_mad_agent_private *mad_agent_priv, | |||
1516 | return NULL; | 1581 | return NULL; |
1517 | } | 1582 | } |
1518 | 1583 | ||
1584 | void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) | ||
1585 | { | ||
1586 | mad_send_wr->timeout = 0; | ||
1587 | if (mad_send_wr->refcount == 1) { | ||
1588 | list_del(&mad_send_wr->agent_list); | ||
1589 | list_add_tail(&mad_send_wr->agent_list, | ||
1590 | &mad_send_wr->mad_agent_priv->done_list); | ||
1591 | } | ||
1592 | } | ||
1593 | |||
1519 | static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | 1594 | static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, |
1520 | struct ib_mad_private *recv, | 1595 | struct ib_mad_recv_wc *mad_recv_wc) |
1521 | int solicited) | ||
1522 | { | 1596 | { |
1523 | struct ib_mad_send_wr_private *mad_send_wr; | 1597 | struct ib_mad_send_wr_private *mad_send_wr; |
1524 | struct ib_mad_send_wc mad_send_wc; | 1598 | struct ib_mad_send_wc mad_send_wc; |
1525 | unsigned long flags; | 1599 | unsigned long flags; |
1526 | 1600 | u64 tid; | |
1527 | /* Fully reassemble receive before processing */ | 1601 | |
1528 | recv = reassemble_recv(mad_agent_priv, recv); | 1602 | INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); |
1529 | if (!recv) { | 1603 | list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); |
1530 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 1604 | if (mad_agent_priv->agent.rmpp_version) { |
1531 | wake_up(&mad_agent_priv->wait); | 1605 | mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, |
1532 | return; | 1606 | mad_recv_wc); |
1607 | if (!mad_recv_wc) { | ||
1608 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | ||
1609 | wake_up(&mad_agent_priv->wait); | ||
1610 | return; | ||
1611 | } | ||
1533 | } | 1612 | } |
1534 | 1613 | ||
1535 | /* Complete corresponding request */ | 1614 | /* Complete corresponding request */ |
1536 | if (solicited) { | 1615 | if (response_mad(mad_recv_wc->recv_buf.mad)) { |
1616 | tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid; | ||
1537 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 1617 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
1538 | mad_send_wr = find_send_req(mad_agent_priv, | 1618 | mad_send_wr = ib_find_send_mad(mad_agent_priv, tid); |
1539 | recv->mad.mad.mad_hdr.tid); | ||
1540 | if (!mad_send_wr) { | 1619 | if (!mad_send_wr) { |
1541 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 1620 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
1542 | ib_free_recv_mad(&recv->header.recv_wc); | 1621 | ib_free_recv_mad(mad_recv_wc); |
1543 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 1622 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) |
1544 | wake_up(&mad_agent_priv->wait); | 1623 | wake_up(&mad_agent_priv->wait); |
1545 | return; | 1624 | return; |
1546 | } | 1625 | } |
1547 | /* Timeout = 0 means that we won't wait for a response */ | 1626 | ib_mark_mad_done(mad_send_wr); |
1548 | mad_send_wr->timeout = 0; | ||
1549 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 1627 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
1550 | 1628 | ||
1551 | /* Defined behavior is to complete response before request */ | 1629 | /* Defined behavior is to complete response before request */ |
1552 | recv->header.recv_wc.wc->wr_id = mad_send_wr->wr_id; | 1630 | mad_recv_wc->wc->wr_id = mad_send_wr->wr_id; |
1553 | mad_agent_priv->agent.recv_handler( | 1631 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, |
1554 | &mad_agent_priv->agent, | 1632 | mad_recv_wc); |
1555 | &recv->header.recv_wc); | ||
1556 | atomic_dec(&mad_agent_priv->refcount); | 1633 | atomic_dec(&mad_agent_priv->refcount); |
1557 | 1634 | ||
1558 | mad_send_wc.status = IB_WC_SUCCESS; | 1635 | mad_send_wc.status = IB_WC_SUCCESS; |
@@ -1560,9 +1637,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | |||
1560 | mad_send_wc.wr_id = mad_send_wr->wr_id; | 1637 | mad_send_wc.wr_id = mad_send_wr->wr_id; |
1561 | ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); | 1638 | ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); |
1562 | } else { | 1639 | } else { |
1563 | mad_agent_priv->agent.recv_handler( | 1640 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, |
1564 | &mad_agent_priv->agent, | 1641 | mad_recv_wc); |
1565 | &recv->header.recv_wc); | ||
1566 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 1642 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) |
1567 | wake_up(&mad_agent_priv->wait); | 1643 | wake_up(&mad_agent_priv->wait); |
1568 | } | 1644 | } |
@@ -1576,7 +1652,6 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, | |||
1576 | struct ib_mad_private *recv, *response; | 1652 | struct ib_mad_private *recv, *response; |
1577 | struct ib_mad_list_head *mad_list; | 1653 | struct ib_mad_list_head *mad_list; |
1578 | struct ib_mad_agent_private *mad_agent; | 1654 | struct ib_mad_agent_private *mad_agent; |
1579 | int solicited; | ||
1580 | 1655 | ||
1581 | response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); | 1656 | response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); |
1582 | if (!response) | 1657 | if (!response) |
@@ -1662,11 +1737,9 @@ local: | |||
1662 | } | 1737 | } |
1663 | } | 1738 | } |
1664 | 1739 | ||
1665 | /* Determine corresponding MAD agent for incoming receive MAD */ | 1740 | mad_agent = find_mad_agent(port_priv, &recv->mad.mad); |
1666 | solicited = solicited_mad(&recv->mad.mad); | ||
1667 | mad_agent = find_mad_agent(port_priv, &recv->mad.mad, solicited); | ||
1668 | if (mad_agent) { | 1741 | if (mad_agent) { |
1669 | ib_mad_complete_recv(mad_agent, recv, solicited); | 1742 | ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); |
1670 | /* | 1743 | /* |
1671 | * recv is freed up in error cases in ib_mad_complete_recv | 1744 | * recv is freed up in error cases in ib_mad_complete_recv |
1672 | * or via recv_handler in ib_mad_complete_recv() | 1745 | * or via recv_handler in ib_mad_complete_recv() |
@@ -1710,26 +1783,31 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) | |||
1710 | } | 1783 | } |
1711 | } | 1784 | } |
1712 | 1785 | ||
1713 | static void wait_for_response(struct ib_mad_agent_private *mad_agent_priv, | 1786 | static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) |
1714 | struct ib_mad_send_wr_private *mad_send_wr ) | ||
1715 | { | 1787 | { |
1788 | struct ib_mad_agent_private *mad_agent_priv; | ||
1716 | struct ib_mad_send_wr_private *temp_mad_send_wr; | 1789 | struct ib_mad_send_wr_private *temp_mad_send_wr; |
1717 | struct list_head *list_item; | 1790 | struct list_head *list_item; |
1718 | unsigned long delay; | 1791 | unsigned long delay; |
1719 | 1792 | ||
1793 | mad_agent_priv = mad_send_wr->mad_agent_priv; | ||
1720 | list_del(&mad_send_wr->agent_list); | 1794 | list_del(&mad_send_wr->agent_list); |
1721 | 1795 | ||
1722 | delay = mad_send_wr->timeout; | 1796 | delay = mad_send_wr->timeout; |
1723 | mad_send_wr->timeout += jiffies; | 1797 | mad_send_wr->timeout += jiffies; |
1724 | 1798 | ||
1725 | list_for_each_prev(list_item, &mad_agent_priv->wait_list) { | 1799 | if (delay) { |
1726 | temp_mad_send_wr = list_entry(list_item, | 1800 | list_for_each_prev(list_item, &mad_agent_priv->wait_list) { |
1727 | struct ib_mad_send_wr_private, | 1801 | temp_mad_send_wr = list_entry(list_item, |
1728 | agent_list); | 1802 | struct ib_mad_send_wr_private, |
1729 | if (time_after(mad_send_wr->timeout, | 1803 | agent_list); |
1730 | temp_mad_send_wr->timeout)) | 1804 | if (time_after(mad_send_wr->timeout, |
1731 | break; | 1805 | temp_mad_send_wr->timeout)) |
1806 | break; | ||
1807 | } | ||
1732 | } | 1808 | } |
1809 | else | ||
1810 | list_item = &mad_agent_priv->wait_list; | ||
1733 | list_add(&mad_send_wr->agent_list, list_item); | 1811 | list_add(&mad_send_wr->agent_list, list_item); |
1734 | 1812 | ||
1735 | /* Reschedule a work item if we have a shorter timeout */ | 1813 | /* Reschedule a work item if we have a shorter timeout */ |
@@ -1740,19 +1818,32 @@ static void wait_for_response(struct ib_mad_agent_private *mad_agent_priv, | |||
1740 | } | 1818 | } |
1741 | } | 1819 | } |
1742 | 1820 | ||
1821 | void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, | ||
1822 | int timeout_ms) | ||
1823 | { | ||
1824 | mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); | ||
1825 | wait_for_response(mad_send_wr); | ||
1826 | } | ||
1827 | |||
1743 | /* | 1828 | /* |
1744 | * Process a send work completion | 1829 | * Process a send work completion |
1745 | */ | 1830 | */ |
1746 | static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, | 1831 | void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, |
1747 | struct ib_mad_send_wc *mad_send_wc) | 1832 | struct ib_mad_send_wc *mad_send_wc) |
1748 | { | 1833 | { |
1749 | struct ib_mad_agent_private *mad_agent_priv; | 1834 | struct ib_mad_agent_private *mad_agent_priv; |
1750 | unsigned long flags; | 1835 | unsigned long flags; |
1836 | int ret; | ||
1751 | 1837 | ||
1752 | mad_agent_priv = container_of(mad_send_wr->agent, | 1838 | mad_agent_priv = mad_send_wr->mad_agent_priv; |
1753 | struct ib_mad_agent_private, agent); | ||
1754 | |||
1755 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 1839 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
1840 | if (mad_agent_priv->agent.rmpp_version) { | ||
1841 | ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); | ||
1842 | if (ret == IB_RMPP_RESULT_CONSUMED) | ||
1843 | goto done; | ||
1844 | } else | ||
1845 | ret = IB_RMPP_RESULT_UNHANDLED; | ||
1846 | |||
1756 | if (mad_send_wc->status != IB_WC_SUCCESS && | 1847 | if (mad_send_wc->status != IB_WC_SUCCESS && |
1757 | mad_send_wr->status == IB_WC_SUCCESS) { | 1848 | mad_send_wr->status == IB_WC_SUCCESS) { |
1758 | mad_send_wr->status = mad_send_wc->status; | 1849 | mad_send_wr->status = mad_send_wc->status; |
@@ -1762,10 +1853,9 @@ static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, | |||
1762 | if (--mad_send_wr->refcount > 0) { | 1853 | if (--mad_send_wr->refcount > 0) { |
1763 | if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && | 1854 | if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && |
1764 | mad_send_wr->status == IB_WC_SUCCESS) { | 1855 | mad_send_wr->status == IB_WC_SUCCESS) { |
1765 | wait_for_response(mad_agent_priv, mad_send_wr); | 1856 | wait_for_response(mad_send_wr); |
1766 | } | 1857 | } |
1767 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 1858 | goto done; |
1768 | return; | ||
1769 | } | 1859 | } |
1770 | 1860 | ||
1771 | /* Remove send from MAD agent and notify client of completion */ | 1861 | /* Remove send from MAD agent and notify client of completion */ |
@@ -1775,14 +1865,18 @@ static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, | |||
1775 | 1865 | ||
1776 | if (mad_send_wr->status != IB_WC_SUCCESS ) | 1866 | if (mad_send_wr->status != IB_WC_SUCCESS ) |
1777 | mad_send_wc->status = mad_send_wr->status; | 1867 | mad_send_wc->status = mad_send_wr->status; |
1778 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | 1868 | if (ret != IB_RMPP_RESULT_INTERNAL) |
1779 | mad_send_wc); | 1869 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, |
1870 | mad_send_wc); | ||
1780 | 1871 | ||
1781 | /* Release reference on agent taken when sending */ | 1872 | /* Release reference on agent taken when sending */ |
1782 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 1873 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) |
1783 | wake_up(&mad_agent_priv->wait); | 1874 | wake_up(&mad_agent_priv->wait); |
1784 | 1875 | ||
1785 | kfree(mad_send_wr); | 1876 | kfree(mad_send_wr); |
1877 | return; | ||
1878 | done: | ||
1879 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||
1786 | } | 1880 | } |
1787 | 1881 | ||
1788 | static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, | 1882 | static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, |
@@ -1961,6 +2055,8 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) | |||
1961 | 2055 | ||
1962 | /* Empty wait list to prevent receives from finding a request */ | 2056 | /* Empty wait list to prevent receives from finding a request */ |
1963 | list_splice_init(&mad_agent_priv->wait_list, &cancel_list); | 2057 | list_splice_init(&mad_agent_priv->wait_list, &cancel_list); |
2058 | /* Empty local completion list as well */ | ||
2059 | list_splice_init(&mad_agent_priv->local_list, &cancel_list); | ||
1964 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 2060 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
1965 | 2061 | ||
1966 | /* Report all cancelled requests */ | 2062 | /* Report all cancelled requests */ |
@@ -1980,8 +2076,7 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) | |||
1980 | } | 2076 | } |
1981 | 2077 | ||
1982 | static struct ib_mad_send_wr_private* | 2078 | static struct ib_mad_send_wr_private* |
1983 | find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, | 2079 | find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, u64 wr_id) |
1984 | u64 wr_id) | ||
1985 | { | 2080 | { |
1986 | struct ib_mad_send_wr_private *mad_send_wr; | 2081 | struct ib_mad_send_wr_private *mad_send_wr; |
1987 | 2082 | ||
@@ -1993,79 +2088,50 @@ find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, | |||
1993 | 2088 | ||
1994 | list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, | 2089 | list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, |
1995 | agent_list) { | 2090 | agent_list) { |
1996 | if (mad_send_wr->wr_id == wr_id) | 2091 | if (is_data_mad(mad_agent_priv, |
2092 | mad_send_wr->send_wr.wr.ud.mad_hdr) && | ||
2093 | mad_send_wr->wr_id == wr_id) | ||
1997 | return mad_send_wr; | 2094 | return mad_send_wr; |
1998 | } | 2095 | } |
1999 | return NULL; | 2096 | return NULL; |
2000 | } | 2097 | } |
2001 | 2098 | ||
2002 | void cancel_sends(void *data) | 2099 | int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms) |
2003 | { | ||
2004 | struct ib_mad_agent_private *mad_agent_priv; | ||
2005 | struct ib_mad_send_wr_private *mad_send_wr; | ||
2006 | struct ib_mad_send_wc mad_send_wc; | ||
2007 | unsigned long flags; | ||
2008 | |||
2009 | mad_agent_priv = data; | ||
2010 | |||
2011 | mad_send_wc.status = IB_WC_WR_FLUSH_ERR; | ||
2012 | mad_send_wc.vendor_err = 0; | ||
2013 | |||
2014 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | ||
2015 | while (!list_empty(&mad_agent_priv->canceled_list)) { | ||
2016 | mad_send_wr = list_entry(mad_agent_priv->canceled_list.next, | ||
2017 | struct ib_mad_send_wr_private, | ||
2018 | agent_list); | ||
2019 | |||
2020 | list_del(&mad_send_wr->agent_list); | ||
2021 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||
2022 | |||
2023 | mad_send_wc.wr_id = mad_send_wr->wr_id; | ||
2024 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | ||
2025 | &mad_send_wc); | ||
2026 | |||
2027 | kfree(mad_send_wr); | ||
2028 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | ||
2029 | wake_up(&mad_agent_priv->wait); | ||
2030 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | ||
2031 | } | ||
2032 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||
2033 | } | ||
2034 | |||
2035 | void ib_cancel_mad(struct ib_mad_agent *mad_agent, | ||
2036 | u64 wr_id) | ||
2037 | { | 2100 | { |
2038 | struct ib_mad_agent_private *mad_agent_priv; | 2101 | struct ib_mad_agent_private *mad_agent_priv; |
2039 | struct ib_mad_send_wr_private *mad_send_wr; | 2102 | struct ib_mad_send_wr_private *mad_send_wr; |
2040 | unsigned long flags; | 2103 | unsigned long flags; |
2104 | int active; | ||
2041 | 2105 | ||
2042 | mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, | 2106 | mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, |
2043 | agent); | 2107 | agent); |
2044 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 2108 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
2045 | mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id); | 2109 | mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id); |
2046 | if (!mad_send_wr) { | 2110 | if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { |
2047 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 2111 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
2048 | goto out; | 2112 | return -EINVAL; |
2049 | } | 2113 | } |
2050 | 2114 | ||
2051 | if (mad_send_wr->status == IB_WC_SUCCESS) | 2115 | active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); |
2052 | mad_send_wr->refcount -= (mad_send_wr->timeout > 0); | 2116 | if (!timeout_ms) { |
2053 | |||
2054 | if (mad_send_wr->refcount != 0) { | ||
2055 | mad_send_wr->status = IB_WC_WR_FLUSH_ERR; | 2117 | mad_send_wr->status = IB_WC_WR_FLUSH_ERR; |
2056 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 2118 | mad_send_wr->refcount -= (mad_send_wr->timeout > 0); |
2057 | goto out; | ||
2058 | } | 2119 | } |
2059 | 2120 | ||
2060 | list_del(&mad_send_wr->agent_list); | 2121 | mad_send_wr->send_wr.wr.ud.timeout_ms = timeout_ms; |
2061 | list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->canceled_list); | 2122 | if (active) |
2062 | adjust_timeout(mad_agent_priv); | 2123 | mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); |
2124 | else | ||
2125 | ib_reset_mad_timeout(mad_send_wr, timeout_ms); | ||
2126 | |||
2063 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 2127 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
2128 | return 0; | ||
2129 | } | ||
2130 | EXPORT_SYMBOL(ib_modify_mad); | ||
2064 | 2131 | ||
2065 | queue_work(mad_agent_priv->qp_info->port_priv->wq, | 2132 | void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id) |
2066 | &mad_agent_priv->canceled_work); | 2133 | { |
2067 | out: | 2134 | ib_modify_mad(mad_agent, wr_id, 0); |
2068 | return; | ||
2069 | } | 2135 | } |
2070 | EXPORT_SYMBOL(ib_cancel_mad); | 2136 | EXPORT_SYMBOL(ib_cancel_mad); |
2071 | 2137 | ||
@@ -2075,6 +2141,7 @@ static void local_completions(void *data) | |||
2075 | struct ib_mad_local_private *local; | 2141 | struct ib_mad_local_private *local; |
2076 | struct ib_mad_agent_private *recv_mad_agent; | 2142 | struct ib_mad_agent_private *recv_mad_agent; |
2077 | unsigned long flags; | 2143 | unsigned long flags; |
2144 | int recv = 0; | ||
2078 | struct ib_wc wc; | 2145 | struct ib_wc wc; |
2079 | struct ib_mad_send_wc mad_send_wc; | 2146 | struct ib_mad_send_wc mad_send_wc; |
2080 | 2147 | ||
@@ -2090,10 +2157,10 @@ static void local_completions(void *data) | |||
2090 | recv_mad_agent = local->recv_mad_agent; | 2157 | recv_mad_agent = local->recv_mad_agent; |
2091 | if (!recv_mad_agent) { | 2158 | if (!recv_mad_agent) { |
2092 | printk(KERN_ERR PFX "No receive MAD agent for local completion\n"); | 2159 | printk(KERN_ERR PFX "No receive MAD agent for local completion\n"); |
2093 | kmem_cache_free(ib_mad_cache, local->mad_priv); | ||
2094 | goto local_send_completion; | 2160 | goto local_send_completion; |
2095 | } | 2161 | } |
2096 | 2162 | ||
2163 | recv = 1; | ||
2097 | /* | 2164 | /* |
2098 | * Defined behavior is to complete response | 2165 | * Defined behavior is to complete response |
2099 | * before request | 2166 | * before request |
@@ -2105,7 +2172,9 @@ static void local_completions(void *data) | |||
2105 | local->mad_priv->header.recv_wc.wc = &wc; | 2172 | local->mad_priv->header.recv_wc.wc = &wc; |
2106 | local->mad_priv->header.recv_wc.mad_len = | 2173 | local->mad_priv->header.recv_wc.mad_len = |
2107 | sizeof(struct ib_mad); | 2174 | sizeof(struct ib_mad); |
2108 | INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.recv_buf.list); | 2175 | INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); |
2176 | list_add(&local->mad_priv->header.recv_wc.recv_buf.list, | ||
2177 | &local->mad_priv->header.recv_wc.rmpp_list); | ||
2109 | local->mad_priv->header.recv_wc.recv_buf.grh = NULL; | 2178 | local->mad_priv->header.recv_wc.recv_buf.grh = NULL; |
2110 | local->mad_priv->header.recv_wc.recv_buf.mad = | 2179 | local->mad_priv->header.recv_wc.recv_buf.mad = |
2111 | &local->mad_priv->mad.mad; | 2180 | &local->mad_priv->mad.mad; |
@@ -2136,11 +2205,47 @@ local_send_completion: | |||
2136 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 2205 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
2137 | list_del(&local->completion_list); | 2206 | list_del(&local->completion_list); |
2138 | atomic_dec(&mad_agent_priv->refcount); | 2207 | atomic_dec(&mad_agent_priv->refcount); |
2208 | if (!recv) | ||
2209 | kmem_cache_free(ib_mad_cache, local->mad_priv); | ||
2139 | kfree(local); | 2210 | kfree(local); |
2140 | } | 2211 | } |
2141 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 2212 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
2142 | } | 2213 | } |
2143 | 2214 | ||
2215 | static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) | ||
2216 | { | ||
2217 | int ret; | ||
2218 | |||
2219 | if (!mad_send_wr->retries--) | ||
2220 | return -ETIMEDOUT; | ||
2221 | |||
2222 | mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_wr. | ||
2223 | wr.ud.timeout_ms); | ||
2224 | |||
2225 | if (mad_send_wr->mad_agent_priv->agent.rmpp_version) { | ||
2226 | ret = ib_retry_rmpp(mad_send_wr); | ||
2227 | switch (ret) { | ||
2228 | case IB_RMPP_RESULT_UNHANDLED: | ||
2229 | ret = ib_send_mad(mad_send_wr); | ||
2230 | break; | ||
2231 | case IB_RMPP_RESULT_CONSUMED: | ||
2232 | ret = 0; | ||
2233 | break; | ||
2234 | default: | ||
2235 | ret = -ECOMM; | ||
2236 | break; | ||
2237 | } | ||
2238 | } else | ||
2239 | ret = ib_send_mad(mad_send_wr); | ||
2240 | |||
2241 | if (!ret) { | ||
2242 | mad_send_wr->refcount++; | ||
2243 | list_add_tail(&mad_send_wr->agent_list, | ||
2244 | &mad_send_wr->mad_agent_priv->send_list); | ||
2245 | } | ||
2246 | return ret; | ||
2247 | } | ||
2248 | |||
2144 | static void timeout_sends(void *data) | 2249 | static void timeout_sends(void *data) |
2145 | { | 2250 | { |
2146 | struct ib_mad_agent_private *mad_agent_priv; | 2251 | struct ib_mad_agent_private *mad_agent_priv; |
@@ -2149,8 +2254,6 @@ static void timeout_sends(void *data) | |||
2149 | unsigned long flags, delay; | 2254 | unsigned long flags, delay; |
2150 | 2255 | ||
2151 | mad_agent_priv = (struct ib_mad_agent_private *)data; | 2256 | mad_agent_priv = (struct ib_mad_agent_private *)data; |
2152 | |||
2153 | mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; | ||
2154 | mad_send_wc.vendor_err = 0; | 2257 | mad_send_wc.vendor_err = 0; |
2155 | 2258 | ||
2156 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 2259 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
@@ -2170,8 +2273,16 @@ static void timeout_sends(void *data) | |||
2170 | } | 2273 | } |
2171 | 2274 | ||
2172 | list_del(&mad_send_wr->agent_list); | 2275 | list_del(&mad_send_wr->agent_list); |
2276 | if (mad_send_wr->status == IB_WC_SUCCESS && | ||
2277 | !retry_send(mad_send_wr)) | ||
2278 | continue; | ||
2279 | |||
2173 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 2280 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
2174 | 2281 | ||
2282 | if (mad_send_wr->status == IB_WC_SUCCESS) | ||
2283 | mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; | ||
2284 | else | ||
2285 | mad_send_wc.status = mad_send_wr->status; | ||
2175 | mad_send_wc.wr_id = mad_send_wr->wr_id; | 2286 | mad_send_wc.wr_id = mad_send_wr->wr_id; |
2176 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | 2287 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, |
2177 | &mad_send_wc); | 2288 | &mad_send_wc); |
@@ -2447,14 +2558,6 @@ static int ib_mad_port_open(struct ib_device *device, | |||
2447 | unsigned long flags; | 2558 | unsigned long flags; |
2448 | char name[sizeof "ib_mad123"]; | 2559 | char name[sizeof "ib_mad123"]; |
2449 | 2560 | ||
2450 | /* First, check if port already open at MAD layer */ | ||
2451 | port_priv = ib_get_mad_port(device, port_num); | ||
2452 | if (port_priv) { | ||
2453 | printk(KERN_DEBUG PFX "%s port %d already open\n", | ||
2454 | device->name, port_num); | ||
2455 | return 0; | ||
2456 | } | ||
2457 | |||
2458 | /* Create new device info */ | 2561 | /* Create new device info */ |
2459 | port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL); | 2562 | port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL); |
2460 | if (!port_priv) { | 2563 | if (!port_priv) { |
@@ -2579,7 +2682,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num) | |||
2579 | 2682 | ||
2580 | static void ib_mad_init_device(struct ib_device *device) | 2683 | static void ib_mad_init_device(struct ib_device *device) |
2581 | { | 2684 | { |
2582 | int ret, num_ports, cur_port, i, ret2; | 2685 | int num_ports, cur_port, i; |
2583 | 2686 | ||
2584 | if (device->node_type == IB_NODE_SWITCH) { | 2687 | if (device->node_type == IB_NODE_SWITCH) { |
2585 | num_ports = 1; | 2688 | num_ports = 1; |
@@ -2589,47 +2692,37 @@ static void ib_mad_init_device(struct ib_device *device) | |||
2589 | cur_port = 1; | 2692 | cur_port = 1; |
2590 | } | 2693 | } |
2591 | for (i = 0; i < num_ports; i++, cur_port++) { | 2694 | for (i = 0; i < num_ports; i++, cur_port++) { |
2592 | ret = ib_mad_port_open(device, cur_port); | 2695 | if (ib_mad_port_open(device, cur_port)) { |
2593 | if (ret) { | ||
2594 | printk(KERN_ERR PFX "Couldn't open %s port %d\n", | 2696 | printk(KERN_ERR PFX "Couldn't open %s port %d\n", |
2595 | device->name, cur_port); | 2697 | device->name, cur_port); |
2596 | goto error_device_open; | 2698 | goto error_device_open; |
2597 | } | 2699 | } |
2598 | ret = ib_agent_port_open(device, cur_port); | 2700 | if (ib_agent_port_open(device, cur_port)) { |
2599 | if (ret) { | ||
2600 | printk(KERN_ERR PFX "Couldn't open %s port %d " | 2701 | printk(KERN_ERR PFX "Couldn't open %s port %d " |
2601 | "for agents\n", | 2702 | "for agents\n", |
2602 | device->name, cur_port); | 2703 | device->name, cur_port); |
2603 | goto error_device_open; | 2704 | goto error_device_open; |
2604 | } | 2705 | } |
2605 | } | 2706 | } |
2606 | 2707 | return; | |
2607 | goto error_device_query; | ||
2608 | 2708 | ||
2609 | error_device_open: | 2709 | error_device_open: |
2610 | while (i > 0) { | 2710 | while (i > 0) { |
2611 | cur_port--; | 2711 | cur_port--; |
2612 | ret2 = ib_agent_port_close(device, cur_port); | 2712 | if (ib_agent_port_close(device, cur_port)) |
2613 | if (ret2) { | ||
2614 | printk(KERN_ERR PFX "Couldn't close %s port %d " | 2713 | printk(KERN_ERR PFX "Couldn't close %s port %d " |
2615 | "for agents\n", | 2714 | "for agents\n", |
2616 | device->name, cur_port); | 2715 | device->name, cur_port); |
2617 | } | 2716 | if (ib_mad_port_close(device, cur_port)) |
2618 | ret2 = ib_mad_port_close(device, cur_port); | ||
2619 | if (ret2) { | ||
2620 | printk(KERN_ERR PFX "Couldn't close %s port %d\n", | 2717 | printk(KERN_ERR PFX "Couldn't close %s port %d\n", |
2621 | device->name, cur_port); | 2718 | device->name, cur_port); |
2622 | } | ||
2623 | i--; | 2719 | i--; |
2624 | } | 2720 | } |
2625 | |||
2626 | error_device_query: | ||
2627 | return; | ||
2628 | } | 2721 | } |
2629 | 2722 | ||
2630 | static void ib_mad_remove_device(struct ib_device *device) | 2723 | static void ib_mad_remove_device(struct ib_device *device) |
2631 | { | 2724 | { |
2632 | int ret = 0, i, num_ports, cur_port, ret2; | 2725 | int i, num_ports, cur_port; |
2633 | 2726 | ||
2634 | if (device->node_type == IB_NODE_SWITCH) { | 2727 | if (device->node_type == IB_NODE_SWITCH) { |
2635 | num_ports = 1; | 2728 | num_ports = 1; |
@@ -2639,21 +2732,13 @@ static void ib_mad_remove_device(struct ib_device *device) | |||
2639 | cur_port = 1; | 2732 | cur_port = 1; |
2640 | } | 2733 | } |
2641 | for (i = 0; i < num_ports; i++, cur_port++) { | 2734 | for (i = 0; i < num_ports; i++, cur_port++) { |
2642 | ret2 = ib_agent_port_close(device, cur_port); | 2735 | if (ib_agent_port_close(device, cur_port)) |
2643 | if (ret2) { | ||
2644 | printk(KERN_ERR PFX "Couldn't close %s port %d " | 2736 | printk(KERN_ERR PFX "Couldn't close %s port %d " |
2645 | "for agents\n", | 2737 | "for agents\n", |
2646 | device->name, cur_port); | 2738 | device->name, cur_port); |
2647 | if (!ret) | 2739 | if (ib_mad_port_close(device, cur_port)) |
2648 | ret = ret2; | ||
2649 | } | ||
2650 | ret2 = ib_mad_port_close(device, cur_port); | ||
2651 | if (ret2) { | ||
2652 | printk(KERN_ERR PFX "Couldn't close %s port %d\n", | 2740 | printk(KERN_ERR PFX "Couldn't close %s port %d\n", |
2653 | device->name, cur_port); | 2741 | device->name, cur_port); |
2654 | if (!ret) | ||
2655 | ret = ret2; | ||
2656 | } | ||
2657 | } | 2742 | } |
2658 | } | 2743 | } |
2659 | 2744 | ||
@@ -2709,3 +2794,4 @@ static void __exit ib_mad_cleanup_module(void) | |||
2709 | 2794 | ||
2710 | module_init(ib_mad_init_module); | 2795 | module_init(ib_mad_init_module); |
2711 | module_exit(ib_mad_cleanup_module); | 2796 | module_exit(ib_mad_cleanup_module); |
2797 | |||
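
For reference, a minimal sketch of a caller of the send-buffer interface introduced by this change (ib_create_send_mad(), ib_free_send_mad(), and the retries/timeout_ms fields carried in the UD work request). Anything not visible in the diff above, such as the include path, the ib_post_send_mad() prototype, and the concrete lengths, is an assumption for illustration only.

/*
 * Illustrative sketch only: not part of this commit.  Shows how a
 * client might drive the send buffer API added above.  The include
 * path, the ib_post_send_mad() prototype and the header/data sizes
 * are assumptions, not facts taken from this diff.
 */
#include <linux/err.h>
#include <linux/gfp.h>
#include <rdma/ib_mad.h>	/* assumed location of the MAD client API */

static int example_send_one_mad(struct ib_mad_agent *agent, struct ib_ah *ah,
				u32 remote_qpn, u16 pkey_index)
{
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_wr;
	int ret;

	/*
	 * rmpp_active may only be set for agents registered with a
	 * non-zero rmpp_version; 24/200 are placeholder lengths that
	 * keep the buffer within sizeof(struct ib_mad).
	 */
	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, ah,
				 0 /* rmpp_active */, 24 /* hdr_len */,
				 200 /* data_len */, GFP_KERNEL);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	/* Fill in msg->mad->mad_hdr and the class payload here. */

	/* Timeout and retry count now live in the UD work request. */
	msg->send_wr.wr.ud.timeout_ms = 100;
	msg->send_wr.wr.ud.retries = 3;

	ret = ib_post_send_mad(agent, &msg->send_wr, &bad_wr);
	if (ret)
		ib_free_send_mad(msg);	/* assumption: caller keeps ownership on error */
	return ret;
}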