path: root/drivers/infiniband/core/sa_query.c
author		Sean Hefty <sean.hefty@intel.com>	2005-10-25 13:51:39 -0400
committer	Roland Dreier <rolandd@cisco.com>	2005-10-25 13:51:39 -0400
commit		34816ad98efe4d47ffd858a0345321f9d85d9420 (patch)
tree		8a5ed6a9b80e667c4c02d9993711ced06d158555 /drivers/infiniband/core/sa_query.c
parent		ae7971a7706384ca373fb7e212fe195698e6c5a1 (diff)
[IB] Fix MAD layer DMA mappings to avoid touching data buffer once mapped
The MAD layer was violating the DMA API by touching data buffers used
for sends after the DMA mapping was done.  This causes problems on
non-cache-coherent architectures, because the device doing DMA won't
see updates to the payload buffers that exist only in the CPU cache.

Fix this by having all MAD consumers use ib_create_send_mad() to
allocate their send buffers, and moving the DMA mapping into the MAD
layer so it can be done just before calling send (and after any
modifications of the send buffer by the MAD layer).

Tested on a non-cache-coherent PowerPC 440SPe system.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
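The consumer-side pattern this patch moves everyone to looks roughly like
the minimal sketch below.  It is an illustration, not code from the patch:
the helper name example_send_mad() and its parameters are hypothetical, an
already-registered mad_agent and a valid address handle are assumed, and
the header/data sizes match the SA queries in this diff.  The point is
that the consumer only touches the buffer before posting it; the MAD layer
now performs the DMA mapping itself, just before the send goes on the wire.

/*
 * Hypothetical consumer sketch (not part of this patch): allocate a
 * send buffer with ib_create_send_mad(), fill it in, and post it.
 * The consumer never touches the buffer after posting, so the MAD
 * layer is free to DMA-map it at the last moment.
 */
static int example_send_mad(struct ib_mad_agent *agent, struct ib_ah *ah,
                            int timeout_ms, gfp_t gfp_mask)
{
        struct ib_mad_send_buf *mad_buf;
        struct ib_sa_mad *mad;
        int ret;

        mad_buf = ib_create_send_mad(agent, 1, 0, 0, IB_MGMT_SA_HDR,
                                     IB_MGMT_SA_DATA, gfp_mask);
        if (!mad_buf)
                return -ENOMEM;

        mad = mad_buf->mad;
        /* ... fill in mad->mad_hdr, mad->sa_hdr and mad->data here ... */

        mad_buf->timeout_ms = timeout_ms;
        mad_buf->ah         = ah;

        /* No dma_map_single() here any more: ib_post_send_mad() maps
         * the buffer inside the MAD layer.  On failure the buffer is
         * still ours to free; on success the send completion handler
         * is responsible for freeing it. */
        ret = ib_post_send_mad(mad_buf, NULL);
        if (ret)
                ib_free_send_mad(mad_buf);

        return ret;
}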
Diffstat (limited to 'drivers/infiniband/core/sa_query.c')
-rw-r--r--	drivers/infiniband/core/sa_query.c	239
1 files changed, 114 insertions, 125 deletions
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 0e5ef97f76..89ce9dc210 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -73,11 +73,10 @@ struct ib_sa_device {
 struct ib_sa_query {
         void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
         void (*release)(struct ib_sa_query *);
         struct ib_sa_port      *port;
-        struct ib_sa_mad       *mad;
+        struct ib_mad_send_buf *mad_buf;
         struct ib_sa_sm_ah     *sm_ah;
-        DECLARE_PCI_UNMAP_ADDR(mapping)
         int                     id;
 };
 
 struct ib_sa_service_query {
@@ -426,6 +425,7 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
 {
         unsigned long flags;
         struct ib_mad_agent *agent;
+        struct ib_mad_send_buf *mad_buf;
 
         spin_lock_irqsave(&idr_lock, flags);
         if (idr_find(&query_idr, id) != query) {
@@ -433,9 +433,10 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
                 return;
         }
         agent = query->port->agent;
+        mad_buf = query->mad_buf;
         spin_unlock_irqrestore(&idr_lock, flags);
 
-        ib_cancel_mad(agent, id);
+        ib_cancel_mad(agent, mad_buf);
 }
 EXPORT_SYMBOL(ib_sa_cancel_query);
 
@@ -457,71 +458,46 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
 
 static int send_mad(struct ib_sa_query *query, int timeout_ms)
 {
-        struct ib_sa_port *port = query->port;
         unsigned long flags;
-        int ret;
-        struct ib_sge      gather_list;
-        struct ib_send_wr *bad_wr, wr = {
-                .opcode      = IB_WR_SEND,
-                .sg_list     = &gather_list,
-                .num_sge     = 1,
-                .send_flags  = IB_SEND_SIGNALED,
-                .wr          = {
-                        .ud = {
-                                .mad_hdr     = &query->mad->mad_hdr,
-                                .remote_qpn  = 1,
-                                .remote_qkey = IB_QP1_QKEY,
-                                .timeout_ms  = timeout_ms,
-                        }
-                }
-        };
+        int ret, id;
 
 retry:
         if (!idr_pre_get(&query_idr, GFP_ATOMIC))
                 return -ENOMEM;
         spin_lock_irqsave(&idr_lock, flags);
-        ret = idr_get_new(&query_idr, query, &query->id);
+        ret = idr_get_new(&query_idr, query, &id);
         spin_unlock_irqrestore(&idr_lock, flags);
         if (ret == -EAGAIN)
                 goto retry;
         if (ret)
                 return ret;
 
-        wr.wr_id = query->id;
+        query->mad_buf->timeout_ms = timeout_ms;
+        query->mad_buf->context[0] = query;
+        query->id = id;
 
-        spin_lock_irqsave(&port->ah_lock, flags);
-        kref_get(&port->sm_ah->ref);
-        query->sm_ah = port->sm_ah;
-        wr.wr.ud.ah  = port->sm_ah->ah;
-        spin_unlock_irqrestore(&port->ah_lock, flags);
+        spin_lock_irqsave(&query->port->ah_lock, flags);
+        kref_get(&query->port->sm_ah->ref);
+        query->sm_ah = query->port->sm_ah;
+        spin_unlock_irqrestore(&query->port->ah_lock, flags);
 
-        gather_list.addr   = dma_map_single(port->agent->device->dma_device,
-                                            query->mad,
-                                            sizeof (struct ib_sa_mad),
-                                            DMA_TO_DEVICE);
-        gather_list.length = sizeof (struct ib_sa_mad);
-        gather_list.lkey   = port->agent->mr->lkey;
-        pci_unmap_addr_set(query, mapping, gather_list.addr);
+        query->mad_buf->ah = query->sm_ah->ah;
 
-        ret = ib_post_send_mad(port->agent, &wr, &bad_wr);
+        ret = ib_post_send_mad(query->mad_buf, NULL);
         if (ret) {
-                dma_unmap_single(port->agent->device->dma_device,
-                                 pci_unmap_addr(query, mapping),
-                                 sizeof (struct ib_sa_mad),
-                                 DMA_TO_DEVICE);
-                kref_put(&query->sm_ah->ref, free_sm_ah);
                 spin_lock_irqsave(&idr_lock, flags);
-                idr_remove(&query_idr, query->id);
+                idr_remove(&query_idr, id);
                 spin_unlock_irqrestore(&idr_lock, flags);
+
+                kref_put(&query->sm_ah->ref, free_sm_ah);
         }
 
         /*
          * It's not safe to dereference query any more, because the
          * send may already have completed and freed the query in
-         * another context. So use wr.wr_id, which has a copy of the
-         * query's id.
+         * another context.
          */
-        return ret ? ret : wr.wr_id;
+        return ret ? ret : id;
 }
 
 static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
@@ -543,7 +519,6 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
 
 static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
 {
-        kfree(sa_query->mad);
         kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
 }
 
@@ -585,6 +560,7 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
         struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
         struct ib_sa_port   *port;
         struct ib_mad_agent *agent;
+        struct ib_sa_mad *mad;
         int ret;
 
         if (!sa_dev)
@@ -596,36 +572,44 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
         query = kmalloc(sizeof *query, gfp_mask);
         if (!query)
                 return -ENOMEM;
-        query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
-        if (!query->sa_query.mad) {
-                kfree(query);
-                return -ENOMEM;
+
+        query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
+                                                     0, IB_MGMT_SA_HDR,
+                                                     IB_MGMT_SA_DATA, gfp_mask);
+        if (!query->sa_query.mad_buf) {
+                ret = -ENOMEM;
+                goto err1;
         }
 
         query->callback = callback;
         query->context  = context;
 
-        init_mad(query->sa_query.mad, agent);
+        mad = query->sa_query.mad_buf->mad;
+        init_mad(mad, agent);
 
         query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
         query->sa_query.release  = ib_sa_path_rec_release;
         query->sa_query.port     = port;
-        query->sa_query.mad->mad_hdr.method   = IB_MGMT_METHOD_GET;
-        query->sa_query.mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_PATH_REC);
-        query->sa_query.mad->sa_hdr.comp_mask = comp_mask;
+        mad->mad_hdr.method   = IB_MGMT_METHOD_GET;
+        mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_PATH_REC);
+        mad->sa_hdr.comp_mask = comp_mask;
 
-        ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
-                rec, query->sa_query.mad->data);
+        ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);
 
         *sa_query = &query->sa_query;
 
         ret = send_mad(&query->sa_query, timeout_ms);
-        if (ret < 0) {
-                *sa_query = NULL;
-                kfree(query->sa_query.mad);
-                kfree(query);
-        }
+        if (ret < 0)
+                goto err2;
+
+        return ret;
+
+err2:
+        *sa_query = NULL;
+        ib_free_send_mad(query->sa_query.mad_buf);
 
+err1:
+        kfree(query);
         return ret;
 }
 EXPORT_SYMBOL(ib_sa_path_rec_get);
@@ -649,7 +633,6 @@ static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
 
 static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
 {
-        kfree(sa_query->mad);
         kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
 }
 
@@ -693,6 +676,7 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
         struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
         struct ib_sa_port   *port;
         struct ib_mad_agent *agent;
+        struct ib_sa_mad *mad;
         int ret;
 
         if (!sa_dev)
@@ -709,37 +693,45 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
         query = kmalloc(sizeof *query, gfp_mask);
         if (!query)
                 return -ENOMEM;
-        query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
-        if (!query->sa_query.mad) {
-                kfree(query);
-                return -ENOMEM;
+
+        query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
+                                                     0, IB_MGMT_SA_HDR,
+                                                     IB_MGMT_SA_DATA, gfp_mask);
+        if (!query->sa_query.mad_buf) {
+                ret = -ENOMEM;
+                goto err1;
         }
 
         query->callback = callback;
         query->context  = context;
 
-        init_mad(query->sa_query.mad, agent);
+        mad = query->sa_query.mad_buf->mad;
+        init_mad(mad, agent);
 
         query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
         query->sa_query.release  = ib_sa_service_rec_release;
         query->sa_query.port     = port;
-        query->sa_query.mad->mad_hdr.method   = method;
-        query->sa_query.mad->mad_hdr.attr_id  =
-                cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
-        query->sa_query.mad->sa_hdr.comp_mask = comp_mask;
+        mad->mad_hdr.method   = method;
+        mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
+        mad->sa_hdr.comp_mask = comp_mask;
 
         ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
-                rec, query->sa_query.mad->data);
+                rec, mad->data);
 
         *sa_query = &query->sa_query;
 
         ret = send_mad(&query->sa_query, timeout_ms);
-        if (ret < 0) {
-                *sa_query = NULL;
-                kfree(query->sa_query.mad);
-                kfree(query);
-        }
+        if (ret < 0)
+                goto err2;
+
+        return ret;
 
+err2:
+        *sa_query = NULL;
+        ib_free_send_mad(query->sa_query.mad_buf);
+
+err1:
+        kfree(query);
         return ret;
 }
 EXPORT_SYMBOL(ib_sa_service_rec_query);
@@ -763,7 +755,6 @@ static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
 
 static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
 {
-        kfree(sa_query->mad);
         kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
 }
 
@@ -782,6 +773,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
         struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
         struct ib_sa_port   *port;
         struct ib_mad_agent *agent;
+        struct ib_sa_mad *mad;
         int ret;
 
         if (!sa_dev)
@@ -793,53 +785,55 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
         query = kmalloc(sizeof *query, gfp_mask);
         if (!query)
                 return -ENOMEM;
-        query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
-        if (!query->sa_query.mad) {
-                kfree(query);
-                return -ENOMEM;
+
+        query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
+                                                     0, IB_MGMT_SA_HDR,
+                                                     IB_MGMT_SA_DATA, gfp_mask);
+        if (!query->sa_query.mad_buf) {
+                ret = -ENOMEM;
+                goto err1;
         }
 
         query->callback = callback;
         query->context  = context;
 
-        init_mad(query->sa_query.mad, agent);
+        mad = query->sa_query.mad_buf->mad;
+        init_mad(mad, agent);
 
         query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
         query->sa_query.release  = ib_sa_mcmember_rec_release;
         query->sa_query.port     = port;
-        query->sa_query.mad->mad_hdr.method   = method;
-        query->sa_query.mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
-        query->sa_query.mad->sa_hdr.comp_mask = comp_mask;
+        mad->mad_hdr.method   = method;
+        mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
+        mad->sa_hdr.comp_mask = comp_mask;
 
         ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
-                rec, query->sa_query.mad->data);
+                rec, mad->data);
 
         *sa_query = &query->sa_query;
 
         ret = send_mad(&query->sa_query, timeout_ms);
-        if (ret < 0) {
-                *sa_query = NULL;
-                kfree(query->sa_query.mad);
-                kfree(query);
-        }
+        if (ret < 0)
+                goto err2;
 
         return ret;
+
+err2:
+        *sa_query = NULL;
+        ib_free_send_mad(query->sa_query.mad_buf);
+
+err1:
+        kfree(query);
+        return ret;
 }
 EXPORT_SYMBOL(ib_sa_mcmember_rec_query);
 
 static void send_handler(struct ib_mad_agent *agent,
                          struct ib_mad_send_wc *mad_send_wc)
 {
-        struct ib_sa_query *query;
+        struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
         unsigned long flags;
 
-        spin_lock_irqsave(&idr_lock, flags);
-        query = idr_find(&query_idr, mad_send_wc->wr_id);
-        spin_unlock_irqrestore(&idr_lock, flags);
-
-        if (!query)
-                return;
-
         if (query->callback)
                 switch (mad_send_wc->status) {
                 case IB_WC_SUCCESS:
@@ -856,30 +850,25 @@ static void send_handler(struct ib_mad_agent *agent,
                         break;
                 }
 
-        dma_unmap_single(agent->device->dma_device,
-                         pci_unmap_addr(query, mapping),
-                         sizeof (struct ib_sa_mad),
-                         DMA_TO_DEVICE);
-        kref_put(&query->sm_ah->ref, free_sm_ah);
-
-        query->release(query);
-
         spin_lock_irqsave(&idr_lock, flags);
-        idr_remove(&query_idr, mad_send_wc->wr_id);
+        idr_remove(&query_idr, query->id);
         spin_unlock_irqrestore(&idr_lock, flags);
+
+        ib_free_send_mad(mad_send_wc->send_buf);
+        kref_put(&query->sm_ah->ref, free_sm_ah);
+        query->release(query);
 }
 
 static void recv_handler(struct ib_mad_agent *mad_agent,
                          struct ib_mad_recv_wc *mad_recv_wc)
 {
         struct ib_sa_query *query;
-        unsigned long flags;
+        struct ib_mad_send_buf *mad_buf;
 
-        spin_lock_irqsave(&idr_lock, flags);
-        query = idr_find(&query_idr, mad_recv_wc->wc->wr_id);
-        spin_unlock_irqrestore(&idr_lock, flags);
+        mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
+        query = mad_buf->context[0];
 
-        if (query && query->callback) {
+        if (query->callback) {
                 if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
                         query->callback(query,
                                         mad_recv_wc->recv_buf.mad->mad_hdr.status ?