aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/infiniband/core/mad.c76
-rw-r--r--drivers/infiniband/include/ib_mad.h60
2 files changed, 136 insertions, 0 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 52748b0f7685..d66ecf8243ec 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -763,6 +763,82 @@ out:
763 return ret; 763 return ret;
764} 764}
765 765
766static int get_buf_length(int hdr_len, int data_len)
767{
768 int seg_size, pad;
769
770 seg_size = sizeof(struct ib_mad) - hdr_len;
771 if (data_len && seg_size) {
772 pad = seg_size - data_len % seg_size;
773 if (pad == seg_size)
774 pad = 0;
775 } else
776 pad = seg_size;
777 return hdr_len + data_len + pad;
778}
779
780struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
781 u32 remote_qpn, u16 pkey_index,
782 struct ib_ah *ah,
783 int hdr_len, int data_len,
784 unsigned int __nocast gfp_mask)
785{
786 struct ib_mad_agent_private *mad_agent_priv;
787 struct ib_mad_send_buf *send_buf;
788 int buf_size;
789 void *buf;
790
791 mad_agent_priv = container_of(mad_agent,
792 struct ib_mad_agent_private, agent);
793 buf_size = get_buf_length(hdr_len, data_len);
794
795 buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask);
796 if (!buf)
797 return ERR_PTR(-ENOMEM);
798
799 send_buf = buf + buf_size;
800 memset(send_buf, 0, sizeof *send_buf);
801 send_buf->mad = buf;
802
803 send_buf->sge.addr = dma_map_single(mad_agent->device->dma_device,
804 buf, buf_size, DMA_TO_DEVICE);
805 pci_unmap_addr_set(send_buf, mapping, send_buf->sge.addr);
806 send_buf->sge.length = buf_size;
807 send_buf->sge.lkey = mad_agent->mr->lkey;
808
809 send_buf->send_wr.wr_id = (unsigned long) send_buf;
810 send_buf->send_wr.sg_list = &send_buf->sge;
811 send_buf->send_wr.num_sge = 1;
812 send_buf->send_wr.opcode = IB_WR_SEND;
813 send_buf->send_wr.send_flags = IB_SEND_SIGNALED;
814 send_buf->send_wr.wr.ud.ah = ah;
815 send_buf->send_wr.wr.ud.mad_hdr = &send_buf->mad->mad_hdr;
816 send_buf->send_wr.wr.ud.remote_qpn = remote_qpn;
817 send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
818 send_buf->send_wr.wr.ud.pkey_index = pkey_index;
819 send_buf->mad_agent = mad_agent;
820 atomic_inc(&mad_agent_priv->refcount);
821 return send_buf;
822}
823EXPORT_SYMBOL(ib_create_send_mad);
824
825void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
826{
827 struct ib_mad_agent_private *mad_agent_priv;
828
829 mad_agent_priv = container_of(send_buf->mad_agent,
830 struct ib_mad_agent_private, agent);
831
832 dma_unmap_single(send_buf->mad_agent->device->dma_device,
833 pci_unmap_addr(send_buf, mapping),
834 send_buf->sge.length, DMA_TO_DEVICE);
835 kfree(send_buf->mad);
836
837 if (atomic_dec_and_test(&mad_agent_priv->refcount))
838 wake_up(&mad_agent_priv->wait);
839}
840EXPORT_SYMBOL(ib_free_send_mad);
841
766static int ib_send_mad(struct ib_mad_agent_private *mad_agent_priv, 842static int ib_send_mad(struct ib_mad_agent_private *mad_agent_priv,
767 struct ib_mad_send_wr_private *mad_send_wr) 843 struct ib_mad_send_wr_private *mad_send_wr)
768{ 844{
diff --git a/drivers/infiniband/include/ib_mad.h b/drivers/infiniband/include/ib_mad.h
index 60378c1a9ccf..a6f06b8c4acf 100644
--- a/drivers/infiniband/include/ib_mad.h
+++ b/drivers/infiniband/include/ib_mad.h
@@ -39,6 +39,8 @@
39#if !defined( IB_MAD_H ) 39#if !defined( IB_MAD_H )
40#define IB_MAD_H 40#define IB_MAD_H
41 41
42#include <linux/pci.h>
43
42#include <ib_verbs.h> 44#include <ib_verbs.h>
43 45
44/* Management base version */ 46/* Management base version */
@@ -73,6 +75,7 @@
73#define IB_QP0 0 75#define IB_QP0 0
74#define IB_QP1 __constant_htonl(1) 76#define IB_QP1 __constant_htonl(1)
75#define IB_QP1_QKEY 0x80010000 77#define IB_QP1_QKEY 0x80010000
78#define IB_QP_SET_QKEY 0x80000000
76 79
77struct ib_grh { 80struct ib_grh {
78 u32 version_tclass_flow; 81 u32 version_tclass_flow;
@@ -124,6 +127,30 @@ struct ib_vendor_mad {
124 u8 data[216]; 127 u8 data[216];
125} __attribute__ ((packed)); 128} __attribute__ ((packed));
126 129
130/**
131 * ib_mad_send_buf - MAD data buffer and work request for sends.
132 * @mad: References an allocated MAD data buffer. The size of the data
133 * buffer is specified in the @send_wr.length field.
134 * @mapping: DMA mapping information.
135 * @mad_agent: MAD agent that allocated the buffer.
136 * @context: User-controlled context fields.
137 * @send_wr: An initialized work request structure used when sending the MAD.
138 * The wr_id field of the work request is initialized to reference this
139 * data structure.
140 * @sge: A scatter-gather list referenced by the work request.
141 *
142 * Users are responsible for initializing the MAD buffer itself, with the
143 * exception of specifying the payload length field in any RMPP MAD.
144 */
145struct ib_mad_send_buf {
146 struct ib_mad *mad;
147 DECLARE_PCI_UNMAP_ADDR(mapping)
148 struct ib_mad_agent *mad_agent;
149 void *context[2];
150 struct ib_send_wr send_wr;
151 struct ib_sge sge;
152};
153
127struct ib_mad_agent; 154struct ib_mad_agent;
128struct ib_mad_send_wc; 155struct ib_mad_send_wc;
129struct ib_mad_recv_wc; 156struct ib_mad_recv_wc;
@@ -402,4 +429,37 @@ struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
402int ib_process_mad_wc(struct ib_mad_agent *mad_agent, 429int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
403 struct ib_wc *wc); 430 struct ib_wc *wc);
404 431
432/**
433 * ib_create_send_mad - Allocate and initialize a data buffer and work request
434 * for sending a MAD.
435 * @mad_agent: Specifies the registered MAD service to associate with the MAD.
436 * @remote_qpn: Specifies the QPN of the receiving node.
437 * @pkey_index: Specifies which PKey the MAD will be sent using. This field
438 * is valid only if the remote_qpn is QP 1.
439 * @ah: References the address handle used to transfer to the remote node.
440 * @hdr_len: Indicates the size of the data header of the MAD. This length
441 * should include the common MAD header, RMPP header, plus any class
442 * specific header.
443 * @data_len: Indicates the size of any user-transferred data. The call will
444 * automatically adjust the allocated buffer size to account for any
445 * additional padding that may be necessary.
446 * @gfp_mask: GFP mask used for the memory allocation.
447 *
448 * This is a helper routine that may be used to allocate a MAD. Users are
449 * not required to allocate outbound MADs using this call. The returned
450 * MAD send buffer will reference a data buffer usable for sending a MAD, along
451 * with an initialized work request structure.
452 */
453struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
454 u32 remote_qpn, u16 pkey_index,
455 struct ib_ah *ah,
456 int hdr_len, int data_len,
457 unsigned int __nocast gfp_mask);
458
459/**
460 * ib_free_send_mad - Returns data buffers used to send a MAD.
461 * @send_buf: Previously allocated send data buffer.
462 */
463void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
464
405#endif /* IB_MAD_H */ 465#endif /* IB_MAD_H */