path: root/drivers/infiniband/hw/mthca/mthca_mad.c
author     Sean Hefty <sean.hefty@intel.com>    2005-10-25 13:51:39 -0400
committer  Roland Dreier <rolandd@cisco.com>    2005-10-25 13:51:39 -0400
commit     34816ad98efe4d47ffd858a0345321f9d85d9420 (patch)
tree       8a5ed6a9b80e667c4c02d9993711ced06d158555 /drivers/infiniband/hw/mthca/mthca_mad.c
parent     ae7971a7706384ca373fb7e212fe195698e6c5a1 (diff)
[IB] Fix MAD layer DMA mappings to avoid touching data buffer once mapped
The MAD layer was violating the DMA API by touching data buffers used
for sends after the DMA mapping was done.  This causes problems on
non-cache-coherent architectures, because the device doing DMA won't see
updates to the payload buffers that exist only in the CPU cache.

Fix this by having all MAD consumers use ib_create_send_mad() to
allocate their send buffers, and moving the DMA mapping into the MAD
layer so it can be done just before calling send (and after any
modifications of the send buffer by the MAD layer).

Tested on a non-cache-coherent PowerPC 440SPe system.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
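As an aside, here is a minimal sketch of the consumer-side pattern the commit message describes, modeled on the forward_trap() rework in the diff below. The helper name send_one_mad(), its arguments, and the IS_ERR() error handling are illustrative assumptions added for this sketch; only ib_create_send_mad(), ib_post_send_mad(), and ib_free_send_mad() come from the patch itself.

    /*
     * Hypothetical consumer sketch (not part of this patch): allocate the
     * send buffer through the MAD layer, fill it, post it, and let the MAD
     * layer do the DMA mapping just before the send is actually posted.
     */
    #include <rdma/ib_mad.h>

    static int send_one_mad(struct ib_mad_agent *agent, int qpn,
                            struct ib_mad *mad, struct ib_ah *ah)
    {
            struct ib_mad_send_buf *send_buf;
            int ret;

            /* Buffer comes from the MAD layer; the consumer never maps it. */
            send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
                                          IB_MGMT_MAD_DATA, GFP_ATOMIC);
            if (IS_ERR(send_buf))
                    return PTR_ERR(send_buf);

            /* All payload writes happen before posting; after this point the
             * buffer must not be touched until the send completes. */
            memcpy(send_buf->mad, mad, sizeof *mad);
            send_buf->ah = ah;

            ret = ib_post_send_mad(send_buf, NULL);
            if (ret)
                    ib_free_send_mad(send_buf);     /* completion handler won't run */

            return ret;
    }

On a successful post, the buffer is released in the agent's send completion handler via ib_free_send_mad(), exactly as the reworked send_handler() does in the diff below.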
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_mad.c')
-rw-r--r--   drivers/infiniband/hw/mthca/mthca_mad.c   72
1 files changed, 9 insertions, 63 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 9804174f7f3c..8561b297a19b 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -46,11 +46,6 @@ enum {
         MTHCA_VENDOR_CLASS2 = 0xa
 };
 
-struct mthca_trap_mad {
-        struct ib_mad *mad;
-        DECLARE_PCI_UNMAP_ADDR(mapping)
-};
-
 static void update_sm_ah(struct mthca_dev *dev,
                          u8 port_num, u16 lid, u8 sl)
 {
@@ -116,49 +111,14 @@ static void forward_trap(struct mthca_dev *dev,
                          struct ib_mad *mad)
 {
         int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
-        struct mthca_trap_mad *tmad;
-        struct ib_sge      gather_list;
-        struct ib_send_wr *bad_wr, wr = {
-                .opcode      = IB_WR_SEND,
-                .sg_list     = &gather_list,
-                .num_sge     = 1,
-                .send_flags  = IB_SEND_SIGNALED,
-                .wr          = {
-                        .ud = {
-                                .remote_qpn  = qpn,
-                                .remote_qkey = qpn ? IB_QP1_QKEY : 0,
-                                .timeout_ms  = 0
-                        }
-                }
-        };
+        struct ib_mad_send_buf *send_buf;
         struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
         int ret;
         unsigned long flags;
 
         if (agent) {
-                tmad = kmalloc(sizeof *tmad, GFP_KERNEL);
-                if (!tmad)
-                        return;
-
-                tmad->mad = kmalloc(sizeof *tmad->mad, GFP_KERNEL);
-                if (!tmad->mad) {
-                        kfree(tmad);
-                        return;
-                }
-
-                memcpy(tmad->mad, mad, sizeof *mad);
-
-                wr.wr.ud.mad_hdr = &tmad->mad->mad_hdr;
-                wr.wr_id         = (unsigned long) tmad;
-
-                gather_list.addr = dma_map_single(agent->device->dma_device,
-                                                  tmad->mad,
-                                                  sizeof *tmad->mad,
-                                                  DMA_TO_DEVICE);
-                gather_list.length = sizeof *tmad->mad;
-                gather_list.lkey   = to_mpd(agent->qp->pd)->ntmr.ibmr.lkey;
-                pci_unmap_addr_set(tmad, mapping, gather_list.addr);
-
+                send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
+                                              IB_MGMT_MAD_DATA, GFP_ATOMIC);
                 /*
                  * We rely here on the fact that MLX QPs don't use the
                  * address handle after the send is posted (this is
@@ -166,21 +126,15 @@ static void forward_trap(struct mthca_dev *dev,
                  * it's OK for our devices).
                  */
                 spin_lock_irqsave(&dev->sm_lock, flags);
-                wr.wr.ud.ah = dev->sm_ah[port_num - 1];
-                if (wr.wr.ud.ah)
-                        ret = ib_post_send_mad(agent, &wr, &bad_wr);
+                memcpy(send_buf->mad, mad, sizeof *mad);
+                if ((send_buf->ah = dev->sm_ah[port_num - 1]))
+                        ret = ib_post_send_mad(send_buf, NULL);
                 else
                         ret = -EINVAL;
                 spin_unlock_irqrestore(&dev->sm_lock, flags);
 
-                if (ret) {
-                        dma_unmap_single(agent->device->dma_device,
-                                         pci_unmap_addr(tmad, mapping),
-                                         sizeof *tmad->mad,
-                                         DMA_TO_DEVICE);
-                        kfree(tmad->mad);
-                        kfree(tmad);
-                }
+                if (ret)
+                        ib_free_send_mad(send_buf);
         }
 }
 
@@ -267,15 +221,7 @@ int mthca_process_mad(struct ib_device *ibdev,
 static void send_handler(struct ib_mad_agent *agent,
                          struct ib_mad_send_wc *mad_send_wc)
 {
-        struct mthca_trap_mad *tmad =
-                (void *) (unsigned long) mad_send_wc->wr_id;
-
-        dma_unmap_single(agent->device->dma_device,
-                         pci_unmap_addr(tmad, mapping),
-                         sizeof *tmad->mad,
-                         DMA_TO_DEVICE);
-        kfree(tmad->mad);
-        kfree(tmad);
+        ib_free_send_mad(mad_send_wc->send_buf);
 }
 
 int mthca_create_agents(struct mthca_dev *dev)