-rw-r--r-- | drivers/infiniband/core/agent.c | 293
-rw-r--r-- | drivers/infiniband/core/agent.h | 13
-rw-r--r-- | drivers/infiniband/core/agent_priv.h | 62
-rw-r--r-- | drivers/infiniband/core/cm.c | 137
-rw-r--r-- | drivers/infiniband/core/mad.c | 288
-rw-r--r-- | drivers/infiniband/core/mad_priv.h | 8
-rw-r--r-- | drivers/infiniband/core/mad_rmpp.c | 87
-rw-r--r-- | drivers/infiniband/core/mad_rmpp.h | 2
-rw-r--r-- | drivers/infiniband/core/sa_query.c | 239
-rw-r--r-- | drivers/infiniband/core/smi.h | 2
-rw-r--r-- | drivers/infiniband/core/user_mad.c | 47
-rw-r--r-- | drivers/infiniband/hw/mthca/mthca_mad.c | 72
-rw-r--r-- | include/rdma/ib_mad.h | 66
-rw-r--r-- | include/rdma/ib_verbs.h | 3
14 files changed, 475 insertions(+), 844 deletions(-)
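For orientation, the hunks below replace the old ib_send_wr-based MAD send path with the ib_mad_send_buf API: the address handle and timeout move into the send buffer, and ib_post_send_mad()/ib_cancel_mad()/ib_modify_mad() operate on the buffer directly. A minimal sketch of the new flow, condensed from the agent_send_response() hunk in drivers/infiniband/core/agent.c (the helper name send_mad_response() is invented for illustration; the calls and cleanup order are taken from the patch):

/* Condensed from the new agent_send_response(): send a copy of a received
 * MAD back to its sender using the reworked ib_mad_send_buf API. */
static int send_mad_response(struct ib_mad_agent *agent, struct ib_mad *mad,
			     struct ib_grh *grh, struct ib_wc *wc, int port_num)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_ah *ah;
	int ret;

	/* The address handle is created up front from the receive WC... */
	ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	/* ...and ib_create_send_mad() no longer takes an ah or sge list,
	 * only the header and data lengths. */
	send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0,
				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				      GFP_KERNEL);
	if (IS_ERR(send_buf)) {
		ib_destroy_ah(ah);
		return PTR_ERR(send_buf);
	}

	memcpy(send_buf->mad, mad, sizeof *mad);
	send_buf->ah = ah;

	/* ib_post_send_mad() is now called on the send buffer itself; no
	 * mad_agent, send_wr, or bad_send_wr arguments. */
	ret = ib_post_send_mad(send_buf, NULL);
	if (ret) {
		ib_free_send_mad(send_buf);
		ib_destroy_ah(ah);
	}
	return ret;
}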
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 5ac86f566dc0..0c3c6952faae 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -37,58 +37,41 @@ | |||
37 | * $Id: agent.c 1389 2004-12-27 22:56:47Z roland $ | 37 | * $Id: agent.c 1389 2004-12-27 22:56:47Z roland $ |
38 | */ | 38 | */ |
39 | 39 | ||
40 | #include <linux/dma-mapping.h> | 40 | #include "agent.h" |
41 | 41 | #include "smi.h" | |
42 | #include <asm/bug.h> | ||
43 | 42 | ||
44 | #include <rdma/ib_smi.h> | 43 | #define SPFX "ib_agent: " |
45 | 44 | ||
46 | #include "smi.h" | 45 | struct ib_agent_port_private { |
47 | #include "agent_priv.h" | 46 | struct list_head port_list; |
48 | #include "mad_priv.h" | 47 | struct ib_mad_agent *agent[2]; |
49 | #include "agent.h" | 48 | }; |
50 | 49 | ||
51 | spinlock_t ib_agent_port_list_lock; | 50 | static DEFINE_SPINLOCK(ib_agent_port_list_lock); |
52 | static LIST_HEAD(ib_agent_port_list); | 51 | static LIST_HEAD(ib_agent_port_list); |
53 | 52 | ||
54 | /* | 53 | static struct ib_agent_port_private * |
55 | * Caller must hold ib_agent_port_list_lock | 54 | __ib_get_agent_port(struct ib_device *device, int port_num) |
56 | */ | ||
57 | static inline struct ib_agent_port_private * | ||
58 | __ib_get_agent_port(struct ib_device *device, int port_num, | ||
59 | struct ib_mad_agent *mad_agent) | ||
60 | { | 55 | { |
61 | struct ib_agent_port_private *entry; | 56 | struct ib_agent_port_private *entry; |
62 | 57 | ||
63 | BUG_ON(!(!!device ^ !!mad_agent)); /* Exactly one MUST be (!NULL) */ | 58 | list_for_each_entry(entry, &ib_agent_port_list, port_list) { |
64 | 59 | if (entry->agent[0]->device == device && | |
65 | if (device) { | 60 | entry->agent[0]->port_num == port_num) |
66 | list_for_each_entry(entry, &ib_agent_port_list, port_list) { | 61 | return entry; |
67 | if (entry->smp_agent->device == device && | ||
68 | entry->port_num == port_num) | ||
69 | return entry; | ||
70 | } | ||
71 | } else { | ||
72 | list_for_each_entry(entry, &ib_agent_port_list, port_list) { | ||
73 | if ((entry->smp_agent == mad_agent) || | ||
74 | (entry->perf_mgmt_agent == mad_agent)) | ||
75 | return entry; | ||
76 | } | ||
77 | } | 62 | } |
78 | return NULL; | 63 | return NULL; |
79 | } | 64 | } |
80 | 65 | ||
81 | static inline struct ib_agent_port_private * | 66 | static struct ib_agent_port_private * |
82 | ib_get_agent_port(struct ib_device *device, int port_num, | 67 | ib_get_agent_port(struct ib_device *device, int port_num) |
83 | struct ib_mad_agent *mad_agent) | ||
84 | { | 68 | { |
85 | struct ib_agent_port_private *entry; | 69 | struct ib_agent_port_private *entry; |
86 | unsigned long flags; | 70 | unsigned long flags; |
87 | 71 | ||
88 | spin_lock_irqsave(&ib_agent_port_list_lock, flags); | 72 | spin_lock_irqsave(&ib_agent_port_list_lock, flags); |
89 | entry = __ib_get_agent_port(device, port_num, mad_agent); | 73 | entry = __ib_get_agent_port(device, port_num); |
90 | spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); | 74 | spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); |
91 | |||
92 | return entry; | 75 | return entry; |
93 | } | 76 | } |
94 | 77 | ||
@@ -100,192 +83,76 @@ int smi_check_local_dr_smp(struct ib_smp *smp, | |||
100 | 83 | ||
101 | if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) | 84 | if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) |
102 | return 1; | 85 | return 1; |
103 | port_priv = ib_get_agent_port(device, port_num, NULL); | 86 | |
87 | port_priv = ib_get_agent_port(device, port_num); | ||
104 | if (!port_priv) { | 88 | if (!port_priv) { |
105 | printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d " | 89 | printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d " |
106 | "not open\n", | 90 | "not open\n", device->name, port_num); |
107 | device->name, port_num); | ||
108 | return 1; | 91 | return 1; |
109 | } | 92 | } |
110 | 93 | ||
111 | return smi_check_local_smp(port_priv->smp_agent, smp); | 94 | return smi_check_local_smp(port_priv->agent[0], smp); |
112 | } | 95 | } |
113 | 96 | ||
114 | static int agent_mad_send(struct ib_mad_agent *mad_agent, | 97 | int agent_send_response(struct ib_mad *mad, struct ib_grh *grh, |
115 | struct ib_agent_port_private *port_priv, | 98 | struct ib_wc *wc, struct ib_device *device, |
116 | struct ib_mad_private *mad_priv, | 99 | int port_num, int qpn) |
117 | struct ib_grh *grh, | ||
118 | struct ib_wc *wc) | ||
119 | { | 100 | { |
120 | struct ib_agent_send_wr *agent_send_wr; | 101 | struct ib_agent_port_private *port_priv; |
121 | struct ib_sge gather_list; | 102 | struct ib_mad_agent *agent; |
122 | struct ib_send_wr send_wr; | 103 | struct ib_mad_send_buf *send_buf; |
123 | struct ib_send_wr *bad_send_wr; | 104 | struct ib_ah *ah; |
124 | struct ib_ah_attr ah_attr; | 105 | int ret; |
125 | unsigned long flags; | ||
126 | int ret = 1; | ||
127 | |||
128 | agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL); | ||
129 | if (!agent_send_wr) | ||
130 | goto out; | ||
131 | agent_send_wr->mad = mad_priv; | ||
132 | |||
133 | gather_list.addr = dma_map_single(mad_agent->device->dma_device, | ||
134 | &mad_priv->mad, | ||
135 | sizeof(mad_priv->mad), | ||
136 | DMA_TO_DEVICE); | ||
137 | gather_list.length = sizeof(mad_priv->mad); | ||
138 | gather_list.lkey = mad_agent->mr->lkey; | ||
139 | |||
140 | send_wr.next = NULL; | ||
141 | send_wr.opcode = IB_WR_SEND; | ||
142 | send_wr.sg_list = &gather_list; | ||
143 | send_wr.num_sge = 1; | ||
144 | send_wr.wr.ud.remote_qpn = wc->src_qp; /* DQPN */ | ||
145 | send_wr.wr.ud.timeout_ms = 0; | ||
146 | send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED; | ||
147 | 106 | ||
148 | ah_attr.dlid = wc->slid; | 107 | port_priv = ib_get_agent_port(device, port_num); |
149 | ah_attr.port_num = mad_agent->port_num; | 108 | if (!port_priv) { |
150 | ah_attr.src_path_bits = wc->dlid_path_bits; | 109 | printk(KERN_ERR SPFX "Unable to find port agent\n"); |
151 | ah_attr.sl = wc->sl; | 110 | return -ENODEV; |
152 | ah_attr.static_rate = 0; | ||
153 | ah_attr.ah_flags = 0; /* No GRH */ | ||
154 | if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) { | ||
155 | if (wc->wc_flags & IB_WC_GRH) { | ||
156 | ah_attr.ah_flags = IB_AH_GRH; | ||
157 | /* Should sgid be looked up ? */ | ||
158 | ah_attr.grh.sgid_index = 0; | ||
159 | ah_attr.grh.hop_limit = grh->hop_limit; | ||
160 | ah_attr.grh.flow_label = be32_to_cpu( | ||
161 | grh->version_tclass_flow) & 0xfffff; | ||
162 | ah_attr.grh.traffic_class = (be32_to_cpu( | ||
163 | grh->version_tclass_flow) >> 20) & 0xff; | ||
164 | memcpy(ah_attr.grh.dgid.raw, | ||
165 | grh->sgid.raw, | ||
166 | sizeof(ah_attr.grh.dgid)); | ||
167 | } | ||
168 | } | 111 | } |
169 | 112 | ||
170 | agent_send_wr->ah = ib_create_ah(mad_agent->qp->pd, &ah_attr); | 113 | agent = port_priv->agent[qpn]; |
171 | if (IS_ERR(agent_send_wr->ah)) { | 114 | ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num); |
172 | printk(KERN_ERR SPFX "No memory for address handle\n"); | 115 | if (IS_ERR(ah)) { |
173 | kfree(agent_send_wr); | 116 | ret = PTR_ERR(ah); |
174 | goto out; | 117 | printk(KERN_ERR SPFX "ib_create_ah_from_wc error:%d\n", ret); |
118 | return ret; | ||
175 | } | 119 | } |
176 | 120 | ||
177 | send_wr.wr.ud.ah = agent_send_wr->ah; | 121 | send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0, |
178 | if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) { | 122 | IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, |
179 | send_wr.wr.ud.pkey_index = wc->pkey_index; | 123 | GFP_KERNEL); |
180 | send_wr.wr.ud.remote_qkey = IB_QP1_QKEY; | 124 | if (IS_ERR(send_buf)) { |
181 | } else { /* for SMPs */ | 125 | ret = PTR_ERR(send_buf); |
182 | send_wr.wr.ud.pkey_index = 0; | 126 | printk(KERN_ERR SPFX "ib_create_send_mad error:%d\n", ret); |
183 | send_wr.wr.ud.remote_qkey = 0; | 127 | goto err1; |
184 | } | 128 | } |
185 | send_wr.wr.ud.mad_hdr = &mad_priv->mad.mad.mad_hdr; | ||
186 | send_wr.wr_id = (unsigned long)agent_send_wr; | ||
187 | 129 | ||
188 | pci_unmap_addr_set(agent_send_wr, mapping, gather_list.addr); | 130 | memcpy(send_buf->mad, mad, sizeof *mad); |
189 | 131 | send_buf->ah = ah; | |
190 | /* Send */ | 132 | if ((ret = ib_post_send_mad(send_buf, NULL))) { |
191 | spin_lock_irqsave(&port_priv->send_list_lock, flags); | 133 | printk(KERN_ERR SPFX "ib_post_send_mad error:%d\n", ret); |
192 | if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) { | 134 | goto err2; |
193 | spin_unlock_irqrestore(&port_priv->send_list_lock, flags); | ||
194 | dma_unmap_single(mad_agent->device->dma_device, | ||
195 | pci_unmap_addr(agent_send_wr, mapping), | ||
196 | sizeof(mad_priv->mad), | ||
197 | DMA_TO_DEVICE); | ||
198 | ib_destroy_ah(agent_send_wr->ah); | ||
199 | kfree(agent_send_wr); | ||
200 | } else { | ||
201 | list_add_tail(&agent_send_wr->send_list, | ||
202 | &port_priv->send_posted_list); | ||
203 | spin_unlock_irqrestore(&port_priv->send_list_lock, flags); | ||
204 | ret = 0; | ||
205 | } | 135 | } |
206 | 136 | return 0; | |
207 | out: | 137 | err2: |
138 | ib_free_send_mad(send_buf); | ||
139 | err1: | ||
140 | ib_destroy_ah(ah); | ||
208 | return ret; | 141 | return ret; |
209 | } | 142 | } |
210 | 143 | ||
211 | int agent_send(struct ib_mad_private *mad, | ||
212 | struct ib_grh *grh, | ||
213 | struct ib_wc *wc, | ||
214 | struct ib_device *device, | ||
215 | int port_num) | ||
216 | { | ||
217 | struct ib_agent_port_private *port_priv; | ||
218 | struct ib_mad_agent *mad_agent; | ||
219 | |||
220 | port_priv = ib_get_agent_port(device, port_num, NULL); | ||
221 | if (!port_priv) { | ||
222 | printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n", | ||
223 | device->name, port_num); | ||
224 | return 1; | ||
225 | } | ||
226 | |||
227 | /* Get mad agent based on mgmt_class in MAD */ | ||
228 | switch (mad->mad.mad.mad_hdr.mgmt_class) { | ||
229 | case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: | ||
230 | case IB_MGMT_CLASS_SUBN_LID_ROUTED: | ||
231 | mad_agent = port_priv->smp_agent; | ||
232 | break; | ||
233 | case IB_MGMT_CLASS_PERF_MGMT: | ||
234 | mad_agent = port_priv->perf_mgmt_agent; | ||
235 | break; | ||
236 | default: | ||
237 | return 1; | ||
238 | } | ||
239 | |||
240 | return agent_mad_send(mad_agent, port_priv, mad, grh, wc); | ||
241 | } | ||
242 | |||
243 | static void agent_send_handler(struct ib_mad_agent *mad_agent, | 144 | static void agent_send_handler(struct ib_mad_agent *mad_agent, |
244 | struct ib_mad_send_wc *mad_send_wc) | 145 | struct ib_mad_send_wc *mad_send_wc) |
245 | { | 146 | { |
246 | struct ib_agent_port_private *port_priv; | 147 | ib_destroy_ah(mad_send_wc->send_buf->ah); |
247 | struct ib_agent_send_wr *agent_send_wr; | 148 | ib_free_send_mad(mad_send_wc->send_buf); |
248 | unsigned long flags; | ||
249 | |||
250 | /* Find matching MAD agent */ | ||
251 | port_priv = ib_get_agent_port(NULL, 0, mad_agent); | ||
252 | if (!port_priv) { | ||
253 | printk(KERN_ERR SPFX "agent_send_handler: no matching MAD " | ||
254 | "agent %p\n", mad_agent); | ||
255 | return; | ||
256 | } | ||
257 | |||
258 | agent_send_wr = (struct ib_agent_send_wr *)(unsigned long)mad_send_wc->wr_id; | ||
259 | spin_lock_irqsave(&port_priv->send_list_lock, flags); | ||
260 | /* Remove completed send from posted send MAD list */ | ||
261 | list_del(&agent_send_wr->send_list); | ||
262 | spin_unlock_irqrestore(&port_priv->send_list_lock, flags); | ||
263 | |||
264 | dma_unmap_single(mad_agent->device->dma_device, | ||
265 | pci_unmap_addr(agent_send_wr, mapping), | ||
266 | sizeof(agent_send_wr->mad->mad), | ||
267 | DMA_TO_DEVICE); | ||
268 | |||
269 | ib_destroy_ah(agent_send_wr->ah); | ||
270 | |||
271 | /* Release allocated memory */ | ||
272 | kmem_cache_free(ib_mad_cache, agent_send_wr->mad); | ||
273 | kfree(agent_send_wr); | ||
274 | } | 149 | } |
275 | 150 | ||
276 | int ib_agent_port_open(struct ib_device *device, int port_num) | 151 | int ib_agent_port_open(struct ib_device *device, int port_num) |
277 | { | 152 | { |
278 | int ret; | ||
279 | struct ib_agent_port_private *port_priv; | 153 | struct ib_agent_port_private *port_priv; |
280 | unsigned long flags; | 154 | unsigned long flags; |
281 | 155 | int ret; | |
282 | /* First, check if port already open for SMI */ | ||
283 | port_priv = ib_get_agent_port(device, port_num, NULL); | ||
284 | if (port_priv) { | ||
285 | printk(KERN_DEBUG SPFX "%s port %d already open\n", | ||
286 | device->name, port_num); | ||
287 | return 0; | ||
288 | } | ||
289 | 156 | ||
290 | /* Create new device info */ | 157 | /* Create new device info */ |
291 | port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL); | 158 | port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL); |
@@ -294,32 +161,25 @@ int ib_agent_port_open(struct ib_device *device, int port_num) | |||
294 | ret = -ENOMEM; | 161 | ret = -ENOMEM; |
295 | goto error1; | 162 | goto error1; |
296 | } | 163 | } |
297 | |||
298 | memset(port_priv, 0, sizeof *port_priv); | 164 | memset(port_priv, 0, sizeof *port_priv); |
299 | port_priv->port_num = port_num; | ||
300 | spin_lock_init(&port_priv->send_list_lock); | ||
301 | INIT_LIST_HEAD(&port_priv->send_posted_list); | ||
302 | 165 | ||
303 | /* Obtain send only MAD agent for SM class (SMI QP) */ | 166 | /* Obtain send only MAD agent for SMI QP */ |
304 | port_priv->smp_agent = ib_register_mad_agent(device, port_num, | 167 | port_priv->agent[0] = ib_register_mad_agent(device, port_num, |
305 | IB_QPT_SMI, | 168 | IB_QPT_SMI, NULL, 0, |
306 | NULL, 0, | ||
307 | &agent_send_handler, | 169 | &agent_send_handler, |
308 | NULL, NULL); | 170 | NULL, NULL); |
309 | 171 | if (IS_ERR(port_priv->agent[0])) { | |
310 | if (IS_ERR(port_priv->smp_agent)) { | 172 | ret = PTR_ERR(port_priv->agent[0]); |
311 | ret = PTR_ERR(port_priv->smp_agent); | ||
312 | goto error2; | 173 | goto error2; |
313 | } | 174 | } |
314 | 175 | ||
315 | /* Obtain send only MAD agent for PerfMgmt class (GSI QP) */ | 176 | /* Obtain send only MAD agent for GSI QP */ |
316 | port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num, | 177 | port_priv->agent[1] = ib_register_mad_agent(device, port_num, |
317 | IB_QPT_GSI, | 178 | IB_QPT_GSI, NULL, 0, |
318 | NULL, 0, | 179 | &agent_send_handler, |
319 | &agent_send_handler, | 180 | NULL, NULL); |
320 | NULL, NULL); | 181 | if (IS_ERR(port_priv->agent[1])) { |
321 | if (IS_ERR(port_priv->perf_mgmt_agent)) { | 182 | ret = PTR_ERR(port_priv->agent[1]); |
322 | ret = PTR_ERR(port_priv->perf_mgmt_agent); | ||
323 | goto error3; | 183 | goto error3; |
324 | } | 184 | } |
325 | 185 | ||
@@ -330,7 +190,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num) | |||
330 | return 0; | 190 | return 0; |
331 | 191 | ||
332 | error3: | 192 | error3: |
333 | ib_unregister_mad_agent(port_priv->smp_agent); | 193 | ib_unregister_mad_agent(port_priv->agent[0]); |
334 | error2: | 194 | error2: |
335 | kfree(port_priv); | 195 | kfree(port_priv); |
336 | error1: | 196 | error1: |
@@ -343,7 +203,7 @@ int ib_agent_port_close(struct ib_device *device, int port_num) | |||
343 | unsigned long flags; | 203 | unsigned long flags; |
344 | 204 | ||
345 | spin_lock_irqsave(&ib_agent_port_list_lock, flags); | 205 | spin_lock_irqsave(&ib_agent_port_list_lock, flags); |
346 | port_priv = __ib_get_agent_port(device, port_num, NULL); | 206 | port_priv = __ib_get_agent_port(device, port_num); |
347 | if (port_priv == NULL) { | 207 | if (port_priv == NULL) { |
348 | spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); | 208 | spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); |
349 | printk(KERN_ERR SPFX "Port %d not found\n", port_num); | 209 | printk(KERN_ERR SPFX "Port %d not found\n", port_num); |
@@ -352,9 +212,8 @@ int ib_agent_port_close(struct ib_device *device, int port_num) | |||
352 | list_del(&port_priv->port_list); | 212 | list_del(&port_priv->port_list); |
353 | spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); | 213 | spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); |
354 | 214 | ||
355 | ib_unregister_mad_agent(port_priv->perf_mgmt_agent); | 215 | ib_unregister_mad_agent(port_priv->agent[1]); |
356 | ib_unregister_mad_agent(port_priv->smp_agent); | 216 | ib_unregister_mad_agent(port_priv->agent[0]); |
357 | kfree(port_priv); | 217 | kfree(port_priv); |
358 | |||
359 | return 0; | 218 | return 0; |
360 | } | 219 | } |
diff --git a/drivers/infiniband/core/agent.h b/drivers/infiniband/core/agent.h
index d9426842254a..c5f3cfec942a 100644
--- a/drivers/infiniband/core/agent.h
+++ b/drivers/infiniband/core/agent.h
@@ -39,17 +39,14 @@ | |||
39 | #ifndef __AGENT_H_ | 39 | #ifndef __AGENT_H_ |
40 | #define __AGENT_H_ | 40 | #define __AGENT_H_ |
41 | 41 | ||
42 | extern spinlock_t ib_agent_port_list_lock; | 42 | #include <rdma/ib_mad.h> |
43 | 43 | ||
44 | extern int ib_agent_port_open(struct ib_device *device, | 44 | extern int ib_agent_port_open(struct ib_device *device, int port_num); |
45 | int port_num); | ||
46 | 45 | ||
47 | extern int ib_agent_port_close(struct ib_device *device, int port_num); | 46 | extern int ib_agent_port_close(struct ib_device *device, int port_num); |
48 | 47 | ||
49 | extern int agent_send(struct ib_mad_private *mad, | 48 | extern int agent_send_response(struct ib_mad *mad, struct ib_grh *grh, |
50 | struct ib_grh *grh, | 49 | struct ib_wc *wc, struct ib_device *device, |
51 | struct ib_wc *wc, | 50 | int port_num, int qpn); |
52 | struct ib_device *device, | ||
53 | int port_num); | ||
54 | 51 | ||
55 | #endif /* __AGENT_H_ */ | 52 | #endif /* __AGENT_H_ */ |
diff --git a/drivers/infiniband/core/agent_priv.h b/drivers/infiniband/core/agent_priv.h
deleted file mode 100644
index 2ec6d7f1b7d0..000000000000
--- a/drivers/infiniband/core/agent_priv.h
+++ /dev/null
@@ -1,62 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved. | ||
3 | * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved. | ||
4 | * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. | ||
5 | * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved. | ||
6 | * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. | ||
7 | * | ||
8 | * This software is available to you under a choice of one of two | ||
9 | * licenses. You may choose to be licensed under the terms of the GNU | ||
10 | * General Public License (GPL) Version 2, available from the file | ||
11 | * COPYING in the main directory of this source tree, or the | ||
12 | * OpenIB.org BSD license below: | ||
13 | * | ||
14 | * Redistribution and use in source and binary forms, with or | ||
15 | * without modification, are permitted provided that the following | ||
16 | * conditions are met: | ||
17 | * | ||
18 | * - Redistributions of source code must retain the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer. | ||
21 | * | ||
22 | * - Redistributions in binary form must reproduce the above | ||
23 | * copyright notice, this list of conditions and the following | ||
24 | * disclaimer in the documentation and/or other materials | ||
25 | * provided with the distribution. | ||
26 | * | ||
27 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
28 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
29 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
30 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
31 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
32 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
33 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
34 | * SOFTWARE. | ||
35 | * | ||
36 | * $Id: agent_priv.h 1640 2005-01-24 22:39:02Z halr $ | ||
37 | */ | ||
38 | |||
39 | #ifndef __IB_AGENT_PRIV_H__ | ||
40 | #define __IB_AGENT_PRIV_H__ | ||
41 | |||
42 | #include <linux/pci.h> | ||
43 | |||
44 | #define SPFX "ib_agent: " | ||
45 | |||
46 | struct ib_agent_send_wr { | ||
47 | struct list_head send_list; | ||
48 | struct ib_ah *ah; | ||
49 | struct ib_mad_private *mad; | ||
50 | DECLARE_PCI_UNMAP_ADDR(mapping) | ||
51 | }; | ||
52 | |||
53 | struct ib_agent_port_private { | ||
54 | struct list_head port_list; | ||
55 | struct list_head send_posted_list; | ||
56 | spinlock_t send_list_lock; | ||
57 | int port_num; | ||
58 | struct ib_mad_agent *smp_agent; /* SM class */ | ||
59 | struct ib_mad_agent *perf_mgmt_agent; /* PerfMgmt class */ | ||
60 | }; | ||
61 | |||
62 | #endif /* __IB_AGENT_PRIV_H__ */ | ||
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 389fecbaf662..580c3a2bb102 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -176,8 +176,7 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, | |||
176 | 176 | ||
177 | m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, | 177 | m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, |
178 | cm_id_priv->av.pkey_index, | 178 | cm_id_priv->av.pkey_index, |
179 | ah, 0, sizeof(struct ib_mad_hdr), | 179 | 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, |
180 | sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr), | ||
181 | GFP_ATOMIC); | 180 | GFP_ATOMIC); |
182 | if (IS_ERR(m)) { | 181 | if (IS_ERR(m)) { |
183 | ib_destroy_ah(ah); | 182 | ib_destroy_ah(ah); |
@@ -185,7 +184,8 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, | |||
185 | } | 184 | } |
186 | 185 | ||
187 | /* Timeout set by caller if response is expected. */ | 186 | /* Timeout set by caller if response is expected. */ |
188 | m->send_wr.wr.ud.retries = cm_id_priv->max_cm_retries; | 187 | m->ah = ah; |
188 | m->retries = cm_id_priv->max_cm_retries; | ||
189 | 189 | ||
190 | atomic_inc(&cm_id_priv->refcount); | 190 | atomic_inc(&cm_id_priv->refcount); |
191 | m->context[0] = cm_id_priv; | 191 | m->context[0] = cm_id_priv; |
@@ -206,20 +206,20 @@ static int cm_alloc_response_msg(struct cm_port *port, | |||
206 | return PTR_ERR(ah); | 206 | return PTR_ERR(ah); |
207 | 207 | ||
208 | m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, | 208 | m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, |
209 | ah, 0, sizeof(struct ib_mad_hdr), | 209 | 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, |
210 | sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr), | ||
211 | GFP_ATOMIC); | 210 | GFP_ATOMIC); |
212 | if (IS_ERR(m)) { | 211 | if (IS_ERR(m)) { |
213 | ib_destroy_ah(ah); | 212 | ib_destroy_ah(ah); |
214 | return PTR_ERR(m); | 213 | return PTR_ERR(m); |
215 | } | 214 | } |
215 | m->ah = ah; | ||
216 | *msg = m; | 216 | *msg = m; |
217 | return 0; | 217 | return 0; |
218 | } | 218 | } |
219 | 219 | ||
220 | static void cm_free_msg(struct ib_mad_send_buf *msg) | 220 | static void cm_free_msg(struct ib_mad_send_buf *msg) |
221 | { | 221 | { |
222 | ib_destroy_ah(msg->send_wr.wr.ud.ah); | 222 | ib_destroy_ah(msg->ah); |
223 | if (msg->context[0]) | 223 | if (msg->context[0]) |
224 | cm_deref_id(msg->context[0]); | 224 | cm_deref_id(msg->context[0]); |
225 | ib_free_send_mad(msg); | 225 | ib_free_send_mad(msg); |
@@ -678,8 +678,7 @@ retest: | |||
678 | break; | 678 | break; |
679 | case IB_CM_SIDR_REQ_SENT: | 679 | case IB_CM_SIDR_REQ_SENT: |
680 | cm_id->state = IB_CM_IDLE; | 680 | cm_id->state = IB_CM_IDLE; |
681 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, | 681 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); |
682 | (unsigned long) cm_id_priv->msg); | ||
683 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 682 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
684 | break; | 683 | break; |
685 | case IB_CM_SIDR_REQ_RCVD: | 684 | case IB_CM_SIDR_REQ_RCVD: |
@@ -690,8 +689,7 @@ retest: | |||
690 | case IB_CM_MRA_REQ_RCVD: | 689 | case IB_CM_MRA_REQ_RCVD: |
691 | case IB_CM_REP_SENT: | 690 | case IB_CM_REP_SENT: |
692 | case IB_CM_MRA_REP_RCVD: | 691 | case IB_CM_MRA_REP_RCVD: |
693 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, | 692 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); |
694 | (unsigned long) cm_id_priv->msg); | ||
695 | /* Fall through */ | 693 | /* Fall through */ |
696 | case IB_CM_REQ_RCVD: | 694 | case IB_CM_REQ_RCVD: |
697 | case IB_CM_MRA_REQ_SENT: | 695 | case IB_CM_MRA_REQ_SENT: |
@@ -708,8 +706,7 @@ retest: | |||
708 | ib_send_cm_dreq(cm_id, NULL, 0); | 706 | ib_send_cm_dreq(cm_id, NULL, 0); |
709 | goto retest; | 707 | goto retest; |
710 | case IB_CM_DREQ_SENT: | 708 | case IB_CM_DREQ_SENT: |
711 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, | 709 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); |
712 | (unsigned long) cm_id_priv->msg); | ||
713 | cm_enter_timewait(cm_id_priv); | 710 | cm_enter_timewait(cm_id_priv); |
714 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 711 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
715 | break; | 712 | break; |
@@ -883,7 +880,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, | |||
883 | struct ib_cm_req_param *param) | 880 | struct ib_cm_req_param *param) |
884 | { | 881 | { |
885 | struct cm_id_private *cm_id_priv; | 882 | struct cm_id_private *cm_id_priv; |
886 | struct ib_send_wr *bad_send_wr; | ||
887 | struct cm_req_msg *req_msg; | 883 | struct cm_req_msg *req_msg; |
888 | unsigned long flags; | 884 | unsigned long flags; |
889 | int ret; | 885 | int ret; |
@@ -936,7 +932,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, | |||
936 | req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; | 932 | req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; |
937 | cm_format_req(req_msg, cm_id_priv, param); | 933 | cm_format_req(req_msg, cm_id_priv, param); |
938 | cm_id_priv->tid = req_msg->hdr.tid; | 934 | cm_id_priv->tid = req_msg->hdr.tid; |
939 | cm_id_priv->msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms; | 935 | cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms; |
940 | cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT; | 936 | cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT; |
941 | 937 | ||
942 | cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg); | 938 | cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg); |
@@ -945,8 +941,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, | |||
945 | cm_req_get_primary_local_ack_timeout(req_msg); | 941 | cm_req_get_primary_local_ack_timeout(req_msg); |
946 | 942 | ||
947 | spin_lock_irqsave(&cm_id_priv->lock, flags); | 943 | spin_lock_irqsave(&cm_id_priv->lock, flags); |
948 | ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, | 944 | ret = ib_post_send_mad(cm_id_priv->msg, NULL); |
949 | &cm_id_priv->msg->send_wr, &bad_send_wr); | ||
950 | if (ret) { | 945 | if (ret) { |
951 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 946 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
952 | goto error2; | 947 | goto error2; |
@@ -969,7 +964,6 @@ static int cm_issue_rej(struct cm_port *port, | |||
969 | void *ari, u8 ari_length) | 964 | void *ari, u8 ari_length) |
970 | { | 965 | { |
971 | struct ib_mad_send_buf *msg = NULL; | 966 | struct ib_mad_send_buf *msg = NULL; |
972 | struct ib_send_wr *bad_send_wr; | ||
973 | struct cm_rej_msg *rej_msg, *rcv_msg; | 967 | struct cm_rej_msg *rej_msg, *rcv_msg; |
974 | int ret; | 968 | int ret; |
975 | 969 | ||
@@ -992,7 +986,7 @@ static int cm_issue_rej(struct cm_port *port, | |||
992 | memcpy(rej_msg->ari, ari, ari_length); | 986 | memcpy(rej_msg->ari, ari, ari_length); |
993 | } | 987 | } |
994 | 988 | ||
995 | ret = ib_post_send_mad(port->mad_agent, &msg->send_wr, &bad_send_wr); | 989 | ret = ib_post_send_mad(msg, NULL); |
996 | if (ret) | 990 | if (ret) |
997 | cm_free_msg(msg); | 991 | cm_free_msg(msg); |
998 | 992 | ||
@@ -1172,7 +1166,6 @@ static void cm_dup_req_handler(struct cm_work *work, | |||
1172 | struct cm_id_private *cm_id_priv) | 1166 | struct cm_id_private *cm_id_priv) |
1173 | { | 1167 | { |
1174 | struct ib_mad_send_buf *msg = NULL; | 1168 | struct ib_mad_send_buf *msg = NULL; |
1175 | struct ib_send_wr *bad_send_wr; | ||
1176 | unsigned long flags; | 1169 | unsigned long flags; |
1177 | int ret; | 1170 | int ret; |
1178 | 1171 | ||
@@ -1201,8 +1194,7 @@ static void cm_dup_req_handler(struct cm_work *work, | |||
1201 | } | 1194 | } |
1202 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 1195 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
1203 | 1196 | ||
1204 | ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr, | 1197 | ret = ib_post_send_mad(msg, NULL); |
1205 | &bad_send_wr); | ||
1206 | if (ret) | 1198 | if (ret) |
1207 | goto free; | 1199 | goto free; |
1208 | return; | 1200 | return; |
@@ -1367,7 +1359,6 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id, | |||
1367 | struct cm_id_private *cm_id_priv; | 1359 | struct cm_id_private *cm_id_priv; |
1368 | struct ib_mad_send_buf *msg; | 1360 | struct ib_mad_send_buf *msg; |
1369 | struct cm_rep_msg *rep_msg; | 1361 | struct cm_rep_msg *rep_msg; |
1370 | struct ib_send_wr *bad_send_wr; | ||
1371 | unsigned long flags; | 1362 | unsigned long flags; |
1372 | int ret; | 1363 | int ret; |
1373 | 1364 | ||
@@ -1389,11 +1380,10 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id, | |||
1389 | 1380 | ||
1390 | rep_msg = (struct cm_rep_msg *) msg->mad; | 1381 | rep_msg = (struct cm_rep_msg *) msg->mad; |
1391 | cm_format_rep(rep_msg, cm_id_priv, param); | 1382 | cm_format_rep(rep_msg, cm_id_priv, param); |
1392 | msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms; | 1383 | msg->timeout_ms = cm_id_priv->timeout_ms; |
1393 | msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT; | 1384 | msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT; |
1394 | 1385 | ||
1395 | ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, | 1386 | ret = ib_post_send_mad(msg, NULL); |
1396 | &msg->send_wr, &bad_send_wr); | ||
1397 | if (ret) { | 1387 | if (ret) { |
1398 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 1388 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
1399 | cm_free_msg(msg); | 1389 | cm_free_msg(msg); |
@@ -1431,7 +1421,6 @@ int ib_send_cm_rtu(struct ib_cm_id *cm_id, | |||
1431 | { | 1421 | { |
1432 | struct cm_id_private *cm_id_priv; | 1422 | struct cm_id_private *cm_id_priv; |
1433 | struct ib_mad_send_buf *msg; | 1423 | struct ib_mad_send_buf *msg; |
1434 | struct ib_send_wr *bad_send_wr; | ||
1435 | unsigned long flags; | 1424 | unsigned long flags; |
1436 | void *data; | 1425 | void *data; |
1437 | int ret; | 1426 | int ret; |
@@ -1458,8 +1447,7 @@ int ib_send_cm_rtu(struct ib_cm_id *cm_id, | |||
1458 | cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, | 1447 | cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, |
1459 | private_data, private_data_len); | 1448 | private_data, private_data_len); |
1460 | 1449 | ||
1461 | ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, | 1450 | ret = ib_post_send_mad(msg, NULL); |
1462 | &msg->send_wr, &bad_send_wr); | ||
1463 | if (ret) { | 1451 | if (ret) { |
1464 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 1452 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
1465 | cm_free_msg(msg); | 1453 | cm_free_msg(msg); |
@@ -1504,7 +1492,6 @@ static void cm_dup_rep_handler(struct cm_work *work) | |||
1504 | struct cm_id_private *cm_id_priv; | 1492 | struct cm_id_private *cm_id_priv; |
1505 | struct cm_rep_msg *rep_msg; | 1493 | struct cm_rep_msg *rep_msg; |
1506 | struct ib_mad_send_buf *msg = NULL; | 1494 | struct ib_mad_send_buf *msg = NULL; |
1507 | struct ib_send_wr *bad_send_wr; | ||
1508 | unsigned long flags; | 1495 | unsigned long flags; |
1509 | int ret; | 1496 | int ret; |
1510 | 1497 | ||
@@ -1532,8 +1519,7 @@ static void cm_dup_rep_handler(struct cm_work *work) | |||
1532 | goto unlock; | 1519 | goto unlock; |
1533 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 1520 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
1534 | 1521 | ||
1535 | ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr, | 1522 | ret = ib_post_send_mad(msg, NULL); |
1536 | &bad_send_wr); | ||
1537 | if (ret) | 1523 | if (ret) |
1538 | goto free; | 1524 | goto free; |
1539 | goto deref; | 1525 | goto deref; |
@@ -1601,8 +1587,7 @@ static int cm_rep_handler(struct cm_work *work) | |||
1601 | 1587 | ||
1602 | /* todo: handle peer_to_peer */ | 1588 | /* todo: handle peer_to_peer */ |
1603 | 1589 | ||
1604 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, | 1590 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); |
1605 | (unsigned long) cm_id_priv->msg); | ||
1606 | ret = atomic_inc_and_test(&cm_id_priv->work_count); | 1591 | ret = atomic_inc_and_test(&cm_id_priv->work_count); |
1607 | if (!ret) | 1592 | if (!ret) |
1608 | list_add_tail(&work->list, &cm_id_priv->work_list); | 1593 | list_add_tail(&work->list, &cm_id_priv->work_list); |
@@ -1636,8 +1621,7 @@ static int cm_establish_handler(struct cm_work *work) | |||
1636 | goto out; | 1621 | goto out; |
1637 | } | 1622 | } |
1638 | 1623 | ||
1639 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, | 1624 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); |
1640 | (unsigned long) cm_id_priv->msg); | ||
1641 | ret = atomic_inc_and_test(&cm_id_priv->work_count); | 1625 | ret = atomic_inc_and_test(&cm_id_priv->work_count); |
1642 | if (!ret) | 1626 | if (!ret) |
1643 | list_add_tail(&work->list, &cm_id_priv->work_list); | 1627 | list_add_tail(&work->list, &cm_id_priv->work_list); |
@@ -1676,8 +1660,7 @@ static int cm_rtu_handler(struct cm_work *work) | |||
1676 | } | 1660 | } |
1677 | cm_id_priv->id.state = IB_CM_ESTABLISHED; | 1661 | cm_id_priv->id.state = IB_CM_ESTABLISHED; |
1678 | 1662 | ||
1679 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, | 1663 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); |
1680 | (unsigned long) cm_id_priv->msg); | ||
1681 | ret = atomic_inc_and_test(&cm_id_priv->work_count); | 1664 | ret = atomic_inc_and_test(&cm_id_priv->work_count); |
1682 | if (!ret) | 1665 | if (!ret) |
1683 | list_add_tail(&work->list, &cm_id_priv->work_list); | 1666 | list_add_tail(&work->list, &cm_id_priv->work_list); |
@@ -1714,7 +1697,6 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id, | |||
1714 | { | 1697 | { |
1715 | struct cm_id_private *cm_id_priv; | 1698 | struct cm_id_private *cm_id_priv; |
1716 | struct ib_mad_send_buf *msg; | 1699 | struct ib_mad_send_buf *msg; |
1717 | struct ib_send_wr *bad_send_wr; | ||
1718 | unsigned long flags; | 1700 | unsigned long flags; |
1719 | int ret; | 1701 | int ret; |
1720 | 1702 | ||
@@ -1736,11 +1718,10 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id, | |||
1736 | 1718 | ||
1737 | cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, | 1719 | cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, |
1738 | private_data, private_data_len); | 1720 | private_data, private_data_len); |
1739 | msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms; | 1721 | msg->timeout_ms = cm_id_priv->timeout_ms; |
1740 | msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT; | 1722 | msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT; |
1741 | 1723 | ||
1742 | ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, | 1724 | ret = ib_post_send_mad(msg, NULL); |
1743 | &msg->send_wr, &bad_send_wr); | ||
1744 | if (ret) { | 1725 | if (ret) { |
1745 | cm_enter_timewait(cm_id_priv); | 1726 | cm_enter_timewait(cm_id_priv); |
1746 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 1727 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
@@ -1774,7 +1755,6 @@ int ib_send_cm_drep(struct ib_cm_id *cm_id, | |||
1774 | { | 1755 | { |
1775 | struct cm_id_private *cm_id_priv; | 1756 | struct cm_id_private *cm_id_priv; |
1776 | struct ib_mad_send_buf *msg; | 1757 | struct ib_mad_send_buf *msg; |
1777 | struct ib_send_wr *bad_send_wr; | ||
1778 | unsigned long flags; | 1758 | unsigned long flags; |
1779 | void *data; | 1759 | void *data; |
1780 | int ret; | 1760 | int ret; |
@@ -1804,8 +1784,7 @@ int ib_send_cm_drep(struct ib_cm_id *cm_id, | |||
1804 | cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, | 1784 | cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, |
1805 | private_data, private_data_len); | 1785 | private_data, private_data_len); |
1806 | 1786 | ||
1807 | ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr, | 1787 | ret = ib_post_send_mad(msg, NULL); |
1808 | &bad_send_wr); | ||
1809 | if (ret) { | 1788 | if (ret) { |
1810 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 1789 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
1811 | cm_free_msg(msg); | 1790 | cm_free_msg(msg); |
@@ -1822,7 +1801,6 @@ static int cm_dreq_handler(struct cm_work *work) | |||
1822 | struct cm_id_private *cm_id_priv; | 1801 | struct cm_id_private *cm_id_priv; |
1823 | struct cm_dreq_msg *dreq_msg; | 1802 | struct cm_dreq_msg *dreq_msg; |
1824 | struct ib_mad_send_buf *msg = NULL; | 1803 | struct ib_mad_send_buf *msg = NULL; |
1825 | struct ib_send_wr *bad_send_wr; | ||
1826 | unsigned long flags; | 1804 | unsigned long flags; |
1827 | int ret; | 1805 | int ret; |
1828 | 1806 | ||
@@ -1841,8 +1819,7 @@ static int cm_dreq_handler(struct cm_work *work) | |||
1841 | switch (cm_id_priv->id.state) { | 1819 | switch (cm_id_priv->id.state) { |
1842 | case IB_CM_REP_SENT: | 1820 | case IB_CM_REP_SENT: |
1843 | case IB_CM_DREQ_SENT: | 1821 | case IB_CM_DREQ_SENT: |
1844 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, | 1822 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); |
1845 | (unsigned long) cm_id_priv->msg); | ||
1846 | break; | 1823 | break; |
1847 | case IB_CM_ESTABLISHED: | 1824 | case IB_CM_ESTABLISHED: |
1848 | case IB_CM_MRA_REP_RCVD: | 1825 | case IB_CM_MRA_REP_RCVD: |
@@ -1856,8 +1833,7 @@ static int cm_dreq_handler(struct cm_work *work) | |||
1856 | cm_id_priv->private_data_len); | 1833 | cm_id_priv->private_data_len); |
1857 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 1834 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
1858 | 1835 | ||
1859 | if (ib_post_send_mad(cm_id_priv->av.port->mad_agent, | 1836 | if (ib_post_send_mad(msg, NULL)) |
1860 | &msg->send_wr, &bad_send_wr)) | ||
1861 | cm_free_msg(msg); | 1837 | cm_free_msg(msg); |
1862 | goto deref; | 1838 | goto deref; |
1863 | default: | 1839 | default: |
@@ -1904,8 +1880,7 @@ static int cm_drep_handler(struct cm_work *work) | |||
1904 | } | 1880 | } |
1905 | cm_enter_timewait(cm_id_priv); | 1881 | cm_enter_timewait(cm_id_priv); |
1906 | 1882 | ||
1907 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, | 1883 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); |
1908 | (unsigned long) cm_id_priv->msg); | ||
1909 | ret = atomic_inc_and_test(&cm_id_priv->work_count); | 1884 | ret = atomic_inc_and_test(&cm_id_priv->work_count); |
1910 | if (!ret) | 1885 | if (!ret) |
1911 | list_add_tail(&work->list, &cm_id_priv->work_list); | 1886 | list_add_tail(&work->list, &cm_id_priv->work_list); |
@@ -1930,7 +1905,6 @@ int ib_send_cm_rej(struct ib_cm_id *cm_id, | |||
1930 | { | 1905 | { |
1931 | struct cm_id_private *cm_id_priv; | 1906 | struct cm_id_private *cm_id_priv; |
1932 | struct ib_mad_send_buf *msg; | 1907 | struct ib_mad_send_buf *msg; |
1933 | struct ib_send_wr *bad_send_wr; | ||
1934 | unsigned long flags; | 1908 | unsigned long flags; |
1935 | int ret; | 1909 | int ret; |
1936 | 1910 | ||
@@ -1974,8 +1948,7 @@ int ib_send_cm_rej(struct ib_cm_id *cm_id, | |||
1974 | if (ret) | 1948 | if (ret) |
1975 | goto out; | 1949 | goto out; |
1976 | 1950 | ||
1977 | ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, | 1951 | ret = ib_post_send_mad(msg, NULL); |
1978 | &msg->send_wr, &bad_send_wr); | ||
1979 | if (ret) | 1952 | if (ret) |
1980 | cm_free_msg(msg); | 1953 | cm_free_msg(msg); |
1981 | 1954 | ||
@@ -2051,8 +2024,7 @@ static int cm_rej_handler(struct cm_work *work) | |||
2051 | case IB_CM_MRA_REQ_RCVD: | 2024 | case IB_CM_MRA_REQ_RCVD: |
2052 | case IB_CM_REP_SENT: | 2025 | case IB_CM_REP_SENT: |
2053 | case IB_CM_MRA_REP_RCVD: | 2026 | case IB_CM_MRA_REP_RCVD: |
2054 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, | 2027 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); |
2055 | (unsigned long) cm_id_priv->msg); | ||
2056 | /* fall through */ | 2028 | /* fall through */ |
2057 | case IB_CM_REQ_RCVD: | 2029 | case IB_CM_REQ_RCVD: |
2058 | case IB_CM_MRA_REQ_SENT: | 2030 | case IB_CM_MRA_REQ_SENT: |
@@ -2062,8 +2034,7 @@ static int cm_rej_handler(struct cm_work *work) | |||
2062 | cm_reset_to_idle(cm_id_priv); | 2034 | cm_reset_to_idle(cm_id_priv); |
2063 | break; | 2035 | break; |
2064 | case IB_CM_DREQ_SENT: | 2036 | case IB_CM_DREQ_SENT: |
2065 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, | 2037 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); |
2066 | (unsigned long) cm_id_priv->msg); | ||
2067 | /* fall through */ | 2038 | /* fall through */ |
2068 | case IB_CM_REP_RCVD: | 2039 | case IB_CM_REP_RCVD: |
2069 | case IB_CM_MRA_REP_SENT: | 2040 | case IB_CM_MRA_REP_SENT: |
@@ -2098,7 +2069,6 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id, | |||
2098 | { | 2069 | { |
2099 | struct cm_id_private *cm_id_priv; | 2070 | struct cm_id_private *cm_id_priv; |
2100 | struct ib_mad_send_buf *msg; | 2071 | struct ib_mad_send_buf *msg; |
2101 | struct ib_send_wr *bad_send_wr; | ||
2102 | void *data; | 2072 | void *data; |
2103 | unsigned long flags; | 2073 | unsigned long flags; |
2104 | int ret; | 2074 | int ret; |
@@ -2122,8 +2092,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id, | |||
2122 | cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, | 2092 | cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, |
2123 | CM_MSG_RESPONSE_REQ, service_timeout, | 2093 | CM_MSG_RESPONSE_REQ, service_timeout, |
2124 | private_data, private_data_len); | 2094 | private_data, private_data_len); |
2125 | ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, | 2095 | ret = ib_post_send_mad(msg, NULL); |
2126 | &msg->send_wr, &bad_send_wr); | ||
2127 | if (ret) | 2096 | if (ret) |
2128 | goto error2; | 2097 | goto error2; |
2129 | cm_id->state = IB_CM_MRA_REQ_SENT; | 2098 | cm_id->state = IB_CM_MRA_REQ_SENT; |
@@ -2136,8 +2105,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id, | |||
2136 | cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, | 2105 | cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, |
2137 | CM_MSG_RESPONSE_REP, service_timeout, | 2106 | CM_MSG_RESPONSE_REP, service_timeout, |
2138 | private_data, private_data_len); | 2107 | private_data, private_data_len); |
2139 | ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, | 2108 | ret = ib_post_send_mad(msg, NULL); |
2140 | &msg->send_wr, &bad_send_wr); | ||
2141 | if (ret) | 2109 | if (ret) |
2142 | goto error2; | 2110 | goto error2; |
2143 | cm_id->state = IB_CM_MRA_REP_SENT; | 2111 | cm_id->state = IB_CM_MRA_REP_SENT; |
@@ -2150,8 +2118,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id, | |||
2150 | cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, | 2118 | cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, |
2151 | CM_MSG_RESPONSE_OTHER, service_timeout, | 2119 | CM_MSG_RESPONSE_OTHER, service_timeout, |
2152 | private_data, private_data_len); | 2120 | private_data, private_data_len); |
2153 | ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, | 2121 | ret = ib_post_send_mad(msg, NULL); |
2154 | &msg->send_wr, &bad_send_wr); | ||
2155 | if (ret) | 2122 | if (ret) |
2156 | goto error2; | 2123 | goto error2; |
2157 | cm_id->lap_state = IB_CM_MRA_LAP_SENT; | 2124 | cm_id->lap_state = IB_CM_MRA_LAP_SENT; |
@@ -2213,14 +2180,14 @@ static int cm_mra_handler(struct cm_work *work) | |||
2213 | case IB_CM_REQ_SENT: | 2180 | case IB_CM_REQ_SENT: |
2214 | if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ || | 2181 | if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ || |
2215 | ib_modify_mad(cm_id_priv->av.port->mad_agent, | 2182 | ib_modify_mad(cm_id_priv->av.port->mad_agent, |
2216 | (unsigned long) cm_id_priv->msg, timeout)) | 2183 | cm_id_priv->msg, timeout)) |
2217 | goto out; | 2184 | goto out; |
2218 | cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD; | 2185 | cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD; |
2219 | break; | 2186 | break; |
2220 | case IB_CM_REP_SENT: | 2187 | case IB_CM_REP_SENT: |
2221 | if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP || | 2188 | if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP || |
2222 | ib_modify_mad(cm_id_priv->av.port->mad_agent, | 2189 | ib_modify_mad(cm_id_priv->av.port->mad_agent, |
2223 | (unsigned long) cm_id_priv->msg, timeout)) | 2190 | cm_id_priv->msg, timeout)) |
2224 | goto out; | 2191 | goto out; |
2225 | cm_id_priv->id.state = IB_CM_MRA_REP_RCVD; | 2192 | cm_id_priv->id.state = IB_CM_MRA_REP_RCVD; |
2226 | break; | 2193 | break; |
@@ -2228,7 +2195,7 @@ static int cm_mra_handler(struct cm_work *work) | |||
2228 | if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER || | 2195 | if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER || |
2229 | cm_id_priv->id.lap_state != IB_CM_LAP_SENT || | 2196 | cm_id_priv->id.lap_state != IB_CM_LAP_SENT || |
2230 | ib_modify_mad(cm_id_priv->av.port->mad_agent, | 2197 | ib_modify_mad(cm_id_priv->av.port->mad_agent, |
2231 | (unsigned long) cm_id_priv->msg, timeout)) | 2198 | cm_id_priv->msg, timeout)) |
2232 | goto out; | 2199 | goto out; |
2233 | cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD; | 2200 | cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD; |
2234 | break; | 2201 | break; |
@@ -2291,7 +2258,6 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id, | |||
2291 | { | 2258 | { |
2292 | struct cm_id_private *cm_id_priv; | 2259 | struct cm_id_private *cm_id_priv; |
2293 | struct ib_mad_send_buf *msg; | 2260 | struct ib_mad_send_buf *msg; |
2294 | struct ib_send_wr *bad_send_wr; | ||
2295 | unsigned long flags; | 2261 | unsigned long flags; |
2296 | int ret; | 2262 | int ret; |
2297 | 2263 | ||
@@ -2312,11 +2278,10 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id, | |||
2312 | 2278 | ||
2313 | cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv, | 2279 | cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv, |
2314 | alternate_path, private_data, private_data_len); | 2280 | alternate_path, private_data, private_data_len); |
2315 | msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms; | 2281 | msg->timeout_ms = cm_id_priv->timeout_ms; |
2316 | msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED; | 2282 | msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED; |
2317 | 2283 | ||
2318 | ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, | 2284 | ret = ib_post_send_mad(msg, NULL); |
2319 | &msg->send_wr, &bad_send_wr); | ||
2320 | if (ret) { | 2285 | if (ret) { |
2321 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 2286 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
2322 | cm_free_msg(msg); | 2287 | cm_free_msg(msg); |
@@ -2360,7 +2325,6 @@ static int cm_lap_handler(struct cm_work *work) | |||
2360 | struct cm_lap_msg *lap_msg; | 2325 | struct cm_lap_msg *lap_msg; |
2361 | struct ib_cm_lap_event_param *param; | 2326 | struct ib_cm_lap_event_param *param; |
2362 | struct ib_mad_send_buf *msg = NULL; | 2327 | struct ib_mad_send_buf *msg = NULL; |
2363 | struct ib_send_wr *bad_send_wr; | ||
2364 | unsigned long flags; | 2328 | unsigned long flags; |
2365 | int ret; | 2329 | int ret; |
2366 | 2330 | ||
@@ -2394,8 +2358,7 @@ static int cm_lap_handler(struct cm_work *work) | |||
2394 | cm_id_priv->private_data_len); | 2358 | cm_id_priv->private_data_len); |
2395 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 2359 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
2396 | 2360 | ||
2397 | if (ib_post_send_mad(cm_id_priv->av.port->mad_agent, | 2361 | if (ib_post_send_mad(msg, NULL)) |
2398 | &msg->send_wr, &bad_send_wr)) | ||
2399 | cm_free_msg(msg); | 2362 | cm_free_msg(msg); |
2400 | goto deref; | 2363 | goto deref; |
2401 | default: | 2364 | default: |
@@ -2451,7 +2414,6 @@ int ib_send_cm_apr(struct ib_cm_id *cm_id, | |||
2451 | { | 2414 | { |
2452 | struct cm_id_private *cm_id_priv; | 2415 | struct cm_id_private *cm_id_priv; |
2453 | struct ib_mad_send_buf *msg; | 2416 | struct ib_mad_send_buf *msg; |
2454 | struct ib_send_wr *bad_send_wr; | ||
2455 | unsigned long flags; | 2417 | unsigned long flags; |
2456 | int ret; | 2418 | int ret; |
2457 | 2419 | ||
@@ -2474,8 +2436,7 @@ int ib_send_cm_apr(struct ib_cm_id *cm_id, | |||
2474 | 2436 | ||
2475 | cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status, | 2437 | cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status, |
2476 | info, info_length, private_data, private_data_len); | 2438 | info, info_length, private_data, private_data_len); |
2477 | ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, | 2439 | ret = ib_post_send_mad(msg, NULL); |
2478 | &msg->send_wr, &bad_send_wr); | ||
2479 | if (ret) { | 2440 | if (ret) { |
2480 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 2441 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
2481 | cm_free_msg(msg); | 2442 | cm_free_msg(msg); |
@@ -2514,8 +2475,7 @@ static int cm_apr_handler(struct cm_work *work) | |||
2514 | goto out; | 2475 | goto out; |
2515 | } | 2476 | } |
2516 | cm_id_priv->id.lap_state = IB_CM_LAP_IDLE; | 2477 | cm_id_priv->id.lap_state = IB_CM_LAP_IDLE; |
2517 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, | 2478 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); |
2518 | (unsigned long) cm_id_priv->msg); | ||
2519 | cm_id_priv->msg = NULL; | 2479 | cm_id_priv->msg = NULL; |
2520 | 2480 | ||
2521 | ret = atomic_inc_and_test(&cm_id_priv->work_count); | 2481 | ret = atomic_inc_and_test(&cm_id_priv->work_count); |
@@ -2590,7 +2550,6 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, | |||
2590 | { | 2550 | { |
2591 | struct cm_id_private *cm_id_priv; | 2551 | struct cm_id_private *cm_id_priv; |
2592 | struct ib_mad_send_buf *msg; | 2552 | struct ib_mad_send_buf *msg; |
2593 | struct ib_send_wr *bad_send_wr; | ||
2594 | unsigned long flags; | 2553 | unsigned long flags; |
2595 | int ret; | 2554 | int ret; |
2596 | 2555 | ||
@@ -2613,13 +2572,12 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, | |||
2613 | 2572 | ||
2614 | cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv, | 2573 | cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv, |
2615 | param); | 2574 | param); |
2616 | msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms; | 2575 | msg->timeout_ms = cm_id_priv->timeout_ms; |
2617 | msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT; | 2576 | msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT; |
2618 | 2577 | ||
2619 | spin_lock_irqsave(&cm_id_priv->lock, flags); | 2578 | spin_lock_irqsave(&cm_id_priv->lock, flags); |
2620 | if (cm_id->state == IB_CM_IDLE) | 2579 | if (cm_id->state == IB_CM_IDLE) |
2621 | ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, | 2580 | ret = ib_post_send_mad(msg, NULL); |
2622 | &msg->send_wr, &bad_send_wr); | ||
2623 | else | 2581 | else |
2624 | ret = -EINVAL; | 2582 | ret = -EINVAL; |
2625 | 2583 | ||
@@ -2733,7 +2691,6 @@ int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, | |||
2733 | { | 2691 | { |
2734 | struct cm_id_private *cm_id_priv; | 2692 | struct cm_id_private *cm_id_priv; |
2735 | struct ib_mad_send_buf *msg; | 2693 | struct ib_mad_send_buf *msg; |
2736 | struct ib_send_wr *bad_send_wr; | ||
2737 | unsigned long flags; | 2694 | unsigned long flags; |
2738 | int ret; | 2695 | int ret; |
2739 | 2696 | ||
@@ -2755,8 +2712,7 @@ int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, | |||
2755 | 2712 | ||
2756 | cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, | 2713 | cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, |
2757 | param); | 2714 | param); |
2758 | ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, | 2715 | ret = ib_post_send_mad(msg, NULL); |
2759 | &msg->send_wr, &bad_send_wr); | ||
2760 | if (ret) { | 2716 | if (ret) { |
2761 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 2717 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
2762 | cm_free_msg(msg); | 2718 | cm_free_msg(msg); |
@@ -2809,8 +2765,7 @@ static int cm_sidr_rep_handler(struct cm_work *work) | |||
2809 | goto out; | 2765 | goto out; |
2810 | } | 2766 | } |
2811 | cm_id_priv->id.state = IB_CM_IDLE; | 2767 | cm_id_priv->id.state = IB_CM_IDLE; |
2812 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, | 2768 | ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); |
2813 | (unsigned long) cm_id_priv->msg); | ||
2814 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 2769 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
2815 | 2770 | ||
2816 | cm_format_sidr_rep_event(work); | 2771 | cm_format_sidr_rep_event(work); |
@@ -2878,9 +2833,7 @@ discard: | |||
2878 | static void cm_send_handler(struct ib_mad_agent *mad_agent, | 2833 | static void cm_send_handler(struct ib_mad_agent *mad_agent, |
2879 | struct ib_mad_send_wc *mad_send_wc) | 2834 | struct ib_mad_send_wc *mad_send_wc) |
2880 | { | 2835 | { |
2881 | struct ib_mad_send_buf *msg; | 2836 | struct ib_mad_send_buf *msg = mad_send_wc->send_buf; |
2882 | |||
2883 | msg = (struct ib_mad_send_buf *)(unsigned long)mad_send_wc->wr_id; | ||
2884 | 2837 | ||
2885 | switch (mad_send_wc->status) { | 2838 | switch (mad_send_wc->status) { |
2886 | case IB_WC_SUCCESS: | 2839 | case IB_WC_SUCCESS: |
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index af302e830561..88f9f8c9eacc 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -579,7 +579,7 @@ static void dequeue_mad(struct ib_mad_list_head *mad_list) | |||
579 | } | 579 | } |
580 | 580 | ||
581 | static void snoop_send(struct ib_mad_qp_info *qp_info, | 581 | static void snoop_send(struct ib_mad_qp_info *qp_info, |
582 | struct ib_send_wr *send_wr, | 582 | struct ib_mad_send_buf *send_buf, |
583 | struct ib_mad_send_wc *mad_send_wc, | 583 | struct ib_mad_send_wc *mad_send_wc, |
584 | int mad_snoop_flags) | 584 | int mad_snoop_flags) |
585 | { | 585 | { |
@@ -597,7 +597,7 @@ static void snoop_send(struct ib_mad_qp_info *qp_info, | |||
597 | atomic_inc(&mad_snoop_priv->refcount); | 597 | atomic_inc(&mad_snoop_priv->refcount); |
598 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | 598 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); |
599 | mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent, | 599 | mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent, |
600 | send_wr, mad_send_wc); | 600 | send_buf, mad_send_wc); |
601 | if (atomic_dec_and_test(&mad_snoop_priv->refcount)) | 601 | if (atomic_dec_and_test(&mad_snoop_priv->refcount)) |
602 | wake_up(&mad_snoop_priv->wait); | 602 | wake_up(&mad_snoop_priv->wait); |
603 | spin_lock_irqsave(&qp_info->snoop_lock, flags); | 603 | spin_lock_irqsave(&qp_info->snoop_lock, flags); |
@@ -654,10 +654,10 @@ static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num, | |||
654 | * Return < 0 if error | 654 | * Return < 0 if error |
655 | */ | 655 | */ |
656 | static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, | 656 | static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, |
657 | struct ib_smp *smp, | 657 | struct ib_mad_send_wr_private *mad_send_wr) |
658 | struct ib_send_wr *send_wr) | ||
659 | { | 658 | { |
660 | int ret; | 659 | int ret; |
660 | struct ib_smp *smp = mad_send_wr->send_buf.mad; | ||
661 | unsigned long flags; | 661 | unsigned long flags; |
662 | struct ib_mad_local_private *local; | 662 | struct ib_mad_local_private *local; |
663 | struct ib_mad_private *mad_priv; | 663 | struct ib_mad_private *mad_priv; |
@@ -666,6 +666,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, | |||
666 | struct ib_device *device = mad_agent_priv->agent.device; | 666 | struct ib_device *device = mad_agent_priv->agent.device; |
667 | u8 port_num = mad_agent_priv->agent.port_num; | 667 | u8 port_num = mad_agent_priv->agent.port_num; |
668 | struct ib_wc mad_wc; | 668 | struct ib_wc mad_wc; |
669 | struct ib_send_wr *send_wr = &mad_send_wr->send_wr; | ||
669 | 670 | ||
670 | if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) { | 671 | if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) { |
671 | ret = -EINVAL; | 672 | ret = -EINVAL; |
@@ -745,13 +746,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, | |||
745 | goto out; | 746 | goto out; |
746 | } | 747 | } |
747 | 748 | ||
748 | local->send_wr = *send_wr; | 749 | local->mad_send_wr = mad_send_wr; |
749 | local->send_wr.sg_list = local->sg_list; | ||
750 | memcpy(local->sg_list, send_wr->sg_list, | ||
751 | sizeof *send_wr->sg_list * send_wr->num_sge); | ||
752 | local->send_wr.next = NULL; | ||
753 | local->tid = send_wr->wr.ud.mad_hdr->tid; | ||
754 | local->wr_id = send_wr->wr_id; | ||
755 | /* Reference MAD agent until send side of local completion handled */ | 750 | /* Reference MAD agent until send side of local completion handled */ |
756 | atomic_inc(&mad_agent_priv->refcount); | 751 | atomic_inc(&mad_agent_priv->refcount); |
757 | /* Queue local completion to local list */ | 752 | /* Queue local completion to local list */ |
@@ -781,17 +776,17 @@ static int get_buf_length(int hdr_len, int data_len) | |||
781 | 776 | ||
782 | struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, | 777 | struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, |
783 | u32 remote_qpn, u16 pkey_index, | 778 | u32 remote_qpn, u16 pkey_index, |
784 | struct ib_ah *ah, int rmpp_active, | 779 | int rmpp_active, |
785 | int hdr_len, int data_len, | 780 | int hdr_len, int data_len, |
786 | gfp_t gfp_mask) | 781 | gfp_t gfp_mask) |
787 | { | 782 | { |
788 | struct ib_mad_agent_private *mad_agent_priv; | 783 | struct ib_mad_agent_private *mad_agent_priv; |
789 | struct ib_mad_send_buf *send_buf; | 784 | struct ib_mad_send_wr_private *mad_send_wr; |
790 | int buf_size; | 785 | int buf_size; |
791 | void *buf; | 786 | void *buf; |
792 | 787 | ||
793 | mad_agent_priv = container_of(mad_agent, | 788 | mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, |
794 | struct ib_mad_agent_private, agent); | 789 | agent); |
795 | buf_size = get_buf_length(hdr_len, data_len); | 790 | buf_size = get_buf_length(hdr_len, data_len); |
796 | 791 | ||
797 | if ((!mad_agent->rmpp_version && | 792 | if ((!mad_agent->rmpp_version && |
@@ -799,45 +794,40 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, | |||
799 | (!rmpp_active && buf_size > sizeof(struct ib_mad))) | 794 | (!rmpp_active && buf_size > sizeof(struct ib_mad))) |
800 | return ERR_PTR(-EINVAL); | 795 | return ERR_PTR(-EINVAL); |
801 | 796 | ||
802 | buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask); | 797 | buf = kmalloc(sizeof *mad_send_wr + buf_size, gfp_mask); |
803 | if (!buf) | 798 | if (!buf) |
804 | return ERR_PTR(-ENOMEM); | 799 | return ERR_PTR(-ENOMEM); |
805 | memset(buf, 0, sizeof *send_buf + buf_size); | 800 | memset(buf, 0, sizeof *mad_send_wr + buf_size); |
806 | 801 | ||
807 | send_buf = buf + buf_size; | 802 | mad_send_wr = buf + buf_size; |
808 | send_buf->mad = buf; | 803 | mad_send_wr->send_buf.mad = buf; |
809 | 804 | ||
810 | send_buf->sge.addr = dma_map_single(mad_agent->device->dma_device, | 805 | mad_send_wr->mad_agent_priv = mad_agent_priv; |
811 | buf, buf_size, DMA_TO_DEVICE); | 806 | mad_send_wr->sg_list[0].length = buf_size; |
812 | pci_unmap_addr_set(send_buf, mapping, send_buf->sge.addr); | 807 | mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey; |
813 | send_buf->sge.length = buf_size; | 808 | |
814 | send_buf->sge.lkey = mad_agent->mr->lkey; | 809 | mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr; |
815 | 810 | mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list; | |
816 | send_buf->send_wr.wr_id = (unsigned long) send_buf; | 811 | mad_send_wr->send_wr.num_sge = 1; |
817 | send_buf->send_wr.sg_list = &send_buf->sge; | 812 | mad_send_wr->send_wr.opcode = IB_WR_SEND; |
818 | send_buf->send_wr.num_sge = 1; | 813 | mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED; |
819 | send_buf->send_wr.opcode = IB_WR_SEND; | 814 | mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn; |
820 | send_buf->send_wr.send_flags = IB_SEND_SIGNALED; | 815 | mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY; |
821 | send_buf->send_wr.wr.ud.ah = ah; | 816 | mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index; |
822 | send_buf->send_wr.wr.ud.mad_hdr = &send_buf->mad->mad_hdr; | ||
823 | send_buf->send_wr.wr.ud.remote_qpn = remote_qpn; | ||
824 | send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY; | ||
825 | send_buf->send_wr.wr.ud.pkey_index = pkey_index; | ||
826 | 817 | ||
827 | if (rmpp_active) { | 818 | if (rmpp_active) { |
828 | struct ib_rmpp_mad *rmpp_mad; | 819 | struct ib_rmpp_mad *rmpp_mad = mad_send_wr->send_buf.mad; |
829 | rmpp_mad = (struct ib_rmpp_mad *)send_buf->mad; | ||
830 | rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(hdr_len - | 820 | rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(hdr_len - |
831 | offsetof(struct ib_rmpp_mad, data) + data_len); | 821 | IB_MGMT_RMPP_HDR + data_len); |
832 | rmpp_mad->rmpp_hdr.rmpp_version = mad_agent->rmpp_version; | 822 | rmpp_mad->rmpp_hdr.rmpp_version = mad_agent->rmpp_version; |
833 | rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA; | 823 | rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA; |
834 | ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, | 824 | ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, |
835 | IB_MGMT_RMPP_FLAG_ACTIVE); | 825 | IB_MGMT_RMPP_FLAG_ACTIVE); |
836 | } | 826 | } |
837 | 827 | ||
838 | send_buf->mad_agent = mad_agent; | 828 | mad_send_wr->send_buf.mad_agent = mad_agent; |
839 | atomic_inc(&mad_agent_priv->refcount); | 829 | atomic_inc(&mad_agent_priv->refcount); |
840 | return send_buf; | 830 | return &mad_send_wr->send_buf; |
841 | } | 831 | } |
842 | EXPORT_SYMBOL(ib_create_send_mad); | 832 | EXPORT_SYMBOL(ib_create_send_mad); |
843 | 833 | ||
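With the address handle and DMA mapping moved out of allocation, a caller of the reworked ib_create_send_mad() only supplies the remote QPN, P_Key index, RMPP flag and buffer sizes, then fills in per-send parameters on the returned buffer. A minimal sketch under those assumptions follows; everything prefixed "my_"/"MY_" is a placeholder, only the ib_* calls and ib_mad_send_buf fields come from this patch.

/*
 * Hypothetical caller of the reworked send-buffer API (sketch only).
 */
static int my_send_one_mad(struct ib_mad_agent *agent, struct ib_ah *ah,
			   u32 remote_qpn, u16 pkey_index)
{
	struct ib_mad_send_buf *msg;
	int ret;

	/* No struct ib_ah argument and no dma_map_single() here any more. */
	msg = ib_create_send_mad(agent, remote_qpn, pkey_index,
				 0 /* rmpp_active */,
				 MY_HDR_LEN, MY_DATA_LEN, GFP_KERNEL);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	build_my_mad(msg->mad);		/* msg->mad points at the MAD to fill in */

	/* Per-send parameters now live on the send buffer itself. */
	msg->ah = ah;
	msg->timeout_ms = 100;
	msg->retries = 3;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);
	return ret;
}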
@@ -847,10 +837,6 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf) | |||
847 | 837 | ||
848 | mad_agent_priv = container_of(send_buf->mad_agent, | 838 | mad_agent_priv = container_of(send_buf->mad_agent, |
849 | struct ib_mad_agent_private, agent); | 839 | struct ib_mad_agent_private, agent); |
850 | |||
851 | dma_unmap_single(send_buf->mad_agent->device->dma_device, | ||
852 | pci_unmap_addr(send_buf, mapping), | ||
853 | send_buf->sge.length, DMA_TO_DEVICE); | ||
854 | kfree(send_buf->mad); | 840 | kfree(send_buf->mad); |
855 | 841 | ||
856 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 842 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) |
@@ -861,8 +847,10 @@ EXPORT_SYMBOL(ib_free_send_mad); | |||
861 | int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) | 847 | int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) |
862 | { | 848 | { |
863 | struct ib_mad_qp_info *qp_info; | 849 | struct ib_mad_qp_info *qp_info; |
864 | struct ib_send_wr *bad_send_wr; | ||
865 | struct list_head *list; | 850 | struct list_head *list; |
851 | struct ib_send_wr *bad_send_wr; | ||
852 | struct ib_mad_agent *mad_agent; | ||
853 | struct ib_sge *sge; | ||
866 | unsigned long flags; | 854 | unsigned long flags; |
867 | int ret; | 855 | int ret; |
868 | 856 | ||
@@ -871,10 +859,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) | |||
871 | mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list; | 859 | mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list; |
872 | mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; | 860 | mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; |
873 | 861 | ||
862 | mad_agent = mad_send_wr->send_buf.mad_agent; | ||
863 | sge = mad_send_wr->sg_list; | ||
864 | sge->addr = dma_map_single(mad_agent->device->dma_device, | ||
865 | mad_send_wr->send_buf.mad, sge->length, | ||
866 | DMA_TO_DEVICE); | ||
867 | pci_unmap_addr_set(mad_send_wr, mapping, sge->addr); | ||
868 | |||
874 | spin_lock_irqsave(&qp_info->send_queue.lock, flags); | 869 | spin_lock_irqsave(&qp_info->send_queue.lock, flags); |
875 | if (qp_info->send_queue.count < qp_info->send_queue.max_active) { | 870 | if (qp_info->send_queue.count < qp_info->send_queue.max_active) { |
876 | ret = ib_post_send(mad_send_wr->mad_agent_priv->agent.qp, | 871 | ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr, |
877 | &mad_send_wr->send_wr, &bad_send_wr); | 872 | &bad_send_wr); |
878 | list = &qp_info->send_queue.list; | 873 | list = &qp_info->send_queue.list; |
879 | } else { | 874 | } else { |
880 | ret = 0; | 875 | ret = 0; |
@@ -886,6 +881,11 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) | |||
886 | list_add_tail(&mad_send_wr->mad_list.list, list); | 881 | list_add_tail(&mad_send_wr->mad_list.list, list); |
887 | } | 882 | } |
888 | spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); | 883 | spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); |
884 | if (ret) | ||
885 | dma_unmap_single(mad_agent->device->dma_device, | ||
886 | pci_unmap_addr(mad_send_wr, mapping), | ||
887 | sge->length, DMA_TO_DEVICE); | ||
888 | |||
889 | return ret; | 889 | return ret; |
890 | } | 890 | } |
891 | 891 | ||
@@ -893,45 +893,28 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) | |||
893 | * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated | 893 | * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated |
894 | * with the registered client | 894 | * with the registered client |
895 | */ | 895 | */ |
896 | int ib_post_send_mad(struct ib_mad_agent *mad_agent, | 896 | int ib_post_send_mad(struct ib_mad_send_buf *send_buf, |
897 | struct ib_send_wr *send_wr, | 897 | struct ib_mad_send_buf **bad_send_buf) |
898 | struct ib_send_wr **bad_send_wr) | ||
899 | { | 898 | { |
900 | int ret = -EINVAL; | ||
901 | struct ib_mad_agent_private *mad_agent_priv; | 899 | struct ib_mad_agent_private *mad_agent_priv; |
902 | 900 | struct ib_mad_send_buf *next_send_buf; | |
903 | /* Validate supplied parameters */ | 901 | struct ib_mad_send_wr_private *mad_send_wr; |
904 | if (!bad_send_wr) | 902 | unsigned long flags; |
905 | goto error1; | 903 | int ret = -EINVAL; |
906 | |||
907 | if (!mad_agent || !send_wr) | ||
908 | goto error2; | ||
909 | |||
910 | if (!mad_agent->send_handler) | ||
911 | goto error2; | ||
912 | |||
913 | mad_agent_priv = container_of(mad_agent, | ||
914 | struct ib_mad_agent_private, | ||
915 | agent); | ||
916 | 904 | ||
917 | /* Walk list of send WRs and post each on send list */ | 905 | /* Walk list of send WRs and post each on send list */ |
918 | while (send_wr) { | 906 | for (; send_buf; send_buf = next_send_buf) { |
919 | unsigned long flags; | ||
920 | struct ib_send_wr *next_send_wr; | ||
921 | struct ib_mad_send_wr_private *mad_send_wr; | ||
922 | struct ib_smp *smp; | ||
923 | |||
924 | /* Validate more parameters */ | ||
925 | if (send_wr->num_sge > IB_MAD_SEND_REQ_MAX_SG) | ||
926 | goto error2; | ||
927 | 907 | ||
928 | if (send_wr->wr.ud.timeout_ms && !mad_agent->recv_handler) | 908 | mad_send_wr = container_of(send_buf, |
929 | goto error2; | 909 | struct ib_mad_send_wr_private, |
910 | send_buf); | ||
911 | mad_agent_priv = mad_send_wr->mad_agent_priv; | ||
930 | 912 | ||
931 | if (!send_wr->wr.ud.mad_hdr) { | 913 | if (!send_buf->mad_agent->send_handler || |
932 | printk(KERN_ERR PFX "MAD header must be supplied " | 914 | (send_buf->timeout_ms && |
933 | "in WR %p\n", send_wr); | 915 | !send_buf->mad_agent->recv_handler)) { |
934 | goto error2; | 916 | ret = -EINVAL; |
917 | goto error; | ||
935 | } | 918 | } |
936 | 919 | ||
937 | /* | 920 | /* |
@@ -939,40 +922,24 @@ int ib_post_send_mad(struct ib_mad_agent *mad_agent, | |||
939 | * current one completes, and the user modifies the work | 922 | * current one completes, and the user modifies the work |
940 | * request associated with the completion | 923 | * request associated with the completion |
941 | */ | 924 | */ |
942 | next_send_wr = (struct ib_send_wr *)send_wr->next; | 925 | next_send_buf = send_buf->next; |
926 | mad_send_wr->send_wr.wr.ud.ah = send_buf->ah; | ||
943 | 927 | ||
944 | smp = (struct ib_smp *)send_wr->wr.ud.mad_hdr; | 928 | if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class == |
945 | if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { | 929 | IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { |
946 | ret = handle_outgoing_dr_smp(mad_agent_priv, smp, | 930 | ret = handle_outgoing_dr_smp(mad_agent_priv, |
947 | send_wr); | 931 | mad_send_wr); |
948 | if (ret < 0) /* error */ | 932 | if (ret < 0) /* error */ |
949 | goto error2; | 933 | goto error; |
950 | else if (ret == 1) /* locally consumed */ | 934 | else if (ret == 1) /* locally consumed */ |
951 | goto next; | 935 | continue; |
952 | } | 936 | } |
953 | 937 | ||
954 | /* Allocate MAD send WR tracking structure */ | 938 | mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid; |
955 | mad_send_wr = kmalloc(sizeof *mad_send_wr, GFP_ATOMIC); | ||
956 | if (!mad_send_wr) { | ||
957 | printk(KERN_ERR PFX "No memory for " | ||
958 | "ib_mad_send_wr_private\n"); | ||
959 | ret = -ENOMEM; | ||
960 | goto error2; | ||
961 | } | ||
962 | memset(mad_send_wr, 0, sizeof *mad_send_wr); | ||
963 | |||
964 | mad_send_wr->send_wr = *send_wr; | ||
965 | mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list; | ||
966 | memcpy(mad_send_wr->sg_list, send_wr->sg_list, | ||
967 | sizeof *send_wr->sg_list * send_wr->num_sge); | ||
968 | mad_send_wr->wr_id = send_wr->wr_id; | ||
969 | mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid; | ||
970 | mad_send_wr->mad_agent_priv = mad_agent_priv; | ||
971 | /* Timeout will be updated after send completes */ | 939 | /* Timeout will be updated after send completes */ |
972 | mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr. | 940 | mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms); |
973 | ud.timeout_ms); | 941 | mad_send_wr->retries = send_buf->retries; |
974 | mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries; | 942 | /* Reference for work request to QP + response */ |
975 | /* One reference for each work request to QP + response */ | ||
976 | mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); | 943 | mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); |
977 | mad_send_wr->status = IB_WC_SUCCESS; | 944 | mad_send_wr->status = IB_WC_SUCCESS; |
978 | 945 | ||
@@ -995,16 +962,13 @@ int ib_post_send_mad(struct ib_mad_agent *mad_agent, | |||
995 | list_del(&mad_send_wr->agent_list); | 962 | list_del(&mad_send_wr->agent_list); |
996 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 963 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
997 | atomic_dec(&mad_agent_priv->refcount); | 964 | atomic_dec(&mad_agent_priv->refcount); |
998 | goto error2; | 965 | goto error; |
999 | } | 966 | } |
1000 | next: | ||
1001 | send_wr = next_send_wr; | ||
1002 | } | 967 | } |
1003 | return 0; | 968 | return 0; |
1004 | 969 | error: | |
1005 | error2: | 970 | if (bad_send_buf) |
1006 | *bad_send_wr = send_wr; | 971 | *bad_send_buf = send_buf; |
1007 | error1: | ||
1008 | return ret; | 972 | return ret; |
1009 | } | 973 | } |
1010 | EXPORT_SYMBOL(ib_post_send_mad); | 974 | EXPORT_SYMBOL(ib_post_send_mad); |
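ib_post_send_mad() now walks a list of send buffers linked through send_buf->next and reports the first unposted buffer through bad_send_buf, replacing the old ib_send_wr chain. A hedged sketch of posting a two-buffer chain under those semantics; msg1 and msg2 are assumed to have been prepared as in the allocation sketch above.

/* Sketch only: msg1/msg2 already have ah, timeout_ms and retries set. */
struct ib_mad_send_buf *bad;
int ret;

msg1->next = msg2;
msg2->next = NULL;

ret = ib_post_send_mad(msg1, &bad);
if (ret) {
	/* "bad" and anything after it was not posted and still belongs to us. */
	while (bad) {
		struct ib_mad_send_buf *next = bad->next;

		ib_free_send_mad(bad);
		bad = next;
	}
}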
@@ -1447,8 +1411,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv, | |||
1447 | * of MAD. | 1411 | * of MAD. |
1448 | */ | 1412 | */ |
1449 | hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32; | 1413 | hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32; |
1450 | list_for_each_entry(entry, &port_priv->agent_list, | 1414 | list_for_each_entry(entry, &port_priv->agent_list, agent_list) { |
1451 | agent_list) { | ||
1452 | if (entry->agent.hi_tid == hi_tid) { | 1415 | if (entry->agent.hi_tid == hi_tid) { |
1453 | mad_agent = entry; | 1416 | mad_agent = entry; |
1454 | break; | 1417 | break; |
@@ -1571,8 +1534,7 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid) | |||
1571 | */ | 1534 | */ |
1572 | list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, | 1535 | list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, |
1573 | agent_list) { | 1536 | agent_list) { |
1574 | if (is_data_mad(mad_agent_priv, | 1537 | if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && |
1575 | mad_send_wr->send_wr.wr.ud.mad_hdr) && | ||
1576 | mad_send_wr->tid == tid && mad_send_wr->timeout) { | 1538 | mad_send_wr->tid == tid && mad_send_wr->timeout) { |
1577 | /* Verify request has not been canceled */ | 1539 | /* Verify request has not been canceled */ |
1578 | return (mad_send_wr->status == IB_WC_SUCCESS) ? | 1540 | return (mad_send_wr->status == IB_WC_SUCCESS) ? |
@@ -1628,14 +1590,14 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | |||
1628 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 1590 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
1629 | 1591 | ||
1630 | /* Defined behavior is to complete response before request */ | 1592 | /* Defined behavior is to complete response before request */ |
1631 | mad_recv_wc->wc->wr_id = mad_send_wr->wr_id; | 1593 | mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf; |
1632 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, | 1594 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, |
1633 | mad_recv_wc); | 1595 | mad_recv_wc); |
1634 | atomic_dec(&mad_agent_priv->refcount); | 1596 | atomic_dec(&mad_agent_priv->refcount); |
1635 | 1597 | ||
1636 | mad_send_wc.status = IB_WC_SUCCESS; | 1598 | mad_send_wc.status = IB_WC_SUCCESS; |
1637 | mad_send_wc.vendor_err = 0; | 1599 | mad_send_wc.vendor_err = 0; |
1638 | mad_send_wc.wr_id = mad_send_wr->wr_id; | 1600 | mad_send_wc.send_buf = &mad_send_wr->send_buf; |
1639 | ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); | 1601 | ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); |
1640 | } else { | 1602 | } else { |
1641 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, | 1603 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, |
@@ -1728,11 +1690,11 @@ local: | |||
1728 | if (ret & IB_MAD_RESULT_CONSUMED) | 1690 | if (ret & IB_MAD_RESULT_CONSUMED) |
1729 | goto out; | 1691 | goto out; |
1730 | if (ret & IB_MAD_RESULT_REPLY) { | 1692 | if (ret & IB_MAD_RESULT_REPLY) { |
1731 | /* Send response */ | 1693 | agent_send_response(&response->mad.mad, |
1732 | if (!agent_send(response, &recv->grh, wc, | 1694 | &recv->grh, wc, |
1733 | port_priv->device, | 1695 | port_priv->device, |
1734 | port_priv->port_num)) | 1696 | port_priv->port_num, |
1735 | response = NULL; | 1697 | qp_info->qp->qp_num); |
1736 | goto out; | 1698 | goto out; |
1737 | } | 1699 | } |
1738 | } | 1700 | } |
@@ -1866,15 +1828,15 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, | |||
1866 | 1828 | ||
1867 | if (mad_send_wr->status != IB_WC_SUCCESS ) | 1829 | if (mad_send_wr->status != IB_WC_SUCCESS ) |
1868 | mad_send_wc->status = mad_send_wr->status; | 1830 | mad_send_wc->status = mad_send_wr->status; |
1869 | if (ret != IB_RMPP_RESULT_INTERNAL) | 1831 | if (ret == IB_RMPP_RESULT_INTERNAL) |
1832 | ib_rmpp_send_handler(mad_send_wc); | ||
1833 | else | ||
1870 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | 1834 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, |
1871 | mad_send_wc); | 1835 | mad_send_wc); |
1872 | 1836 | ||
1873 | /* Release reference on agent taken when sending */ | 1837 | /* Release reference on agent taken when sending */ |
1874 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 1838 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) |
1875 | wake_up(&mad_agent_priv->wait); | 1839 | wake_up(&mad_agent_priv->wait); |
1876 | |||
1877 | kfree(mad_send_wr); | ||
1878 | return; | 1840 | return; |
1879 | done: | 1841 | done: |
1880 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 1842 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
@@ -1888,6 +1850,7 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, | |||
1888 | struct ib_mad_qp_info *qp_info; | 1850 | struct ib_mad_qp_info *qp_info; |
1889 | struct ib_mad_queue *send_queue; | 1851 | struct ib_mad_queue *send_queue; |
1890 | struct ib_send_wr *bad_send_wr; | 1852 | struct ib_send_wr *bad_send_wr; |
1853 | struct ib_mad_send_wc mad_send_wc; | ||
1891 | unsigned long flags; | 1854 | unsigned long flags; |
1892 | int ret; | 1855 | int ret; |
1893 | 1856 | ||
@@ -1898,6 +1861,9 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, | |||
1898 | qp_info = send_queue->qp_info; | 1861 | qp_info = send_queue->qp_info; |
1899 | 1862 | ||
1900 | retry: | 1863 | retry: |
1864 | dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device, | ||
1865 | pci_unmap_addr(mad_send_wr, mapping), | ||
1866 | mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); | ||
1901 | queued_send_wr = NULL; | 1867 | queued_send_wr = NULL; |
1902 | spin_lock_irqsave(&send_queue->lock, flags); | 1868 | spin_lock_irqsave(&send_queue->lock, flags); |
1903 | list_del(&mad_list->list); | 1869 | list_del(&mad_list->list); |
@@ -1914,17 +1880,17 @@ retry: | |||
1914 | } | 1880 | } |
1915 | spin_unlock_irqrestore(&send_queue->lock, flags); | 1881 | spin_unlock_irqrestore(&send_queue->lock, flags); |
1916 | 1882 | ||
1917 | /* Restore client wr_id in WC and complete send */ | 1883 | mad_send_wc.send_buf = &mad_send_wr->send_buf; |
1918 | wc->wr_id = mad_send_wr->wr_id; | 1884 | mad_send_wc.status = wc->status; |
1885 | mad_send_wc.vendor_err = wc->vendor_err; | ||
1919 | if (atomic_read(&qp_info->snoop_count)) | 1886 | if (atomic_read(&qp_info->snoop_count)) |
1920 | snoop_send(qp_info, &mad_send_wr->send_wr, | 1887 | snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, |
1921 | (struct ib_mad_send_wc *)wc, | ||
1922 | IB_MAD_SNOOP_SEND_COMPLETIONS); | 1888 | IB_MAD_SNOOP_SEND_COMPLETIONS); |
1923 | ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc *)wc); | 1889 | ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); |
1924 | 1890 | ||
1925 | if (queued_send_wr) { | 1891 | if (queued_send_wr) { |
1926 | ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr, | 1892 | ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr, |
1927 | &bad_send_wr); | 1893 | &bad_send_wr); |
1928 | if (ret) { | 1894 | if (ret) { |
1929 | printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret); | 1895 | printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret); |
1930 | mad_send_wr = queued_send_wr; | 1896 | mad_send_wr = queued_send_wr; |
@@ -2066,38 +2032,37 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) | |||
2066 | 2032 | ||
2067 | list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, | 2033 | list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, |
2068 | &cancel_list, agent_list) { | 2034 | &cancel_list, agent_list) { |
2069 | mad_send_wc.wr_id = mad_send_wr->wr_id; | 2035 | mad_send_wc.send_buf = &mad_send_wr->send_buf; |
2036 | list_del(&mad_send_wr->agent_list); | ||
2070 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | 2037 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, |
2071 | &mad_send_wc); | 2038 | &mad_send_wc); |
2072 | |||
2073 | list_del(&mad_send_wr->agent_list); | ||
2074 | kfree(mad_send_wr); | ||
2075 | atomic_dec(&mad_agent_priv->refcount); | 2039 | atomic_dec(&mad_agent_priv->refcount); |
2076 | } | 2040 | } |
2077 | } | 2041 | } |
2078 | 2042 | ||
2079 | static struct ib_mad_send_wr_private* | 2043 | static struct ib_mad_send_wr_private* |
2080 | find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, u64 wr_id) | 2044 | find_send_wr(struct ib_mad_agent_private *mad_agent_priv, |
2045 | struct ib_mad_send_buf *send_buf) | ||
2081 | { | 2046 | { |
2082 | struct ib_mad_send_wr_private *mad_send_wr; | 2047 | struct ib_mad_send_wr_private *mad_send_wr; |
2083 | 2048 | ||
2084 | list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, | 2049 | list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, |
2085 | agent_list) { | 2050 | agent_list) { |
2086 | if (mad_send_wr->wr_id == wr_id) | 2051 | if (&mad_send_wr->send_buf == send_buf) |
2087 | return mad_send_wr; | 2052 | return mad_send_wr; |
2088 | } | 2053 | } |
2089 | 2054 | ||
2090 | list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, | 2055 | list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, |
2091 | agent_list) { | 2056 | agent_list) { |
2092 | if (is_data_mad(mad_agent_priv, | 2057 | if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && |
2093 | mad_send_wr->send_wr.wr.ud.mad_hdr) && | 2058 | &mad_send_wr->send_buf == send_buf) |
2094 | mad_send_wr->wr_id == wr_id) | ||
2095 | return mad_send_wr; | 2059 | return mad_send_wr; |
2096 | } | 2060 | } |
2097 | return NULL; | 2061 | return NULL; |
2098 | } | 2062 | } |
2099 | 2063 | ||
2100 | int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms) | 2064 | int ib_modify_mad(struct ib_mad_agent *mad_agent, |
2065 | struct ib_mad_send_buf *send_buf, u32 timeout_ms) | ||
2101 | { | 2066 | { |
2102 | struct ib_mad_agent_private *mad_agent_priv; | 2067 | struct ib_mad_agent_private *mad_agent_priv; |
2103 | struct ib_mad_send_wr_private *mad_send_wr; | 2068 | struct ib_mad_send_wr_private *mad_send_wr; |
@@ -2107,7 +2072,7 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms) | |||
2107 | mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, | 2072 | mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, |
2108 | agent); | 2073 | agent); |
2109 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 2074 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
2110 | mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id); | 2075 | mad_send_wr = find_send_wr(mad_agent_priv, send_buf); |
2111 | if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { | 2076 | if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { |
2112 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 2077 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
2113 | return -EINVAL; | 2078 | return -EINVAL; |
@@ -2119,7 +2084,7 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms) | |||
2119 | mad_send_wr->refcount -= (mad_send_wr->timeout > 0); | 2084 | mad_send_wr->refcount -= (mad_send_wr->timeout > 0); |
2120 | } | 2085 | } |
2121 | 2086 | ||
2122 | mad_send_wr->send_wr.wr.ud.timeout_ms = timeout_ms; | 2087 | mad_send_wr->send_buf.timeout_ms = timeout_ms; |
2123 | if (active) | 2088 | if (active) |
2124 | mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); | 2089 | mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); |
2125 | else | 2090 | else |
@@ -2130,9 +2095,10 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms) | |||
2130 | } | 2095 | } |
2131 | EXPORT_SYMBOL(ib_modify_mad); | 2096 | EXPORT_SYMBOL(ib_modify_mad); |
2132 | 2097 | ||
2133 | void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id) | 2098 | void ib_cancel_mad(struct ib_mad_agent *mad_agent, |
2099 | struct ib_mad_send_buf *send_buf) | ||
2134 | { | 2100 | { |
2135 | ib_modify_mad(mad_agent, wr_id, 0); | 2101 | ib_modify_mad(mad_agent, send_buf, 0); |
2136 | } | 2102 | } |
2137 | EXPORT_SYMBOL(ib_cancel_mad); | 2103 | EXPORT_SYMBOL(ib_cancel_mad); |
2138 | 2104 | ||
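ib_modify_mad() and ib_cancel_mad() now identify an outstanding request by its ib_mad_send_buf pointer rather than a 64-bit wr_id. Assuming msg is a buffer posted as in the sketches above:

/* Shorten the response timeout on an outstanding request... */
ret = ib_modify_mad(agent, msg, 50);

/* ...or cancel it outright (a timeout of 0 under the hood). */
ib_cancel_mad(agent, msg);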
@@ -2166,10 +2132,9 @@ static void local_completions(void *data) | |||
2166 | * Defined behavior is to complete response | 2132 | * Defined behavior is to complete response |
2167 | * before request | 2133 | * before request |
2168 | */ | 2134 | */ |
2169 | build_smp_wc(local->wr_id, | 2135 | build_smp_wc((unsigned long) local->mad_send_wr, |
2170 | be16_to_cpu(IB_LID_PERMISSIVE), | 2136 | be16_to_cpu(IB_LID_PERMISSIVE), |
2171 | 0 /* pkey index */, | 2137 | 0, recv_mad_agent->agent.port_num, &wc); |
2172 | recv_mad_agent->agent.port_num, &wc); | ||
2173 | 2138 | ||
2174 | local->mad_priv->header.recv_wc.wc = &wc; | 2139 | local->mad_priv->header.recv_wc.wc = &wc; |
2175 | local->mad_priv->header.recv_wc.mad_len = | 2140 | local->mad_priv->header.recv_wc.mad_len = |
@@ -2196,11 +2161,11 @@ local_send_completion: | |||
2196 | /* Complete send */ | 2161 | /* Complete send */ |
2197 | mad_send_wc.status = IB_WC_SUCCESS; | 2162 | mad_send_wc.status = IB_WC_SUCCESS; |
2198 | mad_send_wc.vendor_err = 0; | 2163 | mad_send_wc.vendor_err = 0; |
2199 | mad_send_wc.wr_id = local->wr_id; | 2164 | mad_send_wc.send_buf = &local->mad_send_wr->send_buf; |
2200 | if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) | 2165 | if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) |
2201 | snoop_send(mad_agent_priv->qp_info, &local->send_wr, | 2166 | snoop_send(mad_agent_priv->qp_info, |
2202 | &mad_send_wc, | 2167 | &local->mad_send_wr->send_buf, |
2203 | IB_MAD_SNOOP_SEND_COMPLETIONS); | 2168 | &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); |
2204 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | 2169 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, |
2205 | &mad_send_wc); | 2170 | &mad_send_wc); |
2206 | 2171 | ||
@@ -2221,8 +2186,7 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) | |||
2221 | if (!mad_send_wr->retries--) | 2186 | if (!mad_send_wr->retries--) |
2222 | return -ETIMEDOUT; | 2187 | return -ETIMEDOUT; |
2223 | 2188 | ||
2224 | mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_wr. | 2189 | mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); |
2225 | wr.ud.timeout_ms); | ||
2226 | 2190 | ||
2227 | if (mad_send_wr->mad_agent_priv->agent.rmpp_version) { | 2191 | if (mad_send_wr->mad_agent_priv->agent.rmpp_version) { |
2228 | ret = ib_retry_rmpp(mad_send_wr); | 2192 | ret = ib_retry_rmpp(mad_send_wr); |
@@ -2285,11 +2249,10 @@ static void timeout_sends(void *data) | |||
2285 | mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; | 2249 | mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; |
2286 | else | 2250 | else |
2287 | mad_send_wc.status = mad_send_wr->status; | 2251 | mad_send_wc.status = mad_send_wr->status; |
2288 | mad_send_wc.wr_id = mad_send_wr->wr_id; | 2252 | mad_send_wc.send_buf = &mad_send_wr->send_buf; |
2289 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | 2253 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, |
2290 | &mad_send_wc); | 2254 | &mad_send_wc); |
2291 | 2255 | ||
2292 | kfree(mad_send_wr); | ||
2293 | atomic_dec(&mad_agent_priv->refcount); | 2256 | atomic_dec(&mad_agent_priv->refcount); |
2294 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 2257 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
2295 | } | 2258 | } |
@@ -2761,7 +2724,6 @@ static int __init ib_mad_init_module(void) | |||
2761 | int ret; | 2724 | int ret; |
2762 | 2725 | ||
2763 | spin_lock_init(&ib_mad_port_list_lock); | 2726 | spin_lock_init(&ib_mad_port_list_lock); |
2764 | spin_lock_init(&ib_agent_port_list_lock); | ||
2765 | 2727 | ||
2766 | ib_mad_cache = kmem_cache_create("ib_mad", | 2728 | ib_mad_cache = kmem_cache_create("ib_mad", |
2767 | sizeof(struct ib_mad_private), | 2729 | sizeof(struct ib_mad_private), |
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index f1ba794e0daa..570f78682af3 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h | |||
@@ -118,9 +118,10 @@ struct ib_mad_send_wr_private { | |||
118 | struct ib_mad_list_head mad_list; | 118 | struct ib_mad_list_head mad_list; |
119 | struct list_head agent_list; | 119 | struct list_head agent_list; |
120 | struct ib_mad_agent_private *mad_agent_priv; | 120 | struct ib_mad_agent_private *mad_agent_priv; |
121 | struct ib_mad_send_buf send_buf; | ||
122 | DECLARE_PCI_UNMAP_ADDR(mapping) | ||
121 | struct ib_send_wr send_wr; | 123 | struct ib_send_wr send_wr; |
122 | struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; | 124 | struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; |
123 | u64 wr_id; /* client WR ID */ | ||
124 | __be64 tid; | 125 | __be64 tid; |
125 | unsigned long timeout; | 126 | unsigned long timeout; |
126 | int retries; | 127 | int retries; |
@@ -141,10 +142,7 @@ struct ib_mad_local_private { | |||
141 | struct list_head completion_list; | 142 | struct list_head completion_list; |
142 | struct ib_mad_private *mad_priv; | 143 | struct ib_mad_private *mad_priv; |
143 | struct ib_mad_agent_private *recv_mad_agent; | 144 | struct ib_mad_agent_private *recv_mad_agent; |
144 | struct ib_send_wr send_wr; | 145 | struct ib_mad_send_wr_private *mad_send_wr; |
145 | struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; | ||
146 | u64 wr_id; /* client WR ID */ | ||
147 | __be64 tid; | ||
148 | }; | 146 | }; |
149 | 147 | ||
150 | struct ib_mad_mgmt_method_table { | 148 | struct ib_mad_mgmt_method_table { |
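Embedding the public ib_mad_send_buf inside ib_mad_send_wr_private is what lets the core drop the separate kmalloc of the tracking structure and the client-supplied wr_id: the private and public views convert into each other with container_of(), as the mad.c hunks above already do. Illustration only; send_buf stands for whatever buffer the client handed in.

/* Private tracking state is recovered from the public buffer... */
struct ib_mad_send_wr_private *mad_send_wr =
	container_of(send_buf, struct ib_mad_send_wr_private, send_buf);

/* ...and the public view is handed back out in completions. */
mad_send_wc.send_buf = &mad_send_wr->send_buf;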
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c index e23836d0e21b..ba112cd5f93c 100644 --- a/drivers/infiniband/core/mad_rmpp.c +++ b/drivers/infiniband/core/mad_rmpp.c | |||
@@ -103,12 +103,12 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent) | |||
103 | static int data_offset(u8 mgmt_class) | 103 | static int data_offset(u8 mgmt_class) |
104 | { | 104 | { |
105 | if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) | 105 | if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) |
106 | return offsetof(struct ib_sa_mad, data); | 106 | return IB_MGMT_SA_HDR; |
107 | else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && | 107 | else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && |
108 | (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) | 108 | (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) |
109 | return offsetof(struct ib_vendor_mad, data); | 109 | return IB_MGMT_VENDOR_HDR; |
110 | else | 110 | else |
111 | return offsetof(struct ib_rmpp_mad, data); | 111 | return IB_MGMT_RMPP_HDR; |
112 | } | 112 | } |
113 | 113 | ||
114 | static void format_ack(struct ib_rmpp_mad *ack, | 114 | static void format_ack(struct ib_rmpp_mad *ack, |
@@ -135,21 +135,18 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv, | |||
135 | struct ib_mad_recv_wc *recv_wc) | 135 | struct ib_mad_recv_wc *recv_wc) |
136 | { | 136 | { |
137 | struct ib_mad_send_buf *msg; | 137 | struct ib_mad_send_buf *msg; |
138 | struct ib_send_wr *bad_send_wr; | 138 | int ret; |
139 | int hdr_len, ret; | ||
140 | 139 | ||
141 | hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr); | ||
142 | msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, | 140 | msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, |
143 | recv_wc->wc->pkey_index, rmpp_recv->ah, 1, | 141 | recv_wc->wc->pkey_index, 1, IB_MGMT_RMPP_HDR, |
144 | hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len, | 142 | IB_MGMT_RMPP_DATA, GFP_KERNEL); |
145 | GFP_KERNEL); | ||
146 | if (!msg) | 143 | if (!msg) |
147 | return; | 144 | return; |
148 | 145 | ||
149 | format_ack((struct ib_rmpp_mad *) msg->mad, | 146 | format_ack(msg->mad, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, |
150 | (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv); | 147 | rmpp_recv); |
151 | ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr, | 148 | msg->ah = rmpp_recv->ah; |
152 | &bad_send_wr); | 149 | ret = ib_post_send_mad(msg, NULL); |
153 | if (ret) | 150 | if (ret) |
154 | ib_free_send_mad(msg); | 151 | ib_free_send_mad(msg); |
155 | } | 152 | } |
@@ -160,30 +157,31 @@ static int alloc_response_msg(struct ib_mad_agent *agent, | |||
160 | { | 157 | { |
161 | struct ib_mad_send_buf *m; | 158 | struct ib_mad_send_buf *m; |
162 | struct ib_ah *ah; | 159 | struct ib_ah *ah; |
163 | int hdr_len; | ||
164 | 160 | ||
165 | ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc, | 161 | ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc, |
166 | recv_wc->recv_buf.grh, agent->port_num); | 162 | recv_wc->recv_buf.grh, agent->port_num); |
167 | if (IS_ERR(ah)) | 163 | if (IS_ERR(ah)) |
168 | return PTR_ERR(ah); | 164 | return PTR_ERR(ah); |
169 | 165 | ||
170 | hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr); | ||
171 | m = ib_create_send_mad(agent, recv_wc->wc->src_qp, | 166 | m = ib_create_send_mad(agent, recv_wc->wc->src_qp, |
172 | recv_wc->wc->pkey_index, ah, 1, hdr_len, | 167 | recv_wc->wc->pkey_index, 1, |
173 | sizeof(struct ib_rmpp_mad) - hdr_len, | 168 | IB_MGMT_RMPP_HDR, IB_MGMT_RMPP_DATA, GFP_KERNEL); |
174 | GFP_KERNEL); | ||
175 | if (IS_ERR(m)) { | 169 | if (IS_ERR(m)) { |
176 | ib_destroy_ah(ah); | 170 | ib_destroy_ah(ah); |
177 | return PTR_ERR(m); | 171 | return PTR_ERR(m); |
178 | } | 172 | } |
173 | m->ah = ah; | ||
179 | *msg = m; | 174 | *msg = m; |
180 | return 0; | 175 | return 0; |
181 | } | 176 | } |
182 | 177 | ||
183 | static void free_msg(struct ib_mad_send_buf *msg) | 178 | void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc) |
184 | { | 179 | { |
185 | ib_destroy_ah(msg->send_wr.wr.ud.ah); | 180 | struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad; |
186 | ib_free_send_mad(msg); | 181 | |
182 | if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_ACK) | ||
183 | ib_destroy_ah(mad_send_wc->send_buf->ah); | ||
184 | ib_free_send_mad(mad_send_wc->send_buf); | ||
187 | } | 185 | } |
188 | 186 | ||
189 | static void nack_recv(struct ib_mad_agent_private *agent, | 187 | static void nack_recv(struct ib_mad_agent_private *agent, |
@@ -191,14 +189,13 @@ static void nack_recv(struct ib_mad_agent_private *agent, | |||
191 | { | 189 | { |
192 | struct ib_mad_send_buf *msg; | 190 | struct ib_mad_send_buf *msg; |
193 | struct ib_rmpp_mad *rmpp_mad; | 191 | struct ib_rmpp_mad *rmpp_mad; |
194 | struct ib_send_wr *bad_send_wr; | ||
195 | int ret; | 192 | int ret; |
196 | 193 | ||
197 | ret = alloc_response_msg(&agent->agent, recv_wc, &msg); | 194 | ret = alloc_response_msg(&agent->agent, recv_wc, &msg); |
198 | if (ret) | 195 | if (ret) |
199 | return; | 196 | return; |
200 | 197 | ||
201 | rmpp_mad = (struct ib_rmpp_mad *) msg->mad; | 198 | rmpp_mad = msg->mad; |
202 | memcpy(rmpp_mad, recv_wc->recv_buf.mad, | 199 | memcpy(rmpp_mad, recv_wc->recv_buf.mad, |
203 | data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class)); | 200 | data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class)); |
204 | 201 | ||
@@ -210,9 +207,11 @@ static void nack_recv(struct ib_mad_agent_private *agent, | |||
210 | rmpp_mad->rmpp_hdr.seg_num = 0; | 207 | rmpp_mad->rmpp_hdr.seg_num = 0; |
211 | rmpp_mad->rmpp_hdr.paylen_newwin = 0; | 208 | rmpp_mad->rmpp_hdr.paylen_newwin = 0; |
212 | 209 | ||
213 | ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr); | 210 | ret = ib_post_send_mad(msg, NULL); |
214 | if (ret) | 211 | if (ret) { |
215 | free_msg(msg); | 212 | ib_destroy_ah(msg->ah); |
213 | ib_free_send_mad(msg); | ||
214 | } | ||
216 | } | 215 | } |
217 | 216 | ||
218 | static void recv_timeout_handler(void *data) | 217 | static void recv_timeout_handler(void *data) |
@@ -585,7 +584,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr) | |||
585 | int timeout; | 584 | int timeout; |
586 | u32 paylen; | 585 | u32 paylen; |
587 | 586 | ||
588 | rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr; | 587 | rmpp_mad = mad_send_wr->send_buf.mad; |
589 | ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); | 588 | ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); |
590 | rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num); | 589 | rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num); |
591 | 590 | ||
@@ -612,7 +611,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr) | |||
612 | } | 611 | } |
613 | 612 | ||
614 | /* 2 seconds for an ACK until we can find the packet lifetime */ | 613 | /* 2 seconds for an ACK until we can find the packet lifetime */ |
615 | timeout = mad_send_wr->send_wr.wr.ud.timeout_ms; | 614 | timeout = mad_send_wr->send_buf.timeout_ms; |
616 | if (!timeout || timeout > 2000) | 615 | if (!timeout || timeout > 2000) |
617 | mad_send_wr->timeout = msecs_to_jiffies(2000); | 616 | mad_send_wr->timeout = msecs_to_jiffies(2000); |
618 | mad_send_wr->seg_num++; | 617 | mad_send_wr->seg_num++; |
@@ -640,7 +639,7 @@ static void abort_send(struct ib_mad_agent_private *agent, __be64 tid, | |||
640 | 639 | ||
641 | wc.status = IB_WC_REM_ABORT_ERR; | 640 | wc.status = IB_WC_REM_ABORT_ERR; |
642 | wc.vendor_err = rmpp_status; | 641 | wc.vendor_err = rmpp_status; |
643 | wc.wr_id = mad_send_wr->wr_id; | 642 | wc.send_buf = &mad_send_wr->send_buf; |
644 | ib_mad_complete_send_wr(mad_send_wr, &wc); | 643 | ib_mad_complete_send_wr(mad_send_wr, &wc); |
645 | return; | 644 | return; |
646 | out: | 645 | out: |
@@ -694,12 +693,12 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent, | |||
694 | 693 | ||
695 | if (seg_num > mad_send_wr->last_ack) { | 694 | if (seg_num > mad_send_wr->last_ack) { |
696 | mad_send_wr->last_ack = seg_num; | 695 | mad_send_wr->last_ack = seg_num; |
697 | mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries; | 696 | mad_send_wr->retries = mad_send_wr->send_buf.retries; |
698 | } | 697 | } |
699 | mad_send_wr->newwin = newwin; | 698 | mad_send_wr->newwin = newwin; |
700 | if (mad_send_wr->last_ack == mad_send_wr->total_seg) { | 699 | if (mad_send_wr->last_ack == mad_send_wr->total_seg) { |
701 | /* If no response is expected, the ACK completes the send */ | 700 | /* If no response is expected, the ACK completes the send */ |
702 | if (!mad_send_wr->send_wr.wr.ud.timeout_ms) { | 701 | if (!mad_send_wr->send_buf.timeout_ms) { |
703 | struct ib_mad_send_wc wc; | 702 | struct ib_mad_send_wc wc; |
704 | 703 | ||
705 | ib_mark_mad_done(mad_send_wr); | 704 | ib_mark_mad_done(mad_send_wr); |
@@ -707,13 +706,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent, | |||
707 | 706 | ||
708 | wc.status = IB_WC_SUCCESS; | 707 | wc.status = IB_WC_SUCCESS; |
709 | wc.vendor_err = 0; | 708 | wc.vendor_err = 0; |
710 | wc.wr_id = mad_send_wr->wr_id; | 709 | wc.send_buf = &mad_send_wr->send_buf; |
711 | ib_mad_complete_send_wr(mad_send_wr, &wc); | 710 | ib_mad_complete_send_wr(mad_send_wr, &wc); |
712 | return; | 711 | return; |
713 | } | 712 | } |
714 | if (mad_send_wr->refcount == 1) | 713 | if (mad_send_wr->refcount == 1) |
715 | ib_reset_mad_timeout(mad_send_wr, mad_send_wr-> | 714 | ib_reset_mad_timeout(mad_send_wr, |
716 | send_wr.wr.ud.timeout_ms); | 715 | mad_send_wr->send_buf.timeout_ms); |
717 | } else if (mad_send_wr->refcount == 1 && | 716 | } else if (mad_send_wr->refcount == 1 && |
718 | mad_send_wr->seg_num < mad_send_wr->newwin && | 717 | mad_send_wr->seg_num < mad_send_wr->newwin && |
719 | mad_send_wr->seg_num <= mad_send_wr->total_seg) { | 718 | mad_send_wr->seg_num <= mad_send_wr->total_seg) { |
@@ -842,7 +841,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr) | |||
842 | struct ib_rmpp_mad *rmpp_mad; | 841 | struct ib_rmpp_mad *rmpp_mad; |
843 | int i, total_len, ret; | 842 | int i, total_len, ret; |
844 | 843 | ||
845 | rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr; | 844 | rmpp_mad = mad_send_wr->send_buf.mad; |
846 | if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & | 845 | if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & |
847 | IB_MGMT_RMPP_FLAG_ACTIVE)) | 846 | IB_MGMT_RMPP_FLAG_ACTIVE)) |
848 | return IB_RMPP_RESULT_UNHANDLED; | 847 | return IB_RMPP_RESULT_UNHANDLED; |
@@ -863,7 +862,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr) | |||
863 | 862 | ||
864 | mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) / | 863 | mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) / |
865 | (sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset); | 864 | (sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset); |
866 | mad_send_wr->pad = total_len - offsetof(struct ib_rmpp_mad, data) - | 865 | mad_send_wr->pad = total_len - IB_MGMT_RMPP_HDR - |
867 | be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); | 866 | be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); |
868 | 867 | ||
869 | /* We need to wait for the final ACK even if there isn't a response */ | 868 | /* We need to wait for the final ACK even if there isn't a response */ |
@@ -878,23 +877,15 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr, | |||
878 | struct ib_mad_send_wc *mad_send_wc) | 877 | struct ib_mad_send_wc *mad_send_wc) |
879 | { | 878 | { |
880 | struct ib_rmpp_mad *rmpp_mad; | 879 | struct ib_rmpp_mad *rmpp_mad; |
881 | struct ib_mad_send_buf *msg; | ||
882 | int ret; | 880 | int ret; |
883 | 881 | ||
884 | rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr; | 882 | rmpp_mad = mad_send_wr->send_buf.mad; |
885 | if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & | 883 | if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & |
886 | IB_MGMT_RMPP_FLAG_ACTIVE)) | 884 | IB_MGMT_RMPP_FLAG_ACTIVE)) |
887 | return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */ | 885 | return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */ |
888 | 886 | ||
889 | if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) { | 887 | if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) |
890 | msg = (struct ib_mad_send_buf *) (unsigned long) | ||
891 | mad_send_wc->wr_id; | ||
892 | if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK) | ||
893 | ib_free_send_mad(msg); | ||
894 | else | ||
895 | free_msg(msg); | ||
896 | return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */ | 888 | return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */ |
897 | } | ||
898 | 889 | ||
899 | if (mad_send_wc->status != IB_WC_SUCCESS || | 890 | if (mad_send_wc->status != IB_WC_SUCCESS || |
900 | mad_send_wr->status != IB_WC_SUCCESS) | 891 | mad_send_wr->status != IB_WC_SUCCESS) |
@@ -905,7 +896,7 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr, | |||
905 | 896 | ||
906 | if (mad_send_wr->last_ack == mad_send_wr->total_seg) { | 897 | if (mad_send_wr->last_ack == mad_send_wr->total_seg) { |
907 | mad_send_wr->timeout = | 898 | mad_send_wr->timeout = |
908 | msecs_to_jiffies(mad_send_wr->send_wr.wr.ud.timeout_ms); | 899 | msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); |
909 | return IB_RMPP_RESULT_PROCESSED; /* Send done */ | 900 | return IB_RMPP_RESULT_PROCESSED; /* Send done */ |
910 | } | 901 | } |
911 | 902 | ||
@@ -926,7 +917,7 @@ int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr) | |||
926 | struct ib_rmpp_mad *rmpp_mad; | 917 | struct ib_rmpp_mad *rmpp_mad; |
927 | int ret; | 918 | int ret; |
928 | 919 | ||
929 | rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr; | 920 | rmpp_mad = mad_send_wr->send_buf.mad; |
930 | if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & | 921 | if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & |
931 | IB_MGMT_RMPP_FLAG_ACTIVE)) | 922 | IB_MGMT_RMPP_FLAG_ACTIVE)) |
932 | return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */ | 923 | return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */ |
diff --git a/drivers/infiniband/core/mad_rmpp.h b/drivers/infiniband/core/mad_rmpp.h index c4924dfb8e75..f0616fd22494 100644 --- a/drivers/infiniband/core/mad_rmpp.h +++ b/drivers/infiniband/core/mad_rmpp.h | |||
@@ -51,6 +51,8 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent, | |||
51 | int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr, | 51 | int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr, |
52 | struct ib_mad_send_wc *mad_send_wc); | 52 | struct ib_mad_send_wc *mad_send_wc); |
53 | 53 | ||
54 | void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc); | ||
55 | |||
54 | void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent); | 56 | void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent); |
55 | 57 | ||
56 | int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr); | 58 | int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr); |
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 0e5ef97f7637..89ce9dc210d4 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c | |||
@@ -73,11 +73,10 @@ struct ib_sa_device { | |||
73 | struct ib_sa_query { | 73 | struct ib_sa_query { |
74 | void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *); | 74 | void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *); |
75 | void (*release)(struct ib_sa_query *); | 75 | void (*release)(struct ib_sa_query *); |
76 | struct ib_sa_port *port; | 76 | struct ib_sa_port *port; |
77 | struct ib_sa_mad *mad; | 77 | struct ib_mad_send_buf *mad_buf; |
78 | struct ib_sa_sm_ah *sm_ah; | 78 | struct ib_sa_sm_ah *sm_ah; |
79 | DECLARE_PCI_UNMAP_ADDR(mapping) | 79 | int id; |
80 | int id; | ||
81 | }; | 80 | }; |
82 | 81 | ||
83 | struct ib_sa_service_query { | 82 | struct ib_sa_service_query { |
@@ -426,6 +425,7 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query) | |||
426 | { | 425 | { |
427 | unsigned long flags; | 426 | unsigned long flags; |
428 | struct ib_mad_agent *agent; | 427 | struct ib_mad_agent *agent; |
428 | struct ib_mad_send_buf *mad_buf; | ||
429 | 429 | ||
430 | spin_lock_irqsave(&idr_lock, flags); | 430 | spin_lock_irqsave(&idr_lock, flags); |
431 | if (idr_find(&query_idr, id) != query) { | 431 | if (idr_find(&query_idr, id) != query) { |
@@ -433,9 +433,10 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query) | |||
433 | return; | 433 | return; |
434 | } | 434 | } |
435 | agent = query->port->agent; | 435 | agent = query->port->agent; |
436 | mad_buf = query->mad_buf; | ||
436 | spin_unlock_irqrestore(&idr_lock, flags); | 437 | spin_unlock_irqrestore(&idr_lock, flags); |
437 | 438 | ||
438 | ib_cancel_mad(agent, id); | 439 | ib_cancel_mad(agent, mad_buf); |
439 | } | 440 | } |
440 | EXPORT_SYMBOL(ib_sa_cancel_query); | 441 | EXPORT_SYMBOL(ib_sa_cancel_query); |
441 | 442 | ||
@@ -457,71 +458,46 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent) | |||
457 | 458 | ||
458 | static int send_mad(struct ib_sa_query *query, int timeout_ms) | 459 | static int send_mad(struct ib_sa_query *query, int timeout_ms) |
459 | { | 460 | { |
460 | struct ib_sa_port *port = query->port; | ||
461 | unsigned long flags; | 461 | unsigned long flags; |
462 | int ret; | 462 | int ret, id; |
463 | struct ib_sge gather_list; | ||
464 | struct ib_send_wr *bad_wr, wr = { | ||
465 | .opcode = IB_WR_SEND, | ||
466 | .sg_list = &gather_list, | ||
467 | .num_sge = 1, | ||
468 | .send_flags = IB_SEND_SIGNALED, | ||
469 | .wr = { | ||
470 | .ud = { | ||
471 | .mad_hdr = &query->mad->mad_hdr, | ||
472 | .remote_qpn = 1, | ||
473 | .remote_qkey = IB_QP1_QKEY, | ||
474 | .timeout_ms = timeout_ms, | ||
475 | } | ||
476 | } | ||
477 | }; | ||
478 | 463 | ||
479 | retry: | 464 | retry: |
480 | if (!idr_pre_get(&query_idr, GFP_ATOMIC)) | 465 | if (!idr_pre_get(&query_idr, GFP_ATOMIC)) |
481 | return -ENOMEM; | 466 | return -ENOMEM; |
482 | spin_lock_irqsave(&idr_lock, flags); | 467 | spin_lock_irqsave(&idr_lock, flags); |
483 | ret = idr_get_new(&query_idr, query, &query->id); | 468 | ret = idr_get_new(&query_idr, query, &id); |
484 | spin_unlock_irqrestore(&idr_lock, flags); | 469 | spin_unlock_irqrestore(&idr_lock, flags); |
485 | if (ret == -EAGAIN) | 470 | if (ret == -EAGAIN) |
486 | goto retry; | 471 | goto retry; |
487 | if (ret) | 472 | if (ret) |
488 | return ret; | 473 | return ret; |
489 | 474 | ||
490 | wr.wr_id = query->id; | 475 | query->mad_buf->timeout_ms = timeout_ms; |
476 | query->mad_buf->context[0] = query; | ||
477 | query->id = id; | ||
491 | 478 | ||
492 | spin_lock_irqsave(&port->ah_lock, flags); | 479 | spin_lock_irqsave(&query->port->ah_lock, flags); |
493 | kref_get(&port->sm_ah->ref); | 480 | kref_get(&query->port->sm_ah->ref); |
494 | query->sm_ah = port->sm_ah; | 481 | query->sm_ah = query->port->sm_ah; |
495 | wr.wr.ud.ah = port->sm_ah->ah; | 482 | spin_unlock_irqrestore(&query->port->ah_lock, flags); |
496 | spin_unlock_irqrestore(&port->ah_lock, flags); | ||
497 | 483 | ||
498 | gather_list.addr = dma_map_single(port->agent->device->dma_device, | 484 | query->mad_buf->ah = query->sm_ah->ah; |
499 | query->mad, | ||
500 | sizeof (struct ib_sa_mad), | ||
501 | DMA_TO_DEVICE); | ||
502 | gather_list.length = sizeof (struct ib_sa_mad); | ||
503 | gather_list.lkey = port->agent->mr->lkey; | ||
504 | pci_unmap_addr_set(query, mapping, gather_list.addr); | ||
505 | 485 | ||
506 | ret = ib_post_send_mad(port->agent, &wr, &bad_wr); | 486 | ret = ib_post_send_mad(query->mad_buf, NULL); |
507 | if (ret) { | 487 | if (ret) { |
508 | dma_unmap_single(port->agent->device->dma_device, | ||
509 | pci_unmap_addr(query, mapping), | ||
510 | sizeof (struct ib_sa_mad), | ||
511 | DMA_TO_DEVICE); | ||
512 | kref_put(&query->sm_ah->ref, free_sm_ah); | ||
513 | spin_lock_irqsave(&idr_lock, flags); | 488 | spin_lock_irqsave(&idr_lock, flags); |
514 | idr_remove(&query_idr, query->id); | 489 | idr_remove(&query_idr, id); |
515 | spin_unlock_irqrestore(&idr_lock, flags); | 490 | spin_unlock_irqrestore(&idr_lock, flags); |
491 | |||
492 | kref_put(&query->sm_ah->ref, free_sm_ah); | ||
516 | } | 493 | } |
517 | 494 | ||
518 | /* | 495 | /* |
519 | * It's not safe to dereference query any more, because the | 496 | * It's not safe to dereference query any more, because the |
520 | * send may already have completed and freed the query in | 497 | * send may already have completed and freed the query in |
521 | * another context. So use wr.wr_id, which has a copy of the | 498 | * another context. |
522 | * query's id. | ||
523 | */ | 499 | */ |
524 | return ret ? ret : wr.wr_id; | 500 | return ret ? ret : id; |
525 | } | 501 | } |
526 | 502 | ||
527 | static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query, | 503 | static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query, |
@@ -543,7 +519,6 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query, | |||
543 | 519 | ||
544 | static void ib_sa_path_rec_release(struct ib_sa_query *sa_query) | 520 | static void ib_sa_path_rec_release(struct ib_sa_query *sa_query) |
545 | { | 521 | { |
546 | kfree(sa_query->mad); | ||
547 | kfree(container_of(sa_query, struct ib_sa_path_query, sa_query)); | 522 | kfree(container_of(sa_query, struct ib_sa_path_query, sa_query)); |
548 | } | 523 | } |
549 | 524 | ||
@@ -585,6 +560,7 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, | |||
585 | struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); | 560 | struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); |
586 | struct ib_sa_port *port; | 561 | struct ib_sa_port *port; |
587 | struct ib_mad_agent *agent; | 562 | struct ib_mad_agent *agent; |
563 | struct ib_sa_mad *mad; | ||
588 | int ret; | 564 | int ret; |
589 | 565 | ||
590 | if (!sa_dev) | 566 | if (!sa_dev) |
@@ -596,36 +572,44 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, | |||
596 | query = kmalloc(sizeof *query, gfp_mask); | 572 | query = kmalloc(sizeof *query, gfp_mask); |
597 | if (!query) | 573 | if (!query) |
598 | return -ENOMEM; | 574 | return -ENOMEM; |
599 | query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask); | 575 | |
600 | if (!query->sa_query.mad) { | 576 | query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0, |
601 | kfree(query); | 577 | 0, IB_MGMT_SA_HDR, |
602 | return -ENOMEM; | 578 | IB_MGMT_SA_DATA, gfp_mask); |
579 | if (!query->sa_query.mad_buf) { | ||
580 | ret = -ENOMEM; | ||
581 | goto err1; | ||
603 | } | 582 | } |
604 | 583 | ||
605 | query->callback = callback; | 584 | query->callback = callback; |
606 | query->context = context; | 585 | query->context = context; |
607 | 586 | ||
608 | init_mad(query->sa_query.mad, agent); | 587 | mad = query->sa_query.mad_buf->mad; |
588 | init_mad(mad, agent); | ||
609 | 589 | ||
610 | query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL; | 590 | query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL; |
611 | query->sa_query.release = ib_sa_path_rec_release; | 591 | query->sa_query.release = ib_sa_path_rec_release; |
612 | query->sa_query.port = port; | 592 | query->sa_query.port = port; |
613 | query->sa_query.mad->mad_hdr.method = IB_MGMT_METHOD_GET; | 593 | mad->mad_hdr.method = IB_MGMT_METHOD_GET; |
614 | query->sa_query.mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC); | 594 | mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC); |
615 | query->sa_query.mad->sa_hdr.comp_mask = comp_mask; | 595 | mad->sa_hdr.comp_mask = comp_mask; |
616 | 596 | ||
617 | ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), | 597 | ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data); |
618 | rec, query->sa_query.mad->data); | ||
619 | 598 | ||
620 | *sa_query = &query->sa_query; | 599 | *sa_query = &query->sa_query; |
621 | 600 | ||
622 | ret = send_mad(&query->sa_query, timeout_ms); | 601 | ret = send_mad(&query->sa_query, timeout_ms); |
623 | if (ret < 0) { | 602 | if (ret < 0) |
624 | *sa_query = NULL; | 603 | goto err2; |
625 | kfree(query->sa_query.mad); | 604 | |
626 | kfree(query); | 605 | return ret; |
627 | } | 606 | |
607 | err2: | ||
608 | *sa_query = NULL; | ||
609 | ib_free_send_mad(query->sa_query.mad_buf); | ||
628 | 610 | ||
611 | err1: | ||
612 | kfree(query); | ||
629 | return ret; | 613 | return ret; |
630 | } | 614 | } |
631 | EXPORT_SYMBOL(ib_sa_path_rec_get); | 615 | EXPORT_SYMBOL(ib_sa_path_rec_get); |
@@ -649,7 +633,6 @@ static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query, | |||
649 | 633 | ||
650 | static void ib_sa_service_rec_release(struct ib_sa_query *sa_query) | 634 | static void ib_sa_service_rec_release(struct ib_sa_query *sa_query) |
651 | { | 635 | { |
652 | kfree(sa_query->mad); | ||
653 | kfree(container_of(sa_query, struct ib_sa_service_query, sa_query)); | 636 | kfree(container_of(sa_query, struct ib_sa_service_query, sa_query)); |
654 | } | 637 | } |
655 | 638 | ||
@@ -693,6 +676,7 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method, | |||
693 | struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); | 676 | struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); |
694 | struct ib_sa_port *port; | 677 | struct ib_sa_port *port; |
695 | struct ib_mad_agent *agent; | 678 | struct ib_mad_agent *agent; |
679 | struct ib_sa_mad *mad; | ||
696 | int ret; | 680 | int ret; |
697 | 681 | ||
698 | if (!sa_dev) | 682 | if (!sa_dev) |
@@ -709,37 +693,45 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method, | |||
709 | query = kmalloc(sizeof *query, gfp_mask); | 693 | query = kmalloc(sizeof *query, gfp_mask); |
710 | if (!query) | 694 | if (!query) |
711 | return -ENOMEM; | 695 | return -ENOMEM; |
712 | query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask); | 696 | |
713 | if (!query->sa_query.mad) { | 697 | query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0, |
714 | kfree(query); | 698 | 0, IB_MGMT_SA_HDR, |
715 | return -ENOMEM; | 699 | IB_MGMT_SA_DATA, gfp_mask); |
700 | if (!query->sa_query.mad_buf) { | ||
701 | ret = -ENOMEM; | ||
702 | goto err1; | ||
716 | } | 703 | } |
717 | 704 | ||
718 | query->callback = callback; | 705 | query->callback = callback; |
719 | query->context = context; | 706 | query->context = context; |
720 | 707 | ||
721 | init_mad(query->sa_query.mad, agent); | 708 | mad = query->sa_query.mad_buf->mad; |
709 | init_mad(mad, agent); | ||
722 | 710 | ||
723 | query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL; | 711 | query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL; |
724 | query->sa_query.release = ib_sa_service_rec_release; | 712 | query->sa_query.release = ib_sa_service_rec_release; |
725 | query->sa_query.port = port; | 713 | query->sa_query.port = port; |
726 | query->sa_query.mad->mad_hdr.method = method; | 714 | mad->mad_hdr.method = method; |
727 | query->sa_query.mad->mad_hdr.attr_id = | 715 | mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC); |
728 | cpu_to_be16(IB_SA_ATTR_SERVICE_REC); | 716 | mad->sa_hdr.comp_mask = comp_mask; |
729 | query->sa_query.mad->sa_hdr.comp_mask = comp_mask; | ||
730 | 717 | ||
731 | ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table), | 718 | ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table), |
732 | rec, query->sa_query.mad->data); | 719 | rec, mad->data); |
733 | 720 | ||
734 | *sa_query = &query->sa_query; | 721 | *sa_query = &query->sa_query; |
735 | 722 | ||
736 | ret = send_mad(&query->sa_query, timeout_ms); | 723 | ret = send_mad(&query->sa_query, timeout_ms); |
737 | if (ret < 0) { | 724 | if (ret < 0) |
738 | *sa_query = NULL; | 725 | goto err2; |
739 | kfree(query->sa_query.mad); | 726 | |
740 | kfree(query); | 727 | return ret; |
741 | } | ||
742 | 728 | ||
729 | err2: | ||
730 | *sa_query = NULL; | ||
731 | ib_free_send_mad(query->sa_query.mad_buf); | ||
732 | |||
733 | err1: | ||
734 | kfree(query); | ||
743 | return ret; | 735 | return ret; |
744 | } | 736 | } |
745 | EXPORT_SYMBOL(ib_sa_service_rec_query); | 737 | EXPORT_SYMBOL(ib_sa_service_rec_query); |
@@ -763,7 +755,6 @@ static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query, | |||
763 | 755 | ||
764 | static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query) | 756 | static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query) |
765 | { | 757 | { |
766 | kfree(sa_query->mad); | ||
767 | kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query)); | 758 | kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query)); |
768 | } | 759 | } |
769 | 760 | ||
@@ -782,6 +773,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, | |||
782 | struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); | 773 | struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); |
783 | struct ib_sa_port *port; | 774 | struct ib_sa_port *port; |
784 | struct ib_mad_agent *agent; | 775 | struct ib_mad_agent *agent; |
776 | struct ib_sa_mad *mad; | ||
785 | int ret; | 777 | int ret; |
786 | 778 | ||
787 | if (!sa_dev) | 779 | if (!sa_dev) |
@@ -793,53 +785,55 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, | |||
793 | query = kmalloc(sizeof *query, gfp_mask); | 785 | query = kmalloc(sizeof *query, gfp_mask); |
794 | if (!query) | 786 | if (!query) |
795 | return -ENOMEM; | 787 | return -ENOMEM; |
796 | query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask); | 788 | |
797 | if (!query->sa_query.mad) { | 789 | query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0, |
798 | kfree(query); | 790 | 0, IB_MGMT_SA_HDR, |
799 | return -ENOMEM; | 791 | IB_MGMT_SA_DATA, gfp_mask); |
792 | if (!query->sa_query.mad_buf) { | ||
793 | ret = -ENOMEM; | ||
794 | goto err1; | ||
800 | } | 795 | } |
801 | 796 | ||
802 | query->callback = callback; | 797 | query->callback = callback; |
803 | query->context = context; | 798 | query->context = context; |
804 | 799 | ||
805 | init_mad(query->sa_query.mad, agent); | 800 | mad = query->sa_query.mad_buf->mad; |
801 | init_mad(mad, agent); | ||
806 | 802 | ||
807 | query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL; | 803 | query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL; |
808 | query->sa_query.release = ib_sa_mcmember_rec_release; | 804 | query->sa_query.release = ib_sa_mcmember_rec_release; |
809 | query->sa_query.port = port; | 805 | query->sa_query.port = port; |
810 | query->sa_query.mad->mad_hdr.method = method; | 806 | mad->mad_hdr.method = method; |
811 | query->sa_query.mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC); | 807 | mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC); |
812 | query->sa_query.mad->sa_hdr.comp_mask = comp_mask; | 808 | mad->sa_hdr.comp_mask = comp_mask; |
813 | 809 | ||
814 | ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table), | 810 | ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table), |
815 | rec, query->sa_query.mad->data); | 811 | rec, mad->data); |
816 | 812 | ||
817 | *sa_query = &query->sa_query; | 813 | *sa_query = &query->sa_query; |
818 | 814 | ||
819 | ret = send_mad(&query->sa_query, timeout_ms); | 815 | ret = send_mad(&query->sa_query, timeout_ms); |
820 | if (ret < 0) { | 816 | if (ret < 0) |
821 | *sa_query = NULL; | 817 | goto err2; |
822 | kfree(query->sa_query.mad); | ||
823 | kfree(query); | ||
824 | } | ||
825 | 818 | ||
826 | return ret; | 819 | return ret; |
820 | |||
821 | err2: | ||
822 | *sa_query = NULL; | ||
823 | ib_free_send_mad(query->sa_query.mad_buf); | ||
824 | |||
825 | err1: | ||
826 | kfree(query); | ||
827 | return ret; | ||
827 | } | 828 | } |
828 | EXPORT_SYMBOL(ib_sa_mcmember_rec_query); | 829 | EXPORT_SYMBOL(ib_sa_mcmember_rec_query); |
829 | 830 | ||
830 | static void send_handler(struct ib_mad_agent *agent, | 831 | static void send_handler(struct ib_mad_agent *agent, |
831 | struct ib_mad_send_wc *mad_send_wc) | 832 | struct ib_mad_send_wc *mad_send_wc) |
832 | { | 833 | { |
833 | struct ib_sa_query *query; | 834 | struct ib_sa_query *query = mad_send_wc->send_buf->context[0]; |
834 | unsigned long flags; | 835 | unsigned long flags; |
835 | 836 | ||
836 | spin_lock_irqsave(&idr_lock, flags); | ||
837 | query = idr_find(&query_idr, mad_send_wc->wr_id); | ||
838 | spin_unlock_irqrestore(&idr_lock, flags); | ||
839 | |||
840 | if (!query) | ||
841 | return; | ||
842 | |||
843 | if (query->callback) | 837 | if (query->callback) |
844 | switch (mad_send_wc->status) { | 838 | switch (mad_send_wc->status) { |
845 | case IB_WC_SUCCESS: | 839 | case IB_WC_SUCCESS: |
@@ -856,30 +850,25 @@ static void send_handler(struct ib_mad_agent *agent, | |||
856 | break; | 850 | break; |
857 | } | 851 | } |
858 | 852 | ||
859 | dma_unmap_single(agent->device->dma_device, | ||
860 | pci_unmap_addr(query, mapping), | ||
861 | sizeof (struct ib_sa_mad), | ||
862 | DMA_TO_DEVICE); | ||
863 | kref_put(&query->sm_ah->ref, free_sm_ah); | ||
864 | |||
865 | query->release(query); | ||
866 | |||
867 | spin_lock_irqsave(&idr_lock, flags); | 853 | spin_lock_irqsave(&idr_lock, flags); |
868 | idr_remove(&query_idr, mad_send_wc->wr_id); | 854 | idr_remove(&query_idr, query->id); |
869 | spin_unlock_irqrestore(&idr_lock, flags); | 855 | spin_unlock_irqrestore(&idr_lock, flags); |
856 | |||
857 | ib_free_send_mad(mad_send_wc->send_buf); | ||
858 | kref_put(&query->sm_ah->ref, free_sm_ah); | ||
859 | query->release(query); | ||
870 | } | 860 | } |
871 | 861 | ||
872 | static void recv_handler(struct ib_mad_agent *mad_agent, | 862 | static void recv_handler(struct ib_mad_agent *mad_agent, |
873 | struct ib_mad_recv_wc *mad_recv_wc) | 863 | struct ib_mad_recv_wc *mad_recv_wc) |
874 | { | 864 | { |
875 | struct ib_sa_query *query; | 865 | struct ib_sa_query *query; |
876 | unsigned long flags; | 866 | struct ib_mad_send_buf *mad_buf; |
877 | 867 | ||
878 | spin_lock_irqsave(&idr_lock, flags); | 868 | mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id; |
879 | query = idr_find(&query_idr, mad_recv_wc->wc->wr_id); | 869 | query = mad_buf->context[0]; |
880 | spin_unlock_irqrestore(&idr_lock, flags); | ||
881 | 870 | ||
882 | if (query && query->callback) { | 871 | if (query->callback) { |
883 | if (mad_recv_wc->wc->status == IB_WC_SUCCESS) | 872 | if (mad_recv_wc->wc->status == IB_WC_SUCCESS) |
884 | query->callback(query, | 873 | query->callback(query, |
885 | mad_recv_wc->recv_buf.mad->mad_hdr.status ? | 874 | mad_recv_wc->recv_buf.mad->mad_hdr.status ? |
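A condensed sketch of the allocation and error-unwind pattern the sa_query.c hunks above converge on; init_mad(), send_mad(), the IB_MGMT_SA_* sizes and the ib_sa_query fields are taken from the patch, while the wrapper function itself is illustrative only:

    static int sa_alloc_and_send(struct ib_mad_agent *agent,
                                 struct ib_sa_query *sa_query,
                                 int timeout_ms, gfp_t gfp_mask)
    {
            struct ib_sa_mad *mad;
            int ret;

            /* One call replaces the old kmalloc plus caller-side DMA mapping. */
            sa_query->mad_buf = ib_create_send_mad(agent, 1, 0, 0,
                                                   IB_MGMT_SA_HDR,
                                                   IB_MGMT_SA_DATA, gfp_mask);
            if (!sa_query->mad_buf)
                    return -ENOMEM;

            mad = sa_query->mad_buf->mad;
            init_mad(mad, agent);

            /*
             * send_mad() returns the query id (positive) on success or a
             * negative errno.  After a successful post sa_query must not be
             * dereferenced again: the send may already have completed and
             * freed it, which is why callers return the id rather than
             * anything read back out of the query.
             */
            ret = send_mad(sa_query, timeout_ms);
            if (ret < 0)
                    ib_free_send_mad(sa_query->mad_buf);    /* unwind on error */
            return ret;
    }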
diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h index db25503a0736..2b3c40198f81 100644 --- a/drivers/infiniband/core/smi.h +++ b/drivers/infiniband/core/smi.h | |||
@@ -39,6 +39,8 @@ | |||
39 | #ifndef __SMI_H_ | 39 | #ifndef __SMI_H_ |
40 | #define __SMI_H_ | 40 | #define __SMI_H_ |
41 | 41 | ||
42 | #include <rdma/ib_smi.h> | ||
43 | |||
42 | int smi_handle_dr_smp_recv(struct ib_smp *smp, | 44 | int smi_handle_dr_smp_recv(struct ib_smp *smp, |
43 | u8 node_type, | 45 | u8 node_type, |
44 | int port_num, | 46 | int port_num, |
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index fd200c064a2e..fc5519a3de99 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c | |||
@@ -96,7 +96,6 @@ struct ib_umad_file { | |||
96 | }; | 96 | }; |
97 | 97 | ||
98 | struct ib_umad_packet { | 98 | struct ib_umad_packet { |
99 | struct ib_ah *ah; | ||
100 | struct ib_mad_send_buf *msg; | 99 | struct ib_mad_send_buf *msg; |
101 | struct list_head list; | 100 | struct list_head list; |
102 | int length; | 101 | int length; |
@@ -139,10 +138,10 @@ static void send_handler(struct ib_mad_agent *agent, | |||
139 | struct ib_mad_send_wc *send_wc) | 138 | struct ib_mad_send_wc *send_wc) |
140 | { | 139 | { |
141 | struct ib_umad_file *file = agent->context; | 140 | struct ib_umad_file *file = agent->context; |
142 | struct ib_umad_packet *timeout, *packet = | 141 | struct ib_umad_packet *timeout; |
143 | (void *) (unsigned long) send_wc->wr_id; | 142 | struct ib_umad_packet *packet = send_wc->send_buf->context[0]; |
144 | 143 | ||
145 | ib_destroy_ah(packet->msg->send_wr.wr.ud.ah); | 144 | ib_destroy_ah(packet->msg->ah); |
146 | ib_free_send_mad(packet->msg); | 145 | ib_free_send_mad(packet->msg); |
147 | 146 | ||
148 | if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) { | 147 | if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) { |
@@ -268,11 +267,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
268 | struct ib_umad_packet *packet; | 267 | struct ib_umad_packet *packet; |
269 | struct ib_mad_agent *agent; | 268 | struct ib_mad_agent *agent; |
270 | struct ib_ah_attr ah_attr; | 269 | struct ib_ah_attr ah_attr; |
271 | struct ib_send_wr *bad_wr; | 270 | struct ib_ah *ah; |
272 | struct ib_rmpp_mad *rmpp_mad; | 271 | struct ib_rmpp_mad *rmpp_mad; |
273 | u8 method; | 272 | u8 method; |
274 | __be64 *tid; | 273 | __be64 *tid; |
275 | int ret, length, hdr_len, data_len, rmpp_hdr_size; | 274 | int ret, length, hdr_len, rmpp_hdr_size; |
276 | int rmpp_active = 0; | 275 | int rmpp_active = 0; |
277 | 276 | ||
278 | if (count < sizeof (struct ib_user_mad)) | 277 | if (count < sizeof (struct ib_user_mad)) |
@@ -321,9 +320,9 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
321 | ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; | 320 | ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; |
322 | } | 321 | } |
323 | 322 | ||
324 | packet->ah = ib_create_ah(agent->qp->pd, &ah_attr); | 323 | ah = ib_create_ah(agent->qp->pd, &ah_attr); |
325 | if (IS_ERR(packet->ah)) { | 324 | if (IS_ERR(ah)) { |
326 | ret = PTR_ERR(packet->ah); | 325 | ret = PTR_ERR(ah); |
327 | goto err_up; | 326 | goto err_up; |
328 | } | 327 | } |
329 | 328 | ||
@@ -337,12 +336,10 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
337 | 336 | ||
338 | /* Validate that the management class can support RMPP */ | 337 | /* Validate that the management class can support RMPP */ |
339 | if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) { | 338 | if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) { |
340 | hdr_len = offsetof(struct ib_sa_mad, data); | 339 | hdr_len = IB_MGMT_SA_HDR; |
341 | data_len = length - hdr_len; | ||
342 | } else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && | 340 | } else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && |
343 | (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) { | 341 | (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) { |
344 | hdr_len = offsetof(struct ib_vendor_mad, data); | 342 | hdr_len = IB_MGMT_VENDOR_HDR; |
345 | data_len = length - hdr_len; | ||
346 | } else { | 343 | } else { |
347 | ret = -EINVAL; | 344 | ret = -EINVAL; |
348 | goto err_ah; | 345 | goto err_ah; |
@@ -353,25 +350,23 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
353 | ret = -EINVAL; | 350 | ret = -EINVAL; |
354 | goto err_ah; | 351 | goto err_ah; |
355 | } | 352 | } |
356 | hdr_len = offsetof(struct ib_mad, data); | 353 | hdr_len = IB_MGMT_MAD_HDR; |
357 | data_len = length - hdr_len; | ||
358 | } | 354 | } |
359 | 355 | ||
360 | packet->msg = ib_create_send_mad(agent, | 356 | packet->msg = ib_create_send_mad(agent, |
361 | be32_to_cpu(packet->mad.hdr.qpn), | 357 | be32_to_cpu(packet->mad.hdr.qpn), |
362 | 0, packet->ah, rmpp_active, | 358 | 0, rmpp_active, |
363 | hdr_len, data_len, | 359 | hdr_len, length - hdr_len, |
364 | GFP_KERNEL); | 360 | GFP_KERNEL); |
365 | if (IS_ERR(packet->msg)) { | 361 | if (IS_ERR(packet->msg)) { |
366 | ret = PTR_ERR(packet->msg); | 362 | ret = PTR_ERR(packet->msg); |
367 | goto err_ah; | 363 | goto err_ah; |
368 | } | 364 | } |
369 | 365 | ||
370 | packet->msg->send_wr.wr.ud.timeout_ms = packet->mad.hdr.timeout_ms; | 366 | packet->msg->ah = ah; |
371 | packet->msg->send_wr.wr.ud.retries = packet->mad.hdr.retries; | 367 | packet->msg->timeout_ms = packet->mad.hdr.timeout_ms; |
372 | 368 | packet->msg->retries = packet->mad.hdr.retries; | |
373 | /* Override send WR WRID initialized in ib_create_send_mad */ | 369 | packet->msg->context[0] = packet; |
374 | packet->msg->send_wr.wr_id = (unsigned long) packet; | ||
375 | 370 | ||
376 | if (!rmpp_active) { | 371 | if (!rmpp_active) { |
377 | /* Copy message from user into send buffer */ | 372 | /* Copy message from user into send buffer */ |
@@ -403,17 +398,17 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
403 | * transaction ID matches the agent being used to send the | 398 | * transaction ID matches the agent being used to send the |
404 | * MAD. | 399 | * MAD. |
405 | */ | 400 | */ |
406 | method = packet->msg->mad->mad_hdr.method; | 401 | method = ((struct ib_mad_hdr *) packet->msg->mad)->method;
407 | 402 | ||
408 | if (!(method & IB_MGMT_METHOD_RESP) && | 403 | if (!(method & IB_MGMT_METHOD_RESP) && |
409 | method != IB_MGMT_METHOD_TRAP_REPRESS && | 404 | method != IB_MGMT_METHOD_TRAP_REPRESS && |
410 | method != IB_MGMT_METHOD_SEND) { | 405 | method != IB_MGMT_METHOD_SEND) { |
411 | tid = &packet->msg->mad->mad_hdr.tid; | 406 | tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
412 | *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | | 407 | *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | |
413 | (be64_to_cpup(tid) & 0xffffffff)); | 408 | (be64_to_cpup(tid) & 0xffffffff)); |
414 | } | 409 | } |
415 | 410 | ||
416 | ret = ib_post_send_mad(agent, &packet->msg->send_wr, &bad_wr); | 411 | ret = ib_post_send_mad(packet->msg, NULL); |
417 | if (ret) | 412 | if (ret) |
418 | goto err_msg; | 413 | goto err_msg; |
419 | 414 | ||
@@ -425,7 +420,7 @@ err_msg: | |||
425 | ib_free_send_mad(packet->msg); | 420 | ib_free_send_mad(packet->msg); |
426 | 421 | ||
427 | err_ah: | 422 | err_ah: |
428 | ib_destroy_ah(packet->ah); | 423 | ib_destroy_ah(ah); |
429 | 424 | ||
430 | err_up: | 425 | err_up: |
431 | up_read(&file->agent_mutex); | 426 | up_read(&file->agent_mutex); |
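For comparison with the hunk above, a minimal sketch of the send path ib_umad_write() now follows. The function, its parameters and the error-counting are hypothetical; the calls and the ib_mad_send_buf fields (ah, timeout_ms, retries, context[0]) are the ones introduced by this patch:

    static int umad_send_sketch(struct ib_mad_agent *agent,
                                struct ib_ah_attr *ah_attr,
                                const void *mad_data, int hdr_len,
                                int data_len, int timeout_ms, void *owner)
    {
            struct ib_mad_send_buf *msg;
            struct ib_ah *ah;
            int ret;

            ah = ib_create_ah(agent->qp->pd, ah_attr);
            if (IS_ERR(ah))
                    return PTR_ERR(ah);

            msg = ib_create_send_mad(agent, 1, 0, 0 /* no RMPP */,
                                     hdr_len, data_len, GFP_KERNEL);
            if (IS_ERR(msg)) {
                    ret = PTR_ERR(msg);
                    goto err_ah;
            }

            msg->ah         = ah;              /* was send_wr.wr.ud.ah */
            msg->timeout_ms = timeout_ms;      /* was send_wr.wr.ud.timeout_ms */
            msg->retries    = 0;
            msg->context[0] = owner;           /* replaces the wr_id override */

            /* Copy the caller's MAD into the buffer owned by the MAD layer. */
            memcpy(msg->mad, mad_data, hdr_len + data_len);

            ret = ib_post_send_mad(msg, NULL);
            if (ret)
                    goto err_msg;
            return 0;

    err_msg:
            ib_free_send_mad(msg);
    err_ah:
            ib_destroy_ah(ah);
            return ret;
    }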
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index 9804174f7f3c..8561b297a19b 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c | |||
@@ -46,11 +46,6 @@ enum { | |||
46 | MTHCA_VENDOR_CLASS2 = 0xa | 46 | MTHCA_VENDOR_CLASS2 = 0xa |
47 | }; | 47 | }; |
48 | 48 | ||
49 | struct mthca_trap_mad { | ||
50 | struct ib_mad *mad; | ||
51 | DECLARE_PCI_UNMAP_ADDR(mapping) | ||
52 | }; | ||
53 | |||
54 | static void update_sm_ah(struct mthca_dev *dev, | 49 | static void update_sm_ah(struct mthca_dev *dev, |
55 | u8 port_num, u16 lid, u8 sl) | 50 | u8 port_num, u16 lid, u8 sl) |
56 | { | 51 | { |
@@ -116,49 +111,14 @@ static void forward_trap(struct mthca_dev *dev, | |||
116 | struct ib_mad *mad) | 111 | struct ib_mad *mad) |
117 | { | 112 | { |
118 | int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; | 113 | int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; |
119 | struct mthca_trap_mad *tmad; | 114 | struct ib_mad_send_buf *send_buf; |
120 | struct ib_sge gather_list; | ||
121 | struct ib_send_wr *bad_wr, wr = { | ||
122 | .opcode = IB_WR_SEND, | ||
123 | .sg_list = &gather_list, | ||
124 | .num_sge = 1, | ||
125 | .send_flags = IB_SEND_SIGNALED, | ||
126 | .wr = { | ||
127 | .ud = { | ||
128 | .remote_qpn = qpn, | ||
129 | .remote_qkey = qpn ? IB_QP1_QKEY : 0, | ||
130 | .timeout_ms = 0 | ||
131 | } | ||
132 | } | ||
133 | }; | ||
134 | struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; | 115 | struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; |
135 | int ret; | 116 | int ret; |
136 | unsigned long flags; | 117 | unsigned long flags; |
137 | 118 | ||
138 | if (agent) { | 119 | if (agent) { |
139 | tmad = kmalloc(sizeof *tmad, GFP_KERNEL); | 120 | send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, |
140 | if (!tmad) | 121 | IB_MGMT_MAD_DATA, GFP_ATOMIC); |
141 | return; | ||
142 | |||
143 | tmad->mad = kmalloc(sizeof *tmad->mad, GFP_KERNEL); | ||
144 | if (!tmad->mad) { | ||
145 | kfree(tmad); | ||
146 | return; | ||
147 | } | ||
148 | |||
149 | memcpy(tmad->mad, mad, sizeof *mad); | ||
150 | |||
151 | wr.wr.ud.mad_hdr = &tmad->mad->mad_hdr; | ||
152 | wr.wr_id = (unsigned long) tmad; | ||
153 | |||
154 | gather_list.addr = dma_map_single(agent->device->dma_device, | ||
155 | tmad->mad, | ||
156 | sizeof *tmad->mad, | ||
157 | DMA_TO_DEVICE); | ||
158 | gather_list.length = sizeof *tmad->mad; | ||
159 | gather_list.lkey = to_mpd(agent->qp->pd)->ntmr.ibmr.lkey; | ||
160 | pci_unmap_addr_set(tmad, mapping, gather_list.addr); | ||
161 | |||
162 | /* | 122 | /* |
163 | * We rely here on the fact that MLX QPs don't use the | 123 | * We rely here on the fact that MLX QPs don't use the |
164 | * address handle after the send is posted (this is | 124 | * address handle after the send is posted (this is |
@@ -166,21 +126,15 @@ static void forward_trap(struct mthca_dev *dev, | |||
166 | * it's OK for our devices). | 126 | * it's OK for our devices). |
167 | */ | 127 | */ |
168 | spin_lock_irqsave(&dev->sm_lock, flags); | 128 | spin_lock_irqsave(&dev->sm_lock, flags); |
169 | wr.wr.ud.ah = dev->sm_ah[port_num - 1]; | 129 | memcpy(send_buf->mad, mad, sizeof *mad); |
170 | if (wr.wr.ud.ah) | 130 | if ((send_buf->ah = dev->sm_ah[port_num - 1])) |
171 | ret = ib_post_send_mad(agent, &wr, &bad_wr); | 131 | ret = ib_post_send_mad(send_buf, NULL); |
172 | else | 132 | else |
173 | ret = -EINVAL; | 133 | ret = -EINVAL; |
174 | spin_unlock_irqrestore(&dev->sm_lock, flags); | 134 | spin_unlock_irqrestore(&dev->sm_lock, flags); |
175 | 135 | ||
176 | if (ret) { | 136 | if (ret) |
177 | dma_unmap_single(agent->device->dma_device, | 137 | ib_free_send_mad(send_buf); |
178 | pci_unmap_addr(tmad, mapping), | ||
179 | sizeof *tmad->mad, | ||
180 | DMA_TO_DEVICE); | ||
181 | kfree(tmad->mad); | ||
182 | kfree(tmad); | ||
183 | } | ||
184 | } | 138 | } |
185 | } | 139 | } |
186 | 140 | ||
@@ -267,15 +221,7 @@ int mthca_process_mad(struct ib_device *ibdev, | |||
267 | static void send_handler(struct ib_mad_agent *agent, | 221 | static void send_handler(struct ib_mad_agent *agent, |
268 | struct ib_mad_send_wc *mad_send_wc) | 222 | struct ib_mad_send_wc *mad_send_wc) |
269 | { | 223 | { |
270 | struct mthca_trap_mad *tmad = | 224 | ib_free_send_mad(mad_send_wc->send_buf); |
271 | (void *) (unsigned long) mad_send_wc->wr_id; | ||
272 | |||
273 | dma_unmap_single(agent->device->dma_device, | ||
274 | pci_unmap_addr(tmad, mapping), | ||
275 | sizeof *tmad->mad, | ||
276 | DMA_TO_DEVICE); | ||
277 | kfree(tmad->mad); | ||
278 | kfree(tmad); | ||
279 | } | 225 | } |
280 | 226 | ||
281 | int mthca_create_agents(struct mthca_dev *dev) | 227 | int mthca_create_agents(struct mthca_dev *dev) |
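With the copy and DMA mapping handled inside ib_create_send_mad(), a sender's completion handler shrinks to recovering its context from the send buffer and freeing it. A hedged sketch of the general shape (my_request, my_send_handler and the error counter are illustrative, not from the patch):

    struct my_request {
            int errors;                        /* illustrative bookkeeping only */
    };

    static void my_send_handler(struct ib_mad_agent *agent,
                                struct ib_mad_send_wc *mad_send_wc)
    {
            /* context[0] was set by the sender before ib_post_send_mad(). */
            struct my_request *req = mad_send_wc->send_buf->context[0];

            if (mad_send_wc->status != IB_WC_SUCCESS)
                    req->errors++;

            /* The MAD layer now owns the DMA mapping; just return the buffer. */
            ib_free_send_mad(mad_send_wc->send_buf);
    }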
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 4172e6841e3d..2c133506742b 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h | |||
@@ -109,10 +109,14 @@ | |||
109 | #define IB_QP_SET_QKEY 0x80000000 | 109 | #define IB_QP_SET_QKEY 0x80000000 |
110 | 110 | ||
111 | enum { | 111 | enum { |
112 | IB_MGMT_MAD_HDR = 24, | ||
112 | IB_MGMT_MAD_DATA = 232, | 113 | IB_MGMT_MAD_DATA = 232, |
114 | IB_MGMT_RMPP_HDR = 36, | ||
113 | IB_MGMT_RMPP_DATA = 220, | 115 | IB_MGMT_RMPP_DATA = 220, |
116 | IB_MGMT_VENDOR_HDR = 40, | ||
114 | IB_MGMT_VENDOR_DATA = 216, | 117 | IB_MGMT_VENDOR_DATA = 216, |
115 | IB_MGMT_SA_DATA = 200 | 118 | IB_MGMT_SA_HDR = 56, |
119 | IB_MGMT_SA_DATA = 200, | ||
116 | }; | 120 | }; |
117 | 121 | ||
118 | struct ib_mad_hdr { | 122 | struct ib_mad_hdr { |
@@ -203,26 +207,25 @@ struct ib_class_port_info | |||
203 | 207 | ||
204 | /** | 208 | /** |
205 | * ib_mad_send_buf - MAD data buffer and work request for sends. | 209 | * ib_mad_send_buf - MAD data buffer and work request for sends. |
206 | * @mad: References an allocated MAD data buffer. The size of the data | 210 | * @next: A pointer used to chain together MADs for posting. |
207 | * buffer is specified in the @send_wr.length field. | 211 | * @mad: References an allocated MAD data buffer. |
208 | * @mapping: DMA mapping information. | ||
209 | * @mad_agent: MAD agent that allocated the buffer. | 212 | * @mad_agent: MAD agent that allocated the buffer. |
213 | * @ah: The address handle to use when sending the MAD. | ||
210 | * @context: User-controlled context fields. | 214 | * @context: User-controlled context fields. |
211 | * @send_wr: An initialized work request structure used when sending the MAD. | 215 | * @timeout_ms: Time to wait for a response. |
212 | * The wr_id field of the work request is initialized to reference this | 216 | * @retries: Number of times to retry a request for a response. |
213 | * data structure. | ||
214 | * @sge: A scatter-gather list referenced by the work request. | ||
215 | * | 217 | * |
216 | * Users are responsible for initializing the MAD buffer itself, with the | 218 | * Users are responsible for initializing the MAD buffer itself, with the |
217 | * exception of specifying the payload length field in any RMPP MAD. | 219 | * exception of specifying the payload length field in any RMPP MAD. |
218 | */ | 220 | */ |
219 | struct ib_mad_send_buf { | 221 | struct ib_mad_send_buf { |
220 | struct ib_mad *mad; | 222 | struct ib_mad_send_buf *next; |
221 | DECLARE_PCI_UNMAP_ADDR(mapping) | 223 | void *mad; |
222 | struct ib_mad_agent *mad_agent; | 224 | struct ib_mad_agent *mad_agent; |
225 | struct ib_ah *ah; | ||
223 | void *context[2]; | 226 | void *context[2]; |
224 | struct ib_send_wr send_wr; | 227 | int timeout_ms; |
225 | struct ib_sge sge; | 228 | int retries; |
226 | }; | 229 | }; |
227 | 230 | ||
228 | /** | 231 | /** |
@@ -287,7 +290,7 @@ typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent, | |||
287 | * or @mad_send_wc. | 290 | * or @mad_send_wc. |
288 | */ | 291 | */ |
289 | typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent, | 292 | typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent, |
290 | struct ib_send_wr *send_wr, | 293 | struct ib_mad_send_buf *send_buf, |
291 | struct ib_mad_send_wc *mad_send_wc); | 294 | struct ib_mad_send_wc *mad_send_wc); |
292 | 295 | ||
293 | /** | 296 | /** |
@@ -334,13 +337,13 @@ struct ib_mad_agent { | |||
334 | 337 | ||
335 | /** | 338 | /** |
336 | * ib_mad_send_wc - MAD send completion information. | 339 | * ib_mad_send_wc - MAD send completion information. |
337 | * @wr_id: Work request identifier associated with the send MAD request. | 340 | * @send_buf: Send MAD data buffer associated with the send MAD request. |
338 | * @status: Completion status. | 341 | * @status: Completion status. |
339 | * @vendor_err: Optional vendor error information returned with a failed | 342 | * @vendor_err: Optional vendor error information returned with a failed |
340 | * request. | 343 | * request. |
341 | */ | 344 | */ |
342 | struct ib_mad_send_wc { | 345 | struct ib_mad_send_wc { |
343 | u64 wr_id; | 346 | struct ib_mad_send_buf *send_buf; |
344 | enum ib_wc_status status; | 347 | enum ib_wc_status status; |
345 | u32 vendor_err; | 348 | u32 vendor_err; |
346 | }; | 349 | }; |
@@ -366,7 +369,7 @@ struct ib_mad_recv_buf { | |||
366 | * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers. | 369 | * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers. |
367 | * @mad_len: The length of the received MAD, without duplicated headers. | 370 | * @mad_len: The length of the received MAD, without duplicated headers. |
368 | * | 371 | * |
369 | * For received response, the wr_id field of the wc is set to the wr_id | 372 | * For a received response, the wr_id contains a pointer to the ib_mad_send_buf
370 | * for the corresponding send request. | 373 | * for the corresponding send request. |
371 | */ | 374 | */ |
372 | struct ib_mad_recv_wc { | 375 | struct ib_mad_recv_wc { |
@@ -463,9 +466,9 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent); | |||
463 | /** | 466 | /** |
464 | * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated | 467 | * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated |
465 | * with the registered client. | 468 | * with the registered client. |
466 | * @mad_agent: Specifies the associated registration to post the send to. | 469 | * @send_buf: Specifies the information needed to send the MAD(s). |
467 | * @send_wr: Specifies the information needed to send the MAD(s). | 470 | * @bad_send_buf: Specifies the MAD on which an error was encountered. This |
468 | * @bad_send_wr: Specifies the MAD on which an error was encountered. | 471 | * parameter is optional if only a single MAD is posted. |
469 | * | 472 | * |
470 | * Sent MADs are not guaranteed to complete in the order that they were posted. | 473 | * Sent MADs are not guaranteed to complete in the order that they were posted. |
471 | * | 474 | * |
@@ -479,9 +482,8 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent); | |||
479 | * defined data being transferred. The paylen_newwin field should be | 482 | * defined data being transferred. The paylen_newwin field should be |
480 | * specified in network-byte order. | 483 | * specified in network-byte order. |
481 | */ | 484 | */ |
482 | int ib_post_send_mad(struct ib_mad_agent *mad_agent, | 485 | int ib_post_send_mad(struct ib_mad_send_buf *send_buf, |
483 | struct ib_send_wr *send_wr, | 486 | struct ib_mad_send_buf **bad_send_buf); |
484 | struct ib_send_wr **bad_send_wr); | ||
485 | 487 | ||
486 | /** | 488 | /** |
487 | * ib_coalesce_recv_mad - Coalesces received MAD data into a single buffer. | 489 | * ib_coalesce_recv_mad - Coalesces received MAD data into a single buffer. |
@@ -507,23 +509,25 @@ void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc); | |||
507 | /** | 509 | /** |
508 | * ib_cancel_mad - Cancels an outstanding send MAD operation. | 510 | * ib_cancel_mad - Cancels an outstanding send MAD operation. |
509 | * @mad_agent: Specifies the registration associated with sent MAD. | 511 | * @mad_agent: Specifies the registration associated with sent MAD. |
510 | * @wr_id: Indicates the work request identifier of the MAD to cancel. | 512 | * @send_buf: Indicates the MAD to cancel. |
511 | * | 513 | * |
512 | * MADs will be returned to the user through the corresponding | 514 | * MADs will be returned to the user through the corresponding |
513 | * ib_mad_send_handler. | 515 | * ib_mad_send_handler. |
514 | */ | 516 | */ |
515 | void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id); | 517 | void ib_cancel_mad(struct ib_mad_agent *mad_agent, |
518 | struct ib_mad_send_buf *send_buf); | ||
516 | 519 | ||
517 | /** | 520 | /** |
518 | * ib_modify_mad - Modifies an outstanding send MAD operation. | 521 | * ib_modify_mad - Modifies an outstanding send MAD operation. |
519 | * @mad_agent: Specifies the registration associated with sent MAD. | 522 | * @mad_agent: Specifies the registration associated with sent MAD. |
520 | * @wr_id: Indicates the work request identifier of the MAD to modify. | 523 | * @send_buf: Indicates the MAD to modify. |
521 | * @timeout_ms: New timeout value for sent MAD. | 524 | * @timeout_ms: New timeout value for sent MAD. |
522 | * | 525 | * |
523 | * This call will reset the timeout value for a sent MAD to the specified | 526 | * This call will reset the timeout value for a sent MAD to the specified |
524 | * value. | 527 | * value. |
525 | */ | 528 | */ |
526 | int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms); | 529 | int ib_modify_mad(struct ib_mad_agent *mad_agent, |
530 | struct ib_mad_send_buf *send_buf, u32 timeout_ms); | ||
527 | 531 | ||
528 | /** | 532 | /** |
529 | * ib_redirect_mad_qp - Registers a QP for MAD services. | 533 | * ib_redirect_mad_qp - Registers a QP for MAD services. |
@@ -572,7 +576,6 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent, | |||
572 | * @remote_qpn: Specifies the QPN of the receiving node. | 576 | * @remote_qpn: Specifies the QPN of the receiving node. |
573 | * @pkey_index: Specifies which PKey the MAD will be sent using. This field | 577 | * @pkey_index: Specifies which PKey the MAD will be sent using. This field |
574 | * is valid only if the remote_qpn is QP 1. | 578 | * is valid only if the remote_qpn is QP 1. |
575 | * @ah: References the address handle used to transfer to the remote node. | ||
576 | * @rmpp_active: Indicates if the send will enable RMPP. | 579 | * @rmpp_active: Indicates if the send will enable RMPP. |
577 | * @hdr_len: Indicates the size of the data header of the MAD. This length | 580 | * @hdr_len: Indicates the size of the data header of the MAD. This length |
578 | * should include the common MAD header, RMPP header, plus any class | 581 | * should include the common MAD header, RMPP header, plus any class |
@@ -582,11 +585,10 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent, | |||
582 | * additional padding that may be necessary. | 585 | * additional padding that may be necessary. |
583 | * @gfp_mask: GFP mask used for the memory allocation. | 586 | * @gfp_mask: GFP mask used for the memory allocation. |
584 | * | 587 | * |
585 | * This is a helper routine that may be used to allocate a MAD. Users are | 588 | * This routine allocates a MAD for sending. The returned MAD send buffer |
586 | * not required to allocate outbound MADs using this call. The returned | 589 | * will reference a data buffer usable for sending a MAD, along |
587 | * MAD send buffer will reference a data buffer usable for sending a MAD, along | ||
588 | * with an initialized work request structure. Users may modify the returned | 590 | * with an initialized work request structure. Users may modify the returned |
589 | * MAD data buffer or work request before posting the send. | 591 | * MAD data buffer before posting the send. |
590 | * | 592 | * |
591 | * The returned data buffer will be cleared. Users are responsible for | 593 | * The returned data buffer will be cleared. Users are responsible for |
592 | * initializing the common MAD and any class specific headers. If @rmpp_active | 594 | * initializing the common MAD and any class specific headers. If @rmpp_active |
@@ -594,7 +596,7 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent, | |||
594 | */ | 596 | */ |
595 | struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, | 597 | struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, |
596 | u32 remote_qpn, u16 pkey_index, | 598 | u32 remote_qpn, u16 pkey_index, |
597 | struct ib_ah *ah, int rmpp_active, | 599 | int rmpp_active, |
598 | int hdr_len, int data_len, | 600 | int hdr_len, int data_len, |
599 | gfp_t gfp_mask); | 601 | gfp_t gfp_mask); |
600 | 602 | ||
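Since cancel and modify now key off the ib_mad_send_buf rather than a wr_id, a caller that keeps the pointer returned by ib_create_send_mad() can manage the outstanding request directly. A small sketch; the wrapper is hypothetical, and the meaning of a nonzero return from ib_modify_mad (request no longer outstanding) is an assumption, as the header above does not document it:

    static void adjust_outstanding_mad(struct ib_mad_agent *agent,
                                       struct ib_mad_send_buf *msg,
                                       int keep, u32 new_timeout_ms)
    {
            if (keep) {
                    /* Re-arm the response timeout on the posted MAD. */
                    if (ib_modify_mad(agent, msg, new_timeout_ms))
                            printk(KERN_DEBUG "MAD no longer outstanding\n");
            } else {
                    /*
                     * Abort the request; the completion still arrives through
                     * the send handler, which frees the buffer as usual.
                     */
                    ib_cancel_mad(agent, msg);
            }
    }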
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index a5a963cb5676..f72d46d54e0a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -595,11 +595,8 @@ struct ib_send_wr { | |||
595 | } atomic; | 595 | } atomic; |
596 | struct { | 596 | struct { |
597 | struct ib_ah *ah; | 597 | struct ib_ah *ah; |
598 | struct ib_mad_hdr *mad_hdr; | ||
599 | u32 remote_qpn; | 598 | u32 remote_qpn; |
600 | u32 remote_qkey; | 599 | u32 remote_qkey; |
601 | int timeout_ms; /* valid for MADs only */ | ||
602 | int retries; /* valid for MADs only */ | ||
603 | u16 pkey_index; /* valid for GSI only */ | 600 | u16 pkey_index; /* valid for GSI only */ |
604 | u8 port_num; /* valid for DR SMPs on switch only */ | 601 | u8 port_num; /* valid for DR SMPs on switch only */ |
605 | } ud; | 602 | } ud; |