author    | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
commit    | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/infiniband/hw/mthca/mthca_cmd.c
tags      | Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_cmd.c')
-rw-r--r-- | drivers/infiniband/hw/mthca/mthca_cmd.c | 1767
1 files changed, 1767 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
new file mode 100644
index 000000000000..9def0981f630
--- /dev/null
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -0,0 +1,1767 @@
1 | /* | ||
2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | * $Id: mthca_cmd.c 1349 2004-12-16 21:09:43Z roland $ | ||
33 | */ | ||
34 | |||
35 | #include <linux/sched.h> | ||
36 | #include <linux/pci.h> | ||
37 | #include <linux/errno.h> | ||
38 | #include <asm/io.h> | ||
39 | #include <ib_mad.h> | ||
40 | |||
41 | #include "mthca_dev.h" | ||
42 | #include "mthca_config_reg.h" | ||
43 | #include "mthca_cmd.h" | ||
44 | #include "mthca_memfree.h" | ||
45 | |||
46 | #define CMD_POLL_TOKEN 0xffff | ||
47 | |||
48 | enum { | ||
49 | HCR_IN_PARAM_OFFSET = 0x00, | ||
50 | HCR_IN_MODIFIER_OFFSET = 0x08, | ||
51 | HCR_OUT_PARAM_OFFSET = 0x0c, | ||
52 | HCR_TOKEN_OFFSET = 0x14, | ||
53 | HCR_STATUS_OFFSET = 0x18, | ||
54 | |||
55 | HCR_OPMOD_SHIFT = 12, | ||
56 | HCA_E_BIT = 22, | ||
57 | HCR_GO_BIT = 23 | ||
58 | }; | ||
59 | |||
60 | enum { | ||
61 | /* initialization and general commands */ | ||
62 | CMD_SYS_EN = 0x1, | ||
63 | CMD_SYS_DIS = 0x2, | ||
64 | CMD_MAP_FA = 0xfff, | ||
65 | CMD_UNMAP_FA = 0xffe, | ||
66 | CMD_RUN_FW = 0xff6, | ||
67 | CMD_MOD_STAT_CFG = 0x34, | ||
68 | CMD_QUERY_DEV_LIM = 0x3, | ||
69 | CMD_QUERY_FW = 0x4, | ||
70 | CMD_ENABLE_LAM = 0xff8, | ||
71 | CMD_DISABLE_LAM = 0xff7, | ||
72 | CMD_QUERY_DDR = 0x5, | ||
73 | CMD_QUERY_ADAPTER = 0x6, | ||
74 | CMD_INIT_HCA = 0x7, | ||
75 | CMD_CLOSE_HCA = 0x8, | ||
76 | CMD_INIT_IB = 0x9, | ||
77 | CMD_CLOSE_IB = 0xa, | ||
78 | CMD_QUERY_HCA = 0xb, | ||
79 | CMD_SET_IB = 0xc, | ||
80 | CMD_ACCESS_DDR = 0x2e, | ||
81 | CMD_MAP_ICM = 0xffa, | ||
82 | CMD_UNMAP_ICM = 0xff9, | ||
83 | CMD_MAP_ICM_AUX = 0xffc, | ||
84 | CMD_UNMAP_ICM_AUX = 0xffb, | ||
85 | CMD_SET_ICM_SIZE = 0xffd, | ||
86 | |||
87 | /* TPT commands */ | ||
88 | CMD_SW2HW_MPT = 0xd, | ||
89 | CMD_QUERY_MPT = 0xe, | ||
90 | CMD_HW2SW_MPT = 0xf, | ||
91 | CMD_READ_MTT = 0x10, | ||
92 | CMD_WRITE_MTT = 0x11, | ||
93 | CMD_SYNC_TPT = 0x2f, | ||
94 | |||
95 | /* EQ commands */ | ||
96 | CMD_MAP_EQ = 0x12, | ||
97 | CMD_SW2HW_EQ = 0x13, | ||
98 | CMD_HW2SW_EQ = 0x14, | ||
99 | CMD_QUERY_EQ = 0x15, | ||
100 | |||
101 | /* CQ commands */ | ||
102 | CMD_SW2HW_CQ = 0x16, | ||
103 | CMD_HW2SW_CQ = 0x17, | ||
104 | CMD_QUERY_CQ = 0x18, | ||
105 | CMD_RESIZE_CQ = 0x2c, | ||
106 | |||
107 | /* SRQ commands */ | ||
108 | CMD_SW2HW_SRQ = 0x35, | ||
109 | CMD_HW2SW_SRQ = 0x36, | ||
110 | CMD_QUERY_SRQ = 0x37, | ||
111 | |||
112 | /* QP/EE commands */ | ||
113 | CMD_RST2INIT_QPEE = 0x19, | ||
114 | CMD_INIT2RTR_QPEE = 0x1a, | ||
115 | CMD_RTR2RTS_QPEE = 0x1b, | ||
116 | CMD_RTS2RTS_QPEE = 0x1c, | ||
117 | CMD_SQERR2RTS_QPEE = 0x1d, | ||
118 | CMD_2ERR_QPEE = 0x1e, | ||
119 | CMD_RTS2SQD_QPEE = 0x1f, | ||
120 | CMD_SQD2SQD_QPEE = 0x38, | ||
121 | CMD_SQD2RTS_QPEE = 0x20, | ||
122 | CMD_ERR2RST_QPEE = 0x21, | ||
123 | CMD_QUERY_QPEE = 0x22, | ||
124 | CMD_INIT2INIT_QPEE = 0x2d, | ||
125 | CMD_SUSPEND_QPEE = 0x32, | ||
126 | CMD_UNSUSPEND_QPEE = 0x33, | ||
127 | /* special QPs and management commands */ | ||
128 | CMD_CONF_SPECIAL_QP = 0x23, | ||
129 | CMD_MAD_IFC = 0x24, | ||
130 | |||
131 | /* multicast commands */ | ||
132 | CMD_READ_MGM = 0x25, | ||
133 | CMD_WRITE_MGM = 0x26, | ||
134 | CMD_MGID_HASH = 0x27, | ||
135 | |||
136 | /* miscellaneous commands */ | ||
137 | CMD_DIAG_RPRT = 0x30, | ||
138 | CMD_NOP = 0x31, | ||
139 | |||
140 | /* debug commands */ | ||
141 | CMD_QUERY_DEBUG_MSG = 0x2a, | ||
142 | CMD_SET_DEBUG_MSG = 0x2b, | ||
143 | }; | ||
144 | |||
145 | /* | ||
146 | * According to Mellanox code, FW may be starved and never complete | ||
147 | * commands. So we can't use strict timeouts described in PRM -- we | ||
148 | * just arbitrarily select 60 seconds for now. | ||
149 | */ | ||
150 | #if 0 | ||
151 | /* | ||
152 | * Round up and add 1 to make sure we get the full wait time (since we | ||
153 | * will be starting in the middle of a jiffy) | ||
154 | */ | ||
155 | enum { | ||
156 | CMD_TIME_CLASS_A = (HZ + 999) / 1000 + 1, | ||
157 | CMD_TIME_CLASS_B = (HZ + 99) / 100 + 1, | ||
158 | CMD_TIME_CLASS_C = (HZ + 9) / 10 + 1 | ||
159 | }; | ||
160 | #else | ||
161 | enum { | ||
162 | CMD_TIME_CLASS_A = 60 * HZ, | ||
163 | CMD_TIME_CLASS_B = 60 * HZ, | ||
164 | CMD_TIME_CLASS_C = 60 * HZ | ||
165 | }; | ||
166 | #endif | ||
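For reference, the disabled jiffy-rounding variant above works out as follows; this is a standalone sketch assuming HZ = 250, a value chosen only for illustration and not taken from this file.

/* Illustration of the jiffy rounding: round up and add 1 so a wait
 * started mid-jiffy still covers the full interval.  HZ is assumed. */
#include <stdio.h>

#define HZ 250

int main(void)
{
	int class_a = (HZ + 999) / 1000 + 1;	/* ~1 ms   -> 2 jiffies  */
	int class_b = (HZ + 99)  / 100  + 1;	/* ~10 ms  -> 4 jiffies  */
	int class_c = (HZ + 9)   / 10   + 1;	/* ~100 ms -> 26 jiffies */

	printf("A=%d B=%d C=%d jiffies\n", class_a, class_b, class_c);
	return 0;
}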
167 | |||
168 | enum { | ||
169 | GO_BIT_TIMEOUT = HZ * 10 | ||
170 | }; | ||
171 | |||
172 | struct mthca_cmd_context { | ||
173 | struct completion done; | ||
174 | struct timer_list timer; | ||
175 | int result; | ||
176 | int next; | ||
177 | u64 out_param; | ||
178 | u16 token; | ||
179 | u8 status; | ||
180 | }; | ||
181 | |||
182 | static inline int go_bit(struct mthca_dev *dev) | ||
183 | { | ||
184 | return readl(dev->hcr + HCR_STATUS_OFFSET) & | ||
185 | swab32(1 << HCR_GO_BIT); | ||
186 | } | ||
187 | |||
188 | static int mthca_cmd_post(struct mthca_dev *dev, | ||
189 | u64 in_param, | ||
190 | u64 out_param, | ||
191 | u32 in_modifier, | ||
192 | u8 op_modifier, | ||
193 | u16 op, | ||
194 | u16 token, | ||
195 | int event) | ||
196 | { | ||
197 | int err = 0; | ||
198 | |||
199 | if (down_interruptible(&dev->cmd.hcr_sem)) | ||
200 | return -EINTR; | ||
201 | |||
202 | if (event) { | ||
203 | unsigned long end = jiffies + GO_BIT_TIMEOUT; | ||
204 | |||
205 | while (go_bit(dev) && time_before(jiffies, end)) { | ||
206 | set_current_state(TASK_RUNNING); | ||
207 | schedule(); | ||
208 | } | ||
209 | } | ||
210 | |||
211 | if (go_bit(dev)) { | ||
212 | err = -EAGAIN; | ||
213 | goto out; | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * We use writel (instead of something like memcpy_toio) | ||
218 | * because writes of less than 32 bits to the HCR don't work | ||
219 | * (and some architectures such as ia64 implement memcpy_toio | ||
220 | * in terms of writeb). | ||
221 | */ | ||
222 | __raw_writel(cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4); | ||
223 | __raw_writel(cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4); | ||
224 | __raw_writel(cpu_to_be32(in_modifier), dev->hcr + 2 * 4); | ||
225 | __raw_writel(cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4); | ||
226 | __raw_writel(cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4); | ||
227 | __raw_writel(cpu_to_be32(token << 16), dev->hcr + 5 * 4); | ||
228 | |||
229 | /* __raw_writel may not order writes. */ | ||
230 | wmb(); | ||
231 | |||
232 | __raw_writel(cpu_to_be32((1 << HCR_GO_BIT) | | ||
233 | (event ? (1 << HCA_E_BIT) : 0) | | ||
234 | (op_modifier << HCR_OPMOD_SHIFT) | | ||
235 | op), dev->hcr + 6 * 4); | ||
236 | |||
237 | out: | ||
238 | up(&dev->cmd.hcr_sem); | ||
239 | return err; | ||
240 | } | ||
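The final __raw_writel above is what actually kicks off the command: the go bit, the optional event bit, the opcode modifier, and the opcode are packed into a single 32-bit doorbell word. A minimal user-space sketch of that packing follows, reusing the bit positions defined earlier in this file; the QUERY_FW opcode in main() is only a sample value.

/* Sketch of the HCR doorbell-word packing; opcode 0x4 is illustrative. */
#include <stdint.h>
#include <stdio.h>

enum { HCR_OPMOD_SHIFT = 12, HCA_E_BIT = 22, HCR_GO_BIT = 23 };

static uint32_t hcr_doorbell(uint16_t op, uint8_t op_modifier, int event)
{
	return (1u << HCR_GO_BIT) |		/* tell firmware to start   */
	       (event ? 1u << HCA_E_BIT : 0) |	/* ask for a completion EQE */
	       ((uint32_t) op_modifier << HCR_OPMOD_SHIFT) |
	       op;				/* opcode in the low bits   */
}

int main(void)
{
	printf("0x%08x\n", (unsigned) hcr_doorbell(0x4, 0, 1));	/* 0x00c00004 */
	return 0;
}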
241 | |||
242 | static int mthca_cmd_poll(struct mthca_dev *dev, | ||
243 | u64 in_param, | ||
244 | u64 *out_param, | ||
245 | int out_is_imm, | ||
246 | u32 in_modifier, | ||
247 | u8 op_modifier, | ||
248 | u16 op, | ||
249 | unsigned long timeout, | ||
250 | u8 *status) | ||
251 | { | ||
252 | int err = 0; | ||
253 | unsigned long end; | ||
254 | |||
255 | if (down_interruptible(&dev->cmd.poll_sem)) | ||
256 | return -EINTR; | ||
257 | |||
258 | err = mthca_cmd_post(dev, in_param, | ||
259 | out_param ? *out_param : 0, | ||
260 | in_modifier, op_modifier, | ||
261 | op, CMD_POLL_TOKEN, 0); | ||
262 | if (err) | ||
263 | goto out; | ||
264 | |||
265 | end = timeout + jiffies; | ||
266 | while (go_bit(dev) && time_before(jiffies, end)) { | ||
267 | set_current_state(TASK_RUNNING); | ||
268 | schedule(); | ||
269 | } | ||
270 | |||
271 | if (go_bit(dev)) { | ||
272 | err = -EBUSY; | ||
273 | goto out; | ||
274 | } | ||
275 | |||
276 | if (out_is_imm) { | ||
277 | memcpy_fromio(out_param, dev->hcr + HCR_OUT_PARAM_OFFSET, sizeof (u64)); | ||
278 | be64_to_cpus(out_param); | ||
279 | } | ||
280 | |||
281 | *status = be32_to_cpu(__raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24; | ||
282 | |||
283 | out: | ||
284 | up(&dev->cmd.poll_sem); | ||
285 | return err; | ||
286 | } | ||
287 | |||
288 | void mthca_cmd_event(struct mthca_dev *dev, | ||
289 | u16 token, | ||
290 | u8 status, | ||
291 | u64 out_param) | ||
292 | { | ||
293 | struct mthca_cmd_context *context = | ||
294 | &dev->cmd.context[token & dev->cmd.token_mask]; | ||
295 | |||
296 | /* previously timed out command completing at long last */ | ||
297 | if (token != context->token) | ||
298 | return; | ||
299 | |||
300 | context->result = 0; | ||
301 | context->status = status; | ||
302 | context->out_param = out_param; | ||
303 | |||
304 | context->token += dev->cmd.token_mask + 1; | ||
305 | |||
306 | complete(&context->done); | ||
307 | } | ||
308 | |||
309 | static void event_timeout(unsigned long context_ptr) | ||
310 | { | ||
311 | struct mthca_cmd_context *context = | ||
312 | (struct mthca_cmd_context *) context_ptr; | ||
313 | |||
314 | context->result = -EBUSY; | ||
315 | complete(&context->done); | ||
316 | } | ||
317 | |||
318 | static int mthca_cmd_wait(struct mthca_dev *dev, | ||
319 | u64 in_param, | ||
320 | u64 *out_param, | ||
321 | int out_is_imm, | ||
322 | u32 in_modifier, | ||
323 | u8 op_modifier, | ||
324 | u16 op, | ||
325 | unsigned long timeout, | ||
326 | u8 *status) | ||
327 | { | ||
328 | int err = 0; | ||
329 | struct mthca_cmd_context *context; | ||
330 | |||
331 | if (down_interruptible(&dev->cmd.event_sem)) | ||
332 | return -EINTR; | ||
333 | |||
334 | spin_lock(&dev->cmd.context_lock); | ||
335 | BUG_ON(dev->cmd.free_head < 0); | ||
336 | context = &dev->cmd.context[dev->cmd.free_head]; | ||
337 | dev->cmd.free_head = context->next; | ||
338 | spin_unlock(&dev->cmd.context_lock); | ||
339 | |||
340 | init_completion(&context->done); | ||
341 | |||
342 | err = mthca_cmd_post(dev, in_param, | ||
343 | out_param ? *out_param : 0, | ||
344 | in_modifier, op_modifier, | ||
345 | op, context->token, 1); | ||
346 | if (err) | ||
347 | goto out; | ||
348 | |||
349 | context->timer.expires = jiffies + timeout; | ||
350 | add_timer(&context->timer); | ||
351 | |||
352 | wait_for_completion(&context->done); | ||
353 | del_timer_sync(&context->timer); | ||
354 | |||
355 | err = context->result; | ||
356 | if (err) | ||
357 | goto out; | ||
358 | |||
359 | *status = context->status; | ||
360 | if (*status) | ||
361 | mthca_dbg(dev, "Command %02x completed with status %02x\n", | ||
362 | op, *status); | ||
363 | |||
364 | if (out_is_imm) | ||
365 | *out_param = context->out_param; | ||
366 | |||
367 | out: | ||
368 | spin_lock(&dev->cmd.context_lock); | ||
369 | context->next = dev->cmd.free_head; | ||
370 | dev->cmd.free_head = context - dev->cmd.context; | ||
371 | spin_unlock(&dev->cmd.context_lock); | ||
372 | |||
373 | up(&dev->cmd.event_sem); | ||
374 | return err; | ||
375 | } | ||
376 | |||
377 | /* Invoke a command with an output mailbox */ | ||
378 | static int mthca_cmd_box(struct mthca_dev *dev, | ||
379 | u64 in_param, | ||
380 | u64 out_param, | ||
381 | u32 in_modifier, | ||
382 | u8 op_modifier, | ||
383 | u16 op, | ||
384 | unsigned long timeout, | ||
385 | u8 *status) | ||
386 | { | ||
387 | if (dev->cmd.use_events) | ||
388 | return mthca_cmd_wait(dev, in_param, &out_param, 0, | ||
389 | in_modifier, op_modifier, op, | ||
390 | timeout, status); | ||
391 | else | ||
392 | return mthca_cmd_poll(dev, in_param, &out_param, 0, | ||
393 | in_modifier, op_modifier, op, | ||
394 | timeout, status); | ||
395 | } | ||
396 | |||
397 | /* Invoke a command with no output parameter */ | ||
398 | static int mthca_cmd(struct mthca_dev *dev, | ||
399 | u64 in_param, | ||
400 | u32 in_modifier, | ||
401 | u8 op_modifier, | ||
402 | u16 op, | ||
403 | unsigned long timeout, | ||
404 | u8 *status) | ||
405 | { | ||
406 | return mthca_cmd_box(dev, in_param, 0, in_modifier, | ||
407 | op_modifier, op, timeout, status); | ||
408 | } | ||
409 | |||
410 | /* | ||
411 | * Invoke a command with an immediate output parameter (and copy the | ||
412 | * output into the caller's out_param pointer after the command | ||
413 | * executes). | ||
414 | */ | ||
415 | static int mthca_cmd_imm(struct mthca_dev *dev, | ||
416 | u64 in_param, | ||
417 | u64 *out_param, | ||
418 | u32 in_modifier, | ||
419 | u8 op_modifier, | ||
420 | u16 op, | ||
421 | unsigned long timeout, | ||
422 | u8 *status) | ||
423 | { | ||
424 | if (dev->cmd.use_events) | ||
425 | return mthca_cmd_wait(dev, in_param, out_param, 1, | ||
426 | in_modifier, op_modifier, op, | ||
427 | timeout, status); | ||
428 | else | ||
429 | return mthca_cmd_poll(dev, in_param, out_param, 1, | ||
430 | in_modifier, op_modifier, op, | ||
431 | timeout, status); | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | * Switch to using events to issue FW commands (should be called after | ||
436 | * event queue to command events has been initialized). | ||
437 | */ | ||
438 | int mthca_cmd_use_events(struct mthca_dev *dev) | ||
439 | { | ||
440 | int i; | ||
441 | |||
442 | dev->cmd.context = kmalloc(dev->cmd.max_cmds * | ||
443 | sizeof (struct mthca_cmd_context), | ||
444 | GFP_KERNEL); | ||
445 | if (!dev->cmd.context) | ||
446 | return -ENOMEM; | ||
447 | |||
448 | for (i = 0; i < dev->cmd.max_cmds; ++i) { | ||
449 | dev->cmd.context[i].token = i; | ||
450 | dev->cmd.context[i].next = i + 1; | ||
451 | init_timer(&dev->cmd.context[i].timer); | ||
452 | dev->cmd.context[i].timer.data = | ||
453 | (unsigned long) &dev->cmd.context[i]; | ||
454 | dev->cmd.context[i].timer.function = event_timeout; | ||
455 | } | ||
456 | |||
457 | dev->cmd.context[dev->cmd.max_cmds - 1].next = -1; | ||
458 | dev->cmd.free_head = 0; | ||
459 | |||
460 | sema_init(&dev->cmd.event_sem, dev->cmd.max_cmds); | ||
461 | spin_lock_init(&dev->cmd.context_lock); | ||
462 | |||
463 | for (dev->cmd.token_mask = 1; | ||
464 | dev->cmd.token_mask < dev->cmd.max_cmds; | ||
465 | dev->cmd.token_mask <<= 1) | ||
466 | ; /* nothing */ | ||
467 | --dev->cmd.token_mask; | ||
468 | |||
469 | dev->cmd.use_events = 1; | ||
470 | down(&dev->cmd.poll_sem); | ||
471 | |||
472 | return 0; | ||
473 | } | ||
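The token_mask loop above rounds max_cmds up to a power of two, so the low bits of a token index the context array while the upper bits act as a generation counter (bumped by token_mask + 1 in mthca_cmd_event) to reject stale completions. A small sketch of that arithmetic, with an invented max_cmds value:

/* Sketch of the token_mask computation; max_cmds here is invented. */
#include <stdio.h>

int main(void)
{
	int max_cmds = 12;	/* hypothetical value returned by QUERY_FW */
	unsigned mask;

	for (mask = 1; mask < (unsigned) max_cmds; mask <<= 1)
		; /* nothing */
	--mask;

	printf("token_mask = 0x%x\n", mask);	/* 0xf for 12 commands */
	return 0;
}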
474 | |||
475 | /* | ||
476 | * Switch back to polling (used when shutting down the device) | ||
477 | */ | ||
478 | void mthca_cmd_use_polling(struct mthca_dev *dev) | ||
479 | { | ||
480 | int i; | ||
481 | |||
482 | dev->cmd.use_events = 0; | ||
483 | |||
484 | for (i = 0; i < dev->cmd.max_cmds; ++i) | ||
485 | down(&dev->cmd.event_sem); | ||
486 | |||
487 | kfree(dev->cmd.context); | ||
488 | |||
489 | up(&dev->cmd.poll_sem); | ||
490 | } | ||
491 | |||
492 | int mthca_SYS_EN(struct mthca_dev *dev, u8 *status) | ||
493 | { | ||
494 | u64 out; | ||
495 | int ret; | ||
496 | |||
497 | ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, HZ, status); | ||
498 | |||
499 | if (*status == MTHCA_CMD_STAT_DDR_MEM_ERR) | ||
500 | mthca_warn(dev, "SYS_EN DDR error: syn=%x, sock=%d, " | ||
501 | "sladdr=%d, SPD source=%s\n", | ||
502 | (int) (out >> 6) & 0xf, (int) (out >> 4) & 3, | ||
503 | (int) (out >> 1) & 7, (int) out & 1 ? "NVMEM" : "DIMM"); | ||
504 | |||
505 | return ret; | ||
506 | } | ||
507 | |||
508 | int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status) | ||
509 | { | ||
510 | return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, HZ, status); | ||
511 | } | ||
512 | |||
513 | static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm, | ||
514 | u64 virt, u8 *status) | ||
515 | { | ||
516 | u32 *inbox; | ||
517 | dma_addr_t indma; | ||
518 | struct mthca_icm_iter iter; | ||
519 | int lg; | ||
520 | int nent = 0; | ||
521 | int i; | ||
522 | int err = 0; | ||
523 | int ts = 0, tc = 0; | ||
524 | |||
525 | inbox = pci_alloc_consistent(dev->pdev, PAGE_SIZE, &indma); | ||
526 | if (!inbox) | ||
527 | return -ENOMEM; | ||
528 | |||
529 | memset(inbox, 0, PAGE_SIZE); | ||
530 | |||
531 | for (mthca_icm_first(icm, &iter); | ||
532 | !mthca_icm_last(&iter); | ||
533 | mthca_icm_next(&iter)) { | ||
534 | /* | ||
535 | * We have to pass pages that are aligned to their | ||
536 | * size, so find the least significant 1 in the | ||
537 | * address or size and use that as our log2 size. | ||
538 | */ | ||
539 | lg = ffs(mthca_icm_addr(&iter) | mthca_icm_size(&iter)) - 1; | ||
540 | if (lg < 12) { | ||
541 | mthca_warn(dev, "Got FW area not aligned to 4K (%llx/%lx).\n", | ||
542 | (unsigned long long) mthca_icm_addr(&iter), | ||
543 | mthca_icm_size(&iter)); | ||
544 | err = -EINVAL; | ||
545 | goto out; | ||
546 | } | ||
547 | for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i, ++nent) { | ||
548 | if (virt != -1) { | ||
549 | *((__be64 *) (inbox + nent * 4)) = | ||
550 | cpu_to_be64(virt); | ||
551 | virt += 1 << lg; | ||
552 | } | ||
553 | |||
554 | *((__be64 *) (inbox + nent * 4 + 2)) = | ||
555 | cpu_to_be64((mthca_icm_addr(&iter) + | ||
556 | (i << lg)) | (lg - 12)); | ||
557 | ts += 1 << (lg - 10); | ||
558 | ++tc; | ||
559 | |||
560 | if (nent == PAGE_SIZE / 16) { | ||
561 | err = mthca_cmd(dev, indma, nent, 0, op, | ||
562 | CMD_TIME_CLASS_B, status); | ||
563 | if (err || *status) | ||
564 | goto out; | ||
565 | nent = 0; | ||
566 | } | ||
567 | } | ||
568 | } | ||
569 | |||
570 | if (nent) | ||
571 | err = mthca_cmd(dev, indma, nent, 0, op, | ||
572 | CMD_TIME_CLASS_B, status); | ||
573 | |||
574 | switch (op) { | ||
575 | case CMD_MAP_FA: | ||
576 | mthca_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts); | ||
577 | break; | ||
578 | case CMD_MAP_ICM_AUX: | ||
579 | mthca_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts); | ||
580 | break; | ||
581 | case CMD_MAP_ICM: | ||
582 | mthca_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n", | ||
583 | tc, ts, (unsigned long long) virt - (ts << 10)); | ||
584 | break; | ||
585 | } | ||
586 | |||
587 | out: | ||
588 | pci_free_consistent(dev->pdev, PAGE_SIZE, inbox, indma); | ||
589 | return err; | ||
590 | } | ||
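The ffs() trick in the loop above deserves a note: the lowest set bit of (address | size) bounds both the chunk's alignment and its length, so it gives the largest power-of-two page size the chunk can be described with. A standalone sketch with invented addresses:

/* Sketch of the log2 chunk-size selection used above; values invented. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int addr = 0x10000000;	/* 256 MB-aligned chunk start */
	unsigned int size = 0x00060000;	/* 384 KB chunk               */
	int lg = ffs(addr | size) - 1;	/* lowest set bit -> 17       */

	/* The chunk would be passed to FW as 3 pages of 1 << 17 = 128 KB. */
	printf("lg = %d, pages = %u\n", lg, size >> lg);
	return 0;
}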
591 | |||
592 | int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status) | ||
593 | { | ||
594 | return mthca_map_cmd(dev, CMD_MAP_FA, icm, -1, status); | ||
595 | } | ||
596 | |||
597 | int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status) | ||
598 | { | ||
599 | return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B, status); | ||
600 | } | ||
601 | |||
602 | int mthca_RUN_FW(struct mthca_dev *dev, u8 *status) | ||
603 | { | ||
604 | return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A, status); | ||
605 | } | ||
606 | |||
607 | int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status) | ||
608 | { | ||
609 | u32 *outbox; | ||
610 | dma_addr_t outdma; | ||
611 | int err = 0; | ||
612 | u8 lg; | ||
613 | |||
614 | #define QUERY_FW_OUT_SIZE 0x100 | ||
615 | #define QUERY_FW_VER_OFFSET 0x00 | ||
616 | #define QUERY_FW_MAX_CMD_OFFSET 0x0f | ||
617 | #define QUERY_FW_ERR_START_OFFSET 0x30 | ||
618 | #define QUERY_FW_ERR_SIZE_OFFSET 0x38 | ||
619 | |||
620 | #define QUERY_FW_START_OFFSET 0x20 | ||
621 | #define QUERY_FW_END_OFFSET 0x28 | ||
622 | |||
623 | #define QUERY_FW_SIZE_OFFSET 0x00 | ||
624 | #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 | ||
625 | #define QUERY_FW_EQ_ARM_BASE_OFFSET 0x40 | ||
626 | #define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48 | ||
627 | |||
628 | outbox = pci_alloc_consistent(dev->pdev, QUERY_FW_OUT_SIZE, &outdma); | ||
629 | if (!outbox) { | ||
630 | return -ENOMEM; | ||
631 | } | ||
632 | |||
633 | err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_FW, | ||
634 | CMD_TIME_CLASS_A, status); | ||
635 | |||
636 | if (err) | ||
637 | goto out; | ||
638 | |||
639 | MTHCA_GET(dev->fw_ver, outbox, QUERY_FW_VER_OFFSET); | ||
640 | /* | ||
641 | * FW subminor version is at more significant bits than minor | ||
642 | * version, so swap here. | ||
643 | */ | ||
644 | dev->fw_ver = (dev->fw_ver & 0xffff00000000ull) | | ||
645 | ((dev->fw_ver & 0xffff0000ull) >> 16) | | ||
646 | ((dev->fw_ver & 0x0000ffffull) << 16); | ||
647 | |||
648 | MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); | ||
649 | dev->cmd.max_cmds = 1 << lg; | ||
650 | |||
651 | mthca_dbg(dev, "FW version %012llx, max commands %d\n", | ||
652 | (unsigned long long) dev->fw_ver, dev->cmd.max_cmds); | ||
653 | |||
654 | if (dev->hca_type == ARBEL_NATIVE) { | ||
655 | MTHCA_GET(dev->fw.arbel.fw_pages, outbox, QUERY_FW_SIZE_OFFSET); | ||
656 | MTHCA_GET(dev->fw.arbel.clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); | ||
657 | MTHCA_GET(dev->fw.arbel.eq_arm_base, outbox, QUERY_FW_EQ_ARM_BASE_OFFSET); | ||
658 | MTHCA_GET(dev->fw.arbel.eq_set_ci_base, outbox, QUERY_FW_EQ_SET_CI_BASE_OFFSET); | ||
659 | mthca_dbg(dev, "FW size %d KB\n", dev->fw.arbel.fw_pages << 2); | ||
660 | |||
661 | /* | ||
662 | * Arbel page size is always 4 KB; round up number of | ||
663 | * system pages needed. | ||
664 | */ | ||
665 | dev->fw.arbel.fw_pages = | ||
666 | (dev->fw.arbel.fw_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> | ||
667 | (PAGE_SHIFT - 12); | ||
668 | |||
669 | mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n", | ||
670 | (unsigned long long) dev->fw.arbel.clr_int_base, | ||
671 | (unsigned long long) dev->fw.arbel.eq_arm_base, | ||
672 | (unsigned long long) dev->fw.arbel.eq_set_ci_base); | ||
673 | } else { | ||
674 | MTHCA_GET(dev->fw.tavor.fw_start, outbox, QUERY_FW_START_OFFSET); | ||
675 | MTHCA_GET(dev->fw.tavor.fw_end, outbox, QUERY_FW_END_OFFSET); | ||
676 | |||
677 | mthca_dbg(dev, "FW size %d KB (start %llx, end %llx)\n", | ||
678 | (int) ((dev->fw.tavor.fw_end - dev->fw.tavor.fw_start) >> 10), | ||
679 | (unsigned long long) dev->fw.tavor.fw_start, | ||
680 | (unsigned long long) dev->fw.tavor.fw_end); | ||
681 | } | ||
682 | |||
683 | out: | ||
684 | pci_free_consistent(dev->pdev, QUERY_FW_OUT_SIZE, outbox, outdma); | ||
685 | return err; | ||
686 | } | ||
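The minor/subminor swap in QUERY_FW above is easier to follow with a concrete value; the raw version word below is invented purely for illustration.

/* Sketch of the FW version field swap; the sample version is made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t raw = 0x000300020001ull;	/* major=3, subminor=2, minor=1 */
	uint64_t ver = (raw & 0xffff00000000ull) |
		       ((raw & 0xffff0000ull) >> 16) |
		       ((raw & 0x0000ffffull) << 16);

	/* prints 000300010002, i.e. the human-friendly 3.1.2 ordering */
	printf("%012llx\n", (unsigned long long) ver);
	return 0;
}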
687 | |||
688 | int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status) | ||
689 | { | ||
690 | u8 info; | ||
691 | u32 *outbox; | ||
692 | dma_addr_t outdma; | ||
693 | int err = 0; | ||
694 | |||
695 | #define ENABLE_LAM_OUT_SIZE 0x100 | ||
696 | #define ENABLE_LAM_START_OFFSET 0x00 | ||
697 | #define ENABLE_LAM_END_OFFSET 0x08 | ||
698 | #define ENABLE_LAM_INFO_OFFSET 0x13 | ||
699 | |||
700 | #define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4) | ||
701 | #define ENABLE_LAM_INFO_ECC_MASK 0x3 | ||
702 | |||
703 | outbox = pci_alloc_consistent(dev->pdev, ENABLE_LAM_OUT_SIZE, &outdma); | ||
704 | if (!outbox) | ||
705 | return -ENOMEM; | ||
706 | |||
707 | err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_ENABLE_LAM, | ||
708 | CMD_TIME_CLASS_C, status); | ||
709 | |||
710 | if (err) | ||
711 | goto out; | ||
712 | |||
713 | if (*status == MTHCA_CMD_STAT_LAM_NOT_PRE) | ||
714 | goto out; | ||
715 | |||
716 | MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET); | ||
717 | MTHCA_GET(dev->ddr_end, outbox, ENABLE_LAM_END_OFFSET); | ||
718 | MTHCA_GET(info, outbox, ENABLE_LAM_INFO_OFFSET); | ||
719 | |||
720 | if (!!(info & ENABLE_LAM_INFO_HIDDEN_FLAG) != | ||
721 | !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) { | ||
722 | mthca_info(dev, "FW reports that HCA-attached memory " | ||
723 | "is %s hidden; does not match PCI config\n", | ||
724 | (info & ENABLE_LAM_INFO_HIDDEN_FLAG) ? | ||
725 | "" : "not"); | ||
726 | } | ||
727 | if (info & ENABLE_LAM_INFO_HIDDEN_FLAG) | ||
728 | mthca_dbg(dev, "HCA-attached memory is hidden.\n"); | ||
729 | |||
730 | mthca_dbg(dev, "HCA memory size %d KB (start %llx, end %llx)\n", | ||
731 | (int) ((dev->ddr_end - dev->ddr_start) >> 10), | ||
732 | (unsigned long long) dev->ddr_start, | ||
733 | (unsigned long long) dev->ddr_end); | ||
734 | |||
735 | out: | ||
736 | pci_free_consistent(dev->pdev, ENABLE_LAM_OUT_SIZE, outbox, outdma); | ||
737 | return err; | ||
738 | } | ||
739 | |||
740 | int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status) | ||
741 | { | ||
742 | return mthca_cmd(dev, 0, 0, 0, CMD_DISABLE_LAM, CMD_TIME_CLASS_C, status); | ||
743 | } | ||
744 | |||
745 | int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status) | ||
746 | { | ||
747 | u8 info; | ||
748 | u32 *outbox; | ||
749 | dma_addr_t outdma; | ||
750 | int err = 0; | ||
751 | |||
752 | #define QUERY_DDR_OUT_SIZE 0x100 | ||
753 | #define QUERY_DDR_START_OFFSET 0x00 | ||
754 | #define QUERY_DDR_END_OFFSET 0x08 | ||
755 | #define QUERY_DDR_INFO_OFFSET 0x13 | ||
756 | |||
757 | #define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4) | ||
758 | #define QUERY_DDR_INFO_ECC_MASK 0x3 | ||
759 | |||
760 | outbox = pci_alloc_consistent(dev->pdev, QUERY_DDR_OUT_SIZE, &outdma); | ||
761 | if (!outbox) | ||
762 | return -ENOMEM; | ||
763 | |||
764 | err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_DDR, | ||
765 | CMD_TIME_CLASS_A, status); | ||
766 | |||
767 | if (err) | ||
768 | goto out; | ||
769 | |||
770 | MTHCA_GET(dev->ddr_start, outbox, QUERY_DDR_START_OFFSET); | ||
771 | MTHCA_GET(dev->ddr_end, outbox, QUERY_DDR_END_OFFSET); | ||
772 | MTHCA_GET(info, outbox, QUERY_DDR_INFO_OFFSET); | ||
773 | |||
774 | if (!!(info & QUERY_DDR_INFO_HIDDEN_FLAG) != | ||
775 | !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) { | ||
776 | mthca_info(dev, "FW reports that HCA-attached memory " | ||
777 | "is %s hidden; does not match PCI config\n", | ||
778 | (info & QUERY_DDR_INFO_HIDDEN_FLAG) ? | ||
779 | "" : "not"); | ||
780 | } | ||
781 | if (info & QUERY_DDR_INFO_HIDDEN_FLAG) | ||
782 | mthca_dbg(dev, "HCA-attached memory is hidden.\n"); | ||
783 | |||
784 | mthca_dbg(dev, "HCA memory size %d KB (start %llx, end %llx)\n", | ||
785 | (int) ((dev->ddr_end - dev->ddr_start) >> 10), | ||
786 | (unsigned long long) dev->ddr_start, | ||
787 | (unsigned long long) dev->ddr_end); | ||
788 | |||
789 | out: | ||
790 | pci_free_consistent(dev->pdev, QUERY_DDR_OUT_SIZE, outbox, outdma); | ||
791 | return err; | ||
792 | } | ||
793 | |||
794 | int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, | ||
795 | struct mthca_dev_lim *dev_lim, u8 *status) | ||
796 | { | ||
797 | u32 *outbox; | ||
798 | dma_addr_t outdma; | ||
799 | u8 field; | ||
800 | u16 size; | ||
801 | int err; | ||
802 | |||
803 | #define QUERY_DEV_LIM_OUT_SIZE 0x100 | ||
804 | #define QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET 0x10 | ||
805 | #define QUERY_DEV_LIM_MAX_QP_SZ_OFFSET 0x11 | ||
806 | #define QUERY_DEV_LIM_RSVD_QP_OFFSET 0x12 | ||
807 | #define QUERY_DEV_LIM_MAX_QP_OFFSET 0x13 | ||
808 | #define QUERY_DEV_LIM_RSVD_SRQ_OFFSET 0x14 | ||
809 | #define QUERY_DEV_LIM_MAX_SRQ_OFFSET 0x15 | ||
810 | #define QUERY_DEV_LIM_RSVD_EEC_OFFSET 0x16 | ||
811 | #define QUERY_DEV_LIM_MAX_EEC_OFFSET 0x17 | ||
812 | #define QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET 0x19 | ||
813 | #define QUERY_DEV_LIM_RSVD_CQ_OFFSET 0x1a | ||
814 | #define QUERY_DEV_LIM_MAX_CQ_OFFSET 0x1b | ||
815 | #define QUERY_DEV_LIM_MAX_MPT_OFFSET 0x1d | ||
816 | #define QUERY_DEV_LIM_RSVD_EQ_OFFSET 0x1e | ||
817 | #define QUERY_DEV_LIM_MAX_EQ_OFFSET 0x1f | ||
818 | #define QUERY_DEV_LIM_RSVD_MTT_OFFSET 0x20 | ||
819 | #define QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET 0x21 | ||
820 | #define QUERY_DEV_LIM_RSVD_MRW_OFFSET 0x22 | ||
821 | #define QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET 0x23 | ||
822 | #define QUERY_DEV_LIM_MAX_AV_OFFSET 0x27 | ||
823 | #define QUERY_DEV_LIM_MAX_REQ_QP_OFFSET 0x29 | ||
824 | #define QUERY_DEV_LIM_MAX_RES_QP_OFFSET 0x2b | ||
825 | #define QUERY_DEV_LIM_MAX_RDMA_OFFSET 0x2f | ||
826 | #define QUERY_DEV_LIM_RSZ_SRQ_OFFSET 0x33 | ||
827 | #define QUERY_DEV_LIM_ACK_DELAY_OFFSET 0x35 | ||
828 | #define QUERY_DEV_LIM_MTU_WIDTH_OFFSET 0x36 | ||
829 | #define QUERY_DEV_LIM_VL_PORT_OFFSET 0x37 | ||
830 | #define QUERY_DEV_LIM_MAX_GID_OFFSET 0x3b | ||
831 | #define QUERY_DEV_LIM_MAX_PKEY_OFFSET 0x3f | ||
832 | #define QUERY_DEV_LIM_FLAGS_OFFSET 0x44 | ||
833 | #define QUERY_DEV_LIM_RSVD_UAR_OFFSET 0x48 | ||
834 | #define QUERY_DEV_LIM_UAR_SZ_OFFSET 0x49 | ||
835 | #define QUERY_DEV_LIM_PAGE_SZ_OFFSET 0x4b | ||
836 | #define QUERY_DEV_LIM_MAX_SG_OFFSET 0x51 | ||
837 | #define QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET 0x52 | ||
838 | #define QUERY_DEV_LIM_MAX_SG_RQ_OFFSET 0x55 | ||
839 | #define QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET 0x56 | ||
840 | #define QUERY_DEV_LIM_MAX_QP_MCG_OFFSET 0x61 | ||
841 | #define QUERY_DEV_LIM_RSVD_MCG_OFFSET 0x62 | ||
842 | #define QUERY_DEV_LIM_MAX_MCG_OFFSET 0x63 | ||
843 | #define QUERY_DEV_LIM_RSVD_PD_OFFSET 0x64 | ||
844 | #define QUERY_DEV_LIM_MAX_PD_OFFSET 0x65 | ||
845 | #define QUERY_DEV_LIM_RSVD_RDD_OFFSET 0x66 | ||
846 | #define QUERY_DEV_LIM_MAX_RDD_OFFSET 0x67 | ||
847 | #define QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET 0x80 | ||
848 | #define QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET 0x82 | ||
849 | #define QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET 0x84 | ||
850 | #define QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET 0x86 | ||
851 | #define QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET 0x88 | ||
852 | #define QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET 0x8a | ||
853 | #define QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET 0x8c | ||
854 | #define QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET 0x8e | ||
855 | #define QUERY_DEV_LIM_MTT_ENTRY_SZ_OFFSET 0x90 | ||
856 | #define QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET 0x92 | ||
857 | #define QUERY_DEV_LIM_PBL_SZ_OFFSET 0x96 | ||
858 | #define QUERY_DEV_LIM_BMME_FLAGS_OFFSET 0x97 | ||
859 | #define QUERY_DEV_LIM_RSVD_LKEY_OFFSET 0x98 | ||
860 | #define QUERY_DEV_LIM_LAMR_OFFSET 0x9f | ||
861 | #define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET 0xa0 | ||
862 | |||
863 | outbox = pci_alloc_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, &outdma); | ||
864 | if (!outbox) | ||
865 | return -ENOMEM; | ||
866 | |||
867 | err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_DEV_LIM, | ||
868 | CMD_TIME_CLASS_A, status); | ||
869 | |||
870 | if (err) | ||
871 | goto out; | ||
872 | |||
873 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET); | ||
874 | dev_lim->max_srq_sz = 1 << field; | ||
875 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET); | ||
876 | dev_lim->max_qp_sz = 1 << field; | ||
877 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET); | ||
878 | dev_lim->reserved_qps = 1 << (field & 0xf); | ||
879 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET); | ||
880 | dev_lim->max_qps = 1 << (field & 0x1f); | ||
881 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_SRQ_OFFSET); | ||
882 | dev_lim->reserved_srqs = 1 << (field >> 4); | ||
883 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_OFFSET); | ||
884 | dev_lim->max_srqs = 1 << (field & 0x1f); | ||
885 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EEC_OFFSET); | ||
886 | dev_lim->reserved_eecs = 1 << (field & 0xf); | ||
887 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EEC_OFFSET); | ||
888 | dev_lim->max_eecs = 1 << (field & 0x1f); | ||
889 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET); | ||
890 | dev_lim->max_cq_sz = 1 << field; | ||
891 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_CQ_OFFSET); | ||
892 | dev_lim->reserved_cqs = 1 << (field & 0xf); | ||
893 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_OFFSET); | ||
894 | dev_lim->max_cqs = 1 << (field & 0x1f); | ||
895 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MPT_OFFSET); | ||
896 | dev_lim->max_mpts = 1 << (field & 0x3f); | ||
897 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EQ_OFFSET); | ||
898 | dev_lim->reserved_eqs = 1 << (field & 0xf); | ||
899 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EQ_OFFSET); | ||
900 | dev_lim->max_eqs = 1 << (field & 0x7); | ||
901 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET); | ||
902 | dev_lim->reserved_mtts = 1 << (field >> 4); | ||
903 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET); | ||
904 | dev_lim->max_mrw_sz = 1 << field; | ||
905 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MRW_OFFSET); | ||
906 | dev_lim->reserved_mrws = 1 << (field & 0xf); | ||
907 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET); | ||
908 | dev_lim->max_mtt_seg = 1 << (field & 0x3f); | ||
909 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_REQ_QP_OFFSET); | ||
910 | dev_lim->max_requester_per_qp = 1 << (field & 0x3f); | ||
911 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RES_QP_OFFSET); | ||
912 | dev_lim->max_responder_per_qp = 1 << (field & 0x3f); | ||
913 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDMA_OFFSET); | ||
914 | dev_lim->max_rdma_global = 1 << (field & 0x3f); | ||
915 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_ACK_DELAY_OFFSET); | ||
916 | dev_lim->local_ca_ack_delay = field & 0x1f; | ||
917 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MTU_WIDTH_OFFSET); | ||
918 | dev_lim->max_mtu = field >> 4; | ||
919 | dev_lim->max_port_width = field & 0xf; | ||
920 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_VL_PORT_OFFSET); | ||
921 | dev_lim->max_vl = field >> 4; | ||
922 | dev_lim->num_ports = field & 0xf; | ||
923 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET); | ||
924 | dev_lim->max_gids = 1 << (field & 0xf); | ||
925 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET); | ||
926 | dev_lim->max_pkeys = 1 << (field & 0xf); | ||
927 | MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET); | ||
928 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_UAR_OFFSET); | ||
929 | dev_lim->reserved_uars = field >> 4; | ||
930 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_UAR_SZ_OFFSET); | ||
931 | dev_lim->uar_size = 1 << ((field & 0x3f) + 20); | ||
932 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_PAGE_SZ_OFFSET); | ||
933 | dev_lim->min_page_sz = 1 << field; | ||
934 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_OFFSET); | ||
935 | dev_lim->max_sg = field; | ||
936 | |||
937 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET); | ||
938 | dev_lim->max_desc_sz = size; | ||
939 | |||
940 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_MCG_OFFSET); | ||
941 | dev_lim->max_qp_per_mcg = 1 << field; | ||
942 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MCG_OFFSET); | ||
943 | dev_lim->reserved_mgms = field & 0xf; | ||
944 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MCG_OFFSET); | ||
945 | dev_lim->max_mcgs = 1 << field; | ||
946 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_PD_OFFSET); | ||
947 | dev_lim->reserved_pds = field >> 4; | ||
948 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PD_OFFSET); | ||
949 | dev_lim->max_pds = 1 << (field & 0x3f); | ||
950 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_RDD_OFFSET); | ||
951 | dev_lim->reserved_rdds = field >> 4; | ||
952 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDD_OFFSET); | ||
953 | dev_lim->max_rdds = 1 << (field & 0x3f); | ||
954 | |||
955 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET); | ||
956 | dev_lim->eec_entry_sz = size; | ||
957 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET); | ||
958 | dev_lim->qpc_entry_sz = size; | ||
959 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET); | ||
960 | dev_lim->eeec_entry_sz = size; | ||
961 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET); | ||
962 | dev_lim->eqpc_entry_sz = size; | ||
963 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET); | ||
964 | dev_lim->eqc_entry_sz = size; | ||
965 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET); | ||
966 | dev_lim->cqc_entry_sz = size; | ||
967 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET); | ||
968 | dev_lim->srq_entry_sz = size; | ||
969 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET); | ||
970 | dev_lim->uar_scratch_entry_sz = size; | ||
971 | |||
972 | mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", | ||
973 | dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz); | ||
974 | mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", | ||
975 | dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz); | ||
976 | mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", | ||
977 | dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz); | ||
978 | mthca_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", | ||
979 | dev_lim->reserved_mrws, dev_lim->reserved_mtts); | ||
980 | mthca_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", | ||
981 | dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars); | ||
982 | mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", | ||
983 | dev_lim->max_qp_per_mcg, dev_lim->reserved_mgms); | ||
984 | |||
985 | mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags); | ||
986 | |||
987 | if (dev->hca_type == ARBEL_NATIVE) { | ||
988 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET); | ||
989 | dev_lim->hca.arbel.resize_srq = field & 1; | ||
990 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_MTT_ENTRY_SZ_OFFSET); | ||
991 | dev_lim->mtt_seg_sz = size; | ||
992 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET); | ||
993 | dev_lim->mpt_entry_sz = size; | ||
994 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET); | ||
995 | dev_lim->hca.arbel.max_pbl_sz = 1 << (field & 0x3f); | ||
996 | MTHCA_GET(dev_lim->hca.arbel.bmme_flags, outbox, | ||
997 | QUERY_DEV_LIM_BMME_FLAGS_OFFSET); | ||
998 | MTHCA_GET(dev_lim->hca.arbel.reserved_lkey, outbox, | ||
999 | QUERY_DEV_LIM_RSVD_LKEY_OFFSET); | ||
1000 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_LAMR_OFFSET); | ||
1001 | dev_lim->hca.arbel.lam_required = field & 1; | ||
1002 | MTHCA_GET(dev_lim->hca.arbel.max_icm_sz, outbox, | ||
1003 | QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET); | ||
1004 | |||
1005 | if (dev_lim->hca.arbel.bmme_flags & 1) | ||
1006 | mthca_dbg(dev, "Base MM extensions: yes " | ||
1007 | "(flags %d, max PBL %d, rsvd L_Key %08x)\n", | ||
1008 | dev_lim->hca.arbel.bmme_flags, | ||
1009 | dev_lim->hca.arbel.max_pbl_sz, | ||
1010 | dev_lim->hca.arbel.reserved_lkey); | ||
1011 | else | ||
1012 | mthca_dbg(dev, "Base MM extensions: no\n"); | ||
1013 | |||
1014 | mthca_dbg(dev, "Max ICM size %lld MB\n", | ||
1015 | (unsigned long long) dev_lim->hca.arbel.max_icm_sz >> 20); | ||
1016 | } else { | ||
1017 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_AV_OFFSET); | ||
1018 | dev_lim->hca.tavor.max_avs = 1 << (field & 0x3f); | ||
1019 | dev_lim->mtt_seg_sz = MTHCA_MTT_SEG_SIZE; | ||
1020 | dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE; | ||
1021 | } | ||
1022 | |||
1023 | out: | ||
1024 | pci_free_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, outbox, outdma); | ||
1025 | return err; | ||
1026 | } | ||
1027 | |||
1028 | int mthca_QUERY_ADAPTER(struct mthca_dev *dev, | ||
1029 | struct mthca_adapter *adapter, u8 *status) | ||
1030 | { | ||
1031 | u32 *outbox; | ||
1032 | dma_addr_t outdma; | ||
1033 | int err; | ||
1034 | |||
1035 | #define QUERY_ADAPTER_OUT_SIZE 0x100 | ||
1036 | #define QUERY_ADAPTER_VENDOR_ID_OFFSET 0x00 | ||
1037 | #define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04 | ||
1038 | #define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08 | ||
1039 | #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 | ||
1040 | |||
1041 | outbox = pci_alloc_consistent(dev->pdev, QUERY_ADAPTER_OUT_SIZE, &outdma); | ||
1042 | if (!outbox) | ||
1043 | return -ENOMEM; | ||
1044 | |||
1045 | err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_ADAPTER, | ||
1046 | CMD_TIME_CLASS_A, status); | ||
1047 | |||
1048 | if (err) | ||
1049 | goto out; | ||
1050 | |||
1051 | MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET); | ||
1052 | MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET); | ||
1053 | MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); | ||
1054 | MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); | ||
1055 | |||
1056 | out: | ||
1057 | pci_free_consistent(dev->pdev, QUERY_ADAPTER_OUT_SIZE, outbox, outdma); | ||
1058 | return err; | ||
1059 | } | ||
1060 | |||
1061 | int mthca_INIT_HCA(struct mthca_dev *dev, | ||
1062 | struct mthca_init_hca_param *param, | ||
1063 | u8 *status) | ||
1064 | { | ||
1065 | u32 *inbox; | ||
1066 | dma_addr_t indma; | ||
1067 | int err; | ||
1068 | |||
1069 | #define INIT_HCA_IN_SIZE 0x200 | ||
1070 | #define INIT_HCA_FLAGS_OFFSET 0x014 | ||
1071 | #define INIT_HCA_QPC_OFFSET 0x020 | ||
1072 | #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) | ||
1073 | #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) | ||
1074 | #define INIT_HCA_EEC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x20) | ||
1075 | #define INIT_HCA_LOG_EEC_OFFSET (INIT_HCA_QPC_OFFSET + 0x27) | ||
1076 | #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) | ||
1077 | #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) | ||
1078 | #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) | ||
1079 | #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) | ||
1080 | #define INIT_HCA_EQPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) | ||
1081 | #define INIT_HCA_EEEC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) | ||
1082 | #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) | ||
1083 | #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) | ||
1084 | #define INIT_HCA_RDB_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) | ||
1085 | #define INIT_HCA_UDAV_OFFSET 0x0b0 | ||
1086 | #define INIT_HCA_UDAV_LKEY_OFFSET (INIT_HCA_UDAV_OFFSET + 0x0) | ||
1087 | #define INIT_HCA_UDAV_PD_OFFSET (INIT_HCA_UDAV_OFFSET + 0x4) | ||
1088 | #define INIT_HCA_MCAST_OFFSET 0x0c0 | ||
1089 | #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) | ||
1090 | #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) | ||
1091 | #define INIT_HCA_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) | ||
1092 | #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) | ||
1093 | #define INIT_HCA_TPT_OFFSET 0x0f0 | ||
1094 | #define INIT_HCA_MPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) | ||
1095 | #define INIT_HCA_MTT_SEG_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x09) | ||
1096 | #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) | ||
1097 | #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) | ||
1098 | #define INIT_HCA_UAR_OFFSET 0x120 | ||
1099 | #define INIT_HCA_UAR_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x00) | ||
1100 | #define INIT_HCA_UARC_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x09) | ||
1101 | #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) | ||
1102 | #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) | ||
1103 | #define INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10) | ||
1104 | #define INIT_HCA_UAR_CTX_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x18) | ||
1105 | |||
1106 | inbox = pci_alloc_consistent(dev->pdev, INIT_HCA_IN_SIZE, &indma); | ||
1107 | if (!inbox) | ||
1108 | return -ENOMEM; | ||
1109 | |||
1110 | memset(inbox, 0, INIT_HCA_IN_SIZE); | ||
1111 | |||
1112 | #if defined(__LITTLE_ENDIAN) | ||
1113 | *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); | ||
1114 | #elif defined(__BIG_ENDIAN) | ||
1115 | *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1); | ||
1116 | #else | ||
1117 | #error Host endianness not defined | ||
1118 | #endif | ||
1119 | /* Check port for UD address vector: */ | ||
1120 | *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1); | ||
1121 | |||
1122 | /* We leave wqe_quota, responder_exu, etc as 0 (default) */ | ||
1123 | |||
1124 | /* QPC/EEC/CQC/EQC/RDB attributes */ | ||
1125 | |||
1126 | MTHCA_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); | ||
1127 | MTHCA_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); | ||
1128 | MTHCA_PUT(inbox, param->eec_base, INIT_HCA_EEC_BASE_OFFSET); | ||
1129 | MTHCA_PUT(inbox, param->log_num_eecs, INIT_HCA_LOG_EEC_OFFSET); | ||
1130 | MTHCA_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); | ||
1131 | MTHCA_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); | ||
1132 | MTHCA_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); | ||
1133 | MTHCA_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); | ||
1134 | MTHCA_PUT(inbox, param->eqpc_base, INIT_HCA_EQPC_BASE_OFFSET); | ||
1135 | MTHCA_PUT(inbox, param->eeec_base, INIT_HCA_EEEC_BASE_OFFSET); | ||
1136 | MTHCA_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); | ||
1137 | MTHCA_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); | ||
1138 | MTHCA_PUT(inbox, param->rdb_base, INIT_HCA_RDB_BASE_OFFSET); | ||
1139 | |||
1140 | /* UD AV attributes */ | ||
1141 | |||
1142 | /* multicast attributes */ | ||
1143 | |||
1144 | MTHCA_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); | ||
1145 | MTHCA_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); | ||
1146 | MTHCA_PUT(inbox, param->mc_hash_sz, INIT_HCA_MC_HASH_SZ_OFFSET); | ||
1147 | MTHCA_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); | ||
1148 | |||
1149 | /* TPT attributes */ | ||
1150 | |||
1151 | MTHCA_PUT(inbox, param->mpt_base, INIT_HCA_MPT_BASE_OFFSET); | ||
1152 | if (dev->hca_type != ARBEL_NATIVE) | ||
1153 | MTHCA_PUT(inbox, param->mtt_seg_sz, INIT_HCA_MTT_SEG_SZ_OFFSET); | ||
1154 | MTHCA_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); | ||
1155 | MTHCA_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); | ||
1156 | |||
1157 | /* UAR attributes */ | ||
1158 | { | ||
1159 | u8 uar_page_sz = PAGE_SHIFT - 12; | ||
1160 | MTHCA_PUT(inbox, uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET); | ||
1161 | } | ||
1162 | |||
1163 | MTHCA_PUT(inbox, param->uar_scratch_base, INIT_HCA_UAR_SCATCH_BASE_OFFSET); | ||
1164 | |||
1165 | if (dev->hca_type == ARBEL_NATIVE) { | ||
1166 | MTHCA_PUT(inbox, param->log_uarc_sz, INIT_HCA_UARC_SZ_OFFSET); | ||
1167 | MTHCA_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); | ||
1168 | MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET); | ||
1169 | } | ||
1170 | |||
1171 | err = mthca_cmd(dev, indma, 0, 0, CMD_INIT_HCA, | ||
1172 | HZ, status); | ||
1173 | |||
1174 | pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma); | ||
1175 | return err; | ||
1176 | } | ||
1177 | |||
1178 | int mthca_INIT_IB(struct mthca_dev *dev, | ||
1179 | struct mthca_init_ib_param *param, | ||
1180 | int port, u8 *status) | ||
1181 | { | ||
1182 | u32 *inbox; | ||
1183 | dma_addr_t indma; | ||
1184 | int err; | ||
1185 | u32 flags; | ||
1186 | |||
1187 | #define INIT_IB_IN_SIZE 56 | ||
1188 | #define INIT_IB_FLAGS_OFFSET 0x00 | ||
1189 | #define INIT_IB_FLAG_SIG (1 << 18) | ||
1190 | #define INIT_IB_FLAG_NG (1 << 17) | ||
1191 | #define INIT_IB_FLAG_G0 (1 << 16) | ||
1192 | #define INIT_IB_FLAG_1X (1 << 8) | ||
1193 | #define INIT_IB_FLAG_4X (1 << 9) | ||
1194 | #define INIT_IB_FLAG_12X (1 << 11) | ||
1195 | #define INIT_IB_VL_SHIFT 4 | ||
1196 | #define INIT_IB_MTU_SHIFT 12 | ||
1197 | #define INIT_IB_MAX_GID_OFFSET 0x06 | ||
1198 | #define INIT_IB_MAX_PKEY_OFFSET 0x0a | ||
1199 | #define INIT_IB_GUID0_OFFSET 0x10 | ||
1200 | #define INIT_IB_NODE_GUID_OFFSET 0x18 | ||
1201 | #define INIT_IB_SI_GUID_OFFSET 0x20 | ||
1202 | |||
1203 | inbox = pci_alloc_consistent(dev->pdev, INIT_IB_IN_SIZE, &indma); | ||
1204 | if (!inbox) | ||
1205 | return -ENOMEM; | ||
1206 | |||
1207 | memset(inbox, 0, INIT_IB_IN_SIZE); | ||
1208 | |||
1209 | flags = 0; | ||
1210 | flags |= param->enable_1x ? INIT_IB_FLAG_1X : 0; | ||
1211 | flags |= param->enable_4x ? INIT_IB_FLAG_4X : 0; | ||
1212 | flags |= param->set_guid0 ? INIT_IB_FLAG_G0 : 0; | ||
1213 | flags |= param->set_node_guid ? INIT_IB_FLAG_NG : 0; | ||
1214 | flags |= param->set_si_guid ? INIT_IB_FLAG_SIG : 0; | ||
1215 | flags |= param->vl_cap << INIT_IB_VL_SHIFT; | ||
1216 | flags |= param->mtu_cap << INIT_IB_MTU_SHIFT; | ||
1217 | MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET); | ||
1218 | |||
1219 | MTHCA_PUT(inbox, param->gid_cap, INIT_IB_MAX_GID_OFFSET); | ||
1220 | MTHCA_PUT(inbox, param->pkey_cap, INIT_IB_MAX_PKEY_OFFSET); | ||
1221 | MTHCA_PUT(inbox, param->guid0, INIT_IB_GUID0_OFFSET); | ||
1222 | MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET); | ||
1223 | MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET); | ||
1224 | |||
1225 | err = mthca_cmd(dev, indma, port, 0, CMD_INIT_IB, | ||
1226 | CMD_TIME_CLASS_A, status); | ||
1227 | |||
1228 | pci_free_consistent(dev->pdev, INIT_IB_IN_SIZE, inbox, indma); | ||
1229 | return err; | ||
1230 | } | ||
1231 | |||
1232 | int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status) | ||
1233 | { | ||
1234 | return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, HZ, status); | ||
1235 | } | ||
1236 | |||
1237 | int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status) | ||
1238 | { | ||
1239 | return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, HZ, status); | ||
1240 | } | ||
1241 | |||
1242 | int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param, | ||
1243 | int port, u8 *status) | ||
1244 | { | ||
1245 | u32 *inbox; | ||
1246 | dma_addr_t indma; | ||
1247 | int err; | ||
1248 | u32 flags = 0; | ||
1249 | |||
1250 | #define SET_IB_IN_SIZE 0x40 | ||
1251 | #define SET_IB_FLAGS_OFFSET 0x00 | ||
1252 | #define SET_IB_FLAG_SIG (1 << 18) | ||
1253 | #define SET_IB_FLAG_RQK (1 << 0) | ||
1254 | #define SET_IB_CAP_MASK_OFFSET 0x04 | ||
1255 | #define SET_IB_SI_GUID_OFFSET 0x08 | ||
1256 | |||
1257 | inbox = pci_alloc_consistent(dev->pdev, SET_IB_IN_SIZE, &indma); | ||
1258 | if (!inbox) | ||
1259 | return -ENOMEM; | ||
1260 | |||
1261 | memset(inbox, 0, SET_IB_IN_SIZE); | ||
1262 | |||
1263 | flags |= param->set_si_guid ? SET_IB_FLAG_SIG : 0; | ||
1264 | flags |= param->reset_qkey_viol ? SET_IB_FLAG_RQK : 0; | ||
1265 | MTHCA_PUT(inbox, flags, SET_IB_FLAGS_OFFSET); | ||
1266 | |||
1267 | MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET); | ||
1268 | MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET); | ||
1269 | |||
1270 | err = mthca_cmd(dev, indma, port, 0, CMD_SET_IB, | ||
1271 | CMD_TIME_CLASS_B, status); | ||
1272 | |||
1273 | pci_free_consistent(dev->pdev, SET_IB_IN_SIZE, inbox, indma); | ||
1274 | return err; | ||
1275 | } | ||
1276 | |||
1277 | int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status) | ||
1278 | { | ||
1279 | return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt, status); | ||
1280 | } | ||
1281 | |||
1282 | int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status) | ||
1283 | { | ||
1284 | u64 *inbox; | ||
1285 | dma_addr_t indma; | ||
1286 | int err; | ||
1287 | |||
1288 | inbox = pci_alloc_consistent(dev->pdev, 16, &indma); | ||
1289 | if (!inbox) | ||
1290 | return -ENOMEM; | ||
1291 | |||
1292 | inbox[0] = cpu_to_be64(virt); | ||
1293 | inbox[1] = cpu_to_be64(dma_addr); | ||
1294 | |||
1295 | err = mthca_cmd(dev, indma, 1, 0, CMD_MAP_ICM, CMD_TIME_CLASS_B, status); | ||
1296 | |||
1297 | pci_free_consistent(dev->pdev, 16, inbox, indma); | ||
1298 | |||
1299 | if (!err) | ||
1300 | mthca_dbg(dev, "Mapped page at %llx for ICM.\n", | ||
1301 | (unsigned long long) virt); | ||
1302 | |||
1303 | return err; | ||
1304 | } | ||
1305 | |||
1306 | int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status) | ||
1307 | { | ||
1308 | mthca_dbg(dev, "Unmapping %d pages at %llx from ICM.\n", | ||
1309 | page_count, (unsigned long long) virt); | ||
1310 | |||
1311 | return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B, status); | ||
1312 | } | ||
1313 | |||
1314 | int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status) | ||
1315 | { | ||
1316 | return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, -1, status); | ||
1317 | } | ||
1318 | |||
1319 | int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status) | ||
1320 | { | ||
1321 | return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B, status); | ||
1322 | } | ||
1323 | |||
1324 | int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages, | ||
1325 | u8 *status) | ||
1326 | { | ||
1327 | int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0, 0, CMD_SET_ICM_SIZE, | ||
1328 | CMD_TIME_CLASS_A, status); | ||
1329 | |||
1330 | if (ret || *status) | ||
1331 | return ret; | ||
1332 | |||
1333 | /* | ||
1334 | * Arbel page size is always 4 KB; round up number of system | ||
1335 | * pages needed. | ||
1336 | */ | ||
1337 | *aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12); | ||
1338 | |||
1339 | return 0; | ||
1340 | } | ||
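The rounding at the end of mthca_SET_ICM_SIZE (and the identical one in mthca_QUERY_FW) converts a firmware count of 4 KB pages into system pages. A sketch assuming a hypothetical 16 KB system page size; neither PAGE_SHIFT nor the page count below comes from this driver.

/* Sketch of the 4 KB -> system-page rounding used above. */
#include <stdio.h>

#define PAGE_SHIFT 14	/* pretend 16 KB system pages */

int main(void)
{
	unsigned long aux_pages = 9;	/* invented FW answer, in 4 KB units */

	aux_pages = (aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12);

	printf("%lu system pages\n", aux_pages);	/* 36 KB -> 3 x 16 KB */
	return 0;
}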
1341 | |||
1342 | int mthca_SW2HW_MPT(struct mthca_dev *dev, void *mpt_entry, | ||
1343 | int mpt_index, u8 *status) | ||
1344 | { | ||
1345 | dma_addr_t indma; | ||
1346 | int err; | ||
1347 | |||
1348 | indma = pci_map_single(dev->pdev, mpt_entry, | ||
1349 | MTHCA_MPT_ENTRY_SIZE, | ||
1350 | PCI_DMA_TODEVICE); | ||
1351 | if (pci_dma_mapping_error(indma)) | ||
1352 | return -ENOMEM; | ||
1353 | |||
1354 | err = mthca_cmd(dev, indma, mpt_index, 0, CMD_SW2HW_MPT, | ||
1355 | CMD_TIME_CLASS_B, status); | ||
1356 | |||
1357 | pci_unmap_single(dev->pdev, indma, | ||
1358 | MTHCA_MPT_ENTRY_SIZE, PCI_DMA_TODEVICE); | ||
1359 | return err; | ||
1360 | } | ||
1361 | |||
1362 | int mthca_HW2SW_MPT(struct mthca_dev *dev, void *mpt_entry, | ||
1363 | int mpt_index, u8 *status) | ||
1364 | { | ||
1365 | dma_addr_t outdma = 0; | ||
1366 | int err; | ||
1367 | |||
1368 | if (mpt_entry) { | ||
1369 | outdma = pci_map_single(dev->pdev, mpt_entry, | ||
1370 | MTHCA_MPT_ENTRY_SIZE, | ||
1371 | PCI_DMA_FROMDEVICE); | ||
1372 | if (pci_dma_mapping_error(outdma)) | ||
1373 | return -ENOMEM; | ||
1374 | } | ||
1375 | |||
1376 | err = mthca_cmd_box(dev, 0, outdma, mpt_index, !mpt_entry, | ||
1377 | CMD_HW2SW_MPT, | ||
1378 | CMD_TIME_CLASS_B, status); | ||
1379 | |||
1380 | if (mpt_entry) | ||
1381 | pci_unmap_single(dev->pdev, outdma, | ||
1382 | MTHCA_MPT_ENTRY_SIZE, | ||
1383 | PCI_DMA_FROMDEVICE); | ||
1384 | return err; | ||
1385 | } | ||
1386 | |||
1387 | int mthca_WRITE_MTT(struct mthca_dev *dev, u64 *mtt_entry, | ||
1388 | int num_mtt, u8 *status) | ||
1389 | { | ||
1390 | dma_addr_t indma; | ||
1391 | int err; | ||
1392 | |||
1393 | indma = pci_map_single(dev->pdev, mtt_entry, | ||
1394 | (num_mtt + 2) * 8, | ||
1395 | PCI_DMA_TODEVICE); | ||
1396 | if (pci_dma_mapping_error(indma)) | ||
1397 | return -ENOMEM; | ||
1398 | |||
1399 | err = mthca_cmd(dev, indma, num_mtt, 0, CMD_WRITE_MTT, | ||
1400 | CMD_TIME_CLASS_B, status); | ||
1401 | |||
1402 | pci_unmap_single(dev->pdev, indma, | ||
1403 | (num_mtt + 2) * 8, PCI_DMA_TODEVICE); | ||
1404 | return err; | ||
1405 | } | ||
1406 | |||
1407 | int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap, | ||
1408 | int eq_num, u8 *status) | ||
1409 | { | ||
1410 | mthca_dbg(dev, "%s mask %016llx for eqn %d\n", | ||
1411 | unmap ? "Clearing" : "Setting", | ||
1412 | (unsigned long long) event_mask, eq_num); | ||
1413 | return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num, | ||
1414 | 0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status); | ||
1415 | } | ||
1416 | |||
1417 | int mthca_SW2HW_EQ(struct mthca_dev *dev, void *eq_context, | ||
1418 | int eq_num, u8 *status) | ||
1419 | { | ||
1420 | dma_addr_t indma; | ||
1421 | int err; | ||
1422 | |||
1423 | indma = pci_map_single(dev->pdev, eq_context, | ||
1424 | MTHCA_EQ_CONTEXT_SIZE, | ||
1425 | PCI_DMA_TODEVICE); | ||
1426 | if (pci_dma_mapping_error(indma)) | ||
1427 | return -ENOMEM; | ||
1428 | |||
1429 | err = mthca_cmd(dev, indma, eq_num, 0, CMD_SW2HW_EQ, | ||
1430 | CMD_TIME_CLASS_A, status); | ||
1431 | |||
1432 | pci_unmap_single(dev->pdev, indma, | ||
1433 | MTHCA_EQ_CONTEXT_SIZE, PCI_DMA_TODEVICE); | ||
1434 | return err; | ||
1435 | } | ||
1436 | |||
1437 | int mthca_HW2SW_EQ(struct mthca_dev *dev, void *eq_context, | ||
1438 | int eq_num, u8 *status) | ||
1439 | { | ||
1440 | dma_addr_t outdma = 0; | ||
1441 | int err; | ||
1442 | |||
1443 | outdma = pci_map_single(dev->pdev, eq_context, | ||
1444 | MTHCA_EQ_CONTEXT_SIZE, | ||
1445 | PCI_DMA_FROMDEVICE); | ||
1446 | if (pci_dma_mapping_error(outdma)) | ||
1447 | return -ENOMEM; | ||
1448 | |||
1449 | err = mthca_cmd_box(dev, 0, outdma, eq_num, 0, | ||
1450 | CMD_HW2SW_EQ, | ||
1451 | CMD_TIME_CLASS_A, status); | ||
1452 | |||
1453 | pci_unmap_single(dev->pdev, outdma, | ||
1454 | MTHCA_EQ_CONTEXT_SIZE, | ||
1455 | PCI_DMA_FROMDEVICE); | ||
1456 | return err; | ||
1457 | } | ||
1458 | |||
1459 | int mthca_SW2HW_CQ(struct mthca_dev *dev, void *cq_context, | ||
1460 | int cq_num, u8 *status) | ||
1461 | { | ||
1462 | dma_addr_t indma; | ||
1463 | int err; | ||
1464 | |||
1465 | indma = pci_map_single(dev->pdev, cq_context, | ||
1466 | MTHCA_CQ_CONTEXT_SIZE, | ||
1467 | PCI_DMA_TODEVICE); | ||
1468 | if (pci_dma_mapping_error(indma)) | ||
1469 | return -ENOMEM; | ||
1470 | |||
1471 | err = mthca_cmd(dev, indma, cq_num, 0, CMD_SW2HW_CQ, | ||
1472 | CMD_TIME_CLASS_A, status); | ||
1473 | |||
1474 | pci_unmap_single(dev->pdev, indma, | ||
1475 | MTHCA_CQ_CONTEXT_SIZE, PCI_DMA_TODEVICE); | ||
1476 | return err; | ||
1477 | } | ||
1478 | |||
1479 | int mthca_HW2SW_CQ(struct mthca_dev *dev, void *cq_context, | ||
1480 | int cq_num, u8 *status) | ||
1481 | { | ||
1482 | dma_addr_t outdma = 0; | ||
1483 | int err; | ||
1484 | |||
1485 | outdma = pci_map_single(dev->pdev, cq_context, | ||
1486 | MTHCA_CQ_CONTEXT_SIZE, | ||
1487 | PCI_DMA_FROMDEVICE); | ||
1488 | if (pci_dma_mapping_error(outdma)) | ||
1489 | return -ENOMEM; | ||
1490 | |||
1491 | err = mthca_cmd_box(dev, 0, outdma, cq_num, 0, | ||
1492 | CMD_HW2SW_CQ, | ||
1493 | CMD_TIME_CLASS_A, status); | ||
1494 | |||
1495 | pci_unmap_single(dev->pdev, outdma, | ||
1496 | MTHCA_CQ_CONTEXT_SIZE, | ||
1497 | PCI_DMA_FROMDEVICE); | ||
1498 | return err; | ||
1499 | } | ||
1500 | |||
1501 | int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, | ||
1502 | int is_ee, void *qp_context, u32 optmask, | ||
1503 | u8 *status) | ||
1504 | { | ||
1505 | static const u16 op[] = { | ||
1506 | [MTHCA_TRANS_RST2INIT] = CMD_RST2INIT_QPEE, | ||
1507 | [MTHCA_TRANS_INIT2INIT] = CMD_INIT2INIT_QPEE, | ||
1508 | [MTHCA_TRANS_INIT2RTR] = CMD_INIT2RTR_QPEE, | ||
1509 | [MTHCA_TRANS_RTR2RTS] = CMD_RTR2RTS_QPEE, | ||
1510 | [MTHCA_TRANS_RTS2RTS] = CMD_RTS2RTS_QPEE, | ||
1511 | [MTHCA_TRANS_SQERR2RTS] = CMD_SQERR2RTS_QPEE, | ||
1512 | [MTHCA_TRANS_ANY2ERR] = CMD_2ERR_QPEE, | ||
1513 | [MTHCA_TRANS_RTS2SQD] = CMD_RTS2SQD_QPEE, | ||
1514 | [MTHCA_TRANS_SQD2SQD] = CMD_SQD2SQD_QPEE, | ||
1515 | [MTHCA_TRANS_SQD2RTS] = CMD_SQD2RTS_QPEE, | ||
1516 | [MTHCA_TRANS_ANY2RST] = CMD_ERR2RST_QPEE | ||
1517 | }; | ||
1518 | u8 op_mod = 0; | ||
1519 | |||
1520 | dma_addr_t indma; | ||
1521 | int err; | ||
1522 | |||
1523 | if (trans < 0 || trans >= ARRAY_SIZE(op)) | ||
1524 | return -EINVAL; | ||
1525 | |||
1526 | if (trans == MTHCA_TRANS_ANY2RST) { | ||
1527 | indma = 0; | ||
1528 | op_mod = 3; /* don't write outbox, any->reset */ | ||
1529 | |||
1530 | /* For debugging */ | ||
1531 | qp_context = pci_alloc_consistent(dev->pdev, MTHCA_QP_CONTEXT_SIZE, | ||
1532 | &indma); | ||
1533 | if (qp_context) op_mod = 2; /* write outbox, any->reset */ | ||
1534 | } else { | ||
1535 | indma = pci_map_single(dev->pdev, qp_context, | ||
1536 | MTHCA_QP_CONTEXT_SIZE, | ||
1537 | PCI_DMA_TODEVICE); | ||
1538 | if (pci_dma_mapping_error(indma)) | ||
1539 | return -ENOMEM; | ||
1540 | |||
1541 | if (0) { | ||
1542 | int i; | ||
1543 | mthca_dbg(dev, "Dumping QP context:\n"); | ||
1544 | printk(" opt param mask: %08x\n", be32_to_cpup(qp_context)); | ||
1545 | for (i = 0; i < 0x100 / 4; ++i) { | ||
1546 | if (i % 8 == 0) | ||
1547 | printk(" [%02x] ", i * 4); | ||
1548 | printk(" %08x", be32_to_cpu(((u32 *) qp_context)[i + 2])); | ||
1549 | if ((i + 1) % 8 == 0) | ||
1550 | printk("\n"); | ||
1551 | } | ||
1552 | } | ||
1553 | } | ||
1554 | |||
1555 | if (trans == MTHCA_TRANS_ANY2RST) { | ||
1556 | err = mthca_cmd_box(dev, 0, indma, (!!is_ee << 24) | num, | ||
1557 | op_mod, op[trans], CMD_TIME_CLASS_C, status); | ||
1558 | |||
1559 | if (0) { | ||
1560 | int i; | ||
1561 | mthca_dbg(dev, "Dumping QP context:\n"); | ||
1562 | printk(" %08x\n", be32_to_cpup(qp_context)); | ||
1563 | for (i = 0; i < 0x100 / 4; ++i) { | ||
1564 | if (i % 8 == 0) | ||
1565 | printk("[%02x] ", i * 4); | ||
1566 | printk(" %08x", be32_to_cpu(((u32 *) qp_context)[i + 2])); | ||
1567 | if ((i + 1) % 8 == 0) | ||
1568 | printk("\n"); | ||
1569 | } | ||
1570 | } | ||
1571 | |||
1572 | } else | ||
1573 | err = mthca_cmd(dev, indma, (!!is_ee << 24) | num, | ||
1574 | op_mod, op[trans], CMD_TIME_CLASS_C, status); | ||
1575 | |||
1576 | if (trans != MTHCA_TRANS_ANY2RST) | ||
1577 | pci_unmap_single(dev->pdev, indma, | ||
1578 | MTHCA_QP_CONTEXT_SIZE, PCI_DMA_TODEVICE); | ||
1579 | else if (qp_context) | ||
1580 | pci_free_consistent(dev->pdev, MTHCA_QP_CONTEXT_SIZE, | ||
1581 | qp_context, indma); | ||
1582 | return err; | ||
1583 | } | ||
1584 | |||
1585 | int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, | ||
1586 | void *qp_context, u8 *status) | ||
1587 | { | ||
1588 | dma_addr_t outdma = 0; | ||
1589 | int err; | ||
1590 | |||
1591 | outdma = pci_map_single(dev->pdev, qp_context, | ||
1592 | MTHCA_QP_CONTEXT_SIZE, | ||
1593 | PCI_DMA_FROMDEVICE); | ||
1594 | if (pci_dma_mapping_error(outdma)) | ||
1595 | return -ENOMEM; | ||
1596 | |||
1597 | err = mthca_cmd_box(dev, 0, outdma, (!!is_ee << 24) | num, 0, | ||
1598 | CMD_QUERY_QPEE, | ||
1599 | CMD_TIME_CLASS_A, status); | ||
1600 | |||
1601 | pci_unmap_single(dev->pdev, outdma, | ||
1602 | MTHCA_QP_CONTEXT_SIZE, | ||
1603 | PCI_DMA_FROMDEVICE); | ||
1604 | return err; | ||
1605 | } | ||
1606 | |||
1607 | int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, | ||
1608 | u8 *status) | ||
1609 | { | ||
1610 | u8 op_mod; | ||
1611 | |||
1612 | switch (type) { | ||
1613 | case IB_QPT_SMI: | ||
1614 | op_mod = 0; | ||
1615 | break; | ||
1616 | case IB_QPT_GSI: | ||
1617 | op_mod = 1; | ||
1618 | break; | ||
1619 | case IB_QPT_RAW_IPV6: | ||
1620 | op_mod = 2; | ||
1621 | break; | ||
1622 | case IB_QPT_RAW_ETY: | ||
1623 | op_mod = 3; | ||
1624 | break; | ||
1625 | default: | ||
1626 | return -EINVAL; | ||
1627 | } | ||
1628 | |||
1629 | return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP, | ||
1630 | CMD_TIME_CLASS_B, status); | ||
1631 | } | ||
1632 | |||
1633 | int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, | ||
1634 | int port, struct ib_wc* in_wc, struct ib_grh* in_grh, | ||
1635 | void *in_mad, void *response_mad, u8 *status) | ||
1636 | { | ||
1637 | void *box; | ||
1638 | dma_addr_t dma; | ||
1639 | int err; | ||
1640 | u32 in_modifier = port; | ||
1641 | u8 op_modifier = 0; | ||
1642 | |||
1643 | #define MAD_IFC_BOX_SIZE 0x400 | ||
1644 | #define MAD_IFC_MY_QPN_OFFSET 0x100 | ||
1645 | #define MAD_IFC_RQPN_OFFSET 0x104 | ||
1646 | #define MAD_IFC_SL_OFFSET 0x108 | ||
1647 | #define MAD_IFC_G_PATH_OFFSET 0x109 | ||
1648 | #define MAD_IFC_RLID_OFFSET 0x10a | ||
1649 | #define MAD_IFC_PKEY_OFFSET 0x10e | ||
1650 | #define MAD_IFC_GRH_OFFSET 0x140 | ||
1651 | |||
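/*
 * Layout as used below: the first 512 bytes of the box hold the request MAD plus the
 * optional WC/GRH information at the offsets above; the response MAD is read back
 * from the second 512 bytes.
 */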
1652 | box = pci_alloc_consistent(dev->pdev, MAD_IFC_BOX_SIZE, &dma); | ||
1653 | if (!box) | ||
1654 | return -ENOMEM; | ||
1655 | |||
1656 | memcpy(box, in_mad, 256); | ||
1657 | |||
1658 | /* | ||
1659 | * Key check traps can't be generated unless we have in_wc to | ||
1660 | * tell us where to send the trap. | ||
1661 | */ | ||
1662 | if (ignore_mkey || !in_wc) | ||
1663 | op_modifier |= 0x1; | ||
1664 | if (ignore_bkey || !in_wc) | ||
1665 | op_modifier |= 0x2; | ||
1666 | |||
1667 | if (in_wc) { | ||
1668 | u8 val; | ||
1669 | |||
1670 | memset(box + 256, 0, 256); | ||
1671 | |||
1672 | MTHCA_PUT(box, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET); | ||
1673 | MTHCA_PUT(box, in_wc->src_qp, MAD_IFC_RQPN_OFFSET); | ||
1674 | |||
1675 | val = in_wc->sl << 4; | ||
1676 | MTHCA_PUT(box, val, MAD_IFC_SL_OFFSET); | ||
1677 | |||
1678 | val = in_wc->dlid_path_bits | | ||
1679 | (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0); | ||
1680 | MTHCA_PUT(box, val, MAD_IFC_G_PATH_OFFSET); | ||
1681 | |||
1682 | MTHCA_PUT(box, in_wc->slid, MAD_IFC_RLID_OFFSET); | ||
1683 | MTHCA_PUT(box, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET); | ||
1684 | |||
1685 | if (in_grh) | ||
1686 | memcpy((u8 *) box + MAD_IFC_GRH_OFFSET, in_grh, 40); | ||
1687 | |||
1688 | op_modifier |= 0x10; | ||
1689 | |||
1690 | in_modifier |= in_wc->slid << 16; | ||
1691 | } | ||
1692 | |||
1693 | err = mthca_cmd_box(dev, dma, dma + 512, in_modifier, op_modifier, | ||
1694 | CMD_MAD_IFC, CMD_TIME_CLASS_C, status); | ||
1695 | |||
1696 | if (!err && !*status) | ||
1697 | memcpy(response_mad, box + 512, 256); | ||
1698 | |||
1699 | pci_free_consistent(dev->pdev, MAD_IFC_BOX_SIZE, box, dma); | ||
1700 | return err; | ||
1701 | } | ||
1702 | |||
1703 | int mthca_READ_MGM(struct mthca_dev *dev, int index, void *mgm, | ||
1704 | u8 *status) | ||
1705 | { | ||
1706 | dma_addr_t outdma = 0; | ||
1707 | int err; | ||
1708 | |||
1709 | outdma = pci_map_single(dev->pdev, mgm, | ||
1710 | MTHCA_MGM_ENTRY_SIZE, | ||
1711 | PCI_DMA_FROMDEVICE); | ||
1712 | if (pci_dma_mapping_error(outdma)) | ||
1713 | return -ENOMEM; | ||
1714 | |||
1715 | err = mthca_cmd_box(dev, 0, outdma, index, 0, | ||
1716 | CMD_READ_MGM, | ||
1717 | CMD_TIME_CLASS_A, status); | ||
1718 | |||
1719 | pci_unmap_single(dev->pdev, outdma, | ||
1720 | MTHCA_MGM_ENTRY_SIZE, | ||
1721 | PCI_DMA_FROMDEVICE); | ||
1722 | return err; | ||
1723 | } | ||
1724 | |||
1725 | int mthca_WRITE_MGM(struct mthca_dev *dev, int index, void *mgm, | ||
1726 | u8 *status) | ||
1727 | { | ||
1728 | dma_addr_t indma; | ||
1729 | int err; | ||
1730 | |||
1731 | indma = pci_map_single(dev->pdev, mgm, | ||
1732 | MTHCA_MGM_ENTRY_SIZE, | ||
1733 | PCI_DMA_TODEVICE); | ||
1734 | if (pci_dma_mapping_error(indma)) | ||
1735 | return -ENOMEM; | ||
1736 | |||
1737 | err = mthca_cmd(dev, indma, index, 0, CMD_WRITE_MGM, | ||
1738 | CMD_TIME_CLASS_A, status); | ||
1739 | |||
1740 | pci_unmap_single(dev->pdev, indma, | ||
1741 | MTHCA_MGM_ENTRY_SIZE, PCI_DMA_TODEVICE); | ||
1742 | return err; | ||
1743 | } | ||
1744 | |||
1745 | int mthca_MGID_HASH(struct mthca_dev *dev, void *gid, u16 *hash, | ||
1746 | u8 *status) | ||
1747 | { | ||
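/* The 16-byte multicast GID is passed in via a mapped input buffer; the hash index
 * comes back in the immediate output parameter and is truncated to 16 bits.
 */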
1748 | dma_addr_t indma; | ||
1749 | u64 imm; | ||
1750 | int err; | ||
1751 | |||
1752 | indma = pci_map_single(dev->pdev, gid, 16, PCI_DMA_TODEVICE); | ||
1753 | if (pci_dma_mapping_error(indma)) | ||
1754 | return -ENOMEM; | ||
1755 | |||
1756 | err = mthca_cmd_imm(dev, indma, &imm, 0, 0, CMD_MGID_HASH, | ||
1757 | CMD_TIME_CLASS_A, status); | ||
1758 | *hash = imm; | ||
1759 | |||
1760 | pci_unmap_single(dev->pdev, indma, 16, PCI_DMA_TODEVICE); | ||
1761 | return err; | ||
1762 | } | ||
1763 | |||
1764 | int mthca_NOP(struct mthca_dev *dev, u8 *status) | ||
1765 | { | ||
1766 | return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, msecs_to_jiffies(100), status); | ||
1767 | } | ||