Diffstat (limited to 'drivers/infiniband/hw/mthca')

 -rw-r--r--  drivers/infiniband/hw/mthca/Makefile         |   3
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_catas.c    | 153
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c      |  11
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h      |  22
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c       |  21
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c      |  72
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c     |  11
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_mcg.c      |  11
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c  |   3
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.h  |   3
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c |  49
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c       |  16
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c      |  43
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_user.h     |   6

 14 files changed, 334 insertions, 90 deletions
diff --git a/drivers/infiniband/hw/mthca/Makefile b/drivers/infiniband/hw/mthca/Makefile
index c44f7bae5424..47ec5a7cba0b 100644
--- a/drivers/infiniband/hw/mthca/Makefile
+++ b/drivers/infiniband/hw/mthca/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o
 ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \
 		mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \
 		mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \
-		mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o
+		mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o \
+		mthca_catas.o
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
new file mode 100644
index 000000000000..7ac52af43b99
--- /dev/null
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include "mthca_dev.h"
+
+enum {
+	MTHCA_CATAS_POLL_INTERVAL = 5 * HZ,
+
+	MTHCA_CATAS_TYPE_INTERNAL = 0,
+	MTHCA_CATAS_TYPE_UPLINK = 3,
+	MTHCA_CATAS_TYPE_DDR = 4,
+	MTHCA_CATAS_TYPE_PARITY = 5,
+};
+
+static DEFINE_SPINLOCK(catas_lock);
+
+static void handle_catas(struct mthca_dev *dev)
+{
+	struct ib_event event;
+	const char *type;
+	int i;
+
+	event.device = &dev->ib_dev;
+	event.event = IB_EVENT_DEVICE_FATAL;
+	event.element.port_num = 0;
+
+	ib_dispatch_event(&event);
+
+	switch (swab32(readl(dev->catas_err.map)) >> 24) {
+	case MTHCA_CATAS_TYPE_INTERNAL:
+		type = "internal error";
+		break;
+	case MTHCA_CATAS_TYPE_UPLINK:
+		type = "uplink bus error";
+		break;
+	case MTHCA_CATAS_TYPE_DDR:
+		type = "DDR data error";
+		break;
+	case MTHCA_CATAS_TYPE_PARITY:
+		type = "internal parity error";
+		break;
+	default:
+		type = "unknown error";
+		break;
+	}
+
+	mthca_err(dev, "Catastrophic error detected: %s\n", type);
+	for (i = 0; i < dev->catas_err.size; ++i)
+		mthca_err(dev, "  buf[%02x]: %08x\n",
+			  i, swab32(readl(dev->catas_err.map + i)));
+}
+
+static void poll_catas(unsigned long dev_ptr)
+{
+	struct mthca_dev *dev = (struct mthca_dev *) dev_ptr;
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i < dev->catas_err.size; ++i)
+		if (readl(dev->catas_err.map + i)) {
+			handle_catas(dev);
+			return;
+		}
+
+	spin_lock_irqsave(&catas_lock, flags);
+	if (!dev->catas_err.stop)
+		mod_timer(&dev->catas_err.timer,
+			  jiffies + MTHCA_CATAS_POLL_INTERVAL);
+	spin_unlock_irqrestore(&catas_lock, flags);
+
+	return;
+}
+
+void mthca_start_catas_poll(struct mthca_dev *dev)
+{
+	unsigned long addr;
+
+	init_timer(&dev->catas_err.timer);
+	dev->catas_err.stop = 0;
+	dev->catas_err.map = NULL;
+
+	addr = pci_resource_start(dev->pdev, 0) +
+		((pci_resource_len(dev->pdev, 0) - 1) &
+		 dev->catas_err.addr);
+
+	if (!request_mem_region(addr, dev->catas_err.size * 4,
+				DRV_NAME)) {
+		mthca_warn(dev, "couldn't request catastrophic error region "
+			   "at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4);
+		return;
+	}
+
+	dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4);
+	if (!dev->catas_err.map) {
+		mthca_warn(dev, "couldn't map catastrophic error region "
+			   "at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4);
+		release_mem_region(addr, dev->catas_err.size * 4);
+		return;
+	}
+
+	dev->catas_err.timer.data = (unsigned long) dev;
+	dev->catas_err.timer.function = poll_catas;
+	dev->catas_err.timer.expires = jiffies + MTHCA_CATAS_POLL_INTERVAL;
+	add_timer(&dev->catas_err.timer);
+}
+
+void mthca_stop_catas_poll(struct mthca_dev *dev)
+{
+	spin_lock_irq(&catas_lock);
+	dev->catas_err.stop = 1;
+	spin_unlock_irq(&catas_lock);
+
+	del_timer_sync(&dev->catas_err.timer);
+
+	if (dev->catas_err.map) {
+		iounmap(dev->catas_err.map);
+		release_mem_region(pci_resource_start(dev->pdev, 0) +
+				   ((pci_resource_len(dev->pdev, 0) - 1) &
+				    dev->catas_err.addr),
+				   dev->catas_err.size * 4);
+	}
+}
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 378646b5a1b8..49f211d55df7 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -706,9 +707,13 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
 
 	MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
 	dev->cmd.max_cmds = 1 << lg;
+	MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);
+	MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
 
 	mthca_dbg(dev, "FW version %012llx, max commands %d\n",
 		  (unsigned long long) dev->fw_ver, dev->cmd.max_cmds);
+	mthca_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x\n",
+		  (unsigned long long) dev->catas_err.addr, dev->catas_err.size);
 
 	if (mthca_is_memfree(dev)) {
 		MTHCA_GET(dev->fw.arbel.fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
@@ -933,9 +938,9 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 		goto out;
 
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
-	dev_lim->max_srq_sz = 1 << field;
+	dev_lim->max_srq_sz = (1 << field) - 1;
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
-	dev_lim->max_qp_sz = 1 << field;
+	dev_lim->max_qp_sz = (1 << field) - 1;
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET);
 	dev_lim->reserved_qps = 1 << (field & 0xf);
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET);
@@ -1045,6 +1050,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 		  dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars);
 	mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
 		  dev_lim->max_pds, dev_lim->reserved_mgms);
+	mthca_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
+		  dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz);
 
 	mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags);
 
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 7bff5a8425f4..7e68bd4a3780 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -83,6 +83,8 @@ enum {
 	/* Arbel FW gives us these, but we need them for Tavor */
 	MTHCA_MPT_ENTRY_SIZE = 0x40,
 	MTHCA_MTT_SEG_SIZE = 0x40,
+
+	MTHCA_QP_PER_MGM = 4 * (MTHCA_MGM_ENTRY_SIZE / 16 - 2)
 };
 
 enum {
@@ -128,12 +130,16 @@ struct mthca_limits {
 	int num_uars;
 	int max_sg;
 	int num_qps;
+	int max_wqes;
+	int max_qp_init_rdma;
 	int reserved_qps;
 	int num_srqs;
+	int max_srq_wqes;
 	int reserved_srqs;
 	int num_eecs;
 	int reserved_eecs;
 	int num_cqs;
+	int max_cqes;
 	int reserved_cqs;
 	int num_eqs;
 	int reserved_eqs;
@@ -148,6 +154,7 @@ struct mthca_limits {
 	int reserved_mcgs;
 	int num_pds;
 	int reserved_pds;
+	u32 flags;
 	u8 port_width_cap;
 };
 
@@ -251,6 +258,14 @@ struct mthca_mcg_table {
 	struct mthca_icm_table *table;
 };
 
+struct mthca_catas_err {
+	u64 addr;
+	u32 __iomem *map;
+	unsigned long stop;
+	u32 size;
+	struct timer_list timer;
+};
+
 struct mthca_dev {
 	struct ib_device ib_dev;
 	struct pci_dev *pdev;
@@ -311,6 +326,8 @@ struct mthca_dev {
 	struct mthca_av_table av_table;
 	struct mthca_mcg_table mcg_table;
 
+	struct mthca_catas_err catas_err;
+
 	struct mthca_uar driver_uar;
 	struct mthca_db_table *db_tab;
 	struct mthca_pd driver_pd;
@@ -398,6 +415,9 @@ void mthca_cleanup_mcg_table(struct mthca_dev *dev);
 int mthca_register_device(struct mthca_dev *dev);
 void mthca_unregister_device(struct mthca_dev *dev);
 
+void mthca_start_catas_poll(struct mthca_dev *dev);
+void mthca_stop_catas_poll(struct mthca_dev *dev);
+
 int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar);
 void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar);
 
@@ -447,6 +467,8 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 		    struct ib_srq_attr *attr, struct mthca_srq *srq);
 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
+int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+		     enum ib_srq_attr_mask attr_mask);
 void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 		     enum ib_event_type event_type);
 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 8dfafda5ed24..e5a047a6dbeb 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -83,7 +83,8 @@ enum {
 	MTHCA_EVENT_TYPE_PATH_MIG = 0x01,
 	MTHCA_EVENT_TYPE_COMM_EST = 0x02,
 	MTHCA_EVENT_TYPE_SQ_DRAINED = 0x03,
-	MTHCA_EVENT_TYPE_SRQ_LAST_WQE = 0x13,
+	MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE = 0x13,
+	MTHCA_EVENT_TYPE_SRQ_LIMIT = 0x14,
 	MTHCA_EVENT_TYPE_CQ_ERROR = 0x04,
 	MTHCA_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
 	MTHCA_EVENT_TYPE_EEC_CATAS_ERROR = 0x06,
@@ -110,8 +111,9 @@ enum {
 				(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
 				(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE) | \
 				(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
-#define MTHCA_SRQ_EVENT_MASK (1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \
-				(1ULL << MTHCA_EVENT_TYPE_SRQ_LAST_WQE)
+#define MTHCA_SRQ_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \
+				(1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
+				(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
 #define MTHCA_CMD_EVENT_MASK (1ULL << MTHCA_EVENT_TYPE_CMD)
 
 #define MTHCA_EQ_DB_INC_CI (1 << 24)
@@ -142,6 +144,9 @@ struct mthca_eqe {
 			__be32 qpn;
 		} __attribute__((packed)) qp;
 		struct {
+			__be32 srqn;
+		} __attribute__((packed)) srq;
+		struct {
 			__be32 cqn;
 			u32 reserved1;
 			u8 reserved2[3];
@@ -305,6 +310,16 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 				       IB_EVENT_SQ_DRAINED);
 			break;
 
+		case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
+			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
+				       IB_EVENT_QP_LAST_WQE_REACHED);
+			break;
+
+		case MTHCA_EVENT_TYPE_SRQ_LIMIT:
+			mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
+					IB_EVENT_SRQ_LIMIT_REACHED);
+			break;
+
 		case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
 			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
 				       IB_EVENT_QP_FATAL);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 9804174f7f3c..8561b297a19b 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -46,11 +46,6 @@ enum {
 	MTHCA_VENDOR_CLASS2 = 0xa
 };
 
-struct mthca_trap_mad {
-	struct ib_mad *mad;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
-};
-
 static void update_sm_ah(struct mthca_dev *dev,
 			 u8 port_num, u16 lid, u8 sl)
 {
@@ -116,49 +111,14 @@ static void forward_trap(struct mthca_dev *dev,
 			 struct ib_mad *mad)
 {
 	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
-	struct mthca_trap_mad *tmad;
-	struct ib_sge gather_list;
-	struct ib_send_wr *bad_wr, wr = {
-		.opcode = IB_WR_SEND,
-		.sg_list = &gather_list,
-		.num_sge = 1,
-		.send_flags = IB_SEND_SIGNALED,
-		.wr = {
-			.ud = {
-				.remote_qpn = qpn,
-				.remote_qkey = qpn ? IB_QP1_QKEY : 0,
-				.timeout_ms = 0
-			}
-		}
-	};
+	struct ib_mad_send_buf *send_buf;
 	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
 	int ret;
 	unsigned long flags;
 
 	if (agent) {
-		tmad = kmalloc(sizeof *tmad, GFP_KERNEL);
-		if (!tmad)
-			return;
-
-		tmad->mad = kmalloc(sizeof *tmad->mad, GFP_KERNEL);
-		if (!tmad->mad) {
-			kfree(tmad);
-			return;
-		}
-
-		memcpy(tmad->mad, mad, sizeof *mad);
-
-		wr.wr.ud.mad_hdr = &tmad->mad->mad_hdr;
-		wr.wr_id = (unsigned long) tmad;
-
-		gather_list.addr = dma_map_single(agent->device->dma_device,
-						  tmad->mad,
-						  sizeof *tmad->mad,
-						  DMA_TO_DEVICE);
-		gather_list.length = sizeof *tmad->mad;
-		gather_list.lkey = to_mpd(agent->qp->pd)->ntmr.ibmr.lkey;
-		pci_unmap_addr_set(tmad, mapping, gather_list.addr);
-
+		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
+					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
 		/*
 		 * We rely here on the fact that MLX QPs don't use the
 		 * address handle after the send is posted (this is
@@ -166,21 +126,15 @@ static void forward_trap(struct mthca_dev *dev,
 		 * it's OK for our devices).
 		 */
 		spin_lock_irqsave(&dev->sm_lock, flags);
-		wr.wr.ud.ah = dev->sm_ah[port_num - 1];
-		if (wr.wr.ud.ah)
-			ret = ib_post_send_mad(agent, &wr, &bad_wr);
+		memcpy(send_buf->mad, mad, sizeof *mad);
+		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
+			ret = ib_post_send_mad(send_buf, NULL);
 		else
 			ret = -EINVAL;
 		spin_unlock_irqrestore(&dev->sm_lock, flags);
 
-		if (ret) {
-			dma_unmap_single(agent->device->dma_device,
-					 pci_unmap_addr(tmad, mapping),
-					 sizeof *tmad->mad,
-					 DMA_TO_DEVICE);
-			kfree(tmad->mad);
-			kfree(tmad);
-		}
+		if (ret)
+			ib_free_send_mad(send_buf);
 	}
 }
 
@@ -267,15 +221,7 @@ int mthca_process_mad(struct ib_device *ibdev,
 static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *mad_send_wc)
 {
-	struct mthca_trap_mad *tmad =
-		(void *) (unsigned long) mad_send_wc->wr_id;
-
-	dma_unmap_single(agent->device->dma_device,
-			 pci_unmap_addr(tmad, mapping),
-			 sizeof *tmad->mad,
-			 DMA_TO_DEVICE);
-	kfree(tmad->mad);
-	kfree(tmad);
+	ib_free_send_mad(mad_send_wc->send_buf);
 }
 
 int mthca_create_agents(struct mthca_dev *dev)
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 23a3f56c7899..883d1e5a79bc 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -162,9 +162,18 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
 	mdev->limits.pkey_table_len = dev_lim->max_pkeys;
 	mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
 	mdev->limits.max_sg = dev_lim->max_sg;
+	mdev->limits.max_wqes = dev_lim->max_qp_sz;
+	mdev->limits.max_qp_init_rdma = dev_lim->max_requester_per_qp;
 	mdev->limits.reserved_qps = dev_lim->reserved_qps;
+	mdev->limits.max_srq_wqes = dev_lim->max_srq_sz;
 	mdev->limits.reserved_srqs = dev_lim->reserved_srqs;
 	mdev->limits.reserved_eecs = dev_lim->reserved_eecs;
+	/*
+	 * Subtract 1 from the limit because we need to allocate a
+	 * spare CQE so the HCA HW can tell the difference between an
+	 * empty CQ and a full CQ.
+	 */
+	mdev->limits.max_cqes = dev_lim->max_cq_sz - 1;
 	mdev->limits.reserved_cqs = dev_lim->reserved_cqs;
 	mdev->limits.reserved_eqs = dev_lim->reserved_eqs;
 	mdev->limits.reserved_mtts = dev_lim->reserved_mtts;
@@ -172,6 +181,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
 	mdev->limits.reserved_uars = dev_lim->reserved_uars;
 	mdev->limits.reserved_pds = dev_lim->reserved_pds;
 	mdev->limits.port_width_cap = dev_lim->max_port_width;
+	mdev->limits.flags = dev_lim->flags;
 
 	/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
 	   May be doable since hardware supports it for SRQ.
@@ -1186,6 +1196,7 @@ MODULE_DEVICE_TABLE(pci, mthca_pci_table);
 
 static struct pci_driver mthca_driver = {
 	.name		= DRV_NAME,
+	.owner		= THIS_MODULE,
 	.id_table	= mthca_pci_table,
 	.probe		= mthca_init_one,
 	.remove		= __devexit_p(mthca_remove_one)
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index a2707605f4c8..b47ea7daf088 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -37,10 +37,6 @@
 #include "mthca_dev.h"
 #include "mthca_cmd.h"
 
-enum {
-	MTHCA_QP_PER_MGM = 4 * (MTHCA_MGM_ENTRY_SIZE / 16 - 2)
-};
-
 struct mthca_mgm {
 	__be32 next_gid_index;
 	u32 reserved[3];
@@ -189,7 +185,12 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	}
 
 	for (i = 0; i < MTHCA_QP_PER_MGM; ++i)
-		if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) {
+		if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) {
+			mthca_dbg(dev, "QP %06x already a member of MGM\n",
+				  ibqp->qp_num);
+			err = 0;
+			goto out;
+		} else if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) {
 			mgm->qp[i] = cpu_to_be32(ibqp->qp_num | (1 << 31));
 			break;
 		}
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 9ad8b3b6cfef..d72fe95cba08 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -487,7 +487,8 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
 	}
 }
 
-int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db)
+int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
+		   u32 qn, __be32 **db)
 {
 	int group;
 	int start, end, dir;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h
index 29433f295253..4fdca26eea85 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.h
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.h
@@ -173,7 +173,8 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
 
 int mthca_init_db_tab(struct mthca_dev *dev);
 void mthca_cleanup_db_tab(struct mthca_dev *dev);
-int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db);
+int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
+		   u32 qn, __be32 **db);
 void mthca_free_db(struct mthca_dev *dev, int type, int db_index);
 
 #endif /* MTHCA_MEMFREE_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 3f5319a46577..1b9477edbd7b 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -37,6 +37,7 @@
  */
 
 #include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
 #include <linux/mm.h>
 
 #include "mthca_dev.h"
@@ -90,15 +91,26 @@ static int mthca_query_device(struct ib_device *ibdev,
 
 	props->max_mr_size = ~0ull;
 	props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps;
-	props->max_qp_wr = 0xffff;
+	props->max_qp_wr = mdev->limits.max_wqes;
 	props->max_sge = mdev->limits.max_sg;
 	props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
-	props->max_cqe = 0xffff;
+	props->max_cqe = mdev->limits.max_cqes;
 	props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
 	props->max_pd = mdev->limits.num_pds - mdev->limits.reserved_pds;
 	props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift;
-	props->max_qp_init_rd_atom = 1 << mdev->qp_table.rdb_shift;
+	props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
+	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
+	props->max_srq = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
+	props->max_srq_wr = mdev->limits.max_srq_wqes;
+	props->max_srq_sge = mdev->limits.max_sg;
 	props->local_ca_ack_delay = mdev->limits.local_ca_ack_delay;
+	props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
+				IB_ATOMIC_HCA : IB_ATOMIC_NONE;
+	props->max_pkeys = mdev->limits.pkey_table_len;
+	props->max_mcast_grp = mdev->limits.num_mgms + mdev->limits.num_amgms;
+	props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
+	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
+					   props->max_mcast_grp;
 
 	err = 0;
 out:
@@ -150,9 +162,13 @@ static int mthca_query_port(struct ib_device *ibdev,
 	props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len;
 	props->max_msg_sz = 0x80000000;
 	props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len;
+	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
 	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
 	props->active_width = out_mad->data[31] & 0xf;
 	props->active_speed = out_mad->data[35] >> 4;
+	props->max_mtu = out_mad->data[41] & 0xf;
+	props->active_mtu = out_mad->data[36] >> 4;
+	props->subnet_timeout = out_mad->data[51] & 0x1f;
 
 out:
 	kfree(in_mad);
@@ -634,6 +650,9 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
 	int nent;
 	int err;
 
+	if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
+		return ERR_PTR(-EINVAL);
+
 	if (context) {
 		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
 			return ERR_PTR(-EFAULT);
@@ -1058,6 +1077,26 @@ int mthca_register_device(struct mthca_dev *dev)
 	strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
 	dev->ib_dev.owner = THIS_MODULE;
 
+	dev->ib_dev.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION;
+	dev->ib_dev.uverbs_cmd_mask =
+		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+		(1ull << IB_USER_VERBS_CMD_REG_MR) |
+		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
+		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
+		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
 	dev->ib_dev.node_type = IB_NODE_CA;
 	dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
 	dev->ib_dev.dma_device = &dev->pdev->dev;
@@ -1077,6 +1116,7 @@ int mthca_register_device(struct mthca_dev *dev)
 
 	if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
 		dev->ib_dev.create_srq = mthca_create_srq;
+		dev->ib_dev.modify_srq = mthca_modify_srq;
 		dev->ib_dev.destroy_srq = mthca_destroy_srq;
 
 		if (mthca_is_memfree(dev))
@@ -1135,10 +1175,13 @@ int mthca_register_device(struct mthca_dev *dev)
 		}
 	}
 
+	mthca_start_catas_poll(dev);
+
 	return 0;
 }
 
 void mthca_unregister_device(struct mthca_dev *dev)
 {
+	mthca_stop_catas_poll(dev);
 	ib_unregister_device(&dev->ib_dev);
 }
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 5fa00669f9b8..62ff091505da 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -338,8 +338,7 @@ static const struct {
 			[UC] = (IB_QP_AV |
 				IB_QP_PATH_MTU |
 				IB_QP_DEST_QPN |
-				IB_QP_RQ_PSN |
-				IB_QP_MAX_DEST_RD_ATOMIC),
+				IB_QP_RQ_PSN),
 			[RC] = (IB_QP_AV |
 				IB_QP_PATH_MTU |
 				IB_QP_DEST_QPN |
@@ -368,8 +367,7 @@ static const struct {
 		.trans = MTHCA_TRANS_RTR2RTS,
 		.req_param = {
 			[UD] = IB_QP_SQ_PSN,
-			[UC] = (IB_QP_SQ_PSN |
-				IB_QP_MAX_QP_RD_ATOMIC),
+			[UC] = IB_QP_SQ_PSN,
 			[RC] = (IB_QP_TIMEOUT |
 				IB_QP_RETRY_CNT |
 				IB_QP_RNR_RETRY |
@@ -446,8 +444,6 @@ static const struct {
 			[UD] = (IB_QP_PKEY_INDEX |
 				IB_QP_QKEY),
 			[UC] = (IB_QP_AV |
-				IB_QP_MAX_QP_RD_ATOMIC |
-				IB_QP_MAX_DEST_RD_ATOMIC |
 				IB_QP_CUR_STATE |
 				IB_QP_ALT_PATH |
 				IB_QP_ACCESS_FLAGS |
@@ -478,7 +474,7 @@ static const struct {
 		.opt_param = {
 			[UD] = (IB_QP_CUR_STATE |
 				IB_QP_QKEY),
-			[UC] = (IB_QP_CUR_STATE),
+			[UC] = IB_QP_CUR_STATE,
 			[RC] = (IB_QP_CUR_STATE |
 				IB_QP_MIN_RNR_TIMER),
 			[MLX] = (IB_QP_CUR_STATE |
@@ -1112,8 +1108,10 @@ static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
 			     struct mthca_qp *qp)
 {
 	/* Sanity check QP size before proceeding */
-	if (cap->max_send_wr > 65536 || cap->max_recv_wr > 65536 ||
-	    cap->max_send_sge > 64 || cap->max_recv_sge > 64)
+	if (cap->max_send_wr > dev->limits.max_wqes ||
+	    cap->max_recv_wr > dev->limits.max_wqes ||
+	    cap->max_send_sge > dev->limits.max_sg ||
+	    cap->max_recv_sge > dev->limits.max_sg)
 		return -EINVAL;
 
 	if (mthca_is_memfree(dev)) {
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 18998d48c53e..64f70aa1b3c0 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -186,7 +186,8 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 	int err;
 
 	/* Sanity check SRQ size before proceeding */
-	if (attr->max_wr > 16 << 20 || attr->max_sge > 64)
+	if (attr->max_wr > dev->limits.max_srq_wqes ||
+	    attr->max_sge > dev->limits.max_sg)
 		return -EINVAL;
 
 	srq->max = attr->max_wr;
@@ -332,6 +333,29 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
 	mthca_free_mailbox(dev, mailbox);
 }
 
+int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+		     enum ib_srq_attr_mask attr_mask)
+{
+	struct mthca_dev *dev = to_mdev(ibsrq->device);
+	struct mthca_srq *srq = to_msrq(ibsrq);
+	int ret;
+	u8 status;
+
+	/* We don't support resizing SRQs (yet?) */
+	if (attr_mask & IB_SRQ_MAX_WR)
+		return -EINVAL;
+
+	if (attr_mask & IB_SRQ_LIMIT) {
+		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
+		if (ret)
+			return ret;
+		if (status)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 		     enum ib_event_type event_type)
 {
@@ -354,7 +378,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 
 	event.device = &dev->ib_dev;
 	event.event = event_type;
-	event.element.srq  = &srq->ibsrq;
+	event.element.srq = &srq->ibsrq;
 	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
 
 out:
@@ -415,6 +439,14 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 
 		wqe = get_wqe(srq, ind);
 		next_ind = *wqe_to_link(wqe);
+
+		if (next_ind < 0) {
+			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
+			err = -ENOMEM;
+			*bad_wr = wr;
+			break;
+		}
+
 		prev_wqe = srq->last;
 		srq->last = wqe;
 
@@ -506,6 +538,13 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		wqe = get_wqe(srq, ind);
 		next_ind = *wqe_to_link(wqe);
 
+		if (next_ind < 0) {
+			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
+			err = -ENOMEM;
+			*bad_wr = wr;
+			break;
+		}
+
 		((struct mthca_next_seg *) wqe)->nda_op =
 			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
 		((struct mthca_next_seg *) wqe)->ee_nds = 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
index 41613ec8a04e..bb015c6494c4 100644
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ b/drivers/infiniband/hw/mthca/mthca_user.h
@@ -38,6 +38,12 @@
 #include <linux/types.h>
 
 /*
+ * Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define MTHCA_UVERBS_ABI_VERSION 1
+
+/*
  * Make sure that all structs defined in this file remain laid out so
  * that they pack the same way on 32-bit and 64-bit architectures (to
  * avoid incompatibility between 32-bit userspace and 64-bit kernels).