aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet
diff options
context:
space:
mode:
authorEli Cohen <eli@mellanox.com>2013-07-07 10:25:49 -0400
committerRoland Dreier <roland@purestorage.com>2013-07-08 13:32:24 -0400
commite126ba97dba9edeb6fafa3665b5f8497fc9cdf8c (patch)
treec886014a89a8a96b8fb171ad6683dc80ce2ff018 /drivers/net/ethernet
parent0134f16bc91cc15a38c867b81568b791c9b626aa (diff)
mlx5: Add driver for Mellanox Connect-IB adapters
The driver is comprised of two kernel modules: mlx5_ib and mlx5_core. This partitioning resembles what we have for mlx4, except that mlx5_ib is the pci device driver and not mlx5_core. mlx5_core is essentially a library that provides general functionality that is intended to be used by other Mellanox devices that will be introduced in the future. mlx5_ib has a similar role as any hardware device under drivers/infiniband/hw. Signed-off-by: Eli Cohen <eli@mellanox.com> Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il> Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com> [ Merge in coccinelle fixes from Fengguang Wu <fengguang.wu@intel.com>. - Roland ] Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/mellanox/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c238
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c1515
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c224
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c587
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c521
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c185
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c217
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mad.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c475
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mcg.c106
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c136
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c435
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pd.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c104
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c301
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c223
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c223
22 files changed, 5767 insertions, 0 deletions
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index bcdbc14aeff0..8cf7563a8d92 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -19,5 +19,6 @@ config NET_VENDOR_MELLANOX
19if NET_VENDOR_MELLANOX 19if NET_VENDOR_MELLANOX
20 20
21source "drivers/net/ethernet/mellanox/mlx4/Kconfig" 21source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
22source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
22 23
23endif # NET_VENDOR_MELLANOX 24endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
index 37afb9683372..38fe32ef5e5f 100644
--- a/drivers/net/ethernet/mellanox/Makefile
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -3,3 +3,4 @@
3# 3#
4 4
5obj-$(CONFIG_MLX4_CORE) += mlx4/ 5obj-$(CONFIG_MLX4_CORE) += mlx4/
6obj-$(CONFIG_MLX5_CORE) += mlx5/core/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
new file mode 100644
index 000000000000..21962828925a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -0,0 +1,18 @@
1#
2# Mellanox driver configuration
3#
4
5config MLX5_CORE
6 tristate
7 depends on PCI && X86
8 default n
9
10config MLX5_DEBUG
11 bool "Verbose debugging output" if (MLX5_CORE && EXPERT)
12 depends on MLX5_CORE
13 default y
14 ---help---
15 This option causes debugging code to be compiled into the
16 mlx5_core driver. The output can be turned on via the
17 debug_mask module parameter (which can also be set after
18 the driver is loaded through sysfs).
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
new file mode 100644
index 000000000000..105780bb980b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
2
3mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
4 health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
5 mad.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
new file mode 100644
index 000000000000..b215742b842f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -0,0 +1,238 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/errno.h>
34#include <linux/slab.h>
35#include <linux/mm.h>
36#include <linux/export.h>
37#include <linux/bitmap.h>
38#include <linux/dma-mapping.h>
39#include <linux/vmalloc.h>
40#include <linux/mlx5/driver.h>
41
42#include "mlx5_core.h"
43
44/* Handling for queue buffers -- we allocate a bunch of memory and
45 * register it in a memory region at HCA virtual address 0. If the
46 * requested size is > max_direct, we split the allocation into
47 * multiple pages, so we don't require too much contiguous memory.
48 */
49
50int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
51 struct mlx5_buf *buf)
52{
53 dma_addr_t t;
54
55 buf->size = size;
56 if (size <= max_direct) {
57 buf->nbufs = 1;
58 buf->npages = 1;
59 buf->page_shift = get_order(size) + PAGE_SHIFT;
60 buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
61 size, &t, GFP_KERNEL);
62 if (!buf->direct.buf)
63 return -ENOMEM;
64
65 buf->direct.map = t;
66
67 while (t & ((1 << buf->page_shift) - 1)) {
68 --buf->page_shift;
69 buf->npages *= 2;
70 }
71 } else {
72 int i;
73
74 buf->direct.buf = NULL;
75 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
76 buf->npages = buf->nbufs;
77 buf->page_shift = PAGE_SHIFT;
78 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
79 GFP_KERNEL);
80 if (!buf->page_list)
81 return -ENOMEM;
82
83 for (i = 0; i < buf->nbufs; i++) {
84 buf->page_list[i].buf =
85 dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
86 &t, GFP_KERNEL);
87 if (!buf->page_list[i].buf)
88 goto err_free;
89
90 buf->page_list[i].map = t;
91 }
92
93 if (BITS_PER_LONG == 64) {
94 struct page **pages;
95 pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL);
96 if (!pages)
97 goto err_free;
98 for (i = 0; i < buf->nbufs; i++)
99 pages[i] = virt_to_page(buf->page_list[i].buf);
100 buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
101 kfree(pages);
102 if (!buf->direct.buf)
103 goto err_free;
104 }
105 }
106
107 return 0;
108
109err_free:
110 mlx5_buf_free(dev, buf);
111
112 return -ENOMEM;
113}
114EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
115
116void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
117{
118 int i;
119
120 if (buf->nbufs == 1)
121 dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
122 buf->direct.map);
123 else {
124 if (BITS_PER_LONG == 64 && buf->direct.buf)
125 vunmap(buf->direct.buf);
126
127 for (i = 0; i < buf->nbufs; i++)
128 if (buf->page_list[i].buf)
129 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
130 buf->page_list[i].buf,
131 buf->page_list[i].map);
132 kfree(buf->page_list);
133 }
134}
135EXPORT_SYMBOL_GPL(mlx5_buf_free);
136
137static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
138{
139 struct mlx5_db_pgdir *pgdir;
140
141 pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
142 if (!pgdir)
143 return NULL;
144
145 bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
146 pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
147 &pgdir->db_dma, GFP_KERNEL);
148 if (!pgdir->db_page) {
149 kfree(pgdir);
150 return NULL;
151 }
152
153 return pgdir;
154}
155
156static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
157 struct mlx5_db *db)
158{
159 int offset;
160 int i;
161
162 i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
163 if (i >= MLX5_DB_PER_PAGE)
164 return -ENOMEM;
165
166 __clear_bit(i, pgdir->bitmap);
167
168 db->u.pgdir = pgdir;
169 db->index = i;
170 offset = db->index * L1_CACHE_BYTES;
171 db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
172 db->dma = pgdir->db_dma + offset;
173
174 return 0;
175}
176
177int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
178{
179 struct mlx5_db_pgdir *pgdir;
180 int ret = 0;
181
182 mutex_lock(&dev->priv.pgdir_mutex);
183
184 list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
185 if (!mlx5_alloc_db_from_pgdir(pgdir, db))
186 goto out;
187
188 pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
189 if (!pgdir) {
190 ret = -ENOMEM;
191 goto out;
192 }
193
194 list_add(&pgdir->list, &dev->priv.pgdir_list);
195
196 /* This should never fail -- we just allocated an empty page: */
197 WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));
198
199out:
200 mutex_unlock(&dev->priv.pgdir_mutex);
201
202 return ret;
203}
204EXPORT_SYMBOL_GPL(mlx5_db_alloc);
205
206void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
207{
208 mutex_lock(&dev->priv.pgdir_mutex);
209
210 __set_bit(db->index, db->u.pgdir->bitmap);
211
212 if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
213 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
214 db->u.pgdir->db_page, db->u.pgdir->db_dma);
215 list_del(&db->u.pgdir->list);
216 kfree(db->u.pgdir);
217 }
218
219 mutex_unlock(&dev->priv.pgdir_mutex);
220}
221EXPORT_SYMBOL_GPL(mlx5_db_free);
222
223
224void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
225{
226 u64 addr;
227 int i;
228
229 for (i = 0; i < buf->npages; i++) {
230 if (buf->nbufs == 1)
231 addr = buf->direct.map + (i << buf->page_shift);
232 else
233 addr = buf->page_list[i].map;
234
235 pas[i] = cpu_to_be64(addr);
236 }
237}
238EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
new file mode 100644
index 000000000000..c1c0eef89694
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -0,0 +1,1515 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <asm-generic/kmap_types.h>
34#include <linux/module.h>
35#include <linux/init.h>
36#include <linux/errno.h>
37#include <linux/pci.h>
38#include <linux/dma-mapping.h>
39#include <linux/slab.h>
40#include <linux/delay.h>
41#include <linux/random.h>
42#include <linux/io-mapping.h>
43#include <linux/mlx5/driver.h>
44#include <linux/debugfs.h>
45
46#include "mlx5_core.h"
47
48enum {
49 CMD_IF_REV = 3,
50};
51
52enum {
53 CMD_MODE_POLLING,
54 CMD_MODE_EVENTS
55};
56
57enum {
58 NUM_LONG_LISTS = 2,
59 NUM_MED_LISTS = 64,
60 LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
61 MLX5_CMD_DATA_BLOCK_SIZE,
62 MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
63};
64
65enum {
66 MLX5_CMD_DELIVERY_STAT_OK = 0x0,
67 MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
68 MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
69 MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
70 MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
71 MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
72 MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
73 MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
74 MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
75 MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
76 MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
77};
78
79enum {
80 MLX5_CMD_STAT_OK = 0x0,
81 MLX5_CMD_STAT_INT_ERR = 0x1,
82 MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
83 MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
84 MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
85 MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
86 MLX5_CMD_STAT_RES_BUSY = 0x6,
87 MLX5_CMD_STAT_LIM_ERR = 0x8,
88 MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
89 MLX5_CMD_STAT_IX_ERR = 0xa,
90 MLX5_CMD_STAT_NO_RES_ERR = 0xf,
91 MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
92 MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
93 MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
94 MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
95 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
96};
97
98static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
99 struct mlx5_cmd_msg *in,
100 struct mlx5_cmd_msg *out,
101 mlx5_cmd_cbk_t cbk,
102 void *context, int page_queue)
103{
104 gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
105 struct mlx5_cmd_work_ent *ent;
106
107 ent = kzalloc(sizeof(*ent), alloc_flags);
108 if (!ent)
109 return ERR_PTR(-ENOMEM);
110
111 ent->in = in;
112 ent->out = out;
113 ent->callback = cbk;
114 ent->context = context;
115 ent->cmd = cmd;
116 ent->page_queue = page_queue;
117
118 return ent;
119}
120
121static u8 alloc_token(struct mlx5_cmd *cmd)
122{
123 u8 token;
124
125 spin_lock(&cmd->token_lock);
126 token = cmd->token++ % 255 + 1;
127 spin_unlock(&cmd->token_lock);
128
129 return token;
130}
131
132static int alloc_ent(struct mlx5_cmd *cmd)
133{
134 unsigned long flags;
135 int ret;
136
137 spin_lock_irqsave(&cmd->alloc_lock, flags);
138 ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
139 if (ret < cmd->max_reg_cmds)
140 clear_bit(ret, &cmd->bitmask);
141 spin_unlock_irqrestore(&cmd->alloc_lock, flags);
142
143 return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
144}
145
146static void free_ent(struct mlx5_cmd *cmd, int idx)
147{
148 unsigned long flags;
149
150 spin_lock_irqsave(&cmd->alloc_lock, flags);
151 set_bit(idx, &cmd->bitmask);
152 spin_unlock_irqrestore(&cmd->alloc_lock, flags);
153}
154
155static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
156{
157 return cmd->cmd_buf + (idx << cmd->log_stride);
158}
159
160static u8 xor8_buf(void *buf, int len)
161{
162 u8 *ptr = buf;
163 u8 sum = 0;
164 int i;
165
166 for (i = 0; i < len; i++)
167 sum ^= ptr[i];
168
169 return sum;
170}
171
172static int verify_block_sig(struct mlx5_cmd_prot_block *block)
173{
174 if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
175 return -EINVAL;
176
177 if (xor8_buf(block, sizeof(*block)) != 0xff)
178 return -EINVAL;
179
180 return 0;
181}
182
183static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token)
184{
185 block->token = token;
186 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2);
187 block->sig = ~xor8_buf(block, sizeof(*block) - 1);
188}
189
190static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token)
191{
192 struct mlx5_cmd_mailbox *next = msg->next;
193
194 while (next) {
195 calc_block_sig(next->buf, token);
196 next = next->next;
197 }
198}
199
200static void set_signature(struct mlx5_cmd_work_ent *ent)
201{
202 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
203 calc_chain_sig(ent->in, ent->token);
204 calc_chain_sig(ent->out, ent->token);
205}
206
207static void poll_timeout(struct mlx5_cmd_work_ent *ent)
208{
209 unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
210 u8 own;
211
212 do {
213 own = ent->lay->status_own;
214 if (!(own & CMD_OWNER_HW)) {
215 ent->ret = 0;
216 return;
217 }
218 usleep_range(5000, 10000);
219 } while (time_before(jiffies, poll_end));
220
221 ent->ret = -ETIMEDOUT;
222}
223
/* Release a command work entry allocated by alloc_cmd(). */
static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}
228
229
230static int verify_signature(struct mlx5_cmd_work_ent *ent)
231{
232 struct mlx5_cmd_mailbox *next = ent->out->next;
233 int err;
234 u8 sig;
235
236 sig = xor8_buf(ent->lay, sizeof(*ent->lay));
237 if (sig != 0xff)
238 return -EINVAL;
239
240 while (next) {
241 err = verify_block_sig(next->buf);
242 if (err)
243 return err;
244
245 next = next->next;
246 }
247
248 return 0;
249}
250
251static void dump_buf(void *buf, int size, int data_only, int offset)
252{
253 __be32 *p = buf;
254 int i;
255
256 for (i = 0; i < size; i += 16) {
257 pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
258 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
259 be32_to_cpu(p[3]));
260 p += 4;
261 offset += 16;
262 }
263 if (!data_only)
264 pr_debug("\n");
265}
266
267const char *mlx5_command_str(int command)
268{
269 switch (command) {
270 case MLX5_CMD_OP_QUERY_HCA_CAP:
271 return "QUERY_HCA_CAP";
272
273 case MLX5_CMD_OP_SET_HCA_CAP:
274 return "SET_HCA_CAP";
275
276 case MLX5_CMD_OP_QUERY_ADAPTER:
277 return "QUERY_ADAPTER";
278
279 case MLX5_CMD_OP_INIT_HCA:
280 return "INIT_HCA";
281
282 case MLX5_CMD_OP_TEARDOWN_HCA:
283 return "TEARDOWN_HCA";
284
285 case MLX5_CMD_OP_QUERY_PAGES:
286 return "QUERY_PAGES";
287
288 case MLX5_CMD_OP_MANAGE_PAGES:
289 return "MANAGE_PAGES";
290
291 case MLX5_CMD_OP_CREATE_MKEY:
292 return "CREATE_MKEY";
293
294 case MLX5_CMD_OP_QUERY_MKEY:
295 return "QUERY_MKEY";
296
297 case MLX5_CMD_OP_DESTROY_MKEY:
298 return "DESTROY_MKEY";
299
300 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
301 return "QUERY_SPECIAL_CONTEXTS";
302
303 case MLX5_CMD_OP_CREATE_EQ:
304 return "CREATE_EQ";
305
306 case MLX5_CMD_OP_DESTROY_EQ:
307 return "DESTROY_EQ";
308
309 case MLX5_CMD_OP_QUERY_EQ:
310 return "QUERY_EQ";
311
312 case MLX5_CMD_OP_CREATE_CQ:
313 return "CREATE_CQ";
314
315 case MLX5_CMD_OP_DESTROY_CQ:
316 return "DESTROY_CQ";
317
318 case MLX5_CMD_OP_QUERY_CQ:
319 return "QUERY_CQ";
320
321 case MLX5_CMD_OP_MODIFY_CQ:
322 return "MODIFY_CQ";
323
324 case MLX5_CMD_OP_CREATE_QP:
325 return "CREATE_QP";
326
327 case MLX5_CMD_OP_DESTROY_QP:
328 return "DESTROY_QP";
329
330 case MLX5_CMD_OP_RST2INIT_QP:
331 return "RST2INIT_QP";
332
333 case MLX5_CMD_OP_INIT2RTR_QP:
334 return "INIT2RTR_QP";
335
336 case MLX5_CMD_OP_RTR2RTS_QP:
337 return "RTR2RTS_QP";
338
339 case MLX5_CMD_OP_RTS2RTS_QP:
340 return "RTS2RTS_QP";
341
342 case MLX5_CMD_OP_SQERR2RTS_QP:
343 return "SQERR2RTS_QP";
344
345 case MLX5_CMD_OP_2ERR_QP:
346 return "2ERR_QP";
347
348 case MLX5_CMD_OP_RTS2SQD_QP:
349 return "RTS2SQD_QP";
350
351 case MLX5_CMD_OP_SQD2RTS_QP:
352 return "SQD2RTS_QP";
353
354 case MLX5_CMD_OP_2RST_QP:
355 return "2RST_QP";
356
357 case MLX5_CMD_OP_QUERY_QP:
358 return "QUERY_QP";
359
360 case MLX5_CMD_OP_CONF_SQP:
361 return "CONF_SQP";
362
363 case MLX5_CMD_OP_MAD_IFC:
364 return "MAD_IFC";
365
366 case MLX5_CMD_OP_INIT2INIT_QP:
367 return "INIT2INIT_QP";
368
369 case MLX5_CMD_OP_SUSPEND_QP:
370 return "SUSPEND_QP";
371
372 case MLX5_CMD_OP_UNSUSPEND_QP:
373 return "UNSUSPEND_QP";
374
375 case MLX5_CMD_OP_SQD2SQD_QP:
376 return "SQD2SQD_QP";
377
378 case MLX5_CMD_OP_ALLOC_QP_COUNTER_SET:
379 return "ALLOC_QP_COUNTER_SET";
380
381 case MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET:
382 return "DEALLOC_QP_COUNTER_SET";
383
384 case MLX5_CMD_OP_QUERY_QP_COUNTER_SET:
385 return "QUERY_QP_COUNTER_SET";
386
387 case MLX5_CMD_OP_CREATE_PSV:
388 return "CREATE_PSV";
389
390 case MLX5_CMD_OP_DESTROY_PSV:
391 return "DESTROY_PSV";
392
393 case MLX5_CMD_OP_QUERY_PSV:
394 return "QUERY_PSV";
395
396 case MLX5_CMD_OP_QUERY_SIG_RULE_TABLE:
397 return "QUERY_SIG_RULE_TABLE";
398
399 case MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE:
400 return "QUERY_BLOCK_SIZE_TABLE";
401
402 case MLX5_CMD_OP_CREATE_SRQ:
403 return "CREATE_SRQ";
404
405 case MLX5_CMD_OP_DESTROY_SRQ:
406 return "DESTROY_SRQ";
407
408 case MLX5_CMD_OP_QUERY_SRQ:
409 return "QUERY_SRQ";
410
411 case MLX5_CMD_OP_ARM_RQ:
412 return "ARM_RQ";
413
414 case MLX5_CMD_OP_RESIZE_SRQ:
415 return "RESIZE_SRQ";
416
417 case MLX5_CMD_OP_ALLOC_PD:
418 return "ALLOC_PD";
419
420 case MLX5_CMD_OP_DEALLOC_PD:
421 return "DEALLOC_PD";
422
423 case MLX5_CMD_OP_ALLOC_UAR:
424 return "ALLOC_UAR";
425
426 case MLX5_CMD_OP_DEALLOC_UAR:
427 return "DEALLOC_UAR";
428
429 case MLX5_CMD_OP_ATTACH_TO_MCG:
430 return "ATTACH_TO_MCG";
431
432 case MLX5_CMD_OP_DETACH_FROM_MCG:
433 return "DETACH_FROM_MCG";
434
435 case MLX5_CMD_OP_ALLOC_XRCD:
436 return "ALLOC_XRCD";
437
438 case MLX5_CMD_OP_DEALLOC_XRCD:
439 return "DEALLOC_XRCD";
440
441 case MLX5_CMD_OP_ACCESS_REG:
442 return "MLX5_CMD_OP_ACCESS_REG";
443
444 default: return "unknown command opcode";
445 }
446}
447
448static void dump_command(struct mlx5_core_dev *dev,
449 struct mlx5_cmd_work_ent *ent, int input)
450{
451 u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
452 struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
453 struct mlx5_cmd_mailbox *next = msg->next;
454 int data_only;
455 int offset = 0;
456 int dump_len;
457
458 data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
459
460 if (data_only)
461 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
462 "dump command data %s(0x%x) %s\n",
463 mlx5_command_str(op), op,
464 input ? "INPUT" : "OUTPUT");
465 else
466 mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
467 mlx5_command_str(op), op,
468 input ? "INPUT" : "OUTPUT");
469
470 if (data_only) {
471 if (input) {
472 dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
473 offset += sizeof(ent->lay->in);
474 } else {
475 dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
476 offset += sizeof(ent->lay->out);
477 }
478 } else {
479 dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
480 offset += sizeof(*ent->lay);
481 }
482
483 while (next && offset < msg->len) {
484 if (data_only) {
485 dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
486 dump_buf(next->buf, dump_len, 1, offset);
487 offset += MLX5_CMD_DATA_BLOCK_SIZE;
488 } else {
489 mlx5_core_dbg(dev, "command block:\n");
490 dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
491 offset += sizeof(struct mlx5_cmd_prot_block);
492 }
493 next = next->next;
494 }
495
496 if (data_only)
497 pr_debug("\n");
498}
499
500static void cmd_work_handler(struct work_struct *work)
501{
502 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
503 struct mlx5_cmd *cmd = ent->cmd;
504 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
505 struct mlx5_cmd_layout *lay;
506 struct semaphore *sem;
507
508 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
509 down(sem);
510 if (!ent->page_queue) {
511 ent->idx = alloc_ent(cmd);
512 if (ent->idx < 0) {
513 mlx5_core_err(dev, "failed to allocate command entry\n");
514 up(sem);
515 return;
516 }
517 } else {
518 ent->idx = cmd->max_reg_cmds;
519 }
520
521 ent->token = alloc_token(cmd);
522 cmd->ent_arr[ent->idx] = ent;
523 lay = get_inst(cmd, ent->idx);
524 ent->lay = lay;
525 memset(lay, 0, sizeof(*lay));
526 memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
527 if (ent->in->next)
528 lay->in_ptr = cpu_to_be64(ent->in->next->dma);
529 lay->inlen = cpu_to_be32(ent->in->len);
530 if (ent->out->next)
531 lay->out_ptr = cpu_to_be64(ent->out->next->dma);
532 lay->outlen = cpu_to_be32(ent->out->len);
533 lay->type = MLX5_PCI_CMD_XPORT;
534 lay->token = ent->token;
535 lay->status_own = CMD_OWNER_HW;
536 if (!cmd->checksum_disabled)
537 set_signature(ent);
538 dump_command(dev, ent, 1);
539 ktime_get_ts(&ent->ts1);
540
541 /* ring doorbell after the descriptor is valid */
542 wmb();
543 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
544 mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx);
545 mmiowb();
546 if (cmd->mode == CMD_MODE_POLLING) {
547 poll_timeout(ent);
548 /* make sure we read the descriptor after ownership is SW */
549 rmb();
550 mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
551 }
552}
553
554static const char *deliv_status_to_str(u8 status)
555{
556 switch (status) {
557 case MLX5_CMD_DELIVERY_STAT_OK:
558 return "no errors";
559 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
560 return "signature error";
561 case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
562 return "token error";
563 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
564 return "bad block number";
565 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
566 return "output pointer not aligned to block size";
567 case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
568 return "input pointer not aligned to block size";
569 case MLX5_CMD_DELIVERY_STAT_FW_ERR:
570 return "firmware internal error";
571 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
572 return "command input length error";
573 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
574 return "command ouput length error";
575 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
576 return "reserved fields not cleared";
577 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
578 return "bad command descriptor type";
579 default:
580 return "unknown status code";
581 }
582}
583
584static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
585{
586 struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
587
588 return be16_to_cpu(hdr->opcode);
589}
590
591static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
592{
593 unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
594 struct mlx5_cmd *cmd = &dev->cmd;
595 int err;
596
597 if (cmd->mode == CMD_MODE_POLLING) {
598 wait_for_completion(&ent->done);
599 err = ent->ret;
600 } else {
601 if (!wait_for_completion_timeout(&ent->done, timeout))
602 err = -ETIMEDOUT;
603 else
604 err = 0;
605 }
606 if (err == -ETIMEDOUT) {
607 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
608 mlx5_command_str(msg_to_opcode(ent->in)),
609 msg_to_opcode(ent->in));
610 }
611 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
612 deliv_status_to_str(ent->status), ent->status);
613
614 return err;
615}
616
617/* Notes:
618 * 1. Callback functions may not sleep
619 * 2. page queue commands do not support asynchronous completion
620 */
621static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
622 struct mlx5_cmd_msg *out, mlx5_cmd_cbk_t callback,
623 void *context, int page_queue, u8 *status)
624{
625 struct mlx5_cmd *cmd = &dev->cmd;
626 struct mlx5_cmd_work_ent *ent;
627 ktime_t t1, t2, delta;
628 struct mlx5_cmd_stats *stats;
629 int err = 0;
630 s64 ds;
631 u16 op;
632
633 if (callback && page_queue)
634 return -EINVAL;
635
636 ent = alloc_cmd(cmd, in, out, callback, context, page_queue);
637 if (IS_ERR(ent))
638 return PTR_ERR(ent);
639
640 if (!callback)
641 init_completion(&ent->done);
642
643 INIT_WORK(&ent->work, cmd_work_handler);
644 if (page_queue) {
645 cmd_work_handler(&ent->work);
646 } else if (!queue_work(cmd->wq, &ent->work)) {
647 mlx5_core_warn(dev, "failed to queue work\n");
648 err = -ENOMEM;
649 goto out_free;
650 }
651
652 if (!callback) {
653 err = wait_func(dev, ent);
654 if (err == -ETIMEDOUT)
655 goto out;
656
657 t1 = timespec_to_ktime(ent->ts1);
658 t2 = timespec_to_ktime(ent->ts2);
659 delta = ktime_sub(t2, t1);
660 ds = ktime_to_ns(delta);
661 op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
662 if (op < ARRAY_SIZE(cmd->stats)) {
663 stats = &cmd->stats[op];
664 spin_lock(&stats->lock);
665 stats->sum += ds;
666 ++stats->n;
667 spin_unlock(&stats->lock);
668 }
669 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
670 "fw exec time for %s is %lld nsec\n",
671 mlx5_command_str(op), ds);
672 *status = ent->status;
673 free_cmd(ent);
674 }
675
676 return err;
677
678out_free:
679 free_cmd(ent);
680out:
681 return err;
682}
683
684static ssize_t dbg_write(struct file *filp, const char __user *buf,
685 size_t count, loff_t *pos)
686{
687 struct mlx5_core_dev *dev = filp->private_data;
688 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
689 char lbuf[3];
690 int err;
691
692 if (!dbg->in_msg || !dbg->out_msg)
693 return -ENOMEM;
694
695 if (copy_from_user(lbuf, buf, sizeof(lbuf)))
696 return -EPERM;
697
698 lbuf[sizeof(lbuf) - 1] = 0;
699
700 if (strcmp(lbuf, "go"))
701 return -EINVAL;
702
703 err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);
704
705 return err ? err : count;
706}
707
708
709static const struct file_operations fops = {
710 .owner = THIS_MODULE,
711 .open = simple_open,
712 .write = dbg_write,
713};
714
715static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
716{
717 struct mlx5_cmd_prot_block *block;
718 struct mlx5_cmd_mailbox *next;
719 int copy;
720
721 if (!to || !from)
722 return -ENOMEM;
723
724 copy = min_t(int, size, sizeof(to->first.data));
725 memcpy(to->first.data, from, copy);
726 size -= copy;
727 from += copy;
728
729 next = to->next;
730 while (size) {
731 if (!next) {
732 /* this is a BUG */
733 return -ENOMEM;
734 }
735
736 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
737 block = next->buf;
738 memcpy(block->data, from, copy);
739 from += copy;
740 size -= copy;
741 next = next->next;
742 }
743
744 return 0;
745}
746
747static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
748{
749 struct mlx5_cmd_prot_block *block;
750 struct mlx5_cmd_mailbox *next;
751 int copy;
752
753 if (!to || !from)
754 return -ENOMEM;
755
756 copy = min_t(int, size, sizeof(from->first.data));
757 memcpy(to, from->first.data, copy);
758 size -= copy;
759 to += copy;
760
761 next = from->next;
762 while (size) {
763 if (!next) {
764 /* this is a BUG */
765 return -ENOMEM;
766 }
767
768 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
769 block = next->buf;
770 if (xor8_buf(block, sizeof(*block)) != 0xff)
771 return -EINVAL;
772
773 memcpy(to, block->data, copy);
774 to += copy;
775 size -= copy;
776 next = next->next;
777 }
778
779 return 0;
780}
781
/*
 * Allocate one command mailbox plus its DMA-able buffer from the device's
 * pci pool.  The buffer is zeroed.  Returns ERR_PTR(-ENOMEM) on failure.
 */
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}
803
/* release a mailbox allocated by alloc_cmd_box() back to the pci pool */
static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
810
/*
 * Allocate a command message big enough for @size bytes.  The first
 * sizeof(msg->first.data) bytes live inline in the message; the remainder
 * is spread over a chain of mailbox blocks linked both in host memory
 * (tmp->next) and by DMA address (block->next) so HW can follow it.
 * Blocks are numbered 0..n-1 via block_num.  Returns ERR_PTR on failure.
 */
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	/* bytes that do not fit into the inline area */
	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	/* build the chain back-to-front so "head" ends up as block 0 */
	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}
857
858static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
859 struct mlx5_cmd_msg *msg)
860{
861 struct mlx5_cmd_mailbox *head = msg->next;
862 struct mlx5_cmd_mailbox *next;
863
864 while (head) {
865 next = head->next;
866 free_cmd_box(dev, head);
867 head = next;
868 }
869 kfree(msg);
870}
871
872static ssize_t data_write(struct file *filp, const char __user *buf,
873 size_t count, loff_t *pos)
874{
875 struct mlx5_core_dev *dev = filp->private_data;
876 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
877 void *ptr;
878 int err;
879
880 if (*pos != 0)
881 return -EINVAL;
882
883 kfree(dbg->in_msg);
884 dbg->in_msg = NULL;
885 dbg->inlen = 0;
886
887 ptr = kzalloc(count, GFP_KERNEL);
888 if (!ptr)
889 return -ENOMEM;
890
891 if (copy_from_user(ptr, buf, count)) {
892 err = -EPERM;
893 goto out;
894 }
895 dbg->in_msg = ptr;
896 dbg->inlen = count;
897
898 *pos = count;
899
900 return count;
901
902out:
903 kfree(ptr);
904 return err;
905}
906
907static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
908 loff_t *pos)
909{
910 struct mlx5_core_dev *dev = filp->private_data;
911 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
912 int copy;
913
914 if (*pos)
915 return 0;
916
917 if (!dbg->out_msg)
918 return -ENOMEM;
919
920 copy = min_t(int, count, dbg->outlen);
921 if (copy_to_user(buf, dbg->out_msg, copy))
922 return -EPERM;
923
924 *pos += copy;
925
926 return copy;
927}
928
/* debugfs "in"/"out" data files: stage command input, read back output */
static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};
935
936static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
937 loff_t *pos)
938{
939 struct mlx5_core_dev *dev = filp->private_data;
940 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
941 char outlen[8];
942 int err;
943
944 if (*pos)
945 return 0;
946
947 err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
948 if (err < 0)
949 return err;
950
951 if (copy_to_user(buf, &outlen, err))
952 return -EPERM;
953
954 *pos += err;
955
956 return err;
957}
958
959static ssize_t outlen_write(struct file *filp, const char __user *buf,
960 size_t count, loff_t *pos)
961{
962 struct mlx5_core_dev *dev = filp->private_data;
963 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
964 char outlen_str[8];
965 int outlen;
966 void *ptr;
967 int err;
968
969 if (*pos != 0 || count > 6)
970 return -EINVAL;
971
972 kfree(dbg->out_msg);
973 dbg->out_msg = NULL;
974 dbg->outlen = 0;
975
976 if (copy_from_user(outlen_str, buf, count))
977 return -EPERM;
978
979 outlen_str[7] = 0;
980
981 err = sscanf(outlen_str, "%d", &outlen);
982 if (err < 0)
983 return err;
984
985 ptr = kzalloc(outlen, GFP_KERNEL);
986 if (!ptr)
987 return -ENOMEM;
988
989 dbg->out_msg = ptr;
990 dbg->outlen = outlen;
991
992 *pos = count;
993
994 return count;
995}
996
/* debugfs "out_len" file: get/set the expected command output length */
static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};
1003
/* per-device command workqueue name: "mlx5_cmd_<pci device name>" */
static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}
1011
/* tear down the per-device command debugfs tree (no-op without debugfs) */
static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}
1022
/*
 * Create the per-device command debugfs tree under <dev>/cmd:
 *   in      - stage raw command input (data_write)
 *   out     - read back command output (data_read)
 *   out_len - expected output buffer length (olfops)
 *   status  - last delivery status byte
 *   run     - write "go" to execute the staged command
 * NOTE(review): "in" is created 0400 but is written via data_write, and
 * "out" 0200 but is read via data_read - the two modes look swapped;
 * confirm intended permissions.
 */
static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	/* clean_debug_files() removes the whole tree recursively */
	clean_debug_files(dev);
	return err;
}
1067
/*
 * Switch command completion processing to event (interrupt) mode.
 * Taking every regular-command semaphore slot plus the dedicated pages
 * slot guarantees no command is in flight while the mode is flipped;
 * the workqueue flush drains any completion work still queued.
 */
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	/* exclude all regular commands */
	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	/* exclude the page-request slot as well */
	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);

	cmd->mode = CMD_MODE_EVENTS;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
1086
/*
 * Switch command completion processing back to polling mode.  Same
 * quiescing protocol as mlx5_cmd_use_events(): drain all command slots,
 * flush pending completion work, then flip the mode.
 */
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);
	cmd->mode = CMD_MODE_POLLING;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
1104
1105void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
1106{
1107 struct mlx5_cmd *cmd = &dev->cmd;
1108 struct mlx5_cmd_work_ent *ent;
1109 mlx5_cmd_cbk_t callback;
1110 void *context;
1111 int err;
1112 int i;
1113
1114 for (i = 0; i < (1 << cmd->log_sz); i++) {
1115 if (test_bit(i, &vector)) {
1116 ent = cmd->ent_arr[i];
1117 ktime_get_ts(&ent->ts2);
1118 memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
1119 dump_command(dev, ent, 0);
1120 if (!ent->ret) {
1121 if (!cmd->checksum_disabled)
1122 ent->ret = verify_signature(ent);
1123 else
1124 ent->ret = 0;
1125 ent->status = ent->lay->status_own >> 1;
1126 mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
1127 ent->ret, deliv_status_to_str(ent->status), ent->status);
1128 }
1129 free_ent(cmd, ent->idx);
1130 if (ent->callback) {
1131 callback = ent->callback;
1132 context = ent->context;
1133 err = ent->ret;
1134 free_cmd(ent);
1135 callback(err, context);
1136 } else {
1137 complete(&ent->done);
1138 }
1139 if (ent->page_queue)
1140 up(&cmd->pages_sem);
1141 else
1142 up(&cmd->sem);
1143 }
1144 }
1145}
1146EXPORT_SYMBOL(mlx5_cmd_comp_handler);
1147
1148static int status_to_err(u8 status)
1149{
1150 return status ? -1 : 0; /* TBD more meaningful codes */
1151}
1152
/*
 * Get a command message big enough for @in_size bytes, preferring the
 * pre-allocated med/large caches and falling back to a fresh allocation
 * on a cache miss (or for tiny/oversized messages).
 */
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock(&ent->lock);
	}

	/* cache miss: allocate from scratch */
	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, in_size);

	return msg;
}
1182
1183static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
1184{
1185 if (msg->cache) {
1186 spin_lock(&msg->cache->lock);
1187 list_add_tail(&msg->list, &msg->cache->head);
1188 spin_unlock(&msg->cache->lock);
1189 } else {
1190 mlx5_free_cmd_msg(dev, msg);
1191 }
1192}
1193
/* MANAGE_PAGES commands use a dedicated, reserved queue slot (pages_sem) */
static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
1198
/*
 * Execute a firmware command synchronously: copy @in into a (possibly
 * cached) command message, invoke it, and copy the response into @out.
 * Returns a negative errno on delivery failure, or the mapped error when
 * the firmware reports a nonzero status.
 */
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	int err;
	u8 status = 0;

	/* MANAGE_PAGES goes through its own reserved queue slot */
	pages_queue = is_manage_pages(in);

	inb = alloc_msg(dev, in_size);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	/* output messages are never cached - always freshly allocated */
	outb = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, NULL, NULL, pages_queue, &status);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	mlx5_free_cmd_msg(dev, outb);

out_in:
	free_msg(dev, inb);
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_exec);
1248
1249static void destroy_msg_cache(struct mlx5_core_dev *dev)
1250{
1251 struct mlx5_cmd *cmd = &dev->cmd;
1252 struct mlx5_cmd_msg *msg;
1253 struct mlx5_cmd_msg *n;
1254
1255 list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
1256 list_del(&msg->list);
1257 mlx5_free_cmd_msg(dev, msg);
1258 }
1259
1260 list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
1261 list_del(&msg->list);
1262 mlx5_free_cmd_msg(dev, msg);
1263 }
1264}
1265
/*
 * Pre-populate the med/large command-message caches so the fast path can
 * usually skip mailbox-chain allocation.  Any allocation failure tears
 * down everything built so far via destroy_msg_cache().
 */
static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}
1304
/*
 * Initialize the command interface: verify the firmware command-interface
 * revision, set up the command-queue page and mailbox pci pool, read the
 * queue geometry from the initialization segment, hand the queue address
 * to firmware, and create the message cache, workqueue and debugfs files.
 */
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	/* one page holds the command-queue entries
	 * NOTE(review): GFP_ATOMIC here looks unnecessary for probe
	 * context - GFP_KERNEL would likely do; confirm no atomic caller
	 */
	cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0);
	if (!cmd->cmd_buf) {
		err = -ENOMEM;
		goto err_free_pool;
	}
	cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) {
		err = -ENOMEM;
		goto err_free;
	}

	/* queue geometry (log entry count, log stride) from the iseg */
	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_map;
	}

	/* all entries must fit in the single queue page */
	if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_map;
	}

	/* one entry is reserved for page requests (guarded by pages_sem) */
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_map;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		/* HW requires a 4K-aligned command queue address */
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_map;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_map;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_map:
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
err_free:
	free_pages((unsigned long)cmd->cmd_buf, 0);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);
1432
/* teardown in reverse order of mlx5_cmd_init() */
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cmd->cmd_buf, 0);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
1446
/* human-readable name for a firmware command status code */
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}
1486
/*
 * Map a firmware outbox status byte to a negative errno (0 on success),
 * logging the status name and syndrome for any failure.
 */
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(hdr->status), hdr->status,
		be32_to_cpu(hdr->syndrome));

	switch (hdr->status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
new file mode 100644
index 000000000000..c2d660be6f76
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -0,0 +1,224 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/hardirq.h>
36#include <linux/mlx5/driver.h>
37#include <linux/mlx5/cmd.h>
38#include <rdma/ib_verbs.h>
39#include <linux/mlx5/cq.h>
40#include "mlx5_core.h"
41
/*
 * Dispatch a completion event to the CQ's comp handler.  A reference is
 * taken under the table lock so the CQ cannot be destroyed while the
 * handler runs; the last put wakes mlx5_core_destroy_cq().
 */
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
{
	struct mlx5_core_cq *cq;
	struct mlx5_cq_table *table = &dev->priv.cq_table;

	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		atomic_inc(&cq->refcount);
	spin_unlock(&table->lock);

	if (!cq) {
		mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
		return;
	}

	/* bump the arm sequence number for this completion */
	++cq->arm_sn;

	cq->comp(cq);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}
65
66void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
67{
68 struct mlx5_cq_table *table = &dev->priv.cq_table;
69 struct mlx5_core_cq *cq;
70
71 spin_lock(&table->lock);
72
73 cq = radix_tree_lookup(&table->tree, cqn);
74 if (cq)
75 atomic_inc(&cq->refcount);
76
77 spin_unlock(&table->lock);
78
79 if (!cq) {
80 mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
81 return;
82 }
83
84 cq->event(cq, event_type);
85
86 if (atomic_dec_and_test(&cq->refcount))
87 complete(&cq->free);
88}
89
90
91int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
92 struct mlx5_create_cq_mbox_in *in, int inlen)
93{
94 int err;
95 struct mlx5_cq_table *table = &dev->priv.cq_table;
96 struct mlx5_create_cq_mbox_out out;
97 struct mlx5_destroy_cq_mbox_in din;
98 struct mlx5_destroy_cq_mbox_out dout;
99
100 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
101 memset(&out, 0, sizeof(out));
102 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
103 if (err)
104 return err;
105
106 if (out.hdr.status)
107 return mlx5_cmd_status_to_err(&out.hdr);
108
109 cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
110 cq->cons_index = 0;
111 cq->arm_sn = 0;
112 atomic_set(&cq->refcount, 1);
113 init_completion(&cq->free);
114
115 spin_lock_irq(&table->lock);
116 err = radix_tree_insert(&table->tree, cq->cqn, cq);
117 spin_unlock_irq(&table->lock);
118 if (err)
119 goto err_cmd;
120
121 cq->pid = current->pid;
122 err = mlx5_debug_cq_add(dev, cq);
123 if (err)
124 mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n",
125 cq->cqn);
126
127 return 0;
128
129err_cmd:
130 memset(&din, 0, sizeof(din));
131 memset(&dout, 0, sizeof(dout));
132 din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
133 mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
134 return err;
135}
136EXPORT_SYMBOL(mlx5_core_create_cq);
137
138int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
139{
140 struct mlx5_cq_table *table = &dev->priv.cq_table;
141 struct mlx5_destroy_cq_mbox_in in;
142 struct mlx5_destroy_cq_mbox_out out;
143 struct mlx5_core_cq *tmp;
144 int err;
145
146 spin_lock_irq(&table->lock);
147 tmp = radix_tree_delete(&table->tree, cq->cqn);
148 spin_unlock_irq(&table->lock);
149 if (!tmp) {
150 mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
151 return -EINVAL;
152 }
153 if (tmp != cq) {
154 mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn);
155 return -EINVAL;
156 }
157
158 memset(&in, 0, sizeof(in));
159 memset(&out, 0, sizeof(out));
160 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
161 in.cqn = cpu_to_be32(cq->cqn);
162 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
163 if (err)
164 return err;
165
166 if (out.hdr.status)
167 return mlx5_cmd_status_to_err(&out.hdr);
168
169 synchronize_irq(cq->irqn);
170
171 mlx5_debug_cq_remove(dev, cq);
172 if (atomic_dec_and_test(&cq->refcount))
173 complete(&cq->free);
174 wait_for_completion(&cq->free);
175
176 return 0;
177}
178EXPORT_SYMBOL(mlx5_core_destroy_cq);
179
180int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
181 struct mlx5_query_cq_mbox_out *out)
182{
183 struct mlx5_query_cq_mbox_in in;
184 int err;
185
186 memset(&in, 0, sizeof(in));
187 memset(out, 0, sizeof(*out));
188
189 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
190 in.cqn = cpu_to_be32(cq->cqn);
191 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
192 if (err)
193 return err;
194
195 if (out->hdr.status)
196 return mlx5_cmd_status_to_err(&out->hdr);
197
198 return err;
199}
200EXPORT_SYMBOL(mlx5_core_query_cq);
201
202
/* CQ modify (moderation/resize) is not implemented yet */
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			int type, struct mlx5_cq_modify_params *params)
{
	return -ENOSYS;
}
208
209int mlx5_init_cq_table(struct mlx5_core_dev *dev)
210{
211 struct mlx5_cq_table *table = &dev->priv.cq_table;
212 int err;
213
214 spin_lock_init(&table->lock);
215 INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
216 err = mlx5_cq_debugfs_init(dev);
217
218 return err;
219}
220
/* counterpart of mlx5_init_cq_table(); the radix tree itself needs no
 * teardown beyond removing the debugfs directory
 */
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
{
	mlx5_cq_debugfs_cleanup(dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
new file mode 100644
index 000000000000..5e9cf2b9aaf7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -0,0 +1,587 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/module.h>
34#include <linux/debugfs.h>
35#include <linux/mlx5/qp.h>
36#include <linux/mlx5/cq.h>
37#include <linux/mlx5/driver.h>
38#include "mlx5_core.h"
39
/* indices into qp_fields[]: one debugfs file per displayed QP attribute */
enum {
	QP_PID,
	QP_STATE,
	QP_XPORT,
	QP_MTU,
	QP_N_RECV,
	QP_RECV_SZ,
	QP_N_SEND,
	QP_LOG_PG_SZ,
	QP_RQPN,
};

/* debugfs file names for the QP fields above (order must match the enum) */
static char *qp_fields[] = {
	[QP_PID]	= "pid",
	[QP_STATE]	= "state",
	[QP_XPORT]	= "transport",
	[QP_MTU]	= "mtu",
	[QP_N_RECV]	= "num_recv",
	[QP_RECV_SZ]	= "rcv_wqe_sz",
	[QP_N_SEND]	= "num_send",
	[QP_LOG_PG_SZ]	= "log2_page_sz",
	[QP_RQPN]	= "remote_qpn",
};

/* indices into eq_fields[] */
enum {
	EQ_NUM_EQES,
	EQ_INTR,
	EQ_LOG_PG_SZ,
};

/* debugfs file names for the EQ fields */
static char *eq_fields[] = {
	[EQ_NUM_EQES]	= "num_eqes",
	[EQ_INTR]	= "intr",
	[EQ_LOG_PG_SZ]	= "log_page_size",
};

/* indices into cq_fields[] */
enum {
	CQ_PID,
	CQ_NUM_CQES,
	CQ_LOG_PG_SZ,
};

/* debugfs file names for the CQ fields */
static char *cq_fields[] = {
	[CQ_PID]	= "pid",
	[CQ_NUM_CQES]	= "num_cqes",
	[CQ_LOG_PG_SZ]	= "log_page_size",
};
87
/* root of the global "mlx5" debugfs tree; NULL when debugfs is unavailable */
struct dentry *mlx5_debugfs_root;
EXPORT_SYMBOL(mlx5_debugfs_root);
90
91void mlx5_register_debugfs(void)
92{
93 mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
94 if (IS_ERR_OR_NULL(mlx5_debugfs_root))
95 mlx5_debugfs_root = NULL;
96}
97
/* remove the global "mlx5" debugfs root (safe when it is NULL) */
void mlx5_unregister_debugfs(void)
{
	debugfs_remove(mlx5_debugfs_root);
}
102
103int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
104{
105 if (!mlx5_debugfs_root)
106 return 0;
107
108 atomic_set(&dev->num_qps, 0);
109
110 dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
111 if (!dev->priv.qp_debugfs)
112 return -ENOMEM;
113
114 return 0;
115}
116
/* remove the "QPs" directory and every per-QP file under it */
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->priv.qp_debugfs);
}
124
125int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
126{
127 if (!mlx5_debugfs_root)
128 return 0;
129
130 dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root);
131 if (!dev->priv.eq_debugfs)
132 return -ENOMEM;
133
134 return 0;
135}
136
/* remove the "EQs" directory and every per-EQ file under it */
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->priv.eq_debugfs);
}
144
145static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
146 loff_t *pos)
147{
148 struct mlx5_cmd_stats *stats;
149 u64 field = 0;
150 int ret;
151 int err;
152 char tbuf[22];
153
154 if (*pos)
155 return 0;
156
157 stats = filp->private_data;
158 spin_lock(&stats->lock);
159 if (stats->n)
160 field = stats->sum / stats->n;
161 spin_unlock(&stats->lock);
162 ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
163 if (ret > 0) {
164 err = copy_to_user(buf, tbuf, ret);
165 if (err)
166 return err;
167 }
168
169 *pos += ret;
170 return ret;
171}
172
173
174static ssize_t average_write(struct file *filp, const char __user *buf,
175 size_t count, loff_t *pos)
176{
177 struct mlx5_cmd_stats *stats;
178
179 stats = filp->private_data;
180 spin_lock(&stats->lock);
181 stats->sum = 0;
182 stats->n = 0;
183 spin_unlock(&stats->lock);
184
185 *pos += count;
186
187 return count;
188}
189
/* per-opcode "average" file: read the mean latency, write to reset stats */
static const struct file_operations stats_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= average_read,
	.write	= average_write,
};
196
/*
 * Create a debugfs directory per known command opcode under "commands",
 * each holding an "average" latency file and an "n" invocation counter.
 * Opcodes that mlx5_command_str() does not recognize are skipped.
 */
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_stats *stats;
	struct dentry **cmd;
	const char *namep;
	int err;
	int i;

	if (!mlx5_debugfs_root)
		return 0;

	cmd = &dev->priv.cmdif_debugfs;
	*cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
	if (!*cmd)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) {
		stats = &dev->cmd.stats[i];
		namep = mlx5_command_str(i);
		/* only create entries for opcodes we can name */
		if (strcmp(namep, "unknown command opcode")) {
			stats->root = debugfs_create_dir(namep, *cmd);
			if (!stats->root) {
				mlx5_core_warn(dev, "failed adding command %d\n",
					       i);
				err = -ENOMEM;
				goto out;
			}

			stats->avg = debugfs_create_file("average", 0400,
							 stats->root, stats,
							 &stats_fops);
			if (!stats->avg) {
				mlx5_core_warn(dev, "failed creating debugfs file\n");
				err = -ENOMEM;
				goto out;
			}

			stats->count = debugfs_create_u64("n", 0400,
							  stats->root,
							  &stats->n);
			if (!stats->count) {
				mlx5_core_warn(dev, "failed creating debugfs file\n");
				err = -ENOMEM;
				goto out;
			}
		}
	}

	return 0;
out:
	/* removes the whole "commands" tree including partial entries */
	debugfs_remove_recursive(dev->priv.cmdif_debugfs);
	return err;
}
250
/* remove the "commands" directory and all per-opcode stats files */
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->priv.cmdif_debugfs);
}
258
259int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
260{
261 if (!mlx5_debugfs_root)
262 return 0;
263
264 dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root);
265 if (!dev->priv.cq_debugfs)
266 return -ENOMEM;
267
268 return 0;
269}
270
/* remove the "CQs" directory and every per-CQ file under it */
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->priv.cq_debugfs);
}
278
279static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
280 int index)
281{
282 struct mlx5_query_qp_mbox_out *out;
283 struct mlx5_qp_context *ctx;
284 u64 param = 0;
285 int err;
286 int no_sq;
287
288 out = kzalloc(sizeof(*out), GFP_KERNEL);
289 if (!out)
290 return param;
291
292 err = mlx5_core_qp_query(dev, qp, out, sizeof(*out));
293 if (err) {
294 mlx5_core_warn(dev, "failed to query qp\n");
295 goto out;
296 }
297
298 ctx = &out->ctx;
299 switch (index) {
300 case QP_PID:
301 param = qp->pid;
302 break;
303 case QP_STATE:
304 param = be32_to_cpu(ctx->flags) >> 28;
305 break;
306 case QP_XPORT:
307 param = (be32_to_cpu(ctx->flags) >> 16) & 0xff;
308 break;
309 case QP_MTU:
310 param = ctx->mtu_msgmax >> 5;
311 break;
312 case QP_N_RECV:
313 param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
314 break;
315 case QP_RECV_SZ:
316 param = 1 << ((ctx->rq_size_stride & 7) + 4);
317 break;
318 case QP_N_SEND:
319 no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15;
320 if (!no_sq)
321 param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11);
322 else
323 param = 0;
324 break;
325 case QP_LOG_PG_SZ:
326 param = ((cpu_to_be32(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f);
327 param += 12;
328 break;
329 case QP_RQPN:
330 param = cpu_to_be32(ctx->log_pg_sz_remote_qpn) & 0xffffff;
331 break;
332 }
333
334out:
335 kfree(out);
336 return param;
337}
338
339static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
340 int index)
341{
342 struct mlx5_query_eq_mbox_out *out;
343 struct mlx5_eq_context *ctx;
344 u64 param = 0;
345 int err;
346
347 out = kzalloc(sizeof(*out), GFP_KERNEL);
348 if (!out)
349 return param;
350
351 ctx = &out->ctx;
352
353 err = mlx5_core_eq_query(dev, eq, out, sizeof(*out));
354 if (err) {
355 mlx5_core_warn(dev, "failed to query eq\n");
356 goto out;
357 }
358
359 switch (index) {
360 case EQ_NUM_EQES:
361 param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
362 break;
363 case EQ_INTR:
364 param = ctx->intr;
365 break;
366 case EQ_LOG_PG_SZ:
367 param = (ctx->log_page_size & 0x1f) + 12;
368 break;
369 }
370
371out:
372 kfree(out);
373 return param;
374}
375
376static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
377 int index)
378{
379 struct mlx5_query_cq_mbox_out *out;
380 struct mlx5_cq_context *ctx;
381 u64 param = 0;
382 int err;
383
384 out = kzalloc(sizeof(*out), GFP_KERNEL);
385 if (!out)
386 return param;
387
388 ctx = &out->ctx;
389
390 err = mlx5_core_query_cq(dev, cq, out);
391 if (err) {
392 mlx5_core_warn(dev, "failed to query cq\n");
393 goto out;
394 }
395
396 switch (index) {
397 case CQ_PID:
398 param = cq->pid;
399 break;
400 case CQ_NUM_CQES:
401 param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
402 break;
403 case CQ_LOG_PG_SZ:
404 param = (ctx->log_pg_sz & 0x1f) + 12;
405 break;
406 }
407
408out:
409 kfree(out);
410 return param;
411}
412
413static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
414 loff_t *pos)
415{
416 struct mlx5_field_desc *desc;
417 struct mlx5_rsc_debug *d;
418 char tbuf[18];
419 u64 field;
420 int ret;
421 int err;
422
423 if (*pos)
424 return 0;
425
426 desc = filp->private_data;
427 d = (void *)(desc - desc->i) - sizeof(*d);
428 switch (d->type) {
429 case MLX5_DBG_RSC_QP:
430 field = qp_read_field(d->dev, d->object, desc->i);
431 break;
432
433 case MLX5_DBG_RSC_EQ:
434 field = eq_read_field(d->dev, d->object, desc->i);
435 break;
436
437 case MLX5_DBG_RSC_CQ:
438 field = cq_read_field(d->dev, d->object, desc->i);
439 break;
440
441 default:
442 mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
443 return -EINVAL;
444 }
445
446 ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
447 if (ret > 0) {
448 err = copy_to_user(buf, tbuf, ret);
449 if (err)
450 return err;
451 }
452
453 *pos += ret;
454 return ret;
455}
456
/* Read-only fops for per-resource field files; dbg_read() formats the
 * selected field value as hex.
 */
static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= dbg_read,
};
462
/* Create a debugfs directory named after resource number @rsn under
 * @root, with one read-only file per name in @field.  The allocated
 * mlx5_rsc_debug header is immediately followed in memory by its
 * fields[] array; dbg_read() relies on exactly this layout to recover
 * the header from a field pointer — do not change the allocation shape.
 * On success *dbg owns the allocation; on failure everything is undone.
 */
static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
			struct dentry *root, struct mlx5_rsc_debug **dbg,
			int rsn, char **field, int nfile, void *data)
{
	struct mlx5_rsc_debug *d;
	char resn[32];
	int err;
	int i;

	/* Header plus one descriptor per file, zero-initialized. */
	d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->dev = dev;
	d->object = data;
	d->type = type;
	sprintf(resn, "0x%x", rsn);
	d->root = debugfs_create_dir(resn, root);
	if (!d->root) {
		err = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < nfile; i++) {
		/* Each descriptor records its own index so dbg_read() can
		 * walk back to the header. */
		d->fields[i].i = i;
		d->fields[i].dent = debugfs_create_file(field[i], 0400,
							d->root, &d->fields[i],
							&fops);
		if (!d->fields[i].dent) {
			err = -ENOMEM;
			goto out_rem;
		}
	}
	*dbg = d;

	return 0;
out_rem:
	debugfs_remove_recursive(d->root);

out_free:
	kfree(d);
	return err;
}
506
507static void rem_res_tree(struct mlx5_rsc_debug *d)
508{
509 debugfs_remove_recursive(d->root);
510 kfree(d);
511}
512
513int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
514{
515 int err;
516
517 if (!mlx5_debugfs_root)
518 return 0;
519
520 err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
521 &qp->dbg, qp->qpn, qp_fields,
522 ARRAY_SIZE(qp_fields), qp);
523 if (err)
524 qp->dbg = NULL;
525
526 return err;
527}
528
529void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
530{
531 if (!mlx5_debugfs_root)
532 return;
533
534 if (qp->dbg)
535 rem_res_tree(qp->dbg);
536}
537
538
539int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
540{
541 int err;
542
543 if (!mlx5_debugfs_root)
544 return 0;
545
546 err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
547 &eq->dbg, eq->eqn, eq_fields,
548 ARRAY_SIZE(eq_fields), eq);
549 if (err)
550 eq->dbg = NULL;
551
552 return err;
553}
554
555void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
556{
557 if (!mlx5_debugfs_root)
558 return;
559
560 if (eq->dbg)
561 rem_res_tree(eq->dbg);
562}
563
564int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
565{
566 int err;
567
568 if (!mlx5_debugfs_root)
569 return 0;
570
571 err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
572 &cq->dbg, cq->cqn, cq_fields,
573 ARRAY_SIZE(cq_fields), cq);
574 if (err)
575 cq->dbg = NULL;
576
577 return err;
578}
579
580void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
581{
582 if (!mlx5_debugfs_root)
583 return;
584
585 if (cq->dbg)
586 rem_res_tree(cq->dbg);
587}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
new file mode 100644
index 000000000000..c02cbcfd0fb8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -0,0 +1,521 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/interrupt.h>
34#include <linux/module.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/cmd.h>
37#include "mlx5_core.h"
38
enum {
	/* Size of one hardware event-queue entry. */
	MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),
	/* Owner bit value written to fresh EQEs (hardware ownership). */
	MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

/* EQ hardware states as reported/programmed in the EQ context. */
enum {
	MLX5_EQ_STATE_ARMED = 0x9,
	MLX5_EQ_STATE_FIRED = 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED = 0xb,
};

enum {
	/* Extra EQEs allocated beyond the requested depth; the ISR may
	 * defer consumer-index updates by at most this many entries. */
	MLX5_NUM_SPARE_EQE = 0x80,
	/* Default depths for the async and command EQs. */
	MLX5_NUM_ASYNC_EQE = 0x100,
	MLX5_NUM_CMD_EQE = 32,
};

enum {
	/* Byte offset of the EQ doorbell within the UAR page. */
	MLX5_EQ_DOORBEL_OFFSET = 0x40,
};
59
/* Event types routed to the async EQ: QP/SRQ state transitions and
 * errors, CQ errors, and physical port state changes.
 */
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
72
/* Wire layout of the MAP_EQ mailbox input (event mask plus the EQ to
 * unmap).  NOTE(review): not referenced anywhere in this file — confirm
 * intended use before removing. */
struct map_eq_in {
	u64 mask;
	u32 reserved;
	u32 unmap_eqn;
};

/* Wire layout shared by CREATE/DESTROY EQ mailboxes: the EQ number is
 * the last byte of a 16-byte field. */
struct cre_des_eq {
	u8 reserved[15];
	u8 eqn;
};
83
84static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
85{
86 struct mlx5_destroy_eq_mbox_in in;
87 struct mlx5_destroy_eq_mbox_out out;
88 int err;
89
90 memset(&in, 0, sizeof(in));
91 memset(&out, 0, sizeof(out));
92 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ);
93 in.eqn = eqn;
94 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
95 if (!err)
96 goto ex;
97
98 if (out.hdr.status)
99 err = mlx5_cmd_status_to_err(&out.hdr);
100
101ex:
102 return err;
103}
104
/* Return a pointer to EQE number @entry within the EQ buffer. */
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}
109
/* Return the EQE at the current consumer index if software owns it,
 * or NULL if the queue is empty.  Ownership alternates on every pass
 * over the power-of-two-sized buffer: the entry's owner bit must match
 * the "pass parity" bit of cons_index (bit log2(nent)).
 */
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}
116
117static const char *eqe_type_str(u8 type)
118{
119 switch (type) {
120 case MLX5_EVENT_TYPE_COMP:
121 return "MLX5_EVENT_TYPE_COMP";
122 case MLX5_EVENT_TYPE_PATH_MIG:
123 return "MLX5_EVENT_TYPE_PATH_MIG";
124 case MLX5_EVENT_TYPE_COMM_EST:
125 return "MLX5_EVENT_TYPE_COMM_EST";
126 case MLX5_EVENT_TYPE_SQ_DRAINED:
127 return "MLX5_EVENT_TYPE_SQ_DRAINED";
128 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
129 return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
130 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
131 return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
132 case MLX5_EVENT_TYPE_CQ_ERROR:
133 return "MLX5_EVENT_TYPE_CQ_ERROR";
134 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
135 return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
136 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
137 return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
138 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
139 return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
140 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
141 return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
142 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
143 return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
144 case MLX5_EVENT_TYPE_INTERNAL_ERROR:
145 return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
146 case MLX5_EVENT_TYPE_PORT_CHANGE:
147 return "MLX5_EVENT_TYPE_PORT_CHANGE";
148 case MLX5_EVENT_TYPE_GPIO_EVENT:
149 return "MLX5_EVENT_TYPE_GPIO_EVENT";
150 case MLX5_EVENT_TYPE_REMOTE_CONFIG:
151 return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
152 case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
153 return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
154 case MLX5_EVENT_TYPE_STALL_EVENT:
155 return "MLX5_EVENT_TYPE_STALL_EVENT";
156 case MLX5_EVENT_TYPE_CMD:
157 return "MLX5_EVENT_TYPE_CMD";
158 case MLX5_EVENT_TYPE_PAGE_REQUEST:
159 return "MLX5_EVENT_TYPE_PAGE_REQUEST";
160 default:
161 return "Unrecognized event";
162 }
163}
164
/* Translate a PORT_CHANGE EQE subtype into the driver-level device
 * event code.  Returns -1 (cast to the enum) for unknown subtypes; the
 * only caller (mlx5_eq_int) passes subtypes it has already validated.
 */
static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}
185
/* Ring the EQ doorbell with the current consumer index.  @arm selects
 * the arm register (offset 0) versus the CI-only register (offset 2
 * u32s, i.e. +8 bytes); arming requests the next interrupt.  The value
 * packs the EQ number in the top byte over the 24-bit consumer index.
 */
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
	__raw_writel((__force u32) cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
194
/* Drain all software-owned EQEs from @eq, dispatching each to the
 * matching completion/event handler.  Returns 1 if at least one EQE
 * was consumed, 0 otherwise.
 */
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int eqes_found = 0;
	int set_ci = 0;		/* EQEs consumed since the last CI update */
	u32 cqn;
	u32 srqn;
	u8 port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type));
		switch (eqe->type) {
		case MLX5_EVENT_TYPE_COMP:
			/* The CQN occupies the low 24 bits of the field. */
			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
			mlx5_cq_completion(dev, cqn);
			break;

		case MLX5_EVENT_TYPE_PATH_MIG:
		case MLX5_EVENT_TYPE_COMM_EST:
		case MLX5_EVENT_TYPE_SQ_DRAINED:
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx5_core_dbg(dev, "event %s(%d) arrived\n",
				      eqe_type_str(eqe->type), eqe->type);
			mlx5_qp_event(dev, be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff,
				      eqe->type);
			break;

		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, srqn);
			mlx5_srq_event(dev, srqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_CMD:
			/* Command completion; vector is passed through to the
			 * command-interface completion handler. */
			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
			break;

		case MLX5_EVENT_TYPE_PORT_CHANGE:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			case MLX5_PORT_CHANGE_SUBTYPE_LID:
			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
				/* Forward to the device-level event callback. */
				dev->event(dev, port_subtype_event(eqe->sub_type), &port);
				break;
			default:
				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;
		case MLX5_EVENT_TYPE_CQ_ERROR:
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
				       cqn, eqe->data.cq_err.syndrome);
			mlx5_cq_event(dev, cqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_PAGE_REQUEST:
			{
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);

				/* npages is signed — presumably negative means
				 * firmware wants pages reclaimed; see the pages
				 * handler for the actual semantics. */
				mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
				mlx5_core_req_pages_handler(dev, func_id, npages);
			}
			break;


		default:
			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn);
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	/* Final CI update with re-arm so the HCA raises the next interrupt. */
	eq_update_ci(eq, 1);

	return eqes_found;
}
304
305static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
306{
307 struct mlx5_eq *eq = eq_ptr;
308 struct mlx5_core_dev *dev = eq->dev;
309
310 mlx5_eq_int(dev, eq);
311
312 /* MSI-X vectors always belong to us */
313 return IRQ_HANDLED;
314}
315
316static void init_eq_buf(struct mlx5_eq *eq)
317{
318 struct mlx5_eqe *eqe;
319 int i;
320
321 for (i = 0; i < eq->nent; i++) {
322 eqe = get_eqe(eq, i);
323 eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
324 }
325}
326
/* Allocate an EQ buffer, create the EQ in firmware, bind it to MSI-X
 * vector @vecidx and leave it armed.  @nent is rounded up (plus spare
 * entries) to a power of two.  Returns 0 or a negative errno; on
 * failure all partially acquired resources are released in reverse
 * order.
 */
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_create_eq_mbox_in *in;
	struct mlx5_create_eq_mbox_out out;
	int err;
	int inlen;

	/* Spare entries let the ISR defer CI updates; see mlx5_eq_int(). */
	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
			     &eq->buf);
	if (err)
		return err;

	/* Give hardware initial ownership of every entry. */
	init_eq_buf(eq);

	/* Input mailbox carries one PAS entry per buffer page. */
	inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}
	memset(&out, 0, sizeof(out));

	mlx5_fill_page_array(&eq->buf, in->pas);

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
	/* log2 of the queue size in the top byte, UAR index below it. */
	in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
	in->ctx.intr = vecidx;
	/* Device page size is log2 relative to 4K. */
	in->ctx.log_page_size = PAGE_SHIFT - 12;
	in->events_mask = cpu_to_be64(mask);

	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err)
		goto err_in;

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		goto err_in;
	}

	eq->eqn = out.eq_number;
	err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
			  name, eq);
	if (err)
		goto err_eq;

	/* NOTE(review): irqn stores the MSI-X vector *index*, not the
	 * Linux IRQ number; mlx5_destroy_unmap_eq() indexes msix_arr
	 * with it. */
	eq->irqn = vecidx;
	eq->dev = dev;
	eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_irq;

	/* EQs are created in ARMED state
	 */
	eq_update_ci(eq, 1);

	mlx5_vfree(in);
	return 0;

err_irq:
	free_irq(table->msix_arr[vecidx].vector, eq);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	mlx5_vfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
404
/* Undo mlx5_create_map_eq(): remove debugfs entries, release the IRQ,
 * destroy the EQ in firmware, then free the buffer.  The buffer is
 * freed even if the firmware destroy fails; the firmware error is
 * returned after a warning.
 */
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

	mlx5_debug_eq_remove(dev, eq);
	/* eq->irqn is the MSI-X vector index set at creation time. */
	free_irq(table->msix_arr[eq->irqn].vector, eq);
	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	mlx5_buf_free(dev, &eq->buf);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);
421
422int mlx5_eq_init(struct mlx5_core_dev *dev)
423{
424 int err;
425
426 spin_lock_init(&dev->priv.eq_table.lock);
427
428 err = mlx5_eq_debugfs_init(dev);
429
430 return err;
431}
432
433
/* Counterpart of mlx5_eq_init(): tear down the EQ debugfs directory.
 * (The spinlock needs no explicit teardown.) */
void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
}
438
/* Bring up the three core EQs in dependency order: the command EQ
 * first (then switch the command interface from polling to events),
 * then the async EQ, then the pages EQ.  On failure everything already
 * created is torn down in reverse and the command interface is put
 * back into polling mode.
 */
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
				 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	/* Command completions can now arrive via the cmd EQ. */
	mlx5_cmd_use_events(dev);

	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
				 MLX5_NUM_ASYNC_EQE, MLX5_ASYNC_EVENT_MASK,
				 "mlx5_async_eq", &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	/* One pages-EQ entry per function (PF + VFs). */
	err = mlx5_create_map_eq(dev, &table->pages_eq,
				 MLX5_EQ_VEC_PAGES,
				 dev->caps.max_vf + 1,
				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
				 &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

	return err;

err2:
	mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
	/* Must return to polling before the cmd EQ disappears. */
	mlx5_cmd_use_polling(dev);
	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	return err;
}
482
/* Tear down the core EQs in reverse creation order.  The command
 * interface is switched back to polling before the cmd EQ goes away;
 * if destroying the cmd EQ fails, event mode is restored so the
 * interface stays usable.
 */
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
	if (err)
		return err;

	mlx5_destroy_unmap_eq(dev, &table->async_eq);
	mlx5_cmd_use_polling(dev);

	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_cmd_use_events(dev);

	return err;
}
501
502int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
503 struct mlx5_query_eq_mbox_out *out, int outlen)
504{
505 struct mlx5_query_eq_mbox_in in;
506 int err;
507
508 memset(&in, 0, sizeof(in));
509 memset(out, 0, outlen);
510 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
511 in.eqn = eq->eqn;
512 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
513 if (err)
514 return err;
515
516 if (out->hdr.status)
517 err = mlx5_cmd_status_to_err(&out->hdr);
518
519 return err;
520}
521EXPORT_SYMBOL_GPL(mlx5_core_eq_query);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
new file mode 100644
index 000000000000..72a5222447f5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -0,0 +1,185 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/mlx5/driver.h>
34#include <linux/mlx5/cmd.h>
35#include <linux/module.h>
36#include "mlx5_core.h"
37
38int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev)
39{
40 struct mlx5_cmd_query_adapter_mbox_out *out;
41 struct mlx5_cmd_query_adapter_mbox_in in;
42 int err;
43
44 out = kzalloc(sizeof(*out), GFP_KERNEL);
45 if (!out)
46 return -ENOMEM;
47
48 memset(&in, 0, sizeof(in));
49 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER);
50 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
51 if (err)
52 goto out_out;
53
54 if (out->hdr.status) {
55 err = mlx5_cmd_status_to_err(&out->hdr);
56 goto out_out;
57 }
58
59 memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid));
60
61out_out:
62 kfree(out);
63
64 return err;
65}
66
/* Issue QUERY_HCA_CAP (opmod 1) and QUERY_SPECIAL_CONTEXTS, decoding
 * the firmware capability fields into @caps.  Returns 0, a negative
 * transport errno, or a translated firmware status; -EINVAL if the
 * device reports more ports than the driver supports.
 */
int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
			   struct mlx5_caps *caps)
{
	struct mlx5_cmd_query_hca_cap_mbox_out *out;
	struct mlx5_cmd_query_hca_cap_mbox_in in;
	struct mlx5_query_special_ctxs_mbox_out ctx_out;
	struct mlx5_query_special_ctxs_mbox_in ctx_in;
	int err;
	u16 t16;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	memset(&in, 0, sizeof(in));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
	/* opmod 1: current (not maximum) capabilities. */
	in.hdr.opmod  = cpu_to_be16(0x1);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
	if (err)
		goto out_out;

	if (out->hdr.status) {
		err = mlx5_cmd_status_to_err(&out->hdr);
		goto out_out;
	}


	/* Most fields are log2 values masked to their valid bit width. */
	caps->log_max_eq = out->hca_cap.log_max_eq & 0xf;
	caps->max_cqes = 1 << out->hca_cap.log_max_cq_sz;
	caps->max_wqes = 1 << out->hca_cap.log_max_qp_sz;
	caps->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq);
	caps->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq);
	caps->flags = be64_to_cpu(out->hca_cap.flags);
	caps->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support);
	caps->log_max_msg = out->hca_cap.log_max_msg & 0x1f;
	caps->num_ports = out->hca_cap.num_ports & 0xf;
	caps->log_max_cq = out->hca_cap.log_max_cq & 0x1f;
	if (caps->num_ports > MLX5_MAX_PORTS) {
		mlx5_core_err(dev, "device has %d ports while the driver supports max %d ports\n",
			      caps->num_ports, MLX5_MAX_PORTS);
		err = -EINVAL;
		goto out_out;
	}
	caps->log_max_qp = out->hca_cap.log_max_qp & 0x1f;
	caps->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f;
	caps->log_max_pd = out->hca_cap.log_max_pd & 0x1f;
	caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
	caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
	caps->log_max_mcg = out->hca_cap.log_max_mcg;
	caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
	caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
	caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
	caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
	/* Top bit of bf_log_bf_reg_size flags BlueFlame support; the low
	 * bits hold the log2 register size. */
	t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size);
	if (t16 & 0x8000) {
		caps->bf_reg_size = 1 << (t16 & 0x1f);
		caps->bf_regs_per_page = MLX5_BF_REGS_PER_PAGE;
	} else {
		caps->bf_reg_size = 0;
		caps->bf_regs_per_page = 0;
	}
	caps->min_page_sz = ~(u32)((1 << out->hca_cap.log_pg_sz) - 1);

	/* Second command: fetch the reserved lkey from the special
	 * contexts query. */
	memset(&ctx_in, 0, sizeof(ctx_in));
	memset(&ctx_out, 0, sizeof(ctx_out));
	ctx_in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec(dev, &ctx_in, sizeof(ctx_in),
				 &ctx_out, sizeof(ctx_out));
	if (err)
		goto out_out;

	if (ctx_out.hdr.status)
		err = mlx5_cmd_status_to_err(&ctx_out.hdr);

	caps->reserved_lkey = be32_to_cpu(ctx_out.reserved_lkey);

out_out:
	kfree(out);

	return err;
}
148
149int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
150{
151 struct mlx5_cmd_init_hca_mbox_in in;
152 struct mlx5_cmd_init_hca_mbox_out out;
153 int err;
154
155 memset(&in, 0, sizeof(in));
156 memset(&out, 0, sizeof(out));
157 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_INIT_HCA);
158 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
159 if (err)
160 return err;
161
162 if (out.hdr.status)
163 err = mlx5_cmd_status_to_err(&out.hdr);
164
165 return err;
166}
167
168int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
169{
170 struct mlx5_cmd_teardown_hca_mbox_in in;
171 struct mlx5_cmd_teardown_hca_mbox_out out;
172 int err;
173
174 memset(&in, 0, sizeof(in));
175 memset(&out, 0, sizeof(out));
176 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_TEARDOWN_HCA);
177 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
178 if (err)
179 return err;
180
181 if (out.hdr.status)
182 err = mlx5_cmd_status_to_err(&out.hdr);
183
184 return err;
185}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
new file mode 100644
index 000000000000..ea4b9bca6d4a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -0,0 +1,217 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/random.h>
36#include <linux/vmalloc.h>
37#include <linux/mlx5/driver.h>
38#include <linux/mlx5/cmd.h>
39#include "mlx5_core.h"
40
enum {
	/* Poll the health counter every ~2s; declare the device dead
	 * after MAX_MISSES consecutive stalled reads. */
	MLX5_HEALTH_POLL_INTERVAL	= 2 * HZ,
	MAX_MISSES			= 3,
};

/* Health syndrome codes reported by firmware in the health buffer;
 * decoded to strings by hsynd_str(). */
enum {
	MLX5_HEALTH_SYNDR_FW_ERR		= 0x1,
	MLX5_HEALTH_SYNDR_IRISC_ERR		= 0x7,
	MLX5_HEALTH_SYNDR_CRC_ERR		= 0x9,
	MLX5_HEALTH_SYNDR_FETCH_PCI_ERR		= 0xa,
	MLX5_HEALTH_SYNDR_HW_FTL_ERR		= 0xb,
	MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR	= 0xc,
	MLX5_HEALTH_SYNDR_EQ_ERR		= 0xd,
	MLX5_HEALTH_SYNDR_FFSER_ERR		= 0xf,
};

/* Protects reg_handler and health_list across all devices. */
static DEFINE_SPINLOCK(health_lock);

/* Devices found unhealthy, pending processing by health_work. */
static LIST_HEAD(health_list);
static struct work_struct health_work;

/* Single, optional, externally registered callback for sick devices. */
static health_handler_t reg_handler;
63int mlx5_register_health_report_handler(health_handler_t handler)
64{
65 spin_lock_irq(&health_lock);
66 if (reg_handler) {
67 spin_unlock_irq(&health_lock);
68 return -EEXIST;
69 }
70 reg_handler = handler;
71 spin_unlock_irq(&health_lock);
72
73 return 0;
74}
75EXPORT_SYMBOL(mlx5_register_health_report_handler);
76
77void mlx5_unregister_health_report_handler(void)
78{
79 spin_lock_irq(&health_lock);
80 reg_handler = NULL;
81 spin_unlock_irq(&health_lock);
82}
83EXPORT_SYMBOL(mlx5_unregister_health_report_handler);
84
/* Workqueue handler: invoke the registered report callback (if any)
 * for every device queued on health_list by poll_health().  The list
 * is spliced out under health_lock so the poller can keep queueing
 * while we run; the lock is re-taken per entry so reg_handler is read
 * consistently and the entry can be unlinked safely.
 */
static void health_care(struct work_struct *work)
{
	struct mlx5_core_health *health, *n;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	LIST_HEAD(tlist);

	spin_lock_irq(&health_lock);
	list_splice_init(&health_list, &tlist);

	spin_unlock_irq(&health_lock);

	list_for_each_entry_safe(health, n, &tlist, list) {
		/* Walk back from the embedded health struct to its device. */
		priv = container_of(health, struct mlx5_priv, health);
		dev = container_of(priv, struct mlx5_core_dev, priv);
		mlx5_core_warn(dev, "handling bad device here\n");
		spin_lock_irq(&health_lock);
		if (reg_handler)
			reg_handler(dev->pdev, health->health,
				    sizeof(health->health));

		list_del_init(&health->list);
		spin_unlock_irq(&health_lock);
	}
}
110
111static const char *hsynd_str(u8 synd)
112{
113 switch (synd) {
114 case MLX5_HEALTH_SYNDR_FW_ERR:
115 return "firmware internal error";
116 case MLX5_HEALTH_SYNDR_IRISC_ERR:
117 return "irisc not responding";
118 case MLX5_HEALTH_SYNDR_CRC_ERR:
119 return "firmware CRC error";
120 case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
121 return "ICM fetch PCI error";
122 case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
123 return "HW fatal error\n";
124 case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
125 return "async EQ buffer overrun";
126 case MLX5_HEALTH_SYNDR_EQ_ERR:
127 return "EQ error";
128 case MLX5_HEALTH_SYNDR_FFSER_ERR:
129 return "FFSER error";
130 default:
131 return "unrecognized error";
132 }
133}
134
/* Dump the device's firmware health buffer (assert info, firmware and
 * hardware IDs, decoded syndrome) to the kernel log.
 */
static void print_health_info(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	int i;

	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
		pr_info("assert_var[%d] 0x%08x\n", i, be32_to_cpu(h->assert_var[i]));

	pr_info("assert_exit_ptr 0x%08x\n", be32_to_cpu(h->assert_exit_ptr));
	pr_info("assert_callra 0x%08x\n", be32_to_cpu(h->assert_callra));
	pr_info("fw_ver 0x%08x\n", be32_to_cpu(h->fw_ver));
	pr_info("hw_id 0x%08x\n", be32_to_cpu(h->hw_id));
	pr_info("irisc_index %d\n", h->irisc_index);
	pr_info("synd 0x%x: %s\n", h->synd, hsynd_str(h->synd));
	pr_info("ext_sync 0x%04x\n", be16_to_cpu(h->ext_sync));
}
152
/* Timer callback: sample the firmware health counter.  A counter that
 * stops advancing for MAX_MISSES consecutive polls means the device's
 * health is compromised: dump the health buffer, queue the device for
 * the health worker, and stop polling.  Otherwise re-arm the timer
 * with up to one jiffy-second of random jitter (so many devices don't
 * poll in lockstep).
 */
static void poll_health(unsigned long data)
{
	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long next;
	u32 count;

	count = ioread32be(health->health_counter);
	if (count == health->prev)
		++health->miss_counter;
	else
		health->miss_counter = 0;

	health->prev = count;
	if (health->miss_counter == MAX_MISSES) {
		mlx5_core_err(dev, "device's health compromised\n");
		print_health_info(dev);
		spin_lock_irq(&health_lock);
		list_add_tail(&health->list, &health_list);
		spin_unlock_irq(&health_lock);

		/* Note: the timer is NOT re-armed — polling stops here. */
		queue_work(mlx5_core_wq, &health_work);
	} else {
		get_random_bytes(&next, sizeof(next));
		next %= HZ;
		next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
		mod_timer(&health->timer, next);
	}
}
182
/* Begin periodic health polling for @dev: point the health state at
 * the health buffer/counter in the device's init segment and start the
 * poll timer.
 */
void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	INIT_LIST_HEAD(&health->list);
	init_timer(&health->timer);
	health->health = &dev->iseg->health;
	health->health_counter = &dev->iseg->health_counter;

	health->timer.data = (unsigned long)dev;
	health->timer.function = poll_health;
	health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
	add_timer(&health->timer);
}
197
198void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
199{
200 struct mlx5_core_health *health = &dev->priv.health;
201
202 del_timer_sync(&health->timer);
203
204 spin_lock_irq(&health_lock);
205 if (!list_empty(&health->list))
206 list_del_init(&health->list);
207 spin_unlock_irq(&health_lock);
208}
209
/* Module-exit counterpart of mlx5_health_init(); nothing to tear down
 * at present (the work item needs no explicit destruction). */
void mlx5_health_cleanup(void)
{
}
213
/* One-time module init: prepare the deferred health-handling work item
 * queued by poll_health() when a device goes bad. */
void __init mlx5_health_init(void)
{
	INIT_WORK(&health_work, health_care);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
new file mode 100644
index 000000000000..18d6fd5dd90b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
@@ -0,0 +1,78 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/cmd.h>
37#include "mlx5_core.h"
38
39int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
40 u16 opmod, int port)
41{
42 struct mlx5_mad_ifc_mbox_in *in = NULL;
43 struct mlx5_mad_ifc_mbox_out *out = NULL;
44 int err;
45
46 in = kzalloc(sizeof(*in), GFP_KERNEL);
47 if (!in)
48 return -ENOMEM;
49
50 out = kzalloc(sizeof(*out), GFP_KERNEL);
51 if (!out) {
52 err = -ENOMEM;
53 goto out;
54 }
55
56 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC);
57 in->hdr.opmod = cpu_to_be16(opmod);
58 in->port = port;
59
60 memcpy(in->data, inb, sizeof(in->data));
61
62 err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out));
63 if (err)
64 goto out;
65
66 if (out->hdr.status) {
67 err = mlx5_cmd_status_to_err(&out->hdr);
68 goto out;
69 }
70
71 memcpy(outb, out->data, sizeof(out->data));
72
73out:
74 kfree(out);
75 kfree(in);
76 return err;
77}
78EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
new file mode 100644
index 000000000000..f21cc397d1bc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -0,0 +1,475 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <asm-generic/kmap_types.h>
34#include <linux/module.h>
35#include <linux/init.h>
36#include <linux/errno.h>
37#include <linux/pci.h>
38#include <linux/dma-mapping.h>
39#include <linux/slab.h>
40#include <linux/io-mapping.h>
41#include <linux/mlx5/driver.h>
42#include <linux/mlx5/cq.h>
43#include <linux/mlx5/qp.h>
44#include <linux/mlx5/srq.h>
45#include <linux/debugfs.h>
46#include "mlx5_core.h"
47
48#define DRIVER_NAME "mlx5_core"
49#define DRIVER_VERSION "1.0"
50#define DRIVER_RELDATE "June 2013"
51
52MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
53MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library");
54MODULE_LICENSE("Dual BSD/GPL");
55MODULE_VERSION(DRIVER_VERSION);
56
57int mlx5_core_debug_mask;
58module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
59MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
60
61struct workqueue_struct *mlx5_core_wq;
62
63static int set_dma_caps(struct pci_dev *pdev)
64{
65 int err;
66
67 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
68 if (err) {
69 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
70 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
71 if (err) {
72 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
73 return err;
74 }
75 }
76
77 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
78 if (err) {
79 dev_warn(&pdev->dev,
80 "Warning: couldn't set 64-bit consistent PCI DMA mask.\n");
81 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
82 if (err) {
83 dev_err(&pdev->dev,
84 "Can't set consistent PCI DMA mask, aborting.\n");
85 return err;
86 }
87 }
88
89 dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
90 return err;
91}
92
93static int request_bar(struct pci_dev *pdev)
94{
95 int err = 0;
96
97 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
98 dev_err(&pdev->dev, "Missing registers BAR, aborting.\n");
99 return -ENODEV;
100 }
101
102 err = pci_request_regions(pdev, DRIVER_NAME);
103 if (err)
104 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
105
106 return err;
107}
108
/* Undo request_bar(): release the PCI regions claimed for this device. */
static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}
113
114static int mlx5_enable_msix(struct mlx5_core_dev *dev)
115{
116 struct mlx5_eq_table *table = &dev->priv.eq_table;
117 int num_eqs = 1 << dev->caps.log_max_eq;
118 int nvec;
119 int err;
120 int i;
121
122 nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
123 nvec = min_t(int, nvec, num_eqs);
124 if (nvec <= MLX5_EQ_VEC_COMP_BASE)
125 return -ENOMEM;
126
127 table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL);
128 if (!table->msix_arr)
129 return -ENOMEM;
130
131 for (i = 0; i < nvec; i++)
132 table->msix_arr[i].entry = i;
133
134retry:
135 table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
136 err = pci_enable_msix(dev->pdev, table->msix_arr, nvec);
137 if (err <= 0) {
138 return err;
139 } else if (err > 2) {
140 nvec = err;
141 goto retry;
142 }
143
144 mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec);
145
146 return 0;
147}
148
/* Disable MSI-X and free the vector table allocated by mlx5_enable_msix(). */
static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;

	pci_disable_msix(dev->pdev);
	kfree(table->msix_arr);
}
156
/* Layout of the HOST_ENDIANNESS access register (16 bytes); only the first
 * byte carries the host-endianness selector, the rest is reserved.
 */
struct mlx5_reg_host_endianess {
	u8	he;
	u8	rsvd[15];
};
161
/* Negotiate HCA capabilities: query the device's capabilities from
 * firmware, apply the driver profile's overrides (command-interface
 * checksum, log_max_qp) plus the UAR page size, and push the result back
 * with SET_HCA_CAP.  Returns 0 on success or a negative errno.
 */
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL;
	struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
	struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
	struct mlx5_cmd_set_hca_cap_mbox_out set_out;
	struct mlx5_profile *prof = dev->profile;
	u64 flags;
	int csum = 1;
	int err;

	/* the large mailboxes are heap-allocated; the small ones fit on the stack */
	memset(&query_ctx, 0, sizeof(query_ctx));
	query_out = kzalloc(sizeof(*query_out), GFP_KERNEL);
	if (!query_out)
		return -ENOMEM;

	set_ctx = kzalloc(sizeof(*set_ctx), GFP_KERNEL);
	if (!set_ctx) {
		err = -ENOMEM;
		goto query_ex;
	}

	query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
	query_ctx.hdr.opmod = cpu_to_be16(0x1); /* opmod 0x1 — presumably "current caps"; verify against PRM */
	err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx),
			    query_out, sizeof(*query_out));
	if (err)
		goto query_ex;

	err = mlx5_cmd_status_to_err(&query_out->hdr);
	if (err) {
		mlx5_core_warn(dev, "query hca cap failed, %d\n", err);
		goto query_ex;
	}

	/* start from what the device reported, then tweak selected fields */
	memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
	       sizeof(set_ctx->hca_cap));

	if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) {
		csum = !!prof->cmdif_csum;
		flags = be64_to_cpu(set_ctx->hca_cap.flags);
		if (csum)
			flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
		else
			flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;

		set_ctx->hca_cap.flags = cpu_to_be64(flags);
	}

	if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
		set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;

	memset(&set_out, 0, sizeof(set_out));
	/* PAGE_SHIFT - 12: page size relative to 4K — confirm encoding against PRM */
	set_ctx->hca_cap.uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
	set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
	err = mlx5_cmd_exec(dev, set_ctx, sizeof(*set_ctx),
			    &set_out, sizeof(set_out));
	if (err) {
		mlx5_core_warn(dev, "set hca cap failed, %d\n", err);
		goto query_ex;
	}

	err = mlx5_cmd_status_to_err(&set_out.hdr);
	if (err)
		goto query_ex;

	/* remember that command-interface checksumming was turned off */
	if (!csum)
		dev->cmd.checksum_disabled = 1;

query_ex:
	kfree(query_out);
	kfree(set_ctx);

	return err;
}
237
238static int set_hca_ctrl(struct mlx5_core_dev *dev)
239{
240 struct mlx5_reg_host_endianess he_in;
241 struct mlx5_reg_host_endianess he_out;
242 int err;
243
244 memset(&he_in, 0, sizeof(he_in));
245 he_in.he = MLX5_SET_HOST_ENDIANNESS;
246 err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
247 &he_out, sizeof(he_out),
248 MLX5_REG_HOST_ENDIANNESS, 0, 1);
249 return err;
250}
251
/* Bring up a Connect-IB device.
 *
 * Sequence: debugfs dir -> PCI enable -> BARs -> bus mastering -> DMA
 * masks -> map initialization segment -> command interface -> HCA
 * control/caps -> boot pages -> INIT_HCA -> health poll -> query caps ->
 * MSI-X -> EQs -> UARs -> resource tables.  The goto ladder at the bottom
 * unwinds exactly the steps completed so far, in reverse order;
 * mlx5_dev_cleanup() mirrors the full sequence.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev->pdev = pdev;
	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;	/* strncpy may not NUL-terminate */

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
	if (!priv->dbg_root)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		dev_err(&pdev->dev, "error requesting BARs, aborting.\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	/* map the initialization segment (start of BAR 0): firmware revision,
	 * health buffer and counters live here */
	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}
	dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		 fw_rev_min(dev), fw_rev_sub(dev));

	err = mlx5_cmd_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
		goto err_unmap;
	}

	mlx5_pagealloc_init(dev);
	err = set_hca_ctrl(dev);
	if (err) {
		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
		goto err_pagealloc_cleanup;
	}

	err = handle_hca_cap(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap failed\n");
		goto err_pagealloc_cleanup;
	}

	/* give firmware the pages it needs to boot, before INIT_HCA */
	err = mlx5_satisfy_startup_pages(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate startup pages\n");
		goto err_pagealloc_cleanup;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
		goto err_reclaim_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		dev_err(&pdev->dev, "init hca failed\n");
		goto err_pagealloc_stop;
	}

	mlx5_start_health_poll(dev);

	err = mlx5_cmd_query_hca_cap(dev, &dev->caps);
	if (err) {
		dev_err(&pdev->dev, "query hca failed\n");
		goto err_stop_poll;
	}

	err = mlx5_cmd_query_adapter(dev);
	if (err) {
		dev_err(&pdev->dev, "query adapter failed\n");
		goto err_stop_poll;
	}

	err = mlx5_enable_msix(dev);
	if (err) {
		dev_err(&pdev->dev, "enable msix failed\n");
		goto err_stop_poll;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize eq\n");
		goto disable_msix;
	}

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	if (err) {
		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
		goto err_eq_cleanup;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
		goto err_free_uar;
	}

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	mlx5_init_cq_table(dev);
	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);

	return 0;

err_free_uar:
	mlx5_free_uuars(dev, &priv->uuari);

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

disable_msix:
	mlx5_disable_msix(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev);
	mlx5_cmd_teardown_hca(dev);

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

err_reclaim_pages:
	mlx5_reclaim_startup_pages(dev);

err_pagealloc_cleanup:
	mlx5_pagealloc_cleanup(dev);
	mlx5_cmd_cleanup(dev);

err_unmap:
	iounmap(dev->iseg);

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);

err_disable:
	pci_disable_device(dev->pdev);

err_dbg:
	debugfs_remove(priv->dbg_root);
	return err;
}
EXPORT_SYMBOL(mlx5_dev_init);
421
/* Tear down a device brought up by mlx5_dev_init().  Each step undoes the
 * corresponding init step, in exact reverse order of initialization.
 */
void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_eq_cleanup(dev);
	mlx5_disable_msix(dev);
	mlx5_stop_health_poll(dev);
	mlx5_cmd_teardown_hca(dev);
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_pagealloc_cleanup(dev);
	mlx5_cmd_cleanup(dev);
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	pci_disable_device(dev->pdev);
	debugfs_remove(priv->dbg_root);
}
EXPORT_SYMBOL(mlx5_dev_cleanup);
446
447static int __init init(void)
448{
449 int err;
450
451 mlx5_register_debugfs();
452 mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
453 if (!mlx5_core_wq) {
454 err = -ENOMEM;
455 goto err_debug;
456 }
457 mlx5_health_init();
458
459 return 0;
460
461 mlx5_health_cleanup();
462err_debug:
463 mlx5_unregister_debugfs();
464 return err;
465}
466
/* Module exit: undo init() in reverse order. */
static void __exit cleanup(void)
{
	mlx5_health_cleanup();
	destroy_workqueue(mlx5_core_wq);
	mlx5_unregister_debugfs();
}
473
474module_init(init);
475module_exit(cleanup);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
new file mode 100644
index 000000000000..44837640bd7c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -0,0 +1,106 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/cmd.h>
37#include <rdma/ib_verbs.h>
38#include "mlx5_core.h"
39
/* Mailbox layouts for the ATTACH_TO_MCG / DETACH_FROM_MCG commands.
 * Multi-byte fields are big-endian on the wire; rsvd/rsvf fields are
 * reserved padding (NOTE(review): "rsvf" looks like a typo for "rsvd").
 */
struct mlx5_attach_mcg_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	__be32			rsvd;
	u8			gid[16];
};

struct mlx5_attach_mcg_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvf[8];
};

struct mlx5_detach_mcg_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	__be32			rsvd;
	u8			gid[16];
};

struct mlx5_detach_mcg_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvf[8];
};
63
64int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
65{
66 struct mlx5_attach_mcg_mbox_in in;
67 struct mlx5_attach_mcg_mbox_out out;
68 int err;
69
70 memset(&in, 0, sizeof(in));
71 memset(&out, 0, sizeof(out));
72 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ATTACH_TO_MCG);
73 memcpy(in.gid, mgid, sizeof(*mgid));
74 in.qpn = cpu_to_be32(qpn);
75 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
76 if (err)
77 return err;
78
79 if (out.hdr.status)
80 err = mlx5_cmd_status_to_err(&out.hdr);
81
82 return err;
83}
84EXPORT_SYMBOL(mlx5_core_attach_mcg);
85
86int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
87{
88 struct mlx5_detach_mcg_mbox_in in;
89 struct mlx5_detach_mcg_mbox_out out;
90 int err;
91
92 memset(&in, 0, sizeof(in));
93 memset(&out, 0, sizeof(out));
94 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG);
95 memcpy(in.gid, mgid, sizeof(*mgid));
96 in.qpn = cpu_to_be32(qpn);
97 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
98 if (err)
99 return err;
100
101 if (out.hdr.status)
102 err = mlx5_cmd_status_to_err(&out.hdr);
103
104 return err;
105}
106EXPORT_SYMBOL(mlx5_core_detach_mcg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
new file mode 100644
index 000000000000..68b74e1ae1b0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -0,0 +1,73 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
#ifndef __MLX5_CORE_H__
#define __MLX5_CORE_H__

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>

/* Debug-category bitmask, exposed as the "debug_mask" module parameter. */
extern int mlx5_core_debug_mask;

/* Logging helpers: every message is prefixed with the device name,
 * function, line number and current pid.
 */
#define mlx5_core_dbg(dev, format, arg...) \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
	 current->pid, ##arg)

/* Like mlx5_core_dbg() but only logs when @mask intersects
 * mlx5_core_debug_mask.
 */
#define mlx5_core_dbg_mask(dev, mask, format, arg...)			\
do {									\
	if ((mask) & mlx5_core_debug_mask)				\
		pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, \
			 __func__, __LINE__, current->pid, ##arg);	\
} while (0)

#define mlx5_core_err(dev, format, arg...) \
pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
	current->pid, ##arg)

#define mlx5_core_warn(dev, format, arg...) \
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
	current->pid, ##arg)

/* Debug-mask bit indices (see the debug_mask module parameter). */
enum {
	MLX5_CMD_DATA, /* print command payload only */
	MLX5_CMD_TIME, /* print command execution time */
};


/* Internal firmware-command helpers used during device init/teardown. */
int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
			   struct mlx5_caps *caps);
int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);

#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
new file mode 100644
index 000000000000..5b44e2e46daf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -0,0 +1,136 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/cmd.h>
37#include "mlx5_core.h"
38
39int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
40 struct mlx5_create_mkey_mbox_in *in, int inlen)
41{
42 struct mlx5_create_mkey_mbox_out out;
43 int err;
44 u8 key;
45
46 memset(&out, 0, sizeof(out));
47 spin_lock(&dev->priv.mkey_lock);
48 key = dev->priv.mkey_key++;
49 spin_unlock(&dev->priv.mkey_lock);
50 in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
51 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
52 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
53 if (err) {
54 mlx5_core_dbg(dev, "cmd exec faile %d\n", err);
55 return err;
56 }
57
58 if (out.hdr.status) {
59 mlx5_core_dbg(dev, "status %d\n", out.hdr.status);
60 return mlx5_cmd_status_to_err(&out.hdr);
61 }
62
63 mr->key = mlx5_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key;
64 mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key);
65
66 return err;
67}
68EXPORT_SYMBOL(mlx5_core_create_mkey);
69
70int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
71{
72 struct mlx5_destroy_mkey_mbox_in in;
73 struct mlx5_destroy_mkey_mbox_out out;
74 int err;
75
76 memset(&in, 0, sizeof(in));
77 memset(&out, 0, sizeof(out));
78
79 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
80 in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
81 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
82 if (err)
83 return err;
84
85 if (out.hdr.status)
86 return mlx5_cmd_status_to_err(&out.hdr);
87
88 return err;
89}
90EXPORT_SYMBOL(mlx5_core_destroy_mkey);
91
/* Query the memory key referenced by @mr into the caller-supplied @out
 * buffer of @outlen bytes.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): reuses the destroy_mkey inbox layout for the query inbox —
 * presumably the two commands share the same {hdr, mkey} wire format;
 * confirm against the command reference.
 */
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
			 struct mlx5_query_mkey_mbox_out *out, int outlen)
{
	struct mlx5_destroy_mkey_mbox_in in;
	int err;

	memset(&in, 0, sizeof(in));
	memset(out, 0, outlen);

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY);
	in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err)
		return err;

	if (out->hdr.status)
		return mlx5_cmd_status_to_err(&out->hdr);

	return err;
}
EXPORT_SYMBOL(mlx5_core_query_mkey);
113
114int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
115 u32 *mkey)
116{
117 struct mlx5_query_special_ctxs_mbox_in in;
118 struct mlx5_query_special_ctxs_mbox_out out;
119 int err;
120
121 memset(&in, 0, sizeof(in));
122 memset(&out, 0, sizeof(out));
123
124 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
125 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
126 if (err)
127 return err;
128
129 if (out.hdr.status)
130 return mlx5_cmd_status_to_err(&out.hdr);
131
132 *mkey = be32_to_cpu(out.dump_fill_mkey);
133
134 return err;
135}
136EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
new file mode 100644
index 000000000000..f0bf46339b28
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -0,0 +1,435 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <asm-generic/kmap_types.h>
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/mlx5/driver.h>
37#include <linux/mlx5/cmd.h>
38#include "mlx5_core.h"
39
/* Opmod values for the MANAGE_PAGES command. */
enum {
	MLX5_PAGES_CANT_GIVE	= 0,	/* driver cannot supply the requested pages */
	MLX5_PAGES_GIVE		= 1,	/* driver hands pages to firmware */
	MLX5_PAGES_TAKE		= 2	/* driver reclaims pages from firmware */
};

/* Deferred page give/reclaim request, processed on dev->priv.pg_wq. */
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u32	func_id;
	s16	npages;		/* >0: give, <0: reclaim that many pages */
	struct work_struct work;
};

/* One page handed to firmware, tracked in dev->priv.page_root keyed by
 * its DMA address.
 */
struct fw_page {
	struct rb_node	rb_node;
	u64		addr;	/* DMA address, the rb-tree key */
	struct page    *page;
	u16		func_id;
};

/* Mailbox layouts below mirror the firmware command format; all multi-byte
 * fields are big-endian and the reserved fields are part of the wire
 * layout -- do not reorder or resize.
 */
struct mlx5_query_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_query_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	u8			reserved[2];
	__be16			func_id;
	__be16			init_pages;	/* pages needed for init */
	__be16			num_pages;	/* signed delta requested by FW */
};

struct mlx5_manage_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	__be16			rsvd0;
	__be16			func_id;
	__be16			rsvd1;
	__be16			num_entries;
	u8			rsvd2[16];
	__be64			pas[0];	/* trailing array of page DMA addresses */
};

struct mlx5_manage_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[2];
	__be16			num_entries;	/* pages actually returned by FW */
	u8			rsvd1[20];
	__be64			pas[0];
};
90
/* Track a page handed to firmware: insert it into dev->priv.page_root
 * keyed by DMA address.  Returns 0, -EEXIST if the address is already
 * tracked, or -ENOMEM.
 *
 * NOTE(review): the comparison direction here (smaller keys to the
 * right) is unconventional but must stay matched with remove_page(),
 * which walks the tree with the same ordering.  No lock is taken here;
 * presumably callers serialize access to the tree -- confirm.
 */
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kmalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);

	return 0;
}
123
/* Stop tracking the page at DMA address @addr: remove its fw_page node
 * from dev->priv.page_root and free the node.  Returns the struct page
 * that was tracked, or NULL if @addr is unknown.
 *
 * The walk direction mirrors insert_page()'s (inverted) ordering; the
 * two must stay in sync.
 */
static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct page *result = NULL;
	struct fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_right;
		} else {
			rb_erase(&tfp->rb_node, root);
			result = tfp->page;
			kfree(tfp);
			break;
		}
	}

	return result;
}
147
148static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
149 s16 *pages, s16 *init_pages)
150{
151 struct mlx5_query_pages_inbox in;
152 struct mlx5_query_pages_outbox out;
153 int err;
154
155 memset(&in, 0, sizeof(in));
156 memset(&out, 0, sizeof(out));
157 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
158 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
159 if (err)
160 return err;
161
162 if (out.hdr.status)
163 return mlx5_cmd_status_to_err(&out.hdr);
164
165 if (pages)
166 *pages = be16_to_cpu(out.num_pages);
167 if (init_pages)
168 *init_pages = be16_to_cpu(out.init_pages);
169 *func_id = be16_to_cpu(out.func_id);
170
171 return err;
172}
173
/* Allocate @npages pages, DMA-map them, record them in the page tree and
 * hand them to firmware for @func_id (MANAGE_PAGES / MLX5_PAGES_GIVE).
 *
 * On any failure every page allocated so far is untracked, unmapped and
 * freed; if @notify_fail is set, firmware is additionally told via
 * MLX5_PAGES_CANT_GIVE that the request cannot be satisfied.
 *
 * Returns 0 on success or a negative errno.
 */
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
	struct mlx5_manage_pages_inbox *in;
	struct mlx5_manage_pages_outbox out;
	struct page *page;
	int inlen;
	u64 addr;
	int err;
	int i;

	/* inbox carries one 8-byte DMA address per page after the header */
	inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		return -ENOMEM;
	}
	memset(&out, 0, sizeof(out));

	for (i = 0; i < npages; i++) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page) {
			err = -ENOMEM;
			mlx5_core_warn(dev, "failed to allocate page\n");
			goto out_alloc;
		}
		addr = dma_map_page(&dev->pdev->dev, page, 0,
				    PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&dev->pdev->dev, addr)) {
			mlx5_core_warn(dev, "failed dma mapping page\n");
			__free_page(page);
			err = -ENOMEM;
			goto out_alloc;
		}
		err = insert_page(dev, addr, page, func_id);
		if (err) {
			mlx5_core_err(dev, "failed to track allocated page\n");
			dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
			__free_page(page);
			err = -ENOMEM;
			goto out_alloc;
		}
		in->pas[i] = cpu_to_be64(addr);
	}

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
	in->func_id = cpu_to_be16(func_id);
	in->num_entries = cpu_to_be16(npages);
	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	mlx5_core_dbg(dev, "err %d\n", err);
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err);
		goto out_alloc;
	}
	dev->priv.fw_pages += npages;

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		if (err) {
			mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status);
			/* NOTE(review): fw_pages was already incremented above
			 * even though firmware rejected the pages -- confirm
			 * the accounting is intended on this path.
			 */
			goto out_alloc;
		}
	}

	mlx5_core_dbg(dev, "err %d\n", err);

	goto out_free;

out_alloc:
	if (notify_fail) {
		/* tell FW we cannot satisfy the request; inbox reused with
		 * no page entries, hence sizeof(*in) only
		 */
		memset(in, 0, inlen);
		memset(&out, 0, sizeof(out));
		in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
		in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
		if (mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)))
			mlx5_core_warn(dev, "\n");
	}
	/* unwind: i holds the count of successfully prepared pages.
	 * note in->pas[] may have been cleared by the memset above; the
	 * addresses are then recovered from the page tree walk instead.
	 */
	for (i--; i >= 0; i--) {
		addr = be64_to_cpu(in->pas[i]);
		page = remove_page(dev, addr);
		if (!page) {
			mlx5_core_err(dev, "BUG: can't remove page at addr 0x%llx\n",
				      addr);
			continue;
		}
		dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(page);
	}

out_free:
	mlx5_vfree(in);
	return err;
}
268
269static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
270 int *nclaimed)
271{
272 struct mlx5_manage_pages_inbox in;
273 struct mlx5_manage_pages_outbox *out;
274 struct page *page;
275 int num_claimed;
276 int outlen;
277 u64 addr;
278 int err;
279 int i;
280
281 memset(&in, 0, sizeof(in));
282 outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
283 out = mlx5_vzalloc(outlen);
284 if (!out)
285 return -ENOMEM;
286
287 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
288 in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
289 in.func_id = cpu_to_be16(func_id);
290 in.num_entries = cpu_to_be16(npages);
291 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
292 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
293 if (err) {
294 mlx5_core_err(dev, "failed recliaming pages\n");
295 goto out_free;
296 }
297 dev->priv.fw_pages -= npages;
298
299 if (out->hdr.status) {
300 err = mlx5_cmd_status_to_err(&out->hdr);
301 goto out_free;
302 }
303
304 num_claimed = be16_to_cpu(out->num_entries);
305 if (nclaimed)
306 *nclaimed = num_claimed;
307
308 for (i = 0; i < num_claimed; i++) {
309 addr = be64_to_cpu(out->pas[i]);
310 page = remove_page(dev, addr);
311 if (!page) {
312 mlx5_core_warn(dev, "FW reported unknown DMA address 0x%llx\n", addr);
313 } else {
314 dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
315 __free_page(page);
316 }
317 }
318
319out_free:
320 mlx5_vfree(out);
321 return err;
322}
323
324static void pages_work_handler(struct work_struct *work)
325{
326 struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
327 struct mlx5_core_dev *dev = req->dev;
328 int err = 0;
329
330 if (req->npages < 0)
331 err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
332 else if (req->npages > 0)
333 err = give_pages(dev, req->func_id, req->npages, 1);
334
335 if (err)
336 mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ?
337 "reclaim" : "give", err);
338
339 kfree(req);
340}
341
342void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
343 s16 npages)
344{
345 struct mlx5_pages_req *req;
346
347 req = kzalloc(sizeof(*req), GFP_ATOMIC);
348 if (!req) {
349 mlx5_core_warn(dev, "failed to allocate pages request\n");
350 return;
351 }
352
353 req->dev = dev;
354 req->func_id = func_id;
355 req->npages = npages;
356 INIT_WORK(&req->work, pages_work_handler);
357 queue_work(dev->priv.pg_wq, &req->work);
358}
359
360int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev)
361{
362 s16 uninitialized_var(init_pages);
363 u16 uninitialized_var(func_id);
364 int err;
365
366 err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages);
367 if (err)
368 return err;
369
370 mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id);
371
372 return give_pages(dev, func_id, init_pages, 0);
373}
374
375static int optimal_reclaimed_pages(void)
376{
377 struct mlx5_cmd_prot_block *block;
378 struct mlx5_cmd_layout *lay;
379 int ret;
380
381 ret = (sizeof(lay->in) + sizeof(block->data) -
382 sizeof(struct mlx5_manage_pages_outbox)) / 8;
383
384 return ret;
385}
386
/* Reclaim every page the driver ever gave to firmware, in batches, with
 * an overall 5 second deadline.  Each iteration reclaims pages for the
 * function owning the first tracked page; success of the whole loop is
 * reaching an empty page tree before the deadline.  A timeout only
 * logs a warning and gives up -- it still returns 0.
 */
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(5000);
	struct fw_page *fwp;
	struct rb_node *p;
	int err;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct fw_page, rb_node);
			err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL);
			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
				return err;
			}
		}
		/* deadline is checked even when the tree just became empty;
		 * the loop then exits via the while condition below anyway
		 */
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	} while (p);

	return 0;
}
412
/* Initialize the firmware-page tracking tree to empty. */
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
}
417
/* Counterpart of mlx5_pagealloc_init(); nothing to tear down today --
 * pages are expected to have been reclaimed already.
 */
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing */
}
422
/* Create the single-threaded workqueue that serializes deferred page
 * give/reclaim requests.  Returns 0 or -ENOMEM.
 */
int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}
431
/* Flush and destroy the page-request workqueue; pending work items
 * complete before this returns.
 */
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
new file mode 100644
index 000000000000..790da5c4ca4f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
@@ -0,0 +1,101 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/cmd.h>
37#include "mlx5_core.h"
38
/* Mailbox layouts for ALLOC_PD / DEALLOC_PD.  These mirror the firmware
 * wire format: fields are big-endian and reserved bytes are mandatory
 * padding -- do not reorder or resize.
 */
struct mlx5_alloc_pd_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_alloc_pd_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			pdn;	/* newly allocated PD number (24 bits used) */
	u8			rsvd[4];
};

struct mlx5_dealloc_pd_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			pdn;	/* PD number to free */
	u8			rsvd[4];
};

struct mlx5_dealloc_pd_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};
60
61int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
62{
63 struct mlx5_alloc_pd_mbox_in in;
64 struct mlx5_alloc_pd_mbox_out out;
65 int err;
66
67 memset(&in, 0, sizeof(in));
68 memset(&out, 0, sizeof(out));
69 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD);
70 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
71 if (err)
72 return err;
73
74 if (out.hdr.status)
75 return mlx5_cmd_status_to_err(&out.hdr);
76
77 *pdn = be32_to_cpu(out.pdn) & 0xffffff;
78 return err;
79}
80EXPORT_SYMBOL(mlx5_core_alloc_pd);
81
82int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
83{
84 struct mlx5_dealloc_pd_mbox_in in;
85 struct mlx5_dealloc_pd_mbox_out out;
86 int err;
87
88 memset(&in, 0, sizeof(in));
89 memset(&out, 0, sizeof(out));
90 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_PD);
91 in.pdn = cpu_to_be32(pdn);
92 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
93 if (err)
94 return err;
95
96 if (out.hdr.status)
97 return mlx5_cmd_status_to_err(&out.hdr);
98
99 return err;
100}
101EXPORT_SYMBOL(mlx5_core_dealloc_pd);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
new file mode 100644
index 000000000000..f6afe7b5a675
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -0,0 +1,104 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/module.h>
34#include <linux/mlx5/driver.h>
35#include <linux/mlx5/cmd.h>
36#include "mlx5_core.h"
37
38int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
39 int size_in, void *data_out, int size_out,
40 u16 reg_num, int arg, int write)
41{
42 struct mlx5_access_reg_mbox_in *in = NULL;
43 struct mlx5_access_reg_mbox_out *out = NULL;
44 int err = -ENOMEM;
45
46 in = mlx5_vzalloc(sizeof(*in) + size_in);
47 if (!in)
48 return -ENOMEM;
49
50 out = mlx5_vzalloc(sizeof(*out) + size_out);
51 if (!out)
52 goto ex1;
53
54 memcpy(in->data, data_in, size_in);
55 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG);
56 in->hdr.opmod = cpu_to_be16(!write);
57 in->arg = cpu_to_be32(arg);
58 in->register_id = cpu_to_be16(reg_num);
59 err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out,
60 sizeof(out) + size_out);
61 if (err)
62 goto ex2;
63
64 if (out->hdr.status)
65 err = mlx5_cmd_status_to_err(&out->hdr);
66
67 if (!err)
68 memcpy(data_out, out->data, size_out);
69
70ex2:
71 mlx5_vfree(out);
72ex1:
73 mlx5_vfree(in);
74 return err;
75}
76EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
77
78
/* PCAP (port capability) register layout; fields are big-endian and the
 * reserved bytes are part of the wire format.
 */
struct mlx5_reg_pcap {
	u8			rsvd0;
	u8			port_num;	/* physical port to modify */
	u8			rsvd1[2];
	__be32			caps_127_96;	/* capability mask bits 127..96 */
	__be32			caps_95_64;
	__be32			caps_63_32;
	__be32			caps_31_0;
};
88
89int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps)
90{
91 struct mlx5_reg_pcap in;
92 struct mlx5_reg_pcap out;
93 int err;
94
95 memset(&in, 0, sizeof(in));
96 in.caps_127_96 = cpu_to_be32(caps);
97 in.port_num = port_num;
98
99 err = mlx5_core_access_reg(dev, &in, sizeof(in), &out,
100 sizeof(out), MLX5_REG_PCAP, 0, 1);
101
102 return err;
103}
104EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
new file mode 100644
index 000000000000..54faf8bfcaf4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -0,0 +1,301 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33
34#include <linux/gfp.h>
35#include <linux/export.h>
36#include <linux/mlx5/cmd.h>
37#include <linux/mlx5/qp.h>
38#include <linux/mlx5/driver.h>
39
40#include "mlx5_core.h"
41
42void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type)
43{
44 struct mlx5_qp_table *table = &dev->priv.qp_table;
45 struct mlx5_core_qp *qp;
46
47 spin_lock(&table->lock);
48
49 qp = radix_tree_lookup(&table->tree, qpn);
50 if (qp)
51 atomic_inc(&qp->refcount);
52
53 spin_unlock(&table->lock);
54
55 if (!qp) {
56 mlx5_core_warn(dev, "Async event for bogus QP 0x%x\n", qpn);
57 return;
58 }
59
60 qp->event(qp, event_type);
61
62 if (atomic_dec_and_test(&qp->refcount))
63 complete(&qp->free);
64}
65
66int mlx5_core_create_qp(struct mlx5_core_dev *dev,
67 struct mlx5_core_qp *qp,
68 struct mlx5_create_qp_mbox_in *in,
69 int inlen)
70{
71 struct mlx5_qp_table *table = &dev->priv.qp_table;
72 struct mlx5_create_qp_mbox_out out;
73 struct mlx5_destroy_qp_mbox_in din;
74 struct mlx5_destroy_qp_mbox_out dout;
75 int err;
76
77 memset(&dout, 0, sizeof(dout));
78 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
79
80 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
81 if (err) {
82 mlx5_core_warn(dev, "ret %d", err);
83 return err;
84 }
85
86 if (out.hdr.status) {
87 pr_warn("current num of QPs 0x%x\n", atomic_read(&dev->num_qps));
88 return mlx5_cmd_status_to_err(&out.hdr);
89 }
90
91 qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
92 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
93
94 spin_lock_irq(&table->lock);
95 err = radix_tree_insert(&table->tree, qp->qpn, qp);
96 spin_unlock_irq(&table->lock);
97 if (err) {
98 mlx5_core_warn(dev, "err %d", err);
99 goto err_cmd;
100 }
101
102 err = mlx5_debug_qp_add(dev, qp);
103 if (err)
104 mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
105 qp->qpn);
106
107 qp->pid = current->pid;
108 atomic_set(&qp->refcount, 1);
109 atomic_inc(&dev->num_qps);
110 init_completion(&qp->free);
111
112 return 0;
113
114err_cmd:
115 memset(&din, 0, sizeof(din));
116 memset(&dout, 0, sizeof(dout));
117 din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
118 din.qpn = cpu_to_be32(qp->qpn);
119 mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
120
121 return err;
122}
123EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
124
/* Destroy a QP: unregister it from debugfs and the radix tree, wait
 * until every outstanding reference (e.g. an in-flight event handler)
 * is dropped, then issue DESTROY_QP to firmware.
 *
 * The drop-own-reference / wait_for_completion pair below ensures the
 * firmware command is only sent once no mlx5_qp_event() can still be
 * using the QP.
 */
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp)
{
	struct mlx5_destroy_qp_mbox_in in;
	struct mlx5_destroy_qp_mbox_out out;
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	unsigned long flags;
	int err;

	mlx5_debug_qp_remove(dev, qp);

	spin_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree, qp->qpn);
	spin_unlock_irqrestore(&table->lock, flags);

	/* drop the creation reference and wait for all users to finish */
	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
	in.qpn = cpu_to_be32(qp->qpn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	atomic_dec(&dev->num_qps);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
159
/* Transition @qp from @cur_state to @new_state using the caller-built
 * MODIFY_QP inbox.  The firmware opcode is chosen from the transition
 * table below; a zero entry means the transition is not supported and
 * -EINVAL is returned.  @sqd_event is currently unused.
 */
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
			enum mlx5_qp_state new_state,
			struct mlx5_modify_qp_mbox_in *in, int sqd_event,
			struct mlx5_core_qp *qp)
{
	/* optab[cur][new] == firmware opcode for the transition; the table
	 * is sparse -- unlisted pairs stay 0 and are rejected below.
	 */
	static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
		[MLX5_QP_STATE_RST] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_RST2INIT_QP,
		},
		[MLX5_QP_STATE_INIT]  = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_INIT2INIT_QP,
			[MLX5_QP_STATE_RTR]	= MLX5_CMD_OP_INIT2RTR_QP,
		},
		[MLX5_QP_STATE_RTR]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTR2RTS_QP,
		},
		[MLX5_QP_STATE_RTS]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTS2RTS_QP,
			[MLX5_QP_STATE_SQD]	= MLX5_CMD_OP_RTS2SQD_QP,
		},
		[MLX5_QP_STATE_SQD] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQD2RTS_QP,
			[MLX5_QP_STATE_SQD]	= MLX5_CMD_OP_SQD2SQD_QP,
		},
		[MLX5_QP_STATE_SQER] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQERR2RTS_QP,
		},
		[MLX5_QP_STATE_ERR] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		}
	};

	struct mlx5_modify_qp_mbox_out out;
	int err = 0;
	u16 op;

	if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE ||
	    !optab[cur_state][new_state])
		return -EINVAL;

	memset(&out, 0, sizeof(out));
	op = optab[cur_state][new_state];
	in->hdr.opcode = cpu_to_be16(op);
	in->qpn = cpu_to_be32(qp->qpn);
	err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
	if (err)
		return err;

	return mlx5_cmd_status_to_err(&out.hdr);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
224
/* Initialize the QP lookup table (lock + radix tree) and its debugfs
 * hierarchy.
 */
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	mlx5_qp_debugfs_init(dev);
}
233
/* Tear down the QP debugfs hierarchy; the radix tree itself needs no
 * explicit cleanup.
 */
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
	mlx5_qp_debugfs_cleanup(dev);
}
238
239int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
240 struct mlx5_query_qp_mbox_out *out, int outlen)
241{
242 struct mlx5_query_qp_mbox_in in;
243 int err;
244
245 memset(&in, 0, sizeof(in));
246 memset(out, 0, outlen);
247 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
248 in.qpn = cpu_to_be32(qp->qpn);
249 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
250 if (err)
251 return err;
252
253 if (out->hdr.status)
254 return mlx5_cmd_status_to_err(&out->hdr);
255
256 return err;
257}
258EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
259
260int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
261{
262 struct mlx5_alloc_xrcd_mbox_in in;
263 struct mlx5_alloc_xrcd_mbox_out out;
264 int err;
265
266 memset(&in, 0, sizeof(in));
267 memset(&out, 0, sizeof(out));
268 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD);
269 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
270 if (err)
271 return err;
272
273 if (out.hdr.status)
274 err = mlx5_cmd_status_to_err(&out.hdr);
275 else
276 *xrcdn = be32_to_cpu(out.xrcdn);
277
278 return err;
279}
280EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
281
282int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
283{
284 struct mlx5_dealloc_xrcd_mbox_in in;
285 struct mlx5_dealloc_xrcd_mbox_out out;
286 int err;
287
288 memset(&in, 0, sizeof(in));
289 memset(&out, 0, sizeof(out));
290 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD);
291 in.xrcdn = cpu_to_be32(xrcdn);
292 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
293 if (err)
294 return err;
295
296 if (out.hdr.status)
297 err = mlx5_cmd_status_to_err(&out.hdr);
298
299 return err;
300}
301EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
new file mode 100644
index 000000000000..38bce93f8314
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -0,0 +1,223 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/cmd.h>
37#include <linux/mlx5/srq.h>
38#include <rdma/ib_verbs.h>
39#include "mlx5_core.h"
40
41void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
42{
43 struct mlx5_srq_table *table = &dev->priv.srq_table;
44 struct mlx5_core_srq *srq;
45
46 spin_lock(&table->lock);
47
48 srq = radix_tree_lookup(&table->tree, srqn);
49 if (srq)
50 atomic_inc(&srq->refcount);
51
52 spin_unlock(&table->lock);
53
54 if (!srq) {
55 mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
56 return;
57 }
58
59 srq->event(srq, event_type);
60
61 if (atomic_dec_and_test(&srq->refcount))
62 complete(&srq->free);
63}
64
65struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
66{
67 struct mlx5_srq_table *table = &dev->priv.srq_table;
68 struct mlx5_core_srq *srq;
69
70 spin_lock(&table->lock);
71
72 srq = radix_tree_lookup(&table->tree, srqn);
73 if (srq)
74 atomic_inc(&srq->refcount);
75
76 spin_unlock(&table->lock);
77
78 return srq;
79}
80EXPORT_SYMBOL(mlx5_core_get_srq);
81
/* Create an SRQ in firmware from the caller-built CREATE_SRQ inbox and
 * register it in the driver's SRQ radix tree.  On tree insertion
 * failure the SRQ is destroyed again in firmware.
 */
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_create_srq_mbox_in *in, int inlen)
{
	struct mlx5_create_srq_mbox_out out;
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_destroy_srq_mbox_in din;
	struct mlx5_destroy_srq_mbox_out dout;
	int err;

	memset(&out, 0, sizeof(out));
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	/* SRQ numbers are 24 bits wide */
	srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, srq->srqn, srq);
	spin_unlock_irq(&table->lock);
	if (err) {
		mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
		goto err_cmd;
	}

	return 0;

err_cmd:
	/* undo the firmware-side create; result intentionally ignored */
	memset(&din, 0, sizeof(din));
	memset(&dout, 0, sizeof(dout));
	din.srqn = cpu_to_be32(srq->srqn);
	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);
124
/* Destroy an SRQ: remove it from the radix tree (sanity-checking that
 * the tree held this very object), issue DESTROY_SRQ to firmware, then
 * wait for all outstanding references to be dropped.
 */
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_destroy_srq_mbox_in in;
	struct mlx5_destroy_srq_mbox_out out;
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, srq->srqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
		return -EINVAL;
	}
	if (tmp != srq) {
		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
		return -EINVAL;
	}

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
	in.srqn = cpu_to_be32(srq->srqn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	/* drop the creation reference and wait for in-flight users
	 * (e.g. mlx5_srq_event) to finish
	 */
	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_srq);
163
164int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
165 struct mlx5_query_srq_mbox_out *out)
166{
167 struct mlx5_query_srq_mbox_in in;
168 int err;
169
170 memset(&in, 0, sizeof(in));
171 memset(out, 0, sizeof(*out));
172
173 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
174 in.srqn = cpu_to_be32(srq->srqn);
175 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
176 if (err)
177 return err;
178
179 if (out->hdr.status)
180 return mlx5_cmd_status_to_err(&out->hdr);
181
182 return err;
183}
184EXPORT_SYMBOL(mlx5_core_query_srq);
185
186int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
187 u16 lwm, int is_srq)
188{
189 struct mlx5_arm_srq_mbox_in in;
190 struct mlx5_arm_srq_mbox_out out;
191 int err;
192
193 memset(&in, 0, sizeof(in));
194 memset(&out, 0, sizeof(out));
195
196 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
197 in.hdr.opmod = cpu_to_be16(!!is_srq);
198 in.srqn = cpu_to_be32(srq->srqn);
199 in.lwm = cpu_to_be16(lwm);
200
201 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
202 if (err)
203 return err;
204
205 if (out.hdr.status)
206 return mlx5_cmd_status_to_err(&out.hdr);
207
208 return err;
209}
210EXPORT_SYMBOL(mlx5_core_arm_srq);
211
/* Initialize the SRQ lookup table (lock + radix tree). */
void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;

	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
219
/* Counterpart of mlx5_init_srq_table(); the radix tree needs no
 * explicit teardown.
 */
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
	/* nothing */
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
new file mode 100644
index 000000000000..71d4a3937200
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -0,0 +1,223 @@
1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/cmd.h>
37#include "mlx5_core.h"
38
enum {
	NUM_DRIVER_UARS = 4,	/* UAR pages the core driver maps for itself */
	NUM_LOW_LAT_UUARS = 4,	/* micro-UARs reserved for low-latency use
				 * (lock-free doorbells) -- see need_uuar_lock()
				 */
};
43
44
/* Mailbox layouts for the ALLOC_UAR firmware command.  Field widths and
 * reserved padding mirror the device command format; all multi-byte
 * fields are big-endian on the wire.
 */
struct mlx5_alloc_uar_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_alloc_uar_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			uarn;	/* allocated UAR index (low 24 bits) */
	u8			rsvd[4];
};
55
/* Mailbox layouts for the DEALLOC_UAR firmware command. */
struct mlx5_free_uar_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			uarn;	/* UAR index to release */
	u8			rsvd[4];
};

struct mlx5_free_uar_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};
66
67int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
68{
69 struct mlx5_alloc_uar_mbox_in in;
70 struct mlx5_alloc_uar_mbox_out out;
71 int err;
72
73 memset(&in, 0, sizeof(in));
74 memset(&out, 0, sizeof(out));
75 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR);
76 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
77 if (err)
78 goto ex;
79
80 if (out.hdr.status) {
81 err = mlx5_cmd_status_to_err(&out.hdr);
82 goto ex;
83 }
84
85 *uarn = be32_to_cpu(out.uarn) & 0xffffff;
86
87ex:
88 return err;
89}
90EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
91
92int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
93{
94 struct mlx5_free_uar_mbox_in in;
95 struct mlx5_free_uar_mbox_out out;
96 int err;
97
98 memset(&in, 0, sizeof(in));
99 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR);
100 in.uarn = cpu_to_be32(uarn);
101 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
102 if (err)
103 goto ex;
104
105 if (out.hdr.status)
106 err = mlx5_cmd_status_to_err(&out.hdr);
107
108ex:
109 return err;
110}
111EXPORT_SYMBOL(mlx5_cmd_free_uar);
112
113static int need_uuar_lock(int uuarn)
114{
115 int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
116
117 if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS)
118 return 0;
119
120 return 1;
121}
122
123int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
124{
125 int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
126 struct mlx5_bf *bf;
127 phys_addr_t addr;
128 int err;
129 int i;
130
131 uuari->num_uars = NUM_DRIVER_UARS;
132 uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS;
133
134 mutex_init(&uuari->lock);
135 uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL);
136 if (!uuari->uars)
137 return -ENOMEM;
138
139 uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL);
140 if (!uuari->bfs) {
141 err = -ENOMEM;
142 goto out_uars;
143 }
144
145 uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap),
146 GFP_KERNEL);
147 if (!uuari->bitmap) {
148 err = -ENOMEM;
149 goto out_bfs;
150 }
151
152 uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL);
153 if (!uuari->count) {
154 err = -ENOMEM;
155 goto out_bitmap;
156 }
157
158 for (i = 0; i < uuari->num_uars; i++) {
159 err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index);
160 if (err)
161 goto out_count;
162
163 addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT);
164 uuari->uars[i].map = ioremap(addr, PAGE_SIZE);
165 if (!uuari->uars[i].map) {
166 mlx5_cmd_free_uar(dev, uuari->uars[i].index);
167 goto out_count;
168 }
169 mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
170 uuari->uars[i].index, uuari->uars[i].map);
171 }
172
173 for (i = 0; i < tot_uuars; i++) {
174 bf = &uuari->bfs[i];
175
176 bf->buf_size = dev->caps.bf_reg_size / 2;
177 bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
178 bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
179 bf->reg = NULL; /* Add WC support */
180 bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.bf_reg_size +
181 MLX5_BF_OFFSET;
182 bf->need_lock = need_uuar_lock(i);
183 spin_lock_init(&bf->lock);
184 spin_lock_init(&bf->lock32);
185 bf->uuarn = i;
186 }
187
188 return 0;
189
190out_count:
191 for (i--; i >= 0; i--) {
192 iounmap(uuari->uars[i].map);
193 mlx5_cmd_free_uar(dev, uuari->uars[i].index);
194 }
195 kfree(uuari->count);
196
197out_bitmap:
198 kfree(uuari->bitmap);
199
200out_bfs:
201 kfree(uuari->bfs);
202
203out_uars:
204 kfree(uuari->uars);
205 return err;
206}
207
208int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
209{
210 int i = uuari->num_uars;
211
212 for (i--; i >= 0; i--) {
213 iounmap(uuari->uars[i].map);
214 mlx5_cmd_free_uar(dev, uuari->uars[i].index);
215 }
216
217 kfree(uuari->count);
218 kfree(uuari->bitmap);
219 kfree(uuari->bfs);
220 kfree(uuari->uars);
221
222 return 0;
223}