author     Moshe Lazer <moshel@mellanox.com>       2013-08-14 10:46:48 -0400
committer  David S. Miller <davem@davemloft.net>   2013-08-15 18:42:57 -0400
commit     0a324f3189ed9c78b1aaf48d88e93cb18643c655 (patch)
tree       74ce14e88537117866a5327e7afb2d4ba3617990
parent     15718ea0d844e4816dbd95d57a8a0e3e264ba90e (diff)
net/mlx5_core: Support MANAGE_PAGES and QUERY_PAGES firmware command changes
In the previous QUERY_PAGES command version we used one command to get the required amount of boot, init and post init pages.  The new version uses the op_mod field to specify whether the query is for the required amount of boot, init or post init pages.  In addition, the output field size for the required amount of pages increased from 16 to 32 bits.

In the MANAGE_PAGES command the input_num_entries and output_num_entries field sizes changed from 16 to 32 bits and the PAS tables offset changed to 0x10.

In the pages request event the num_pages field also changed to 32 bits.

In the HCA-capabilities layout the size and location of the max_qp_mcg field have been changed to support 24 bits.

This patch isn't compatible with firmware versions < 5; however, it turns out that the first GA firmware we will publish will not support previous versions, so this should be OK.

Signed-off-by: Moshe Lazer <moshel@mellanox.com>
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
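To make the wire-format change concrete, the following is a minimal user-space sketch (not part of this patch; struct and variable names are illustrative stand-ins for the driver's mlx5_query_pages_outbox and HCA capability fields) showing how a little-endian host would decode the widened fields: the new 32-bit num_pages and the 24-bit max_qp_mcg value masked out of a 32-bit big-endian word.

/*
 * Illustrative sketch only -- plain C, buildable in user space.
 * ntohl()/ntohs() stand in for the kernel's be32_to_cpu()/be16_to_cpu().
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Hypothetical mirror of the new QUERY_PAGES output layout. */
struct query_pages_outbox {
        uint16_t rsvd;
        uint16_t func_id;      /* big-endian on the wire */
        uint32_t num_pages;    /* big-endian; was only 16 bits before this change */
};

int main(void)
{
        struct query_pages_outbox out = {
                .func_id   = htons(0x0001),
                .num_pages = htonl(200000),   /* > 65535: would not fit in the old 16-bit field */
        };
        uint32_t max_qp_mcg_be = htonl(0x12abcdef);  /* top byte is reserved in the new layout */

        int32_t  npages     = (int32_t)ntohl(out.num_pages);
        uint16_t func_id    = ntohs(out.func_id);
        uint32_t max_qp_mcg = ntohl(max_qp_mcg_be) & 0xffffff;  /* keep only the low 24 bits */

        printf("func_id 0x%x needs %d pages, max_qp_mcg %u\n",
               func_id, npages, max_qp_mcg);
        return 0;
}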
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/cmd.c         2
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/eq.c          2
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/fw.c          2
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c  58
-rw-r--r--   include/linux/mlx5/device.h                          22
-rw-r--r--   include/linux/mlx5/driver.h                           4
6 files changed, 41 insertions(+), 49 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index c571de85d0f9..5472cbd34028 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -46,7 +46,7 @@
 #include "mlx5_core.h"
 
 enum {
-        CMD_IF_REV = 4,
+        CMD_IF_REV = 5,
 };
 
 enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index c02cbcfd0fb8..443cc4d7b024 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
         case MLX5_EVENT_TYPE_PAGE_REQUEST:
                 {
                         u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
-                        s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
+                        s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
 
                         mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
                         mlx5_core_req_pages_handler(dev, func_id, npages);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 72a5222447f5..f012658b6a92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
         caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
         caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
         caps->log_max_mcg = out->hca_cap.log_max_mcg;
-        caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
+        caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
         caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
         caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
         caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 4a3e137931a3..3a2408d44820 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -43,10 +43,16 @@ enum {
         MLX5_PAGES_TAKE = 2
 };
 
+enum {
+        MLX5_BOOT_PAGES      = 1,
+        MLX5_INIT_PAGES      = 2,
+        MLX5_POST_INIT_PAGES = 3
+};
+
 struct mlx5_pages_req {
         struct mlx5_core_dev *dev;
         u32 func_id;
-        s16 npages;
+        s32 npages;
         struct work_struct work;
 };
 
@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox {
 
 struct mlx5_query_pages_outbox {
         struct mlx5_outbox_hdr hdr;
-        __be16 num_boot_pages;
+        __be16 rsvd;
         __be16 func_id;
-        __be16 init_pages;
-        __be16 num_pages;
+        __be32 num_pages;
 };
 
 struct mlx5_manage_pages_inbox {
         struct mlx5_inbox_hdr hdr;
-        __be16 rsvd0;
+        __be16 rsvd;
         __be16 func_id;
-        __be16 rsvd1;
-        __be16 num_entries;
-        u8 rsvd2[16];
+        __be32 num_entries;
         __be64 pas[0];
 };
 
 struct mlx5_manage_pages_outbox {
         struct mlx5_outbox_hdr hdr;
-        u8 rsvd0[2];
-        __be16 num_entries;
-        u8 rsvd1[20];
+        __be32 num_entries;
+        u8 rsvd[4];
         __be64 pas[0];
 };
 
@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
 }
 
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
-                                s16 *pages, s16 *init_pages, u16 *boot_pages)
+                                s32 *npages, int boot)
 {
         struct mlx5_query_pages_inbox in;
         struct mlx5_query_pages_outbox out;
@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
         memset(&in, 0, sizeof(in));
         memset(&out, 0, sizeof(out));
         in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
+        in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
+
         err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
         if (err)
                 return err;
@@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
         if (out.hdr.status)
                 return mlx5_cmd_status_to_err(&out.hdr);
 
-        if (pages)
-                *pages = be16_to_cpu(out.num_pages);
-
-        if (init_pages)
-                *init_pages = be16_to_cpu(out.init_pages);
-
-        if (boot_pages)
-                *boot_pages = be16_to_cpu(out.num_boot_pages);
-
+        *npages = be32_to_cpu(out.num_pages);
         *func_id = be16_to_cpu(out.func_id);
 
         return err;
@@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
         in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
         in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
         in->func_id = cpu_to_be16(func_id);
-        in->num_entries = cpu_to_be16(npages);
+        in->num_entries = cpu_to_be32(npages);
         err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
         mlx5_core_dbg(dev, "err %d\n", err);
         if (err) {
@@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
         in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
         in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
         in.func_id = cpu_to_be16(func_id);
-        in.num_entries = cpu_to_be16(npages);
+        in.num_entries = cpu_to_be32(npages);
         mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
         err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
         if (err) {
@@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                 goto out_free;
         }
 
-        num_claimed = be16_to_cpu(out->num_entries);
+        num_claimed = be32_to_cpu(out->num_entries);
         if (nclaimed)
                 *nclaimed = num_claimed;
 
@@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work)
 }
 
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-                                 s16 npages)
+                                 s32 npages)
 {
         struct mlx5_pages_req *req;
 
@@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
 
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
 {
-        u16 uninitialized_var(boot_pages);
-        s16 uninitialized_var(init_pages);
         u16 uninitialized_var(func_id);
+        s32 uninitialized_var(npages);
         int err;
 
-        err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages,
-                                   &boot_pages);
+        err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
         if (err)
                 return err;
 
+        mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
+                      npages, boot ? "boot" : "init", func_id);
 
-        mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n",
-                      init_pages, boot_pages, func_id);
-        return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0);
+        return give_pages(dev, func_id, npages, 0);
 }
 
 static int optimal_reclaimed_pages(void)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 737685e9e852..68029b30c3dc 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -309,21 +309,20 @@ struct mlx5_hca_cap {
         __be16 max_desc_sz_rq;
         u8 rsvd21[2];
         __be16 max_desc_sz_sq_dc;
-        u8 rsvd22[4];
-        __be16 max_qp_mcg;
-        u8 rsvd23;
+        __be32 max_qp_mcg;
+        u8 rsvd22[3];
         u8 log_max_mcg;
-        u8 rsvd24;
+        u8 rsvd23;
         u8 log_max_pd;
-        u8 rsvd25;
+        u8 rsvd24;
         u8 log_max_xrcd;
-        u8 rsvd26[42];
+        u8 rsvd25[42];
         __be16 log_uar_page_sz;
-        u8 rsvd27[28];
+        u8 rsvd26[28];
         u8 log_msx_atomic_size_qp;
-        u8 rsvd28[2];
+        u8 rsvd27[2];
         u8 log_msx_atomic_size_dc;
-        u8 rsvd29[76];
+        u8 rsvd28[76];
 };
 
 
@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
 struct mlx5_eqe_page_req {
         u8 rsvd0[2];
         __be16 func_id;
-        u8 rsvd1[2];
-        __be16 num_pages;
-        __be32 rsvd2[5];
+        __be32 num_pages;
+        __be32 rsvd1[5];
 };
 
 union ev_data {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 611e65e76b00..8888381fc150 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -358,7 +358,7 @@ struct mlx5_caps {
         u32 reserved_lkey;
         u8 local_ca_ack_delay;
         u8 log_max_mcg;
-        u16 max_qp_mcg;
+        u32 max_qp_mcg;
         int min_page_sz;
 };
 
@@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
 int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-                                 s16 npages);
+                                 s32 npages);
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);