commit 5ae2a7a836be660ff1621cce1c46930f19200589
tree 655b94b9a016cec92f319761afe6bb3000f5f4fa
parent 082dee3216c99a838af40be403799f60bcea2e97
author Roland Dreier <rolandd@cisco.com> 2007-06-18 11:15:02 -0400
committer Roland Dreier <rolandd@cisco.com> 2007-06-18 11:15:02 -0400
IB/mlx4: Handle FW command interface rev 3
Upcoming firmware introduces command interface revision 3, which changes the way port capabilities are queried and set. Update the driver to handle both the new and old command interfaces by adding a new MLX4_FLAG_OLD_PORT_CMDS flag that is set after querying the firmware interface revision, and then using the correct interface based on the setting of the flag.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
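In brief, the whole patch keys off a single flag: mlx4_QUERY_FW() records whether the firmware still speaks the old (rev 2) port-command format, and the port commands branch on that flag. A condensed sketch of the pattern, pulled from the fw.c hunks below (mailbox setup and error handling omitted; not a literal excerpt):

    /* In mlx4_QUERY_FW(): remember which command interface the FW speaks. */
    if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
            dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

    /* In mlx4_INIT_PORT(): dispatch on the flag. */
    if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS)
            /* rev-2 format: port parameters are passed in a command mailbox */
            err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
                           MLX4_CMD_TIME_CLASS_A);
    else
            /* rev-3 format: INIT_PORT takes no input mailbox */
            err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
                           MLX4_CMD_TIME_CLASS_A);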
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/infiniband/hw/mlx4/main.c |  16
-rw-r--r--   drivers/infiniband/hw/mlx4/qp.c   |  44
-rw-r--r--   drivers/net/mlx4/fw.c             | 110
-rw-r--r--   drivers/net/mlx4/fw.h             |  10
-rw-r--r--   drivers/net/mlx4/main.c           |  14
5 files changed, 113 insertions(+), 81 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 402f3a20ec0a..1095c82b38c2 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -125,7 +125,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
         props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
         props->atomic_cap          = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
                 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
-        props->max_pkeys           = dev->dev->caps.pkey_table_len;
+        props->max_pkeys           = dev->dev->caps.pkey_table_len[1];
         props->max_mcast_grp       = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
         props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
         props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
@@ -168,9 +168,9 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
         props->state            = out_mad->data[32] & 0xf;
         props->phys_state       = out_mad->data[33] >> 4;
         props->port_cap_flags   = be32_to_cpup((__be32 *) (out_mad->data + 20));
-        props->gid_tbl_len      = to_mdev(ibdev)->dev->caps.gid_table_len;
+        props->gid_tbl_len      = to_mdev(ibdev)->dev->caps.gid_table_len[port];
         props->max_msg_sz       = 0x80000000;
-        props->pkey_tbl_len     = to_mdev(ibdev)->dev->caps.pkey_table_len;
+        props->pkey_tbl_len     = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
         props->bad_pkey_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 46));
         props->qkey_viol_cntr   = be16_to_cpup((__be16 *) (out_mad->data + 48));
         props->active_width     = out_mad->data[31] & 0xf;
@@ -280,8 +280,14 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
                 return PTR_ERR(mailbox);
 
         memset(mailbox->buf, 0, 256);
-        *(u8 *) mailbox->buf         = !!reset_qkey_viols << 6;
-        ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
+
+        if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+                *(u8 *) mailbox->buf         = !!reset_qkey_viols << 6;
+                ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
+        } else {
+                ((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
+                ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
+        }
 
         err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
                        MLX4_CMD_TIME_CLASS_B);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 95d4a9d6994c..355a31f9c03c 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -603,24 +603,6 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
         return 0;
 }
 
-static void init_port(struct mlx4_ib_dev *dev, int port)
-{
-        struct mlx4_init_port_param param;
-        int err;
-
-        memset(&param, 0, sizeof param);
-
-        param.port_width_cap = dev->dev->caps.port_width_cap;
-        param.vl_cap         = dev->dev->caps.vl_cap;
-        param.mtu            = ib_mtu_enum_to_int(dev->dev->caps.mtu_cap);
-        param.max_gid        = dev->dev->caps.gid_table_len;
-        param.max_pkey       = dev->dev->caps.pkey_table_len;
-
-        err = mlx4_INIT_PORT(dev->dev, &param, port);
-        if (err)
-                printk(KERN_WARNING "INIT_PORT failed, return code %d.\n", err);
-}
-
 static int to_mlx4_st(enum ib_qp_type type)
 {
         switch (type) {
@@ -694,9 +676,9 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
         path->counter_index = 0xff;
 
         if (ah->ah_flags & IB_AH_GRH) {
-                if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len) {
+                if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
                         printk(KERN_ERR "sgid_index (%u) too large. max is %d\n",
-                               ah->grh.sgid_index, dev->dev->caps.gid_table_len - 1);
+                               ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
                         return -1;
                 }
 
@@ -812,13 +794,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
         }
 
         if (attr_mask & IB_QP_ALT_PATH) {
-                if (attr->alt_pkey_index >= dev->dev->caps.pkey_table_len)
-                        return -EINVAL;
-
                 if (attr->alt_port_num == 0 ||
                     attr->alt_port_num > dev->dev->caps.num_ports)
                         return -EINVAL;
 
+                if (attr->alt_pkey_index >=
+                    dev->dev->caps.pkey_table_len[attr->alt_port_num])
+                        return -EINVAL;
+
                 if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
                                   attr->alt_port_num))
                         return -EINVAL;
@@ -949,7 +932,9 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
          */
         if (is_qp0(dev, qp)) {
                 if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
-                        init_port(dev, qp->port);
+                        if (mlx4_INIT_PORT(dev->dev, qp->port))
+                                printk(KERN_WARNING "INIT_PORT failed for port %d\n",
+                                       qp->port);
 
                 if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
                     (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
@@ -1012,16 +997,17 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
                 goto out;
 
-        if ((attr_mask & IB_QP_PKEY_INDEX) &&
-             attr->pkey_index >= dev->dev->caps.pkey_table_len) {
-                goto out;
-        }
-
         if ((attr_mask & IB_QP_PORT) &&
             (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
                 goto out;
         }
 
+        if (attr_mask & IB_QP_PKEY_INDEX) {
+                int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+                if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p])
+                        goto out;
+        }
+
         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
             attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
                 goto out;
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 81fc546a1c44..d2b065351e45 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -38,7 +38,9 @@
 #include "icm.h"
 
 enum {
-        MLX4_COMMAND_INTERFACE_REV              = 2,
+        MLX4_COMMAND_INTERFACE_MIN_REV          = 2,
+        MLX4_COMMAND_INTERFACE_MAX_REV          = 3,
+        MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS    = 3,
 };
 
 extern void __buggy_use_of_MLX4_GET(void);
@@ -107,6 +109,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
         u16 size;
         u16 stat_rate;
         int err;
+        int i;
 
 #define QUERY_DEV_CAP_OUT_SIZE                 0x100
 #define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET        0x10
@@ -176,7 +179,6 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
         err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
                            MLX4_CMD_TIME_CLASS_A);
-
         if (err)
                 goto out;
 
@@ -216,18 +218,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
         dev_cap->max_rdma_global = 1 << (field & 0x3f);
         MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
         dev_cap->local_ca_ack_delay = field & 0x1f;
-        MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
-        dev_cap->max_mtu        = field >> 4;
-        dev_cap->max_port_width = field & 0xf;
         MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
-        dev_cap->max_vl         = field >> 4;
         dev_cap->num_ports      = field & 0xf;
-        MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
-        dev_cap->max_gids       = 1 << (field & 0xf);
         MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
         dev_cap->stat_rate_support = stat_rate;
-        MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
-        dev_cap->max_pkeys      = 1 << (field & 0xf);
         MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
         MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
         dev_cap->reserved_uars = field >> 4;
@@ -304,6 +298,42 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
         MLX4_GET(dev_cap->max_icm_sz, outbox,
                  QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
 
+        if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+                for (i = 1; i <= dev_cap->num_ports; ++i) {
+                        MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
+                        dev_cap->max_vl[i]         = field >> 4;
+                        MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
+                        dev_cap->max_mtu[i]        = field >> 4;
+                        dev_cap->max_port_width[i] = field & 0xf;
+                        MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
+                        dev_cap->max_gids[i]       = 1 << (field & 0xf);
+                        MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
+                        dev_cap->max_pkeys[i]      = 1 << (field & 0xf);
+                }
+        } else {
+#define QUERY_PORT_MTU_OFFSET                   0x01
+#define QUERY_PORT_WIDTH_OFFSET                 0x06
+#define QUERY_PORT_MAX_GID_PKEY_OFFSET          0x07
+#define QUERY_PORT_MAX_VL_OFFSET                0x0b
+
+                for (i = 1; i <= dev_cap->num_ports; ++i) {
+                        err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
+                                           MLX4_CMD_TIME_CLASS_B);
+                        if (err)
+                                goto out;
+
+                        MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
+                        dev_cap->max_mtu[i]        = field & 0xf;
+                        MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
+                        dev_cap->max_port_width[i] = field & 0xf;
+                        MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
+                        dev_cap->max_gids[i]       = 1 << (field >> 4);
+                        dev_cap->max_pkeys[i]      = 1 << (field & 0xf);
+                        MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
+                        dev_cap->max_vl[i]         = field & 0xf;
+                }
+        }
+
         if (dev_cap->bmme_flags & 1)
                 mlx4_dbg(dev, "Base MM extensions: yes "
                          "(flags %d, rsvd L_Key %08x)\n",
@@ -338,8 +368,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
         mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
                  dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
         mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
-                 dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu,
-                 dev_cap->max_port_width);
+                 dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu[1],
+                 dev_cap->max_port_width[1]);
         mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
                  dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
         mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
@@ -491,7 +521,8 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
                 ((fw_ver & 0x0000ffffull) << 16);
 
         MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
-        if (cmd_if_rev != MLX4_COMMAND_INTERFACE_REV) {
+        if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
+            cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
                 mlx4_err(dev, "Installed FW has unsupported "
                          "command interface revision %d.\n",
                          cmd_if_rev);
@@ -499,12 +530,15 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
                          (int) (dev->caps.fw_ver >> 32),
                          (int) (dev->caps.fw_ver >> 16) & 0xffff,
                          (int) dev->caps.fw_ver & 0xffff);
-                mlx4_err(dev, "This driver version supports only revision %d.\n",
-                         MLX4_COMMAND_INTERFACE_REV);
+                mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
+                         MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
                 err = -ENODEV;
                 goto out;
         }
 
+        if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
+                dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
+
         MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
         cmd->max_cmds = 1 << lg;
 
@@ -708,13 +742,15 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
         return err;
 }
 
-int mlx4_INIT_PORT(struct mlx4_dev *dev, struct mlx4_init_port_param *param, int port)
+int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
 {
         struct mlx4_cmd_mailbox *mailbox;
         u32 *inbox;
         int err;
         u32 flags;
+        u16 field;
 
+        if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
 #define INIT_PORT_IN_SIZE               256
 #define INIT_PORT_FLAGS_OFFSET          0x00
 #define INIT_PORT_FLAG_SIG              (1 << 18)
@@ -729,32 +765,32 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, struct mlx4_init_port_param *param, int
 #define INIT_PORT_NODE_GUID_OFFSET      0x18
 #define INIT_PORT_SI_GUID_OFFSET        0x20
 
-        mailbox = mlx4_alloc_cmd_mailbox(dev);
-        if (IS_ERR(mailbox))
-                return PTR_ERR(mailbox);
-        inbox = mailbox->buf;
+                mailbox = mlx4_alloc_cmd_mailbox(dev);
+                if (IS_ERR(mailbox))
+                        return PTR_ERR(mailbox);
+                inbox = mailbox->buf;
 
-        memset(inbox, 0, INIT_PORT_IN_SIZE);
+                memset(inbox, 0, INIT_PORT_IN_SIZE);
 
-        flags = 0;
-        flags |= param->set_guid0     ? INIT_PORT_FLAG_G0  : 0;
-        flags |= param->set_node_guid ? INIT_PORT_FLAG_NG  : 0;
-        flags |= param->set_si_guid   ? INIT_PORT_FLAG_SIG : 0;
-        flags |= (param->vl_cap & 0xf) << INIT_PORT_VL_SHIFT;
-        flags |= (param->port_width_cap & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
-        MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
+                flags = 0;
+                flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
+                flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
+                MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
 
-        MLX4_PUT(inbox, param->mtu,       INIT_PORT_MTU_OFFSET);
-        MLX4_PUT(inbox, param->max_gid,   INIT_PORT_MAX_GID_OFFSET);
-        MLX4_PUT(inbox, param->max_pkey,  INIT_PORT_MAX_PKEY_OFFSET);
-        MLX4_PUT(inbox, param->guid0,     INIT_PORT_GUID0_OFFSET);
-        MLX4_PUT(inbox, param->node_guid, INIT_PORT_NODE_GUID_OFFSET);
-        MLX4_PUT(inbox, param->si_guid,   INIT_PORT_SI_GUID_OFFSET);
+                field = 128 << dev->caps.mtu_cap[port];
+                MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
+                field = dev->caps.gid_table_len[port];
+                MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
+                field = dev->caps.pkey_table_len[port];
+                MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
 
-        err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
-                       MLX4_CMD_TIME_CLASS_A);
+                err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
+                               MLX4_CMD_TIME_CLASS_A);
 
-        mlx4_free_cmd_mailbox(dev, mailbox);
+                mlx4_free_cmd_mailbox(dev, mailbox);
+        } else
+                err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+                               MLX4_CMD_TIME_CLASS_A);
 
         return err;
 }
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 2616fa53d4d0..296254ac27c1 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -59,13 +59,13 @@ struct mlx4_dev_cap {
         int max_responder_per_qp;
         int max_rdma_global;
         int local_ca_ack_delay;
-        int max_mtu;
-        int max_port_width;
-        int max_vl;
         int num_ports;
-        int max_gids;
+        int max_mtu[MLX4_MAX_PORTS + 1];
+        int max_port_width[MLX4_MAX_PORTS + 1];
+        int max_vl[MLX4_MAX_PORTS + 1];
+        int max_gids[MLX4_MAX_PORTS + 1];
+        int max_pkeys[MLX4_MAX_PORTS + 1];
         u16 stat_rate_support;
-        int max_pkeys;
         u32 flags;
         int reserved_uars;
         int uar_size;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index d4172937025b..41eafebf5823 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -88,6 +88,7 @@ static struct mlx4_profile default_profile = {
 static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 {
         int err;
+        int i;
 
         err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
         if (err) {
@@ -117,11 +118,15 @@ static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev
         }
 
         dev->caps.num_ports          = dev_cap->num_ports;
+        for (i = 1; i <= dev->caps.num_ports; ++i) {
+                dev->caps.vl_cap[i]         = dev_cap->max_vl[i];
+                dev->caps.mtu_cap[i]        = dev_cap->max_mtu[i];
+                dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
+                dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
+                dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
+        }
+
         dev->caps.num_uars           = dev_cap->uar_size / PAGE_SIZE;
-        dev->caps.vl_cap             = dev_cap->max_vl;
-        dev->caps.mtu_cap            = dev_cap->max_mtu;
-        dev->caps.gid_table_len      = dev_cap->max_gids;
-        dev->caps.pkey_table_len     = dev_cap->max_pkeys;
         dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
         dev->caps.bf_reg_size        = dev_cap->bf_reg_size;
         dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
@@ -148,7 +153,6 @@ static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev
         dev->caps.reserved_mrws      = dev_cap->reserved_mrws;
         dev->caps.reserved_uars      = dev_cap->reserved_uars;
         dev->caps.reserved_pds       = dev_cap->reserved_pds;
-        dev->caps.port_width_cap     = dev_cap->max_port_width;
         dev->caps.mtt_entry_sz       = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
         dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
         dev->caps.flags              = dev_cap->flags;