author	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-02 20:20:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-02 20:20:40 -0400
commit	7a9a2970b5c1c2ce73d4bb84edaa7ebf13e0c841 (patch)
tree	bd4909abfcd759b376cfd2fab06281df366f6a0f /drivers/net
parent	fc47912d9cda50ae6bd9ca30e97e8c03de5b7b60 (diff)
parent	d172f5a4ab151a952a0d898ba3b0ff6a020171a6 (diff)
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband updates from Roland Dreier:
 "First batch of InfiniBand/RDMA changes for the 3.7 merge window:
  - mlx4 IB support for SR-IOV
  - A couple of SRP initiator fixes
  - Batch of nes hardware driver fixes
  - Fix for long-standing use-after-free crash in IPoIB
  - Other miscellaneous fixes"

This merge also removes a new use of __cancel_delayed_work(), and
replaces it with the regular cancel_delayed_work() that is now irq-safe
thanks to the workqueue updates.

That said, I suspect the sequence in question should probably use
"mod_delayed_work()". I just did the minimal "don't use deprecated
functions" fixup, though.

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (45 commits)
  IB/qib: Fix local access validation for user MRs
  mlx4_core: Disable SENSE_PORT for multifunction devices
  mlx4_core: Clean up enabling of SENSE_PORT for older (ConnectX-1/-2) HCAs
  mlx4_core: Stash PCI ID driver_data in mlx4_priv structure
  IB/srp: Avoid having aborted requests hang
  IB/srp: Fix use-after-free in srp_reset_req()
  IB/qib: Add a qib driver version
  RDMA/nes: Fix compilation error when nes_debug is enabled
  RDMA/nes: Print hardware resource type
  RDMA/nes: Fix for crash when TX checksum offload is off
  RDMA/nes: Cosmetic changes
  RDMA/nes: Fix for incorrect MSS when TSO is on
  RDMA/nes: Fix incorrect resolving of the loopback MAC address
  mlx4_core: Fix crash on uninitialized priv->cmd.slave_sem
  mlx4_core: Trivial cleanups to driver log messages
  mlx4_core: Trivial readability fix: "0X30" -> "0x30"
  IB/mlx4: Create paravirt contexts for VFs when master IB driver initializes
  mlx4: Modify proxy/tunnel QP mechanism so that guests do no calculations
  mlx4: Paravirtualize Node Guids for slaves
  mlx4: Activate SR-IOV mode for IB
  ...
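For context on the workqueue note above, the minimal "don't use deprecated functions" fixup is a one-line substitution, since plain cancel_delayed_work() became irq-safe with the 3.7 workqueue updates. A sketch of the pattern; the stats_work field below is a hypothetical example, not the actual code touched by this merge:

	/* before: the deprecated variant, only safe with IRQs already disabled */
	__cancel_delayed_work(&priv->stats_work);

	/* after: cancel_delayed_work() is now irq-safe itself, so the
	 * minimal fixup is just a rename */
	cancel_delayed_work(&priv->stats_work);

	/* the mod_delayed_work() alternative suggested above would instead
	 * collapse a cancel-then-queue pair into one atomic call */
	mod_delayed_work(system_wq, &priv->stats_work, msecs_to_jiffies(100));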
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/cmd.c	242
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/eq.c	245
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/fw.c	246
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/fw.h	11
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/main.c	171
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/mlx4.h	59
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/port.c	10
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/qp.c	100
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/resource_tracker.c	222
9 files changed, 1113 insertions(+), 193 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index c8fef4353021..3d1899ff1076 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -40,6 +40,7 @@
 
 #include <linux/mlx4/cmd.h>
 #include <linux/semaphore.h>
+#include <rdma/ib_smi.h>
 
 #include <asm/io.h>
 
@@ -394,7 +395,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
 	int ret;
 
-	down(&priv->cmd.slave_sem);
+	mutex_lock(&priv->cmd.slave_cmd_mutex);
+
 	vhcr->in_param = cpu_to_be64(in_param);
 	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
 	vhcr->in_modifier = cpu_to_be32(in_modifier);
@@ -402,6 +404,7 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
 	vhcr->status = 0;
 	vhcr->flags = !!(priv->cmd.use_events) << 6;
+
 	if (mlx4_is_master(dev)) {
 		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
 		if (!ret) {
@@ -438,7 +441,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 		mlx4_err(dev, "failed execution of VHCR_POST command"
			 "opcode 0x%x\n", op);
 	}
-	up(&priv->cmd.slave_sem);
+
+	mutex_unlock(&priv->cmd.slave_cmd_mutex);
 	return ret;
 }
 
@@ -627,6 +631,162 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
 			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 
+static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox)
+{
+	struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
+	struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
+	int err;
+	int i;
+
+	if (index & 0x1f)
+		return -EINVAL;
+
+	in_mad->attr_mod = cpu_to_be32(index / 32);
+
+	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
+			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+			   MLX4_CMD_NATIVE);
+	if (err)
+		return err;
+
+	for (i = 0; i < 32; ++i)
+		pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
+
+	return err;
+}
+
+static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
+			       struct mlx4_cmd_mailbox *inbox,
+			       struct mlx4_cmd_mailbox *outbox)
+{
+	int i;
+	int err;
+
+	for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
+		err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+#define PORT_CAPABILITY_LOCATION_IN_SMP 20
+#define PORT_STATE_OFFSET 32
+
+static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
+{
+	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
+		return IB_PORT_ACTIVE;
+	else
+		return IB_PORT_DOWN;
+}
+
+static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
+				struct mlx4_vhcr *vhcr,
+				struct mlx4_cmd_mailbox *inbox,
+				struct mlx4_cmd_mailbox *outbox,
+				struct mlx4_cmd_info *cmd)
+{
+	struct ib_smp *smp = inbox->buf;
+	u32 index;
+	u8 port;
+	u16 *table;
+	int err;
+	int vidx, pidx;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct ib_smp *outsmp = outbox->buf;
+	__be16 *outtab = (__be16 *)(outsmp->data);
+	__be32 slave_cap_mask;
+	__be64 slave_node_guid;
+	port = vhcr->in_modifier;
+
+	if (smp->base_version == 1 &&
+	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
+	    smp->class_version == 1) {
+		if (smp->method == IB_MGMT_METHOD_GET) {
+			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
+				index = be32_to_cpu(smp->attr_mod);
+				if (port < 1 || port > dev->caps.num_ports)
+					return -EINVAL;
+				table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL);
+				if (!table)
+					return -ENOMEM;
+				/* need to get the full pkey table because the paravirtualized
+				 * pkeys may be scattered among several pkey blocks.
+				 */
+				err = get_full_pkey_table(dev, port, table, inbox, outbox);
+				if (!err) {
+					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
+						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
+						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
+					}
+				}
+				kfree(table);
+				return err;
+			}
+			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
+				/*get the slave specific caps:*/
+				/*do the command */
+				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
+					    vhcr->in_modifier, vhcr->op_modifier,
+					    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
+				/* modify the response for slaves */
+				if (!err && slave != mlx4_master_func_num(dev)) {
+					u8 *state = outsmp->data + PORT_STATE_OFFSET;
+
+					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
+					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
+					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
+				}
+				return err;
+			}
+			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
+				/* compute slave's gid block */
+				smp->attr_mod = cpu_to_be32(slave / 8);
+				/* execute cmd */
+				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
+					     vhcr->in_modifier, vhcr->op_modifier,
+					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
+				if (!err) {
+					/* if needed, move slave gid to index 0 */
+					if (slave % 8)
+						memcpy(outsmp->data,
+						       outsmp->data + (slave % 8) * 8, 8);
+					/* delete all other gids */
+					memset(outsmp->data + 8, 0, 56);
+				}
+				return err;
+			}
+			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
+				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
+					     vhcr->in_modifier, vhcr->op_modifier,
+					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
+				if (!err) {
+					slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
+					memcpy(outsmp->data + 12, &slave_node_guid, 8);
+				}
+				return err;
+			}
+		}
+	}
+	if (slave != mlx4_master_func_num(dev) &&
+	    ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
+	     (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
+	      smp->method == IB_MGMT_METHOD_SET))) {
+		mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, "
+			 "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
+			 slave, smp->method, smp->mgmt_class,
+			 be16_to_cpu(smp->attr_id));
+		return -EPERM;
+	}
+	/*default:*/
+	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
+				    vhcr->in_modifier, vhcr->op_modifier,
+				    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
+}
+
 int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
 		     struct mlx4_vhcr *vhcr,
 		     struct mlx4_cmd_mailbox *inbox,
@@ -950,7 +1110,7 @@ static struct mlx4_cmd_info cmd_info[] = {
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = mlx4_GEN_QP_wrapper
+		.wrapper = mlx4_INIT2INIT_QP_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_INIT2RTR_QP,
@@ -968,7 +1128,7 @@ static struct mlx4_cmd_info cmd_info[] = {
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = mlx4_GEN_QP_wrapper
+		.wrapper = mlx4_RTR2RTS_QP_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_RTS2RTS_QP,
@@ -977,7 +1137,7 @@ static struct mlx4_cmd_info cmd_info[] = {
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = mlx4_GEN_QP_wrapper
+		.wrapper = mlx4_RTS2RTS_QP_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_SQERR2RTS_QP,
@@ -986,7 +1146,7 @@ static struct mlx4_cmd_info cmd_info[] = {
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = mlx4_GEN_QP_wrapper
+		.wrapper = mlx4_SQERR2RTS_QP_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_2ERR_QP,
@@ -1013,7 +1173,7 @@ static struct mlx4_cmd_info cmd_info[] = {
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = mlx4_GEN_QP_wrapper
+		.wrapper = mlx4_SQD2SQD_QP_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_SQD2RTS_QP,
@@ -1022,7 +1182,7 @@ static struct mlx4_cmd_info cmd_info[] = {
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = mlx4_GEN_QP_wrapper
+		.wrapper = mlx4_SQD2RTS_QP_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_2RST_QP,
@@ -1061,6 +1221,24 @@ static struct mlx4_cmd_info cmd_info[] = {
 		.wrapper = mlx4_GEN_QP_wrapper
 	},
 	{
+		.opcode = MLX4_CMD_CONF_SPECIAL_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL, /* XXX verify: only demux can do this */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_MAD_IFC,
+		.has_inbox = true,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_MAD_IFC_wrapper
+	},
+	{
 		.opcode = MLX4_CMD_QUERY_IF_STAT,
 		.has_inbox = false,
 		.has_outbox = true,
@@ -1340,6 +1518,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
 			goto inform_slave_state;
 
+		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
+
 		/* write the version in the event field */
 		reply |= mlx4_comm_get_version();
 
@@ -1376,19 +1556,21 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 			goto reset_slave;
 		slave_state[slave].vhcr_dma |= param;
 		slave_state[slave].active = true;
+		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
 		break;
 	case MLX4_COMM_CMD_VHCR_POST:
 		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
 		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
 			goto reset_slave;
-		down(&priv->cmd.slave_sem);
+
+		mutex_lock(&priv->cmd.slave_cmd_mutex);
 		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
 			mlx4_err(dev, "Failed processing vhcr for slave:%d,"
 				 " resetting slave.\n", slave);
-			up(&priv->cmd.slave_sem);
+			mutex_unlock(&priv->cmd.slave_cmd_mutex);
 			goto reset_slave;
 		}
-		up(&priv->cmd.slave_sem);
+		mutex_unlock(&priv->cmd.slave_cmd_mutex);
 		break;
 	default:
 		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
@@ -1529,14 +1711,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
 	struct mlx4_slave_state *s_state;
 	int i, j, err, port;
 
-	priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
-					    &priv->mfunc.vhcr_dma,
-					    GFP_KERNEL);
-	if (!priv->mfunc.vhcr) {
-		mlx4_err(dev, "Couldn't allocate vhcr.\n");
-		return -ENOMEM;
-	}
-
 	if (mlx4_is_master(dev))
 		priv->mfunc.comm =
 		ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
@@ -1590,6 +1764,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
 		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
 			  mlx4_master_handle_slave_flr);
 		spin_lock_init(&priv->mfunc.master.slave_state_lock);
+		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
 		priv->mfunc.master.comm_wq =
 			create_singlethread_workqueue("mlx4_comm");
 		if (!priv->mfunc.master.comm_wq)
@@ -1598,7 +1773,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
 		if (mlx4_init_resource_tracker(dev))
 			goto err_thread;
 
-		sema_init(&priv->cmd.slave_sem, 1);
 		err = mlx4_ARM_COMM_CHANNEL(dev);
 		if (err) {
 			mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
@@ -1612,8 +1786,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
 			mlx4_err(dev, "Couldn't sync toggles\n");
 			goto err_comm;
 		}
-
-		sema_init(&priv->cmd.slave_sem, 1);
 	}
 	return 0;
 
@@ -1643,6 +1815,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
 	mutex_init(&priv->cmd.hcr_mutex);
+	mutex_init(&priv->cmd.slave_cmd_mutex);
 	sema_init(&priv->cmd.poll_sem, 1);
 	priv->cmd.use_events = 0;
 	priv->cmd.toggle = 1;
@@ -1659,14 +1832,30 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
 		}
 	}
 
+	if (mlx4_is_mfunc(dev)) {
+		priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+						      &priv->mfunc.vhcr_dma,
+						      GFP_KERNEL);
+		if (!priv->mfunc.vhcr) {
+			mlx4_err(dev, "Couldn't allocate VHCR.\n");
+			goto err_hcr;
+		}
+	}
+
 	priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
 					 MLX4_MAILBOX_SIZE,
 					 MLX4_MAILBOX_SIZE, 0);
 	if (!priv->cmd.pool)
-		goto err_hcr;
+		goto err_vhcr;
 
 	return 0;
 
+err_vhcr:
+	if (mlx4_is_mfunc(dev))
+		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
+	priv->mfunc.vhcr = NULL;
+
 err_hcr:
 	if (!mlx4_is_slave(dev))
 		iounmap(priv->cmd.hcr);
@@ -1689,9 +1878,6 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
 	}
 
 	iounmap(priv->mfunc.comm);
-	dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
-			  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
-	priv->mfunc.vhcr = NULL;
 }
 
 void mlx4_cmd_cleanup(struct mlx4_dev *dev)
@@ -1702,6 +1888,10 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev)
 
 	if (!mlx4_is_slave(dev))
 		iounmap(priv->cmd.hcr);
+	if (mlx4_is_mfunc(dev))
+		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
+	priv->mfunc.vhcr = NULL;
 }
 
 /*
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 99a04648fab0..51c764901ad2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -164,13 +164,16 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
-	struct mlx4_eqe *s_eqe =
-		&slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
+	struct mlx4_eqe *s_eqe;
+	unsigned long flags;
 
+	spin_lock_irqsave(&slave_eq->event_lock, flags);
+	s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
 	if ((!!(s_eqe->owner & 0x80)) ^
 	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
 		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
 			  "No free EQE on slave events queue\n", slave);
+		spin_unlock_irqrestore(&slave_eq->event_lock, flags);
 		return;
 	}
 
@@ -183,6 +186,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
 
 	queue_work(priv->mfunc.master.comm_wq,
 		   &priv->mfunc.master.slave_event_work);
+	spin_unlock_irqrestore(&slave_eq->event_lock, flags);
 }
 
 static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
@@ -200,6 +204,196 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
 	slave_event(dev, slave, eqe);
 }
 
+int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
+{
+	struct mlx4_eqe eqe;
+
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];
+
+	if (!s_slave->active)
+		return 0;
+
+	memset(&eqe, 0, sizeof eqe);
+
+	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
+	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
+	eqe.event.port_mgmt_change.port = port;
+
+	return mlx4_GEN_EQE(dev, slave, &eqe);
+}
+EXPORT_SYMBOL(mlx4_gen_pkey_eqe);
+
+int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
+{
+	struct mlx4_eqe eqe;
+
+	/*don't send if we don't have the that slave */
+	if (dev->num_vfs < slave)
+		return 0;
+	memset(&eqe, 0, sizeof eqe);
+
+	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
+	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
+	eqe.event.port_mgmt_change.port = port;
+
+	return mlx4_GEN_EQE(dev, slave, &eqe);
+}
+EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);
+
+int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
+				   u8 port_subtype_change)
+{
+	struct mlx4_eqe eqe;
+
+	/*don't send if we don't have the that slave */
+	if (dev->num_vfs < slave)
+		return 0;
+	memset(&eqe, 0, sizeof eqe);
+
+	eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
+	eqe.subtype = port_subtype_change;
+	eqe.event.port_change.port = cpu_to_be32(port << 28);
+
+	mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
+		 port_subtype_change, slave, port);
+	return mlx4_GEN_EQE(dev, slave, &eqe);
+}
+EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);
+
+enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
+	if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) {
+		pr_err("%s: Error: asking for slave:%d, port:%d\n",
+		       __func__, slave, port);
+		return SLAVE_PORT_DOWN;
+	}
+	return s_state[slave].port_state[port];
+}
+EXPORT_SYMBOL(mlx4_get_slave_port_state);
+
+static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
+				     enum slave_port_state state)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
+
+	if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
+		pr_err("%s: Error: asking for slave:%d, port:%d\n",
+		       __func__, slave, port);
+		return -1;
+	}
+	s_state[slave].port_state[port] = state;
+
+	return 0;
+}
+
+static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
+{
+	int i;
+	enum slave_port_gen_event gen_event;
+
+	for (i = 0; i < dev->num_slaves; i++)
+		set_and_calc_slave_port_state(dev, i, port, event, &gen_event);
+}
+/**************************************************************************
+	The function get as input the new event to that port,
+	and according to the prev state change the slave's port state.
+	The events are:
+		MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
+		MLX4_PORT_STATE_DEV_EVENT_PORT_UP
+		MLX4_PORT_STATE_IB_EVENT_GID_VALID
+		MLX4_PORT_STATE_IB_EVENT_GID_INVALID
+***************************************************************************/
+int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
+				  u8 port, int event,
+				  enum slave_port_gen_event *gen_event)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *ctx = NULL;
+	unsigned long flags;
+	int ret = -1;
+	enum slave_port_state cur_state =
+		mlx4_get_slave_port_state(dev, slave, port);
+
+	*gen_event = SLAVE_PORT_GEN_EVENT_NONE;
+
+	if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
+		pr_err("%s: Error: asking for slave:%d, port:%d\n",
+		       __func__, slave, port);
+		return ret;
+	}
+
+	ctx = &priv->mfunc.master.slave_state[slave];
+	spin_lock_irqsave(&ctx->lock, flags);
+
+	mlx4_dbg(dev, "%s: slave: %d, current state: %d new event :%d\n",
+		 __func__, slave, cur_state, event);
+
+	switch (cur_state) {
+	case SLAVE_PORT_DOWN:
+		if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
+			mlx4_set_slave_port_state(dev, slave, port,
+						  SLAVE_PENDING_UP);
+		break;
+	case SLAVE_PENDING_UP:
+		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
+			mlx4_set_slave_port_state(dev, slave, port,
+						  SLAVE_PORT_DOWN);
+		else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
+			mlx4_set_slave_port_state(dev, slave, port,
+						  SLAVE_PORT_UP);
+			*gen_event = SLAVE_PORT_GEN_EVENT_UP;
+		}
+		break;
+	case SLAVE_PORT_UP:
+		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
+			mlx4_set_slave_port_state(dev, slave, port,
+						  SLAVE_PORT_DOWN);
+			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
+		} else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
+				event) {
+			mlx4_set_slave_port_state(dev, slave, port,
+						  SLAVE_PENDING_UP);
+			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
+		}
+		break;
+	default:
+		pr_err("%s: BUG!!! UNKNOWN state: "
+		       "slave:%d, port:%d\n", __func__, slave, port);
+		goto out;
+	}
+	ret = mlx4_get_slave_port_state(dev, slave, port);
+	mlx4_dbg(dev, "%s: slave: %d, current state: %d new event"
+		 " :%d gen_event: %d\n",
+		 __func__, slave, cur_state, event, *gen_event);
+
+out:
+	spin_unlock_irqrestore(&ctx->lock, flags);
+	return ret;
+}
+
+EXPORT_SYMBOL(set_and_calc_slave_port_state);
+
+int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
+{
+	struct mlx4_eqe eqe;
+
+	memset(&eqe, 0, sizeof eqe);
+
+	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
+	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
+	eqe.event.port_mgmt_change.port = port;
+	eqe.event.port_mgmt_change.params.port_info.changed_attr =
+		cpu_to_be32((u32) attr);
+
+	slave_event(dev, ALL_SLAVES, &eqe);
+	return 0;
+}
+EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);
+
 void mlx4_master_handle_slave_flr(struct work_struct *work)
 {
 	struct mlx4_mfunc_master_ctx *master =
@@ -251,6 +445,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 	u32 flr_slave;
 	u8 update_slave_state;
 	int i;
+	enum slave_port_gen_event gen_event;
 
 	while ((eqe = next_eqe_sw(eq))) {
 		/*
@@ -347,35 +542,49 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 		case MLX4_EVENT_TYPE_PORT_CHANGE:
 			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
 			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
-				mlx4_dispatch_event(dev,
-						    MLX4_DEV_EVENT_PORT_DOWN,
+				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
 						    port);
 				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
-				if (mlx4_is_master(dev))
-					/*change the state of all slave's port
-					* to down:*/
-					for (i = 0; i < dev->num_slaves; i++) {
-						mlx4_dbg(dev, "%s: Sending "
-							 "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+				if (!mlx4_is_master(dev))
+					break;
+				for (i = 0; i < dev->num_slaves; i++) {
+					if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
+						if (i == mlx4_master_func_num(dev))
+							continue;
+						mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
 							 " to slave: %d, port:%d\n",
 							 __func__, i, port);
-						if (i == dev->caps.function)
-							continue;
 						mlx4_slave_event(dev, i, eqe);
+					} else {  /* IB port */
+						set_and_calc_slave_port_state(dev, i, port,
+									      MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
+									      &gen_event);
+						/*we can be in pending state, then do not send port_down event*/
+						if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
+							if (i == mlx4_master_func_num(dev))
+								continue;
+							mlx4_slave_event(dev, i, eqe);
+						}
 					}
+				}
 			} else {
-				mlx4_dispatch_event(dev,
-						    MLX4_DEV_EVENT_PORT_UP,
-						    port);
+				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);
+
 				mlx4_priv(dev)->sense.do_sense_port[port] = 0;
 
-				if (mlx4_is_master(dev)) {
+				if (!mlx4_is_master(dev))
+					break;
+				if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
 					for (i = 0; i < dev->num_slaves; i++) {
-						if (i == dev->caps.function)
+						if (i == mlx4_master_func_num(dev))
 							continue;
 						mlx4_slave_event(dev, i, eqe);
 					}
-				}
+				else /* IB port */
+					/* port-up event will be sent to a slave when the
+					 * slave's alias-guid is set. This is done in alias_GUID.c
+					 */
+					set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
 			}
 			break;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index c69648487321..4f30b99324cf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -183,7 +183,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x24
 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x28
 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
-#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0X30
+#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30
 
 #define QUERY_FUNC_CAP_FMR_FLAG			0x80
 #define QUERY_FUNC_CAP_FLAG_RDMA		0x40
@@ -194,21 +194,39 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 #define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET	0x8
 #define QUERY_FUNC_CAP_ETH_PROPS_OFFSET		0xc
 
+#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
+#define QUERY_FUNC_CAP_QP0_PROXY		0x14
+#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
+#define QUERY_FUNC_CAP_QP1_PROXY		0x1c
+
 #define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC	0x40
 #define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN	0x80
 
 #define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
 
 	if (vhcr->op_modifier == 1) {
-		field = vhcr->in_modifier;
-		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
-
 		field = 0;
 		/* ensure force vlan and force mac bits are not set */
 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
 		/* ensure that phy_wqe_gid bit is not set */
 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
 
+		field = vhcr->in_modifier; /* phys-port = logical-port */
+		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+
+		/* size is now the QP number */
+		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
+
+		size += 2;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);
+
+		size = dev->phys_caps.base_proxy_sqpn + 8 * slave + field - 1;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_PROXY);
+
+		size += 2;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
+
 	} else if (vhcr->op_modifier == 0) {
 		/* enable rdma and ethernet interfaces */
 		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA);
@@ -253,99 +271,118 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 	return err;
 }
 
-int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
+			struct mlx4_func_cap *func_cap)
 {
 	struct mlx4_cmd_mailbox *mailbox;
 	u32 *outbox;
-	u8 field;
+	u8 field, op_modifier;
 	u32 size;
-	int i;
 	int err = 0;
 
+	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 
-	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FUNC_CAP,
+	err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier,
+			   MLX4_CMD_QUERY_FUNC_CAP,
 			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 	if (err)
 		goto out;
 
 	outbox = mailbox->buf;
 
-	MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
-	if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
-		mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
-		err = -EPROTONOSUPPORT;
-		goto out;
-	}
-	func_cap->flags = field;
-
-	MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
-	func_cap->num_ports = field;
-
-	MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
-	func_cap->pf_context_behaviour = size;
-
-	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
-	func_cap->qp_quota = size & 0xFFFFFF;
-
-	MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
-	func_cap->srq_quota = size & 0xFFFFFF;
-
-	MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
-	func_cap->cq_quota = size & 0xFFFFFF;
-
-	MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
-	func_cap->max_eq = size & 0xFFFFFF;
-
-	MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
-	func_cap->reserved_eq = size & 0xFFFFFF;
-
-	MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
-	func_cap->mpt_quota = size & 0xFFFFFF;
-
-	MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
-	func_cap->mtt_quota = size & 0xFFFFFF;
-
-	MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
-	func_cap->mcg_quota = size & 0xFFFFFF;
-
-	for (i = 1; i <= func_cap->num_ports; ++i) {
-		err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1,
-				   MLX4_CMD_QUERY_FUNC_CAP,
-				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
-		if (err)
-			goto out;
-
-		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
-			MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
-			if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
-				mlx4_err(dev, "VLAN is enforced on this port\n");
-				err = -EPROTONOSUPPORT;
-				goto out;
-			}
-
-			if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
-				mlx4_err(dev, "Force mac is enabled on this port\n");
-				err = -EPROTONOSUPPORT;
-				goto out;
-			}
-		} else if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) {
-			MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
-			if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
-				mlx4_err(dev, "phy_wqe_gid is "
-					 "enforced on this ib port\n");
-				err = -EPROTONOSUPPORT;
-				goto out;
-			}
-		}
-
-		MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
-		func_cap->physical_port[i] = field;
+	if (!op_modifier) {
+		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
+		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
+			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
+			err = -EPROTONOSUPPORT;
+			goto out;
+		}
+		func_cap->flags = field;
+
+		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+		func_cap->num_ports = field;
+
+		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+		func_cap->pf_context_behaviour = size;
+
+		MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+		func_cap->qp_quota = size & 0xFFFFFF;
+
+		MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+		func_cap->srq_quota = size & 0xFFFFFF;
+
+		MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+		func_cap->cq_quota = size & 0xFFFFFF;
+
+		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+		func_cap->max_eq = size & 0xFFFFFF;
+
+		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+		func_cap->reserved_eq = size & 0xFFFFFF;
+
+		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+		func_cap->mpt_quota = size & 0xFFFFFF;
+
+		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+		func_cap->mtt_quota = size & 0xFFFFFF;
+
+		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+		func_cap->mcg_quota = size & 0xFFFFFF;
+		goto out;
+	}
+
+	/* logical port query */
+	if (gen_or_port > dev->caps.num_ports) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
+		MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+		if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
+			mlx4_err(dev, "VLAN is enforced on this port\n");
+			err = -EPROTONOSUPPORT;
+			goto out;
+		}
+
+		if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
+			mlx4_err(dev, "Force mac is enabled on this port\n");
+			err = -EPROTONOSUPPORT;
+			goto out;
+		}
+	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
+		MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
+		if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
+			mlx4_err(dev, "phy_wqe_gid is "
+				 "enforced on this ib port\n");
+			err = -EPROTONOSUPPORT;
+			goto out;
+		}
+	}
+
+	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+	func_cap->physical_port = field;
+	if (func_cap->physical_port != gen_or_port) {
+		err = -ENOSYS;
+		goto out;
 	}
 
+	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
+	func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;
+
+	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
+	func_cap->qp0_proxy_qpn = size & 0xFFFFFF;
+
+	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
+	func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;
+
+	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
+	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;
+
 	/* All other resources are allocated by the master, but we still report
 	 * 'num' and 'reserved' capabilities as follows:
 	 * - num remains the maximum resource index
@@ -559,7 +596,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev_cap->max_pds = 1 << (field & 0x3f);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
 	dev_cap->reserved_xrcds = field >> 4;
-	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
 	dev_cap->max_xrcds = 1 << (field & 0x1f);
 
 	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
@@ -715,6 +752,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
 			       struct mlx4_cmd_mailbox *outbox,
 			       struct mlx4_cmd_info *cmd)
 {
+	u64 flags;
 	int err = 0;
 	u8 field;
 
@@ -723,6 +761,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
 	if (err)
 		return err;
 
+	/* add port mng change event capability unconditionally to slaves */
+	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
+	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
+	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
+
 	/* For guests, report Blueflame disabled */
 	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
 	field &= 0x7f;
@@ -1345,6 +1388,19 @@ out:
 	return err;
 }
 
+/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
+ * and real QP0 are active, so that the paravirtualized QP0 is ready
+ * to operate */
+static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	/* irrelevant if not infiniband */
+	if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
+	    priv->mfunc.master.qp0_state[port].qp0_active)
+		return 1;
+	return 0;
+}
+
 int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			   struct mlx4_vhcr *vhcr,
 			   struct mlx4_cmd_mailbox *inbox,
@@ -1358,17 +1414,29 @@ int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
 	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
 		return 0;
 
-	if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
-		return -ENODEV;
-
-	/* Enable port only if it was previously disabled */
-	if (!priv->mfunc.master.init_port_ref[port]) {
-		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
-			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
-		if (err)
-			return err;
+	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
+		/* Enable port only if it was previously disabled */
+		if (!priv->mfunc.master.init_port_ref[port]) {
+			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+			if (err)
+				return err;
+		}
+		priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
+	} else {
+		if (slave == mlx4_master_func_num(dev)) {
+			if (check_qp0_state(dev, slave, port) &&
+			    !priv->mfunc.master.qp0_state[port].port_active) {
+				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+				if (err)
+					return err;
+				priv->mfunc.master.qp0_state[port].port_active = 1;
+				priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
+			}
+		} else
+			priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
 	}
-	priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
 	++priv->mfunc.master.init_port_ref[port];
 	return 0;
 }
@@ -1441,15 +1509,29 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
 	    (1 << port)))
 		return 0;
 
-	if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
-		return -ENODEV;
-	if (priv->mfunc.master.init_port_ref[port] == 1) {
-		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
-			       MLX4_CMD_NATIVE);
-		if (err)
-			return err;
+	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
+		if (priv->mfunc.master.init_port_ref[port] == 1) {
+			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
+				       1000, MLX4_CMD_NATIVE);
+			if (err)
+				return err;
+		}
+		priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+	} else {
+		/* infiniband port */
+		if (slave == mlx4_master_func_num(dev)) {
+			if (!priv->mfunc.master.qp0_state[port].qp0_active &&
+			    priv->mfunc.master.qp0_state[port].port_active) {
+				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
+					       1000, MLX4_CMD_NATIVE);
+				if (err)
+					return err;
+				priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+				priv->mfunc.master.qp0_state[port].port_active = 0;
+			}
+		} else
+			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
 	}
-	priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
 	--priv->mfunc.master.init_port_ref[port];
 	return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 83fcbbf1b169..85abe9c11a22 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -134,8 +134,12 @@ struct mlx4_func_cap {
 	int	max_eq;
 	int	reserved_eq;
 	int	mcg_quota;
-	u8	physical_port[MLX4_MAX_PORTS + 1];
-	u8	port_flags[MLX4_MAX_PORTS + 1];
+	u32	qp0_tunnel_qpn;
+	u32	qp0_proxy_qpn;
+	u32	qp1_tunnel_qpn;
+	u32	qp1_proxy_qpn;
+	u8	physical_port;
+	u8	port_flags;
 };
 
 struct mlx4_adapter {
@@ -192,7 +196,8 @@ struct mlx4_set_ib_param {
 };
 
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
-int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap);
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
+			struct mlx4_func_cap *func_cap);
 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 				struct mlx4_vhcr *vhcr,
 				struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index dd6ea942625c..80df2ab0177c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -95,8 +95,6 @@ MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
 					 " Not in use with device managed"
 					 " flow steering");
 
-#define MLX4_VF					(1 << 0)
-
 #define HCA_GLOBAL_CAP_MASK		0
 #define PF_CONTEXT_BEHAVIOUR_MASK	0
 
@@ -299,9 +297,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	mlx4_dbg(dev, "Steering mode is: %s\n",
 		 mlx4_steering_mode_str(dev->caps.steering_mode));
 
-	/* Sense port always allowed on supported devices for ConnectX1 and 2 */
-	if (dev->pdev->device != 0x1003)
+	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
+	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
 		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
+	/* Don't do sense port on multifunction devices (for now at least) */
+	if (mlx4_is_mfunc(dev))
+		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
 
 	dev->caps.log_num_macs = log_num_mac;
 	dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
@@ -384,6 +385,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
 
+	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
 	return 0;
 }
 /*The function checks if there are live vf, return the num of them*/
@@ -409,20 +411,54 @@ static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
 {
 	u32 qk = MLX4_RESERVED_QKEY_BASE;
-	if (qpn >= dev->caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
-	    qpn < dev->caps.sqp_start)
+
+	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
+	    qpn < dev->phys_caps.base_proxy_sqpn)
 		return -EINVAL;
 
-	if (qpn >= dev->caps.base_tunnel_sqpn)
+	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
 		/* tunnel qp */
-		qk += qpn - dev->caps.base_tunnel_sqpn;
+		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
 	else
-		qk += qpn - dev->caps.sqp_start;
+		qk += qpn - dev->phys_caps.base_proxy_sqpn;
 	*qkey = qk;
 	return 0;
 }
 EXPORT_SYMBOL(mlx4_get_parav_qkey);
 
+void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
+{
+	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
+
+	if (!mlx4_is_master(dev))
+		return;
+
+	priv->virt2phys_pkey[slave][port - 1][i] = val;
+}
+EXPORT_SYMBOL(mlx4_sync_pkey_table);
+
+void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
+{
+	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
+
+	if (!mlx4_is_master(dev))
+		return;
+
+	priv->slave_node_guids[slave] = guid;
+}
+EXPORT_SYMBOL(mlx4_put_slave_node_guid);
+
+__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
+
+	if (!mlx4_is_master(dev))
+		return 0;
+
+	return priv->slave_node_guids[slave];
+}
+EXPORT_SYMBOL(mlx4_get_slave_node_guid);
+
 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -493,9 +529,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	}
 
 	memset(&func_cap, 0, sizeof(func_cap));
-	err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
+	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
 	if (err) {
-		mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
+		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n",
+			 err);
 		return err;
 	}
 
@@ -523,12 +560,33 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 		return -ENODEV;
 	}
 
+	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+
+	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
+	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
+		err = -ENOMEM;
+		goto err_mem;
+	}
+
 	for (i = 1; i <= dev->caps.num_ports; ++i) {
+		err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
+		if (err) {
+			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for"
+				 " port %d, aborting (%d).\n", i, err);
+			goto err_mem;
+		}
+		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
+		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
+		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
+		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
 		dev->caps.port_mask[i] = dev->caps.port_type[i];
 		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
 						    &dev->caps.gid_table_len[i],
 						    &dev->caps.pkey_table_len[i]))
-			return -ENODEV;
+			goto err_mem;
 	}
 
 	if (dev->caps.uar_page_size * (dev->caps.num_uars -
@@ -538,10 +596,20 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 			 "PCI resource 2 size of 0x%llx, aborting.\n",
 			 dev->caps.uar_page_size * dev->caps.num_uars,
 			 (unsigned long long) pci_resource_len(dev->pdev, 2));
-		return -ENODEV;
+		goto err_mem;
 	}
 
 	return 0;
+
+err_mem:
+	kfree(dev->caps.qp0_tunnel);
+	kfree(dev->caps.qp0_proxy);
+	kfree(dev->caps.qp1_tunnel);
+	kfree(dev->caps.qp1_proxy);
+	dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
+		dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
+
+	return err;
 }
 
 /*
@@ -1092,10 +1160,10 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
1092{ 1160{
1093 struct mlx4_priv *priv = mlx4_priv(dev); 1161 struct mlx4_priv *priv = mlx4_priv(dev);
1094 1162
1095 down(&priv->cmd.slave_sem); 1163 mutex_lock(&priv->cmd.slave_cmd_mutex);
1096 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) 1164 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
1097 mlx4_warn(dev, "Failed to close slave function.\n"); 1165 mlx4_warn(dev, "Failed to close slave function.\n");
1098 up(&priv->cmd.slave_sem); 1166 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1099} 1167}
1100 1168
1101static int map_bf_area(struct mlx4_dev *dev) 1169static int map_bf_area(struct mlx4_dev *dev)
@@ -1147,7 +1215,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1147 u32 slave_read; 1215 u32 slave_read;
1148 u32 cmd_channel_ver; 1216 u32 cmd_channel_ver;
1149 1217
1150 down(&priv->cmd.slave_sem); 1218 mutex_lock(&priv->cmd.slave_cmd_mutex);
1151 priv->cmd.max_cmds = 1; 1219 priv->cmd.max_cmds = 1;
1152 mlx4_warn(dev, "Sending reset\n"); 1220 mlx4_warn(dev, "Sending reset\n");
1153 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 1221 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
@@ -1196,12 +1264,13 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1196 goto err; 1264 goto err;
1197 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME)) 1265 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
1198 goto err; 1266 goto err;
1199 up(&priv->cmd.slave_sem); 1267
1268 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1200 return 0; 1269 return 0;
1201 1270
1202err: 1271err:
1203 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0); 1272 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
1204 up(&priv->cmd.slave_sem); 1273 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1205 return -EIO; 1274 return -EIO;
1206} 1275}
1207 1276
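
The slave_sem to slave_cmd_mutex conversion in the two hunks above reflects that the semaphore was only ever used for mutual exclusion around the slave command channel; a mutex documents single-owner locking and gets lockdep coverage that a semaphore does not. The same pattern as a user-space sketch with pthreads (the channel command is a hypothetical stand-in):

#include <pthread.h>

int comm_cmd_reset(void);	/* hypothetical channel command */

static pthread_mutex_t slave_cmd_mutex = PTHREAD_MUTEX_INITIALIZER;

int slave_exit(void)
{
	int ret;

	/* One owner at a time on the command channel; a mutex (unlike a
	 * counting semaphore) states that intent and can detect misuse. */
	pthread_mutex_lock(&slave_cmd_mutex);
	ret = comm_cmd_reset();
	pthread_mutex_unlock(&slave_cmd_mutex);
	return ret;
}
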
@@ -1848,7 +1917,7 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
1848 iounmap(owner); 1917 iounmap(owner);
1849} 1918}
1850 1919
1851static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 1920static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
1852{ 1921{
1853 struct mlx4_priv *priv; 1922 struct mlx4_priv *priv;
1854 struct mlx4_dev *dev; 1923 struct mlx4_dev *dev;
@@ -1871,12 +1940,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1871 /* 1940 /*
1872 * Check for BARs. 1941 * Check for BARs.
1873 */ 1942 */
1874 if (((id == NULL) || !(id->driver_data & MLX4_VF)) && 1943 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
1875 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 1944 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1876 dev_err(&pdev->dev, "Missing DCS, aborting." 1945 dev_err(&pdev->dev, "Missing DCS, aborting."
1877 "(id == 0X%p, id->driver_data: 0x%lx," 1946 "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
1878 " pci_resource_flags(pdev, 0):0x%lx)\n", id, 1947 pci_dev_data, pci_resource_flags(pdev, 0));
1879 id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
1880 err = -ENODEV; 1948 err = -ENODEV;
1881 goto err_disable_pdev; 1949 goto err_disable_pdev;
1882 } 1950 }
@@ -1941,7 +2009,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1941 2009
1942 dev->rev_id = pdev->revision; 2010 dev->rev_id = pdev->revision;
1943 /* Detect if this device is a virtual function */ 2011 /* Detect if this device is a virtual function */
1944 if (id && id->driver_data & MLX4_VF) { 2012 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
1945 /* When acting as pf, we normally skip vfs unless explicitly 2013 /* When acting as pf, we normally skip vfs unless explicitly
1946 * requested to probe them. */ 2014 * requested to probe them. */
1947 if (num_vfs && extended_func_num(pdev) > probe_vf) { 2015 if (num_vfs && extended_func_num(pdev) > probe_vf) {
@@ -1969,12 +2037,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1969 } 2037 }
1970 2038
1971 if (num_vfs) { 2039 if (num_vfs) {
1972 mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs); 2040 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
1973 err = pci_enable_sriov(pdev, num_vfs); 2041 err = pci_enable_sriov(pdev, num_vfs);
1974 if (err) { 2042 if (err) {
1975 mlx4_err(dev, "Failed to enable sriov," 2043 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
1976 "continuing without sriov enabled" 2044 err);
1977 " (err = %d).\n", err);
1978 err = 0; 2045 err = 0;
1979 } else { 2046 } else {
1980 mlx4_warn(dev, "Running in master mode\n"); 2047 mlx4_warn(dev, "Running in master mode\n");
@@ -2089,6 +2156,7 @@ slave_start:
2089 mlx4_sense_init(dev); 2156 mlx4_sense_init(dev);
2090 mlx4_start_sense(dev); 2157 mlx4_start_sense(dev);
2091 2158
2159 priv->pci_dev_data = pci_dev_data;
2092 pci_set_drvdata(pdev, dev); 2160 pci_set_drvdata(pdev, dev);
2093 2161
2094 return 0; 2162 return 0;
@@ -2158,7 +2226,7 @@ static int __devinit mlx4_init_one(struct pci_dev *pdev,
2158{ 2226{
2159 printk_once(KERN_INFO "%s", mlx4_version); 2227 printk_once(KERN_INFO "%s", mlx4_version);
2160 2228
2161 return __mlx4_init_one(pdev, id); 2229 return __mlx4_init_one(pdev, id->driver_data);
2162} 2230}
2163 2231
2164static void mlx4_remove_one(struct pci_dev *pdev) 2232static void mlx4_remove_one(struct pci_dev *pdev)
@@ -2217,12 +2285,18 @@ static void mlx4_remove_one(struct pci_dev *pdev)
2217 if (dev->flags & MLX4_FLAG_MSI_X) 2285 if (dev->flags & MLX4_FLAG_MSI_X)
2218 pci_disable_msix(pdev); 2286 pci_disable_msix(pdev);
2219 if (dev->flags & MLX4_FLAG_SRIOV) { 2287 if (dev->flags & MLX4_FLAG_SRIOV) {
2220 mlx4_warn(dev, "Disabling sriov\n"); 2288 mlx4_warn(dev, "Disabling SR-IOV\n");
2221 pci_disable_sriov(pdev); 2289 pci_disable_sriov(pdev);
2222 } 2290 }
2223 2291
2224 if (!mlx4_is_slave(dev)) 2292 if (!mlx4_is_slave(dev))
2225 mlx4_free_ownership(dev); 2293 mlx4_free_ownership(dev);
2294
2295 kfree(dev->caps.qp0_tunnel);
2296 kfree(dev->caps.qp0_proxy);
2297 kfree(dev->caps.qp1_tunnel);
2298 kfree(dev->caps.qp1_proxy);
2299
2226 kfree(priv); 2300 kfree(priv);
2227 pci_release_regions(pdev); 2301 pci_release_regions(pdev);
2228 pci_disable_device(pdev); 2302 pci_disable_device(pdev);
@@ -2232,41 +2306,46 @@ static void mlx4_remove_one(struct pci_dev *pdev)
2232 2306
2233int mlx4_restart_one(struct pci_dev *pdev) 2307int mlx4_restart_one(struct pci_dev *pdev)
2234{ 2308{
2309 struct mlx4_dev *dev = pci_get_drvdata(pdev);
2310 struct mlx4_priv *priv = mlx4_priv(dev);
2311 int pci_dev_data;
2312
2313 pci_dev_data = priv->pci_dev_data;
2235 mlx4_remove_one(pdev); 2314 mlx4_remove_one(pdev);
2236 return __mlx4_init_one(pdev, NULL); 2315 return __mlx4_init_one(pdev, pci_dev_data);
2237} 2316}
2238 2317
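
mlx4_restart_one() used to pass a NULL id, losing the PCI table flags across a restart; probe now stashes id->driver_data in mlx4_priv so the restart path can replay it. The stash-and-replay flow, reduced to a standalone sketch (types hypothetical):

struct priv {
	int pci_dev_data;	/* stashed copy of id->driver_data */
};

static void remove_one(struct priv *p)
{
	(void)p;		/* teardown elided */
}

static int init_one(struct priv *p, int pci_dev_data)
{
	/* ... bring the device up ... */
	p->pci_dev_data = pci_dev_data;	/* stash for later restarts */
	return 0;
}

static int restart_one(struct priv *p)
{
	int saved = p->pci_dev_data;	/* capture before teardown */

	remove_one(p);
	return init_one(p, saved);
}
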
2239static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = { 2318static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
2240 /* MT25408 "Hermon" SDR */ 2319 /* MT25408 "Hermon" SDR */
2241 { PCI_VDEVICE(MELLANOX, 0x6340), 0 }, 2320 { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2242 /* MT25408 "Hermon" DDR */ 2321 /* MT25408 "Hermon" DDR */
2243 { PCI_VDEVICE(MELLANOX, 0x634a), 0 }, 2322 { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2244 /* MT25408 "Hermon" QDR */ 2323 /* MT25408 "Hermon" QDR */
2245 { PCI_VDEVICE(MELLANOX, 0x6354), 0 }, 2324 { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2246 /* MT25408 "Hermon" DDR PCIe gen2 */ 2325 /* MT25408 "Hermon" DDR PCIe gen2 */
2247 { PCI_VDEVICE(MELLANOX, 0x6732), 0 }, 2326 { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2248 /* MT25408 "Hermon" QDR PCIe gen2 */ 2327 /* MT25408 "Hermon" QDR PCIe gen2 */
2249 { PCI_VDEVICE(MELLANOX, 0x673c), 0 }, 2328 { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2250 /* MT25408 "Hermon" EN 10GigE */ 2329 /* MT25408 "Hermon" EN 10GigE */
2251 { PCI_VDEVICE(MELLANOX, 0x6368), 0 }, 2330 { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2252 /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ 2331 /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
2253 { PCI_VDEVICE(MELLANOX, 0x6750), 0 }, 2332 { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2254 /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 2333 /* MT25458 ConnectX EN 10GBASE-T 10GigE */
2255 { PCI_VDEVICE(MELLANOX, 0x6372), 0 }, 2334 { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2256 /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 2335 /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
2257 { PCI_VDEVICE(MELLANOX, 0x675a), 0 }, 2336 { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2258 /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 2337 /* MT26468 ConnectX EN 10GigE PCIe gen2*/
2259 { PCI_VDEVICE(MELLANOX, 0x6764), 0 }, 2338 { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2260 /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 2339 /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
2261 { PCI_VDEVICE(MELLANOX, 0x6746), 0 }, 2340 { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2262 /* MT26478 ConnectX2 40GigE PCIe gen2 */ 2341 /* MT26478 ConnectX2 40GigE PCIe gen2 */
2263 { PCI_VDEVICE(MELLANOX, 0x676e), 0 }, 2342 { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2264 /* MT25400 Family [ConnectX-2 Virtual Function] */ 2343 /* MT25400 Family [ConnectX-2 Virtual Function] */
2265 { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF }, 2344 { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
2266 /* MT27500 Family [ConnectX-3] */ 2345 /* MT27500 Family [ConnectX-3] */
2267 { PCI_VDEVICE(MELLANOX, 0x1003), 0 }, 2346 { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
2268 /* MT27500 Family [ConnectX-3 Virtual Function] */ 2347 /* MT27500 Family [ConnectX-3 Virtual Function] */
2269 { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF }, 2348 { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
2270 { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */ 2349 { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
2271 { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */ 2350 { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
2272 { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */ 2351 { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
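
driver_data is now a small flag word rather than the old MLX4_VF boolean, so a single table entry can both mark a device as a VF and force SENSE_PORT on the older ConnectX-1/-2 HCAs. A sketch of how such flag bits are tested (names abbreviated from the enum added to mlx4.h):

enum {
	DEV_IS_VF	= 1 << 0,	/* mirrors MLX4_PCI_DEV_IS_VF */
	DEV_FORCE_SENSE	= 1 << 1,	/* mirrors MLX4_PCI_DEV_FORCE_SENSE_PORT */
};

static void apply_dev_flags(unsigned long driver_data)
{
	if (driver_data & DEV_IS_VF) {
		/* virtual function: skip the PF-only DCS BAR check */
	}
	if (driver_data & DEV_FORCE_SENSE) {
		/* older ConnectX-1/-2: keep port sensing enabled */
	}
}
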
@@ -2295,7 +2374,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
2295 2374
2296static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) 2375static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
2297{ 2376{
2298 int ret = __mlx4_init_one(pdev, NULL); 2377 int ret = __mlx4_init_one(pdev, 0);
2299 2378
2300 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 2379 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
2301} 2380}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index dba69d98734a..1cf42036d7bb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -452,6 +452,7 @@ struct mlx4_slave_state {
452 /*initialized via the kzalloc*/ 452 /*initialized via the kzalloc*/
453 u8 is_slave_going_down; 453 u8 is_slave_going_down;
454 u32 cookie; 454 u32 cookie;
455 enum slave_port_state port_state[MLX4_MAX_PORTS + 1];
455}; 456};
456 457
457struct slave_list { 458struct slave_list {
@@ -472,6 +473,7 @@ struct mlx4_slave_event_eq {
472 u32 eqn; 473 u32 eqn;
473 u32 cons; 474 u32 cons;
474 u32 prod; 475 u32 prod;
476 spinlock_t event_lock;
475 struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE]; 477 struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
476}; 478};
477 479
@@ -511,9 +513,9 @@ struct mlx4_cmd {
511 struct pci_pool *pool; 513 struct pci_pool *pool;
512 void __iomem *hcr; 514 void __iomem *hcr;
513 struct mutex hcr_mutex; 515 struct mutex hcr_mutex;
516 struct mutex slave_cmd_mutex;
514 struct semaphore poll_sem; 517 struct semaphore poll_sem;
515 struct semaphore event_sem; 518 struct semaphore event_sem;
516 struct semaphore slave_sem;
517 int max_cmds; 519 int max_cmds;
518 spinlock_t context_lock; 520 spinlock_t context_lock;
519 int free_head; 521 int free_head;
@@ -766,6 +768,11 @@ struct _rule_hw {
766 }; 768 };
767}; 769};
768 770
771enum {
772 MLX4_PCI_DEV_IS_VF = 1 << 0,
773 MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1,
774};
775
769struct mlx4_priv { 776struct mlx4_priv {
770 struct mlx4_dev dev; 777 struct mlx4_dev dev;
771 778
@@ -773,6 +780,8 @@ struct mlx4_priv {
773 struct list_head ctx_list; 780 struct list_head ctx_list;
774 spinlock_t ctx_lock; 781 spinlock_t ctx_lock;
775 782
783 int pci_dev_data;
784
776 struct list_head pgdir_list; 785 struct list_head pgdir_list;
777 struct mutex pgdir_mutex; 786 struct mutex pgdir_mutex;
778 787
@@ -807,6 +816,9 @@ struct mlx4_priv {
807 struct io_mapping *bf_mapping; 816 struct io_mapping *bf_mapping;
808 int reserved_mtts; 817 int reserved_mtts;
809 int fs_hash_mode; 818 int fs_hash_mode;
819 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
820 __be64 slave_node_guids[MLX4_MFUNC_MAX];
821
810}; 822};
811 823
812static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) 824static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
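
The new virt2phys_pkey table is what update_pkey_index() in resource_tracker.c (further below) consults: each slave sees a virtual P_Key index space per port, and the master translates it to the physical slot. The lookup itself is a plain three-dimensional array access; a sketch with illustrative (not the driver's) table sizes:

#define FUNCS		2	/* illustrative; the driver uses MLX4_MFUNC_MAX */
#define PORTS		2
#define PORT_PKEYS	128

static unsigned char virt2phys_pkey[FUNCS][PORTS][PORT_PKEYS];

/* Translate a guest's virtual P_Key index to the physical slot.
 * Ports are 1-based in mlx4, hence the port - 1. */
static unsigned char phys_pkey_index(int slave, int port, unsigned char virt)
{
	return virt2phys_pkey[slave][port - 1][virt];
}
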
@@ -1011,16 +1023,61 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1011 struct mlx4_cmd_mailbox *inbox, 1023 struct mlx4_cmd_mailbox *inbox,
1012 struct mlx4_cmd_mailbox *outbox, 1024 struct mlx4_cmd_mailbox *outbox,
1013 struct mlx4_cmd_info *cmd); 1025 struct mlx4_cmd_info *cmd);
1026int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1027 struct mlx4_vhcr *vhcr,
1028 struct mlx4_cmd_mailbox *inbox,
1029 struct mlx4_cmd_mailbox *outbox,
1030 struct mlx4_cmd_info *cmd);
1014int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, 1031int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
1015 struct mlx4_vhcr *vhcr, 1032 struct mlx4_vhcr *vhcr,
1016 struct mlx4_cmd_mailbox *inbox, 1033 struct mlx4_cmd_mailbox *inbox,
1017 struct mlx4_cmd_mailbox *outbox, 1034 struct mlx4_cmd_mailbox *outbox,
1018 struct mlx4_cmd_info *cmd); 1035 struct mlx4_cmd_info *cmd);
1036int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
1037 struct mlx4_vhcr *vhcr,
1038 struct mlx4_cmd_mailbox *inbox,
1039 struct mlx4_cmd_mailbox *outbox,
1040 struct mlx4_cmd_info *cmd);
1041int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
1042 struct mlx4_vhcr *vhcr,
1043 struct mlx4_cmd_mailbox *inbox,
1044 struct mlx4_cmd_mailbox *outbox,
1045 struct mlx4_cmd_info *cmd);
1046int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
1047 struct mlx4_vhcr *vhcr,
1048 struct mlx4_cmd_mailbox *inbox,
1049 struct mlx4_cmd_mailbox *outbox,
1050 struct mlx4_cmd_info *cmd);
1051int mlx4_2ERR_QP_wrapper(struct mlx4_dev *dev, int slave,
1052 struct mlx4_vhcr *vhcr,
1053 struct mlx4_cmd_mailbox *inbox,
1054 struct mlx4_cmd_mailbox *outbox,
1055 struct mlx4_cmd_info *cmd);
1056int mlx4_RTS2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
1057 struct mlx4_vhcr *vhcr,
1058 struct mlx4_cmd_mailbox *inbox,
1059 struct mlx4_cmd_mailbox *outbox,
1060 struct mlx4_cmd_info *cmd);
1061int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
1062 struct mlx4_vhcr *vhcr,
1063 struct mlx4_cmd_mailbox *inbox,
1064 struct mlx4_cmd_mailbox *outbox,
1065 struct mlx4_cmd_info *cmd);
1066int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
1067 struct mlx4_vhcr *vhcr,
1068 struct mlx4_cmd_mailbox *inbox,
1069 struct mlx4_cmd_mailbox *outbox,
1070 struct mlx4_cmd_info *cmd);
1019int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, 1071int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
1020 struct mlx4_vhcr *vhcr, 1072 struct mlx4_vhcr *vhcr,
1021 struct mlx4_cmd_mailbox *inbox, 1073 struct mlx4_cmd_mailbox *inbox,
1022 struct mlx4_cmd_mailbox *outbox, 1074 struct mlx4_cmd_mailbox *outbox,
1023 struct mlx4_cmd_info *cmd); 1075 struct mlx4_cmd_info *cmd);
1076int mlx4_QUERY_QP_wrapper(struct mlx4_dev *dev, int slave,
1077 struct mlx4_vhcr *vhcr,
1078 struct mlx4_cmd_mailbox *inbox,
1079 struct mlx4_cmd_mailbox *outbox,
1080 struct mlx4_cmd_info *cmd);
1024 1081
1025int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe); 1082int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
1026 1083
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index e36dd0f2fa73..4c51b05efa28 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -732,6 +732,16 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
732 new_cap_mask = ((__be32 *) inbox->buf)[1]; 732 new_cap_mask = ((__be32 *) inbox->buf)[1];
733 } 733 }
734 734
735 /* slave may not set the IS_SM capability for the port */
736 if (slave != mlx4_master_func_num(dev) &&
737 (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
738 return -EINVAL;
739
740 /* No DEV_MGMT in multifunc mode */
741 if (mlx4_is_mfunc(dev) &&
742 (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
743 return -EINVAL;
744
735 agg_cap_mask = 0; 745 agg_cap_mask = 0;
736 slave_cap_mask = 746 slave_cap_mask =
737 priv->mfunc.master.slave_state[slave].ib_cap_mask[port]; 747 priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
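
Both new checks in mlx4_common_set_port() reduce to bit tests on the requested capability mask after byte-swapping it to host order: only the master function may advertise IS_SM, and DEV_MGMT support is refused outright once the device is multi-function. A standalone sketch (bit positions illustrative):

#include <arpa/inet.h>	/* ntohl() standing in for be32_to_cpu() */

#define CAP_IS_SM	(1u << 1)	/* illustrative bit positions */
#define CAP_DEV_MGMT	(1u << 19)

static int check_cap_mask(unsigned int new_cap_mask_be, int slave,
			  int master_func, int is_mfunc)
{
	unsigned int mask = ntohl(new_cap_mask_be);

	if (slave != master_func && (mask & CAP_IS_SM))
		return -1;	/* only the master may claim to be an SM */
	if (is_mfunc && (mask & CAP_DEV_MGMT))
		return -1;	/* no device management in multi-function mode */
	return 0;
}
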
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index fb2b36759cbf..81e2abe07bbb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -67,10 +67,18 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
67 complete(&qp->free); 67 complete(&qp->free);
68} 68}
69 69
70static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp) 70/* used for INIT/CLOSE port logic */
71static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
71{ 72{
72 return qp->qpn >= dev->caps.sqp_start && 73 /* this procedure is called after we already know we are on the master */
73 qp->qpn <= dev->caps.sqp_start + 1; 74 /* qp0 is either the proxy qp0, or the real qp0 */
75 u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);
76 *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;
77
78 *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
79 qp->qpn <= dev->phys_caps.base_sqpn + 1;
80
81 return *real_qp0 || *proxy_qp0;
74} 82}
75 83
76static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 84static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
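
With SR-IOV there are now two QP0 ranges to recognize: the two real QP0s starting at base_sqpn, and the master's own proxy pair at base_proxy_sqpn + 8 * master_func. The predicate in isolation, assuming the layout set up in mlx4_init_qp_table() below:

struct phys_caps {
	unsigned base_sqpn;		/* first of the 8 real SQPs */
	unsigned base_proxy_sqpn;	/* first proxy SQP (8 per function) */
};

static int is_master_qp0(const struct phys_caps *pc, unsigned qpn,
			 int master_func, int *real_qp0, int *proxy_qp0)
{
	unsigned pf_proxy = pc->base_proxy_sqpn + 8 * master_func;

	*proxy_qp0 = qpn >= pf_proxy && qpn <= pf_proxy + 1;
	*real_qp0  = qpn >= pc->base_sqpn && qpn <= pc->base_sqpn + 1;
	return *real_qp0 || *proxy_qp0;
}
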
@@ -122,6 +130,8 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
122 struct mlx4_priv *priv = mlx4_priv(dev); 130 struct mlx4_priv *priv = mlx4_priv(dev);
123 struct mlx4_cmd_mailbox *mailbox; 131 struct mlx4_cmd_mailbox *mailbox;
124 int ret = 0; 132 int ret = 0;
133 int real_qp0 = 0;
134 int proxy_qp0 = 0;
125 u8 port; 135 u8 port;
126 136
127 if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE || 137 if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
@@ -133,9 +143,12 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
133 MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native); 143 MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
134 if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR && 144 if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
135 cur_state != MLX4_QP_STATE_RST && 145 cur_state != MLX4_QP_STATE_RST &&
136 is_qp0(dev, qp)) { 146 is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
137 port = (qp->qpn & 1) + 1; 147 port = (qp->qpn & 1) + 1;
138 priv->mfunc.master.qp0_state[port].qp0_active = 0; 148 if (proxy_qp0)
149 priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
150 else
151 priv->mfunc.master.qp0_state[port].qp0_active = 0;
139 } 152 }
140 return ret; 153 return ret;
141 } 154 }
@@ -162,6 +175,23 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
162 new_state == MLX4_QP_STATE_RST ? 2 : 0, 175 new_state == MLX4_QP_STATE_RST ? 2 : 0,
163 op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native); 176 op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
164 177
178 if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
179 port = (qp->qpn & 1) + 1;
180 if (cur_state != MLX4_QP_STATE_ERR &&
181 cur_state != MLX4_QP_STATE_RST &&
182 new_state == MLX4_QP_STATE_ERR) {
183 if (proxy_qp0)
184 priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
185 else
186 priv->mfunc.master.qp0_state[port].qp0_active = 0;
187 } else if (new_state == MLX4_QP_STATE_RTR) {
188 if (proxy_qp0)
189 priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
190 else
191 priv->mfunc.master.qp0_state[port].qp0_active = 1;
192 }
193 }
194
165 mlx4_free_cmd_mailbox(dev, mailbox); 195 mlx4_free_cmd_mailbox(dev, mailbox);
166 return ret; 196 return ret;
167} 197}
@@ -392,6 +422,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
392 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; 422 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
393 int err; 423 int err;
394 int reserved_from_top = 0; 424 int reserved_from_top = 0;
425 int k;
395 426
396 spin_lock_init(&qp_table->lock); 427 spin_lock_init(&qp_table->lock);
397 INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); 428 INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
@@ -406,7 +437,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
406 * We also reserve the MSB of the 24-bit QP number to indicate 437 * We also reserve the MSB of the 24-bit QP number to indicate
407 * that a QP is an XRC QP. 438 * that a QP is an XRC QP.
408 */ 439 */
409 dev->caps.sqp_start = 440 dev->phys_caps.base_sqpn =
410 ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8); 441 ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
411 442
412 { 443 {
@@ -437,13 +468,66 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
437 468
438 } 469 }
439 470
471 /* Reserve 8 real SQPs in both native and SR-IOV modes.
472 * In addition, in SR-IOV mode, reserve 8 proxy SQPs per function
473 * (for all PFs and VFs), and 8 corresponding tunnel QPs.
474 * Each proxy SQP works opposite its own tunnel QP.
475 *
476 * The QPs are arranged as follows:
477 * a. 8 real SQPs
478 * b. All the proxy SQPs (8 per function)
479 * c. All the tunnel QPs (8 per function)
480 */
481
440 err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, 482 err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
441 (1 << 23) - 1, dev->caps.sqp_start + 8, 483 (1 << 23) - 1, dev->phys_caps.base_sqpn + 8 +
484 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev),
442 reserved_from_top); 485 reserved_from_top);
443 if (err) 486 if (err)
444 return err; 487 return err;
445 488
446 return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start); 489 if (mlx4_is_mfunc(dev)) {
490 /* for PPF use */
491 dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
492 dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;
493
494 /* In mfunc, calculate proxy and tunnel qp offsets for the PF here,
495 * since the PF does not call mlx4_slave_cap */
496 dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
497 dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
498 dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
499 dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
500
501 if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
502 !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
503 err = -ENOMEM;
504 goto err_mem;
505 }
506
507 for (k = 0; k < dev->caps.num_ports; k++) {
508 dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn +
509 8 * mlx4_master_func_num(dev) + k;
510 dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX;
511 dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn +
512 8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
513 dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX;
514 }
515 }
516
517
518 err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
519 if (err)
520 goto err_mem;
521 return 0;
522
523err_mem:
524 kfree(dev->caps.qp0_tunnel);
525 kfree(dev->caps.qp0_proxy);
526 kfree(dev->caps.qp1_tunnel);
527 kfree(dev->caps.qp1_proxy);
528 dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
529 dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
530 return err;
447} 531}
448 532
449void mlx4_cleanup_qp_table(struct mlx4_dev *dev) 533void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
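
The layout comment in the hunk above implies simple arithmetic: 8 real SQPs at base_sqpn, then 8 proxy SQPs per function, then 8 tunnel QPs per function, so a proxy QP's tunnel partner always sits exactly 8 * MLX4_MFUNC_MAX above it. A runnable sketch of that arithmetic (function count and base value illustrative):

#include <stdio.h>

#define FUNCS 4		/* illustrative; the driver uses MLX4_MFUNC_MAX */
#define PORTS 2

int main(void)
{
	unsigned base_sqpn = 64;		/* illustrative 8-aligned base */
	unsigned base_proxy = base_sqpn + 8;	/* proxies follow the real SQPs */
	unsigned func = 0;			/* the PF's function number */

	for (int k = 0; k < PORTS; k++) {
		unsigned qp0_proxy = base_proxy + 8 * func + k;
		unsigned qp1_proxy = base_proxy + 8 * func + PORTS + k;

		/* each tunnel QP sits 8 * FUNCS above its proxy partner */
		printf("port %d: qp0 %u/%u  qp1 %u/%u (proxy/tunnel)\n",
		       k + 1, qp0_proxy, qp0_proxy + 8 * FUNCS,
		       qp1_proxy, qp1_proxy + 8 * FUNCS);
	}
	return 0;
}
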
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 293c9e820c49..ba6506ff4abb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -242,6 +242,15 @@ static int res_tracker_insert(struct rb_root *root, struct res_common *res)
242 return 0; 242 return 0;
243} 243}
244 244
245enum qp_transition {
246 QP_TRANS_INIT2RTR,
247 QP_TRANS_RTR2RTS,
248 QP_TRANS_RTS2RTS,
249 QP_TRANS_SQERR2RTS,
250 QP_TRANS_SQD2SQD,
251 QP_TRANS_SQD2RTS
252};
253
245/* For Debug uses */ 254/* For Debug uses */
246static const char *ResourceType(enum mlx4_resource rt) 255static const char *ResourceType(enum mlx4_resource rt)
247{ 256{
@@ -308,14 +317,41 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev,
308 } 317 }
309} 318}
310 319
311static void update_ud_gid(struct mlx4_dev *dev, 320static void update_pkey_index(struct mlx4_dev *dev, int slave,
312 struct mlx4_qp_context *qp_ctx, u8 slave) 321 struct mlx4_cmd_mailbox *inbox)
313{ 322{
314 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; 323 u8 sched = *(u8 *)(inbox->buf + 64); /* pri_path.sched_queue */
324 u8 orig_index = *(u8 *)(inbox->buf + 35); /* pri_path.pkey_index */
325 u8 new_index;
326 struct mlx4_priv *priv = mlx4_priv(dev);
327 int port;
328
329 port = (sched >> 6 & 1) + 1; /* bit 6 of sched_queue selects the port */
330
331 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
332 *(u8 *)(inbox->buf + 35) = new_index;
333
334 mlx4_dbg(dev, "port = %d, orig pkey index = %d, "
335 "new pkey index = %d\n", port, orig_index, new_index);
336}
337
338static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
339 u8 slave)
340{
341 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
342 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
343 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
315 344
316 if (MLX4_QP_ST_UD == ts) 345 if (MLX4_QP_ST_UD == ts)
317 qp_ctx->pri_path.mgid_index = 0x80 | slave; 346 qp_ctx->pri_path.mgid_index = 0x80 | slave;
318 347
348 if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
349 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
350 qp_ctx->pri_path.mgid_index = slave & 0x7F;
351 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
352 qp_ctx->alt_path.mgid_index = slave & 0x7F;
353 }
354
319 mlx4_dbg(dev, "slave %d, new gid index: 0x%x ", 355 mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
320 slave, qp_ctx->pri_path.mgid_index); 356 slave, qp_ctx->pri_path.mgid_index);
321} 357}
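
The index rewriting in update_gid() is pure bit manipulation: UD QPs are steered to the paravirtualized GID entry by setting the top bit over the slave number, while RC/UC address paths are clamped to the slave's own index with the top bit clear. In isolation:

/* UD traffic: top bit marks the paravirtualized GID entry, the low
 * bits carry the slave (function) number. */
static unsigned char ud_mgid_index(unsigned char slave)
{
	return 0x80 | slave;
}

/* RC/UC address paths: pin the guest to its own index, top bit clear. */
static unsigned char rc_uc_mgid_index(unsigned char slave)
{
	return slave & 0x7F;
}
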
@@ -360,8 +396,6 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
360 396
361 r->from_state = r->state; 397 r->from_state = r->state;
362 r->state = RES_ANY_BUSY; 398 r->state = RES_ANY_BUSY;
363 mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
364 ResourceType(type), r->res_id);
365 399
366 if (res) 400 if (res)
367 *((struct res_common **)res) = r; 401 *((struct res_common **)res) = r;
@@ -1105,7 +1139,13 @@ static void res_end_move(struct mlx4_dev *dev, int slave,
1105 1139
1106static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) 1140static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1107{ 1141{
1108 return mlx4_is_qp_reserved(dev, qpn); 1142 return mlx4_is_qp_reserved(dev, qpn) &&
1143 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1144}
1145
1146static int fw_reserved(struct mlx4_dev *dev, int qpn)
1147{
1148 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1109} 1149}
1110 1150
1111static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1151static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
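
The split above separates two questions the old code conflated: valid_reserved() asks whether this slave may touch a reserved QPN at all (the master always may; a guest only within its own proxy/tunnel range), while fw_reserved() asks whether the QPN lies in the firmware region that never receives driver-side ICM. Allocation and free now key off the latter, so proxy and tunnel QPs get ICM backing. The two predicates as a sketch (bounds and helper hypothetical):

static int fw_reserved(unsigned qpn, unsigned fw_region_cnt)
{
	/* QPNs below the firmware region never get driver-side ICM */
	return qpn < fw_region_cnt;
}

static int valid_reserved(unsigned qpn, unsigned reserved_cnt, int slave,
			  int is_master,
			  int (*is_guest_proxy)(int slave, unsigned qpn))
{
	/* a reserved QPN is usable by the master, or by the guest that
	 * owns the matching proxy/tunnel range */
	return qpn < reserved_cnt &&
	       (is_master || is_guest_proxy(slave, qpn));
}
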
@@ -1145,7 +1185,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1145 if (err) 1185 if (err)
1146 return err; 1186 return err;
1147 1187
1148 if (!valid_reserved(dev, slave, qpn)) { 1188 if (!fw_reserved(dev, qpn)) {
1149 err = __mlx4_qp_alloc_icm(dev, qpn); 1189 err = __mlx4_qp_alloc_icm(dev, qpn);
1150 if (err) { 1190 if (err) {
1151 res_abort_move(dev, slave, RES_QP, qpn); 1191 res_abort_move(dev, slave, RES_QP, qpn);
@@ -1498,7 +1538,7 @@ static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1498 if (err) 1538 if (err)
1499 return err; 1539 return err;
1500 1540
1501 if (!valid_reserved(dev, slave, qpn)) 1541 if (!fw_reserved(dev, qpn))
1502 __mlx4_qp_free_icm(dev, qpn); 1542 __mlx4_qp_free_icm(dev, qpn);
1503 1543
1504 res_end_move(dev, slave, RES_QP, qpn); 1544 res_end_move(dev, slave, RES_QP, qpn);
@@ -1938,6 +1978,19 @@ static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
1938 return be32_to_cpu(qpc->srqn) & 0x1ffffff; 1978 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
1939} 1979}
1940 1980
1981static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
1982 struct mlx4_qp_context *context)
1983{
1984 u32 qpn = vhcr->in_modifier & 0xffffff;
1985 u32 qkey = 0;
1986
1987 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
1988 return;
1989
1990 /* adjust qkey in qp context */
1991 context->qkey = cpu_to_be32(qkey);
1992}
1993
1941int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, 1994int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1942 struct mlx4_vhcr *vhcr, 1995 struct mlx4_vhcr *vhcr,
1943 struct mlx4_cmd_mailbox *inbox, 1996 struct mlx4_cmd_mailbox *inbox,
@@ -1990,6 +2043,8 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1990 goto ex_put_scq; 2043 goto ex_put_scq;
1991 } 2044 }
1992 2045
2046 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2047 update_pkey_index(dev, slave, inbox);
1993 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2048 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1994 if (err) 2049 if (err)
1995 goto ex_put_srq; 2050 goto ex_put_srq;
@@ -2135,6 +2190,48 @@ static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2135 return err; 2190 return err;
2136} 2191}
2137 2192
2193static int verify_qp_parameters(struct mlx4_dev *dev,
2194 struct mlx4_cmd_mailbox *inbox,
2195 enum qp_transition transition, u8 slave)
2196{
2197 u32 qp_type;
2198 struct mlx4_qp_context *qp_ctx;
2199 enum mlx4_qp_optpar optpar;
2200
2201 qp_ctx = inbox->buf + 8;
2202 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2203 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2204
2205 switch (qp_type) {
2206 case MLX4_QP_ST_RC:
2207 case MLX4_QP_ST_UC:
2208 switch (transition) {
2209 case QP_TRANS_INIT2RTR:
2210 case QP_TRANS_RTR2RTS:
2211 case QP_TRANS_RTS2RTS:
2212 case QP_TRANS_SQD2SQD:
2213 case QP_TRANS_SQD2RTS:
2214 if (slave != mlx4_master_func_num(dev)) {
2215 /* slaves have only gid index 0 */
2216 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2217 if (qp_ctx->pri_path.mgid_index)
2218 return -EINVAL;
2219 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2220 if (qp_ctx->alt_path.mgid_index)
2221 return -EINVAL;
2222 }
2222 break;
2223 default:
2224 break;
2225 }
2226
2227 break;
2228 default:
2229 break;
2230 }
2231
2232 return 0;
2233}
2234
2138int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, 2235int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2139 struct mlx4_vhcr *vhcr, 2236 struct mlx4_vhcr *vhcr,
2140 struct mlx4_cmd_mailbox *inbox, 2237 struct mlx4_cmd_mailbox *inbox,
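
verify_qp_parameters() gates every new transition wrapper: for RC/UC transitions a guest (any function other than the master) may only use GID index 0 in whichever address path the optpar mask says is being modified; note the braces added above, without which the alternate-path check would escape the slave test and reject the master as well. Condensed to a standalone check (optpar bits illustrative):

enum {
	OPTPAR_PRI_PATH = 1 << 0,	/* illustrative optpar bits */
	OPTPAR_ALT_PATH = 1 << 1,
};

static int verify_gid_indexes(int slave, int master_func, unsigned optpar,
			      unsigned pri_mgid_index, unsigned alt_mgid_index)
{
	if (slave != master_func) {	/* guests have only gid index 0 */
		if ((optpar & OPTPAR_PRI_PATH) && pri_mgid_index)
			return -1;
		if ((optpar & OPTPAR_ALT_PATH) && alt_mgid_index)
			return -1;
	}
	return 0;
}
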
@@ -2622,16 +2719,123 @@ out:
2622 return err; 2719 return err;
2623} 2720}
2624 2721
2722int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2723 struct mlx4_vhcr *vhcr,
2724 struct mlx4_cmd_mailbox *inbox,
2725 struct mlx4_cmd_mailbox *outbox,
2726 struct mlx4_cmd_info *cmd)
2727{
2728 struct mlx4_qp_context *context = inbox->buf + 8;
2729 adjust_proxy_tun_qkey(dev, vhcr, context);
2730 update_pkey_index(dev, slave, inbox);
2731 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2732}
2733
2625int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, 2734int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2626 struct mlx4_vhcr *vhcr, 2735 struct mlx4_vhcr *vhcr,
2627 struct mlx4_cmd_mailbox *inbox, 2736 struct mlx4_cmd_mailbox *inbox,
2628 struct mlx4_cmd_mailbox *outbox, 2737 struct mlx4_cmd_mailbox *outbox,
2629 struct mlx4_cmd_info *cmd) 2738 struct mlx4_cmd_info *cmd)
2630{ 2739{
2740 int err;
2631 struct mlx4_qp_context *qpc = inbox->buf + 8; 2741 struct mlx4_qp_context *qpc = inbox->buf + 8;
2632 2742
2633 update_ud_gid(dev, qpc, (u8)slave); 2743 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
2744 if (err)
2745 return err;
2746
2747 update_pkey_index(dev, slave, inbox);
2748 update_gid(dev, inbox, (u8)slave);
2749 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2750
2751 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2752}
2753
2754int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2755 struct mlx4_vhcr *vhcr,
2756 struct mlx4_cmd_mailbox *inbox,
2757 struct mlx4_cmd_mailbox *outbox,
2758 struct mlx4_cmd_info *cmd)
2759{
2760 int err;
2761 struct mlx4_qp_context *context = inbox->buf + 8;
2762
2763 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
2764 if (err)
2765 return err;
2766
2767 update_pkey_index(dev, slave, inbox);
2768 update_gid(dev, inbox, (u8)slave);
2769 adjust_proxy_tun_qkey(dev, vhcr, context);
2770 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2771}
2772
2773int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2774 struct mlx4_vhcr *vhcr,
2775 struct mlx4_cmd_mailbox *inbox,
2776 struct mlx4_cmd_mailbox *outbox,
2777 struct mlx4_cmd_info *cmd)
2778{
2779 int err;
2780 struct mlx4_qp_context *context = inbox->buf + 8;
2781
2782 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
2783 if (err)
2784 return err;
2785
2786 update_pkey_index(dev, slave, inbox);
2787 update_gid(dev, inbox, (u8)slave);
2788 adjust_proxy_tun_qkey(dev, vhcr, context);
2789 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2790}
2791
2792
2793int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2794 struct mlx4_vhcr *vhcr,
2795 struct mlx4_cmd_mailbox *inbox,
2796 struct mlx4_cmd_mailbox *outbox,
2797 struct mlx4_cmd_info *cmd)
2798{
2799 struct mlx4_qp_context *context = inbox->buf + 8;
2800 adjust_proxy_tun_qkey(dev, vhcr, context);
2801 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2802}
2803
2804int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
2805 struct mlx4_vhcr *vhcr,
2806 struct mlx4_cmd_mailbox *inbox,
2807 struct mlx4_cmd_mailbox *outbox,
2808 struct mlx4_cmd_info *cmd)
2809{
2810 int err;
2811 struct mlx4_qp_context *context = inbox->buf + 8;
2812
2813 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
2814 if (err)
2815 return err;
2816
2817 adjust_proxy_tun_qkey(dev, vhcr, context);
2818 update_gid(dev, inbox, (u8)slave);
2819 update_pkey_index(dev, slave, inbox);
2820 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2821}
2822
2823int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2824 struct mlx4_vhcr *vhcr,
2825 struct mlx4_cmd_mailbox *inbox,
2826 struct mlx4_cmd_mailbox *outbox,
2827 struct mlx4_cmd_info *cmd)
2828{
2829 int err;
2830 struct mlx4_qp_context *context = inbox->buf + 8;
2831
2832 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
2833 if (err)
2834 return err;
2634 2835
2836 adjust_proxy_tun_qkey(dev, vhcr, context);
2837 update_gid(dev, inbox, (u8)slave);
2838 update_pkey_index(dev, slave, inbox);
2635 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2839 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2636} 2840}
2637 2841
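
All of the transition wrappers above share one shape: verify the guest's parameters, rewrite the P_Key and GID indexes through the paravirtualization tables, patch in the proxy/tunnel qkey, then delegate to the generic wrapper. That shared skeleton, with stand-in declarations for the driver's helpers:

struct ctx;	/* stands in for the dev/vhcr/mailbox arguments */

int verify_qp_parameters(struct ctx *c, int transition, int slave);
void update_pkey_index(struct ctx *c, int slave);
void update_gid(struct ctx *c, int slave);
void adjust_proxy_tun_qkey(struct ctx *c);
int generic_qp_wrapper(struct ctx *c, int slave);

int qp_transition_wrapper(struct ctx *c, int slave, int transition)
{
	int err = verify_qp_parameters(c, transition, slave);

	if (err)
		return err;

	update_pkey_index(c, slave);	/* virtual -> physical P_Key slot */
	update_gid(c, slave);		/* pin the guest to its own GID index */
	adjust_proxy_tun_qkey(c);	/* paravirtualized qkey for proxy QPs */

	return generic_qp_wrapper(c, slave);
}
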