author     Eli Cohen <eli@mellanox.co.il>          2011-12-12 23:15:24 -0500
committer  David S. Miller <davem@davemloft.net>   2011-12-13 13:56:07 -0500
commit     c82e9aa0a8bcf7a1643ccb71678bab57f3cb4bc6 (patch)
tree       d19f8112999c2a587bd33d68dd52653a95c1c6e9 /drivers
parent     acba2420f9d20082b17d0cbeb1137fcffe0f5b7e (diff)
mlx4_core: resource tracking for HCA resources used by guests
The resource tracker is used to track usage of HCA resources by the different guests.

Virtual functions (VFs) are attached to guest operating systems, but resources are allocated from the same pool and are assigned to VFs. It is essential that hostile or buggy guests not be able to affect the operation of other VFs, possibly attached to other guest OSs, since ConnectX firmware is not tolerant to misuse of resources.

The resource tracker module associates each resource with a VF and maintains state information for the allocated object. It also defines the allowed state transitions and enforces them.

Relationships between resources are tracked as well. For example, CQs are pointed to by QPs, so it is forbidden to destroy a CQ while a QP still refers to it.

ICM memory is always accessible through the primary function, and hence it is allocated by the owner of the primary function.

When a guest dies, a function level reset (FLR) is generated for all the VFs it owns, and all the resources it used are freed.

The tracked resource types are: QPs, CQs, SRQs, MPTs, MTTs, MACs, RES_EQs, and XRCDNs.

Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
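The enforcement described above boils down to a per-resource state machine: look up the object, check that the caller owns it, check that the requested transition is legal from the current state, park the object in a busy state while the firmware command runs, then commit the move or roll it back. Below is a minimal standalone sketch of that pattern, with hypothetical, simplified types and names; it is not code from this patch (the patch's real versions are qp_res_start_move_to(), res_abort_move() and res_end_move() in resource_tracker.c further down, which do the same dance under a spinlock).

#include <errno.h>

/* Hypothetical, simplified mirror of the tracked QP states. */
enum qp_state { QP_BUSY, QP_RESERVED, QP_MAPPED, QP_HW };

struct tracked_qp {
	int owner;		/* VF (slave) that owns this QP */
	enum qp_state state;	/* current state */
	enum qp_state from;	/* saved for rollback */
	enum qp_state to;	/* installed on commit */
};

/* Begin a transition; the caller is assumed to hold the tracker lock. */
static int qp_start_move(struct tracked_qp *qp, int slave, enum qp_state to)
{
	if (qp->owner != slave)
		return -EPERM;	/* resource belongs to another guest */
	if (qp->state == QP_BUSY)
		return -EBUSY;	/* another transition is in flight */
	/* Only adjacent moves are legal: RESERVED <-> MAPPED <-> HW. */
	if ((to == QP_RESERVED && qp->state != QP_MAPPED) ||
	    (to == QP_MAPPED && qp->state != QP_RESERVED &&
				qp->state != QP_HW) ||
	    (to == QP_HW && qp->state != QP_MAPPED))
		return -EINVAL;
	qp->from = qp->state;
	qp->to = to;
	qp->state = QP_BUSY;	/* exclude concurrent movers */
	return 0;
}

/* Commit if the firmware command succeeded, otherwise roll back. */
static void qp_end_move(struct tracked_qp *qp, int fw_ok)
{
	qp->state = fw_ok ? qp->to : qp->from;
}

Cross-resource references follow the same defensive idea with reference counts: in the patch, a CQ's ref_count stays raised while a QP points at it, and the CQ is not allowed to leave the HW state until that count drops to zero.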
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/Makefile           |    2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c              |  405
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c               |   25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c               |   37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h             |  110
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c               |   34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c               |    8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 3103
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/srq.c              |   24
9 files changed, 3628 insertions, 120 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index d1aa45a15854..4a40ab967eeb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -1,7 +1,7 @@
 obj-$(CONFIG_MLX4_CORE)	+= mlx4_core.o
 
 mlx4_core-y :=	alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-		mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
+		mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o
 
 obj-$(CONFIG_MLX4_EN)	+= mlx4_en.o
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 9c0bdcabaea0..bce0579a2855 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -172,12 +172,6 @@ static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
 	mmiowb();
 }
 
-/* dummy procedure for this patch */
-int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
-{
-	return 0;
-}
-
 static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
 			      unsigned long timeout)
 {
@@ -614,6 +608,403 @@ static struct mlx4_cmd_info cmd_info[] = {
 		.verify = NULL,
 		.wrapper = NULL
 	},
+	{
+		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_ADAPTER,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_INIT_PORT,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_INIT_PORT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_CLOSE_PORT,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_CLOSE_PORT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_PORT,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_PORT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_MAP_EQ,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_MAP_EQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SW2HW_EQ,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = true,
+		.verify = NULL,
+		.wrapper = mlx4_SW2HW_EQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_HW_HEALTH_CHECK,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_NOP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_ALLOC_RES,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = true,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_ALLOC_RES_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_FREE_RES,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_FREE_RES_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SW2HW_MPT,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = true,
+		.verify = NULL,
+		.wrapper = mlx4_SW2HW_MPT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_MPT,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_MPT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_HW2SW_MPT,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_HW2SW_MPT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_READ_MTT,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_WRITE_MTT,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_WRITE_MTT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SYNC_TPT,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_HW2SW_EQ,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = true,
+		.verify = NULL,
+		.wrapper = mlx4_HW2SW_EQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_EQ,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = true,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_EQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SW2HW_CQ,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = true,
+		.verify = NULL,
+		.wrapper = mlx4_SW2HW_CQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_HW2SW_CQ,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_HW2SW_CQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_CQ,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_CQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_MODIFY_CQ,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = true,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_MODIFY_CQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SW2HW_SRQ,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = true,
+		.verify = NULL,
+		.wrapper = mlx4_SW2HW_SRQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_HW2SW_SRQ,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_HW2SW_SRQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_SRQ,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_SRQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_ARM_SRQ,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_ARM_SRQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_RST2INIT_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = true,
+		.verify = NULL,
+		.wrapper = mlx4_RST2INIT_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_INIT2INIT_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_INIT2RTR_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_INIT2RTR_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_RTR2RTS_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_RTS2RTS_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SQERR2RTS_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_2ERR_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_RTS2SQD_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SQD2SQD_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SQD2RTS_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_2RST_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_2RST_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_QP,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SUSPEND_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_UNSUSPEND_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_IF_STAT,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_IF_STAT_wrapper
+	},
+	/* Native multicast commands are not available for guests */
+	{
+		.opcode = MLX4_CMD_QP_ATTACH,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_QP_ATTACH_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_INFORM_FLR_DONE,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
 };
 
 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
@@ -877,6 +1268,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 		return;
 
 reset_slave:
+	/* cleanup any slave resources */
+	mlx4_delete_all_resources_for_slave(dev, slave);
 	spin_lock(&priv->mfunc.master.slave_state_lock);
 	if (!slave_state[slave].is_slave_going_down)
 		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index dd9211f1d500..475f9d6af955 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -44,27 +44,6 @@
 #include "mlx4.h"
 #include "icm.h"
 
-struct mlx4_cq_context {
-	__be32			flags;
-	u16			reserved1[3];
-	__be16			page_offset;
-	__be32			logsize_usrpage;
-	__be16			cq_period;
-	__be16			cq_max_count;
-	u8			reserved2[3];
-	u8			comp_eqn;
-	u8			log_page_size;
-	u8			reserved3[2];
-	u8			mtt_base_addr_h;
-	__be32			mtt_base_addr_l;
-	__be32			last_notified_index;
-	__be32			solicit_producer_index;
-	__be32			consumer_index;
-	__be32			producer_index;
-	u32			reserved4[2];
-	__be64			db_rec_addr;
-};
-
 #define MLX4_CQ_STATUS_OK		( 0 << 28)
 #define MLX4_CQ_STATUS_OVERFLOW		( 9 << 28)
 #define MLX4_CQ_STATUS_WRITE_FAIL	(10 << 28)
@@ -189,7 +168,7 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
 }
 EXPORT_SYMBOL_GPL(mlx4_cq_resize);
 
-static int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cq_table *cq_table = &priv->cq_table;
@@ -235,7 +214,7 @@ static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
 	return __mlx4_cq_alloc_icm(dev, cqn);
 }
 
-static void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cq_table *cq_table = &priv->cq_table;
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 7416ef20c203..1e9b55eb7217 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -53,30 +53,6 @@ enum {
 	MLX4_EQ_ENTRY_SIZE	= 0x20
 };
 
-/*
- * Must be packed because start is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_eq_context {
-	__be32			flags;
-	u16			reserved1[3];
-	__be16			page_offset;
-	u8			log_eq_size;
-	u8			reserved2[4];
-	u8			eq_period;
-	u8			reserved3;
-	u8			eq_max_count;
-	u8			reserved4[3];
-	u8			intr;
-	u8			log_page_size;
-	u8			reserved5[2];
-	u8			mtt_base_addr_h;
-	__be32			mtt_base_addr_l;
-	u32			reserved6[2];
-	__be32			consumer_index;
-	__be32			producer_index;
-	u32			reserved7[4];
-};
-
 #define MLX4_EQ_STATUS_OK	   ( 0 << 28)
 #define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
 #define MLX4_EQ_OWNER_SW	   ( 0 << 24)
@@ -135,19 +111,6 @@ static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_e
 		eqe : NULL;
 }
 
-/* dummies for now */
-void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
-{
-}
-
-int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
-				    enum mlx4_resource type,
-				    int res_id, int *slave)
-{
-	return -ENOENT;
-}
-/* end dummies */
-
 void mlx4_gen_slave_eqe(struct work_struct *work)
 {
 	struct mlx4_mfunc_master_ctx *master =
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 23701a25daa9..2488be8bb02a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -224,6 +224,91 @@ struct mlx4_icm_table {
 	struct mlx4_icm	       **icm;
 };
 
+/*
+ * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_mpt_entry {
+	__be32 flags;
+	__be32 qpn;
+	__be32 key;
+	__be32 pd_flags;
+	__be64 start;
+	__be64 length;
+	__be32 lkey;
+	__be32 win_cnt;
+	u8	reserved1[3];
+	u8	mtt_rep;
+	__be64 mtt_seg;
+	__be32 mtt_sz;
+	__be32 entity_size;
+	__be32 first_byte_offset;
+} __packed;
+
+/*
+ * Must be packed because start is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_eq_context {
+	__be32	flags;
+	u16	reserved1[3];
+	__be16	page_offset;
+	u8	log_eq_size;
+	u8	reserved2[4];
+	u8	eq_period;
+	u8	reserved3;
+	u8	eq_max_count;
+	u8	reserved4[3];
+	u8	intr;
+	u8	log_page_size;
+	u8	reserved5[2];
+	u8	mtt_base_addr_h;
+	__be32	mtt_base_addr_l;
+	u32	reserved6[2];
+	__be32	consumer_index;
+	__be32	producer_index;
+	u32	reserved7[4];
+};
+
+struct mlx4_cq_context {
+	__be32	flags;
+	u16	reserved1[3];
+	__be16	page_offset;
+	__be32	logsize_usrpage;
+	__be16	cq_period;
+	__be16	cq_max_count;
+	u8	reserved2[3];
+	u8	comp_eqn;
+	u8	log_page_size;
+	u8	reserved3[2];
+	u8	mtt_base_addr_h;
+	__be32	mtt_base_addr_l;
+	__be32	last_notified_index;
+	__be32	solicit_producer_index;
+	__be32	consumer_index;
+	__be32	producer_index;
+	u32	reserved4[2];
+	__be64	db_rec_addr;
+};
+
+struct mlx4_srq_context {
+	__be32	state_logsize_srqn;
+	u8	logstride;
+	u8	reserved1;
+	__be16	xrcd;
+	__be32	pg_offset_cqn;
+	u32	reserved2;
+	u8	log_page_size;
+	u8	reserved3[2];
+	u8	mtt_base_addr_h;
+	__be32	mtt_base_addr_l;
+	__be32	pd;
+	__be16	limit_watermark;
+	__be16	wqe_cnt;
+	u16	reserved4;
+	__be16	wqe_counter;
+	u32	reserved5;
+	__be64	db_rec_addr;
+};
+
 struct mlx4_eqe {
 	u8			reserved1;
 	u8			type;
@@ -657,6 +742,18 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
 void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
 void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
 void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
+int __mlx4_mr_reserve(struct mlx4_dev *dev);
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index);
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
 
 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
 			   struct mlx4_vhcr *vhcr,
@@ -693,6 +790,14 @@ int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
 		     struct mlx4_cmd_mailbox *inbox,
 		     struct mlx4_cmd_mailbox *outbox,
 		     struct mlx4_cmd_info *cmd);
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+			    int *base);
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+		     int start_index, int npages, u64 *page_list);
 
 void mlx4_start_catas_poll(struct mlx4_dev *dev);
 void mlx4_stop_catas_poll(struct mlx4_dev *dev);
@@ -936,6 +1041,11 @@ static inline u32 get_param_h(u64 *arg)
 	return (u32)(*arg >> 32);
 }
 
+static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
+{
+	return &mlx4_priv(dev)->mfunc.master.res_tracker.lock;
+}
+
 #define NOT_MASKED_PD_BITS 17
 
 #endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 916eba4572b7..f8fd0a1d73af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -43,26 +43,6 @@
 #include "mlx4.h"
 #include "icm.h"
 
-/*
- * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_mpt_entry {
-	__be32 flags;
-	__be32 qpn;
-	__be32 key;
-	__be32 pd_flags;
-	__be64 start;
-	__be64 length;
-	__be32 lkey;
-	__be32 win_cnt;
-	u8	reserved1[3];
-	u8	mtt_rep;
-	__be64 mtt_seg;
-	__be32 mtt_sz;
-	__be32 entity_size;
-	__be32 first_byte_offset;
-} __packed;
-
 #define MLX4_MPT_FLAG_SW_OWNS	(0xfUL << 28)
 #define MLX4_MPT_FLAG_FREE	(0x3UL << 28)
 #define MLX4_MPT_FLAG_MIO	(1 << 17)
@@ -182,7 +162,7 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
 	kfree(buddy->num_free);
 }
 
-static u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
 {
 	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 	u32 seg;
@@ -243,7 +223,7 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_init);
 
-static void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg,
-				  int order)
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg,
+			   int order)
 {
 	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
@@ -360,7 +340,7 @@ static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
 			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
-static int __mlx4_mr_reserve(struct mlx4_dev *dev)
+int __mlx4_mr_reserve(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
@@ -381,7 +361,7 @@ static int mlx4_mr_reserve(struct mlx4_dev *dev)
 	return __mlx4_mr_reserve(dev);
 }
 
-static void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
@@ -404,7 +384,7 @@ static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
 	__mlx4_mr_release(dev, index);
 }
 
-static int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
 {
 	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 
@@ -425,7 +405,7 @@ static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
 	return __mlx4_mr_alloc_icm(dev, index);
 }
 
-static void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
 {
 	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 
@@ -595,7 +575,7 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	return 0;
 }
 
-static int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-			    int start_index, int npages, u64 *page_list)
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+		     int start_index, int npages, u64 *page_list)
 {
 	int err = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index d0489740563e..6b03ac8b9002 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -182,7 +182,7 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_modify);
 
-static int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
-				   int *base)
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+			    int *base)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -218,7 +218,7 @@ int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
 
-static void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_qp_table *qp_table = &priv->qp_table;
@@ -248,7 +248,7 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
 
-static int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_qp_table *qp_table = &priv->qp_table;
@@ -305,7 +305,7 @@ static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
 	return __mlx4_qp_alloc_icm(dev, qpn);
 }
 
-static void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_qp_table *qp_table = &priv->qp_table;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
new file mode 100644
index 000000000000..59fc35ee66ad
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -0,0 +1,3103 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+#define MLX4_MAC_VALID		(1ull << 63)
+#define MLX4_MAC_MASK		0x7fffffffffffffffULL
+#define ETH_ALEN		6
+
+struct mac_res {
+	struct list_head list;
+	u64 mac;
+	u8 port;
+};
+
+struct res_common {
+	struct list_head	list;
+	u32			res_id;
+	int			owner;
+	int			state;
+	int			from_state;
+	int			to_state;
+	int			removing;
+};
+
+enum {
+	RES_ANY_BUSY = 1
+};
+
+struct res_gid {
+	struct list_head	list;
+	u8			gid[16];
+	enum mlx4_protocol	prot;
+};
+
+enum res_qp_states {
+	RES_QP_BUSY = RES_ANY_BUSY,
+
+	/* QP number was allocated */
+	RES_QP_RESERVED,
+
+	/* ICM memory for QP context was mapped */
+	RES_QP_MAPPED,
+
+	/* QP is in hw ownership */
+	RES_QP_HW
+};
+
+static inline const char *qp_states_str(enum res_qp_states state)
+{
+	switch (state) {
+	case RES_QP_BUSY: return "RES_QP_BUSY";
+	case RES_QP_RESERVED: return "RES_QP_RESERVED";
+	case RES_QP_MAPPED: return "RES_QP_MAPPED";
+	case RES_QP_HW: return "RES_QP_HW";
+	default: return "Unknown";
+	}
+}
+
+struct res_qp {
+	struct res_common	com;
+	struct res_mtt	       *mtt;
+	struct res_cq	       *rcq;
+	struct res_cq	       *scq;
+	struct res_srq	       *srq;
+	struct list_head	mcg_list;
+	spinlock_t		mcg_spl;
+	int			local_qpn;
+};
+
+enum res_mtt_states {
+	RES_MTT_BUSY = RES_ANY_BUSY,
+	RES_MTT_ALLOCATED,
+};
+
+static inline const char *mtt_states_str(enum res_mtt_states state)
+{
+	switch (state) {
+	case RES_MTT_BUSY: return "RES_MTT_BUSY";
+	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
+	default: return "Unknown";
+	}
+}
+
+struct res_mtt {
+	struct res_common	com;
+	int			order;
+	atomic_t		ref_count;
+};
+
+enum res_mpt_states {
+	RES_MPT_BUSY = RES_ANY_BUSY,
+	RES_MPT_RESERVED,
+	RES_MPT_MAPPED,
+	RES_MPT_HW,
+};
+
+struct res_mpt {
+	struct res_common	com;
+	struct res_mtt	       *mtt;
+	int			key;
+};
+
+enum res_eq_states {
+	RES_EQ_BUSY = RES_ANY_BUSY,
+	RES_EQ_RESERVED,
+	RES_EQ_HW,
+};
+
+struct res_eq {
+	struct res_common	com;
+	struct res_mtt	       *mtt;
+};
+
+enum res_cq_states {
+	RES_CQ_BUSY = RES_ANY_BUSY,
+	RES_CQ_ALLOCATED,
+	RES_CQ_HW,
+};
+
+struct res_cq {
+	struct res_common	com;
+	struct res_mtt	       *mtt;
+	atomic_t		ref_count;
+};
+
+enum res_srq_states {
+	RES_SRQ_BUSY = RES_ANY_BUSY,
+	RES_SRQ_ALLOCATED,
+	RES_SRQ_HW,
+};
+
+static inline const char *srq_states_str(enum res_srq_states state)
+{
+	switch (state) {
+	case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
+	case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
+	case RES_SRQ_HW: return "RES_SRQ_HW";
+	default: return "Unknown";
+	}
+}
+
+struct res_srq {
+	struct res_common	com;
+	struct res_mtt	       *mtt;
+	struct res_cq	       *cq;
+	atomic_t		ref_count;
+};
+
+enum res_counter_states {
+	RES_COUNTER_BUSY = RES_ANY_BUSY,
+	RES_COUNTER_ALLOCATED,
+};
+
+static inline const char *counter_states_str(enum res_counter_states state)
+{
+	switch (state) {
+	case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
+	case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
+	default: return "Unknown";
+	}
+}
+
+struct res_counter {
+	struct res_common	com;
+	int			port;
+};
+
+/* For Debug uses */
+static const char *ResourceType(enum mlx4_resource rt)
+{
+	switch (rt) {
+	case RES_QP: return "RES_QP";
+	case RES_CQ: return "RES_CQ";
+	case RES_SRQ: return "RES_SRQ";
+	case RES_MPT: return "RES_MPT";
+	case RES_MTT: return "RES_MTT";
+	case RES_MAC: return "RES_MAC";
+	case RES_EQ: return "RES_EQ";
+	case RES_COUNTER: return "RES_COUNTER";
+	default: return "Unknown resource type !!!";
+	};
+}
+
+/* dummy procedures */
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+	return 0;
+}
+
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+}
+/* end dummies */
+
+int mlx4_init_resource_tracker(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i;
+	int t;
+
+	priv->mfunc.master.res_tracker.slave_list =
+		kzalloc(dev->num_slaves * sizeof(struct slave_list),
+			GFP_KERNEL);
+	if (!priv->mfunc.master.res_tracker.slave_list)
+		return -ENOMEM;
+
+	for (i = 0 ; i < dev->num_slaves; i++) {
+		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
+			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
+				       slave_list[i].res_list[t]);
+		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+	}
+
+	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
+		 dev->num_slaves);
+	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
+		INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
+				GFP_ATOMIC|__GFP_NOWARN);
+
+	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
+	return 0 ;
+}
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i;
+
+	if (priv->mfunc.master.res_tracker.slave_list) {
+		for (i = 0 ; i < dev->num_slaves; i++)
+			mlx4_delete_all_resources_for_slave(dev, i);
+
+		kfree(priv->mfunc.master.res_tracker.slave_list);
+	}
+}
+
+static void update_ud_gid(struct mlx4_dev *dev,
+			  struct mlx4_qp_context *qp_ctx, u8 slave)
+{
+	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+
+	if (MLX4_QP_ST_UD == ts)
+		qp_ctx->pri_path.mgid_index = 0x80 | slave;
+
+	mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
+		slave, qp_ctx->pri_path.mgid_index);
+}
+
+static int mpt_mask(struct mlx4_dev *dev)
+{
+	return dev->caps.num_mpts - 1;
+}
+
+static void *find_res(struct mlx4_dev *dev, int res_id,
+		      enum mlx4_resource type)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
+				 res_id);
+}
+
+static int get_res(struct mlx4_dev *dev, int slave, int res_id,
+		   enum mlx4_resource type,
+		   void *res)
+{
+	struct res_common *r;
+	int err = 0;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = find_res(dev, res_id, type);
+	if (!r) {
+		err = -ENONET;
+		goto exit;
+	}
+
+	if (r->state == RES_ANY_BUSY) {
+		err = -EBUSY;
+		goto exit;
+	}
+
+	if (r->owner != slave) {
+		err = -EPERM;
+		goto exit;
+	}
+
+	r->from_state = r->state;
+	r->state = RES_ANY_BUSY;
+	mlx4_dbg(dev, "res %s id 0x%x to busy\n",
+		 ResourceType(type), r->res_id);
+
+	if (res)
+		*((struct res_common **)res) = r;
+
+exit:
+	spin_unlock_irq(mlx4_tlock(dev));
+	return err;
+}
+
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+				    enum mlx4_resource type,
+				    int res_id, int *slave)
+{
+
+	struct res_common *r;
+	int err = -ENOENT;
+	int id = res_id;
+
+	if (type == RES_QP)
+		id &= 0x7fffff;
+	spin_lock_irq(mlx4_tlock(dev));
+
+	r = find_res(dev, id, type);
+	if (r) {
+		*slave = r->owner;
+		err = 0;
+	}
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	return err;
+}
+
+static void put_res(struct mlx4_dev *dev, int slave, int res_id,
+		    enum mlx4_resource type)
+{
+	struct res_common *r;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = find_res(dev, res_id, type);
+	if (r)
+		r->state = r->from_state;
+	spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static struct res_common *alloc_qp_tr(int id)
+{
+	struct res_qp *ret;
+
+	ret = kzalloc(sizeof *ret, GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->com.res_id = id;
+	ret->com.state = RES_QP_RESERVED;
+	INIT_LIST_HEAD(&ret->mcg_list);
+	spin_lock_init(&ret->mcg_spl);
+
+	return &ret->com;
+}
+
+static struct res_common *alloc_mtt_tr(int id, int order)
+{
+	struct res_mtt *ret;
+
+	ret = kzalloc(sizeof *ret, GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->com.res_id = id;
+	ret->order = order;
+	ret->com.state = RES_MTT_ALLOCATED;
+	atomic_set(&ret->ref_count, 0);
+
+	return &ret->com;
+}
+
+static struct res_common *alloc_mpt_tr(int id, int key)
+{
+	struct res_mpt *ret;
+
+	ret = kzalloc(sizeof *ret, GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->com.res_id = id;
+	ret->com.state = RES_MPT_RESERVED;
+	ret->key = key;
+
+	return &ret->com;
+}
+
+static struct res_common *alloc_eq_tr(int id)
+{
+	struct res_eq *ret;
+
+	ret = kzalloc(sizeof *ret, GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->com.res_id = id;
+	ret->com.state = RES_EQ_RESERVED;
+
+	return &ret->com;
+}
+
+static struct res_common *alloc_cq_tr(int id)
+{
+	struct res_cq *ret;
+
+	ret = kzalloc(sizeof *ret, GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->com.res_id = id;
+	ret->com.state = RES_CQ_ALLOCATED;
+	atomic_set(&ret->ref_count, 0);
+
+	return &ret->com;
+}
+
+static struct res_common *alloc_srq_tr(int id)
+{
+	struct res_srq *ret;
+
+	ret = kzalloc(sizeof *ret, GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->com.res_id = id;
+	ret->com.state = RES_SRQ_ALLOCATED;
+	atomic_set(&ret->ref_count, 0);
+
+	return &ret->com;
+}
+
+static struct res_common *alloc_counter_tr(int id)
+{
+	struct res_counter *ret;
+
+	ret = kzalloc(sizeof *ret, GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->com.res_id = id;
+	ret->com.state = RES_COUNTER_ALLOCATED;
+
+	return &ret->com;
+}
+
+static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
+				   int extra)
+{
+	struct res_common *ret;
+
+	switch (type) {
+	case RES_QP:
+		ret = alloc_qp_tr(id);
+		break;
+	case RES_MPT:
+		ret = alloc_mpt_tr(id, extra);
+		break;
+	case RES_MTT:
+		ret = alloc_mtt_tr(id, extra);
+		break;
+	case RES_EQ:
+		ret = alloc_eq_tr(id);
+		break;
+	case RES_CQ:
+		ret = alloc_cq_tr(id);
+		break;
+	case RES_SRQ:
+		ret = alloc_srq_tr(id);
+		break;
+	case RES_MAC:
+		printk(KERN_ERR "implementation missing\n");
+		return NULL;
+	case RES_COUNTER:
+		ret = alloc_counter_tr(id);
+		break;
+
+	default:
+		return NULL;
+	}
+	if (ret)
+		ret->owner = slave;
+
+	return ret;
+}
+
+static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+			 enum mlx4_resource type, int extra)
+{
+	int i;
+	int err;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct res_common **res_arr;
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct radix_tree_root *root = &tracker->res_tree[type];
+
+	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
+	if (!res_arr)
+		return -ENOMEM;
+
+	for (i = 0; i < count; ++i) {
+		res_arr[i] = alloc_tr(base + i, type, slave, extra);
+		if (!res_arr[i]) {
+			for (--i; i >= 0; --i)
+				kfree(res_arr[i]);
+
+			kfree(res_arr);
+			return -ENOMEM;
+		}
+	}
+
+	spin_lock_irq(mlx4_tlock(dev));
+	for (i = 0; i < count; ++i) {
+		if (find_res(dev, base + i, type)) {
+			err = -EEXIST;
+			goto undo;
+		}
+		err = radix_tree_insert(root, base + i, res_arr[i]);
+		if (err)
+			goto undo;
+		list_add_tail(&res_arr[i]->list,
+			      &tracker->slave_list[slave].res_list[type]);
+	}
+	spin_unlock_irq(mlx4_tlock(dev));
+	kfree(res_arr);
+
+	return 0;
+
+undo:
+	for (--i; i >= base; --i)
+		radix_tree_delete(&tracker->res_tree[type], i);
+
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	for (i = 0; i < count; ++i)
+		kfree(res_arr[i]);
+
+	kfree(res_arr);
+
+	return err;
+}
+
+static int remove_qp_ok(struct res_qp *res)
+{
+	if (res->com.state == RES_QP_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_QP_RESERVED)
+		return -EPERM;
+
+	return 0;
+}
+
+static int remove_mtt_ok(struct res_mtt *res, int order)
+{
+	if (res->com.state == RES_MTT_BUSY ||
+	    atomic_read(&res->ref_count)) {
+		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
+		       __func__, __LINE__,
+		       mtt_states_str(res->com.state),
+		       atomic_read(&res->ref_count));
+		return -EBUSY;
+	} else if (res->com.state != RES_MTT_ALLOCATED)
+		return -EPERM;
+	else if (res->order != order)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int remove_mpt_ok(struct res_mpt *res)
+{
+	if (res->com.state == RES_MPT_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_MPT_RESERVED)
+		return -EPERM;
+
+	return 0;
+}
+
+static int remove_eq_ok(struct res_eq *res)
+{
+	if (res->com.state == RES_MPT_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_MPT_RESERVED)
+		return -EPERM;
+
+	return 0;
+}
+
+static int remove_counter_ok(struct res_counter *res)
+{
+	if (res->com.state == RES_COUNTER_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_COUNTER_ALLOCATED)
+		return -EPERM;
+
+	return 0;
+}
+
+static int remove_cq_ok(struct res_cq *res)
+{
+	if (res->com.state == RES_CQ_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_CQ_ALLOCATED)
+		return -EPERM;
+
+	return 0;
+}
+
+static int remove_srq_ok(struct res_srq *res)
+{
+	if (res->com.state == RES_SRQ_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_SRQ_ALLOCATED)
+		return -EPERM;
+
+	return 0;
+}
+
+static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
+{
+	switch (type) {
+	case RES_QP:
+		return remove_qp_ok((struct res_qp *)res);
+	case RES_CQ:
+		return remove_cq_ok((struct res_cq *)res);
+	case RES_SRQ:
+		return remove_srq_ok((struct res_srq *)res);
+	case RES_MPT:
+		return remove_mpt_ok((struct res_mpt *)res);
+	case RES_MTT:
+		return remove_mtt_ok((struct res_mtt *)res, extra);
+	case RES_MAC:
+		return -ENOSYS;
+	case RES_EQ:
+		return remove_eq_ok((struct res_eq *)res);
+	case RES_COUNTER:
+		return remove_counter_ok((struct res_counter *)res);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+			 enum mlx4_resource type, int extra)
+{
+	int i;
+	int err;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_common *r;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	for (i = base; i < base + count; ++i) {
+		r = radix_tree_lookup(&tracker->res_tree[type], i);
+		if (!r) {
+			err = -ENOENT;
+			goto out;
+		}
+		if (r->owner != slave) {
+			err = -EPERM;
+			goto out;
+		}
+		err = remove_ok(r, type, extra);
+		if (err)
+			goto out;
+	}
+
+	for (i = base; i < base + count; ++i) {
+		r = radix_tree_lookup(&tracker->res_tree[type], i);
+		radix_tree_delete(&tracker->res_tree[type], i);
+		list_del(&r->list);
+		kfree(r);
+	}
+	err = 0;
+
+out:
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	return err;
+}
+
+static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
+				enum res_qp_states state, struct res_qp **qp,
+				int alloc)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_qp *r;
+	int err = 0;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
+	if (!r)
+		err = -ENOENT;
+	else if (r->com.owner != slave)
+		err = -EPERM;
+	else {
+		switch (state) {
+		case RES_QP_BUSY:
+			mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
+				 __func__, r->com.res_id);
+			err = -EBUSY;
+			break;
+
+		case RES_QP_RESERVED:
+			if (r->com.state == RES_QP_MAPPED && !alloc)
+				break;
+
+			mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
+			err = -EINVAL;
+			break;
+
+		case RES_QP_MAPPED:
+			if ((r->com.state == RES_QP_RESERVED && alloc) ||
+			    r->com.state == RES_QP_HW)
+				break;
+			else {
+				mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
+					 r->com.res_id);
+				err = -EINVAL;
+			}
+
+			break;
+
+		case RES_QP_HW:
+			if (r->com.state != RES_QP_MAPPED)
+				err = -EINVAL;
+			break;
+		default:
+			err = -EINVAL;
+		}
+
+		if (!err) {
+			r->com.from_state = r->com.state;
+			r->com.to_state = state;
+			r->com.state = RES_QP_BUSY;
+			if (qp)
+				*qp = (struct res_qp *)r;
+		}
+	}
+
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	return err;
+}
+
+static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+				enum res_mpt_states state, struct res_mpt **mpt)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_mpt *r;
+	int err = 0;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
+	if (!r)
+		err = -ENOENT;
+	else if (r->com.owner != slave)
+		err = -EPERM;
+	else {
+		switch (state) {
+		case RES_MPT_BUSY:
+			err = -EINVAL;
+			break;
+
+		case RES_MPT_RESERVED:
+			if (r->com.state != RES_MPT_MAPPED)
+				err = -EINVAL;
+			break;
+
+		case RES_MPT_MAPPED:
+			if (r->com.state != RES_MPT_RESERVED &&
+			    r->com.state != RES_MPT_HW)
+				err = -EINVAL;
+			break;
+
+		case RES_MPT_HW:
+			if (r->com.state != RES_MPT_MAPPED)
+				err = -EINVAL;
+			break;
+		default:
+			err = -EINVAL;
+		}
+
+		if (!err) {
+			r->com.from_state = r->com.state;
+			r->com.to_state = state;
+			r->com.state = RES_MPT_BUSY;
+			if (mpt)
+				*mpt = (struct res_mpt *)r;
+		}
+	}
+
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	return err;
+}
+
+static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+				enum res_eq_states state, struct res_eq **eq)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_eq *r;
+	int err = 0;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
+	if (!r)
+		err = -ENOENT;
+	else if (r->com.owner != slave)
+		err = -EPERM;
+	else {
+		switch (state) {
+		case RES_EQ_BUSY:
+			err = -EINVAL;
+			break;
+
+		case RES_EQ_RESERVED:
+			if (r->com.state != RES_EQ_HW)
+				err = -EINVAL;
+			break;
+
+		case RES_EQ_HW:
+			if (r->com.state != RES_EQ_RESERVED)
+				err = -EINVAL;
+			break;
+
+		default:
+			err = -EINVAL;
+		}
+
+		if (!err) {
+			r->com.from_state = r->com.state;
+			r->com.to_state = state;
+			r->com.state = RES_EQ_BUSY;
+			if (eq)
+				*eq = r;
+		}
+	}
+
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	return err;
+}
+
+static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
+				enum res_cq_states state, struct res_cq **cq)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_cq *r;
+	int err;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
+	if (!r)
+		err = -ENOENT;
+	else if (r->com.owner != slave)
+		err = -EPERM;
+	else {
+		switch (state) {
+		case RES_CQ_BUSY:
+			err = -EBUSY;
+			break;
+
+		case RES_CQ_ALLOCATED:
+			if (r->com.state != RES_CQ_HW)
+				err = -EINVAL;
+			else if (atomic_read(&r->ref_count))
+				err = -EBUSY;
+			else
+				err = 0;
+			break;
+
+		case RES_CQ_HW:
+			if (r->com.state != RES_CQ_ALLOCATED)
+				err = -EINVAL;
+			else
+				err = 0;
+			break;
+
+		default:
+			err = -EINVAL;
+		}
+
+		if (!err) {
+			r->com.from_state = r->com.state;
+			r->com.to_state = state;
+			r->com.state = RES_CQ_BUSY;
+			if (cq)
+				*cq = r;
+		}
+	}
+
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	return err;
+}
+
+static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+				 enum res_cq_states state, struct res_srq **srq)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_srq *r;
+	int err = 0;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
+	if (!r)
+		err = -ENOENT;
+	else if (r->com.owner != slave)
+		err = -EPERM;
+	else {
+		switch (state) {
+		case RES_SRQ_BUSY:
+			err = -EINVAL;
+			break;
+
+		case RES_SRQ_ALLOCATED:
+			if (r->com.state != RES_SRQ_HW)
+				err = -EINVAL;
+			else if (atomic_read(&r->ref_count))
+				err = -EBUSY;
+			break;
+
+		case RES_SRQ_HW:
+			if (r->com.state != RES_SRQ_ALLOCATED)
+				err = -EINVAL;
+			break;
+
+		default:
+			err = -EINVAL;
+		}
+
+		if (!err) {
+			r->com.from_state = r->com.state;
+			r->com.to_state = state;
+			r->com.state = RES_SRQ_BUSY;
+			if (srq)
+				*srq = r;
+		}
+	}
+
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	return err;
+}
+
+static void res_abort_move(struct mlx4_dev *dev, int slave,
+			   enum mlx4_resource type, int id)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_common *r;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = radix_tree_lookup(&tracker->res_tree[type], id);
+	if (r && (r->owner == slave))
+		r->state = r->from_state;
+	spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void res_end_move(struct mlx4_dev *dev, int slave,
+			 enum mlx4_resource type, int id)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_common *r;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = radix_tree_lookup(&tracker->res_tree[type], id);
+	if (r && (r->owner == slave))
+		r->state = r->to_state;
+	spin_unlock_irq(mlx4_tlock(dev));
+}
+
1016static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1017{
1018 return mlx4_is_qp_reserved(dev, qpn);
1019}
1020
1021static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1022 u64 in_param, u64 *out_param)
1023{
1024 int err;
1025 int count;
1026 int align;
1027 int base;
1028 int qpn;
1029
1030 switch (op) {
1031 case RES_OP_RESERVE:
1032 count = get_param_l(&in_param);
1033 align = get_param_h(&in_param);
1034 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1035 if (err)
1036 return err;
1037
1038 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1039 if (err) {
1040 __mlx4_qp_release_range(dev, base, count);
1041 return err;
1042 }
1043 set_param_l(out_param, base);
1044 break;
1045 case RES_OP_MAP_ICM:
1046 qpn = get_param_l(&in_param) & 0x7fffff;
1047 if (valid_reserved(dev, slave, qpn)) {
1048 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1049 if (err)
1050 return err;
1051 }
1052
1053 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1054 NULL, 1);
1055 if (err)
1056 return err;
1057
1058 if (!valid_reserved(dev, slave, qpn)) {
1059 err = __mlx4_qp_alloc_icm(dev, qpn);
1060 if (err) {
1061 res_abort_move(dev, slave, RES_QP, qpn);
1062 return err;
1063 }
1064 }
1065
1066 res_end_move(dev, slave, RES_QP, qpn);
1067 break;
1068
1069 default:
1070 err = -EINVAL;
1071 break;
1072 }
1073 return err;
1074}
1075
1076static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1077 u64 in_param, u64 *out_param)
1078{
1079 int err = -EINVAL;
1080 int base;
1081 int order;
1082
1083 if (op != RES_OP_RESERVE_AND_MAP)
1084 return err;
1085
1086 order = get_param_l(&in_param);
1087 base = __mlx4_alloc_mtt_range(dev, order);
1088 if (base == -1)
1089 return -ENOMEM;
1090
1091 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1092 if (err)
1093 __mlx4_free_mtt_range(dev, base, order);
1094 else
1095 set_param_l(out_param, base);
1096
1097 return err;
1098}
1099
1100static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1101 u64 in_param, u64 *out_param)
1102{
1103 int err = -EINVAL;
1104 int index;
1105 int id;
1106 struct res_mpt *mpt;
1107
1108 switch (op) {
1109 case RES_OP_RESERVE:
1110 index = __mlx4_mr_reserve(dev);
1111 if (index == -1)
1112 break;
1113 id = index & mpt_mask(dev);
1114
1115 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1116 if (err) {
1117 __mlx4_mr_release(dev, index);
1118 break;
1119 }
1120 set_param_l(out_param, index);
1121 break;
1122 case RES_OP_MAP_ICM:
1123 index = get_param_l(&in_param);
1124 id = index & mpt_mask(dev);
1125 err = mr_res_start_move_to(dev, slave, id,
1126 RES_MPT_MAPPED, &mpt);
1127 if (err)
1128 return err;
1129
1130 err = __mlx4_mr_alloc_icm(dev, mpt->key);
1131 if (err) {
1132 res_abort_move(dev, slave, RES_MPT, id);
1133 return err;
1134 }
1135
1136 res_end_move(dev, slave, RES_MPT, id);
1137 break;
1138 }
1139 return err;
1140}
1141
1142static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1143 u64 in_param, u64 *out_param)
1144{
1145 int cqn;
1146 int err;
1147
1148 switch (op) {
1149 case RES_OP_RESERVE_AND_MAP:
1150 err = __mlx4_cq_alloc_icm(dev, &cqn);
1151 if (err)
1152 break;
1153
1154 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1155 if (err) {
1156 __mlx4_cq_free_icm(dev, cqn);
1157 break;
1158 }
1159
1160 set_param_l(out_param, cqn);
1161 break;
1162
1163 default:
1164 err = -EINVAL;
1165 }
1166
1167 return err;
1168}
1169
1170static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1171 u64 in_param, u64 *out_param)
1172{
1173 int srqn;
1174 int err;
1175
1176 switch (op) {
1177 case RES_OP_RESERVE_AND_MAP:
1178 err = __mlx4_srq_alloc_icm(dev, &srqn);
1179 if (err)
1180 break;
1181
1182 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1183 if (err) {
1184 __mlx4_srq_free_icm(dev, srqn);
1185 break;
1186 }
1187
1188 set_param_l(out_param, srqn);
1189 break;
1190
1191 default:
1192 err = -EINVAL;
1193 }
1194
1195 return err;
1196}
1197
1198static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1199{
1200 struct mlx4_priv *priv = mlx4_priv(dev);
1201 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1202 struct mac_res *res;
1203
1204 res = kzalloc(sizeof *res, GFP_KERNEL);
1205 if (!res)
1206 return -ENOMEM;
1207 res->mac = mac;
1208 res->port = (u8) port;
1209 list_add_tail(&res->list,
1210 &tracker->slave_list[slave].res_list[RES_MAC]);
1211 return 0;
1212}
1213
1214static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1215 int port)
1216{
1217 struct mlx4_priv *priv = mlx4_priv(dev);
1218 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1219 struct list_head *mac_list =
1220 &tracker->slave_list[slave].res_list[RES_MAC];
1221 struct mac_res *res, *tmp;
1222
1223 list_for_each_entry_safe(res, tmp, mac_list, list) {
1224 if (res->mac == mac && res->port == (u8) port) {
1225 list_del(&res->list);
1226 kfree(res);
1227 break;
1228 }
1229 }
1230}
1231
1232static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1233{
1234 struct mlx4_priv *priv = mlx4_priv(dev);
1235 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1236 struct list_head *mac_list =
1237 &tracker->slave_list[slave].res_list[RES_MAC];
1238 struct mac_res *res, *tmp;
1239
1240 list_for_each_entry_safe(res, tmp, mac_list, list) {
1241 list_del(&res->list);
1242 __mlx4_unregister_mac(dev, res->port, res->mac);
1243 kfree(res);
1244 }
1245}
1246
1247static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1248 u64 in_param, u64 *out_param)
1249{
1250 int err = -EINVAL;
1251 int port;
1252 u64 mac;
1253
1254 if (op != RES_OP_RESERVE_AND_MAP)
1255 return err;
1256
1257 port = get_param_l(out_param);
1258 mac = in_param;
1259
1260 err = __mlx4_register_mac(dev, port, mac);
1261 if (err >= 0) {
1262 set_param_l(out_param, err);
1263 err = 0;
1264 }
1265
1266 if (!err) {
1267 err = mac_add_to_slave(dev, slave, mac, port);
1268 if (err)
1269 __mlx4_unregister_mac(dev, port, mac);
1270 }
1271 return err;
1272}
1273
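/* Dispatch a slave's ALLOC_RES command to the per-type allocation handler.
 * The resource type arrives in the VHCR in_modifier, the allocation opcode
 * in op_modifier.
 */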
1274int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1275 struct mlx4_vhcr *vhcr,
1276 struct mlx4_cmd_mailbox *inbox,
1277 struct mlx4_cmd_mailbox *outbox,
1278 struct mlx4_cmd_info *cmd)
1279{
1280 int err;
1281 int alop = vhcr->op_modifier;
1282
1283 switch (vhcr->in_modifier) {
1284 case RES_QP:
1285 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1286 vhcr->in_param, &vhcr->out_param);
1287 break;
1288
1289 case RES_MTT:
1290 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1291 vhcr->in_param, &vhcr->out_param);
1292 break;
1293
1294 case RES_MPT:
1295 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1296 vhcr->in_param, &vhcr->out_param);
1297 break;
1298
1299 case RES_CQ:
1300 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1301 vhcr->in_param, &vhcr->out_param);
1302 break;
1303
1304 case RES_SRQ:
1305 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1306 vhcr->in_param, &vhcr->out_param);
1307 break;
1308
1309 case RES_MAC:
1310 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1311 vhcr->in_param, &vhcr->out_param);
1312 break;
1313
1314 default:
1315 err = -EINVAL;
1316 break;
1317 }
1318
1319 return err;
1320}
1321
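/* Mirror of qp_alloc_res: RES_OP_RESERVE returns a reserved QP range to the
 * pool, while RES_OP_MAP_ICM frees the ICM backing of a single QP and moves
 * it back to the RES_QP_RESERVED state.
 */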
1322static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1323 u64 in_param)
1324{
1325 int err;
1326 int count;
1327 int base;
1328 int qpn;
1329
1330 switch (op) {
1331 case RES_OP_RESERVE:
1332 base = get_param_l(&in_param) & 0x7fffff;
1333 count = get_param_h(&in_param);
1334 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1335 if (err)
1336 break;
1337 __mlx4_qp_release_range(dev, base, count);
1338 break;
1339 case RES_OP_MAP_ICM:
1340 qpn = get_param_l(&in_param) & 0x7fffff;
1341 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1342 NULL, 0);
1343 if (err)
1344 return err;
1345
1346 if (!valid_reserved(dev, slave, qpn))
1347 __mlx4_qp_free_icm(dev, qpn);
1348
1349 res_end_move(dev, slave, RES_QP, qpn);
1350
1351 if (valid_reserved(dev, slave, qpn))
1352 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
1353 break;
1354 default:
1355 err = -EINVAL;
1356 break;
1357 }
1358 return err;
1359}
1360
1361static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1362 u64 in_param, u64 *out_param)
1363{
1364 int err = -EINVAL;
1365 int base;
1366 int order;
1367
1368 if (op != RES_OP_RESERVE_AND_MAP)
1369 return err;
1370
1371 base = get_param_l(&in_param);
1372 order = get_param_h(&in_param);
1373 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1374 if (!err)
1375 __mlx4_free_mtt_range(dev, base, order);
1376 return err;
1377}
1378
1379static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1380 u64 in_param)
1381{
1382 int err = -EINVAL;
1383 int index;
1384 int id;
1385 struct res_mpt *mpt;
1386
1387 switch (op) {
1388 case RES_OP_RESERVE:
1389 index = get_param_l(&in_param);
1390 id = index & mpt_mask(dev);
1391 err = get_res(dev, slave, id, RES_MPT, &mpt);
1392 if (err)
1393 break;
1394 index = mpt->key;
1395 put_res(dev, slave, id, RES_MPT);
1396
1397 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1398 if (err)
1399 break;
1400 __mlx4_mr_release(dev, index);
1401 break;
1402 case RES_OP_MAP_ICM:
1403 index = get_param_l(&in_param);
1404 id = index & mpt_mask(dev);
1405 err = mr_res_start_move_to(dev, slave, id,
1406 RES_MPT_RESERVED, &mpt);
1407 if (err)
1408 return err;
1409
1410 __mlx4_mr_free_icm(dev, mpt->key);
1411 res_end_move(dev, slave, RES_MPT, id);
1412 return err;
1414 default:
1415 err = -EINVAL;
1416 break;
1417 }
1418 return err;
1419}
1420
1421static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1422 u64 in_param, u64 *out_param)
1423{
1424 int cqn;
1425 int err;
1426
1427 switch (op) {
1428 case RES_OP_RESERVE_AND_MAP:
1429 cqn = get_param_l(&in_param);
1430 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1431 if (err)
1432 break;
1433
1434 __mlx4_cq_free_icm(dev, cqn);
1435 break;
1436
1437 default:
1438 err = -EINVAL;
1439 break;
1440 }
1441
1442 return err;
1443}
1444
1445static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1446 u64 in_param, u64 *out_param)
1447{
1448 int srqn;
1449 int err;
1450
1451 switch (op) {
1452 case RES_OP_RESERVE_AND_MAP:
1453 srqn = get_param_l(&in_param);
1454 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1455 if (err)
1456 break;
1457
1458 __mlx4_srq_free_icm(dev, srqn);
1459 break;
1460
1461 default:
1462 err = -EINVAL;
1463 break;
1464 }
1465
1466 return err;
1467}
1468
1469static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1470 u64 in_param, u64 *out_param)
1471{
1472 int port;
1473 int err = 0;
1474
1475 switch (op) {
1476 case RES_OP_RESERVE_AND_MAP:
1477 port = get_param_l(out_param);
1478 mac_del_from_slave(dev, slave, in_param, port);
1479 __mlx4_unregister_mac(dev, port, in_param);
1480 break;
1481 default:
1482 err = -EINVAL;
1483 break;
1484 }
1485
1486 return err;
1487
1488}
1489
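/* Dispatch a slave's FREE_RES command to the per-type release handler,
 * symmetric to mlx4_ALLOC_RES_wrapper above.
 */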
1490int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1491 struct mlx4_vhcr *vhcr,
1492 struct mlx4_cmd_mailbox *inbox,
1493 struct mlx4_cmd_mailbox *outbox,
1494 struct mlx4_cmd_info *cmd)
1495{
1496 int err = -EINVAL;
1497 int alop = vhcr->op_modifier;
1498
1499 switch (vhcr->in_modifier) {
1500 case RES_QP:
1501 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1502 vhcr->in_param);
1503 break;
1504
1505 case RES_MTT:
1506 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1507 vhcr->in_param, &vhcr->out_param);
1508 break;
1509
1510 case RES_MPT:
1511 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1512 vhcr->in_param);
1513 break;
1514
1515 case RES_CQ:
1516 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1517 vhcr->in_param, &vhcr->out_param);
1518 break;
1519
1520 case RES_SRQ:
1521 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1522 vhcr->in_param, &vhcr->out_param);
1523 break;
1524
1525 case RES_MAC:
1526 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1527 vhcr->in_param, &vhcr->out_param);
1528 break;
1529
1530 default:
1531 break;
1532 }
1533 return err;
1534}
1535
1536/* ugly but other choices are uglier */
1537static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1538{
1539 return (be32_to_cpu(mpt->flags) >> 9) & 1;
1540}
1541
1542static int mr_get_mtt_seg(struct mlx4_mpt_entry *mpt)
1543{
1544 return (int)be64_to_cpu(mpt->mtt_seg) & 0xfffffff8;
1545}
1546
1547static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1548{
1549 return be32_to_cpu(mpt->mtt_sz);
1550}
1551
1552static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
1553{
1554 return be32_to_cpu(mpt->pd_flags) & 0xffffff;
1555}
1556
1557static int qp_get_mtt_seg(struct mlx4_qp_context *qpc)
1558{
1559 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1560}
1561
1562static int srq_get_mtt_seg(struct mlx4_srq_context *srqc)
1563{
1564 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1565}
1566
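/* Compute the number of MTT pages implied by a QP context: send queue plus
 * receive queue memory (the latter is zero for SRQ, RSS and XRC QPs),
 * rounded up to a power of two pages.
 */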
1567static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1568{
1569 int page_shift = (qpc->log_page_size & 0x3f) + 12;
1570 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1571 	int log_sq_stride = qpc->sq_size_stride & 7;
1572 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1573 int log_rq_stride = qpc->rq_size_stride & 7;
1574 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1575 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1576 int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1577 int sq_size;
1578 int rq_size;
1579 int total_pages;
1580 int total_mem;
1581 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1582
1583 	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
1584 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1585 total_mem = sq_size + rq_size;
1586 total_pages =
1587 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1588 page_shift);
1589
1590 return total_pages;
1591}
1592
1593static int qp_get_pdn(struct mlx4_qp_context *qpc)
1594{
1595 return be32_to_cpu(qpc->pd) & 0xffffff;
1596}
1597
1598static int pdn2slave(int pdn)
1599{
1600 return (pdn >> NOT_MASKED_PD_BITS) - 1;
1601}
1602
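/* Verify that the MTT range [start, start + size) requested by the slave
 * lies entirely within an MTT allocation the slave owns.
 */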
1603static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1604 int size, struct res_mtt *mtt)
1605{
1606 int res_start = mtt->com.res_id * dev->caps.mtts_per_seg;
1607 int res_size = (1 << mtt->order) * dev->caps.mtts_per_seg;
1608
1609 if (start < res_start || start + size > res_start + res_size)
1610 return -EPERM;
1611 return 0;
1612}
1613
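/* SW2HW_MPT hands an MPT to the hardware.  For non-physical MRs, verify that
 * the referenced MTT range belongs to the slave and pin it; in all cases the
 * PD encoded in the entry must map back to this slave.
 */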
1614int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1615 struct mlx4_vhcr *vhcr,
1616 struct mlx4_cmd_mailbox *inbox,
1617 struct mlx4_cmd_mailbox *outbox,
1618 struct mlx4_cmd_info *cmd)
1619{
1620 int err;
1621 int index = vhcr->in_modifier;
1622 struct res_mtt *mtt;
1623 struct res_mpt *mpt;
1624 int mtt_base = (mr_get_mtt_seg(inbox->buf) / dev->caps.mtt_entry_sz) *
1625 dev->caps.mtts_per_seg;
1626 int phys;
1627 int id;
1628
1629 id = index & mpt_mask(dev);
1630 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1631 if (err)
1632 return err;
1633
1634 phys = mr_phys_mpt(inbox->buf);
1635 if (!phys) {
1636 err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg,
1637 RES_MTT, &mtt);
1638 if (err)
1639 goto ex_abort;
1640
1641 err = check_mtt_range(dev, slave, mtt_base,
1642 mr_get_mtt_size(inbox->buf), mtt);
1643 if (err)
1644 goto ex_put;
1645
1646 mpt->mtt = mtt;
1647 }
1648
1649 if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
1650 err = -EPERM;
1651 goto ex_put;
1652 }
1653
1654 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1655 if (err)
1656 goto ex_put;
1657
1658 if (!phys) {
1659 atomic_inc(&mtt->ref_count);
1660 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1661 }
1662
1663 res_end_move(dev, slave, RES_MPT, id);
1664 return 0;
1665
1666ex_put:
1667 if (!phys)
1668 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1669ex_abort:
1670 res_abort_move(dev, slave, RES_MPT, id);
1671
1672 return err;
1673}
1674
1675int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1676 struct mlx4_vhcr *vhcr,
1677 struct mlx4_cmd_mailbox *inbox,
1678 struct mlx4_cmd_mailbox *outbox,
1679 struct mlx4_cmd_info *cmd)
1680{
1681 int err;
1682 int index = vhcr->in_modifier;
1683 struct res_mpt *mpt;
1684 int id;
1685
1686 id = index & mpt_mask(dev);
1687 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
1688 if (err)
1689 return err;
1690
1691 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1692 if (err)
1693 goto ex_abort;
1694
1695 if (mpt->mtt)
1696 atomic_dec(&mpt->mtt->ref_count);
1697
1698 res_end_move(dev, slave, RES_MPT, id);
1699 return 0;
1700
1701ex_abort:
1702 res_abort_move(dev, slave, RES_MPT, id);
1703
1704 return err;
1705}
1706
1707int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
1708 struct mlx4_vhcr *vhcr,
1709 struct mlx4_cmd_mailbox *inbox,
1710 struct mlx4_cmd_mailbox *outbox,
1711 struct mlx4_cmd_info *cmd)
1712{
1713 int err;
1714 int index = vhcr->in_modifier;
1715 struct res_mpt *mpt;
1716 int id;
1717
1718 id = index & mpt_mask(dev);
1719 err = get_res(dev, slave, id, RES_MPT, &mpt);
1720 if (err)
1721 return err;
1722
1723 if (mpt->com.from_state != RES_MPT_HW) {
1724 err = -EBUSY;
1725 goto out;
1726 }
1727
1728 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1729
1730out:
1731 put_res(dev, slave, id, RES_MPT);
1732 return err;
1733}
1734
1735static int qp_get_rcqn(struct mlx4_qp_context *qpc)
1736{
1737 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
1738}
1739
1740static int qp_get_scqn(struct mlx4_qp_context *qpc)
1741{
1742 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
1743}
1744
1745static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
1746{
1747 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
1748}
1749
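/* RST2INIT pins everything the QP context points at (the MTT range, the
 * receive and send CQs and, if used, the SRQ), taking a reference on each
 * before the command is forwarded to the firmware.
 */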
1750int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1751 struct mlx4_vhcr *vhcr,
1752 struct mlx4_cmd_mailbox *inbox,
1753 struct mlx4_cmd_mailbox *outbox,
1754 struct mlx4_cmd_info *cmd)
1755{
1756 int err;
1757 int qpn = vhcr->in_modifier & 0x7fffff;
1758 struct res_mtt *mtt;
1759 struct res_qp *qp;
1760 struct mlx4_qp_context *qpc = inbox->buf + 8;
1761 int mtt_base = (qp_get_mtt_seg(qpc) / dev->caps.mtt_entry_sz) *
1762 dev->caps.mtts_per_seg;
1763 int mtt_size = qp_get_mtt_size(qpc);
1764 struct res_cq *rcq;
1765 struct res_cq *scq;
1766 int rcqn = qp_get_rcqn(qpc);
1767 int scqn = qp_get_scqn(qpc);
1768 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
1769 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
1770 struct res_srq *srq;
1771 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
1772
1773 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
1774 if (err)
1775 return err;
1776 qp->local_qpn = local_qpn;
1777
1778 err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
1779 &mtt);
1780 if (err)
1781 goto ex_abort;
1782
1783 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
1784 if (err)
1785 goto ex_put_mtt;
1786
1787 if (pdn2slave(qp_get_pdn(qpc)) != slave) {
1788 err = -EPERM;
1789 goto ex_put_mtt;
1790 }
1791
1792 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
1793 if (err)
1794 goto ex_put_mtt;
1795
1796 if (scqn != rcqn) {
1797 err = get_res(dev, slave, scqn, RES_CQ, &scq);
1798 if (err)
1799 goto ex_put_rcq;
1800 } else
1801 scq = rcq;
1802
1803 if (use_srq) {
1804 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
1805 if (err)
1806 goto ex_put_scq;
1807 }
1808
1809 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1810 if (err)
1811 goto ex_put_srq;
1812 atomic_inc(&mtt->ref_count);
1813 qp->mtt = mtt;
1814 atomic_inc(&rcq->ref_count);
1815 qp->rcq = rcq;
1816 atomic_inc(&scq->ref_count);
1817 qp->scq = scq;
1818
1819 if (scqn != rcqn)
1820 put_res(dev, slave, scqn, RES_CQ);
1821
1822 if (use_srq) {
1823 atomic_inc(&srq->ref_count);
1824 put_res(dev, slave, srqn, RES_SRQ);
1825 qp->srq = srq;
1826 }
1827 put_res(dev, slave, rcqn, RES_CQ);
1828 put_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT);
1829 res_end_move(dev, slave, RES_QP, qpn);
1830
1831 return 0;
1832
1833ex_put_srq:
1834 if (use_srq)
1835 put_res(dev, slave, srqn, RES_SRQ);
1836ex_put_scq:
1837 if (scqn != rcqn)
1838 put_res(dev, slave, scqn, RES_CQ);
1839ex_put_rcq:
1840 put_res(dev, slave, rcqn, RES_CQ);
1841ex_put_mtt:
1842 put_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT);
1843ex_abort:
1844 res_abort_move(dev, slave, RES_QP, qpn);
1845
1846 return err;
1847}
1848
1849static int eq_get_mtt_seg(struct mlx4_eq_context *eqc)
1850{
1851 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
1852}
1853
1854static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
1855{
1856 int log_eq_size = eqc->log_eq_size & 0x1f;
1857 int page_shift = (eqc->log_page_size & 0x3f) + 12;
1858
1859 if (log_eq_size + 5 < page_shift)
1860 return 1;
1861
1862 return 1 << (log_eq_size + 5 - page_shift);
1863}
1864
1865static int cq_get_mtt_seg(struct mlx4_cq_context *cqc)
1866{
1867 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
1868}
1869
1870static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
1871{
1872 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
1873 int page_shift = (cqc->log_page_size & 0x3f) + 12;
1874
1875 if (log_cq_size + 5 < page_shift)
1876 return 1;
1877
1878 return 1 << (log_cq_size + 5 - page_shift);
1879}
1880
1881int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
1882 struct mlx4_vhcr *vhcr,
1883 struct mlx4_cmd_mailbox *inbox,
1884 struct mlx4_cmd_mailbox *outbox,
1885 struct mlx4_cmd_info *cmd)
1886{
1887 int err;
1888 int eqn = vhcr->in_modifier;
1889 int res_id = (slave << 8) | eqn;
1890 struct mlx4_eq_context *eqc = inbox->buf;
1891 int mtt_base = (eq_get_mtt_seg(eqc) / dev->caps.mtt_entry_sz) *
1892 dev->caps.mtts_per_seg;
1893 int mtt_size = eq_get_mtt_size(eqc);
1894 struct res_eq *eq;
1895 struct res_mtt *mtt;
1896
1897 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
1898 if (err)
1899 return err;
1900 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
1901 if (err)
1902 goto out_add;
1903
1904 err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
1905 &mtt);
1906 if (err)
1907 goto out_move;
1908
1909 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
1910 if (err)
1911 goto out_put;
1912
1913 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1914 if (err)
1915 goto out_put;
1916
1917 atomic_inc(&mtt->ref_count);
1918 eq->mtt = mtt;
1919 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1920 res_end_move(dev, slave, RES_EQ, res_id);
1921 return 0;
1922
1923out_put:
1924 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1925out_move:
1926 res_abort_move(dev, slave, RES_EQ, res_id);
1927out_add:
1928 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
1929 return err;
1930}
1931
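/* Find the slave-owned MTT allocation that contains [start, start + len)
 * and mark it busy so it cannot be freed while the WRITE_MTT proceeds.
 */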
1932static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
1933 int len, struct res_mtt **res)
1934{
1935 struct mlx4_priv *priv = mlx4_priv(dev);
1936 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1937 struct res_mtt *mtt;
1938 int err = -EINVAL;
1939
1940 spin_lock_irq(mlx4_tlock(dev));
1941 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
1942 com.list) {
1943 if (!check_mtt_range(dev, slave, start, len, mtt)) {
1944 *res = mtt;
1945 mtt->com.from_state = mtt->com.state;
1946 mtt->com.state = RES_MTT_BUSY;
1947 err = 0;
1948 break;
1949 }
1950 }
1951 spin_unlock_irq(mlx4_tlock(dev));
1952
1953 return err;
1954}
1955
1956int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
1957 struct mlx4_vhcr *vhcr,
1958 struct mlx4_cmd_mailbox *inbox,
1959 struct mlx4_cmd_mailbox *outbox,
1960 struct mlx4_cmd_info *cmd)
1961{
1962 struct mlx4_mtt mtt;
1963 __be64 *page_list = inbox->buf;
1964 u64 *pg_list = (u64 *)page_list;
1965 int i;
1966 struct res_mtt *rmtt = NULL;
1967 int start = be64_to_cpu(page_list[0]);
1968 int npages = vhcr->in_modifier;
1969 int err;
1970
1971 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
1972 if (err)
1973 return err;
1974
1975 /* Call the SW implementation of write_mtt:
1976 * - Prepare a dummy mtt struct
1977 	 * - Translate inbox contents to simple addresses in host endianness */
1978 mtt.first_seg = 0;
1979 mtt.order = 0;
1980 mtt.page_shift = 0;
1981 for (i = 0; i < npages; ++i)
1982 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
1983
1984 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
1985 ((u64 *)page_list + 2));
1986
1987 if (rmtt)
1988 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
1989
1990 return err;
1991}
1992
1993int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
1994 struct mlx4_vhcr *vhcr,
1995 struct mlx4_cmd_mailbox *inbox,
1996 struct mlx4_cmd_mailbox *outbox,
1997 struct mlx4_cmd_info *cmd)
1998{
1999 int eqn = vhcr->in_modifier;
2000 int res_id = eqn | (slave << 8);
2001 struct res_eq *eq;
2002 int err;
2003
2004 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2005 if (err)
2006 return err;
2007
2008 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2009 if (err)
2010 goto ex_abort;
2011
2012 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2013 if (err)
2014 goto ex_put;
2015
2016 atomic_dec(&eq->mtt->ref_count);
2017 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2018 res_end_move(dev, slave, RES_EQ, res_id);
2019 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2020
2021 return 0;
2022
2023ex_put:
2024 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2025ex_abort:
2026 res_abort_move(dev, slave, RES_EQ, res_id);
2027
2028 return err;
2029}
2030
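/* Deliver an event to a slave: the EQE is copied into a mailbox and injected,
 * via the GEN_EQE firmware command, into the event queue the slave registered
 * for this event type.
 */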
2031int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2032{
2033 struct mlx4_priv *priv = mlx4_priv(dev);
2034 struct mlx4_slave_event_eq_info *event_eq;
2035 struct mlx4_cmd_mailbox *mailbox;
2036 u32 in_modifier = 0;
2037 int err;
2038 int res_id;
2039 struct res_eq *req;
2040
2041 if (!priv->mfunc.master.slave_state)
2042 return -EINVAL;
2043
2044 event_eq = &priv->mfunc.master.slave_state[slave].event_eq;
2045
2046 /* Create the event only if the slave is registered */
2047 if ((event_eq->event_type & (1 << eqe->type)) == 0)
2048 return 0;
2049
2050 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2051 res_id = (slave << 8) | event_eq->eqn;
2052 err = get_res(dev, slave, res_id, RES_EQ, &req);
2053 if (err)
2054 goto unlock;
2055
2056 if (req->com.from_state != RES_EQ_HW) {
2057 err = -EINVAL;
2058 goto put;
2059 }
2060
2061 mailbox = mlx4_alloc_cmd_mailbox(dev);
2062 if (IS_ERR(mailbox)) {
2063 err = PTR_ERR(mailbox);
2064 goto put;
2065 }
2066
2067 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2068 ++event_eq->token;
2069 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2070 }
2071
2072 memcpy(mailbox->buf, (u8 *) eqe, 28);
2073
2074 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2075
2076 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2077 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2078 MLX4_CMD_NATIVE);
2079
2080 put_res(dev, slave, res_id, RES_EQ);
2081 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2082 mlx4_free_cmd_mailbox(dev, mailbox);
2083 return err;
2084
2085put:
2086 put_res(dev, slave, res_id, RES_EQ);
2087
2088unlock:
2089 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2090 return err;
2091}
2092
2093int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2094 struct mlx4_vhcr *vhcr,
2095 struct mlx4_cmd_mailbox *inbox,
2096 struct mlx4_cmd_mailbox *outbox,
2097 struct mlx4_cmd_info *cmd)
2098{
2099 int eqn = vhcr->in_modifier;
2100 int res_id = eqn | (slave << 8);
2101 struct res_eq *eq;
2102 int err;
2103
2104 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2105 if (err)
2106 return err;
2107
2108 if (eq->com.from_state != RES_EQ_HW) {
2109 err = -EINVAL;
2110 goto ex_put;
2111 }
2112
2113 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2114
2115ex_put:
2116 put_res(dev, slave, res_id, RES_EQ);
2117 return err;
2118}
2119
2120int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2121 struct mlx4_vhcr *vhcr,
2122 struct mlx4_cmd_mailbox *inbox,
2123 struct mlx4_cmd_mailbox *outbox,
2124 struct mlx4_cmd_info *cmd)
2125{
2126 int err;
2127 int cqn = vhcr->in_modifier;
2128 struct mlx4_cq_context *cqc = inbox->buf;
2129 int mtt_base = (cq_get_mtt_seg(cqc) / dev->caps.mtt_entry_sz) *
2130 dev->caps.mtts_per_seg;
2131 struct res_cq *cq;
2132 struct res_mtt *mtt;
2133
2134 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2135 if (err)
2136 return err;
2137 err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
2138 &mtt);
2139 if (err)
2140 goto out_move;
2141 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2142 if (err)
2143 goto out_put;
2144 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2145 if (err)
2146 goto out_put;
2147 atomic_inc(&mtt->ref_count);
2148 cq->mtt = mtt;
2149 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2150 res_end_move(dev, slave, RES_CQ, cqn);
2151 return 0;
2152
2153out_put:
2154 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2155out_move:
2156 res_abort_move(dev, slave, RES_CQ, cqn);
2157 return err;
2158}
2159
2160int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2161 struct mlx4_vhcr *vhcr,
2162 struct mlx4_cmd_mailbox *inbox,
2163 struct mlx4_cmd_mailbox *outbox,
2164 struct mlx4_cmd_info *cmd)
2165{
2166 int err;
2167 int cqn = vhcr->in_modifier;
2168 struct res_cq *cq;
2169
2170 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2171 if (err)
2172 return err;
2173 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2174 if (err)
2175 goto out_move;
2176 atomic_dec(&cq->mtt->ref_count);
2177 res_end_move(dev, slave, RES_CQ, cqn);
2178 return 0;
2179
2180out_move:
2181 res_abort_move(dev, slave, RES_CQ, cqn);
2182 return err;
2183}
2184
2185int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2186 struct mlx4_vhcr *vhcr,
2187 struct mlx4_cmd_mailbox *inbox,
2188 struct mlx4_cmd_mailbox *outbox,
2189 struct mlx4_cmd_info *cmd)
2190{
2191 int cqn = vhcr->in_modifier;
2192 struct res_cq *cq;
2193 int err;
2194
2195 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2196 if (err)
2197 return err;
2198
2199 if (cq->com.from_state != RES_CQ_HW)
2200 goto ex_put;
2201
2202 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2203ex_put:
2204 put_res(dev, slave, cqn, RES_CQ);
2205
2206 return err;
2207}
2208
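/* CQ resize: validate the new MTT range, issue the command, then move the
 * CQ's MTT reference from the old range to the new one.
 */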
2209static int handle_resize(struct mlx4_dev *dev, int slave,
2210 struct mlx4_vhcr *vhcr,
2211 struct mlx4_cmd_mailbox *inbox,
2212 struct mlx4_cmd_mailbox *outbox,
2213 struct mlx4_cmd_info *cmd,
2214 struct res_cq *cq)
2215{
2216 int err;
2217 struct res_mtt *orig_mtt;
2218 struct res_mtt *mtt;
2219 struct mlx4_cq_context *cqc = inbox->buf;
2220 int mtt_base = (cq_get_mtt_seg(cqc) / dev->caps.mtt_entry_sz) *
2221 dev->caps.mtts_per_seg;
2222
2223 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2224 if (err)
2225 return err;
2226
2227 if (orig_mtt != cq->mtt) {
2228 err = -EINVAL;
2229 goto ex_put;
2230 }
2231
2232 err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
2233 &mtt);
2234 if (err)
2235 goto ex_put;
2236
2237 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2238 if (err)
2239 goto ex_put1;
2240 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2241 if (err)
2242 goto ex_put1;
2243 atomic_dec(&orig_mtt->ref_count);
2244 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2245 atomic_inc(&mtt->ref_count);
2246 cq->mtt = mtt;
2247 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2248 return 0;
2249
2250ex_put1:
2251 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2252ex_put:
2253 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2254
2255 return err;
2256
2257}
2258
2259int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2260 struct mlx4_vhcr *vhcr,
2261 struct mlx4_cmd_mailbox *inbox,
2262 struct mlx4_cmd_mailbox *outbox,
2263 struct mlx4_cmd_info *cmd)
2264{
2265 int cqn = vhcr->in_modifier;
2266 struct res_cq *cq;
2267 int err;
2268
2269 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2270 if (err)
2271 return err;
2272
2273 if (cq->com.from_state != RES_CQ_HW)
2274 goto ex_put;
2275
2276 if (vhcr->op_modifier == 0) {
2277 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2278 if (err)
2279 goto ex_put;
2280 }
2281
2282 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2283ex_put:
2284 put_res(dev, slave, cqn, RES_CQ);
2285
2286 return err;
2287}
2288
2289static int srq_get_pdn(struct mlx4_srq_context *srqc)
2290{
2291 return be32_to_cpu(srqc->pd) & 0xffffff;
2292}
2293
2294static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2295{
2296 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2297 int log_rq_stride = srqc->logstride & 7;
2298 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2299
2300 if (log_srq_size + log_rq_stride + 4 < page_shift)
2301 return 1;
2302
2303 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2304}
2305
2306int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2307 struct mlx4_vhcr *vhcr,
2308 struct mlx4_cmd_mailbox *inbox,
2309 struct mlx4_cmd_mailbox *outbox,
2310 struct mlx4_cmd_info *cmd)
2311{
2312 int err;
2313 int srqn = vhcr->in_modifier;
2314 struct res_mtt *mtt;
2315 struct res_srq *srq;
2316 struct mlx4_srq_context *srqc = inbox->buf;
2317 int mtt_base = (srq_get_mtt_seg(srqc) / dev->caps.mtt_entry_sz) *
2318 dev->caps.mtts_per_seg;
2319
2320 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2321 return -EINVAL;
2322
2323 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2324 if (err)
2325 return err;
2326 err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg,
2327 RES_MTT, &mtt);
2328 if (err)
2329 goto ex_abort;
2330 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2331 mtt);
2332 if (err)
2333 goto ex_put_mtt;
2334
2335 if (pdn2slave(srq_get_pdn(srqc)) != slave) {
2336 err = -EPERM;
2337 goto ex_put_mtt;
2338 }
2339
2340 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2341 if (err)
2342 goto ex_put_mtt;
2343
2344 atomic_inc(&mtt->ref_count);
2345 srq->mtt = mtt;
2346 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2347 res_end_move(dev, slave, RES_SRQ, srqn);
2348 return 0;
2349
2350ex_put_mtt:
2351 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2352ex_abort:
2353 res_abort_move(dev, slave, RES_SRQ, srqn);
2354
2355 return err;
2356}
2357
2358int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2359 struct mlx4_vhcr *vhcr,
2360 struct mlx4_cmd_mailbox *inbox,
2361 struct mlx4_cmd_mailbox *outbox,
2362 struct mlx4_cmd_info *cmd)
2363{
2364 int err;
2365 int srqn = vhcr->in_modifier;
2366 struct res_srq *srq;
2367
2368 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2369 if (err)
2370 return err;
2371 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2372 if (err)
2373 goto ex_abort;
2374 atomic_dec(&srq->mtt->ref_count);
2375 if (srq->cq)
2376 atomic_dec(&srq->cq->ref_count);
2377 res_end_move(dev, slave, RES_SRQ, srqn);
2378
2379 return 0;
2380
2381ex_abort:
2382 res_abort_move(dev, slave, RES_SRQ, srqn);
2383
2384 return err;
2385}
2386
2387int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2388 struct mlx4_vhcr *vhcr,
2389 struct mlx4_cmd_mailbox *inbox,
2390 struct mlx4_cmd_mailbox *outbox,
2391 struct mlx4_cmd_info *cmd)
2392{
2393 int err;
2394 int srqn = vhcr->in_modifier;
2395 struct res_srq *srq;
2396
2397 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2398 if (err)
2399 return err;
2400 if (srq->com.from_state != RES_SRQ_HW) {
2401 err = -EBUSY;
2402 goto out;
2403 }
2404 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2405out:
2406 put_res(dev, slave, srqn, RES_SRQ);
2407 return err;
2408}
2409
2410int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2411 struct mlx4_vhcr *vhcr,
2412 struct mlx4_cmd_mailbox *inbox,
2413 struct mlx4_cmd_mailbox *outbox,
2414 struct mlx4_cmd_info *cmd)
2415{
2416 int err;
2417 int srqn = vhcr->in_modifier;
2418 struct res_srq *srq;
2419
2420 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2421 if (err)
2422 return err;
2423
2424 if (srq->com.from_state != RES_SRQ_HW) {
2425 err = -EBUSY;
2426 goto out;
2427 }
2428
2429 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2430out:
2431 put_res(dev, slave, srqn, RES_SRQ);
2432 return err;
2433}
2434
2435int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2436 struct mlx4_vhcr *vhcr,
2437 struct mlx4_cmd_mailbox *inbox,
2438 struct mlx4_cmd_mailbox *outbox,
2439 struct mlx4_cmd_info *cmd)
2440{
2441 int err;
2442 int qpn = vhcr->in_modifier & 0x7fffff;
2443 struct res_qp *qp;
2444
2445 err = get_res(dev, slave, qpn, RES_QP, &qp);
2446 if (err)
2447 return err;
2448 if (qp->com.from_state != RES_QP_HW) {
2449 err = -EBUSY;
2450 goto out;
2451 }
2452
2453 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2454out:
2455 put_res(dev, slave, qpn, RES_QP);
2456 return err;
2457}
2458
2459int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2460 struct mlx4_vhcr *vhcr,
2461 struct mlx4_cmd_mailbox *inbox,
2462 struct mlx4_cmd_mailbox *outbox,
2463 struct mlx4_cmd_info *cmd)
2464{
2465 struct mlx4_qp_context *qpc = inbox->buf + 8;
2466
2467 update_ud_gid(dev, qpc, (u8)slave);
2468
2469 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2470}
2471
2472int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2473 struct mlx4_vhcr *vhcr,
2474 struct mlx4_cmd_mailbox *inbox,
2475 struct mlx4_cmd_mailbox *outbox,
2476 struct mlx4_cmd_info *cmd)
2477{
2478 int err;
2479 int qpn = vhcr->in_modifier & 0x7fffff;
2480 struct res_qp *qp;
2481
2482 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2483 if (err)
2484 return err;
2485 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2486 if (err)
2487 goto ex_abort;
2488
2489 atomic_dec(&qp->mtt->ref_count);
2490 atomic_dec(&qp->rcq->ref_count);
2491 atomic_dec(&qp->scq->ref_count);
2492 if (qp->srq)
2493 atomic_dec(&qp->srq->ref_count);
2494 res_end_move(dev, slave, RES_QP, qpn);
2495 return 0;
2496
2497ex_abort:
2498 res_abort_move(dev, slave, RES_QP, qpn);
2499
2500 return err;
2501}
2502
2503static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2504 struct res_qp *rqp, u8 *gid)
2505{
2506 struct res_gid *res;
2507
2508 list_for_each_entry(res, &rqp->mcg_list, list) {
2509 if (!memcmp(res->gid, gid, 16))
2510 return res;
2511 }
2512 return NULL;
2513}
2514
2515static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2516 u8 *gid, enum mlx4_protocol prot)
2517{
2518 struct res_gid *res;
2519 int err;
2520
2521 res = kzalloc(sizeof *res, GFP_KERNEL);
2522 if (!res)
2523 return -ENOMEM;
2524
2525 spin_lock_irq(&rqp->mcg_spl);
2526 if (find_gid(dev, slave, rqp, gid)) {
2527 kfree(res);
2528 err = -EEXIST;
2529 } else {
2530 memcpy(res->gid, gid, 16);
2531 res->prot = prot;
2532 list_add_tail(&res->list, &rqp->mcg_list);
2533 err = 0;
2534 }
2535 spin_unlock_irq(&rqp->mcg_spl);
2536
2537 return err;
2538}
2539
2540static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2541 u8 *gid, enum mlx4_protocol prot)
2542{
2543 struct res_gid *res;
2544 int err;
2545
2546 spin_lock_irq(&rqp->mcg_spl);
2547 res = find_gid(dev, slave, rqp, gid);
2548 if (!res || res->prot != prot)
2549 err = -EINVAL;
2550 else {
2551 list_del(&res->list);
2552 kfree(res);
2553 err = 0;
2554 }
2555 spin_unlock_irq(&rqp->mcg_spl);
2556
2557 return err;
2558}
2559
2560int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2561 struct mlx4_vhcr *vhcr,
2562 struct mlx4_cmd_mailbox *inbox,
2563 struct mlx4_cmd_mailbox *outbox,
2564 struct mlx4_cmd_info *cmd)
2565{
2566 struct mlx4_qp qp; /* dummy for calling attach/detach */
2567 u8 *gid = inbox->buf;
2568 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2569 int err, err1;
2570 int qpn;
2571 struct res_qp *rqp;
2572 int attach = vhcr->op_modifier;
2573 int block_loopback = vhcr->in_modifier >> 31;
2574 u8 steer_type_mask = 2;
2575 enum mlx4_steer_type type = gid[7] & steer_type_mask;
2576
2577 qpn = vhcr->in_modifier & 0xffffff;
2578 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2579 if (err)
2580 return err;
2581
2582 qp.qpn = qpn;
2583 if (attach) {
2584 err = add_mcg_res(dev, slave, rqp, gid, prot);
2585 if (err)
2586 goto ex_put;
2587
2588 err = mlx4_qp_attach_common(dev, &qp, gid,
2589 block_loopback, prot, type);
2590 if (err)
2591 goto ex_rem;
2592 } else {
2593 err = rem_mcg_res(dev, slave, rqp, gid, prot);
2594 if (err)
2595 goto ex_put;
2596 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2597 }
2598
2599 put_res(dev, slave, qpn, RES_QP);
2600 	return err;
2601
2602ex_rem:
2603 /* ignore error return below, already in error */
2604 err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
2605ex_put:
2606 put_res(dev, slave, qpn, RES_QP);
2607
2608 return err;
2609}
2610
2611enum {
2612 BUSY_MAX_RETRIES = 10
2613};
2614
2615int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
2616 struct mlx4_vhcr *vhcr,
2617 struct mlx4_cmd_mailbox *inbox,
2618 struct mlx4_cmd_mailbox *outbox,
2619 struct mlx4_cmd_info *cmd)
2620{
2621 int err;
2622 int index = vhcr->in_modifier & 0xffff;
2623
2624 err = get_res(dev, slave, index, RES_COUNTER, NULL);
2625 if (err)
2626 return err;
2627
2628 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2629 put_res(dev, slave, index, RES_COUNTER);
2630 return err;
2631}
2632
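/* Detach the QP from every multicast group it is still attached to, freeing
 * the tracked GID entries as we go.
 */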
2633static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2634{
2635 struct res_gid *rgid;
2636 struct res_gid *tmp;
2637 int err;
2638 struct mlx4_qp qp; /* dummy for calling attach/detach */
2639
2640 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2641 qp.qpn = rqp->local_qpn;
2642 err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
2643 MLX4_MC_STEER);
2644 list_del(&rgid->list);
2645 kfree(rgid);
2646 }
2647}
2648
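/* Make one pass over the slave's resources of the given type, claiming each
 * one for removal; returns the number of entries that were busy and could
 * not be claimed.
 */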
2649static int _move_all_busy(struct mlx4_dev *dev, int slave,
2650 enum mlx4_resource type, int print)
2651{
2652 struct mlx4_priv *priv = mlx4_priv(dev);
2653 struct mlx4_resource_tracker *tracker =
2654 &priv->mfunc.master.res_tracker;
2655 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
2656 struct res_common *r;
2657 struct res_common *tmp;
2658 int busy;
2659
2660 busy = 0;
2661 spin_lock_irq(mlx4_tlock(dev));
2662 list_for_each_entry_safe(r, tmp, rlist, list) {
2663 if (r->owner == slave) {
2664 if (!r->removing) {
2665 if (r->state == RES_ANY_BUSY) {
2666 if (print)
2667 mlx4_dbg(dev,
2668 "%s id 0x%x is busy\n",
2669 ResourceType(type),
2670 r->res_id);
2671 ++busy;
2672 } else {
2673 r->from_state = r->state;
2674 r->state = RES_ANY_BUSY;
2675 r->removing = 1;
2676 }
2677 }
2678 }
2679 }
2680 spin_unlock_irq(mlx4_tlock(dev));
2681
2682 return busy;
2683}
2684
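/* Retry _move_all_busy for up to five seconds, yielding the CPU between
 * passes, then make a final pass that logs any resources still busy.
 */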
2685static int move_all_busy(struct mlx4_dev *dev, int slave,
2686 enum mlx4_resource type)
2687{
2688 unsigned long begin;
2689 int busy;
2690
2691 begin = jiffies;
2692 do {
2693 busy = _move_all_busy(dev, slave, type, 0);
2694 if (time_after(jiffies, begin + 5 * HZ))
2695 break;
2696 if (busy)
2697 cond_resched();
2698 } while (busy);
2699
2700 if (busy)
2701 busy = _move_all_busy(dev, slave, type, 1);
2702
2703 return busy;
2704}
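
/* Reclaim all QPs owned by the slave: walk each QP back down its state
 * ladder (HW to MAPPED to RESERVED), issuing 2RST_QP and freeing ICM as
 * needed, until the tracker entry can be deleted.
 */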
2705static void rem_slave_qps(struct mlx4_dev *dev, int slave)
2706{
2707 struct mlx4_priv *priv = mlx4_priv(dev);
2708 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2709 struct list_head *qp_list =
2710 &tracker->slave_list[slave].res_list[RES_QP];
2711 struct res_qp *qp;
2712 struct res_qp *tmp;
2713 int state;
2714 u64 in_param;
2715 int qpn;
2716 int err;
2717
2718 err = move_all_busy(dev, slave, RES_QP);
2719 if (err)
2720 		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
2721 			  slave);
2722
2723 spin_lock_irq(mlx4_tlock(dev));
2724 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
2725 spin_unlock_irq(mlx4_tlock(dev));
2726 if (qp->com.owner == slave) {
2727 qpn = qp->com.res_id;
2728 detach_qp(dev, slave, qp);
2729 state = qp->com.from_state;
2730 while (state != 0) {
2731 switch (state) {
2732 case RES_QP_RESERVED:
2733 spin_lock_irq(mlx4_tlock(dev));
2734 radix_tree_delete(&tracker->res_tree[RES_QP],
2735 qp->com.res_id);
2736 list_del(&qp->com.list);
2737 spin_unlock_irq(mlx4_tlock(dev));
2738 kfree(qp);
2739 state = 0;
2740 break;
2741 case RES_QP_MAPPED:
2742 if (!valid_reserved(dev, slave, qpn))
2743 __mlx4_qp_free_icm(dev, qpn);
2744 state = RES_QP_RESERVED;
2745 break;
2746 case RES_QP_HW:
2747 in_param = slave;
2748 err = mlx4_cmd(dev, in_param,
2749 qp->local_qpn, 2,
2750 MLX4_CMD_2RST_QP,
2751 MLX4_CMD_TIME_CLASS_A,
2752 MLX4_CMD_NATIVE);
2753 if (err)
2754 mlx4_dbg(dev, "rem_slave_qps: failed"
2755 " to move slave %d qpn %d to"
2756 " reset\n", slave,
2757 qp->local_qpn);
2758 atomic_dec(&qp->rcq->ref_count);
2759 atomic_dec(&qp->scq->ref_count);
2760 atomic_dec(&qp->mtt->ref_count);
2761 if (qp->srq)
2762 atomic_dec(&qp->srq->ref_count);
2763 state = RES_QP_MAPPED;
2764 break;
2765 default:
2766 state = 0;
2767 }
2768 }
2769 }
2770 spin_lock_irq(mlx4_tlock(dev));
2771 }
2772 spin_unlock_irq(mlx4_tlock(dev));
2773}
2774
2775static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
2776{
2777 struct mlx4_priv *priv = mlx4_priv(dev);
2778 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2779 struct list_head *srq_list =
2780 &tracker->slave_list[slave].res_list[RES_SRQ];
2781 struct res_srq *srq;
2782 struct res_srq *tmp;
2783 int state;
2784 u64 in_param;
2786 int srqn;
2787 int err;
2788
2789 err = move_all_busy(dev, slave, RES_SRQ);
2790 if (err)
2791 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
2792 "busy for slave %d\n", slave);
2793
2794 spin_lock_irq(mlx4_tlock(dev));
2795 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
2796 spin_unlock_irq(mlx4_tlock(dev));
2797 if (srq->com.owner == slave) {
2798 srqn = srq->com.res_id;
2799 state = srq->com.from_state;
2800 while (state != 0) {
2801 switch (state) {
2802 case RES_SRQ_ALLOCATED:
2803 __mlx4_srq_free_icm(dev, srqn);
2804 spin_lock_irq(mlx4_tlock(dev));
2805 radix_tree_delete(&tracker->res_tree[RES_SRQ],
2806 srqn);
2807 list_del(&srq->com.list);
2808 spin_unlock_irq(mlx4_tlock(dev));
2809 kfree(srq);
2810 state = 0;
2811 break;
2812
2813 case RES_SRQ_HW:
2814 in_param = slave;
2815 err = mlx4_cmd(dev, in_param, srqn, 1,
2816 MLX4_CMD_HW2SW_SRQ,
2817 MLX4_CMD_TIME_CLASS_A,
2818 MLX4_CMD_NATIVE);
2819 if (err)
2820 mlx4_dbg(dev, "rem_slave_srqs: failed"
2821 " to move slave %d srq %d to"
2822 " SW ownership\n",
2823 slave, srqn);
2824
2825 atomic_dec(&srq->mtt->ref_count);
2826 if (srq->cq)
2827 atomic_dec(&srq->cq->ref_count);
2828 state = RES_SRQ_ALLOCATED;
2829 break;
2830
2831 default:
2832 state = 0;
2833 }
2834 }
2835 }
2836 spin_lock_irq(mlx4_tlock(dev));
2837 }
2838 spin_unlock_irq(mlx4_tlock(dev));
2839}
2840
2841static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
2842{
2843 struct mlx4_priv *priv = mlx4_priv(dev);
2844 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2845 struct list_head *cq_list =
2846 &tracker->slave_list[slave].res_list[RES_CQ];
2847 struct res_cq *cq;
2848 struct res_cq *tmp;
2849 int state;
2850 u64 in_param;
2852 int cqn;
2853 int err;
2854
2855 err = move_all_busy(dev, slave, RES_CQ);
2856 if (err)
2857 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
2858 "busy for slave %d\n", slave);
2859
2860 spin_lock_irq(mlx4_tlock(dev));
2861 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
2862 spin_unlock_irq(mlx4_tlock(dev));
2863 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
2864 cqn = cq->com.res_id;
2865 state = cq->com.from_state;
2866 while (state != 0) {
2867 switch (state) {
2868 case RES_CQ_ALLOCATED:
2869 __mlx4_cq_free_icm(dev, cqn);
2870 spin_lock_irq(mlx4_tlock(dev));
2871 radix_tree_delete(&tracker->res_tree[RES_CQ],
2872 cqn);
2873 list_del(&cq->com.list);
2874 spin_unlock_irq(mlx4_tlock(dev));
2875 kfree(cq);
2876 state = 0;
2877 break;
2878
2879 case RES_CQ_HW:
2880 in_param = slave;
2881 err = mlx4_cmd(dev, in_param, cqn, 1,
2882 MLX4_CMD_HW2SW_CQ,
2883 MLX4_CMD_TIME_CLASS_A,
2884 MLX4_CMD_NATIVE);
2885 if (err)
2886 mlx4_dbg(dev, "rem_slave_cqs: failed"
2887 " to move slave %d cq %d to"
2888 " SW ownership\n",
2889 slave, cqn);
2890 atomic_dec(&cq->mtt->ref_count);
2891 state = RES_CQ_ALLOCATED;
2892 break;
2893
2894 default:
2895 state = 0;
2896 }
2897 }
2898 }
2899 spin_lock_irq(mlx4_tlock(dev));
2900 }
2901 spin_unlock_irq(mlx4_tlock(dev));
2902}
2903
2904static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
2905{
2906 struct mlx4_priv *priv = mlx4_priv(dev);
2907 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2908 struct list_head *mpt_list =
2909 &tracker->slave_list[slave].res_list[RES_MPT];
2910 struct res_mpt *mpt;
2911 struct res_mpt *tmp;
2912 int state;
2913 u64 in_param;
2915 int mptn;
2916 int err;
2917
2918 err = move_all_busy(dev, slave, RES_MPT);
2919 if (err)
2920 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
2921 "busy for slave %d\n", slave);
2922
2923 spin_lock_irq(mlx4_tlock(dev));
2924 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
2925 spin_unlock_irq(mlx4_tlock(dev));
2926 if (mpt->com.owner == slave) {
2927 mptn = mpt->com.res_id;
2928 state = mpt->com.from_state;
2929 while (state != 0) {
2930 switch (state) {
2931 case RES_MPT_RESERVED:
2932 __mlx4_mr_release(dev, mpt->key);
2933 spin_lock_irq(mlx4_tlock(dev));
2934 radix_tree_delete(&tracker->res_tree[RES_MPT],
2935 mptn);
2936 list_del(&mpt->com.list);
2937 spin_unlock_irq(mlx4_tlock(dev));
2938 kfree(mpt);
2939 state = 0;
2940 break;
2941
2942 case RES_MPT_MAPPED:
2943 __mlx4_mr_free_icm(dev, mpt->key);
2944 state = RES_MPT_RESERVED;
2945 break;
2946
2947 case RES_MPT_HW:
2948 in_param = slave;
2949 err = mlx4_cmd(dev, in_param, mptn, 0,
2950 MLX4_CMD_HW2SW_MPT,
2951 MLX4_CMD_TIME_CLASS_A,
2952 MLX4_CMD_NATIVE);
2953 if (err)
2954 mlx4_dbg(dev, "rem_slave_mrs: failed"
2955 " to move slave %d mpt %d to"
2956 " SW ownership\n",
2957 slave, mptn);
2958 if (mpt->mtt)
2959 atomic_dec(&mpt->mtt->ref_count);
2960 state = RES_MPT_MAPPED;
2961 break;
2962 default:
2963 state = 0;
2964 }
2965 }
2966 }
2967 spin_lock_irq(mlx4_tlock(dev));
2968 }
2969 spin_unlock_irq(mlx4_tlock(dev));
2970}
2971
2972static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
2973{
2974 struct mlx4_priv *priv = mlx4_priv(dev);
2975 struct mlx4_resource_tracker *tracker =
2976 &priv->mfunc.master.res_tracker;
2977 struct list_head *mtt_list =
2978 &tracker->slave_list[slave].res_list[RES_MTT];
2979 struct res_mtt *mtt;
2980 struct res_mtt *tmp;
2981 int state;
2983 int base;
2984 int err;
2985
2986 err = move_all_busy(dev, slave, RES_MTT);
2987 if (err)
2988 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
2989 "busy for slave %d\n", slave);
2990
2991 spin_lock_irq(mlx4_tlock(dev));
2992 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
2993 spin_unlock_irq(mlx4_tlock(dev));
2994 if (mtt->com.owner == slave) {
2995 base = mtt->com.res_id;
2996 state = mtt->com.from_state;
2997 while (state != 0) {
2998 switch (state) {
2999 case RES_MTT_ALLOCATED:
3000 __mlx4_free_mtt_range(dev, base,
3001 mtt->order);
3002 spin_lock_irq(mlx4_tlock(dev));
3003 radix_tree_delete(&tracker->res_tree[RES_MTT],
3004 base);
3005 list_del(&mtt->com.list);
3006 spin_unlock_irq(mlx4_tlock(dev));
3007 kfree(mtt);
3008 state = 0;
3009 break;
3010
3011 default:
3012 state = 0;
3013 }
3014 }
3015 }
3016 spin_lock_irq(mlx4_tlock(dev));
3017 }
3018 spin_unlock_irq(mlx4_tlock(dev));
3019}
3020
3021static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3022{
3023 struct mlx4_priv *priv = mlx4_priv(dev);
3024 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3025 struct list_head *eq_list =
3026 &tracker->slave_list[slave].res_list[RES_EQ];
3027 struct res_eq *eq;
3028 struct res_eq *tmp;
3029 int err;
3030 int state;
3032 int eqn;
3033 struct mlx4_cmd_mailbox *mailbox;
3034
3035 err = move_all_busy(dev, slave, RES_EQ);
3036 if (err)
3037 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3038 "busy for slave %d\n", slave);
3039
3040 spin_lock_irq(mlx4_tlock(dev));
3041 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3042 spin_unlock_irq(mlx4_tlock(dev));
3043 if (eq->com.owner == slave) {
3044 eqn = eq->com.res_id;
3045 state = eq->com.from_state;
3046 while (state != 0) {
3047 switch (state) {
3048 case RES_EQ_RESERVED:
3049 spin_lock_irq(mlx4_tlock(dev));
3050 radix_tree_delete(&tracker->res_tree[RES_EQ],
3051 eqn);
3052 list_del(&eq->com.list);
3053 spin_unlock_irq(mlx4_tlock(dev));
3054 kfree(eq);
3055 state = 0;
3056 break;
3057
3058 case RES_EQ_HW:
3059 mailbox = mlx4_alloc_cmd_mailbox(dev);
3060 if (IS_ERR(mailbox)) {
3061 cond_resched();
3062 continue;
3063 }
3064 err = mlx4_cmd_box(dev, slave, 0,
3065 eqn & 0xff, 0,
3066 MLX4_CMD_HW2SW_EQ,
3067 MLX4_CMD_TIME_CLASS_A,
3068 MLX4_CMD_NATIVE);
3069 				if (err)
3070 					mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
3071 						 slave, eqn);
3072 mlx4_free_cmd_mailbox(dev, mailbox);
3073 if (!err) {
3074 atomic_dec(&eq->mtt->ref_count);
3075 state = RES_EQ_RESERVED;
3076 }
3077 break;
3078
3079 default:
3080 state = 0;
3081 }
3082 }
3083 }
3084 spin_lock_irq(mlx4_tlock(dev));
3085 }
3086 spin_unlock_irq(mlx4_tlock(dev));
3087}
3088
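/* FLR path: reclaim everything the slave owns, in dependency order: MACs and
 * QPs first, then SRQs, CQs, MRs and EQs, and finally the MTTs they
 * referenced.
 */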
3089void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3090{
3091 struct mlx4_priv *priv = mlx4_priv(dev);
3092
3093 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3094 	/* VLANs are not tracked yet */
3095 rem_slave_macs(dev, slave);
3096 rem_slave_qps(dev, slave);
3097 rem_slave_srqs(dev, slave);
3098 rem_slave_cqs(dev, slave);
3099 rem_slave_mrs(dev, slave);
3100 rem_slave_eqs(dev, slave);
3101 rem_slave_mtts(dev, slave);
3102 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3103}
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index ca9e1523718f..2823fffc6383 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -40,26 +40,6 @@
 #include "mlx4.h"
 #include "icm.h"
 
-struct mlx4_srq_context {
-	__be32			state_logsize_srqn;
-	u8			logstride;
-	u8			reserved1;
-	__be16			xrcd;
-	__be32			pg_offset_cqn;
-	u32			reserved2;
-	u8			log_page_size;
-	u8			reserved3[2];
-	u8			mtt_base_addr_h;
-	__be32			mtt_base_addr_l;
-	__be32			pd;
-	__be16			limit_watermark;
-	__be16			wqe_cnt;
-	u16			reserved4;
-	__be16			wqe_counter;
-	u32			reserved5;
-	__be64			db_rec_addr;
-};
-
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
 {
 	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
@@ -113,7 +93,7 @@ static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
 			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
-static int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
 {
 	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
 	int err;
@@ -158,7 +138,7 @@ static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
 	return __mlx4_srq_alloc_icm(dev, srqn);
 }
 
-static void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
 {
 	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
 