about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorJack Morgenstein <jackm@dev.mellanox.co.il>2011-12-12 23:13:05 -0500
committerDavid S. Miller <davem@davemloft.net>2011-12-13 13:56:06 -0500
commit3ec65b2be5bed241a0d7c01a54a5d64dcbaf1f2b (patch)
treea532efd476a90b57c6b977e711b63ae19b2eaebe /drivers
parent5cc914f10851d2dc17005c7d26cdd70adcbecbcd (diff)
mlx4_core: srq modifications for SRIOV
SRQs are resources which are allocated and tracked by the PF driver. In multifunction mode, the allocation and icm mapping is done in the resource tracker (later patch in this sequence).

To accomplish this, we have "work" functions whose names start with "__", and "request" functions (same name, no __). If we are operating in multifunction mode, the request function actually results in comm-channel commands being sent (ALLOC_RES or FREE_RES). The PF-driver comm-channel handler will ultimately invoke the "work" (__) function and return the result.

If we are not in multifunction mode, the "work" handler is invoked immediately.

Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/srq.c  106
1 file changed, 83 insertions(+), 23 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index f4ca096db62..ca9e1523718 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -31,6 +31,8 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/init.h>
35
34#include <linux/mlx4/cmd.h> 36#include <linux/mlx4/cmd.h>
35#include <linux/export.h> 37#include <linux/export.h>
36#include <linux/gfp.h> 38#include <linux/gfp.h>
@@ -85,8 +87,9 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
85static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 87static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
86 int srq_num) 88 int srq_num)
87{ 89{
88 return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ, 90 return mlx4_cmd(dev, mailbox->dma | dev->caps.function, srq_num, 0,
89 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 91 MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
92 MLX4_CMD_WRAPPED);
90} 93}
91 94
92static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 95static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -110,32 +113,93 @@ static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
110 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 113 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
111} 114}
112 115
113int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, 116static int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
114 struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
115{ 117{
116 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; 118 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
117 struct mlx4_cmd_mailbox *mailbox;
118 struct mlx4_srq_context *srq_context;
119 u64 mtt_addr;
120 int err; 119 int err;
121 120
122 srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap); 121
123 if (srq->srqn == -1) 122 *srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
123 if (*srqn == -1)
124 return -ENOMEM; 124 return -ENOMEM;
125 125
126 err = mlx4_table_get(dev, &srq_table->table, srq->srqn); 126 err = mlx4_table_get(dev, &srq_table->table, *srqn);
127 if (err) 127 if (err)
128 goto err_out; 128 goto err_out;
129 129
130 err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn); 130 err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
131 if (err) 131 if (err)
132 goto err_put; 132 goto err_put;
133 return 0;
134
135err_put:
136 mlx4_table_put(dev, &srq_table->table, *srqn);
137
138err_out:
139 mlx4_bitmap_free(&srq_table->bitmap, *srqn);
140 return err;
141}
142
143static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
144{
145 u64 out_param;
146 int err;
147
148 if (mlx4_is_mfunc(dev)) {
149 err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
150 RES_OP_RESERVE_AND_MAP,
151 MLX4_CMD_ALLOC_RES,
152 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
153 if (!err)
154 *srqn = get_param_l(&out_param);
155
156 return err;
157 }
158 return __mlx4_srq_alloc_icm(dev, srqn);
159}
160
161static void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
162{
163 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
164
165 mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
166 mlx4_table_put(dev, &srq_table->table, srqn);
167 mlx4_bitmap_free(&srq_table->bitmap, srqn);
168}
169
170static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
171{
172 u64 in_param;
173
174 if (mlx4_is_mfunc(dev)) {
175 set_param_l(&in_param, srqn);
176 if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
177 MLX4_CMD_FREE_RES,
178 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
179 mlx4_warn(dev, "Failed freeing cq:%d\n", srqn);
180 return;
181 }
182 __mlx4_srq_free_icm(dev, srqn);
183}
184
185int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
186 struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
187{
188 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
189 struct mlx4_cmd_mailbox *mailbox;
190 struct mlx4_srq_context *srq_context;
191 u64 mtt_addr;
192 int err;
193
194 err = mlx4_srq_alloc_icm(dev, &srq->srqn);
195 if (err)
196 return err;
133 197
134 spin_lock_irq(&srq_table->lock); 198 spin_lock_irq(&srq_table->lock);
135 err = radix_tree_insert(&srq_table->tree, srq->srqn, srq); 199 err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
136 spin_unlock_irq(&srq_table->lock); 200 spin_unlock_irq(&srq_table->lock);
137 if (err) 201 if (err)
138 goto err_cmpt_put; 202 goto err_icm;
139 203
140 mailbox = mlx4_alloc_cmd_mailbox(dev); 204 mailbox = mlx4_alloc_cmd_mailbox(dev);
141 if (IS_ERR(mailbox)) { 205 if (IS_ERR(mailbox)) {
@@ -174,15 +238,8 @@ err_radix:
174 radix_tree_delete(&srq_table->tree, srq->srqn); 238 radix_tree_delete(&srq_table->tree, srq->srqn);
175 spin_unlock_irq(&srq_table->lock); 239 spin_unlock_irq(&srq_table->lock);
176 240
177err_cmpt_put: 241err_icm:
178 mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn); 242 mlx4_srq_free_icm(dev, srq->srqn);
179
180err_put:
181 mlx4_table_put(dev, &srq_table->table, srq->srqn);
182
183err_out:
184 mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
185
186 return err; 243 return err;
187} 244}
188EXPORT_SYMBOL_GPL(mlx4_srq_alloc); 245EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
@@ -204,8 +261,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
204 complete(&srq->free); 261 complete(&srq->free);
205 wait_for_completion(&srq->free); 262 wait_for_completion(&srq->free);
206 263
207 mlx4_table_put(dev, &srq_table->table, srq->srqn); 264 mlx4_srq_free_icm(dev, srq->srqn);
208 mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
209} 265}
210EXPORT_SYMBOL_GPL(mlx4_srq_free); 266EXPORT_SYMBOL_GPL(mlx4_srq_free);
211 267
@@ -245,6 +301,8 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
245 301
246 spin_lock_init(&srq_table->lock); 302 spin_lock_init(&srq_table->lock);
247 INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); 303 INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
304 if (mlx4_is_slave(dev))
305 return 0;
248 306
249 err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, 307 err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
250 dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0); 308 dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
@@ -256,5 +314,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
256 314
257void mlx4_cleanup_srq_table(struct mlx4_dev *dev) 315void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
258{ 316{
317 if (mlx4_is_slave(dev))
318 return;
259 mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap); 319 mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
260} 320}