path: root/drivers/infiniband/hw
author	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-22 14:50:05 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-22 14:50:05 -0400
commit	7c034dfd58bbc056280262887acf5b7a98944d0a (patch)
tree	49dc3b77590f9929069db17922c823bb679c1456 /drivers/infiniband/hw
parent	1204c464458e9837320a326a9fce550e3c5ef5de (diff)
parent	c1c2fef6cfb04cf30a56bb37cff40d3498d7edbf (diff)
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull InfiniBand/RDMA updates from Roland Dreier:
 - IPoIB fixes from Doug Ledford and Erez Shitrit
 - iSER updates from Sagi Grimberg
 - mlx4 GUID handling changes from Yishai Hadas
 - other misc fixes

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (51 commits)
  mlx5: wrong page mask if CONFIG_ARCH_DMA_ADDR_T_64BIT enabled for 32Bit architectures
  IB/iser: Rewrite bounce buffer code path
  IB/iser: Bump version to 1.6
  IB/iser: Remove code duplication for a single DMA entry
  IB/iser: Pass struct iser_mem_reg to iser_fast_reg_mr and iser_reg_sig_mr
  IB/iser: Modify struct iser_mem_reg members
  IB/iser: Make fastreg pool cache friendly
  IB/iser: Move PI context alloc/free to routines
  IB/iser: Move fastreg descriptor pool get/put to helper functions
  IB/iser: Merge build page-vec into register page-vec
  IB/iser: Get rid of struct iser_rdma_regd
  IB/iser: Remove redundant assignments in iser_reg_page_vec
  IB/iser: Move memory reg/dereg routines to iser_memory.c
  IB/iser: Don't pass ib_device to fall_to_bounce_buff routine
  IB/iser: Remove a redundant struct iser_data_buf
  IB/iser: Remove redundant cmd_data_len calculation
  IB/iser: Fix wrong calculation of protection buffer length
  IB/iser: Handle fastreg/local_inv completion errors
  IB/iser: Fix unload during ep_poll wrong dereference
  ib_srpt: convert printk's to pr_* functions
  ...
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--	drivers/infiniband/hw/mlx4/alias_GUID.c	457
-rw-r--r--	drivers/infiniband/hw/mlx4/mad.c	9
-rw-r--r--	drivers/infiniband/hw/mlx4/main.c	26
-rw-r--r--	drivers/infiniband/hw/mlx4/mlx4_ib.h	14
-rw-r--r--	drivers/infiniband/hw/mlx4/qp.c	7
-rw-r--r--	drivers/infiniband/hw/mlx4/sysfs.c	44
6 files changed, 391 insertions(+), 166 deletions(-)
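
Several of the fixes below (in mad.c and qp.c) add the same defensive pattern after ib_dma_map_single(): the returned handle is checked with ib_dma_mapping_error() before it is used, and the freshly allocated buffer is freed if the mapping failed. A minimal sketch of that pattern in kernel context follows; the pv_buf structure, the map_one_rx_buf() name and the size parameter are illustrative stand-ins, not driver API:

	/* needs <linux/slab.h> and <rdma/ib_verbs.h> */
	struct pv_buf {
		void *addr;	/* CPU address of the receive buffer */
		u64   map;	/* DMA address returned by ib_dma_map_single() */
	};

	static int map_one_rx_buf(struct ib_device *ibdev, struct pv_buf *buf,
				  size_t size)
	{
		buf->addr = kmalloc(size, GFP_KERNEL);
		if (!buf->addr)
			return -ENOMEM;

		buf->map = ib_dma_map_single(ibdev, buf->addr, size,
					     DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ibdev, buf->map)) {
			/* mapping failed: release the buffer before erroring out */
			kfree(buf->addr);
			return -ENOMEM;
		}
		return 0;
	}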
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index a31e031afd87..0f00204d2ece 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -58,14 +58,19 @@ struct mlx4_alias_guid_work_context {
 	int query_id;
 	struct list_head list;
 	int block_num;
+	ib_sa_comp_mask guid_indexes;
+	u8 method;
 };
 
 struct mlx4_next_alias_guid_work {
 	u8 port;
 	u8 block_num;
+	u8 method;
 	struct mlx4_sriov_alias_guid_info_rec_det rec_det;
 };
 
+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+				     int *resched_delay_sec);
 
 void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
 					 u8 port_num, u8 *p_data)
@@ -118,6 +123,57 @@ ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
 	return IB_SA_COMP_MASK(4 + index);
 }
 
+void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
+				    int port, int slave_init)
+{
+	__be64 curr_guid, required_guid;
+	int record_num = slave / 8;
+	int index = slave % 8;
+	int port_index = port - 1;
+	unsigned long flags;
+	int do_work = 0;
+
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+	if (dev->sriov.alias_guid.ports_guid[port_index].state_flags &
+	    GUID_STATE_NEED_PORT_INIT)
+		goto unlock;
+	if (!slave_init) {
+		curr_guid = *(__be64 *)&dev->sriov.
+			alias_guid.ports_guid[port_index].
+				all_rec_per_port[record_num].
+					all_recs[GUID_REC_SIZE * index];
+		if (curr_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL) ||
+		    !curr_guid)
+			goto unlock;
+		required_guid = cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
+	} else {
+		required_guid = mlx4_get_admin_guid(dev->dev, slave, port);
+		if (required_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+			goto unlock;
+	}
+	*(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].
+			all_recs[GUID_REC_SIZE * index] = required_guid;
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].guid_indexes
+			|= mlx4_ib_get_aguid_comp_mask_from_ix(index);
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].status
+			= MLX4_GUID_INFO_STATUS_IDLE;
+	/* set to run immediately */
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].time_to_run = 0;
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].
+			guids_retry_schedule[index] = 0;
+	do_work = 1;
+unlock:
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+
+	if (do_work)
+		mlx4_ib_init_alias_guid_work(dev, port_index);
+}
+
 /*
  * Whenever new GUID is set/unset (guid table change) create event and
  * notify the relevant slave (master also should be notified).
@@ -138,10 +194,15 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 	enum slave_port_state prev_state;
 	__be64 tmp_cur_ag, form_cache_ag;
 	enum slave_port_gen_event gen_event;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec;
+	unsigned long flags;
+	__be64 required_value;
 
 	if (!mlx4_is_master(dev->dev))
 		return;
 
+	rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
+			all_rec_per_port[block_num];
 	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
 				   ports_guid[port_num - 1].
 				   all_rec_per_port[block_num].guid_indexes);
@@ -166,8 +227,27 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 		 */
 		if (tmp_cur_ag != form_cache_ag)
 			continue;
-		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
 
+		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+		required_value = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+
+		if (required_value == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+			required_value = 0;
+
+		if (tmp_cur_ag == required_value) {
+			rec->guid_indexes = rec->guid_indexes &
+				~mlx4_ib_get_aguid_comp_mask_from_ix(i);
+		} else {
+			/* may notify port down if value is 0 */
+			if (tmp_cur_ag != MLX4_NOT_SET_GUID) {
+				spin_unlock_irqrestore(&dev->sriov.
+					alias_guid.ag_work_lock, flags);
+				continue;
+			}
+		}
+		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
+				       flags);
+		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
 		/*2 cases: Valid GUID, and Invalid Guid*/
 
 		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/
@@ -188,10 +268,14 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
 						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
 						      &gen_event);
-			pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
-				 slave_id, port_num);
-			mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
-						       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+			if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
+				pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
+					 slave_id, port_num);
+				mlx4_gen_port_state_change_eqe(dev->dev,
+							       slave_id,
+							       port_num,
+							       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+			}
 		}
 	}
 }
@@ -206,6 +290,9 @@ static void aliasguid_query_handler(int status,
 	int i;
 	struct mlx4_sriov_alias_guid_info_rec_det *rec;
 	unsigned long flags, flags1;
+	ib_sa_comp_mask declined_guid_indexes = 0;
+	ib_sa_comp_mask applied_guid_indexes = 0;
+	unsigned int resched_delay_sec = 0;
 
 	if (!context)
 		return;
@@ -216,9 +303,9 @@ static void aliasguid_query_handler(int status,
 			all_rec_per_port[cb_ctx->block_num];
 
 	if (status) {
-		rec->status = MLX4_GUID_INFO_STATUS_IDLE;
 		pr_debug("(port: %d) failed: status = %d\n",
 			 cb_ctx->port, status);
+		rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC;
 		goto out;
 	}
 
@@ -235,57 +322,101 @@ static void aliasguid_query_handler(int status,
 	rec = &dev->sriov.alias_guid.ports_guid[port_index].
 		all_rec_per_port[guid_rec->block_num];
 
-	rec->status = MLX4_GUID_INFO_STATUS_SET;
-	rec->method = MLX4_GUID_INFO_RECORD_SET;
-
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
 	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
-		__be64 tmp_cur_ag;
-		tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
+		__be64 sm_response, required_val;
+
+		if (!(cb_ctx->guid_indexes &
+			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
+			continue;
+		sm_response = *(__be64 *)&guid_rec->guid_info_list
+				[i * GUID_REC_SIZE];
+		required_val = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+		if (cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE) {
+			if (required_val ==
+			    cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+				goto next_entry;
+
+			/* A new value was set till we got the response */
+			pr_debug("need to set new value %llx, record num %d, block_num:%d\n",
+				 be64_to_cpu(required_val),
+				 i, guid_rec->block_num);
+			goto entry_declined;
+		}
+
 		/* check if the SM didn't assign one of the records.
-		 * if it didn't, if it was not sysadmin request:
-		 * ask the SM to give a new GUID, (instead of the driver request).
+		 * if it didn't, re-ask for.
 		 */
-		if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
-			mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
-				"block_num: %d was declined by SM, "
-				"ownership by %d (0 = driver, 1=sysAdmin,"
-				" 2=None)\n", __func__, i,
-				guid_rec->block_num, rec->ownership);
-			if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
-				/* if it is driver assign, asks for new GUID from SM*/
-				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
-					MLX4_NOT_SET_GUID;
-
-				/* Mark the record as not assigned, and let it
-				 * be sent again in the next work sched.*/
-				rec->status = MLX4_GUID_INFO_STATUS_IDLE;
-				rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
-			}
+		if (sm_response == MLX4_NOT_SET_GUID) {
+			if (rec->guids_retry_schedule[i] == 0)
+				mlx4_ib_warn(&dev->ib_dev,
+					     "%s:Record num %d in block_num: %d was declined by SM\n",
+					     __func__, i,
+					     guid_rec->block_num);
+			goto entry_declined;
 		} else {
 			/* properly assigned record. */
 			/* We save the GUID we just got from the SM in the
 			 * admin_guid in order to be persistent, and in the
 			 * request from the sm the process will ask for the same GUID */
-			if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
-			    tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
-				/* the sysadmin assignment failed.*/
-				mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
-					     " admin guid after SysAdmin "
-					     "configuration. "
-					     "Record num %d in block_num:%d "
-					     "was declined by SM, "
-					     "new val(0x%llx) was kept\n",
-					      __func__, i,
-					      guid_rec->block_num,
-					      be64_to_cpu(*(__be64 *) &
-							 rec->all_recs[i * GUID_REC_SIZE]));
+			if (required_val &&
+			    sm_response != required_val) {
+				/* Warn only on first retry */
+				if (rec->guids_retry_schedule[i] == 0)
+					mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
+						     " admin guid after SysAdmin "
+						     "configuration. "
+						     "Record num %d in block_num:%d "
+						     "was declined by SM, "
+						     "new val(0x%llx) was kept, SM returned (0x%llx)\n",
+						     __func__, i,
+						     guid_rec->block_num,
+						     be64_to_cpu(required_val),
+						     be64_to_cpu(sm_response));
+				goto entry_declined;
 			} else {
-				memcpy(&rec->all_recs[i * GUID_REC_SIZE],
-				       &guid_rec->guid_info_list[i * GUID_REC_SIZE],
-				       GUID_REC_SIZE);
+				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
+					sm_response;
+				if (required_val == 0)
+					mlx4_set_admin_guid(dev->dev,
+							    sm_response,
+							    (guid_rec->block_num
+							    * NUM_ALIAS_GUID_IN_REC) + i,
+							    cb_ctx->port);
+				goto next_entry;
 			}
 		}
+entry_declined:
+		declined_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
+		rec->guids_retry_schedule[i] =
+			(rec->guids_retry_schedule[i] == 0) ? 1 :
+			min((unsigned int)60,
+			    rec->guids_retry_schedule[i] * 2);
+		/* using the minimum value among all entries in that record */
+		resched_delay_sec = (resched_delay_sec == 0) ?
+				rec->guids_retry_schedule[i] :
+				min(resched_delay_sec,
+				    rec->guids_retry_schedule[i]);
+		continue;
+
+next_entry:
+		rec->guids_retry_schedule[i] = 0;
 	}
+
+	applied_guid_indexes = cb_ctx->guid_indexes & ~declined_guid_indexes;
+	if (declined_guid_indexes ||
+	    rec->guid_indexes & ~(applied_guid_indexes)) {
+		pr_debug("record=%d wasn't fully set, guid_indexes=0x%llx applied_indexes=0x%llx, declined_indexes=0x%llx\n",
+			 guid_rec->block_num,
+			 be64_to_cpu((__force __be64)rec->guid_indexes),
+			 be64_to_cpu((__force __be64)applied_guid_indexes),
+			 be64_to_cpu((__force __be64)declined_guid_indexes));
+		rec->time_to_run = ktime_get_real_ns() +
+			resched_delay_sec * NSEC_PER_SEC;
+	} else {
+		rec->status = MLX4_GUID_INFO_STATUS_SET;
+	}
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
 	/*
 		The func is call here to close the cases when the
 		sm doesn't send smp, so in the sa response the driver
@@ -297,10 +428,13 @@ static void aliasguid_query_handler(int status,
 out:
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
-	if (!dev->sriov.is_going_down)
+	if (!dev->sriov.is_going_down) {
+		get_low_record_time_index(dev, port_index, &resched_delay_sec);
 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
 				   &dev->sriov.alias_guid.ports_guid[port_index].
-				   alias_guid_work, 0);
+				   alias_guid_work,
+				   msecs_to_jiffies(resched_delay_sec * 1000));
+	}
 	if (cb_ctx->sa_query) {
 		list_del(&cb_ctx->list);
 		kfree(cb_ctx);
@@ -317,9 +451,7 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
 	ib_sa_comp_mask comp_mask = 0;
 
 	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
-		= MLX4_GUID_INFO_STATUS_IDLE;
-	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
-		= MLX4_GUID_INFO_RECORD_SET;
+		= MLX4_GUID_INFO_STATUS_SET;
 
 	/* calculate the comp_mask for that record.*/
 	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
@@ -333,19 +465,21 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
 		   need to assign GUIDs, then don't put it up for assignment.
 		*/
 		if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
-		    (!index && !i) ||
-		    MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
-		    ports_guid[port - 1].all_rec_per_port[index].ownership)
+		    (!index && !i))
 			continue;
 		comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
 	}
 	dev->sriov.alias_guid.ports_guid[port - 1].
-		all_rec_per_port[index].guid_indexes = comp_mask;
+		all_rec_per_port[index].guid_indexes |= comp_mask;
+	if (dev->sriov.alias_guid.ports_guid[port - 1].
+	    all_rec_per_port[index].guid_indexes)
+		dev->sriov.alias_guid.ports_guid[port - 1].
+		all_rec_per_port[index].status = MLX4_GUID_INFO_STATUS_IDLE;
+
 }
 
 static int set_guid_rec(struct ib_device *ibdev,
-			u8 port, int index,
-			struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+			struct mlx4_next_alias_guid_work *rec)
 {
 	int err;
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
@@ -354,6 +488,9 @@ static int set_guid_rec(struct ib_device *ibdev,
 	struct ib_port_attr attr;
 	struct mlx4_alias_guid_work_context *callback_context;
 	unsigned long resched_delay, flags, flags1;
+	u8 port = rec->port + 1;
+	int index = rec->block_num;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec_det = &rec->rec_det;
 	struct list_head *head =
 		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
 
@@ -380,6 +517,8 @@ static int set_guid_rec(struct ib_device *ibdev,
 	callback_context->port = port;
 	callback_context->dev = dev;
 	callback_context->block_num = index;
+	callback_context->guid_indexes = rec_det->guid_indexes;
+	callback_context->method = rec->method;
 
 	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));
 
@@ -399,7 +538,7 @@ static int set_guid_rec(struct ib_device *ibdev,
 	callback_context->query_id =
 		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
 					  ibdev, port, &guid_info_rec,
-					  comp_mask, rec_det->method, 1000,
+					  comp_mask, rec->method, 1000,
 					  GFP_KERNEL, aliasguid_query_handler,
 					  callback_context,
 					  &callback_context->sa_query);
@@ -434,6 +573,30 @@ out:
 	return err;
 }
 
+static void mlx4_ib_guid_port_init(struct mlx4_ib_dev *dev, int port)
+{
+	int j, k, entry;
+	__be64 guid;
+
+	/*Check if the SM doesn't need to assign the GUIDs*/
+	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+		for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
+			entry = j * NUM_ALIAS_GUID_IN_REC + k;
+			/* no request for the 0 entry (hw guid) */
+			if (!entry || entry > dev->dev->persist->num_vfs ||
+			    !mlx4_is_slave_active(dev->dev, entry))
+				continue;
+			guid = mlx4_get_admin_guid(dev->dev, entry, port);
+			*(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
+				all_rec_per_port[j].all_recs
+				[GUID_REC_SIZE * k] = guid;
+			pr_debug("guid was set, entry=%d, val=0x%llx, port=%d\n",
+				 entry,
+				 be64_to_cpu(guid),
+				 port);
+		}
+	}
+}
 void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
 {
 	int i;
@@ -443,6 +606,13 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
 
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+
+	if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags &
+		GUID_STATE_NEED_PORT_INIT) {
+		mlx4_ib_guid_port_init(dev, port);
+		dev->sriov.alias_guid.ports_guid[port - 1].state_flags &=
+			(~GUID_STATE_NEED_PORT_INIT);
+	}
 	for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
 		invalidate_guid_record(dev, port, i);
 
@@ -462,60 +632,107 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
 	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
 }
 
-/* The function returns the next record that was
- * not configured (or failed to be configured) */
-static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
-				     struct mlx4_next_alias_guid_work *rec)
+static void set_required_record(struct mlx4_ib_dev *dev, u8 port,
+				struct mlx4_next_alias_guid_work *next_rec,
+				int record_index)
 {
-	int j;
-	unsigned long flags;
+	int i;
+	int lowset_time_entry = -1;
+	int lowest_time = 0;
+	ib_sa_comp_mask delete_guid_indexes = 0;
+	ib_sa_comp_mask set_guid_indexes = 0;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec =
+			&dev->sriov.alias_guid.ports_guid[port].
+			all_rec_per_port[record_index];
 
-	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
-		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
-		if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
-		    MLX4_GUID_INFO_STATUS_IDLE) {
-			memcpy(&rec->rec_det,
-			       &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
-			       sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
-			rec->port = port;
-			rec->block_num = j;
-			dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
-				MLX4_GUID_INFO_STATUS_PENDING;
-			spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
-			return 0;
+	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
+		if (!(rec->guid_indexes &
+			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
+			continue;
+
+		if (*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] ==
+				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+			delete_guid_indexes |=
+				mlx4_ib_get_aguid_comp_mask_from_ix(i);
+		else
+			set_guid_indexes |=
+				mlx4_ib_get_aguid_comp_mask_from_ix(i);
+
+		if (lowset_time_entry == -1 || rec->guids_retry_schedule[i] <=
+			lowest_time) {
+			lowset_time_entry = i;
+			lowest_time = rec->guids_retry_schedule[i];
 		}
-		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
 	}
-	return -ENOENT;
+
+	memcpy(&next_rec->rec_det, rec, sizeof(*rec));
+	next_rec->port = port;
+	next_rec->block_num = record_index;
+
+	if (*(__be64 *)&rec->all_recs[lowset_time_entry * GUID_REC_SIZE] ==
+				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) {
+		next_rec->rec_det.guid_indexes = delete_guid_indexes;
+		next_rec->method = MLX4_GUID_INFO_RECORD_DELETE;
+	} else {
+		next_rec->rec_det.guid_indexes = set_guid_indexes;
+		next_rec->method = MLX4_GUID_INFO_RECORD_SET;
+	}
 }
 
-static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
-					     int rec_index,
-					     struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+/* return index of record that should be updated based on lowest
+ * rescheduled time
+ */
+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+				     int *resched_delay_sec)
 {
-	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
-		rec_det->guid_indexes;
-	memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
-	       rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
-	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
-		rec_det->status;
+	int record_index = -1;
+	u64 low_record_time = 0;
+	struct mlx4_sriov_alias_guid_info_rec_det rec;
+	int j;
+
+	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+		rec = dev->sriov.alias_guid.ports_guid[port].
+			all_rec_per_port[j];
+		if (rec.status == MLX4_GUID_INFO_STATUS_IDLE &&
+		    rec.guid_indexes) {
+			if (record_index == -1 ||
+			    rec.time_to_run < low_record_time) {
+				record_index = j;
+				low_record_time = rec.time_to_run;
+			}
+		}
+	}
+	if (resched_delay_sec) {
+		u64 curr_time = ktime_get_real_ns();
+
+		*resched_delay_sec = (low_record_time < curr_time) ? 0 :
+			div_u64((low_record_time - curr_time), NSEC_PER_SEC);
+	}
+
+	return record_index;
 }
 
-static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
+/* The function returns the next record that was
+ * not configured (or failed to be configured) */
+static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
+				     struct mlx4_next_alias_guid_work *rec)
 {
-	int j;
-	struct mlx4_sriov_alias_guid_info_rec_det rec_det ;
-
-	for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT ; j++) {
-		memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
-		rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
-			IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
-			IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
-			IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
-			IB_SA_GUIDINFO_REC_GID7;
-		rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
-		set_administratively_guid_record(dev, port, j, &rec_det);
+	unsigned long flags;
+	int record_index;
+	int ret = 0;
+
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+	record_index = get_low_record_time_index(dev, port, NULL);
+
+	if (record_index < 0) {
+		ret = -ENOENT;
+		goto out;
 	}
+
+	set_required_record(dev, port, rec, record_index);
+out:
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+	return ret;
 }
 
 static void alias_guid_work(struct work_struct *work)
@@ -545,9 +762,7 @@ static void alias_guid_work(struct work_struct *work)
 		goto out;
 	}
 
-	set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
-		     &rec->rec_det);
-
+	set_guid_rec(&dev->ib_dev, rec);
 out:
 	kfree(rec);
 }
@@ -562,6 +777,12 @@ void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
 	if (!dev->sriov.is_going_down) {
+		/* If there is pending one should cancell then run, otherwise
+		 * won't run till previous one is ended as same work
+		 * struct is used.
+		 */
+		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
+				    alias_guid_work);
 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
 			   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
 	}
@@ -609,7 +830,7 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
 {
 	char alias_wq_name[15];
 	int ret = 0;
-	int i, j, k;
+	int i, j;
 	union ib_gid gid;
 
 	if (!mlx4_is_master(dev->dev))
@@ -633,33 +854,25 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
 	for (i = 0 ; i < dev->num_ports; i++) {
 		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
 		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
-		/*Check if the SM doesn't need to assign the GUIDs*/
+		dev->sriov.alias_guid.ports_guid[i].state_flags |=
+			GUID_STATE_NEED_PORT_INIT;
 		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
-			if (mlx4_ib_sm_guid_assign) {
-				dev->sriov.alias_guid.ports_guid[i].
-					all_rec_per_port[j].
-						ownership = MLX4_GUID_DRIVER_ASSIGN;
-				continue;
-			}
-			dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
-					ownership = MLX4_GUID_NONE_ASSIGN;
-			/*mark each val as it was deleted,
-			  till the sysAdmin will give it valid val*/
-			for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
-				*(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
-					all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
-						cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
-			}
+			/* mark each val as it was deleted */
+			memset(dev->sriov.alias_guid.ports_guid[i].
+				all_rec_per_port[j].all_recs, 0xFF,
+				sizeof(dev->sriov.alias_guid.ports_guid[i].
+				all_rec_per_port[j].all_recs));
 		}
 		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
 		/*prepare the records, set them to be allocated by sm*/
+		if (mlx4_ib_sm_guid_assign)
+			for (j = 1; j < NUM_ALIAS_GUID_PER_PORT; j++)
+				mlx4_set_admin_guid(dev->dev, 0, j, i + 1);
 		for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
 			invalidate_guid_record(dev, i + 1, j);
 
 		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
 		dev->sriov.alias_guid.ports_guid[i].port = i;
-		if (mlx4_ib_sm_guid_assign)
-			set_all_slaves_guids(dev, i);
 
 		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
 		dev->sriov.alias_guid.ports_guid[i].wq =
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 59040265e361..9cd2b002d7ae 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1430,6 +1430,10 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
 					tun_qp->ring[i].addr,
 					rx_buf_size,
 					DMA_FROM_DEVICE);
+		if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
+			kfree(tun_qp->ring[i].addr);
+			goto err;
+		}
 	}
 
 	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
@@ -1442,6 +1446,11 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
 					  tun_qp->tx_ring[i].buf.addr,
 					  tx_buf_size,
 					  DMA_TO_DEVICE);
+		if (ib_dma_mapping_error(ctx->ib_dev,
+					 tun_qp->tx_ring[i].buf.map)) {
+			kfree(tun_qp->tx_ring[i].buf.addr);
+			goto tx_err;
+		}
 		tun_qp->tx_ring[i].ah = NULL;
 	}
 	spin_lock_init(&tun_qp->tx_lock);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 976bea794b5f..57070c529dfb 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -66,9 +66,9 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
-int mlx4_ib_sm_guid_assign = 1;
+int mlx4_ib_sm_guid_assign = 0;
 module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
-MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
+MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
 
 static const char mlx4_ib_version[] =
 	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
@@ -2791,9 +2791,31 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 	case MLX4_DEV_EVENT_SLAVE_INIT:
 		/* here, p is the slave id */
 		do_slave_init(ibdev, p, 1);
+		if (mlx4_is_master(dev)) {
+			int i;
+
+			for (i = 1; i <= ibdev->num_ports; i++) {
+				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
+					== IB_LINK_LAYER_INFINIBAND)
+					mlx4_ib_slave_alias_guid_event(ibdev,
+								       p, i,
+								       1);
+			}
+		}
 		return;
 
 	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
+		if (mlx4_is_master(dev)) {
+			int i;
+
+			for (i = 1; i <= ibdev->num_ports; i++) {
+				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
+					== IB_LINK_LAYER_INFINIBAND)
+					mlx4_ib_slave_alias_guid_event(ibdev,
+								       p, i,
+								       0);
+			}
+		}
 		/* here, p is the slave id */
 		do_slave_init(ibdev, p, 0);
 		return;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f829fd935b79..fce3934372a1 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -342,14 +342,9 @@ struct mlx4_ib_ah {
 enum mlx4_guid_alias_rec_status {
 	MLX4_GUID_INFO_STATUS_IDLE,
 	MLX4_GUID_INFO_STATUS_SET,
-	MLX4_GUID_INFO_STATUS_PENDING,
 };
 
-enum mlx4_guid_alias_rec_ownership {
-	MLX4_GUID_DRIVER_ASSIGN,
-	MLX4_GUID_SYSADMIN_ASSIGN,
-	MLX4_GUID_NONE_ASSIGN, /*init state of each record*/
-};
+#define GUID_STATE_NEED_PORT_INIT 0x01
 
 enum mlx4_guid_alias_rec_method {
 	MLX4_GUID_INFO_RECORD_SET = IB_MGMT_METHOD_SET,
@@ -360,8 +355,8 @@ struct mlx4_sriov_alias_guid_info_rec_det {
 	u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
 	ib_sa_comp_mask guid_indexes; /*indicates what from the 8 records are valid*/
 	enum mlx4_guid_alias_rec_status status; /*indicates the administraively status of the record.*/
-	u8 method; /*set or delete*/
-	enum mlx4_guid_alias_rec_ownership ownership; /*indicates who assign that alias_guid record*/
+	unsigned int guids_retry_schedule[NUM_ALIAS_GUID_IN_REC];
+	u64 time_to_run;
 };
 
 struct mlx4_sriov_alias_guid_port_rec_det {
@@ -369,6 +364,7 @@ struct mlx4_sriov_alias_guid_port_rec_det {
 	struct workqueue_struct *wq;
 	struct delayed_work alias_guid_work;
 	u8 port;
+	u32 state_flags;
 	struct mlx4_sriov_alias_guid *parent;
 	struct list_head cb_list;
 };
@@ -802,6 +798,8 @@ int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
 void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
 			     struct attribute *attr);
 ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index);
+void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
+				    int port, int slave_init);
 
 int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device) ;
 
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ed2bd6701f9b..02fc91c68027 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -566,6 +566,10 @@ static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
 			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
 					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
 					  DMA_FROM_DEVICE);
+		if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
+			kfree(qp->sqp_proxy_rcv[i].addr);
+			goto err;
+		}
 	}
 	return 0;
 
@@ -2605,8 +2609,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
 
 	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
 
-	*lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
-				  wr->wr.ud.hlen);
+	*lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
 	*lso_seg_len = halign;
 	return 0;
 }
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index d10c2b8a5dad..6797108ce873 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -46,21 +46,17 @@
 static ssize_t show_admin_alias_guid(struct device *dev,
 			      struct device_attribute *attr, char *buf)
 {
-	int record_num;/*0-15*/
-	int guid_index_in_rec; /*0 - 7*/
 	struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry =
 		container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry);
 	struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
 	struct mlx4_ib_dev *mdev = port->dev;
+	__be64 sysadmin_ag_val;
 
-	record_num = mlx4_ib_iov_dentry->entry_num / 8 ;
-	guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8 ;
+	sysadmin_ag_val = mlx4_get_admin_guid(mdev->dev,
+					      mlx4_ib_iov_dentry->entry_num,
+					      port->num);
 
-	return sprintf(buf, "%llx\n",
-		       be64_to_cpu(*(__be64 *)&mdev->sriov.alias_guid.
-				   ports_guid[port->num - 1].
-				   all_rec_per_port[record_num].
-				   all_recs[8 * guid_index_in_rec]));
+	return sprintf(buf, "%llx\n", be64_to_cpu(sysadmin_ag_val));
 }
 
 /* store_admin_alias_guid stores the (new) administratively assigned value of that GUID.
@@ -80,6 +76,7 @@ static ssize_t store_admin_alias_guid(struct device *dev,
 	struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
 	struct mlx4_ib_dev *mdev = port->dev;
 	u64 sysadmin_ag_val;
+	unsigned long flags;
 
 	record_num = mlx4_ib_iov_dentry->entry_num / 8;
 	guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8;
@@ -87,6 +84,7 @@ static ssize_t store_admin_alias_guid(struct device *dev,
 		pr_err("GUID 0 block 0 is RO\n");
 		return count;
 	}
+	spin_lock_irqsave(&mdev->sriov.alias_guid.ag_work_lock, flags);
 	sscanf(buf, "%llx", &sysadmin_ag_val);
 	*(__be64 *)&mdev->sriov.alias_guid.ports_guid[port->num - 1].
 		all_rec_per_port[record_num].
@@ -96,33 +94,15 @@ static ssize_t store_admin_alias_guid(struct device *dev,
 	/* Change the state to be pending for update */
 	mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].status
 		= MLX4_GUID_INFO_STATUS_IDLE ;
-
-	mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method
-		= MLX4_GUID_INFO_RECORD_SET;
-
-	switch (sysadmin_ag_val) {
-	case MLX4_GUID_FOR_DELETE_VAL:
-		mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method
-			= MLX4_GUID_INFO_RECORD_DELETE;
-		mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
-			= MLX4_GUID_SYSADMIN_ASSIGN;
-		break;
-	/* The sysadmin requests the SM to re-assign */
-	case MLX4_NOT_SET_GUID:
-		mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
-			= MLX4_GUID_DRIVER_ASSIGN;
-		break;
-	/* The sysadmin requests a specific value.*/
-	default:
-		mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
-			= MLX4_GUID_SYSADMIN_ASSIGN;
-		break;
-	}
+	mlx4_set_admin_guid(mdev->dev, cpu_to_be64(sysadmin_ag_val),
+			    mlx4_ib_iov_dentry->entry_num,
+			    port->num);
 
 	/* set the record index */
 	mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].guid_indexes
-		= mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec);
+		|= mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec);
 
+	spin_unlock_irqrestore(&mdev->sriov.alias_guid.ag_work_lock, flags);
 	mlx4_ib_init_alias_guid_work(mdev, port->num - 1);
 
 	return count;