Diffstat (limited to 'drivers/infiniband/hw/mlx4/alias_GUID.c')
 drivers/infiniband/hw/mlx4/alias_GUID.c | 314
 1 file changed, 239 insertions(+), 75 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index a31e031afd87..a968388b8176 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -58,14 +58,19 @@ struct mlx4_alias_guid_work_context {
 	int query_id;
 	struct list_head list;
 	int block_num;
+	ib_sa_comp_mask guid_indexes;
+	u8 method;
 };
 
 struct mlx4_next_alias_guid_work {
 	u8 port;
 	u8 block_num;
+	u8 method;
 	struct mlx4_sriov_alias_guid_info_rec_det rec_det;
 };
 
+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+				     int *resched_delay_sec);
 
 void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
 					 u8 port_num, u8 *p_data)
@@ -138,10 +143,15 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 	enum slave_port_state prev_state;
 	__be64 tmp_cur_ag, form_cache_ag;
 	enum slave_port_gen_event gen_event;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec;
+	unsigned long flags;
+	__be64 required_value;
 
 	if (!mlx4_is_master(dev->dev))
 		return;
 
+	rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
+			all_rec_per_port[block_num];
 	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
 				   ports_guid[port_num - 1].
 				   all_rec_per_port[block_num].guid_indexes);
@@ -166,8 +176,27 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 		 */
 		if (tmp_cur_ag != form_cache_ag)
 			continue;
-		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
 
+		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+		required_value = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+
+		if (required_value == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+			required_value = 0;
+
+		if (tmp_cur_ag == required_value) {
+			rec->guid_indexes = rec->guid_indexes &
+				~mlx4_ib_get_aguid_comp_mask_from_ix(i);
+		} else {
+			/* may notify port down if value is 0 */
+			if (tmp_cur_ag != MLX4_NOT_SET_GUID) {
+				spin_unlock_irqrestore(&dev->sriov.
+					alias_guid.ag_work_lock, flags);
+				continue;
+			}
+		}
+		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
+				       flags);
+		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
 		/*2 cases: Valid GUID, and Invalid Guid*/
 
 		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/
@@ -188,10 +217,14 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
 						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
 						      &gen_event);
-			pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
-				 slave_id, port_num);
-			mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
-						       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+			if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
+				pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
+					 slave_id, port_num);
+				mlx4_gen_port_state_change_eqe(dev->dev,
+							       slave_id,
+							       port_num,
+							       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+			}
 		}
 	}
 }
@@ -206,6 +239,9 @@ static void aliasguid_query_handler(int status,
 	int i;
 	struct mlx4_sriov_alias_guid_info_rec_det *rec;
 	unsigned long flags, flags1;
+	ib_sa_comp_mask declined_guid_indexes = 0;
+	ib_sa_comp_mask applied_guid_indexes = 0;
+	unsigned int resched_delay_sec = 0;
 
 	if (!context)
 		return;
@@ -216,9 +252,9 @@ static void aliasguid_query_handler(int status,
 		all_rec_per_port[cb_ctx->block_num];
 
 	if (status) {
-		rec->status = MLX4_GUID_INFO_STATUS_IDLE;
 		pr_debug("(port: %d) failed: status = %d\n",
 			 cb_ctx->port, status);
+		rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC;
 		goto out;
 	}
 
@@ -235,57 +271,97 @@ static void aliasguid_query_handler(int status,
 	rec = &dev->sriov.alias_guid.ports_guid[port_index].
 		all_rec_per_port[guid_rec->block_num];
 
-	rec->status = MLX4_GUID_INFO_STATUS_SET;
-	rec->method = MLX4_GUID_INFO_RECORD_SET;
-
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
 	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
-		__be64 tmp_cur_ag;
-		tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
+		__be64 sm_response, required_val;
+
+		if (!(cb_ctx->guid_indexes &
+			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
+			continue;
+		sm_response = *(__be64 *)&guid_rec->guid_info_list
+				[i * GUID_REC_SIZE];
+		required_val = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+		if (cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE) {
+			if (required_val ==
+			    cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+				goto next_entry;
+
+			/* A new value was set till we got the response */
+			pr_debug("need to set new value %llx, record num %d, block_num:%d\n",
+				 be64_to_cpu(required_val),
+				 i, guid_rec->block_num);
+			goto entry_declined;
+		}
+
 		/* check if the SM didn't assign one of the records.
-		 * if it didn't, if it was not sysadmin request:
-		 * ask the SM to give a new GUID, (instead of the driver request).
+		 * if it didn't, re-ask for.
 		 */
-		if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
-			mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
-				     "block_num: %d was declined by SM, "
-				     "ownership by %d (0 = driver, 1=sysAdmin,"
-				     " 2=None)\n", __func__, i,
-				     guid_rec->block_num, rec->ownership);
-			if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
-				/* if it is driver assign, asks for new GUID from SM*/
-				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
-					MLX4_NOT_SET_GUID;
-
-				/* Mark the record as not assigned, and let it
-				 * be sent again in the next work sched.*/
-				rec->status = MLX4_GUID_INFO_STATUS_IDLE;
-				rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
-			}
+		if (sm_response == MLX4_NOT_SET_GUID) {
+			if (rec->guids_retry_schedule[i] == 0)
+				mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
+					     "block_num: %d was declined by SM, "
+					     "ownership by %d (0 = driver, 1=sysAdmin,"
+					     " 2=None)\n", __func__, i,
+					     guid_rec->block_num,
+					     rec->ownership);
+			goto entry_declined;
 		} else {
 			/* properly assigned record. */
 			/* We save the GUID we just got from the SM in the
 			 * admin_guid in order to be persistent, and in the
 			 * request from the sm the process will ask for the same GUID */
 			if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
-			    tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
-				/* the sysadmin assignment failed.*/
-				mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
-					     " admin guid after SysAdmin "
-					     "configuration. "
-					     "Record num %d in block_num:%d "
-					     "was declined by SM, "
-					     "new val(0x%llx) was kept\n",
-					      __func__, i,
-					     guid_rec->block_num,
-					     be64_to_cpu(*(__be64 *) &
-							 rec->all_recs[i * GUID_REC_SIZE]));
+			    sm_response != required_val) {
+				/* Warn only on first retry */
+				if (rec->guids_retry_schedule[i] == 0)
+					mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
+						     " admin guid after SysAdmin "
+						     "configuration. "
+						     "Record num %d in block_num:%d "
+						     "was declined by SM, "
+						     "new val(0x%llx) was kept, SM returned (0x%llx)\n",
+						     __func__, i,
+						     guid_rec->block_num,
+						     be64_to_cpu(required_val),
+						     be64_to_cpu(sm_response));
+				goto entry_declined;
 			} else {
-				memcpy(&rec->all_recs[i * GUID_REC_SIZE],
-				       &guid_rec->guid_info_list[i * GUID_REC_SIZE],
-				       GUID_REC_SIZE);
+				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
+					sm_response;
+				goto next_entry;
 			}
 		}
+entry_declined:
+		declined_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
+		rec->guids_retry_schedule[i] =
+			(rec->guids_retry_schedule[i] == 0) ? 1 :
+			min((unsigned int)60,
+			    rec->guids_retry_schedule[i] * 2);
+		/* using the minimum value among all entries in that record */
+		resched_delay_sec = (resched_delay_sec == 0) ?
+				rec->guids_retry_schedule[i] :
+				min(resched_delay_sec,
+				    rec->guids_retry_schedule[i]);
+		continue;
+
+next_entry:
+		rec->guids_retry_schedule[i] = 0;
+	}
+
+	applied_guid_indexes = cb_ctx->guid_indexes & ~declined_guid_indexes;
+	if (declined_guid_indexes ||
+	    rec->guid_indexes & ~(applied_guid_indexes)) {
+		pr_debug("record=%d wasn't fully set, guid_indexes=0x%llx applied_indexes=0x%llx, declined_indexes=0x%llx\n",
+			 guid_rec->block_num,
+			 be64_to_cpu((__force __be64)rec->guid_indexes),
+			 be64_to_cpu((__force __be64)applied_guid_indexes),
+			 be64_to_cpu((__force __be64)declined_guid_indexes));
+		rec->time_to_run = ktime_get_real_ns() +
+			resched_delay_sec * NSEC_PER_SEC;
+	} else {
+		rec->status = MLX4_GUID_INFO_STATUS_SET;
 	}
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
 	/*
 	The func is call here to close the cases when the
 	sm doesn't send smp, so in the sa response the driver
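A minimal sketch (not part of the patch) of the per-entry back-off rule the hunk above applies: a declined entry's retry delay starts at one second, doubles on every further decline, and is capped at 60 seconds, and the record is rescheduled after the smallest delay among its declined entries. The helper name below is illustrative only.

static unsigned int next_entry_delay_sec(unsigned int prev_delay_sec)
{
	/* first decline: retry after 1s; afterwards double, capped at 60s */
	if (prev_delay_sec == 0)
		return 1;
	return (prev_delay_sec * 2 > 60) ? 60 : prev_delay_sec * 2;
}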
@@ -297,10 +373,13 @@ static void aliasguid_query_handler(int status,
 out:
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
-	if (!dev->sriov.is_going_down)
+	if (!dev->sriov.is_going_down) {
+		get_low_record_time_index(dev, port_index, &resched_delay_sec);
 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
 				   &dev->sriov.alias_guid.ports_guid[port_index].
-				   alias_guid_work, 0);
+				   alias_guid_work,
+				   msecs_to_jiffies(resched_delay_sec * 1000));
+	}
 	if (cb_ctx->sa_query) {
 		list_del(&cb_ctx->list);
 		kfree(cb_ctx);
@@ -317,9 +396,7 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
 	ib_sa_comp_mask comp_mask = 0;
 
 	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
-		= MLX4_GUID_INFO_STATUS_IDLE;
-	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
-		= MLX4_GUID_INFO_RECORD_SET;
+		= MLX4_GUID_INFO_STATUS_SET;
 
 	/* calculate the comp_mask for that record.*/
 	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
@@ -340,12 +417,16 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
 		comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
 	}
 	dev->sriov.alias_guid.ports_guid[port - 1].
-		all_rec_per_port[index].guid_indexes = comp_mask;
+		all_rec_per_port[index].guid_indexes |= comp_mask;
+	if (dev->sriov.alias_guid.ports_guid[port - 1].
+	    all_rec_per_port[index].guid_indexes)
+		dev->sriov.alias_guid.ports_guid[port - 1].
+		all_rec_per_port[index].status = MLX4_GUID_INFO_STATUS_IDLE;
+
 }
 
 static int set_guid_rec(struct ib_device *ibdev,
-			u8 port, int index,
-			struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+			struct mlx4_next_alias_guid_work *rec)
 {
 	int err;
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
@@ -354,6 +435,9 @@ static int set_guid_rec(struct ib_device *ibdev,
 	struct ib_port_attr attr;
 	struct mlx4_alias_guid_work_context *callback_context;
 	unsigned long resched_delay, flags, flags1;
+	u8 port = rec->port + 1;
+	int index = rec->block_num;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec_det = &rec->rec_det;
 	struct list_head *head =
 		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
 
@@ -380,6 +464,8 @@ static int set_guid_rec(struct ib_device *ibdev,
 	callback_context->port = port;
 	callback_context->dev = dev;
 	callback_context->block_num = index;
+	callback_context->guid_indexes = rec_det->guid_indexes;
+	callback_context->method = rec->method;
 
 	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));
 
@@ -399,7 +485,7 @@ static int set_guid_rec(struct ib_device *ibdev,
 	callback_context->query_id =
 		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
 					  ibdev, port, &guid_info_rec,
-					  comp_mask, rec_det->method, 1000,
+					  comp_mask, rec->method, 1000,
 					  GFP_KERNEL, aliasguid_query_handler,
 					  callback_context,
 					  &callback_context->sa_query);
@@ -462,31 +548,107 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
 	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
 }
 
+static void set_required_record(struct mlx4_ib_dev *dev, u8 port,
+				struct mlx4_next_alias_guid_work *next_rec,
+				int record_index)
+{
+	int i;
+	int lowset_time_entry = -1;
+	int lowest_time = 0;
+	ib_sa_comp_mask delete_guid_indexes = 0;
+	ib_sa_comp_mask set_guid_indexes = 0;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec =
+			&dev->sriov.alias_guid.ports_guid[port].
+			all_rec_per_port[record_index];
+
+	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
+		if (!(rec->guid_indexes &
+			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
+			continue;
+
+		if (*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] ==
+				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+			delete_guid_indexes |=
+				mlx4_ib_get_aguid_comp_mask_from_ix(i);
+		else
+			set_guid_indexes |=
+				mlx4_ib_get_aguid_comp_mask_from_ix(i);
+
+		if (lowset_time_entry == -1 || rec->guids_retry_schedule[i] <=
+		    lowest_time) {
+			lowset_time_entry = i;
+			lowest_time = rec->guids_retry_schedule[i];
+		}
+	}
+
+	memcpy(&next_rec->rec_det, rec, sizeof(*rec));
+	next_rec->port = port;
+	next_rec->block_num = record_index;
+
+	if (*(__be64 *)&rec->all_recs[lowset_time_entry * GUID_REC_SIZE] ==
+				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) {
+		next_rec->rec_det.guid_indexes = delete_guid_indexes;
+		next_rec->method = MLX4_GUID_INFO_RECORD_DELETE;
+	} else {
+		next_rec->rec_det.guid_indexes = set_guid_indexes;
+		next_rec->method = MLX4_GUID_INFO_RECORD_SET;
+	}
+}
+
+/* return index of record that should be updated based on lowest
+ * rescheduled time
+ */
+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+				     int *resched_delay_sec)
+{
+	int record_index = -1;
+	u64 low_record_time = 0;
+	struct mlx4_sriov_alias_guid_info_rec_det rec;
+	int j;
+
+	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+		rec = dev->sriov.alias_guid.ports_guid[port].
+			all_rec_per_port[j];
+		if (rec.status == MLX4_GUID_INFO_STATUS_IDLE &&
+		    rec.guid_indexes) {
+			if (record_index == -1 ||
+			    rec.time_to_run < low_record_time) {
+				record_index = j;
+				low_record_time = rec.time_to_run;
+			}
+		}
+	}
+	if (resched_delay_sec) {
+		u64 curr_time = ktime_get_real_ns();
+
+		*resched_delay_sec = (low_record_time < curr_time) ? 0 :
+			div_u64((low_record_time - curr_time), NSEC_PER_SEC);
+	}
+
+	return record_index;
+}
+
 /* The function returns the next record that was
  * not configured (or failed to be configured) */
 static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
 				     struct mlx4_next_alias_guid_work *rec)
 {
-	int j;
 	unsigned long flags;
+	int record_index;
+	int ret = 0;
 
-	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
-		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
-		if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
-		    MLX4_GUID_INFO_STATUS_IDLE) {
-			memcpy(&rec->rec_det,
-			       &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
-			       sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
-			rec->port = port;
-			rec->block_num = j;
-			dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
-				MLX4_GUID_INFO_STATUS_PENDING;
-			spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
-			return 0;
-		}
-		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+	record_index = get_low_record_time_index(dev, port, NULL);
+
+	if (record_index < 0) {
+		ret = -ENOENT;
+		goto out;
 	}
-	return -ENOENT;
+
+	set_required_record(dev, port, rec, record_index);
+out:
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+	return ret;
 }
 
 static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
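A minimal sketch (not part of the patch) of how the rescheduling delay falls out of the get_low_record_time_index() logic above: the earliest time_to_run among pending IDLE records is compared with the current wall-clock time, and a record whose time has already passed yields a zero delay. The helper name is illustrative only.

static unsigned long long delay_until_sec(unsigned long long time_to_run_ns,
					  unsigned long long now_ns)
{
	/* already due: run immediately */
	if (time_to_run_ns < now_ns)
		return 0;
	/* otherwise wait out the remaining time, in whole seconds */
	return (time_to_run_ns - now_ns) / 1000000000ULL;
}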
@@ -497,8 +659,6 @@ static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
 		rec_det->guid_indexes;
 	memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
 	       rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
-	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
-		rec_det->status;
 }
 
 static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
@@ -545,9 +705,7 @@ static void alias_guid_work(struct work_struct *work)
 		goto out;
 	}
 
-	set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
-		     &rec->rec_det);
-
+	set_guid_rec(&dev->ib_dev, rec);
 out:
 	kfree(rec);
 }
@@ -562,6 +720,12 @@ void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
 	if (!dev->sriov.is_going_down) {
+		/* If there is pending one should cancell then run, otherwise
+		 * won't run till previous one is ended as same work
+		 * struct is used.
+		 */
+		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
+				    alias_guid_work);
 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
 			   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
 	}
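A minimal sketch (not part of the patch) of the re-arm pattern the added comment describes: cancelling a pending delayed work item before queueing it again lets the new request run immediately, because queueing the same work struct while it is still pending would otherwise leave the earlier expiry in place. The helper name is illustrative only.

#include <linux/workqueue.h>

static void rearm_alias_guid_work(struct workqueue_struct *wq,
				  struct delayed_work *dwork)
{
	/* drop any pending instance so the new request is not ignored */
	cancel_delayed_work(dwork);
	/* queue again with no delay */
	queue_delayed_work(wq, dwork, 0);
}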