 -rw-r--r--  drivers/infiniband/hw/mlx4/Makefile      |   2
 -rw-r--r--  drivers/infiniband/hw/mlx4/alias_GUID.c  | 688
 -rw-r--r--  drivers/infiniband/hw/mlx4/mad.c         |  19
 -rw-r--r--  drivers/infiniband/hw/mlx4/main.c        |  37
 -rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h     |  74
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c |   6
 6 files changed, 816 insertions(+), 10 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/Makefile b/drivers/infiniband/hw/mlx4/Makefile
index bf0aa901c313..31d4c8aac679 100644
--- a/drivers/infiniband/hw/mlx4/Makefile
+++ b/drivers/infiniband/hw/mlx4/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_MLX4_INFINIBAND)	+= mlx4_ib.o
 
-mlx4_ib-y :=	ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o mcg.o cm.o
+mlx4_ib-y :=	ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o mcg.o cm.o alias_GUID.o
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
new file mode 100644
index 000000000000..ef6d356927c3
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -0,0 +1,688 @@
+/*
+ * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/***************************************************************/
+/* This file supports the handling of the alias GUID feature. */
+/***************************************************************/
+#include <rdma/ib_mad.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_sa.h>
+#include <rdma/ib_pack.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <rdma/ib_user_verbs.h>
+#include <linux/delay.h>
+#include "mlx4_ib.h"
+
+/*
+ * The driver keeps the current state of all GUIDs as they are in the HW.
+ * Whenever an SMP MAD GUIDInfo record is received, the data is cached.
+ */
+
+struct mlx4_alias_guid_work_context {
+	u8 port;
+	struct mlx4_ib_dev *dev;
+	struct ib_sa_query *sa_query;
+	struct completion done;
+	int query_id;
+	struct list_head list;
+	int block_num;
+};
+
+struct mlx4_next_alias_guid_work {
+	u8 port;
+	u8 block_num;
+	struct mlx4_sriov_alias_guid_info_rec_det rec_det;
+};
+
+
+void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
+					 u8 port_num, u8 *p_data)
+{
+	int i;
+	u64 guid_indexes;
+	int slave_id;
+	int port_index = port_num - 1;
+
+	if (!mlx4_is_master(dev->dev))
+		return;
+
+	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
+				   ports_guid[port_num - 1].
+				   all_rec_per_port[block_num].guid_indexes);
+	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);
+
+	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
+		/* The location of the specific index starts from bit number 4
+		 * until bit number 11 */
+		if (test_bit(i + 4, (unsigned long *)&guid_indexes)) {
+			slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
+			if (slave_id >= dev->dev->num_slaves) {
+				pr_debug("The last slave: %d\n", slave_id);
+				return;
+			}
+
+			/* cache the guid: */
+			memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
+			       &p_data[i * GUID_REC_SIZE],
+			       GUID_REC_SIZE);
+		} else
+			pr_debug("Guid number: %d in block: %d"
+				 " was not updated\n", i, block_num);
+	}
+}
+
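Editor's note on the indexing above (not part of the patch): a port's alias GUIDs are stored as 16 GUIDInfo blocks of 8 GUIDs each (the NUM_ALIAS_GUID_* constants defined in mlx4_ib.h further down), so slave_id = block_num * 8 + i and, inversely, a slave's GUID lives at block slave_id / 8, entry slave_id % 8. A minimal sketch of that mapping, with a hypothetical helper name:

	/* Illustrative helper only; mirrors the arithmetic used above. */
	#define NUM_ALIAS_GUID_IN_REC 8

	static inline void slave_to_guid_location(int slave_id,
						  int *block_num, int *entry)
	{
		*block_num = slave_id / NUM_ALIAS_GUID_IN_REC;	/* 0..15 */
		*entry = slave_id % NUM_ALIAS_GUID_IN_REC;	/* 0..7  */
	}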
+static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index)
+{
+	if (index >= NUM_ALIAS_GUID_PER_PORT) {
+		pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
+		return (__force __be64) ((u64) 0xFFFFFFFFFFFFFFFFUL);
+	}
+	return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
+}
+
+
+static ib_sa_comp_mask get_aguid_comp_mask_from_ix(int index)
+{
+	return IB_SA_COMP_MASK(4 + index);
+}
+
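A note on the component-mask arithmetic (editorial): the first component bits of a GUIDInfo SA record belong to the record's header fields (LID, block number), so GUID entry i maps to comp-mask bit 4 + i; that is why the loops here test bit i + 4 and the helper returns IB_SA_COMP_MASK(4 + index). A stand-in sketch using a plain shift (the in-kernel IB_SA_COMP_MASK macro additionally stores the mask in big-endian form):

	/* Illustration only; not the kernel macro. */
	static inline u64 aguid_bit(int index)		/* index: 0..7 */
	{
		return 1ULL << (4 + index);
	}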
+/*
+ * Whenever a new GUID is set/unset (guid table change), create an event and
+ * notify the relevant slave (the master should also be notified).
+ * If the GUID value is not as we have in the cache, the slave will not be
+ * updated; in this case it waits for the smp_snoop or the port management
+ * event to call this function and update the slave.
+ * block_num - the index of the block (16 blocks available)
+ * port_num - 1 or 2
+ */
+void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
+					  int block_num, u8 port_num,
+					  u8 *p_data)
+{
+	int i;
+	u64 guid_indexes;
+	int slave_id;
+	enum slave_port_state new_state;
+	enum slave_port_state prev_state;
+	__be64 tmp_cur_ag, form_cache_ag;
+	enum slave_port_gen_event gen_event;
+
+	if (!mlx4_is_master(dev->dev))
+		return;
+
+	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
+				   ports_guid[port_num - 1].
+				   all_rec_per_port[block_num].guid_indexes);
+	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);
+
+	/* calculate the slaves and notify them */
+	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
+		/* the location of the specific index runs from bits 4..11 */
+		if (!(test_bit(i + 4, (unsigned long *)&guid_indexes)))
+			continue;
+
+		slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
+		if (slave_id >= dev->dev->num_slaves)
+			return;
+		tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
+		form_cache_ag = get_cached_alias_guid(dev, port_num,
+					(NUM_ALIAS_GUID_IN_REC * block_num) + i);
+		/*
+		 * Check whether the guid is the same as in the cache.
+		 * If it is different, wait for the smp_snoop or the port mgmt
+		 * change event to update the slave on its port state change.
+		 */
+		if (tmp_cur_ag != form_cache_ag)
+			continue;
+		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
+
+		/* 2 cases: valid GUID, and invalid GUID */
+
+		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /* valid GUID */
+			prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num);
+			new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
+								  MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
+								  &gen_event);
+			pr_debug("slave: %d, port: %d prev_port_state: %d,"
+				 " new_port_state: %d, gen_event: %d\n",
+				 slave_id, port_num, prev_state, new_state, gen_event);
+			if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
+				pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
+					 slave_id, port_num);
+				mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
+					port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
+			}
+		} else { /* request to invalidate GUID */
+			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
+						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
+						      &gen_event);
+			pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
+				 slave_id, port_num);
+			mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
+						       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+		}
+	}
+}
+
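In short: for each GUID entry flagged in guid_indexes, the function generates a GUID-change EQE toward the slave and then drives the slave's virtual port state. A valid GUID feeds MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID into the state machine and, if that transition yields SLAVE_PORT_GEN_EVENT_UP, the slave sees a PORT_ACTIVE event; an unset GUID (MLX4_NOT_SET_GUID) feeds GID_INVALID and the slave sees PORT_DOWN.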
+static void aliasguid_query_handler(int status,
+				    struct ib_sa_guidinfo_rec *guid_rec,
+				    void *context)
+{
+	struct mlx4_ib_dev *dev;
+	struct mlx4_alias_guid_work_context *cb_ctx = context;
+	u8 port_index;
+	int i;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec;
+	unsigned long flags, flags1;
+
+	if (!context)
+		return;
+
+	dev = cb_ctx->dev;
+	port_index = cb_ctx->port - 1;
+	rec = &dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[cb_ctx->block_num];
+
+	if (status) {
+		rec->status = MLX4_GUID_INFO_STATUS_IDLE;
+		pr_debug("(port: %d) failed: status = %d\n",
+			 cb_ctx->port, status);
+		goto out;
+	}
+
+	if (guid_rec->block_num != cb_ctx->block_num) {
+		pr_err("block num mismatch: %d != %d\n",
+		       cb_ctx->block_num, guid_rec->block_num);
+		goto out;
+	}
+
+	pr_debug("lid/port: %d/%d, block_num: %d\n",
+		 be16_to_cpu(guid_rec->lid), cb_ctx->port,
+		 guid_rec->block_num);
+
+	rec = &dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[guid_rec->block_num];
+
+	rec->status = MLX4_GUID_INFO_STATUS_SET;
+	rec->method = MLX4_GUID_INFO_RECORD_SET;
+
+	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
+		__be64 tmp_cur_ag;
+		tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
+		/* Check whether the SM declined to assign one of the records.
+		 * If it did, and it was not a sysadmin request,
+		 * ask the SM for a new GUID (instead of the driver-requested one).
+		 */
+		if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
+			mlx4_ib_warn(&dev->ib_dev, "%s: Record num %d in "
+				     "block_num: %d was declined by SM, "
+				     "ownership by %d (0 = driver, 1 = sysAdmin,"
+				     " 2 = None)\n", __func__, i,
+				     guid_rec->block_num, rec->ownership);
+			if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
+				/* if it is driver assigned, ask the SM for a new GUID */
+				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
+					MLX4_NOT_SET_GUID;
+
+				/* Mark the record as not assigned, and let it
+				 * be sent again on the next work schedule. */
+				rec->status = MLX4_GUID_INFO_STATUS_IDLE;
+				rec->guid_indexes |= get_aguid_comp_mask_from_ix(i);
+			}
+		} else {
+			/* Properly assigned record. */
+			/* We save the GUID we just got from the SM in the
+			 * admin_guid in order to be persistent; in the next
+			 * request the process will ask the SM for the same GUID. */
+			if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
+			    tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
+				/* the sysadmin assignment failed */
+				mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
+					     " admin guid after SysAdmin "
+					     "configuration. "
+					     "Record num %d in block_num: %d "
+					     "was declined by SM, "
+					     "new val(0x%llx) was kept\n",
+					     __func__, i,
+					     guid_rec->block_num,
+					     be64_to_cpu(*(__be64 *) &
+						rec->all_recs[i * GUID_REC_SIZE]));
+			} else {
+				memcpy(&rec->all_recs[i * GUID_REC_SIZE],
+				       &guid_rec->guid_info_list[i * GUID_REC_SIZE],
+				       GUID_REC_SIZE);
+			}
+		}
+	}
+	/*
+	 * The function is called here to cover the case where the
+	 * SM doesn't send an SMP, so on the SA response the driver
+	 * notifies the slave.
+	 */
+	mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num,
+					     cb_ctx->port,
+					     guid_rec->guid_info_list);
+out:
+	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+	if (!dev->sriov.is_going_down)
+		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
+				   &dev->sriov.alias_guid.ports_guid[port_index].
+				   alias_guid_work, 0);
+	if (cb_ctx->sa_query) {
+		list_del(&cb_ctx->list);
+		kfree(cb_ctx);
+	} else
+		complete(&cb_ctx->done);
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
+	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+}
+
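Summarizing the per-entry policy in the handler above: if the SM declined an entry (returned MLX4_NOT_SET_GUID), a driver-owned record is zeroed, flipped back to IDLE, and its comp-mask bit re-armed so the next work pass retries it, while sysadmin- or none-owned entries are only logged. If the SM returned a GUID, a sysadmin-owned record whose returned value differs from the requested one keeps the admin value (with a warning); otherwise the SM's value is copied into all_recs.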
+static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
+{
+	int i;
+	u64 cur_admin_val;
+	ib_sa_comp_mask comp_mask = 0;
+
+	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
+		= MLX4_GUID_INFO_STATUS_IDLE;
+	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
+		= MLX4_GUID_INFO_RECORD_SET;
+
+	/* calculate the comp_mask for that record */
+	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
+		cur_admin_val =
+			*(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
+			all_rec_per_port[index].all_recs[GUID_REC_SIZE * i];
+		/*
+		 * Check the admin value: if it is marked for deletion (~0ULL),
+		 * or it is the first guid of the first record (the HW guid),
+		 * or the record is not owned by the sysadmin and the SM doesn't
+		 * need to assign GUIDs, then don't put it up for assignment.
+		 */
+		if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
+		    (!index && !i) ||
+		    MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
+			ports_guid[port - 1].all_rec_per_port[index].ownership)
+			continue;
+		comp_mask |= get_aguid_comp_mask_from_ix(i);
+	}
+	dev->sriov.alias_guid.ports_guid[port - 1].
+		all_rec_per_port[index].guid_indexes = comp_mask;
+}
+
+static int set_guid_rec(struct ib_device *ibdev,
+			u8 port, int index,
+			struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+{
+	int err;
+	struct mlx4_ib_dev *dev = to_mdev(ibdev);
+	struct ib_sa_guidinfo_rec guid_info_rec;
+	ib_sa_comp_mask comp_mask;
+	struct ib_port_attr attr;
+	struct mlx4_alias_guid_work_context *callback_context;
+	unsigned long resched_delay, flags, flags1;
+	struct list_head *head =
+		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
+
+	err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
+	if (err) {
+		pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
+			 err, port);
+		return err;
+	}
+	/* check that the port was configured by the SM; otherwise there is no need to send */
+	if (attr.state != IB_PORT_ACTIVE) {
+		pr_debug("port %d not active...rescheduling\n", port);
+		resched_delay = 5 * HZ;
+		err = -EAGAIN;
+		goto new_schedule;
+	}
+
+	callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL);
+	if (!callback_context) {
+		err = -ENOMEM;
+		resched_delay = HZ * 5;
+		goto new_schedule;
+	}
+	callback_context->port = port;
+	callback_context->dev = dev;
+	callback_context->block_num = index;
+
+	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));
+
+	guid_info_rec.lid = cpu_to_be16(attr.lid);
+	guid_info_rec.block_num = index;
+
+	memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
+	       GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC);
+	comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM |
+		    rec_det->guid_indexes;
+
+	init_completion(&callback_context->done);
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+	list_add_tail(&callback_context->list, head);
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
+
+	callback_context->query_id =
+		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
+					  ibdev, port, &guid_info_rec,
+					  comp_mask, rec_det->method, 1000,
+					  GFP_KERNEL, aliasguid_query_handler,
+					  callback_context,
+					  &callback_context->sa_query);
+	if (callback_context->query_id < 0) {
+		pr_debug("ib_sa_guid_info_rec_query failed, query_id: "
+			 "%d. will reschedule to the next 1 sec.\n",
+			 callback_context->query_id);
+		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+		list_del(&callback_context->list);
+		kfree(callback_context);
+		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
+		resched_delay = 1 * HZ;
+		err = -EAGAIN;
+		goto new_schedule;
+	}
+	err = 0;
+	goto out;
+
+new_schedule:
+	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+	invalidate_guid_record(dev, port, index);
+	if (!dev->sriov.is_going_down) {
+		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
+				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
+				   resched_delay);
+	}
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
+	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+
+out:
+	return err;
+}
+
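Note the retry discipline: every failure path funnels through new_schedule, which re-invalidates the record (recomputing its comp mask from the current admin values) and re-queues the per-port delayed work, with a 5 second delay when the port is not yet active or allocation fails, and a 1 second delay when the SA query itself could not be issued.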
+void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
+{
+	int i;
+	unsigned long flags, flags1;
+
+	pr_debug("port %d\n", port);
+
+	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+	for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
+		invalidate_guid_record(dev, port, i);
+
+	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
+		/*
+		 * Make sure no work waits in the queue. If the work is already
+		 * queued (not on the timer), the cancel will fail. That is not
+		 * a problem because we just want the work started.
+		 */
+		__cancel_delayed_work(&dev->sriov.alias_guid.
+				      ports_guid[port - 1].alias_guid_work);
+		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
+				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
+				   0);
+	}
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
+	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+}
+
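The cancel-then-requeue pair above is the usual "run the delayed work now" idiom. A minimal self-contained sketch of the same pattern (the helper name is hypothetical; cancel_delayed_work is the common spelling of the __cancel_delayed_work variant used above):

	#include <linux/workqueue.h>

	/* Collapse any pending timer and make the work run as soon as
	 * possible. If the work is already executing, the cancel fails,
	 * which is fine: the goal is only that it runs (again) soon. */
	static void kick_work_now(struct workqueue_struct *wq,
				  struct delayed_work *dwork)
	{
		cancel_delayed_work(dwork);
		queue_delayed_work(wq, dwork, 0);
	}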
+/* The function returns the next record that was
+ * not configured (or failed to be configured). */
+static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
+				     struct mlx4_next_alias_guid_work *rec)
+{
+	int j;
+	unsigned long flags;
+
+	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+		if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
+		    MLX4_GUID_INFO_STATUS_IDLE) {
+			memcpy(&rec->rec_det,
+			       &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
+			       sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
+			rec->port = port;
+			rec->block_num = j;
+			dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
+				MLX4_GUID_INFO_STATUS_PENDING;
+			spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+			return 0;
+		}
+		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+	}
+	return -ENOENT;
+}
+
+static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
+					     int rec_index,
+					     struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+{
+	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
+		rec_det->guid_indexes;
+	memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
+	       rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
+	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
+		rec_det->status;
+}
+
+static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
+{
+	int j;
+	struct mlx4_sriov_alias_guid_info_rec_det rec_det;
+
+	for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT ; j++) {
+		memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
+		rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
+			IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
+			IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
+			IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
+			IB_SA_GUIDINFO_REC_GID7;
+		rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
+		set_administratively_guid_record(dev, port, j, &rec_det);
+	}
+}
+
+static void alias_guid_work(struct work_struct *work)
+{
+	struct delayed_work *delay = to_delayed_work(work);
+	int ret = 0;
+	struct mlx4_next_alias_guid_work *rec;
+	struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port =
+		container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det,
+			     alias_guid_work);
+	struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent;
+	struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid,
+						      struct mlx4_ib_sriov,
+						      alias_guid);
+	struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);
+
+	rec = kzalloc(sizeof *rec, GFP_KERNEL);
+	if (!rec) {
+		pr_err("alias_guid_work: No Memory\n");
+		return;
+	}
+
+	pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
+	ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
+	if (ret) {
+		pr_debug("No more records to update.\n");
+		goto out;
+	}
+
+	set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
+		     &rec->rec_det);
+
+out:
+	kfree(rec);
+}
+
+
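Taken together, alias_guid_work, get_next_record_to_update, and aliasguid_query_handler form a simple pump: each work invocation claims one IDLE record (marking it PENDING) and issues a single SA query, and the query callback re-queues the work with zero delay, so a port's 16 records drain one block at a time until get_next_record_to_update returns -ENOENT; failed records are flipped back to IDLE and picked up on a later pass.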
+void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
+{
+	unsigned long flags, flags1;
+
+	if (!mlx4_is_master(dev->dev))
+		return;
+	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+	if (!dev->sriov.is_going_down) {
+		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
+				   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
+	}
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
+	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+}
+
+void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
+{
+	int i;
+	struct mlx4_ib_sriov *sriov = &dev->sriov;
+	struct mlx4_alias_guid_work_context *cb_ctx;
+	struct mlx4_sriov_alias_guid_port_rec_det *det;
+	struct ib_sa_query *sa_query;
+	unsigned long flags;
+
+	for (i = 0 ; i < dev->num_ports; i++) {
+		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
+		det = &sriov->alias_guid.ports_guid[i];
+		spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
+		while (!list_empty(&det->cb_list)) {
+			cb_ctx = list_entry(det->cb_list.next,
+					    struct mlx4_alias_guid_work_context,
+					    list);
+			sa_query = cb_ctx->sa_query;
+			cb_ctx->sa_query = NULL;
+			list_del(&cb_ctx->list);
+			spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
+			ib_sa_cancel_query(cb_ctx->query_id, sa_query);
+			wait_for_completion(&cb_ctx->done);
+			kfree(cb_ctx);
+			spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
+		}
+		spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
+	}
+	for (i = 0 ; i < dev->num_ports; i++) {
+		flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
+		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
+	}
+	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
+	kfree(dev->sriov.alias_guid.sa_client);
+}
+
+int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
+{
+	char alias_wq_name[15];
+	int ret = 0;
+	int i, j, k;
+	union ib_gid gid;
+
+	if (!mlx4_is_master(dev->dev))
+		return 0;
+	dev->sriov.alias_guid.sa_client =
+		kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
+	if (!dev->sriov.alias_guid.sa_client)
+		return -ENOMEM;
+
+	ib_sa_register_client(dev->sriov.alias_guid.sa_client);
+
+	spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);
+
+	for (i = 1; i <= dev->num_ports; ++i) {
+		if (dev->ib_dev.query_gid(&dev->ib_dev, i, 0, &gid)) {
+			ret = -EFAULT;
+			goto err_unregister;
+		}
+	}
+
+	for (i = 0 ; i < dev->num_ports; i++) {
+		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
+		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
+		/* check whether the SM should assign the GUIDs */
+		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+			if (mlx4_ib_sm_guid_assign) {
+				dev->sriov.alias_guid.ports_guid[i].
+					all_rec_per_port[j].
+					ownership = MLX4_GUID_DRIVER_ASSIGN;
+				continue;
+			}
+			dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
+				ownership = MLX4_GUID_NONE_ASSIGN;
+			/* mark each value as deleted until the sysadmin
+			 * provides a valid value */
+			for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
+				*(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
+					all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
+						cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
+			}
+		}
+		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
+		/* prepare the records; set them to be allocated by the SM */
+		for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
+			invalidate_guid_record(dev, i + 1, j);
+
+		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
+		dev->sriov.alias_guid.ports_guid[i].port = i;
+		if (mlx4_ib_sm_guid_assign)
+			set_all_slaves_guids(dev, i);
+
+		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
+		dev->sriov.alias_guid.ports_guid[i].wq =
+			create_singlethread_workqueue(alias_wq_name);
+		if (!dev->sriov.alias_guid.ports_guid[i].wq) {
+			ret = -ENOMEM;
+			goto err_thread;
+		}
+		INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
+				  alias_guid_work);
+	}
+	return 0;
+
+err_thread:
+	for (--i; i >= 0; i--) {
+		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
+		dev->sriov.alias_guid.ports_guid[i].wq = NULL;
+	}
+
+err_unregister:
+	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
+	kfree(dev->sriov.alias_guid.sa_client);
+	dev->sriov.alias_guid.sa_client = NULL;
+	pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret);
+	return ret;
+}
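That completes the new file; the remaining hunks wire the service into the driver: init/teardown in mad.c's SR-IOV paths, record invalidation on client-reregister and PORT_UP events, the netw_view-aware GID query in main.c, the new state in mlx4_ib.h, and a real VF port state in cmd.c.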
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 2f13894299ee..b8cb25ebce50 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -791,8 +791,10 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
 
 static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
 {
-	/* re-configure the mcg's */
+	/* re-configure the alias-guid and mcg's */
 	if (mlx4_is_master(dev->dev)) {
+		mlx4_ib_invalidate_all_guid_record(dev, port_num);
+
 		if (!dev->sriov.is_going_down)
 			mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
 	}
@@ -1808,9 +1810,20 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
 		return 0;
 	}
 
+	err = mlx4_ib_init_alias_guid_service(dev);
+	if (err) {
+		mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
+		goto paravirt_err;
+	}
+
 	mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
 		     dev->dev->caps.sqp_demux);
 	for (i = 0; i < dev->num_ports; i++) {
+		union ib_gid gid;
+		err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
+		if (err)
+			goto demux_err;
+		dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
 		err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
 				      &dev->sriov.sqps[i]);
 		if (err)
@@ -1828,6 +1841,9 @@ demux_err:
 		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
 		--i;
 	}
+	mlx4_ib_destroy_alias_guid_service(dev);
+
+paravirt_err:
 	mlx4_ib_cm_paravirt_clean(dev, -1);
 
 	return err;
@@ -1854,5 +1870,6 @@ void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
 	}
 
 	mlx4_ib_cm_paravirt_clean(dev, -1);
+	mlx4_ib_destroy_alias_guid_service(dev);
 }
 }
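One detail worth noticing in mlx4_ib_init_sriov's unwind: the new paravirt_err label sits after the demux_err cleanup, so a failure in demux setup first tears the alias GUID service back down and only then cleans the CM paravirtualization state, mirroring the reverse of the setup order.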
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index b959fe4665dd..7d97578fbbaa 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -59,6 +59,10 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
+int mlx4_ib_sm_guid_assign = 1;
+module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
+MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
+
 static const char mlx4_ib_version[] =
 	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
 	DRV_VERSION " (" DRV_RELDATE ")\n";
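Since the parameter is registered with 0444 permissions, it is visible in sysfs but fixed at load time; for example (illustrative), loading with "modprobe mlx4_ib sm_guid_assign=0" leaves alias GUID assignment to the sysadmin instead of the SM.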
| @@ -349,12 +353,15 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, | |||
| 349 | return __mlx4_ib_query_port(ibdev, port, props, 0); | 353 | return __mlx4_ib_query_port(ibdev, port, props, 0); |
| 350 | } | 354 | } |
| 351 | 355 | ||
| 352 | static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, | 356 | int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, |
| 353 | union ib_gid *gid) | 357 | union ib_gid *gid, int netw_view) |
| 354 | { | 358 | { |
| 355 | struct ib_smp *in_mad = NULL; | 359 | struct ib_smp *in_mad = NULL; |
| 356 | struct ib_smp *out_mad = NULL; | 360 | struct ib_smp *out_mad = NULL; |
| 357 | int err = -ENOMEM; | 361 | int err = -ENOMEM; |
| 362 | struct mlx4_ib_dev *dev = to_mdev(ibdev); | ||
| 363 | int clear = 0; | ||
| 364 | int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; | ||
| 358 | 365 | ||
| 359 | in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); | 366 | in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); |
| 360 | out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); | 367 | out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); |
| @@ -365,18 +372,29 @@ static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, | |||
| 365 | in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; | 372 | in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; |
| 366 | in_mad->attr_mod = cpu_to_be32(port); | 373 | in_mad->attr_mod = cpu_to_be32(port); |
| 367 | 374 | ||
| 368 | err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS, port, | 375 | if (mlx4_is_mfunc(dev->dev) && netw_view) |
| 369 | NULL, NULL, in_mad, out_mad); | 376 | mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW; |
| 377 | |||
| 378 | err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad); | ||
| 370 | if (err) | 379 | if (err) |
| 371 | goto out; | 380 | goto out; |
| 372 | 381 | ||
| 373 | memcpy(gid->raw, out_mad->data + 8, 8); | 382 | memcpy(gid->raw, out_mad->data + 8, 8); |
| 374 | 383 | ||
| 384 | if (mlx4_is_mfunc(dev->dev) && !netw_view) { | ||
| 385 | if (index) { | ||
| 386 | /* For any index > 0, return the null guid */ | ||
| 387 | err = 0; | ||
| 388 | clear = 1; | ||
| 389 | goto out; | ||
| 390 | } | ||
| 391 | } | ||
| 392 | |||
| 375 | init_query_mad(in_mad); | 393 | init_query_mad(in_mad); |
| 376 | in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; | 394 | in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; |
| 377 | in_mad->attr_mod = cpu_to_be32(index / 8); | 395 | in_mad->attr_mod = cpu_to_be32(index / 8); |
| 378 | 396 | ||
| 379 | err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS, port, | 397 | err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, |
| 380 | NULL, NULL, in_mad, out_mad); | 398 | NULL, NULL, in_mad, out_mad); |
| 381 | if (err) | 399 | if (err) |
| 382 | goto out; | 400 | goto out; |
| @@ -384,6 +402,8 @@ static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, | |||
| 384 | memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); | 402 | memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); |
| 385 | 403 | ||
| 386 | out: | 404 | out: |
| 405 | if (clear) | ||
| 406 | memset(gid->raw + 8, 0, 8); | ||
| 387 | kfree(in_mad); | 407 | kfree(in_mad); |
| 388 | kfree(out_mad); | 408 | kfree(out_mad); |
| 389 | return err; | 409 | return err; |
| @@ -403,7 +423,7 @@ static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, | |||
| 403 | union ib_gid *gid) | 423 | union ib_gid *gid) |
| 404 | { | 424 | { |
| 405 | if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND) | 425 | if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND) |
| 406 | return __mlx4_ib_query_gid(ibdev, port, index, gid); | 426 | return __mlx4_ib_query_gid(ibdev, port, index, gid, 0); |
| 407 | else | 427 | else |
| 408 | return iboe_query_gid(ibdev, port, index, gid); | 428 | return iboe_query_gid(ibdev, port, index, gid); |
| 409 | } | 429 | } |
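The new netw_view argument follows the convention already used by __mlx4_ib_query_pkey(..., netw_view): callers needing the network (SM) view, such as mlx4_ib_init_sriov above, pass 1 and get MLX4_MAD_IFC_NET_VIEW semantics, while the host view (netw_view = 0) on a multi-function device exposes only GID index 0 and returns a null GUID (zeroed low 8 bytes) for any higher index.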
| @@ -1566,6 +1586,11 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, | |||
| 1566 | case MLX4_DEV_EVENT_PORT_UP: | 1586 | case MLX4_DEV_EVENT_PORT_UP: |
| 1567 | if (p > ibdev->num_ports) | 1587 | if (p > ibdev->num_ports) |
| 1568 | return; | 1588 | return; |
| 1589 | if (mlx4_is_master(dev) && | ||
| 1590 | rdma_port_get_link_layer(&ibdev->ib_dev, p) == | ||
| 1591 | IB_LINK_LAYER_INFINIBAND) { | ||
| 1592 | mlx4_ib_invalidate_all_guid_record(ibdev, p); | ||
| 1593 | } | ||
| 1569 | ibev.event = IB_EVENT_PORT_ACTIVE; | 1594 | ibev.event = IB_EVENT_PORT_ACTIVE; |
| 1570 | break; | 1595 | break; |
| 1571 | 1596 | ||
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 7476e2439f6b..f3f75f8229a7 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -42,6 +42,7 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_umem.h>
 #include <rdma/ib_mad.h>
+#include <rdma/ib_sa.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
@@ -64,6 +65,9 @@ enum {
 #define MLX4_IB_SQ_HEADROOM(shift)	((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
 #define MLX4_IB_SQ_MAX_SPARE		(MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
 
+/* module param to indicate whether the SM assigns the alias_GUID */
+extern int mlx4_ib_sm_guid_assign;
+
 struct mlx4_ib_ucontext {
 	struct ib_ucontext	ibucontext;
 	struct mlx4_uar		uar;
@@ -277,6 +281,57 @@ struct mlx4_ib_ah {
 	union mlx4_ext_av       av;
 };
 
+/****************************************/
+/* alias guid support */
+/****************************************/
+#define NUM_PORT_ALIAS_GUID 2
+#define NUM_ALIAS_GUID_IN_REC 8
+#define NUM_ALIAS_GUID_REC_IN_PORT 16
+#define GUID_REC_SIZE 8
+#define NUM_ALIAS_GUID_PER_PORT 128
+#define MLX4_NOT_SET_GUID (0x00LL)
+#define MLX4_GUID_FOR_DELETE_VAL (~(0x00LL))
+
+enum mlx4_guid_alias_rec_status {
+	MLX4_GUID_INFO_STATUS_IDLE,
+	MLX4_GUID_INFO_STATUS_SET,
+	MLX4_GUID_INFO_STATUS_PENDING,
+};
+
+enum mlx4_guid_alias_rec_ownership {
+	MLX4_GUID_DRIVER_ASSIGN,
+	MLX4_GUID_SYSADMIN_ASSIGN,
+	MLX4_GUID_NONE_ASSIGN, /* initial state of each record */
+};
+
+enum mlx4_guid_alias_rec_method {
+	MLX4_GUID_INFO_RECORD_SET = IB_MGMT_METHOD_SET,
+	MLX4_GUID_INFO_RECORD_DELETE = IB_SA_METHOD_DELETE,
+};
+
+struct mlx4_sriov_alias_guid_info_rec_det {
+	u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
+	ib_sa_comp_mask guid_indexes; /* indicates which of the 8 records are valid */
+	enum mlx4_guid_alias_rec_status status; /* indicates the administrative status of the record */
+	u8 method; /* set or delete */
+	enum mlx4_guid_alias_rec_ownership ownership; /* indicates who assigned this alias_guid record */
+};
+
+struct mlx4_sriov_alias_guid_port_rec_det {
+	struct mlx4_sriov_alias_guid_info_rec_det all_rec_per_port[NUM_ALIAS_GUID_REC_IN_PORT];
+	struct workqueue_struct *wq;
+	struct delayed_work alias_guid_work;
+	u8 port;
+	struct mlx4_sriov_alias_guid *parent;
+	struct list_head cb_list;
+};
+
+struct mlx4_sriov_alias_guid {
+	struct mlx4_sriov_alias_guid_port_rec_det ports_guid[MLX4_MAX_PORTS];
+	spinlock_t ag_work_lock;
+	struct ib_sa_client *sa_client;
+};
+
 struct mlx4_ib_demux_work {
 	struct work_struct	work;
 	struct mlx4_ib_dev     *dev;
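The constants above encode one alias GUID per slave, packed 8 to a GUIDInfo record. A hedged sanity sketch of the implied relations (userspace-style _Static_assert, for illustration only):

	/* 16 records x 8 GUIDs cover the 128 alias GUIDs of a port,
	 * and each GUID occupies one 64-bit slot. */
	_Static_assert(NUM_ALIAS_GUID_REC_IN_PORT * NUM_ALIAS_GUID_IN_REC ==
		       NUM_ALIAS_GUID_PER_PORT, "record layout");
	_Static_assert(GUID_REC_SIZE == sizeof(unsigned long long),
		       "a GUID is 64 bits");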
@@ -349,6 +404,8 @@ struct mlx4_ib_sriov {
 	spinlock_t going_down_lock;
 	int is_going_down;
 
+	struct mlx4_sriov_alias_guid alias_guid;
+
 	/* CM paravirtualization fields */
 	struct list_head cm_list;
 	spinlock_t id_map_lock;
@@ -555,6 +612,9 @@ int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 			 u16 *pkey, int netw_view);
 
+int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+			union ib_gid *gid, int netw_view);
+
 int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
 			u8 *mac, int *is_mcast, u8 port);
 
@@ -606,4 +666,18 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
 void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev);
 void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave_id);
 
+/* alias guid support */
+void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port);
+int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev);
+void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev);
+void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port);
+
+void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
+					  int block_num,
+					  u8 port_num, u8 *p_data);
+
+void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev,
+					 int block_num, u8 port_num,
+					 u8 *p_data);
+
 #endif /* MLX4_IB_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index a13d8a69b3bc..ea5c884ab899 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -674,8 +674,10 @@ static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
 
 static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
 {
-	/* will be modified when add alias_guid feature */
-	return IB_PORT_DOWN;
+	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
+		return IB_PORT_ACTIVE;
+	else
+		return IB_PORT_DOWN;
 }
 
 static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
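With the alias GUID machinery in place, vf_port_state can finally report reality: the stub that always returned IB_PORT_DOWN is replaced by a lookup of the slave port state machine (the same state driven by set_and_calc_slave_port_state from alias_GUID.c), so a VF whose GUID has been assigned and brought up now sees IB_PORT_ACTIVE.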
