 drivers/net/ethernet/mellanox/mlx4/cmd.c  |  14
 drivers/net/ethernet/mellanox/mlx4/fw.c   |   9
 drivers/net/ethernet/mellanox/mlx4/main.c | 421
 include/linux/mlx4/device.h               |   1
 4 files changed, 260 insertions(+), 185 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 02a2e90d581a..436c82f64304 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -580,8 +580,18 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 
 	err = context->result;
 	if (err) {
-		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
-			 op, context->fw_status);
+		/* Since we do not want to have this error message always
+		 * displayed at driver start when there are ConnectX2 HCAs
+		 * on the host, we deprecate the error message for this
+		 * specific command/input_mod/opcode_mod/fw-status to be debug.
+		 */
+		if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
+		    op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
+			mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
+				 op, context->fw_status);
+		else
+			mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+				 op, context->fw_status);
 		goto out;
 	}
 
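
The cmd.c hunk above routes one specific, expected firmware failure (SET_PORT with in_modifier 1, op_modifier 0 and CMD_STAT_BAD_SIZE, seen at driver start on ConnectX-2 HCAs) to the debug log instead of the error log. The standalone user-space sketch below illustrates the same classify-then-demote logging pattern; all names (cmd_result, is_expected_failure, log_cmd_failure) are hypothetical and it is not the driver's code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for a command opcode and a firmware status value. */
enum { CMD_SET_PORT = 0x0c, STAT_BAD_SIZE = 0x40 };

struct cmd_result {
	int op;          /* command opcode */
	int in_modifier; /* input modifier */
	int op_modifier; /* opcode modifier */
	int fw_status;   /* status returned by firmware */
};

/* An "expected" failure: known-benign on older hardware, not worth an error. */
static bool is_expected_failure(const struct cmd_result *r)
{
	return r->op == CMD_SET_PORT && r->in_modifier == 1 &&
	       r->op_modifier == 0 && r->fw_status == STAT_BAD_SIZE;
}

static void log_cmd_failure(const struct cmd_result *r)
{
	const char *level = is_expected_failure(r) ? "dbg" : "err";

	fprintf(stderr, "[%s] command 0x%x failed: fw status = 0x%x\n",
		level, r->op, r->fw_status);
}

int main(void)
{
	struct cmd_result benign = { CMD_SET_PORT, 1, 0, STAT_BAD_SIZE };
	struct cmd_result real   = { CMD_SET_PORT, 2, 0, STAT_BAD_SIZE };

	log_cmd_failure(&benign); /* demoted to debug-style output */
	log_cmd_failure(&real);   /* still reported as an error */
	return 0;
}
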
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 13b2e4a51ef4..2e88a235e26b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -982,8 +982,13 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
 	if (port < 0)
 		return -EINVAL;
 
-	vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
-			    (port & 0xFF);
+	/* Protect against untrusted guests: enforce that this is the
+	 * QUERY_PORT general query.
+	 */
+	if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF)
+		return -EINVAL;
+
+	vhcr->in_modifier = port;
 
 	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
 			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
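
The fw.c hunk above hardens the QUERY_PORT wrapper against values supplied by an untrusted guest: anything other than the general query (a non-zero op_modifier, or bits outside the low byte of in_modifier) is rejected, and the port number is then written whole rather than merged into the guest-provided value. A minimal standalone sketch of the same validate-then-overwrite pattern, using a hypothetical virt_cmd struct and sanitize_query_port() helper:

#include <errno.h>
#include <stdio.h>

/* Hypothetical mirror of the command fields the wrapper inspects. */
struct virt_cmd {
	unsigned int in_modifier;
	unsigned int op_modifier;
};

/* Reject anything but the plain query, then substitute the trusted port. */
static int sanitize_query_port(struct virt_cmd *vhcr, int port)
{
	if (port < 0)
		return -EINVAL;

	if (vhcr->op_modifier || (vhcr->in_modifier & ~0xFFu))
		return -EINVAL; /* untrusted modifiers: refuse the command */

	vhcr->in_modifier = (unsigned int)port; /* use only the validated port */
	return 0;
}

int main(void)
{
	struct virt_cmd ok  = { .in_modifier = 0x01,   .op_modifier = 0 };
	struct virt_cmd bad = { .in_modifier = 0x1001, .op_modifier = 0 };

	printf("ok:  %d\n", sanitize_query_port(&ok, 1));  /* 0 */
	printf("bad: %d\n", sanitize_query_port(&bad, 1)); /* -EINVAL */
	return 0;
}
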
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 1f10023af1db..f6c32a947185 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2259,115 +2259,18 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
 	iounmap(owner);
 }
 
-static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
+static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
+			 int total_vfs, int *nvfs, struct mlx4_priv *priv)
 {
-	struct mlx4_priv *priv;
 	struct mlx4_dev *dev;
+	unsigned sum = 0;
 	int err;
 	int port;
-	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
-	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
-	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
-		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
-	unsigned total_vfs = 0;
-	int sriov_initialized = 0;
-	unsigned int i;
-
-	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
-
-	err = pci_enable_device(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
-		return err;
-	}
-
-	/* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS
-	 * per port, we must limit the number of VFs to 63 (since their are
-	 * 128 MACs)
-	 */
-	for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
-	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
-		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
-		if (nvfs[i] < 0) {
-			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
-			return -EINVAL;
-		}
-	}
-	for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
-	     i++) {
-		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
-		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
-			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
-			return -EINVAL;
-		}
-	}
-	if (total_vfs >= MLX4_MAX_NUM_VF) {
-		dev_err(&pdev->dev,
-			"Requested more VF's (%d) than allowed (%d)\n",
-			total_vfs, MLX4_MAX_NUM_VF - 1);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < MLX4_MAX_PORTS; i++) {
-		if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
-			dev_err(&pdev->dev,
-				"Requested more VF's (%d) for port (%d) than allowed (%d)\n",
-				nvfs[i] + nvfs[2], i + 1,
-				MLX4_MAX_NUM_VF_P_PORT - 1);
-			return -EINVAL;
-		}
-	}
-
-
-	/*
-	 * Check for BARs.
-	 */
-	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
-	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
-			pci_dev_data, pci_resource_flags(pdev, 0));
-		err = -ENODEV;
-		goto err_disable_pdev;
-	}
-	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
-		dev_err(&pdev->dev, "Missing UAR, aborting\n");
-		err = -ENODEV;
-		goto err_disable_pdev;
-	}
-
-	err = pci_request_regions(pdev, DRV_NAME);
-	if (err) {
-		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
-		goto err_disable_pdev;
-	}
-
-	pci_set_master(pdev);
-
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err) {
-		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (err) {
-			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
-			goto err_release_regions;
-		}
-	}
-	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err) {
-		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (err) {
-			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
-			goto err_release_regions;
-		}
-	}
+	int i;
+	int existing_vfs = 0;
 
-	/* Allow large DMA segments, up to the firmware limit of 1 GB */
-	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
+	dev = &priv->dev;
 
-	dev = pci_get_drvdata(pdev);
-	priv = mlx4_priv(dev);
-	dev->pdev = pdev;
 	INIT_LIST_HEAD(&priv->ctx_list);
 	spin_lock_init(&priv->ctx_lock);
 
@@ -2381,28 +2284,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 
 	dev->rev_id = pdev->revision;
 	dev->numa_node = dev_to_node(&pdev->dev);
+
 	/* Detect if this device is a virtual function */
 	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
-		/* When acting as pf, we normally skip vfs unless explicitly
-		 * requested to probe them. */
-		if (total_vfs) {
-			unsigned vfs_offset = 0;
-			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
-			     vfs_offset + nvfs[i] < extended_func_num(pdev);
-			     vfs_offset += nvfs[i], i++)
-				;
-			if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
-				err = -ENODEV;
-				goto err_free_dev;
-			}
-			if ((extended_func_num(pdev) - vfs_offset)
-			    > prb_vf[i]) {
-				mlx4_warn(dev, "Skipping virtual function:%d\n",
-					  extended_func_num(pdev));
-				err = -ENODEV;
-				goto err_free_dev;
-			}
-		}
 		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
 		dev->flags |= MLX4_FLAG_SLAVE;
 	} else {
@@ -2412,11 +2296,10 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 		err = mlx4_get_ownership(dev);
 		if (err) {
 			if (err < 0)
-				goto err_free_dev;
+				return err;
 			else {
 				mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
-				err = -EINVAL;
-				goto err_free_dev;
+				return -EINVAL;
 			}
 		}
 
@@ -2428,21 +2311,28 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 					       GFP_KERNEL);
 			if (NULL == dev->dev_vfs) {
 				mlx4_err(dev, "Failed to allocate memory for VFs\n");
-				err = 0;
+				err = -ENOMEM;
+				goto err_free_own;
 			} else {
 				atomic_inc(&pf_loading);
-				err = pci_enable_sriov(pdev, total_vfs);
+				existing_vfs = pci_num_vf(pdev);
+				if (existing_vfs) {
+					err = 0;
+					if (existing_vfs != total_vfs)
+						mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
+							 existing_vfs, total_vfs);
+				} else {
+					err = pci_enable_sriov(pdev, total_vfs);
+				}
 				if (err) {
 					mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
 						 err);
 					atomic_dec(&pf_loading);
-					err = 0;
 				} else {
 					mlx4_warn(dev, "Running in master mode\n");
 					dev->flags |= MLX4_FLAG_SRIOV |
 						      MLX4_FLAG_MASTER;
 					dev->num_vfs = total_vfs;
-					sriov_initialized = 1;
 				}
 			}
 		}
@@ -2458,7 +2348,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 		err = mlx4_reset(dev);
 		if (err) {
 			mlx4_err(dev, "Failed to reset HCA, aborting\n");
-			goto err_rel_own;
+			goto err_sriov;
 		}
 	}
 
@@ -2508,34 +2398,46 @@ slave_start:
 	/* In master functions, the communication channel must be initialized
 	 * after obtaining its address from fw */
 	if (mlx4_is_master(dev)) {
-		unsigned sum = 0;
-		err = mlx4_multi_func_init(dev);
-		if (err) {
-			mlx4_err(dev, "Failed to init master mfunc interface, aborting\n");
+		int ib_ports = 0;
+
+		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+			ib_ports++;
+
+		if (ib_ports &&
+		    (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
+			mlx4_err(dev,
+				 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
+			err = -EINVAL;
+			goto err_close;
+		}
+		if (dev->caps.num_ports < 2 &&
+		    num_vfs_argc > 1) {
+			err = -EINVAL;
+			mlx4_err(dev,
+				 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
+				 dev->caps.num_ports);
 			goto err_close;
 		}
-		if (sriov_initialized) {
-			int ib_ports = 0;
-			mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
-				ib_ports++;
+		memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs));
 
-			if (ib_ports &&
-			    (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
-				mlx4_err(dev,
-					 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
-				err = -EINVAL;
-				goto err_master_mfunc;
-			}
-			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
-				unsigned j;
-				for (j = 0; j < nvfs[i]; ++sum, ++j) {
-					dev->dev_vfs[sum].min_port =
-						i < 2 ? i + 1 : 1;
-					dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
-						dev->caps.num_ports;
-				}
+		for (i = 0; i < sizeof(dev->nvfs)/sizeof(dev->nvfs[0]); i++) {
+			unsigned j;
+
+			for (j = 0; j < dev->nvfs[i]; ++sum, ++j) {
+				dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
+				dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
+					dev->caps.num_ports;
 			}
 		}
+
+		/* In master functions, the communication channel
+		 * must be initialized after obtaining its address from fw
+		 */
+		err = mlx4_multi_func_init(dev);
+		if (err) {
+			mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
+			goto err_close;
+		}
 	}
 
 	err = mlx4_alloc_eq_table(dev);
@@ -2556,7 +2458,7 @@ slave_start:
 	if (!mlx4_is_slave(dev)) {
 		err = mlx4_init_steering(dev);
 		if (err)
-			goto err_free_eq;
+			goto err_disable_msix;
 	}
 
 	err = mlx4_setup_hca(dev);
@@ -2616,6 +2518,10 @@ err_steer:
 	if (!mlx4_is_slave(dev))
 		mlx4_clear_steering(dev);
 
+err_disable_msix:
+	if (dev->flags & MLX4_FLAG_MSI_X)
+		pci_disable_msix(pdev);
+
 err_free_eq:
 	mlx4_free_eq_table(dev);
 
@@ -2632,9 +2538,6 @@ err_master_mfunc:
 	}
 
 err_close:
-	if (dev->flags & MLX4_FLAG_MSI_X)
-		pci_disable_msix(pdev);
-
 	mlx4_close_hca(dev);
 
 err_mfunc:
@@ -2645,20 +2548,154 @@ err_cmd:
 	mlx4_cmd_cleanup(dev);
 
 err_sriov:
-	if (dev->flags & MLX4_FLAG_SRIOV)
+	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs)
 		pci_disable_sriov(pdev);
 
-err_rel_own:
-	if (!mlx4_is_slave(dev))
-		mlx4_free_ownership(dev);
-
 	if (mlx4_is_master(dev) && dev->num_vfs)
 		atomic_dec(&pf_loading);
 
 	kfree(priv->dev.dev_vfs);
 
-err_free_dev:
-	kfree(priv);
+err_free_own:
+	if (!mlx4_is_slave(dev))
+		mlx4_free_ownership(dev);
+
+	return err;
+}
+
+static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
+			   struct mlx4_priv *priv)
+{
+	int err;
+	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
+		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
+	unsigned total_vfs = 0;
+	unsigned int i;
+
+	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+		return err;
+	}
+
+	/* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS
+	 * per port, we must limit the number of VFs to 63 (since their are
+	 * 128 MACs)
+	 */
+	for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
+	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
+		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
+		if (nvfs[i] < 0) {
+			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
+			err = -EINVAL;
+			goto err_disable_pdev;
+		}
+	}
+	for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
+	     i++) {
+		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
+		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
+			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
+			err = -EINVAL;
+			goto err_disable_pdev;
+		}
+	}
+	if (total_vfs >= MLX4_MAX_NUM_VF) {
+		dev_err(&pdev->dev,
+			"Requested more VF's (%d) than allowed (%d)\n",
+			total_vfs, MLX4_MAX_NUM_VF - 1);
+		err = -EINVAL;
+		goto err_disable_pdev;
+	}
+
+	for (i = 0; i < MLX4_MAX_PORTS; i++) {
+		if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
+			dev_err(&pdev->dev,
+				"Requested more VF's (%d) for port (%d) than allowed (%d)\n",
+				nvfs[i] + nvfs[2], i + 1,
+				MLX4_MAX_NUM_VF_P_PORT - 1);
+			err = -EINVAL;
+			goto err_disable_pdev;
+		}
+	}
+
+	/* Check for BARs. */
+	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
+	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
+			pci_dev_data, pci_resource_flags(pdev, 0));
+		err = -ENODEV;
+		goto err_disable_pdev;
+	}
+	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+		dev_err(&pdev->dev, "Missing UAR, aborting\n");
+		err = -ENODEV;
+		goto err_disable_pdev;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
+		goto err_disable_pdev;
+	}
+
+	pci_set_master(pdev);
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
+			goto err_release_regions;
+		}
+	}
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
+			goto err_release_regions;
+		}
+	}
+
+	/* Allow large DMA segments, up to the firmware limit of 1 GB */
+	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
+	/* Detect if this device is a virtual function */
+	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
+		/* When acting as pf, we normally skip vfs unless explicitly
+		 * requested to probe them.
+		 */
+		if (total_vfs) {
+			unsigned vfs_offset = 0;
+
+			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
+			     vfs_offset + nvfs[i] < extended_func_num(pdev);
+			     vfs_offset += nvfs[i], i++)
+				;
+			if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
+				err = -ENODEV;
+				goto err_release_regions;
+			}
+			if ((extended_func_num(pdev) - vfs_offset)
+			    > prb_vf[i]) {
+				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
+					 extended_func_num(pdev));
+				err = -ENODEV;
+				goto err_release_regions;
+			}
+		}
+	}
+
+	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
+	if (err)
+		goto err_release_regions;
+	return 0;
 
 err_release_regions:
 	pci_release_regions(pdev);
@@ -2673,6 +2710,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct mlx4_priv *priv;
 	struct mlx4_dev *dev;
+	int ret;
 
 	printk_once(KERN_INFO "%s", mlx4_version);
 
@@ -2681,28 +2719,38 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		return -ENOMEM;
 
 	dev = &priv->dev;
+	dev->pdev = pdev;
 	pci_set_drvdata(pdev, dev);
 	priv->pci_dev_data = id->driver_data;
 
-	return __mlx4_init_one(pdev, id->driver_data);
+	ret = __mlx4_init_one(pdev, id->driver_data, priv);
+	if (ret)
+		kfree(priv);
+
+	return ret;
 }
 
-static void __mlx4_remove_one(struct pci_dev *pdev)
+static void mlx4_unload_one(struct pci_dev *pdev)
 {
 	struct mlx4_dev *dev = pci_get_drvdata(pdev);
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int pci_dev_data;
 	int p;
+	int active_vfs = 0;
 
 	if (priv->removed)
 		return;
 
 	pci_dev_data = priv->pci_dev_data;
 
-	/* in SRIOV it is not allowed to unload the pf's
-	 * driver while there are alive vf's */
-	if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
-		pr_warn("Removing PF when there are assigned VF's !!!\n");
+	/* Disabling SR-IOV is not allowed while there are active vf's */
+	if (mlx4_is_master(dev)) {
+		active_vfs = mlx4_how_many_lives_vf(dev);
+		if (active_vfs) {
+			pr_warn("Removing PF when there are active VF's !!\n");
+			pr_warn("Will not disable SR-IOV.\n");
+		}
+	}
 	mlx4_stop_sense(dev);
 	mlx4_unregister_device(dev);
 
@@ -2745,7 +2793,7 @@ static void __mlx4_remove_one(struct pci_dev *pdev)
 
 	if (dev->flags & MLX4_FLAG_MSI_X)
 		pci_disable_msix(pdev);
-	if (dev->flags & MLX4_FLAG_SRIOV) {
+	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
 		mlx4_warn(dev, "Disabling SR-IOV\n");
 		pci_disable_sriov(pdev);
 		dev->num_vfs = 0;
@@ -2761,8 +2809,6 @@ static void __mlx4_remove_one(struct pci_dev *pdev)
 	kfree(dev->caps.qp1_proxy);
 	kfree(dev->dev_vfs);
 
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
 	memset(priv, 0, sizeof(*priv));
 	priv->pci_dev_data = pci_dev_data;
 	priv->removed = 1;
@@ -2773,7 +2819,9 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 	struct mlx4_dev *dev = pci_get_drvdata(pdev);
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
-	__mlx4_remove_one(pdev);
+	mlx4_unload_one(pdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
 	kfree(priv);
 	pci_set_drvdata(pdev, NULL);
 }
@@ -2782,11 +2830,22 @@ int mlx4_restart_one(struct pci_dev *pdev)
 {
 	struct mlx4_dev *dev = pci_get_drvdata(pdev);
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	int pci_dev_data;
+	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+	int pci_dev_data, err, total_vfs;
 
 	pci_dev_data = priv->pci_dev_data;
-	__mlx4_remove_one(pdev);
-	return __mlx4_init_one(pdev, pci_dev_data);
+	total_vfs = dev->num_vfs;
+	memcpy(nvfs, dev->nvfs, sizeof(dev->nvfs));
+
+	mlx4_unload_one(pdev);
+	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
+	if (err) {
+		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
+			 __func__, pci_name(pdev), err);
+		return err;
+	}
+
+	return err;
 }
 
 static const struct pci_device_id mlx4_pci_table[] = {
@@ -2840,7 +2899,7 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
 					      pci_channel_state_t state)
 {
-	__mlx4_remove_one(pdev);
+	mlx4_unload_one(pdev);
 
 	return state == pci_channel_io_perm_failure ?
 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
@@ -2852,7 +2911,7 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int ret;
 
-	ret = __mlx4_init_one(pdev, priv->pci_dev_data);
+	ret = __mlx4_init_one(pdev, priv->pci_dev_data, priv);
 
 	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
@@ -2866,7 +2925,7 @@ static struct pci_driver mlx4_driver = {
 	.name = DRV_NAME,
 	.id_table = mlx4_pci_table,
 	.probe = mlx4_init_one,
-	.shutdown = __mlx4_remove_one,
+	.shutdown = mlx4_unload_one,
 	.remove = mlx4_remove_one,
 	.err_handler = &mlx4_err_handler,
 };
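
The main.c diff above splits the old __mlx4_init_one() into a thin PCI bring-up wrapper plus mlx4_load_one(), so mlx4_restart_one() can reload the HCA without redoing PCI setup, and it keeps the num_vfs/probe_vf module-parameter parsing in the wrapper. The trickiest piece is the param_map table: with one argument the VF count applies to both ports (slot 2), with two or three arguments the values map to port 1, port 2, and both ports. A standalone sketch of just that mapping, with a hypothetical parse_num_vfs() helper (not the driver's code):

#include <stdio.h>

#define MAX_PORTS 2

/* Row = number of num_vfs arguments - 1; column = argument index.
 * Slots 0/1 mean "VFs on port 1/2 only", slot 2 means "VFs on both ports",
 * mirroring the param_map table in the diff above.
 */
static const int param_map[MAX_PORTS + 1][MAX_PORTS + 1] = {
	{2, 0, 0},	/* 1 value:  dual-port VFs                 */
	{0, 1, 2},	/* 2 values: port 1, port 2                */
	{0, 1, 2},	/* 3 values: port 1, port 2, both ports    */
};

/* Hypothetical helper: fold num_vfs-style arguments into per-slot counts. */
static unsigned parse_num_vfs(const int *args, int argc, int nvfs[MAX_PORTS + 1])
{
	unsigned total = 0;
	int i;

	for (i = 0; i < MAX_PORTS + 1 && i < argc; i++) {
		nvfs[param_map[argc - 1][i]] = args[i];
		total += args[i];
	}
	return total;
}

int main(void)
{
	const int one[] = {4};
	const int three[] = {2, 3, 1};
	int a[MAX_PORTS + 1] = {0, 0, 0};
	int b[MAX_PORTS + 1] = {0, 0, 0};
	unsigned t1 = parse_num_vfs(one, 1, a);
	unsigned t3 = parse_num_vfs(three, 3, b);

	printf("one arg:    total=%u dual-port=%d\n", t1, a[2]);
	printf("three args: total=%u p1=%d p2=%d dual=%d\n", t3, b[0], b[1], b[2]);
	return 0;
}
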
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 03b5608a4329..b2f8ab9a57c4 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -707,6 +707,7 @@ struct mlx4_dev {
 	u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
 	u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
 	struct mlx4_vf_dev *dev_vfs;
+	int nvfs[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_eqe {
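
The new nvfs[] field in struct mlx4_dev exists so the per-port VF layout chosen at probe time survives an internal restart: mlx4_restart_one() copies it out, unloads, and feeds it back to mlx4_load_one(). A minimal standalone sketch of that save-and-replay idea, with hypothetical device_load()/device_unload()/device_restart() helpers rather than the driver's functions:

#include <stdio.h>
#include <string.h>

#define MAX_PORTS 2

/* Hypothetical device state mirroring the new dev->nvfs[] field. */
struct device_state {
	int nvfs[MAX_PORTS + 1];	/* VFs for port 1, port 2, both ports */
	int num_vfs;
};

static int device_load(struct device_state *dev, int total_vfs,
		       const int nvfs[MAX_PORTS + 1])
{
	memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs));
	dev->num_vfs = total_vfs;
	return 0;
}

static void device_unload(struct device_state *dev)
{
	memset(dev, 0, sizeof(*dev));
}

/* Restart without losing the VF layout: save, unload, replay. */
static int device_restart(struct device_state *dev)
{
	int nvfs[MAX_PORTS + 1];
	int total_vfs = dev->num_vfs;

	memcpy(nvfs, dev->nvfs, sizeof(nvfs));
	device_unload(dev);
	return device_load(dev, total_vfs, nvfs);
}

int main(void)
{
	struct device_state dev = { .nvfs = {2, 3, 0}, .num_vfs = 5 };

	device_restart(&dev);
	printf("p1=%d p2=%d total=%d\n", dev.nvfs[0], dev.nvfs[1], dev.num_vfs);
	return 0;
}
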