Diffstat (limited to 'drivers/infiniband/hw/mlx4/main.c')
-rw-r--r--	drivers/infiniband/hw/mlx4/main.c	33
1 file changed, 18 insertions(+), 15 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 067a691ecbed..8be6db816460 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -253,14 +253,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
 	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
 
-	err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
-	if (err)
-		goto out;
+	if (!mlx4_is_slave(dev->dev))
+		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
 
 	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
-		resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
 		resp.response_length += sizeof(resp.hca_core_clock_offset);
-		resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+		if (!err && !mlx4_is_slave(dev->dev)) {
+			resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
+		}
 	}
 
 	if (uhw->outlen) {
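For reference, a minimal standalone sketch of the comp_mask/response_length convention the hunk above relies on: the optional field is always counted into response_length once the caller's buffer is large enough to hold it, but the comp_mask bit and the value itself are set only when the data could really be produced, mirroring the !err && !mlx4_is_slave() test. All names in the sketch (query_resp, RESP_MASK_TIMESTAMP, get_clock_offset, fill_resp) are hypothetical and not part of the driver's uAPI.

/*
 * Standalone illustration (ordinary C, not kernel code) of the pattern:
 * the response is extended field by field, and each optional field is
 * only marked valid via comp_mask when it could really be filled in.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RESP_MASK_TIMESTAMP	(1u << 0)	/* hypothetical mask bit */

struct query_resp {
	uint32_t comp_mask;		/* which optional fields are valid */
	uint32_t response_length;	/* bytes of resp the provider filled */
	uint64_t clock_offset;		/* optional field */
};

/* fake lookup; fails when the device is a "slave", as a VF would */
static int get_clock_offset(int is_master, uint64_t *offset)
{
	if (!is_master)
		return -1;
	*offset = 0x1000;
	return 0;
}

static void fill_resp(struct query_resp *resp, size_t outlen, int is_master)
{
	uint64_t offset = 0;
	int err;

	memset(resp, 0, sizeof(*resp));
	resp->response_length = offsetof(struct query_resp, clock_offset);

	err = get_clock_offset(is_master, &offset);

	if (outlen >= resp->response_length + sizeof(resp->clock_offset)) {
		resp->response_length += sizeof(resp->clock_offset);
		if (!err) {	/* advertise the field only when it is valid */
			resp->comp_mask |= RESP_MASK_TIMESTAMP;
			resp->clock_offset = offset;
		}
	}
}

int main(void)
{
	struct query_resp resp;

	fill_resp(&resp, sizeof(resp), 1);
	printf("master: mask=%#x offset=%#llx\n",
	       (unsigned)resp.comp_mask, (unsigned long long)resp.clock_offset);

	fill_resp(&resp, sizeof(resp), 0);
	printf("slave:  mask=%#x offset=%#llx\n",
	       (unsigned)resp.comp_mask, (unsigned long long)resp.clock_offset);
	return 0;
}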
@@ -2669,31 +2670,33 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
 	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
 	if (!dm) {
 		pr_err("failed to allocate memory for tunneling qp update\n");
-		goto out;
+		return;
 	}
 
 	for (i = 0; i < ports; i++) {
 		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
 		if (!dm[i]) {
 			pr_err("failed to allocate memory for tunneling qp update work struct\n");
-			for (i = 0; i < dev->caps.num_ports; i++) {
-				if (dm[i])
-					kfree(dm[i]);
-			}
+			while (--i >= 0)
+				kfree(dm[i]);
 			goto out;
 		}
-	}
-	/* initialize or tear down tunnel QPs for the slave */
-	for (i = 0; i < ports; i++) {
 		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
 		dm[i]->port = first_port + i + 1;
 		dm[i]->slave = slave;
 		dm[i]->do_init = do_init;
 		dm[i]->dev = ibdev;
-		spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
-		if (!ibdev->sriov.is_going_down)
+	}
+	/* initialize or tear down tunnel QPs for the slave */
+	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
+	if (!ibdev->sriov.is_going_down) {
+		for (i = 0; i < ports; i++)
 			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
 		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+	} else {
+		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+		for (i = 0; i < ports; i++)
+			kfree(dm[i]);
 	}
 out:
 	kfree(dm);
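For reference, a minimal standalone sketch of the unwind pattern the second hunk switches to, using hypothetical names and plain malloc/free in place of kmalloc/kfree: when allocation fails at index i, the code walks backwards with while (--i >= 0) and frees only the i elements that were actually allocated, rather than rescanning the whole array and testing every slot. It also folds initialization into the allocation loop, as the hunk does.

/*
 * Standalone illustration (ordinary malloc/free instead of kmalloc/kfree)
 * of the unwind pattern: when allocation fails at index i, walk backwards
 * and release only the i elements that were actually allocated.
 */
#include <stdio.h>
#include <stdlib.h>

struct work_item {
	int port;
};

static struct work_item **alloc_items(int n)
{
	struct work_item **arr;
	int i;

	arr = calloc(n, sizeof(*arr));
	if (!arr)
		return NULL;

	for (i = 0; i < n; i++) {
		arr[i] = malloc(sizeof(*arr[i]));
		if (!arr[i]) {
			while (--i >= 0)	/* free only what we got */
				free(arr[i]);
			free(arr);
			return NULL;
		}
		arr[i]->port = i + 1;	/* initialize in the same pass */
	}
	return arr;
}

int main(void)
{
	struct work_item **items;
	int i;

	items = alloc_items(4);
	if (!items)
		return 1;

	printf("allocated 4 items, last port %d\n", items[3]->port);

	for (i = 0; i < 4; i++)
		free(items[i]);
	free(items);
	return 0;
}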