diff options
author | Yishai Hadas <yishaih@mellanox.com> | 2015-01-25 09:59:35 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-01-25 17:43:13 -0500 |
commit | 872bf2fb69d90e3619befee842fc26db39d8e475 (patch) | |
tree | cd57553d6b75b48bf75a9aa6c4358fbed88e7b4a | |
parent | 7aee42c6764bae75d0eb2f674f0874193de90c05 (diff) |
net/mlx4_core: Maintain a persistent memory for mlx4 device
Maintain a persistent memory area that should survive the reset flow / a PCI error.
This comes as a preparation for an upcoming series adding support for the above flows.
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
24 files changed, 234 insertions, 163 deletions
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c index 0eb141c41416..a31e031afd87 100644 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c | |||
@@ -154,7 +154,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev, | |||
154 | continue; | 154 | continue; |
155 | 155 | ||
156 | slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ; | 156 | slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ; |
157 | if (slave_id >= dev->dev->num_vfs + 1) | 157 | if (slave_id >= dev->dev->persist->num_vfs + 1) |
158 | return; | 158 | return; |
159 | tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE]; | 159 | tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE]; |
160 | form_cache_ag = get_cached_alias_guid(dev, port_num, | 160 | form_cache_ag = get_cached_alias_guid(dev, port_num, |
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 82a7dd87089b..c7619716c31d 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
@@ -1951,7 +1951,8 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, | |||
1951 | ctx->ib_dev = &dev->ib_dev; | 1951 | ctx->ib_dev = &dev->ib_dev; |
1952 | 1952 | ||
1953 | for (i = 0; | 1953 | for (i = 0; |
1954 | i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1)); | 1954 | i < min(dev->dev->caps.sqp_demux, |
1955 | (u16)(dev->dev->persist->num_vfs + 1)); | ||
1955 | i++) { | 1956 | i++) { |
1956 | struct mlx4_active_ports actv_ports = | 1957 | struct mlx4_active_ports actv_ports = |
1957 | mlx4_get_active_ports(dev->dev, i); | 1958 | mlx4_get_active_ports(dev->dev, i); |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 57ecc5b204f3..b4fa6f658800 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -198,7 +198,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
198 | 198 | ||
199 | props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & | 199 | props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & |
200 | 0xffffff; | 200 | 0xffffff; |
201 | props->vendor_part_id = dev->dev->pdev->device; | 201 | props->vendor_part_id = dev->dev->persist->pdev->device; |
202 | props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32)); | 202 | props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32)); |
203 | memcpy(&props->sys_image_guid, out_mad->data + 4, 8); | 203 | memcpy(&props->sys_image_guid, out_mad->data + 4, 8); |
204 | 204 | ||
@@ -1375,7 +1375,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr, | |||
1375 | { | 1375 | { |
1376 | struct mlx4_ib_dev *dev = | 1376 | struct mlx4_ib_dev *dev = |
1377 | container_of(device, struct mlx4_ib_dev, ib_dev.dev); | 1377 | container_of(device, struct mlx4_ib_dev, ib_dev.dev); |
1378 | return sprintf(buf, "MT%d\n", dev->dev->pdev->device); | 1378 | return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device); |
1379 | } | 1379 | } |
1380 | 1380 | ||
1381 | static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, | 1381 | static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, |
@@ -1937,7 +1937,8 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev) | |||
1937 | int i; | 1937 | int i; |
1938 | 1938 | ||
1939 | if (mlx4_is_master(ibdev->dev)) { | 1939 | if (mlx4_is_master(ibdev->dev)) { |
1940 | for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) { | 1940 | for (slave = 0; slave <= ibdev->dev->persist->num_vfs; |
1941 | ++slave) { | ||
1941 | for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { | 1942 | for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { |
1942 | for (i = 0; | 1943 | for (i = 0; |
1943 | i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; | 1944 | i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; |
@@ -1994,7 +1995,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) | |||
1994 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) { | 1995 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) { |
1995 | for (j = 0; j < eq_per_port; j++) { | 1996 | for (j = 0; j < eq_per_port; j++) { |
1996 | snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s", | 1997 | snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s", |
1997 | i, j, dev->pdev->bus->name); | 1998 | i, j, dev->persist->pdev->bus->name); |
1998 | /* Set IRQ for specific name (per ring) */ | 1999 | /* Set IRQ for specific name (per ring) */ |
1999 | if (mlx4_assign_eq(dev, name, NULL, | 2000 | if (mlx4_assign_eq(dev, name, NULL, |
2000 | &ibdev->eq_table[eq])) { | 2001 | &ibdev->eq_table[eq])) { |
@@ -2058,7 +2059,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
2058 | 2059 | ||
2059 | ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev); | 2060 | ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev); |
2060 | if (!ibdev) { | 2061 | if (!ibdev) { |
2061 | dev_err(&dev->pdev->dev, "Device struct alloc failed\n"); | 2062 | dev_err(&dev->persist->pdev->dev, |
2063 | "Device struct alloc failed\n"); | ||
2062 | return NULL; | 2064 | return NULL; |
2063 | } | 2065 | } |
2064 | 2066 | ||
@@ -2085,7 +2087,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
2085 | ibdev->num_ports = num_ports; | 2087 | ibdev->num_ports = num_ports; |
2086 | ibdev->ib_dev.phys_port_cnt = ibdev->num_ports; | 2088 | ibdev->ib_dev.phys_port_cnt = ibdev->num_ports; |
2087 | ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; | 2089 | ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; |
2088 | ibdev->ib_dev.dma_device = &dev->pdev->dev; | 2090 | ibdev->ib_dev.dma_device = &dev->persist->pdev->dev; |
2089 | 2091 | ||
2090 | if (dev->caps.userspace_caps) | 2092 | if (dev->caps.userspace_caps) |
2091 | ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION; | 2093 | ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION; |
@@ -2236,7 +2238,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
2236 | sizeof(long), | 2238 | sizeof(long), |
2237 | GFP_KERNEL); | 2239 | GFP_KERNEL); |
2238 | if (!ibdev->ib_uc_qpns_bitmap) { | 2240 | if (!ibdev->ib_uc_qpns_bitmap) { |
2239 | dev_err(&dev->pdev->dev, "bit map alloc failed\n"); | 2241 | dev_err(&dev->persist->pdev->dev, |
2242 | "bit map alloc failed\n"); | ||
2240 | goto err_steer_qp_release; | 2243 | goto err_steer_qp_release; |
2241 | } | 2244 | } |
2242 | 2245 | ||
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index c36ccbd9a644..e0d271782d0a 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c | |||
@@ -401,7 +401,8 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device | |||
401 | if (!mfrpl->ibfrpl.page_list) | 401 | if (!mfrpl->ibfrpl.page_list) |
402 | goto err_free; | 402 | goto err_free; |
403 | 403 | ||
404 | mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev, | 404 | mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->persist-> |
405 | pdev->dev, | ||
405 | size, &mfrpl->map, | 406 | size, &mfrpl->map, |
406 | GFP_KERNEL); | 407 | GFP_KERNEL); |
407 | if (!mfrpl->mapped_page_list) | 408 | if (!mfrpl->mapped_page_list) |
@@ -423,7 +424,8 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list) | |||
423 | struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list); | 424 | struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list); |
424 | int size = page_list->max_page_list_len * sizeof (u64); | 425 | int size = page_list->max_page_list_len * sizeof (u64); |
425 | 426 | ||
426 | dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list, | 427 | dma_free_coherent(&dev->dev->persist->pdev->dev, size, |
428 | mfrpl->mapped_page_list, | ||
427 | mfrpl->map); | 429 | mfrpl->map); |
428 | kfree(mfrpl->ibfrpl.page_list); | 430 | kfree(mfrpl->ibfrpl.page_list); |
429 | kfree(mfrpl); | 431 | kfree(mfrpl); |
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index cb4c66e723b5..d10c2b8a5dad 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c | |||
@@ -375,7 +375,7 @@ static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max) | |||
375 | char base_name[9]; | 375 | char base_name[9]; |
376 | 376 | ||
377 | /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */ | 377 | /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */ |
378 | strlcpy(name, pci_name(dev->dev->pdev), max); | 378 | strlcpy(name, pci_name(dev->dev->persist->pdev), max); |
379 | strncpy(base_name, name, 8); /*till xxxx:yy:*/ | 379 | strncpy(base_name, name, 8); /*till xxxx:yy:*/ |
380 | base_name[8] = '\0'; | 380 | base_name[8] = '\0'; |
381 | /* with no ARI only 3 last bits are used so when the fn is higher than 8 | 381 | /* with no ARI only 3 last bits are used so when the fn is higher than 8 |
@@ -792,7 +792,7 @@ static int register_pkey_tree(struct mlx4_ib_dev *device) | |||
792 | if (!mlx4_is_master(device->dev)) | 792 | if (!mlx4_is_master(device->dev)) |
793 | return 0; | 793 | return 0; |
794 | 794 | ||
795 | for (i = 0; i <= device->dev->num_vfs; ++i) | 795 | for (i = 0; i <= device->dev->persist->num_vfs; ++i) |
796 | register_one_pkey_tree(device, i); | 796 | register_one_pkey_tree(device, i); |
797 | 797 | ||
798 | return 0; | 798 | return 0; |
@@ -807,7 +807,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device) | |||
807 | if (!mlx4_is_master(device->dev)) | 807 | if (!mlx4_is_master(device->dev)) |
808 | return; | 808 | return; |
809 | 809 | ||
810 | for (slave = device->dev->num_vfs; slave >= 0; --slave) { | 810 | for (slave = device->dev->persist->num_vfs; slave >= 0; --slave) { |
811 | list_for_each_entry_safe(p, t, | 811 | list_for_each_entry_safe(p, t, |
812 | &device->pkeys.pkey_port_list[slave], | 812 | &device->pkeys.pkey_port_list[slave], |
813 | entry) { | 813 | entry) { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index 963dd7e6d547..a716c26e0d99 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c | |||
@@ -592,7 +592,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | |||
592 | buf->nbufs = 1; | 592 | buf->nbufs = 1; |
593 | buf->npages = 1; | 593 | buf->npages = 1; |
594 | buf->page_shift = get_order(size) + PAGE_SHIFT; | 594 | buf->page_shift = get_order(size) + PAGE_SHIFT; |
595 | buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, | 595 | buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev, |
596 | size, &t, gfp); | 596 | size, &t, gfp); |
597 | if (!buf->direct.buf) | 597 | if (!buf->direct.buf) |
598 | return -ENOMEM; | 598 | return -ENOMEM; |
@@ -619,7 +619,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | |||
619 | 619 | ||
620 | for (i = 0; i < buf->nbufs; ++i) { | 620 | for (i = 0; i < buf->nbufs; ++i) { |
621 | buf->page_list[i].buf = | 621 | buf->page_list[i].buf = |
622 | dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, | 622 | dma_alloc_coherent(&dev->persist->pdev->dev, |
623 | PAGE_SIZE, | ||
623 | &t, gfp); | 624 | &t, gfp); |
624 | if (!buf->page_list[i].buf) | 625 | if (!buf->page_list[i].buf) |
625 | goto err_free; | 626 | goto err_free; |
@@ -657,7 +658,8 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) | |||
657 | int i; | 658 | int i; |
658 | 659 | ||
659 | if (buf->nbufs == 1) | 660 | if (buf->nbufs == 1) |
660 | dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, | 661 | dma_free_coherent(&dev->persist->pdev->dev, size, |
662 | buf->direct.buf, | ||
661 | buf->direct.map); | 663 | buf->direct.map); |
662 | else { | 664 | else { |
663 | if (BITS_PER_LONG == 64 && buf->direct.buf) | 665 | if (BITS_PER_LONG == 64 && buf->direct.buf) |
@@ -665,7 +667,8 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) | |||
665 | 667 | ||
666 | for (i = 0; i < buf->nbufs; ++i) | 668 | for (i = 0; i < buf->nbufs; ++i) |
667 | if (buf->page_list[i].buf) | 669 | if (buf->page_list[i].buf) |
668 | dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, | 670 | dma_free_coherent(&dev->persist->pdev->dev, |
671 | PAGE_SIZE, | ||
669 | buf->page_list[i].buf, | 672 | buf->page_list[i].buf, |
670 | buf->page_list[i].map); | 673 | buf->page_list[i].map); |
671 | kfree(buf->page_list); | 674 | kfree(buf->page_list); |
@@ -738,7 +741,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp | |||
738 | if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) | 741 | if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) |
739 | goto out; | 742 | goto out; |
740 | 743 | ||
741 | pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev), gfp); | 744 | pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp); |
742 | if (!pgdir) { | 745 | if (!pgdir) { |
743 | ret = -ENOMEM; | 746 | ret = -ENOMEM; |
744 | goto out; | 747 | goto out; |
@@ -775,7 +778,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db) | |||
775 | set_bit(i, db->u.pgdir->bits[o]); | 778 | set_bit(i, db->u.pgdir->bits[o]); |
776 | 779 | ||
777 | if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) { | 780 | if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) { |
778 | dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, | 781 | dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, |
779 | db->u.pgdir->db_page, db->u.pgdir->db_dma); | 782 | db->u.pgdir->db_page, db->u.pgdir->db_dma); |
780 | list_del(&db->u.pgdir->list); | 783 | list_del(&db->u.pgdir->list); |
781 | kfree(db->u.pgdir); | 784 | kfree(db->u.pgdir); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index 9c656fe4983d..1a102c9bac99 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c | |||
@@ -70,7 +70,7 @@ static void poll_catas(unsigned long dev_ptr) | |||
70 | 70 | ||
71 | if (readl(priv->catas_err.map)) { | 71 | if (readl(priv->catas_err.map)) { |
72 | /* If the device is off-line, we cannot try to recover it */ | 72 | /* If the device is off-line, we cannot try to recover it */ |
73 | if (pci_channel_offline(dev->pdev)) | 73 | if (pci_channel_offline(dev->persist->pdev)) |
74 | mod_timer(&priv->catas_err.timer, | 74 | mod_timer(&priv->catas_err.timer, |
75 | round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); | 75 | round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); |
76 | else { | 76 | else { |
@@ -94,6 +94,7 @@ static void catas_reset(struct work_struct *work) | |||
94 | { | 94 | { |
95 | struct mlx4_priv *priv, *tmppriv; | 95 | struct mlx4_priv *priv, *tmppriv; |
96 | struct mlx4_dev *dev; | 96 | struct mlx4_dev *dev; |
97 | struct mlx4_dev_persistent *persist; | ||
97 | 98 | ||
98 | LIST_HEAD(tlist); | 99 | LIST_HEAD(tlist); |
99 | int ret; | 100 | int ret; |
@@ -103,20 +104,20 @@ static void catas_reset(struct work_struct *work) | |||
103 | spin_unlock_irq(&catas_lock); | 104 | spin_unlock_irq(&catas_lock); |
104 | 105 | ||
105 | list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) { | 106 | list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) { |
106 | struct pci_dev *pdev = priv->dev.pdev; | 107 | struct pci_dev *pdev = priv->dev.persist->pdev; |
107 | 108 | ||
108 | /* If the device is off-line, we cannot reset it */ | 109 | /* If the device is off-line, we cannot reset it */ |
109 | if (pci_channel_offline(pdev)) | 110 | if (pci_channel_offline(pdev)) |
110 | continue; | 111 | continue; |
111 | 112 | ||
112 | ret = mlx4_restart_one(priv->dev.pdev); | 113 | ret = mlx4_restart_one(priv->dev.persist->pdev); |
113 | /* 'priv' now is not valid */ | 114 | /* 'priv' now is not valid */ |
114 | if (ret) | 115 | if (ret) |
115 | pr_err("mlx4 %s: Reset failed (%d)\n", | 116 | pr_err("mlx4 %s: Reset failed (%d)\n", |
116 | pci_name(pdev), ret); | 117 | pci_name(pdev), ret); |
117 | else { | 118 | else { |
118 | dev = pci_get_drvdata(pdev); | 119 | persist = pci_get_drvdata(pdev); |
119 | mlx4_dbg(dev, "Reset succeeded\n"); | 120 | mlx4_dbg(persist->dev, "Reset succeeded\n"); |
120 | } | 121 | } |
121 | } | 122 | } |
122 | } | 123 | } |
@@ -134,7 +135,7 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev) | |||
134 | init_timer(&priv->catas_err.timer); | 135 | init_timer(&priv->catas_err.timer); |
135 | priv->catas_err.map = NULL; | 136 | priv->catas_err.map = NULL; |
136 | 137 | ||
137 | addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) + | 138 | addr = pci_resource_start(dev->persist->pdev, priv->fw.catas_bar) + |
138 | priv->fw.catas_offset; | 139 | priv->fw.catas_offset; |
139 | 140 | ||
140 | priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); | 141 | priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 5c93d1451c44..7cd90e6a4272 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
@@ -307,7 +307,7 @@ static int cmd_pending(struct mlx4_dev *dev) | |||
307 | { | 307 | { |
308 | u32 status; | 308 | u32 status; |
309 | 309 | ||
310 | if (pci_channel_offline(dev->pdev)) | 310 | if (pci_channel_offline(dev->persist->pdev)) |
311 | return -EIO; | 311 | return -EIO; |
312 | 312 | ||
313 | status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET); | 313 | status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET); |
@@ -328,7 +328,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, | |||
328 | 328 | ||
329 | mutex_lock(&cmd->hcr_mutex); | 329 | mutex_lock(&cmd->hcr_mutex); |
330 | 330 | ||
331 | if (pci_channel_offline(dev->pdev)) { | 331 | if (pci_channel_offline(dev->persist->pdev)) { |
332 | /* | 332 | /* |
333 | * Device is going through error recovery | 333 | * Device is going through error recovery |
334 | * and cannot accept commands. | 334 | * and cannot accept commands. |
@@ -342,7 +342,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, | |||
342 | end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS); | 342 | end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS); |
343 | 343 | ||
344 | while (cmd_pending(dev)) { | 344 | while (cmd_pending(dev)) { |
345 | if (pci_channel_offline(dev->pdev)) { | 345 | if (pci_channel_offline(dev->persist->pdev)) { |
346 | /* | 346 | /* |
347 | * Device is going through error recovery | 347 | * Device is going through error recovery |
348 | * and cannot accept commands. | 348 | * and cannot accept commands. |
@@ -464,7 +464,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | |||
464 | 464 | ||
465 | down(&priv->cmd.poll_sem); | 465 | down(&priv->cmd.poll_sem); |
466 | 466 | ||
467 | if (pci_channel_offline(dev->pdev)) { | 467 | if (pci_channel_offline(dev->persist->pdev)) { |
468 | /* | 468 | /* |
469 | * Device is going through error recovery | 469 | * Device is going through error recovery |
470 | * and cannot accept commands. | 470 | * and cannot accept commands. |
@@ -487,7 +487,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | |||
487 | 487 | ||
488 | end = msecs_to_jiffies(timeout) + jiffies; | 488 | end = msecs_to_jiffies(timeout) + jiffies; |
489 | while (cmd_pending(dev) && time_before(jiffies, end)) { | 489 | while (cmd_pending(dev) && time_before(jiffies, end)) { |
490 | if (pci_channel_offline(dev->pdev)) { | 490 | if (pci_channel_offline(dev->persist->pdev)) { |
491 | /* | 491 | /* |
492 | * Device is going through error recovery | 492 | * Device is going through error recovery |
493 | * and cannot accept commands. | 493 | * and cannot accept commands. |
@@ -612,7 +612,7 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | |||
612 | int out_is_imm, u32 in_modifier, u8 op_modifier, | 612 | int out_is_imm, u32 in_modifier, u8 op_modifier, |
613 | u16 op, unsigned long timeout, int native) | 613 | u16 op, unsigned long timeout, int native) |
614 | { | 614 | { |
615 | if (pci_channel_offline(dev->pdev)) | 615 | if (pci_channel_offline(dev->persist->pdev)) |
616 | return -EIO; | 616 | return -EIO; |
617 | 617 | ||
618 | if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) { | 618 | if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) { |
@@ -1997,11 +1997,12 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) | |||
1997 | 1997 | ||
1998 | if (mlx4_is_master(dev)) | 1998 | if (mlx4_is_master(dev)) |
1999 | priv->mfunc.comm = | 1999 | priv->mfunc.comm = |
2000 | ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) + | 2000 | ioremap(pci_resource_start(dev->persist->pdev, |
2001 | priv->fw.comm_bar) + | ||
2001 | priv->fw.comm_base, MLX4_COMM_PAGESIZE); | 2002 | priv->fw.comm_base, MLX4_COMM_PAGESIZE); |
2002 | else | 2003 | else |
2003 | priv->mfunc.comm = | 2004 | priv->mfunc.comm = |
2004 | ioremap(pci_resource_start(dev->pdev, 2) + | 2005 | ioremap(pci_resource_start(dev->persist->pdev, 2) + |
2005 | MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); | 2006 | MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); |
2006 | if (!priv->mfunc.comm) { | 2007 | if (!priv->mfunc.comm) { |
2007 | mlx4_err(dev, "Couldn't map communication vector\n"); | 2008 | mlx4_err(dev, "Couldn't map communication vector\n"); |
@@ -2107,9 +2108,9 @@ err_comm_admin: | |||
2107 | err_comm: | 2108 | err_comm: |
2108 | iounmap(priv->mfunc.comm); | 2109 | iounmap(priv->mfunc.comm); |
2109 | err_vhcr: | 2110 | err_vhcr: |
2110 | dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, | 2111 | dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, |
2111 | priv->mfunc.vhcr, | 2112 | priv->mfunc.vhcr, |
2112 | priv->mfunc.vhcr_dma); | 2113 | priv->mfunc.vhcr_dma); |
2113 | priv->mfunc.vhcr = NULL; | 2114 | priv->mfunc.vhcr = NULL; |
2114 | return -ENOMEM; | 2115 | return -ENOMEM; |
2115 | } | 2116 | } |
@@ -2130,8 +2131,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev) | |||
2130 | } | 2131 | } |
2131 | 2132 | ||
2132 | if (!mlx4_is_slave(dev) && !priv->cmd.hcr) { | 2133 | if (!mlx4_is_slave(dev) && !priv->cmd.hcr) { |
2133 | priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + | 2134 | priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev, |
2134 | MLX4_HCR_BASE, MLX4_HCR_SIZE); | 2135 | 0) + MLX4_HCR_BASE, MLX4_HCR_SIZE); |
2135 | if (!priv->cmd.hcr) { | 2136 | if (!priv->cmd.hcr) { |
2136 | mlx4_err(dev, "Couldn't map command register\n"); | 2137 | mlx4_err(dev, "Couldn't map command register\n"); |
2137 | goto err; | 2138 | goto err; |
@@ -2140,7 +2141,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev) | |||
2140 | } | 2141 | } |
2141 | 2142 | ||
2142 | if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) { | 2143 | if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) { |
2143 | priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, | 2144 | priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev, |
2145 | PAGE_SIZE, | ||
2144 | &priv->mfunc.vhcr_dma, | 2146 | &priv->mfunc.vhcr_dma, |
2145 | GFP_KERNEL); | 2147 | GFP_KERNEL); |
2146 | if (!priv->mfunc.vhcr) | 2148 | if (!priv->mfunc.vhcr) |
@@ -2150,7 +2152,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev) | |||
2150 | } | 2152 | } |
2151 | 2153 | ||
2152 | if (!priv->cmd.pool) { | 2154 | if (!priv->cmd.pool) { |
2153 | priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, | 2155 | priv->cmd.pool = pci_pool_create("mlx4_cmd", |
2156 | dev->persist->pdev, | ||
2154 | MLX4_MAILBOX_SIZE, | 2157 | MLX4_MAILBOX_SIZE, |
2155 | MLX4_MAILBOX_SIZE, 0); | 2158 | MLX4_MAILBOX_SIZE, 0); |
2156 | if (!priv->cmd.pool) | 2159 | if (!priv->cmd.pool) |
@@ -2202,7 +2205,7 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask) | |||
2202 | } | 2205 | } |
2203 | if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr && | 2206 | if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr && |
2204 | (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) { | 2207 | (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) { |
2205 | dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, | 2208 | dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, |
2206 | priv->mfunc.vhcr, priv->mfunc.vhcr_dma); | 2209 | priv->mfunc.vhcr, priv->mfunc.vhcr_dma); |
2207 | priv->mfunc.vhcr = NULL; | 2210 | priv->mfunc.vhcr = NULL; |
2208 | } | 2211 | } |
@@ -2306,8 +2309,9 @@ u32 mlx4_comm_get_version(void) | |||
2306 | 2309 | ||
2307 | static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf) | 2310 | static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf) |
2308 | { | 2311 | { |
2309 | if ((vf < 0) || (vf >= dev->num_vfs)) { | 2312 | if ((vf < 0) || (vf >= dev->persist->num_vfs)) { |
2310 | mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs); | 2313 | mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", |
2314 | vf, dev->persist->num_vfs); | ||
2311 | return -EINVAL; | 2315 | return -EINVAL; |
2312 | } | 2316 | } |
2313 | 2317 | ||
@@ -2316,7 +2320,7 @@ static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf) | |||
2316 | 2320 | ||
2317 | int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave) | 2321 | int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave) |
2318 | { | 2322 | { |
2319 | if (slave < 1 || slave > dev->num_vfs) { | 2323 | if (slave < 1 || slave > dev->persist->num_vfs) { |
2320 | mlx4_err(dev, | 2324 | mlx4_err(dev, |
2321 | "Bad slave number:%d (number of activated slaves: %lu)\n", | 2325 | "Bad slave number:%d (number of activated slaves: %lu)\n", |
2322 | slave, dev->num_slaves); | 2326 | slave, dev->num_slaves); |
@@ -2388,7 +2392,7 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev, | |||
2388 | if (port <= 0 || port > dev->caps.num_ports) | 2392 | if (port <= 0 || port > dev->caps.num_ports) |
2389 | return slaves_pport; | 2393 | return slaves_pport; |
2390 | 2394 | ||
2391 | for (i = 0; i < dev->num_vfs + 1; i++) { | 2395 | for (i = 0; i < dev->persist->num_vfs + 1; i++) { |
2392 | struct mlx4_active_ports actv_ports = | 2396 | struct mlx4_active_ports actv_ports = |
2393 | mlx4_get_active_ports(dev, i); | 2397 | mlx4_get_active_ports(dev, i); |
2394 | if (test_bit(port - 1, actv_ports.ports)) | 2398 | if (test_bit(port - 1, actv_ports.ports)) |
@@ -2408,7 +2412,7 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv( | |||
2408 | 2412 | ||
2409 | bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX); | 2413 | bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX); |
2410 | 2414 | ||
2411 | for (i = 0; i < dev->num_vfs + 1; i++) { | 2415 | for (i = 0; i < dev->persist->num_vfs + 1; i++) { |
2412 | struct mlx4_active_ports actv_ports = | 2416 | struct mlx4_active_ports actv_ports = |
2413 | mlx4_get_active_ports(dev, i); | 2417 | mlx4_get_active_ports(dev, i); |
2414 | if (bitmap_equal(crit_ports->ports, actv_ports.ports, | 2418 | if (bitmap_equal(crit_ports->ports, actv_ports.ports, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 82322b1c8411..22da4d0d0f05 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c | |||
@@ -70,10 +70,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, | |||
70 | /* Allocate HW buffers on provided NUMA node. | 70 | /* Allocate HW buffers on provided NUMA node. |
71 | * dev->numa_node is used in mtt range allocation flow. | 71 | * dev->numa_node is used in mtt range allocation flow. |
72 | */ | 72 | */ |
73 | set_dev_node(&mdev->dev->pdev->dev, node); | 73 | set_dev_node(&mdev->dev->persist->pdev->dev, node); |
74 | err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, | 74 | err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, |
75 | cq->buf_size, 2 * PAGE_SIZE); | 75 | cq->buf_size, 2 * PAGE_SIZE); |
76 | set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); | 76 | set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); |
77 | if (err) | 77 | if (err) |
78 | goto err_cq; | 78 | goto err_cq; |
79 | 79 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 90e0f045a6bc..569eda9e83d6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
@@ -92,7 +92,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) | |||
92 | (u16) (mdev->dev->caps.fw_ver >> 32), | 92 | (u16) (mdev->dev->caps.fw_ver >> 32), |
93 | (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff), | 93 | (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff), |
94 | (u16) (mdev->dev->caps.fw_ver & 0xffff)); | 94 | (u16) (mdev->dev->caps.fw_ver & 0xffff)); |
95 | strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), | 95 | strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev), |
96 | sizeof(drvinfo->bus_info)); | 96 | sizeof(drvinfo->bus_info)); |
97 | drvinfo->n_stats = 0; | 97 | drvinfo->n_stats = 0; |
98 | drvinfo->regdump_len = 0; | 98 | drvinfo->regdump_len = 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 9f16f754137b..c643d2bbb7b9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c | |||
@@ -241,8 +241,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev) | |||
241 | spin_lock_init(&mdev->uar_lock); | 241 | spin_lock_init(&mdev->uar_lock); |
242 | 242 | ||
243 | mdev->dev = dev; | 243 | mdev->dev = dev; |
244 | mdev->dma_device = &(dev->pdev->dev); | 244 | mdev->dma_device = &dev->persist->pdev->dev; |
245 | mdev->pdev = dev->pdev; | 245 | mdev->pdev = dev->persist->pdev; |
246 | mdev->device_up = false; | 246 | mdev->device_up = false; |
247 | 247 | ||
248 | mdev->LSO_support = !!(dev->caps.flags & (1 << 15)); | 248 | mdev->LSO_support = !!(dev->caps.flags & (1 << 15)); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index d0d6dc1b8e46..43a3f9822f74 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -2457,7 +2457,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
2457 | netif_set_real_num_tx_queues(dev, prof->tx_ring_num); | 2457 | netif_set_real_num_tx_queues(dev, prof->tx_ring_num); |
2458 | netif_set_real_num_rx_queues(dev, prof->rx_ring_num); | 2458 | netif_set_real_num_rx_queues(dev, prof->rx_ring_num); |
2459 | 2459 | ||
2460 | SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); | 2460 | SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev); |
2461 | dev->dev_port = port - 1; | 2461 | dev->dev_port = port - 1; |
2462 | 2462 | ||
2463 | /* | 2463 | /* |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index a0474eb94aa3..2ba5d368edce 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c | |||
@@ -387,10 +387,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, | |||
387 | ring->rx_info, tmp); | 387 | ring->rx_info, tmp); |
388 | 388 | ||
389 | /* Allocate HW buffers on provided NUMA node */ | 389 | /* Allocate HW buffers on provided NUMA node */ |
390 | set_dev_node(&mdev->dev->pdev->dev, node); | 390 | set_dev_node(&mdev->dev->persist->pdev->dev, node); |
391 | err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, | 391 | err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, |
392 | ring->buf_size, 2 * PAGE_SIZE); | 392 | ring->buf_size, 2 * PAGE_SIZE); |
393 | set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); | 393 | set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); |
394 | if (err) | 394 | if (err) |
395 | goto err_info; | 395 | goto err_info; |
396 | 396 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 359bb1286eb5..55f9f5c5344e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -91,10 +91,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, | |||
91 | ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); | 91 | ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); |
92 | 92 | ||
93 | /* Allocate HW buffers on provided NUMA node */ | 93 | /* Allocate HW buffers on provided NUMA node */ |
94 | set_dev_node(&mdev->dev->pdev->dev, node); | 94 | set_dev_node(&mdev->dev->persist->pdev->dev, node); |
95 | err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, | 95 | err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, |
96 | 2 * PAGE_SIZE); | 96 | 2 * PAGE_SIZE); |
97 | set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); | 97 | set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); |
98 | if (err) { | 98 | if (err) { |
99 | en_err(priv, "Failed allocating hwq resources\n"); | 99 | en_err(priv, "Failed allocating hwq resources\n"); |
100 | goto err_bounce; | 100 | goto err_bounce; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 3d275fbaf0eb..7538c9ce98a9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c | |||
@@ -237,7 +237,7 @@ int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port) | |||
237 | struct mlx4_eqe eqe; | 237 | struct mlx4_eqe eqe; |
238 | 238 | ||
239 | /*don't send if we don't have the that slave */ | 239 | /*don't send if we don't have the that slave */ |
240 | if (dev->num_vfs < slave) | 240 | if (dev->persist->num_vfs < slave) |
241 | return 0; | 241 | return 0; |
242 | memset(&eqe, 0, sizeof eqe); | 242 | memset(&eqe, 0, sizeof eqe); |
243 | 243 | ||
@@ -255,7 +255,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, | |||
255 | struct mlx4_eqe eqe; | 255 | struct mlx4_eqe eqe; |
256 | 256 | ||
257 | /*don't send if we don't have the that slave */ | 257 | /*don't send if we don't have the that slave */ |
258 | if (dev->num_vfs < slave) | 258 | if (dev->persist->num_vfs < slave) |
259 | return 0; | 259 | return 0; |
260 | memset(&eqe, 0, sizeof eqe); | 260 | memset(&eqe, 0, sizeof eqe); |
261 | 261 | ||
@@ -310,7 +310,7 @@ static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event) | |||
310 | struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev, | 310 | struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev, |
311 | port); | 311 | port); |
312 | 312 | ||
313 | for (i = 0; i < dev->num_vfs + 1; i++) | 313 | for (i = 0; i < dev->persist->num_vfs + 1; i++) |
314 | if (test_bit(i, slaves_pport.slaves)) | 314 | if (test_bit(i, slaves_pport.slaves)) |
315 | set_and_calc_slave_port_state(dev, i, port, | 315 | set_and_calc_slave_port_state(dev, i, port, |
316 | event, &gen_event); | 316 | event, &gen_event); |
@@ -560,7 +560,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) | |||
560 | mlx4_priv(dev)->sense.do_sense_port[port] = 1; | 560 | mlx4_priv(dev)->sense.do_sense_port[port] = 1; |
561 | if (!mlx4_is_master(dev)) | 561 | if (!mlx4_is_master(dev)) |
562 | break; | 562 | break; |
563 | for (i = 0; i < dev->num_vfs + 1; i++) { | 563 | for (i = 0; i < dev->persist->num_vfs + 1; |
564 | i++) { | ||
564 | if (!test_bit(i, slaves_port.slaves)) | 565 | if (!test_bit(i, slaves_port.slaves)) |
565 | continue; | 566 | continue; |
566 | if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { | 567 | if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { |
@@ -596,7 +597,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) | |||
596 | if (!mlx4_is_master(dev)) | 597 | if (!mlx4_is_master(dev)) |
597 | break; | 598 | break; |
598 | if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) | 599 | if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) |
599 | for (i = 0; i < dev->num_vfs + 1; i++) { | 600 | for (i = 0; |
601 | i < dev->persist->num_vfs + 1; | ||
602 | i++) { | ||
600 | if (!test_bit(i, slaves_port.slaves)) | 603 | if (!test_bit(i, slaves_port.slaves)) |
601 | continue; | 604 | continue; |
602 | if (i == mlx4_master_func_num(dev)) | 605 | if (i == mlx4_master_func_num(dev)) |
@@ -865,7 +868,7 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) | |||
865 | 868 | ||
866 | if (!priv->eq_table.uar_map[index]) { | 869 | if (!priv->eq_table.uar_map[index]) { |
867 | priv->eq_table.uar_map[index] = | 870 | priv->eq_table.uar_map[index] = |
868 | ioremap(pci_resource_start(dev->pdev, 2) + | 871 | ioremap(pci_resource_start(dev->persist->pdev, 2) + |
869 | ((eq->eqn / 4) << PAGE_SHIFT), | 872 | ((eq->eqn / 4) << PAGE_SHIFT), |
870 | PAGE_SIZE); | 873 | PAGE_SIZE); |
871 | if (!priv->eq_table.uar_map[index]) { | 874 | if (!priv->eq_table.uar_map[index]) { |
@@ -928,8 +931,10 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent, | |||
928 | eq_context = mailbox->buf; | 931 | eq_context = mailbox->buf; |
929 | 932 | ||
930 | for (i = 0; i < npages; ++i) { | 933 | for (i = 0; i < npages; ++i) { |
931 | eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, | 934 | eq->page_list[i].buf = dma_alloc_coherent(&dev->persist-> |
932 | PAGE_SIZE, &t, GFP_KERNEL); | 935 | pdev->dev, |
936 | PAGE_SIZE, &t, | ||
937 | GFP_KERNEL); | ||
933 | if (!eq->page_list[i].buf) | 938 | if (!eq->page_list[i].buf) |
934 | goto err_out_free_pages; | 939 | goto err_out_free_pages; |
935 | 940 | ||
@@ -995,7 +1000,7 @@ err_out_free_eq: | |||
995 | err_out_free_pages: | 1000 | err_out_free_pages: |
996 | for (i = 0; i < npages; ++i) | 1001 | for (i = 0; i < npages; ++i) |
997 | if (eq->page_list[i].buf) | 1002 | if (eq->page_list[i].buf) |
998 | dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, | 1003 | dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, |
999 | eq->page_list[i].buf, | 1004 | eq->page_list[i].buf, |
1000 | eq->page_list[i].map); | 1005 | eq->page_list[i].map); |
1001 | 1006 | ||
@@ -1044,9 +1049,9 @@ static void mlx4_free_eq(struct mlx4_dev *dev, | |||
1044 | 1049 | ||
1045 | mlx4_mtt_cleanup(dev, &eq->mtt); | 1050 | mlx4_mtt_cleanup(dev, &eq->mtt); |
1046 | for (i = 0; i < npages; ++i) | 1051 | for (i = 0; i < npages; ++i) |
1047 | dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, | 1052 | dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, |
1048 | eq->page_list[i].buf, | 1053 | eq->page_list[i].buf, |
1049 | eq->page_list[i].map); | 1054 | eq->page_list[i].map); |
1050 | 1055 | ||
1051 | kfree(eq->page_list); | 1056 | kfree(eq->page_list); |
1052 | mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); | 1057 | mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); |
@@ -1060,7 +1065,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev) | |||
1060 | int i, vec; | 1065 | int i, vec; |
1061 | 1066 | ||
1062 | if (eq_table->have_irq) | 1067 | if (eq_table->have_irq) |
1063 | free_irq(dev->pdev->irq, dev); | 1068 | free_irq(dev->persist->pdev->irq, dev); |
1064 | 1069 | ||
1065 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) | 1070 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) |
1066 | if (eq_table->eq[i].have_irq) { | 1071 | if (eq_table->eq[i].have_irq) { |
@@ -1089,7 +1094,8 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev) | |||
1089 | { | 1094 | { |
1090 | struct mlx4_priv *priv = mlx4_priv(dev); | 1095 | struct mlx4_priv *priv = mlx4_priv(dev); |
1091 | 1096 | ||
1092 | priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) + | 1097 | priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev, |
1098 | priv->fw.clr_int_bar) + | ||
1093 | priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); | 1099 | priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); |
1094 | if (!priv->clr_base) { | 1100 | if (!priv->clr_base) { |
1095 | mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n"); | 1101 | mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n"); |
@@ -1212,13 +1218,13 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) | |||
1212 | i * MLX4_IRQNAME_SIZE, | 1218 | i * MLX4_IRQNAME_SIZE, |
1213 | MLX4_IRQNAME_SIZE, | 1219 | MLX4_IRQNAME_SIZE, |
1214 | "mlx4-comp-%d@pci:%s", i, | 1220 | "mlx4-comp-%d@pci:%s", i, |
1215 | pci_name(dev->pdev)); | 1221 | pci_name(dev->persist->pdev)); |
1216 | } else { | 1222 | } else { |
1217 | snprintf(priv->eq_table.irq_names + | 1223 | snprintf(priv->eq_table.irq_names + |
1218 | i * MLX4_IRQNAME_SIZE, | 1224 | i * MLX4_IRQNAME_SIZE, |
1219 | MLX4_IRQNAME_SIZE, | 1225 | MLX4_IRQNAME_SIZE, |
1220 | "mlx4-async@pci:%s", | 1226 | "mlx4-async@pci:%s", |
1221 | pci_name(dev->pdev)); | 1227 | pci_name(dev->persist->pdev)); |
1222 | } | 1228 | } |
1223 | 1229 | ||
1224 | eq_name = priv->eq_table.irq_names + | 1230 | eq_name = priv->eq_table.irq_names + |
@@ -1235,8 +1241,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) | |||
1235 | snprintf(priv->eq_table.irq_names, | 1241 | snprintf(priv->eq_table.irq_names, |
1236 | MLX4_IRQNAME_SIZE, | 1242 | MLX4_IRQNAME_SIZE, |
1237 | DRV_NAME "@pci:%s", | 1243 | DRV_NAME "@pci:%s", |
1238 | pci_name(dev->pdev)); | 1244 | pci_name(dev->persist->pdev)); |
1239 | err = request_irq(dev->pdev->irq, mlx4_interrupt, | 1245 | err = request_irq(dev->persist->pdev->irq, mlx4_interrupt, |
1240 | IRQF_SHARED, priv->eq_table.irq_names, dev); | 1246 | IRQF_SHARED, priv->eq_table.irq_names, dev); |
1241 | if (err) | 1247 | if (err) |
1242 | goto err_out_async; | 1248 | goto err_out_async; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 97c9b1db1d27..2a9dd460a95f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c | |||
@@ -56,7 +56,7 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu | |||
56 | int i; | 56 | int i; |
57 | 57 | ||
58 | if (chunk->nsg > 0) | 58 | if (chunk->nsg > 0) |
59 | pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, | 59 | pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages, |
60 | PCI_DMA_BIDIRECTIONAL); | 60 | PCI_DMA_BIDIRECTIONAL); |
61 | 61 | ||
62 | for (i = 0; i < chunk->npages; ++i) | 62 | for (i = 0; i < chunk->npages; ++i) |
@@ -69,7 +69,8 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk * | |||
69 | int i; | 69 | int i; |
70 | 70 | ||
71 | for (i = 0; i < chunk->npages; ++i) | 71 | for (i = 0; i < chunk->npages; ++i) |
72 | dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length, | 72 | dma_free_coherent(&dev->persist->pdev->dev, |
73 | chunk->mem[i].length, | ||
73 | lowmem_page_address(sg_page(&chunk->mem[i])), | 74 | lowmem_page_address(sg_page(&chunk->mem[i])), |
74 | sg_dma_address(&chunk->mem[i])); | 75 | sg_dma_address(&chunk->mem[i])); |
75 | } | 76 | } |
@@ -173,7 +174,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, | |||
173 | --cur_order; | 174 | --cur_order; |
174 | 175 | ||
175 | if (coherent) | 176 | if (coherent) |
176 | ret = mlx4_alloc_icm_coherent(&dev->pdev->dev, | 177 | ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev, |
177 | &chunk->mem[chunk->npages], | 178 | &chunk->mem[chunk->npages], |
178 | cur_order, gfp_mask); | 179 | cur_order, gfp_mask); |
179 | else | 180 | else |
@@ -193,7 +194,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, | |||
193 | if (coherent) | 194 | if (coherent) |
194 | ++chunk->nsg; | 195 | ++chunk->nsg; |
195 | else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { | 196 | else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { |
196 | chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, | 197 | chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, |
197 | chunk->npages, | 198 | chunk->npages, |
198 | PCI_DMA_BIDIRECTIONAL); | 199 | PCI_DMA_BIDIRECTIONAL); |
199 | 200 | ||
@@ -208,7 +209,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, | |||
208 | } | 209 | } |
209 | 210 | ||
210 | if (!coherent && chunk) { | 211 | if (!coherent && chunk) { |
211 | chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, | 212 | chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, |
212 | chunk->npages, | 213 | chunk->npages, |
213 | PCI_DMA_BIDIRECTIONAL); | 214 | PCI_DMA_BIDIRECTIONAL); |
214 | 215 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 03e9eb0dc761..abcee61f8a47 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -318,10 +318,11 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
318 | return -ENODEV; | 318 | return -ENODEV; |
319 | } | 319 | } |
320 | 320 | ||
321 | if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { | 321 | if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) { |
322 | mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", | 322 | mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", |
323 | dev_cap->uar_size, | 323 | dev_cap->uar_size, |
324 | (unsigned long long) pci_resource_len(dev->pdev, 2)); | 324 | (unsigned long long) |
325 | pci_resource_len(dev->persist->pdev, 2)); | ||
325 | return -ENODEV; | 326 | return -ENODEV; |
326 | } | 327 | } |
327 | 328 | ||
@@ -541,8 +542,10 @@ static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev, | |||
541 | *speed = PCI_SPEED_UNKNOWN; | 542 | *speed = PCI_SPEED_UNKNOWN; |
542 | *width = PCIE_LNK_WIDTH_UNKNOWN; | 543 | *width = PCIE_LNK_WIDTH_UNKNOWN; |
543 | 544 | ||
544 | err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1); | 545 | err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP, |
545 | err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2); | 546 | &lnkcap1); |
547 | err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2, | ||
548 | &lnkcap2); | ||
546 | if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ | 549 | if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ |
547 | if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) | 550 | if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) |
548 | *speed = PCIE_SPEED_8_0GT; | 551 | *speed = PCIE_SPEED_8_0GT; |
@@ -587,7 +590,7 @@ static void mlx4_check_pcie_caps(struct mlx4_dev *dev) | |||
587 | return; | 590 | return; |
588 | } | 591 | } |
589 | 592 | ||
590 | err = pcie_get_minimum_link(dev->pdev, &speed, &width); | 593 | err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width); |
591 | if (err || speed == PCI_SPEED_UNKNOWN || | 594 | if (err || speed == PCI_SPEED_UNKNOWN || |
592 | width == PCIE_LNK_WIDTH_UNKNOWN) { | 595 | width == PCIE_LNK_WIDTH_UNKNOWN) { |
593 | mlx4_warn(dev, | 596 | mlx4_warn(dev, |
@@ -837,10 +840,12 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) | |||
837 | 840 | ||
838 | if (dev->caps.uar_page_size * (dev->caps.num_uars - | 841 | if (dev->caps.uar_page_size * (dev->caps.num_uars - |
839 | dev->caps.reserved_uars) > | 842 | dev->caps.reserved_uars) > |
840 | pci_resource_len(dev->pdev, 2)) { | 843 | pci_resource_len(dev->persist->pdev, |
844 | 2)) { | ||
841 | mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", | 845 | mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", |
842 | dev->caps.uar_page_size * dev->caps.num_uars, | 846 | dev->caps.uar_page_size * dev->caps.num_uars, |
843 | (unsigned long long) pci_resource_len(dev->pdev, 2)); | 847 | (unsigned long long) |
848 | pci_resource_len(dev->persist->pdev, 2)); | ||
844 | goto err_mem; | 849 | goto err_mem; |
845 | } | 850 | } |
846 | 851 | ||
@@ -1492,9 +1497,9 @@ static int map_bf_area(struct mlx4_dev *dev) | |||
1492 | if (!dev->caps.bf_reg_size) | 1497 | if (!dev->caps.bf_reg_size) |
1493 | return -ENXIO; | 1498 | return -ENXIO; |
1494 | 1499 | ||
1495 | bf_start = pci_resource_start(dev->pdev, 2) + | 1500 | bf_start = pci_resource_start(dev->persist->pdev, 2) + |
1496 | (dev->caps.num_uars << PAGE_SHIFT); | 1501 | (dev->caps.num_uars << PAGE_SHIFT); |
1497 | bf_len = pci_resource_len(dev->pdev, 2) - | 1502 | bf_len = pci_resource_len(dev->persist->pdev, 2) - |
1498 | (dev->caps.num_uars << PAGE_SHIFT); | 1503 | (dev->caps.num_uars << PAGE_SHIFT); |
1499 | priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); | 1504 | priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); |
1500 | if (!priv->bf_mapping) | 1505 | if (!priv->bf_mapping) |
@@ -1536,7 +1541,8 @@ static int map_internal_clock(struct mlx4_dev *dev) | |||
1536 | struct mlx4_priv *priv = mlx4_priv(dev); | 1541 | struct mlx4_priv *priv = mlx4_priv(dev); |
1537 | 1542 | ||
1538 | priv->clock_mapping = | 1543 | priv->clock_mapping = |
1539 | ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) + | 1544 | ioremap(pci_resource_start(dev->persist->pdev, |
1545 | priv->fw.clock_bar) + | ||
1540 | priv->fw.clock_offset, MLX4_CLOCK_SIZE); | 1546 | priv->fw.clock_offset, MLX4_CLOCK_SIZE); |
1541 | 1547 | ||
1542 | if (!priv->clock_mapping) | 1548 | if (!priv->clock_mapping) |
@@ -1705,7 +1711,8 @@ static void choose_steering_mode(struct mlx4_dev *dev, | |||
1705 | if (mlx4_log_num_mgm_entry_size <= 0 && | 1711 | if (mlx4_log_num_mgm_entry_size <= 0 && |
1706 | dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && | 1712 | dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && |
1707 | (!mlx4_is_mfunc(dev) || | 1713 | (!mlx4_is_mfunc(dev) || |
1708 | (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) && | 1714 | (dev_cap->fs_max_num_qp_per_entry >= |
1715 | (dev->persist->num_vfs + 1))) && | ||
1709 | choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= | 1716 | choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= |
1710 | MLX4_MIN_MGM_LOG_ENTRY_SIZE) { | 1717 | MLX4_MIN_MGM_LOG_ENTRY_SIZE) { |
1711 | dev->oper_log_mgm_entry_size = | 1718 | dev->oper_log_mgm_entry_size = |
@@ -2288,7 +2295,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) | |||
2288 | for (i = 0; i < nreq; ++i) | 2295 | for (i = 0; i < nreq; ++i) |
2289 | entries[i].entry = i; | 2296 | entries[i].entry = i; |
2290 | 2297 | ||
2291 | nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq); | 2298 | nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, |
2299 | nreq); | ||
2292 | 2300 | ||
2293 | if (nreq < 0) { | 2301 | if (nreq < 0) { |
2294 | kfree(entries); | 2302 | kfree(entries); |
@@ -2316,7 +2324,7 @@ no_msi: | |||
2316 | dev->caps.comp_pool = 0; | 2324 | dev->caps.comp_pool = 0; |
2317 | 2325 | ||
2318 | for (i = 0; i < 2; ++i) | 2326 | for (i = 0; i < 2; ++i) |
2319 | priv->eq_table.eq[i].irq = dev->pdev->irq; | 2327 | priv->eq_table.eq[i].irq = dev->persist->pdev->irq; |
2320 | } | 2328 | } |
2321 | 2329 | ||
2322 | static int mlx4_init_port_info(struct mlx4_dev *dev, int port) | 2330 | static int mlx4_init_port_info(struct mlx4_dev *dev, int port) |
@@ -2344,7 +2352,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) | |||
2344 | info->port_attr.show = show_port_type; | 2352 | info->port_attr.show = show_port_type; |
2345 | sysfs_attr_init(&info->port_attr.attr); | 2353 | sysfs_attr_init(&info->port_attr.attr); |
2346 | 2354 | ||
2347 | err = device_create_file(&dev->pdev->dev, &info->port_attr); | 2355 | err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); |
2348 | if (err) { | 2356 | if (err) { |
2349 | mlx4_err(dev, "Failed to create file for port %d\n", port); | 2357 | mlx4_err(dev, "Failed to create file for port %d\n", port); |
2350 | info->port = -1; | 2358 | info->port = -1; |
@@ -2361,10 +2369,12 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) | |||
2361 | info->port_mtu_attr.show = show_port_ib_mtu; | 2369 | info->port_mtu_attr.show = show_port_ib_mtu; |
2362 | sysfs_attr_init(&info->port_mtu_attr.attr); | 2370 | sysfs_attr_init(&info->port_mtu_attr.attr); |
2363 | 2371 | ||
2364 | err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr); | 2372 | err = device_create_file(&dev->persist->pdev->dev, |
2373 | &info->port_mtu_attr); | ||
2365 | if (err) { | 2374 | if (err) { |
2366 | mlx4_err(dev, "Failed to create mtu file for port %d\n", port); | 2375 | mlx4_err(dev, "Failed to create mtu file for port %d\n", port); |
2367 | device_remove_file(&info->dev->pdev->dev, &info->port_attr); | 2376 | device_remove_file(&info->dev->persist->pdev->dev, |
2377 | &info->port_attr); | ||
2368 | info->port = -1; | 2378 | info->port = -1; |
2369 | } | 2379 | } |
2370 | 2380 | ||
@@ -2376,8 +2386,9 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info) | |||
2376 | if (info->port < 0) | 2386 | if (info->port < 0) |
2377 | return; | 2387 | return; |
2378 | 2388 | ||
2379 | device_remove_file(&info->dev->pdev->dev, &info->port_attr); | 2389 | device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); |
2380 | device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr); | 2390 | device_remove_file(&info->dev->persist->pdev->dev, |
2391 | &info->port_mtu_attr); | ||
2381 | } | 2392 | } |
2382 | 2393 | ||
2383 | static int mlx4_init_steering(struct mlx4_dev *dev) | 2394 | static int mlx4_init_steering(struct mlx4_dev *dev) |
@@ -2444,10 +2455,11 @@ static int mlx4_get_ownership(struct mlx4_dev *dev) | |||
2444 | void __iomem *owner; | 2455 | void __iomem *owner; |
2445 | u32 ret; | 2456 | u32 ret; |
2446 | 2457 | ||
2447 | if (pci_channel_offline(dev->pdev)) | 2458 | if (pci_channel_offline(dev->persist->pdev)) |
2448 | return -EIO; | 2459 | return -EIO; |
2449 | 2460 | ||
2450 | owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, | 2461 | owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + |
2462 | MLX4_OWNER_BASE, | ||
2451 | MLX4_OWNER_SIZE); | 2463 | MLX4_OWNER_SIZE); |
2452 | if (!owner) { | 2464 | if (!owner) { |
2453 | mlx4_err(dev, "Failed to obtain ownership bit\n"); | 2465 | mlx4_err(dev, "Failed to obtain ownership bit\n"); |
@@ -2463,10 +2475,11 @@ static void mlx4_free_ownership(struct mlx4_dev *dev) | |||
2463 | { | 2475 | { |
2464 | void __iomem *owner; | 2476 | void __iomem *owner; |
2465 | 2477 | ||
2466 | if (pci_channel_offline(dev->pdev)) | 2478 | if (pci_channel_offline(dev->persist->pdev)) |
2467 | return; | 2479 | return; |
2468 | 2480 | ||
2469 | owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, | 2481 | owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + |
2482 | MLX4_OWNER_BASE, | ||
2470 | MLX4_OWNER_SIZE); | 2483 | MLX4_OWNER_SIZE); |
2471 | if (!owner) { | 2484 | if (!owner) { |
2472 | mlx4_err(dev, "Failed to obtain ownership bit\n"); | 2485 | mlx4_err(dev, "Failed to obtain ownership bit\n"); |
@@ -2514,13 +2527,13 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, | |||
2514 | dev_flags |= MLX4_FLAG_SRIOV | | 2527 | dev_flags |= MLX4_FLAG_SRIOV | |
2515 | MLX4_FLAG_MASTER; | 2528 | MLX4_FLAG_MASTER; |
2516 | dev_flags &= ~MLX4_FLAG_SLAVE; | 2529 | dev_flags &= ~MLX4_FLAG_SLAVE; |
2517 | dev->num_vfs = total_vfs; | 2530 | dev->persist->num_vfs = total_vfs; |
2518 | } | 2531 | } |
2519 | return dev_flags; | 2532 | return dev_flags; |
2520 | 2533 | ||
2521 | disable_sriov: | 2534 | disable_sriov: |
2522 | atomic_dec(&pf_loading); | 2535 | atomic_dec(&pf_loading); |
2523 | dev->num_vfs = 0; | 2536 | dev->persist->num_vfs = 0; |
2524 | kfree(dev->dev_vfs); | 2537 | kfree(dev->dev_vfs); |
2525 | return dev_flags & ~MLX4_FLAG_MASTER; | 2538 | return dev_flags & ~MLX4_FLAG_MASTER; |
2526 | } | 2539 | } |
@@ -2607,7 +2620,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, | |||
2607 | existing_vfs = pci_num_vf(pdev); | 2620 | existing_vfs = pci_num_vf(pdev); |
2608 | if (existing_vfs) | 2621 | if (existing_vfs) |
2609 | dev->flags |= MLX4_FLAG_SRIOV; | 2622 | dev->flags |= MLX4_FLAG_SRIOV; |
2610 | dev->num_vfs = total_vfs; | 2623 | dev->persist->num_vfs = total_vfs; |
2611 | } | 2624 | } |
2612 | } | 2625 | } |
2613 | 2626 | ||
@@ -2771,12 +2784,14 @@ slave_start: | |||
2771 | dev->caps.num_ports); | 2784 | dev->caps.num_ports); |
2772 | goto err_close; | 2785 | goto err_close; |
2773 | } | 2786 | } |
2774 | memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs)); | 2787 | memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); |
2775 | 2788 | ||
2776 | for (i = 0; i < sizeof(dev->nvfs)/sizeof(dev->nvfs[0]); i++) { | 2789 | for (i = 0; |
2790 | i < sizeof(dev->persist->nvfs)/ | ||
2791 | sizeof(dev->persist->nvfs[0]); i++) { | ||
2777 | unsigned j; | 2792 | unsigned j; |
2778 | 2793 | ||
2779 | for (j = 0; j < dev->nvfs[i]; ++sum, ++j) { | 2794 | for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { |
2780 | dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; | 2795 | dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; |
2781 | dev->dev_vfs[sum].n_ports = i < 2 ? 1 : | 2796 | dev->dev_vfs[sum].n_ports = i < 2 ? 1 : |
2782 | dev->caps.num_ports; | 2797 | dev->caps.num_ports; |
@@ -2846,7 +2861,7 @@ slave_start: | |||
2846 | 2861 | ||
2847 | priv->removed = 0; | 2862 | priv->removed = 0; |
2848 | 2863 | ||
2849 | if (mlx4_is_master(dev) && dev->num_vfs) | 2864 | if (mlx4_is_master(dev) && dev->persist->num_vfs) |
2850 | atomic_dec(&pf_loading); | 2865 | atomic_dec(&pf_loading); |
2851 | 2866 | ||
2852 | kfree(dev_cap); | 2867 | kfree(dev_cap); |
@@ -2908,7 +2923,7 @@ err_sriov: | |||
2908 | if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) | 2923 | if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) |
2909 | pci_disable_sriov(pdev); | 2924 | pci_disable_sriov(pdev); |
2910 | 2925 | ||
2911 | if (mlx4_is_master(dev) && dev->num_vfs) | 2926 | if (mlx4_is_master(dev) && dev->persist->num_vfs) |
2912 | atomic_dec(&pf_loading); | 2927 | atomic_dec(&pf_loading); |
2913 | 2928 | ||
2914 | kfree(priv->dev.dev_vfs); | 2929 | kfree(priv->dev.dev_vfs); |
@@ -3076,20 +3091,28 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
3076 | return -ENOMEM; | 3091 | return -ENOMEM; |
3077 | 3092 | ||
3078 | dev = &priv->dev; | 3093 | dev = &priv->dev; |
3079 | dev->pdev = pdev; | 3094 | dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); |
3080 | pci_set_drvdata(pdev, dev); | 3095 | if (!dev->persist) { |
3096 | kfree(priv); | ||
3097 | return -ENOMEM; | ||
3098 | } | ||
3099 | dev->persist->pdev = pdev; | ||
3100 | dev->persist->dev = dev; | ||
3101 | pci_set_drvdata(pdev, dev->persist); | ||
3081 | priv->pci_dev_data = id->driver_data; | 3102 | priv->pci_dev_data = id->driver_data; |
3082 | 3103 | ||
3083 | ret = __mlx4_init_one(pdev, id->driver_data, priv); | 3104 | ret = __mlx4_init_one(pdev, id->driver_data, priv); |
3084 | if (ret) | 3105 | if (ret) { |
3106 | kfree(dev->persist); | ||
3085 | kfree(priv); | 3107 | kfree(priv); |
3086 | 3108 | } | |
3087 | return ret; | 3109 | return ret; |
3088 | } | 3110 | } |
3089 | 3111 | ||
3090 | static void mlx4_unload_one(struct pci_dev *pdev) | 3112 | static void mlx4_unload_one(struct pci_dev *pdev) |
3091 | { | 3113 | { |
3092 | struct mlx4_dev *dev = pci_get_drvdata(pdev); | 3114 | struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); |
3115 | struct mlx4_dev *dev = persist->dev; | ||
3093 | struct mlx4_priv *priv = mlx4_priv(dev); | 3116 | struct mlx4_priv *priv = mlx4_priv(dev); |
3094 | int pci_dev_data; | 3117 | int pci_dev_data; |
3095 | int p; | 3118 | int p; |
@@ -3155,7 +3178,7 @@ static void mlx4_unload_one(struct pci_dev *pdev) | |||
3155 | mlx4_warn(dev, "Disabling SR-IOV\n"); | 3178 | mlx4_warn(dev, "Disabling SR-IOV\n"); |
3156 | pci_disable_sriov(pdev); | 3179 | pci_disable_sriov(pdev); |
3157 | dev->flags &= ~MLX4_FLAG_SRIOV; | 3180 | dev->flags &= ~MLX4_FLAG_SRIOV; |
3158 | dev->num_vfs = 0; | 3181 | dev->persist->num_vfs = 0; |
3159 | } | 3182 | } |
3160 | 3183 | ||
3161 | if (!mlx4_is_slave(dev)) | 3184 | if (!mlx4_is_slave(dev)) |
@@ -3175,26 +3198,29 @@ static void mlx4_unload_one(struct pci_dev *pdev) | |||
3175 | 3198 | ||
3176 | static void mlx4_remove_one(struct pci_dev *pdev) | 3199 | static void mlx4_remove_one(struct pci_dev *pdev) |
3177 | { | 3200 | { |
3178 | struct mlx4_dev *dev = pci_get_drvdata(pdev); | 3201 | struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); |
3202 | struct mlx4_dev *dev = persist->dev; | ||
3179 | struct mlx4_priv *priv = mlx4_priv(dev); | 3203 | struct mlx4_priv *priv = mlx4_priv(dev); |
3180 | 3204 | ||
3181 | mlx4_unload_one(pdev); | 3205 | mlx4_unload_one(pdev); |
3182 | pci_release_regions(pdev); | 3206 | pci_release_regions(pdev); |
3183 | pci_disable_device(pdev); | 3207 | pci_disable_device(pdev); |
3208 | kfree(dev->persist); | ||
3184 | kfree(priv); | 3209 | kfree(priv); |
3185 | pci_set_drvdata(pdev, NULL); | 3210 | pci_set_drvdata(pdev, NULL); |
3186 | } | 3211 | } |
3187 | 3212 | ||
3188 | int mlx4_restart_one(struct pci_dev *pdev) | 3213 | int mlx4_restart_one(struct pci_dev *pdev) |
3189 | { | 3214 | { |
3190 | struct mlx4_dev *dev = pci_get_drvdata(pdev); | 3215 | struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); |
3216 | struct mlx4_dev *dev = persist->dev; | ||
3191 | struct mlx4_priv *priv = mlx4_priv(dev); | 3217 | struct mlx4_priv *priv = mlx4_priv(dev); |
3192 | int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; | 3218 | int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; |
3193 | int pci_dev_data, err, total_vfs; | 3219 | int pci_dev_data, err, total_vfs; |
3194 | 3220 | ||
3195 | pci_dev_data = priv->pci_dev_data; | 3221 | pci_dev_data = priv->pci_dev_data; |
3196 | total_vfs = dev->num_vfs; | 3222 | total_vfs = dev->persist->num_vfs; |
3197 | memcpy(nvfs, dev->nvfs, sizeof(dev->nvfs)); | 3223 | memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); |
3198 | 3224 | ||
3199 | mlx4_unload_one(pdev); | 3225 | mlx4_unload_one(pdev); |
3200 | err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv); | 3226 | err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index bdd4eea2247c..faa37ab75a9d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h | |||
@@ -221,16 +221,17 @@ extern int mlx4_debug_level; | |||
221 | #define mlx4_dbg(mdev, format, ...) \ | 221 | #define mlx4_dbg(mdev, format, ...) \ |
222 | do { \ | 222 | do { \ |
223 | if (mlx4_debug_level) \ | 223 | if (mlx4_debug_level) \ |
224 | dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \ | 224 | dev_printk(KERN_DEBUG, \ |
225 | &(mdev)->persist->pdev->dev, format, \ | ||
225 | ##__VA_ARGS__); \ | 226 | ##__VA_ARGS__); \ |
226 | } while (0) | 227 | } while (0) |
227 | 228 | ||
228 | #define mlx4_err(mdev, format, ...) \ | 229 | #define mlx4_err(mdev, format, ...) \ |
229 | dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__) | 230 | dev_err(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__) |
230 | #define mlx4_info(mdev, format, ...) \ | 231 | #define mlx4_info(mdev, format, ...) \ |
231 | dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__) | 232 | dev_info(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__) |
232 | #define mlx4_warn(mdev, format, ...) \ | 233 | #define mlx4_warn(mdev, format, ...) \ |
233 | dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__) | 234 | dev_warn(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__) |
234 | 235 | ||
235 | extern int mlx4_log_num_mgm_entry_size; | 236 | extern int mlx4_log_num_mgm_entry_size; |
236 | extern int log_mtts_per_seg; | 237 | extern int log_mtts_per_seg; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 7094a9c70fd5..8dbdf1d29357 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c | |||
@@ -708,13 +708,13 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | |||
708 | if (!mtts) | 708 | if (!mtts) |
709 | return -ENOMEM; | 709 | return -ENOMEM; |
710 | 710 | ||
711 | dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, | 711 | dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle, |
712 | npages * sizeof (u64), DMA_TO_DEVICE); | 712 | npages * sizeof (u64), DMA_TO_DEVICE); |
713 | 713 | ||
714 | for (i = 0; i < npages; ++i) | 714 | for (i = 0; i < npages; ++i) |
715 | mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); | 715 | mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); |
716 | 716 | ||
717 | dma_sync_single_for_device(&dev->pdev->dev, dma_handle, | 717 | dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle, |
718 | npages * sizeof (u64), DMA_TO_DEVICE); | 718 | npages * sizeof (u64), DMA_TO_DEVICE); |
719 | 719 | ||
720 | return 0; | 720 | return 0; |
@@ -1020,13 +1020,13 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list | |||
1020 | /* Make sure MPT status is visible before writing MTT entries */ | 1020 | /* Make sure MPT status is visible before writing MTT entries */ |
1021 | wmb(); | 1021 | wmb(); |
1022 | 1022 | ||
1023 | dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle, | 1023 | dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle, |
1024 | npages * sizeof(u64), DMA_TO_DEVICE); | 1024 | npages * sizeof(u64), DMA_TO_DEVICE); |
1025 | 1025 | ||
1026 | for (i = 0; i < npages; ++i) | 1026 | for (i = 0; i < npages; ++i) |
1027 | fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); | 1027 | fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); |
1028 | 1028 | ||
1029 | dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle, | 1029 | dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle, |
1030 | npages * sizeof(u64), DMA_TO_DEVICE); | 1030 | npages * sizeof(u64), DMA_TO_DEVICE); |
1031 | 1031 | ||
1032 | fmr->mpt->key = cpu_to_be32(key); | 1032 | fmr->mpt->key = cpu_to_be32(key); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index 74216071201f..a42b4c0a9ed9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c | |||
@@ -151,11 +151,13 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) | |||
151 | return -ENOMEM; | 151 | return -ENOMEM; |
152 | 152 | ||
153 | if (mlx4_is_slave(dev)) | 153 | if (mlx4_is_slave(dev)) |
154 | offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) / | 154 | offset = uar->index % ((int)pci_resource_len(dev->persist->pdev, |
155 | 2) / | ||
155 | dev->caps.uar_page_size); | 156 | dev->caps.uar_page_size); |
156 | else | 157 | else |
157 | offset = uar->index; | 158 | offset = uar->index; |
158 | uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset; | 159 | uar->pfn = (pci_resource_start(dev->persist->pdev, 2) >> PAGE_SHIFT) |
160 | + offset; | ||
159 | uar->map = NULL; | 161 | uar->map = NULL; |
160 | return 0; | 162 | return 0; |
161 | } | 163 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 30eb1ead0fe6..9f268f05290a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c | |||
@@ -553,9 +553,9 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port) | |||
553 | slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( | 553 | slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( |
554 | dev, &exclusive_ports); | 554 | dev, &exclusive_ports); |
555 | slave_gid -= bitmap_weight(slaves_pport_actv.slaves, | 555 | slave_gid -= bitmap_weight(slaves_pport_actv.slaves, |
556 | dev->num_vfs + 1); | 556 | dev->persist->num_vfs + 1); |
557 | } | 557 | } |
558 | vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; | 558 | vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1; |
559 | if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs)) | 559 | if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs)) |
560 | return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1; | 560 | return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1; |
561 | return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs; | 561 | return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs; |
@@ -590,10 +590,10 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port) | |||
590 | slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( | 590 | slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( |
591 | dev, &exclusive_ports); | 591 | dev, &exclusive_ports); |
592 | slave_gid -= bitmap_weight(slaves_pport_actv.slaves, | 592 | slave_gid -= bitmap_weight(slaves_pport_actv.slaves, |
593 | dev->num_vfs + 1); | 593 | dev->persist->num_vfs + 1); |
594 | } | 594 | } |
595 | gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; | 595 | gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; |
596 | vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; | 596 | vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1; |
597 | if (slave_gid <= gids % vfs) | 597 | if (slave_gid <= gids % vfs) |
598 | return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1); | 598 | return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1); |
599 | 599 | ||
@@ -644,7 +644,7 @@ void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave) | |||
644 | int num_eth_ports, err; | 644 | int num_eth_ports, err; |
645 | int i; | 645 | int i; |
646 | 646 | ||
647 | if (slave < 0 || slave > dev->num_vfs) | 647 | if (slave < 0 || slave > dev->persist->num_vfs) |
648 | return; | 648 | return; |
649 | 649 | ||
650 | actv_ports = mlx4_get_active_ports(dev, slave); | 650 | actv_ports = mlx4_get_active_ports(dev, slave); |
@@ -1214,7 +1214,8 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, | |||
1214 | return -EINVAL; | 1214 | return -EINVAL; |
1215 | 1215 | ||
1216 | slaves_pport = mlx4_phys_to_slaves_pport(dev, port); | 1216 | slaves_pport = mlx4_phys_to_slaves_pport(dev, port); |
1217 | num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; | 1217 | num_vfs = bitmap_weight(slaves_pport.slaves, |
1218 | dev->persist->num_vfs + 1) - 1; | ||
1218 | 1219 | ||
1219 | for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { | 1220 | for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { |
1220 | if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid, | 1221 | if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid, |
@@ -1258,7 +1259,7 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, | |||
1258 | dev, &exclusive_ports); | 1259 | dev, &exclusive_ports); |
1259 | num_vfs_before += bitmap_weight( | 1260 | num_vfs_before += bitmap_weight( |
1260 | slaves_pport_actv.slaves, | 1261 | slaves_pport_actv.slaves, |
1261 | dev->num_vfs + 1); | 1262 | dev->persist->num_vfs + 1); |
1262 | } | 1263 | } |
1263 | 1264 | ||
1264 | /* candidate_slave_gid isn't necessarily the correct slave, but | 1265 | /* candidate_slave_gid isn't necessarily the correct slave, but |
@@ -1288,7 +1289,7 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, | |||
1288 | dev, &exclusive_ports); | 1289 | dev, &exclusive_ports); |
1289 | slave_gid += bitmap_weight( | 1290 | slave_gid += bitmap_weight( |
1290 | slaves_pport_actv.slaves, | 1291 | slaves_pport_actv.slaves, |
1291 | dev->num_vfs + 1); | 1292 | dev->persist->num_vfs + 1); |
1292 | } | 1293 | } |
1293 | } | 1294 | } |
1294 | *slave_id = slave_gid; | 1295 | *slave_id = slave_gid; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c index ea1c6d092145..0076d88587ca 100644 --- a/drivers/net/ethernet/mellanox/mlx4/reset.c +++ b/drivers/net/ethernet/mellanox/mlx4/reset.c | |||
@@ -76,19 +76,21 @@ int mlx4_reset(struct mlx4_dev *dev) | |||
76 | goto out; | 76 | goto out; |
77 | } | 77 | } |
78 | 78 | ||
79 | pcie_cap = pci_pcie_cap(dev->pdev); | 79 | pcie_cap = pci_pcie_cap(dev->persist->pdev); |
80 | 80 | ||
81 | for (i = 0; i < 64; ++i) { | 81 | for (i = 0; i < 64; ++i) { |
82 | if (i == 22 || i == 23) | 82 | if (i == 22 || i == 23) |
83 | continue; | 83 | continue; |
84 | if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) { | 84 | if (pci_read_config_dword(dev->persist->pdev, i * 4, |
85 | hca_header + i)) { | ||
85 | err = -ENODEV; | 86 | err = -ENODEV; |
86 | mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n"); | 87 | mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n"); |
87 | goto out; | 88 | goto out; |
88 | } | 89 | } |
89 | } | 90 | } |
90 | 91 | ||
91 | reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE, | 92 | reset = ioremap(pci_resource_start(dev->persist->pdev, 0) + |
93 | MLX4_RESET_BASE, | ||
92 | MLX4_RESET_SIZE); | 94 | MLX4_RESET_SIZE); |
93 | if (!reset) { | 95 | if (!reset) { |
94 | err = -ENOMEM; | 96 | err = -ENOMEM; |
@@ -122,8 +124,8 @@ int mlx4_reset(struct mlx4_dev *dev) | |||
122 | 124 | ||
123 | end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES; | 125 | end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES; |
124 | do { | 126 | do { |
125 | if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) && | 127 | if (!pci_read_config_word(dev->persist->pdev, PCI_VENDOR_ID, |
126 | vendor != 0xffff) | 128 | &vendor) && vendor != 0xffff) |
127 | break; | 129 | break; |
128 | 130 | ||
129 | msleep(1); | 131 | msleep(1); |
@@ -138,14 +140,16 @@ int mlx4_reset(struct mlx4_dev *dev) | |||
138 | /* Now restore the PCI headers */ | 140 | /* Now restore the PCI headers */ |
139 | if (pcie_cap) { | 141 | if (pcie_cap) { |
140 | devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4]; | 142 | devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4]; |
141 | if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL, | 143 | if (pcie_capability_write_word(dev->persist->pdev, |
144 | PCI_EXP_DEVCTL, | ||
142 | devctl)) { | 145 | devctl)) { |
143 | err = -ENODEV; | 146 | err = -ENODEV; |
144 | mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n"); | 147 | mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n"); |
145 | goto out; | 148 | goto out; |
146 | } | 149 | } |
147 | linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; | 150 | linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; |
148 | if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL, | 151 | if (pcie_capability_write_word(dev->persist->pdev, |
152 | PCI_EXP_LNKCTL, | ||
149 | linkctl)) { | 153 | linkctl)) { |
150 | err = -ENODEV; | 154 | err = -ENODEV; |
151 | mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n"); | 155 | mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n"); |
@@ -157,7 +161,8 @@ int mlx4_reset(struct mlx4_dev *dev) | |||
157 | if (i * 4 == PCI_COMMAND) | 161 | if (i * 4 == PCI_COMMAND) |
158 | continue; | 162 | continue; |
159 | 163 | ||
160 | if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) { | 164 | if (pci_write_config_dword(dev->persist->pdev, i * 4, |
165 | hca_header[i])) { | ||
161 | err = -ENODEV; | 166 | err = -ENODEV; |
162 | mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n", | 167 | mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n", |
163 | i); | 168 | i); |
@@ -165,7 +170,7 @@ int mlx4_reset(struct mlx4_dev *dev) | |||
165 | } | 170 | } |
166 | } | 171 | } |
167 | 172 | ||
168 | if (pci_write_config_dword(dev->pdev, PCI_COMMAND, | 173 | if (pci_write_config_dword(dev->persist->pdev, PCI_COMMAND, |
169 | hca_header[PCI_COMMAND / 4])) { | 174 | hca_header[PCI_COMMAND / 4])) { |
170 | err = -ENODEV; | 175 | err = -ENODEV; |
171 | mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n"); | 176 | mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n"); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 4efbd1eca611..3e93879bccce 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
@@ -309,12 +309,13 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave, | |||
309 | int allocated, free, reserved, guaranteed, from_free; | 309 | int allocated, free, reserved, guaranteed, from_free; |
310 | int from_rsvd; | 310 | int from_rsvd; |
311 | 311 | ||
312 | if (slave > dev->num_vfs) | 312 | if (slave > dev->persist->num_vfs) |
313 | return -EINVAL; | 313 | return -EINVAL; |
314 | 314 | ||
315 | spin_lock(&res_alloc->alloc_lock); | 315 | spin_lock(&res_alloc->alloc_lock); |
316 | allocated = (port > 0) ? | 316 | allocated = (port > 0) ? |
317 | res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] : | 317 | res_alloc->allocated[(port - 1) * |
318 | (dev->persist->num_vfs + 1) + slave] : | ||
318 | res_alloc->allocated[slave]; | 319 | res_alloc->allocated[slave]; |
319 | free = (port > 0) ? res_alloc->res_port_free[port - 1] : | 320 | free = (port > 0) ? res_alloc->res_port_free[port - 1] : |
320 | res_alloc->res_free; | 321 | res_alloc->res_free; |
@@ -352,7 +353,8 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave, | |||
352 | if (!err) { | 353 | if (!err) { |
353 | /* grant the request */ | 354 | /* grant the request */ |
354 | if (port > 0) { | 355 | if (port > 0) { |
355 | res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count; | 356 | res_alloc->allocated[(port - 1) * |
357 | (dev->persist->num_vfs + 1) + slave] += count; | ||
356 | res_alloc->res_port_free[port - 1] -= count; | 358 | res_alloc->res_port_free[port - 1] -= count; |
357 | res_alloc->res_port_rsvd[port - 1] -= from_rsvd; | 359 | res_alloc->res_port_rsvd[port - 1] -= from_rsvd; |
358 | } else { | 360 | } else { |
@@ -376,13 +378,14 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave, | |||
376 | &priv->mfunc.master.res_tracker.res_alloc[res_type]; | 378 | &priv->mfunc.master.res_tracker.res_alloc[res_type]; |
377 | int allocated, guaranteed, from_rsvd; | 379 | int allocated, guaranteed, from_rsvd; |
378 | 380 | ||
379 | if (slave > dev->num_vfs) | 381 | if (slave > dev->persist->num_vfs) |
380 | return; | 382 | return; |
381 | 383 | ||
382 | spin_lock(&res_alloc->alloc_lock); | 384 | spin_lock(&res_alloc->alloc_lock); |
383 | 385 | ||
384 | allocated = (port > 0) ? | 386 | allocated = (port > 0) ? |
385 | res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] : | 387 | res_alloc->allocated[(port - 1) * |
388 | (dev->persist->num_vfs + 1) + slave] : | ||
386 | res_alloc->allocated[slave]; | 389 | res_alloc->allocated[slave]; |
387 | guaranteed = res_alloc->guaranteed[slave]; | 390 | guaranteed = res_alloc->guaranteed[slave]; |
388 | 391 | ||
@@ -397,7 +400,8 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave, | |||
397 | } | 400 | } |
398 | 401 | ||
399 | if (port > 0) { | 402 | if (port > 0) { |
400 | res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count; | 403 | res_alloc->allocated[(port - 1) * |
404 | (dev->persist->num_vfs + 1) + slave] -= count; | ||
401 | res_alloc->res_port_free[port - 1] += count; | 405 | res_alloc->res_port_free[port - 1] += count; |
402 | res_alloc->res_port_rsvd[port - 1] += from_rsvd; | 406 | res_alloc->res_port_rsvd[port - 1] += from_rsvd; |
403 | } else { | 407 | } else { |
@@ -415,7 +419,8 @@ static inline void initialize_res_quotas(struct mlx4_dev *dev, | |||
415 | enum mlx4_resource res_type, | 419 | enum mlx4_resource res_type, |
416 | int vf, int num_instances) | 420 | int vf, int num_instances) |
417 | { | 421 | { |
418 | res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1)); | 422 | res_alloc->guaranteed[vf] = num_instances / |
423 | (2 * (dev->persist->num_vfs + 1)); | ||
419 | res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf]; | 424 | res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf]; |
420 | if (vf == mlx4_master_func_num(dev)) { | 425 | if (vf == mlx4_master_func_num(dev)) { |
421 | res_alloc->res_free = num_instances; | 426 | res_alloc->res_free = num_instances; |
@@ -486,21 +491,26 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) | |||
486 | for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { | 491 | for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { |
487 | struct resource_allocator *res_alloc = | 492 | struct resource_allocator *res_alloc = |
488 | &priv->mfunc.master.res_tracker.res_alloc[i]; | 493 | &priv->mfunc.master.res_tracker.res_alloc[i]; |
489 | res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); | 494 | res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) * |
490 | res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); | 495 | sizeof(int), GFP_KERNEL); |
496 | res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) * | ||
497 | sizeof(int), GFP_KERNEL); | ||
491 | if (i == RES_MAC || i == RES_VLAN) | 498 | if (i == RES_MAC || i == RES_VLAN) |
492 | res_alloc->allocated = kzalloc(MLX4_MAX_PORTS * | 499 | res_alloc->allocated = kzalloc(MLX4_MAX_PORTS * |
493 | (dev->num_vfs + 1) * sizeof(int), | 500 | (dev->persist->num_vfs |
494 | GFP_KERNEL); | 501 | + 1) * |
502 | sizeof(int), GFP_KERNEL); | ||
495 | else | 503 | else |
496 | res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); | 504 | res_alloc->allocated = kzalloc((dev->persist-> |
505 | num_vfs + 1) * | ||
506 | sizeof(int), GFP_KERNEL); | ||
497 | 507 | ||
498 | if (!res_alloc->quota || !res_alloc->guaranteed || | 508 | if (!res_alloc->quota || !res_alloc->guaranteed || |
499 | !res_alloc->allocated) | 509 | !res_alloc->allocated) |
500 | goto no_mem_err; | 510 | goto no_mem_err; |
501 | 511 | ||
502 | spin_lock_init(&res_alloc->alloc_lock); | 512 | spin_lock_init(&res_alloc->alloc_lock); |
503 | for (t = 0; t < dev->num_vfs + 1; t++) { | 513 | for (t = 0; t < dev->persist->num_vfs + 1; t++) { |
504 | struct mlx4_active_ports actv_ports = | 514 | struct mlx4_active_ports actv_ports = |
505 | mlx4_get_active_ports(dev, t); | 515 | mlx4_get_active_ports(dev, t); |
506 | switch (i) { | 516 | switch (i) { |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index f1e41b33462f..1069ce65e8b4 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -744,8 +744,15 @@ struct mlx4_vf_dev { | |||
744 | u8 n_ports; | 744 | u8 n_ports; |
745 | }; | 745 | }; |
746 | 746 | ||
747 | struct mlx4_dev { | 747 | struct mlx4_dev_persistent { |
748 | struct pci_dev *pdev; | 748 | struct pci_dev *pdev; |
749 | struct mlx4_dev *dev; | ||
750 | int nvfs[MLX4_MAX_PORTS + 1]; | ||
751 | int num_vfs; | ||
752 | }; | ||
753 | |||
754 | struct mlx4_dev { | ||
755 | struct mlx4_dev_persistent *persist; | ||
749 | unsigned long flags; | 756 | unsigned long flags; |
750 | unsigned long num_slaves; | 757 | unsigned long num_slaves; |
751 | struct mlx4_caps caps; | 758 | struct mlx4_caps caps; |
@@ -754,13 +761,11 @@ struct mlx4_dev { | |||
754 | struct radix_tree_root qp_table_tree; | 761 | struct radix_tree_root qp_table_tree; |
755 | u8 rev_id; | 762 | u8 rev_id; |
756 | char board_id[MLX4_BOARD_ID_LEN]; | 763 | char board_id[MLX4_BOARD_ID_LEN]; |
757 | int num_vfs; | ||
758 | int numa_node; | 764 | int numa_node; |
759 | int oper_log_mgm_entry_size; | 765 | int oper_log_mgm_entry_size; |
760 | u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; | 766 | u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; |
761 | u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; | 767 | u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; |
762 | struct mlx4_vf_dev *dev_vfs; | 768 | struct mlx4_vf_dev *dev_vfs; |
763 | int nvfs[MLX4_MAX_PORTS + 1]; | ||
764 | }; | 769 | }; |
765 | 770 | ||
766 | struct mlx4_eqe { | 771 | struct mlx4_eqe { |