Diffstat (limited to 'drivers/net/ethernet/amazon/ena/ena_com.c')
-rw-r--r--	drivers/net/ethernet/amazon/ena/ena_com.c | 61
1 file changed, 31 insertions(+), 30 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 420cede41ca4..b17d435de09f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -111,8 +111,8 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
 	struct ena_com_admin_sq *sq = &queue->sq;
 	u16 size = ADMIN_SQ_SIZE(queue->q_depth);
 
-	sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
-					  GFP_KERNEL);
+	sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
+					 GFP_KERNEL);
 
 	if (!sq->entries) {
 		pr_err("memory allocation failed");
@@ -133,8 +133,8 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
 	struct ena_com_admin_cq *cq = &queue->cq;
 	u16 size = ADMIN_CQ_SIZE(queue->q_depth);
 
-	cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
-					  GFP_KERNEL);
+	cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
+					 GFP_KERNEL);
 
 	if (!cq->entries) {
 		pr_err("memory allocation failed");
@@ -156,8 +156,8 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
 
 	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
 	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
-	aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
-					    GFP_KERNEL);
+	aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
+					   GFP_KERNEL);
 
 	if (!aenq->entries) {
 		pr_err("memory allocation failed");
@@ -344,15 +344,15 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 		dev_node = dev_to_node(ena_dev->dmadev);
 		set_dev_node(ena_dev->dmadev, ctx->numa_node);
 		io_sq->desc_addr.virt_addr =
-			dma_zalloc_coherent(ena_dev->dmadev, size,
-					    &io_sq->desc_addr.phys_addr,
-					    GFP_KERNEL);
+			dma_alloc_coherent(ena_dev->dmadev, size,
+					   &io_sq->desc_addr.phys_addr,
+					   GFP_KERNEL);
 		set_dev_node(ena_dev->dmadev, dev_node);
 		if (!io_sq->desc_addr.virt_addr) {
 			io_sq->desc_addr.virt_addr =
-				dma_zalloc_coherent(ena_dev->dmadev, size,
-						    &io_sq->desc_addr.phys_addr,
-						    GFP_KERNEL);
+				dma_alloc_coherent(ena_dev->dmadev, size,
+						   &io_sq->desc_addr.phys_addr,
+						   GFP_KERNEL);
 		}
 
 		if (!io_sq->desc_addr.virt_addr) {
@@ -425,14 +425,14 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
 	prev_node = dev_to_node(ena_dev->dmadev);
 	set_dev_node(ena_dev->dmadev, ctx->numa_node);
 	io_cq->cdesc_addr.virt_addr =
-		dma_zalloc_coherent(ena_dev->dmadev, size,
-				    &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+		dma_alloc_coherent(ena_dev->dmadev, size,
+				   &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
 	set_dev_node(ena_dev->dmadev, prev_node);
 	if (!io_cq->cdesc_addr.virt_addr) {
 		io_cq->cdesc_addr.virt_addr =
-			dma_zalloc_coherent(ena_dev->dmadev, size,
-					    &io_cq->cdesc_addr.phys_addr,
-					    GFP_KERNEL);
+			dma_alloc_coherent(ena_dev->dmadev, size,
+					   &io_cq->cdesc_addr.phys_addr,
+					   GFP_KERNEL);
 	}
 
 	if (!io_cq->cdesc_addr.virt_addr) {
@@ -1026,8 +1026,8 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
 	struct ena_rss *rss = &ena_dev->rss;
 
 	rss->hash_key =
-		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
-				    &rss->hash_key_dma_addr, GFP_KERNEL);
+		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+				   &rss->hash_key_dma_addr, GFP_KERNEL);
 
 	if (unlikely(!rss->hash_key))
 		return -ENOMEM;
@@ -1050,8 +1050,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
 	struct ena_rss *rss = &ena_dev->rss;
 
 	rss->hash_ctrl =
-		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
-				    &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+				   &rss->hash_ctrl_dma_addr, GFP_KERNEL);
 
 	if (unlikely(!rss->hash_ctrl))
 		return -ENOMEM;
@@ -1094,8 +1094,8 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
 		sizeof(struct ena_admin_rss_ind_table_entry);
 
 	rss->rss_ind_tbl =
-		dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
-				    &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
+		dma_alloc_coherent(ena_dev->dmadev, tbl_size,
+				   &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
 	if (unlikely(!rss->rss_ind_tbl))
 		goto mem_err1;
 
@@ -1649,9 +1649,9 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
 
 	spin_lock_init(&mmio_read->lock);
 	mmio_read->read_resp =
-		dma_zalloc_coherent(ena_dev->dmadev,
-				    sizeof(*mmio_read->read_resp),
-				    &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+		dma_alloc_coherent(ena_dev->dmadev,
+				   sizeof(*mmio_read->read_resp),
+				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
 	if (unlikely(!mmio_read->read_resp))
 		goto err;
 
@@ -2623,8 +2623,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
 
 	host_attr->host_info =
-		dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
-				    &host_attr->host_info_dma_addr, GFP_KERNEL);
+		dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+				   &host_attr->host_info_dma_addr, GFP_KERNEL);
 	if (unlikely(!host_attr->host_info))
 		return -ENOMEM;
 
@@ -2641,8 +2641,9 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
 
 	host_attr->debug_area_virt_addr =
-		dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
-				    &host_attr->debug_area_dma_addr, GFP_KERNEL);
+		dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
+				   &host_attr->debug_area_dma_addr,
+				   GFP_KERNEL);
 	if (unlikely(!host_attr->debug_area_virt_addr)) {
 		host_attr->debug_area_size = 0;
 		return -ENOMEM;
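
Context for the conversion above: dma_alloc_coherent() zeroes the memory it returns, so the dma_zalloc_coherent() wrapper became redundant and each call converts one-for-one with no extra memset(); the continuation lines only change because the shorter function name shifts the argument alignment. Below is a minimal sketch of the call pattern, not part of the patch; the helper name ena_example_alloc() is hypothetical.

#include <linux/dma-mapping.h>

/*
 * Minimal sketch (hypothetical helper, not from this patch): allocate a
 * zeroed, DMA-coherent buffer of @size bytes and return its kernel virtual
 * address, storing the device-visible bus address in *dma_addr. Returns
 * NULL on allocation failure, mirroring the error checks in the hunks above.
 */
static void *ena_example_alloc(struct device *dev, size_t size,
			       dma_addr_t *dma_addr)
{
	return dma_alloc_coherent(dev, size, dma_addr, GFP_KERNEL);
}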
