author     Ivan Vecera <ivecera@redhat.com>          2011-02-02 03:05:12 -0500
committer  David S. Miller <davem@davemloft.net>     2011-02-03 23:49:00 -0500
commit     2b7bcebf958c74124220ee8103024def8597b36c (patch)
tree       0dbf21ff45e4c0a7c2bee3b6fa90edf80e32e615 /drivers/net/benet/be_main.c
parent     fd95240568977ebd1ebb15b071464e0e392cde1a (diff)
be2net: use device model DMA API
Use the DMA API, as the PCI equivalents will be deprecated.
Signed-off-by: Ivan Vecera <ivecera@redhat.com>
Acked-by: Ajit Khaparde <ajit.khaparde@emulex.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
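
Note: the conversion is mechanical. Each pci_* DMA helper takes a struct pci_dev *, while its generic counterpart takes the embedded struct device (&pdev->dev), an explicit enum dma_data_direction instead of the PCI_DMA_* flags, and, for coherent allocations, a GFP flag. The sketch below illustrates that mapping with a hypothetical example_dma_usage() helper; it is not code from be_main.c, just the old calls shown as comments next to their replacements.

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative sketch only -- not a be2net function. */
static int example_dma_usage(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t ring_dma, buf_dma;
	void *ring;

	/* was: pci_alloc_consistent(pdev, len, &ring_dma) */
	ring = dma_alloc_coherent(&pdev->dev, len, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
	buf_dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

	/* was: pci_dma_mapping_error(pdev, buf_dma) */
	if (dma_mapping_error(&pdev->dev, buf_dma)) {
		dma_free_coherent(&pdev->dev, len, ring, ring_dma);
		return -EIO;
	}

	/* ... the hardware would be given ring_dma/buf_dma here ... */

	/* was: pci_unmap_single(pdev, buf_dma, len, PCI_DMA_TODEVICE) */
	dma_unmap_single(&pdev->dev, buf_dma, len, DMA_TO_DEVICE);

	/* was: pci_free_consistent(pdev, len, ring, ring_dma) */
	dma_free_coherent(&pdev->dev, len, ring, ring_dma);

	return 0;
}

The per-call substitutions in the diff below follow exactly this pattern.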
Diffstat (limited to 'drivers/net/benet/be_main.c')
-rw-r--r--	drivers/net/benet/be_main.c	98
1 file changed, 51 insertions, 47 deletions
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index de40d3b7152f..c4966d46f692 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -125,8 +125,8 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
 {
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
-               pci_free_consistent(adapter->pdev, mem->size,
-                       mem->va, mem->dma);
+               dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+                                 mem->dma);
 }
 
 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
@@ -138,7 +138,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
-       mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
+       mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
+                                    GFP_KERNEL);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
@@ -486,7 +487,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
 }
 
-static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
+static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
 {
        dma_addr_t dma;
@@ -496,11 +497,10 @@ static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
-                       pci_unmap_single(pdev, dma, wrb->frag_len,
-                               PCI_DMA_TODEVICE);
+                       dma_unmap_single(dev, dma, wrb->frag_len,
+                                        DMA_TO_DEVICE);
                else
-                       pci_unmap_page(pdev, dma, wrb->frag_len,
-                               PCI_DMA_TODEVICE);
+                       dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
 }
 
@@ -509,7 +509,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 {
        dma_addr_t busaddr;
        int i, copied = 0;
-       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
@@ -523,9 +523,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 
        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
-               busaddr = pci_map_single(pdev, skb->data, len,
-                               PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(pdev, busaddr))
+               busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
@@ -538,10 +537,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
-               busaddr = pci_map_page(pdev, frag->page,
-                               frag->page_offset,
-                               frag->size, PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(pdev, busaddr))
+               busaddr = dma_map_page(dev, frag->page, frag->page_offset,
+                                      frag->size, DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
@@ -565,7 +563,7 @@ dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
-               unmap_tx_frag(pdev, wrb, map_single);
+               unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
@@ -890,8 +888,9 @@ get_rx_page_info(struct be_adapter *adapter,
        BUG_ON(!rx_page_info->page);
 
        if (rx_page_info->last_page_user) {
-               pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
-                       adapter->big_page_size, PCI_DMA_FROMDEVICE);
+               dma_unmap_page(&adapter->pdev->dev,
+                              dma_unmap_addr(rx_page_info, bus),
+                              adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }
 
@@ -1197,9 +1196,9 @@ static void be_post_rx_frags(struct be_rx_obj *rxo)
                        rxo->stats.rx_post_fail++;
                        break;
                }
-               page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
-                                       adapter->big_page_size,
-                                       PCI_DMA_FROMDEVICE);
+               page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
+                                           0, adapter->big_page_size,
+                                           DMA_FROM_DEVICE);
                page_info->page_offset = 0;
        } else {
                get_page(pagep);
@@ -1272,8 +1271,8 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
-               unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
-                                       skb_headlen(sent_skb)));
+               unmap_tx_frag(&adapter->pdev->dev, wrb,
+                             (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;
 
                num_wrbs++;
@@ -2181,7 +2180,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
        memset(mac, 0, ETH_ALEN);
 
        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                   GFP_KERNEL);
        if (cmd.va == NULL)
                return -1;
        memset(cmd.va, 0, cmd.size);
@@ -2192,8 +2192,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "Could not enable Wake-on-lan\n");
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
-                       cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
                return status;
        }
        status = be_cmd_enable_magic_wol(adapter,
@@ -2206,7 +2206,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
                pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
        }
 
-       pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
        return status;
 }
 
@@ -2530,8 +2530,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
        dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
 
        flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
-       flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
-                                       &flash_cmd.dma);
+       flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
+                                         &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
                dev_err(&adapter->pdev->dev,
@@ -2560,8 +2560,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
                status = -1;
        }
 
-       pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
-                       flash_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
+                         flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev, "Firmware load error\n");
                goto fw_exit;
@@ -2704,13 +2704,13 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
        be_unmap_pci_bars(adapter);
 
        if (mem->va)
-               pci_free_consistent(adapter->pdev, mem->size,
-                       mem->va, mem->dma);
+               dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+                                 mem->dma);
 
        mem = &adapter->mc_cmd_mem;
        if (mem->va)
-               pci_free_consistent(adapter->pdev, mem->size,
-                       mem->va, mem->dma);
+               dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+                                 mem->dma);
 }
 
 static int be_ctrl_init(struct be_adapter *adapter)
@@ -2725,8 +2725,10 @@ static int be_ctrl_init(struct be_adapter *adapter)
                goto done;
 
        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-       mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
-               mbox_mem_alloc->size, &mbox_mem_alloc->dma);
+       mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
+                                               mbox_mem_alloc->size,
+                                               &mbox_mem_alloc->dma,
+                                               GFP_KERNEL);
        if (!mbox_mem_alloc->va) {
                status = -ENOMEM;
                goto unmap_pci_bars;
@@ -2738,8 +2740,9 @@ static int be_ctrl_init(struct be_adapter *adapter)
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
 
        mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
-       mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
-               &mc_cmd_mem->dma);
+       mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
+                                           mc_cmd_mem->size, &mc_cmd_mem->dma,
+                                           GFP_KERNEL);
        if (mc_cmd_mem->va == NULL) {
                status = -ENOMEM;
                goto free_mbox;
@@ -2755,8 +2758,8 @@ static int be_ctrl_init(struct be_adapter *adapter)
        return 0;
 
 free_mbox:
-       pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
-               mbox_mem_alloc->va, mbox_mem_alloc->dma);
+       dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
+                         mbox_mem_alloc->va, mbox_mem_alloc->dma);
 
 unmap_pci_bars:
        be_unmap_pci_bars(adapter);
@@ -2770,8 +2773,8 @@ static void be_stats_cleanup(struct be_adapter *adapter)
        struct be_dma_mem *cmd = &adapter->stats_cmd;
 
        if (cmd->va)
-               pci_free_consistent(adapter->pdev, cmd->size,
-                       cmd->va, cmd->dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd->size,
+                                 cmd->va, cmd->dma);
 }
 
 static int be_stats_init(struct be_adapter *adapter)
@@ -2779,7 +2782,8 @@ static int be_stats_init(struct be_adapter *adapter)
        struct be_dma_mem *cmd = &adapter->stats_cmd;
 
        cmd->size = sizeof(struct be_cmd_req_get_stats);
-       cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
+       cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
+                                    GFP_KERNEL);
        if (cmd->va == NULL)
                return -1;
        memset(cmd->va, 0, cmd->size);
@@ -2922,11 +2926,11 @@ static int __devinit be_probe(struct pci_dev *pdev,
        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
-       status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
-               status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;