author	Ben Hutchings <bhutchings@solarflare.com>	2012-05-17 12:46:55 -0400
committer	Ben Hutchings <bhutchings@solarflare.com>	2012-07-17 11:12:30 -0400
commit	0e33d87033d84768ae700217e7c52dfb0c3399ca (patch)
tree	4ad70825c5d85218f80783efb94505db58743704
parent	62f8dc529c76aca43113312079299d40741407dd (diff)
sfc: Use generic DMA API, not PCI-DMA API
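The conversion is mechanical: each pci_*() DMA call becomes the
corresponding dma_*() call on &pci_dev->dev, the PCI_DMA_{TO,FROM}DEVICE
direction flags become DMA_{TO,FROM}_DEVICE, and dma_alloc_coherent()
takes an explicit GFP flag where pci_alloc_consistent() implied
GFP_ATOMIC.  A minimal sketch of the mapping-side pattern, for
illustration only (not part of the patch; the helper name
efx_example_map() is hypothetical):

    /* Illustrative sketch, not part of this patch.  Assumes
     * <linux/pci.h> and <linux/dma-mapping.h> are included.
     */
    static int efx_example_map(struct pci_dev *pci_dev, void *buf,
                               size_t len, dma_addr_t *dma_addr)
    {
            struct device *dma_dev = &pci_dev->dev; /* generic DMA device */

            /* Old: *dma_addr = pci_map_single(pci_dev, buf, len,
             *                                 PCI_DMA_TODEVICE);
             */
            *dma_addr = dma_map_single(dma_dev, buf, len, DMA_TO_DEVICE);

            /* Old: if (pci_dma_mapping_error(pci_dev, *dma_addr)) */
            if (dma_mapping_error(dma_dev, *dma_addr))
                    return -EIO;

            return 0;
    }
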
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
-rw-r--r--	drivers/net/ethernet/sfc/efx.c	10
-rw-r--r--	drivers/net/ethernet/sfc/net_driver.h	2
-rw-r--r--	drivers/net/ethernet/sfc/nic.c	8
-rw-r--r--	drivers/net/ethernet/sfc/rx.c	22
-rw-r--r--	drivers/net/ethernet/sfc/tx.c	83
5 files changed, 62 insertions, 63 deletions
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index b95f2e1b33f..70554a1b2b0 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1103,8 +1103,8 @@ static int efx_init_io(struct efx_nic *efx)
 	 * masks event though they reject 46 bit masks.
 	 */
 	while (dma_mask > 0x7fffffffUL) {
-		if (pci_dma_supported(pci_dev, dma_mask)) {
-			rc = pci_set_dma_mask(pci_dev, dma_mask);
+		if (dma_supported(&pci_dev->dev, dma_mask)) {
+			rc = dma_set_mask(&pci_dev->dev, dma_mask);
 			if (rc == 0)
 				break;
 		}
@@ -1117,10 +1117,10 @@ static int efx_init_io(struct efx_nic *efx)
 	}
 	netif_dbg(efx, probe, efx->net_dev,
 		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
-	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
+	rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
 	if (rc) {
-		/* pci_set_consistent_dma_mask() is not *allowed* to
-		 * fail with a mask that pci_set_dma_mask() accepted,
+		/* dma_set_coherent_mask() is not *allowed* to
+		 * fail with a mask that dma_set_mask() accepted,
 		 * but just in case...
 		 */
 		netif_err(efx, probe, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 0e575359af1..8a9f6d48214 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -100,7 +100,7 @@ struct efx_special_buffer {
  * @len: Length of this fragment.
  *	This field is zero when the queue slot is empty.
  * @continuation: True if this fragment is not the end of a packet.
- * @unmap_single: True if pci_unmap_single should be used.
+ * @unmap_single: True if dma_unmap_single should be used.
  * @unmap_len: Length of this fragment to unmap
  */
 struct efx_tx_buffer {
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 4a9a5beec8f..287738db24e 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -308,8 +308,8 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
 			 unsigned int len)
 {
-	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
-					    &buffer->dma_addr);
+	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+					  &buffer->dma_addr, GFP_ATOMIC);
 	if (!buffer->addr)
 		return -ENOMEM;
 	buffer->len = len;
@@ -320,8 +320,8 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
 void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
 {
 	if (buffer->addr) {
-		pci_free_consistent(efx->pci_dev, buffer->len,
-				    buffer->addr, buffer->dma_addr);
+		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
+				  buffer->addr, buffer->dma_addr);
 		buffer->addr = NULL;
 	}
 }
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 243e91f3dff..6d1c6cfd6ba 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -155,11 +155,11 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 		rx_buf->len = skb_len - NET_IP_ALIGN;
 		rx_buf->flags = 0;
 
-		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+		rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
 						  skb->data, rx_buf->len,
-						  PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
+						  DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
 						   rx_buf->dma_addr))) {
 			dev_kfree_skb_any(skb);
 			rx_buf->u.skb = NULL;
 			return -EIO;
@@ -200,10 +200,10 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 				   efx->rx_buffer_order);
 		if (unlikely(page == NULL))
 			return -ENOMEM;
-		dma_addr = pci_map_page(efx->pci_dev, page, 0,
+		dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
 					efx_rx_buf_size(efx),
-					PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
+					DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
 			__free_pages(page, efx->rx_buffer_order);
 			return -EIO;
 		}
@@ -247,14 +247,14 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 
 		state = page_address(rx_buf->u.page);
 		if (--state->refcnt == 0) {
-			pci_unmap_page(efx->pci_dev,
+			dma_unmap_page(&efx->pci_dev->dev,
 				       state->dma_addr,
 				       efx_rx_buf_size(efx),
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 		}
 	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
-		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
-				 rx_buf->len, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
+				 rx_buf->len, DMA_FROM_DEVICE);
 	}
 }
 
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 94d0365b31c..18860f241bc 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -36,15 +36,15 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 			       unsigned int *bytes_compl)
 {
 	if (buffer->unmap_len) {
-		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
 		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
 					 buffer->unmap_len);
 		if (buffer->unmap_single)
-			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
-					 PCI_DMA_TODEVICE);
+			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+					 DMA_TO_DEVICE);
 		else
-			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
-				       PCI_DMA_TODEVICE);
+			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+				       DMA_TO_DEVICE);
 		buffer->unmap_len = 0;
 		buffer->unmap_single = false;
 	}
@@ -138,7 +138,7 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 {
 	struct efx_nic *efx = tx_queue->efx;
-	struct pci_dev *pci_dev = efx->pci_dev;
+	struct device *dma_dev = &efx->pci_dev->dev;
 	struct efx_tx_buffer *buffer;
 	skb_frag_t *fragment;
 	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
@@ -167,17 +167,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
 	q_space = efx->txq_entries - 1 - fill_level;
 
-	/* Map for DMA. Use pci_map_single rather than pci_map_page
+	/* Map for DMA. Use dma_map_single rather than dma_map_page
 	 * since this is more efficient on machines with sparse
 	 * memory.
 	 */
 	unmap_single = true;
-	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+	dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
 
 	/* Process all fragments */
 	while (1) {
-		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
-			goto pci_err;
+		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+			goto dma_err;
 
 		/* Store fields for marking in the per-fragment final
 		 * descriptor */
@@ -246,7 +246,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 		i++;
 		/* Map for DMA */
 		unmap_single = false;
-		dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
+		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
 					    DMA_TO_DEVICE);
 	}
 
@@ -261,7 +261,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
 	return NETDEV_TX_OK;
 
- pci_err:
+ dma_err:
 	netif_err(efx, tx_err, efx->net_dev,
 		  " TX queue %d could not map skb with %d bytes %d "
 		  "fragments for DMA\n", tx_queue->queue, skb->len,
@@ -284,11 +284,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	/* Free the fragment we were mid-way through pushing */
 	if (unmap_len) {
 		if (unmap_single)
-			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
-					 PCI_DMA_TODEVICE);
+			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
+					 DMA_TO_DEVICE);
 		else
-			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
-				       PCI_DMA_TODEVICE);
+			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
+				       DMA_TO_DEVICE);
 	}
 
 	return rc;
@@ -684,20 +684,19 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
  */
 static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
 {
-
-	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+	struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
 	struct efx_tso_header *tsoh;
 	dma_addr_t dma_addr;
 	u8 *base_kva, *kva;
 
-	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
+	base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
 	if (base_kva == NULL) {
 		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
 			  "Unable to allocate page for TSO headers\n");
 		return -ENOMEM;
 	}
 
-	/* pci_alloc_consistent() allocates pages. */
+	/* dma_alloc_coherent() allocates pages. */
 	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
 
 	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
@@ -714,7 +713,7 @@ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
 /* Free up a TSO header, and all others in the same page. */
 static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
 				struct efx_tso_header *tsoh,
-				struct pci_dev *pci_dev)
+				struct device *dma_dev)
 {
 	struct efx_tso_header **p;
 	unsigned long base_kva;
@@ -731,7 +730,7 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
 		p = &(*p)->next;
 	}
 
-	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
+	dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
 }
 
 static struct efx_tso_header *
@@ -743,11 +742,11 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
 	if (unlikely(!tsoh))
 		return NULL;
 
-	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
+	tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
 					TSOH_BUFFER(tsoh), header_len,
-					PCI_DMA_TODEVICE);
-	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
+					DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
 					   tsoh->dma_addr))) {
 		kfree(tsoh);
 		return NULL;
 	}
@@ -759,9 +758,9 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
 static void
 efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
 {
-	pci_unmap_single(tx_queue->efx->pci_dev,
+	dma_unmap_single(&tx_queue->efx->pci_dev->dev,
 			 tsoh->dma_addr, tsoh->unmap_len,
-			 PCI_DMA_TODEVICE);
+			 DMA_TO_DEVICE);
 	kfree(tsoh);
 }
 
@@ -892,13 +891,13 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 			unmap_addr = (buffer->dma_addr + buffer->len -
 				      buffer->unmap_len);
 			if (buffer->unmap_single)
-				pci_unmap_single(tx_queue->efx->pci_dev,
+				dma_unmap_single(&tx_queue->efx->pci_dev->dev,
 						 unmap_addr, buffer->unmap_len,
-						 PCI_DMA_TODEVICE);
+						 DMA_TO_DEVICE);
 			else
-				pci_unmap_page(tx_queue->efx->pci_dev,
+				dma_unmap_page(&tx_queue->efx->pci_dev->dev,
 					       unmap_addr, buffer->unmap_len,
-					       PCI_DMA_TODEVICE);
+					       DMA_TO_DEVICE);
 			buffer->unmap_len = 0;
 		}
 		buffer->len = 0;
@@ -954,9 +953,9 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
 	int hl = st->header_len;
 	int len = skb_headlen(skb) - hl;
 
-	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
-					len, PCI_DMA_TODEVICE);
-	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+	st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
+					len, DMA_TO_DEVICE);
+	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
 		st->unmap_single = true;
 		st->unmap_len = len;
 		st->in_len = len;
@@ -1008,7 +1007,7 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
 	buffer->continuation = !end_of_packet;
 
 	if (st->in_len == 0) {
-		/* Transfer ownership of the pci mapping */
+		/* Transfer ownership of the DMA mapping */
 		buffer->unmap_len = st->unmap_len;
 		buffer->unmap_single = st->unmap_single;
 		st->unmap_len = 0;
@@ -1181,18 +1180,18 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 
  mem_err:
 	netif_err(efx, tx_err, efx->net_dev,
-		  "Out of memory for TSO headers, or PCI mapping error\n");
+		  "Out of memory for TSO headers, or DMA mapping error\n");
 	dev_kfree_skb_any(skb);
 
  unwind:
 	/* Free the DMA mapping we were in the process of writing out */
 	if (state.unmap_len) {
 		if (state.unmap_single)
-			pci_unmap_single(efx->pci_dev, state.unmap_addr,
-					 state.unmap_len, PCI_DMA_TODEVICE);
+			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
+					 state.unmap_len, DMA_TO_DEVICE);
 		else
-			pci_unmap_page(efx->pci_dev, state.unmap_addr,
-				       state.unmap_len, PCI_DMA_TODEVICE);
+			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
+				       state.unmap_len, DMA_TO_DEVICE);
 	}
 
 	efx_enqueue_unwind(tx_queue);
@@ -1216,5 +1215,5 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
 
 	while (tx_queue->tso_headers_free != NULL)
 		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
-				    tx_queue->efx->pci_dev);
+				    &tx_queue->efx->pci_dev->dev);
 }