-rw-r--r--	drivers/net/bna/bnad.c	108
-rw-r--r--	drivers/net/bna/bnad.h	  2
2 files changed, 57 insertions(+), 53 deletions(-)
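
The patch below converts the bna driver from the legacy pci_* DMA wrappers to
the generic DMA API: pci_map_single()/pci_unmap_single() become
dma_map_single()/dma_unmap_single() on &pcidev->dev, the PCI_DMA_* directions
become DMA_*, and the pci_unmap_addr state macros become their dma_unmap_addr
counterparts. A minimal sketch of the pattern follows; the helper names are
illustrative, not from the driver:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* Sketch only. The generic API takes the struct device embedded in
	 * the pci_dev, and dma_alloc_coherent() takes an explicit gfp_t
	 * (pci_alloc_consistent() implied GFP_ATOMIC), which is why the
	 * bnad_mem_alloc() hunk adds GFP_KERNEL.
	 */
	static dma_addr_t sketch_map_tx(struct pci_dev *pdev, void *buf,
					size_t len)
	{
		/* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
		return dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	}

	static void sketch_unmap_tx(struct pci_dev *pdev, dma_addr_t dma,
				    size_t len)
	{
		/* was: pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE) */
		dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
	}

	static void *sketch_alloc_coherent(struct pci_dev *pdev, size_t len,
					   dma_addr_t *dma)
	{
		/* was: pci_alloc_consistent(pdev, len, dma) */
		return dma_alloc_coherent(&pdev->dev, len, dma, GFP_KERNEL);
	}
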
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index fad912656fe..9f356d5d0f3 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -126,22 +126,22 @@ bnad_free_all_txbufs(struct bnad *bnad,
 		}
 		unmap_array[unmap_cons].skb = NULL;

-		pci_unmap_single(bnad->pcidev,
-				pci_unmap_addr(&unmap_array[unmap_cons],
-					dma_addr), skb_headlen(skb),
-				PCI_DMA_TODEVICE);
+		dma_unmap_single(&bnad->pcidev->dev,
+				dma_unmap_addr(&unmap_array[unmap_cons],
+					dma_addr), skb_headlen(skb),
+				DMA_TO_DEVICE);

-		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
 		if (++unmap_cons >= unmap_q->q_depth)
 			break;

 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			pci_unmap_page(bnad->pcidev,
-				pci_unmap_addr(&unmap_array[unmap_cons],
-					dma_addr),
-				skb_shinfo(skb)->frags[i].size,
-				PCI_DMA_TODEVICE);
-			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
-					0);
+			dma_unmap_page(&bnad->pcidev->dev,
+				dma_unmap_addr(&unmap_array[unmap_cons],
+					dma_addr),
+				skb_shinfo(skb)->frags[i].size,
+				DMA_TO_DEVICE);
+			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+					0);
 			if (++unmap_cons >= unmap_q->q_depth)
 				break;
@@ -199,23 +199,23 @@ bnad_free_txbufs(struct bnad *bnad,
 		sent_bytes += skb->len;
 		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

-		pci_unmap_single(bnad->pcidev,
-				pci_unmap_addr(&unmap_array[unmap_cons],
-					dma_addr), skb_headlen(skb),
-				PCI_DMA_TODEVICE);
-		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		dma_unmap_single(&bnad->pcidev->dev,
+				dma_unmap_addr(&unmap_array[unmap_cons],
+					dma_addr), skb_headlen(skb),
+				DMA_TO_DEVICE);
+		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
 		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);

 		prefetch(&unmap_array[unmap_cons + 1]);
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			prefetch(&unmap_array[unmap_cons + 1]);

-			pci_unmap_page(bnad->pcidev,
-				pci_unmap_addr(&unmap_array[unmap_cons],
-					dma_addr),
-				skb_shinfo(skb)->frags[i].size,
-				PCI_DMA_TODEVICE);
-			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
-					0);
+			dma_unmap_page(&bnad->pcidev->dev,
+				dma_unmap_addr(&unmap_array[unmap_cons],
+					dma_addr),
+				skb_shinfo(skb)->frags[i].size,
+				DMA_TO_DEVICE);
+			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+					0);
 			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
 		}
@@ -340,19 +340,22 @@ static void
 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q;
+	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff *skb;
 	int unmap_cons;

 	unmap_q = rcb->unmap_q;
+	unmap_array = unmap_q->unmap_array;
 	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
-		skb = unmap_q->unmap_array[unmap_cons].skb;
+		skb = unmap_array[unmap_cons].skb;
 		if (!skb)
 			continue;
-		unmap_q->unmap_array[unmap_cons].skb = NULL;
-		pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
-					unmap_array[unmap_cons],
-					dma_addr), rcb->rxq->buffer_size,
-					PCI_DMA_FROMDEVICE);
+		unmap_array[unmap_cons].skb = NULL;
+		dma_unmap_single(&bnad->pcidev->dev,
+				 dma_unmap_addr(&unmap_array[unmap_cons],
+						dma_addr),
+				 rcb->rxq->buffer_size,
+				 DMA_FROM_DEVICE);
 		dev_kfree_skb(skb);
 	}
 	bnad_reset_rcb(bnad, rcb);
@@ -391,9 +394,10 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
 		skb->dev = bnad->netdev;
 		skb_reserve(skb, NET_IP_ALIGN);
 		unmap_array[unmap_prod].skb = skb;
-		dma_addr = pci_map_single(bnad->pcidev, skb->data,
-			rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
-		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+					  rcb->rxq->buffer_size,
+					  DMA_FROM_DEVICE);
+		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
 				   dma_addr);
 		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
 		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -434,8 +438,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 	struct bna_rcb *rcb = NULL;
 	unsigned int wi_range, packets = 0, wis = 0;
 	struct bnad_unmap_q *unmap_q;
+	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff *skb;
-	u32 flags;
+	u32 flags, unmap_cons;
 	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;

@@ -456,17 +461,17 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 		rcb = ccb->rcb[1];

 	unmap_q = rcb->unmap_q;
+	unmap_array = unmap_q->unmap_array;
+	unmap_cons = unmap_q->consumer_index;

-	skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+	skb = unmap_array[unmap_cons].skb;
 	BUG_ON(!(skb));
-	unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
-	pci_unmap_single(bnad->pcidev,
-			 pci_unmap_addr(&unmap_q->
-					unmap_array[unmap_q->
-						consumer_index],
-					dma_addr),
-			 rcb->rxq->buffer_size,
-			 PCI_DMA_FROMDEVICE);
+	unmap_array[unmap_cons].skb = NULL;
+	dma_unmap_single(&bnad->pcidev->dev,
+			 dma_unmap_addr(&unmap_array[unmap_cons],
+					dma_addr),
+			 rcb->rxq->buffer_size,
+			 DMA_FROM_DEVICE);
 	BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

 	/* Should be more efficient ? Performance ? */
@@ -1015,9 +1020,9 @@ bnad_mem_free(struct bnad *bnad,
 		if (mem_info->mem_type == BNA_MEM_T_DMA) {
 			BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
 					dma_pa);
-			pci_free_consistent(bnad->pcidev,
-					mem_info->mdl[i].len,
-					mem_info->mdl[i].kva, dma_pa);
+			dma_free_coherent(&bnad->pcidev->dev,
+					  mem_info->mdl[i].len,
+					  mem_info->mdl[i].kva, dma_pa);
 		} else
 			kfree(mem_info->mdl[i].kva);
 	}
@@ -1047,8 +1052,9 @@ bnad_mem_alloc(struct bnad *bnad,
 	for (i = 0; i < mem_info->num; i++) {
 		mem_info->mdl[i].len = mem_info->len;
 		mem_info->mdl[i].kva =
-			pci_alloc_consistent(bnad->pcidev,
-					mem_info->len, &dma_pa);
+			dma_alloc_coherent(&bnad->pcidev->dev,
+					   mem_info->len, &dma_pa,
+					   GFP_KERNEL);

 		if (mem_info->mdl[i].kva == NULL)
 			goto err_return;
@@ -2600,9 +2606,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unmap_q->unmap_array[unmap_prod].skb = skb;
 	BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
 	txqent->vector[vect_id].length = htons(skb_headlen(skb));
-	dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
-		PCI_DMA_TODEVICE);
-	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+				  skb_headlen(skb), DMA_TO_DEVICE);
+	dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
 			   dma_addr);

 	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -2630,11 +2636,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)

 			BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
 			txqent->vector[vect_id].length = htons(size);
-			dma_addr =
-				pci_map_page(bnad->pcidev, frag->page,
-					frag->page_offset, size,
-					PCI_DMA_TODEVICE);
-			pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+			dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
+						frag->page_offset, size,
+						DMA_TO_DEVICE);
+			dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
 					   dma_addr);
 			BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
 			BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -3022,14 +3026,14 @@ bnad_pci_init(struct bnad *bnad,
 	err = pci_request_regions(pdev, BNAD_NAME);
 	if (err)
 		goto disable_device;
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		*using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev,
-							DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
 			if (err)
 				goto release_regions;
 		}
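
For reference, a simplified sketch of the 64-bit/32-bit DMA mask negotiation
that the bnad_pci_init() hunk above performs with the generic API; the
function name and the flattened error handling here are illustrative, not the
driver's exact flow (dma_set_mask()/dma_set_coherent_mask() return 0 on
success):

	static int sketch_set_dma_masks(struct pci_dev *pdev, int *using_dac)
	{
		int err;

		/* Prefer full 64-bit streaming and coherent masks. */
		if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
		    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
			*using_dac = 1;
			return 0;
		}

		/* Fall back to 32-bit addressing. */
		*using_dac = 0;
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			return err;
		return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}
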
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index 8b1d51557de..a89117fa497 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -181,7 +181,7 @@ struct bnad_rx_info {
 /* Unmap queues for Tx / Rx cleanup */
 struct bnad_skb_unmap {
 	struct sk_buff *skb;
-	DECLARE_PCI_UNMAP_ADDR(dma_addr)
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
 };

 struct bnad_unmap_q {
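
Finally, a short sketch of how the DMA unmap-state macros in the bnad.h hunk
fit together; struct and function names here are illustrative, not from the
driver. DEFINE_DMA_UNMAP_ADDR() only emits a dma_addr_t field on
configurations that need unmap state (CONFIG_NEED_DMA_MAP_STATE); elsewhere it
and the dma_unmap_addr*() accessors compile away, so the bookkeeping costs
nothing there:

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	struct sketch_unmap {			/* cf. struct bnad_skb_unmap */
		struct sk_buff *skb;
		DEFINE_DMA_UNMAP_ADDR(dma_addr);
	};

	static void sketch_store(struct sketch_unmap *u, dma_addr_t dma)
	{
		dma_unmap_addr_set(u, dma_addr, dma);	/* save for unmap */
	}

	static void sketch_release(struct device *dev, struct sketch_unmap *u,
				   size_t len)
	{
		dma_unmap_single(dev, dma_unmap_addr(u, dma_addr), len,
				 DMA_FROM_DEVICE);
		dma_unmap_addr_set(u, dma_addr, 0);
	}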