Diffstat (limited to 'drivers/net/chelsio/sge.c')
 drivers/net/chelsio/sge.c | 58 +++++++++++++++++++++++++++++-----------------------------
 1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index df3a1410696e..f01cfdb995de 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -162,14 +162,14 @@ struct respQ_e {
  */
 struct cmdQ_ce {
 	struct sk_buff *skb;
-	DECLARE_PCI_UNMAP_ADDR(dma_addr);
-	DECLARE_PCI_UNMAP_LEN(dma_len);
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
+	DEFINE_DMA_UNMAP_LEN(dma_len);
 };
 
 struct freelQ_ce {
 	struct sk_buff *skb;
-	DECLARE_PCI_UNMAP_ADDR(dma_addr);
-	DECLARE_PCI_UNMAP_LEN(dma_len);
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
+	DEFINE_DMA_UNMAP_LEN(dma_len);
 };
 
 /*
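The DECLARE_PCI_UNMAP_* to DEFINE_DMA_UNMAP_* change is a straight rename to the bus-independent DMA-API state helpers. Roughly, per include/linux/dma-mapping.h of this era (a sketch from memory, not a verbatim quote), the macros only emit real struct fields when the platform sets CONFIG_NEED_DMA_MAP_STATE; otherwise both the fields and all the set/get bookkeeping compile away to nothing:

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif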
@@ -460,7 +460,7 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
 
 again:
 	for (i = 0; i < MAX_NPORTS; i++) {
-		s->port = ++s->port & (MAX_NPORTS - 1);
+		s->port = (s->port + 1) & (MAX_NPORTS - 1);
 		skbq = &s->p[s->port].skbq;
 
 		skb = skb_peek(skbq);
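The sched_skb() change is more than a rename: `s->port = ++s->port & (MAX_NPORTS - 1)` modifies s->port twice with no intervening sequence point, which is undefined behavior in C (GCC's -Wsequence-point flags it as "operation on 's->port' may be undefined"). The replacement writes the variable exactly once. A minimal user-space sketch of the well-defined round-robin advance, assuming a power-of-two port count (the mask trick requires it):

#include <stdio.h>

#define MAX_NPORTS 4	/* must be a power of two for the mask to work */

int main(void)
{
	unsigned int port = 0;
	int i;

	for (i = 0; i < 8; i++) {
		/* Well-defined: 'port' is read, then written exactly once. */
		port = (port + 1) & (MAX_NPORTS - 1);
		printf("%u ", port);	/* prints: 1 2 3 0 1 2 3 0 */
	}
	printf("\n");
	return 0;
}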
@@ -518,8 +518,8 @@ static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
 	while (q->credits--) {
 		struct freelQ_ce *ce = &q->centries[cidx];
 
-		pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
-				 pci_unmap_len(ce, dma_len),
+		pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+				 dma_unmap_len(ce, dma_len),
 				 PCI_DMA_FROMDEVICE);
 		dev_kfree_skb(ce->skb);
 		ce->skb = NULL;
@@ -633,9 +633,9 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
 	q->in_use -= n;
 	ce = &q->centries[cidx];
 	while (n--) {
-		if (likely(pci_unmap_len(ce, dma_len))) {
-			pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
-					 pci_unmap_len(ce, dma_len),
+		if (likely(dma_unmap_len(ce, dma_len))) {
+			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+					 dma_unmap_len(ce, dma_len),
 					 PCI_DMA_TODEVICE);
 			if (q->sop)
 				q->sop = 0;
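Note what the likely(dma_unmap_len(ce, dma_len)) test in free_cmdQ_buffers() is doing: one pci_map_single() can back several hardware descriptors (large buffers are split below SGE_TX_DESC_MAX_PLEN), and only the entry that records a non-zero dma_len owns the mapping; the rest store 0 so the unmap is skipped for them. A sketch of the idiom with hypothetical names (tx_entry and release_entry are illustrative, not from the driver):

struct tx_entry {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

static void release_entry(struct pci_dev *pdev, struct tx_entry *ce)
{
	/* dma_len == 0 marks a continuation descriptor: the buffer it
	 * points into is unmapped by the entry that owns the mapping. */
	if (dma_unmap_len(ce, dma_len))
		pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
				 dma_unmap_len(ce, dma_len),
				 PCI_DMA_TODEVICE);
}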
@@ -851,8 +851,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
 		skb_reserve(skb, sge->rx_pkt_pad);
 
 		ce->skb = skb;
-		pci_unmap_addr_set(ce, dma_addr, mapping);
-		pci_unmap_len_set(ce, dma_len, dma_len);
+		dma_unmap_addr_set(ce, dma_addr, mapping);
+		dma_unmap_len_set(ce, dma_len, dma_len);
 		e->addr_lo = (u32)mapping;
 		e->addr_hi = (u64)mapping >> 32;
 		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
@@ -1059,13 +1059,13 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
 		skb_reserve(skb, 2);	/* align IP header */
 		skb_put(skb, len);
 		pci_dma_sync_single_for_cpu(pdev,
-					    pci_unmap_addr(ce, dma_addr),
-					    pci_unmap_len(ce, dma_len),
+					    dma_unmap_addr(ce, dma_addr),
+					    dma_unmap_len(ce, dma_len),
 					    PCI_DMA_FROMDEVICE);
 		skb_copy_from_linear_data(ce->skb, skb->data, len);
 		pci_dma_sync_single_for_device(pdev,
-					       pci_unmap_addr(ce, dma_addr),
-					       pci_unmap_len(ce, dma_len),
+					       dma_unmap_addr(ce, dma_addr),
+					       dma_unmap_len(ce, dma_len),
 					       PCI_DMA_FROMDEVICE);
 		recycle_fl_buf(fl, fl->cidx);
 		return skb;
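This is the copy-break path in get_packet(): small packets are copied into a freshly allocated skb and the large free-list buffer is recycled in place, so the buffer is never unmapped; instead the CPU copy is bracketed by a sync-for-cpu/sync-for-device pair. The shape of the pattern, as a sketch (pdev, busaddr, len, dst and src are placeholders):

pci_dma_sync_single_for_cpu(pdev, busaddr, len, PCI_DMA_FROMDEVICE);
memcpy(dst, src, len);		/* CPU owns the buffer between the syncs */
pci_dma_sync_single_for_device(pdev, busaddr, len, PCI_DMA_FROMDEVICE);
/* ownership is back with the device; the buffer may rejoin the ring */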
@@ -1077,8 +1077,8 @@ use_orig_buf:
 		return NULL;
 	}
 
-	pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
-			 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+			 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
 	skb = ce->skb;
 	prefetch(skb->data);
 
@@ -1100,8 +1100,8 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
 	struct freelQ_ce *ce = &fl->centries[fl->cidx];
 	struct sk_buff *skb = ce->skb;
 
-	pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
-				    pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
+				    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
 	pr_err("%s: unexpected offload packet, cmd %u\n",
 	       adapter->name, *skb->data);
 	recycle_fl_buf(fl, fl->cidx);
@@ -1123,7 +1123,7 @@ static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
 
 	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
 		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
-		unsigned int i, len = skb->len - skb->data_len;
+		unsigned int i, len = skb_headlen(skb);
 		while (len > SGE_TX_DESC_MAX_PLEN) {
 			count++;
 			len -= SGE_TX_DESC_MAX_PLEN;
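skb_headlen() is the long-standing helper for the linear (non-paged) part of an skb, so the `skb->len - skb->data_len` replacements here and below are purely cosmetic. Its definition in include/linux/skbuff.h is exactly that expression:

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}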
@@ -1182,7 +1182,7 @@ static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
 		write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
 			      *gen, nfrags == 0 && *desc_len == 0);
 		ce1->skb = NULL;
-		pci_unmap_len_set(ce1, dma_len, 0);
+		dma_unmap_len_set(ce1, dma_len, 0);
 		*desc_mapping += SGE_TX_DESC_MAX_PLEN;
 		if (*desc_len) {
 			ce1++;
@@ -1219,10 +1219,10 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
 	ce = &q->centries[pidx];
 
 	mapping = pci_map_single(adapter->pdev, skb->data,
-				 skb->len - skb->data_len, PCI_DMA_TODEVICE);
+				 skb_headlen(skb), PCI_DMA_TODEVICE);
 
 	desc_mapping = mapping;
-	desc_len = skb->len - skb->data_len;
+	desc_len = skb_headlen(skb);
 
 	flags = F_CMD_DATAVALID | F_CMD_SOP |
 		V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
@@ -1233,7 +1233,7 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
 	e->addr_hi = (u64)desc_mapping >> 32;
 	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
 	ce->skb = NULL;
-	pci_unmap_len_set(ce, dma_len, 0);
+	dma_unmap_len_set(ce, dma_len, 0);
 
 	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
 	    desc_len > SGE_TX_DESC_MAX_PLEN) {
@@ -1257,8 +1257,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
 	}
 
 	ce->skb = NULL;
-	pci_unmap_addr_set(ce, dma_addr, mapping);
-	pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
+	dma_unmap_addr_set(ce, dma_addr, mapping);
+	dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
 
 	for (i = 0; nfrags--; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1284,8 +1284,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
 			write_tx_desc(e1, desc_mapping, desc_len, gen,
 				      nfrags == 0);
 		ce->skb = NULL;
-		pci_unmap_addr_set(ce, dma_addr, mapping);
-		pci_unmap_len_set(ce, dma_len, frag->size);
+		dma_unmap_addr_set(ce, dma_addr, mapping);
+		dma_unmap_len_set(ce, dma_len, frag->size);
 	}
 	ce->skb = skb;
 	wmb();