Diffstat (limited to 'drivers/net/via-velocity.c')
-rw-r--r--	drivers/net/via-velocity.c	| 70
1 file changed, 32 insertions(+), 38 deletions(-)
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 35cd65d6b9ed..8c9fb824cbd4 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -8,7 +8,6 @@
  *	for 64bit hardware platforms.
  *
  * TODO
- *	Big-endian support
  *	rx_copybreak/alignment
  *	Scatter gather
  *	More testing
@@ -681,7 +680,7 @@ static void velocity_rx_reset(struct velocity_info *vptr)
 	 * Init state, all RD entries belong to the NIC
 	 */
 	for (i = 0; i < vptr->options.numrx; ++i)
-		vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;
+		vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC;

 	writew(vptr->options.numrx, &regs->RBRDU);
 	writel(vptr->rd_pool_dma, &regs->RDBaseLo);
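
The ownership change above is the core of the patch. The old code set a C bitfield (owner) whose in-memory position depends on compiler and host endianness, while the NIC always reads its descriptors as little-endian. The new code keeps the whole word as a fixed-width little-endian field and folds the owner bit in as a pre-swapped mask. A minimal sketch of the convention; the field layout and flag value are assumed here from the surrounding code, since the header change is not part of this diff:

	struct rdesc0 {
		__le16 RSR;	/* receive status, written by the NIC */
		__le16 len;	/* bits 0-13: frame length, bit 15: owner */
	};

	/* Stored pre-swapped, so the hot path never byte-swaps:
	 * "len |= OWNED_BY_NIC" hands the descriptor to the NIC,
	 * "len & OWNED_BY_NIC" tests it, both on the raw __le16. */
	#define OWNED_BY_NIC	cpu_to_le16(0x8000)
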
@@ -777,7 +776,7 @@ static void velocity_init_registers(struct velocity_info *vptr,

 	vptr->int_mask = INT_MASK_DEF;

-	writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo);
+	writel(vptr->rd_pool_dma, &regs->RDBaseLo);
 	writew(vptr->options.numrx - 1, &regs->RDCSize);
 	mac_rx_queue_run(regs);
 	mac_rx_queue_wake(regs);
@@ -785,7 +784,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
 	writew(vptr->options.numtx - 1, &regs->TDCSize);

 	for (i = 0; i < vptr->num_txq; i++) {
-		writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i]));
+		writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]);
 		mac_tx_queue_run(regs, i);
 	}

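
Dropping cpu_to_le32() around the two writel() calls above is a real fix, not cleanup: writel() takes a CPU-order value and itself performs the little-endian store to the device. Pre-swapping the argument is a no-op on x86 but double-swaps the DMA address on big-endian hosts:

	writel(vptr->rd_pool_dma, &regs->RDBaseLo);			/* correct everywhere */
	writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo);	/* breaks on big-endian */
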
@@ -1195,7 +1194,7 @@ static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
 	dirty = vptr->rd_dirty - unusable;
 	for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
 		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
-		vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
+		vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC;
 	}

 	writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
@@ -1210,7 +1209,7 @@ static int velocity_rx_refill(struct velocity_info *vptr)
 		struct rx_desc *rd = vptr->rd_ring + dirty;

 		/* Fine for an all zero Rx desc at init time as well */
-		if (rd->rdesc0.owner == OWNED_BY_NIC)
+		if (rd->rdesc0.len & OWNED_BY_NIC)
 			break;

 		if (!vptr->rd_info[dirty].skb) {
@@ -1413,7 +1412,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
 		if (!vptr->rd_info[rd_curr].skb)
 			break;

-		if (rd->rdesc0.owner == OWNED_BY_NIC)
+		if (rd->rdesc0.len & OWNED_BY_NIC)
 			break;

 		rmb();
@@ -1421,7 +1420,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
 		/*
 		 * Don't drop CE or RL error frame although RXOK is off
 		 */
-		if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {
+		if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
 			if (velocity_receive_frame(vptr, rd_curr) < 0)
 				stats->rx_dropped++;
 		} else {
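
The rewritten receive check above is a pure boolean simplification, with no behaviour change. Writing A for RSR & RSR_RXOK and B for RSR & (RSR_CE | RSR_RL):

	/*
	 * A || (!A && B)  ==  A || B		(absorption law)
	 *
	 * and since RSR_RXOK, RSR_CE and RSR_RL are distinct bits of the
	 * same status word, A || B collapses to a single mask test:
	 *
	 *	rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)
	 */
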
@@ -1433,7 +1432,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
 			stats->rx_dropped++;
 		}

-		rd->inten = 1;
+		rd->size |= RX_INTEN;

 		vptr->dev->last_rx = jiffies;

@@ -1554,7 +1553,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 	struct net_device_stats *stats = &vptr->stats;
 	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
 	struct rx_desc *rd = &(vptr->rd_ring[idx]);
-	int pkt_len = rd->rdesc0.len;
+	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
 	struct sk_buff *skb;

 	if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
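
With the owner bit now stored in the top of rdesc0.len, recovering the frame length takes a swap plus a mask: 0x3fff keeps the low 14 bits and drops bits 14-15, which are not part of the length. A hypothetical helper spelling out the idiom used above:

	static inline int velocity_rx_pkt_len(const struct rx_desc *rd)
	{
		/* to CPU order first, then strip the non-length bits */
		return le16_to_cpu(rd->rdesc0.len) & 0x3fff;
	}
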
@@ -1637,8 +1636,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	 */

 	*((u32 *) & (rd->rdesc0)) = 0;
-	rd->len = cpu_to_le32(vptr->rx_buf_sz);
-	rd->inten = 1;
+	rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN;
 	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
 	rd->pa_high = 0;
 	return 0;
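
The refill path applies the same convention to the Rx size word: RX_INTEN is presumably another pre-swapped flag (the interrupt-enable bit), so buffer size and interrupt enable merge into one store where the old code needed a length write plus a separate bitfield write:

	#define RX_INTEN	cpu_to_le16(0x8000)	/* assumed value of the inten bit */

	rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN;
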
@@ -1674,7 +1672,7 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
 			td = &(vptr->td_rings[qnum][idx]);
 			tdinfo = &(vptr->td_infos[qnum][idx]);

-			if (td->tdesc0.owner == OWNED_BY_NIC)
+			if (td->tdesc0.len & OWNED_BY_NIC)
 				break;

 			if ((works++ > 15))
@@ -1874,7 +1872,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_

 		for (i = 0; i < tdinfo->nskb_dma; i++) {
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
-			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE);
+			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
 #else
 			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE);
 #endif
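
The unmap fix is the mirror image of the descriptor stores: tdesc1.len is now raw little-endian data, and anything handed back to a kernel API, here the mapping length, must be converted to CPU order first:

	pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
			 le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
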
@@ -2067,8 +2065,8 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct velocity_td_info *tdinfo;
 	unsigned long flags;
 	int index;
-
 	int pktlen = skb->len;
+	__le16 len = cpu_to_le16(pktlen);

 #ifdef VELOCITY_ZERO_COPY_SUPPORT
 	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
@@ -2083,9 +2081,8 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 	td_ptr = &(vptr->td_rings[qnum][index]);
 	tdinfo = &(vptr->td_infos[qnum][index]);

-	td_ptr->tdesc1.TCPLS = TCPLS_NORMAL;
 	td_ptr->tdesc1.TCR = TCR0_TIC;
-	td_ptr->td_buf[0].queue = 0;
+	td_ptr->td_buf[0].size &= ~TD_QUEUE;

 	/*
 	 *	Pad short frames.
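
TD_QUEUE follows the same pre-swapped-flag pattern in the Tx buffer's size word, replacing the old queue bitfield; it is cleared here for a normal transmit and set near the end of velocity_xmit() when the ring is about to fill:

	td_ptr->td_buf[0].size &= ~TD_QUEUE;	/* was: td_buf[0].queue = 0 */
	td_ptr->td_buf[0].size |= TD_QUEUE;	/* was: td_buf[0].queue = 1 */
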
@@ -2093,16 +2090,16 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (pktlen < ETH_ZLEN) {
 		/* Cannot occur until ZC support */
 		pktlen = ETH_ZLEN;
+		len = cpu_to_le16(ETH_ZLEN);
 		skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
 		memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
 		tdinfo->skb = skb;
 		tdinfo->skb_dma[0] = tdinfo->buf_dma;
-		td_ptr->tdesc0.pktsize = pktlen;
+		td_ptr->tdesc0.len = len;
 		td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
 		td_ptr->td_buf[0].pa_high = 0;
-		td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+		td_ptr->td_buf[0].size = len;	/* queue is 0 anyway */
 		tdinfo->nskb_dma = 1;
-		td_ptr->tdesc1.CMDZ = 2;
 	} else
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
 	if (skb_shinfo(skb)->nr_frags > 0) {
@@ -2111,36 +2108,35 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (nfrags > 6) {
 			skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
 			tdinfo->skb_dma[0] = tdinfo->buf_dma;
-			td_ptr->tdesc0.pktsize =
+			td_ptr->tdesc0.len = len;
 			td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
 			td_ptr->td_buf[0].pa_high = 0;
-			td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+			td_ptr->td_buf[0].size = len;	/* queue is 0 anyway */
 			tdinfo->nskb_dma = 1;
-			td_ptr->tdesc1.CMDZ = 2;
 		} else {
 			int i = 0;
 			tdinfo->nskb_dma = 0;
-			tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, skb->len - skb->data_len, PCI_DMA_TODEVICE);
+			tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data,
+					skb_headlen(skb), PCI_DMA_TODEVICE);

-			td_ptr->tdesc0.pktsize = pktlen;
+			td_ptr->tdesc0.len = len;

 			/* FIXME: support 48bit DMA later */
 			td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
 			td_ptr->td_buf[i].pa_high = 0;
-			td_ptr->td_buf[i].bufsize = skb->len->skb->data_len;
+			td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb));

 			for (i = 0; i < nfrags; i++) {
 				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-				void *addr = ((void *) page_address(frag->page + frag->page_offset));
+				void *addr = (void *)page_address(frag->page) + frag->page_offset;

 				tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);

 				td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
 				td_ptr->td_buf[i + 1].pa_high = 0;
-				td_ptr->td_buf[i + 1].bufsize = frag->size;
+				td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
 			}
 			tdinfo->nskb_dma = i - 1;
-			td_ptr->tdesc1.CMDZ = i;
 		}

 	} else
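
Beyond endianness, this hunk also repairs bit-rot in the zero-copy branch: the old skb->len->skb->data_len in the bufsize assignment could never have compiled, but VELOCITY_ZERO_COPY_SUPPORT is not normally defined, so the dead code went unnoticed (other warts, such as the unindexed cpu_to_le32(tdinfo->skb_dma), remain). skb_headlen() is the canonical spelling of the intended expression; its kernel definition is simply:

	/* length of the linear (non-paged) part of the skb */
	static inline unsigned int skb_headlen(const struct sk_buff *skb)
	{
		return skb->len - skb->data_len;
	}
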
@@ -2152,18 +2148,16 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 		 */
 		tdinfo->skb = skb;
 		tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
-		td_ptr->tdesc0.pktsize = pktlen;
+		td_ptr->tdesc0.len = len;
 		td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
 		td_ptr->td_buf[0].pa_high = 0;
-		td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+		td_ptr->td_buf[0].size = len;
 		tdinfo->nskb_dma = 1;
-		td_ptr->tdesc1.CMDZ = 2;
 	}
+	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;

 	if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
-		td_ptr->tdesc1.pqinf.VID = vlan_tx_tag_get(skb);
-		td_ptr->tdesc1.pqinf.priority = 0;
-		td_ptr->tdesc1.pqinf.CFI = 0;
+		td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
 		td_ptr->tdesc1.TCR |= TCR0_VETAG;
 	}

@@ -2185,7 +2179,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)

 		if (prev < 0)
 			prev = vptr->options.numtx - 1;
-		td_ptr->tdesc0.owner = OWNED_BY_NIC;
+		td_ptr->tdesc0.len |= OWNED_BY_NIC;
 		vptr->td_used[qnum]++;
 		vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;

@@ -2193,7 +2187,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);

 		td_ptr = &(vptr->td_rings[qnum][prev]);
-		td_ptr->td_buf[0].queue = 1;
+		td_ptr->td_buf[0].size |= TD_QUEUE;
 		mac_tx_queue_wake(vptr->mac_regs, qnum);
 	}
 	dev->trans_start = jiffies;
@@ -3410,7 +3404,7 @@ static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
 		velocity_save_context(vptr, &vptr->context);
 		velocity_shutdown(vptr);
 		velocity_set_wol(vptr);
-		pci_enable_wake(pdev, 3, 1);
+		pci_enable_wake(pdev, PCI_D3hot, 1);
 		pci_set_power_state(pdev, PCI_D3hot);
 	} else {
 		velocity_save_context(vptr, &vptr->context);
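
The suspend change swaps a magic number for its symbol: PCI_D3hot is defined as 3 in the PCI power-state enum, so behaviour is identical and the call now documents itself:

	pci_enable_wake(pdev, PCI_D3hot, 1);	/* PCI_D3hot == 3 */
	pci_set_power_state(pdev, PCI_D3hot);
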