diff options
Diffstat (limited to 'drivers/net/b44.c')
| -rw-r--r-- | drivers/net/b44.c | 136 |
1 files changed, 128 insertions, 8 deletions
diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 94939f570f78..282ebd15f011 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c | |||
| @@ -106,6 +106,29 @@ static int b44_poll(struct net_device *dev, int *budget); | |||
| 106 | static void b44_poll_controller(struct net_device *dev); | 106 | static void b44_poll_controller(struct net_device *dev); |
| 107 | #endif | 107 | #endif |
| 108 | 108 | ||
| 109 | static int dma_desc_align_mask; | ||
| 110 | static int dma_desc_sync_size; | ||
| 111 | |||
| 112 | static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev, | ||
| 113 | dma_addr_t dma_base, | ||
| 114 | unsigned long offset, | ||
| 115 | enum dma_data_direction dir) | ||
| 116 | { | ||
| 117 | dma_sync_single_range_for_device(&pdev->dev, dma_base, | ||
| 118 | offset & dma_desc_align_mask, | ||
| 119 | dma_desc_sync_size, dir); | ||
| 120 | } | ||
| 121 | |||
| 122 | static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev, | ||
| 123 | dma_addr_t dma_base, | ||
| 124 | unsigned long offset, | ||
| 125 | enum dma_data_direction dir) | ||
| 126 | { | ||
| 127 | dma_sync_single_range_for_cpu(&pdev->dev, dma_base, | ||
| 128 | offset & dma_desc_align_mask, | ||
| 129 | dma_desc_sync_size, dir); | ||
| 130 | } | ||
| 131 | |||
| 109 | static inline unsigned long br32(const struct b44 *bp, unsigned long reg) | 132 | static inline unsigned long br32(const struct b44 *bp, unsigned long reg) |
| 110 | { | 133 | { |
| 111 | return readl(bp->regs + reg); | 134 | return readl(bp->regs + reg); |
| @@ -668,6 +691,11 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) | |||
| 668 | dp->ctrl = cpu_to_le32(ctrl); | 691 | dp->ctrl = cpu_to_le32(ctrl); |
| 669 | dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset); | 692 | dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset); |
| 670 | 693 | ||
| 694 | if (bp->flags & B44_FLAG_RX_RING_HACK) | ||
| 695 | b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma, | ||
| 696 | dest_idx * sizeof(dp), | ||
| 697 | DMA_BIDIRECTIONAL); | ||
| 698 | |||
| 671 | return RX_PKT_BUF_SZ; | 699 | return RX_PKT_BUF_SZ; |
| 672 | } | 700 | } |
| 673 | 701 | ||
| @@ -692,6 +720,11 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) | |||
| 692 | pci_unmap_addr_set(dest_map, mapping, | 720 | pci_unmap_addr_set(dest_map, mapping, |
| 693 | pci_unmap_addr(src_map, mapping)); | 721 | pci_unmap_addr(src_map, mapping)); |
| 694 | 722 | ||
| 723 | if (bp->flags & B44_FLAG_RX_RING_HACK) | ||
| 724 | b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma, | ||
| 725 | src_idx * sizeof(src_desc), | ||
| 726 | DMA_BIDIRECTIONAL); | ||
| 727 | |||
| 695 | ctrl = src_desc->ctrl; | 728 | ctrl = src_desc->ctrl; |
| 696 | if (dest_idx == (B44_RX_RING_SIZE - 1)) | 729 | if (dest_idx == (B44_RX_RING_SIZE - 1)) |
| 697 | ctrl |= cpu_to_le32(DESC_CTRL_EOT); | 730 | ctrl |= cpu_to_le32(DESC_CTRL_EOT); |
| @@ -700,8 +733,14 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) | |||
| 700 | 733 | ||
| 701 | dest_desc->ctrl = ctrl; | 734 | dest_desc->ctrl = ctrl; |
| 702 | dest_desc->addr = src_desc->addr; | 735 | dest_desc->addr = src_desc->addr; |
| 736 | |||
| 703 | src_map->skb = NULL; | 737 | src_map->skb = NULL; |
| 704 | 738 | ||
| 739 | if (bp->flags & B44_FLAG_RX_RING_HACK) | ||
| 740 | b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma, | ||
| 741 | dest_idx * sizeof(dest_desc), | ||
| 742 | DMA_BIDIRECTIONAL); | ||
| 743 | |||
| 705 | pci_dma_sync_single_for_device(bp->pdev, src_desc->addr, | 744 | pci_dma_sync_single_for_device(bp->pdev, src_desc->addr, |
| 706 | RX_PKT_BUF_SZ, | 745 | RX_PKT_BUF_SZ, |
| 707 | PCI_DMA_FROMDEVICE); | 746 | PCI_DMA_FROMDEVICE); |
| @@ -959,6 +998,11 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 959 | bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl); | 998 | bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl); |
| 960 | bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset); | 999 | bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset); |
| 961 | 1000 | ||
| 1001 | if (bp->flags & B44_FLAG_TX_RING_HACK) | ||
| 1002 | b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma, | ||
| 1003 | entry * sizeof(bp->tx_ring[0]), | ||
| 1004 | DMA_TO_DEVICE); | ||
| 1005 | |||
| 962 | entry = NEXT_TX(entry); | 1006 | entry = NEXT_TX(entry); |
| 963 | 1007 | ||
| 964 | bp->tx_prod = entry; | 1008 | bp->tx_prod = entry; |
| @@ -1064,6 +1108,16 @@ static void b44_init_rings(struct b44 *bp) | |||
| 1064 | memset(bp->rx_ring, 0, B44_RX_RING_BYTES); | 1108 | memset(bp->rx_ring, 0, B44_RX_RING_BYTES); |
| 1065 | memset(bp->tx_ring, 0, B44_TX_RING_BYTES); | 1109 | memset(bp->tx_ring, 0, B44_TX_RING_BYTES); |
| 1066 | 1110 | ||
| 1111 | if (bp->flags & B44_FLAG_RX_RING_HACK) | ||
| 1112 | dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma, | ||
| 1113 | DMA_TABLE_BYTES, | ||
| 1114 | PCI_DMA_BIDIRECTIONAL); | ||
| 1115 | |||
| 1116 | if (bp->flags & B44_FLAG_TX_RING_HACK) | ||
| 1117 | dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma, | ||
| 1118 | DMA_TABLE_BYTES, | ||
| 1119 | PCI_DMA_TODEVICE); | ||
| 1120 | |||
| 1067 | for (i = 0; i < bp->rx_pending; i++) { | 1121 | for (i = 0; i < bp->rx_pending; i++) { |
| 1068 | if (b44_alloc_rx_skb(bp, -1, i) < 0) | 1122 | if (b44_alloc_rx_skb(bp, -1, i) < 0) |
| 1069 | break; | 1123 | break; |
| @@ -1085,14 +1139,28 @@ static void b44_free_consistent(struct b44 *bp) | |||
| 1085 | bp->tx_buffers = NULL; | 1139 | bp->tx_buffers = NULL; |
| 1086 | } | 1140 | } |
| 1087 | if (bp->rx_ring) { | 1141 | if (bp->rx_ring) { |
| 1088 | pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, | 1142 | if (bp->flags & B44_FLAG_RX_RING_HACK) { |
| 1089 | bp->rx_ring, bp->rx_ring_dma); | 1143 | dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma, |
| 1144 | DMA_TABLE_BYTES, | ||
| 1145 | DMA_BIDIRECTIONAL); | ||
| 1146 | kfree(bp->rx_ring); | ||
| 1147 | } else | ||
| 1148 | pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, | ||
| 1149 | bp->rx_ring, bp->rx_ring_dma); | ||
| 1090 | bp->rx_ring = NULL; | 1150 | bp->rx_ring = NULL; |
| 1151 | bp->flags &= ~B44_FLAG_RX_RING_HACK; | ||
| 1091 | } | 1152 | } |
| 1092 | if (bp->tx_ring) { | 1153 | if (bp->tx_ring) { |
| 1093 | pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, | 1154 | if (bp->flags & B44_FLAG_TX_RING_HACK) { |
| 1094 | bp->tx_ring, bp->tx_ring_dma); | 1155 | dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma, |
| 1156 | DMA_TABLE_BYTES, | ||
| 1157 | DMA_TO_DEVICE); | ||
| 1158 | kfree(bp->tx_ring); | ||
| 1159 | } else | ||
| 1160 | pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, | ||
| 1161 | bp->tx_ring, bp->tx_ring_dma); | ||
| 1095 | bp->tx_ring = NULL; | 1162 | bp->tx_ring = NULL; |
| 1163 | bp->flags &= ~B44_FLAG_TX_RING_HACK; | ||
| 1096 | } | 1164 | } |
| 1097 | } | 1165 | } |
| 1098 | 1166 | ||
| @@ -1118,12 +1186,56 @@ static int b44_alloc_consistent(struct b44 *bp) | |||
| 1118 | 1186 | ||
| 1119 | size = DMA_TABLE_BYTES; | 1187 | size = DMA_TABLE_BYTES; |
| 1120 | bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma); | 1188 | bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma); |
| 1121 | if (!bp->rx_ring) | 1189 | if (!bp->rx_ring) { |
| 1122 | goto out_err; | 1190 | /* Allocation may have failed due to pci_alloc_consistent |
| 1191 | insisting on use of GFP_DMA, which is more restrictive | ||
| 1192 | than necessary... */ | ||
| 1193 | struct dma_desc *rx_ring; | ||
| 1194 | dma_addr_t rx_ring_dma; | ||
| 1195 | |||
| 1196 | if (!(rx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL))) | ||
| 1197 | goto out_err; | ||
| 1198 | |||
| 1199 | memset(rx_ring, 0, size); | ||
| 1200 | rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring, | ||
| 1201 | DMA_TABLE_BYTES, | ||
| 1202 | DMA_BIDIRECTIONAL); | ||
| 1203 | |||
| 1204 | if (rx_ring_dma + size > B44_DMA_MASK) { | ||
| 1205 | kfree(rx_ring); | ||
| 1206 | goto out_err; | ||
| 1207 | } | ||
| 1208 | |||
| 1209 | bp->rx_ring = rx_ring; | ||
| 1210 | bp->rx_ring_dma = rx_ring_dma; | ||
| 1211 | bp->flags |= B44_FLAG_RX_RING_HACK; | ||
| 1212 | } | ||
| 1123 | 1213 | ||
| 1124 | bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma); | 1214 | bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma); |
| 1125 | if (!bp->tx_ring) | 1215 | if (!bp->tx_ring) { |
| 1126 | goto out_err; | 1216 | /* Allocation may have failed due to pci_alloc_consistent |
| 1217 | insisting on use of GFP_DMA, which is more restrictive | ||
| 1218 | than necessary... */ | ||
| 1219 | struct dma_desc *tx_ring; | ||
| 1220 | dma_addr_t tx_ring_dma; | ||
| 1221 | |||
| 1222 | if (!(tx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL))) | ||
| 1223 | goto out_err; | ||
| 1224 | |||
| 1225 | memset(tx_ring, 0, size); | ||
| 1226 | tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring, | ||
| 1227 | DMA_TABLE_BYTES, | ||
| 1228 | DMA_TO_DEVICE); | ||
| 1229 | |||
| 1230 | if (tx_ring_dma + size > B44_DMA_MASK) { | ||
| 1231 | kfree(tx_ring); | ||
| 1232 | goto out_err; | ||
| 1233 | } | ||
| 1234 | |||
| 1235 | bp->tx_ring = tx_ring; | ||
| 1236 | bp->tx_ring_dma = tx_ring_dma; | ||
| 1237 | bp->flags |= B44_FLAG_TX_RING_HACK; | ||
| 1238 | } | ||
| 1127 | 1239 | ||
| 1128 | return 0; | 1240 | return 0; |
| 1129 | 1241 | ||
| @@ -1676,6 +1788,7 @@ static struct ethtool_ops b44_ethtool_ops = { | |||
| 1676 | .set_pauseparam = b44_set_pauseparam, | 1788 | .set_pauseparam = b44_set_pauseparam, |
| 1677 | .get_msglevel = b44_get_msglevel, | 1789 | .get_msglevel = b44_get_msglevel, |
| 1678 | .set_msglevel = b44_set_msglevel, | 1790 | .set_msglevel = b44_set_msglevel, |
| 1791 | .get_perm_addr = ethtool_op_get_perm_addr, | ||
| 1679 | }; | 1792 | }; |
| 1680 | 1793 | ||
| 1681 | static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 1794 | static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
| @@ -1718,6 +1831,7 @@ static int __devinit b44_get_invariants(struct b44 *bp) | |||
| 1718 | bp->dev->dev_addr[3] = eeprom[80]; | 1831 | bp->dev->dev_addr[3] = eeprom[80]; |
| 1719 | bp->dev->dev_addr[4] = eeprom[83]; | 1832 | bp->dev->dev_addr[4] = eeprom[83]; |
| 1720 | bp->dev->dev_addr[5] = eeprom[82]; | 1833 | bp->dev->dev_addr[5] = eeprom[82]; |
| 1834 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len); | ||
| 1721 | 1835 | ||
| 1722 | bp->phy_addr = eeprom[90] & 0x1f; | 1836 | bp->phy_addr = eeprom[90] & 0x1f; |
| 1723 | 1837 | ||
| @@ -1971,6 +2085,12 @@ static struct pci_driver b44_driver = { | |||
| 1971 | 2085 | ||
| 1972 | static int __init b44_init(void) | 2086 | static int __init b44_init(void) |
| 1973 | { | 2087 | { |
| 2088 | unsigned int dma_desc_align_size = dma_get_cache_alignment(); | ||
| 2089 | |||
| 2090 | /* Setup parameters for syncing RX/TX DMA descriptors */ ||
| 2091 | dma_desc_align_mask = ~(dma_desc_align_size - 1); | ||
| 2092 | dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc)); | ||
| 2093 | |||
| 1974 | return pci_module_init(&b44_driver); | 2094 | return pci_module_init(&b44_driver); |
| 1975 | } | 2095 | } |
| 1976 | 2096 | ||
