author		Roger Luethi <rl@hellgate.ch>			2010-12-05 19:59:40 -0500
committer	David S. Miller <davem@davemloft.net>		2010-12-08 13:23:36 -0500
commit		38f49e8801565674c424896c3dcb4228410b43a8 (patch)
tree		5765289b26b9b7b4e06f0219d585312a9f78ae11 /drivers/net/via-rhine.c
parent		941666c2e3e0f9f6a1cb5808d02352d445bd702c (diff)
via-rhine: hardware VLAN support
This patch adds VLAN hardware support for Rhine chips.
The driver uses up to 3 additional bytes of buffer space when extracting
802.1Q headers; PKT_BUF_SZ should still be sufficient.
The initial code was provided by David Lv. I reworked it to use standard
kernel facilities; the coding style clean-up mostly follows via-velocity.
Adapted to the new interface for VLAN acceleration (per request of Jesse Gross).
Signed-off-by: David Lv <DavidLv@viatech.com.cn>
Signed-off-by: Roger Luethi <rl@hellgate.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/via-rhine.c')

 drivers/net/via-rhine.c | 326 ++++++++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 312 insertions(+), 14 deletions(-)
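A note on the buffer arithmetic mentioned above, for readers skimming the diff: with hardware tag extraction enabled, the chip strips the 802.1Q header from the frame and stores it 4-byte aligned after the received data (CRC included), and the new rhine_get_vlan_tci() reads the TCI from that spot. The standalone C sketch below is a userspace illustration only, not part of the patch; the buffer contents and the 63-byte frame size are made-up values. It traces the same arithmetic and shows where the "up to 3 additional bytes" figure comes from.

#include <arpa/inet.h>	/* ntohs() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Offset of the TCI within the receive buffer, mirroring the arithmetic in
 * rhine_get_vlan_tci() below: the extracted 802.1Q header (2-byte TPID +
 * 2-byte TCI) starts at the next 4-byte boundary after data_size bytes of
 * frame data (CRC included), and the TCI is the second halfword. */
static unsigned int vlan_tci_offset(unsigned int data_size)
{
	return ((data_size + 3) & ~3u) + 2;
}

int main(void)
{
	/* Hypothetical 63-byte frame (CRC included, deliberately unaligned)
	 * followed by an extracted tag: TPID 0x8100, TCI 0x0005 (VLAN 5). */
	uint8_t buf[128] = { 0 };
	unsigned int data_size = 63;
	unsigned int aligned = (data_size + 3) & ~3u;
	uint16_t tci;

	buf[aligned + 0] = 0x81; buf[aligned + 1] = 0x00;	/* TPID */
	buf[aligned + 2] = 0x00; buf[aligned + 3] = 0x05;	/* TCI  */

	memcpy(&tci, buf + vlan_tci_offset(data_size), sizeof(tci));
	printf("TCI = 0x%04x (VLAN %u)\n", ntohs(tci), ntohs(tci) & 0x0fff);

	/* The tag area ends at aligned + 4; an in-line tag would have ended
	 * at data_size + 4, so the difference is at most 3 bytes -- the
	 * "up to 3 additional bytes" noted in the commit message. */
	printf("alignment padding: %u bytes\n", aligned - data_size);
	return 0;
}

The worst case is data_size equal to 1 modulo 4, which leaves three padding bytes between the end of the frame data and the extracted header.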
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 4930f9dbc493..5e7f069eab53 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -30,8 +30,8 @@
 */
 
 #define DRV_NAME	"via-rhine"
-#define DRV_VERSION	"1.4.3"
-#define DRV_RELDATE	"2007-03-06"
+#define DRV_VERSION	"1.5.0"
+#define DRV_RELDATE	"2010-10-09"
 
 
 /* A few user-configurable values.
@@ -100,6 +100,7 @@ static const int multicast_filter_limit = 32;
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/crc32.h>
+#include <linux/if_vlan.h>
 #include <linux/bitops.h>
 #include <linux/workqueue.h>
 #include <asm/processor.h>	/* Processor type for cache alignment. */
@@ -133,6 +134,9 @@ MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
 
+#define MCAM_SIZE	32
+#define VCAM_SIZE	32
+
 /*
 		Theory of Operation
 
@@ -279,15 +283,16 @@ MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
 /* Offsets to the device registers. */
 enum register_offsets {
 	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
-	ChipCmd1=0x09,
+	ChipCmd1=0x09, TQWake=0x0A,
 	IntrStatus=0x0C, IntrEnable=0x0E,
 	MulticastFilter0=0x10, MulticastFilter1=0x14,
 	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
-	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
+	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
 	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
 	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
 	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
 	StickyHW=0x83, IntrStatus2=0x84,
+	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
 	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
 	WOLcrClr1=0xA6, WOLcgClr=0xA7,
 	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
@@ -299,6 +304,40 @@ enum backoff_bits {
 	BackCaptureEffect=0x04, BackRandom=0x08
 };
 
+/* Bits in the TxConfig (TCR) register */
+enum tcr_bits {
+	TCR_PQEN=0x01,
+	TCR_LB0=0x02,		/* loopback[0] */
+	TCR_LB1=0x04,		/* loopback[1] */
+	TCR_OFSET=0x08,
+	TCR_RTGOPT=0x10,
+	TCR_RTFT0=0x20,
+	TCR_RTFT1=0x40,
+	TCR_RTSF=0x80,
+};
+
+/* Bits in the CamCon (CAMC) register */
+enum camcon_bits {
+	CAMC_CAMEN=0x01,
+	CAMC_VCAMSL=0x02,
+	CAMC_CAMWR=0x04,
+	CAMC_CAMRD=0x08,
+};
+
+/* Bits in the PCIBusConfig1 (BCR1) register */
+enum bcr1_bits {
+	BCR1_POT0=0x01,
+	BCR1_POT1=0x02,
+	BCR1_POT2=0x04,
+	BCR1_CTFT0=0x08,
+	BCR1_CTFT1=0x10,
+	BCR1_CTSF=0x20,
+	BCR1_TXQNOBK=0x40,	/* for VT6105 */
+	BCR1_VIDFR=0x80,	/* for VT6105 */
+	BCR1_MED0=0x40,		/* for VT6102 */
+	BCR1_MED1=0x80,		/* for VT6102 */
+};
+
 #ifdef USE_MMIO
 /* Registers we check that mmio and reg are the same. */
 static const int mmio_verify_registers[] = {
@@ -356,6 +395,11 @@ enum desc_status_bits {
 	DescOwn=0x80000000
 };
 
+/* Bits in *_desc.*_length */
+enum desc_length_bits {
+	DescTag=0x00010000
+};
+
 /* Bits in ChipCmd. */
 enum chip_cmd_bits {
 	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
@@ -365,6 +409,9 @@ enum chip_cmd_bits {
 };
 
 struct rhine_private {
+	/* Bit mask for configured VLAN ids */
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
 	/* Descriptor rings */
 	struct rx_desc *rx_ring;
 	struct tx_desc *tx_ring;
@@ -405,6 +452,23 @@ struct rhine_private {
 	void __iomem *base;
 };
 
+#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
+#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
+#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
+#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
+#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
+
+#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
+#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
+#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_SET(x, m, p)  do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
+#define WORD_REG_BITS_SET(x, m, p)  do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
+#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
+
+
 static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int rhine_open(struct net_device *dev);
@@ -422,6 +486,14 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static const struct ethtool_ops netdev_ethtool_ops;
 static int rhine_close(struct net_device *dev);
 static void rhine_shutdown (struct pci_dev *pdev);
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_init_cam_filter(struct net_device *dev);
+static void rhine_update_vcam(struct net_device *dev);
 
 #define RHINE_WAIT_FOR(condition) do {					\
 	int i=1024;							\
@@ -629,6 +701,8 @@ static const struct net_device_ops rhine_netdev_ops = {
 	.ndo_set_mac_address	 = eth_mac_addr,
 	.ndo_do_ioctl		 = netdev_ioctl,
 	.ndo_tx_timeout		 = rhine_tx_timeout,
+	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	 = rhine_poll,
 #endif
@@ -795,6 +869,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 	if (rp->quirks & rqRhineI)
 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
+	if (pdev->revision >= VT6105M)
+		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+		NETIF_F_HW_VLAN_FILTER;
+
 	/* dev->name not defined before register_netdev()! */
 	rc = register_netdev(dev);
 	if (rc)
@@ -1040,6 +1118,167 @@ static void rhine_set_carrier(struct mii_if_info *mii)
 			netif_carrier_ok(mii->dev));
 }
 
+/**
+ * rhine_set_cam - set CAM multicast filters
+ * @ioaddr: register block of this Rhine
+ * @idx: multicast CAM index [0..MCAM_SIZE-1]
+ * @addr: multicast address (6 bytes)
+ *
+ * Load addresses into multicast filters.
+ */
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+	int i;
+
+	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+	wmb();
+
+	/* Paranoid -- idx out of range should never happen */
+	idx &= (MCAM_SIZE - 1);
+
+	iowrite8((u8) idx, ioaddr + CamAddr);
+
+	for (i = 0; i < 6; i++, addr++)
+		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
+	udelay(10);
+	wmb();
+
+	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+	udelay(10);
+
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_vlan_cam - set CAM VLAN filters
+ * @ioaddr: register block of this Rhine
+ * @idx: VLAN CAM index [0..VCAM_SIZE-1]
+ * @addr: VLAN ID (2 bytes)
+ *
+ * Load addresses into VLAN filters.
+ */
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+	wmb();
+
+	/* Paranoid -- idx out of range should never happen */
+	idx &= (VCAM_SIZE - 1);
+
+	iowrite8((u8) idx, ioaddr + CamAddr);
+
+	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
+	udelay(10);
+	wmb();
+
+	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+	udelay(10);
+
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_cam_mask - set multicast CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: multicast CAM mask
+ *
+ * Mask sets multicast filters active/inactive.
+ */
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+	wmb();
+
+	/* write mask */
+	iowrite32(mask, ioaddr + CamMask);
+
+	/* disable CAMEN */
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_vlan_cam_mask - set VLAN CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: VLAN CAM mask
+ *
+ * Mask sets VLAN filters active/inactive.
+ */
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+	wmb();
+
+	/* write mask */
+	iowrite32(mask, ioaddr + CamMask);
+
+	/* disable CAMEN */
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_init_cam_filter - initialize CAM filters
+ * @dev: network device
+ *
+ * Initialize (disable) hardware VLAN and multicast support on this
+ * Rhine.
+ */
+static void rhine_init_cam_filter(struct net_device *dev)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+	void __iomem *ioaddr = rp->base;
+
+	/* Disable all CAMs */
+	rhine_set_vlan_cam_mask(ioaddr, 0);
+	rhine_set_cam_mask(ioaddr, 0);
+
+	/* disable hardware VLAN support */
+	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
+	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+}
+
+/**
+ * rhine_update_vcam - update VLAN CAM filters
+ * @dev: network device
+ *
+ * Update VLAN CAM filters to match configuration change.
+ */
+static void rhine_update_vcam(struct net_device *dev)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+	void __iomem *ioaddr = rp->base;
+	u16 vid;
+	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
+	unsigned int i = 0;
+
+	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
+		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
+		vCAMmask |= 1 << i;
+		if (++i >= VCAM_SIZE)
+			break;
+	}
+	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
+}
+
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+
+	spin_lock_irq(&rp->lock);
+	set_bit(vid, rp->active_vlans);
+	rhine_update_vcam(dev);
+	spin_unlock_irq(&rp->lock);
+}
+
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+
+	spin_lock_irq(&rp->lock);
+	clear_bit(vid, rp->active_vlans);
+	rhine_update_vcam(dev);
+	spin_unlock_irq(&rp->lock);
+}
+
 static void init_registers(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
@@ -1061,6 +1300,9 @@ static void init_registers(struct net_device *dev)
 
 	rhine_set_rx_mode(dev);
 
+	if (rp->pdev->revision >= VT6105M)
+		rhine_init_cam_filter(dev);
+
 	napi_enable(&rp->napi);
 
 	/* Enable interrupts by setting the interrupt mask. */
@@ -1276,16 +1518,28 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	rp->tx_ring[entry].desc_length =
 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
+	if (unlikely(vlan_tx_tag_present(skb))) {
+		rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
+		/* request tagging */
+		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
+	}
+	else
+		rp->tx_ring[entry].tx_status = 0;
+
 	/* lock eth irq */
 	spin_lock_irqsave(&rp->lock, flags);
 	wmb();
-	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
 	wmb();
 
 	rp->cur_tx++;
 
 	/* Non-x86 Todo: explicitly flush cache lines here. */
 
+	if (vlan_tx_tag_present(skb))
+		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
 	/* Wake the potentially-idle transmit channel */
 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
 		ioaddr + ChipCmd1);
@@ -1437,6 +1691,21 @@ static void rhine_tx(struct net_device *dev)
 	spin_unlock(&rp->lock);
 }
 
+/**
+ * rhine_get_vlan_tci - extract TCI from Rx data buffer
+ * @skb: pointer to sk_buff
+ * @data_size: used data area of the buffer including CRC
+ *
+ * If hardware VLAN tag extraction is enabled and the chip indicates an 802.1Q
+ * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
+ * aligned following the CRC.
+ */
+static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
+{
+	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
+	return ntohs(*(u16 *)trailer);
+}
+
 /* Process up to limit frames from receive ring */
 static int rhine_rx(struct net_device *dev, int limit)
 {
@@ -1454,6 +1723,7 @@ static int rhine_rx(struct net_device *dev, int limit)
 	for (count = 0; count < limit; ++count) {
 		struct rx_desc *desc = rp->rx_head_desc;
 		u32 desc_status = le32_to_cpu(desc->rx_status);
+		u32 desc_length = le32_to_cpu(desc->desc_length);
 		int data_size = desc_status >> 16;
 
 		if (desc_status & DescOwn)
@@ -1498,6 +1768,7 @@ static int rhine_rx(struct net_device *dev, int limit)
 			struct sk_buff *skb = NULL;
 			/* Length should omit the CRC */
 			int pkt_len = data_size - 4;
+			u16 vlan_tci = 0;
 
 			/* Check if the packet is long enough to accept without
 			   copying to a minimally-sized skbuff. */
@@ -1532,7 +1803,14 @@ static int rhine_rx(struct net_device *dev, int limit)
 							 rp->rx_buf_sz,
 							 PCI_DMA_FROMDEVICE);
 			}
+
+			if (unlikely(desc_length & DescTag))
+				vlan_tci = rhine_get_vlan_tci(skb, data_size);
+
 			skb->protocol = eth_type_trans(skb, dev);
+
+			if (unlikely(desc_length & DescTag))
+				__vlan_hwaccel_put_tag(skb, vlan_tci);
 			netif_receive_skb(skb);
 			dev->stats.rx_bytes += pkt_len;
 			dev->stats.rx_packets++;
@@ -1596,6 +1874,11 @@ static void rhine_restart_tx(struct net_device *dev) {
 
 	iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
 		ioaddr + ChipCmd);
+
+	if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
+		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
 		ioaddr + ChipCmd1);
 	IOSYNC;
@@ -1631,7 +1914,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
 	}
 	if (intr_status & IntrTxUnderrun) {
 		if (rp->tx_thresh < 0xE0)
-			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+			BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
 		if (debug > 1)
 			printk(KERN_INFO "%s: Transmitter underrun, Tx "
 			       "threshold now %2.2x.\n",
@@ -1646,7 +1929,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
 	    (intr_status & (IntrTxAborted |
 	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
 		if (rp->tx_thresh < 0xE0) {
-			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+			BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
 		}
 		if (debug > 1)
 			printk(KERN_INFO "%s: Unspecified error. Tx "
@@ -1688,7 +1971,8 @@ static void rhine_set_rx_mode(struct net_device *dev)
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 	u32 mc_filter[2];	/* Multicast hash filter */
-	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */
+	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
+	struct netdev_hw_addr *ha;
 
 	if (dev->flags & IFF_PROMISC) {          /* Set promiscuous. */
 		rx_mode = 0x1C;
@@ -1699,10 +1983,18 @@ static void rhine_set_rx_mode(struct net_device *dev)
 		/* Too many to match, or accept all multicasts. */
 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
-		rx_mode = 0x0C;
+	} else if (rp->pdev->revision >= VT6105M) {
+		int i = 0;
+		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
+		netdev_for_each_mc_addr(ha, dev) {
+			if (i == MCAM_SIZE)
+				break;
+			rhine_set_cam(ioaddr, i, ha->addr);
+			mCAMmask |= 1 << i;
+			i++;
+		}
+		rhine_set_cam_mask(ioaddr, mCAMmask);
 	} else {
-		struct netdev_hw_addr *ha;
-
 		memset(mc_filter, 0, sizeof(mc_filter));
 		netdev_for_each_mc_addr(ha, dev) {
 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
@@ -1711,9 +2003,15 @@ static void rhine_set_rx_mode(struct net_device *dev)
 		}
 		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
-		rx_mode = 0x0C;
 	}
-	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
+	/* enable/disable VLAN receive filtering */
+	if (rp->pdev->revision >= VT6105M) {
+		if (dev->flags & IFF_PROMISC)
+			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+		else
+			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+	}
+	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
 }
 
 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -1966,7 +2264,7 @@ static int rhine_resume(struct pci_dev *pdev)
 	if (!netif_running(dev))
 		return 0;
 
-        if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
+	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
 		printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
 
 	ret = pci_set_power_state(pdev, PCI_D0);