Diffstat (limited to 'drivers/net/ipg.c')
 drivers/net/ipg.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index a5b0f0e194bb..58cd3202b48c 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -486,14 +486,14 @@ static int ipg_config_autoneg(struct net_device *dev)
 	phyctrl = ipg_r8(PHY_CTRL);
 	mac_ctrl_val = ipg_r32(MAC_CTRL);
 
-	/* Set flags for use in resolving auto-negotation, assuming
+	/* Set flags for use in resolving auto-negotiation, assuming
 	 * non-1000Mbps, half duplex, no flow control.
 	 */
 	fullduplex = 0;
 	txflowcontrol = 0;
 	rxflowcontrol = 0;
 
-	/* To accomodate a problem in 10Mbps operation,
+	/* To accommodate a problem in 10Mbps operation,
 	 * set a global flag if PHY running in 10Mbps mode.
 	 */
 	sp->tenmbpsmode = 0;
@@ -846,7 +846,7 @@ static void init_tfdlist(struct net_device *dev)
 }
 
 /*
- * Free all transmit buffers which have already been transfered
+ * Free all transmit buffers which have already been transferred
  * via DMA to the IPG.
  */
 static void ipg_nic_txfree(struct net_device *dev)
@@ -920,7 +920,7 @@ static void ipg_tx_timeout(struct net_device *dev)
 
 /*
  * For TxComplete interrupts, free all transmit
- * buffers which have already been transfered via DMA
+ * buffers which have already been transferred via DMA
  * to the IPG.
  */
 static void ipg_nic_txcleanup(struct net_device *dev)
@@ -1141,13 +1141,13 @@ static int ipg_nic_rx_check_error(struct net_device *dev)
 
 		/* Increment detailed receive error statistics. */
 		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
-			IPG_DEBUG_MSG("RX FIFO overrun occured.\n");
+			IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");
 
 			sp->stats.rx_fifo_errors++;
 		}
 
 		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
-			IPG_DEBUG_MSG("RX runt occured.\n");
+			IPG_DEBUG_MSG("RX runt occurred.\n");
 			sp->stats.rx_length_errors++;
 		}
 
@@ -1156,7 +1156,7 @@ static int ipg_nic_rx_check_error(struct net_device *dev)
 		 */
 
 		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
-			IPG_DEBUG_MSG("RX alignment error occured.\n");
+			IPG_DEBUG_MSG("RX alignment error occurred.\n");
 			sp->stats.rx_frame_errors++;
 		}
 
@@ -1421,12 +1421,12 @@ static int ipg_nic_rx(struct net_device *dev)
 
 			/* Increment detailed receive error statistics. */
 			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
-				IPG_DEBUG_MSG("RX FIFO overrun occured.\n");
+				IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");
 				sp->stats.rx_fifo_errors++;
 			}
 
 			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
-				IPG_DEBUG_MSG("RX runt occured.\n");
+				IPG_DEBUG_MSG("RX runt occurred.\n");
 				sp->stats.rx_length_errors++;
 			}
 
@@ -1436,7 +1436,7 @@ static int ipg_nic_rx(struct net_device *dev)
 			 */
 
 			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
-				IPG_DEBUG_MSG("RX alignment error occured.\n");
+				IPG_DEBUG_MSG("RX alignment error occurred.\n");
 				sp->stats.rx_frame_errors++;
 			}
 
@@ -1460,7 +1460,7 @@ static int ipg_nic_rx(struct net_device *dev)
 			}
 		} else {
 
-			/* Adjust the new buffer length to accomodate the size
+			/* Adjust the new buffer length to accommodate the size
 			 * of the received frame.
 			 */
 			skb_put(skb, framelen);
@@ -1488,7 +1488,7 @@ static int ipg_nic_rx(struct net_device *dev)
 	}
 
 	/*
-	 * If there are more RFDs to proces and the allocated amount of RFD
+	 * If there are more RFDs to process and the allocated amount of RFD
 	 * processing time has expired, assert Interrupt Requested to make
 	 * sure we come back to process the remaining RFDs.
 	 */
@@ -1886,7 +1886,7 @@ static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
 	/* Request TxComplete interrupts at an interval defined
 	 * by the constant IPG_FRAMESBETWEENTXCOMPLETES.
 	 * Request TxComplete interrupt for every frame
-	 * if in 10Mbps mode to accomodate problem with 10Mbps
+	 * if in 10Mbps mode to accommodate problem with 10Mbps
 	 * processing.
 	 */
 	if (sp->tenmbpsmode)
@@ -2098,7 +2098,7 @@ static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
 	struct ipg_nic_private *sp = netdev_priv(dev);
 	int err;
 
-	/* Function to accomodate changes to Maximum Transfer Unit
+	/* Function to accommodate changes to Maximum Transfer Unit
 	 * (or MTU) of IPG NIC. Cannot use default function since
 	 * the default will not allow for MTU > 1500 bytes.
 	 */
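
The comment in the last hunk explains why ipg_nic_change_mtu exists at all: the default MTU handling refuses values above 1500 bytes, so the driver validates the range itself. A minimal sketch of that pattern against the old int-returning ndo_change_mtu interface is shown below; the function name example_change_mtu and the 68/10240 bounds are illustrative assumptions, not the driver's actual code.

#include <linux/netdevice.h>
#include <linux/errno.h>

/* Sketch only: accept a jumbo-capable MTU range instead of relying on
 * the stock 68..1500 check, then record the new value on the device.
 * The bounds used here are assumptions for illustration.
 */
static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68 || new_mtu > 10240)
		return -EINVAL;

	dev->mtu = new_mtu;

	/* A real driver would typically also resize its receive buffers,
	 * for example by stopping and reopening the NIC, before returning.
	 */
	return 0;
}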