author    Dale Farnsworth <dale@farnsworth.org>  2006-03-03 12:03:36 -0500
committer Jeff Garzik <jeff@garzik.org>          2006-03-03 12:12:36 -0500
commit    7303fde88a149c4cee54dae7e46d1895fa7214b4
tree      394af13d130346d095c0830a48e3e421decf6b86 /drivers/net
parent    ff561eef9fb37c7180085e08418acfc009a9ada7
[PATCH] mv643xx_eth: Move #defines of constants to mv643xx_eth.h
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/mv643xx_eth.c  |  66
-rw-r--r--  drivers/net/mv643xx_eth.h  |  47
2 files changed, 51 insertions(+), 62 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 8a24b39f3ccb..50ee08518c6b 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -50,37 +50,6 @@
 #include <asm/delay.h>
 #include "mv643xx_eth.h"
 
-/*
- * The first part is the high level driver of the gigE ethernet ports.
- */
-
-/* Constants */
-#define VLAN_HLEN		4
-#define FCS_LEN			4
-#define DMA_ALIGN		8	/* hw requires 8-byte alignment */
-#define HW_IP_ALIGN		2	/* hw aligns IP header */
-#define WRAP			HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
-#define RX_SKB_SIZE		((dev->mtu + WRAP + 7) & ~0x7)
-
-#define ETH_RX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for receive */
-#define ETH_TX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for transmit */
-
-#define INT_UNMASK_ALL			0x0007ffff
-#define INT_UNMASK_ALL_EXT		0x0011ffff
-#define INT_MASK_ALL			0x00000000
-#define INT_MASK_ALL_EXT		0x00000000
-#define INT_CAUSE_CHECK_BITS		INT_CAUSE_UNMASK_ALL
-#define INT_CAUSE_CHECK_BITS_EXT	INT_CAUSE_UNMASK_ALL_EXT
-
-#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
-#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
-#else
-#define MAX_DESCS_PER_SKB	1
-#endif
-
-#define PHY_WAIT_ITERATIONS	1000	/* 1000 iterations * 10uS = 10mS max */
-#define PHY_WAIT_MICRO_SECONDS	10
-
 /* Static function declarations */
 static void eth_port_uc_addr_get(struct net_device *dev,
 						unsigned char *MacAddr);
@@ -182,24 +151,24 @@ static void mv643xx_eth_rx_task(void *data)
 		panic("%s: Error in test_set_bit / clear_bit", dev->name);
 
 	while (mp->rx_desc_count < (mp->rx_ring_size - 5)) {
-		skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN);
+		skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN);
 		if (!skb)
 			break;
 		mp->rx_desc_count++;
-		unaligned = (u32)skb->data & (DMA_ALIGN - 1);
+		unaligned = (u32)skb->data & (ETH_DMA_ALIGN - 1);
 		if (unaligned)
-			skb_reserve(skb, DMA_ALIGN - unaligned);
+			skb_reserve(skb, ETH_DMA_ALIGN - unaligned);
 		pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
-		pkt_info.byte_cnt = RX_SKB_SIZE;
-		pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE,
-							DMA_FROM_DEVICE);
+		pkt_info.byte_cnt = ETH_RX_SKB_SIZE;
+		pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
+					ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);
 		pkt_info.return_info = skb;
 		if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
 			printk(KERN_ERR
 				"%s: Error allocating RX Ring\n", dev->name);
 			break;
 		}
-		skb_reserve(skb, HW_IP_ALIGN);
+		skb_reserve(skb, ETH_HW_IP_ALIGN);
 	}
 	clear_bit(0, &mp->rx_task_busy);
 	/*
@@ -375,7 +344,7 @@ int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
 
 	spin_unlock_irqrestore(&mp->lock, flags);
 
-	if (cmd_sts & BIT0) {
+	if (cmd_sts & ETH_ERROR_SUMMARY) {
 		printk("%s: Error in TX\n", dev->name);
 		mp->stats.tx_errors++;
 	}
@@ -562,12 +531,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
 
 	/* Read interrupt cause registers */
 	eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
-						INT_UNMASK_ALL;
+						ETH_INT_UNMASK_ALL;
 
 	if (eth_int_cause & BIT1)
 		eth_int_cause_ext = mv_read(
 			MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
-						INT_UNMASK_ALL_EXT;
+						ETH_INT_UNMASK_ALL_EXT;
 
 #ifdef MV643XX_NAPI
 	if (!(eth_int_cause & 0x0007fffd)) {
@@ -591,7 +560,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
 		if (netif_rx_schedule_prep(dev)) {
 			/* Mask all the interrupts */
 			mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-								INT_MASK_ALL);
+							ETH_INT_MASK_ALL);
 			/* wait for previous write to complete */
 			mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
 			__netif_rx_schedule(dev);
@@ -619,6 +588,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
 #endif
 #endif
 	}
+
 	/* PHY status changed */
 	if (eth_int_cause_ext & (BIT16 | BIT20)) {
 		struct ethtool_cmd cmd;
@@ -966,10 +936,10 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 	/* Unmask phy and link status changes interrupts */
 	mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
-						INT_UNMASK_ALL_EXT);
+						ETH_INT_UNMASK_ALL_EXT);
 
 	/* Unmask RX buffer and TX end interrupt */
-	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
+	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 
 	return 0;
 
@@ -1049,7 +1019,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	unsigned int port_num = mp->port_num;
 
 	/* Mask all interrupts on ethernet port */
-	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
+	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
 	/* wait for previous write to complete */
 	mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
 
@@ -1110,7 +1080,7 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
 		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
 		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
 		mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-						INT_UNMASK_ALL);
+						ETH_INT_UNMASK_ALL);
 	}
 
 	return done ? 0 : 1;
@@ -1325,13 +1295,13 @@ static void mv643xx_netpoll(struct net_device *netdev)
 	struct mv643xx_private *mp = netdev_priv(netdev);
 	int port_num = mp->port_num;
 
-	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
+	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
 	/* wait for previous write to complete */
 	mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
 
 	mv643xx_eth_int_handler(netdev->irq, netdev, NULL);
 
-	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
+	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 }
 #endif
 
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index cade2705423c..2e59f193e267 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -74,21 +74,40 @@
 #define MV643XX_RX_COAL 100
 #endif
 
-/*
- * The second part is the low level driver of the gigE ethernet ports.
- */
+#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
+#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
+#else
+#define MAX_DESCS_PER_SKB	1
+#endif
 
-/*
- * Header File for : MV-643xx network interface header
- *
- * DESCRIPTION:
- *       This header file contains macros typedefs and function declaration for
- *       the Marvell Gig Bit Ethernet Controller.
- *
- * DEPENDENCIES:
- *       None.
- *
- */
+#define ETH_VLAN_HLEN		4
+#define ETH_FCS_LEN		4
+#define ETH_DMA_ALIGN		8	/* hw requires 8-byte alignment */
+#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */
+#define ETH_WRAPPER_LEN		(ETH_HW_IP_ALIGN + ETH_HLEN + \
+					ETH_VLAN_HLEN + ETH_FCS_LEN)
+#define ETH_RX_SKB_SIZE		((dev->mtu + ETH_WRAPPER_LEN + 7) & ~0x7)
+
+#define ETH_RX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for receive */
+#define ETH_TX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for transmit */
+
+#define ETH_INT_CAUSE_RX_DONE	(ETH_RX_QUEUES_ENABLED << 2)
+#define ETH_INT_CAUSE_RX_ERROR	(ETH_RX_QUEUES_ENABLED << 9)
+#define ETH_INT_CAUSE_RX	(ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
+#define ETH_INT_CAUSE_EXT	0x00000002
+#define ETH_INT_UNMASK_ALL	(ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)
+
+#define ETH_INT_CAUSE_TX_DONE	(ETH_TX_QUEUES_ENABLED << 0)
+#define ETH_INT_CAUSE_TX_ERROR	(ETH_TX_QUEUES_ENABLED << 8)
+#define ETH_INT_CAUSE_TX	(ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
+#define ETH_INT_CAUSE_PHY	0x00010000
+#define ETH_INT_UNMASK_ALL_EXT	(ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY)
+
+#define ETH_INT_MASK_ALL	0x00000000
+#define ETH_INT_MASK_ALL_EXT	0x00000000
+
+#define PHY_WAIT_ITERATIONS	1000	/* 1000 iterations * 10uS = 10mS max */
+#define PHY_WAIT_MICRO_SECONDS	10
 
 /* Buffer offset from buffer pointer */
 #define RX_BUF_OFFSET 0x2
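
For reference, the values the new header macros evaluate to can be checked with a small host-side sketch. The program below copies the relevant #defines from this patch into a standalone file; ETH_HLEN is hard-coded to 14 (its value in <linux/if_ether.h>) and dev->mtu is replaced by a plain mtu parameter. Both are simplifications made only for this example and are not part of the driver.

/*
 * Standalone sketch, not part of the driver: the #defines below are copied
 * from this patch so the derived values can be checked on the host.
 * Assumptions for this example only: ETH_HLEN is hard-coded to 14 and
 * dev->mtu is replaced by a plain "mtu" macro parameter.
 */
#include <stdio.h>

#define ETH_HLEN		14	/* Ethernet header length */

#define ETH_VLAN_HLEN		4
#define ETH_FCS_LEN		4
#define ETH_HW_IP_ALIGN		2
#define ETH_WRAPPER_LEN		(ETH_HW_IP_ALIGN + ETH_HLEN + \
					ETH_VLAN_HLEN + ETH_FCS_LEN)
#define ETH_RX_SKB_SIZE(mtu)	(((mtu) + ETH_WRAPPER_LEN + 7) & ~0x7)

#define ETH_RX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for receive */
#define ETH_TX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for transmit */

#define ETH_INT_CAUSE_RX_DONE	(ETH_RX_QUEUES_ENABLED << 2)
#define ETH_INT_CAUSE_RX_ERROR	(ETH_RX_QUEUES_ENABLED << 9)
#define ETH_INT_CAUSE_RX	(ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
#define ETH_INT_CAUSE_EXT	0x00000002
#define ETH_INT_UNMASK_ALL	(ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)

#define ETH_INT_CAUSE_TX_DONE	(ETH_TX_QUEUES_ENABLED << 0)
#define ETH_INT_CAUSE_TX_ERROR	(ETH_TX_QUEUES_ENABLED << 8)
#define ETH_INT_CAUSE_TX	(ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
#define ETH_INT_CAUSE_PHY	0x00010000
#define ETH_INT_UNMASK_ALL_EXT	(ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY)

int main(void)
{
	/* RX buffer for the default 1500-byte MTU: 1500 + 24 bytes of
	 * wrapper, rounded up to the next multiple of 8 -> 1528. */
	printf("ETH_RX_SKB_SIZE(1500)  = %d\n", ETH_RX_SKB_SIZE(1500));

	/* Per-queue cause bits for Q0 plus the extend/PHY summary bits. */
	printf("ETH_INT_UNMASK_ALL     = 0x%08x\n",
	       (unsigned)ETH_INT_UNMASK_ALL);
	printf("ETH_INT_UNMASK_ALL_EXT = 0x%08x\n",
	       (unsigned)ETH_INT_UNMASK_ALL_EXT);
	return 0;
}

With only queue 0 enabled this prints ETH_INT_UNMASK_ALL = 0x00000206 and ETH_INT_UNMASK_ALL_EXT = 0x00010101, i.e. the driver now unmasks exactly the Q0 done/error bits plus the extend and PHY cause bits, rather than the blanket 0x0007ffff/0x0011ffff values removed from mv643xx_eth.c above.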